make "set debug target" take effect immediately

gdb/target.c (binutils-gdb.git)
/* Select target systems and architectures at runtime for GDB.

   Copyright (C) 1990-2014 Free Software Foundation, Inc.

   Contributed by Cygnus Support.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include <errno.h>
#include <string.h>
#include "target.h"
#include "target-dcache.h"
#include "gdbcmd.h"
#include "symtab.h"
#include "inferior.h"
#include "infrun.h"
#include "bfd.h"
#include "symfile.h"
#include "objfiles.h"
#include "dcache.h"
#include <signal.h>
#include "regcache.h"
#include "gdb_assert.h"
#include "gdbcore.h"
#include "exceptions.h"
#include "target-descriptions.h"
#include "gdbthread.h"
#include "solib.h"
#include "exec.h"
#include "inline-frame.h"
#include "tracepoint.h"
#include "gdb/fileio.h"
#include "agent.h"
#include "auxv.h"
#include "target-debug.h"

static void target_info (char *, int);

static void generic_tls_error (void) ATTRIBUTE_NORETURN;

static void default_terminal_info (struct target_ops *, const char *, int);

static int default_watchpoint_addr_within_range (struct target_ops *,
                                                 CORE_ADDR, CORE_ADDR, int);

static int default_region_ok_for_hw_watchpoint (struct target_ops *,
                                                CORE_ADDR, int);

static void default_rcmd (struct target_ops *, const char *, struct ui_file *);

static ptid_t default_get_ada_task_ptid (struct target_ops *self,
                                         long lwp, long tid);

static int default_follow_fork (struct target_ops *self, int follow_child,
                                int detach_fork);

static void default_mourn_inferior (struct target_ops *self);

static int default_search_memory (struct target_ops *ops,
                                  CORE_ADDR start_addr,
                                  ULONGEST search_space_len,
                                  const gdb_byte *pattern,
                                  ULONGEST pattern_len,
                                  CORE_ADDR *found_addrp);

static int default_verify_memory (struct target_ops *self,
                                  const gdb_byte *data,
                                  CORE_ADDR memaddr, ULONGEST size);

static struct address_space *default_thread_address_space
     (struct target_ops *self, ptid_t ptid);

static void tcomplain (void) ATTRIBUTE_NORETURN;

static int return_zero (struct target_ops *);

static int return_zero_has_execution (struct target_ops *, ptid_t);

static void target_command (char *, int);

static struct target_ops *find_default_run_target (char *);

static struct gdbarch *default_thread_architecture (struct target_ops *ops,
                                                    ptid_t ptid);

static int dummy_find_memory_regions (struct target_ops *self,
                                      find_memory_region_ftype ignore1,
                                      void *ignore2);

static char *dummy_make_corefile_notes (struct target_ops *self,
                                        bfd *ignore1, int *ignore2);

static char *default_pid_to_str (struct target_ops *ops, ptid_t ptid);

static enum exec_direction_kind default_execution_direction
    (struct target_ops *self);

static CORE_ADDR default_target_decr_pc_after_break (struct target_ops *ops,
                                                     struct gdbarch *gdbarch);

static struct target_ops debug_target;

#include "target-delegates.c"

static void init_dummy_target (void);

static void update_current_target (void);

/* Pointer to array of target architecture structures; the size of the
   array; the current index into the array; the allocated size of the
   array.  */
struct target_ops **target_structs;
unsigned target_struct_size;
unsigned target_struct_allocsize;
#define DEFAULT_ALLOCSIZE 10

/* The initial current target, so that there is always a semi-valid
   current target.  */

static struct target_ops dummy_target;

/* Top of target stack.  */

static struct target_ops *target_stack;

/* The target structure we are currently using to talk to a process
   or file or whatever "inferior" we have.  */

struct target_ops current_target;

/* Command list for target.  */

static struct cmd_list_element *targetlist = NULL;

/* Nonzero if we should trust readonly sections from the
   executable when reading memory.  */

static int trust_readonly = 0;

/* Nonzero if we should show true memory content including
   memory breakpoint inserted by gdb.  */

static int show_memory_breakpoints = 0;

/* These globals control whether GDB attempts to perform these
   operations; they are useful for targets that need to prevent
   inadvertent disruption, such as in non-stop mode.  */

int may_write_registers = 1;

int may_write_memory = 1;

int may_insert_breakpoints = 1;

int may_insert_tracepoints = 1;

int may_insert_fast_tracepoints = 1;

int may_stop = 1;

/* Non-zero if we want to see trace of target level stuff.  */

static unsigned int targetdebug = 0;

static void
set_targetdebug (char *args, int from_tty, struct cmd_list_element *c)
{
  update_current_target ();
}

static void
show_targetdebug (struct ui_file *file, int from_tty,
                  struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Target debugging is %s.\n"), value);
}
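
/* Example (an illustrative sketch, not part of the upstream file):
   the set hook above is what makes "set debug target" take effect
   immediately.  The command registration, which lives in this file's
   _initialize_ routine, wires the hook in roughly like this (the doc
   string arguments are elided here):

     add_setshow_zuinteger_cmd ("target", class_maintenance,
                                &targetdebug, ...,
                                set_targetdebug, show_targetdebug,
                                &setdebuglist, &showdebuglist);

   Because set_targetdebug calls update_current_target, the debug
   delegates are re-spliced as soon as the user flips the setting,
   rather than on the next target push.  */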

static void setup_target_debug (void);

/* The user just typed 'target' without the name of a target.  */

static void
target_command (char *arg, int from_tty)
{
  fputs_filtered ("Argument required (target name).  Try `help target'\n",
                  gdb_stdout);
}

/* Default target_has_* methods for process_stratum targets.  */

int
default_child_has_all_memory (struct target_ops *ops)
{
  /* If no inferior selected, then we can't read memory here.  */
  if (ptid_equal (inferior_ptid, null_ptid))
    return 0;

  return 1;
}

int
default_child_has_memory (struct target_ops *ops)
{
  /* If no inferior selected, then we can't read memory here.  */
  if (ptid_equal (inferior_ptid, null_ptid))
    return 0;

  return 1;
}

int
default_child_has_stack (struct target_ops *ops)
{
  /* If no inferior selected, there's no stack.  */
  if (ptid_equal (inferior_ptid, null_ptid))
    return 0;

  return 1;
}

int
default_child_has_registers (struct target_ops *ops)
{
  /* Can't read registers from no inferior.  */
  if (ptid_equal (inferior_ptid, null_ptid))
    return 0;

  return 1;
}

int
default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
{
  /* If there's no thread selected, then we can't make it run through
     hoops.  */
  if (ptid_equal (the_ptid, null_ptid))
    return 0;

  return 1;
}


int
target_has_all_memory_1 (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_has_all_memory (t))
      return 1;

  return 0;
}

int
target_has_memory_1 (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_has_memory (t))
      return 1;

  return 0;
}

int
target_has_stack_1 (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_has_stack (t))
      return 1;

  return 0;
}

int
target_has_registers_1 (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_has_registers (t))
      return 1;

  return 0;
}

int
target_has_execution_1 (ptid_t the_ptid)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_has_execution (t, the_ptid))
      return 1;

  return 0;
}

int
target_has_execution_current (void)
{
  return target_has_execution_1 (inferior_ptid);
}

/* Complete initialization of T.  This ensures that various fields in
   T are set, if needed by the target implementation.  */

void
complete_target_initialization (struct target_ops *t)
{
  /* Provide default values for all "must have" methods.  */

  if (t->to_has_all_memory == NULL)
    t->to_has_all_memory = return_zero;

  if (t->to_has_memory == NULL)
    t->to_has_memory = return_zero;

  if (t->to_has_stack == NULL)
    t->to_has_stack = return_zero;

  if (t->to_has_registers == NULL)
    t->to_has_registers = return_zero;

  if (t->to_has_execution == NULL)
    t->to_has_execution = return_zero_has_execution;

  /* These methods can be called on an unpushed target and so require
     a default implementation if the target might plausibly be the
     default run target.  */
  gdb_assert (t->to_can_run == NULL || (t->to_can_async_p != NULL
                                        && t->to_supports_non_stop != NULL));

  install_delegators (t);
}
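
/* Illustrative sketch (hypothetical target, not part of this file):
   a backend only fills in the methods it really implements, plus the
   magic number; complete_target_initialization then supplies the
   "must have" defaults and installs the delegators:

     static struct target_ops my_ops;

     my_ops.to_shortname = "my-target";
     my_ops.to_longname = "An example target";
     my_ops.to_open = my_open;                (hypothetical callback)
     my_ops.to_magic = OPS_MAGIC;
     complete_target_initialization (&my_ops);

   In practice backends rarely call this directly; add_target, below,
   does it for them.  */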

/* This is used to implement the various target commands.  */

static void
open_target (char *args, int from_tty, struct cmd_list_element *command)
{
  struct target_ops *ops = get_cmd_context (command);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "-> %s->to_open (...)\n",
                        ops->to_shortname);

  ops->to_open (args, from_tty);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "<- %s->to_open (%s, %d)\n",
                        ops->to_shortname, args, from_tty);
}

/* Add possible target architecture T to the list and add a new
   command 'target T->to_shortname'.  Set COMPLETER as the command's
   completer if not NULL.  */

void
add_target_with_completer (struct target_ops *t,
                           completer_ftype *completer)
{
  struct cmd_list_element *c;

  complete_target_initialization (t);

  if (!target_structs)
    {
      target_struct_allocsize = DEFAULT_ALLOCSIZE;
      target_structs = (struct target_ops **) xmalloc
        (target_struct_allocsize * sizeof (*target_structs));
    }
  if (target_struct_size >= target_struct_allocsize)
    {
      target_struct_allocsize *= 2;
      target_structs = (struct target_ops **)
        xrealloc ((char *) target_structs,
                  target_struct_allocsize * sizeof (*target_structs));
    }
  target_structs[target_struct_size++] = t;

  if (targetlist == NULL)
    add_prefix_cmd ("target", class_run, target_command, _("\
Connect to a target machine or process.\n\
The first argument is the type or protocol of the target machine.\n\
Remaining arguments are interpreted by the target protocol.  For more\n\
information on the arguments for a particular protocol, type\n\
`help target ' followed by the protocol name."),
                    &targetlist, "target ", 0, &cmdlist);
  c = add_cmd (t->to_shortname, no_class, NULL, t->to_doc, &targetlist);
  set_cmd_sfunc (c, open_target);
  set_cmd_context (c, t);
  if (completer != NULL)
    set_cmd_completer (c, completer);
}

/* Add a possible target architecture to the list.  */

void
add_target (struct target_ops *t)
{
  add_target_with_completer (t, NULL);
}

/* See target.h.  */

void
add_deprecated_target_alias (struct target_ops *t, char *alias)
{
  struct cmd_list_element *c;
  char *alt;

  /* If we use add_alias_cmd, here, we do not get the deprecated warning,
     see PR cli/15104.  */
  c = add_cmd (alias, no_class, NULL, t->to_doc, &targetlist);
  set_cmd_sfunc (c, open_target);
  set_cmd_context (c, t);
  alt = xstrprintf ("target %s", t->to_shortname);
  deprecate_cmd (c, alt);
}
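
/* Illustrative sketch (hypothetical names, not part of this file):
   a backend registers its vector from its _initialize_ routine; the
   machinery above then creates the "target my-target" command, with
   open_target as the dispatcher:

     void
     _initialize_my_target (void)
     {
       init_my_target_ops ();              (hypothetical, fills my_ops)
       add_target (&my_ops);
       add_deprecated_target_alias (&my_ops, "my-old-name");
     }
*/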

/* Stub functions */

void
target_kill (void)
{
  current_target.to_kill (&current_target);
}

void
target_load (const char *arg, int from_tty)
{
  target_dcache_invalidate ();
  (*current_target.to_load) (&current_target, arg, from_tty);
}

void
target_terminal_inferior (void)
{
  /* A background resume (``run&'') should leave GDB in control of the
     terminal.  Use target_can_async_p, not target_is_async_p, since at
     this point the target is not async yet.  However, if sync_execution
     is not set, we know it will become async prior to resume.  */
  if (target_can_async_p () && !sync_execution)
    return;

  /* If GDB is resuming the inferior in the foreground, install
     inferior's terminal modes.  */
  (*current_target.to_terminal_inferior) (&current_target);
}

/* See target.h.  */

int
target_supports_terminal_ours (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_terminal_ours != delegate_terminal_ours
          && t->to_terminal_ours != tdefault_terminal_ours)
        return 1;
    }

  return 0;
}

static void
tcomplain (void)
{
  error (_("You can't do that when your target is `%s'"),
         current_target.to_shortname);
}

void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}

static void
default_terminal_info (struct target_ops *self, const char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}

/* A default implementation for the to_get_ada_task_ptid target method.

   This function builds the PTID by using both LWP and TID as part of
   the PTID lwp and tid elements.  The pid used is the pid of the
   inferior_ptid.  */

static ptid_t
default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
{
  return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
}

static enum exec_direction_kind
default_execution_direction (struct target_ops *self)
{
  if (!target_can_execute_reverse)
    return EXEC_FORWARD;
  else if (!target_can_async_p ())
    return EXEC_FORWARD;
  else
    gdb_assert_not_reached ("\
to_execution_direction must be implemented for reverse async");
}

/* Go through the target stack from top to bottom, copying over zero
   entries in current_target, then filling in still empty entries.  In
   effect, we are doing class inheritance through the pushed target
   vectors.

   NOTE: cagney/2003-10-17: The problem with this inheritance, as it
   is currently implemented, is that it discards any knowledge of
   which target an inherited method originally belonged to.
   Consequently, new target methods should instead explicitly and
   locally search the target stack for the target that can handle the
   request.  */

static void
update_current_target (void)
{
  struct target_ops *t;

  /* First, reset current's contents.  */
  memset (&current_target, 0, sizeof (current_target));

  /* Install the delegators.  */
  install_delegators (&current_target);

  current_target.to_stratum = target_stack->to_stratum;

#define INHERIT(FIELD, TARGET) \
      if (!current_target.FIELD) \
        current_target.FIELD = (TARGET)->FIELD

  /* Do not add any new INHERITs here.  Instead, use the delegation
     mechanism provided by make-target-delegates.  */
  for (t = target_stack; t; t = t->beneath)
    {
      INHERIT (to_shortname, t);
      INHERIT (to_longname, t);
      INHERIT (to_attach_no_wait, t);
      INHERIT (to_have_steppable_watchpoint, t);
      INHERIT (to_have_continuable_watchpoint, t);
      INHERIT (to_has_thread_control, t);
    }
#undef INHERIT

  /* Finally, position the target-stack beneath the squashed
     "current_target".  That way code looking for a non-inherited
     target method can quickly and simply find it.  */
  current_target.beneath = target_stack;

  if (targetdebug)
    setup_target_debug ();
}

/* Push a new target type into the stack of the existing target accessors,
   possibly superseding some of the existing accessors.

   Rather than allow an empty stack, we always have the dummy target at
   the bottom stratum, so we can call the function vectors without
   checking them.  */

void
push_target (struct target_ops *t)
{
  struct target_ops **cur;

  /* Check magic number.  If wrong, it probably means someone changed
     the struct definition, but not all the places that initialize one.  */
  if (t->to_magic != OPS_MAGIC)
    {
      fprintf_unfiltered (gdb_stderr,
                          "Magic number of %s target struct wrong\n",
                          t->to_shortname);
      internal_error (__FILE__, __LINE__,
                      _("failed internal consistency check"));
    }

  /* Find the proper stratum to install this target in.  */
  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
        break;
    }

  /* If there are already targets at this stratum, remove them.  */
  /* FIXME: cagney/2003-10-15: I think this should be popping all
     targets to CUR, and not just those at this stratum level.  */
  while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
    {
      /* There's already something at this stratum level.  Close it,
         and un-hook it from the stack.  */
      struct target_ops *tmp = (*cur);

      (*cur) = (*cur)->beneath;
      tmp->beneath = NULL;
      target_close (tmp);
    }

  /* We have removed all targets in our stratum, now add the new one.  */
  t->beneath = (*cur);
  (*cur) = t;

  update_current_target ();
}
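
/* Illustrative sketch (hypothetical backend, not part of this file):
   a to_open callback typically establishes its connection and then
   pushes its vector; any target already sitting at the same stratum
   is closed and replaced:

     static void
     my_open (char *args, int from_tty)
     {
       my_connect (args);             (hypothetical connection setup)
       push_target (&my_ops);         (my_ops.to_stratum set beforehand)
     }
*/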

/* Remove a target_ops vector from the stack, wherever it may be.
   Return how many times it was removed (0 or 1).  */

int
unpush_target (struct target_ops *t)
{
  struct target_ops **cur;
  struct target_ops *tmp;

  if (t->to_stratum == dummy_stratum)
    internal_error (__FILE__, __LINE__,
                    _("Attempt to unpush the dummy target"));

  /* Look for the specified target.  Note that we assume that a target
     can only occur once in the target stack.  */

  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((*cur) == t)
        break;
    }

  /* If we don't find target_ops, quit.  Only open targets should be
     closed.  */
  if ((*cur) == NULL)
    return 0;

  /* Unchain the target.  */
  tmp = (*cur);
  (*cur) = (*cur)->beneath;
  tmp->beneath = NULL;

  update_current_target ();

  /* Finally close the target.  Note we do this after unchaining, so
     any target method calls from within the target_close
     implementation don't end up in T anymore.  */
  target_close (t);

  return 1;
}

void
pop_all_targets_above (enum strata above_stratum)
{
  while ((int) (current_target.to_stratum) > (int) above_stratum)
    {
      if (!unpush_target (target_stack))
        {
          fprintf_unfiltered (gdb_stderr,
                              "pop_all_targets couldn't find target %s\n",
                              target_stack->to_shortname);
          internal_error (__FILE__, __LINE__,
                          _("failed internal consistency check"));
          break;
        }
    }
}

void
pop_all_targets (void)
{
  pop_all_targets_above (dummy_stratum);
}

/* Return 1 if T is now pushed in the target stack.  Return 0 otherwise.  */

int
target_is_pushed (struct target_ops *t)
{
  struct target_ops *cur;

  /* Check magic number.  If wrong, it probably means someone changed
     the struct definition, but not all the places that initialize one.  */
  if (t->to_magic != OPS_MAGIC)
    {
      fprintf_unfiltered (gdb_stderr,
                          "Magic number of %s target struct wrong\n",
                          t->to_shortname);
      internal_error (__FILE__, __LINE__,
                      _("failed internal consistency check"));
    }

  for (cur = target_stack; cur != NULL; cur = cur->beneath)
    if (cur == t)
      return 1;

  return 0;
}

/* Default implementation of to_get_thread_local_address.  */

static void
generic_tls_error (void)
{
  throw_error (TLS_GENERIC_ERROR,
               _("Cannot find thread-local variables on this target"));
}

/* Using the objfile specified in OBJFILE, find the address for the
   current thread's thread-local storage with offset OFFSET.  */
CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  volatile CORE_ADDR addr = 0;
  struct target_ops *target = &current_target;

  if (gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
    {
      ptid_t ptid = inferior_ptid;
      volatile struct gdb_exception ex;

      TRY_CATCH (ex, RETURN_MASK_ALL)
        {
          CORE_ADDR lm_addr;

          /* Fetch the load module address for this objfile.  */
          lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
                                                           objfile);

          addr = target->to_get_thread_local_address (target, ptid,
                                                      lm_addr, offset);
        }
      /* If an error occurred, print TLS related messages here.  Otherwise,
         throw the error to some higher catcher.  */
      if (ex.reason < 0)
        {
          int objfile_is_library = (objfile->flags & OBJF_SHARED);

          switch (ex.error)
            {
            case TLS_NO_LIBRARY_SUPPORT_ERROR:
              error (_("Cannot find thread-local variables "
                       "in this thread library."));
              break;
            case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
              if (objfile_is_library)
                error (_("Cannot find shared library `%s' in dynamic"
                         " linker's load module list"), objfile_name (objfile));
              else
                error (_("Cannot find executable file `%s' in dynamic"
                         " linker's load module list"), objfile_name (objfile));
              break;
            case TLS_NOT_ALLOCATED_YET_ERROR:
              if (objfile_is_library)
                error (_("The inferior has not yet allocated storage for"
                         " thread-local variables in\n"
                         "the shared library `%s'\n"
                         "for %s"),
                       objfile_name (objfile), target_pid_to_str (ptid));
              else
                error (_("The inferior has not yet allocated storage for"
                         " thread-local variables in\n"
                         "the executable `%s'\n"
                         "for %s"),
                       objfile_name (objfile), target_pid_to_str (ptid));
              break;
            case TLS_GENERIC_ERROR:
              if (objfile_is_library)
                error (_("Cannot find thread-local storage for %s, "
                         "shared library %s:\n%s"),
                       target_pid_to_str (ptid),
                       objfile_name (objfile), ex.message);
              else
                error (_("Cannot find thread-local storage for %s, "
                         "executable file %s:\n%s"),
                       target_pid_to_str (ptid),
                       objfile_name (objfile), ex.message);
              break;
            default:
              throw_exception (ex);
              break;
            }
        }
    }
  /* It wouldn't be wrong here to try a gdbarch method, too; finding
     TLS is an ABI-specific thing.  But we don't do that yet.  */
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}

const char *
target_xfer_status_to_string (enum target_xfer_status status)
{
#define CASE(X) case X: return #X
  switch (status)
    {
      CASE(TARGET_XFER_E_IO);
      CASE(TARGET_XFER_UNAVAILABLE);
    default:
      return "<unknown>";
    }
#undef CASE
}


#undef MIN
#define MIN(A, B) (((A) <= (B)) ? (A) : (B))

/* target_read_string -- read a null terminated string, up to LEN bytes,
   from MEMADDR in target.  Set *ERRNOP to the errno code, or 0 if successful.
   Set *STRING to a pointer to malloc'd memory containing the data; the caller
   is responsible for freeing it.  Return the number of bytes successfully
   read.  */

int
target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
{
  int tlen, offset, i;
  gdb_byte buf[4];
  int errcode = 0;
  char *buffer;
  int buffer_allocated;
  char *bufptr;
  unsigned int nbytes_read = 0;

  gdb_assert (string);

  /* Small for testing.  */
  buffer_allocated = 4;
  buffer = xmalloc (buffer_allocated);
  bufptr = buffer;

  while (len > 0)
    {
      tlen = MIN (len, 4 - (memaddr & 3));
      offset = memaddr & 3;

      errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
      if (errcode != 0)
        {
          /* The transfer request might have crossed the boundary to an
             unallocated region of memory.  Retry the transfer, requesting
             a single byte.  */
          tlen = 1;
          offset = 0;
          errcode = target_read_memory (memaddr, buf, 1);
          if (errcode != 0)
            goto done;
        }

      if (bufptr - buffer + tlen > buffer_allocated)
        {
          unsigned int bytes;

          bytes = bufptr - buffer;
          buffer_allocated *= 2;
          buffer = xrealloc (buffer, buffer_allocated);
          bufptr = buffer + bytes;
        }

      for (i = 0; i < tlen; i++)
        {
          *bufptr++ = buf[i + offset];
          if (buf[i + offset] == '\000')
            {
              nbytes_read += i + 1;
              goto done;
            }
        }

      memaddr += tlen;
      len -= tlen;
      nbytes_read += tlen;
    }
done:
  *string = buffer;
  if (errnop != NULL)
    *errnop = errcode;
  return nbytes_read;
}
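
/* Usage sketch (illustrative, not part of this file): fetch a
   NUL-terminated string from the inferior, remembering that the
   caller owns the returned buffer and that *STRING is set even on
   error:

     char *str;
     int err;
     int nread = target_read_string (addr, &str, 200, &err);

     if (err == 0)
       printf_filtered ("%s\n", str);
     xfree (str);

   ADDR stands for some inferior address already in the caller's
   hands.  */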

struct target_section_table *
target_get_section_table (struct target_ops *target)
{
  return (*target->to_get_section_table) (target);
}

/* Find a section containing ADDR.  */

struct target_section *
target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
{
  struct target_section_table *table = target_get_section_table (target);
  struct target_section *secp;

  if (table == NULL)
    return NULL;

  for (secp = table->sections; secp < table->sections_end; secp++)
    {
      if (addr >= secp->addr && addr < secp->endaddr)
        return secp;
    }
  return NULL;
}

/* Read memory from more than one valid target.  A core file, for
   instance, could have some of the memory but delegate other bits to
   the target below it.  So, we must manually try all targets.  */

static enum target_xfer_status
raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
                         const gdb_byte *writebuf, ULONGEST memaddr,
                         LONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;

  do
    {
      res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
                                  readbuf, writebuf, memaddr, len,
                                  xfered_len);
      if (res == TARGET_XFER_OK)
        break;

      /* Stop if the target reports that the memory is not available.  */
      if (res == TARGET_XFER_UNAVAILABLE)
        break;

      /* We want to continue past core files to executables, but not
         past a running target's memory.  */
      if (ops->to_has_all_memory (ops))
        break;

      ops = ops->beneath;
    }
  while (ops != NULL);

  /* The cache works at the raw memory level.  Make sure the cache
     gets updated with raw contents no matter what kind of memory
     object was originally being written.  Note we do write-through
     first, so that if it fails, we don't write to the cache contents
     that never made it to the target.  */
  if (writebuf != NULL
      && !ptid_equal (inferior_ptid, null_ptid)
      && target_dcache_init_p ()
      && (stack_cache_enabled_p () || code_cache_enabled_p ()))
    {
      DCACHE *dcache = target_dcache_get ();

      /* Note that writing to an area of memory which wasn't present
         in the cache doesn't cause it to be loaded in.  */
      dcache_update (dcache, res, memaddr, writebuf, *xfered_len);
    }

  return res;
}

/* Perform a partial memory transfer.
   For docs see target.h, to_xfer_partial.  */

static enum target_xfer_status
memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
                       gdb_byte *readbuf, const gdb_byte *writebuf,
                       ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;
  int reg_len;
  struct mem_region *region;
  struct inferior *inf;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
        {
          struct target_section_table *table
            = target_get_section_table (ops);
          const char *section_name = section->the_bfd_section->name;

          memaddr = overlay_mapped_address (memaddr, section);
          return section_table_xfer_memory_partial (readbuf, writebuf,
                                                    memaddr, len, xfered_len,
                                                    table->sections,
                                                    table->sections_end,
                                                    section_name);
        }
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      struct target_section *secp;
      struct target_section_table *table;

      secp = target_section_by_addr (ops, memaddr);
      if (secp != NULL
          && (bfd_get_section_flags (secp->the_bfd_section->owner,
                                     secp->the_bfd_section)
              & SEC_READONLY))
        {
          table = target_get_section_table (ops);
          return section_table_xfer_memory_partial (readbuf, writebuf,
                                                    memaddr, len, xfered_len,
                                                    table->sections,
                                                    table->sections_end,
                                                    NULL);
        }
    }

  /* Try GDB's internal data cache.  */
  region = lookup_mem_region (memaddr);
  /* region->hi == 0 means there's no upper bound.  */
  if (memaddr + len < region->hi || region->hi == 0)
    reg_len = len;
  else
    reg_len = region->hi - memaddr;

  switch (region->attrib.mode)
    {
    case MEM_RO:
      if (writebuf != NULL)
        return TARGET_XFER_E_IO;
      break;

    case MEM_WO:
      if (readbuf != NULL)
        return TARGET_XFER_E_IO;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
        error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      return TARGET_XFER_E_IO;
    }

  if (!ptid_equal (inferior_ptid, null_ptid))
    inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
  else
    inf = NULL;

  if (inf != NULL
      && readbuf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
         with reading from a trace buffer, because reading outside of
         the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
          || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
          || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get_or_init ();

      return dcache_read_memory_partial (ops, dcache, memaddr, readbuf,
                                         reg_len, xfered_len);
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of the memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
                                 xfered_len);

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}

/* Perform a partial memory transfer.  For docs see target.h,
   to_xfer_partial.  */

static enum target_xfer_status
memory_xfer_partial (struct target_ops *ops, enum target_object object,
                     gdb_byte *readbuf, const gdb_byte *writebuf,
                     ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;

  /* Zero length requests are ok and require no work.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
     breakpoint insns, thus hiding out from higher layers whether
     there are software breakpoints inserted in the code stream.  */
  if (readbuf != NULL)
    {
      res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
                                   xfered_len);

      if (res == TARGET_XFER_OK && !show_memory_breakpoints)
        breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, *xfered_len);
    }
  else
    {
      void *buf;
      struct cleanup *old_chain;

      /* A large write request is likely to be partially satisfied
         by memory_xfer_partial_1.  We will continually malloc
         and free a copy of the entire write request for breakpoint
         shadow handling even though we only end up writing a small
         subset of it.  Cap writes to 4KB to mitigate this.  */
      len = min (4096, len);

      buf = xmalloc (len);
      old_chain = make_cleanup (xfree, buf);
      memcpy (buf, writebuf, len);

      breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
      res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len,
                                   xfered_len);

      do_cleanups (old_chain);
    }

  return res;
}

static void
restore_show_memory_breakpoints (void *arg)
{
  show_memory_breakpoints = (uintptr_t) arg;
}

struct cleanup *
make_show_memory_breakpoints_cleanup (int show)
{
  int current = show_memory_breakpoints;

  show_memory_breakpoints = show;
  return make_cleanup (restore_show_memory_breakpoints,
                       (void *) (uintptr_t) current);
}
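
/* Usage sketch (illustrative, not part of this file): temporarily
   read memory with breakpoint instructions visible, restoring the
   previous behavior via the cleanup:

     struct cleanup *old = make_show_memory_breakpoints_cleanup (1);

     target_read_memory (addr, buf, len);   (sees raw breakpoint insns)
     do_cleanups (old);                     (restores the old setting)
*/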

/* For docs see target.h, to_xfer_partial.  */

enum target_xfer_status
target_xfer_partial (struct target_ops *ops,
                     enum target_object object, const char *annex,
                     gdb_byte *readbuf, const gdb_byte *writebuf,
                     ULONGEST offset, ULONGEST len,
                     ULONGEST *xfered_len)
{
  enum target_xfer_status retval;

  gdb_assert (ops->to_xfer_partial != NULL);

  /* Transfer is done when LEN is zero.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
           core_addr_to_string_nz (offset), plongest (len));

  *xfered_len = 0;

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
                                  writebuf, offset, len, xfered_len);
  else if (object == TARGET_OBJECT_RAW_MEMORY)
    {
      /* Request the normal memory object from other layers.  */
      retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
                                        xfered_len);
    }
  else
    retval = ops->to_xfer_partial (ops, object, annex, readbuf,
                                   writebuf, offset, len, xfered_len);

  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      fprintf_unfiltered (gdb_stdlog,
                          "%s:target_xfer_partial "
                          "(%d, %s, %s, %s, %s, %s) = %d, %s",
                          ops->to_shortname,
                          (int) object,
                          (annex ? annex : "(null)"),
                          host_address_to_string (readbuf),
                          host_address_to_string (writebuf),
                          core_addr_to_string_nz (offset),
                          pulongest (len), retval,
                          pulongest (*xfered_len));

      if (readbuf)
        myaddr = readbuf;
      if (writebuf)
        myaddr = writebuf;
      if (retval == TARGET_XFER_OK && myaddr != NULL)
        {
          int i;

          fputs_unfiltered (", bytes =", gdb_stdlog);
          for (i = 0; i < *xfered_len; i++)
            {
              if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
                {
                  if (targetdebug < 2 && i > 0)
                    {
                      fprintf_unfiltered (gdb_stdlog, " ...");
                      break;
                    }
                  fprintf_unfiltered (gdb_stdlog, "\n");
                }

              fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
            }
        }

      fputc_unfiltered ('\n', gdb_stdlog);
    }

  /* Check implementations of to_xfer_partial update *XFERED_LEN
     properly.  Do assertion after printing debug messages, so that we
     can find more clues on assertion failure from debugging messages.  */
  if (retval == TARGET_XFER_OK || retval == TARGET_XFER_UNAVAILABLE)
    gdb_assert (*xfered_len > 0);

  return retval;
}

/* Read LEN bytes of target memory at address MEMADDR, placing the
   results in GDB's memory at MYADDR.  Returns either 0 for success or
   TARGET_XFER_E_IO if any error occurs.

   If an error occurs, no guarantee is made about the contents of the data at
   MYADDR.  In particular, the caller should not depend upon partial reads
   filling the buffer with good data.  There is no way for the caller to know
   how much good data might have been transferred anyway.  Callers that can
   deal with partial reads should call target_read (which will retry until
   it makes no progress, and then return how much was transferred).  */

int
target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
{
  /* Dispatch to the topmost target, not the flattened current_target.
     Memory accesses check target->to_has_(all_)memory, and the
     flattened target doesn't inherit those.  */
  if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
                   myaddr, memaddr, len) == len)
    return 0;
  else
    return TARGET_XFER_E_IO;
}
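
/* Usage sketch (illustrative, not part of this file): read a 4-byte
   value from the inferior and decode it in target byte order:

     gdb_byte buf[4];

     if (target_read_memory (addr, buf, sizeof buf) == 0)
       {
         ULONGEST val
           = extract_unsigned_integer (buf, sizeof buf,
                                       gdbarch_byte_order (gdbarch));
         ...
       }

   ADDR and GDBARCH stand for values already in the caller's hands.  */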

/* Like target_read_memory, but specify explicitly that this is a read
   from the target's raw memory.  That is, this read bypasses the
   dcache, breakpoint shadowing, etc.  */

int
target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
{
  /* See comment in target_read_memory about why the request starts at
     current_target.beneath.  */
  if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
                   myaddr, memaddr, len) == len)
    return 0;
  else
    return TARGET_XFER_E_IO;
}

/* Like target_read_memory, but specify explicitly that this is a read from
   the target's stack.  This may trigger different cache behavior.  */

int
target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
{
  /* See comment in target_read_memory about why the request starts at
     current_target.beneath.  */
  if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
                   myaddr, memaddr, len) == len)
    return 0;
  else
    return TARGET_XFER_E_IO;
}

/* Like target_read_memory, but specify explicitly that this is a read from
   the target's code.  This may trigger different cache behavior.  */

int
target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
{
  /* See comment in target_read_memory about why the request starts at
     current_target.beneath.  */
  if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
                   myaddr, memaddr, len) == len)
    return 0;
  else
    return TARGET_XFER_E_IO;
}

/* Write LEN bytes from MYADDR to target memory at address MEMADDR.
   Returns either 0 for success or TARGET_XFER_E_IO if any
   error occurs.  If an error occurs, no guarantee is made about how
   much data got written.  Callers that can deal with partial writes
   should call target_write.  */

int
target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
{
  /* See comment in target_read_memory about why the request starts at
     current_target.beneath.  */
  if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
                    myaddr, memaddr, len) == len)
    return 0;
  else
    return TARGET_XFER_E_IO;
}

/* Write LEN bytes from MYADDR to target raw memory at address
   MEMADDR.  Returns either 0 for success or TARGET_XFER_E_IO
   if any error occurs.  If an error occurs, no guarantee is made
   about how much data got written.  Callers that can deal with
   partial writes should call target_write.  */

int
target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr,
                         ssize_t len)
{
  /* See comment in target_read_memory about why the request starts at
     current_target.beneath.  */
  if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
                    myaddr, memaddr, len) == len)
    return 0;
  else
    return TARGET_XFER_E_IO;
}

/* Fetch the target's memory map.  */

VEC(mem_region_s) *
target_memory_map (void)
{
  VEC(mem_region_s) *result;
  struct mem_region *last_one, *this_one;
  int ix;
  struct target_ops *t;

  result = current_target.to_memory_map (&current_target);
  if (result == NULL)
    return NULL;

  qsort (VEC_address (mem_region_s, result),
         VEC_length (mem_region_s, result),
         sizeof (struct mem_region), mem_region_cmp);

  /* Check that regions do not overlap.  Simultaneously assign
     a numbering for the "mem" commands to use to refer to
     each region.  */
  last_one = NULL;
  for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
    {
      this_one->number = ix;

      if (last_one && last_one->hi > this_one->lo)
        {
          warning (_("Overlapping regions in memory map: ignoring"));
          VEC_free (mem_region_s, result);
          return NULL;
        }
      last_one = this_one;
    }

  return result;
}

void
target_flash_erase (ULONGEST address, LONGEST length)
{
  current_target.to_flash_erase (&current_target, address, length);
}

void
target_flash_done (void)
{
  current_target.to_flash_done (&current_target);
}

static void
show_trust_readonly (struct ui_file *file, int from_tty,
                     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
                    _("Mode for reading from readonly sections is %s.\n"),
                    value);
}

/* Target vector read/write partial wrapper functions.  */

static enum target_xfer_status
target_read_partial (struct target_ops *ops,
                     enum target_object object,
                     const char *annex, gdb_byte *buf,
                     ULONGEST offset, ULONGEST len,
                     ULONGEST *xfered_len)
{
  return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
                              xfered_len);
}

static enum target_xfer_status
target_write_partial (struct target_ops *ops,
                      enum target_object object,
                      const char *annex, const gdb_byte *buf,
                      ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
{
  return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
                              xfered_len);
}

/* Wrappers to perform the full transfer.  */

/* For docs on target_read see target.h.  */

LONGEST
target_read (struct target_ops *ops,
             enum target_object object,
             const char *annex, gdb_byte *buf,
             ULONGEST offset, LONGEST len)
{
  LONGEST xfered = 0;

  while (xfered < len)
    {
      ULONGEST xfered_len;
      enum target_xfer_status status;

      status = target_read_partial (ops, object, annex,
                                    (gdb_byte *) buf + xfered,
                                    offset + xfered, len - xfered,
                                    &xfered_len);

      /* Call an observer, notifying them of the xfer progress?  */
      if (status == TARGET_XFER_EOF)
        return xfered;
      else if (status == TARGET_XFER_OK)
        {
          xfered += xfered_len;
          QUIT;
        }
      else
        return -1;

    }
  return len;
}
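
/* Usage sketch (illustrative, not part of this file): target_read
   loops over target_read_partial until the full LEN is transferred,
   so callers can request an object in one shot:

     gdb_byte buf[512];
     LONGEST n = target_read (&current_target, TARGET_OBJECT_AUXV,
                              NULL, buf, 0, sizeof buf);

     if (n < 0)
       error (_("auxv read failed"));

   TARGET_OBJECT_AUXV is a real object kind; the buffer size here is
   arbitrary.  */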

/* Assuming that the entire [begin, end) range of memory cannot be
   read, try to read whatever subrange is possible to read.

   The function returns, in RESULT, either zero or one memory block.
   If there's a readable subrange at the beginning, it is completely
   read and returned.  Any further readable subrange will not be read.
   Otherwise, if there's a readable subrange at the end, it will be
   completely read and returned.  Any readable subranges before it
   (obviously, not starting at the beginning), will be ignored.  In
   other cases -- either no readable subrange, or readable subrange(s)
   that are neither at the beginning nor the end -- nothing is returned.

   The purpose of this function is to handle a read across a boundary
   of accessible memory in a case when memory map is not available.
   The above restrictions are fine for this case, but will give
   incorrect results if the memory is 'patchy'.  However, supporting
   'patchy' memory would require trying to read every single byte,
   and that seems an unacceptable solution.  An explicit memory map is
   recommended for this case -- and target_read_memory_robust will
   take care of reading multiple ranges then.  */

static void
read_whatever_is_readable (struct target_ops *ops,
                           ULONGEST begin, ULONGEST end,
                           VEC(memory_read_result_s) **result)
{
  gdb_byte *buf = xmalloc (end - begin);
  ULONGEST current_begin = begin;
  ULONGEST current_end = end;
  int forward;
  memory_read_result_s r;
  ULONGEST xfered_len;

  /* If we previously failed to read 1 byte, nothing can be done here.  */
  if (end - begin <= 1)
    {
      xfree (buf);
      return;
    }

  /* Check that either first or the last byte is readable, and give up
     if not.  This heuristic is meant to permit reading accessible memory
     at the boundary of accessible region.  */
  if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
                           buf, begin, 1, &xfered_len) == TARGET_XFER_OK)
    {
      forward = 1;
      ++current_begin;
    }
  else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
                                buf + (end - begin) - 1, end - 1, 1,
                                &xfered_len) == TARGET_XFER_OK)
    {
      forward = 0;
      --current_end;
    }
  else
    {
      xfree (buf);
      return;
    }

  /* Loop invariant is that the [current_begin, current_end) was previously
     found to be not readable as a whole.

     Note loop condition -- if the range has 1 byte, we can't divide the range
     so there's no point trying further.  */
  while (current_end - current_begin > 1)
    {
      ULONGEST first_half_begin, first_half_end;
      ULONGEST second_half_begin, second_half_end;
      LONGEST xfer;
      ULONGEST middle = current_begin + (current_end - current_begin) / 2;

      if (forward)
        {
          first_half_begin = current_begin;
          first_half_end = middle;
          second_half_begin = middle;
          second_half_end = current_end;
        }
      else
        {
          first_half_begin = middle;
          first_half_end = current_end;
          second_half_begin = current_begin;
          second_half_end = middle;
        }

      xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
                          buf + (first_half_begin - begin),
                          first_half_begin,
                          first_half_end - first_half_begin);

      if (xfer == first_half_end - first_half_begin)
        {
          /* This half reads up fine.  So, the error must be in the
             other half.  */
          current_begin = second_half_begin;
          current_end = second_half_end;
        }
      else
        {
          /* This half is not readable.  Because we've tried one byte, we
             know some part of this half is actually readable.  Go to the
             next iteration to divide again and try to read.

             We don't handle the other half, because this function only tries
             to read a single readable subrange.  */
          current_begin = first_half_begin;
          current_end = first_half_end;
        }
    }

  if (forward)
    {
      /* The [begin, current_begin) range has been read.  */
      r.begin = begin;
      r.end = current_begin;
      r.data = buf;
    }
  else
    {
      /* The [current_end, end) range has been read.  */
      LONGEST rlen = end - current_end;

      r.data = xmalloc (rlen);
      memcpy (r.data, buf + current_end - begin, rlen);
      r.begin = current_end;
      r.end = end;
      xfree (buf);
    }
  VEC_safe_push (memory_read_result_s, (*result), &r);
}

void
free_memory_read_result_vector (void *x)
{
  VEC(memory_read_result_s) *v = x;
  memory_read_result_s *current;
  int ix;

  for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
    {
      xfree (current->data);
    }
  VEC_free (memory_read_result_s, v);
}

VEC(memory_read_result_s) *
read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
{
  VEC(memory_read_result_s) *result = 0;

  LONGEST xfered = 0;
  while (xfered < len)
    {
      struct mem_region *region = lookup_mem_region (offset + xfered);
      LONGEST rlen;

      /* If there is no explicit region, a fake one should be created.  */
      gdb_assert (region);

      if (region->hi == 0)
        rlen = len - xfered;
      else
        rlen = region->hi - offset;

      if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
        {
          /* Cannot read this region.  Note that we can end up here only
             if the region is explicitly marked inaccessible, or
             'inaccessible-by-default' is in effect.  */
          xfered += rlen;
        }
      else
        {
          LONGEST to_read = min (len - xfered, rlen);
          gdb_byte *buffer = (gdb_byte *) xmalloc (to_read);

          LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
                                      (gdb_byte *) buffer,
                                      offset + xfered, to_read);
          /* Call an observer, notifying them of the xfer progress?  */
          if (xfer <= 0)
            {
              /* Got an error reading full chunk.  See if maybe we can read
                 some subrange.  */
              xfree (buffer);
              read_whatever_is_readable (ops, offset + xfered,
                                         offset + xfered + to_read, &result);
              xfered += to_read;
            }
          else
            {
              struct memory_read_result r;
              r.data = buffer;
              r.begin = offset + xfered;
              r.end = r.begin + xfer;
              VEC_safe_push (memory_read_result_s, result, &r);
              xfered += xfer;
            }
          QUIT;
        }
    }
  return result;
}
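
/* Usage sketch (illustrative, not part of this file): walk the blocks
   that read_memory_robust managed to read, then free the whole vector
   through the cleanup helper above:

     VEC(memory_read_result_s) *v = read_memory_robust (ops, addr, len);
     struct cleanup *old = make_cleanup (free_memory_read_result_vector, v);
     memory_read_result_s *m;
     int ix;

     for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, m); ++ix)
       process_block (m->begin, m->end, m->data);  (hypothetical consumer)
     do_cleanups (old);
*/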


/* An alternative to target_write with progress callbacks.  */

LONGEST
target_write_with_progress (struct target_ops *ops,
                            enum target_object object,
                            const char *annex, const gdb_byte *buf,
                            ULONGEST offset, LONGEST len,
                            void (*progress) (ULONGEST, void *), void *baton)
{
  LONGEST xfered = 0;

  /* Give the progress callback a chance to set up.  */
  if (progress)
    (*progress) (0, baton);

  while (xfered < len)
    {
      ULONGEST xfered_len;
      enum target_xfer_status status;

      status = target_write_partial (ops, object, annex,
                                     (gdb_byte *) buf + xfered,
                                     offset + xfered, len - xfered,
                                     &xfered_len);

      if (status != TARGET_XFER_OK)
        return status == TARGET_XFER_EOF ? xfered : -1;

      if (progress)
        (*progress) (xfered_len, baton);

      xfered += xfered_len;
      QUIT;
    }
  return len;
}

/* For docs on target_write see target.h.  */

LONGEST
target_write (struct target_ops *ops,
              enum target_object object,
              const char *annex, const gdb_byte *buf,
              ULONGEST offset, LONGEST len)
{
  return target_write_with_progress (ops, object, annex, buf, offset, len,
                                     NULL, NULL);
}

/* Read OBJECT/ANNEX using OPS.  Store the result in *BUF_P and return
   the size of the transferred data.  PADDING additional bytes are
   available in *BUF_P.  This is a helper function for
   target_read_alloc; see the declaration of that function for more
   information.  */

static LONGEST
target_read_alloc_1 (struct target_ops *ops, enum target_object object,
                     const char *annex, gdb_byte **buf_p, int padding)
{
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;

  /* This function does not have a length parameter; it reads the
     entire OBJECT.  Also, it doesn't support objects fetched partly
     from one target and partly from another (in a different stratum,
     e.g. a core file and an executable).  Both reasons make it
     unsuitable for reading memory.  */
  gdb_assert (object != TARGET_OBJECT_MEMORY);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      ULONGEST xfered_len;
      enum target_xfer_status status;

      status = target_read_partial (ops, object, annex, &buf[buf_pos],
                                    buf_pos, buf_alloc - buf_pos - padding,
                                    &xfered_len);

      if (status == TARGET_XFER_EOF)
        {
          /* Read all there was.  */
          if (buf_pos == 0)
            xfree (buf);
          else
            *buf_p = buf;
          return buf_pos;
        }
      else if (status != TARGET_XFER_OK)
        {
          /* An error occurred.  */
          xfree (buf);
          return TARGET_XFER_E_IO;
        }

      buf_pos += xfered_len;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
        {
          buf_alloc *= 2;
          buf = xrealloc (buf, buf_alloc);
        }

      QUIT;
    }
}

/* Read OBJECT/ANNEX using OPS.  Store the result in *BUF_P and return
   the size of the transferred data.  See the declaration in "target.h"
   for more information about the return value.  */

LONGEST
target_read_alloc (struct target_ops *ops, enum target_object object,
                   const char *annex, gdb_byte **buf_p)
{
  return target_read_alloc_1 (ops, object, annex, buf_p, 0);
}

/* Read OBJECT/ANNEX using OPS.  The result is NUL-terminated and
   returned as a string, allocated using xmalloc.  If an error occurs
   or the transfer is unsupported, NULL is returned.  Empty objects
   are returned as allocated but empty strings.  A warning is issued
   if the result contains any embedded NUL bytes.  */

char *
target_read_stralloc (struct target_ops *ops, enum target_object object,
                      const char *annex)
{
  gdb_byte *buffer;
  char *bufstr;
  LONGEST i, transferred;

  transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
  bufstr = (char *) buffer;

  if (transferred < 0)
    return NULL;

  if (transferred == 0)
    return xstrdup ("");

  bufstr[transferred] = 0;

  /* Check for embedded NUL bytes; but allow trailing NULs.  */
  for (i = strlen (bufstr); i < transferred; i++)
    if (bufstr[i] != 0)
      {
        warning (_("target object %d, annex %s, "
                   "contained unexpected null characters"),
                 (int) object, annex ? annex : "(none)");
        break;
      }

  return bufstr;
}
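
/* Usage sketch (illustrative, not part of this file): fetch a
   string-shaped object; the caller owns the result:

     char *text = target_read_stralloc (&current_target,
                                        TARGET_OBJECT_OSDATA, "processes");

     if (text != NULL)
       {
         struct cleanup *old = make_cleanup (xfree, text);

         ...parse TEXT...
         do_cleanups (old);
       }

   TARGET_OBJECT_OSDATA with the "processes" annex is a real example
   of how this interface is used elsewhere in GDB.  */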

/* Memory transfer methods.  */

void
get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
                   LONGEST len)
{
  /* This method is used to read from an alternate, non-current
     target.  This read must bypass the overlay support (as symbols
     don't match this target), and GDB's internal cache (wrong cache
     for this target).  */
  if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
      != len)
    memory_error (TARGET_XFER_E_IO, addr);
}

ULONGEST
get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
                            int len, enum bfd_endian byte_order)
{
  gdb_byte buf[sizeof (ULONGEST)];

  gdb_assert (len <= sizeof (buf));
  get_target_memory (ops, addr, buf, len);
  return extract_unsigned_integer (buf, len, byte_order);
}
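
/* Usage sketch (illustrative, not part of this file): read a
   pointer-sized value from a specific target, bypassing caches and
   overlay translation:

     enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
     ULONGEST ptr = get_target_memory_unsigned (ops, addr, 8, byte_order);

   The width 8 assumes a 64-bit pointer; real callers derive it from
   the architecture.  */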
1887
1888 /* See target.h. */
1889
1890 int
1891 target_insert_breakpoint (struct gdbarch *gdbarch,
1892 struct bp_target_info *bp_tgt)
1893 {
1894 if (!may_insert_breakpoints)
1895 {
1896 warning (_("May not insert breakpoints"));
1897 return 1;
1898 }
1899
1900 return current_target.to_insert_breakpoint (&current_target,
1901 gdbarch, bp_tgt);
1902 }
1903
1904 /* See target.h. */
1905
1906 int
1907 target_remove_breakpoint (struct gdbarch *gdbarch,
1908 struct bp_target_info *bp_tgt)
1909 {
1910 /* This is kind of a weird case to handle, but the permission might
1911 have been changed after breakpoints were inserted - in which case
1912 we should just take the user literally and assume that any
1913 breakpoints should be left in place. */
1914 if (!may_insert_breakpoints)
1915 {
1916 warning (_("May not remove breakpoints"));
1917 return 1;
1918 }
1919
1920 return current_target.to_remove_breakpoint (&current_target,
1921 gdbarch, bp_tgt);
1922 }
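
/* A hypothetical usage sketch (not compiled in), loosely modeled on
what breakpoint.c does: fill in a bp_target_info with the requested
address, insert the breakpoint, and remove it later. ADDR is an
assumed example value. */
#if 0
struct bp_target_info bp_tgt;

memset (&bp_tgt, 0, sizeof (bp_tgt));
bp_tgt.placed_address = addr;
if (target_insert_breakpoint (target_gdbarch (), &bp_tgt) != 0)
warning (_("Breakpoint insertion failed."));
/* ... let the inferior run ... */
target_remove_breakpoint (target_gdbarch (), &bp_tgt);
#endif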
1923
1924 static void
1925 target_info (char *args, int from_tty)
1926 {
1927 struct target_ops *t;
1928 int has_all_mem = 0;
1929
1930 if (symfile_objfile != NULL)
1931 printf_unfiltered (_("Symbols from \"%s\".\n"),
1932 objfile_name (symfile_objfile));
1933
1934 for (t = target_stack; t != NULL; t = t->beneath)
1935 {
1936 if (!(*t->to_has_memory) (t))
1937 continue;
1938
1939 if ((int) (t->to_stratum) <= (int) dummy_stratum)
1940 continue;
1941 if (has_all_mem)
1942 printf_unfiltered (_("\tWhile running this, "
1943 "GDB does not access memory from...\n"));
1944 printf_unfiltered ("%s:\n", t->to_longname);
1945 (t->to_files_info) (t);
1946 has_all_mem = (*t->to_has_all_memory) (t);
1947 }
1948 }
1949
1950 /* This function is called before any new inferior is created, e.g.
1951 by running a program, attaching, or connecting to a target.
1952 It cleans up any state from previous invocations which might
1953 change between runs. This is a subset of what target_preopen
1954 resets (things which might change between targets). */
1955
1956 void
1957 target_pre_inferior (int from_tty)
1958 {
1959 /* Clear out solib state. Otherwise the solib state of the previous
1960 inferior might have survived and is entirely wrong for the new
1961 target. This has been observed on GNU/Linux using glibc 2.3. How
1962 to reproduce:
1963
1964 bash$ ./foo&
1965 [1] 4711
1966 bash$ ./foo&
1967 [2] 4712
1968 bash$ gdb ./foo
1969 [...]
1970 (gdb) attach 4711
1971 (gdb) detach
1972 (gdb) attach 4712
1973 Cannot access memory at address 0xdeadbeef
1974 */
1975
1976 /* In some OSs, the shared library list is the same/global/shared
1977 across inferiors. If code is shared between processes, so are
1978 memory regions and features. */
1979 if (!gdbarch_has_global_solist (target_gdbarch ()))
1980 {
1981 no_shared_libraries (NULL, from_tty);
1982
1983 invalidate_target_mem_regions ();
1984
1985 target_clear_description ();
1986 }
1987
1988 agent_capability_invalidate ();
1989 }
1990
1991 /* Callback for iterate_over_inferiors. Gets rid of the given
1992 inferior. */
1993
1994 static int
1995 dispose_inferior (struct inferior *inf, void *args)
1996 {
1997 struct thread_info *thread;
1998
1999 thread = any_thread_of_process (inf->pid);
2000 if (thread)
2001 {
2002 switch_to_thread (thread->ptid);
2003
2004 /* Core inferiors actually should be detached, not killed. */
2005 if (target_has_execution)
2006 target_kill ();
2007 else
2008 target_detach (NULL, 0);
2009 }
2010
2011 return 0;
2012 }
2013
2014 /* This is to be called by the open routine before it does
2015 anything. */
2016
2017 void
2018 target_preopen (int from_tty)
2019 {
2020 dont_repeat ();
2021
2022 if (have_inferiors ())
2023 {
2024 if (!from_tty
2025 || !have_live_inferiors ()
2026 || query (_("A program is being debugged already. Kill it? ")))
2027 iterate_over_inferiors (dispose_inferior, NULL);
2028 else
2029 error (_("Program not killed."));
2030 }
2031
2032 /* Calling target_kill may remove the target from the stack. But if
2033 it doesn't (which seems like a win for UDI), remove it now. */
2034 /* Leave the exec target, though. The user may be switching from a
2035 live process to a core of the same program. */
2036 pop_all_targets_above (file_stratum);
2037
2038 target_pre_inferior (from_tty);
2039 }
2040
2041 /* Detach a target after doing deferred register stores. */
2042
2043 void
2044 target_detach (const char *args, int from_tty)
2045 {
2048 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2049 /* Don't remove global breakpoints here. They're removed on
2050 disconnection from the target. */
2051 ;
2052 else
2053 /* If we're in breakpoints-always-inserted mode, have to remove
2054 them before detaching. */
2055 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2056
2057 prepare_for_detach ();
2058
2059 current_target.to_detach (&current_target, args, from_tty);
2060 }
2061
2062 void
2063 target_disconnect (const char *args, int from_tty)
2064 {
2065 /* If we're in breakpoints-always-inserted mode or if breakpoints
2066 are global across processes, we have to remove them before
2067 disconnecting. */
2068 remove_breakpoints ();
2069
2070 current_target.to_disconnect (&current_target, args, from_tty);
2071 }
2072
2073 ptid_t
2074 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2075 {
2076 return (current_target.to_wait) (&current_target, ptid, status, options);
2077 }
2078
2079 char *
2080 target_pid_to_str (ptid_t ptid)
2081 {
2082 return (*current_target.to_pid_to_str) (&current_target, ptid);
2083 }
2084
2085 char *
2086 target_thread_name (struct thread_info *info)
2087 {
2088 return current_target.to_thread_name (&current_target, info);
2089 }
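
/* A hypothetical usage sketch (not compiled in): wait for any thread
to report an event and print which thread stopped. */
#if 0
struct target_waitstatus ws;
ptid_t event_ptid = target_wait (minus_one_ptid, &ws, 0);

if (ws.kind == TARGET_WAITKIND_STOPPED)
printf_unfiltered ("%s stopped with %s\n",
target_pid_to_str (event_ptid),
gdb_signal_to_name (ws.value.sig));
#endif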
2090
2091 void
2092 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2093 {
2096 target_dcache_invalidate ();
2097
2098 current_target.to_resume (&current_target, ptid, step, signal);
2099
2100 registers_changed_ptid (ptid);
2101 /* We only set the internal executing state here. The user/frontend
2102 running state is set at a higher level. */
2103 set_executing (ptid, 1);
2104 clear_inline_frame_state (ptid);
2105 }
2106
2107 void
2108 target_pass_signals (int numsigs, unsigned char *pass_signals)
2109 {
2110 (*current_target.to_pass_signals) (&current_target, numsigs, pass_signals);
2111 }
2112
2113 void
2114 target_program_signals (int numsigs, unsigned char *program_signals)
2115 {
2116 (*current_target.to_program_signals) (&current_target,
2117 numsigs, program_signals);
2118 }
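
/* A hypothetical usage sketch (not compiled in): tell the target to
pass SIGALRM straight to the inferior without reporting a stop.
The array is indexed by GDB signal number. */
#if 0
unsigned char pass[GDB_SIGNAL_LAST];

memset (pass, 0, sizeof (pass));
pass[GDB_SIGNAL_ALRM] = 1;
target_pass_signals ((int) GDB_SIGNAL_LAST, pass);
#endif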
2119
2120 static int
2121 default_follow_fork (struct target_ops *self, int follow_child,
2122 int detach_fork)
2123 {
2124 /* Some target returned a fork event, but did not know how to follow it. */
2125 internal_error (__FILE__, __LINE__,
2126 _("could not find a target to follow fork"));
2127 }
2128
2129 /* Look through the list of possible targets for a target that can
2130 follow forks. */
2131
2132 int
2133 target_follow_fork (int follow_child, int detach_fork)
2134 {
2135 return current_target.to_follow_fork (&current_target,
2136 follow_child, detach_fork);
2137 }
2138
2139 static void
2140 default_mourn_inferior (struct target_ops *self)
2141 {
2142 internal_error (__FILE__, __LINE__,
2143 _("could not find a target to follow mourn inferior"));
2144 }
2145
2146 void
2147 target_mourn_inferior (void)
2148 {
2149 current_target.to_mourn_inferior (&current_target);
2150
2151 /* We no longer need to keep handles on any of the object files.
2152 Make sure to release them to avoid unnecessarily locking any
2153 of them while we're not actually debugging. */
2154 bfd_cache_close_all ();
2155 }
2156
2157 /* Look for a target which can describe architectural features, starting
2158 from TARGET. If we find one, return its description. */
2159
2160 const struct target_desc *
2161 target_read_description (struct target_ops *target)
2162 {
2163 return target->to_read_description (target);
2164 }
2165
2166 /* This implements a basic search of memory, reading target memory and
2167 performing the search here (as opposed to performing the search on the
2168 target side with, for example, gdbserver). */
2169
2170 int
2171 simple_search_memory (struct target_ops *ops,
2172 CORE_ADDR start_addr, ULONGEST search_space_len,
2173 const gdb_byte *pattern, ULONGEST pattern_len,
2174 CORE_ADDR *found_addrp)
2175 {
2176 /* NOTE: also defined in find.c testcase. */
2177 #define SEARCH_CHUNK_SIZE 16000
2178 const unsigned chunk_size = SEARCH_CHUNK_SIZE;
2179 /* Buffer to hold memory contents for searching. */
2180 gdb_byte *search_buf;
2181 unsigned search_buf_size;
2182 struct cleanup *old_cleanups;
2183
2184 search_buf_size = chunk_size + pattern_len - 1;
2185
2186 /* No point in trying to allocate a buffer larger than the search space. */
2187 if (search_space_len < search_buf_size)
2188 search_buf_size = search_space_len;
2189
2190 search_buf = malloc (search_buf_size);
2191 if (search_buf == NULL)
2192 error (_("Unable to allocate memory to perform the search."));
2193 old_cleanups = make_cleanup (free_current_contents, &search_buf);
2194
2195 /* Prime the search buffer. */
2196
2197 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2198 search_buf, start_addr, search_buf_size) != search_buf_size)
2199 {
2200 warning (_("Unable to access %s bytes of target "
2201 "memory at %s, halting search."),
2202 pulongest (search_buf_size), hex_string (start_addr));
2203 do_cleanups (old_cleanups);
2204 return -1;
2205 }
2206
2207 /* Perform the search.
2208
2209 The loop is kept simple by allocating [N + pattern-length - 1] bytes.
2210 When we've scanned N bytes we copy the trailing bytes to the start and
2211 read in another N bytes. */
2212
2213 while (search_space_len >= pattern_len)
2214 {
2215 gdb_byte *found_ptr;
2216 unsigned nr_search_bytes = min (search_space_len, search_buf_size);
2217
2218 found_ptr = memmem (search_buf, nr_search_bytes,
2219 pattern, pattern_len);
2220
2221 if (found_ptr != NULL)
2222 {
2223 CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);
2224
2225 *found_addrp = found_addr;
2226 do_cleanups (old_cleanups);
2227 return 1;
2228 }
2229
2230 /* Not found in this chunk, skip to next chunk. */
2231
2232 /* Don't let search_space_len wrap here, it's unsigned. */
2233 if (search_space_len >= chunk_size)
2234 search_space_len -= chunk_size;
2235 else
2236 search_space_len = 0;
2237
2238 if (search_space_len >= pattern_len)
2239 {
2240 unsigned keep_len = search_buf_size - chunk_size;
2241 CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
2242 int nr_to_read;
2243
2244 /* Copy the trailing part of the previous iteration to the front
2245 of the buffer for the next iteration. */
2246 gdb_assert (keep_len == pattern_len - 1);
2247 memcpy (search_buf, search_buf + chunk_size, keep_len);
2248
2249 nr_to_read = min (search_space_len - keep_len, chunk_size);
2250
2251 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2252 search_buf + keep_len, read_addr,
2253 nr_to_read) != nr_to_read)
2254 {
2255 warning (_("Unable to access %s bytes of target "
2256 "memory at %s, halting search."),
2257 plongest (nr_to_read),
2258 hex_string (read_addr));
2259 do_cleanups (old_cleanups);
2260 return -1;
2261 }
2262
2263 start_addr += chunk_size;
2264 }
2265 }
2266
2267 /* Not found. */
2268
2269 do_cleanups (old_cleanups);
2270 return 0;
2271 }
2272
2273 /* Default implementation of memory-searching. */
2274
2275 static int
2276 default_search_memory (struct target_ops *self,
2277 CORE_ADDR start_addr, ULONGEST search_space_len,
2278 const gdb_byte *pattern, ULONGEST pattern_len,
2279 CORE_ADDR *found_addrp)
2280 {
2281 /* Start over from the top of the target stack. */
2282 return simple_search_memory (current_target.beneath,
2283 start_addr, search_space_len,
2284 pattern, pattern_len, found_addrp);
2285 }
2286
2287 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2288 sequence of bytes in PATTERN with length PATTERN_LEN.
2289
2290 The result is 1 if found, 0 if not found, and -1 if there was an error
2291 requiring halting of the search (e.g. memory read error).
2292 If the pattern is found the address is recorded in FOUND_ADDRP. */
2293
2294 int
2295 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2296 const gdb_byte *pattern, ULONGEST pattern_len,
2297 CORE_ADDR *found_addrp)
2298 {
2299 return current_target.to_search_memory (&current_target, start_addr,
2300 search_space_len,
2301 pattern, pattern_len, found_addrp);
2302 }
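
/* A hypothetical usage sketch (not compiled in): scan one megabyte
starting at START for a little-endian 0xdeadbeef magic value.
START is an assumed example address. */
#if 0
static const gdb_byte magic[] = { 0xef, 0xbe, 0xad, 0xde };
CORE_ADDR found;

switch (target_search_memory (start, 0x100000,
magic, sizeof (magic), &found))
{
case 1:
printf_unfiltered ("found at %s\n", hex_string (found));
break;
case 0:
printf_unfiltered ("not found\n");
break;
default:
error (_("Search failed."));
}
#endif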
2303
2304 /* Look through the currently pushed targets. If none of them will
2305 be able to restart the currently running process, issue an error
2306 message. */
2307
2308 void
2309 target_require_runnable (void)
2310 {
2311 struct target_ops *t;
2312
2313 for (t = target_stack; t != NULL; t = t->beneath)
2314 {
2315 /* If this target knows how to create a new program, then
2316 assume we will still be able to after killing the current
2317 one. Either killing and mourning will not pop T, or else
2318 find_default_run_target will find it again. */
2319 if (t->to_create_inferior != NULL)
2320 return;
2321
2322 /* Do not worry about targets at certain strata that cannot
2323 create inferiors. Assume they will be pushed again if
2324 necessary, and continue to the process_stratum. */
2325 if (t->to_stratum == thread_stratum
2326 || t->to_stratum == record_stratum
2327 || t->to_stratum == arch_stratum)
2328 continue;
2329
2330 error (_("The \"%s\" target does not support \"run\". "
2331 "Try \"help target\" or \"continue\"."),
2332 t->to_shortname);
2333 }
2334
2335 /* This function is only called if the target is running. In that
2336 case there should have been a process_stratum target and it
2337 should either know how to create inferiors, or not... */
2338 internal_error (__FILE__, __LINE__, _("No targets found"));
2339 }
2340
2341 /* Whether GDB is allowed to fall back to the default run target for
2342 "run", "attach", etc. when no target is connected yet. */
2343 static int auto_connect_native_target = 1;
2344
2345 static void
2346 show_auto_connect_native_target (struct ui_file *file, int from_tty,
2347 struct cmd_list_element *c, const char *value)
2348 {
2349 fprintf_filtered (file,
2350 _("Whether GDB may automatically connect to the "
2351 "native target is %s.\n"),
2352 value);
2353 }
2354
2355 /* Look through the list of possible targets for a target that can
2356 execute a run or attach command without any other data. This is
2357 used to locate the default process stratum.
2358
2359 If DO_MESG is not NULL, the result is always valid (error() is
2360 called for errors); else, return NULL on error. */
2361
2362 static struct target_ops *
2363 find_default_run_target (char *do_mesg)
2364 {
2365 struct target_ops *runnable = NULL;
2366
2367 if (auto_connect_native_target)
2368 {
2369 struct target_ops **t;
2370 int count = 0;
2371
2372 for (t = target_structs; t < target_structs + target_struct_size;
2373 ++t)
2374 {
2375 if ((*t)->to_can_run != delegate_can_run && target_can_run (*t))
2376 {
2377 runnable = *t;
2378 ++count;
2379 }
2380 }
2381
2382 if (count != 1)
2383 runnable = NULL;
2384 }
2385
2386 if (runnable == NULL)
2387 {
2388 if (do_mesg)
2389 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2390 else
2391 return NULL;
2392 }
2393
2394 return runnable;
2395 }
2396
2397 /* See target.h. */
2398
2399 struct target_ops *
2400 find_attach_target (void)
2401 {
2402 struct target_ops *t;
2403
2404 /* If a target on the current stack can attach, use it. */
2405 for (t = current_target.beneath; t != NULL; t = t->beneath)
2406 {
2407 if (t->to_attach != NULL)
2408 break;
2409 }
2410
2411 /* Otherwise, use the default run target for attaching. */
2412 if (t == NULL)
2413 t = find_default_run_target ("attach");
2414
2415 return t;
2416 }
2417
2418 /* See target.h. */
2419
2420 struct target_ops *
2421 find_run_target (void)
2422 {
2423 struct target_ops *t;
2424
2425 /* If a target on the current stack can create an inferior, use it. */
2426 for (t = current_target.beneath; t != NULL; t = t->beneath)
2427 {
2428 if (t->to_create_inferior != NULL)
2429 break;
2430 }
2431
2432 /* Otherwise, use the default run target. */
2433 if (t == NULL)
2434 t = find_default_run_target ("run");
2435
2436 return t;
2437 }
2438
2439 /* Implement the "info proc" command. */
2440
2441 int
2442 target_info_proc (const char *args, enum info_proc_what what)
2443 {
2444 struct target_ops *t;
2445
2446 /* If we're already connected to something that can get us OS
2447 related data, use it. Otherwise, try using the native
2448 target. */
2449 if (current_target.to_stratum >= process_stratum)
2450 t = current_target.beneath;
2451 else
2452 t = find_default_run_target (NULL);
2453
2454 for (; t != NULL; t = t->beneath)
2455 {
2456 if (t->to_info_proc != NULL)
2457 {
2458 t->to_info_proc (t, args, what);
2459
2460 if (targetdebug)
2461 fprintf_unfiltered (gdb_stdlog,
2462 "target_info_proc (\"%s\", %d)\n", args, what);
2463
2464 return 1;
2465 }
2466 }
2467
2468 return 0;
2469 }
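
/* A hypothetical usage sketch (not compiled in): ask for the address
space mappings of the current process; IP_MAPPINGS is one of the
info_proc_what selectors from defs.h. */
#if 0
if (!target_info_proc ("", IP_MAPPINGS))
error (_("Not supported on this target."));
#endif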
2470
2471 static int
2472 find_default_supports_disable_randomization (struct target_ops *self)
2473 {
2474 struct target_ops *t;
2475
2476 t = find_default_run_target (NULL);
2477 if (t && t->to_supports_disable_randomization)
2478 return (t->to_supports_disable_randomization) (t);
2479 return 0;
2480 }
2481
2482 int
2483 target_supports_disable_randomization (void)
2484 {
2485 struct target_ops *t;
2486
2487 for (t = &current_target; t != NULL; t = t->beneath)
2488 if (t->to_supports_disable_randomization)
2489 return t->to_supports_disable_randomization (t);
2490
2491 return 0;
2492 }
2493
2494 char *
2495 target_get_osdata (const char *type)
2496 {
2497 struct target_ops *t;
2498
2499 /* If we're already connected to something that can get us OS
2500 related data, use it. Otherwise, try using the native
2501 target. */
2502 if (current_target.to_stratum >= process_stratum)
2503 t = current_target.beneath;
2504 else
2505 t = find_default_run_target ("get OS data");
2506
2507 if (!t)
2508 return NULL;
2509
2510 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
2511 }
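
/* A hypothetical usage sketch (not compiled in): fetch the
"processes" OS-data table and free the resulting XML string. */
#if 0
char *data = target_get_osdata ("processes");

if (data == NULL)
error (_("Can't read OS data on this target."));
/* ... use DATA ... */
xfree (data);
#endif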
2512
2513 static struct address_space *
2514 default_thread_address_space (struct target_ops *self, ptid_t ptid)
2515 {
2516 struct inferior *inf;
2517
2518 /* Fall-back to the "main" address space of the inferior. */
2519 inf = find_inferior_pid (ptid_get_pid (ptid));
2520
2521 if (inf == NULL || inf->aspace == NULL)
2522 internal_error (__FILE__, __LINE__,
2523 _("Can't determine the current "
2524 "address space of thread %s\n"),
2525 target_pid_to_str (ptid));
2526
2527 return inf->aspace;
2528 }
2529
2530 /* Determine the current address space of thread PTID. */
2531
2532 struct address_space *
2533 target_thread_address_space (ptid_t ptid)
2534 {
2535 struct address_space *aspace;
2536
2537 aspace = current_target.to_thread_address_space (&current_target, ptid);
2538 gdb_assert (aspace != NULL);
2539
2540 return aspace;
2541 }
2542
2543
2544 /* Target file operations. */
2545
2546 static struct target_ops *
2547 default_fileio_target (void)
2548 {
2549 /* If we're already connected to something that can perform
2550 file I/O, use it. Otherwise, try using the native target. */
2551 if (current_target.to_stratum >= process_stratum)
2552 return current_target.beneath;
2553 else
2554 return find_default_run_target ("file I/O");
2555 }
2556
2557 /* Open FILENAME on the target, using FLAGS and MODE. Return a
2558 target file descriptor, or -1 if an error occurs (and set
2559 *TARGET_ERRNO). */
2560 int
2561 target_fileio_open (const char *filename, int flags, int mode,
2562 int *target_errno)
2563 {
2564 struct target_ops *t;
2565
2566 for (t = default_fileio_target (); t != NULL; t = t->beneath)
2567 {
2568 if (t->to_fileio_open != NULL)
2569 {
2570 int fd = t->to_fileio_open (t, filename, flags, mode, target_errno);
2571
2572 if (targetdebug)
2573 fprintf_unfiltered (gdb_stdlog,
2574 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
2575 filename, flags, mode,
2576 fd, fd != -1 ? 0 : *target_errno);
2577 return fd;
2578 }
2579 }
2580
2581 *target_errno = FILEIO_ENOSYS;
2582 return -1;
2583 }
2584
2585 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
2586 Return the number of bytes written, or -1 if an error occurs
2587 (and set *TARGET_ERRNO). */
2588 int
2589 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
2590 ULONGEST offset, int *target_errno)
2591 {
2592 struct target_ops *t;
2593
2594 for (t = default_fileio_target (); t != NULL; t = t->beneath)
2595 {
2596 if (t->to_fileio_pwrite != NULL)
2597 {
2598 int ret = t->to_fileio_pwrite (t, fd, write_buf, len, offset,
2599 target_errno);
2600
2601 if (targetdebug)
2602 fprintf_unfiltered (gdb_stdlog,
2603 "target_fileio_pwrite (%d,...,%d,%s) "
2604 "= %d (%d)\n",
2605 fd, len, pulongest (offset),
2606 ret, ret != -1 ? 0 : *target_errno);
2607 return ret;
2608 }
2609 }
2610
2611 *target_errno = FILEIO_ENOSYS;
2612 return -1;
2613 }
2614
2615 /* Read up to LEN bytes from FD on the target into READ_BUF.
2616 Return the number of bytes read, or -1 if an error occurs
2617 (and set *TARGET_ERRNO). */
2618 int
2619 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
2620 ULONGEST offset, int *target_errno)
2621 {
2622 struct target_ops *t;
2623
2624 for (t = default_fileio_target (); t != NULL; t = t->beneath)
2625 {
2626 if (t->to_fileio_pread != NULL)
2627 {
2628 int ret = t->to_fileio_pread (t, fd, read_buf, len, offset,
2629 target_errno);
2630
2631 if (targetdebug)
2632 fprintf_unfiltered (gdb_stdlog,
2633 "target_fileio_pread (%d,...,%d,%s) "
2634 "= %d (%d)\n",
2635 fd, len, pulongest (offset),
2636 ret, ret != -1 ? 0 : *target_errno);
2637 return ret;
2638 }
2639 }
2640
2641 *target_errno = FILEIO_ENOSYS;
2642 return -1;
2643 }
2644
2645 /* Close FD on the target. Return 0, or -1 if an error occurs
2646 (and set *TARGET_ERRNO). */
2647 int
2648 target_fileio_close (int fd, int *target_errno)
2649 {
2650 struct target_ops *t;
2651
2652 for (t = default_fileio_target (); t != NULL; t = t->beneath)
2653 {
2654 if (t->to_fileio_close != NULL)
2655 {
2656 int ret = t->to_fileio_close (t, fd, target_errno);
2657
2658 if (targetdebug)
2659 fprintf_unfiltered (gdb_stdlog,
2660 "target_fileio_close (%d) = %d (%d)\n",
2661 fd, ret, ret != -1 ? 0 : *target_errno);
2662 return ret;
2663 }
2664 }
2665
2666 *target_errno = FILEIO_ENOSYS;
2667 return -1;
2668 }
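
/* A hypothetical usage sketch (not compiled in): open a file on the
target read-only, read its first bytes, and close it, checking
TARGET_ERRNO at each step. The filename is an assumed example. */
#if 0
int target_errno;
gdb_byte buf[128];
int fd = target_fileio_open ("/etc/hostname", FILEIO_O_RDONLY,
0, &target_errno);

if (fd == -1)
error (_("Open failed with fileio errno %d."), target_errno);
if (target_fileio_pread (fd, buf, sizeof (buf), 0, &target_errno) < 0)
warning (_("Read failed with fileio errno %d."), target_errno);
target_fileio_close (fd, &target_errno);
#endif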
2669
2670 /* Unlink FILENAME on the target. Return 0, or -1 if an error
2671 occurs (and set *TARGET_ERRNO). */
2672 int
2673 target_fileio_unlink (const char *filename, int *target_errno)
2674 {
2675 struct target_ops *t;
2676
2677 for (t = default_fileio_target (); t != NULL; t = t->beneath)
2678 {
2679 if (t->to_fileio_unlink != NULL)
2680 {
2681 int ret = t->to_fileio_unlink (t, filename, target_errno);
2682
2683 if (targetdebug)
2684 fprintf_unfiltered (gdb_stdlog,
2685 "target_fileio_unlink (%s) = %d (%d)\n",
2686 filename, ret, ret != -1 ? 0 : *target_errno);
2687 return ret;
2688 }
2689 }
2690
2691 *target_errno = FILEIO_ENOSYS;
2692 return -1;
2693 }
2694
2695 /* Read value of symbolic link FILENAME on the target. Return a
2696 null-terminated string allocated via xmalloc, or NULL if an error
2697 occurs (and set *TARGET_ERRNO). */
2698 char *
2699 target_fileio_readlink (const char *filename, int *target_errno)
2700 {
2701 struct target_ops *t;
2702
2703 for (t = default_fileio_target (); t != NULL; t = t->beneath)
2704 {
2705 if (t->to_fileio_readlink != NULL)
2706 {
2707 char *ret = t->to_fileio_readlink (t, filename, target_errno);
2708
2709 if (targetdebug)
2710 fprintf_unfiltered (gdb_stdlog,
2711 "target_fileio_readlink (%s) = %s (%d)\n",
2712 filename, ret ? ret : "(nil)",
2713 ret ? 0 : *target_errno);
2714 return ret;
2715 }
2716 }
2717
2718 *target_errno = FILEIO_ENOSYS;
2719 return NULL;
2720 }
2721
2722 static void
2723 target_fileio_close_cleanup (void *opaque)
2724 {
2725 int fd = *(int *) opaque;
2726 int target_errno;
2727
2728 target_fileio_close (fd, &target_errno);
2729 }
2730
2731 /* Read target file FILENAME. Store the result in *BUF_P and
2732 return the size of the transferred data. PADDING additional bytes are
2733 available in *BUF_P. This is a helper function for
2734 target_fileio_read_alloc; see the declaration of that function for more
2735 information. */
2736
2737 static LONGEST
2738 target_fileio_read_alloc_1 (const char *filename,
2739 gdb_byte **buf_p, int padding)
2740 {
2741 struct cleanup *close_cleanup;
2742 size_t buf_alloc, buf_pos;
2743 gdb_byte *buf;
2744 LONGEST n;
2745 int fd;
2746 int target_errno;
2747
2748 fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
2749 if (fd == -1)
2750 return -1;
2751
2752 close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);
2753
2754 /* Start by reading up to 4K at a time. The target will throttle
2755 this number down if necessary. */
2756 buf_alloc = 4096;
2757 buf = xmalloc (buf_alloc);
2758 buf_pos = 0;
2759 while (1)
2760 {
2761 n = target_fileio_pread (fd, &buf[buf_pos],
2762 buf_alloc - buf_pos - padding, buf_pos,
2763 &target_errno);
2764 if (n < 0)
2765 {
2766 /* An error occurred. */
2767 do_cleanups (close_cleanup);
2768 xfree (buf);
2769 return -1;
2770 }
2771 else if (n == 0)
2772 {
2773 /* Read all there was. */
2774 do_cleanups (close_cleanup);
2775 if (buf_pos == 0)
2776 xfree (buf);
2777 else
2778 *buf_p = buf;
2779 return buf_pos;
2780 }
2781
2782 buf_pos += n;
2783
2784 /* If the buffer is filling up, expand it. */
2785 if (buf_alloc < buf_pos * 2)
2786 {
2787 buf_alloc *= 2;
2788 buf = xrealloc (buf, buf_alloc);
2789 }
2790
2791 QUIT;
2792 }
2793 }
2794
2795 /* Read target file FILENAME. Store the result in *BUF_P and return
2796 the size of the transferred data. See the declaration of this
2797 function in "target.h" for more information about the return value. */
2798
2799 LONGEST
2800 target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
2801 {
2802 return target_fileio_read_alloc_1 (filename, buf_p, 0);
2803 }
2804
2805 /* Read target file FILENAME. The result is NUL-terminated and
2806 returned as a string, allocated using xmalloc. If an error occurs
2807 or the transfer is unsupported, NULL is returned. Empty objects
2808 are returned as allocated but empty strings. A warning is issued
2809 if the result contains any embedded NUL bytes. */
2810
2811 char *
2812 target_fileio_read_stralloc (const char *filename)
2813 {
2814 gdb_byte *buffer;
2815 char *bufstr;
2816 LONGEST i, transferred;
2817
2818 transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
2819 bufstr = (char *) buffer;
2820
2821 if (transferred < 0)
2822 return NULL;
2823
2824 if (transferred == 0)
2825 return xstrdup ("");
2826
2827 bufstr[transferred] = 0;
2828
2829 /* Check for embedded NUL bytes; but allow trailing NULs. */
2830 for (i = strlen (bufstr); i < transferred; i++)
2831 if (bufstr[i] != 0)
2832 {
2833 warning (_("target file %s "
2834 "contained unexpected null characters"),
2835 filename);
2836 break;
2837 }
2838
2839 return bufstr;
2840 }
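
/* A hypothetical usage sketch (not compiled in): read a target-side
/proc file as a string. The path is an assumed example. */
#if 0
char *contents = target_fileio_read_stralloc ("/proc/version");

if (contents != NULL)
{
printf_unfiltered ("%s\n", contents);
xfree (contents);
}
#endif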
2841
2842
2843 static int
2844 default_region_ok_for_hw_watchpoint (struct target_ops *self,
2845 CORE_ADDR addr, int len)
2846 {
2847 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
2848 }
2849
2850 static int
2851 default_watchpoint_addr_within_range (struct target_ops *target,
2852 CORE_ADDR addr,
2853 CORE_ADDR start, int length)
2854 {
2855 return addr >= start && addr < start + length;
2856 }
2857
2858 static struct gdbarch *
2859 default_thread_architecture (struct target_ops *ops, ptid_t ptid)
2860 {
2861 return target_gdbarch ();
2862 }
2863
2864 static int
2865 return_zero (struct target_ops *ignore)
2866 {
2867 return 0;
2868 }
2869
2870 static int
2871 return_zero_has_execution (struct target_ops *ignore, ptid_t ignore2)
2872 {
2873 return 0;
2874 }
2875
2876 /* Find the next target down the stack from the specified target. */
2879
2880 struct target_ops *
2881 find_target_beneath (struct target_ops *t)
2882 {
2883 return t->beneath;
2884 }
2885
2886 /* See target.h. */
2887
2888 struct target_ops *
2889 find_target_at (enum strata stratum)
2890 {
2891 struct target_ops *t;
2892
2893 for (t = current_target.beneath; t != NULL; t = t->beneath)
2894 if (t->to_stratum == stratum)
2895 return t;
2896
2897 return NULL;
2898 }
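
/* A hypothetical usage sketch (not compiled in): check whether a
process-level target is pushed before doing process-wide work. */
#if 0
struct target_ops *proc_target = find_target_at (process_stratum);

if (proc_target == NULL)
error (_("No process-level target is connected."));
#endif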
2899
2900 \f
2901 /* The inferior process has died. Long live the inferior! */
2902
2903 void
2904 generic_mourn_inferior (void)
2905 {
2906 ptid_t ptid;
2907
2908 ptid = inferior_ptid;
2909 inferior_ptid = null_ptid;
2910
2911 /* Mark breakpoints uninserted in case something tries to delete a
2912 breakpoint while we delete the inferior's threads (which would
2913 fail, since the inferior is long gone). */
2914 mark_breakpoints_out ();
2915
2916 if (!ptid_equal (ptid, null_ptid))
2917 {
2918 int pid = ptid_get_pid (ptid);
2919 exit_inferior (pid);
2920 }
2921
2922 /* Note this wipes step-resume breakpoints, so needs to be done
2923 after exit_inferior, which ends up referencing the step-resume
2924 breakpoints through clear_thread_inferior_resources. */
2925 breakpoint_init_inferior (inf_exited);
2926
2927 registers_changed ();
2928
2929 reopen_exec_file ();
2930 reinit_frame_cache ();
2931
2932 if (deprecated_detach_hook)
2933 deprecated_detach_hook ();
2934 }
2935 \f
2936 /* Convert a normal process ID to a string. Returns the string in a
2937 static buffer. */
2938
2939 char *
2940 normal_pid_to_str (ptid_t ptid)
2941 {
2942 static char buf[32];
2943
2944 xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
2945 return buf;
2946 }
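
/* A hypothetical usage sketch (not compiled in): the result lives in
a static buffer, so copy it before calling the function again.
PTID1 and PTID2 are assumed examples. */
#if 0
char *first = xstrdup (normal_pid_to_str (ptid1));

printf_unfiltered ("%s vs %s\n", first, normal_pid_to_str (ptid2));
xfree (first);
#endif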
2947
2948 static char *
2949 default_pid_to_str (struct target_ops *ops, ptid_t ptid)
2950 {
2951 return normal_pid_to_str (ptid);
2952 }
2953
2954 /* Error-catcher for target_find_memory_regions. */
2955 static int
2956 dummy_find_memory_regions (struct target_ops *self,
2957 find_memory_region_ftype ignore1, void *ignore2)
2958 {
2959 error (_("Command not implemented for this target."));
2960 return 0;
2961 }
2962
2963 /* Error-catcher for target_make_corefile_notes. */
2964 static char *
2965 dummy_make_corefile_notes (struct target_ops *self,
2966 bfd *ignore1, int *ignore2)
2967 {
2968 error (_("Command not implemented for this target."));
2969 return NULL;
2970 }
2971
2972 /* Set up the handful of non-empty slots needed by the dummy target
2973 vector. */
2974
2975 static void
2976 init_dummy_target (void)
2977 {
2978 dummy_target.to_shortname = "None";
2979 dummy_target.to_longname = "None";
2980 dummy_target.to_doc = "";
2981 dummy_target.to_supports_disable_randomization
2982 = find_default_supports_disable_randomization;
2983 dummy_target.to_stratum = dummy_stratum;
2984 dummy_target.to_has_all_memory = return_zero;
2985 dummy_target.to_has_memory = return_zero;
2986 dummy_target.to_has_stack = return_zero;
2987 dummy_target.to_has_registers = return_zero;
2988 dummy_target.to_has_execution = return_zero_has_execution;
2989 dummy_target.to_magic = OPS_MAGIC;
2990
2991 install_dummy_methods (&dummy_target);
2992 }
2993 \f
2994
2995 void
2996 target_close (struct target_ops *targ)
2997 {
2998 gdb_assert (!target_is_pushed (targ));
2999
3000 if (targ->to_xclose != NULL)
3001 targ->to_xclose (targ);
3002 else if (targ->to_close != NULL)
3003 targ->to_close (targ);
3004
3005 if (targetdebug)
3006 fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
3007 }
3008
3009 int
3010 target_thread_alive (ptid_t ptid)
3011 {
3012 return current_target.to_thread_alive (&current_target, ptid);
3013 }
3014
3015 void
3016 target_find_new_threads (void)
3017 {
3018 current_target.to_find_new_threads (&current_target);
3019 }
3020
3021 void
3022 target_stop (ptid_t ptid)
3023 {
3024 if (!may_stop)
3025 {
3026 warning (_("May not interrupt or stop the target, ignoring attempt"));
3027 return;
3028 }
3029
3030 (*current_target.to_stop) (&current_target, ptid);
3031 }
3032
3033 /* Concatenate ELEM to LIST, a comma-separated list, and return the
3034 result. The incoming LIST argument is released. */
3035
3036 static char *
3037 str_comma_list_concat_elem (char *list, const char *elem)
3038 {
3039 if (list == NULL)
3040 return xstrdup (elem);
3041 else
3042 return reconcat (list, list, ", ", elem, (char *) NULL);
3043 }
3044
3045 /* Helper for target_options_to_string. If OPT is present in
3046 TARGET_OPTIONS, append OPT_STR (the string version of OPT) to RET.
3047 Returns the new resulting string. OPT is removed from
3048 TARGET_OPTIONS. */
3049
3050 static char *
3051 do_option (int *target_options, char *ret,
3052 int opt, char *opt_str)
3053 {
3054 if ((*target_options & opt) != 0)
3055 {
3056 ret = str_comma_list_concat_elem (ret, opt_str);
3057 *target_options &= ~opt;
3058 }
3059
3060 return ret;
3061 }
3062
3063 char *
3064 target_options_to_string (int target_options)
3065 {
3066 char *ret = NULL;
3067
3068 #define DO_TARG_OPTION(OPT) \
3069 ret = do_option (&target_options, ret, OPT, #OPT)
3070
3071 DO_TARG_OPTION (TARGET_WNOHANG);
3072
3073 if (target_options != 0)
3074 ret = str_comma_list_concat_elem (ret, "unknown???");
3075
3076 if (ret == NULL)
3077 ret = xstrdup ("");
3078 return ret;
3079 }
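
/* A hypothetical usage sketch (not compiled in): render a set of
wait options for a debug log line; the result is xmalloc'd. */
#if 0
char *opts = target_options_to_string (TARGET_WNOHANG);

fprintf_unfiltered (gdb_stdlog, "options: %s\n", opts);
xfree (opts);
#endif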
3080
3081 static void
3082 debug_print_register (const char * func,
3083 struct regcache *regcache, int regno)
3084 {
3085 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3086
3087 fprintf_unfiltered (gdb_stdlog, "%s ", func);
3088 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
3089 && gdbarch_register_name (gdbarch, regno) != NULL
3090 && gdbarch_register_name (gdbarch, regno)[0] != '\0')
3091 fprintf_unfiltered (gdb_stdlog, "(%s)",
3092 gdbarch_register_name (gdbarch, regno));
3093 else
3094 fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
3095 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
3096 {
3097 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3098 int i, size = register_size (gdbarch, regno);
3099 gdb_byte buf[MAX_REGISTER_SIZE];
3100
3101 regcache_raw_collect (regcache, regno, buf);
3102 fprintf_unfiltered (gdb_stdlog, " = ");
3103 for (i = 0; i < size; i++)
3104 {
3105 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
3106 }
3107 if (size <= sizeof (LONGEST))
3108 {
3109 ULONGEST val = extract_unsigned_integer (buf, size, byte_order);
3110
3111 fprintf_unfiltered (gdb_stdlog, " %s %s",
3112 core_addr_to_string_nz (val), plongest (val));
3113 }
3114 }
3115 fprintf_unfiltered (gdb_stdlog, "\n");
3116 }
3117
3118 void
3119 target_fetch_registers (struct regcache *regcache, int regno)
3120 {
3121 current_target.to_fetch_registers (&current_target, regcache, regno);
3122 if (targetdebug)
3123 debug_print_register ("target_fetch_registers", regcache, regno);
3124 }
3125
3126 void
3127 target_store_registers (struct regcache *regcache, int regno)
3128 {
3131 if (!may_write_registers)
3132 error (_("Writing to registers is not allowed (regno %d)"), regno);
3133
3134 current_target.to_store_registers (&current_target, regcache, regno);
3135 if (targetdebug)
3136 {
3137 debug_print_register ("target_store_registers", regcache, regno);
3138 }
3139 }
3140
3141 int
3142 target_core_of_thread (ptid_t ptid)
3143 {
3144 return current_target.to_core_of_thread (&current_target, ptid);
3145 }
3146
3147 int
3148 simple_verify_memory (struct target_ops *ops,
3149 const gdb_byte *data, CORE_ADDR lma, ULONGEST size)
3150 {
3151 LONGEST total_xfered = 0;
3152
3153 while (total_xfered < size)
3154 {
3155 ULONGEST xfered_len;
3156 enum target_xfer_status status;
3157 gdb_byte buf[1024];
3158 ULONGEST howmuch = min (sizeof (buf), size - total_xfered);
3159
3160 status = target_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
3161 buf, NULL, lma + total_xfered, howmuch,
3162 &xfered_len);
3163 if (status == TARGET_XFER_OK
3164 && memcmp (data + total_xfered, buf, xfered_len) == 0)
3165 {
3166 total_xfered += xfered_len;
3167 QUIT;
3168 }
3169 else
3170 return 0;
3171 }
3172 return 1;
3173 }
3174
3175 /* Default implementation of memory verification. */
3176
3177 static int
3178 default_verify_memory (struct target_ops *self,
3179 const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3180 {
3181 /* Start over from the top of the target stack. */
3182 return simple_verify_memory (current_target.beneath,
3183 data, memaddr, size);
3184 }
3185
3186 int
3187 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3188 {
3189 return current_target.to_verify_memory (&current_target,
3190 data, memaddr, size);
3191 }
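
/* A hypothetical usage sketch (not compiled in): compare a buffer of
expected bytes against target memory, e.g. after a flash write.
DATA, LMA, and LEN are assumed example values. */
#if 0
if (!target_verify_memory (data, lma, len))
warning (_("Target memory does not match the expected contents."));
#endif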
3192
3193 /* The documentation for this function is in its prototype declaration in
3194 target.h. */
3195
3196 int
3197 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
3198 {
3199 return current_target.to_insert_mask_watchpoint (&current_target,
3200 addr, mask, rw);
3201 }
3202
3203 /* The documentation for this function is in its prototype declaration in
3204 target.h. */
3205
3206 int
3207 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
3208 {
3209 return current_target.to_remove_mask_watchpoint (&current_target,
3210 addr, mask, rw);
3211 }
3212
3213 /* The documentation for this function is in its prototype declaration
3214 in target.h. */
3215
3216 int
3217 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
3218 {
3219 return current_target.to_masked_watch_num_registers (&current_target,
3220 addr, mask);
3221 }
3222
3223 /* The documentation for this function is in its prototype declaration
3224 in target.h. */
3225
3226 int
3227 target_ranged_break_num_registers (void)
3228 {
3229 return current_target.to_ranged_break_num_registers (&current_target);
3230 }
3231
3232 /* See target.h. */
3233
3234 struct btrace_target_info *
3235 target_enable_btrace (ptid_t ptid)
3236 {
3237 return current_target.to_enable_btrace (&current_target, ptid);
3238 }
3239
3240 /* See target.h. */
3241
3242 void
3243 target_disable_btrace (struct btrace_target_info *btinfo)
3244 {
3245 current_target.to_disable_btrace (&current_target, btinfo);
3246 }
3247
3248 /* See target.h. */
3249
3250 void
3251 target_teardown_btrace (struct btrace_target_info *btinfo)
3252 {
3253 current_target.to_teardown_btrace (&current_target, btinfo);
3254 }
3255
3256 /* See target.h. */
3257
3258 enum btrace_error
3259 target_read_btrace (VEC (btrace_block_s) **btrace,
3260 struct btrace_target_info *btinfo,
3261 enum btrace_read_type type)
3262 {
3263 return current_target.to_read_btrace (&current_target, btrace, btinfo, type);
3264 }
3265
3266 /* See target.h. */
3267
3268 void
3269 target_stop_recording (void)
3270 {
3271 current_target.to_stop_recording (&current_target);
3272 }
3273
3274 /* See target.h. */
3275
3276 void
3277 target_save_record (const char *filename)
3278 {
3279 current_target.to_save_record (&current_target, filename);
3280 }
3281
3282 /* See target.h. */
3283
3284 int
3285 target_supports_delete_record (void)
3286 {
3287 struct target_ops *t;
3288
3289 for (t = current_target.beneath; t != NULL; t = t->beneath)
3290 if (t->to_delete_record != delegate_delete_record
3291 && t->to_delete_record != tdefault_delete_record)
3292 return 1;
3293
3294 return 0;
3295 }
3296
3297 /* See target.h. */
3298
3299 void
3300 target_delete_record (void)
3301 {
3302 current_target.to_delete_record (&current_target);
3303 }
3304
3305 /* See target.h. */
3306
3307 int
3308 target_record_is_replaying (void)
3309 {
3310 return current_target.to_record_is_replaying (&current_target);
3311 }
3312
3313 /* See target.h. */
3314
3315 void
3316 target_goto_record_begin (void)
3317 {
3318 current_target.to_goto_record_begin (&current_target);
3319 }
3320
3321 /* See target.h. */
3322
3323 void
3324 target_goto_record_end (void)
3325 {
3326 current_target.to_goto_record_end (&current_target);
3327 }
3328
3329 /* See target.h. */
3330
3331 void
3332 target_goto_record (ULONGEST insn)
3333 {
3334 current_target.to_goto_record (&current_target, insn);
3335 }
3336
3337 /* See target.h. */
3338
3339 void
3340 target_insn_history (int size, int flags)
3341 {
3342 current_target.to_insn_history (&current_target, size, flags);
3343 }
3344
3345 /* See target.h. */
3346
3347 void
3348 target_insn_history_from (ULONGEST from, int size, int flags)
3349 {
3350 current_target.to_insn_history_from (&current_target, from, size, flags);
3351 }
3352
3353 /* See target.h. */
3354
3355 void
3356 target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
3357 {
3358 current_target.to_insn_history_range (&current_target, begin, end, flags);
3359 }
3360
3361 /* See target.h. */
3362
3363 void
3364 target_call_history (int size, int flags)
3365 {
3366 current_target.to_call_history (&current_target, size, flags);
3367 }
3368
3369 /* See target.h. */
3370
3371 void
3372 target_call_history_from (ULONGEST begin, int size, int flags)
3373 {
3374 current_target.to_call_history_from (&current_target, begin, size, flags);
3375 }
3376
3377 /* See target.h. */
3378
3379 void
3380 target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
3381 {
3382 current_target.to_call_history_range (&current_target, begin, end, flags);
3383 }
3384
3385 /* See target.h. */
3386
3387 const struct frame_unwind *
3388 target_get_unwinder (void)
3389 {
3390 return current_target.to_get_unwinder (&current_target);
3391 }
3392
3393 /* See target.h. */
3394
3395 const struct frame_unwind *
3396 target_get_tailcall_unwinder (void)
3397 {
3398 return current_target.to_get_tailcall_unwinder (&current_target);
3399 }
3400
3401 /* Default implementation of to_decr_pc_after_break. */
3402
3403 static CORE_ADDR
3404 default_target_decr_pc_after_break (struct target_ops *ops,
3405 struct gdbarch *gdbarch)
3406 {
3407 return gdbarch_decr_pc_after_break (gdbarch);
3408 }
3409
3410 /* See target.h. */
3411
3412 CORE_ADDR
3413 target_decr_pc_after_break (struct gdbarch *gdbarch)
3414 {
3415 return current_target.to_decr_pc_after_break (&current_target, gdbarch);
3416 }
3417
3418 /* See target.h. */
3419
3420 void
3421 target_prepare_to_generate_core (void)
3422 {
3423 current_target.to_prepare_to_generate_core (&current_target);
3424 }
3425
3426 /* See target.h. */
3427
3428 void
3429 target_done_generating_core (void)
3430 {
3431 current_target.to_done_generating_core (&current_target);
3432 }
3433
3434 static void
3435 setup_target_debug (void)
3436 {
3437 memcpy (&debug_target, &current_target, sizeof debug_target);
3438
3439 init_debug_target (&current_target);
3440 }
3441 \f
3442
3443 static char targ_desc[] =
3444 "Names of targets and files being debugged.\nShows the entire \
3445 stack of targets currently in use (including the exec-file,\n\
3446 core-file, and process, if any), as well as the symbol file name.";
3447
3448 static void
3449 default_rcmd (struct target_ops *self, const char *command,
3450 struct ui_file *output)
3451 {
3452 error (_("\"monitor\" command not supported by this target."));
3453 }
3454
3455 static void
3456 do_monitor_command (char *cmd,
3457 int from_tty)
3458 {
3459 target_rcmd (cmd, gdb_stdtarg);
3460 }
3461
3462 /* Print the name of each layer of our target stack. */
3463
3464 static void
3465 maintenance_print_target_stack (char *cmd, int from_tty)
3466 {
3467 struct target_ops *t;
3468
3469 printf_filtered (_("The current target stack is:\n"));
3470
3471 for (t = target_stack; t != NULL; t = t->beneath)
3472 {
3473 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
3474 }
3475 }
3476
3477 /* Controls if targets can report that they can/are async. This is
3478 just for maintainers to use when debugging gdb. */
3479 int target_async_permitted = 1;
3480
3481 /* The set command writes to this variable. If the inferior is
3482 executing, target_async_permitted is *not* updated. */
3483 static int target_async_permitted_1 = 1;
3484
3485 static void
3486 maint_set_target_async_command (char *args, int from_tty,
3487 struct cmd_list_element *c)
3488 {
3489 if (have_live_inferiors ())
3490 {
3491 target_async_permitted_1 = target_async_permitted;
3492 error (_("Cannot change this setting while the inferior is running."));
3493 }
3494
3495 target_async_permitted = target_async_permitted_1;
3496 }
3497
3498 static void
3499 maint_show_target_async_command (struct ui_file *file, int from_tty,
3500 struct cmd_list_element *c,
3501 const char *value)
3502 {
3503 fprintf_filtered (file,
3504 _("Controlling the inferior in "
3505 "asynchronous mode is %s.\n"), value);
3506 }
3507
3508 /* Temporary copies of permission settings. */
3509
3510 static int may_write_registers_1 = 1;
3511 static int may_write_memory_1 = 1;
3512 static int may_insert_breakpoints_1 = 1;
3513 static int may_insert_tracepoints_1 = 1;
3514 static int may_insert_fast_tracepoints_1 = 1;
3515 static int may_stop_1 = 1;
3516
3517 /* Make the user-set values match the real values again. */
3518
3519 void
3520 update_target_permissions (void)
3521 {
3522 may_write_registers_1 = may_write_registers;
3523 may_write_memory_1 = may_write_memory;
3524 may_insert_breakpoints_1 = may_insert_breakpoints;
3525 may_insert_tracepoints_1 = may_insert_tracepoints;
3526 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
3527 may_stop_1 = may_stop;
3528 }
3529
3530 /* This one function handles (most of) the permission flags in the
3531 same way. */
3532
3533 static void
3534 set_target_permissions (char *args, int from_tty,
3535 struct cmd_list_element *c)
3536 {
3537 if (target_has_execution)
3538 {
3539 update_target_permissions ();
3540 error (_("Cannot change this setting while the inferior is running."));
3541 }
3542
3543 /* Make the real values match the user-changed values. */
3544 may_write_registers = may_write_registers_1;
3545 may_insert_breakpoints = may_insert_breakpoints_1;
3546 may_insert_tracepoints = may_insert_tracepoints_1;
3547 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
3548 may_stop = may_stop_1;
3549 update_observer_mode ();
3550 }
3551
3552 /* Set memory write permission independently of observer mode. */
3553
3554 static void
3555 set_write_memory_permission (char *args, int from_tty,
3556 struct cmd_list_element *c)
3557 {
3558 /* Make the real values match the user-changed values. */
3559 may_write_memory = may_write_memory_1;
3560 update_observer_mode ();
3561 }
3562
3563
3564 void
3565 initialize_targets (void)
3566 {
3567 init_dummy_target ();
3568 push_target (&dummy_target);
3569
3570 add_info ("target", target_info, targ_desc);
3571 add_info ("files", target_info, targ_desc);
3572
3573 add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
3574 Set target debugging."), _("\
3575 Show target debugging."), _("\
3576 When non-zero, target debugging is enabled. Higher numbers are more\n\
3577 verbose."),
3578 set_targetdebug,
3579 show_targetdebug,
3580 &setdebuglist, &showdebuglist);
3581
3582 add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
3583 &trust_readonly, _("\
3584 Set mode for reading from readonly sections."), _("\
3585 Show mode for reading from readonly sections."), _("\
3586 When this mode is on, memory reads from readonly sections (such as .text)\n\
3587 will be read from the object file instead of from the target. This will\n\
3588 result in significant performance improvement for remote targets."),
3589 NULL,
3590 show_trust_readonly,
3591 &setlist, &showlist);
3592
3593 add_com ("monitor", class_obscure, do_monitor_command,
3594 _("Send a command to the remote monitor (remote targets only)."));
3595
3596 add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
3597 _("Print the name of each layer of the internal target stack."),
3598 &maintenanceprintlist);
3599
3600 add_setshow_boolean_cmd ("target-async", no_class,
3601 &target_async_permitted_1, _("\
3602 Set whether gdb controls the inferior in asynchronous mode."), _("\
3603 Show whether gdb controls the inferior in asynchronous mode."), _("\
3604 Tells gdb whether to control the inferior in asynchronous mode."),
3605 maint_set_target_async_command,
3606 maint_show_target_async_command,
3607 &maintenance_set_cmdlist,
3608 &maintenance_show_cmdlist);
3609
3610 add_setshow_boolean_cmd ("may-write-registers", class_support,
3611 &may_write_registers_1, _("\
3612 Set permission to write into registers."), _("\
3613 Show permission to write into registers."), _("\
3614 When this permission is on, GDB may write into the target's registers.\n\
3615 Otherwise, any sort of write attempt will result in an error."),
3616 set_target_permissions, NULL,
3617 &setlist, &showlist);
3618
3619 add_setshow_boolean_cmd ("may-write-memory", class_support,
3620 &may_write_memory_1, _("\
3621 Set permission to write into target memory."), _("\
3622 Show permission to write into target memory."), _("\
3623 When this permission is on, GDB may write into the target's memory.\n\
3624 Otherwise, any sort of write attempt will result in an error."),
3625 set_write_memory_permission, NULL,
3626 &setlist, &showlist);
3627
3628 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
3629 &may_insert_breakpoints_1, _("\
3630 Set permission to insert breakpoints in the target."), _("\
3631 Show permission to insert breakpoints in the target."), _("\
3632 When this permission is on, GDB may insert breakpoints in the program.\n\
3633 Otherwise, any sort of insertion attempt will result in an error."),
3634 set_target_permissions, NULL,
3635 &setlist, &showlist);
3636
3637 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
3638 &may_insert_tracepoints_1, _("\
3639 Set permission to insert tracepoints in the target."), _("\
3640 Show permission to insert tracepoints in the target."), _("\
3641 When this permission is on, GDB may insert tracepoints in the program.\n\
3642 Otherwise, any sort of insertion attempt will result in an error."),
3643 set_target_permissions, NULL,
3644 &setlist, &showlist);
3645
3646 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
3647 &may_insert_fast_tracepoints_1, _("\
3648 Set permission to insert fast tracepoints in the target."), _("\
3649 Show permission to insert fast tracepoints in the target."), _("\
3650 When this permission is on, GDB may insert fast tracepoints.\n\
3651 Otherwise, any sort of insertion attempt will result in an error."),
3652 set_target_permissions, NULL,
3653 &setlist, &showlist);
3654
3655 add_setshow_boolean_cmd ("may-interrupt", class_support,
3656 &may_stop_1, _("\
3657 Set permission to interrupt or signal the target."), _("\
3658 Show permission to interrupt or signal the target."), _("\
3659 When this permission is on, GDB may interrupt/stop the target's execution.\n\
3660 Otherwise, any attempt to interrupt or stop will be ignored."),
3661 set_target_permissions, NULL,
3662 &setlist, &showlist);
3663
3664 add_setshow_boolean_cmd ("auto-connect-native-target", class_support,
3665 &auto_connect_native_target, _("\
3666 Set whether GDB may automatically connect to the native target."), _("\
3667 Show whether GDB may automatically connect to the native target."), _("\
3668 When on, and GDB is not connected to a target yet, GDB\n\
3669 attempts \"run\" and other commands with the native target."),
3670 NULL, show_auto_connect_native_target,
3671 &setlist, &showlist);
3672 }