Use filtered output in terminal_info implementations
[binutils-gdb.git] / gdb / target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2022 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "target.h"
24 #include "target-dcache.h"
25 #include "gdbcmd.h"
26 #include "symtab.h"
27 #include "inferior.h"
28 #include "infrun.h"
29 #include "bfd.h"
30 #include "symfile.h"
31 #include "objfiles.h"
32 #include "dcache.h"
33 #include <signal.h>
34 #include "regcache.h"
35 #include "gdbcore.h"
36 #include "target-descriptions.h"
37 #include "gdbthread.h"
38 #include "solib.h"
39 #include "exec.h"
40 #include "inline-frame.h"
41 #include "tracepoint.h"
42 #include "gdb/fileio.h"
43 #include "gdbsupport/agent.h"
44 #include "auxv.h"
45 #include "target-debug.h"
46 #include "top.h"
47 #include "event-top.h"
48 #include <algorithm>
49 #include "gdbsupport/byte-vector.h"
50 #include "gdbsupport/search.h"
51 #include "terminal.h"
52 #include <unordered_map>
53 #include "target-connection.h"
54 #include "valprint.h"
55 #include "cli/cli-decode.h"
56
57 static void generic_tls_error (void) ATTRIBUTE_NORETURN;
58
59 static void default_terminal_info (struct target_ops *, const char *, int);
60
61 static int default_watchpoint_addr_within_range (struct target_ops *,
62 CORE_ADDR, CORE_ADDR, int);
63
64 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
65 CORE_ADDR, int);
66
67 static void default_rcmd (struct target_ops *, const char *, struct ui_file *);
68
69 static ptid_t default_get_ada_task_ptid (struct target_ops *self,
70 long lwp, ULONGEST tid);
71
72 static void default_mourn_inferior (struct target_ops *self);
73
74 static int default_search_memory (struct target_ops *ops,
75 CORE_ADDR start_addr,
76 ULONGEST search_space_len,
77 const gdb_byte *pattern,
78 ULONGEST pattern_len,
79 CORE_ADDR *found_addrp);
80
81 static int default_verify_memory (struct target_ops *self,
82 const gdb_byte *data,
83 CORE_ADDR memaddr, ULONGEST size);
84
85 static void tcomplain (void) ATTRIBUTE_NORETURN;
86
87 static struct target_ops *find_default_run_target (const char *);
88
89 static int dummy_find_memory_regions (struct target_ops *self,
90 find_memory_region_ftype ignore1,
91 void *ignore2);
92
93 static gdb::unique_xmalloc_ptr<char> dummy_make_corefile_notes
94 (struct target_ops *self, bfd *ignore1, int *ignore2);
95
96 static std::string default_pid_to_str (struct target_ops *ops, ptid_t ptid);
97
98 static enum exec_direction_kind default_execution_direction
99 (struct target_ops *self);
100
101 /* Mapping between target_info objects (which have address identity)
102 and corresponding open/factory function/callback. Each add_target
103 call adds one entry to this map, and registers a "target
104 TARGET_NAME" command that when invoked calls the factory registered
105 here. The target_info object is associated with the command via
106 the command's context. */
107 static std::unordered_map<const target_info *, target_open_ftype *>
108 target_factories;
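As an illustration of the registration path described above, here is a minimal sketch of a hypothetical target (the names, doc string, and open function are invented for the example; only target_info and add_target come from target.h):

/* Hypothetical example target, not part of GDB.  */

static const target_info example_target_info = {
  "example",                        /* shortname: creates "target example"  */
  N_("Example debugging target"),   /* longname  */
  N_("Connect to an example target.\n\
Usage: target example [ARGS]")      /* doc  */
};

static void
example_target_open (const char *args, int from_tty)
{
  /* Parse ARGS and push a target_ops instance on the current
     inferior's target stack.  */
}

void _initialize_example_target ();
void
_initialize_example_target ()
{
  /* Adds one entry to target_factories and registers the
     "target example" command; open_target () dispatches to
     example_target_open when that command is run.  */
  add_target (example_target_info, example_target_open);
}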
109
110 /* The singleton debug target. */
111
112 static struct target_ops *the_debug_target;
113
114 /* Command list for target. */
115
116 static struct cmd_list_element *targetlist = NULL;
117
118 /* True if we should trust readonly sections from the
119 executable when reading memory. */
120
121 static bool trust_readonly = false;
122
123 /* Nonzero if we should show true memory content including
124 memory breakpoints inserted by GDB. */
125
126 static int show_memory_breakpoints = 0;
127
128 /* These globals control whether GDB attempts to perform these
129 operations; they are useful for targets that need to prevent
130 inadvertent disruption, such as in non-stop mode. */
131
132 bool may_write_registers = true;
133
134 bool may_write_memory = true;
135
136 bool may_insert_breakpoints = true;
137
138 bool may_insert_tracepoints = true;
139
140 bool may_insert_fast_tracepoints = true;
141
142 bool may_stop = true;
143
144 /* Non-zero if we want to see a trace of target-level operations. */
145
146 static unsigned int targetdebug = 0;
147
148 static void
149 set_targetdebug (const char *args, int from_tty, struct cmd_list_element *c)
150 {
151 if (targetdebug)
152 current_inferior ()->push_target (the_debug_target);
153 else
154 current_inferior ()->unpush_target (the_debug_target);
155 }
156
157 static void
158 show_targetdebug (struct ui_file *file, int from_tty,
159 struct cmd_list_element *c, const char *value)
160 {
161 fprintf_filtered (file, _("Target debugging is %s.\n"), value);
162 }
163
164 int
165 target_has_memory ()
166 {
167 for (target_ops *t = current_inferior ()->top_target ();
168 t != NULL;
169 t = t->beneath ())
170 if (t->has_memory ())
171 return 1;
172
173 return 0;
174 }
175
176 int
177 target_has_stack ()
178 {
179 for (target_ops *t = current_inferior ()->top_target ();
180 t != NULL;
181 t = t->beneath ())
182 if (t->has_stack ())
183 return 1;
184
185 return 0;
186 }
187
188 int
189 target_has_registers ()
190 {
191 for (target_ops *t = current_inferior ()->top_target ();
192 t != NULL;
193 t = t->beneath ())
194 if (t->has_registers ())
195 return 1;
196
197 return 0;
198 }
199
200 bool
201 target_has_execution (inferior *inf)
202 {
203 if (inf == nullptr)
204 inf = current_inferior ();
205
206 for (target_ops *t = inf->top_target ();
207 t != nullptr;
208 t = inf->find_target_beneath (t))
209 if (t->has_execution (inf))
210 return true;
211
212 return false;
213 }
214
215 const char *
216 target_shortname ()
217 {
218 return current_inferior ()->top_target ()->shortname ();
219 }
220
221 /* See target.h. */
222
223 bool
224 target_attach_no_wait ()
225 {
226 return current_inferior ()->top_target ()->attach_no_wait ();
227 }
228
229 /* See target.h. */
230
231 void
232 target_post_attach (int pid)
233 {
234 return current_inferior ()->top_target ()->post_attach (pid);
235 }
236
237 /* See target.h. */
238
239 void
240 target_prepare_to_store (regcache *regcache)
241 {
242 return current_inferior ()->top_target ()->prepare_to_store (regcache);
243 }
244
245 /* See target.h. */
246
247 bool
248 target_supports_enable_disable_tracepoint ()
249 {
250 target_ops *target = current_inferior ()->top_target ();
251
252 return target->supports_enable_disable_tracepoint ();
253 }
254
255 bool
256 target_supports_string_tracing ()
257 {
258 return current_inferior ()->top_target ()->supports_string_tracing ();
259 }
260
261 /* See target.h. */
262
263 bool
264 target_supports_evaluation_of_breakpoint_conditions ()
265 {
266 target_ops *target = current_inferior ()->top_target ();
267
268 return target->supports_evaluation_of_breakpoint_conditions ();
269 }
270
271 /* See target.h. */
272
273 bool
274 target_supports_dumpcore ()
275 {
276 return current_inferior ()->top_target ()->supports_dumpcore ();
277 }
278
279 /* See target.h. */
280
281 void
282 target_dumpcore (const char *filename)
283 {
284 return current_inferior ()->top_target ()->dumpcore (filename);
285 }
286
287 /* See target.h. */
288
289 bool
290 target_can_run_breakpoint_commands ()
291 {
292 return current_inferior ()->top_target ()->can_run_breakpoint_commands ();
293 }
294
295 /* See target.h. */
296
297 void
298 target_files_info ()
299 {
300 return current_inferior ()->top_target ()->files_info ();
301 }
302
303 /* See target.h. */
304
305 int
306 target_insert_fork_catchpoint (int pid)
307 {
308 return current_inferior ()->top_target ()->insert_fork_catchpoint (pid);
309 }
310
311 /* See target.h. */
312
313 int
314 target_remove_fork_catchpoint (int pid)
315 {
316 return current_inferior ()->top_target ()->remove_fork_catchpoint (pid);
317 }
318
319 /* See target.h. */
320
321 int
322 target_insert_vfork_catchpoint (int pid)
323 {
324 return current_inferior ()->top_target ()->insert_vfork_catchpoint (pid);
325 }
326
327 /* See target.h. */
328
329 int
330 target_remove_vfork_catchpoint (int pid)
331 {
332 return current_inferior ()->top_target ()->remove_vfork_catchpoint (pid);
333 }
334
335 /* See target.h. */
336
337 int
338 target_insert_exec_catchpoint (int pid)
339 {
340 return current_inferior ()->top_target ()->insert_exec_catchpoint (pid);
341 }
342
343 /* See target.h. */
344
345 int
346 target_remove_exec_catchpoint (int pid)
347 {
348 return current_inferior ()->top_target ()->remove_exec_catchpoint (pid);
349 }
350
351 /* See target.h. */
352
353 int
354 target_set_syscall_catchpoint (int pid, bool needed, int any_count,
355 gdb::array_view<const int> syscall_counts)
356 {
357 target_ops *target = current_inferior ()->top_target ();
358
359 return target->set_syscall_catchpoint (pid, needed, any_count,
360 syscall_counts);
361 }
362
363 /* See target.h. */
364
365 void
366 target_rcmd (const char *command, struct ui_file *outbuf)
367 {
368 return current_inferior ()->top_target ()->rcmd (command, outbuf);
369 }
370
371 /* See target.h. */
372
373 bool
374 target_can_lock_scheduler ()
375 {
376 target_ops *target = current_inferior ()->top_target ();
377
378 return (target->get_thread_control_capabilities () & tc_schedlock) != 0;
379 }
380
381 /* See target.h. */
382
383 bool
384 target_can_async_p ()
385 {
386 return target_can_async_p (current_inferior ()->top_target ());
387 }
388
389 /* See target.h. */
390
391 bool
392 target_can_async_p (struct target_ops *target)
393 {
394 if (!target_async_permitted)
395 return false;
396 return target->can_async_p ();
397 }
398
399 /* See target.h. */
400
401 bool
402 target_is_async_p ()
403 {
404 bool result = current_inferior ()->top_target ()->is_async_p ();
405 gdb_assert (target_async_permitted || !result);
406 return result;
407 }
408
409 exec_direction_kind
410 target_execution_direction ()
411 {
412 return current_inferior ()->top_target ()->execution_direction ();
413 }
414
415 /* See target.h. */
416
417 const char *
418 target_extra_thread_info (thread_info *tp)
419 {
420 return current_inferior ()->top_target ()->extra_thread_info (tp);
421 }
422
423 /* See target.h. */
424
425 char *
426 target_pid_to_exec_file (int pid)
427 {
428 return current_inferior ()->top_target ()->pid_to_exec_file (pid);
429 }
430
431 /* See target.h. */
432
433 gdbarch *
434 target_thread_architecture (ptid_t ptid)
435 {
436 return current_inferior ()->top_target ()->thread_architecture (ptid);
437 }
438
439 /* See target.h. */
440
441 int
442 target_find_memory_regions (find_memory_region_ftype func, void *data)
443 {
444 return current_inferior ()->top_target ()->find_memory_regions (func, data);
445 }
446
447 /* See target.h. */
448
449 gdb::unique_xmalloc_ptr<char>
450 target_make_corefile_notes (bfd *bfd, int *size_p)
451 {
452 return current_inferior ()->top_target ()->make_corefile_notes (bfd, size_p);
453 }
454
455 gdb_byte *
456 target_get_bookmark (const char *args, int from_tty)
457 {
458 return current_inferior ()->top_target ()->get_bookmark (args, from_tty);
459 }
460
461 void
462 target_goto_bookmark (const gdb_byte *arg, int from_tty)
463 {
464 return current_inferior ()->top_target ()->goto_bookmark (arg, from_tty);
465 }
466
467 /* See target.h. */
468
469 bool
470 target_stopped_by_watchpoint ()
471 {
472 return current_inferior ()->top_target ()->stopped_by_watchpoint ();
473 }
474
475 /* See target.h. */
476
477 bool
478 target_stopped_by_sw_breakpoint ()
479 {
480 return current_inferior ()->top_target ()->stopped_by_sw_breakpoint ();
481 }
482
483 bool
484 target_supports_stopped_by_sw_breakpoint ()
485 {
486 target_ops *target = current_inferior ()->top_target ();
487
488 return target->supports_stopped_by_sw_breakpoint ();
489 }
490
491 bool
492 target_stopped_by_hw_breakpoint ()
493 {
494 return current_inferior ()->top_target ()->stopped_by_hw_breakpoint ();
495 }
496
497 bool
498 target_supports_stopped_by_hw_breakpoint ()
499 {
500 target_ops *target = current_inferior ()->top_target ();
501
502 return target->supports_stopped_by_hw_breakpoint ();
503 }
504
505 /* See target.h. */
506
507 bool
508 target_have_steppable_watchpoint ()
509 {
510 return current_inferior ()->top_target ()->have_steppable_watchpoint ();
511 }
512
513 /* See target.h. */
514
515 int
516 target_can_use_hardware_watchpoint (bptype type, int cnt, int othertype)
517 {
518 target_ops *target = current_inferior ()->top_target ();
519
520 return target->can_use_hw_breakpoint (type, cnt, othertype);
521 }
522
523 /* See target.h. */
524
525 int
526 target_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
527 {
528 target_ops *target = current_inferior ()->top_target ();
529
530 return target->region_ok_for_hw_watchpoint (addr, len);
531 }
532
533
534 int
535 target_can_do_single_step ()
536 {
537 return current_inferior ()->top_target ()->can_do_single_step ();
538 }
539
540 /* See target.h. */
541
542 int
543 target_insert_watchpoint (CORE_ADDR addr, int len, target_hw_bp_type type,
544 expression *cond)
545 {
546 target_ops *target = current_inferior ()->top_target ();
547
548 return target->insert_watchpoint (addr, len, type, cond);
549 }
550
551 /* See target.h. */
552
553 int
554 target_remove_watchpoint (CORE_ADDR addr, int len, target_hw_bp_type type,
555 expression *cond)
556 {
557 target_ops *target = current_inferior ()->top_target ();
558
559 return target->remove_watchpoint (addr, len, type, cond);
560 }
561
562 /* See target.h. */
563
564 int
565 target_insert_hw_breakpoint (gdbarch *gdbarch, bp_target_info *bp_tgt)
566 {
567 target_ops *target = current_inferior ()->top_target ();
568
569 return target->insert_hw_breakpoint (gdbarch, bp_tgt);
570 }
571
572 /* See target.h. */
573
574 int
575 target_remove_hw_breakpoint (gdbarch *gdbarch, bp_target_info *bp_tgt)
576 {
577 target_ops *target = current_inferior ()->top_target ();
578
579 return target->remove_hw_breakpoint (gdbarch, bp_tgt);
580 }
581
582 /* See target.h. */
583
584 bool
585 target_can_accel_watchpoint_condition (CORE_ADDR addr, int len, int type,
586 expression *cond)
587 {
588 target_ops *target = current_inferior ()->top_target ();
589
590 return target->can_accel_watchpoint_condition (addr, len, type, cond);
591 }
592
593 /* See target.h. */
594
595 bool
596 target_can_execute_reverse ()
597 {
598 return current_inferior ()->top_target ()->can_execute_reverse ();
599 }
600
601 ptid_t
602 target_get_ada_task_ptid (long lwp, ULONGEST tid)
603 {
604 return current_inferior ()->top_target ()->get_ada_task_ptid (lwp, tid);
605 }
606
607 bool
608 target_filesystem_is_local ()
609 {
610 return current_inferior ()->top_target ()->filesystem_is_local ();
611 }
612
613 void
614 target_trace_init ()
615 {
616 return current_inferior ()->top_target ()->trace_init ();
617 }
618
619 void
620 target_download_tracepoint (bp_location *location)
621 {
622 return current_inferior ()->top_target ()->download_tracepoint (location);
623 }
624
625 bool
626 target_can_download_tracepoint ()
627 {
628 return current_inferior ()->top_target ()->can_download_tracepoint ();
629 }
630
631 void
632 target_download_trace_state_variable (const trace_state_variable &tsv)
633 {
634 target_ops *target = current_inferior ()->top_target ();
635
636 return target->download_trace_state_variable (tsv);
637 }
638
639 void
640 target_enable_tracepoint (bp_location *loc)
641 {
642 return current_inferior ()->top_target ()->enable_tracepoint (loc);
643 }
644
645 void
646 target_disable_tracepoint (bp_location *loc)
647 {
648 return current_inferior ()->top_target ()->disable_tracepoint (loc);
649 }
650
651 void
652 target_trace_start ()
653 {
654 return current_inferior ()->top_target ()->trace_start ();
655 }
656
657 void
658 target_trace_set_readonly_regions ()
659 {
660 return current_inferior ()->top_target ()->trace_set_readonly_regions ();
661 }
662
663 int
664 target_get_trace_status (trace_status *ts)
665 {
666 return current_inferior ()->top_target ()->get_trace_status (ts);
667 }
668
669 void
670 target_get_tracepoint_status (breakpoint *tp, uploaded_tp *utp)
671 {
672 return current_inferior ()->top_target ()->get_tracepoint_status (tp, utp);
673 }
674
675 void
676 target_trace_stop ()
677 {
678 return current_inferior ()->top_target ()->trace_stop ();
679 }
680
681 int
682 target_trace_find (trace_find_type type, int num,
683 CORE_ADDR addr1, CORE_ADDR addr2, int *tpp)
684 {
685 target_ops *target = current_inferior ()->top_target ();
686
687 return target->trace_find (type, num, addr1, addr2, tpp);
688 }
689
690 bool
691 target_get_trace_state_variable_value (int tsv, LONGEST *val)
692 {
693 target_ops *target = current_inferior ()->top_target ();
694
695 return target->get_trace_state_variable_value (tsv, val);
696 }
697
698 int
699 target_save_trace_data (const char *filename)
700 {
701 return current_inferior ()->top_target ()->save_trace_data (filename);
702 }
703
704 int
705 target_upload_tracepoints (uploaded_tp **utpp)
706 {
707 return current_inferior ()->top_target ()->upload_tracepoints (utpp);
708 }
709
710 int
711 target_upload_trace_state_variables (uploaded_tsv **utsvp)
712 {
713 target_ops *target = current_inferior ()->top_target ();
714
715 return target->upload_trace_state_variables (utsvp);
716 }
717
718 LONGEST
719 target_get_raw_trace_data (gdb_byte *buf, ULONGEST offset, LONGEST len)
720 {
721 target_ops *target = current_inferior ()->top_target ();
722
723 return target->get_raw_trace_data (buf, offset, len);
724 }
725
726 int
727 target_get_min_fast_tracepoint_insn_len ()
728 {
729 target_ops *target = current_inferior ()->top_target ();
730
731 return target->get_min_fast_tracepoint_insn_len ();
732 }
733
734 void
735 target_set_disconnected_tracing (int val)
736 {
737 return current_inferior ()->top_target ()->set_disconnected_tracing (val);
738 }
739
740 void
741 target_set_circular_trace_buffer (int val)
742 {
743 return current_inferior ()->top_target ()->set_circular_trace_buffer (val);
744 }
745
746 void
747 target_set_trace_buffer_size (LONGEST val)
748 {
749 return current_inferior ()->top_target ()->set_trace_buffer_size (val);
750 }
751
752 bool
753 target_set_trace_notes (const char *user, const char *notes,
754 const char *stopnotes)
755 {
756 target_ops *target = current_inferior ()->top_target ();
757
758 return target->set_trace_notes (user, notes, stopnotes);
759 }
760
761 bool
762 target_get_tib_address (ptid_t ptid, CORE_ADDR *addr)
763 {
764 return current_inferior ()->top_target ()->get_tib_address (ptid, addr);
765 }
766
767 void
768 target_set_permissions ()
769 {
770 return current_inferior ()->top_target ()->set_permissions ();
771 }
772
773 bool
774 target_static_tracepoint_marker_at (CORE_ADDR addr,
775 static_tracepoint_marker *marker)
776 {
777 target_ops *target = current_inferior ()->top_target ();
778
779 return target->static_tracepoint_marker_at (addr, marker);
780 }
781
782 std::vector<static_tracepoint_marker>
783 target_static_tracepoint_markers_by_strid (const char *marker_id)
784 {
785 target_ops *target = current_inferior ()->top_target ();
786
787 return target->static_tracepoint_markers_by_strid (marker_id);
788 }
789
790 traceframe_info_up
791 target_traceframe_info ()
792 {
793 return current_inferior ()->top_target ()->traceframe_info ();
794 }
795
796 bool
797 target_use_agent (bool use)
798 {
799 return current_inferior ()->top_target ()->use_agent (use);
800 }
801
802 bool
803 target_can_use_agent ()
804 {
805 return current_inferior ()->top_target ()->can_use_agent ();
806 }
807
808 bool
809 target_augmented_libraries_svr4_read ()
810 {
811 return current_inferior ()->top_target ()->augmented_libraries_svr4_read ();
812 }
813
814 bool
815 target_supports_memory_tagging ()
816 {
817 return current_inferior ()->top_target ()->supports_memory_tagging ();
818 }
819
820 bool
821 target_fetch_memtags (CORE_ADDR address, size_t len, gdb::byte_vector &tags,
822 int type)
823 {
824 return current_inferior ()->top_target ()->fetch_memtags (address, len, tags, type);
825 }
826
827 bool
828 target_store_memtags (CORE_ADDR address, size_t len,
829 const gdb::byte_vector &tags, int type)
830 {
831 return current_inferior ()->top_target ()->store_memtags (address, len, tags, type);
832 }
833
834 void
835 target_log_command (const char *p)
836 {
837 return current_inferior ()->top_target ()->log_command (p);
838 }
839
840 /* This is used to implement the various target commands. */
841
842 static void
843 open_target (const char *args, int from_tty, struct cmd_list_element *command)
844 {
845 auto *ti = static_cast<target_info *> (command->context ());
846 target_open_ftype *func = target_factories[ti];
847
848 if (targetdebug)
849 fprintf_unfiltered (gdb_stdlog, "-> %s->open (...)\n",
850 ti->shortname);
851
852 func (args, from_tty);
853
854 if (targetdebug)
855 fprintf_unfiltered (gdb_stdlog, "<- %s->open (%s, %d)\n",
856 ti->shortname, args, from_tty);
857 }
858
859 /* See target.h. */
860
861 void
862 add_target (const target_info &t, target_open_ftype *func,
863 completer_ftype *completer)
864 {
865 struct cmd_list_element *c;
866
867 auto &func_slot = target_factories[&t];
868 if (func_slot != nullptr)
869 internal_error (__FILE__, __LINE__,
870 _("target already added (\"%s\")."), t.shortname);
871 func_slot = func;
872
873 if (targetlist == NULL)
874 add_basic_prefix_cmd ("target", class_run, _("\
875 Connect to a target machine or process.\n\
876 The first argument is the type or protocol of the target machine.\n\
877 Remaining arguments are interpreted by the target protocol. For more\n\
878 information on the arguments for a particular protocol, type\n\
879 `help target ' followed by the protocol name."),
880 &targetlist, 0, &cmdlist);
881 c = add_cmd (t.shortname, no_class, t.doc, &targetlist);
882 c->set_context ((void *) &t);
883 c->func = open_target;
884 if (completer != NULL)
885 set_cmd_completer (c, completer);
886 }
887
888 /* See target.h. */
889
890 void
891 add_deprecated_target_alias (const target_info &tinfo, const char *alias)
892 {
893 struct cmd_list_element *c;
894
895 /* If we use add_alias_cmd here, we do not get the deprecated warning;
896 see PR cli/15104. */
897 c = add_cmd (alias, no_class, tinfo.doc, &targetlist);
898 c->func = open_target;
899 c->set_context ((void *) &tinfo);
900 gdb::unique_xmalloc_ptr<char> alt
901 = xstrprintf ("target %s", tinfo.shortname);
902 deprecate_cmd (c, alt.release ());
903 }
904
905 /* Stub functions */
906
907 void
908 target_kill (void)
909 {
910 current_inferior ()->top_target ()->kill ();
911 }
912
913 void
914 target_load (const char *arg, int from_tty)
915 {
916 target_dcache_invalidate ();
917 current_inferior ()->top_target ()->load (arg, from_tty);
918 }
919
920 /* Define it. */
921
922 target_terminal_state target_terminal::m_terminal_state
923 = target_terminal_state::is_ours;
924
925 /* See target/target.h. */
926
927 void
928 target_terminal::init (void)
929 {
930 current_inferior ()->top_target ()->terminal_init ();
931
932 m_terminal_state = target_terminal_state::is_ours;
933 }
934
935 /* See target/target.h. */
936
937 void
938 target_terminal::inferior (void)
939 {
940 struct ui *ui = current_ui;
941
942 /* A background resume (``run&'') should leave GDB in control of the
943 terminal. */
944 if (ui->prompt_state != PROMPT_BLOCKED)
945 return;
946
947 /* Since we always run the inferior in the main console (unless "set
948 inferior-tty" is in effect), when some UI other than the main one
949 calls target_terminal::inferior, then we leave the main UI's
950 terminal settings as is. */
951 if (ui != main_ui)
952 return;
953
954 /* If GDB is resuming the inferior in the foreground, install
955 inferior's terminal modes. */
956
957 struct inferior *inf = current_inferior ();
958
959 if (inf->terminal_state != target_terminal_state::is_inferior)
960 {
961 current_inferior ()->top_target ()->terminal_inferior ();
962 inf->terminal_state = target_terminal_state::is_inferior;
963 }
964
965 m_terminal_state = target_terminal_state::is_inferior;
966
967 /* If the user hit C-c before, pretend that it was hit right
968 here. */
969 if (check_quit_flag ())
970 target_pass_ctrlc ();
971 }
972
973 /* See target/target.h. */
974
975 void
976 target_terminal::restore_inferior (void)
977 {
978 struct ui *ui = current_ui;
979
980 /* See target_terminal::inferior(). */
981 if (ui->prompt_state != PROMPT_BLOCKED || ui != main_ui)
982 return;
983
984 /* Restore the terminal settings of inferiors that were in the
985 foreground but are now ours_for_output due to a temporary
986 target_target::ours_for_output() call. */
987
988 {
989 scoped_restore_current_inferior restore_inferior;
990
991 for (::inferior *inf : all_inferiors ())
992 {
993 if (inf->terminal_state == target_terminal_state::is_ours_for_output)
994 {
995 set_current_inferior (inf);
996 current_inferior ()->top_target ()->terminal_inferior ();
997 inf->terminal_state = target_terminal_state::is_inferior;
998 }
999 }
1000 }
1001
1002 m_terminal_state = target_terminal_state::is_inferior;
1003
1004 /* If the user hit C-c before, pretend that it was hit right
1005 here. */
1006 if (check_quit_flag ())
1007 target_pass_ctrlc ();
1008 }
1009
1010 /* Switch terminal state to DESIRED_STATE, either is_ours, or
1011 is_ours_for_output. */
1012
1013 static void
1014 target_terminal_is_ours_kind (target_terminal_state desired_state)
1015 {
1016 scoped_restore_current_inferior restore_inferior;
1017
1018 /* Must do this in two passes. First, have all inferiors save the
1019 current terminal settings. Then, after all inferiors have had a
1020 chance to safely save the terminal settings, restore GDB's
1021 terminal settings. */
1022
1023 for (inferior *inf : all_inferiors ())
1024 {
1025 if (inf->terminal_state == target_terminal_state::is_inferior)
1026 {
1027 set_current_inferior (inf);
1028 current_inferior ()->top_target ()->terminal_save_inferior ();
1029 }
1030 }
1031
1032 for (inferior *inf : all_inferiors ())
1033 {
1034 /* Note we don't check is_inferior here like above because we
1035 need to handle 'is_ours_for_output -> is_ours' too. Careful
1036 to never transition from 'is_ours' to 'is_ours_for_output',
1037 though. */
1038 if (inf->terminal_state != target_terminal_state::is_ours
1039 && inf->terminal_state != desired_state)
1040 {
1041 set_current_inferior (inf);
1042 if (desired_state == target_terminal_state::is_ours)
1043 current_inferior ()->top_target ()->terminal_ours ();
1044 else if (desired_state == target_terminal_state::is_ours_for_output)
1045 current_inferior ()->top_target ()->terminal_ours_for_output ();
1046 else
1047 gdb_assert_not_reached ("unhandled desired state");
1048 inf->terminal_state = desired_state;
1049 }
1050 }
1051 }
1052
1053 /* See target/target.h. */
1054
1055 void
1056 target_terminal::ours ()
1057 {
1058 struct ui *ui = current_ui;
1059
1060 /* See target_terminal::inferior. */
1061 if (ui != main_ui)
1062 return;
1063
1064 if (m_terminal_state == target_terminal_state::is_ours)
1065 return;
1066
1067 target_terminal_is_ours_kind (target_terminal_state::is_ours);
1068 m_terminal_state = target_terminal_state::is_ours;
1069 }
1070
1071 /* See target/target.h. */
1072
1073 void
1074 target_terminal::ours_for_output ()
1075 {
1076 struct ui *ui = current_ui;
1077
1078 /* See target_terminal::inferior. */
1079 if (ui != main_ui)
1080 return;
1081
1082 if (!target_terminal::is_inferior ())
1083 return;
1084
1085 target_terminal_is_ours_kind (target_terminal_state::is_ours_for_output);
1086 target_terminal::m_terminal_state = target_terminal_state::is_ours_for_output;
1087 }
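As a usage sketch of the ownership API above (a hypothetical caller; the real callers live in infrun.c and the event loop), code that wants to print while the inferior nominally owns the terminal takes it back for output only, and hands it to the inferior again when resuming:

/* Hypothetical snippet illustrating the usual ownership dance.  */
target_terminal::ours_for_output ();    /* No-op unless the inferior
                                           currently owns the terminal;
                                           GDB may now print, input still
                                           goes to the inferior on the
                                           next resume.  */
printf_filtered (_("An asynchronous notification.\n"));

/* Later, when resuming the inferior in the foreground:  */
target_terminal::inferior ();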
1088
1089 /* See target/target.h. */
1090
1091 void
1092 target_terminal::info (const char *arg, int from_tty)
1093 {
1094 current_inferior ()->top_target ()->terminal_info (arg, from_tty);
1095 }
1096
1097 /* See target.h. */
1098
1099 bool
1100 target_supports_terminal_ours (void)
1101 {
1102 /* The current top target is the target at the top of the target
1103 stack of the current inferior. While normally there's always an
1104 inferior, we must check for nullptr here because we can get here
1105 very early during startup, before the initial inferior is first
1106 created. */
1107 inferior *inf = current_inferior ();
1108
1109 if (inf == nullptr)
1110 return false;
1111 return inf->top_target ()->supports_terminal_ours ();
1112 }
1113
1114 static void
1115 tcomplain (void)
1116 {
1117 error (_("You can't do that when your target is `%s'"),
1118 current_inferior ()->top_target ()->shortname ());
1119 }
1120
1121 void
1122 noprocess (void)
1123 {
1124 error (_("You can't do that without a process to debug."));
1125 }
1126
1127 static void
1128 default_terminal_info (struct target_ops *self, const char *args, int from_tty)
1129 {
1130 printf_filtered (_("No saved terminal information.\n"));
1131 }
1132
1133 /* A default implementation for the to_get_ada_task_ptid target method.
1134
1135 This function builds the PTID by using both LWP and TID as part of
1136 the PTID lwp and tid elements. The pid used is the pid of the
1137 inferior_ptid. */
1138
1139 static ptid_t
1140 default_get_ada_task_ptid (struct target_ops *self, long lwp, ULONGEST tid)
1141 {
1142 return ptid_t (inferior_ptid.pid (), lwp, tid);
1143 }
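For example, with inferior_ptid belonging to process 1234, a call such as default_get_ada_task_ptid (self, 5678, 42) yields ptid_t (1234, 5678, 42), i.e. the inferior's pid combined with the given LWP and TID.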
1144
1145 static enum exec_direction_kind
1146 default_execution_direction (struct target_ops *self)
1147 {
1148 if (!target_can_execute_reverse ())
1149 return EXEC_FORWARD;
1150 else if (!target_can_async_p ())
1151 return EXEC_FORWARD;
1152 else
1153 gdb_assert_not_reached ("\
1154 to_execution_direction must be implemented for reverse async");
1155 }
1156
1157 /* See target.h. */
1158
1159 void
1160 decref_target (target_ops *t)
1161 {
1162 t->decref ();
1163 if (t->refcount () == 0)
1164 {
1165 if (t->stratum () == process_stratum)
1166 connection_list_remove (as_process_stratum_target (t));
1167 target_close (t);
1168 }
1169 }
1170
1171 /* See target.h. */
1172
1173 void
1174 target_stack::push (target_ops *t)
1175 {
1176 t->incref ();
1177
1178 strata stratum = t->stratum ();
1179
1180 if (stratum == process_stratum)
1181 connection_list_add (as_process_stratum_target (t));
1182
1183 /* If there's already a target at this stratum, remove it. */
1184
1185 if (m_stack[stratum] != NULL)
1186 unpush (m_stack[stratum]);
1187
1188 /* Now add the new one. */
1189 m_stack[stratum] = t;
1190
1191 if (m_top < stratum)
1192 m_top = stratum;
1193 }
1194
1195 /* See target.h. */
1196
1197 bool
1198 target_stack::unpush (target_ops *t)
1199 {
1200 gdb_assert (t != NULL);
1201
1202 strata stratum = t->stratum ();
1203
1204 if (stratum == dummy_stratum)
1205 internal_error (__FILE__, __LINE__,
1206 _("Attempt to unpush the dummy target"));
1207
1208 /* Look for the specified target. Note that a target can only occur
1209 once in the target stack. */
1210
1211 if (m_stack[stratum] != t)
1212 {
1213 /* If T wasn't pushed, quit. Only open targets should be
1214 closed. */
1215 return false;
1216 }
1217
1218 /* Unchain the target. */
1219 m_stack[stratum] = NULL;
1220
1221 if (m_top == stratum)
1222 m_top = this->find_beneath (t)->stratum ();
1223
1224 /* Finally close the target, if there are no inferiors
1225 referencing this target still. Note we do this after unchaining,
1226 so any target method calls from within the target_close
1227 implementation don't end up in T anymore. Do leave the target
1228 open if there are other inferiors still referencing this
1229 target. */
1230 decref_target (t);
1231
1232 return true;
1233 }
1234
1235 /* Unpush TARGET and assert that it worked. */
1236
1237 static void
1238 unpush_target_and_assert (struct target_ops *target)
1239 {
1240 if (!current_inferior ()->unpush_target (target))
1241 {
1242 fprintf_unfiltered (gdb_stderr,
1243 "pop_all_targets couldn't find target %s\n",
1244 target->shortname ());
1245 internal_error (__FILE__, __LINE__,
1246 _("failed internal consistency check"));
1247 }
1248 }
1249
1250 void
1251 pop_all_targets_above (enum strata above_stratum)
1252 {
1253 while ((int) (current_inferior ()->top_target ()->stratum ())
1254 > (int) above_stratum)
1255 unpush_target_and_assert (current_inferior ()->top_target ());
1256 }
1257
1258 /* See target.h. */
1259
1260 void
1261 pop_all_targets_at_and_above (enum strata stratum)
1262 {
1263 while ((int) (current_inferior ()->top_target ()->stratum ())
1264 >= (int) stratum)
1265 unpush_target_and_assert (current_inferior ()->top_target ());
1266 }
1267
1268 void
1269 pop_all_targets (void)
1270 {
1271 pop_all_targets_above (dummy_stratum);
1272 }
1273
1274 void
1275 target_unpusher::operator() (struct target_ops *ops) const
1276 {
1277 current_inferior ()->unpush_target (ops);
1278 }
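target_unpusher is the deleter behind the target_unpush_up RAII type declared in target.h. A minimal sketch, assuming a hypothetical target that should be unpushed automatically if anything throws before we decide to keep it:

/* Hypothetical usage of target_unpush_up.  */
target_ops *ops = make_example_target ();   /* hypothetical factory  */
current_inferior ()->push_target (ops);
target_unpush_up unpusher (ops);

initialize_example_connection ();           /* hypothetical; may throw  */

/* Success -- keep the target pushed.  */
unpusher.release ();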
1279
1280 /* Default implementation of to_get_thread_local_address. */
1281
1282 static void
1283 generic_tls_error (void)
1284 {
1285 throw_error (TLS_GENERIC_ERROR,
1286 _("Cannot find thread-local variables on this target"));
1287 }
1288
1289 /* Using the objfile specified in OBJFILE, find the address for the
1290 current thread's thread-local storage with offset OFFSET. */
1291 CORE_ADDR
1292 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
1293 {
1294 volatile CORE_ADDR addr = 0;
1295 struct target_ops *target = current_inferior ()->top_target ();
1296 struct gdbarch *gdbarch = target_gdbarch ();
1297
1298 if (gdbarch_fetch_tls_load_module_address_p (gdbarch))
1299 {
1300 ptid_t ptid = inferior_ptid;
1301
1302 try
1303 {
1304 CORE_ADDR lm_addr;
1305
1306 /* Fetch the load module address for this objfile. */
1307 lm_addr = gdbarch_fetch_tls_load_module_address (gdbarch,
1308 objfile);
1309
1310 if (gdbarch_get_thread_local_address_p (gdbarch))
1311 addr = gdbarch_get_thread_local_address (gdbarch, ptid, lm_addr,
1312 offset);
1313 else
1314 addr = target->get_thread_local_address (ptid, lm_addr, offset);
1315 }
1316 /* If an error occurred, print TLS related messages here. Otherwise,
1317 throw the error to some higher catcher. */
1318 catch (const gdb_exception &ex)
1319 {
1320 int objfile_is_library = (objfile->flags & OBJF_SHARED);
1321
1322 switch (ex.error)
1323 {
1324 case TLS_NO_LIBRARY_SUPPORT_ERROR:
1325 error (_("Cannot find thread-local variables "
1326 "in this thread library."));
1327 break;
1328 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
1329 if (objfile_is_library)
1330 error (_("Cannot find shared library `%s' in dynamic"
1331 " linker's load module list"), objfile_name (objfile));
1332 else
1333 error (_("Cannot find executable file `%s' in dynamic"
1334 " linker's load module list"), objfile_name (objfile));
1335 break;
1336 case TLS_NOT_ALLOCATED_YET_ERROR:
1337 if (objfile_is_library)
1338 error (_("The inferior has not yet allocated storage for"
1339 " thread-local variables in\n"
1340 "the shared library `%s'\n"
1341 "for %s"),
1342 objfile_name (objfile),
1343 target_pid_to_str (ptid).c_str ());
1344 else
1345 error (_("The inferior has not yet allocated storage for"
1346 " thread-local variables in\n"
1347 "the executable `%s'\n"
1348 "for %s"),
1349 objfile_name (objfile),
1350 target_pid_to_str (ptid).c_str ());
1351 break;
1352 case TLS_GENERIC_ERROR:
1353 if (objfile_is_library)
1354 error (_("Cannot find thread-local storage for %s, "
1355 "shared library %s:\n%s"),
1356 target_pid_to_str (ptid).c_str (),
1357 objfile_name (objfile), ex.what ());
1358 else
1359 error (_("Cannot find thread-local storage for %s, "
1360 "executable file %s:\n%s"),
1361 target_pid_to_str (ptid).c_str (),
1362 objfile_name (objfile), ex.what ());
1363 break;
1364 default:
1365 throw;
1366 break;
1367 }
1368 }
1369 }
1370 else
1371 error (_("Cannot find thread-local variables on this target"));
1372
1373 return addr;
1374 }
1375
1376 const char *
1377 target_xfer_status_to_string (enum target_xfer_status status)
1378 {
1379 #define CASE(X) case X: return #X
1380 switch (status)
1381 {
1382 CASE(TARGET_XFER_E_IO);
1383 CASE(TARGET_XFER_UNAVAILABLE);
1384 default:
1385 return "<unknown>";
1386 }
1387 #undef CASE
1388 };
1389
1390
1391 /* See target.h. */
1392
1393 gdb::unique_xmalloc_ptr<char>
1394 target_read_string (CORE_ADDR memaddr, int len, int *bytes_read)
1395 {
1396 gdb::unique_xmalloc_ptr<gdb_byte> buffer;
1397
1398 int ignore;
1399 if (bytes_read == nullptr)
1400 bytes_read = &ignore;
1401
1402 /* Note that the endian-ness does not matter here. */
1403 int errcode = read_string (memaddr, -1, 1, len, BFD_ENDIAN_LITTLE,
1404 &buffer, bytes_read);
1405 if (errcode != 0)
1406 return {};
1407
1408 return gdb::unique_xmalloc_ptr<char> ((char *) buffer.release ());
1409 }
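A brief usage sketch (STRING_ADDR is a hypothetical inferior address): read a NUL-terminated string of at most 256 bytes, treating a null result as unreadable memory.

int bytes_read;
gdb::unique_xmalloc_ptr<char> str
  = target_read_string (string_addr, 256, &bytes_read);
if (str == nullptr)
  error (_("could not read string at %s"),
         paddress (target_gdbarch (), string_addr));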
1410
1411 const target_section_table *
1412 target_get_section_table (struct target_ops *target)
1413 {
1414 return target->get_section_table ();
1415 }
1416
1417 /* Find a section containing ADDR. */
1418
1419 const struct target_section *
1420 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1421 {
1422 const target_section_table *table = target_get_section_table (target);
1423
1424 if (table == NULL)
1425 return NULL;
1426
1427 for (const target_section &secp : *table)
1428 {
1429 if (addr >= secp.addr && addr < secp.endaddr)
1430 return &secp;
1431 }
1432 return NULL;
1433 }
1434
1435 /* See target.h. */
1436
1437 const target_section_table *
1438 default_get_section_table ()
1439 {
1440 return &current_program_space->target_sections ();
1441 }
1442
1443 /* Helper for the memory xfer routines. Checks the attributes of the
1444 memory region of MEMADDR against the read or write being attempted.
1445 If the access is permitted returns true, otherwise returns false.
1446 REGION_P is an optional output parameter. If non-NULL, it is
1447 filled with a pointer to the memory region of MEMADDR. REG_LEN
1448 returns LEN trimmed to the end of the region. This is how much the
1449 caller can continue requesting, if the access is permitted. A
1450 single xfer request must not straddle memory region boundaries. */
1451
1452 static int
1453 memory_xfer_check_region (gdb_byte *readbuf, const gdb_byte *writebuf,
1454 ULONGEST memaddr, ULONGEST len, ULONGEST *reg_len,
1455 struct mem_region **region_p)
1456 {
1457 struct mem_region *region;
1458
1459 region = lookup_mem_region (memaddr);
1460
1461 if (region_p != NULL)
1462 *region_p = region;
1463
1464 switch (region->attrib.mode)
1465 {
1466 case MEM_RO:
1467 if (writebuf != NULL)
1468 return 0;
1469 break;
1470
1471 case MEM_WO:
1472 if (readbuf != NULL)
1473 return 0;
1474 break;
1475
1476 case MEM_FLASH:
1477 /* We only support writing to flash during "load" for now. */
1478 if (writebuf != NULL)
1479 error (_("Writing to flash memory forbidden in this context"));
1480 break;
1481
1482 case MEM_NONE:
1483 return 0;
1484 }
1485
1486 /* region->hi == 0 means there's no upper bound. */
1487 if (memaddr + len < region->hi || region->hi == 0)
1488 *reg_len = len;
1489 else
1490 *reg_len = region->hi - memaddr;
1491
1492 return 1;
1493 }
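To make the trimming concrete, a worked example under an assumed memory map: with a read/write region covering [0x1000, 0x2000) and a request of LEN = 0x40 bytes at MEMADDR = 0x1ff0, memaddr + len (0x2030) is not below region->hi, so *REG_LEN is trimmed to region->hi - memaddr = 0x10. The caller transfers those 0x10 bytes and then issues a fresh request starting at 0x2000, which is checked against whichever region covers that address.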
1494
1495 /* Read memory from more than one valid target. A core file, for
1496 instance, could have some of the memory but delegate other bits to
1497 the target below it. So, we must manually try all targets. */
1498
1499 enum target_xfer_status
1500 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1501 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1502 ULONGEST *xfered_len)
1503 {
1504 enum target_xfer_status res;
1505
1506 do
1507 {
1508 res = ops->xfer_partial (TARGET_OBJECT_MEMORY, NULL,
1509 readbuf, writebuf, memaddr, len,
1510 xfered_len);
1511 if (res == TARGET_XFER_OK)
1512 break;
1513
1514 /* Stop if the target reports that the memory is not available. */
1515 if (res == TARGET_XFER_UNAVAILABLE)
1516 break;
1517
1518 /* Don't continue past targets which have all the memory.
1519 At one time, this code was necessary to read data from
1520 executables / shared libraries when data for the requested
1521 addresses weren't available in the core file. But now the
1522 core target handles this case itself. */
1523 if (ops->has_all_memory ())
1524 break;
1525
1526 ops = ops->beneath ();
1527 }
1528 while (ops != NULL);
1529
1530 /* The cache works at the raw memory level. Make sure the cache
1531 gets updated with raw contents no matter what kind of memory
1532 object was originally being written. Note we do write-through
1533 first, so that if it fails, we don't write to the cache contents
1534 that never made it to the target. */
1535 if (writebuf != NULL
1536 && inferior_ptid != null_ptid
1537 && target_dcache_init_p ()
1538 && (stack_cache_enabled_p () || code_cache_enabled_p ()))
1539 {
1540 DCACHE *dcache = target_dcache_get ();
1541
1542 /* Note that writing to an area of memory which wasn't present
1543 in the cache doesn't cause it to be loaded in. */
1544 dcache_update (dcache, res, memaddr, writebuf, *xfered_len);
1545 }
1546
1547 return res;
1548 }
1549
1550 /* Perform a partial memory transfer.
1551 For docs see target.h, to_xfer_partial. */
1552
1553 static enum target_xfer_status
1554 memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
1555 gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
1556 ULONGEST len, ULONGEST *xfered_len)
1557 {
1558 enum target_xfer_status res;
1559 ULONGEST reg_len;
1560 struct mem_region *region;
1561 struct inferior *inf;
1562
1563 /* For accesses to unmapped overlay sections, read directly from
1564 files. Must do this first, as MEMADDR may need adjustment. */
1565 if (readbuf != NULL && overlay_debugging)
1566 {
1567 struct obj_section *section = find_pc_overlay (memaddr);
1568
1569 if (pc_in_unmapped_range (memaddr, section))
1570 {
1571 const target_section_table *table = target_get_section_table (ops);
1572 const char *section_name = section->the_bfd_section->name;
1573
1574 memaddr = overlay_mapped_address (memaddr, section);
1575
1576 auto match_cb = [=] (const struct target_section *s)
1577 {
1578 return (strcmp (section_name, s->the_bfd_section->name) == 0);
1579 };
1580
1581 return section_table_xfer_memory_partial (readbuf, writebuf,
1582 memaddr, len, xfered_len,
1583 *table, match_cb);
1584 }
1585 }
1586
1587 /* Try the executable files, if "trust-readonly-sections" is set. */
1588 if (readbuf != NULL && trust_readonly)
1589 {
1590 const struct target_section *secp
1591 = target_section_by_addr (ops, memaddr);
1592 if (secp != NULL
1593 && (bfd_section_flags (secp->the_bfd_section) & SEC_READONLY))
1594 {
1595 const target_section_table *table = target_get_section_table (ops);
1596 return section_table_xfer_memory_partial (readbuf, writebuf,
1597 memaddr, len, xfered_len,
1598 *table);
1599 }
1600 }
1601
1602 /* Try GDB's internal data cache. */
1603
1604 if (!memory_xfer_check_region (readbuf, writebuf, memaddr, len, &reg_len,
1605 &region))
1606 return TARGET_XFER_E_IO;
1607
1608 if (inferior_ptid != null_ptid)
1609 inf = current_inferior ();
1610 else
1611 inf = NULL;
1612
1613 if (inf != NULL
1614 && readbuf != NULL
1615 /* The dcache reads whole cache lines; that doesn't play well
1616 with reading from a trace buffer, because reading outside of
1617 the collected memory range fails. */
1618 && get_traceframe_number () == -1
1619 && (region->attrib.cache
1620 || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
1621 || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
1622 {
1623 DCACHE *dcache = target_dcache_get_or_init ();
1624
1625 return dcache_read_memory_partial (ops, dcache, memaddr, readbuf,
1626 reg_len, xfered_len);
1627 }
1628
1629 /* If none of those methods found the memory we wanted, fall back
1630 to a target partial transfer. Normally a single call to
1631 to_xfer_partial is enough; if it doesn't recognize an object
1632 it will call the to_xfer_partial of the next target down.
1633 But for memory this won't do. Memory is the only target
1634 object which can be read from more than one valid target.
1635 A core file, for instance, could have some of the memory but
1636 delegate other bits to the target below it. So, we must
1637 manually try all targets. */
1638
1639 res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
1640 xfered_len);
1641
1642 /* If we still haven't got anything, return the last error. We
1643 give up. */
1644 return res;
1645 }
1646
1647 /* Perform a partial memory transfer. For docs see target.h,
1648 to_xfer_partial. */
1649
1650 static enum target_xfer_status
1651 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1652 gdb_byte *readbuf, const gdb_byte *writebuf,
1653 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1654 {
1655 enum target_xfer_status res;
1656
1657 /* Zero length requests are ok and require no work. */
1658 if (len == 0)
1659 return TARGET_XFER_EOF;
1660
1661 memaddr = address_significant (target_gdbarch (), memaddr);
1662
1663 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1664 breakpoint insns, thus hiding out from higher layers whether
1665 there are software breakpoints inserted in the code stream. */
1666 if (readbuf != NULL)
1667 {
1668 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1669 xfered_len);
1670
1671 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1672 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, *xfered_len);
1673 }
1674 else
1675 {
1676 /* A large write request is likely to be partially satisfied
1677 by memory_xfer_partial_1. We will continually malloc
1678 and free a copy of the entire write request for breakpoint
1679 shadow handling even though we only end up writing a small
1680 subset of it. Cap writes to a limit specified by the target
1681 to mitigate this. */
1682 len = std::min (ops->get_memory_xfer_limit (), len);
1683
1684 gdb::byte_vector buf (writebuf, writebuf + len);
1685 breakpoint_xfer_memory (NULL, buf.data (), writebuf, memaddr, len);
1686 res = memory_xfer_partial_1 (ops, object, NULL, buf.data (), memaddr, len,
1687 xfered_len);
1688 }
1689
1690 return res;
1691 }
1692
1693 scoped_restore_tmpl<int>
1694 make_scoped_restore_show_memory_breakpoints (int show)
1695 {
1696 return make_scoped_restore (&show_memory_breakpoints, show);
1697 }
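A usage sketch, assuming a caller that wants a read to return the breakpoint instructions GDB physically inserted rather than the shadowed original bytes (PC and the buffer size are illustrative):

/* While RESTORE is live, readbuf is not scrubbed of breakpoint
   shadows in memory_xfer_partial.  */
scoped_restore_tmpl<int> restore
  = make_scoped_restore_show_memory_breakpoints (1);

gdb_byte insn[4];
if (target_read_memory (pc, insn, sizeof insn) != 0)
  error (_("cannot read memory at %s"),
         paddress (target_gdbarch (), pc));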
1698
1699 /* For docs see target.h, to_xfer_partial. */
1700
1701 enum target_xfer_status
1702 target_xfer_partial (struct target_ops *ops,
1703 enum target_object object, const char *annex,
1704 gdb_byte *readbuf, const gdb_byte *writebuf,
1705 ULONGEST offset, ULONGEST len,
1706 ULONGEST *xfered_len)
1707 {
1708 enum target_xfer_status retval;
1709
1710 /* Transfer is done when LEN is zero. */
1711 if (len == 0)
1712 return TARGET_XFER_EOF;
1713
1714 if (writebuf && !may_write_memory)
1715 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1716 core_addr_to_string_nz (offset), plongest (len));
1717
1718 *xfered_len = 0;
1719
1720 /* If this is a memory transfer, let the memory-specific code
1721 have a look at it instead. Memory transfers are more
1722 complicated. */
1723 if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
1724 || object == TARGET_OBJECT_CODE_MEMORY)
1725 retval = memory_xfer_partial (ops, object, readbuf,
1726 writebuf, offset, len, xfered_len);
1727 else if (object == TARGET_OBJECT_RAW_MEMORY)
1728 {
1729 /* Skip/avoid accessing the target if the memory region
1730 attributes block the access. Check this here instead of in
1731 raw_memory_xfer_partial as otherwise we'd end up checking
1732 this twice in the case the memory_xfer_partial path is
1733 taken; once before checking the dcache, and another in the
1734 tail call to raw_memory_xfer_partial. */
1735 if (!memory_xfer_check_region (readbuf, writebuf, offset, len, &len,
1736 NULL))
1737 return TARGET_XFER_E_IO;
1738
1739 /* Request the normal memory object from other layers. */
1740 retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
1741 xfered_len);
1742 }
1743 else
1744 retval = ops->xfer_partial (object, annex, readbuf,
1745 writebuf, offset, len, xfered_len);
1746
1747 if (targetdebug)
1748 {
1749 const unsigned char *myaddr = NULL;
1750
1751 fprintf_unfiltered (gdb_stdlog,
1752 "%s:target_xfer_partial "
1753 "(%d, %s, %s, %s, %s, %s) = %d, %s",
1754 ops->shortname (),
1755 (int) object,
1756 (annex ? annex : "(null)"),
1757 host_address_to_string (readbuf),
1758 host_address_to_string (writebuf),
1759 core_addr_to_string_nz (offset),
1760 pulongest (len), retval,
1761 pulongest (*xfered_len));
1762
1763 if (readbuf)
1764 myaddr = readbuf;
1765 if (writebuf)
1766 myaddr = writebuf;
1767 if (retval == TARGET_XFER_OK && myaddr != NULL)
1768 {
1769 int i;
1770
1771 fputs_unfiltered (", bytes =", gdb_stdlog);
1772 for (i = 0; i < *xfered_len; i++)
1773 {
1774 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1775 {
1776 if (targetdebug < 2 && i > 0)
1777 {
1778 fprintf_unfiltered (gdb_stdlog, " ...");
1779 break;
1780 }
1781 fprintf_unfiltered (gdb_stdlog, "\n");
1782 }
1783
1784 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1785 }
1786 }
1787
1788 fputc_unfiltered ('\n', gdb_stdlog);
1789 }
1790
1791 /* Check that implementations of to_xfer_partial update *XFERED_LEN
1792 properly. Do assertion after printing debug messages, so that we
1793 can find more clues on assertion failure from debugging messages. */
1794 if (retval == TARGET_XFER_OK || retval == TARGET_XFER_UNAVAILABLE)
1795 gdb_assert (*xfered_len > 0);
1796
1797 return retval;
1798 }
1799
1800 /* Read LEN bytes of target memory at address MEMADDR, placing the
1801 results in GDB's memory at MYADDR. Returns either 0 for success or
1802 -1 if any error occurs.
1803
1804 If an error occurs, no guarantee is made about the contents of the data at
1805 MYADDR. In particular, the caller should not depend upon partial reads
1806 filling the buffer with good data. There is no way for the caller to know
1807 how much good data might have been transferred anyway. Callers that can
1808 deal with partial reads should call target_read (which will retry until
1809 it makes no progress, and then return how much was transferred). */
1810
1811 int
1812 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1813 {
1814 if (target_read (current_inferior ()->top_target (),
1815 TARGET_OBJECT_MEMORY, NULL,
1816 myaddr, memaddr, len) == len)
1817 return 0;
1818 else
1819 return -1;
1820 }
1821
1822 /* See target/target.h. */
1823
1824 int
1825 target_read_uint32 (CORE_ADDR memaddr, uint32_t *result)
1826 {
1827 gdb_byte buf[4];
1828 int r;
1829
1830 r = target_read_memory (memaddr, buf, sizeof buf);
1831 if (r != 0)
1832 return r;
1833 *result = extract_unsigned_integer (buf, sizeof buf,
1834 gdbarch_byte_order (target_gdbarch ()));
1835 return 0;
1836 }
1837
1838 /* Like target_read_memory, but specify explicitly that this is a read
1839 from the target's raw memory. That is, this read bypasses the
1840 dcache, breakpoint shadowing, etc. */
1841
1842 int
1843 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1844 {
1845 if (target_read (current_inferior ()->top_target (),
1846 TARGET_OBJECT_RAW_MEMORY, NULL,
1847 myaddr, memaddr, len) == len)
1848 return 0;
1849 else
1850 return -1;
1851 }
1852
1853 /* Like target_read_memory, but specify explicitly that this is a read from
1854 the target's stack. This may trigger different cache behavior. */
1855
1856 int
1857 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1858 {
1859 if (target_read (current_inferior ()->top_target (),
1860 TARGET_OBJECT_STACK_MEMORY, NULL,
1861 myaddr, memaddr, len) == len)
1862 return 0;
1863 else
1864 return -1;
1865 }
1866
1867 /* Like target_read_memory, but specify explicitly that this is a read from
1868 the target's code. This may trigger different cache behavior. */
1869
1870 int
1871 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1872 {
1873 if (target_read (current_inferior ()->top_target (),
1874 TARGET_OBJECT_CODE_MEMORY, NULL,
1875 myaddr, memaddr, len) == len)
1876 return 0;
1877 else
1878 return -1;
1879 }
1880
1881 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1882 Returns either 0 for success or -1 if any error occurs. If an
1883 error occurs, no guarantee is made about how much data got written.
1884 Callers that can deal with partial writes should call
1885 target_write. */
1886
1887 int
1888 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1889 {
1890 if (target_write (current_inferior ()->top_target (),
1891 TARGET_OBJECT_MEMORY, NULL,
1892 myaddr, memaddr, len) == len)
1893 return 0;
1894 else
1895 return -1;
1896 }
1897
1898 /* Write LEN bytes from MYADDR to target raw memory at address
1899 MEMADDR. Returns either 0 for success or -1 if any error occurs.
1900 If an error occurs, no guarantee is made about how much data got
1901 written. Callers that can deal with partial writes should call
1902 target_write. */
1903
1904 int
1905 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1906 {
1907 if (target_write (current_inferior ()->top_target (),
1908 TARGET_OBJECT_RAW_MEMORY, NULL,
1909 myaddr, memaddr, len) == len)
1910 return 0;
1911 else
1912 return -1;
1913 }
1914
1915 /* Fetch the target's memory map. */
1916
1917 std::vector<mem_region>
1918 target_memory_map (void)
1919 {
1920 target_ops *target = current_inferior ()->top_target ();
1921 std::vector<mem_region> result = target->memory_map ();
1922 if (result.empty ())
1923 return result;
1924
1925 std::sort (result.begin (), result.end ());
1926
1927 /* Check that regions do not overlap. Simultaneously assign
1928 a numbering for the "mem" commands to use to refer to
1929 each region. */
1930 mem_region *last_one = NULL;
1931 for (size_t ix = 0; ix < result.size (); ix++)
1932 {
1933 mem_region *this_one = &result[ix];
1934 this_one->number = ix;
1935
1936 if (last_one != NULL && last_one->hi > this_one->lo)
1937 {
1938 warning (_("Overlapping regions in memory map: ignoring"));
1939 return std::vector<mem_region> ();
1940 }
1941
1942 last_one = this_one;
1943 }
1944
1945 return result;
1946 }
1947
1948 void
1949 target_flash_erase (ULONGEST address, LONGEST length)
1950 {
1951 current_inferior ()->top_target ()->flash_erase (address, length);
1952 }
1953
1954 void
1955 target_flash_done (void)
1956 {
1957 current_inferior ()->top_target ()->flash_done ();
1958 }
1959
1960 static void
1961 show_trust_readonly (struct ui_file *file, int from_tty,
1962 struct cmd_list_element *c, const char *value)
1963 {
1964 fprintf_filtered (file,
1965 _("Mode for reading from readonly sections is %s.\n"),
1966 value);
1967 }
1968
1969 /* Target vector read/write partial wrapper functions. */
1970
1971 static enum target_xfer_status
1972 target_read_partial (struct target_ops *ops,
1973 enum target_object object,
1974 const char *annex, gdb_byte *buf,
1975 ULONGEST offset, ULONGEST len,
1976 ULONGEST *xfered_len)
1977 {
1978 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
1979 xfered_len);
1980 }
1981
1982 static enum target_xfer_status
1983 target_write_partial (struct target_ops *ops,
1984 enum target_object object,
1985 const char *annex, const gdb_byte *buf,
1986 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
1987 {
1988 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
1989 xfered_len);
1990 }
1991
1992 /* Wrappers to perform the full transfer. */
1993
1994 /* For docs on target_read see target.h. */
1995
1996 LONGEST
1997 target_read (struct target_ops *ops,
1998 enum target_object object,
1999 const char *annex, gdb_byte *buf,
2000 ULONGEST offset, LONGEST len)
2001 {
2002 LONGEST xfered_total = 0;
2003 int unit_size = 1;
2004
2005 /* If we are reading from a memory object, find the length of an addressable
2006 unit for that architecture. */
2007 if (object == TARGET_OBJECT_MEMORY
2008 || object == TARGET_OBJECT_STACK_MEMORY
2009 || object == TARGET_OBJECT_CODE_MEMORY
2010 || object == TARGET_OBJECT_RAW_MEMORY)
2011 unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
2012
2013 while (xfered_total < len)
2014 {
2015 ULONGEST xfered_partial;
2016 enum target_xfer_status status;
2017
2018 status = target_read_partial (ops, object, annex,
2019 buf + xfered_total * unit_size,
2020 offset + xfered_total, len - xfered_total,
2021 &xfered_partial);
2022
2023 /* Call an observer, notifying them of the xfer progress? */
2024 if (status == TARGET_XFER_EOF)
2025 return xfered_total;
2026 else if (status == TARGET_XFER_OK)
2027 {
2028 xfered_total += xfered_partial;
2029 QUIT;
2030 }
2031 else
2032 return TARGET_XFER_E_IO;
2033
2034 }
2035 return len;
2036 }
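
/* Illustrative sketch (not part of the build): a typical full-length read
   through target_read.  Unlike target_read_partial, the loop above keeps
   going until LEN units have been read, EOF was hit, or an error occurred.
   The helper name example_read_block is hypothetical.  */
#if 0
static bool
example_read_block (CORE_ADDR addr, gdb_byte *buf, LONGEST len)
{
  LONGEST got = target_read (current_inferior ()->top_target (),
			     TARGET_OBJECT_MEMORY, NULL, buf, addr, len);

  /* target_read returns the number of units read (== LEN on full
     success) or TARGET_XFER_E_IO on error.  */
  return got == len;
}
#endif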
2037
2038 /* Assuming that the entire [begin, end) range of memory cannot be
2039 read, try to read whatever subrange is possible to read.
2040
2041 The function returns, in RESULT, either zero or one memory block.
2042 If there's a readable subrange at the beginning, it is completely
2043 read and returned. Any further readable subrange will not be read.
2044 Otherwise, if there's a readable subrange at the end, it will be
2045 completely read and returned. Any readable subranges before it
2046 (which obviously do not start at the beginning) are ignored. In
2047 all other cases -- no readable subrange at all, or readable
2048 subranges that touch neither the beginning nor the end -- nothing is returned.
2049
2050 The purpose of this function is to handle a read across a boundary
2051 of accessible memory when a memory map is not available.
2052 The above restrictions are fine for this case, but will give
2053 incorrect results if the memory is 'patchy'. However, supporting
2054 'patchy' memory would require trying to read every single byte,
2055 which seems an unacceptable solution. An explicit memory map is
2056 recommended for this case -- and target_read_memory_robust will
2057 take care of reading multiple ranges then. */
2058
2059 static void
2060 read_whatever_is_readable (struct target_ops *ops,
2061 const ULONGEST begin, const ULONGEST end,
2062 int unit_size,
2063 std::vector<memory_read_result> *result)
2064 {
2065 ULONGEST current_begin = begin;
2066 ULONGEST current_end = end;
2067 int forward;
2068 ULONGEST xfered_len;
2069
2070 /* If we previously failed to read 1 byte, nothing can be done here. */
2071 if (end - begin <= 1)
2072 return;
2073
2074 gdb::unique_xmalloc_ptr<gdb_byte> buf ((gdb_byte *) xmalloc ((end - begin) * unit_size));
2075
2076 /* Check that either the first or the last byte is readable, and give up
2077 if not. This heuristic is meant to permit reading accessible memory
2078 at the boundary of an accessible region. */
2079 if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2080 buf.get (), begin, 1, &xfered_len) == TARGET_XFER_OK)
2081 {
2082 forward = 1;
2083 ++current_begin;
2084 }
2085 else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2086 buf.get () + (end - begin) - 1, end - 1, 1,
2087 &xfered_len) == TARGET_XFER_OK)
2088 {
2089 forward = 0;
2090 --current_end;
2091 }
2092 else
2093 return;
2094
2095 /* Loop invariant is that the [current_begin, current_end) was previously
2096 found to be not readable as a whole.
2097
2098 Note loop condition -- if the range has 1 byte, we can't divide the range
2099 so there's no point trying further. */
2100 while (current_end - current_begin > 1)
2101 {
2102 ULONGEST first_half_begin, first_half_end;
2103 ULONGEST second_half_begin, second_half_end;
2104 LONGEST xfer;
2105 ULONGEST middle = current_begin + (current_end - current_begin) / 2;
2106
2107 if (forward)
2108 {
2109 first_half_begin = current_begin;
2110 first_half_end = middle;
2111 second_half_begin = middle;
2112 second_half_end = current_end;
2113 }
2114 else
2115 {
2116 first_half_begin = middle;
2117 first_half_end = current_end;
2118 second_half_begin = current_begin;
2119 second_half_end = middle;
2120 }
2121
2122 xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2123 buf.get () + (first_half_begin - begin) * unit_size,
2124 first_half_begin,
2125 first_half_end - first_half_begin);
2126
2127 if (xfer == first_half_end - first_half_begin)
2128 {
2129 /* This half reads fine, so the error must be in the
2130 other half. */
2131 current_begin = second_half_begin;
2132 current_end = second_half_end;
2133 }
2134 else
2135 {
2136 /* This half is not readable. Because we've tried one byte, we
2137 know some part of this half is actually readable. Go to the next
2138 iteration to divide again and try to read.
2139
2140 We don't handle the other half, because this function only tries
2141 to read a single readable subrange. */
2142 current_begin = first_half_begin;
2143 current_end = first_half_end;
2144 }
2145 }
2146
2147 if (forward)
2148 {
2149 /* The [begin, current_begin) range has been read. */
2150 result->emplace_back (begin, current_begin, std::move (buf));
2151 }
2152 else
2153 {
2154 /* The [current_end, end) range has been read. */
2155 LONGEST region_len = end - current_end;
2156
2157 gdb::unique_xmalloc_ptr<gdb_byte> data
2158 ((gdb_byte *) xmalloc (region_len * unit_size));
2159 memcpy (data.get (), buf.get () + (current_end - begin) * unit_size,
2160 region_len * unit_size);
2161 result->emplace_back (current_end, end, std::move (data));
2162 }
2163 }
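
/* Worked example (illustrative only): suppose the range [0x1000, 0x2000)
   fails to read as a whole but byte 0x1000 is readable (the forward case).
   The loop above then bisects the unreadable remainder: it reads
   [0x1001, 0x1800); if that succeeds, the failure must lie in
   [0x1800, 0x2000), otherwise the search continues inside
   [0x1001, 0x1800).  After at most log2 (end - begin) steps the window
   shrinks to a single unreadable byte, and everything before that byte
   has been accumulated in BUF.  */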
2164
2165 std::vector<memory_read_result>
2166 read_memory_robust (struct target_ops *ops,
2167 const ULONGEST offset, const LONGEST len)
2168 {
2169 std::vector<memory_read_result> result;
2170 int unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
2171
2172 LONGEST xfered_total = 0;
2173 while (xfered_total < len)
2174 {
2175 struct mem_region *region = lookup_mem_region (offset + xfered_total);
2176 LONGEST region_len;
2177
2178 /* If there is no explicit region, a fake one should be created. */
2179 gdb_assert (region);
2180
2181 if (region->hi == 0)
2182 region_len = len - xfered_total;
2183 else
2184 region_len = region->hi - (offset + xfered_total);
2185
2186 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2187 {
2188 /* Cannot read this region. Note that we can end up here only
2189 if the region is explicitly marked inaccessible, or
2190 'inaccessible-by-default' is in effect. */
2191 xfered_total += region_len;
2192 }
2193 else
2194 {
2195 LONGEST to_read = std::min (len - xfered_total, region_len);
2196 gdb::unique_xmalloc_ptr<gdb_byte> buffer
2197 ((gdb_byte *) xmalloc (to_read * unit_size));
2198
2199 LONGEST xfered_partial =
2200 target_read (ops, TARGET_OBJECT_MEMORY, NULL, buffer.get (),
2201 offset + xfered_total, to_read);
2202 /* Call an observer, notifying them of the xfer progress? */
2203 if (xfered_partial <= 0)
2204 {
2205 /* Got an error reading full chunk. See if maybe we can read
2206 some subrange. */
2207 read_whatever_is_readable (ops, offset + xfered_total,
2208 offset + xfered_total + to_read,
2209 unit_size, &result);
2210 xfered_total += to_read;
2211 }
2212 else
2213 {
2214 result.emplace_back (offset + xfered_total,
2215 offset + xfered_total + xfered_partial,
2216 std::move (buffer));
2217 xfered_total += xfered_partial;
2218 }
2219 QUIT;
2220 }
2221 }
2222
2223 return result;
2224 }
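
/* Illustrative sketch (not part of the build): consuming the possibly
   discontiguous blocks produced by read_memory_robust.  The field names
   follow how the results are built above (begin/end plus the data
   buffer); the helper name example_robust_dump is hypothetical.  */
#if 0
static void
example_robust_dump (struct target_ops *ops, CORE_ADDR addr, LONGEST len)
{
  std::vector<memory_read_result> blocks
    = read_memory_robust (ops, addr, len);

  /* Unreadable holes simply do not appear in the result.  */
  for (const memory_read_result &block : blocks)
    printf_filtered ("readable: [%s, %s)\n",
		     paddress (target_gdbarch (), block.begin),
		     paddress (target_gdbarch (), block.end));
}
#endif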
2225
2226
2227 /* An alternative to target_write with progress callbacks. */
2228
2229 LONGEST
2230 target_write_with_progress (struct target_ops *ops,
2231 enum target_object object,
2232 const char *annex, const gdb_byte *buf,
2233 ULONGEST offset, LONGEST len,
2234 void (*progress) (ULONGEST, void *), void *baton)
2235 {
2236 LONGEST xfered_total = 0;
2237 int unit_size = 1;
2238
2239 /* If we are writing to a memory object, find the length of an addressable
2240 unit for that architecture. */
2241 if (object == TARGET_OBJECT_MEMORY
2242 || object == TARGET_OBJECT_STACK_MEMORY
2243 || object == TARGET_OBJECT_CODE_MEMORY
2244 || object == TARGET_OBJECT_RAW_MEMORY)
2245 unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
2246
2247 /* Give the progress callback a chance to set up. */
2248 if (progress)
2249 (*progress) (0, baton);
2250
2251 while (xfered_total < len)
2252 {
2253 ULONGEST xfered_partial;
2254 enum target_xfer_status status;
2255
2256 status = target_write_partial (ops, object, annex,
2257 buf + xfered_total * unit_size,
2258 offset + xfered_total, len - xfered_total,
2259 &xfered_partial);
2260
2261 if (status != TARGET_XFER_OK)
2262 return status == TARGET_XFER_EOF ? xfered_total : TARGET_XFER_E_IO;
2263
2264 if (progress)
2265 (*progress) (xfered_partial, baton);
2266
2267 xfered_total += xfered_partial;
2268 QUIT;
2269 }
2270 return len;
2271 }
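
/* Illustrative sketch (not part of the build): a minimal progress
   callback suitable for target_write_with_progress.  As the code above
   shows, it is called once with 0 before the transfer starts and then
   once per successful chunk.  The names example_progress and
   example_write_verbose are hypothetical.  */
#if 0
static void
example_progress (ULONGEST just_written, void *baton)
{
  ULONGEST *total = (ULONGEST *) baton;

  *total += just_written;
  printf_filtered ("wrote %s units so far\n", pulongest (*total));
}

static LONGEST
example_write_verbose (struct target_ops *ops, const gdb_byte *buf,
		       ULONGEST offset, LONGEST len)
{
  ULONGEST total = 0;

  return target_write_with_progress (ops, TARGET_OBJECT_MEMORY, NULL,
				     buf, offset, len,
				     example_progress, &total);
}
#endif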
2272
2273 /* For docs on target_write see target.h. */
2274
2275 LONGEST
2276 target_write (struct target_ops *ops,
2277 enum target_object object,
2278 const char *annex, const gdb_byte *buf,
2279 ULONGEST offset, LONGEST len)
2280 {
2281 return target_write_with_progress (ops, object, annex, buf, offset, len,
2282 NULL, NULL);
2283 }
2284
2285 /* Help for target_read_alloc and target_read_stralloc. See their comments
2286 for details. */
2287
2288 template <typename T>
2289 gdb::optional<gdb::def_vector<T>>
2290 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2291 const char *annex)
2292 {
2293 gdb::def_vector<T> buf;
2294 size_t buf_pos = 0;
2295 const int chunk = 4096;
2296
2297 /* This function does not have a length parameter; it reads the
2298 entire OBJECT. Also, it doesn't support objects fetched partly
2299 from one target and partly from another (in a different stratum,
2300 e.g. a core file and an executable). Both reasons make it
2301 unsuitable for reading memory. */
2302 gdb_assert (object != TARGET_OBJECT_MEMORY);
2303
2304 /* Start by reading up to 4K at a time. The target will throttle
2305 this number down if necessary. */
2306 while (1)
2307 {
2308 ULONGEST xfered_len;
2309 enum target_xfer_status status;
2310
2311 buf.resize (buf_pos + chunk);
2312
2313 status = target_read_partial (ops, object, annex,
2314 (gdb_byte *) &buf[buf_pos],
2315 buf_pos, chunk,
2316 &xfered_len);
2317
2318 if (status == TARGET_XFER_EOF)
2319 {
2320 /* Read all there was. */
2321 buf.resize (buf_pos);
2322 return buf;
2323 }
2324 else if (status != TARGET_XFER_OK)
2325 {
2326 /* An error occurred. */
2327 return {};
2328 }
2329
2330 buf_pos += xfered_len;
2331
2332 QUIT;
2333 }
2334 }
2335
2336 /* See target.h */
2337
2338 gdb::optional<gdb::byte_vector>
2339 target_read_alloc (struct target_ops *ops, enum target_object object,
2340 const char *annex)
2341 {
2342 return target_read_alloc_1<gdb_byte> (ops, object, annex);
2343 }
2344
2345 /* See target.h. */
2346
2347 gdb::optional<gdb::char_vector>
2348 target_read_stralloc (struct target_ops *ops, enum target_object object,
2349 const char *annex)
2350 {
2351 gdb::optional<gdb::char_vector> buf
2352 = target_read_alloc_1<char> (ops, object, annex);
2353
2354 if (!buf)
2355 return {};
2356
2357 if (buf->empty () || buf->back () != '\0')
2358 buf->push_back ('\0');
2359
2360 /* Check for embedded NUL bytes; but allow trailing NULs. */
2361 for (auto it = std::find (buf->begin (), buf->end (), '\0');
2362 it != buf->end (); it++)
2363 if (*it != '\0')
2364 {
2365 warning (_("target object %d, annex %s, "
2366 "contained unexpected null characters"),
2367 (int) object, annex ? annex : "(none)");
2368 break;
2369 }
2370
2371 return buf;
2372 }
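
/* Illustrative sketch (not part of the build): target_read_stralloc is
   convenient for string-like objects of unknown size; the result is
   guaranteed to be NUL-terminated.  The annex value "processes" below
   is only an example.  */
#if 0
static void
example_read_osdata_processes (struct target_ops *ops)
{
  gdb::optional<gdb::char_vector> data
    = target_read_stralloc (ops, TARGET_OBJECT_OSDATA, "processes");

  if (data)
    printf_filtered ("%s\n", data->data ());
}
#endif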
2373
2374 /* Memory transfer methods. */
2375
2376 void
2377 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2378 LONGEST len)
2379 {
2380 /* This method is used to read from an alternate, non-current
2381 target. This read must bypass the overlay support (as symbols
2382 don't match this target), and GDB's internal cache (wrong cache
2383 for this target). */
2384 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2385 != len)
2386 memory_error (TARGET_XFER_E_IO, addr);
2387 }
2388
2389 ULONGEST
2390 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2391 int len, enum bfd_endian byte_order)
2392 {
2393 gdb_byte buf[sizeof (ULONGEST)];
2394
2395 gdb_assert (len <= sizeof (buf));
2396 get_target_memory (ops, addr, buf, len);
2397 return extract_unsigned_integer (buf, len, byte_order);
2398 }
2399
2400 /* See target.h. */
2401
2402 int
2403 target_insert_breakpoint (struct gdbarch *gdbarch,
2404 struct bp_target_info *bp_tgt)
2405 {
2406 if (!may_insert_breakpoints)
2407 {
2408 warning (_("May not insert breakpoints"));
2409 return 1;
2410 }
2411
2412 target_ops *target = current_inferior ()->top_target ();
2413
2414 return target->insert_breakpoint (gdbarch, bp_tgt);
2415 }
2416
2417 /* See target.h. */
2418
2419 int
2420 target_remove_breakpoint (struct gdbarch *gdbarch,
2421 struct bp_target_info *bp_tgt,
2422 enum remove_bp_reason reason)
2423 {
2424 /* This is kind of a weird case to handle, but the permission might
2425 have been changed after breakpoints were inserted - in which case
2426 we should just take the user literally and assume that any
2427 breakpoints should be left in place. */
2428 if (!may_insert_breakpoints)
2429 {
2430 warning (_("May not remove breakpoints"));
2431 return 1;
2432 }
2433
2434 target_ops *target = current_inferior ()->top_target ();
2435
2436 return target->remove_breakpoint (gdbarch, bp_tgt, reason);
2437 }
2438
2439 static void
2440 info_target_command (const char *args, int from_tty)
2441 {
2442 int has_all_mem = 0;
2443
2444 if (current_program_space->symfile_object_file != NULL)
2445 {
2446 objfile *objf = current_program_space->symfile_object_file;
2447 printf_filtered (_("Symbols from \"%s\".\n"),
2448 objfile_name (objf));
2449 }
2450
2451 for (target_ops *t = current_inferior ()->top_target ();
2452 t != NULL;
2453 t = t->beneath ())
2454 {
2455 if (!t->has_memory ())
2456 continue;
2457
2458 if ((int) (t->stratum ()) <= (int) dummy_stratum)
2459 continue;
2460 if (has_all_mem)
2461 printf_filtered (_("\tWhile running this, "
2462 "GDB does not access memory from...\n"));
2463 printf_filtered ("%s:\n", t->longname ());
2464 t->files_info ();
2465 has_all_mem = t->has_all_memory ();
2466 }
2467 }
2468
2469 /* This function is called before any new inferior is created, e.g.
2470 by running a program, attaching, or connecting to a target.
2471 It cleans up any state from previous invocations which might
2472 change between runs. This is a subset of what target_preopen
2473 resets (things which might change between targets). */
2474
2475 void
2476 target_pre_inferior (int from_tty)
2477 {
2478 /* Clear out solib state. Otherwise the solib state of the previous
2479 inferior might have survived and is entirely wrong for the new
2480 target. This has been observed on GNU/Linux using glibc 2.3. How
2481 to reproduce:
2482
2483 bash$ ./foo&
2484 [1] 4711
2485 bash$ ./foo&
2486 [2] 4712
2487 bash$ gdb ./foo
2488 [...]
2489 (gdb) attach 4711
2490 (gdb) detach
2491 (gdb) attach 4712
2492 Cannot access memory at address 0xdeadbeef
2493 */
2494
2495 /* In some OSs, the shared library list is the same/global/shared
2496 across inferiors. If code is shared between processes, so are
2497 memory regions and features. */
2498 if (!gdbarch_has_global_solist (target_gdbarch ()))
2499 {
2500 no_shared_libraries (NULL, from_tty);
2501
2502 invalidate_target_mem_regions ();
2503
2504 target_clear_description ();
2505 }
2506
2507 /* attach_flag may be set if the previous process associated with
2508 the inferior was attached to. */
2509 current_inferior ()->attach_flag = 0;
2510
2511 current_inferior ()->highest_thread_num = 0;
2512
2513 agent_capability_invalidate ();
2514 }
2515
2516 /* This is to be called by the open routine before it does
2517 anything. */
2518
2519 void
2520 target_preopen (int from_tty)
2521 {
2522 dont_repeat ();
2523
2524 if (current_inferior ()->pid != 0)
2525 {
2526 if (!from_tty
2527 || !target_has_execution ()
2528 || query (_("A program is being debugged already. Kill it? ")))
2529 {
2530 /* Core inferiors actually should be detached, not
2531 killed. */
2532 if (target_has_execution ())
2533 target_kill ();
2534 else
2535 target_detach (current_inferior (), 0);
2536 }
2537 else
2538 error (_("Program not killed."));
2539 }
2540
2541 /* Calling target_kill may remove the target from the stack. But if
2542 it doesn't (which seems like a win for UDI), remove it now. */
2543 /* Leave the exec target, though. The user may be switching from a
2544 live process to a core of the same program. */
2545 pop_all_targets_above (file_stratum);
2546
2547 target_pre_inferior (from_tty);
2548 }
2549
2550 /* See target.h. */
2551
2552 void
2553 target_detach (inferior *inf, int from_tty)
2554 {
2555 /* After we have detached, we will clear the register cache for this inferior
2556 by calling registers_changed_ptid. We must save the pid_ptid before
2557 detaching, as the target detach method will clear inf->pid. */
2558 ptid_t save_pid_ptid = ptid_t (inf->pid);
2559
2560 /* As long as some to_detach implementations rely on the current_inferior
2561 (either directly, or indirectly, like through target_gdbarch or by
2562 reading memory), INF needs to be the current inferior. Once that
2563 requirement no longer holds, we can remove this
2564 assertion. */
2565 gdb_assert (inf == current_inferior ());
2566
2567 prepare_for_detach ();
2568
2569 /* Hold a strong reference because detaching may unpush the
2570 target. */
2571 auto proc_target_ref = target_ops_ref::new_reference (inf->process_target ());
2572
2573 current_inferior ()->top_target ()->detach (inf, from_tty);
2574
2575 process_stratum_target *proc_target
2576 = as_process_stratum_target (proc_target_ref.get ());
2577
2578 registers_changed_ptid (proc_target, save_pid_ptid);
2579
2580 /* We have to ensure we have no frame cache left. Normally,
2581 registers_changed_ptid (save_pid_ptid) calls reinit_frame_cache when
2582 inferior_ptid matches save_pid_ptid, but in our case, it does not
2583 call it, as inferior_ptid has been reset. */
2584 reinit_frame_cache ();
2585 }
2586
2587 void
2588 target_disconnect (const char *args, int from_tty)
2589 {
2590 /* If we're in breakpoints-always-inserted mode or if breakpoints
2591 are global across processes, we have to remove them before
2592 disconnecting. */
2593 remove_breakpoints ();
2594
2595 current_inferior ()->top_target ()->disconnect (args, from_tty);
2596 }
2597
2598 /* See target/target.h. */
2599
2600 ptid_t
2601 target_wait (ptid_t ptid, struct target_waitstatus *status,
2602 target_wait_flags options)
2603 {
2604 target_ops *target = current_inferior ()->top_target ();
2605 process_stratum_target *proc_target = current_inferior ()->process_target ();
2606
2607 gdb_assert (!proc_target->commit_resumed_state);
2608
2609 if (!target_can_async_p (target))
2610 gdb_assert ((options & TARGET_WNOHANG) == 0);
2611
2612 return target->wait (ptid, status, options);
2613 }
2614
2615 /* See target.h. */
2616
2617 ptid_t
2618 default_target_wait (struct target_ops *ops,
2619 ptid_t ptid, struct target_waitstatus *status,
2620 target_wait_flags options)
2621 {
2622 status->set_ignore ();
2623 return minus_one_ptid;
2624 }
2625
2626 std::string
2627 target_pid_to_str (ptid_t ptid)
2628 {
2629 return current_inferior ()->top_target ()->pid_to_str (ptid);
2630 }
2631
2632 const char *
2633 target_thread_name (struct thread_info *info)
2634 {
2635 gdb_assert (info->inf == current_inferior ());
2636
2637 return current_inferior ()->top_target ()->thread_name (info);
2638 }
2639
2640 struct thread_info *
2641 target_thread_handle_to_thread_info (const gdb_byte *thread_handle,
2642 int handle_len,
2643 struct inferior *inf)
2644 {
2645 target_ops *target = current_inferior ()->top_target ();
2646
2647 return target->thread_handle_to_thread_info (thread_handle, handle_len, inf);
2648 }
2649
2650 /* See target.h. */
2651
2652 gdb::byte_vector
2653 target_thread_info_to_thread_handle (struct thread_info *tip)
2654 {
2655 target_ops *target = current_inferior ()->top_target ();
2656
2657 return target->thread_info_to_thread_handle (tip);
2658 }
2659
2660 void
2661 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2662 {
2663 process_stratum_target *curr_target = current_inferior ()->process_target ();
2664 gdb_assert (!curr_target->commit_resumed_state);
2665
2666 target_dcache_invalidate ();
2667
2668 current_inferior ()->top_target ()->resume (ptid, step, signal);
2669
2670 registers_changed_ptid (curr_target, ptid);
2671 /* We only set the internal executing state here. The user/frontend
2672 running state is set at a higher level. This also clears the
2673 thread's stop_pc as a side effect. */
2674 set_executing (curr_target, ptid, true);
2675 clear_inline_frame_state (curr_target, ptid);
2676 }
2677
2678 /* See target.h. */
2679
2680 void
2681 target_commit_resumed ()
2682 {
2683 gdb_assert (current_inferior ()->process_target ()->commit_resumed_state);
2684 current_inferior ()->top_target ()->commit_resumed ();
2685 }
2686
2687 /* See target.h. */
2688
2689 bool
2690 target_has_pending_events ()
2691 {
2692 return current_inferior ()->top_target ()->has_pending_events ();
2693 }
2694
2695 void
2696 target_pass_signals (gdb::array_view<const unsigned char> pass_signals)
2697 {
2698 current_inferior ()->top_target ()->pass_signals (pass_signals);
2699 }
2700
2701 void
2702 target_program_signals (gdb::array_view<const unsigned char> program_signals)
2703 {
2704 current_inferior ()->top_target ()->program_signals (program_signals);
2705 }
2706
2707 static void
2708 default_follow_fork (struct target_ops *self, inferior *child_inf,
2709 ptid_t child_ptid, target_waitkind fork_kind,
2710 bool follow_child, bool detach_fork)
2711 {
2712 /* Some target returned a fork event, but did not know how to follow it. */
2713 internal_error (__FILE__, __LINE__,
2714 _("could not find a target to follow fork"));
2715 }
2716
2717 /* See target.h. */
2718
2719 void
2720 target_follow_fork (inferior *child_inf, ptid_t child_ptid,
2721 target_waitkind fork_kind, bool follow_child,
2722 bool detach_fork)
2723 {
2724 target_ops *target = current_inferior ()->top_target ();
2725
2726 /* Check consistency between CHILD_INF, CHILD_PTID, FOLLOW_CHILD and
2727 DETACH_FORK. */
2728 if (child_inf != nullptr)
2729 {
2730 gdb_assert (follow_child || !detach_fork);
2731 gdb_assert (child_inf->pid == child_ptid.pid ());
2732 }
2733 else
2734 gdb_assert (!follow_child && detach_fork);
2735
2736 return target->follow_fork (child_inf, child_ptid, fork_kind, follow_child,
2737 detach_fork);
2738 }
2739
2740 /* See target.h. */
2741
2742 void
2743 target_follow_exec (inferior *follow_inf, ptid_t ptid,
2744 const char *execd_pathname)
2745 {
2746 current_inferior ()->top_target ()->follow_exec (follow_inf, ptid,
2747 execd_pathname);
2748 }
2749
2750 static void
2751 default_mourn_inferior (struct target_ops *self)
2752 {
2753 internal_error (__FILE__, __LINE__,
2754 _("could not find a target to follow mourn inferior"));
2755 }
2756
2757 void
2758 target_mourn_inferior (ptid_t ptid)
2759 {
2760 gdb_assert (ptid.pid () == inferior_ptid.pid ());
2761 current_inferior ()->top_target ()->mourn_inferior ();
2762
2763 /* We no longer need to keep handles on any of the object files.
2764 Make sure to release them to avoid unnecessarily locking any
2765 of them while we're not actually debugging. */
2766 bfd_cache_close_all ();
2767 }
2768
2769 /* Look for a target which can describe architectural features, starting
2770 from TARGET. If we find one, return its description. */
2771
2772 const struct target_desc *
2773 target_read_description (struct target_ops *target)
2774 {
2775 return target->read_description ();
2776 }
2777
2778
2779 /* Default implementation of memory-searching. */
2780
2781 static int
2782 default_search_memory (struct target_ops *self,
2783 CORE_ADDR start_addr, ULONGEST search_space_len,
2784 const gdb_byte *pattern, ULONGEST pattern_len,
2785 CORE_ADDR *found_addrp)
2786 {
2787 auto read_memory = [=] (CORE_ADDR addr, gdb_byte *result, size_t len)
2788 {
2789 return target_read (current_inferior ()->top_target (),
2790 TARGET_OBJECT_MEMORY, NULL,
2791 result, addr, len) == len;
2792 };
2793
2794 /* Start over from the top of the target stack. */
2795 return simple_search_memory (read_memory, start_addr, search_space_len,
2796 pattern, pattern_len, found_addrp);
2797 }
2798
2799 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2800 sequence of bytes in PATTERN with length PATTERN_LEN.
2801
2802 The result is 1 if found, 0 if not found, and -1 if there was an error
2803 requiring halting of the search (e.g. memory read error).
2804 If the pattern is found the address is recorded in FOUND_ADDRP. */
2805
2806 int
2807 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2808 const gdb_byte *pattern, ULONGEST pattern_len,
2809 CORE_ADDR *found_addrp)
2810 {
2811 target_ops *target = current_inferior ()->top_target ();
2812
2813 return target->search_memory (start_addr, search_space_len, pattern,
2814 pattern_len, found_addrp);
2815 }
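
/* Illustrative sketch (not part of the build): searching the inferior's
   memory for a byte pattern via target_search_memory, following the
   return convention documented above (1 found, 0 not found, -1 error).
   The pattern and helper name example_find_magic are arbitrary.  */
#if 0
static void
example_find_magic (CORE_ADDR start, ULONGEST space_len)
{
  static const gdb_byte magic[] = { 0xde, 0xad, 0xbe, 0xef };
  CORE_ADDR found;

  int res = target_search_memory (start, space_len,
				  magic, sizeof (magic), &found);
  if (res == 1)
    printf_filtered ("pattern found at %s\n",
		     paddress (target_gdbarch (), found));
  else if (res == 0)
    printf_filtered ("pattern not found\n");
  else
    printf_filtered ("error while searching\n");
}
#endif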
2816
2817 /* Look through the currently pushed targets. If none of them will
2818 be able to restart the currently running process, issue an error
2819 message. */
2820
2821 void
2822 target_require_runnable (void)
2823 {
2824 for (target_ops *t = current_inferior ()->top_target ();
2825 t != NULL;
2826 t = t->beneath ())
2827 {
2828 /* If this target knows how to create a new program, then
2829 assume we will still be able to after killing the current
2830 one. Either killing and mourning will not pop T, or else
2831 find_default_run_target will find it again. */
2832 if (t->can_create_inferior ())
2833 return;
2834
2835 /* Do not worry about targets at certain strata that can not
2836 create inferiors. Assume they will be pushed again if
2837 necessary, and continue to the process_stratum. */
2838 if (t->stratum () > process_stratum)
2839 continue;
2840
2841 error (_("The \"%s\" target does not support \"run\". "
2842 "Try \"help target\" or \"continue\"."),
2843 t->shortname ());
2844 }
2845
2846 /* This function is only called if the target is running. In that
2847 case there should have been a process_stratum target and it
2848 should either know how to create inferiors, or not... */
2849 internal_error (__FILE__, __LINE__, _("No targets found"));
2850 }
2851
2852 /* Whether GDB is allowed to fall back to the default run target for
2853 "run", "attach", etc. when no target is connected yet. */
2854 static bool auto_connect_native_target = true;
2855
2856 static void
2857 show_auto_connect_native_target (struct ui_file *file, int from_tty,
2858 struct cmd_list_element *c, const char *value)
2859 {
2860 fprintf_filtered (file,
2861 _("Whether GDB may automatically connect to the "
2862 "native target is %s.\n"),
2863 value);
2864 }
2865
2866 /* A pointer to the target that can respond to "run" or "attach".
2867 Native targets are always singletons and instantiated early at GDB
2868 startup. */
2869 static target_ops *the_native_target;
2870
2871 /* See target.h. */
2872
2873 void
2874 set_native_target (target_ops *target)
2875 {
2876 if (the_native_target != NULL)
2877 internal_error (__FILE__, __LINE__,
2878 _("native target already set (\"%s\")."),
2879 the_native_target->longname ());
2880
2881 the_native_target = target;
2882 }
2883
2884 /* See target.h. */
2885
2886 target_ops *
2887 get_native_target ()
2888 {
2889 return the_native_target;
2890 }
2891
2892 /* Look through the list of possible targets for a target that can
2893 execute a run or attach command without any other data. This is
2894 used to locate the default process stratum.
2895
2896 If DO_MESG is not NULL, the result is always valid (error() is
2897 called for errors); else, return NULL on error. */
2898
2899 static struct target_ops *
2900 find_default_run_target (const char *do_mesg)
2901 {
2902 if (auto_connect_native_target && the_native_target != NULL)
2903 return the_native_target;
2904
2905 if (do_mesg != NULL)
2906 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2907 return NULL;
2908 }
2909
2910 /* See target.h. */
2911
2912 struct target_ops *
2913 find_attach_target (void)
2914 {
2915 /* If a target on the current stack can attach, use it. */
2916 for (target_ops *t = current_inferior ()->top_target ();
2917 t != NULL;
2918 t = t->beneath ())
2919 {
2920 if (t->can_attach ())
2921 return t;
2922 }
2923
2924 /* Otherwise, use the default run target for attaching. */
2925 return find_default_run_target ("attach");
2926 }
2927
2928 /* See target.h. */
2929
2930 struct target_ops *
2931 find_run_target (void)
2932 {
2933 /* If a target on the current stack can run, use it. */
2934 for (target_ops *t = current_inferior ()->top_target ();
2935 t != NULL;
2936 t = t->beneath ())
2937 {
2938 if (t->can_create_inferior ())
2939 return t;
2940 }
2941
2942 /* Otherwise, use the default run target. */
2943 return find_default_run_target ("run");
2944 }
2945
2946 bool
2947 target_ops::info_proc (const char *args, enum info_proc_what what)
2948 {
2949 return false;
2950 }
2951
2952 /* Implement the "info proc" command. */
2953
2954 int
2955 target_info_proc (const char *args, enum info_proc_what what)
2956 {
2957 struct target_ops *t;
2958
2959 /* If we're already connected to something that can get us OS
2960 related data, use it. Otherwise, try using the native
2961 target. */
2962 t = find_target_at (process_stratum);
2963 if (t == NULL)
2964 t = find_default_run_target (NULL);
2965
2966 for (; t != NULL; t = t->beneath ())
2967 {
2968 if (t->info_proc (args, what))
2969 {
2970 if (targetdebug)
2971 fprintf_unfiltered (gdb_stdlog,
2972 "target_info_proc (\"%s\", %d)\n", args, what);
2973
2974 return 1;
2975 }
2976 }
2977
2978 return 0;
2979 }
2980
2981 static int
2982 find_default_supports_disable_randomization (struct target_ops *self)
2983 {
2984 struct target_ops *t;
2985
2986 t = find_default_run_target (NULL);
2987 if (t != NULL)
2988 return t->supports_disable_randomization ();
2989 return 0;
2990 }
2991
2992 int
2993 target_supports_disable_randomization (void)
2994 {
2995 return current_inferior ()->top_target ()->supports_disable_randomization ();
2996 }
2997
2998 /* See target/target.h. */
2999
3000 int
3001 target_supports_multi_process (void)
3002 {
3003 return current_inferior ()->top_target ()->supports_multi_process ();
3004 }
3005
3006 /* See target.h. */
3007
3008 gdb::optional<gdb::char_vector>
3009 target_get_osdata (const char *type)
3010 {
3011 struct target_ops *t;
3012
3013 /* If we're already connected to something that can get us OS
3014 related data, use it. Otherwise, try using the native
3015 target. */
3016 t = find_target_at (process_stratum);
3017 if (t == NULL)
3018 t = find_default_run_target ("get OS data");
3019
3020 if (!t)
3021 return {};
3022
3023 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3024 }
3025
3026 /* Determine the current address space of thread PTID. */
3027
3028 struct address_space *
3029 target_thread_address_space (ptid_t ptid)
3030 {
3031 struct address_space *aspace;
3032
3033 aspace = current_inferior ()->top_target ()->thread_address_space (ptid);
3034 gdb_assert (aspace != NULL);
3035
3036 return aspace;
3037 }
3038
3039 /* See target.h. */
3040
3041 target_ops *
3042 target_ops::beneath () const
3043 {
3044 return current_inferior ()->find_target_beneath (this);
3045 }
3046
3047 void
3048 target_ops::close ()
3049 {
3050 }
3051
3052 bool
3053 target_ops::can_attach ()
3054 {
3055 return false;
3056 }
3057
3058 void
3059 target_ops::attach (const char *, int)
3060 {
3061 gdb_assert_not_reached ("target_ops::attach called");
3062 }
3063
3064 bool
3065 target_ops::can_create_inferior ()
3066 {
3067 return false;
3068 }
3069
3070 void
3071 target_ops::create_inferior (const char *, const std::string &,
3072 char **, int)
3073 {
3074 gdb_assert_not_reached ("target_ops::create_inferior called");
3075 }
3076
3077 bool
3078 target_ops::can_run ()
3079 {
3080 return false;
3081 }
3082
3083 int
3084 target_can_run ()
3085 {
3086 for (target_ops *t = current_inferior ()->top_target ();
3087 t != NULL;
3088 t = t->beneath ())
3089 {
3090 if (t->can_run ())
3091 return 1;
3092 }
3093
3094 return 0;
3095 }
3096
3097 /* Target file operations. */
3098
3099 static struct target_ops *
3100 default_fileio_target (void)
3101 {
3102 struct target_ops *t;
3103
3104 /* If we're already connected to something that can perform
3105 file I/O, use it. Otherwise, try using the native target. */
3106 t = find_target_at (process_stratum);
3107 if (t != NULL)
3108 return t;
3109 return find_default_run_target ("file I/O");
3110 }
3111
3112 /* File handle for target file operations. */
3113
3114 struct fileio_fh_t
3115 {
3116 /* The target on which this file is open. NULL if the target was
3117 closed while the handle was still open. */
3118 target_ops *target;
3119
3120 /* The file descriptor on the target. */
3121 int target_fd;
3122
3123 /* Check whether this fileio_fh_t represents a closed file. */
3124 bool is_closed ()
3125 {
3126 return target_fd < 0;
3127 }
3128 };
3129
3130 /* Vector of currently open file handles. The value returned by
3131 target_fileio_open and passed as the FD argument to other
3132 target_fileio_* functions is an index into this vector. This
3133 vector's entries are never freed; instead, files are marked as
3134 closed, and the handle becomes available for reuse. */
3135 static std::vector<fileio_fh_t> fileio_fhandles;
3136
3137 /* Index into fileio_fhandles of the lowest handle that might be
3138 closed. This permits handle reuse without searching the whole
3139 list each time a new file is opened. */
3140 static int lowest_closed_fd;
3141
3142 /* See target.h. */
3143
3144 void
3145 fileio_handles_invalidate_target (target_ops *targ)
3146 {
3147 for (fileio_fh_t &fh : fileio_fhandles)
3148 if (fh.target == targ)
3149 fh.target = NULL;
3150 }
3151
3152 /* Acquire a target fileio file descriptor. */
3153
3154 static int
3155 acquire_fileio_fd (target_ops *target, int target_fd)
3156 {
3157 /* Search for closed handles to reuse. */
3158 for (; lowest_closed_fd < fileio_fhandles.size (); lowest_closed_fd++)
3159 {
3160 fileio_fh_t &fh = fileio_fhandles[lowest_closed_fd];
3161
3162 if (fh.is_closed ())
3163 break;
3164 }
3165
3166 /* Push a new handle if no closed handles were found. */
3167 if (lowest_closed_fd == fileio_fhandles.size ())
3168 fileio_fhandles.push_back (fileio_fh_t {target, target_fd});
3169 else
3170 fileio_fhandles[lowest_closed_fd] = {target, target_fd};
3171
3172 /* Should no longer be marked closed. */
3173 gdb_assert (!fileio_fhandles[lowest_closed_fd].is_closed ());
3174
3175 /* Return its index, and start the next lookup at
3176 the next index. */
3177 return lowest_closed_fd++;
3178 }
3179
3180 /* Release a target fileio file descriptor. */
3181
3182 static void
3183 release_fileio_fd (int fd, fileio_fh_t *fh)
3184 {
3185 fh->target_fd = -1;
3186 lowest_closed_fd = std::min (lowest_closed_fd, fd);
3187 }
3188
3189 /* Return a pointer to the fileio_fhandle_t corresponding to FD. */
3190
3191 static fileio_fh_t *
3192 fileio_fd_to_fh (int fd)
3193 {
3194 return &fileio_fhandles[fd];
3195 }
3196
3197
3198 /* Default implementations of file i/o methods. We don't want these
3199 to delegate automatically, because we need to know which target
3200 supported the method, in order to call it directly from within
3201 pread/pwrite, etc. */
3202
3203 int
3204 target_ops::fileio_open (struct inferior *inf, const char *filename,
3205 int flags, int mode, int warn_if_slow,
3206 int *target_errno)
3207 {
3208 *target_errno = FILEIO_ENOSYS;
3209 return -1;
3210 }
3211
3212 int
3213 target_ops::fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3214 ULONGEST offset, int *target_errno)
3215 {
3216 *target_errno = FILEIO_ENOSYS;
3217 return -1;
3218 }
3219
3220 int
3221 target_ops::fileio_pread (int fd, gdb_byte *read_buf, int len,
3222 ULONGEST offset, int *target_errno)
3223 {
3224 *target_errno = FILEIO_ENOSYS;
3225 return -1;
3226 }
3227
3228 int
3229 target_ops::fileio_fstat (int fd, struct stat *sb, int *target_errno)
3230 {
3231 *target_errno = FILEIO_ENOSYS;
3232 return -1;
3233 }
3234
3235 int
3236 target_ops::fileio_close (int fd, int *target_errno)
3237 {
3238 *target_errno = FILEIO_ENOSYS;
3239 return -1;
3240 }
3241
3242 int
3243 target_ops::fileio_unlink (struct inferior *inf, const char *filename,
3244 int *target_errno)
3245 {
3246 *target_errno = FILEIO_ENOSYS;
3247 return -1;
3248 }
3249
3250 gdb::optional<std::string>
3251 target_ops::fileio_readlink (struct inferior *inf, const char *filename,
3252 int *target_errno)
3253 {
3254 *target_errno = FILEIO_ENOSYS;
3255 return {};
3256 }
3257
3258 /* See target.h. */
3259
3260 int
3261 target_fileio_open (struct inferior *inf, const char *filename,
3262 int flags, int mode, bool warn_if_slow, int *target_errno)
3263 {
3264 for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
3265 {
3266 int fd = t->fileio_open (inf, filename, flags, mode,
3267 warn_if_slow, target_errno);
3268
3269 if (fd == -1 && *target_errno == FILEIO_ENOSYS)
3270 continue;
3271
3272 if (fd < 0)
3273 fd = -1;
3274 else
3275 fd = acquire_fileio_fd (t, fd);
3276
3277 if (targetdebug)
3278 fprintf_unfiltered (gdb_stdlog,
3279 "target_fileio_open (%d,%s,0x%x,0%o,%d)"
3280 " = %d (%d)\n",
3281 inf == NULL ? 0 : inf->num,
3282 filename, flags, mode,
3283 warn_if_slow, fd,
3284 fd != -1 ? 0 : *target_errno);
3285 return fd;
3286 }
3287
3288 *target_errno = FILEIO_ENOSYS;
3289 return -1;
3290 }
3291
3292 /* See target.h. */
3293
3294 int
3295 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3296 ULONGEST offset, int *target_errno)
3297 {
3298 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3299 int ret = -1;
3300
3301 if (fh->is_closed ())
3302 *target_errno = EBADF;
3303 else if (fh->target == NULL)
3304 *target_errno = EIO;
3305 else
3306 ret = fh->target->fileio_pwrite (fh->target_fd, write_buf,
3307 len, offset, target_errno);
3308
3309 if (targetdebug)
3310 fprintf_unfiltered (gdb_stdlog,
3311 "target_fileio_pwrite (%d,...,%d,%s) "
3312 "= %d (%d)\n",
3313 fd, len, pulongest (offset),
3314 ret, ret != -1 ? 0 : *target_errno);
3315 return ret;
3316 }
3317
3318 /* See target.h. */
3319
3320 int
3321 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3322 ULONGEST offset, int *target_errno)
3323 {
3324 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3325 int ret = -1;
3326
3327 if (fh->is_closed ())
3328 *target_errno = EBADF;
3329 else if (fh->target == NULL)
3330 *target_errno = EIO;
3331 else
3332 ret = fh->target->fileio_pread (fh->target_fd, read_buf,
3333 len, offset, target_errno);
3334
3335 if (targetdebug)
3336 fprintf_unfiltered (gdb_stdlog,
3337 "target_fileio_pread (%d,...,%d,%s) "
3338 "= %d (%d)\n",
3339 fd, len, pulongest (offset),
3340 ret, ret != -1 ? 0 : *target_errno);
3341 return ret;
3342 }
3343
3344 /* See target.h. */
3345
3346 int
3347 target_fileio_fstat (int fd, struct stat *sb, int *target_errno)
3348 {
3349 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3350 int ret = -1;
3351
3352 if (fh->is_closed ())
3353 *target_errno = EBADF;
3354 else if (fh->target == NULL)
3355 *target_errno = EIO;
3356 else
3357 ret = fh->target->fileio_fstat (fh->target_fd, sb, target_errno);
3358
3359 if (targetdebug)
3360 fprintf_unfiltered (gdb_stdlog,
3361 "target_fileio_fstat (%d) = %d (%d)\n",
3362 fd, ret, ret != -1 ? 0 : *target_errno);
3363 return ret;
3364 }
3365
3366 /* See target.h. */
3367
3368 int
3369 target_fileio_close (int fd, int *target_errno)
3370 {
3371 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3372 int ret = -1;
3373
3374 if (fh->is_closed ())
3375 *target_errno = EBADF;
3376 else
3377 {
3378 if (fh->target != NULL)
3379 ret = fh->target->fileio_close (fh->target_fd,
3380 target_errno);
3381 else
3382 ret = 0;
3383 release_fileio_fd (fd, fh);
3384 }
3385
3386 if (targetdebug)
3387 fprintf_unfiltered (gdb_stdlog,
3388 "target_fileio_close (%d) = %d (%d)\n",
3389 fd, ret, ret != -1 ? 0 : *target_errno);
3390 return ret;
3391 }
3392
3393 /* See target.h. */
3394
3395 int
3396 target_fileio_unlink (struct inferior *inf, const char *filename,
3397 int *target_errno)
3398 {
3399 for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
3400 {
3401 int ret = t->fileio_unlink (inf, filename, target_errno);
3402
3403 if (ret == -1 && *target_errno == FILEIO_ENOSYS)
3404 continue;
3405
3406 if (targetdebug)
3407 fprintf_unfiltered (gdb_stdlog,
3408 "target_fileio_unlink (%d,%s)"
3409 " = %d (%d)\n",
3410 inf == NULL ? 0 : inf->num, filename,
3411 ret, ret != -1 ? 0 : *target_errno);
3412 return ret;
3413 }
3414
3415 *target_errno = FILEIO_ENOSYS;
3416 return -1;
3417 }
3418
3419 /* See target.h. */
3420
3421 gdb::optional<std::string>
3422 target_fileio_readlink (struct inferior *inf, const char *filename,
3423 int *target_errno)
3424 {
3425 for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
3426 {
3427 gdb::optional<std::string> ret
3428 = t->fileio_readlink (inf, filename, target_errno);
3429
3430 if (!ret.has_value () && *target_errno == FILEIO_ENOSYS)
3431 continue;
3432
3433 if (targetdebug)
3434 fprintf_unfiltered (gdb_stdlog,
3435 "target_fileio_readlink (%d,%s)"
3436 " = %s (%d)\n",
3437 inf == NULL ? 0 : inf->num,
3438 filename, ret ? ret->c_str () : "(nil)",
3439 ret ? 0 : *target_errno);
3440 return ret;
3441 }
3442
3443 *target_errno = FILEIO_ENOSYS;
3444 return {};
3445 }
3446
3447 /* Like scoped_fd, but specific to target fileio. */
3448
3449 class scoped_target_fd
3450 {
3451 public:
3452 explicit scoped_target_fd (int fd) noexcept
3453 : m_fd (fd)
3454 {
3455 }
3456
3457 ~scoped_target_fd ()
3458 {
3459 if (m_fd >= 0)
3460 {
3461 int target_errno;
3462
3463 target_fileio_close (m_fd, &target_errno);
3464 }
3465 }
3466
3467 DISABLE_COPY_AND_ASSIGN (scoped_target_fd);
3468
3469 int get () const noexcept
3470 {
3471 return m_fd;
3472 }
3473
3474 private:
3475 int m_fd;
3476 };
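
/* Illustrative sketch (not part of the build): scoped_target_fd closes
   the target file descriptor automatically, so early returns need no
   explicit target_fileio_close.  The filename is an arbitrary example
   and example_read_first_bytes is hypothetical.  */
#if 0
static int
example_read_first_bytes (struct inferior *inf, gdb_byte *buf, int len)
{
  int target_errno;
  scoped_target_fd fd (target_fileio_open (inf, "/etc/hostname",
					   FILEIO_O_RDONLY, 0700,
					   false, &target_errno));

  if (fd.get () == -1)
    return -1;

  /* Read LEN bytes from the start of the file; the descriptor is
     released when FD goes out of scope.  */
  return target_fileio_pread (fd.get (), buf, len, 0, &target_errno);
}
#endif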
3477
3478 /* Read target file FILENAME, in the filesystem as seen by INF. If
3479 INF is NULL, use the filesystem seen by the debugger (GDB or, for
3480 remote targets, the remote stub). Store the result in *BUF_P and
3481 return the size of the transferred data. PADDING additional bytes
3482 are available in *BUF_P. This is a helper function for
3483 target_fileio_read_alloc; see the declaration of that function for
3484 more information. */
3485
3486 static LONGEST
3487 target_fileio_read_alloc_1 (struct inferior *inf, const char *filename,
3488 gdb_byte **buf_p, int padding)
3489 {
3490 size_t buf_alloc, buf_pos;
3491 gdb_byte *buf;
3492 LONGEST n;
3493 int target_errno;
3494
3495 scoped_target_fd fd (target_fileio_open (inf, filename, FILEIO_O_RDONLY,
3496 0700, false, &target_errno));
3497 if (fd.get () == -1)
3498 return -1;
3499
3500 /* Start by reading up to 4K at a time. The target will throttle
3501 this number down if necessary. */
3502 buf_alloc = 4096;
3503 buf = (gdb_byte *) xmalloc (buf_alloc);
3504 buf_pos = 0;
3505 while (1)
3506 {
3507 n = target_fileio_pread (fd.get (), &buf[buf_pos],
3508 buf_alloc - buf_pos - padding, buf_pos,
3509 &target_errno);
3510 if (n < 0)
3511 {
3512 /* An error occurred. */
3513 xfree (buf);
3514 return -1;
3515 }
3516 else if (n == 0)
3517 {
3518 /* Read all there was. */
3519 if (buf_pos == 0)
3520 xfree (buf);
3521 else
3522 *buf_p = buf;
3523 return buf_pos;
3524 }
3525
3526 buf_pos += n;
3527
3528 /* If the buffer is filling up, expand it. */
3529 if (buf_alloc < buf_pos * 2)
3530 {
3531 buf_alloc *= 2;
3532 buf = (gdb_byte *) xrealloc (buf, buf_alloc);
3533 }
3534
3535 QUIT;
3536 }
3537 }
3538
3539 /* See target.h. */
3540
3541 LONGEST
3542 target_fileio_read_alloc (struct inferior *inf, const char *filename,
3543 gdb_byte **buf_p)
3544 {
3545 return target_fileio_read_alloc_1 (inf, filename, buf_p, 0);
3546 }
3547
3548 /* See target.h. */
3549
3550 gdb::unique_xmalloc_ptr<char>
3551 target_fileio_read_stralloc (struct inferior *inf, const char *filename)
3552 {
3553 gdb_byte *buffer;
3554 char *bufstr;
3555 LONGEST i, transferred;
3556
3557 transferred = target_fileio_read_alloc_1 (inf, filename, &buffer, 1);
3558 bufstr = (char *) buffer;
3559
3560 if (transferred < 0)
3561 return gdb::unique_xmalloc_ptr<char> (nullptr);
3562
3563 if (transferred == 0)
3564 return make_unique_xstrdup ("");
3565
3566 bufstr[transferred] = 0;
3567
3568 /* Check for embedded NUL bytes; but allow trailing NULs. */
3569 for (i = strlen (bufstr); i < transferred; i++)
3570 if (bufstr[i] != 0)
3571 {
3572 warning (_("target file %s "
3573 "contained unexpected null characters"),
3574 filename);
3575 break;
3576 }
3577
3578 return gdb::unique_xmalloc_ptr<char> (bufstr);
3579 }
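
/* Illustrative sketch (not part of the build): reading a whole target
   file as a NUL-terminated string.  The path is an arbitrary example;
   example_read_version is hypothetical.  */
#if 0
static void
example_read_version (struct inferior *inf)
{
  gdb::unique_xmalloc_ptr<char> contents
    = target_fileio_read_stralloc (inf, "/proc/version");

  if (contents != nullptr)
    printf_filtered ("%s\n", contents.get ());
}
#endif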
3580
3581
3582 static int
3583 default_region_ok_for_hw_watchpoint (struct target_ops *self,
3584 CORE_ADDR addr, int len)
3585 {
3586 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3587 }
3588
3589 static int
3590 default_watchpoint_addr_within_range (struct target_ops *target,
3591 CORE_ADDR addr,
3592 CORE_ADDR start, int length)
3593 {
3594 return addr >= start && addr < start + length;
3595 }
3596
3597 /* See target.h. */
3598
3599 target_ops *
3600 target_stack::find_beneath (const target_ops *t) const
3601 {
3602 /* Look for a non-empty slot at stratum levels beneath T's. */
3603 for (int stratum = t->stratum () - 1; stratum >= 0; --stratum)
3604 if (m_stack[stratum] != NULL)
3605 return m_stack[stratum];
3606
3607 return NULL;
3608 }
3609
3610 /* See target.h. */
3611
3612 struct target_ops *
3613 find_target_at (enum strata stratum)
3614 {
3615 return current_inferior ()->target_at (stratum);
3616 }
3617
3618 \f
3619
3620 /* See target.h */
3621
3622 void
3623 target_announce_detach (int from_tty)
3624 {
3625 pid_t pid;
3626 const char *exec_file;
3627
3628 if (!from_tty)
3629 return;
3630
3631 exec_file = get_exec_file (0);
3632 if (exec_file == NULL)
3633 exec_file = "";
3634
3635 pid = inferior_ptid.pid ();
3636 printf_unfiltered (_("Detaching from program: %s, %s\n"), exec_file,
3637 target_pid_to_str (ptid_t (pid)).c_str ());
3638 }
3639
3640 /* The inferior process has died. Long live the inferior! */
3641
3642 void
3643 generic_mourn_inferior (void)
3644 {
3645 inferior *inf = current_inferior ();
3646
3647 switch_to_no_thread ();
3648
3649 /* Mark breakpoints uninserted in case something tries to delete a
3650 breakpoint while we delete the inferior's threads (which would
3651 fail, since the inferior is long gone). */
3652 mark_breakpoints_out ();
3653
3654 if (inf->pid != 0)
3655 exit_inferior (inf);
3656
3657 /* Note this wipes step-resume breakpoints, so needs to be done
3658 after exit_inferior, which ends up referencing the step-resume
3659 breakpoints through clear_thread_inferior_resources. */
3660 breakpoint_init_inferior (inf_exited);
3661
3662 registers_changed ();
3663
3664 reopen_exec_file ();
3665 reinit_frame_cache ();
3666
3667 if (deprecated_detach_hook)
3668 deprecated_detach_hook ();
3669 }
3670 \f
3671 /* Convert a normal process ID to a string, in the form
3672 "process PID". */
3673
3674 std::string
3675 normal_pid_to_str (ptid_t ptid)
3676 {
3677 return string_printf ("process %d", ptid.pid ());
3678 }
3679
3680 static std::string
3681 default_pid_to_str (struct target_ops *ops, ptid_t ptid)
3682 {
3683 return normal_pid_to_str (ptid);
3684 }
3685
3686 /* Error-catcher for target_find_memory_regions. */
3687 static int
3688 dummy_find_memory_regions (struct target_ops *self,
3689 find_memory_region_ftype ignore1, void *ignore2)
3690 {
3691 error (_("Command not implemented for this target."));
3692 return 0;
3693 }
3694
3695 /* Error-catcher for target_make_corefile_notes. */
3696 static gdb::unique_xmalloc_ptr<char>
3697 dummy_make_corefile_notes (struct target_ops *self,
3698 bfd *ignore1, int *ignore2)
3699 {
3700 error (_("Command not implemented for this target."));
3701 return NULL;
3702 }
3703
3704 #include "target-delegates.c"
3705
3706 /* The initial current target, so that there is always a semi-valid
3707 current target. */
3708
3709 static dummy_target the_dummy_target;
3710
3711 /* See target.h. */
3712
3713 target_ops *
3714 get_dummy_target ()
3715 {
3716 return &the_dummy_target;
3717 }
3718
3719 static const target_info dummy_target_info = {
3720 "None",
3721 N_("None"),
3722 ""
3723 };
3724
3725 strata
3726 dummy_target::stratum () const
3727 {
3728 return dummy_stratum;
3729 }
3730
3731 strata
3732 debug_target::stratum () const
3733 {
3734 return debug_stratum;
3735 }
3736
3737 const target_info &
3738 dummy_target::info () const
3739 {
3740 return dummy_target_info;
3741 }
3742
3743 const target_info &
3744 debug_target::info () const
3745 {
3746 return beneath ()->info ();
3747 }
3748
3749 \f
3750
3751 void
3752 target_close (struct target_ops *targ)
3753 {
3754 for (inferior *inf : all_inferiors ())
3755 gdb_assert (!inf->target_is_pushed (targ));
3756
3757 fileio_handles_invalidate_target (targ);
3758
3759 targ->close ();
3760
3761 if (targetdebug)
3762 fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
3763 }
3764
3765 int
3766 target_thread_alive (ptid_t ptid)
3767 {
3768 return current_inferior ()->top_target ()->thread_alive (ptid);
3769 }
3770
3771 void
3772 target_update_thread_list (void)
3773 {
3774 current_inferior ()->top_target ()->update_thread_list ();
3775 }
3776
3777 void
3778 target_stop (ptid_t ptid)
3779 {
3780 process_stratum_target *proc_target = current_inferior ()->process_target ();
3781
3782 gdb_assert (!proc_target->commit_resumed_state);
3783
3784 if (!may_stop)
3785 {
3786 warning (_("May not interrupt or stop the target, ignoring attempt"));
3787 return;
3788 }
3789
3790 current_inferior ()->top_target ()->stop (ptid);
3791 }
3792
3793 void
3794 target_interrupt ()
3795 {
3796 if (!may_stop)
3797 {
3798 warning (_("May not interrupt or stop the target, ignoring attempt"));
3799 return;
3800 }
3801
3802 current_inferior ()->top_target ()->interrupt ();
3803 }
3804
3805 /* See target.h. */
3806
3807 void
3808 target_pass_ctrlc (void)
3809 {
3810 /* Pass the Ctrl-C to the first target that has a thread
3811 running. */
3812 for (inferior *inf : all_inferiors ())
3813 {
3814 target_ops *proc_target = inf->process_target ();
3815 if (proc_target == NULL)
3816 continue;
3817
3818 for (thread_info *thr : inf->non_exited_threads ())
3819 {
3820 /* A thread can be THREAD_STOPPED and executing, while
3821 running an infcall. */
3822 if (thr->state == THREAD_RUNNING || thr->executing ())
3823 {
3824 /* We can get here quite deep in target layers. Avoid
3825 switching thread context or anything that would
3826 communicate with the target (e.g., to fetch
3827 registers), or flushing e.g., the frame cache. We
3828 just switch inferior in order to be able to call
3829 through the target_stack. */
3830 scoped_restore_current_inferior restore_inferior;
3831 set_current_inferior (inf);
3832 current_inferior ()->top_target ()->pass_ctrlc ();
3833 return;
3834 }
3835 }
3836 }
3837 }
3838
3839 /* See target.h. */
3840
3841 void
3842 default_target_pass_ctrlc (struct target_ops *ops)
3843 {
3844 target_interrupt ();
3845 }
3846
3847 /* See target/target.h. */
3848
3849 void
3850 target_stop_and_wait (ptid_t ptid)
3851 {
3852 struct target_waitstatus status;
3853 bool was_non_stop = non_stop;
3854
3855 non_stop = true;
3856 target_stop (ptid);
3857
3858 target_wait (ptid, &status, 0);
3859
3860 non_stop = was_non_stop;
3861 }
3862
3863 /* See target/target.h. */
3864
3865 void
3866 target_continue_no_signal (ptid_t ptid)
3867 {
3868 target_resume (ptid, 0, GDB_SIGNAL_0);
3869 }
3870
3871 /* See target/target.h. */
3872
3873 void
3874 target_continue (ptid_t ptid, enum gdb_signal signal)
3875 {
3876 target_resume (ptid, 0, signal);
3877 }
3878
3879 /* Concatenate ELEM to LIST, a comma-separated list. */
3880
3881 static void
3882 str_comma_list_concat_elem (std::string *list, const char *elem)
3883 {
3884 if (!list->empty ())
3885 list->append (", ");
3886
3887 list->append (elem);
3888 }
3889
3890 /* Helper for target_options_to_string. If OPT is present in
3891 TARGET_OPTIONS, append the OPT_STR (string version of OPT) to RET.
3892 OPT is removed from TARGET_OPTIONS. */
3893
3894 static void
3895 do_option (target_wait_flags *target_options, std::string *ret,
3896 target_wait_flag opt, const char *opt_str)
3897 {
3898 if ((*target_options & opt) != 0)
3899 {
3900 str_comma_list_concat_elem (ret, opt_str);
3901 *target_options &= ~opt;
3902 }
3903 }
3904
3905 /* See target.h. */
3906
3907 std::string
3908 target_options_to_string (target_wait_flags target_options)
3909 {
3910 std::string ret;
3911
3912 #define DO_TARG_OPTION(OPT) \
3913 do_option (&target_options, &ret, OPT, #OPT)
3914
3915 DO_TARG_OPTION (TARGET_WNOHANG);
3916
3917 if (target_options != 0)
3918 str_comma_list_concat_elem (&ret, "unknown???");
3919
3920 return ret;
3921 }
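
/* Illustrative sketch (not part of the build): target_options_to_string
   is mainly useful in debug traces.  TARGET_WNOHANG is currently the
   only named flag; any other bit prints as "unknown???".  */
#if 0
static void
example_trace_wait_options (target_wait_flags options)
{
  printf_filtered ("wait options: %s\n",
		   target_options_to_string (options).c_str ());
}
#endif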
3922
3923 void
3924 target_fetch_registers (struct regcache *regcache, int regno)
3925 {
3926 current_inferior ()->top_target ()->fetch_registers (regcache, regno);
3927 if (targetdebug)
3928 regcache->debug_print_register ("target_fetch_registers", regno);
3929 }
3930
3931 void
3932 target_store_registers (struct regcache *regcache, int regno)
3933 {
3934 if (!may_write_registers)
3935 error (_("Writing to registers is not allowed (regno %d)"), regno);
3936
3937 current_inferior ()->top_target ()->store_registers (regcache, regno);
3938 if (targetdebug)
3939 {
3940 regcache->debug_print_register ("target_store_registers", regno);
3941 }
3942 }
3943
3944 int
3945 target_core_of_thread (ptid_t ptid)
3946 {
3947 return current_inferior ()->top_target ()->core_of_thread (ptid);
3948 }
3949
3950 int
3951 simple_verify_memory (struct target_ops *ops,
3952 const gdb_byte *data, CORE_ADDR lma, ULONGEST size)
3953 {
3954 LONGEST total_xfered = 0;
3955
3956 while (total_xfered < size)
3957 {
3958 ULONGEST xfered_len;
3959 enum target_xfer_status status;
3960 gdb_byte buf[1024];
3961 ULONGEST howmuch = std::min<ULONGEST> (sizeof (buf), size - total_xfered);
3962
3963 status = target_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
3964 buf, NULL, lma + total_xfered, howmuch,
3965 &xfered_len);
3966 if (status == TARGET_XFER_OK
3967 && memcmp (data + total_xfered, buf, xfered_len) == 0)
3968 {
3969 total_xfered += xfered_len;
3970 QUIT;
3971 }
3972 else
3973 return 0;
3974 }
3975 return 1;
3976 }
3977
3978 /* Default implementation of memory verification. */
3979
3980 static int
3981 default_verify_memory (struct target_ops *self,
3982 const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3983 {
3984 /* Start over from the top of the target stack. */
3985 return simple_verify_memory (current_inferior ()->top_target (),
3986 data, memaddr, size);
3987 }
3988
3989 int
3990 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3991 {
3992 target_ops *target = current_inferior ()->top_target ();
3993
3994 return target->verify_memory (data, memaddr, size);
3995 }
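
/* Illustrative sketch (not part of the build): verifying that target
   memory matches a host buffer, e.g. after a download.  A non-zero
   result means the contents compared equal.  The helper name
   example_verify_image is hypothetical.  */
#if 0
static void
example_verify_image (const gdb_byte *image, CORE_ADDR lma, ULONGEST size)
{
  if (target_verify_memory (image, lma, size))
    printf_filtered ("verification succeeded\n");
  else
    printf_filtered ("verification failed\n");
}
#endif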
3996
3997 /* The documentation for this function is in its prototype declaration in
3998 target.h. */
3999
4000 int
4001 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask,
4002 enum target_hw_bp_type rw)
4003 {
4004 target_ops *target = current_inferior ()->top_target ();
4005
4006 return target->insert_mask_watchpoint (addr, mask, rw);
4007 }
4008
4009 /* The documentation for this function is in its prototype declaration in
4010 target.h. */
4011
4012 int
4013 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask,
4014 enum target_hw_bp_type rw)
4015 {
4016 target_ops *target = current_inferior ()->top_target ();
4017
4018 return target->remove_mask_watchpoint (addr, mask, rw);
4019 }
4020
4021 /* The documentation for this function is in its prototype declaration
4022 in target.h. */
4023
4024 int
4025 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
4026 {
4027 target_ops *target = current_inferior ()->top_target ();
4028
4029 return target->masked_watch_num_registers (addr, mask);
4030 }
4031
4032 /* The documentation for this function is in its prototype declaration
4033 in target.h. */
4034
4035 int
4036 target_ranged_break_num_registers (void)
4037 {
4038 return current_inferior ()->top_target ()->ranged_break_num_registers ();
4039 }
4040
4041 /* See target.h. */
4042
4043 struct btrace_target_info *
4044 target_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
4045 {
4046 return current_inferior ()->top_target ()->enable_btrace (ptid, conf);
4047 }
4048
4049 /* See target.h. */
4050
4051 void
4052 target_disable_btrace (struct btrace_target_info *btinfo)
4053 {
4054 current_inferior ()->top_target ()->disable_btrace (btinfo);
4055 }
4056
4057 /* See target.h. */
4058
4059 void
4060 target_teardown_btrace (struct btrace_target_info *btinfo)
4061 {
4062 current_inferior ()->top_target ()->teardown_btrace (btinfo);
4063 }
4064
4065 /* See target.h. */
4066
4067 enum btrace_error
4068 target_read_btrace (struct btrace_data *btrace,
4069 struct btrace_target_info *btinfo,
4070 enum btrace_read_type type)
4071 {
4072 target_ops *target = current_inferior ()->top_target ();
4073
4074 return target->read_btrace (btrace, btinfo, type);
4075 }
4076
4077 /* See target.h. */
4078
4079 const struct btrace_config *
4080 target_btrace_conf (const struct btrace_target_info *btinfo)
4081 {
4082 return current_inferior ()->top_target ()->btrace_conf (btinfo);
4083 }
4084
4085 /* See target.h. */
4086
4087 void
4088 target_stop_recording (void)
4089 {
4090 current_inferior ()->top_target ()->stop_recording ();
4091 }
4092
4093 /* See target.h. */
4094
4095 void
4096 target_save_record (const char *filename)
4097 {
4098 current_inferior ()->top_target ()->save_record (filename);
4099 }
4100
4101 /* See target.h. */
4102
4103 int
4104 target_supports_delete_record ()
4105 {
4106 return current_inferior ()->top_target ()->supports_delete_record ();
4107 }
4108
4109 /* See target.h. */
4110
4111 void
4112 target_delete_record (void)
4113 {
4114 current_inferior ()->top_target ()->delete_record ();
4115 }
4116
4117 /* See target.h. */
4118
4119 enum record_method
4120 target_record_method (ptid_t ptid)
4121 {
4122 return current_inferior ()->top_target ()->record_method (ptid);
4123 }
4124
4125 /* See target.h. */
4126
4127 int
4128 target_record_is_replaying (ptid_t ptid)
4129 {
4130 return current_inferior ()->top_target ()->record_is_replaying (ptid);
4131 }
4132
4133 /* See target.h. */
4134
4135 int
4136 target_record_will_replay (ptid_t ptid, int dir)
4137 {
4138 return current_inferior ()->top_target ()->record_will_replay (ptid, dir);
4139 }
4140
4141 /* See target.h. */
4142
4143 void
4144 target_record_stop_replaying (void)
4145 {
4146 current_inferior ()->top_target ()->record_stop_replaying ();
4147 }
4148
4149 /* See target.h. */
4150
4151 void
4152 target_goto_record_begin (void)
4153 {
4154 current_inferior ()->top_target ()->goto_record_begin ();
4155 }
4156
4157 /* See target.h. */
4158
4159 void
4160 target_goto_record_end (void)
4161 {
4162 current_inferior ()->top_target ()->goto_record_end ();
4163 }
4164
4165 /* See target.h. */
4166
4167 void
4168 target_goto_record (ULONGEST insn)
4169 {
4170 current_inferior ()->top_target ()->goto_record (insn);
4171 }
4172
4173 /* See target.h. */
4174
4175 void
4176 target_insn_history (int size, gdb_disassembly_flags flags)
4177 {
4178 current_inferior ()->top_target ()->insn_history (size, flags);
4179 }
4180
4181 /* See target.h. */
4182
4183 void
4184 target_insn_history_from (ULONGEST from, int size,
4185 gdb_disassembly_flags flags)
4186 {
4187 current_inferior ()->top_target ()->insn_history_from (from, size, flags);
4188 }
4189
4190 /* See target.h. */
4191
4192 void
4193 target_insn_history_range (ULONGEST begin, ULONGEST end,
4194 gdb_disassembly_flags flags)
4195 {
4196 current_inferior ()->top_target ()->insn_history_range (begin, end, flags);
4197 }
4198
4199 /* See target.h. */
4200
4201 void
4202 target_call_history (int size, record_print_flags flags)
4203 {
4204 current_inferior ()->top_target ()->call_history (size, flags);
4205 }
4206
4207 /* See target.h. */
4208
4209 void
4210 target_call_history_from (ULONGEST begin, int size, record_print_flags flags)
4211 {
4212 current_inferior ()->top_target ()->call_history_from (begin, size, flags);
4213 }
4214
4215 /* See target.h. */
4216
4217 void
4218 target_call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
4219 {
4220 current_inferior ()->top_target ()->call_history_range (begin, end, flags);
4221 }
4222
4223 /* See target.h. */
4224
4225 const struct frame_unwind *
4226 target_get_unwinder (void)
4227 {
4228 return current_inferior ()->top_target ()->get_unwinder ();
4229 }
4230
4231 /* See target.h. */
4232
4233 const struct frame_unwind *
4234 target_get_tailcall_unwinder (void)
4235 {
4236 return current_inferior ()->top_target ()->get_tailcall_unwinder ();
4237 }
4238
4239 /* See target.h. */
4240
4241 void
4242 target_prepare_to_generate_core (void)
4243 {
4244 current_inferior ()->top_target ()->prepare_to_generate_core ();
4245 }
4246
4247 /* See target.h. */
4248
4249 void
4250 target_done_generating_core (void)
4251 {
4252 current_inferior ()->top_target ()->done_generating_core ();
4253 }
4254
4255 \f
4256
4257 static char targ_desc[] =
4258 "Names of targets and files being debugged.\nShows the entire \
4259 stack of targets currently in use (including the exec-file,\n\
4260 core-file, and process, if any), as well as the symbol file name.";
4261
4262 static void
4263 default_rcmd (struct target_ops *self, const char *command,
4264 struct ui_file *output)
4265 {
4266 error (_("\"monitor\" command not supported by this target."));
4267 }
4268
4269 static void
4270 do_monitor_command (const char *cmd, int from_tty)
4271 {
4272 target_rcmd (cmd, gdb_stdtarg);
4273 }
4274
4275 /* Erases all the memory regions marked as flash. CMD and FROM_TTY are
4276 ignored. */
4277
4278 void
4279 flash_erase_command (const char *cmd, int from_tty)
4280 {
4281 /* Whether any flash region was erased; if so, we must tell the target when the flash operations are finished. */
4282 bool found_flash_region = false;
4283 struct gdbarch *gdbarch = target_gdbarch ();
4284
4285 std::vector<mem_region> mem_regions = target_memory_map ();
4286
4287 /* Iterate over all memory regions. */
4288 for (const mem_region &m : mem_regions)
4289 {
4290 /* Is this a flash memory region? */
4291 if (m.attrib.mode == MEM_FLASH)
4292 {
4293 found_flash_region = true;
4294 target_flash_erase (m.lo, m.hi - m.lo);
4295
4296 ui_out_emit_tuple tuple_emitter (current_uiout, "erased-regions");
4297
4298 current_uiout->message (_("Erasing flash memory region at address "));
4299 current_uiout->field_core_addr ("address", gdbarch, m.lo);
4300 current_uiout->message (", size = ");
4301 current_uiout->field_string ("size", hex_string (m.hi - m.lo));
4302 current_uiout->message ("\n");
4303 }
4304 }
4305
4306 /* Did we do any flash operations? If so, we need to finalize them. */
4307 if (found_flash_region)
4308 target_flash_done ();
4309 else
4310 current_uiout->message (_("No flash memory regions found.\n"));
4311 }
4312
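/* Editor's note (not part of GDB): flash_erase_command above illustrates
   the flash protocol exposed by this file: enumerate target_memory_map (),
   call target_flash_erase for each MEM_FLASH region to be erased, and
   finish with a single target_flash_done once any erasing was requested.
   The helper below is a hypothetical sketch of the same protocol applied
   to one region only; it is guarded by "#if 0" so it never takes part in
   the build.  */
#if 0
static void
erase_flash_region_at (CORE_ADDR addr)
{
  for (const mem_region &m : target_memory_map ())
    if (m.attrib.mode == MEM_FLASH && addr >= m.lo && addr < m.hi)
      {
        target_flash_erase (m.lo, m.hi - m.lo);
        /* Tell the target that this batch of flash operations is done.  */
        target_flash_done ();
        return;
      }

  error (_("Address %s is not within a flash memory region."),
         paddress (target_gdbarch (), addr));
}
#endif
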
4313 /* Print the name of each layer of our target stack. */
4314
4315 static void
4316 maintenance_print_target_stack (const char *cmd, int from_tty)
4317 {
4318 printf_filtered (_("The current target stack is:\n"));
4319
4320 for (target_ops *t = current_inferior ()->top_target ();
4321 t != NULL;
4322 t = t->beneath ())
4323 {
4324 if (t->stratum () == debug_stratum)
4325 continue;
4326 printf_filtered (" - %s (%s)\n", t->shortname (), t->longname ());
4327 }
4328 }
4329
4330 /* See target.h. */
4331
4332 void
4333 target_async (int enable)
4334 {
4335 /* If we are trying to enable async mode, then async mode must be
4336 possible for this target. */
4337 gdb_assert (!enable || target_can_async_p ());
4338 infrun_async (enable);
4339 current_inferior ()->top_target ()->async (enable);
4340 }
4341
4342 /* See target.h. */
4343
4344 void
4345 target_thread_events (int enable)
4346 {
4347 current_inferior ()->top_target ()->thread_events (enable);
4348 }
4349
4350 /* Controls whether targets may report that they can be, or currently are,
4351 async. This is just for maintainers to use when debugging gdb. */
4352 bool target_async_permitted = true;
4353
4354 /* The set command writes to this variable. If the inferior is
4355 executing, target_async_permitted is *not* updated. */
4356 static bool target_async_permitted_1 = true;
4357
4358 static void
4359 maint_set_target_async_command (const char *args, int from_tty,
4360 struct cmd_list_element *c)
4361 {
4362 if (have_live_inferiors ())
4363 {
4364 target_async_permitted_1 = target_async_permitted;
4365 error (_("Cannot change this setting while the inferior is running."));
4366 }
4367
4368 target_async_permitted = target_async_permitted_1;
4369 }
4370
4371 static void
4372 maint_show_target_async_command (struct ui_file *file, int from_tty,
4373 struct cmd_list_element *c,
4374 const char *value)
4375 {
4376 fprintf_filtered (file,
4377 _("Controlling the inferior in "
4378 "asynchronous mode is %s.\n"), value);
4379 }
4380
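/* Editor's note (not part of GDB): maint_set_target_async_command above
   follows a two-variable pattern used for settings that must not change
   while there are live inferiors.  The "maint set" command writes the _1
   shadow copy; the set hook either propagates it to the real variable or,
   when inferiors are live, restores the shadow copy and errors out, so
   "maint show" keeps reporting the value actually in effect.  A new
   setting built on the same pattern would look roughly like the
   hypothetical sketch below (the names are made up; have_live_inferiors
   and error are the real facilities used above).  The sketch is guarded
   by "#if 0" so it never takes part in the build.  */
#if 0
static bool maint_frobnicate = true;    /* Value currently in effect.  */
static bool maint_frobnicate_1 = true;  /* Written by "maint set".  */

static void
maint_set_frobnicate_command (const char *args, int from_tty,
                              struct cmd_list_element *c)
{
  if (have_live_inferiors ())
    {
      /* Undo the user's edit so "maint show" stays accurate.  */
      maint_frobnicate_1 = maint_frobnicate;
      error (_("Cannot change this setting while the inferior is running."));
    }

  maint_frobnicate = maint_frobnicate_1;
}
#endif
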
4381 /* Return true if the target operates in non-stop mode even with "set
4382 non-stop off". */
4383
4384 static int
4385 target_always_non_stop_p (void)
4386 {
4387 return current_inferior ()->top_target ()->always_non_stop_p ();
4388 }
4389
4390 /* See target.h. */
4391
4392 bool
4393 target_is_non_stop_p ()
4394 {
4395 return ((non_stop
4396 || target_non_stop_enabled == AUTO_BOOLEAN_TRUE
4397 || (target_non_stop_enabled == AUTO_BOOLEAN_AUTO
4398 && target_always_non_stop_p ()))
4399 && target_can_async_p ());
4400 }
4401
4402 /* See target.h. */
4403
4404 bool
4405 exists_non_stop_target ()
4406 {
4407 if (target_is_non_stop_p ())
4408 return true;
4409
4410 scoped_restore_current_thread restore_thread;
4411
4412 for (inferior *inf : all_inferiors ())
4413 {
4414 switch_to_inferior_no_thread (inf);
4415 if (target_is_non_stop_p ())
4416 return true;
4417 }
4418
4419 return false;
4420 }
4421
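/* Editor's note (not part of GDB): exists_non_stop_target above shows the
   usual way to ask a question of every inferior: instantiate
   scoped_restore_current_thread so the current thread/inferior is put
   back on exit, then switch_to_inferior_no_thread to each inferior in
   turn before querying the (now current) target.  A hypothetical query
   using the same pattern is sketched below, guarded by "#if 0" so it
   never takes part in the build.  */
#if 0
static int
count_async_capable_inferiors ()
{
  int count = 0;
  scoped_restore_current_thread restore_thread;

  for (inferior *inf : all_inferiors ())
    {
      switch_to_inferior_no_thread (inf);
      if (target_can_async_p ())
        count++;
    }

  return count;
}
#endif
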
4422 /* Controls whether targets may report that they always run in non-stop
4423 mode. This is just for maintainers to use when debugging gdb. */
4424 enum auto_boolean target_non_stop_enabled = AUTO_BOOLEAN_AUTO;
4425
4426 /* The set command writes to this variable. If the inferior is
4427 executing, target_non_stop_enabled is *not* updated. */
4428 static enum auto_boolean target_non_stop_enabled_1 = AUTO_BOOLEAN_AUTO;
4429
4430 /* Implementation of "maint set target-non-stop". */
4431
4432 static void
4433 maint_set_target_non_stop_command (const char *args, int from_tty,
4434 struct cmd_list_element *c)
4435 {
4436 if (have_live_inferiors ())
4437 {
4438 target_non_stop_enabled_1 = target_non_stop_enabled;
4439 error (_("Cannot change this setting while the inferior is running."));
4440 }
4441
4442 target_non_stop_enabled = target_non_stop_enabled_1;
4443 }
4444
4445 /* Implementation of "maint show target-non-stop". */
4446
4447 static void
4448 maint_show_target_non_stop_command (struct ui_file *file, int from_tty,
4449 struct cmd_list_element *c,
4450 const char *value)
4451 {
4452 if (target_non_stop_enabled == AUTO_BOOLEAN_AUTO)
4453 fprintf_filtered (file,
4454 _("Whether the target is always in non-stop mode "
4455 "is %s (currently %s).\n"), value,
4456 target_always_non_stop_p () ? "on" : "off");
4457 else
4458 fprintf_filtered (file,
4459 _("Whether the target is always in non-stop mode "
4460 "is %s.\n"), value);
4461 }
4462
4463 /* Temporary copies of permission settings. */
4464
4465 static bool may_write_registers_1 = true;
4466 static bool may_write_memory_1 = true;
4467 static bool may_insert_breakpoints_1 = true;
4468 static bool may_insert_tracepoints_1 = true;
4469 static bool may_insert_fast_tracepoints_1 = true;
4470 static bool may_stop_1 = true;
4471
4472 /* Make the user-set values match the real values again. */
4473
4474 void
4475 update_target_permissions (void)
4476 {
4477 may_write_registers_1 = may_write_registers;
4478 may_write_memory_1 = may_write_memory;
4479 may_insert_breakpoints_1 = may_insert_breakpoints;
4480 may_insert_tracepoints_1 = may_insert_tracepoints;
4481 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
4482 may_stop_1 = may_stop;
4483 }
4484
4485 /* This one setter function handles (most of) the permission flags in the
4486 same way. */
4487
4488 static void
4489 set_target_permissions (const char *args, int from_tty,
4490 struct cmd_list_element *c)
4491 {
4492 if (target_has_execution ())
4493 {
4494 update_target_permissions ();
4495 error (_("Cannot change this setting while the inferior is running."));
4496 }
4497
4498 /* Make the real values match the user-changed values. */
4499 may_write_registers = may_write_registers_1;
4500 may_insert_breakpoints = may_insert_breakpoints_1;
4501 may_insert_tracepoints = may_insert_tracepoints_1;
4502 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
4503 may_stop = may_stop_1;
4504 update_observer_mode ();
4505 }
4506
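/* Editor's note (not part of GDB): the may_* globals updated above are
   declared in target.h and are meant to be consulted before performing
   the corresponding operation.  A guard would look like the hypothetical
   sketch below (the message text is illustrative; where GDB itself
   performs these checks is outside this file).  The sketch is guarded by
   "#if 0" so it never takes part in the build.  */
#if 0
static void
poke_target_memory (CORE_ADDR addr, const gdb_byte *buf, ssize_t len)
{
  if (!may_write_memory)
    error (_("Writing to target memory is not allowed (may-write-memory is off)"));

  write_memory (addr, buf, len);
}
#endif
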
4507 /* Set memory write permission independently of observer mode. */
4508
4509 static void
4510 set_write_memory_permission (const char *args, int from_tty,
4511 struct cmd_list_element *c)
4512 {
4513 /* Make the real values match the user-changed values. */
4514 may_write_memory = may_write_memory_1;
4515 update_observer_mode ();
4516 }
4517
4518 void _initialize_target ();
4519
4520 void
4521 _initialize_target ()
4522 {
4523 the_debug_target = new debug_target ();
4524
4525 add_info ("target", info_target_command, targ_desc);
4526 add_info ("files", info_target_command, targ_desc);
4527
4528 add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
4529 Set target debugging."), _("\
4530 Show target debugging."), _("\
4531 When non-zero, target debugging is enabled. Higher numbers are more\n\
4532 verbose."),
4533 set_targetdebug,
4534 show_targetdebug,
4535 &setdebuglist, &showdebuglist);
4536
4537 add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
4538 &trust_readonly, _("\
4539 Set mode for reading from readonly sections."), _("\
4540 Show mode for reading from readonly sections."), _("\
4541 When this mode is on, memory reads from readonly sections (such as .text)\n\
4542 will be read from the object file instead of from the target. This will\n\
4543 result in significant performance improvement for remote targets."),
4544 NULL,
4545 show_trust_readonly,
4546 &setlist, &showlist);
4547
4548 add_com ("monitor", class_obscure, do_monitor_command,
4549 _("Send a command to the remote monitor (remote targets only)."));
4550
4551 add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
4552 _("Print the name of each layer of the internal target stack."),
4553 &maintenanceprintlist);
4554
4555 add_setshow_boolean_cmd ("target-async", no_class,
4556 &target_async_permitted_1, _("\
4557 Set whether gdb controls the inferior in asynchronous mode."), _("\
4558 Show whether gdb controls the inferior in asynchronous mode."), _("\
4559 Tells gdb whether to control the inferior in asynchronous mode."),
4560 maint_set_target_async_command,
4561 maint_show_target_async_command,
4562 &maintenance_set_cmdlist,
4563 &maintenance_show_cmdlist);
4564
4565 add_setshow_auto_boolean_cmd ("target-non-stop", no_class,
4566 &target_non_stop_enabled_1, _("\
4567 Set whether gdb always controls the inferior in non-stop mode."), _("\
4568 Show whether gdb always controls the inferior in non-stop mode."), _("\
4569 Tells gdb whether to control the inferior in non-stop mode."),
4570 maint_set_target_non_stop_command,
4571 maint_show_target_non_stop_command,
4572 &maintenance_set_cmdlist,
4573 &maintenance_show_cmdlist);
4574
4575 add_setshow_boolean_cmd ("may-write-registers", class_support,
4576 &may_write_registers_1, _("\
4577 Set permission to write into registers."), _("\
4578 Show permission to write into registers."), _("\
4579 When this permission is on, GDB may write into the target's registers.\n\
4580 Otherwise, any sort of write attempt will result in an error."),
4581 set_target_permissions, NULL,
4582 &setlist, &showlist);
4583
4584 add_setshow_boolean_cmd ("may-write-memory", class_support,
4585 &may_write_memory_1, _("\
4586 Set permission to write into target memory."), _("\
4587 Show permission to write into target memory."), _("\
4588 When this permission is on, GDB may write into the target's memory.\n\
4589 Otherwise, any sort of write attempt will result in an error."),
4590 set_write_memory_permission, NULL,
4591 &setlist, &showlist);
4592
4593 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
4594 &may_insert_breakpoints_1, _("\
4595 Set permission to insert breakpoints in the target."), _("\
4596 Show permission to insert breakpoints in the target."), _("\
4597 When this permission is on, GDB may insert breakpoints in the program.\n\
4598 Otherwise, any sort of insertion attempt will result in an error."),
4599 set_target_permissions, NULL,
4600 &setlist, &showlist);
4601
4602 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
4603 &may_insert_tracepoints_1, _("\
4604 Set permission to insert tracepoints in the target."), _("\
4605 Show permission to insert tracepoints in the target."), _("\
4606 When this permission is on, GDB may insert tracepoints in the program.\n\
4607 Otherwise, any sort of insertion attempt will result in an error."),
4608 set_target_permissions, NULL,
4609 &setlist, &showlist);
4610
4611 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
4612 &may_insert_fast_tracepoints_1, _("\
4613 Set permission to insert fast tracepoints in the target."), _("\
4614 Show permission to insert fast tracepoints in the target."), _("\
4615 When this permission is on, GDB may insert fast tracepoints.\n\
4616 Otherwise, any sort of insertion attempt will result in an error."),
4617 set_target_permissions, NULL,
4618 &setlist, &showlist);
4619
4620 add_setshow_boolean_cmd ("may-interrupt", class_support,
4621 &may_stop_1, _("\
4622 Set permission to interrupt or signal the target."), _("\
4623 Show permission to interrupt or signal the target."), _("\
4624 When this permission is on, GDB may interrupt/stop the target's execution.\n\
4625 Otherwise, any attempt to interrupt or stop will be ignored."),
4626 set_target_permissions, NULL,
4627 &setlist, &showlist);
4628
4629 add_com ("flash-erase", no_class, flash_erase_command,
4630 _("Erase all flash memory regions."));
4631
4632 add_setshow_boolean_cmd ("auto-connect-native-target", class_support,
4633 &auto_connect_native_target, _("\
4634 Set whether GDB may automatically connect to the native target."), _("\
4635 Show whether GDB may automatically connect to the native target."), _("\
4636 When on, and GDB is not connected to a target yet, GDB\n\
4637 attempts \"run\" and other commands with the native target."),
4638 NULL, show_auto_connect_native_target,
4639 &setlist, &showlist);
4640 }
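
/* Editor's note (not part of GDB): an illustrative session exercising a
   few of the commands registered above.  The exact target names printed
   by "maint print target-stack" depend on what is loaded or connected,
   and the output below is abbreviated:

     (gdb) maintenance print target-stack
     The current target stack is:
      - exec (Local exec file)
      - None (None)
     (gdb) set may-write-memory off
     (gdb) flash-erase
     No flash memory regions found.
     (gdb) monitor reset
     "monitor" command not supported by this target.

   The "monitor" reply above comes from default_rcmd, i.e. from a target
   stack whose top target does not implement the rcmd method.  */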