1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2021 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "target.h"
24 #include "target-dcache.h"
25 #include "gdbcmd.h"
26 #include "symtab.h"
27 #include "inferior.h"
28 #include "infrun.h"
29 #include "bfd.h"
30 #include "symfile.h"
31 #include "objfiles.h"
32 #include "dcache.h"
33 #include <signal.h>
34 #include "regcache.h"
35 #include "gdbcore.h"
36 #include "target-descriptions.h"
37 #include "gdbthread.h"
38 #include "solib.h"
39 #include "exec.h"
40 #include "inline-frame.h"
41 #include "tracepoint.h"
42 #include "gdb/fileio.h"
43 #include "gdbsupport/agent.h"
44 #include "auxv.h"
45 #include "target-debug.h"
46 #include "top.h"
47 #include "event-top.h"
48 #include <algorithm>
49 #include "gdbsupport/byte-vector.h"
50 #include "gdbsupport/search.h"
51 #include "terminal.h"
52 #include <unordered_map>
53 #include "target-connection.h"
54 #include "valprint.h"
55
56 static void generic_tls_error (void) ATTRIBUTE_NORETURN;
57
58 static void default_terminal_info (struct target_ops *, const char *, int);
59
60 static int default_watchpoint_addr_within_range (struct target_ops *,
61 CORE_ADDR, CORE_ADDR, int);
62
63 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
64 CORE_ADDR, int);
65
66 static void default_rcmd (struct target_ops *, const char *, struct ui_file *);
67
68 static ptid_t default_get_ada_task_ptid (struct target_ops *self,
69 long lwp, long tid);
70
71 static void default_mourn_inferior (struct target_ops *self);
72
73 static int default_search_memory (struct target_ops *ops,
74 CORE_ADDR start_addr,
75 ULONGEST search_space_len,
76 const gdb_byte *pattern,
77 ULONGEST pattern_len,
78 CORE_ADDR *found_addrp);
79
80 static int default_verify_memory (struct target_ops *self,
81 const gdb_byte *data,
82 CORE_ADDR memaddr, ULONGEST size);
83
84 static void tcomplain (void) ATTRIBUTE_NORETURN;
85
86 static struct target_ops *find_default_run_target (const char *);
87
88 static int dummy_find_memory_regions (struct target_ops *self,
89 find_memory_region_ftype ignore1,
90 void *ignore2);
91
92 static gdb::unique_xmalloc_ptr<char> dummy_make_corefile_notes
93 (struct target_ops *self, bfd *ignore1, int *ignore2);
94
95 static std::string default_pid_to_str (struct target_ops *ops, ptid_t ptid);
96
97 static enum exec_direction_kind default_execution_direction
98 (struct target_ops *self);
99
100 /* Mapping between target_info objects (which have address identity)
101 and corresponding open/factory function/callback. Each add_target
102 call adds one entry to this map, and registers a "target
103 TARGET_NAME" command that when invoked calls the factory registered
104 here. The target_info object is associated with the command via
105 the command's context. */
106 static std::unordered_map<const target_info *, target_open_ftype *>
107 target_factories;
108
109 /* The singleton debug target. */
110
111 static struct target_ops *the_debug_target;
112
113 /* Command list for target. */
114
115 static struct cmd_list_element *targetlist = NULL;
116
117 /* True if we should trust readonly sections from the
118 executable when reading memory. */
119
120 static bool trust_readonly = false;
121
122 /* Nonzero if we should show true memory content including
123 memory breakpoints inserted by GDB. */
124
125 static int show_memory_breakpoints = 0;
126
127 /* These globals control whether GDB attempts to perform these
128 operations; they are useful for targets that need to prevent
129 inadvertent disruption, such as in non-stop mode. */
130
131 bool may_write_registers = true;
132
133 bool may_write_memory = true;
134
135 bool may_insert_breakpoints = true;
136
137 bool may_insert_tracepoints = true;
138
139 bool may_insert_fast_tracepoints = true;
140
141 bool may_stop = true;
142
143 /* Non-zero if we want to see trace of target level stuff. */
144
145 static unsigned int targetdebug = 0;
146
147 static void
148 set_targetdebug (const char *args, int from_tty, struct cmd_list_element *c)
149 {
150 if (targetdebug)
151 current_inferior ()->push_target (the_debug_target);
152 else
153 current_inferior ()->unpush_target (the_debug_target);
154 }
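/* Illustrative note (an assumption about usage, not text from the
   original file): TARGETDEBUG is normally driven from the CLI,
   roughly

       (gdb) set debug target 1
       (gdb) show debug target
       Target debugging is 1.

   which ends up in set_targetdebug above and pushes or unpushes the
   debug target on the current inferior's target stack.  */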
155
156 static void
157 show_targetdebug (struct ui_file *file, int from_tty,
158 struct cmd_list_element *c, const char *value)
159 {
160 fprintf_filtered (file, _("Target debugging is %s.\n"), value);
161 }
162
163 int
164 target_has_memory ()
165 {
166 for (target_ops *t = current_inferior ()->top_target ();
167 t != NULL;
168 t = t->beneath ())
169 if (t->has_memory ())
170 return 1;
171
172 return 0;
173 }
174
175 int
176 target_has_stack ()
177 {
178 for (target_ops *t = current_inferior ()->top_target ();
179 t != NULL;
180 t = t->beneath ())
181 if (t->has_stack ())
182 return 1;
183
184 return 0;
185 }
186
187 int
188 target_has_registers ()
189 {
190 for (target_ops *t = current_inferior ()->top_target ();
191 t != NULL;
192 t = t->beneath ())
193 if (t->has_registers ())
194 return 1;
195
196 return 0;
197 }
198
199 bool
200 target_has_execution (inferior *inf)
201 {
202 if (inf == nullptr)
203 inf = current_inferior ();
204
205 for (target_ops *t = inf->top_target ();
206 t != nullptr;
207 t = inf->find_target_beneath (t))
208 if (t->has_execution (inf))
209 return true;
210
211 return false;
212 }
213
214 const char *
215 target_shortname ()
216 {
217 return current_inferior ()->top_target ()->shortname ();
218 }
219
220 /* See target.h. */
221
222 bool
223 target_attach_no_wait ()
224 {
225 return current_inferior ()->top_target ()->attach_no_wait ();
226 }
227
228 /* See target.h. */
229
230 void
231 target_post_attach (int pid)
232 {
233 return current_inferior ()->top_target ()->post_attach (pid);
234 }
235
236 /* See target.h. */
237
238 void
239 target_prepare_to_store (regcache *regcache)
240 {
241 return current_inferior ()->top_target ()->prepare_to_store (regcache);
242 }
243
244 /* See target.h. */
245
246 bool
247 target_supports_enable_disable_tracepoint ()
248 {
249 target_ops *target = current_inferior ()->top_target ();
250
251 return target->supports_enable_disable_tracepoint ();
252 }
253
254 bool
255 target_supports_string_tracing ()
256 {
257 return current_inferior ()->top_target ()->supports_string_tracing ();
258 }
259
260 /* See target.h. */
261
262 bool
263 target_supports_evaluation_of_breakpoint_conditions ()
264 {
265 target_ops *target = current_inferior ()->top_target ();
266
267 return target->supports_evaluation_of_breakpoint_conditions ();
268 }
269
270 /* See target.h. */
271
272 bool
273 target_supports_dumpcore ()
274 {
275 return current_inferior ()->top_target ()->supports_dumpcore ();
276 }
277
278 /* See target.h. */
279
280 void
281 target_dumpcore (const char *filename)
282 {
283 return current_inferior ()->top_target ()->dumpcore (filename);
284 }
285
286 /* See target.h. */
287
288 bool
289 target_can_run_breakpoint_commands ()
290 {
291 return current_inferior ()->top_target ()->can_run_breakpoint_commands ();
292 }
293
294 /* See target.h. */
295
296 void
297 target_files_info ()
298 {
299 return current_inferior ()->top_target ()->files_info ();
300 }
301
302 /* See target.h. */
303
304 void
305 target_post_startup_inferior (ptid_t ptid)
306 {
307 return current_inferior ()->top_target ()->post_startup_inferior (ptid);
308 }
309
310 /* See target.h. */
311
312 int
313 target_insert_fork_catchpoint (int pid)
314 {
315 return current_inferior ()->top_target ()->insert_fork_catchpoint (pid);
316 }
317
318 /* See target.h. */
319
320 int
321 target_remove_fork_catchpoint (int pid)
322 {
323 return current_inferior ()->top_target ()->remove_fork_catchpoint (pid);
324 }
325
326 /* See target.h. */
327
328 int
329 target_insert_vfork_catchpoint (int pid)
330 {
331 return current_inferior ()->top_target ()->insert_vfork_catchpoint (pid);
332 }
333
334 /* See target.h. */
335
336 int
337 target_remove_vfork_catchpoint (int pid)
338 {
339 return current_inferior ()->top_target ()->remove_vfork_catchpoint (pid);
340 }
341
342 /* See target.h. */
343
344 int
345 target_insert_exec_catchpoint (int pid)
346 {
347 return current_inferior ()->top_target ()->insert_exec_catchpoint (pid);
348 }
349
350 /* See target.h. */
351
352 int
353 target_remove_exec_catchpoint (int pid)
354 {
355 return current_inferior ()->top_target ()->remove_exec_catchpoint (pid);
356 }
357
358 /* See target.h. */
359
360 int
361 target_set_syscall_catchpoint (int pid, bool needed, int any_count,
362 gdb::array_view<const int> syscall_counts)
363 {
364 target_ops *target = current_inferior ()->top_target ();
365
366 return target->set_syscall_catchpoint (pid, needed, any_count,
367 syscall_counts);
368 }
369
370 /* See target.h. */
371
372 void
373 target_rcmd (const char *command, struct ui_file *outbuf)
374 {
375 return current_inferior ()->top_target ()->rcmd (command, outbuf);
376 }
377
378 /* See target.h. */
379
380 bool
381 target_can_lock_scheduler ()
382 {
383 target_ops *target = current_inferior ()->top_target ();
384
385 return (target->get_thread_control_capabilities () & tc_schedlock) != 0;
386 }
387
388 /* See target.h. */
389
390 bool
391 target_can_async_p ()
392 {
393 return current_inferior ()->top_target ()->can_async_p ();
394 }
395
396 /* See target.h. */
397
398 bool
399 target_is_async_p ()
400 {
401 return current_inferior ()->top_target ()->is_async_p ();
402 }
403
404 exec_direction_kind
405 target_execution_direction ()
406 {
407 return current_inferior ()->top_target ()->execution_direction ();
408 }
409
410 /* See target.h. */
411
412 const char *
413 target_extra_thread_info (thread_info *tp)
414 {
415 return current_inferior ()->top_target ()->extra_thread_info (tp);
416 }
417
418 /* See target.h. */
419
420 char *
421 target_pid_to_exec_file (int pid)
422 {
423 return current_inferior ()->top_target ()->pid_to_exec_file (pid);
424 }
425
426 /* See target.h. */
427
428 gdbarch *
429 target_thread_architecture (ptid_t ptid)
430 {
431 return current_inferior ()->top_target ()->thread_architecture (ptid);
432 }
433
434 /* See target.h. */
435
436 int
437 target_find_memory_regions (find_memory_region_ftype func, void *data)
438 {
439 return current_inferior ()->top_target ()->find_memory_regions (func, data);
440 }
441
442 /* See target.h. */
443
444 gdb::unique_xmalloc_ptr<char>
445 target_make_corefile_notes (bfd *bfd, int *size_p)
446 {
447 return current_inferior ()->top_target ()->make_corefile_notes (bfd, size_p);
448 }
449
450 gdb_byte *
451 target_get_bookmark (const char *args, int from_tty)
452 {
453 return current_inferior ()->top_target ()->get_bookmark (args, from_tty);
454 }
455
456 void
457 target_goto_bookmark (const gdb_byte *arg, int from_tty)
458 {
459 return current_inferior ()->top_target ()->goto_bookmark (arg, from_tty);
460 }
461
462 /* See target.h. */
463
464 bool
465 target_stopped_by_watchpoint ()
466 {
467 return current_inferior ()->top_target ()->stopped_by_watchpoint ();
468 }
469
470 /* See target.h. */
471
472 bool
473 target_stopped_by_sw_breakpoint ()
474 {
475 return current_inferior ()->top_target ()->stopped_by_sw_breakpoint ();
476 }
477
478 bool
479 target_supports_stopped_by_sw_breakpoint ()
480 {
481 target_ops *target = current_inferior ()->top_target ();
482
483 return target->supports_stopped_by_sw_breakpoint ();
484 }
485
486 bool
487 target_stopped_by_hw_breakpoint ()
488 {
489 return current_inferior ()->top_target ()->stopped_by_hw_breakpoint ();
490 }
491
492 bool
493 target_supports_stopped_by_hw_breakpoint ()
494 {
495 target_ops *target = current_inferior ()->top_target ();
496
497 return target->supports_stopped_by_hw_breakpoint ();
498 }
499
500 /* See target.h. */
501
502 bool
503 target_have_steppable_watchpoint ()
504 {
505 return current_inferior ()->top_target ()->have_steppable_watchpoint ();
506 }
507
508 /* See target.h. */
509
510 int
511 target_can_use_hardware_watchpoint (bptype type, int cnt, int othertype)
512 {
513 target_ops *target = current_inferior ()->top_target ();
514
515 return target->can_use_hw_breakpoint (type, cnt, othertype);
516 }
517
518 /* See target.h. */
519
520 int
521 target_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
522 {
523 target_ops *target = current_inferior ()->top_target ();
524
525 return target->region_ok_for_hw_watchpoint (addr, len);
526 }
527
528
529 int
530 target_can_do_single_step ()
531 {
532 return current_inferior ()->top_target ()->can_do_single_step ();
533 }
534
535 /* See target.h. */
536
537 int
538 target_insert_watchpoint (CORE_ADDR addr, int len, target_hw_bp_type type,
539 expression *cond)
540 {
541 target_ops *target = current_inferior ()->top_target ();
542
543 return target->insert_watchpoint (addr, len, type, cond);
544 }
545
546 /* See target.h. */
547
548 int
549 target_remove_watchpoint (CORE_ADDR addr, int len, target_hw_bp_type type,
550 expression *cond)
551 {
552 target_ops *target = current_inferior ()->top_target ();
553
554 return target->remove_watchpoint (addr, len, type, cond);
555 }
556
557 /* See target.h. */
558
559 int
560 target_insert_hw_breakpoint (gdbarch *gdbarch, bp_target_info *bp_tgt)
561 {
562 target_ops *target = current_inferior ()->top_target ();
563
564 return target->insert_hw_breakpoint (gdbarch, bp_tgt);
565 }
566
567 /* See target.h. */
568
569 int
570 target_remove_hw_breakpoint (gdbarch *gdbarch, bp_target_info *bp_tgt)
571 {
572 target_ops *target = current_inferior ()->top_target ();
573
574 return target->remove_hw_breakpoint (gdbarch, bp_tgt);
575 }
576
577 /* See target.h. */
578
579 bool
580 target_can_accel_watchpoint_condition (CORE_ADDR addr, int len, int type,
581 expression *cond)
582 {
583 target_ops *target = current_inferior ()->top_target ();
584
585 return target->can_accel_watchpoint_condition (addr, len, type, cond);
586 }
587
588 /* See target.h. */
589
590 bool
591 target_can_execute_reverse ()
592 {
593 return current_inferior ()->top_target ()->can_execute_reverse ();
594 }
595
596 ptid_t
597 target_get_ada_task_ptid (long lwp, long tid)
598 {
599 return current_inferior ()->top_target ()->get_ada_task_ptid (lwp, tid);
600 }
601
602 bool
603 target_filesystem_is_local ()
604 {
605 return current_inferior ()->top_target ()->filesystem_is_local ();
606 }
607
608 void
609 target_trace_init ()
610 {
611 return current_inferior ()->top_target ()->trace_init ();
612 }
613
614 void
615 target_download_tracepoint (bp_location *location)
616 {
617 return current_inferior ()->top_target ()->download_tracepoint (location);
618 }
619
620 bool
621 target_can_download_tracepoint ()
622 {
623 return current_inferior ()->top_target ()->can_download_tracepoint ();
624 }
625
626 void
627 target_download_trace_state_variable (const trace_state_variable &tsv)
628 {
629 target_ops *target = current_inferior ()->top_target ();
630
631 return target->download_trace_state_variable (tsv);
632 }
633
634 void
635 target_enable_tracepoint (bp_location *loc)
636 {
637 return current_inferior ()->top_target ()->enable_tracepoint (loc);
638 }
639
640 void
641 target_disable_tracepoint (bp_location *loc)
642 {
643 return current_inferior ()->top_target ()->disable_tracepoint (loc);
644 }
645
646 void
647 target_trace_start ()
648 {
649 return current_inferior ()->top_target ()->trace_start ();
650 }
651
652 void
653 target_trace_set_readonly_regions ()
654 {
655 return current_inferior ()->top_target ()->trace_set_readonly_regions ();
656 }
657
658 int
659 target_get_trace_status (trace_status *ts)
660 {
661 return current_inferior ()->top_target ()->get_trace_status (ts);
662 }
663
664 void
665 target_get_tracepoint_status (breakpoint *tp, uploaded_tp *utp)
666 {
667 return current_inferior ()->top_target ()->get_tracepoint_status (tp, utp);
668 }
669
670 void
671 target_trace_stop ()
672 {
673 return current_inferior ()->top_target ()->trace_stop ();
674 }
675
676 int
677 target_trace_find (trace_find_type type, int num,
678 CORE_ADDR addr1, CORE_ADDR addr2, int *tpp)
679 {
680 target_ops *target = current_inferior ()->top_target ();
681
682 return target->trace_find (type, num, addr1, addr2, tpp);
683 }
684
685 bool
686 target_get_trace_state_variable_value (int tsv, LONGEST *val)
687 {
688 target_ops *target = current_inferior ()->top_target ();
689
690 return target->get_trace_state_variable_value (tsv, val);
691 }
692
693 int
694 target_save_trace_data (const char *filename)
695 {
696 return current_inferior ()->top_target ()->save_trace_data (filename);
697 }
698
699 int
700 target_upload_tracepoints (uploaded_tp **utpp)
701 {
702 return current_inferior ()->top_target ()->upload_tracepoints (utpp);
703 }
704
705 int
706 target_upload_trace_state_variables (uploaded_tsv **utsvp)
707 {
708 target_ops *target = current_inferior ()->top_target ();
709
710 return target->upload_trace_state_variables (utsvp);
711 }
712
713 LONGEST
714 target_get_raw_trace_data (gdb_byte *buf, ULONGEST offset, LONGEST len)
715 {
716 target_ops *target = current_inferior ()->top_target ();
717
718 return target->get_raw_trace_data (buf, offset, len);
719 }
720
721 int
722 target_get_min_fast_tracepoint_insn_len ()
723 {
724 target_ops *target = current_inferior ()->top_target ();
725
726 return target->get_min_fast_tracepoint_insn_len ();
727 }
728
729 void
730 target_set_disconnected_tracing (int val)
731 {
732 return current_inferior ()->top_target ()->set_disconnected_tracing (val);
733 }
734
735 void
736 target_set_circular_trace_buffer (int val)
737 {
738 return current_inferior ()->top_target ()->set_circular_trace_buffer (val);
739 }
740
741 void
742 target_set_trace_buffer_size (LONGEST val)
743 {
744 return current_inferior ()->top_target ()->set_trace_buffer_size (val);
745 }
746
747 bool
748 target_set_trace_notes (const char *user, const char *notes,
749 const char *stopnotes)
750 {
751 target_ops *target = current_inferior ()->top_target ();
752
753 return target->set_trace_notes (user, notes, stopnotes);
754 }
755
756 bool
757 target_get_tib_address (ptid_t ptid, CORE_ADDR *addr)
758 {
759 return current_inferior ()->top_target ()->get_tib_address (ptid, addr);
760 }
761
762 void
763 target_set_permissions ()
764 {
765 return current_inferior ()->top_target ()->set_permissions ();
766 }
767
768 bool
769 target_static_tracepoint_marker_at (CORE_ADDR addr,
770 static_tracepoint_marker *marker)
771 {
772 target_ops *target = current_inferior ()->top_target ();
773
774 return target->static_tracepoint_marker_at (addr, marker);
775 }
776
777 std::vector<static_tracepoint_marker>
778 target_static_tracepoint_markers_by_strid (const char *marker_id)
779 {
780 target_ops *target = current_inferior ()->top_target ();
781
782 return target->static_tracepoint_markers_by_strid (marker_id);
783 }
784
785 traceframe_info_up
786 target_traceframe_info ()
787 {
788 return current_inferior ()->top_target ()->traceframe_info ();
789 }
790
791 bool
792 target_use_agent (bool use)
793 {
794 return current_inferior ()->top_target ()->use_agent (use);
795 }
796
797 bool
798 target_can_use_agent ()
799 {
800 return current_inferior ()->top_target ()->can_use_agent ();
801 }
802
803 bool
804 target_augmented_libraries_svr4_read ()
805 {
806 return current_inferior ()->top_target ()->augmented_libraries_svr4_read ();
807 }
808
809 bool
810 target_supports_memory_tagging ()
811 {
812 return current_inferior ()->top_target ()->supports_memory_tagging ();
813 }
814
815 bool
816 target_fetch_memtags (CORE_ADDR address, size_t len, gdb::byte_vector &tags,
817 int type)
818 {
819 return current_inferior ()->top_target ()->fetch_memtags (address, len, tags, type);
820 }
821
822 bool
823 target_store_memtags (CORE_ADDR address, size_t len,
824 const gdb::byte_vector &tags, int type)
825 {
826 return current_inferior ()->top_target ()->store_memtags (address, len, tags, type);
827 }
828
829 void
830 target_log_command (const char *p)
831 {
832 return current_inferior ()->top_target ()->log_command (p);
833 }
834
835 /* This is used to implement the various target commands. */
836
837 static void
838 open_target (const char *args, int from_tty, struct cmd_list_element *command)
839 {
840 auto *ti = static_cast<target_info *> (get_cmd_context (command));
841 target_open_ftype *func = target_factories[ti];
842
843 if (targetdebug)
844 fprintf_unfiltered (gdb_stdlog, "-> %s->open (...)\n",
845 ti->shortname);
846
847 func (args, from_tty);
848
849 if (targetdebug)
850 fprintf_unfiltered (gdb_stdlog, "<- %s->open (%s, %d)\n",
851 ti->shortname, args, from_tty);
852 }
853
854 /* See target.h. */
855
856 void
857 add_target (const target_info &t, target_open_ftype *func,
858 completer_ftype *completer)
859 {
860 struct cmd_list_element *c;
861
862 auto &func_slot = target_factories[&t];
863 if (func_slot != nullptr)
864 internal_error (__FILE__, __LINE__,
865 _("target already added (\"%s\")."), t.shortname);
866 func_slot = func;
867
868 if (targetlist == NULL)
869 add_basic_prefix_cmd ("target", class_run, _("\
870 Connect to a target machine or process.\n\
871 The first argument is the type or protocol of the target machine.\n\
872 Remaining arguments are interpreted by the target protocol. For more\n\
873 information on the arguments for a particular protocol, type\n\
874 `help target ' followed by the protocol name."),
875 &targetlist, 0, &cmdlist);
876 c = add_cmd (t.shortname, no_class, t.doc, &targetlist);
877 set_cmd_context (c, (void *) &t);
878 set_cmd_sfunc (c, open_target);
879 if (completer != NULL)
880 set_cmd_completer (c, completer);
881 }
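/* Illustrative sketch (an assumption, not code from this file): a
   backend typically registers itself from its _initialize_* routine
   with a static target_info and an open callback.  For a
   hypothetical "frob" target this would look roughly like:

       static const target_info frob_target_info = {
         "frob",
         N_("Frob target"),
         N_("Use a frob connection.\nUsage: target frob ARGS")
       };

       static void
       frob_target_open (const char *args, int from_tty)
       {
         ...
       }

       void
       _initialize_frob_target ()
       {
         add_target (frob_target_info, frob_target_open,
                     filename_completer);
       }

   After this, "target frob ARGS" calls frob_target_open via
   open_target above.  */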
882
883 /* See target.h. */
884
885 void
886 add_deprecated_target_alias (const target_info &tinfo, const char *alias)
887 {
888 struct cmd_list_element *c;
889 char *alt;
890
891 /* If we use add_alias_cmd here, we do not get the deprecated warning,
892 see PR cli/15104. */
893 c = add_cmd (alias, no_class, tinfo.doc, &targetlist);
894 set_cmd_sfunc (c, open_target);
895 set_cmd_context (c, (void *) &tinfo);
896 alt = xstrprintf ("target %s", tinfo.shortname);
897 deprecate_cmd (c, alt);
898 }
899
900 /* Stub functions */
901
902 void
903 target_kill (void)
904 {
905 current_inferior ()->top_target ()->kill ();
906 }
907
908 void
909 target_load (const char *arg, int from_tty)
910 {
911 target_dcache_invalidate ();
912 current_inferior ()->top_target ()->load (arg, from_tty);
913 }
914
915 /* Define it. */
916
917 target_terminal_state target_terminal::m_terminal_state
918 = target_terminal_state::is_ours;
919
920 /* See target/target.h. */
921
922 void
923 target_terminal::init (void)
924 {
925 current_inferior ()->top_target ()->terminal_init ();
926
927 m_terminal_state = target_terminal_state::is_ours;
928 }
929
930 /* See target/target.h. */
931
932 void
933 target_terminal::inferior (void)
934 {
935 struct ui *ui = current_ui;
936
937 /* A background resume (``run&'') should leave GDB in control of the
938 terminal. */
939 if (ui->prompt_state != PROMPT_BLOCKED)
940 return;
941
942 /* Since we always run the inferior in the main console (unless "set
943 inferior-tty" is in effect), when some UI other than the main one
944 calls target_terminal::inferior, then we leave the main UI's
945 terminal settings as is. */
946 if (ui != main_ui)
947 return;
948
949 /* If GDB is resuming the inferior in the foreground, install
950 inferior's terminal modes. */
951
952 struct inferior *inf = current_inferior ();
953
954 if (inf->terminal_state != target_terminal_state::is_inferior)
955 {
956 current_inferior ()->top_target ()->terminal_inferior ();
957 inf->terminal_state = target_terminal_state::is_inferior;
958 }
959
960 m_terminal_state = target_terminal_state::is_inferior;
961
962 /* If the user hit C-c before, pretend that it was hit right
963 here. */
964 if (check_quit_flag ())
965 target_pass_ctrlc ();
966 }
967
968 /* See target/target.h. */
969
970 void
971 target_terminal::restore_inferior (void)
972 {
973 struct ui *ui = current_ui;
974
975 /* See target_terminal::inferior(). */
976 if (ui->prompt_state != PROMPT_BLOCKED || ui != main_ui)
977 return;
978
979 /* Restore the terminal settings of inferiors that were in the
980 foreground but are now ours_for_output due to a temporary
981 target_terminal::ours_for_output() call. */
982
983 {
984 scoped_restore_current_inferior restore_inferior;
985
986 for (::inferior *inf : all_inferiors ())
987 {
988 if (inf->terminal_state == target_terminal_state::is_ours_for_output)
989 {
990 set_current_inferior (inf);
991 current_inferior ()->top_target ()->terminal_inferior ();
992 inf->terminal_state = target_terminal_state::is_inferior;
993 }
994 }
995 }
996
997 m_terminal_state = target_terminal_state::is_inferior;
998
999 /* If the user hit C-c before, pretend that it was hit right
1000 here. */
1001 if (check_quit_flag ())
1002 target_pass_ctrlc ();
1003 }
1004
1005 /* Switch terminal state to DESIRED_STATE, either is_ours, or
1006 is_ours_for_output. */
1007
1008 static void
1009 target_terminal_is_ours_kind (target_terminal_state desired_state)
1010 {
1011 scoped_restore_current_inferior restore_inferior;
1012
1013 /* Must do this in two passes. First, have all inferiors save the
1014 current terminal settings. Then, after all inferiors have had a
1015 chance to safely save the terminal settings, restore GDB's
1016 terminal settings. */
1017
1018 for (inferior *inf : all_inferiors ())
1019 {
1020 if (inf->terminal_state == target_terminal_state::is_inferior)
1021 {
1022 set_current_inferior (inf);
1023 current_inferior ()->top_target ()->terminal_save_inferior ();
1024 }
1025 }
1026
1027 for (inferior *inf : all_inferiors ())
1028 {
1029 /* Note we don't check is_inferior here like above because we
1030 need to handle 'is_ours_for_output -> is_ours' too. Careful
1031 to never transition from 'is_ours' to 'is_ours_for_output',
1032 though. */
1033 if (inf->terminal_state != target_terminal_state::is_ours
1034 && inf->terminal_state != desired_state)
1035 {
1036 set_current_inferior (inf);
1037 if (desired_state == target_terminal_state::is_ours)
1038 current_inferior ()->top_target ()->terminal_ours ();
1039 else if (desired_state == target_terminal_state::is_ours_for_output)
1040 current_inferior ()->top_target ()->terminal_ours_for_output ();
1041 else
1042 gdb_assert_not_reached ("unhandled desired state");
1043 inf->terminal_state = desired_state;
1044 }
1045 }
1046 }
1047
1048 /* See target/target.h. */
1049
1050 void
1051 target_terminal::ours ()
1052 {
1053 struct ui *ui = current_ui;
1054
1055 /* See target_terminal::inferior. */
1056 if (ui != main_ui)
1057 return;
1058
1059 if (m_terminal_state == target_terminal_state::is_ours)
1060 return;
1061
1062 target_terminal_is_ours_kind (target_terminal_state::is_ours);
1063 m_terminal_state = target_terminal_state::is_ours;
1064 }
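/* Illustrative summary (an assumption about the typical call
   sequence, not text from the original file): around an execution
   command the terminal ownership usually moves as

       target_terminal::inferior ();          -- before resuming
       ... the inferior runs, GDB waits ...
       target_terminal::ours_for_output ();   -- to print stop output
       target_terminal::ours ();              -- back to reading commands

   with m_terminal_state tracking which of the three states GDB
   currently believes the terminal to be in.  */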
1065
1066 /* See target/target.h. */
1067
1068 void
1069 target_terminal::ours_for_output ()
1070 {
1071 struct ui *ui = current_ui;
1072
1073 /* See target_terminal::inferior. */
1074 if (ui != main_ui)
1075 return;
1076
1077 if (!target_terminal::is_inferior ())
1078 return;
1079
1080 target_terminal_is_ours_kind (target_terminal_state::is_ours_for_output);
1081 target_terminal::m_terminal_state = target_terminal_state::is_ours_for_output;
1082 }
1083
1084 /* See target/target.h. */
1085
1086 void
1087 target_terminal::info (const char *arg, int from_tty)
1088 {
1089 current_inferior ()->top_target ()->terminal_info (arg, from_tty);
1090 }
1091
1092 /* See target.h. */
1093
1094 bool
1095 target_supports_terminal_ours (void)
1096 {
1097 /* The current top target is the target at the top of the target
1098 stack of the current inferior. While normally there's always an
1099 inferior, we must check for nullptr here because we can get here
1100 very early during startup, before the initial inferior is first
1101 created. */
1102 inferior *inf = current_inferior ();
1103
1104 if (inf == nullptr)
1105 return false;
1106 return inf->top_target ()->supports_terminal_ours ();
1107 }
1108
1109 static void
1110 tcomplain (void)
1111 {
1112 error (_("You can't do that when your target is `%s'"),
1113 current_inferior ()->top_target ()->shortname ());
1114 }
1115
1116 void
1117 noprocess (void)
1118 {
1119 error (_("You can't do that without a process to debug."));
1120 }
1121
1122 static void
1123 default_terminal_info (struct target_ops *self, const char *args, int from_tty)
1124 {
1125 printf_unfiltered (_("No saved terminal information.\n"));
1126 }
1127
1128 /* A default implementation for the to_get_ada_task_ptid target method.
1129
1130 This function builds the PTID by using both LWP and TID as part of
1131 the PTID lwp and tid elements. The pid used is the pid of the
1132 inferior_ptid. */
1133
1134 static ptid_t
1135 default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
1136 {
1137 return ptid_t (inferior_ptid.pid (), lwp, tid);
1138 }
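/* Worked example (illustrative values only): if inferior_ptid's pid
   is 1234, then default_get_ada_task_ptid (self, 42, 0) yields the
   ptid (1234, 42, 0), i.e. the Ada task is identified by its LWP
   within the current inferior.  */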
1139
1140 static enum exec_direction_kind
1141 default_execution_direction (struct target_ops *self)
1142 {
1143 if (!target_can_execute_reverse ())
1144 return EXEC_FORWARD;
1145 else if (!target_can_async_p ())
1146 return EXEC_FORWARD;
1147 else
1148 gdb_assert_not_reached ("\
1149 to_execution_direction must be implemented for reverse async");
1150 }
1151
1152 /* See target.h. */
1153
1154 void
1155 decref_target (target_ops *t)
1156 {
1157 t->decref ();
1158 if (t->refcount () == 0)
1159 {
1160 if (t->stratum () == process_stratum)
1161 connection_list_remove (as_process_stratum_target (t));
1162 target_close (t);
1163 }
1164 }
1165
1166 /* See target.h. */
1167
1168 void
1169 target_stack::push (target_ops *t)
1170 {
1171 t->incref ();
1172
1173 strata stratum = t->stratum ();
1174
1175 if (stratum == process_stratum)
1176 connection_list_add (as_process_stratum_target (t));
1177
1178 /* If there's already a target at this stratum, remove it. */
1179
1180 if (m_stack[stratum] != NULL)
1181 unpush (m_stack[stratum]);
1182
1183 /* Now add the new one. */
1184 m_stack[stratum] = t;
1185
1186 if (m_top < stratum)
1187 m_top = stratum;
1188 }
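/* Illustrative consequence (an assumption, not original text):
   because there is a single slot per stratum, pushing a second
   target at the same stratum first unpushes the old one, e.g.

       inf->push_target (core_target_a);   -- process_stratum slot is A
       inf->push_target (core_target_b);   -- A is unpushed, slot is B

   where core_target_a and core_target_b stand for any two
   process_stratum targets.  */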
1189
1190 /* See target.h. */
1191
1192 bool
1193 target_stack::unpush (target_ops *t)
1194 {
1195 gdb_assert (t != NULL);
1196
1197 strata stratum = t->stratum ();
1198
1199 if (stratum == dummy_stratum)
1200 internal_error (__FILE__, __LINE__,
1201 _("Attempt to unpush the dummy target"));
1202
1203 /* Look for the specified target. Note that a target can only occur
1204 once in the target stack. */
1205
1206 if (m_stack[stratum] != t)
1207 {
1208 /* If T wasn't pushed, quit. Only open targets should be
1209 closed. */
1210 return false;
1211 }
1212
1213 /* Unchain the target. */
1214 m_stack[stratum] = NULL;
1215
1216 if (m_top == stratum)
1217 m_top = this->find_beneath (t)->stratum ();
1218
1219 /* Finally close the target, if there are no inferiors
1220 referencing this target still. Note we do this after unchaining,
1221 so any target method calls from within the target_close
1222 implementation don't end up in T anymore. Do leave the target
1223 open if there are other inferiors referencing this target
1224 still. */
1225 decref_target (t);
1226
1227 return true;
1228 }
1229
1230 /* Unpush TARGET and assert that it worked. */
1231
1232 static void
1233 unpush_target_and_assert (struct target_ops *target)
1234 {
1235 if (!current_inferior ()->unpush_target (target))
1236 {
1237 fprintf_unfiltered (gdb_stderr,
1238 "pop_all_targets couldn't find target %s\n",
1239 target->shortname ());
1240 internal_error (__FILE__, __LINE__,
1241 _("failed internal consistency check"));
1242 }
1243 }
1244
1245 void
1246 pop_all_targets_above (enum strata above_stratum)
1247 {
1248 while ((int) (current_inferior ()->top_target ()->stratum ())
1249 > (int) above_stratum)
1250 unpush_target_and_assert (current_inferior ()->top_target ());
1251 }
1252
1253 /* See target.h. */
1254
1255 void
1256 pop_all_targets_at_and_above (enum strata stratum)
1257 {
1258 while ((int) (current_inferior ()->top_target ()->stratum ())
1259 >= (int) stratum)
1260 unpush_target_and_assert (current_inferior ()->top_target ());
1261 }
1262
1263 void
1264 pop_all_targets (void)
1265 {
1266 pop_all_targets_above (dummy_stratum);
1267 }
1268
1269 void
1270 target_unpusher::operator() (struct target_ops *ops) const
1271 {
1272 current_inferior ()->unpush_target (ops);
1273 }
1274
1275 /* Default implementation of to_get_thread_local_address. */
1276
1277 static void
1278 generic_tls_error (void)
1279 {
1280 throw_error (TLS_GENERIC_ERROR,
1281 _("Cannot find thread-local variables on this target"));
1282 }
1283
1284 /* Using the objfile specified in OBJFILE, find the address for the
1285 current thread's thread-local storage with offset OFFSET. */
1286 CORE_ADDR
1287 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
1288 {
1289 volatile CORE_ADDR addr = 0;
1290 struct target_ops *target = current_inferior ()->top_target ();
1291 struct gdbarch *gdbarch = target_gdbarch ();
1292
1293 if (gdbarch_fetch_tls_load_module_address_p (gdbarch))
1294 {
1295 ptid_t ptid = inferior_ptid;
1296
1297 try
1298 {
1299 CORE_ADDR lm_addr;
1300
1301 /* Fetch the load module address for this objfile. */
1302 lm_addr = gdbarch_fetch_tls_load_module_address (gdbarch,
1303 objfile);
1304
1305 if (gdbarch_get_thread_local_address_p (gdbarch))
1306 addr = gdbarch_get_thread_local_address (gdbarch, ptid, lm_addr,
1307 offset);
1308 else
1309 addr = target->get_thread_local_address (ptid, lm_addr, offset);
1310 }
1311 /* If an error occurred, print TLS related messages here. Otherwise,
1312 throw the error to some higher catcher. */
1313 catch (const gdb_exception &ex)
1314 {
1315 int objfile_is_library = (objfile->flags & OBJF_SHARED);
1316
1317 switch (ex.error)
1318 {
1319 case TLS_NO_LIBRARY_SUPPORT_ERROR:
1320 error (_("Cannot find thread-local variables "
1321 "in this thread library."));
1322 break;
1323 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
1324 if (objfile_is_library)
1325 error (_("Cannot find shared library `%s' in dynamic"
1326 " linker's load module list"), objfile_name (objfile));
1327 else
1328 error (_("Cannot find executable file `%s' in dynamic"
1329 " linker's load module list"), objfile_name (objfile));
1330 break;
1331 case TLS_NOT_ALLOCATED_YET_ERROR:
1332 if (objfile_is_library)
1333 error (_("The inferior has not yet allocated storage for"
1334 " thread-local variables in\n"
1335 "the shared library `%s'\n"
1336 "for %s"),
1337 objfile_name (objfile),
1338 target_pid_to_str (ptid).c_str ());
1339 else
1340 error (_("The inferior has not yet allocated storage for"
1341 " thread-local variables in\n"
1342 "the executable `%s'\n"
1343 "for %s"),
1344 objfile_name (objfile),
1345 target_pid_to_str (ptid).c_str ());
1346 break;
1347 case TLS_GENERIC_ERROR:
1348 if (objfile_is_library)
1349 error (_("Cannot find thread-local storage for %s, "
1350 "shared library %s:\n%s"),
1351 target_pid_to_str (ptid).c_str (),
1352 objfile_name (objfile), ex.what ());
1353 else
1354 error (_("Cannot find thread-local storage for %s, "
1355 "executable file %s:\n%s"),
1356 target_pid_to_str (ptid).c_str (),
1357 objfile_name (objfile), ex.what ());
1358 break;
1359 default:
1360 throw;
1361 break;
1362 }
1363 }
1364 }
1365 else
1366 error (_("Cannot find thread-local variables on this target"));
1367
1368 return addr;
1369 }
1370
1371 const char *
1372 target_xfer_status_to_string (enum target_xfer_status status)
1373 {
1374 #define CASE(X) case X: return #X
1375 switch (status)
1376 {
1377 CASE(TARGET_XFER_E_IO);
1378 CASE(TARGET_XFER_UNAVAILABLE);
1379 default:
1380 return "<unknown>";
1381 }
1382 #undef CASE
1383 };
1384
1385
1386 /* See target.h. */
1387
1388 gdb::unique_xmalloc_ptr<char>
1389 target_read_string (CORE_ADDR memaddr, int len, int *bytes_read)
1390 {
1391 gdb::unique_xmalloc_ptr<gdb_byte> buffer;
1392
1393 int ignore;
1394 if (bytes_read == nullptr)
1395 bytes_read = &ignore;
1396
1397 /* Note that the endian-ness does not matter here. */
1398 int errcode = read_string (memaddr, -1, 1, len, BFD_ENDIAN_LITTLE,
1399 &buffer, bytes_read);
1400 if (errcode != 0)
1401 return {};
1402
1403 return gdb::unique_xmalloc_ptr<char> ((char *) buffer.release ());
1404 }
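/* Illustrative use (an assumed caller, not from this file):

       int bytes = 0;
       gdb::unique_xmalloc_ptr<char> str
         = target_read_string (addr, 256, &bytes);
       if (str != nullptr)
         printf_filtered ("%s\n", str.get ());

   reads up to 256 bytes starting at ADDR, stopping at the first NUL
   or on an error, with BYTES receiving how many bytes were read.  */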
1405
1406 const target_section_table *
1407 target_get_section_table (struct target_ops *target)
1408 {
1409 return target->get_section_table ();
1410 }
1411
1412 /* Find a section containing ADDR. */
1413
1414 const struct target_section *
1415 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1416 {
1417 const target_section_table *table = target_get_section_table (target);
1418
1419 if (table == NULL)
1420 return NULL;
1421
1422 for (const target_section &secp : *table)
1423 {
1424 if (addr >= secp.addr && addr < secp.endaddr)
1425 return &secp;
1426 }
1427 return NULL;
1428 }
1429
1430 /* See target.h. */
1431
1432 const target_section_table *
1433 default_get_section_table ()
1434 {
1435 return &current_program_space->target_sections ();
1436 }
1437
1438 /* Helper for the memory xfer routines. Checks the attributes of the
1439 memory region of MEMADDR against the read or write being attempted.
1440 If the access is permitted, returns true; otherwise returns false.
1441 REGION_P is an optional output parameter. If not-NULL, it is
1442 filled with a pointer to the memory region of MEMADDR. REG_LEN
1443 returns LEN trimmed to the end of the region. This is how much the
1444 caller can continue requesting, if the access is permitted. A
1445 single xfer request must not straddle memory region boundaries. */
1446
1447 static int
1448 memory_xfer_check_region (gdb_byte *readbuf, const gdb_byte *writebuf,
1449 ULONGEST memaddr, ULONGEST len, ULONGEST *reg_len,
1450 struct mem_region **region_p)
1451 {
1452 struct mem_region *region;
1453
1454 region = lookup_mem_region (memaddr);
1455
1456 if (region_p != NULL)
1457 *region_p = region;
1458
1459 switch (region->attrib.mode)
1460 {
1461 case MEM_RO:
1462 if (writebuf != NULL)
1463 return 0;
1464 break;
1465
1466 case MEM_WO:
1467 if (readbuf != NULL)
1468 return 0;
1469 break;
1470
1471 case MEM_FLASH:
1472 /* We only support writing to flash during "load" for now. */
1473 if (writebuf != NULL)
1474 error (_("Writing to flash memory forbidden in this context"));
1475 break;
1476
1477 case MEM_NONE:
1478 return 0;
1479 }
1480
1481 /* region->hi == 0 means there's no upper bound. */
1482 if (memaddr + len < region->hi || region->hi == 0)
1483 *reg_len = len;
1484 else
1485 *reg_len = region->hi - memaddr;
1486
1487 return 1;
1488 }
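/* Worked example (illustrative numbers, not from the original
   sources): with a MEM_RW region covering [0x1000, 0x2000) and a
   read of LEN 0x800 starting at MEMADDR 0x1c00, the access is
   allowed and *REG_LEN is trimmed to 0x400; the caller transfers
   those 0x400 bytes and then issues a new request for the remainder
   in the next region.  A write into a MEM_RO region simply returns
   0.  */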
1489
1490 /* Read memory from more than one valid target. A core file, for
1491 instance, could have some of memory but delegate other bits to
1492 the target below it. So, we must manually try all targets. */
1493
1494 enum target_xfer_status
1495 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1496 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1497 ULONGEST *xfered_len)
1498 {
1499 enum target_xfer_status res;
1500
1501 do
1502 {
1503 res = ops->xfer_partial (TARGET_OBJECT_MEMORY, NULL,
1504 readbuf, writebuf, memaddr, len,
1505 xfered_len);
1506 if (res == TARGET_XFER_OK)
1507 break;
1508
1509 /* Stop if the target reports that the memory is not available. */
1510 if (res == TARGET_XFER_UNAVAILABLE)
1511 break;
1512
1513 /* Don't continue past targets which have all the memory.
1514 At one time, this code was necessary to read data from
1515 executables / shared libraries when data for the requested
1516 addresses weren't available in the core file. But now the
1517 core target handles this case itself. */
1518 if (ops->has_all_memory ())
1519 break;
1520
1521 ops = ops->beneath ();
1522 }
1523 while (ops != NULL);
1524
1525 /* The cache works at the raw memory level. Make sure the cache
1526 gets updated with raw contents no matter what kind of memory
1527 object was originally being written. Note we do write-through
1528 first, so that if it fails, we don't write to the cache contents
1529 that never made it to the target. */
1530 if (writebuf != NULL
1531 && inferior_ptid != null_ptid
1532 && target_dcache_init_p ()
1533 && (stack_cache_enabled_p () || code_cache_enabled_p ()))
1534 {
1535 DCACHE *dcache = target_dcache_get ();
1536
1537 /* Note that writing to an area of memory which wasn't present
1538 in the cache doesn't cause it to be loaded in. */
1539 dcache_update (dcache, res, memaddr, writebuf, *xfered_len);
1540 }
1541
1542 return res;
1543 }
1544
1545 /* Perform a partial memory transfer.
1546 For docs see target.h, to_xfer_partial. */
1547
1548 static enum target_xfer_status
1549 memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
1550 gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
1551 ULONGEST len, ULONGEST *xfered_len)
1552 {
1553 enum target_xfer_status res;
1554 ULONGEST reg_len;
1555 struct mem_region *region;
1556 struct inferior *inf;
1557
1558 /* For accesses to unmapped overlay sections, read directly from
1559 files. Must do this first, as MEMADDR may need adjustment. */
1560 if (readbuf != NULL && overlay_debugging)
1561 {
1562 struct obj_section *section = find_pc_overlay (memaddr);
1563
1564 if (pc_in_unmapped_range (memaddr, section))
1565 {
1566 const target_section_table *table = target_get_section_table (ops);
1567 const char *section_name = section->the_bfd_section->name;
1568
1569 memaddr = overlay_mapped_address (memaddr, section);
1570
1571 auto match_cb = [=] (const struct target_section *s)
1572 {
1573 return (strcmp (section_name, s->the_bfd_section->name) == 0);
1574 };
1575
1576 return section_table_xfer_memory_partial (readbuf, writebuf,
1577 memaddr, len, xfered_len,
1578 *table, match_cb);
1579 }
1580 }
1581
1582 /* Try the executable files, if "trust-readonly-sections" is set. */
1583 if (readbuf != NULL && trust_readonly)
1584 {
1585 const struct target_section *secp
1586 = target_section_by_addr (ops, memaddr);
1587 if (secp != NULL
1588 && (bfd_section_flags (secp->the_bfd_section) & SEC_READONLY))
1589 {
1590 const target_section_table *table = target_get_section_table (ops);
1591 return section_table_xfer_memory_partial (readbuf, writebuf,
1592 memaddr, len, xfered_len,
1593 *table);
1594 }
1595 }
1596
1597 /* Try GDB's internal data cache. */
1598
1599 if (!memory_xfer_check_region (readbuf, writebuf, memaddr, len, &reg_len,
1600 &region))
1601 return TARGET_XFER_E_IO;
1602
1603 if (inferior_ptid != null_ptid)
1604 inf = current_inferior ();
1605 else
1606 inf = NULL;
1607
1608 if (inf != NULL
1609 && readbuf != NULL
1610 /* The dcache reads whole cache lines; that doesn't play well
1611 with reading from a trace buffer, because reading outside of
1612 the collected memory range fails. */
1613 && get_traceframe_number () == -1
1614 && (region->attrib.cache
1615 || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
1616 || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
1617 {
1618 DCACHE *dcache = target_dcache_get_or_init ();
1619
1620 return dcache_read_memory_partial (ops, dcache, memaddr, readbuf,
1621 reg_len, xfered_len);
1622 }
1623
1624 /* If none of those methods found the memory we wanted, fall back
1625 to a target partial transfer. Normally a single call to
1626 to_xfer_partial is enough; if it doesn't recognize an object
1627 it will call the to_xfer_partial of the next target down.
1628 But for memory this won't do. Memory is the only target
1629 object which can be read from more than one valid target.
1630 A core file, for instance, could have some of memory but
1631 delegate other bits to the target below it. So, we must
1632 manually try all targets. */
1633
1634 res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
1635 xfered_len);
1636
1637 /* If we still haven't got anything, return the last error. We
1638 give up. */
1639 return res;
1640 }
1641
1642 /* Perform a partial memory transfer. For docs see target.h,
1643 to_xfer_partial. */
1644
1645 static enum target_xfer_status
1646 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1647 gdb_byte *readbuf, const gdb_byte *writebuf,
1648 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1649 {
1650 enum target_xfer_status res;
1651
1652 /* Zero length requests are ok and require no work. */
1653 if (len == 0)
1654 return TARGET_XFER_EOF;
1655
1656 memaddr = address_significant (target_gdbarch (), memaddr);
1657
1658 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1659 breakpoint insns, thus hiding out from higher layers whether
1660 there are software breakpoints inserted in the code stream. */
1661 if (readbuf != NULL)
1662 {
1663 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1664 xfered_len);
1665
1666 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1667 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, *xfered_len);
1668 }
1669 else
1670 {
1671 /* A large write request is likely to be partially satisfied
1672 by memory_xfer_partial_1. We will continually malloc
1673 and free a copy of the entire write request for breakpoint
1674 shadow handling even though we only end up writing a small
1675 subset of it. Cap writes to a limit specified by the target
1676 to mitigate this. */
1677 len = std::min (ops->get_memory_xfer_limit (), len);
1678
1679 gdb::byte_vector buf (writebuf, writebuf + len);
1680 breakpoint_xfer_memory (NULL, buf.data (), writebuf, memaddr, len);
1681 res = memory_xfer_partial_1 (ops, object, NULL, buf.data (), memaddr, len,
1682 xfered_len);
1683 }
1684
1685 return res;
1686 }
1687
1688 scoped_restore_tmpl<int>
1689 make_scoped_restore_show_memory_breakpoints (int show)
1690 {
1691 return make_scoped_restore (&show_memory_breakpoints, show);
1692 }
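/* Illustrative use (an assumed caller, not from this file): code
   that wants to read memory exactly as the target holds it, with any
   software breakpoint instructions GDB inserted left visible, does

       scoped_restore restore_memory
         = make_scoped_restore_show_memory_breakpoints (1);
       target_read_memory (addr, buf, len);

   and the previous setting is restored when RESTORE_MEMORY goes out
   of scope.  */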
1693
1694 /* For docs see target.h, to_xfer_partial. */
1695
1696 enum target_xfer_status
1697 target_xfer_partial (struct target_ops *ops,
1698 enum target_object object, const char *annex,
1699 gdb_byte *readbuf, const gdb_byte *writebuf,
1700 ULONGEST offset, ULONGEST len,
1701 ULONGEST *xfered_len)
1702 {
1703 enum target_xfer_status retval;
1704
1705 /* Transfer is done when LEN is zero. */
1706 if (len == 0)
1707 return TARGET_XFER_EOF;
1708
1709 if (writebuf && !may_write_memory)
1710 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1711 core_addr_to_string_nz (offset), plongest (len));
1712
1713 *xfered_len = 0;
1714
1715 /* If this is a memory transfer, let the memory-specific code
1716 have a look at it instead. Memory transfers are more
1717 complicated. */
1718 if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
1719 || object == TARGET_OBJECT_CODE_MEMORY)
1720 retval = memory_xfer_partial (ops, object, readbuf,
1721 writebuf, offset, len, xfered_len);
1722 else if (object == TARGET_OBJECT_RAW_MEMORY)
1723 {
1724 /* Skip/avoid accessing the target if the memory region
1725 attributes block the access. Check this here instead of in
1726 raw_memory_xfer_partial as otherwise we'd end up checking
1727 this twice in the case of the memory_xfer_partial path is
1728 taken; once before checking the dcache, and another in the
1729 tail call to raw_memory_xfer_partial. */
1730 if (!memory_xfer_check_region (readbuf, writebuf, offset, len, &len,
1731 NULL))
1732 return TARGET_XFER_E_IO;
1733
1734 /* Request the normal memory object from other layers. */
1735 retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
1736 xfered_len);
1737 }
1738 else
1739 retval = ops->xfer_partial (object, annex, readbuf,
1740 writebuf, offset, len, xfered_len);
1741
1742 if (targetdebug)
1743 {
1744 const unsigned char *myaddr = NULL;
1745
1746 fprintf_unfiltered (gdb_stdlog,
1747 "%s:target_xfer_partial "
1748 "(%d, %s, %s, %s, %s, %s) = %d, %s",
1749 ops->shortname (),
1750 (int) object,
1751 (annex ? annex : "(null)"),
1752 host_address_to_string (readbuf),
1753 host_address_to_string (writebuf),
1754 core_addr_to_string_nz (offset),
1755 pulongest (len), retval,
1756 pulongest (*xfered_len));
1757
1758 if (readbuf)
1759 myaddr = readbuf;
1760 if (writebuf)
1761 myaddr = writebuf;
1762 if (retval == TARGET_XFER_OK && myaddr != NULL)
1763 {
1764 int i;
1765
1766 fputs_unfiltered (", bytes =", gdb_stdlog);
1767 for (i = 0; i < *xfered_len; i++)
1768 {
1769 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1770 {
1771 if (targetdebug < 2 && i > 0)
1772 {
1773 fprintf_unfiltered (gdb_stdlog, " ...");
1774 break;
1775 }
1776 fprintf_unfiltered (gdb_stdlog, "\n");
1777 }
1778
1779 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1780 }
1781 }
1782
1783 fputc_unfiltered ('\n', gdb_stdlog);
1784 }
1785
1786 /* Check implementations of to_xfer_partial update *XFERED_LEN
1787 properly. Do assertion after printing debug messages, so that we
1788 can find more clues on assertion failure from debugging messages. */
1789 if (retval == TARGET_XFER_OK || retval == TARGET_XFER_UNAVAILABLE)
1790 gdb_assert (*xfered_len > 0);
1791
1792 return retval;
1793 }
1794
1795 /* Read LEN bytes of target memory at address MEMADDR, placing the
1796 results in GDB's memory at MYADDR. Returns either 0 for success or
1797 -1 if any error occurs.
1798
1799 If an error occurs, no guarantee is made about the contents of the data at
1800 MYADDR. In particular, the caller should not depend upon partial reads
1801 filling the buffer with good data. There is no way for the caller to know
1802 how much good data might have been transferred anyway. Callers that can
1803 deal with partial reads should call target_read (which will retry until
1804 it makes no progress, and then return how much was transferred). */
1805
1806 int
1807 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1808 {
1809 if (target_read (current_inferior ()->top_target (),
1810 TARGET_OBJECT_MEMORY, NULL,
1811 myaddr, memaddr, len) == len)
1812 return 0;
1813 else
1814 return -1;
1815 }
1816
1817 /* See target/target.h. */
1818
1819 int
1820 target_read_uint32 (CORE_ADDR memaddr, uint32_t *result)
1821 {
1822 gdb_byte buf[4];
1823 int r;
1824
1825 r = target_read_memory (memaddr, buf, sizeof buf);
1826 if (r != 0)
1827 return r;
1828 *result = extract_unsigned_integer (buf, sizeof buf,
1829 gdbarch_byte_order (target_gdbarch ()));
1830 return 0;
1831 }
1832
1833 /* Like target_read_memory, but specify explicitly that this is a read
1834 from the target's raw memory. That is, this read bypasses the
1835 dcache, breakpoint shadowing, etc. */
1836
1837 int
1838 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1839 {
1840 if (target_read (current_inferior ()->top_target (),
1841 TARGET_OBJECT_RAW_MEMORY, NULL,
1842 myaddr, memaddr, len) == len)
1843 return 0;
1844 else
1845 return -1;
1846 }
1847
1848 /* Like target_read_memory, but specify explicitly that this is a read from
1849 the target's stack. This may trigger different cache behavior. */
1850
1851 int
1852 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1853 {
1854 if (target_read (current_inferior ()->top_target (),
1855 TARGET_OBJECT_STACK_MEMORY, NULL,
1856 myaddr, memaddr, len) == len)
1857 return 0;
1858 else
1859 return -1;
1860 }
1861
1862 /* Like target_read_memory, but specify explicitly that this is a read from
1863 the target's code. This may trigger different cache behavior. */
1864
1865 int
1866 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1867 {
1868 if (target_read (current_inferior ()->top_target (),
1869 TARGET_OBJECT_CODE_MEMORY, NULL,
1870 myaddr, memaddr, len) == len)
1871 return 0;
1872 else
1873 return -1;
1874 }
1875
1876 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1877 Returns either 0 for success or -1 if any error occurs. If an
1878 error occurs, no guarantee is made about how much data got written.
1879 Callers that can deal with partial writes should call
1880 target_write. */
1881
1882 int
1883 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1884 {
1885 if (target_write (current_inferior ()->top_target (),
1886 TARGET_OBJECT_MEMORY, NULL,
1887 myaddr, memaddr, len) == len)
1888 return 0;
1889 else
1890 return -1;
1891 }
1892
1893 /* Write LEN bytes from MYADDR to target raw memory at address
1894 MEMADDR. Returns either 0 for success or -1 if any error occurs.
1895 If an error occurs, no guarantee is made about how much data got
1896 written. Callers that can deal with partial writes should call
1897 target_write. */
1898
1899 int
1900 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1901 {
1902 if (target_write (current_inferior ()->top_target (),
1903 TARGET_OBJECT_RAW_MEMORY, NULL,
1904 myaddr, memaddr, len) == len)
1905 return 0;
1906 else
1907 return -1;
1908 }
1909
1910 /* Fetch the target's memory map. */
1911
1912 std::vector<mem_region>
1913 target_memory_map (void)
1914 {
1915 target_ops *target = current_inferior ()->top_target ();
1916 std::vector<mem_region> result = target->memory_map ();
1917 if (result.empty ())
1918 return result;
1919
1920 std::sort (result.begin (), result.end ());
1921
1922 /* Check that regions do not overlap. Simultaneously assign
1923 a numbering for the "mem" commands to use to refer to
1924 each region. */
1925 mem_region *last_one = NULL;
1926 for (size_t ix = 0; ix < result.size (); ix++)
1927 {
1928 mem_region *this_one = &result[ix];
1929 this_one->number = ix;
1930
1931 if (last_one != NULL && last_one->hi > this_one->lo)
1932 {
1933 warning (_("Overlapping regions in memory map: ignoring"));
1934 return std::vector<mem_region> ();
1935 }
1936
1937 last_one = this_one;
1938 }
1939
1940 return result;
1941 }
1942
1943 void
1944 target_flash_erase (ULONGEST address, LONGEST length)
1945 {
1946 current_inferior ()->top_target ()->flash_erase (address, length);
1947 }
1948
1949 void
1950 target_flash_done (void)
1951 {
1952 current_inferior ()->top_target ()->flash_done ();
1953 }
1954
1955 static void
1956 show_trust_readonly (struct ui_file *file, int from_tty,
1957 struct cmd_list_element *c, const char *value)
1958 {
1959 fprintf_filtered (file,
1960 _("Mode for reading from readonly sections is %s.\n"),
1961 value);
1962 }
1963
1964 /* Target vector read/write partial wrapper functions. */
1965
1966 static enum target_xfer_status
1967 target_read_partial (struct target_ops *ops,
1968 enum target_object object,
1969 const char *annex, gdb_byte *buf,
1970 ULONGEST offset, ULONGEST len,
1971 ULONGEST *xfered_len)
1972 {
1973 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
1974 xfered_len);
1975 }
1976
1977 static enum target_xfer_status
1978 target_write_partial (struct target_ops *ops,
1979 enum target_object object,
1980 const char *annex, const gdb_byte *buf,
1981 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
1982 {
1983 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
1984 xfered_len);
1985 }
1986
1987 /* Wrappers to perform the full transfer. */
1988
1989 /* For docs on target_read see target.h. */
1990
1991 LONGEST
1992 target_read (struct target_ops *ops,
1993 enum target_object object,
1994 const char *annex, gdb_byte *buf,
1995 ULONGEST offset, LONGEST len)
1996 {
1997 LONGEST xfered_total = 0;
1998 int unit_size = 1;
1999
2000 /* If we are reading from a memory object, find the length of an addressable
2001 unit for that architecture. */
2002 if (object == TARGET_OBJECT_MEMORY
2003 || object == TARGET_OBJECT_STACK_MEMORY
2004 || object == TARGET_OBJECT_CODE_MEMORY
2005 || object == TARGET_OBJECT_RAW_MEMORY)
2006 unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
2007
2008 while (xfered_total < len)
2009 {
2010 ULONGEST xfered_partial;
2011 enum target_xfer_status status;
2012
2013 status = target_read_partial (ops, object, annex,
2014 buf + xfered_total * unit_size,
2015 offset + xfered_total, len - xfered_total,
2016 &xfered_partial);
2017
2018 /* Call an observer, notifying them of the xfer progress? */
2019 if (status == TARGET_XFER_EOF)
2020 return xfered_total;
2021 else if (status == TARGET_XFER_OK)
2022 {
2023 xfered_total += xfered_partial;
2024 QUIT;
2025 }
2026 else
2027 return TARGET_XFER_E_IO;
2028
2029 }
2030 return len;
2031 }
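
/* Usage sketch (illustrative only; ADDR is a placeholder): callers
   generally treat any return value other than the requested length as a
   failure.  For memory objects the length is in addressable units.

     gdb_byte buf[64];
     LONGEST n = target_read (current_inferior ()->top_target (),
                              TARGET_OBJECT_MEMORY, NULL,
                              buf, addr, sizeof (buf));
     if (n != sizeof (buf))
       error (_("short read or I/O error"));
*/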
2032
2033 /* Assuming that the entire [begin, end) range of memory cannot be
2034 read, try to read whatever subrange is possible to read.
2035
2036 The function returns, in RESULT, either zero or one memory block.
2037 If there's a readable subrange at the beginning, it is completely
2038 read and returned. Any further readable subrange will not be read.
2039 Otherwise, if there's a readable subrange at the end, it will be
2040 completely read and returned. Any readable subranges before it
2041 (obviously, not starting at the beginning) will be ignored. In
2042 other cases -- either no readable subrange, or readable subrange(s)
2043 that are neither at the beginning nor at the end -- nothing is returned.
2044
2045 The purpose of this function is to handle a read across a boundary
2046 of accessible memory in a case when memory map is not available.
2047 The above restrictions are fine for this case, but will give
2048 incorrect results if the memory is 'patchy'. However, supporting
2049 'patchy' memory would require trying to read every single byte,
2050 and that seems an unacceptable solution. An explicit memory map is
2051 recommended for this case -- and target_read_memory_robust will
2052 take care of reading multiple ranges then. */
2053
2054 static void
2055 read_whatever_is_readable (struct target_ops *ops,
2056 const ULONGEST begin, const ULONGEST end,
2057 int unit_size,
2058 std::vector<memory_read_result> *result)
2059 {
2060 ULONGEST current_begin = begin;
2061 ULONGEST current_end = end;
2062 int forward;
2063 ULONGEST xfered_len;
2064
2065 /* If we previously failed to read 1 byte, nothing can be done here. */
2066 if (end - begin <= 1)
2067 return;
2068
2069 gdb::unique_xmalloc_ptr<gdb_byte> buf ((gdb_byte *) xmalloc (end - begin));
2070
2071 /* Check that either the first or the last byte is readable, and give up
2072 if not. This heuristic is meant to permit reading accessible memory
2073 at the boundary of an accessible region. */
2074 if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2075 buf.get (), begin, 1, &xfered_len) == TARGET_XFER_OK)
2076 {
2077 forward = 1;
2078 ++current_begin;
2079 }
2080 else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2081 buf.get () + (end - begin) - 1, end - 1, 1,
2082 &xfered_len) == TARGET_XFER_OK)
2083 {
2084 forward = 0;
2085 --current_end;
2086 }
2087 else
2088 return;
2089
2090 /* The loop invariant is that the [current_begin, current_end) range was
2091 previously found to be not readable as a whole.
2092
2093 Note loop condition -- if the range has 1 byte, we can't divide the range
2094 so there's no point trying further. */
2095 while (current_end - current_begin > 1)
2096 {
2097 ULONGEST first_half_begin, first_half_end;
2098 ULONGEST second_half_begin, second_half_end;
2099 LONGEST xfer;
2100 ULONGEST middle = current_begin + (current_end - current_begin) / 2;
2101
2102 if (forward)
2103 {
2104 first_half_begin = current_begin;
2105 first_half_end = middle;
2106 second_half_begin = middle;
2107 second_half_end = current_end;
2108 }
2109 else
2110 {
2111 first_half_begin = middle;
2112 first_half_end = current_end;
2113 second_half_begin = current_begin;
2114 second_half_end = middle;
2115 }
2116
2117 xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2118 buf.get () + (first_half_begin - begin) * unit_size,
2119 first_half_begin,
2120 first_half_end - first_half_begin);
2121
2122 if (xfer == first_half_end - first_half_begin)
2123 {
2124 /* This half reads up fine. So, the error must be in the
2125 other half. */
2126 current_begin = second_half_begin;
2127 current_end = second_half_end;
2128 }
2129 else
2130 {
2131 /* This half is not readable. Because we've tried one byte, we
2132 know some part of this half is actually readable. Go to the next
2133 iteration to divide again and try to read.
2134
2135 We don't handle the other half, because this function only tries
2136 to read a single readable subrange. */
2137 current_begin = first_half_begin;
2138 current_end = first_half_end;
2139 }
2140 }
2141
2142 if (forward)
2143 {
2144 /* The [begin, current_begin) range has been read. */
2145 result->emplace_back (begin, current_end, std::move (buf));
2146 }
2147 else
2148 {
2149 /* The [current_end, end) range has been read. */
2150 LONGEST region_len = end - current_end;
2151
2152 gdb::unique_xmalloc_ptr<gdb_byte> data
2153 ((gdb_byte *) xmalloc (region_len * unit_size));
2154 memcpy (data.get (), buf.get () + (current_end - begin) * unit_size,
2155 region_len * unit_size);
2156 result->emplace_back (current_end, end, std::move (data));
2157 }
2158 }
2159
2160 std::vector<memory_read_result>
2161 read_memory_robust (struct target_ops *ops,
2162 const ULONGEST offset, const LONGEST len)
2163 {
2164 std::vector<memory_read_result> result;
2165 int unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
2166
2167 LONGEST xfered_total = 0;
2168 while (xfered_total < len)
2169 {
2170 struct mem_region *region = lookup_mem_region (offset + xfered_total);
2171 LONGEST region_len;
2172
2173 /* If there is no explicit region, a fake one should be created. */
2174 gdb_assert (region);
2175
2176 if (region->hi == 0)
2177 region_len = len - xfered_total;
2178 else
2179 region_len = region->hi - offset;
2180
2181 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2182 {
2183 /* Cannot read this region. Note that we can end up here only
2184 if the region is explicitly marked inaccessible, or
2185 'inaccessible-by-default' is in effect. */
2186 xfered_total += region_len;
2187 }
2188 else
2189 {
2190 LONGEST to_read = std::min (len - xfered_total, region_len);
2191 gdb::unique_xmalloc_ptr<gdb_byte> buffer
2192 ((gdb_byte *) xmalloc (to_read * unit_size));
2193
2194 LONGEST xfered_partial =
2195 target_read (ops, TARGET_OBJECT_MEMORY, NULL, buffer.get (),
2196 offset + xfered_total, to_read);
2197 /* Call an observer, notifying them of the xfer progress? */
2198 if (xfered_partial <= 0)
2199 {
2200 /* Got an error reading full chunk. See if maybe we can read
2201 some subrange. */
2202 read_whatever_is_readable (ops, offset + xfered_total,
2203 offset + xfered_total + to_read,
2204 unit_size, &result);
2205 xfered_total += to_read;
2206 }
2207 else
2208 {
2209 result.emplace_back (offset + xfered_total,
2210 offset + xfered_total + xfered_partial,
2211 std::move (buffer));
2212 xfered_total += xfered_partial;
2213 }
2214 QUIT;
2215 }
2216 }
2217
2218 return result;
2219 }
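
/* Usage sketch (illustrative; ADDR, LEN and consume_block are
   placeholders): the result is zero or more disjoint blocks covering
   whatever could actually be read from [ADDR, ADDR + LEN).

     std::vector<memory_read_result> blocks
       = read_memory_robust (current_inferior ()->top_target (), addr, len);
     for (const memory_read_result &b : blocks)
       consume_block (b.begin, b.end, b.data.get ());
*/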
2220
2221
2222 /* An alternative to target_write with progress callbacks. */
2223
2224 LONGEST
2225 target_write_with_progress (struct target_ops *ops,
2226 enum target_object object,
2227 const char *annex, const gdb_byte *buf,
2228 ULONGEST offset, LONGEST len,
2229 void (*progress) (ULONGEST, void *), void *baton)
2230 {
2231 LONGEST xfered_total = 0;
2232 int unit_size = 1;
2233
2234 /* If we are writing to a memory object, find the length of an addressable
2235 unit for that architecture. */
2236 if (object == TARGET_OBJECT_MEMORY
2237 || object == TARGET_OBJECT_STACK_MEMORY
2238 || object == TARGET_OBJECT_CODE_MEMORY
2239 || object == TARGET_OBJECT_RAW_MEMORY)
2240 unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
2241
2242 /* Give the progress callback a chance to set up. */
2243 if (progress)
2244 (*progress) (0, baton);
2245
2246 while (xfered_total < len)
2247 {
2248 ULONGEST xfered_partial;
2249 enum target_xfer_status status;
2250
2251 status = target_write_partial (ops, object, annex,
2252 buf + xfered_total * unit_size,
2253 offset + xfered_total, len - xfered_total,
2254 &xfered_partial);
2255
2256 if (status != TARGET_XFER_OK)
2257 return status == TARGET_XFER_EOF ? xfered_total : TARGET_XFER_E_IO;
2258
2259 if (progress)
2260 (*progress) (xfered_partial, baton);
2261
2262 xfered_total += xfered_partial;
2263 QUIT;
2264 }
2265 return len;
2266 }
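
/* Usage sketch (illustrative; OPS, BUF, ADDR, SIZE and note_progress are
   placeholders): PROGRESS is called once with 0 before the first transfer
   and then once per chunk with the number of units just written.

     static void
     note_progress (ULONGEST units_written, void *baton)
     {
       ...update a progress meter here...
     }

     LONGEST n = target_write_with_progress (ops, TARGET_OBJECT_MEMORY,
                                             NULL, buf, addr, size,
                                             note_progress, NULL);
*/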
2267
2268 /* For docs on target_write see target.h. */
2269
2270 LONGEST
2271 target_write (struct target_ops *ops,
2272 enum target_object object,
2273 const char *annex, const gdb_byte *buf,
2274 ULONGEST offset, LONGEST len)
2275 {
2276 return target_write_with_progress (ops, object, annex, buf, offset, len,
2277 NULL, NULL);
2278 }
2279
2280 /* Help for target_read_alloc and target_read_stralloc. See their comments
2281 for details. */
2282
2283 template <typename T>
2284 gdb::optional<gdb::def_vector<T>>
2285 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2286 const char *annex)
2287 {
2288 gdb::def_vector<T> buf;
2289 size_t buf_pos = 0;
2290 const int chunk = 4096;
2291
2292 /* This function does not have a length parameter; it reads the
2293 entire OBJECT. Also, it doesn't support objects fetched partly
2294 from one target and partly from another (in a different stratum,
2295 e.g. a core file and an executable). Both reasons make it
2296 unsuitable for reading memory. */
2297 gdb_assert (object != TARGET_OBJECT_MEMORY);
2298
2299 /* Start by reading up to 4K at a time. The target will throttle
2300 this number down if necessary. */
2301 while (1)
2302 {
2303 ULONGEST xfered_len;
2304 enum target_xfer_status status;
2305
2306 buf.resize (buf_pos + chunk);
2307
2308 status = target_read_partial (ops, object, annex,
2309 (gdb_byte *) &buf[buf_pos],
2310 buf_pos, chunk,
2311 &xfered_len);
2312
2313 if (status == TARGET_XFER_EOF)
2314 {
2315 /* Read all there was. */
2316 buf.resize (buf_pos);
2317 return buf;
2318 }
2319 else if (status != TARGET_XFER_OK)
2320 {
2321 /* An error occurred. */
2322 return {};
2323 }
2324
2325 buf_pos += xfered_len;
2326
2327 QUIT;
2328 }
2329 }
2330
2331 /* See target.h */
2332
2333 gdb::optional<gdb::byte_vector>
2334 target_read_alloc (struct target_ops *ops, enum target_object object,
2335 const char *annex)
2336 {
2337 return target_read_alloc_1<gdb_byte> (ops, object, annex);
2338 }
2339
2340 /* See target.h. */
2341
2342 gdb::optional<gdb::char_vector>
2343 target_read_stralloc (struct target_ops *ops, enum target_object object,
2344 const char *annex)
2345 {
2346 gdb::optional<gdb::char_vector> buf
2347 = target_read_alloc_1<char> (ops, object, annex);
2348
2349 if (!buf)
2350 return {};
2351
2352 if (buf->empty () || buf->back () != '\0')
2353 buf->push_back ('\0');
2354
2355 /* Check for embedded NUL bytes; but allow trailing NULs. */
2356 for (auto it = std::find (buf->begin (), buf->end (), '\0');
2357 it != buf->end (); it++)
2358 if (*it != '\0')
2359 {
2360 warning (_("target object %d, annex %s, "
2361 "contained unexpected null characters"),
2362 (int) object, annex ? annex : "(none)");
2363 break;
2364 }
2365
2366 return buf;
2367 }
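
/* Usage sketch (illustrative only; the "processes" annex is merely an
   example): a string-like object such as TARGET_OBJECT_OSDATA can be
   fetched whole; an unset optional means the read failed.

     gdb::optional<gdb::char_vector> text
       = target_read_stralloc (ops, TARGET_OBJECT_OSDATA, "processes");
     if (text)
       printf_unfiltered ("%s", text->data ());
*/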
2368
2369 /* Memory transfer methods. */
2370
2371 void
2372 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2373 LONGEST len)
2374 {
2375 /* This method is used to read from an alternate, non-current
2376 target. This read must bypass the overlay support (as symbols
2377 don't match this target), and GDB's internal cache (wrong cache
2378 for this target). */
2379 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2380 != len)
2381 memory_error (TARGET_XFER_E_IO, addr);
2382 }
2383
2384 ULONGEST
2385 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2386 int len, enum bfd_endian byte_order)
2387 {
2388 gdb_byte buf[sizeof (ULONGEST)];
2389
2390 gdb_assert (len <= sizeof (buf));
2391 get_target_memory (ops, addr, buf, len);
2392 return extract_unsigned_integer (buf, len, byte_order);
2393 }
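
/* Usage sketch (illustrative; OPS and ADDR are placeholders): read a
   4-byte big-endian value from an alternate target, bypassing GDB's
   caches and overlay handling.

     ULONGEST value
       = get_target_memory_unsigned (ops, addr, 4, BFD_ENDIAN_BIG);
*/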
2394
2395 /* See target.h. */
2396
2397 int
2398 target_insert_breakpoint (struct gdbarch *gdbarch,
2399 struct bp_target_info *bp_tgt)
2400 {
2401 if (!may_insert_breakpoints)
2402 {
2403 warning (_("May not insert breakpoints"));
2404 return 1;
2405 }
2406
2407 target_ops *target = current_inferior ()->top_target ();
2408
2409 return target->insert_breakpoint (gdbarch, bp_tgt);
2410 }
2411
2412 /* See target.h. */
2413
2414 int
2415 target_remove_breakpoint (struct gdbarch *gdbarch,
2416 struct bp_target_info *bp_tgt,
2417 enum remove_bp_reason reason)
2418 {
2419 /* This is kind of a weird case to handle, but the permission might
2420 have been changed after breakpoints were inserted - in which case
2421 we should just take the user literally and assume that any
2422 breakpoints should be left in place. */
2423 if (!may_insert_breakpoints)
2424 {
2425 warning (_("May not remove breakpoints"));
2426 return 1;
2427 }
2428
2429 target_ops *target = current_inferior ()->top_target ();
2430
2431 return target->remove_breakpoint (gdbarch, bp_tgt, reason);
2432 }
2433
2434 static void
2435 info_target_command (const char *args, int from_tty)
2436 {
2437 int has_all_mem = 0;
2438
2439 if (current_program_space->symfile_object_file != NULL)
2440 {
2441 objfile *objf = current_program_space->symfile_object_file;
2442 printf_unfiltered (_("Symbols from \"%s\".\n"),
2443 objfile_name (objf));
2444 }
2445
2446 for (target_ops *t = current_inferior ()->top_target ();
2447 t != NULL;
2448 t = t->beneath ())
2449 {
2450 if (!t->has_memory ())
2451 continue;
2452
2453 if ((int) (t->stratum ()) <= (int) dummy_stratum)
2454 continue;
2455 if (has_all_mem)
2456 printf_unfiltered (_("\tWhile running this, "
2457 "GDB does not access memory from...\n"));
2458 printf_unfiltered ("%s:\n", t->longname ());
2459 t->files_info ();
2460 has_all_mem = t->has_all_memory ();
2461 }
2462 }
2463
2464 /* This function is called before any new inferior is created, e.g.
2465 by running a program, attaching, or connecting to a target.
2466 It cleans up any state from previous invocations which might
2467 change between runs. This is a subset of what target_preopen
2468 resets (things which might change between targets). */
2469
2470 void
2471 target_pre_inferior (int from_tty)
2472 {
2473 /* Clear out solib state. Otherwise the solib state of the previous
2474 inferior might have survived and is entirely wrong for the new
2475 target. This has been observed on GNU/Linux using glibc 2.3. How
2476 to reproduce:
2477
2478 bash$ ./foo&
2479 [1] 4711
2480 bash$ ./foo&
2481 [1] 4712
2482 bash$ gdb ./foo
2483 [...]
2484 (gdb) attach 4711
2485 (gdb) detach
2486 (gdb) attach 4712
2487 Cannot access memory at address 0xdeadbeef
2488 */
2489
2490 /* In some OSs, the shared library list is the same/global/shared
2491 across inferiors. If code is shared between processes, so are
2492 memory regions and features. */
2493 if (!gdbarch_has_global_solist (target_gdbarch ()))
2494 {
2495 no_shared_libraries (NULL, from_tty);
2496
2497 invalidate_target_mem_regions ();
2498
2499 target_clear_description ();
2500 }
2501
2502 /* attach_flag may be set if the previous process associated with
2503 the inferior was attached to. */
2504 current_inferior ()->attach_flag = 0;
2505
2506 current_inferior ()->highest_thread_num = 0;
2507
2508 agent_capability_invalidate ();
2509 }
2510
2511 /* This is to be called by the open routine before it does
2512 anything. */
2513
2514 void
2515 target_preopen (int from_tty)
2516 {
2517 dont_repeat ();
2518
2519 if (current_inferior ()->pid != 0)
2520 {
2521 if (!from_tty
2522 || !target_has_execution ()
2523 || query (_("A program is being debugged already. Kill it? ")))
2524 {
2525 /* Core inferiors actually should be detached, not
2526 killed. */
2527 if (target_has_execution ())
2528 target_kill ();
2529 else
2530 target_detach (current_inferior (), 0);
2531 }
2532 else
2533 error (_("Program not killed."));
2534 }
2535
2536 /* Calling target_kill may remove the target from the stack. But if
2537 it doesn't (which seems like a win for UDI), remove it now. */
2538 /* Leave the exec target, though. The user may be switching from a
2539 live process to a core of the same program. */
2540 pop_all_targets_above (file_stratum);
2541
2542 target_pre_inferior (from_tty);
2543 }
2544
2545 /* See target.h. */
2546
2547 void
2548 target_detach (inferior *inf, int from_tty)
2549 {
2550 /* After we have detached, we will clear the register cache for this inferior
2551 by calling registers_changed_ptid. We must save the pid_ptid before
2552 detaching, as the target detach method will clear inf->pid. */
2553 ptid_t save_pid_ptid = ptid_t (inf->pid);
2554
2555 /* As long as some to_detach implementations rely on the current_inferior
2556 (either directly, or indirectly, like through target_gdbarch or by
2557 reading memory), INF needs to be the current inferior. Once that
2558 requirement no longer holds, we can remove this
2559 assertion. */
2560 gdb_assert (inf == current_inferior ());
2561
2562 prepare_for_detach ();
2563
2564 /* Hold a strong reference because detaching may unpush the
2565 target. */
2566 auto proc_target_ref = target_ops_ref::new_reference (inf->process_target ());
2567
2568 current_inferior ()->top_target ()->detach (inf, from_tty);
2569
2570 process_stratum_target *proc_target
2571 = as_process_stratum_target (proc_target_ref.get ());
2572
2573 registers_changed_ptid (proc_target, save_pid_ptid);
2574
2575 /* We have to ensure we have no frame cache left. Normally,
2576 registers_changed_ptid (save_pid_ptid) calls reinit_frame_cache when
2577 inferior_ptid matches save_pid_ptid, but in our case, it does not
2578 call it, as inferior_ptid has been reset. */
2579 reinit_frame_cache ();
2580 }
2581
2582 void
2583 target_disconnect (const char *args, int from_tty)
2584 {
2585 /* If we're in breakpoints-always-inserted mode or if breakpoints
2586 are global across processes, we have to remove them before
2587 disconnecting. */
2588 remove_breakpoints ();
2589
2590 current_inferior ()->top_target ()->disconnect (args, from_tty);
2591 }
2592
2593 /* See target/target.h. */
2594
2595 ptid_t
2596 target_wait (ptid_t ptid, struct target_waitstatus *status,
2597 target_wait_flags options)
2598 {
2599 target_ops *target = current_inferior ()->top_target ();
2600 process_stratum_target *proc_target = current_inferior ()->process_target ();
2601
2602 gdb_assert (!proc_target->commit_resumed_state);
2603
2604 if (!target->can_async_p ())
2605 gdb_assert ((options & TARGET_WNOHANG) == 0);
2606
2607 return target->wait (ptid, status, options);
2608 }
2609
2610 /* See target.h. */
2611
2612 ptid_t
2613 default_target_wait (struct target_ops *ops,
2614 ptid_t ptid, struct target_waitstatus *status,
2615 target_wait_flags options)
2616 {
2617 status->kind = TARGET_WAITKIND_IGNORE;
2618 return minus_one_ptid;
2619 }
2620
2621 std::string
2622 target_pid_to_str (ptid_t ptid)
2623 {
2624 return current_inferior ()->top_target ()->pid_to_str (ptid);
2625 }
2626
2627 const char *
2628 target_thread_name (struct thread_info *info)
2629 {
2630 gdb_assert (info->inf == current_inferior ());
2631
2632 return current_inferior ()->top_target ()->thread_name (info);
2633 }
2634
2635 struct thread_info *
2636 target_thread_handle_to_thread_info (const gdb_byte *thread_handle,
2637 int handle_len,
2638 struct inferior *inf)
2639 {
2640 target_ops *target = current_inferior ()->top_target ();
2641
2642 return target->thread_handle_to_thread_info (thread_handle, handle_len, inf);
2643 }
2644
2645 /* See target.h. */
2646
2647 gdb::byte_vector
2648 target_thread_info_to_thread_handle (struct thread_info *tip)
2649 {
2650 target_ops *target = current_inferior ()->top_target ();
2651
2652 return target->thread_info_to_thread_handle (tip);
2653 }
2654
2655 void
2656 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2657 {
2658 process_stratum_target *curr_target = current_inferior ()->process_target ();
2659 gdb_assert (!curr_target->commit_resumed_state);
2660
2661 target_dcache_invalidate ();
2662
2663 current_inferior ()->top_target ()->resume (ptid, step, signal);
2664
2665 registers_changed_ptid (curr_target, ptid);
2666 /* We only set the internal executing state here. The user/frontend
2667 running state is set at a higher level. This also clears the
2668 thread's stop_pc as side effect. */
2669 set_executing (curr_target, ptid, true);
2670 clear_inline_frame_state (curr_target, ptid);
2671 }
2672
2673 /* See target.h. */
2674
2675 void
2676 target_commit_resumed ()
2677 {
2678 gdb_assert (current_inferior ()->process_target ()->commit_resumed_state);
2679 current_inferior ()->top_target ()->commit_resumed ();
2680 }
2681
2682 /* See target.h. */
2683
2684 bool
2685 target_has_pending_events ()
2686 {
2687 return current_inferior ()->top_target ()->has_pending_events ();
2688 }
2689
2690 void
2691 target_pass_signals (gdb::array_view<const unsigned char> pass_signals)
2692 {
2693 current_inferior ()->top_target ()->pass_signals (pass_signals);
2694 }
2695
2696 void
2697 target_program_signals (gdb::array_view<const unsigned char> program_signals)
2698 {
2699 current_inferior ()->top_target ()->program_signals (program_signals);
2700 }
2701
2702 static void
2703 default_follow_fork (struct target_ops *self, bool follow_child,
2704 bool detach_fork)
2705 {
2706 /* Some target returned a fork event, but did not know how to follow it. */
2707 internal_error (__FILE__, __LINE__,
2708 _("could not find a target to follow fork"));
2709 }
2710
2711 /* See target.h. */
2712
2713 void
2714 target_follow_fork (bool follow_child, bool detach_fork)
2715 {
2716 target_ops *target = current_inferior ()->top_target ();
2717
2718 return target->follow_fork (follow_child, detach_fork);
2719 }
2720
2721 /* Target wrapper for follow exec hook. */
2722
2723 void
2724 target_follow_exec (struct inferior *inf, const char *execd_pathname)
2725 {
2726 current_inferior ()->top_target ()->follow_exec (inf, execd_pathname);
2727 }
2728
2729 static void
2730 default_mourn_inferior (struct target_ops *self)
2731 {
2732 internal_error (__FILE__, __LINE__,
2733 _("could not find a target to follow mourn inferior"));
2734 }
2735
2736 void
2737 target_mourn_inferior (ptid_t ptid)
2738 {
2739 gdb_assert (ptid.pid () == inferior_ptid.pid ());
2740 current_inferior ()->top_target ()->mourn_inferior ();
2741
2742 /* We no longer need to keep handles on any of the object files.
2743 Make sure to release them to avoid unnecessarily locking any
2744 of them while we're not actually debugging. */
2745 bfd_cache_close_all ();
2746 }
2747
2748 /* Look for a target which can describe architectural features, starting
2749 from TARGET. If we find one, return its description. */
2750
2751 const struct target_desc *
2752 target_read_description (struct target_ops *target)
2753 {
2754 return target->read_description ();
2755 }
2756
2757
2758 /* Default implementation of memory-searching. */
2759
2760 static int
2761 default_search_memory (struct target_ops *self,
2762 CORE_ADDR start_addr, ULONGEST search_space_len,
2763 const gdb_byte *pattern, ULONGEST pattern_len,
2764 CORE_ADDR *found_addrp)
2765 {
2766 auto read_memory = [=] (CORE_ADDR addr, gdb_byte *result, size_t len)
2767 {
2768 return target_read (current_inferior ()->top_target (),
2769 TARGET_OBJECT_MEMORY, NULL,
2770 result, addr, len) == len;
2771 };
2772
2773 /* Start over from the top of the target stack. */
2774 return simple_search_memory (read_memory, start_addr, search_space_len,
2775 pattern, pattern_len, found_addrp);
2776 }
2777
2778 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2779 sequence of bytes in PATTERN with length PATTERN_LEN.
2780
2781 The result is 1 if found, 0 if not found, and -1 if there was an error
2782 requiring halting of the search (e.g. memory read error).
2783 If the pattern is found the address is recorded in FOUND_ADDRP. */
2784
2785 int
2786 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2787 const gdb_byte *pattern, ULONGEST pattern_len,
2788 CORE_ADDR *found_addrp)
2789 {
2790 target_ops *target = current_inferior ()->top_target ();
2791
2792 return target->search_memory (start_addr, search_space_len, pattern,
2793 pattern_len, found_addrp);
2794 }
2795
2796 /* Look through the currently pushed targets. If none of them will
2797 be able to restart the currently running process, issue an error
2798 message. */
2799
2800 void
2801 target_require_runnable (void)
2802 {
2803 for (target_ops *t = current_inferior ()->top_target ();
2804 t != NULL;
2805 t = t->beneath ())
2806 {
2807 /* If this target knows how to create a new program, then
2808 assume we will still be able to after killing the current
2809 one. Either killing and mourning will not pop T, or else
2810 find_default_run_target will find it again. */
2811 if (t->can_create_inferior ())
2812 return;
2813
2814 /* Do not worry about targets at certain strata that cannot
2815 create inferiors. Assume they will be pushed again if
2816 necessary, and continue to the process_stratum. */
2817 if (t->stratum () > process_stratum)
2818 continue;
2819
2820 error (_("The \"%s\" target does not support \"run\". "
2821 "Try \"help target\" or \"continue\"."),
2822 t->shortname ());
2823 }
2824
2825 /* This function is only called if the target is running. In that
2826 case there should have been a process_stratum target and it
2827 should either know how to create inferiors, or not... */
2828 internal_error (__FILE__, __LINE__, _("No targets found"));
2829 }
2830
2831 /* Whether GDB is allowed to fall back to the default run target for
2832 "run", "attach", etc. when no target is connected yet. */
2833 static bool auto_connect_native_target = true;
2834
2835 static void
2836 show_auto_connect_native_target (struct ui_file *file, int from_tty,
2837 struct cmd_list_element *c, const char *value)
2838 {
2839 fprintf_filtered (file,
2840 _("Whether GDB may automatically connect to the "
2841 "native target is %s.\n"),
2842 value);
2843 }
2844
2845 /* A pointer to the target that can respond to "run" or "attach".
2846 Native targets are always singletons and instantiated early at GDB
2847 startup. */
2848 static target_ops *the_native_target;
2849
2850 /* See target.h. */
2851
2852 void
2853 set_native_target (target_ops *target)
2854 {
2855 if (the_native_target != NULL)
2856 internal_error (__FILE__, __LINE__,
2857 _("native target already set (\"%s\")."),
2858 the_native_target->longname ());
2859
2860 the_native_target = target;
2861 }
2862
2863 /* See target.h. */
2864
2865 target_ops *
2866 get_native_target ()
2867 {
2868 return the_native_target;
2869 }
2870
2871 /* Look through the list of possible targets for a target that can
2872 execute a run or attach command without any other data. This is
2873 used to locate the default process stratum.
2874
2875 If DO_MESG is not NULL, the result is always valid (error() is
2876 called for errors); else, return NULL on error. */
2877
2878 static struct target_ops *
2879 find_default_run_target (const char *do_mesg)
2880 {
2881 if (auto_connect_native_target && the_native_target != NULL)
2882 return the_native_target;
2883
2884 if (do_mesg != NULL)
2885 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2886 return NULL;
2887 }
2888
2889 /* See target.h. */
2890
2891 struct target_ops *
2892 find_attach_target (void)
2893 {
2894 /* If a target on the current stack can attach, use it. */
2895 for (target_ops *t = current_inferior ()->top_target ();
2896 t != NULL;
2897 t = t->beneath ())
2898 {
2899 if (t->can_attach ())
2900 return t;
2901 }
2902
2903 /* Otherwise, use the default run target for attaching. */
2904 return find_default_run_target ("attach");
2905 }
2906
2907 /* See target.h. */
2908
2909 struct target_ops *
2910 find_run_target (void)
2911 {
2912 /* If a target on the current stack can run, use it. */
2913 for (target_ops *t = current_inferior ()->top_target ();
2914 t != NULL;
2915 t = t->beneath ())
2916 {
2917 if (t->can_create_inferior ())
2918 return t;
2919 }
2920
2921 /* Otherwise, use the default run target. */
2922 return find_default_run_target ("run");
2923 }
2924
2925 bool
2926 target_ops::info_proc (const char *args, enum info_proc_what what)
2927 {
2928 return false;
2929 }
2930
2931 /* Implement the "info proc" command. */
2932
2933 int
2934 target_info_proc (const char *args, enum info_proc_what what)
2935 {
2936 struct target_ops *t;
2937
2938 /* If we're already connected to something that can get us OS
2939 related data, use it. Otherwise, try using the native
2940 target. */
2941 t = find_target_at (process_stratum);
2942 if (t == NULL)
2943 t = find_default_run_target (NULL);
2944
2945 for (; t != NULL; t = t->beneath ())
2946 {
2947 if (t->info_proc (args, what))
2948 {
2949 if (targetdebug)
2950 fprintf_unfiltered (gdb_stdlog,
2951 "target_info_proc (\"%s\", %d)\n", args, what);
2952
2953 return 1;
2954 }
2955 }
2956
2957 return 0;
2958 }
2959
2960 static int
2961 find_default_supports_disable_randomization (struct target_ops *self)
2962 {
2963 struct target_ops *t;
2964
2965 t = find_default_run_target (NULL);
2966 if (t != NULL)
2967 return t->supports_disable_randomization ();
2968 return 0;
2969 }
2970
2971 int
2972 target_supports_disable_randomization (void)
2973 {
2974 return current_inferior ()->top_target ()->supports_disable_randomization ();
2975 }
2976
2977 /* See target/target.h. */
2978
2979 int
2980 target_supports_multi_process (void)
2981 {
2982 return current_inferior ()->top_target ()->supports_multi_process ();
2983 }
2984
2985 /* See target.h. */
2986
2987 gdb::optional<gdb::char_vector>
2988 target_get_osdata (const char *type)
2989 {
2990 struct target_ops *t;
2991
2992 /* If we're already connected to something that can get us OS
2993 related data, use it. Otherwise, try using the native
2994 target. */
2995 t = find_target_at (process_stratum);
2996 if (t == NULL)
2997 t = find_default_run_target ("get OS data");
2998
2999 if (!t)
3000 return {};
3001
3002 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3003 }
3004
3005 /* Determine the current address space of thread PTID. */
3006
3007 struct address_space *
3008 target_thread_address_space (ptid_t ptid)
3009 {
3010 struct address_space *aspace;
3011
3012 aspace = current_inferior ()->top_target ()->thread_address_space (ptid);
3013 gdb_assert (aspace != NULL);
3014
3015 return aspace;
3016 }
3017
3018 /* See target.h. */
3019
3020 target_ops *
3021 target_ops::beneath () const
3022 {
3023 return current_inferior ()->find_target_beneath (this);
3024 }
3025
3026 void
3027 target_ops::close ()
3028 {
3029 }
3030
3031 bool
3032 target_ops::can_attach ()
3033 {
3034 return false;
3035 }
3036
3037 void
3038 target_ops::attach (const char *, int)
3039 {
3040 gdb_assert_not_reached ("target_ops::attach called");
3041 }
3042
3043 bool
3044 target_ops::can_create_inferior ()
3045 {
3046 return false;
3047 }
3048
3049 void
3050 target_ops::create_inferior (const char *, const std::string &,
3051 char **, int)
3052 {
3053 gdb_assert_not_reached ("target_ops::create_inferior called");
3054 }
3055
3056 bool
3057 target_ops::can_run ()
3058 {
3059 return false;
3060 }
3061
3062 int
3063 target_can_run ()
3064 {
3065 for (target_ops *t = current_inferior ()->top_target ();
3066 t != NULL;
3067 t = t->beneath ())
3068 {
3069 if (t->can_run ())
3070 return 1;
3071 }
3072
3073 return 0;
3074 }
3075
3076 /* Target file operations. */
3077
3078 static struct target_ops *
3079 default_fileio_target (void)
3080 {
3081 struct target_ops *t;
3082
3083 /* If we're already connected to something that can perform
3084 file I/O, use it. Otherwise, try using the native target. */
3085 t = find_target_at (process_stratum);
3086 if (t != NULL)
3087 return t;
3088 return find_default_run_target ("file I/O");
3089 }
3090
3091 /* File handle for target file operations. */
3092
3093 struct fileio_fh_t
3094 {
3095 /* The target on which this file is open. NULL if the target was
3096 closed while the handle was still open. */
3097 target_ops *target;
3098
3099 /* The file descriptor on the target. */
3100 int target_fd;
3101
3102 /* Check whether this fileio_fh_t represents a closed file. */
3103 bool is_closed ()
3104 {
3105 return target_fd < 0;
3106 }
3107 };
3108
3109 /* Vector of currently open file handles. The value returned by
3110 target_fileio_open and passed as the FD argument to other
3111 target_fileio_* functions is an index into this vector. This
3112 vector's entries are never freed; instead, files are marked as
3113 closed, and the handle becomes available for reuse. */
3114 static std::vector<fileio_fh_t> fileio_fhandles;
3115
3116 /* Index into fileio_fhandles of the lowest handle that might be
3117 closed. This permits handle reuse without searching the whole
3118 list each time a new file is opened. */
3119 static int lowest_closed_fd;
3120
3121 /* Invalidate the target associated with open handles that were open
3122 on target TARG, since we're about to close (and maybe destroy) the
3123 target. The handles remain open from the client's perspective, but
3124 trying to do anything with them other than closing them will fail
3125 with EIO. */
3126
3127 static void
3128 fileio_handles_invalidate_target (target_ops *targ)
3129 {
3130 for (fileio_fh_t &fh : fileio_fhandles)
3131 if (fh.target == targ)
3132 fh.target = NULL;
3133 }
3134
3135 /* Acquire a target fileio file descriptor. */
3136
3137 static int
3138 acquire_fileio_fd (target_ops *target, int target_fd)
3139 {
3140 /* Search for closed handles to reuse. */
3141 for (; lowest_closed_fd < fileio_fhandles.size (); lowest_closed_fd++)
3142 {
3143 fileio_fh_t &fh = fileio_fhandles[lowest_closed_fd];
3144
3145 if (fh.is_closed ())
3146 break;
3147 }
3148
3149 /* Push a new handle if no closed handles were found. */
3150 if (lowest_closed_fd == fileio_fhandles.size ())
3151 fileio_fhandles.push_back (fileio_fh_t {target, target_fd});
3152 else
3153 fileio_fhandles[lowest_closed_fd] = {target, target_fd};
3154
3155 /* Should no longer be marked closed. */
3156 gdb_assert (!fileio_fhandles[lowest_closed_fd].is_closed ());
3157
3158 /* Return its index, and start the next lookup at
3159 the next index. */
3160 return lowest_closed_fd++;
3161 }
3162
3163 /* Release a target fileio file descriptor. */
3164
3165 static void
3166 release_fileio_fd (int fd, fileio_fh_t *fh)
3167 {
3168 fh->target_fd = -1;
3169 lowest_closed_fd = std::min (lowest_closed_fd, fd);
3170 }
3171
3172 /* Return a pointer to the fileio_fh_t corresponding to FD. */
3173
3174 static fileio_fh_t *
3175 fileio_fd_to_fh (int fd)
3176 {
3177 return &fileio_fhandles[fd];
3178 }
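
/* Illustrative sketch of the handle lifecycle (not real call sites):
   target_fileio_open hands out an index via acquire_fileio_fd, and
   target_fileio_close marks it closed via release_fileio_fd so a later
   open can reuse the slot.

     int fd = acquire_fileio_fd (t, target_fd);
     fileio_fh_t *fh = fileio_fd_to_fh (fd);
     ...use fh->target and fh->target_fd...
     release_fileio_fd (fd, fh);
*/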
3179
3180
3181 /* Default implementations of file i/o methods. We don't want these
3182 to delegate automatically, because we need to know which target
3183 supported the method, in order to call it directly from within
3184 pread/pwrite, etc. */
3185
3186 int
3187 target_ops::fileio_open (struct inferior *inf, const char *filename,
3188 int flags, int mode, int warn_if_slow,
3189 int *target_errno)
3190 {
3191 *target_errno = FILEIO_ENOSYS;
3192 return -1;
3193 }
3194
3195 int
3196 target_ops::fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3197 ULONGEST offset, int *target_errno)
3198 {
3199 *target_errno = FILEIO_ENOSYS;
3200 return -1;
3201 }
3202
3203 int
3204 target_ops::fileio_pread (int fd, gdb_byte *read_buf, int len,
3205 ULONGEST offset, int *target_errno)
3206 {
3207 *target_errno = FILEIO_ENOSYS;
3208 return -1;
3209 }
3210
3211 int
3212 target_ops::fileio_fstat (int fd, struct stat *sb, int *target_errno)
3213 {
3214 *target_errno = FILEIO_ENOSYS;
3215 return -1;
3216 }
3217
3218 int
3219 target_ops::fileio_close (int fd, int *target_errno)
3220 {
3221 *target_errno = FILEIO_ENOSYS;
3222 return -1;
3223 }
3224
3225 int
3226 target_ops::fileio_unlink (struct inferior *inf, const char *filename,
3227 int *target_errno)
3228 {
3229 *target_errno = FILEIO_ENOSYS;
3230 return -1;
3231 }
3232
3233 gdb::optional<std::string>
3234 target_ops::fileio_readlink (struct inferior *inf, const char *filename,
3235 int *target_errno)
3236 {
3237 *target_errno = FILEIO_ENOSYS;
3238 return {};
3239 }
3240
3241 /* See target.h. */
3242
3243 int
3244 target_fileio_open (struct inferior *inf, const char *filename,
3245 int flags, int mode, bool warn_if_slow, int *target_errno)
3246 {
3247 for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
3248 {
3249 int fd = t->fileio_open (inf, filename, flags, mode,
3250 warn_if_slow, target_errno);
3251
3252 if (fd == -1 && *target_errno == FILEIO_ENOSYS)
3253 continue;
3254
3255 if (fd < 0)
3256 fd = -1;
3257 else
3258 fd = acquire_fileio_fd (t, fd);
3259
3260 if (targetdebug)
3261 fprintf_unfiltered (gdb_stdlog,
3262 "target_fileio_open (%d,%s,0x%x,0%o,%d)"
3263 " = %d (%d)\n",
3264 inf == NULL ? 0 : inf->num,
3265 filename, flags, mode,
3266 warn_if_slow, fd,
3267 fd != -1 ? 0 : *target_errno);
3268 return fd;
3269 }
3270
3271 *target_errno = FILEIO_ENOSYS;
3272 return -1;
3273 }
3274
3275 /* See target.h. */
3276
3277 int
3278 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3279 ULONGEST offset, int *target_errno)
3280 {
3281 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3282 int ret = -1;
3283
3284 if (fh->is_closed ())
3285 *target_errno = EBADF;
3286 else if (fh->target == NULL)
3287 *target_errno = EIO;
3288 else
3289 ret = fh->target->fileio_pwrite (fh->target_fd, write_buf,
3290 len, offset, target_errno);
3291
3292 if (targetdebug)
3293 fprintf_unfiltered (gdb_stdlog,
3294 "target_fileio_pwrite (%d,...,%d,%s) "
3295 "= %d (%d)\n",
3296 fd, len, pulongest (offset),
3297 ret, ret != -1 ? 0 : *target_errno);
3298 return ret;
3299 }
3300
3301 /* See target.h. */
3302
3303 int
3304 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3305 ULONGEST offset, int *target_errno)
3306 {
3307 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3308 int ret = -1;
3309
3310 if (fh->is_closed ())
3311 *target_errno = EBADF;
3312 else if (fh->target == NULL)
3313 *target_errno = EIO;
3314 else
3315 ret = fh->target->fileio_pread (fh->target_fd, read_buf,
3316 len, offset, target_errno);
3317
3318 if (targetdebug)
3319 fprintf_unfiltered (gdb_stdlog,
3320 "target_fileio_pread (%d,...,%d,%s) "
3321 "= %d (%d)\n",
3322 fd, len, pulongest (offset),
3323 ret, ret != -1 ? 0 : *target_errno);
3324 return ret;
3325 }
3326
3327 /* See target.h. */
3328
3329 int
3330 target_fileio_fstat (int fd, struct stat *sb, int *target_errno)
3331 {
3332 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3333 int ret = -1;
3334
3335 if (fh->is_closed ())
3336 *target_errno = EBADF;
3337 else if (fh->target == NULL)
3338 *target_errno = EIO;
3339 else
3340 ret = fh->target->fileio_fstat (fh->target_fd, sb, target_errno);
3341
3342 if (targetdebug)
3343 fprintf_unfiltered (gdb_stdlog,
3344 "target_fileio_fstat (%d) = %d (%d)\n",
3345 fd, ret, ret != -1 ? 0 : *target_errno);
3346 return ret;
3347 }
3348
3349 /* See target.h. */
3350
3351 int
3352 target_fileio_close (int fd, int *target_errno)
3353 {
3354 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3355 int ret = -1;
3356
3357 if (fh->is_closed ())
3358 *target_errno = EBADF;
3359 else
3360 {
3361 if (fh->target != NULL)
3362 ret = fh->target->fileio_close (fh->target_fd,
3363 target_errno);
3364 else
3365 ret = 0;
3366 release_fileio_fd (fd, fh);
3367 }
3368
3369 if (targetdebug)
3370 fprintf_unfiltered (gdb_stdlog,
3371 "target_fileio_close (%d) = %d (%d)\n",
3372 fd, ret, ret != -1 ? 0 : *target_errno);
3373 return ret;
3374 }
3375
3376 /* See target.h. */
3377
3378 int
3379 target_fileio_unlink (struct inferior *inf, const char *filename,
3380 int *target_errno)
3381 {
3382 for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
3383 {
3384 int ret = t->fileio_unlink (inf, filename, target_errno);
3385
3386 if (ret == -1 && *target_errno == FILEIO_ENOSYS)
3387 continue;
3388
3389 if (targetdebug)
3390 fprintf_unfiltered (gdb_stdlog,
3391 "target_fileio_unlink (%d,%s)"
3392 " = %d (%d)\n",
3393 inf == NULL ? 0 : inf->num, filename,
3394 ret, ret != -1 ? 0 : *target_errno);
3395 return ret;
3396 }
3397
3398 *target_errno = FILEIO_ENOSYS;
3399 return -1;
3400 }
3401
3402 /* See target.h. */
3403
3404 gdb::optional<std::string>
3405 target_fileio_readlink (struct inferior *inf, const char *filename,
3406 int *target_errno)
3407 {
3408 for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
3409 {
3410 gdb::optional<std::string> ret
3411 = t->fileio_readlink (inf, filename, target_errno);
3412
3413 if (!ret.has_value () && *target_errno == FILEIO_ENOSYS)
3414 continue;
3415
3416 if (targetdebug)
3417 fprintf_unfiltered (gdb_stdlog,
3418 "target_fileio_readlink (%d,%s)"
3419 " = %s (%d)\n",
3420 inf == NULL ? 0 : inf->num,
3421 filename, ret ? ret->c_str () : "(nil)",
3422 ret ? 0 : *target_errno);
3423 return ret;
3424 }
3425
3426 *target_errno = FILEIO_ENOSYS;
3427 return {};
3428 }
3429
3430 /* Like scoped_fd, but specific to target fileio. */
3431
3432 class scoped_target_fd
3433 {
3434 public:
3435 explicit scoped_target_fd (int fd) noexcept
3436 : m_fd (fd)
3437 {
3438 }
3439
3440 ~scoped_target_fd ()
3441 {
3442 if (m_fd >= 0)
3443 {
3444 int target_errno;
3445
3446 target_fileio_close (m_fd, &target_errno);
3447 }
3448 }
3449
3450 DISABLE_COPY_AND_ASSIGN (scoped_target_fd);
3451
3452 int get () const noexcept
3453 {
3454 return m_fd;
3455 }
3456
3457 private:
3458 int m_fd;
3459 };
3460
3461 /* Read target file FILENAME, in the filesystem as seen by INF. If
3462 INF is NULL, use the filesystem seen by the debugger (GDB or, for
3463 remote targets, the remote stub). Store the result in *BUF_P and
3464 return the size of the transferred data. PADDING additional bytes
3465 are available in *BUF_P. This is a helper function for
3466 target_fileio_read_alloc; see the declaration of that function for
3467 more information. */
3468
3469 static LONGEST
3470 target_fileio_read_alloc_1 (struct inferior *inf, const char *filename,
3471 gdb_byte **buf_p, int padding)
3472 {
3473 size_t buf_alloc, buf_pos;
3474 gdb_byte *buf;
3475 LONGEST n;
3476 int target_errno;
3477
3478 scoped_target_fd fd (target_fileio_open (inf, filename, FILEIO_O_RDONLY,
3479 0700, false, &target_errno));
3480 if (fd.get () == -1)
3481 return -1;
3482
3483 /* Start by reading up to 4K at a time. The target will throttle
3484 this number down if necessary. */
3485 buf_alloc = 4096;
3486 buf = (gdb_byte *) xmalloc (buf_alloc);
3487 buf_pos = 0;
3488 while (1)
3489 {
3490 n = target_fileio_pread (fd.get (), &buf[buf_pos],
3491 buf_alloc - buf_pos - padding, buf_pos,
3492 &target_errno);
3493 if (n < 0)
3494 {
3495 /* An error occurred. */
3496 xfree (buf);
3497 return -1;
3498 }
3499 else if (n == 0)
3500 {
3501 /* Read all there was. */
3502 if (buf_pos == 0)
3503 xfree (buf);
3504 else
3505 *buf_p = buf;
3506 return buf_pos;
3507 }
3508
3509 buf_pos += n;
3510
3511 /* If the buffer is filling up, expand it. */
3512 if (buf_alloc < buf_pos * 2)
3513 {
3514 buf_alloc *= 2;
3515 buf = (gdb_byte *) xrealloc (buf, buf_alloc);
3516 }
3517
3518 QUIT;
3519 }
3520 }
3521
3522 /* See target.h. */
3523
3524 LONGEST
3525 target_fileio_read_alloc (struct inferior *inf, const char *filename,
3526 gdb_byte **buf_p)
3527 {
3528 return target_fileio_read_alloc_1 (inf, filename, buf_p, 0);
3529 }
3530
3531 /* See target.h. */
3532
3533 gdb::unique_xmalloc_ptr<char>
3534 target_fileio_read_stralloc (struct inferior *inf, const char *filename)
3535 {
3536 gdb_byte *buffer;
3537 char *bufstr;
3538 LONGEST i, transferred;
3539
3540 transferred = target_fileio_read_alloc_1 (inf, filename, &buffer, 1);
3541 bufstr = (char *) buffer;
3542
3543 if (transferred < 0)
3544 return gdb::unique_xmalloc_ptr<char> (nullptr);
3545
3546 if (transferred == 0)
3547 return make_unique_xstrdup ("");
3548
3549 bufstr[transferred] = 0;
3550
3551 /* Check for embedded NUL bytes; but allow trailing NULs. */
3552 for (i = strlen (bufstr); i < transferred; i++)
3553 if (bufstr[i] != 0)
3554 {
3555 warning (_("target file %s "
3556 "contained unexpected null characters"),
3557 filename);
3558 break;
3559 }
3560
3561 return gdb::unique_xmalloc_ptr<char> (bufstr);
3562 }
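
/* Usage sketch (illustrative; the path is only an example): read a text
   file from the filesystem as seen by INF and print it.

     gdb::unique_xmalloc_ptr<char> text
       = target_fileio_read_stralloc (inf, "/proc/version");
     if (text != nullptr)
       printf_unfiltered ("%s", text.get ());
*/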
3563
3564
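/* Default implementation of the region_ok_for_hw_watchpoint method:
   accept any region no wider than a pointer on the current
   architecture.  */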
3565 static int
3566 default_region_ok_for_hw_watchpoint (struct target_ops *self,
3567 CORE_ADDR addr, int len)
3568 {
3569 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3570 }
3571
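/* Default implementation of the watchpoint_addr_within_range method:
   report whether ADDR falls within the watched range
   [START, START + LENGTH).  */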
3572 static int
3573 default_watchpoint_addr_within_range (struct target_ops *target,
3574 CORE_ADDR addr,
3575 CORE_ADDR start, int length)
3576 {
3577 return addr >= start && addr < start + length;
3578 }
3579
3580 /* See target.h. */
3581
3582 target_ops *
3583 target_stack::find_beneath (const target_ops *t) const
3584 {
3585 /* Look for a non-empty slot at stratum levels beneath T's. */
3586 for (int stratum = t->stratum () - 1; stratum >= 0; --stratum)
3587 if (m_stack[stratum] != NULL)
3588 return m_stack[stratum];
3589
3590 return NULL;
3591 }
3592
3593 /* See target.h. */
3594
3595 struct target_ops *
3596 find_target_at (enum strata stratum)
3597 {
3598 return current_inferior ()->target_at (stratum);
3599 }
3600
3601 \f
3602
3603 /* See target.h */
3604
3605 void
3606 target_announce_detach (int from_tty)
3607 {
3608 pid_t pid;
3609 const char *exec_file;
3610
3611 if (!from_tty)
3612 return;
3613
3614 exec_file = get_exec_file (0);
3615 if (exec_file == NULL)
3616 exec_file = "";
3617
3618 pid = inferior_ptid.pid ();
3619 printf_unfiltered (_("Detaching from program: %s, %s\n"), exec_file,
3620 target_pid_to_str (ptid_t (pid)).c_str ());
3621 }
3622
3623 /* The inferior process has died. Long live the inferior! */
3624
3625 void
3626 generic_mourn_inferior (void)
3627 {
3628 inferior *inf = current_inferior ();
3629
3630 switch_to_no_thread ();
3631
3632 /* Mark breakpoints uninserted in case something tries to delete a
3633 breakpoint while we delete the inferior's threads (which would
3634 fail, since the inferior is long gone). */
3635 mark_breakpoints_out ();
3636
3637 if (inf->pid != 0)
3638 exit_inferior (inf);
3639
3640 /* Note this wipes step-resume breakpoints, so needs to be done
3641 after exit_inferior, which ends up referencing the step-resume
3642 breakpoints through clear_thread_inferior_resources. */
3643 breakpoint_init_inferior (inf_exited);
3644
3645 registers_changed ();
3646
3647 reopen_exec_file ();
3648 reinit_frame_cache ();
3649
3650 if (deprecated_detach_hook)
3651 deprecated_detach_hook ();
3652 }
3653 \f
3654 /* Convert a normal process ID to a string, e.g. "process 1234".
3655 The result is returned as a std::string. */
3656
3657 std::string
3658 normal_pid_to_str (ptid_t ptid)
3659 {
3660 return string_printf ("process %d", ptid.pid ());
3661 }
3662
3663 static std::string
3664 default_pid_to_str (struct target_ops *ops, ptid_t ptid)
3665 {
3666 return normal_pid_to_str (ptid);
3667 }
3668
3669 /* Error-catcher for target_find_memory_regions. */
3670 static int
3671 dummy_find_memory_regions (struct target_ops *self,
3672 find_memory_region_ftype ignore1, void *ignore2)
3673 {
3674 error (_("Command not implemented for this target."));
3675 return 0;
3676 }
3677
3678 /* Error-catcher for target_make_corefile_notes. */
3679 static gdb::unique_xmalloc_ptr<char>
3680 dummy_make_corefile_notes (struct target_ops *self,
3681 bfd *ignore1, int *ignore2)
3682 {
3683 error (_("Command not implemented for this target."));
3684 return NULL;
3685 }
3686
3687 #include "target-delegates.c"
3688
3689 /* The initial current target, so that there is always a semi-valid
3690 current target. */
3691
3692 static dummy_target the_dummy_target;
3693
3694 /* See target.h. */
3695
3696 target_ops *
3697 get_dummy_target ()
3698 {
3699 return &the_dummy_target;
3700 }
3701
3702 static const target_info dummy_target_info = {
3703 "None",
3704 N_("None"),
3705 ""
3706 };
3707
3708 strata
3709 dummy_target::stratum () const
3710 {
3711 return dummy_stratum;
3712 }
3713
3714 strata
3715 debug_target::stratum () const
3716 {
3717 return debug_stratum;
3718 }
3719
3720 const target_info &
3721 dummy_target::info () const
3722 {
3723 return dummy_target_info;
3724 }
3725
3726 const target_info &
3727 debug_target::info () const
3728 {
3729 return beneath ()->info ();
3730 }
3731
3732 \f
3733
3734 void
3735 target_close (struct target_ops *targ)
3736 {
3737 for (inferior *inf : all_inferiors ())
3738 gdb_assert (!inf->target_is_pushed (targ));
3739
3740 fileio_handles_invalidate_target (targ);
3741
3742 targ->close ();
3743
3744 if (targetdebug)
3745 fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
3746 }
3747
3748 int
3749 target_thread_alive (ptid_t ptid)
3750 {
3751 return current_inferior ()->top_target ()->thread_alive (ptid);
3752 }
3753
3754 void
3755 target_update_thread_list (void)
3756 {
3757 current_inferior ()->top_target ()->update_thread_list ();
3758 }
3759
3760 void
3761 target_stop (ptid_t ptid)
3762 {
3763 process_stratum_target *proc_target = current_inferior ()->process_target ();
3764
3765 gdb_assert (!proc_target->commit_resumed_state);
3766
3767 if (!may_stop)
3768 {
3769 warning (_("May not interrupt or stop the target, ignoring attempt"));
3770 return;
3771 }
3772
3773 current_inferior ()->top_target ()->stop (ptid);
3774 }
3775
3776 void
3777 target_interrupt ()
3778 {
3779 if (!may_stop)
3780 {
3781 warning (_("May not interrupt or stop the target, ignoring attempt"));
3782 return;
3783 }
3784
3785 current_inferior ()->top_target ()->interrupt ();
3786 }
3787
3788 /* See target.h. */
3789
3790 void
3791 target_pass_ctrlc (void)
3792 {
3793 /* Pass the Ctrl-C to the first target that has a thread
3794 running. */
3795 for (inferior *inf : all_inferiors ())
3796 {
3797 target_ops *proc_target = inf->process_target ();
3798 if (proc_target == NULL)
3799 continue;
3800
3801 for (thread_info *thr : inf->non_exited_threads ())
3802 {
3803 /* A thread can be THREAD_STOPPED and executing, while
3804 running an infcall. */
3805 if (thr->state == THREAD_RUNNING || thr->executing)
3806 {
3807 /* We can get here quite deep in target layers. Avoid
3808 switching thread context or anything that would
3809 communicate with the target (e.g., to fetch
3810 registers), or flushing e.g., the frame cache. We
3811 just switch inferior in order to be able to call
3812 through the target_stack. */
3813 scoped_restore_current_inferior restore_inferior;
3814 set_current_inferior (inf);
3815 current_inferior ()->top_target ()->pass_ctrlc ();
3816 return;
3817 }
3818 }
3819 }
3820 }
3821
3822 /* See target.h. */
3823
3824 void
3825 default_target_pass_ctrlc (struct target_ops *ops)
3826 {
3827 target_interrupt ();
3828 }
3829
3830 /* See target/target.h. */
3831
3832 void
3833 target_stop_and_wait (ptid_t ptid)
3834 {
3835 struct target_waitstatus status;
3836 bool was_non_stop = non_stop;
3837
3838 non_stop = true;
3839 target_stop (ptid);
3840
3841 memset (&status, 0, sizeof (status));
3842 target_wait (ptid, &status, 0);
3843
3844 non_stop = was_non_stop;
3845 }
3846
3847 /* See target/target.h. */
3848
3849 void
3850 target_continue_no_signal (ptid_t ptid)
3851 {
3852 target_resume (ptid, 0, GDB_SIGNAL_0);
3853 }
3854
3855 /* See target/target.h. */
3856
3857 void
3858 target_continue (ptid_t ptid, enum gdb_signal signal)
3859 {
3860 target_resume (ptid, 0, signal);
3861 }
3862
3863 /* Concatenate ELEM to LIST, a comma-separated list. */
3864
3865 static void
3866 str_comma_list_concat_elem (std::string *list, const char *elem)
3867 {
3868 if (!list->empty ())
3869 list->append (", ");
3870
3871 list->append (elem);
3872 }
3873
3874 /* Helper for target_options_to_string. If OPT is present in
3875 TARGET_OPTIONS, append OPT_STR (the string version of OPT) to RET.
3876 OPT is removed from TARGET_OPTIONS. */
3877
3878 static void
3879 do_option (target_wait_flags *target_options, std::string *ret,
3880 target_wait_flag opt, const char *opt_str)
3881 {
3882 if ((*target_options & opt) != 0)
3883 {
3884 str_comma_list_concat_elem (ret, opt_str);
3885 *target_options &= ~opt;
3886 }
3887 }
3888
3889 /* See target.h. */
3890
3891 std::string
3892 target_options_to_string (target_wait_flags target_options)
3893 {
3894 std::string ret;
3895
3896 #define DO_TARG_OPTION(OPT) \
3897 do_option (&target_options, &ret, OPT, #OPT)
3898
3899 DO_TARG_OPTION (TARGET_WNOHANG);
3900
3901 if (target_options != 0)
3902 str_comma_list_concat_elem (&ret, "unknown???");
3903
3904 return ret;
3905 }
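
/* Illustrative example: with only TARGET_WNOHANG set the result is
   "TARGET_WNOHANG"; any leftover bits are reported as "unknown???"; with
   no bits set the result is the empty string.

     std::string s = target_options_to_string (TARGET_WNOHANG);
*/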
3906
3907 void
3908 target_fetch_registers (struct regcache *regcache, int regno)
3909 {
3910 current_inferior ()->top_target ()->fetch_registers (regcache, regno);
3911 if (targetdebug)
3912 regcache->debug_print_register ("target_fetch_registers", regno);
3913 }
3914
3915 void
3916 target_store_registers (struct regcache *regcache, int regno)
3917 {
3918 if (!may_write_registers)
3919 error (_("Writing to registers is not allowed (regno %d)"), regno);
3920
3921 current_inferior ()->top_target ()->store_registers (regcache, regno);
3922 if (targetdebug)
3923 {
3924 regcache->debug_print_register ("target_store_registers", regno);
3925 }
3926 }
3927
3928 int
3929 target_core_of_thread (ptid_t ptid)
3930 {
3931 return current_inferior ()->top_target ()->core_of_thread (ptid);
3932 }
3933
3934 int
3935 simple_verify_memory (struct target_ops *ops,
3936 const gdb_byte *data, CORE_ADDR lma, ULONGEST size)
3937 {
3938 LONGEST total_xfered = 0;
3939
3940 while (total_xfered < size)
3941 {
3942 ULONGEST xfered_len;
3943 enum target_xfer_status status;
3944 gdb_byte buf[1024];
3945 ULONGEST howmuch = std::min<ULONGEST> (sizeof (buf), size - total_xfered);
3946
3947 status = target_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
3948 buf, NULL, lma + total_xfered, howmuch,
3949 &xfered_len);
3950 if (status == TARGET_XFER_OK
3951 && memcmp (data + total_xfered, buf, xfered_len) == 0)
3952 {
3953 total_xfered += xfered_len;
3954 QUIT;
3955 }
3956 else
3957 return 0;
3958 }
3959 return 1;
3960 }
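
/* Usage sketch (illustrative; DATA, LMA and SIZE are placeholders):
   compare a host buffer against target memory; the result is 1 if the
   contents match, 0 on mismatch or read error.

     if (simple_verify_memory (current_inferior ()->top_target (),
                               data, lma, size))
       printf_unfiltered (_("verified OK\n"));
*/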
3961
3962 /* Default implementation of memory verification. */
3963
3964 static int
3965 default_verify_memory (struct target_ops *self,
3966 const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3967 {
3968 /* Start over from the top of the target stack. */
3969 return simple_verify_memory (current_inferior ()->top_target (),
3970 data, memaddr, size);
3971 }
3972
3973 int
3974 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3975 {
3976 target_ops *target = current_inferior ()->top_target ();
3977
3978 return target->verify_memory (data, memaddr, size);
3979 }
3980
3981 /* The documentation for this function is in its prototype declaration in
3982 target.h. */
3983
3984 int
3985 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask,
3986 enum target_hw_bp_type rw)
3987 {
3988 target_ops *target = current_inferior ()->top_target ();
3989
3990 return target->insert_mask_watchpoint (addr, mask, rw);
3991 }
3992
3993 /* The documentation for this function is in its prototype declaration in
3994 target.h. */
3995
3996 int
3997 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask,
3998 enum target_hw_bp_type rw)
3999 {
4000 target_ops *target = current_inferior ()->top_target ();
4001
4002 return target->remove_mask_watchpoint (addr, mask, rw);
4003 }
4004
4005 /* The documentation for this function is in its prototype declaration
4006 in target.h. */
4007
4008 int
4009 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
4010 {
4011 target_ops *target = current_inferior ()->top_target ();
4012
4013 return target->masked_watch_num_registers (addr, mask);
4014 }
4015
4016 /* The documentation for this function is in its prototype declaration
4017 in target.h. */
4018
4019 int
4020 target_ranged_break_num_registers (void)
4021 {
4022 return current_inferior ()->top_target ()->ranged_break_num_registers ();
4023 }
4024
4025 /* See target.h. */
4026
4027 struct btrace_target_info *
4028 target_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
4029 {
4030 return current_inferior ()->top_target ()->enable_btrace (ptid, conf);
4031 }
4032
4033 /* See target.h. */
4034
4035 void
4036 target_disable_btrace (struct btrace_target_info *btinfo)
4037 {
4038 current_inferior ()->top_target ()->disable_btrace (btinfo);
4039 }
4040
4041 /* See target.h. */
4042
4043 void
4044 target_teardown_btrace (struct btrace_target_info *btinfo)
4045 {
4046 current_inferior ()->top_target ()->teardown_btrace (btinfo);
4047 }
4048
4049 /* See target.h. */
4050
4051 enum btrace_error
4052 target_read_btrace (struct btrace_data *btrace,
4053 struct btrace_target_info *btinfo,
4054 enum btrace_read_type type)
4055 {
4056 target_ops *target = current_inferior ()->top_target ();
4057
4058 return target->read_btrace (btrace, btinfo, type);
4059 }
4060
4061 /* See target.h. */
4062
4063 const struct btrace_config *
4064 target_btrace_conf (const struct btrace_target_info *btinfo)
4065 {
4066 return current_inferior ()->top_target ()->btrace_conf (btinfo);
4067 }
4068
4069 /* See target.h. */
4070
4071 void
4072 target_stop_recording (void)
4073 {
4074 current_inferior ()->top_target ()->stop_recording ();
4075 }
4076
4077 /* See target.h. */
4078
4079 void
4080 target_save_record (const char *filename)
4081 {
4082 current_inferior ()->top_target ()->save_record (filename);
4083 }
4084
4085 /* See target.h. */
4086
4087 int
4088 target_supports_delete_record ()
4089 {
4090 return current_inferior ()->top_target ()->supports_delete_record ();
4091 }
4092
4093 /* See target.h. */
4094
4095 void
4096 target_delete_record (void)
4097 {
4098 current_inferior ()->top_target ()->delete_record ();
4099 }
4100
4101 /* See target.h. */
4102
4103 enum record_method
4104 target_record_method (ptid_t ptid)
4105 {
4106 return current_inferior ()->top_target ()->record_method (ptid);
4107 }
4108
4109 /* See target.h. */
4110
4111 int
4112 target_record_is_replaying (ptid_t ptid)
4113 {
4114 return current_inferior ()->top_target ()->record_is_replaying (ptid);
4115 }
4116
4117 /* See target.h. */
4118
4119 int
4120 target_record_will_replay (ptid_t ptid, int dir)
4121 {
4122 return current_inferior ()->top_target ()->record_will_replay (ptid, dir);
4123 }
4124
4125 /* See target.h. */
4126
4127 void
4128 target_record_stop_replaying (void)
4129 {
4130 current_inferior ()->top_target ()->record_stop_replaying ();
4131 }
4132
4133 /* See target.h. */
4134
4135 void
4136 target_goto_record_begin (void)
4137 {
4138 current_inferior ()->top_target ()->goto_record_begin ();
4139 }
4140
4141 /* See target.h. */
4142
4143 void
4144 target_goto_record_end (void)
4145 {
4146 current_inferior ()->top_target ()->goto_record_end ();
4147 }
4148
4149 /* See target.h. */
4150
4151 void
4152 target_goto_record (ULONGEST insn)
4153 {
4154 current_inferior ()->top_target ()->goto_record (insn);
4155 }
4156
4157 /* See target.h. */
4158
4159 void
4160 target_insn_history (int size, gdb_disassembly_flags flags)
4161 {
4162 current_inferior ()->top_target ()->insn_history (size, flags);
4163 }
4164
4165 /* See target.h. */
4166
4167 void
4168 target_insn_history_from (ULONGEST from, int size,
4169 gdb_disassembly_flags flags)
4170 {
4171 current_inferior ()->top_target ()->insn_history_from (from, size, flags);
4172 }
4173
4174 /* See target.h. */
4175
4176 void
4177 target_insn_history_range (ULONGEST begin, ULONGEST end,
4178 gdb_disassembly_flags flags)
4179 {
4180 current_inferior ()->top_target ()->insn_history_range (begin, end, flags);
4181 }
4182
4183 /* See target.h. */
4184
4185 void
4186 target_call_history (int size, record_print_flags flags)
4187 {
4188 current_inferior ()->top_target ()->call_history (size, flags);
4189 }
4190
4191 /* See target.h. */
4192
4193 void
4194 target_call_history_from (ULONGEST begin, int size, record_print_flags flags)
4195 {
4196 current_inferior ()->top_target ()->call_history_from (begin, size, flags);
4197 }
4198
4199 /* See target.h. */
4200
4201 void
4202 target_call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
4203 {
4204 current_inferior ()->top_target ()->call_history_range (begin, end, flags);
4205 }
4206
4207 /* See target.h. */
4208
4209 const struct frame_unwind *
4210 target_get_unwinder (void)
4211 {
4212 return current_inferior ()->top_target ()->get_unwinder ();
4213 }
4214
4215 /* See target.h. */
4216
4217 const struct frame_unwind *
4218 target_get_tailcall_unwinder (void)
4219 {
4220 return current_inferior ()->top_target ()->get_tailcall_unwinder ();
4221 }
4222
4223 /* See target.h. */
4224
4225 void
4226 target_prepare_to_generate_core (void)
4227 {
4228 current_inferior ()->top_target ()->prepare_to_generate_core ();
4229 }
4230
4231 /* See target.h. */
4232
4233 void
4234 target_done_generating_core (void)
4235 {
4236 current_inferior ()->top_target ()->done_generating_core ();
4237 }
4238
4239 \f
4240
4241 static char targ_desc[] =
4242 "Names of targets and files being debugged.\nShows the entire \
4243 stack of targets currently in use (including the exec-file,\n\
4244 core-file, and process, if any), as well as the symbol file name.";
4245
4246 static void
4247 default_rcmd (struct target_ops *self, const char *command,
4248 struct ui_file *output)
4249 {
4250 error (_("\"monitor\" command not supported by this target."));
4251 }
4252
4253 static void
4254 do_monitor_command (const char *cmd, int from_tty)
4255 {
4256 target_rcmd (cmd, gdb_stdtarg);
4257 }
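
/* Usage sketch (assumes a remote stub that implements the qRcmd packet;
   the session below is illustrative, not taken from a real log):

     (gdb) target remote :1234
     (gdb) monitor help

   Everything after "monitor" is handed verbatim to target_rcmd and any
   response is written to gdb_stdtarg.  Targets that do not provide an
   rcmd implementation end up in default_rcmd above and report that
   "monitor" is not supported.  */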
4258
4259 /* Erases all the memory regions marked as flash. CMD and FROM_TTY are
4260 ignored. */
4261
4262 void
4263 flash_erase_command (const char *cmd, int from_tty)
4264 {
4265 /* Set once at least one flash region has been erased; used to decide whether target_flash_done must be called at the end. */
4266 bool found_flash_region = false;
4267 struct gdbarch *gdbarch = target_gdbarch ();
4268
4269 std::vector<mem_region> mem_regions = target_memory_map ();
4270
4271 /* Iterate over all memory regions. */
4272 for (const mem_region &m : mem_regions)
4273 {
4274 /* Is this a flash memory region? */
4275 if (m.attrib.mode == MEM_FLASH)
4276 {
4277 found_flash_region = true;
4278 target_flash_erase (m.lo, m.hi - m.lo);
4279
4280 ui_out_emit_tuple tuple_emitter (current_uiout, "erased-regions");
4281
4282 current_uiout->message (_("Erasing flash memory region at address "));
4283 current_uiout->field_core_addr ("address", gdbarch, m.lo);
4284 current_uiout->message (", size = ");
4285 current_uiout->field_string ("size", hex_string (m.hi - m.lo));
4286 current_uiout->message ("\n");
4287 }
4288 }
4289
4290 /* Did we do any flash operations? If so, we need to finalize them. */
4291 if (found_flash_region)
4292 target_flash_done ();
4293 else
4294 current_uiout->message (_("No flash memory regions found.\n"));
4295 }
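
/* Illustrative session (the address and size are hypothetical and depend
   on the target's memory map):

     (gdb) flash-erase
     Erasing flash memory region at address 0x8000000, size = 0x20000

   Regions come from target_memory_map; only those with attrib.mode ==
   MEM_FLASH are erased, and target_flash_done is called once at the end,
   or not at all if no flash region was found.  */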
4296
4297 /* Print the name of each layer of our target stack. */
4298
4299 static void
4300 maintenance_print_target_stack (const char *cmd, int from_tty)
4301 {
4302 printf_filtered (_("The current target stack is:\n"));
4303
4304 for (target_ops *t = current_inferior ()->top_target ();
4305 t != NULL;
4306 t = t->beneath ())
4307 {
4308 if (t->stratum () == debug_stratum)
4309 continue;
4310 printf_filtered (" - %s (%s)\n", t->shortname (), t->longname ());
4311 }
4312 }
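
/* Example output (illustrative; the exact strata depend on how GDB is
   connected):

     (gdb) maintenance print target-stack
     The current target stack is:
      - remote (Remote serial target in gdb-specific protocol)
      - exec (Local exec file)
      - None (None)

   Entries at debug_stratum (the debug wrapper used by "set debug
   target") are skipped so that only real targets are listed.  */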
4313
4314 /* See target.h. */
4315
4316 void
4317 target_async (int enable)
4318 {
4319 infrun_async (enable);
4320 current_inferior ()->top_target ()->async (enable);
4321 }
4322
4323 /* See target.h. */
4324
4325 void
4326 target_thread_events (int enable)
4327 {
4328 current_inferior ()->top_target ()->thread_events (enable);
4329 }
4330
4331 /* Controls whether targets can report that they can be, or are, async.
4332 This is just for maintainers to use when debugging gdb. */
4333 bool target_async_permitted = true;
4334
4335 /* The set command writes to this variable. If the inferior is
4336 executing, target_async_permitted is *not* updated. */
4337 static bool target_async_permitted_1 = true;
4338
4339 static void
4340 maint_set_target_async_command (const char *args, int from_tty,
4341 struct cmd_list_element *c)
4342 {
4343 if (have_live_inferiors ())
4344 {
4345 target_async_permitted_1 = target_async_permitted;
4346 error (_("Cannot change this setting while the inferior is running."));
4347 }
4348
4349 target_async_permitted = target_async_permitted_1;
4350 }
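
/* Control-flow sketch for "maint set target-async off" while a live
   inferior exists: the setshow machinery has already stored the new
   value in target_async_permitted_1, so this handler rolls the staged
   value back to the current target_async_permitted and raises an error;
   the effective setting never changes.  Without a live inferior, the
   staged value is committed instead.  */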
4351
4352 static void
4353 maint_show_target_async_command (struct ui_file *file, int from_tty,
4354 struct cmd_list_element *c,
4355 const char *value)
4356 {
4357 fprintf_filtered (file,
4358 _("Controlling the inferior in "
4359 "asynchronous mode is %s.\n"), value);
4360 }
4361
4362 /* Return true if the target operates in non-stop mode even with "set
4363 non-stop off". */
4364
4365 static int
4366 target_always_non_stop_p (void)
4367 {
4368 return current_inferior ()->top_target ()->always_non_stop_p ();
4369 }
4370
4371 /* See target.h. */
4372
4373 bool
4374 target_is_non_stop_p ()
4375 {
4376 return ((non_stop
4377 || target_non_stop_enabled == AUTO_BOOLEAN_TRUE
4378 || (target_non_stop_enabled == AUTO_BOOLEAN_AUTO
4379 && target_always_non_stop_p ()))
4380 && target_can_async_p ());
4381 }
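
/* In other words: non-stop behaviour is in effect only when async
   execution is possible and at least one of the following holds: the
   user ran "set non-stop on", "maint set target-non-stop" forces it on,
   or that setting is left at "auto" and the target itself reports via
   always_non_stop_p that it only runs non-stop.  */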
4382
4383 /* See target.h. */
4384
4385 bool
4386 exists_non_stop_target ()
4387 {
4388 if (target_is_non_stop_p ())
4389 return true;
4390
4391 scoped_restore_current_thread restore_thread;
4392
4393 for (inferior *inf : all_inferiors ())
4394 {
4395 switch_to_inferior_no_thread (inf);
4396 if (target_is_non_stop_p ())
4397 return true;
4398 }
4399
4400 return false;
4401 }
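
/* Note: target_is_non_stop_p looks at the current inferior's target
   stack, so the loop above temporarily switches to each inferior (with
   scoped_restore_current_thread undoing the switch on return) to check
   whether any of them is connected to a non-stop target.  */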
4402
4403 /* Controls whether targets can report that they always run in non-stop
4404 mode. This is just for maintainers to use when debugging gdb. */
4405 enum auto_boolean target_non_stop_enabled = AUTO_BOOLEAN_AUTO;
4406
4407 /* The set command writes to this variable. If the inferior is
4408 executing, target_non_stop_enabled is *not* updated. */
4409 static enum auto_boolean target_non_stop_enabled_1 = AUTO_BOOLEAN_AUTO;
4410
4411 /* Implementation of "maint set target-non-stop". */
4412
4413 static void
4414 maint_set_target_non_stop_command (const char *args, int from_tty,
4415 struct cmd_list_element *c)
4416 {
4417 if (have_live_inferiors ())
4418 {
4419 target_non_stop_enabled_1 = target_non_stop_enabled;
4420 error (_("Cannot change this setting while the inferior is running."));
4421 }
4422
4423 target_non_stop_enabled = target_non_stop_enabled_1;
4424 }
4425
4426 /* Implementation of "maint show target-non-stop". */
4427
4428 static void
4429 maint_show_target_non_stop_command (struct ui_file *file, int from_tty,
4430 struct cmd_list_element *c,
4431 const char *value)
4432 {
4433 if (target_non_stop_enabled == AUTO_BOOLEAN_AUTO)
4434 fprintf_filtered (file,
4435 _("Whether the target is always in non-stop mode "
4436 "is %s (currently %s).\n"), value,
4437 target_always_non_stop_p () ? "on" : "off");
4438 else
4439 fprintf_filtered (file,
4440 _("Whether the target is always in non-stop mode "
4441 "is %s.\n"), value);
4442 }
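
/* Illustrative "maint show target-non-stop" output while the setting is
   still at its default of "auto":

     Whether the target is always in non-stop mode is auto (currently off).

   The "(currently ...)" suffix reflects target_always_non_stop_p for the
   current target and is only printed in the auto case.  */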
4443
4444 /* Temporary copies of permission settings. */
4445
4446 static bool may_write_registers_1 = true;
4447 static bool may_write_memory_1 = true;
4448 static bool may_insert_breakpoints_1 = true;
4449 static bool may_insert_tracepoints_1 = true;
4450 static bool may_insert_fast_tracepoints_1 = true;
4451 static bool may_stop_1 = true;
4452
4453 /* Make the user-set values match the real values again. */
4454
4455 void
4456 update_target_permissions (void)
4457 {
4458 may_write_registers_1 = may_write_registers;
4459 may_write_memory_1 = may_write_memory;
4460 may_insert_breakpoints_1 = may_insert_breakpoints;
4461 may_insert_tracepoints_1 = may_insert_tracepoints;
4462 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
4463 may_stop_1 = may_stop;
4464 }
4465
4466 /* This one function handles (most of) the permission flags in the same
4467 way. */
4468
4469 static void
4470 set_target_permissions (const char *args, int from_tty,
4471 struct cmd_list_element *c)
4472 {
4473 if (target_has_execution ())
4474 {
4475 update_target_permissions ();
4476 error (_("Cannot change this setting while the inferior is running."));
4477 }
4478
4479 /* Make the real values match the user-changed values. */
4480 may_write_registers = may_write_registers_1;
4481 may_insert_breakpoints = may_insert_breakpoints_1;
4482 may_insert_tracepoints = may_insert_tracepoints_1;
4483 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
4484 may_stop = may_stop_1;
4485 update_observer_mode ();
4486 }
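
/* Example: "set may-insert-breakpoints off" while the inferior is
   executing first stores false into may_insert_breakpoints_1; this
   handler then calls update_target_permissions to resynchronize all the
   staged *_1 copies with the live values and reports an error, so the
   effective permissions are unchanged.  With no execution, the staged
   values are committed and update_observer_mode is told about the
   change.  may_write_memory is not committed here; it is handled by
   set_write_memory_permission below.  */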
4487
4488 /* Set memory write permission independently of observer mode. */
4489
4490 static void
4491 set_write_memory_permission (const char *args, int from_tty,
4492 struct cmd_list_element *c)
4493 {
4494 /* Make the real values match the user-changed values. */
4495 may_write_memory = may_write_memory_1;
4496 update_observer_mode ();
4497 }
4498
4499 void _initialize_target ();
4500
4501 void
4502 _initialize_target ()
4503 {
4504 the_debug_target = new debug_target ();
4505
4506 add_info ("target", info_target_command, targ_desc);
4507 add_info ("files", info_target_command, targ_desc);
4508
4509 add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
4510 Set target debugging."), _("\
4511 Show target debugging."), _("\
4512 When non-zero, target debugging is enabled. Higher numbers are more\n\
4513 verbose."),
4514 set_targetdebug,
4515 show_targetdebug,
4516 &setdebuglist, &showdebuglist);
4517
4518 add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
4519 &trust_readonly, _("\
4520 Set mode for reading from readonly sections."), _("\
4521 Show mode for reading from readonly sections."), _("\
4522 When this mode is on, memory reads from readonly sections (such as .text)\n\
4523 will be read from the object file instead of from the target. This will\n\
4524 result in significant performance improvement for remote targets."),
4525 NULL,
4526 show_trust_readonly,
4527 &setlist, &showlist);
4528
4529 add_com ("monitor", class_obscure, do_monitor_command,
4530 _("Send a command to the remote monitor (remote targets only)."));
4531
4532 add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
4533 _("Print the name of each layer of the internal target stack."),
4534 &maintenanceprintlist);
4535
4536 add_setshow_boolean_cmd ("target-async", no_class,
4537 &target_async_permitted_1, _("\
4538 Set whether gdb controls the inferior in asynchronous mode."), _("\
4539 Show whether gdb controls the inferior in asynchronous mode."), _("\
4540 Tells gdb whether to control the inferior in asynchronous mode."),
4541 maint_set_target_async_command,
4542 maint_show_target_async_command,
4543 &maintenance_set_cmdlist,
4544 &maintenance_show_cmdlist);
4545
4546 add_setshow_auto_boolean_cmd ("target-non-stop", no_class,
4547 &target_non_stop_enabled_1, _("\
4548 Set whether gdb always controls the inferior in non-stop mode."), _("\
4549 Show whether gdb always controls the inferior in non-stop mode."), _("\
4550 Tells gdb whether to control the inferior in non-stop mode."),
4551 maint_set_target_non_stop_command,
4552 maint_show_target_non_stop_command,
4553 &maintenance_set_cmdlist,
4554 &maintenance_show_cmdlist);
4555
4556 add_setshow_boolean_cmd ("may-write-registers", class_support,
4557 &may_write_registers_1, _("\
4558 Set permission to write into registers."), _("\
4559 Show permission to write into registers."), _("\
4560 When this permission is on, GDB may write into the target's registers.\n\
4561 Otherwise, any sort of write attempt will result in an error."),
4562 set_target_permissions, NULL,
4563 &setlist, &showlist);
4564
4565 add_setshow_boolean_cmd ("may-write-memory", class_support,
4566 &may_write_memory_1, _("\
4567 Set permission to write into target memory."), _("\
4568 Show permission to write into target memory."), _("\
4569 When this permission is on, GDB may write into the target's memory.\n\
4570 Otherwise, any sort of write attempt will result in an error."),
4571 set_write_memory_permission, NULL,
4572 &setlist, &showlist);
4573
4574 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
4575 &may_insert_breakpoints_1, _("\
4576 Set permission to insert breakpoints in the target."), _("\
4577 Show permission to insert breakpoints in the target."), _("\
4578 When this permission is on, GDB may insert breakpoints in the program.\n\
4579 Otherwise, any sort of insertion attempt will result in an error."),
4580 set_target_permissions, NULL,
4581 &setlist, &showlist);
4582
4583 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
4584 &may_insert_tracepoints_1, _("\
4585 Set permission to insert tracepoints in the target."), _("\
4586 Show permission to insert tracepoints in the target."), _("\
4587 When this permission is on, GDB may insert tracepoints in the program.\n\
4588 Otherwise, any sort of insertion attempt will result in an error."),
4589 set_target_permissions, NULL,
4590 &setlist, &showlist);
4591
4592 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
4593 &may_insert_fast_tracepoints_1, _("\
4594 Set permission to insert fast tracepoints in the target."), _("\
4595 Show permission to insert fast tracepoints in the target."), _("\
4596 When this permission is on, GDB may insert fast tracepoints.\n\
4597 Otherwise, any sort of insertion attempt will result in an error."),
4598 set_target_permissions, NULL,
4599 &setlist, &showlist);
4600
4601 add_setshow_boolean_cmd ("may-interrupt", class_support,
4602 &may_stop_1, _("\
4603 Set permission to interrupt or signal the target."), _("\
4604 Show permission to interrupt or signal the target."), _("\
4605 When this permission is on, GDB may interrupt/stop the target's execution.\n\
4606 Otherwise, any attempt to interrupt or stop will be ignored."),
4607 set_target_permissions, NULL,
4608 &setlist, &showlist);
4609
4610 add_com ("flash-erase", no_class, flash_erase_command,
4611 _("Erase all flash memory regions."));
4612
4613 add_setshow_boolean_cmd ("auto-connect-native-target", class_support,
4614 &auto_connect_native_target, _("\
4615 Set whether GDB may automatically connect to the native target."), _("\
4616 Show whether GDB may automatically connect to the native target."), _("\
4617 When on, and GDB is not connected to a target yet, GDB\n\
4618 attempts \"run\" and other commands with the native target."),
4619 NULL, show_auto_connect_native_target,
4620 &setlist, &showlist);
4621 }