1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2021 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "target.h"
24 #include "target-dcache.h"
25 #include "gdbcmd.h"
26 #include "symtab.h"
27 #include "inferior.h"
28 #include "infrun.h"
29 #include "bfd.h"
30 #include "symfile.h"
31 #include "objfiles.h"
32 #include "dcache.h"
33 #include <signal.h>
34 #include "regcache.h"
35 #include "gdbcore.h"
36 #include "target-descriptions.h"
37 #include "gdbthread.h"
38 #include "solib.h"
39 #include "exec.h"
40 #include "inline-frame.h"
41 #include "tracepoint.h"
42 #include "gdb/fileio.h"
43 #include "gdbsupport/agent.h"
44 #include "auxv.h"
45 #include "target-debug.h"
46 #include "top.h"
47 #include "event-top.h"
48 #include <algorithm>
49 #include "gdbsupport/byte-vector.h"
50 #include "gdbsupport/search.h"
51 #include "terminal.h"
52 #include <unordered_map>
53 #include "target-connection.h"
54 #include "valprint.h"
55
56 static void generic_tls_error (void) ATTRIBUTE_NORETURN;
57
58 static void default_terminal_info (struct target_ops *, const char *, int);
59
60 static int default_watchpoint_addr_within_range (struct target_ops *,
61 CORE_ADDR, CORE_ADDR, int);
62
63 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
64 CORE_ADDR, int);
65
66 static void default_rcmd (struct target_ops *, const char *, struct ui_file *);
67
68 static ptid_t default_get_ada_task_ptid (struct target_ops *self,
69 long lwp, long tid);
70
71 static void default_mourn_inferior (struct target_ops *self);
72
73 static int default_search_memory (struct target_ops *ops,
74 CORE_ADDR start_addr,
75 ULONGEST search_space_len,
76 const gdb_byte *pattern,
77 ULONGEST pattern_len,
78 CORE_ADDR *found_addrp);
79
80 static int default_verify_memory (struct target_ops *self,
81 const gdb_byte *data,
82 CORE_ADDR memaddr, ULONGEST size);
83
84 static void tcomplain (void) ATTRIBUTE_NORETURN;
85
86 static struct target_ops *find_default_run_target (const char *);
87
88 static int dummy_find_memory_regions (struct target_ops *self,
89 find_memory_region_ftype ignore1,
90 void *ignore2);
91
92 static gdb::unique_xmalloc_ptr<char> dummy_make_corefile_notes
93 (struct target_ops *self, bfd *ignore1, int *ignore2);
94
95 static std::string default_pid_to_str (struct target_ops *ops, ptid_t ptid);
96
97 static enum exec_direction_kind default_execution_direction
98 (struct target_ops *self);
99
100 /* Mapping between target_info objects (which have address identity)
101 and corresponding open/factory function/callback. Each add_target
102 call adds one entry to this map, and registers a "target
103 TARGET_NAME" command that when invoked calls the factory registered
104 here. The target_info object is associated with the command via
105 the command's context. */
106 static std::unordered_map<const target_info *, target_open_ftype *>
107 target_factories;
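/* Illustrative sketch (editor's addition, not part of the original
   target.c): how a backend typically registers itself with the map
   above.  add_target records the open callback in TARGET_FACTORIES and
   creates a "target foo" command whose context is the target_info,
   which open_target later retrieves.  All "foo" names below are
   invented for illustration only.  */

#if 0 /* Example only; not compiled.  */

static const target_info foo_target_info = {
  "foo",				/* shortname */
  N_("Foo protocol target"),		/* longname */
  N_("Connect using the foo protocol.\nUsage: target foo HOST:PORT") /* doc */
};

/* The factory callback stored in target_factories; open_target calls
   it when the user types "target foo ARGS".  */

static void
foo_target_open (const char *args, int from_tty)
{
  /* Parse ARGS, establish the connection, then push the new target
     onto the current inferior's target stack.  */
}

void _initialize_foo_target ();
void
_initialize_foo_target ()
{
  add_target (foo_target_info, foo_target_open);
}

#endif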
108
109 /* The singleton debug target. */
110
111 static struct target_ops *the_debug_target;
112
113 /* Command list for target. */
114
115 static struct cmd_list_element *targetlist = NULL;
116
117 /* True if we should trust readonly sections from the
118 executable when reading memory. */
119
120 static bool trust_readonly = false;
121
122 /* Nonzero if we should show true memory content including
123 memory breakpoints inserted by GDB. */
124
125 static int show_memory_breakpoints = 0;
126
127 /* These globals control whether GDB attempts to perform these
128 operations; they are useful for targets that need to prevent
129 inadvertent disruption, such as in non-stop mode. */
130
131 bool may_write_registers = true;
132
133 bool may_write_memory = true;
134
135 bool may_insert_breakpoints = true;
136
137 bool may_insert_tracepoints = true;
138
139 bool may_insert_fast_tracepoints = true;
140
141 bool may_stop = true;
142
143 /* Nonzero if we want to see a trace of target-level operations. */
144
145 static unsigned int targetdebug = 0;
146
147 static void
148 set_targetdebug (const char *args, int from_tty, struct cmd_list_element *c)
149 {
150 if (targetdebug)
151 current_inferior ()->push_target (the_debug_target);
152 else
153 current_inferior ()->unpush_target (the_debug_target);
154 }
155
156 static void
157 show_targetdebug (struct ui_file *file, int from_tty,
158 struct cmd_list_element *c, const char *value)
159 {
160 fprintf_filtered (file, _("Target debugging is %s.\n"), value);
161 }
162
163 int
164 target_has_memory ()
165 {
166 for (target_ops *t = current_inferior ()->top_target ();
167 t != NULL;
168 t = t->beneath ())
169 if (t->has_memory ())
170 return 1;
171
172 return 0;
173 }
174
175 int
176 target_has_stack ()
177 {
178 for (target_ops *t = current_inferior ()->top_target ();
179 t != NULL;
180 t = t->beneath ())
181 if (t->has_stack ())
182 return 1;
183
184 return 0;
185 }
186
187 int
188 target_has_registers ()
189 {
190 for (target_ops *t = current_inferior ()->top_target ();
191 t != NULL;
192 t = t->beneath ())
193 if (t->has_registers ())
194 return 1;
195
196 return 0;
197 }
198
199 bool
200 target_has_execution (inferior *inf)
201 {
202 if (inf == nullptr)
203 inf = current_inferior ();
204
205 for (target_ops *t = inf->top_target ();
206 t != nullptr;
207 t = inf->find_target_beneath (t))
208 if (t->has_execution (inf))
209 return true;
210
211 return false;
212 }
213
214 const char *
215 target_shortname ()
216 {
217 return current_inferior ()->top_target ()->shortname ();
218 }
219
220 /* See target.h. */
221
222 bool
223 target_attach_no_wait ()
224 {
225 return current_inferior ()->top_target ()->attach_no_wait ();
226 }
227
228 /* See target.h. */
229
230 void
231 target_post_attach (int pid)
232 {
233 return current_inferior ()->top_target ()->post_attach (pid);
234 }
235
236 /* See target.h. */
237
238 void
239 target_prepare_to_store (regcache *regcache)
240 {
241 return current_inferior ()->top_target ()->prepare_to_store (regcache);
242 }
243
244 /* See target.h. */
245
246 bool
247 target_supports_enable_disable_tracepoint ()
248 {
249 target_ops *target = current_inferior ()->top_target ();
250
251 return target->supports_enable_disable_tracepoint ();
252 }
253
254 bool
255 target_supports_string_tracing ()
256 {
257 return current_inferior ()->top_target ()->supports_string_tracing ();
258 }
259
260 /* See target.h. */
261
262 bool
263 target_supports_evaluation_of_breakpoint_conditions ()
264 {
265 target_ops *target = current_inferior ()->top_target ();
266
267 return target->supports_evaluation_of_breakpoint_conditions ();
268 }
269
270 /* See target.h. */
271
272 bool
273 target_supports_dumpcore ()
274 {
275 return current_inferior ()->top_target ()->supports_dumpcore ();
276 }
277
278 /* See target.h. */
279
280 void
281 target_dumpcore (const char *filename)
282 {
283 return current_inferior ()->top_target ()->dumpcore (filename);
284 }
285
286 /* See target.h. */
287
288 bool
289 target_can_run_breakpoint_commands ()
290 {
291 return current_inferior ()->top_target ()->can_run_breakpoint_commands ();
292 }
293
294 /* See target.h. */
295
296 void
297 target_files_info ()
298 {
299 return current_inferior ()->top_target ()->files_info ();
300 }
301
302 /* See target.h. */
303
304 void
305 target_post_startup_inferior (ptid_t ptid)
306 {
307 return current_inferior ()->top_target ()->post_startup_inferior (ptid);
308 }
309
310 /* See target.h. */
311
312 int
313 target_insert_fork_catchpoint (int pid)
314 {
315 return current_inferior ()->top_target ()->insert_fork_catchpoint (pid);
316 }
317
318 /* See target.h. */
319
320 int
321 target_remove_fork_catchpoint (int pid)
322 {
323 return current_inferior ()->top_target ()->remove_fork_catchpoint (pid);
324 }
325
326 /* See target.h. */
327
328 int
329 target_insert_vfork_catchpoint (int pid)
330 {
331 return current_inferior ()->top_target ()->insert_vfork_catchpoint (pid);
332 }
333
334 /* See target.h. */
335
336 int
337 target_remove_vfork_catchpoint (int pid)
338 {
339 return current_inferior ()->top_target ()->remove_vfork_catchpoint (pid);
340 }
341
342 /* See target.h. */
343
344 int
345 target_insert_exec_catchpoint (int pid)
346 {
347 return current_inferior ()->top_target ()->insert_exec_catchpoint (pid);
348 }
349
350 /* See target.h. */
351
352 int
353 target_remove_exec_catchpoint (int pid)
354 {
355 return current_inferior ()->top_target ()->remove_exec_catchpoint (pid);
356 }
357
358 /* See target.h. */
359
360 int
361 target_set_syscall_catchpoint (int pid, bool needed, int any_count,
362 gdb::array_view<const int> syscall_counts)
363 {
364 target_ops *target = current_inferior ()->top_target ();
365
366 return target->set_syscall_catchpoint (pid, needed, any_count,
367 syscall_counts);
368 }
369
370 /* See target.h. */
371
372 void
373 target_rcmd (const char *command, struct ui_file *outbuf)
374 {
375 return current_inferior ()->top_target ()->rcmd (command, outbuf);
376 }
377
378 /* See target.h. */
379
380 bool
381 target_can_lock_scheduler ()
382 {
383 target_ops *target = current_inferior ()->top_target ();
384
385 return (target->get_thread_control_capabilities () & tc_schedlock) != 0;
386 }
387
388 /* See target.h. */
389
390 bool
391 target_can_async_p ()
392 {
393 return current_inferior ()->top_target ()->can_async_p ();
394 }
395
396 /* See target.h. */
397
398 bool
399 target_is_async_p ()
400 {
401 return current_inferior ()->top_target ()->is_async_p ();
402 }
403
404 exec_direction_kind
405 target_execution_direction ()
406 {
407 return current_inferior ()->top_target ()->execution_direction ();
408 }
409
410 /* See target.h. */
411
412 const char *
413 target_extra_thread_info (thread_info *tp)
414 {
415 return current_inferior ()->top_target ()->extra_thread_info (tp);
416 }
417
418 /* See target.h. */
419
420 char *
421 target_pid_to_exec_file (int pid)
422 {
423 return current_inferior ()->top_target ()->pid_to_exec_file (pid);
424 }
425
426 /* See target.h. */
427
428 gdbarch *
429 target_thread_architecture (ptid_t ptid)
430 {
431 return current_inferior ()->top_target ()->thread_architecture (ptid);
432 }
433
434 /* See target.h. */
435
436 int
437 target_find_memory_regions (find_memory_region_ftype func, void *data)
438 {
439 return current_inferior ()->top_target ()->find_memory_regions (func, data);
440 }
441
442 /* See target.h. */
443
444 gdb::unique_xmalloc_ptr<char>
445 target_make_corefile_notes (bfd *bfd, int *size_p)
446 {
447 return current_inferior ()->top_target ()->make_corefile_notes (bfd, size_p);
448 }
449
450 gdb_byte *
451 target_get_bookmark (const char *args, int from_tty)
452 {
453 return current_inferior ()->top_target ()->get_bookmark (args, from_tty);
454 }
455
456 void
457 target_goto_bookmark (const gdb_byte *arg, int from_tty)
458 {
459 return current_inferior ()->top_target ()->goto_bookmark (arg, from_tty);
460 }
461
462 /* See target.h. */
463
464 bool
465 target_stopped_by_watchpoint ()
466 {
467 return current_inferior ()->top_target ()->stopped_by_watchpoint ();
468 }
469
470 /* See target.h. */
471
472 bool
473 target_stopped_by_sw_breakpoint ()
474 {
475 return current_inferior ()->top_target ()->stopped_by_sw_breakpoint ();
476 }
477
478 bool
479 target_supports_stopped_by_sw_breakpoint ()
480 {
481 target_ops *target = current_inferior ()->top_target ();
482
483 return target->supports_stopped_by_sw_breakpoint ();
484 }
485
486 bool
487 target_stopped_by_hw_breakpoint ()
488 {
489 return current_inferior ()->top_target ()->stopped_by_hw_breakpoint ();
490 }
491
492 bool
493 target_supports_stopped_by_hw_breakpoint ()
494 {
495 target_ops *target = current_inferior ()->top_target ();
496
497 return target->supports_stopped_by_hw_breakpoint ();
498 }
499
500 /* See target.h. */
501
502 bool
503 target_have_steppable_watchpoint ()
504 {
505 return current_inferior ()->top_target ()->have_steppable_watchpoint ();
506 }
507
508 /* See target.h. */
509
510 int
511 target_can_use_hardware_watchpoint (bptype type, int cnt, int othertype)
512 {
513 target_ops *target = current_inferior ()->top_target ();
514
515 return target->can_use_hw_breakpoint (type, cnt, othertype);
516 }
517
518 /* See target.h. */
519
520 int
521 target_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
522 {
523 target_ops *target = current_inferior ()->top_target ();
524
525 return target->region_ok_for_hw_watchpoint (addr, len);
526 }
527
528
529 int
530 target_can_do_single_step ()
531 {
532 return current_inferior ()->top_target ()->can_do_single_step ();
533 }
534
535 /* See target.h. */
536
537 int
538 target_insert_watchpoint (CORE_ADDR addr, int len, target_hw_bp_type type,
539 expression *cond)
540 {
541 target_ops *target = current_inferior ()->top_target ();
542
543 return target->insert_watchpoint (addr, len, type, cond);
544 }
545
546 /* See target.h. */
547
548 int
549 target_remove_watchpoint (CORE_ADDR addr, int len, target_hw_bp_type type,
550 expression *cond)
551 {
552 target_ops *target = current_inferior ()->top_target ();
553
554 return target->remove_watchpoint (addr, len, type, cond);
555 }
556
557 /* See target.h. */
558
559 int
560 target_insert_hw_breakpoint (gdbarch *gdbarch, bp_target_info *bp_tgt)
561 {
562 target_ops *target = current_inferior ()->top_target ();
563
564 return target->insert_hw_breakpoint (gdbarch, bp_tgt);
565 }
566
567 /* See target.h. */
568
569 int
570 target_remove_hw_breakpoint (gdbarch *gdbarch, bp_target_info *bp_tgt)
571 {
572 target_ops *target = current_inferior ()->top_target ();
573
574 return target->remove_hw_breakpoint (gdbarch, bp_tgt);
575 }
576
577 /* See target.h. */
578
579 bool
580 target_can_accel_watchpoint_condition (CORE_ADDR addr, int len, int type,
581 expression *cond)
582 {
583 target_ops *target = current_inferior ()->top_target ();
584
585 return target->can_accel_watchpoint_condition (addr, len, type, cond);
586 }
587
588 /* See target.h. */
589
590 bool
591 target_can_execute_reverse ()
592 {
593 return current_inferior ()->top_target ()->can_execute_reverse ();
594 }
595
596 ptid_t
597 target_get_ada_task_ptid (long lwp, long tid)
598 {
599 return current_inferior ()->top_target ()->get_ada_task_ptid (lwp, tid);
600 }
601
602 bool
603 target_filesystem_is_local ()
604 {
605 return current_inferior ()->top_target ()->filesystem_is_local ();
606 }
607
608 void
609 target_trace_init ()
610 {
611 return current_inferior ()->top_target ()->trace_init ();
612 }
613
614 void
615 target_download_tracepoint (bp_location *location)
616 {
617 return current_inferior ()->top_target ()->download_tracepoint (location);
618 }
619
620 bool
621 target_can_download_tracepoint ()
622 {
623 return current_inferior ()->top_target ()->can_download_tracepoint ();
624 }
625
626 void
627 target_download_trace_state_variable (const trace_state_variable &tsv)
628 {
629 target_ops *target = current_inferior ()->top_target ();
630
631 return target->download_trace_state_variable (tsv);
632 }
633
634 void
635 target_enable_tracepoint (bp_location *loc)
636 {
637 return current_inferior ()->top_target ()->enable_tracepoint (loc);
638 }
639
640 void
641 target_disable_tracepoint (bp_location *loc)
642 {
643 return current_inferior ()->top_target ()->disable_tracepoint (loc);
644 }
645
646 void
647 target_trace_start ()
648 {
649 return current_inferior ()->top_target ()->trace_start ();
650 }
651
652 void
653 target_trace_set_readonly_regions ()
654 {
655 return current_inferior ()->top_target ()->trace_set_readonly_regions ();
656 }
657
658 int
659 target_get_trace_status (trace_status *ts)
660 {
661 return current_inferior ()->top_target ()->get_trace_status (ts);
662 }
663
664 void
665 target_get_tracepoint_status (breakpoint *tp, uploaded_tp *utp)
666 {
667 return current_inferior ()->top_target ()->get_tracepoint_status (tp, utp);
668 }
669
670 void
671 target_trace_stop ()
672 {
673 return current_inferior ()->top_target ()->trace_stop ();
674 }
675
676 int
677 target_trace_find (trace_find_type type, int num,
678 CORE_ADDR addr1, CORE_ADDR addr2, int *tpp)
679 {
680 target_ops *target = current_inferior ()->top_target ();
681
682 return target->trace_find (type, num, addr1, addr2, tpp);
683 }
684
685 bool
686 target_get_trace_state_variable_value (int tsv, LONGEST *val)
687 {
688 target_ops *target = current_inferior ()->top_target ();
689
690 return target->get_trace_state_variable_value (tsv, val);
691 }
692
693 int
694 target_save_trace_data (const char *filename)
695 {
696 return current_inferior ()->top_target ()->save_trace_data (filename);
697 }
698
699 int
700 target_upload_tracepoints (uploaded_tp **utpp)
701 {
702 return current_inferior ()->top_target ()->upload_tracepoints (utpp);
703 }
704
705 int
706 target_upload_trace_state_variables (uploaded_tsv **utsvp)
707 {
708 target_ops *target = current_inferior ()->top_target ();
709
710 return target->upload_trace_state_variables (utsvp);
711 }
712
713 LONGEST
714 target_get_raw_trace_data (gdb_byte *buf, ULONGEST offset, LONGEST len)
715 {
716 target_ops *target = current_inferior ()->top_target ();
717
718 return target->get_raw_trace_data (buf, offset, len);
719 }
720
721 int
722 target_get_min_fast_tracepoint_insn_len ()
723 {
724 target_ops *target = current_inferior ()->top_target ();
725
726 return target->get_min_fast_tracepoint_insn_len ();
727 }
728
729 void
730 target_set_disconnected_tracing (int val)
731 {
732 return current_inferior ()->top_target ()->set_disconnected_tracing (val);
733 }
734
735 void
736 target_set_circular_trace_buffer (int val)
737 {
738 return current_inferior ()->top_target ()->set_circular_trace_buffer (val);
739 }
740
741 void
742 target_set_trace_buffer_size (LONGEST val)
743 {
744 return current_inferior ()->top_target ()->set_trace_buffer_size (val);
745 }
746
747 bool
748 target_set_trace_notes (const char *user, const char *notes,
749 const char *stopnotes)
750 {
751 target_ops *target = current_inferior ()->top_target ();
752
753 return target->set_trace_notes (user, notes, stopnotes);
754 }
755
756 bool
757 target_get_tib_address (ptid_t ptid, CORE_ADDR *addr)
758 {
759 return current_inferior ()->top_target ()->get_tib_address (ptid, addr);
760 }
761
762 void
763 target_set_permissions ()
764 {
765 return current_inferior ()->top_target ()->set_permissions ();
766 }
767
768 bool
769 target_static_tracepoint_marker_at (CORE_ADDR addr,
770 static_tracepoint_marker *marker)
771 {
772 target_ops *target = current_inferior ()->top_target ();
773
774 return target->static_tracepoint_marker_at (addr, marker);
775 }
776
777 std::vector<static_tracepoint_marker>
778 target_static_tracepoint_markers_by_strid (const char *marker_id)
779 {
780 target_ops *target = current_inferior ()->top_target ();
781
782 return target->static_tracepoint_markers_by_strid (marker_id);
783 }
784
785 traceframe_info_up
786 target_traceframe_info ()
787 {
788 return current_inferior ()->top_target ()->traceframe_info ();
789 }
790
791 bool
792 target_use_agent (bool use)
793 {
794 return current_inferior ()->top_target ()->use_agent (use);
795 }
796
797 bool
798 target_can_use_agent ()
799 {
800 return current_inferior ()->top_target ()->can_use_agent ();
801 }
802
803 bool
804 target_augmented_libraries_svr4_read ()
805 {
806 return current_inferior ()->top_target ()->augmented_libraries_svr4_read ();
807 }
808
809 bool
810 target_supports_memory_tagging ()
811 {
812 return current_inferior ()->top_target ()->supports_memory_tagging ();
813 }
814
815 bool
816 target_fetch_memtags (CORE_ADDR address, size_t len, gdb::byte_vector &tags,
817 int type)
818 {
819 return current_inferior ()->top_target ()->fetch_memtags (address, len, tags, type);
820 }
821
822 bool
823 target_store_memtags (CORE_ADDR address, size_t len,
824 const gdb::byte_vector &tags, int type)
825 {
826 return current_inferior ()->top_target ()->store_memtags (address, len, tags, type);
827 }
828
829 void
830 target_log_command (const char *p)
831 {
832 return current_inferior ()->top_target ()->log_command (p);
833 }
834
835 /* This is used to implement the various target commands. */
836
837 static void
838 open_target (const char *args, int from_tty, struct cmd_list_element *command)
839 {
840 auto *ti = static_cast<target_info *> (get_cmd_context (command));
841 target_open_ftype *func = target_factories[ti];
842
843 if (targetdebug)
844 fprintf_unfiltered (gdb_stdlog, "-> %s->open (...)\n",
845 ti->shortname);
846
847 func (args, from_tty);
848
849 if (targetdebug)
850 fprintf_unfiltered (gdb_stdlog, "<- %s->open (%s, %d)\n",
851 ti->shortname, args, from_tty);
852 }
853
854 /* See target.h. */
855
856 void
857 add_target (const target_info &t, target_open_ftype *func,
858 completer_ftype *completer)
859 {
860 struct cmd_list_element *c;
861
862 auto &func_slot = target_factories[&t];
863 if (func_slot != nullptr)
864 internal_error (__FILE__, __LINE__,
865 _("target already added (\"%s\")."), t.shortname);
866 func_slot = func;
867
868 if (targetlist == NULL)
869 add_basic_prefix_cmd ("target", class_run, _("\
870 Connect to a target machine or process.\n\
871 The first argument is the type or protocol of the target machine.\n\
872 Remaining arguments are interpreted by the target protocol. For more\n\
873 information on the arguments for a particular protocol, type\n\
874 `help target ' followed by the protocol name."),
875 &targetlist, "target ", 0, &cmdlist);
876 c = add_cmd (t.shortname, no_class, t.doc, &targetlist);
877 set_cmd_context (c, (void *) &t);
878 set_cmd_sfunc (c, open_target);
879 if (completer != NULL)
880 set_cmd_completer (c, completer);
881 }
882
883 /* See target.h. */
884
885 void
886 add_deprecated_target_alias (const target_info &tinfo, const char *alias)
887 {
888 struct cmd_list_element *c;
889 char *alt;
890
891 /* If we used add_alias_cmd here, we would not get the deprecated warning;
892 see PR cli/15104. */
893 c = add_cmd (alias, no_class, tinfo.doc, &targetlist);
894 set_cmd_sfunc (c, open_target);
895 set_cmd_context (c, (void *) &tinfo);
896 alt = xstrprintf ("target %s", tinfo.shortname);
897 deprecate_cmd (c, alt);
898 }
899
900 /* Stub functions */
901
902 void
903 target_kill (void)
904 {
905 current_inferior ()->top_target ()->kill ();
906 }
907
908 void
909 target_load (const char *arg, int from_tty)
910 {
911 target_dcache_invalidate ();
912 current_inferior ()->top_target ()->load (arg, from_tty);
913 }
914
915 /* Define target_terminal's static m_terminal_state member. */
916
917 target_terminal_state target_terminal::m_terminal_state
918 = target_terminal_state::is_ours;
919
920 /* See target/target.h. */
921
922 void
923 target_terminal::init (void)
924 {
925 current_inferior ()->top_target ()->terminal_init ();
926
927 m_terminal_state = target_terminal_state::is_ours;
928 }
929
930 /* See target/target.h. */
931
932 void
933 target_terminal::inferior (void)
934 {
935 struct ui *ui = current_ui;
936
937 /* A background resume (``run&'') should leave GDB in control of the
938 terminal. */
939 if (ui->prompt_state != PROMPT_BLOCKED)
940 return;
941
942 /* Since we always run the inferior in the main console (unless "set
943 inferior-tty" is in effect), if some UI other than the main one
944 calls target_terminal::inferior, we leave the main UI's
945 terminal settings as is. */
946 if (ui != main_ui)
947 return;
948
949 /* If GDB is resuming the inferior in the foreground, install
950 inferior's terminal modes. */
951
952 struct inferior *inf = current_inferior ();
953
954 if (inf->terminal_state != target_terminal_state::is_inferior)
955 {
956 current_inferior ()->top_target ()->terminal_inferior ();
957 inf->terminal_state = target_terminal_state::is_inferior;
958 }
959
960 m_terminal_state = target_terminal_state::is_inferior;
961
962 /* If the user hit C-c before, pretend that it was hit right
963 here. */
964 if (check_quit_flag ())
965 target_pass_ctrlc ();
966 }
967
968 /* See target/target.h. */
969
970 void
971 target_terminal::restore_inferior (void)
972 {
973 struct ui *ui = current_ui;
974
975 /* See target_terminal::inferior(). */
976 if (ui->prompt_state != PROMPT_BLOCKED || ui != main_ui)
977 return;
978
979 /* Restore the terminal settings of inferiors that were in the
980 foreground but are now ours_for_output due to a temporary
981 target_terminal::ours_for_output() call. */
982
983 {
984 scoped_restore_current_inferior restore_inferior;
985
986 for (::inferior *inf : all_inferiors ())
987 {
988 if (inf->terminal_state == target_terminal_state::is_ours_for_output)
989 {
990 set_current_inferior (inf);
991 current_inferior ()->top_target ()->terminal_inferior ();
992 inf->terminal_state = target_terminal_state::is_inferior;
993 }
994 }
995 }
996
997 m_terminal_state = target_terminal_state::is_inferior;
998
999 /* If the user hit C-c before, pretend that it was hit right
1000 here. */
1001 if (check_quit_flag ())
1002 target_pass_ctrlc ();
1003 }
1004
1005 /* Switch terminal state to DESIRED_STATE, either is_ours, or
1006 is_ours_for_output. */
1007
1008 static void
1009 target_terminal_is_ours_kind (target_terminal_state desired_state)
1010 {
1011 scoped_restore_current_inferior restore_inferior;
1012
1013 /* Must do this in two passes. First, have all inferiors save the
1014 current terminal settings. Then, after all inferiors have had a
1015 chance to safely save the terminal settings, restore GDB's
1016 terminal settings. */
1017
1018 for (inferior *inf : all_inferiors ())
1019 {
1020 if (inf->terminal_state == target_terminal_state::is_inferior)
1021 {
1022 set_current_inferior (inf);
1023 current_inferior ()->top_target ()->terminal_save_inferior ();
1024 }
1025 }
1026
1027 for (inferior *inf : all_inferiors ())
1028 {
1029 /* Note we don't check is_inferior here like above because we
1030 need to handle 'is_ours_for_output -> is_ours' too. Careful
1031 to never transition from 'is_ours' to 'is_ours_for_output',
1032 though. */
1033 if (inf->terminal_state != target_terminal_state::is_ours
1034 && inf->terminal_state != desired_state)
1035 {
1036 set_current_inferior (inf);
1037 if (desired_state == target_terminal_state::is_ours)
1038 current_inferior ()->top_target ()->terminal_ours ();
1039 else if (desired_state == target_terminal_state::is_ours_for_output)
1040 current_inferior ()->top_target ()->terminal_ours_for_output ();
1041 else
1042 gdb_assert_not_reached ("unhandled desired state");
1043 inf->terminal_state = desired_state;
1044 }
1045 }
1046 }
1047
1048 /* See target/target.h. */
1049
1050 void
1051 target_terminal::ours ()
1052 {
1053 struct ui *ui = current_ui;
1054
1055 /* See target_terminal::inferior. */
1056 if (ui != main_ui)
1057 return;
1058
1059 if (m_terminal_state == target_terminal_state::is_ours)
1060 return;
1061
1062 target_terminal_is_ours_kind (target_terminal_state::is_ours);
1063 m_terminal_state = target_terminal_state::is_ours;
1064 }
1065
1066 /* See target/target.h. */
1067
1068 void
1069 target_terminal::ours_for_output ()
1070 {
1071 struct ui *ui = current_ui;
1072
1073 /* See target_terminal::inferior. */
1074 if (ui != main_ui)
1075 return;
1076
1077 if (!target_terminal::is_inferior ())
1078 return;
1079
1080 target_terminal_is_ours_kind (target_terminal_state::is_ours_for_output);
1081 target_terminal::m_terminal_state = target_terminal_state::is_ours_for_output;
1082 }
1083
1084 /* See target/target.h. */
1085
1086 void
1087 target_terminal::info (const char *arg, int from_tty)
1088 {
1089 current_inferior ()->top_target ()->terminal_info (arg, from_tty);
1090 }
1091
1092 /* See target.h. */
1093
1094 bool
1095 target_supports_terminal_ours (void)
1096 {
1097 /* The current top target is the target at the top of the target
1098 stack of the current inferior. While normally there's always an
1099 inferior, we must check for nullptr here because we can get here
1100 very early during startup, before the initial inferior is first
1101 created. */
1102 inferior *inf = current_inferior ();
1103
1104 if (inf == nullptr)
1105 return false;
1106 return inf->top_target ()->supports_terminal_ours ();
1107 }
1108
1109 static void
1110 tcomplain (void)
1111 {
1112 error (_("You can't do that when your target is `%s'"),
1113 current_inferior ()->top_target ()->shortname ());
1114 }
1115
1116 void
1117 noprocess (void)
1118 {
1119 error (_("You can't do that without a process to debug."));
1120 }
1121
1122 static void
1123 default_terminal_info (struct target_ops *self, const char *args, int from_tty)
1124 {
1125 printf_unfiltered (_("No saved terminal information.\n"));
1126 }
1127
1128 /* A default implementation for the to_get_ada_task_ptid target method.
1129
1130 This function builds the PTID by using both LWP and TID as part of
1131 the PTID lwp and tid elements. The pid used is the pid of the
1132 inferior_ptid. */
1133
1134 static ptid_t
1135 default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
1136 {
1137 return ptid_t (inferior_ptid.pid (), lwp, tid);
1138 }
1139
1140 static enum exec_direction_kind
1141 default_execution_direction (struct target_ops *self)
1142 {
1143 if (!target_can_execute_reverse ())
1144 return EXEC_FORWARD;
1145 else if (!target_can_async_p ())
1146 return EXEC_FORWARD;
1147 else
1148 gdb_assert_not_reached ("\
1149 to_execution_direction must be implemented for reverse async");
1150 }
1151
1152 /* See target.h. */
1153
1154 void
1155 decref_target (target_ops *t)
1156 {
1157 t->decref ();
1158 if (t->refcount () == 0)
1159 {
1160 if (t->stratum () == process_stratum)
1161 connection_list_remove (as_process_stratum_target (t));
1162 target_close (t);
1163 }
1164 }
1165
1166 /* See target.h. */
1167
1168 void
1169 target_stack::push (target_ops *t)
1170 {
1171 t->incref ();
1172
1173 strata stratum = t->stratum ();
1174
1175 if (stratum == process_stratum)
1176 connection_list_add (as_process_stratum_target (t));
1177
1178 /* If there's already a target at this stratum, remove it. */
1179
1180 if (m_stack[stratum] != NULL)
1181 unpush (m_stack[stratum]);
1182
1183 /* Now add the new one. */
1184 m_stack[stratum] = t;
1185
1186 if (m_top < stratum)
1187 m_top = stratum;
1188 }
1189
1190 /* See target.h. */
1191
1192 bool
1193 target_stack::unpush (target_ops *t)
1194 {
1195 gdb_assert (t != NULL);
1196
1197 strata stratum = t->stratum ();
1198
1199 if (stratum == dummy_stratum)
1200 internal_error (__FILE__, __LINE__,
1201 _("Attempt to unpush the dummy target"));
1202
1203 /* Look for the specified target. Note that a target can only occur
1204 once in the target stack. */
1205
1206 if (m_stack[stratum] != t)
1207 {
1208 /* If T wasn't pushed, quit. Only open targets should be
1209 closed. */
1210 return false;
1211 }
1212
1213 /* Unchain the target. */
1214 m_stack[stratum] = NULL;
1215
1216 if (m_top == stratum)
1217 m_top = t->beneath ()->stratum ();
1218
1219 /* Finally close the target, if there are no inferiors
1220 referencing this target still. Note we do this after unchaining,
1221 so any target method calls from within the target_close
1222 implementation don't end up in T anymore. Do leave the target
1223 open if there are other inferiors still referencing this
1224 target. */
1225 decref_target (t);
1226
1227 return true;
1228 }
1229
1230 /* Unpush TARGET and assert that it worked. */
1231
1232 static void
1233 unpush_target_and_assert (struct target_ops *target)
1234 {
1235 if (!current_inferior ()->unpush_target (target))
1236 {
1237 fprintf_unfiltered (gdb_stderr,
1238 "pop_all_targets couldn't find target %s\n",
1239 target->shortname ());
1240 internal_error (__FILE__, __LINE__,
1241 _("failed internal consistency check"));
1242 }
1243 }
1244
1245 void
1246 pop_all_targets_above (enum strata above_stratum)
1247 {
1248 while ((int) (current_inferior ()->top_target ()->stratum ())
1249 > (int) above_stratum)
1250 unpush_target_and_assert (current_inferior ()->top_target ());
1251 }
1252
1253 /* See target.h. */
1254
1255 void
1256 pop_all_targets_at_and_above (enum strata stratum)
1257 {
1258 while ((int) (current_inferior ()->top_target ()->stratum ())
1259 >= (int) stratum)
1260 unpush_target_and_assert (current_inferior ()->top_target ());
1261 }
1262
1263 void
1264 pop_all_targets (void)
1265 {
1266 pop_all_targets_above (dummy_stratum);
1267 }
1268
1269 void
1270 target_unpusher::operator() (struct target_ops *ops) const
1271 {
1272 current_inferior ()->unpush_target (ops);
1273 }
1274
1275 /* Default implementation of to_get_thread_local_address. */
1276
1277 static void
1278 generic_tls_error (void)
1279 {
1280 throw_error (TLS_GENERIC_ERROR,
1281 _("Cannot find thread-local variables on this target"));
1282 }
1283
1284 /* Using the objfile specified in OBJFILE, find the address for the
1285 current thread's thread-local storage with offset OFFSET. */
1286 CORE_ADDR
1287 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
1288 {
1289 volatile CORE_ADDR addr = 0;
1290 struct target_ops *target = current_inferior ()->top_target ();
1291 struct gdbarch *gdbarch = target_gdbarch ();
1292
1293 if (gdbarch_fetch_tls_load_module_address_p (gdbarch))
1294 {
1295 ptid_t ptid = inferior_ptid;
1296
1297 try
1298 {
1299 CORE_ADDR lm_addr;
1300
1301 /* Fetch the load module address for this objfile. */
1302 lm_addr = gdbarch_fetch_tls_load_module_address (gdbarch,
1303 objfile);
1304
1305 if (gdbarch_get_thread_local_address_p (gdbarch))
1306 addr = gdbarch_get_thread_local_address (gdbarch, ptid, lm_addr,
1307 offset);
1308 else
1309 addr = target->get_thread_local_address (ptid, lm_addr, offset);
1310 }
1311 /* If an error occurred, print TLS related messages here. Otherwise,
1312 throw the error to some higher catcher. */
1313 catch (const gdb_exception &ex)
1314 {
1315 int objfile_is_library = (objfile->flags & OBJF_SHARED);
1316
1317 switch (ex.error)
1318 {
1319 case TLS_NO_LIBRARY_SUPPORT_ERROR:
1320 error (_("Cannot find thread-local variables "
1321 "in this thread library."));
1322 break;
1323 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
1324 if (objfile_is_library)
1325 error (_("Cannot find shared library `%s' in dynamic"
1326 " linker's load module list"), objfile_name (objfile));
1327 else
1328 error (_("Cannot find executable file `%s' in dynamic"
1329 " linker's load module list"), objfile_name (objfile));
1330 break;
1331 case TLS_NOT_ALLOCATED_YET_ERROR:
1332 if (objfile_is_library)
1333 error (_("The inferior has not yet allocated storage for"
1334 " thread-local variables in\n"
1335 "the shared library `%s'\n"
1336 "for %s"),
1337 objfile_name (objfile),
1338 target_pid_to_str (ptid).c_str ());
1339 else
1340 error (_("The inferior has not yet allocated storage for"
1341 " thread-local variables in\n"
1342 "the executable `%s'\n"
1343 "for %s"),
1344 objfile_name (objfile),
1345 target_pid_to_str (ptid).c_str ());
1346 break;
1347 case TLS_GENERIC_ERROR:
1348 if (objfile_is_library)
1349 error (_("Cannot find thread-local storage for %s, "
1350 "shared library %s:\n%s"),
1351 target_pid_to_str (ptid).c_str (),
1352 objfile_name (objfile), ex.what ());
1353 else
1354 error (_("Cannot find thread-local storage for %s, "
1355 "executable file %s:\n%s"),
1356 target_pid_to_str (ptid).c_str (),
1357 objfile_name (objfile), ex.what ());
1358 break;
1359 default:
1360 throw;
1361 break;
1362 }
1363 }
1364 }
1365 else
1366 error (_("Cannot find thread-local variables on this target"));
1367
1368 return addr;
1369 }
1370
1371 const char *
1372 target_xfer_status_to_string (enum target_xfer_status status)
1373 {
1374 #define CASE(X) case X: return #X
1375 switch (status)
1376 {
1377 CASE(TARGET_XFER_E_IO);
1378 CASE(TARGET_XFER_UNAVAILABLE);
1379 default:
1380 return "<unknown>";
1381 }
1382 #undef CASE
1383 };
1384
1385
1386 /* See target.h. */
1387
1388 gdb::unique_xmalloc_ptr<char>
1389 target_read_string (CORE_ADDR memaddr, int len, int *bytes_read)
1390 {
1391 gdb::unique_xmalloc_ptr<gdb_byte> buffer;
1392
1393 int ignore;
1394 if (bytes_read == nullptr)
1395 bytes_read = &ignore;
1396
1397 /* Note that the endian-ness does not matter here. */
1398 int errcode = read_string (memaddr, -1, 1, len, BFD_ENDIAN_LITTLE,
1399 &buffer, bytes_read);
1400 if (errcode != 0)
1401 return {};
1402
1403 return gdb::unique_xmalloc_ptr<char> ((char *) buffer.release ());
1404 }
1405
1406 const target_section_table *
1407 target_get_section_table (struct target_ops *target)
1408 {
1409 return target->get_section_table ();
1410 }
1411
1412 /* Find a section containing ADDR. */
1413
1414 const struct target_section *
1415 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1416 {
1417 const target_section_table *table = target_get_section_table (target);
1418
1419 if (table == NULL)
1420 return NULL;
1421
1422 for (const target_section &secp : *table)
1423 {
1424 if (addr >= secp.addr && addr < secp.endaddr)
1425 return &secp;
1426 }
1427 return NULL;
1428 }
1429
1430 /* See target.h. */
1431
1432 const target_section_table *
1433 default_get_section_table ()
1434 {
1435 return &current_program_space->target_sections ();
1436 }
1437
1438 /* Helper for the memory xfer routines. Checks the attributes of the
1439 memory region of MEMADDR against the read or write being attempted.
1440 If the access is permitted, returns true; otherwise returns false.
1441 REGION_P is an optional output parameter. If not-NULL, it is
1442 filled with a pointer to the memory region of MEMADDR. REG_LEN
1443 returns LEN trimmed to the end of the region. This is how much the
1444 caller can continue requesting, if the access is permitted. A
1445 single xfer request must not straddle memory region boundaries. */
1446
1447 static int
1448 memory_xfer_check_region (gdb_byte *readbuf, const gdb_byte *writebuf,
1449 ULONGEST memaddr, ULONGEST len, ULONGEST *reg_len,
1450 struct mem_region **region_p)
1451 {
1452 struct mem_region *region;
1453
1454 region = lookup_mem_region (memaddr);
1455
1456 if (region_p != NULL)
1457 *region_p = region;
1458
1459 switch (region->attrib.mode)
1460 {
1461 case MEM_RO:
1462 if (writebuf != NULL)
1463 return 0;
1464 break;
1465
1466 case MEM_WO:
1467 if (readbuf != NULL)
1468 return 0;
1469 break;
1470
1471 case MEM_FLASH:
1472 /* We only support writing to flash during "load" for now. */
1473 if (writebuf != NULL)
1474 error (_("Writing to flash memory forbidden in this context"));
1475 break;
1476
1477 case MEM_NONE:
1478 return 0;
1479 }
1480
1481 /* region->hi == 0 means there's no upper bound. */
1482 if (memaddr + len < region->hi || region->hi == 0)
1483 *reg_len = len;
1484 else
1485 *reg_len = region->hi - memaddr;
1486
1487 return 1;
1488 }
1489
1490 /* Read memory from more than one valid target. A core file, for
1491 instance, could have some of the memory but delegate other bits to
1492 the target below it. So, we must manually try all targets. */
1493
1494 enum target_xfer_status
1495 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1496 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1497 ULONGEST *xfered_len)
1498 {
1499 enum target_xfer_status res;
1500
1501 do
1502 {
1503 res = ops->xfer_partial (TARGET_OBJECT_MEMORY, NULL,
1504 readbuf, writebuf, memaddr, len,
1505 xfered_len);
1506 if (res == TARGET_XFER_OK)
1507 break;
1508
1509 /* Stop if the target reports that the memory is not available. */
1510 if (res == TARGET_XFER_UNAVAILABLE)
1511 break;
1512
1513 /* Don't continue past targets which have all the memory.
1514 At one time, this code was necessary to read data from
1515 executables / shared libraries when data for the requested
1516 addresses weren't available in the core file. But now the
1517 core target handles this case itself. */
1518 if (ops->has_all_memory ())
1519 break;
1520
1521 ops = ops->beneath ();
1522 }
1523 while (ops != NULL);
1524
1525 /* The cache works at the raw memory level. Make sure the cache
1526 gets updated with raw contents no matter what kind of memory
1527 object was originally being written. Note we do write-through
1528 first, so that if the write fails, we don't update the cache with
1529 contents that never made it to the target. */
1530 if (writebuf != NULL
1531 && inferior_ptid != null_ptid
1532 && target_dcache_init_p ()
1533 && (stack_cache_enabled_p () || code_cache_enabled_p ()))
1534 {
1535 DCACHE *dcache = target_dcache_get ();
1536
1537 /* Note that writing to an area of memory which wasn't present
1538 in the cache doesn't cause it to be loaded in. */
1539 dcache_update (dcache, res, memaddr, writebuf, *xfered_len);
1540 }
1541
1542 return res;
1543 }
1544
1545 /* Perform a partial memory transfer.
1546 For docs see target.h, to_xfer_partial. */
1547
1548 static enum target_xfer_status
1549 memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
1550 gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
1551 ULONGEST len, ULONGEST *xfered_len)
1552 {
1553 enum target_xfer_status res;
1554 ULONGEST reg_len;
1555 struct mem_region *region;
1556 struct inferior *inf;
1557
1558 /* For accesses to unmapped overlay sections, read directly from
1559 files. Must do this first, as MEMADDR may need adjustment. */
1560 if (readbuf != NULL && overlay_debugging)
1561 {
1562 struct obj_section *section = find_pc_overlay (memaddr);
1563
1564 if (pc_in_unmapped_range (memaddr, section))
1565 {
1566 const target_section_table *table = target_get_section_table (ops);
1567 const char *section_name = section->the_bfd_section->name;
1568
1569 memaddr = overlay_mapped_address (memaddr, section);
1570
1571 auto match_cb = [=] (const struct target_section *s)
1572 {
1573 return (strcmp (section_name, s->the_bfd_section->name) == 0);
1574 };
1575
1576 return section_table_xfer_memory_partial (readbuf, writebuf,
1577 memaddr, len, xfered_len,
1578 *table, match_cb);
1579 }
1580 }
1581
1582 /* Try the executable files, if "trust-readonly-sections" is set. */
1583 if (readbuf != NULL && trust_readonly)
1584 {
1585 const struct target_section *secp
1586 = target_section_by_addr (ops, memaddr);
1587 if (secp != NULL
1588 && (bfd_section_flags (secp->the_bfd_section) & SEC_READONLY))
1589 {
1590 const target_section_table *table = target_get_section_table (ops);
1591 return section_table_xfer_memory_partial (readbuf, writebuf,
1592 memaddr, len, xfered_len,
1593 *table);
1594 }
1595 }
1596
1597 /* Try GDB's internal data cache. */
1598
1599 if (!memory_xfer_check_region (readbuf, writebuf, memaddr, len, &reg_len,
1600 &region))
1601 return TARGET_XFER_E_IO;
1602
1603 if (inferior_ptid != null_ptid)
1604 inf = current_inferior ();
1605 else
1606 inf = NULL;
1607
1608 if (inf != NULL
1609 && readbuf != NULL
1610 /* The dcache reads whole cache lines; that doesn't play well
1611 with reading from a trace buffer, because reading outside of
1612 the collected memory range fails. */
1613 && get_traceframe_number () == -1
1614 && (region->attrib.cache
1615 || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
1616 || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
1617 {
1618 DCACHE *dcache = target_dcache_get_or_init ();
1619
1620 return dcache_read_memory_partial (ops, dcache, memaddr, readbuf,
1621 reg_len, xfered_len);
1622 }
1623
1624 /* If none of those methods found the memory we wanted, fall back
1625 to a target partial transfer. Normally a single call to
1626 to_xfer_partial is enough; if it doesn't recognize an object
1627 it will call the to_xfer_partial of the next target down.
1628 But for memory this won't do. Memory is the only target
1629 object which can be read from more than one valid target.
1630 A core file, for instance, could have some of the memory but
1631 delegate other bits to the target below it. So, we must
1632 manually try all targets. */
1633
1634 res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
1635 xfered_len);
1636
1637 /* If we still haven't got anything, return the last error. We
1638 give up. */
1639 return res;
1640 }
1641
1642 /* Perform a partial memory transfer. For docs see target.h,
1643 to_xfer_partial. */
1644
1645 static enum target_xfer_status
1646 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1647 gdb_byte *readbuf, const gdb_byte *writebuf,
1648 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1649 {
1650 enum target_xfer_status res;
1651
1652 /* Zero length requests are ok and require no work. */
1653 if (len == 0)
1654 return TARGET_XFER_EOF;
1655
1656 memaddr = address_significant (target_gdbarch (), memaddr);
1657
1658 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1659 breakpoint insns, thus hiding out from higher layers whether
1660 there are software breakpoints inserted in the code stream. */
1661 if (readbuf != NULL)
1662 {
1663 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1664 xfered_len);
1665
1666 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1667 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, *xfered_len);
1668 }
1669 else
1670 {
1671 /* A large write request is likely to be partially satisfied
1672 by memory_xfer_partial_1. We will continually malloc
1673 and free a copy of the entire write request for breakpoint
1674 shadow handling even though we only end up writing a small
1675 subset of it. Cap writes to a limit specified by the target
1676 to mitigate this. */
1677 len = std::min (ops->get_memory_xfer_limit (), len);
1678
1679 gdb::byte_vector buf (writebuf, writebuf + len);
1680 breakpoint_xfer_memory (NULL, buf.data (), writebuf, memaddr, len);
1681 res = memory_xfer_partial_1 (ops, object, NULL, buf.data (), memaddr, len,
1682 xfered_len);
1683 }
1684
1685 return res;
1686 }
1687
1688 scoped_restore_tmpl<int>
1689 make_scoped_restore_show_memory_breakpoints (int show)
1690 {
1691 return make_scoped_restore (&show_memory_breakpoints, show);
1692 }
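/* Illustrative sketch (editor's addition, not part of the original
   target.c): a caller that wants memory reads to return the raw
   contents, including any software breakpoint instructions GDB has
   inserted, can flip SHOW_MEMORY_BREAKPOINTS for the duration of a
   scope.  The function name below is invented for illustration.  */

#if 0 /* Example only; not compiled.  */

static int
example_read_unshadowed (CORE_ADDR addr, gdb_byte *buf, ssize_t len)
{
  scoped_restore_tmpl<int> restore
    = make_scoped_restore_show_memory_breakpoints (1);

  /* While RESTORE is in scope, memory_xfer_partial skips the
     breakpoint_xfer_memory call that would otherwise replace
     breakpoint locations with their saved shadow contents.  */
  return target_read_memory (addr, buf, len);
}   /* The previous value of show_memory_breakpoints is restored here.  */

#endif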
1693
1694 /* For docs see target.h, to_xfer_partial. */
1695
1696 enum target_xfer_status
1697 target_xfer_partial (struct target_ops *ops,
1698 enum target_object object, const char *annex,
1699 gdb_byte *readbuf, const gdb_byte *writebuf,
1700 ULONGEST offset, ULONGEST len,
1701 ULONGEST *xfered_len)
1702 {
1703 enum target_xfer_status retval;
1704
1705 /* Transfer is done when LEN is zero. */
1706 if (len == 0)
1707 return TARGET_XFER_EOF;
1708
1709 if (writebuf && !may_write_memory)
1710 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1711 core_addr_to_string_nz (offset), plongest (len));
1712
1713 *xfered_len = 0;
1714
1715 /* If this is a memory transfer, let the memory-specific code
1716 have a look at it instead. Memory transfers are more
1717 complicated. */
1718 if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
1719 || object == TARGET_OBJECT_CODE_MEMORY)
1720 retval = memory_xfer_partial (ops, object, readbuf,
1721 writebuf, offset, len, xfered_len);
1722 else if (object == TARGET_OBJECT_RAW_MEMORY)
1723 {
1724 /* Skip/avoid accessing the target if the memory region
1725 attributes block the access. Check this here instead of in
1726 raw_memory_xfer_partial as otherwise we'd end up checking
1727 this twice in the case where the memory_xfer_partial path is
1728 taken: once before checking the dcache, and again in the
1729 tail call to raw_memory_xfer_partial. */
1730 if (!memory_xfer_check_region (readbuf, writebuf, offset, len, &len,
1731 NULL))
1732 return TARGET_XFER_E_IO;
1733
1734 /* Request the normal memory object from other layers. */
1735 retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
1736 xfered_len);
1737 }
1738 else
1739 retval = ops->xfer_partial (object, annex, readbuf,
1740 writebuf, offset, len, xfered_len);
1741
1742 if (targetdebug)
1743 {
1744 const unsigned char *myaddr = NULL;
1745
1746 fprintf_unfiltered (gdb_stdlog,
1747 "%s:target_xfer_partial "
1748 "(%d, %s, %s, %s, %s, %s) = %d, %s",
1749 ops->shortname (),
1750 (int) object,
1751 (annex ? annex : "(null)"),
1752 host_address_to_string (readbuf),
1753 host_address_to_string (writebuf),
1754 core_addr_to_string_nz (offset),
1755 pulongest (len), retval,
1756 pulongest (*xfered_len));
1757
1758 if (readbuf)
1759 myaddr = readbuf;
1760 if (writebuf)
1761 myaddr = writebuf;
1762 if (retval == TARGET_XFER_OK && myaddr != NULL)
1763 {
1764 int i;
1765
1766 fputs_unfiltered (", bytes =", gdb_stdlog);
1767 for (i = 0; i < *xfered_len; i++)
1768 {
1769 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1770 {
1771 if (targetdebug < 2 && i > 0)
1772 {
1773 fprintf_unfiltered (gdb_stdlog, " ...");
1774 break;
1775 }
1776 fprintf_unfiltered (gdb_stdlog, "\n");
1777 }
1778
1779 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1780 }
1781 }
1782
1783 fputc_unfiltered ('\n', gdb_stdlog);
1784 }
1785
1786 /* Check that implementations of to_xfer_partial update *XFERED_LEN
1787 properly. Do the assertion after printing the debug messages, so that
1788 on an assertion failure the debug messages provide more clues. */
1789 if (retval == TARGET_XFER_OK || retval == TARGET_XFER_UNAVAILABLE)
1790 gdb_assert (*xfered_len > 0);
1791
1792 return retval;
1793 }
1794
1795 /* Read LEN bytes of target memory at address MEMADDR, placing the
1796 results in GDB's memory at MYADDR. Returns either 0 for success or
1797 -1 if any error occurs.
1798
1799 If an error occurs, no guarantee is made about the contents of the data at
1800 MYADDR. In particular, the caller should not depend upon partial reads
1801 filling the buffer with good data. There is no way for the caller to know
1802 how much good data might have been transferred anyway. Callers that can
1803 deal with partial reads should call target_read (which will retry until
1804 it makes no progress, and then return how much was transferred). */
1805
1806 int
1807 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1808 {
1809 if (target_read (current_inferior ()->top_target (),
1810 TARGET_OBJECT_MEMORY, NULL,
1811 myaddr, memaddr, len) == len)
1812 return 0;
1813 else
1814 return -1;
1815 }
1816
1817 /* See target/target.h. */
1818
1819 int
1820 target_read_uint32 (CORE_ADDR memaddr, uint32_t *result)
1821 {
1822 gdb_byte buf[4];
1823 int r;
1824
1825 r = target_read_memory (memaddr, buf, sizeof buf);
1826 if (r != 0)
1827 return r;
1828 *result = extract_unsigned_integer (buf, sizeof buf,
1829 gdbarch_byte_order (target_gdbarch ()));
1830 return 0;
1831 }
1832
1833 /* Like target_read_memory, but specify explicitly that this is a read
1834 from the target's raw memory. That is, this read bypasses the
1835 dcache, breakpoint shadowing, etc. */
1836
1837 int
1838 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1839 {
1840 if (target_read (current_inferior ()->top_target (),
1841 TARGET_OBJECT_RAW_MEMORY, NULL,
1842 myaddr, memaddr, len) == len)
1843 return 0;
1844 else
1845 return -1;
1846 }
1847
1848 /* Like target_read_memory, but specify explicitly that this is a read from
1849 the target's stack. This may trigger different cache behavior. */
1850
1851 int
1852 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1853 {
1854 if (target_read (current_inferior ()->top_target (),
1855 TARGET_OBJECT_STACK_MEMORY, NULL,
1856 myaddr, memaddr, len) == len)
1857 return 0;
1858 else
1859 return -1;
1860 }
1861
1862 /* Like target_read_memory, but specify explicitly that this is a read from
1863 the target's code. This may trigger different cache behavior. */
1864
1865 int
1866 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1867 {
1868 if (target_read (current_inferior ()->top_target (),
1869 TARGET_OBJECT_CODE_MEMORY, NULL,
1870 myaddr, memaddr, len) == len)
1871 return 0;
1872 else
1873 return -1;
1874 }
1875
1876 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1877 Returns either 0 for success or -1 if any error occurs. If an
1878 error occurs, no guarantee is made about how much data got written.
1879 Callers that can deal with partial writes should call
1880 target_write. */
1881
1882 int
1883 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1884 {
1885 if (target_write (current_inferior ()->top_target (),
1886 TARGET_OBJECT_MEMORY, NULL,
1887 myaddr, memaddr, len) == len)
1888 return 0;
1889 else
1890 return -1;
1891 }
1892
1893 /* Write LEN bytes from MYADDR to target raw memory at address
1894 MEMADDR. Returns either 0 for success or -1 if any error occurs.
1895 If an error occurs, no guarantee is made about how much data got
1896 written. Callers that can deal with partial writes should call
1897 target_write. */
1898
1899 int
1900 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1901 {
1902 if (target_write (current_inferior ()->top_target (),
1903 TARGET_OBJECT_RAW_MEMORY, NULL,
1904 myaddr, memaddr, len) == len)
1905 return 0;
1906 else
1907 return -1;
1908 }
1909
1910 /* Fetch the target's memory map. */
1911
1912 std::vector<mem_region>
1913 target_memory_map (void)
1914 {
1915 target_ops *target = current_inferior ()->top_target ();
1916 std::vector<mem_region> result = target->memory_map ();
1917 if (result.empty ())
1918 return result;
1919
1920 std::sort (result.begin (), result.end ());
1921
1922 /* Check that regions do not overlap. Simultaneously assign
1923 a numbering for the "mem" commands to use to refer to
1924 each region. */
1925 mem_region *last_one = NULL;
1926 for (size_t ix = 0; ix < result.size (); ix++)
1927 {
1928 mem_region *this_one = &result[ix];
1929 this_one->number = ix;
1930
1931 if (last_one != NULL && last_one->hi > this_one->lo)
1932 {
1933 warning (_("Overlapping regions in memory map: ignoring"));
1934 return std::vector<mem_region> ();
1935 }
1936
1937 last_one = this_one;
1938 }
1939
1940 return result;
1941 }
1942
1943 void
1944 target_flash_erase (ULONGEST address, LONGEST length)
1945 {
1946 current_inferior ()->top_target ()->flash_erase (address, length);
1947 }
1948
1949 void
1950 target_flash_done (void)
1951 {
1952 current_inferior ()->top_target ()->flash_done ();
1953 }
1954
1955 static void
1956 show_trust_readonly (struct ui_file *file, int from_tty,
1957 struct cmd_list_element *c, const char *value)
1958 {
1959 fprintf_filtered (file,
1960 _("Mode for reading from readonly sections is %s.\n"),
1961 value);
1962 }
1963
1964 /* Target vector read/write partial wrapper functions. */
1965
1966 static enum target_xfer_status
1967 target_read_partial (struct target_ops *ops,
1968 enum target_object object,
1969 const char *annex, gdb_byte *buf,
1970 ULONGEST offset, ULONGEST len,
1971 ULONGEST *xfered_len)
1972 {
1973 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
1974 xfered_len);
1975 }
1976
1977 static enum target_xfer_status
1978 target_write_partial (struct target_ops *ops,
1979 enum target_object object,
1980 const char *annex, const gdb_byte *buf,
1981 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
1982 {
1983 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
1984 xfered_len);
1985 }
1986
1987 /* Wrappers to perform the full transfer. */
1988
1989 /* For docs on target_read see target.h. */
1990
1991 LONGEST
1992 target_read (struct target_ops *ops,
1993 enum target_object object,
1994 const char *annex, gdb_byte *buf,
1995 ULONGEST offset, LONGEST len)
1996 {
1997 LONGEST xfered_total = 0;
1998 int unit_size = 1;
1999
2000 /* If we are reading from a memory object, find the length of an addressable
2001 unit for that architecture. */
2002 if (object == TARGET_OBJECT_MEMORY
2003 || object == TARGET_OBJECT_STACK_MEMORY
2004 || object == TARGET_OBJECT_CODE_MEMORY
2005 || object == TARGET_OBJECT_RAW_MEMORY)
2006 unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
2007
2008 while (xfered_total < len)
2009 {
2010 ULONGEST xfered_partial;
2011 enum target_xfer_status status;
2012
2013 status = target_read_partial (ops, object, annex,
2014 buf + xfered_total * unit_size,
2015 offset + xfered_total, len - xfered_total,
2016 &xfered_partial);
2017
2018 /* Call an observer, notifying them of the xfer progress? */
2019 if (status == TARGET_XFER_EOF)
2020 return xfered_total;
2021 else if (status == TARGET_XFER_OK)
2022 {
2023 xfered_total += xfered_partial;
2024 QUIT;
2025 }
2026 else
2027 return TARGET_XFER_E_IO;
2028
2029 }
2030 return len;
2031 }
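
/* Illustrative sketch (not part of GDB): target_read loops over partial
   transfers, so on success it returns LEN; a short return means the end
   of the object was reached and a negative value means an error.  ADDR
   is a hypothetical placeholder address.  */
#if 0
  gdb_byte buf[8];
  CORE_ADDR addr = 0x1000;	/* Hypothetical address.  */

  if (target_read (current_inferior ()->top_target (),
                   TARGET_OBJECT_MEMORY, NULL, buf, addr, sizeof buf)
      == sizeof buf)
    {
      /* All eight addressable units were read into BUF.  */
    }
#endif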
2032
2033 /* Assuming that the entire [begin, end) range of memory cannot be
2034 read, try to read whatever subrange is possible to read.
2035
2036 The function returns, in RESULT, either zero or one memory block.
2037 If there's a readable subrange at the beginning, it is completely
2038 read and returned. Any further readable subrange will not be read.
2039 Otherwise, if there's a readable subrange at the end, it will be
2040 completely read and returned. Any readable subranges before it
2041 (which necessarily do not start at the beginning) are ignored.
2042 In all other cases -- no readable subrange, or subrange(s) touching
2043 neither the beginning nor the end -- nothing is returned.
2044
2045 The purpose of this function is to handle a read across a boundary
2046 of accessible memory in a case when memory map is not available.
2047 The above restrictions are fine for this case, but will give
2048 incorrect results if the memory is 'patchy'. However, supporting
2049 'patchy' memory would require trying to read every single byte,
2050 and that seems an unacceptable solution. An explicit memory map is
2051 recommended for this case -- and target_read_memory_robust will
2052 take care of reading multiple ranges then. */
2053
2054 static void
2055 read_whatever_is_readable (struct target_ops *ops,
2056 const ULONGEST begin, const ULONGEST end,
2057 int unit_size,
2058 std::vector<memory_read_result> *result)
2059 {
2060 ULONGEST current_begin = begin;
2061 ULONGEST current_end = end;
2062 int forward;
2063 ULONGEST xfered_len;
2064
2065 /* If the range is a single byte that we already failed to read, give up. */
2066 if (end - begin <= 1)
2067 return;
2068
2069 gdb::unique_xmalloc_ptr<gdb_byte> buf ((gdb_byte *) xmalloc (end - begin));
2070
2071 /* Check that either the first or the last byte is readable, and give
2072 up if not. This heuristic is meant to permit reading accessible
2073 memory at the boundary of an accessible region. */
2074 if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2075 buf.get (), begin, 1, &xfered_len) == TARGET_XFER_OK)
2076 {
2077 forward = 1;
2078 ++current_begin;
2079 }
2080 else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2081 buf.get () + (end - begin) - 1, end - 1, 1,
2082 &xfered_len) == TARGET_XFER_OK)
2083 {
2084 forward = 0;
2085 --current_end;
2086 }
2087 else
2088 return;
2089
2090 /* Loop invariant is that the [current_begin, current_end) was previously
2091 found to be not readable as a whole.
2092
2093 Note loop condition -- if the range has 1 byte, we can't divide the range
2094 so there's no point trying further. */
2095 while (current_end - current_begin > 1)
2096 {
2097 ULONGEST first_half_begin, first_half_end;
2098 ULONGEST second_half_begin, second_half_end;
2099 LONGEST xfer;
2100 ULONGEST middle = current_begin + (current_end - current_begin) / 2;
2101
2102 if (forward)
2103 {
2104 first_half_begin = current_begin;
2105 first_half_end = middle;
2106 second_half_begin = middle;
2107 second_half_end = current_end;
2108 }
2109 else
2110 {
2111 first_half_begin = middle;
2112 first_half_end = current_end;
2113 second_half_begin = current_begin;
2114 second_half_end = middle;
2115 }
2116
2117 xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2118 buf.get () + (first_half_begin - begin) * unit_size,
2119 first_half_begin,
2120 first_half_end - first_half_begin);
2121
2122 if (xfer == first_half_end - first_half_begin)
2123 {
2124 /* This half reads up fine. So, the error must be in the
2125 other half. */
2126 current_begin = second_half_begin;
2127 current_end = second_half_end;
2128 }
2129 else
2130 {
2131 /* This half is not readable. Because we've tried one byte, we
2132 know some part of this half is actually readable. Go to the next
2133 iteration to divide again and try to read.
2134
2135 We don't handle the other half, because this function only tries
2136 to read a single readable subrange. */
2137 current_begin = first_half_begin;
2138 current_end = first_half_end;
2139 }
2140 }
2141
2142 if (forward)
2143 {
2144 /* The [begin, current_begin) range has been read. */
2145 result->emplace_back (begin, current_begin, std::move (buf));
2146 }
2147 else
2148 {
2149 /* The [current_end, end) range has been read. */
2150 LONGEST region_len = end - current_end;
2151
2152 gdb::unique_xmalloc_ptr<gdb_byte> data
2153 ((gdb_byte *) xmalloc (region_len * unit_size));
2154 memcpy (data.get (), buf.get () + (current_end - begin) * unit_size,
2155 region_len * unit_size);
2156 result->emplace_back (current_end, end, std::move (data));
2157 }
2158 }
2159
2160 std::vector<memory_read_result>
2161 read_memory_robust (struct target_ops *ops,
2162 const ULONGEST offset, const LONGEST len)
2163 {
2164 std::vector<memory_read_result> result;
2165 int unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
2166
2167 LONGEST xfered_total = 0;
2168 while (xfered_total < len)
2169 {
2170 struct mem_region *region = lookup_mem_region (offset + xfered_total);
2171 LONGEST region_len;
2172
2173 /* If there is no explicit region, a fake one should be created. */
2174 gdb_assert (region);
2175
2176 if (region->hi == 0)
2177 region_len = len - xfered_total;
2178 else
2179 region_len = region->hi - offset;
2180
2181 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2182 {
2183 /* Cannot read this region. Note that we can end up here only
2184 if the region is explicitly marked inaccessible, or
2185 'inaccessible-by-default' is in effect. */
2186 xfered_total += region_len;
2187 }
2188 else
2189 {
2190 LONGEST to_read = std::min (len - xfered_total, region_len);
2191 gdb::unique_xmalloc_ptr<gdb_byte> buffer
2192 ((gdb_byte *) xmalloc (to_read * unit_size));
2193
2194 LONGEST xfered_partial =
2195 target_read (ops, TARGET_OBJECT_MEMORY, NULL, buffer.get (),
2196 offset + xfered_total, to_read);
2197 /* Call an observer, notifying them of the xfer progress? */
2198 if (xfered_partial <= 0)
2199 {
2200 /* Got an error reading full chunk. See if maybe we can read
2201 some subrange. */
2202 read_whatever_is_readable (ops, offset + xfered_total,
2203 offset + xfered_total + to_read,
2204 unit_size, &result);
2205 xfered_total += to_read;
2206 }
2207 else
2208 {
2209 result.emplace_back (offset + xfered_total,
2210 offset + xfered_total + xfered_partial,
2211 std::move (buffer));
2212 xfered_total += xfered_partial;
2213 }
2214 QUIT;
2215 }
2216 }
2217
2218 return result;
2219 }
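
/* Illustrative sketch (not part of GDB): consuming the blocks returned
   by read_memory_robust.  The member names BEGIN/END follow the
   emplace_back calls above; OPS, ADDR and LEN are placeholders.  */
#if 0
  std::vector<memory_read_result> blocks
    = read_memory_robust (ops, addr, len);

  for (const memory_read_result &block : blocks)
    printf_unfiltered ("readable: [%s, %s), %s units\n",
                       paddress (target_gdbarch (), block.begin),
                       paddress (target_gdbarch (), block.end),
                       pulongest (block.end - block.begin));
#endif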
2220
2221
2222 /* An alternative to target_write with progress callbacks. */
2223
2224 LONGEST
2225 target_write_with_progress (struct target_ops *ops,
2226 enum target_object object,
2227 const char *annex, const gdb_byte *buf,
2228 ULONGEST offset, LONGEST len,
2229 void (*progress) (ULONGEST, void *), void *baton)
2230 {
2231 LONGEST xfered_total = 0;
2232 int unit_size = 1;
2233
2234 /* If we are writing to a memory object, find the length of an addressable
2235 unit for that architecture. */
2236 if (object == TARGET_OBJECT_MEMORY
2237 || object == TARGET_OBJECT_STACK_MEMORY
2238 || object == TARGET_OBJECT_CODE_MEMORY
2239 || object == TARGET_OBJECT_RAW_MEMORY)
2240 unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
2241
2242 /* Give the progress callback a chance to set up. */
2243 if (progress)
2244 (*progress) (0, baton);
2245
2246 while (xfered_total < len)
2247 {
2248 ULONGEST xfered_partial;
2249 enum target_xfer_status status;
2250
2251 status = target_write_partial (ops, object, annex,
2252 buf + xfered_total * unit_size,
2253 offset + xfered_total, len - xfered_total,
2254 &xfered_partial);
2255
2256 if (status != TARGET_XFER_OK)
2257 return status == TARGET_XFER_EOF ? xfered_total : TARGET_XFER_E_IO;
2258
2259 if (progress)
2260 (*progress) (xfered_partial, baton);
2261
2262 xfered_total += xfered_partial;
2263 QUIT;
2264 }
2265 return len;
2266 }
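
/* Illustrative sketch (not part of GDB): a progress callback for
   target_write_with_progress.  As the loop above shows, the callback is
   invoked once with 0 before the transfer starts and then once per
   successful partial write with the number of units just written.  */
#if 0
static void
example_write_progress (ULONGEST units_written, void *baton)
{
  ULONGEST *total = (ULONGEST *) baton;

  *total += units_written;
}

/* ... later, with OPS/BUF/LMA/LEN as placeholders:

     ULONGEST total = 0;
     target_write_with_progress (ops, TARGET_OBJECT_MEMORY, NULL, buf,
                                 lma, len, example_write_progress, &total);  */
#endif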
2267
2268 /* For docs on target_write see target.h. */
2269
2270 LONGEST
2271 target_write (struct target_ops *ops,
2272 enum target_object object,
2273 const char *annex, const gdb_byte *buf,
2274 ULONGEST offset, LONGEST len)
2275 {
2276 return target_write_with_progress (ops, object, annex, buf, offset, len,
2277 NULL, NULL);
2278 }
2279
2280 /* Helper for target_read_alloc and target_read_stralloc. See their comments
2281 for details. */
2282
2283 template <typename T>
2284 gdb::optional<gdb::def_vector<T>>
2285 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2286 const char *annex)
2287 {
2288 gdb::def_vector<T> buf;
2289 size_t buf_pos = 0;
2290 const int chunk = 4096;
2291
2292 /* This function does not have a length parameter; it reads the
2293 entire OBJECT. Also, it doesn't support objects fetched partly
2294 from one target and partly from another (in a different stratum,
2295 e.g. a core file and an executable). Both reasons make it
2296 unsuitable for reading memory. */
2297 gdb_assert (object != TARGET_OBJECT_MEMORY);
2298
2299 /* Start by reading up to 4K at a time. The target will throttle
2300 this number down if necessary. */
2301 while (1)
2302 {
2303 ULONGEST xfered_len;
2304 enum target_xfer_status status;
2305
2306 buf.resize (buf_pos + chunk);
2307
2308 status = target_read_partial (ops, object, annex,
2309 (gdb_byte *) &buf[buf_pos],
2310 buf_pos, chunk,
2311 &xfered_len);
2312
2313 if (status == TARGET_XFER_EOF)
2314 {
2315 /* Read all there was. */
2316 buf.resize (buf_pos);
2317 return buf;
2318 }
2319 else if (status != TARGET_XFER_OK)
2320 {
2321 /* An error occurred. */
2322 return {};
2323 }
2324
2325 buf_pos += xfered_len;
2326
2327 QUIT;
2328 }
2329 }
2330
2331 /* See target.h. */
2332
2333 gdb::optional<gdb::byte_vector>
2334 target_read_alloc (struct target_ops *ops, enum target_object object,
2335 const char *annex)
2336 {
2337 return target_read_alloc_1<gdb_byte> (ops, object, annex);
2338 }
2339
2340 /* See target.h. */
2341
2342 gdb::optional<gdb::char_vector>
2343 target_read_stralloc (struct target_ops *ops, enum target_object object,
2344 const char *annex)
2345 {
2346 gdb::optional<gdb::char_vector> buf
2347 = target_read_alloc_1<char> (ops, object, annex);
2348
2349 if (!buf)
2350 return {};
2351
2352 if (buf->empty () || buf->back () != '\0')
2353 buf->push_back ('\0');
2354
2355 /* Check for embedded NUL bytes; but allow trailing NULs. */
2356 for (auto it = std::find (buf->begin (), buf->end (), '\0');
2357 it != buf->end (); it++)
2358 if (*it != '\0')
2359 {
2360 warning (_("target object %d, annex %s, "
2361 "contained unexpected null characters"),
2362 (int) object, annex ? annex : "(none)");
2363 break;
2364 }
2365
2366 return buf;
2367 }
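
/* Illustrative sketch (not part of GDB): reading a string-valued target
   object.  TARGET_OBJECT_OSDATA is used elsewhere in this file; the
   "processes" annex here is only an example.  */
#if 0
  gdb::optional<gdb::char_vector> text
    = target_read_stralloc (current_inferior ()->top_target (),
                            TARGET_OBJECT_OSDATA, "processes");

  if (text)
    {
      /* The result is guaranteed to be NUL-terminated.  */
      printf_unfiltered ("%s", text->data ());
    }
#endif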
2368
2369 /* Memory transfer methods. */
2370
2371 void
2372 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2373 LONGEST len)
2374 {
2375 /* This method is used to read from an alternate, non-current
2376 target. This read must bypass the overlay support (as symbols
2377 don't match this target), and GDB's internal cache (wrong cache
2378 for this target). */
2379 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2380 != len)
2381 memory_error (TARGET_XFER_E_IO, addr);
2382 }
2383
2384 ULONGEST
2385 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2386 int len, enum bfd_endian byte_order)
2387 {
2388 gdb_byte buf[sizeof (ULONGEST)];
2389
2390 gdb_assert (len <= sizeof (buf));
2391 get_target_memory (ops, addr, buf, len);
2392 return extract_unsigned_integer (buf, len, byte_order);
2393 }
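
/* Illustrative sketch (not part of GDB): reading a 4-byte unsigned value
   from an alternate target, bypassing GDB's caches and overlay support.
   OPS and ADDR are placeholders.  */
#if 0
  enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
  ULONGEST value = get_target_memory_unsigned (ops, addr, 4, byte_order);
#endif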
2394
2395 /* See target.h. */
2396
2397 int
2398 target_insert_breakpoint (struct gdbarch *gdbarch,
2399 struct bp_target_info *bp_tgt)
2400 {
2401 if (!may_insert_breakpoints)
2402 {
2403 warning (_("May not insert breakpoints"));
2404 return 1;
2405 }
2406
2407 target_ops *target = current_inferior ()->top_target ();
2408
2409 return target->insert_breakpoint (gdbarch, bp_tgt);
2410 }
2411
2412 /* See target.h. */
2413
2414 int
2415 target_remove_breakpoint (struct gdbarch *gdbarch,
2416 struct bp_target_info *bp_tgt,
2417 enum remove_bp_reason reason)
2418 {
2419 /* This is kind of a weird case to handle, but the permission might
2420 have been changed after breakpoints were inserted - in which case
2421 we should just take the user literally and assume that any
2422 breakpoints should be left in place. */
2423 if (!may_insert_breakpoints)
2424 {
2425 warning (_("May not remove breakpoints"));
2426 return 1;
2427 }
2428
2429 target_ops *target = current_inferior ()->top_target ();
2430
2431 return target->remove_breakpoint (gdbarch, bp_tgt, reason);
2432 }
2433
2434 static void
2435 info_target_command (const char *args, int from_tty)
2436 {
2437 int has_all_mem = 0;
2438
2439 if (current_program_space->symfile_object_file != NULL)
2440 {
2441 objfile *objf = current_program_space->symfile_object_file;
2442 printf_unfiltered (_("Symbols from \"%s\".\n"),
2443 objfile_name (objf));
2444 }
2445
2446 for (target_ops *t = current_inferior ()->top_target ();
2447 t != NULL;
2448 t = t->beneath ())
2449 {
2450 if (!t->has_memory ())
2451 continue;
2452
2453 if ((int) (t->stratum ()) <= (int) dummy_stratum)
2454 continue;
2455 if (has_all_mem)
2456 printf_unfiltered (_("\tWhile running this, "
2457 "GDB does not access memory from...\n"));
2458 printf_unfiltered ("%s:\n", t->longname ());
2459 t->files_info ();
2460 has_all_mem = t->has_all_memory ();
2461 }
2462 }
2463
2464 /* This function is called before any new inferior is created, e.g.
2465 by running a program, attaching, or connecting to a target.
2466 It cleans up any state from previous invocations which might
2467 change between runs. This is a subset of what target_preopen
2468 resets (things which might change between targets). */
2469
2470 void
2471 target_pre_inferior (int from_tty)
2472 {
2473 /* Clear out solib state. Otherwise the solib state of the previous
2474 inferior might have survived and is entirely wrong for the new
2475 target. This has been observed on GNU/Linux using glibc 2.3. How
2476 to reproduce:
2477
2478 bash$ ./foo&
2479 [1] 4711
2480 bash$ ./foo&
2481 [2] 4712
2482 bash$ gdb ./foo
2483 [...]
2484 (gdb) attach 4711
2485 (gdb) detach
2486 (gdb) attach 4712
2487 Cannot access memory at address 0xdeadbeef
2488 */
2489
2490 /* In some OSs, the shared library list is the same/global/shared
2491 across inferiors. If code is shared between processes, so are
2492 memory regions and features. */
2493 if (!gdbarch_has_global_solist (target_gdbarch ()))
2494 {
2495 no_shared_libraries (NULL, from_tty);
2496
2497 invalidate_target_mem_regions ();
2498
2499 target_clear_description ();
2500 }
2501
2502 /* attach_flag may be set if the previous process associated with
2503 the inferior was attached to. */
2504 current_inferior ()->attach_flag = 0;
2505
2506 current_inferior ()->highest_thread_num = 0;
2507
2508 agent_capability_invalidate ();
2509 }
2510
2511 /* This is to be called by the open routine before it does
2512 anything. */
2513
2514 void
2515 target_preopen (int from_tty)
2516 {
2517 dont_repeat ();
2518
2519 if (current_inferior ()->pid != 0)
2520 {
2521 if (!from_tty
2522 || !target_has_execution ()
2523 || query (_("A program is being debugged already. Kill it? ")))
2524 {
2525 /* Core inferiors actually should be detached, not
2526 killed. */
2527 if (target_has_execution ())
2528 target_kill ();
2529 else
2530 target_detach (current_inferior (), 0);
2531 }
2532 else
2533 error (_("Program not killed."));
2534 }
2535
2536 /* Calling target_kill may remove the target from the stack. But if
2537 it doesn't (which seems like a win for UDI), remove it now. */
2538 /* Leave the exec target, though. The user may be switching from a
2539 live process to a core of the same program. */
2540 pop_all_targets_above (file_stratum);
2541
2542 target_pre_inferior (from_tty);
2543 }
2544
2545 /* See target.h. */
2546
2547 void
2548 target_detach (inferior *inf, int from_tty)
2549 {
2550 /* After we have detached, we will clear the register cache for this inferior
2551 by calling registers_changed_ptid. We must save the pid_ptid before
2552 detaching, as the target detach method will clear inf->pid. */
2553 ptid_t save_pid_ptid = ptid_t (inf->pid);
2554
2555 /* As long as some to_detach implementations rely on the current_inferior
2556 (either directly, or indirectly, like through target_gdbarch or by
2557 reading memory), INF needs to be the current inferior. Once that
2558 requirement no longer holds, we can remove this
2559 assertion. */
2560 gdb_assert (inf == current_inferior ());
2561
2562 prepare_for_detach ();
2563
2564 /* Hold a strong reference because detaching may unpush the
2565 target. */
2566 auto proc_target_ref = target_ops_ref::new_reference (inf->process_target ());
2567
2568 current_inferior ()->top_target ()->detach (inf, from_tty);
2569
2570 process_stratum_target *proc_target
2571 = as_process_stratum_target (proc_target_ref.get ());
2572
2573 registers_changed_ptid (proc_target, save_pid_ptid);
2574
2575 /* We have to ensure we have no frame cache left. Normally,
2576 registers_changed_ptid (save_pid_ptid) calls reinit_frame_cache when
2577 inferior_ptid matches save_pid_ptid, but in our case, it does not
2578 call it, as inferior_ptid has been reset. */
2579 reinit_frame_cache ();
2580 }
2581
2582 void
2583 target_disconnect (const char *args, int from_tty)
2584 {
2585 /* If we're in breakpoints-always-inserted mode or if breakpoints
2586 are global across processes, we have to remove them before
2587 disconnecting. */
2588 remove_breakpoints ();
2589
2590 current_inferior ()->top_target ()->disconnect (args, from_tty);
2591 }
2592
2593 /* See target/target.h. */
2594
2595 ptid_t
2596 target_wait (ptid_t ptid, struct target_waitstatus *status,
2597 target_wait_flags options)
2598 {
2599 target_ops *target = current_inferior ()->top_target ();
2600
2601 if (!target->can_async_p ())
2602 gdb_assert ((options & TARGET_WNOHANG) == 0);
2603
2604 return target->wait (ptid, status, options);
2605 }
2606
2607 /* See target.h. */
2608
2609 ptid_t
2610 default_target_wait (struct target_ops *ops,
2611 ptid_t ptid, struct target_waitstatus *status,
2612 target_wait_flags options)
2613 {
2614 status->kind = TARGET_WAITKIND_IGNORE;
2615 return minus_one_ptid;
2616 }
2617
2618 std::string
2619 target_pid_to_str (ptid_t ptid)
2620 {
2621 return current_inferior ()->top_target ()->pid_to_str (ptid);
2622 }
2623
2624 const char *
2625 target_thread_name (struct thread_info *info)
2626 {
2627 gdb_assert (info->inf == current_inferior ());
2628
2629 return current_inferior ()->top_target ()->thread_name (info);
2630 }
2631
2632 struct thread_info *
2633 target_thread_handle_to_thread_info (const gdb_byte *thread_handle,
2634 int handle_len,
2635 struct inferior *inf)
2636 {
2637 target_ops *target = current_inferior ()->top_target ();
2638
2639 return target->thread_handle_to_thread_info (thread_handle, handle_len, inf);
2640 }
2641
2642 /* See target.h. */
2643
2644 gdb::byte_vector
2645 target_thread_info_to_thread_handle (struct thread_info *tip)
2646 {
2647 target_ops *target = current_inferior ()->top_target ();
2648
2649 return target->thread_info_to_thread_handle (tip);
2650 }
2651
2652 void
2653 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2654 {
2655 process_stratum_target *curr_target = current_inferior ()->process_target ();
2656
2657 target_dcache_invalidate ();
2658
2659 current_inferior ()->top_target ()->resume (ptid, step, signal);
2660
2661 registers_changed_ptid (curr_target, ptid);
2662 /* We only set the internal executing state here. The user/frontend
2663 running state is set at a higher level. This also clears the
2664 thread's stop_pc as a side effect. */
2665 set_executing (curr_target, ptid, true);
2666 clear_inline_frame_state (curr_target, ptid);
2667 }
2668
2669 /* If true, target_commit_resume is a nop. */
2670 static int defer_target_commit_resume;
2671
2672 /* See target.h. */
2673
2674 void
2675 target_commit_resume (void)
2676 {
2677 if (defer_target_commit_resume)
2678 return;
2679
2680 current_inferior ()->top_target ()->commit_resume ();
2681 }
2682
2683 /* See target.h. */
2684
2685 scoped_restore_tmpl<int>
2686 make_scoped_defer_target_commit_resume ()
2687 {
2688 return make_scoped_restore (&defer_target_commit_resume, 1);
2689 }
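
/* Illustrative sketch (not part of GDB): batching several resumptions and
   committing them in one go.  PTID1 and PTID2 are placeholders.  */
#if 0
  {
    auto defer = make_scoped_defer_target_commit_resume ();

    /* While DEFER is live, target_commit_resume is a nop.  */
    target_resume (ptid1, 0, GDB_SIGNAL_0);
    target_resume (ptid2, 0, GDB_SIGNAL_0);
  }
  /* DEFER has been restored; this call now reaches the target.  */
  target_commit_resume ();
#endif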
2690
2691 void
2692 target_pass_signals (gdb::array_view<const unsigned char> pass_signals)
2693 {
2694 current_inferior ()->top_target ()->pass_signals (pass_signals);
2695 }
2696
2697 void
2698 target_program_signals (gdb::array_view<const unsigned char> program_signals)
2699 {
2700 current_inferior ()->top_target ()->program_signals (program_signals);
2701 }
2702
2703 static bool
2704 default_follow_fork (struct target_ops *self, bool follow_child,
2705 bool detach_fork)
2706 {
2707 /* Some target returned a fork event, but did not know how to follow it. */
2708 internal_error (__FILE__, __LINE__,
2709 _("could not find a target to follow fork"));
2710 }
2711
2712 /* Look through the list of possible targets for a target that can
2713 follow forks. */
2714
2715 bool
2716 target_follow_fork (bool follow_child, bool detach_fork)
2717 {
2718 target_ops *target = current_inferior ()->top_target ();
2719
2720 return target->follow_fork (follow_child, detach_fork);
2721 }
2722
2723 /* Target wrapper for follow exec hook. */
2724
2725 void
2726 target_follow_exec (struct inferior *inf, const char *execd_pathname)
2727 {
2728 current_inferior ()->top_target ()->follow_exec (inf, execd_pathname);
2729 }
2730
2731 static void
2732 default_mourn_inferior (struct target_ops *self)
2733 {
2734 internal_error (__FILE__, __LINE__,
2735 _("could not find a target to follow mourn inferior"));
2736 }
2737
2738 void
2739 target_mourn_inferior (ptid_t ptid)
2740 {
2741 gdb_assert (ptid.pid () == inferior_ptid.pid ());
2742 current_inferior ()->top_target ()->mourn_inferior ();
2743
2744 /* We no longer need to keep handles on any of the object files.
2745 Make sure to release them to avoid unnecessarily locking any
2746 of them while we're not actually debugging. */
2747 bfd_cache_close_all ();
2748 }
2749
2750 /* Look for a target which can describe architectural features, starting
2751 from TARGET. If we find one, return its description. */
2752
2753 const struct target_desc *
2754 target_read_description (struct target_ops *target)
2755 {
2756 return target->read_description ();
2757 }
2758
2759
2760 /* Default implementation of memory-searching. */
2761
2762 static int
2763 default_search_memory (struct target_ops *self,
2764 CORE_ADDR start_addr, ULONGEST search_space_len,
2765 const gdb_byte *pattern, ULONGEST pattern_len,
2766 CORE_ADDR *found_addrp)
2767 {
2768 auto read_memory = [=] (CORE_ADDR addr, gdb_byte *result, size_t len)
2769 {
2770 return target_read (current_inferior ()->top_target (),
2771 TARGET_OBJECT_MEMORY, NULL,
2772 result, addr, len) == len;
2773 };
2774
2775 /* Start over from the top of the target stack. */
2776 return simple_search_memory (read_memory, start_addr, search_space_len,
2777 pattern, pattern_len, found_addrp);
2778 }
2779
2780 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2781 sequence of bytes in PATTERN with length PATTERN_LEN.
2782
2783 The result is 1 if found, 0 if not found, and -1 if there was an error
2784 requiring halting of the search (e.g. memory read error).
2785 If the pattern is found the address is recorded in FOUND_ADDRP. */
2786
2787 int
2788 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2789 const gdb_byte *pattern, ULONGEST pattern_len,
2790 CORE_ADDR *found_addrp)
2791 {
2792 target_ops *target = current_inferior ()->top_target ();
2793
2794 return target->search_memory (start_addr, search_space_len, pattern,
2795 pattern_len, found_addrp);
2796 }
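
/* Illustrative sketch (not part of GDB): searching the first 64KiB from
   START (a placeholder address) for an ELF magic number.  */
#if 0
  static const gdb_byte elf_magic[] = { 0x7f, 'E', 'L', 'F' };
  CORE_ADDR found;

  if (target_search_memory (start, 0x10000, elf_magic, sizeof elf_magic,
                            &found) == 1)
    printf_unfiltered ("found at %s\n",
                       paddress (target_gdbarch (), found));
#endif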
2797
2798 /* Look through the currently pushed targets. If none of them will
2799 be able to restart the currently running process, issue an error
2800 message. */
2801
2802 void
2803 target_require_runnable (void)
2804 {
2805 for (target_ops *t = current_inferior ()->top_target ();
2806 t != NULL;
2807 t = t->beneath ())
2808 {
2809 /* If this target knows how to create a new program, then
2810 assume we will still be able to after killing the current
2811 one. Either killing and mourning will not pop T, or else
2812 find_default_run_target will find it again. */
2813 if (t->can_create_inferior ())
2814 return;
2815
2816 /* Do not worry about targets at certain strata that cannot
2817 create inferiors. Assume they will be pushed again if
2818 necessary, and continue to the process_stratum. */
2819 if (t->stratum () > process_stratum)
2820 continue;
2821
2822 error (_("The \"%s\" target does not support \"run\". "
2823 "Try \"help target\" or \"continue\"."),
2824 t->shortname ());
2825 }
2826
2827 /* This function is only called if the target is running. In that
2828 case there should have been a process_stratum target and it
2829 should either know how to create inferiors, or not... */
2830 internal_error (__FILE__, __LINE__, _("No targets found"));
2831 }
2832
2833 /* Whether GDB is allowed to fall back to the default run target for
2834 "run", "attach", etc. when no target is connected yet. */
2835 static bool auto_connect_native_target = true;
2836
2837 static void
2838 show_auto_connect_native_target (struct ui_file *file, int from_tty,
2839 struct cmd_list_element *c, const char *value)
2840 {
2841 fprintf_filtered (file,
2842 _("Whether GDB may automatically connect to the "
2843 "native target is %s.\n"),
2844 value);
2845 }
2846
2847 /* A pointer to the target that can respond to "run" or "attach".
2848 Native targets are always singletons and instantiated early at GDB
2849 startup. */
2850 static target_ops *the_native_target;
2851
2852 /* See target.h. */
2853
2854 void
2855 set_native_target (target_ops *target)
2856 {
2857 if (the_native_target != NULL)
2858 internal_error (__FILE__, __LINE__,
2859 _("native target already set (\"%s\")."),
2860 the_native_target->longname ());
2861
2862 the_native_target = target;
2863 }
2864
2865 /* See target.h. */
2866
2867 target_ops *
2868 get_native_target ()
2869 {
2870 return the_native_target;
2871 }
2872
2873 /* Look through the list of possible targets for a target that can
2874 execute a run or attach command without any other data. This is
2875 used to locate the default process stratum.
2876
2877 If DO_MESG is not NULL, the result is always valid (error() is
2878 called for errors); else, return NULL on error. */
2879
2880 static struct target_ops *
2881 find_default_run_target (const char *do_mesg)
2882 {
2883 if (auto_connect_native_target && the_native_target != NULL)
2884 return the_native_target;
2885
2886 if (do_mesg != NULL)
2887 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2888 return NULL;
2889 }
2890
2891 /* See target.h. */
2892
2893 struct target_ops *
2894 find_attach_target (void)
2895 {
2896 /* If a target on the current stack can attach, use it. */
2897 for (target_ops *t = current_inferior ()->top_target ();
2898 t != NULL;
2899 t = t->beneath ())
2900 {
2901 if (t->can_attach ())
2902 return t;
2903 }
2904
2905 /* Otherwise, use the default run target for attaching. */
2906 return find_default_run_target ("attach");
2907 }
2908
2909 /* See target.h. */
2910
2911 struct target_ops *
2912 find_run_target (void)
2913 {
2914 /* If a target on the current stack can run, use it. */
2915 for (target_ops *t = current_inferior ()->top_target ();
2916 t != NULL;
2917 t = t->beneath ())
2918 {
2919 if (t->can_create_inferior ())
2920 return t;
2921 }
2922
2923 /* Otherwise, use the default run target. */
2924 return find_default_run_target ("run");
2925 }
2926
2927 bool
2928 target_ops::info_proc (const char *args, enum info_proc_what what)
2929 {
2930 return false;
2931 }
2932
2933 /* Implement the "info proc" command. */
2934
2935 int
2936 target_info_proc (const char *args, enum info_proc_what what)
2937 {
2938 struct target_ops *t;
2939
2940 /* If we're already connected to something that can get us OS
2941 related data, use it. Otherwise, try using the native
2942 target. */
2943 t = find_target_at (process_stratum);
2944 if (t == NULL)
2945 t = find_default_run_target (NULL);
2946
2947 for (; t != NULL; t = t->beneath ())
2948 {
2949 if (t->info_proc (args, what))
2950 {
2951 if (targetdebug)
2952 fprintf_unfiltered (gdb_stdlog,
2953 "target_info_proc (\"%s\", %d)\n", args, what);
2954
2955 return 1;
2956 }
2957 }
2958
2959 return 0;
2960 }
2961
2962 static int
2963 find_default_supports_disable_randomization (struct target_ops *self)
2964 {
2965 struct target_ops *t;
2966
2967 t = find_default_run_target (NULL);
2968 if (t != NULL)
2969 return t->supports_disable_randomization ();
2970 return 0;
2971 }
2972
2973 int
2974 target_supports_disable_randomization (void)
2975 {
2976 return current_inferior ()->top_target ()->supports_disable_randomization ();
2977 }
2978
2979 /* See target/target.h. */
2980
2981 int
2982 target_supports_multi_process (void)
2983 {
2984 return current_inferior ()->top_target ()->supports_multi_process ();
2985 }
2986
2987 /* See target.h. */
2988
2989 gdb::optional<gdb::char_vector>
2990 target_get_osdata (const char *type)
2991 {
2992 struct target_ops *t;
2993
2994 /* If we're already connected to something that can get us OS
2995 related data, use it. Otherwise, try using the native
2996 target. */
2997 t = find_target_at (process_stratum);
2998 if (t == NULL)
2999 t = find_default_run_target ("get OS data");
3000
3001 if (!t)
3002 return {};
3003
3004 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3005 }
3006
3007 /* Determine the current address space of thread PTID. */
3008
3009 struct address_space *
3010 target_thread_address_space (ptid_t ptid)
3011 {
3012 struct address_space *aspace;
3013
3014 aspace = current_inferior ()->top_target ()->thread_address_space (ptid);
3015 gdb_assert (aspace != NULL);
3016
3017 return aspace;
3018 }
3019
3020 /* See target.h. */
3021
3022 target_ops *
3023 target_ops::beneath () const
3024 {
3025 return current_inferior ()->find_target_beneath (this);
3026 }
3027
3028 void
3029 target_ops::close ()
3030 {
3031 }
3032
3033 bool
3034 target_ops::can_attach ()
3035 {
3036 return false;
3037 }
3038
3039 void
3040 target_ops::attach (const char *, int)
3041 {
3042 gdb_assert_not_reached ("target_ops::attach called");
3043 }
3044
3045 bool
3046 target_ops::can_create_inferior ()
3047 {
3048 return false;
3049 }
3050
3051 void
3052 target_ops::create_inferior (const char *, const std::string &,
3053 char **, int)
3054 {
3055 gdb_assert_not_reached ("target_ops::create_inferior called");
3056 }
3057
3058 bool
3059 target_ops::can_run ()
3060 {
3061 return false;
3062 }
3063
3064 int
3065 target_can_run ()
3066 {
3067 for (target_ops *t = current_inferior ()->top_target ();
3068 t != NULL;
3069 t = t->beneath ())
3070 {
3071 if (t->can_run ())
3072 return 1;
3073 }
3074
3075 return 0;
3076 }
3077
3078 /* Target file operations. */
3079
3080 static struct target_ops *
3081 default_fileio_target (void)
3082 {
3083 struct target_ops *t;
3084
3085 /* If we're already connected to something that can perform
3086 file I/O, use it. Otherwise, try using the native target. */
3087 t = find_target_at (process_stratum);
3088 if (t != NULL)
3089 return t;
3090 return find_default_run_target ("file I/O");
3091 }
3092
3093 /* File handle for target file operations. */
3094
3095 struct fileio_fh_t
3096 {
3097 /* The target on which this file is open. NULL if the target was
3098 closed while the handle was still open. */
3099 target_ops *target;
3100
3101 /* The file descriptor on the target. */
3102 int target_fd;
3103
3104 /* Check whether this fileio_fh_t represents a closed file. */
3105 bool is_closed ()
3106 {
3107 return target_fd < 0;
3108 }
3109 };
3110
3111 /* Vector of currently open file handles. The value returned by
3112 target_fileio_open and passed as the FD argument to other
3113 target_fileio_* functions is an index into this vector. This
3114 vector's entries are never freed; instead, files are marked as
3115 closed, and the handle becomes available for reuse. */
3116 static std::vector<fileio_fh_t> fileio_fhandles;
3117
3118 /* Index into fileio_fhandles of the lowest handle that might be
3119 closed. This permits handle reuse without searching the whole
3120 list each time a new file is opened. */
3121 static int lowest_closed_fd;
3122
3123 /* Invalidate the target associated with any handles that are open
3124 on target TARG, since we're about to close (and maybe destroy) the
3125 target. The handles remain open from the client's perspective, but
3126 trying to do anything with them other than closing them will fail
3127 with EIO. */
3128
3129 static void
3130 fileio_handles_invalidate_target (target_ops *targ)
3131 {
3132 for (fileio_fh_t &fh : fileio_fhandles)
3133 if (fh.target == targ)
3134 fh.target = NULL;
3135 }
3136
3137 /* Acquire a target fileio file descriptor. */
3138
3139 static int
3140 acquire_fileio_fd (target_ops *target, int target_fd)
3141 {
3142 /* Search for closed handles to reuse. */
3143 for (; lowest_closed_fd < fileio_fhandles.size (); lowest_closed_fd++)
3144 {
3145 fileio_fh_t &fh = fileio_fhandles[lowest_closed_fd];
3146
3147 if (fh.is_closed ())
3148 break;
3149 }
3150
3151 /* Push a new handle if no closed handles were found. */
3152 if (lowest_closed_fd == fileio_fhandles.size ())
3153 fileio_fhandles.push_back (fileio_fh_t {target, target_fd});
3154 else
3155 fileio_fhandles[lowest_closed_fd] = {target, target_fd};
3156
3157 /* Should no longer be marked closed. */
3158 gdb_assert (!fileio_fhandles[lowest_closed_fd].is_closed ());
3159
3160 /* Return its index, and start the next lookup at
3161 the next index. */
3162 return lowest_closed_fd++;
3163 }
3164
3165 /* Release a target fileio file descriptor. */
3166
3167 static void
3168 release_fileio_fd (int fd, fileio_fh_t *fh)
3169 {
3170 fh->target_fd = -1;
3171 lowest_closed_fd = std::min (lowest_closed_fd, fd);
3172 }
3173
3174 /* Return a pointer to the fileio_fh_t corresponding to FD. */
3175
3176 static fileio_fh_t *
3177 fileio_fd_to_fh (int fd)
3178 {
3179 return &fileio_fhandles[fd];
3180 }
3181
3182
3183 /* Default implementations of file i/o methods. We don't want these
3184 to delegate automatically, because we need to know which target
3185 supported the method, in order to call it directly from within
3186 pread/pwrite, etc. */
3187
3188 int
3189 target_ops::fileio_open (struct inferior *inf, const char *filename,
3190 int flags, int mode, int warn_if_slow,
3191 int *target_errno)
3192 {
3193 *target_errno = FILEIO_ENOSYS;
3194 return -1;
3195 }
3196
3197 int
3198 target_ops::fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3199 ULONGEST offset, int *target_errno)
3200 {
3201 *target_errno = FILEIO_ENOSYS;
3202 return -1;
3203 }
3204
3205 int
3206 target_ops::fileio_pread (int fd, gdb_byte *read_buf, int len,
3207 ULONGEST offset, int *target_errno)
3208 {
3209 *target_errno = FILEIO_ENOSYS;
3210 return -1;
3211 }
3212
3213 int
3214 target_ops::fileio_fstat (int fd, struct stat *sb, int *target_errno)
3215 {
3216 *target_errno = FILEIO_ENOSYS;
3217 return -1;
3218 }
3219
3220 int
3221 target_ops::fileio_close (int fd, int *target_errno)
3222 {
3223 *target_errno = FILEIO_ENOSYS;
3224 return -1;
3225 }
3226
3227 int
3228 target_ops::fileio_unlink (struct inferior *inf, const char *filename,
3229 int *target_errno)
3230 {
3231 *target_errno = FILEIO_ENOSYS;
3232 return -1;
3233 }
3234
3235 gdb::optional<std::string>
3236 target_ops::fileio_readlink (struct inferior *inf, const char *filename,
3237 int *target_errno)
3238 {
3239 *target_errno = FILEIO_ENOSYS;
3240 return {};
3241 }
3242
3243 /* See target.h. */
3244
3245 int
3246 target_fileio_open (struct inferior *inf, const char *filename,
3247 int flags, int mode, bool warn_if_slow, int *target_errno)
3248 {
3249 for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
3250 {
3251 int fd = t->fileio_open (inf, filename, flags, mode,
3252 warn_if_slow, target_errno);
3253
3254 if (fd == -1 && *target_errno == FILEIO_ENOSYS)
3255 continue;
3256
3257 if (fd < 0)
3258 fd = -1;
3259 else
3260 fd = acquire_fileio_fd (t, fd);
3261
3262 if (targetdebug)
3263 fprintf_unfiltered (gdb_stdlog,
3264 "target_fileio_open (%d,%s,0x%x,0%o,%d)"
3265 " = %d (%d)\n",
3266 inf == NULL ? 0 : inf->num,
3267 filename, flags, mode,
3268 warn_if_slow, fd,
3269 fd != -1 ? 0 : *target_errno);
3270 return fd;
3271 }
3272
3273 *target_errno = FILEIO_ENOSYS;
3274 return -1;
3275 }
3276
3277 /* See target.h. */
3278
3279 int
3280 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3281 ULONGEST offset, int *target_errno)
3282 {
3283 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3284 int ret = -1;
3285
3286 if (fh->is_closed ())
3287 *target_errno = EBADF;
3288 else if (fh->target == NULL)
3289 *target_errno = EIO;
3290 else
3291 ret = fh->target->fileio_pwrite (fh->target_fd, write_buf,
3292 len, offset, target_errno);
3293
3294 if (targetdebug)
3295 fprintf_unfiltered (gdb_stdlog,
3296 "target_fileio_pwrite (%d,...,%d,%s) "
3297 "= %d (%d)\n",
3298 fd, len, pulongest (offset),
3299 ret, ret != -1 ? 0 : *target_errno);
3300 return ret;
3301 }
3302
3303 /* See target.h. */
3304
3305 int
3306 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3307 ULONGEST offset, int *target_errno)
3308 {
3309 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3310 int ret = -1;
3311
3312 if (fh->is_closed ())
3313 *target_errno = EBADF;
3314 else if (fh->target == NULL)
3315 *target_errno = EIO;
3316 else
3317 ret = fh->target->fileio_pread (fh->target_fd, read_buf,
3318 len, offset, target_errno);
3319
3320 if (targetdebug)
3321 fprintf_unfiltered (gdb_stdlog,
3322 "target_fileio_pread (%d,...,%d,%s) "
3323 "= %d (%d)\n",
3324 fd, len, pulongest (offset),
3325 ret, ret != -1 ? 0 : *target_errno);
3326 return ret;
3327 }
3328
3329 /* See target.h. */
3330
3331 int
3332 target_fileio_fstat (int fd, struct stat *sb, int *target_errno)
3333 {
3334 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3335 int ret = -1;
3336
3337 if (fh->is_closed ())
3338 *target_errno = EBADF;
3339 else if (fh->target == NULL)
3340 *target_errno = EIO;
3341 else
3342 ret = fh->target->fileio_fstat (fh->target_fd, sb, target_errno);
3343
3344 if (targetdebug)
3345 fprintf_unfiltered (gdb_stdlog,
3346 "target_fileio_fstat (%d) = %d (%d)\n",
3347 fd, ret, ret != -1 ? 0 : *target_errno);
3348 return ret;
3349 }
3350
3351 /* See target.h. */
3352
3353 int
3354 target_fileio_close (int fd, int *target_errno)
3355 {
3356 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3357 int ret = -1;
3358
3359 if (fh->is_closed ())
3360 *target_errno = EBADF;
3361 else
3362 {
3363 if (fh->target != NULL)
3364 ret = fh->target->fileio_close (fh->target_fd,
3365 target_errno);
3366 else
3367 ret = 0;
3368 release_fileio_fd (fd, fh);
3369 }
3370
3371 if (targetdebug)
3372 fprintf_unfiltered (gdb_stdlog,
3373 "target_fileio_close (%d) = %d (%d)\n",
3374 fd, ret, ret != -1 ? 0 : *target_errno);
3375 return ret;
3376 }
3377
3378 /* See target.h. */
3379
3380 int
3381 target_fileio_unlink (struct inferior *inf, const char *filename,
3382 int *target_errno)
3383 {
3384 for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
3385 {
3386 int ret = t->fileio_unlink (inf, filename, target_errno);
3387
3388 if (ret == -1 && *target_errno == FILEIO_ENOSYS)
3389 continue;
3390
3391 if (targetdebug)
3392 fprintf_unfiltered (gdb_stdlog,
3393 "target_fileio_unlink (%d,%s)"
3394 " = %d (%d)\n",
3395 inf == NULL ? 0 : inf->num, filename,
3396 ret, ret != -1 ? 0 : *target_errno);
3397 return ret;
3398 }
3399
3400 *target_errno = FILEIO_ENOSYS;
3401 return -1;
3402 }
3403
3404 /* See target.h. */
3405
3406 gdb::optional<std::string>
3407 target_fileio_readlink (struct inferior *inf, const char *filename,
3408 int *target_errno)
3409 {
3410 for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
3411 {
3412 gdb::optional<std::string> ret
3413 = t->fileio_readlink (inf, filename, target_errno);
3414
3415 if (!ret.has_value () && *target_errno == FILEIO_ENOSYS)
3416 continue;
3417
3418 if (targetdebug)
3419 fprintf_unfiltered (gdb_stdlog,
3420 "target_fileio_readlink (%d,%s)"
3421 " = %s (%d)\n",
3422 inf == NULL ? 0 : inf->num,
3423 filename, ret ? ret->c_str () : "(nil)",
3424 ret ? 0 : *target_errno);
3425 return ret;
3426 }
3427
3428 *target_errno = FILEIO_ENOSYS;
3429 return {};
3430 }
3431
3432 /* Like scoped_fd, but specific to target fileio. */
3433
3434 class scoped_target_fd
3435 {
3436 public:
3437 explicit scoped_target_fd (int fd) noexcept
3438 : m_fd (fd)
3439 {
3440 }
3441
3442 ~scoped_target_fd ()
3443 {
3444 if (m_fd >= 0)
3445 {
3446 int target_errno;
3447
3448 target_fileio_close (m_fd, &target_errno);
3449 }
3450 }
3451
3452 DISABLE_COPY_AND_ASSIGN (scoped_target_fd);
3453
3454 int get () const noexcept
3455 {
3456 return m_fd;
3457 }
3458
3459 private:
3460 int m_fd;
3461 };
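
/* Illustrative sketch (not part of GDB): using scoped_target_fd so the
   target descriptor is closed on every exit path.  INF and FILENAME are
   placeholders supplied by the caller.  */
#if 0
static LONGEST
example_read_leading_bytes (struct inferior *inf, const char *filename,
                            gdb_byte *buf, int len)
{
  int target_errno;
  scoped_target_fd fd (target_fileio_open (inf, filename, FILEIO_O_RDONLY,
                                           0700, false, &target_errno));

  if (fd.get () == -1)
    return -1;

  /* FD is closed automatically when this function returns.  */
  return target_fileio_pread (fd.get (), buf, len, 0, &target_errno);
}
#endif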
3462
3463 /* Read target file FILENAME, in the filesystem as seen by INF. If
3464 INF is NULL, use the filesystem seen by the debugger (GDB or, for
3465 remote targets, the remote stub). Store the result in *BUF_P and
3466 return the size of the transferred data. PADDING additional bytes
3467 are available in *BUF_P. This is a helper function for
3468 target_fileio_read_alloc; see the declaration of that function for
3469 more information. */
3470
3471 static LONGEST
3472 target_fileio_read_alloc_1 (struct inferior *inf, const char *filename,
3473 gdb_byte **buf_p, int padding)
3474 {
3475 size_t buf_alloc, buf_pos;
3476 gdb_byte *buf;
3477 LONGEST n;
3478 int target_errno;
3479
3480 scoped_target_fd fd (target_fileio_open (inf, filename, FILEIO_O_RDONLY,
3481 0700, false, &target_errno));
3482 if (fd.get () == -1)
3483 return -1;
3484
3485 /* Start by reading up to 4K at a time. The target will throttle
3486 this number down if necessary. */
3487 buf_alloc = 4096;
3488 buf = (gdb_byte *) xmalloc (buf_alloc);
3489 buf_pos = 0;
3490 while (1)
3491 {
3492 n = target_fileio_pread (fd.get (), &buf[buf_pos],
3493 buf_alloc - buf_pos - padding, buf_pos,
3494 &target_errno);
3495 if (n < 0)
3496 {
3497 /* An error occurred. */
3498 xfree (buf);
3499 return -1;
3500 }
3501 else if (n == 0)
3502 {
3503 /* Read all there was. */
3504 if (buf_pos == 0)
3505 xfree (buf);
3506 else
3507 *buf_p = buf;
3508 return buf_pos;
3509 }
3510
3511 buf_pos += n;
3512
3513 /* If the buffer is filling up, expand it. */
3514 if (buf_alloc < buf_pos * 2)
3515 {
3516 buf_alloc *= 2;
3517 buf = (gdb_byte *) xrealloc (buf, buf_alloc);
3518 }
3519
3520 QUIT;
3521 }
3522 }
3523
3524 /* See target.h. */
3525
3526 LONGEST
3527 target_fileio_read_alloc (struct inferior *inf, const char *filename,
3528 gdb_byte **buf_p)
3529 {
3530 return target_fileio_read_alloc_1 (inf, filename, buf_p, 0);
3531 }
3532
3533 /* See target.h. */
3534
3535 gdb::unique_xmalloc_ptr<char>
3536 target_fileio_read_stralloc (struct inferior *inf, const char *filename)
3537 {
3538 gdb_byte *buffer;
3539 char *bufstr;
3540 LONGEST i, transferred;
3541
3542 transferred = target_fileio_read_alloc_1 (inf, filename, &buffer, 1);
3543 bufstr = (char *) buffer;
3544
3545 if (transferred < 0)
3546 return gdb::unique_xmalloc_ptr<char> (nullptr);
3547
3548 if (transferred == 0)
3549 return make_unique_xstrdup ("");
3550
3551 bufstr[transferred] = 0;
3552
3553 /* Check for embedded NUL bytes; but allow trailing NULs. */
3554 for (i = strlen (bufstr); i < transferred; i++)
3555 if (bufstr[i] != 0)
3556 {
3557 warning (_("target file %s "
3558 "contained unexpected null characters"),
3559 filename);
3560 break;
3561 }
3562
3563 return gdb::unique_xmalloc_ptr<char> (bufstr);
3564 }
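
/* Illustrative sketch (not part of GDB): fetching a text file from the
   target filesystem.  "/proc/version" is only an example path.  */
#if 0
  gdb::unique_xmalloc_ptr<char> text
    = target_fileio_read_stralloc (current_inferior (), "/proc/version");

  if (text != nullptr)
    printf_unfiltered ("%s", text.get ());
#endif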
3565
3566
3567 static int
3568 default_region_ok_for_hw_watchpoint (struct target_ops *self,
3569 CORE_ADDR addr, int len)
3570 {
3571 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3572 }
3573
3574 static int
3575 default_watchpoint_addr_within_range (struct target_ops *target,
3576 CORE_ADDR addr,
3577 CORE_ADDR start, int length)
3578 {
3579 return addr >= start && addr < start + length;
3580 }
3581
3582 /* See target.h. */
3583
3584 target_ops *
3585 target_stack::find_beneath (const target_ops *t) const
3586 {
3587 /* Look for a non-empty slot at stratum levels beneath T's. */
3588 for (int stratum = t->stratum () - 1; stratum >= 0; --stratum)
3589 if (m_stack[stratum] != NULL)
3590 return m_stack[stratum];
3591
3592 return NULL;
3593 }
3594
3595 /* See target.h. */
3596
3597 struct target_ops *
3598 find_target_at (enum strata stratum)
3599 {
3600 return current_inferior ()->target_at (stratum);
3601 }
3602
3603 \f
3604
3605 /* See target.h. */
3606
3607 void
3608 target_announce_detach (int from_tty)
3609 {
3610 pid_t pid;
3611 const char *exec_file;
3612
3613 if (!from_tty)
3614 return;
3615
3616 exec_file = get_exec_file (0);
3617 if (exec_file == NULL)
3618 exec_file = "";
3619
3620 pid = inferior_ptid.pid ();
3621 printf_unfiltered (_("Detaching from program: %s, %s\n"), exec_file,
3622 target_pid_to_str (ptid_t (pid)).c_str ());
3623 }
3624
3625 /* The inferior process has died. Long live the inferior! */
3626
3627 void
3628 generic_mourn_inferior (void)
3629 {
3630 inferior *inf = current_inferior ();
3631
3632 switch_to_no_thread ();
3633
3634 /* Mark breakpoints uninserted in case something tries to delete a
3635 breakpoint while we delete the inferior's threads (which would
3636 fail, since the inferior is long gone). */
3637 mark_breakpoints_out ();
3638
3639 if (inf->pid != 0)
3640 exit_inferior (inf);
3641
3642 /* Note this wipes step-resume breakpoints, so needs to be done
3643 after exit_inferior, which ends up referencing the step-resume
3644 breakpoints through clear_thread_inferior_resources. */
3645 breakpoint_init_inferior (inf_exited);
3646
3647 registers_changed ();
3648
3649 reopen_exec_file ();
3650 reinit_frame_cache ();
3651
3652 if (deprecated_detach_hook)
3653 deprecated_detach_hook ();
3654 }
3655 \f
3656 /* Convert a normal process ID to a string, e.g. "process 1234".
3657 The result is returned as a std::string. */
3658
3659 std::string
3660 normal_pid_to_str (ptid_t ptid)
3661 {
3662 return string_printf ("process %d", ptid.pid ());
3663 }
3664
3665 static std::string
3666 default_pid_to_str (struct target_ops *ops, ptid_t ptid)
3667 {
3668 return normal_pid_to_str (ptid);
3669 }
3670
3671 /* Error-catcher for target_find_memory_regions. */
3672 static int
3673 dummy_find_memory_regions (struct target_ops *self,
3674 find_memory_region_ftype ignore1, void *ignore2)
3675 {
3676 error (_("Command not implemented for this target."));
3677 return 0;
3678 }
3679
3680 /* Error-catcher for target_make_corefile_notes. */
3681 static gdb::unique_xmalloc_ptr<char>
3682 dummy_make_corefile_notes (struct target_ops *self,
3683 bfd *ignore1, int *ignore2)
3684 {
3685 error (_("Command not implemented for this target."));
3686 return NULL;
3687 }
3688
3689 #include "target-delegates.c"
3690
3691 /* The initial current target, so that there is always a semi-valid
3692 current target. */
3693
3694 static dummy_target the_dummy_target;
3695
3696 /* See target.h. */
3697
3698 target_ops *
3699 get_dummy_target ()
3700 {
3701 return &the_dummy_target;
3702 }
3703
3704 static const target_info dummy_target_info = {
3705 "None",
3706 N_("None"),
3707 ""
3708 };
3709
3710 strata
3711 dummy_target::stratum () const
3712 {
3713 return dummy_stratum;
3714 }
3715
3716 strata
3717 debug_target::stratum () const
3718 {
3719 return debug_stratum;
3720 }
3721
3722 const target_info &
3723 dummy_target::info () const
3724 {
3725 return dummy_target_info;
3726 }
3727
3728 const target_info &
3729 debug_target::info () const
3730 {
3731 return beneath ()->info ();
3732 }
3733
3734 \f
3735
3736 void
3737 target_close (struct target_ops *targ)
3738 {
3739 gdb_assert (!current_inferior ()->target_is_pushed (targ));
3740
3741 fileio_handles_invalidate_target (targ);
3742
3743 targ->close ();
3744
3745 if (targetdebug)
3746 fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
3747 }
3748
3749 int
3750 target_thread_alive (ptid_t ptid)
3751 {
3752 return current_inferior ()->top_target ()->thread_alive (ptid);
3753 }
3754
3755 void
3756 target_update_thread_list (void)
3757 {
3758 current_inferior ()->top_target ()->update_thread_list ();
3759 }
3760
3761 void
3762 target_stop (ptid_t ptid)
3763 {
3764 if (!may_stop)
3765 {
3766 warning (_("May not interrupt or stop the target, ignoring attempt"));
3767 return;
3768 }
3769
3770 current_inferior ()->top_target ()->stop (ptid);
3771 }
3772
3773 void
3774 target_interrupt ()
3775 {
3776 if (!may_stop)
3777 {
3778 warning (_("May not interrupt or stop the target, ignoring attempt"));
3779 return;
3780 }
3781
3782 current_inferior ()->top_target ()->interrupt ();
3783 }
3784
3785 /* See target.h. */
3786
3787 void
3788 target_pass_ctrlc (void)
3789 {
3790 /* Pass the Ctrl-C to the first target that has a thread
3791 running. */
3792 for (inferior *inf : all_inferiors ())
3793 {
3794 target_ops *proc_target = inf->process_target ();
3795 if (proc_target == NULL)
3796 continue;
3797
3798 for (thread_info *thr : inf->non_exited_threads ())
3799 {
3800 /* A thread can be THREAD_STOPPED and executing, while
3801 running an infcall. */
3802 if (thr->state == THREAD_RUNNING || thr->executing)
3803 {
3804 /* We can get here quite deep in target layers. Avoid
3805 switching thread context or anything that would
3806 communicate with the target (e.g., to fetch
3807 registers), or flushing e.g., the frame cache. We
3808 just switch inferior in order to be able to call
3809 through the target_stack. */
3810 scoped_restore_current_inferior restore_inferior;
3811 set_current_inferior (inf);
3812 current_inferior ()->top_target ()->pass_ctrlc ();
3813 return;
3814 }
3815 }
3816 }
3817 }
3818
3819 /* See target.h. */
3820
3821 void
3822 default_target_pass_ctrlc (struct target_ops *ops)
3823 {
3824 target_interrupt ();
3825 }
3826
3827 /* See target/target.h. */
3828
3829 void
3830 target_stop_and_wait (ptid_t ptid)
3831 {
3832 struct target_waitstatus status;
3833 bool was_non_stop = non_stop;
3834
3835 non_stop = true;
3836 target_stop (ptid);
3837
3838 memset (&status, 0, sizeof (status));
3839 target_wait (ptid, &status, 0);
3840
3841 non_stop = was_non_stop;
3842 }
3843
3844 /* See target/target.h. */
3845
3846 void
3847 target_continue_no_signal (ptid_t ptid)
3848 {
3849 target_resume (ptid, 0, GDB_SIGNAL_0);
3850 }
3851
3852 /* See target/target.h. */
3853
3854 void
3855 target_continue (ptid_t ptid, enum gdb_signal signal)
3856 {
3857 target_resume (ptid, 0, signal);
3858 }
3859
3860 /* Concatenate ELEM to LIST, a comma-separated list. */
3861
3862 static void
3863 str_comma_list_concat_elem (std::string *list, const char *elem)
3864 {
3865 if (!list->empty ())
3866 list->append (", ");
3867
3868 list->append (elem);
3869 }
3870
3871 /* Helper for target_options_to_string. If OPT is present in
3872 TARGET_OPTIONS, append the OPT_STR (string version of OPT) to RET.
3873 OPT is removed from TARGET_OPTIONS. */
3874
3875 static void
3876 do_option (target_wait_flags *target_options, std::string *ret,
3877 target_wait_flag opt, const char *opt_str)
3878 {
3879 if ((*target_options & opt) != 0)
3880 {
3881 str_comma_list_concat_elem (ret, opt_str);
3882 *target_options &= ~opt;
3883 }
3884 }
3885
3886 /* See target.h. */
3887
3888 std::string
3889 target_options_to_string (target_wait_flags target_options)
3890 {
3891 std::string ret;
3892
3893 #define DO_TARG_OPTION(OPT) \
3894 do_option (&target_options, &ret, OPT, #OPT)
3895
3896 DO_TARG_OPTION (TARGET_WNOHANG);
3897
3898 if (target_options != 0)
3899 str_comma_list_concat_elem (&ret, "unknown???");
3900
3901 return ret;
3902 }
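
/* Illustrative sketch (not part of GDB): the result is a comma-separated
   list of recognized flag names, with unrecognized bits rendered as
   "unknown???".  */
#if 0
  std::string opts = target_options_to_string (TARGET_WNOHANG);
  /* OPTS is now "TARGET_WNOHANG".  */
#endif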
3903
3904 void
3905 target_fetch_registers (struct regcache *regcache, int regno)
3906 {
3907 current_inferior ()->top_target ()->fetch_registers (regcache, regno);
3908 if (targetdebug)
3909 regcache->debug_print_register ("target_fetch_registers", regno);
3910 }
3911
3912 void
3913 target_store_registers (struct regcache *regcache, int regno)
3914 {
3915 if (!may_write_registers)
3916 error (_("Writing to registers is not allowed (regno %d)"), regno);
3917
3918 current_inferior ()->top_target ()->store_registers (regcache, regno);
3919 if (targetdebug)
3920 {
3921 regcache->debug_print_register ("target_store_registers", regno);
3922 }
3923 }
3924
3925 int
3926 target_core_of_thread (ptid_t ptid)
3927 {
3928 return current_inferior ()->top_target ()->core_of_thread (ptid);
3929 }
3930
3931 int
3932 simple_verify_memory (struct target_ops *ops,
3933 const gdb_byte *data, CORE_ADDR lma, ULONGEST size)
3934 {
3935 LONGEST total_xfered = 0;
3936
3937 while (total_xfered < size)
3938 {
3939 ULONGEST xfered_len;
3940 enum target_xfer_status status;
3941 gdb_byte buf[1024];
3942 ULONGEST howmuch = std::min<ULONGEST> (sizeof (buf), size - total_xfered);
3943
3944 status = target_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
3945 buf, NULL, lma + total_xfered, howmuch,
3946 &xfered_len);
3947 if (status == TARGET_XFER_OK
3948 && memcmp (data + total_xfered, buf, xfered_len) == 0)
3949 {
3950 total_xfered += xfered_len;
3951 QUIT;
3952 }
3953 else
3954 return 0;
3955 }
3956 return 1;
3957 }
3958
3959 /* Default implementation of memory verification. */
3960
3961 static int
3962 default_verify_memory (struct target_ops *self,
3963 const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3964 {
3965 /* Start over from the top of the target stack. */
3966 return simple_verify_memory (current_inferior ()->top_target (),
3967 data, memaddr, size);
3968 }
3969
3970 int
3971 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3972 {
3973 target_ops *target = current_inferior ()->top_target ();
3974
3975 return target->verify_memory (data, memaddr, size);
3976 }
3977
3978 /* The documentation for this function is in its prototype declaration in
3979 target.h. */
3980
3981 int
3982 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask,
3983 enum target_hw_bp_type rw)
3984 {
3985 target_ops *target = current_inferior ()->top_target ();
3986
3987 return target->insert_mask_watchpoint (addr, mask, rw);
3988 }
3989
3990 /* The documentation for this function is in its prototype declaration in
3991 target.h. */
3992
3993 int
3994 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask,
3995 enum target_hw_bp_type rw)
3996 {
3997 target_ops *target = current_inferior ()->top_target ();
3998
3999 return target->remove_mask_watchpoint (addr, mask, rw);
4000 }
4001
4002 /* The documentation for this function is in its prototype declaration
4003 in target.h. */
4004
4005 int
4006 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
4007 {
4008 target_ops *target = current_inferior ()->top_target ();
4009
4010 return target->masked_watch_num_registers (addr, mask);
4011 }
4012
4013 /* The documentation for this function is in its prototype declaration
4014 in target.h. */
4015
4016 int
4017 target_ranged_break_num_registers (void)
4018 {
4019 return current_inferior ()->top_target ()->ranged_break_num_registers ();
4020 }
4021
4022 /* See target.h. */
4023
4024 struct btrace_target_info *
4025 target_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
4026 {
4027 return current_inferior ()->top_target ()->enable_btrace (ptid, conf);
4028 }
4029
4030 /* See target.h. */
4031
4032 void
4033 target_disable_btrace (struct btrace_target_info *btinfo)
4034 {
4035 current_inferior ()->top_target ()->disable_btrace (btinfo);
4036 }
4037
4038 /* See target.h. */
4039
4040 void
4041 target_teardown_btrace (struct btrace_target_info *btinfo)
4042 {
4043 current_inferior ()->top_target ()->teardown_btrace (btinfo);
4044 }
4045
4046 /* See target.h. */
4047
4048 enum btrace_error
4049 target_read_btrace (struct btrace_data *btrace,
4050 struct btrace_target_info *btinfo,
4051 enum btrace_read_type type)
4052 {
4053 target_ops *target = current_inferior ()->top_target ();
4054
4055 return target->read_btrace (btrace, btinfo, type);
4056 }
4057
4058 /* See target.h. */
4059
4060 const struct btrace_config *
4061 target_btrace_conf (const struct btrace_target_info *btinfo)
4062 {
4063 return current_inferior ()->top_target ()->btrace_conf (btinfo);
4064 }
4065
4066 /* See target.h. */
4067
4068 void
4069 target_stop_recording (void)
4070 {
4071 current_inferior ()->top_target ()->stop_recording ();
4072 }
4073
4074 /* See target.h. */
4075
4076 void
4077 target_save_record (const char *filename)
4078 {
4079 current_inferior ()->top_target ()->save_record (filename);
4080 }
4081
4082 /* See target.h. */
4083
4084 int
4085 target_supports_delete_record ()
4086 {
4087 return current_inferior ()->top_target ()->supports_delete_record ();
4088 }
4089
4090 /* See target.h. */
4091
4092 void
4093 target_delete_record (void)
4094 {
4095 current_inferior ()->top_target ()->delete_record ();
4096 }
4097
4098 /* See target.h. */
4099
4100 enum record_method
4101 target_record_method (ptid_t ptid)
4102 {
4103 return current_inferior ()->top_target ()->record_method (ptid);
4104 }
4105
4106 /* See target.h. */
4107
4108 int
4109 target_record_is_replaying (ptid_t ptid)
4110 {
4111 return current_inferior ()->top_target ()->record_is_replaying (ptid);
4112 }
4113
4114 /* See target.h. */
4115
4116 int
4117 target_record_will_replay (ptid_t ptid, int dir)
4118 {
4119 return current_inferior ()->top_target ()->record_will_replay (ptid, dir);
4120 }
4121
4122 /* See target.h. */
4123
4124 void
4125 target_record_stop_replaying (void)
4126 {
4127 current_inferior ()->top_target ()->record_stop_replaying ();
4128 }
4129
4130 /* See target.h. */
4131
4132 void
4133 target_goto_record_begin (void)
4134 {
4135 current_inferior ()->top_target ()->goto_record_begin ();
4136 }
4137
4138 /* See target.h. */
4139
4140 void
4141 target_goto_record_end (void)
4142 {
4143 current_inferior ()->top_target ()->goto_record_end ();
4144 }
4145
4146 /* See target.h. */
4147
4148 void
4149 target_goto_record (ULONGEST insn)
4150 {
4151 current_inferior ()->top_target ()->goto_record (insn);
4152 }
4153
4154 /* See target.h. */
4155
4156 void
4157 target_insn_history (int size, gdb_disassembly_flags flags)
4158 {
4159 current_inferior ()->top_target ()->insn_history (size, flags);
4160 }
4161
4162 /* See target.h. */
4163
4164 void
4165 target_insn_history_from (ULONGEST from, int size,
4166 gdb_disassembly_flags flags)
4167 {
4168 current_inferior ()->top_target ()->insn_history_from (from, size, flags);
4169 }
4170
4171 /* See target.h. */
4172
4173 void
4174 target_insn_history_range (ULONGEST begin, ULONGEST end,
4175 gdb_disassembly_flags flags)
4176 {
4177 current_inferior ()->top_target ()->insn_history_range (begin, end, flags);
4178 }
4179
4180 /* See target.h. */
4181
4182 void
4183 target_call_history (int size, record_print_flags flags)
4184 {
4185 current_inferior ()->top_target ()->call_history (size, flags);
4186 }
4187
4188 /* See target.h. */
4189
4190 void
4191 target_call_history_from (ULONGEST begin, int size, record_print_flags flags)
4192 {
4193 current_inferior ()->top_target ()->call_history_from (begin, size, flags);
4194 }
4195
4196 /* See target.h. */
4197
4198 void
4199 target_call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
4200 {
4201 current_inferior ()->top_target ()->call_history_range (begin, end, flags);
4202 }
4203
4204 /* See target.h. */
4205
4206 const struct frame_unwind *
4207 target_get_unwinder (void)
4208 {
4209 return current_inferior ()->top_target ()->get_unwinder ();
4210 }
4211
4212 /* See target.h. */
4213
4214 const struct frame_unwind *
4215 target_get_tailcall_unwinder (void)
4216 {
4217 return current_inferior ()->top_target ()->get_tailcall_unwinder ();
4218 }
4219
4220 /* See target.h. */
4221
4222 void
4223 target_prepare_to_generate_core (void)
4224 {
4225 current_inferior ()->top_target ()->prepare_to_generate_core ();
4226 }
4227
4228 /* See target.h. */
4229
4230 void
4231 target_done_generating_core (void)
4232 {
4233 current_inferior ()->top_target ()->done_generating_core ();
4234 }
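/* These two calls are intended to bracket core-file generation (e.g. the
"gcore" command): the target gets a chance to prepare before the core is
written and to clean up afterwards. */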
4235
4236 \f
4237
4238 static char targ_desc[] =
4239 "Names of targets and files being debugged.\nShows the entire \
4240 stack of targets currently in use (including the exec-file,\n\
4241 core-file, and process, if any), as well as the symbol file name.";
4242
4243 static void
4244 default_rcmd (struct target_ops *self, const char *command,
4245 struct ui_file *output)
4246 {
4247 error (_("\"monitor\" command not supported by this target."));
4248 }
4249
4250 static void
4251 do_monitor_command (const char *cmd, int from_tty)
4252 {
4253 target_rcmd (cmd, gdb_stdtarg);
4254 }
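/* Typical use of the "monitor" command handled above (illustrative):

     (gdb) monitor help

The argument string is passed verbatim to the target's rcmd method (for
example, a remote stub); targets that do not implement rcmd end up in
default_rcmd and report that "monitor" is unsupported. */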
4255
4256 /* Erases all the memory regions marked as flash. CMD and FROM_TTY are
4257 ignored. */
4258
4259 void
4260 flash_erase_command (const char *cmd, int from_tty)
4261 {
4262 /* Set when at least one flash region has been erased; used to decide whether to tell the target that flash operations have terminated. */
4263 bool found_flash_region = false;
4264 struct gdbarch *gdbarch = target_gdbarch ();
4265
4266 std::vector<mem_region> mem_regions = target_memory_map ();
4267
4268 /* Iterate over all memory regions. */
4269 for (const mem_region &m : mem_regions)
4270 {
4271 /* Is this a flash memory region? */
4272 if (m.attrib.mode == MEM_FLASH)
4273 {
4274 found_flash_region = true;
4275 target_flash_erase (m.lo, m.hi - m.lo);
4276
4277 ui_out_emit_tuple tuple_emitter (current_uiout, "erased-regions");
4278
4279 current_uiout->message (_("Erasing flash memory region at address "));
4280 current_uiout->field_core_addr ("address", gdbarch, m.lo);
4281 current_uiout->message (", size = ");
4282 current_uiout->field_string ("size", hex_string (m.hi - m.lo));
4283 current_uiout->message ("\n");
4284 }
4285 }
4286
4287 /* Did we do any flash operations? If so, we need to finalize them. */
4288 if (found_flash_region)
4289 target_flash_done ();
4290 else
4291 current_uiout->message (_("No flash memory regions found.\n"));
4292 }
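/* Illustrative use: "flash-erase" (registered in _initialize_target
below) walks the target's memory map, erases every MEM_FLASH region via
target_flash_erase, and finally calls target_flash_done once so the
target can finish the whole batch of flash operations. */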
4293
4294 /* Print the name of each layer of our target stack. */
4295
4296 static void
4297 maintenance_print_target_stack (const char *cmd, int from_tty)
4298 {
4299 printf_filtered (_("The current target stack is:\n"));
4300
4301 for (target_ops *t = current_inferior ()->top_target ();
4302 t != NULL;
4303 t = t->beneath ())
4304 {
4305 if (t->stratum () == debug_stratum)
4306 continue;
4307 printf_filtered (" - %s (%s)\n", t->shortname (), t->longname ());
4308 }
4309 }
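/* Example output (names and descriptions purely illustrative) for a live
remote session:

     (gdb) maint print target-stack
     The current target stack is:
       - remote (Remote target using gdb-specific protocol)
       - exec (Local exec file)
       - None (None)
*/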
4310
4311 /* See target.h. */
4312
4313 void
4314 target_async (int enable)
4315 {
4316 infrun_async (enable);
4317 current_inferior ()->top_target ()->async (enable);
4318 }
4319
4320 /* See target.h. */
4321
4322 void
4323 target_thread_events (int enable)
4324 {
4325 current_inferior ()->top_target ()->thread_events (enable);
4326 }
4327
4328 /* Controls whether targets may report that they can be (or already are)
4329 async. This is just for maintainers to use when debugging gdb. */
4330 bool target_async_permitted = true;
4331
4332 /* The set command writes to this variable. If the inferior is
4333 executing, target_async_permitted is *not* updated. */
4334 static bool target_async_permitted_1 = true;
4335
4336 static void
4337 maint_set_target_async_command (const char *args, int from_tty,
4338 struct cmd_list_element *c)
4339 {
4340 if (have_live_inferiors ())
4341 {
4342 target_async_permitted_1 = target_async_permitted;
4343 error (_("Cannot change this setting while the inferior is running."));
4344 }
4345
4346 target_async_permitted = target_async_permitted_1;
4347 }
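/* The hook above illustrates the shadow-variable pattern used throughout
this file: the "set" command writes the user's value into the *_1 copy,
and the hook either commits it to the real variable or, when live
inferiors exist, resets the copy from the real value and errors out so
the setting is left unchanged. */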
4348
4349 static void
4350 maint_show_target_async_command (struct ui_file *file, int from_tty,
4351 struct cmd_list_element *c,
4352 const char *value)
4353 {
4354 fprintf_filtered (file,
4355 _("Controlling the inferior in "
4356 "asynchronous mode is %s.\n"), value);
4357 }
4358
4359 /* Return true if the target operates in non-stop mode even with "set
4360 non-stop off". */
4361
4362 static int
4363 target_always_non_stop_p (void)
4364 {
4365 return current_inferior ()->top_target ()->always_non_stop_p ();
4366 }
4367
4368 /* See target.h. */
4369
4370 bool
4371 target_is_non_stop_p ()
4372 {
4373 return ((non_stop
4374 || target_non_stop_enabled == AUTO_BOOLEAN_TRUE
4375 || (target_non_stop_enabled == AUTO_BOOLEAN_AUTO
4376 && target_always_non_stop_p ()))
4377 && target_can_async_p ());
4378 }
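/* Put differently: the target is considered non-stop when async execution
is possible and either the user asked for "set non-stop on", the
maintainer forced "maint set target-non-stop on", or that setting is
"auto" and the target reports that it always runs in non-stop mode. */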
4379
4380 /* See target.h. */
4381
4382 bool
4383 exists_non_stop_target ()
4384 {
4385 if (target_is_non_stop_p ())
4386 return true;
4387
4388 scoped_restore_current_thread restore_thread;
4389
4390 for (inferior *inf : all_inferiors ())
4391 {
4392 switch_to_inferior_no_thread (inf);
4393 if (target_is_non_stop_p ())
4394 return true;
4395 }
4396
4397 return false;
4398 }
4399
4400 /* Controls whether targets may report that they always run in non-stop
4401 mode. This is just for maintainers to use when debugging gdb. */
4402 enum auto_boolean target_non_stop_enabled = AUTO_BOOLEAN_AUTO;
4403
4404 /* The set command writes to this variable. If the inferior is
4405 executing, target_non_stop_enabled is *not* updated. */
4406 static enum auto_boolean target_non_stop_enabled_1 = AUTO_BOOLEAN_AUTO;
4407
4408 /* Implementation of "maint set target-non-stop". */
4409
4410 static void
4411 maint_set_target_non_stop_command (const char *args, int from_tty,
4412 struct cmd_list_element *c)
4413 {
4414 if (have_live_inferiors ())
4415 {
4416 target_non_stop_enabled_1 = target_non_stop_enabled;
4417 error (_("Cannot change this setting while the inferior is running."));
4418 }
4419
4420 target_non_stop_enabled = target_non_stop_enabled_1;
4421 }
4422
4423 /* Implementation of "maint show target-non-stop". */
4424
4425 static void
4426 maint_show_target_non_stop_command (struct ui_file *file, int from_tty,
4427 struct cmd_list_element *c,
4428 const char *value)
4429 {
4430 if (target_non_stop_enabled == AUTO_BOOLEAN_AUTO)
4431 fprintf_filtered (file,
4432 _("Whether the target is always in non-stop mode "
4433 "is %s (currently %s).\n"), value,
4434 target_always_non_stop_p () ? "on" : "off");
4435 else
4436 fprintf_filtered (file,
4437 _("Whether the target is always in non-stop mode "
4438 "is %s.\n"), value);
4439 }
4440
4441 /* Temporary copies of permission settings. */
4442
4443 static bool may_write_registers_1 = true;
4444 static bool may_write_memory_1 = true;
4445 static bool may_insert_breakpoints_1 = true;
4446 static bool may_insert_tracepoints_1 = true;
4447 static bool may_insert_fast_tracepoints_1 = true;
4448 static bool may_stop_1 = true;
4449
4450 /* Make the user-set values match the real values again. */
4451
4452 void
4453 update_target_permissions (void)
4454 {
4455 may_write_registers_1 = may_write_registers;
4456 may_write_memory_1 = may_write_memory;
4457 may_insert_breakpoints_1 = may_insert_breakpoints;
4458 may_insert_tracepoints_1 = may_insert_tracepoints;
4459 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
4460 may_stop_1 = may_stop;
4461 }
4462
4463 /* This one function handles (most of) the permission flags in the same
4464 way. */
4465
4466 static void
4467 set_target_permissions (const char *args, int from_tty,
4468 struct cmd_list_element *c)
4469 {
4470 if (target_has_execution ())
4471 {
4472 update_target_permissions ();
4473 error (_("Cannot change this setting while the inferior is running."));
4474 }
4475
4476 /* Make the real values match the user-changed values. */
4477 may_write_registers = may_write_registers_1;
4478 may_insert_breakpoints = may_insert_breakpoints_1;
4479 may_insert_tracepoints = may_insert_tracepoints_1;
4480 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
4481 may_stop = may_stop_1;
4482 update_observer_mode ();
4483 }
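/* Note that may_write_memory is intentionally not committed here; it has
its own hook below so memory-write permission can be changed
independently of observer mode. */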
4484
4485 /* Set memory write permission independently of observer mode. */
4486
4487 static void
4488 set_write_memory_permission (const char *args, int from_tty,
4489 struct cmd_list_element *c)
4490 {
4491 /* Make the real values match the user-changed values. */
4492 may_write_memory = may_write_memory_1;
4493 update_observer_mode ();
4494 }
4495
4496 void _initialize_target ();
4497
4498 void
4499 _initialize_target ()
4500 {
4501 the_debug_target = new debug_target ();
4502
4503 add_info ("target", info_target_command, targ_desc);
4504 add_info ("files", info_target_command, targ_desc);
4505
4506 add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
4507 Set target debugging."), _("\
4508 Show target debugging."), _("\
4509 When non-zero, target debugging is enabled. Higher numbers are more\n\
4510 verbose."),
4511 set_targetdebug,
4512 show_targetdebug,
4513 &setdebuglist, &showdebuglist);
4514
4515 add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
4516 &trust_readonly, _("\
4517 Set mode for reading from readonly sections."), _("\
4518 Show mode for reading from readonly sections."), _("\
4519 When this mode is on, memory reads from readonly sections (such as .text)\n\
4520 will be read from the object file instead of from the target. This will\n\
4521 result in significant performance improvement for remote targets."),
4522 NULL,
4523 show_trust_readonly,
4524 &setlist, &showlist);
4525
4526 add_com ("monitor", class_obscure, do_monitor_command,
4527 _("Send a command to the remote monitor (remote targets only)."));
4528
4529 add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
4530 _("Print the name of each layer of the internal target stack."),
4531 &maintenanceprintlist);
4532
4533 add_setshow_boolean_cmd ("target-async", no_class,
4534 &target_async_permitted_1, _("\
4535 Set whether gdb controls the inferior in asynchronous mode."), _("\
4536 Show whether gdb controls the inferior in asynchronous mode."), _("\
4537 Tells gdb whether to control the inferior in asynchronous mode."),
4538 maint_set_target_async_command,
4539 maint_show_target_async_command,
4540 &maintenance_set_cmdlist,
4541 &maintenance_show_cmdlist);
4542
4543 add_setshow_auto_boolean_cmd ("target-non-stop", no_class,
4544 &target_non_stop_enabled_1, _("\
4545 Set whether gdb always controls the inferior in non-stop mode."), _("\
4546 Show whether gdb always controls the inferior in non-stop mode."), _("\
4547 Tells gdb whether to control the inferior in non-stop mode."),
4548 maint_set_target_non_stop_command,
4549 maint_show_target_non_stop_command,
4550 &maintenance_set_cmdlist,
4551 &maintenance_show_cmdlist);
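/* Illustrative use of the two maintenance settings registered above:

     (gdb) maint set target-async off
     (gdb) maint show target-non-stop

Both reject changes while there are live inferiors, per the set hooks
defined earlier in this file. */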
4552
4553 add_setshow_boolean_cmd ("may-write-registers", class_support,
4554 &may_write_registers_1, _("\
4555 Set permission to write into registers."), _("\
4556 Show permission to write into registers."), _("\
4557 When this permission is on, GDB may write into the target's registers.\n\
4558 Otherwise, any sort of write attempt will result in an error."),
4559 set_target_permissions, NULL,
4560 &setlist, &showlist);
4561
4562 add_setshow_boolean_cmd ("may-write-memory", class_support,
4563 &may_write_memory_1, _("\
4564 Set permission to write into target memory."), _("\
4565 Show permission to write into target memory."), _("\
4566 When this permission is on, GDB may write into the target's memory.\n\
4567 Otherwise, any sort of write attempt will result in an error."),
4568 set_write_memory_permission, NULL,
4569 &setlist, &showlist);
4570
4571 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
4572 &may_insert_breakpoints_1, _("\
4573 Set permission to insert breakpoints in the target."), _("\
4574 Show permission to insert breakpoints in the target."), _("\
4575 When this permission is on, GDB may insert breakpoints in the program.\n\
4576 Otherwise, any sort of insertion attempt will result in an error."),
4577 set_target_permissions, NULL,
4578 &setlist, &showlist);
4579
4580 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
4581 &may_insert_tracepoints_1, _("\
4582 Set permission to insert tracepoints in the target."), _("\
4583 Show permission to insert tracepoints in the target."), _("\
4584 When this permission is on, GDB may insert tracepoints in the program.\n\
4585 Otherwise, any sort of insertion attempt will result in an error."),
4586 set_target_permissions, NULL,
4587 &setlist, &showlist);
4588
4589 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
4590 &may_insert_fast_tracepoints_1, _("\
4591 Set permission to insert fast tracepoints in the target."), _("\
4592 Show permission to insert fast tracepoints in the target."), _("\
4593 When this permission is on, GDB may insert fast tracepoints.\n\
4594 Otherwise, any sort of insertion attempt will result in an error."),
4595 set_target_permissions, NULL,
4596 &setlist, &showlist);
4597
4598 add_setshow_boolean_cmd ("may-interrupt", class_support,
4599 &may_stop_1, _("\
4600 Set permission to interrupt or signal the target."), _("\
4601 Show permission to interrupt or signal the target."), _("\
4602 When this permission is on, GDB may interrupt/stop the target's execution.\n\
4603 Otherwise, any attempt to interrupt or stop will be ignored."),
4604 set_target_permissions, NULL,
4605 &setlist, &showlist);
4606
4607 add_com ("flash-erase", no_class, flash_erase_command,
4608 _("Erase all flash memory regions."));
4609
4610 add_setshow_boolean_cmd ("auto-connect-native-target", class_support,
4611 &auto_connect_native_target, _("\
4612 Set whether GDB may automatically connect to the native target."), _("\
4613 Show whether GDB may automatically connect to the native target."), _("\
4614 When on, and GDB is not connected to a target yet, GDB\n\
4615 attempts \"run\" and other commands with the native target."),
4616 NULL, show_auto_connect_native_target,
4617 &setlist, &showlist);
4618 }