1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2022 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "target.h"
24 #include "target-dcache.h"
25 #include "gdbcmd.h"
26 #include "symtab.h"
27 #include "inferior.h"
28 #include "infrun.h"
29 #include "observable.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdbcore.h"
37 #include "target-descriptions.h"
38 #include "gdbthread.h"
39 #include "solib.h"
40 #include "exec.h"
41 #include "inline-frame.h"
42 #include "tracepoint.h"
43 #include "gdb/fileio.h"
44 #include "gdbsupport/agent.h"
45 #include "auxv.h"
46 #include "target-debug.h"
47 #include "top.h"
48 #include "event-top.h"
49 #include <algorithm>
50 #include "gdbsupport/byte-vector.h"
51 #include "gdbsupport/search.h"
52 #include "terminal.h"
53 #include <unordered_map>
54 #include "target-connection.h"
55 #include "valprint.h"
56 #include "cli/cli-decode.h"
57
58 static void generic_tls_error (void) ATTRIBUTE_NORETURN;
59
60 static void default_terminal_info (struct target_ops *, const char *, int);
61
62 static int default_watchpoint_addr_within_range (struct target_ops *,
63 CORE_ADDR, CORE_ADDR, int);
64
65 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
66 CORE_ADDR, int);
67
68 static void default_rcmd (struct target_ops *, const char *, struct ui_file *);
69
70 static ptid_t default_get_ada_task_ptid (struct target_ops *self,
71 long lwp, ULONGEST tid);
72
73 static void default_mourn_inferior (struct target_ops *self);
74
75 static int default_search_memory (struct target_ops *ops,
76 CORE_ADDR start_addr,
77 ULONGEST search_space_len,
78 const gdb_byte *pattern,
79 ULONGEST pattern_len,
80 CORE_ADDR *found_addrp);
81
82 static int default_verify_memory (struct target_ops *self,
83 const gdb_byte *data,
84 CORE_ADDR memaddr, ULONGEST size);
85
86 static void tcomplain (void) ATTRIBUTE_NORETURN;
87
88 static struct target_ops *find_default_run_target (const char *);
89
90 static int dummy_find_memory_regions (struct target_ops *self,
91 find_memory_region_ftype ignore1,
92 void *ignore2);
93
94 static gdb::unique_xmalloc_ptr<char> dummy_make_corefile_notes
95 (struct target_ops *self, bfd *ignore1, int *ignore2);
96
97 static std::string default_pid_to_str (struct target_ops *ops, ptid_t ptid);
98
99 static enum exec_direction_kind default_execution_direction
100 (struct target_ops *self);
101
102 /* Mapping between target_info objects (which have address identity)
103 and corresponding open/factory function/callback. Each add_target
104 call adds one entry to this map, and registers a "target
105 TARGET_NAME" command that when invoked calls the factory registered
106 here. The target_info object is associated with the command via
107 the command's context. */
108 static std::unordered_map<const target_info *, target_open_ftype *>
109 target_factories;
110
111 /* The singleton debug target. */
112
113 static struct target_ops *the_debug_target;
114
115 /* Command list for target. */
116
117 static struct cmd_list_element *targetlist = NULL;
118
119 /* True if we should trust readonly sections from the
120 executable when reading memory. */
121
122 static bool trust_readonly = false;
123
124 /* Nonzero if we should show true memory content including
125 memory breakpoints inserted by GDB. */
126
127 static int show_memory_breakpoints = 0;
128
129 /* These globals control whether GDB attempts to perform these
130 operations; they are useful for targets that need to prevent
131 inadvertent disruption, such as in non-stop mode. */
132
133 bool may_write_registers = true;
134
135 bool may_write_memory = true;
136
137 bool may_insert_breakpoints = true;
138
139 bool may_insert_tracepoints = true;
140
141 bool may_insert_fast_tracepoints = true;
142
143 bool may_stop = true;
144
145 /* Nonzero if we want to see tracing of target-level operations. */
146
147 static unsigned int targetdebug = 0;
148
149 static void
150 set_targetdebug (const char *args, int from_tty, struct cmd_list_element *c)
151 {
152 if (targetdebug)
153 current_inferior ()->push_target (the_debug_target);
154 else
155 current_inferior ()->unpush_target (the_debug_target);
156 }
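/* A note on the hook above (presumably registered for "set debug
   target"): a nonzero TARGETDEBUG pushes THE_DEBUG_TARGET onto the
   current inferior's target stack, and setting it back to zero
   unpushes it again.  */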
157
158 static void
159 show_targetdebug (struct ui_file *file, int from_tty,
160 struct cmd_list_element *c, const char *value)
161 {
162 gdb_printf (file, _("Target debugging is %s.\n"), value);
163 }
164
165 int
166 target_has_memory ()
167 {
168 for (target_ops *t = current_inferior ()->top_target ();
169 t != NULL;
170 t = t->beneath ())
171 if (t->has_memory ())
172 return 1;
173
174 return 0;
175 }
176
177 int
178 target_has_stack ()
179 {
180 for (target_ops *t = current_inferior ()->top_target ();
181 t != NULL;
182 t = t->beneath ())
183 if (t->has_stack ())
184 return 1;
185
186 return 0;
187 }
188
189 int
190 target_has_registers ()
191 {
192 for (target_ops *t = current_inferior ()->top_target ();
193 t != NULL;
194 t = t->beneath ())
195 if (t->has_registers ())
196 return 1;
197
198 return 0;
199 }
200
201 bool
202 target_has_execution (inferior *inf)
203 {
204 if (inf == nullptr)
205 inf = current_inferior ();
206
207 for (target_ops *t = inf->top_target ();
208 t != nullptr;
209 t = inf->find_target_beneath (t))
210 if (t->has_execution (inf))
211 return true;
212
213 return false;
214 }
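/* The predicates above share one stack-walking idiom: start at the
   inferior's top target and walk down the strata until some target
   claims the capability.  A minimal sketch of that pattern, using a
   hypothetical predicate has_foo:

     for (target_ops *t = current_inferior ()->top_target ();
          t != nullptr;
          t = t->beneath ())
       if (t->has_foo ())
         return true;
     return false;
*/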
215
216 const char *
217 target_shortname ()
218 {
219 return current_inferior ()->top_target ()->shortname ();
220 }
221
222 /* See target.h. */
223
224 bool
225 target_attach_no_wait ()
226 {
227 return current_inferior ()->top_target ()->attach_no_wait ();
228 }
229
230 /* See target.h. */
231
232 void
233 target_post_attach (int pid)
234 {
235 return current_inferior ()->top_target ()->post_attach (pid);
236 }
237
238 /* See target.h. */
239
240 void
241 target_prepare_to_store (regcache *regcache)
242 {
243 return current_inferior ()->top_target ()->prepare_to_store (regcache);
244 }
245
246 /* See target.h. */
247
248 bool
249 target_supports_enable_disable_tracepoint ()
250 {
251 target_ops *target = current_inferior ()->top_target ();
252
253 return target->supports_enable_disable_tracepoint ();
254 }
255
256 bool
257 target_supports_string_tracing ()
258 {
259 return current_inferior ()->top_target ()->supports_string_tracing ();
260 }
261
262 /* See target.h. */
263
264 bool
265 target_supports_evaluation_of_breakpoint_conditions ()
266 {
267 target_ops *target = current_inferior ()->top_target ();
268
269 return target->supports_evaluation_of_breakpoint_conditions ();
270 }
271
272 /* See target.h. */
273
274 bool
275 target_supports_dumpcore ()
276 {
277 return current_inferior ()->top_target ()->supports_dumpcore ();
278 }
279
280 /* See target.h. */
281
282 void
283 target_dumpcore (const char *filename)
284 {
285 return current_inferior ()->top_target ()->dumpcore (filename);
286 }
287
288 /* See target.h. */
289
290 bool
291 target_can_run_breakpoint_commands ()
292 {
293 return current_inferior ()->top_target ()->can_run_breakpoint_commands ();
294 }
295
296 /* See target.h. */
297
298 void
299 target_files_info ()
300 {
301 return current_inferior ()->top_target ()->files_info ();
302 }
303
304 /* See target.h. */
305
306 int
307 target_insert_fork_catchpoint (int pid)
308 {
309 return current_inferior ()->top_target ()->insert_fork_catchpoint (pid);
310 }
311
312 /* See target.h. */
313
314 int
315 target_remove_fork_catchpoint (int pid)
316 {
317 return current_inferior ()->top_target ()->remove_fork_catchpoint (pid);
318 }
319
320 /* See target.h. */
321
322 int
323 target_insert_vfork_catchpoint (int pid)
324 {
325 return current_inferior ()->top_target ()->insert_vfork_catchpoint (pid);
326 }
327
328 /* See target.h. */
329
330 int
331 target_remove_vfork_catchpoint (int pid)
332 {
333 return current_inferior ()->top_target ()->remove_vfork_catchpoint (pid);
334 }
335
336 /* See target.h. */
337
338 int
339 target_insert_exec_catchpoint (int pid)
340 {
341 return current_inferior ()->top_target ()->insert_exec_catchpoint (pid);
342 }
343
344 /* See target.h. */
345
346 int
347 target_remove_exec_catchpoint (int pid)
348 {
349 return current_inferior ()->top_target ()->remove_exec_catchpoint (pid);
350 }
351
352 /* See target.h. */
353
354 int
355 target_set_syscall_catchpoint (int pid, bool needed, int any_count,
356 gdb::array_view<const int> syscall_counts)
357 {
358 target_ops *target = current_inferior ()->top_target ();
359
360 return target->set_syscall_catchpoint (pid, needed, any_count,
361 syscall_counts);
362 }
363
364 /* See target.h. */
365
366 void
367 target_rcmd (const char *command, struct ui_file *outbuf)
368 {
369 return current_inferior ()->top_target ()->rcmd (command, outbuf);
370 }
371
372 /* See target.h. */
373
374 bool
375 target_can_lock_scheduler ()
376 {
377 target_ops *target = current_inferior ()->top_target ();
378
379 return (target->get_thread_control_capabilities () & tc_schedlock) != 0;
380 }
381
382 /* See target.h. */
383
384 bool
385 target_can_async_p ()
386 {
387 return target_can_async_p (current_inferior ()->top_target ());
388 }
389
390 /* See target.h. */
391
392 bool
393 target_can_async_p (struct target_ops *target)
394 {
395 if (!target_async_permitted)
396 return false;
397 return target->can_async_p ();
398 }
399
400 /* See target.h. */
401
402 bool
403 target_is_async_p ()
404 {
405 bool result = current_inferior ()->top_target ()->is_async_p ();
406 gdb_assert (target_async_permitted || !result);
407 return result;
408 }
409
410 exec_direction_kind
411 target_execution_direction ()
412 {
413 return current_inferior ()->top_target ()->execution_direction ();
414 }
415
416 /* See target.h. */
417
418 const char *
419 target_extra_thread_info (thread_info *tp)
420 {
421 return current_inferior ()->top_target ()->extra_thread_info (tp);
422 }
423
424 /* See target.h. */
425
426 char *
427 target_pid_to_exec_file (int pid)
428 {
429 return current_inferior ()->top_target ()->pid_to_exec_file (pid);
430 }
431
432 /* See target.h. */
433
434 gdbarch *
435 target_thread_architecture (ptid_t ptid)
436 {
437 return current_inferior ()->top_target ()->thread_architecture (ptid);
438 }
439
440 /* See target.h. */
441
442 int
443 target_find_memory_regions (find_memory_region_ftype func, void *data)
444 {
445 return current_inferior ()->top_target ()->find_memory_regions (func, data);
446 }
447
448 /* See target.h. */
449
450 gdb::unique_xmalloc_ptr<char>
451 target_make_corefile_notes (bfd *bfd, int *size_p)
452 {
453 return current_inferior ()->top_target ()->make_corefile_notes (bfd, size_p);
454 }
455
456 gdb_byte *
457 target_get_bookmark (const char *args, int from_tty)
458 {
459 return current_inferior ()->top_target ()->get_bookmark (args, from_tty);
460 }
461
462 void
463 target_goto_bookmark (const gdb_byte *arg, int from_tty)
464 {
465 return current_inferior ()->top_target ()->goto_bookmark (arg, from_tty);
466 }
467
468 /* See target.h. */
469
470 bool
471 target_stopped_by_watchpoint ()
472 {
473 return current_inferior ()->top_target ()->stopped_by_watchpoint ();
474 }
475
476 /* See target.h. */
477
478 bool
479 target_stopped_by_sw_breakpoint ()
480 {
481 return current_inferior ()->top_target ()->stopped_by_sw_breakpoint ();
482 }
483
484 bool
485 target_supports_stopped_by_sw_breakpoint ()
486 {
487 target_ops *target = current_inferior ()->top_target ();
488
489 return target->supports_stopped_by_sw_breakpoint ();
490 }
491
492 bool
493 target_stopped_by_hw_breakpoint ()
494 {
495 return current_inferior ()->top_target ()->stopped_by_hw_breakpoint ();
496 }
497
498 bool
499 target_supports_stopped_by_hw_breakpoint ()
500 {
501 target_ops *target = current_inferior ()->top_target ();
502
503 return target->supports_stopped_by_hw_breakpoint ();
504 }
505
506 /* See target.h. */
507
508 bool
509 target_have_steppable_watchpoint ()
510 {
511 return current_inferior ()->top_target ()->have_steppable_watchpoint ();
512 }
513
514 /* See target.h. */
515
516 int
517 target_can_use_hardware_watchpoint (bptype type, int cnt, int othertype)
518 {
519 target_ops *target = current_inferior ()->top_target ();
520
521 return target->can_use_hw_breakpoint (type, cnt, othertype);
522 }
523
524 /* See target.h. */
525
526 int
527 target_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
528 {
529 target_ops *target = current_inferior ()->top_target ();
530
531 return target->region_ok_for_hw_watchpoint (addr, len);
532 }
533
534
535 int
536 target_can_do_single_step ()
537 {
538 return current_inferior ()->top_target ()->can_do_single_step ();
539 }
540
541 /* See target.h. */
542
543 int
544 target_insert_watchpoint (CORE_ADDR addr, int len, target_hw_bp_type type,
545 expression *cond)
546 {
547 target_ops *target = current_inferior ()->top_target ();
548
549 return target->insert_watchpoint (addr, len, type, cond);
550 }
551
552 /* See target.h. */
553
554 int
555 target_remove_watchpoint (CORE_ADDR addr, int len, target_hw_bp_type type,
556 expression *cond)
557 {
558 target_ops *target = current_inferior ()->top_target ();
559
560 return target->remove_watchpoint (addr, len, type, cond);
561 }
562
563 /* See target.h. */
564
565 int
566 target_insert_hw_breakpoint (gdbarch *gdbarch, bp_target_info *bp_tgt)
567 {
568 target_ops *target = current_inferior ()->top_target ();
569
570 return target->insert_hw_breakpoint (gdbarch, bp_tgt);
571 }
572
573 /* See target.h. */
574
575 int
576 target_remove_hw_breakpoint (gdbarch *gdbarch, bp_target_info *bp_tgt)
577 {
578 target_ops *target = current_inferior ()->top_target ();
579
580 return target->remove_hw_breakpoint (gdbarch, bp_tgt);
581 }
582
583 /* See target.h. */
584
585 bool
586 target_can_accel_watchpoint_condition (CORE_ADDR addr, int len, int type,
587 expression *cond)
588 {
589 target_ops *target = current_inferior ()->top_target ();
590
591 return target->can_accel_watchpoint_condition (addr, len, type, cond);
592 }
593
594 /* See target.h. */
595
596 bool
597 target_can_execute_reverse ()
598 {
599 return current_inferior ()->top_target ()->can_execute_reverse ();
600 }
601
602 ptid_t
603 target_get_ada_task_ptid (long lwp, ULONGEST tid)
604 {
605 return current_inferior ()->top_target ()->get_ada_task_ptid (lwp, tid);
606 }
607
608 bool
609 target_filesystem_is_local ()
610 {
611 return current_inferior ()->top_target ()->filesystem_is_local ();
612 }
613
614 void
615 target_trace_init ()
616 {
617 return current_inferior ()->top_target ()->trace_init ();
618 }
619
620 void
621 target_download_tracepoint (bp_location *location)
622 {
623 return current_inferior ()->top_target ()->download_tracepoint (location);
624 }
625
626 bool
627 target_can_download_tracepoint ()
628 {
629 return current_inferior ()->top_target ()->can_download_tracepoint ();
630 }
631
632 void
633 target_download_trace_state_variable (const trace_state_variable &tsv)
634 {
635 target_ops *target = current_inferior ()->top_target ();
636
637 return target->download_trace_state_variable (tsv);
638 }
639
640 void
641 target_enable_tracepoint (bp_location *loc)
642 {
643 return current_inferior ()->top_target ()->enable_tracepoint (loc);
644 }
645
646 void
647 target_disable_tracepoint (bp_location *loc)
648 {
649 return current_inferior ()->top_target ()->disable_tracepoint (loc);
650 }
651
652 void
653 target_trace_start ()
654 {
655 return current_inferior ()->top_target ()->trace_start ();
656 }
657
658 void
659 target_trace_set_readonly_regions ()
660 {
661 return current_inferior ()->top_target ()->trace_set_readonly_regions ();
662 }
663
664 int
665 target_get_trace_status (trace_status *ts)
666 {
667 return current_inferior ()->top_target ()->get_trace_status (ts);
668 }
669
670 void
671 target_get_tracepoint_status (breakpoint *tp, uploaded_tp *utp)
672 {
673 return current_inferior ()->top_target ()->get_tracepoint_status (tp, utp);
674 }
675
676 void
677 target_trace_stop ()
678 {
679 return current_inferior ()->top_target ()->trace_stop ();
680 }
681
682 int
683 target_trace_find (trace_find_type type, int num,
684 CORE_ADDR addr1, CORE_ADDR addr2, int *tpp)
685 {
686 target_ops *target = current_inferior ()->top_target ();
687
688 return target->trace_find (type, num, addr1, addr2, tpp);
689 }
690
691 bool
692 target_get_trace_state_variable_value (int tsv, LONGEST *val)
693 {
694 target_ops *target = current_inferior ()->top_target ();
695
696 return target->get_trace_state_variable_value (tsv, val);
697 }
698
699 int
700 target_save_trace_data (const char *filename)
701 {
702 return current_inferior ()->top_target ()->save_trace_data (filename);
703 }
704
705 int
706 target_upload_tracepoints (uploaded_tp **utpp)
707 {
708 return current_inferior ()->top_target ()->upload_tracepoints (utpp);
709 }
710
711 int
712 target_upload_trace_state_variables (uploaded_tsv **utsvp)
713 {
714 target_ops *target = current_inferior ()->top_target ();
715
716 return target->upload_trace_state_variables (utsvp);
717 }
718
719 LONGEST
720 target_get_raw_trace_data (gdb_byte *buf, ULONGEST offset, LONGEST len)
721 {
722 target_ops *target = current_inferior ()->top_target ();
723
724 return target->get_raw_trace_data (buf, offset, len);
725 }
726
727 int
728 target_get_min_fast_tracepoint_insn_len ()
729 {
730 target_ops *target = current_inferior ()->top_target ();
731
732 return target->get_min_fast_tracepoint_insn_len ();
733 }
734
735 void
736 target_set_disconnected_tracing (int val)
737 {
738 return current_inferior ()->top_target ()->set_disconnected_tracing (val);
739 }
740
741 void
742 target_set_circular_trace_buffer (int val)
743 {
744 return current_inferior ()->top_target ()->set_circular_trace_buffer (val);
745 }
746
747 void
748 target_set_trace_buffer_size (LONGEST val)
749 {
750 return current_inferior ()->top_target ()->set_trace_buffer_size (val);
751 }
752
753 bool
754 target_set_trace_notes (const char *user, const char *notes,
755 const char *stopnotes)
756 {
757 target_ops *target = current_inferior ()->top_target ();
758
759 return target->set_trace_notes (user, notes, stopnotes);
760 }
761
762 bool
763 target_get_tib_address (ptid_t ptid, CORE_ADDR *addr)
764 {
765 return current_inferior ()->top_target ()->get_tib_address (ptid, addr);
766 }
767
768 void
769 target_set_permissions ()
770 {
771 return current_inferior ()->top_target ()->set_permissions ();
772 }
773
774 bool
775 target_static_tracepoint_marker_at (CORE_ADDR addr,
776 static_tracepoint_marker *marker)
777 {
778 target_ops *target = current_inferior ()->top_target ();
779
780 return target->static_tracepoint_marker_at (addr, marker);
781 }
782
783 std::vector<static_tracepoint_marker>
784 target_static_tracepoint_markers_by_strid (const char *marker_id)
785 {
786 target_ops *target = current_inferior ()->top_target ();
787
788 return target->static_tracepoint_markers_by_strid (marker_id);
789 }
790
791 traceframe_info_up
792 target_traceframe_info ()
793 {
794 return current_inferior ()->top_target ()->traceframe_info ();
795 }
796
797 bool
798 target_use_agent (bool use)
799 {
800 return current_inferior ()->top_target ()->use_agent (use);
801 }
802
803 bool
804 target_can_use_agent ()
805 {
806 return current_inferior ()->top_target ()->can_use_agent ();
807 }
808
809 bool
810 target_augmented_libraries_svr4_read ()
811 {
812 return current_inferior ()->top_target ()->augmented_libraries_svr4_read ();
813 }
814
815 bool
816 target_supports_memory_tagging ()
817 {
818 return current_inferior ()->top_target ()->supports_memory_tagging ();
819 }
820
821 bool
822 target_fetch_memtags (CORE_ADDR address, size_t len, gdb::byte_vector &tags,
823 int type)
824 {
825 return current_inferior ()->top_target ()->fetch_memtags (address, len, tags, type);
826 }
827
828 bool
829 target_store_memtags (CORE_ADDR address, size_t len,
830 const gdb::byte_vector &tags, int type)
831 {
832 return current_inferior ()->top_target ()->store_memtags (address, len, tags, type);
833 }
834
835 void
836 target_log_command (const char *p)
837 {
838 return current_inferior ()->top_target ()->log_command (p);
839 }
840
841 /* This is used to implement the various target commands. */
842
843 static void
844 open_target (const char *args, int from_tty, struct cmd_list_element *command)
845 {
846 auto *ti = static_cast<target_info *> (command->context ());
847 target_open_ftype *func = target_factories[ti];
848
849 if (targetdebug)
850 gdb_printf (gdb_stdlog, "-> %s->open (...)\n",
851 ti->shortname);
852
853 func (args, from_tty);
854
855 if (targetdebug)
856 gdb_printf (gdb_stdlog, "<- %s->open (%s, %d)\n",
857 ti->shortname, args, from_tty);
858 }
859
860 /* See target.h. */
861
862 void
863 add_target (const target_info &t, target_open_ftype *func,
864 completer_ftype *completer)
865 {
866 struct cmd_list_element *c;
867
868 auto &func_slot = target_factories[&t];
869 if (func_slot != nullptr)
870 internal_error (__FILE__, __LINE__,
871 _("target already added (\"%s\")."), t.shortname);
872 func_slot = func;
873
874 if (targetlist == NULL)
875 add_basic_prefix_cmd ("target", class_run, _("\
876 Connect to a target machine or process.\n\
877 The first argument is the type or protocol of the target machine.\n\
878 Remaining arguments are interpreted by the target protocol. For more\n\
879 information on the arguments for a particular protocol, type\n\
880 `help target ' followed by the protocol name."),
881 &targetlist, 0, &cmdlist);
882 c = add_cmd (t.shortname, no_class, t.doc, &targetlist);
883 c->set_context ((void *) &t);
884 c->func = open_target;
885 if (completer != NULL)
886 set_cmd_completer (c, completer);
887 }
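/* Usage sketch (not actual GDB code): a hypothetical target registers
   itself from its _initialize routine roughly like this, assuming a
   target_info laid out as { shortname, longname, doc } and an open
   callback matching target_open_ftype:

     static const target_info example_target_info = {
       "example",
       N_("Example target"),
       N_("Use: target example ARGS")
     };

     static void
     example_target_open (const char *args, int from_tty)
     {
       // ... construct a target_ops and push it on the inferior's
       // target stack ...
     }

     void
     _initialize_example_target ()
     {
       add_target (example_target_info, example_target_open);
     }

   Afterwards "target example ARGS" reaches example_target_open through
   open_target above, via the target_factories map.  */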
888
889 /* See target.h. */
890
891 void
892 add_deprecated_target_alias (const target_info &tinfo, const char *alias)
893 {
894 struct cmd_list_element *c;
895
896 /* If we used add_alias_cmd here, we would not get the deprecated warning;
897 see PR cli/15104. */
898 c = add_cmd (alias, no_class, tinfo.doc, &targetlist);
899 c->func = open_target;
900 c->set_context ((void *) &tinfo);
901 gdb::unique_xmalloc_ptr<char> alt
902 = xstrprintf ("target %s", tinfo.shortname);
903 deprecate_cmd (c, alt.release ());
904 }
905
906 /* Stub functions */
907
908 void
909 target_kill (void)
910 {
911 current_inferior ()->top_target ()->kill ();
912 }
913
914 void
915 target_load (const char *arg, int from_tty)
916 {
917 target_dcache_invalidate ();
918 current_inferior ()->top_target ()->load (arg, from_tty);
919 }
920
921 /* Define it. */
922
923 target_terminal_state target_terminal::m_terminal_state
924 = target_terminal_state::is_ours;
925
926 /* See target/target.h. */
927
928 void
929 target_terminal::init (void)
930 {
931 current_inferior ()->top_target ()->terminal_init ();
932
933 m_terminal_state = target_terminal_state::is_ours;
934 }
935
936 /* See target/target.h. */
937
938 void
939 target_terminal::inferior (void)
940 {
941 struct ui *ui = current_ui;
942
943 /* A background resume (``run&'') should leave GDB in control of the
944 terminal. */
945 if (ui->prompt_state != PROMPT_BLOCKED)
946 return;
947
948 /* Since we always run the inferior in the main console (unless "set
949 inferior-tty" is in effect), when some UI other than the main one
950 calls target_terminal::inferior, then we leave the main UI's
951 terminal settings as is. */
952 if (ui != main_ui)
953 return;
954
955 /* If GDB is resuming the inferior in the foreground, install
956 inferior's terminal modes. */
957
958 struct inferior *inf = current_inferior ();
959
960 if (inf->terminal_state != target_terminal_state::is_inferior)
961 {
962 current_inferior ()->top_target ()->terminal_inferior ();
963 inf->terminal_state = target_terminal_state::is_inferior;
964 }
965
966 m_terminal_state = target_terminal_state::is_inferior;
967
968 /* If the user hit C-c before, pretend that it was hit right
969 here. */
970 if (check_quit_flag ())
971 target_pass_ctrlc ();
972 }
973
974 /* See target/target.h. */
975
976 void
977 target_terminal::restore_inferior (void)
978 {
979 struct ui *ui = current_ui;
980
981 /* See target_terminal::inferior(). */
982 if (ui->prompt_state != PROMPT_BLOCKED || ui != main_ui)
983 return;
984
985 /* Restore the terminal settings of inferiors that were in the
986 foreground but are now ours_for_output due to a temporary
987 target_terminal::ours_for_output () call. */
988
989 {
990 scoped_restore_current_inferior restore_inferior;
991
992 for (::inferior *inf : all_inferiors ())
993 {
994 if (inf->terminal_state == target_terminal_state::is_ours_for_output)
995 {
996 set_current_inferior (inf);
997 current_inferior ()->top_target ()->terminal_inferior ();
998 inf->terminal_state = target_terminal_state::is_inferior;
999 }
1000 }
1001 }
1002
1003 m_terminal_state = target_terminal_state::is_inferior;
1004
1005 /* If the user hit C-c before, pretend that it was hit right
1006 here. */
1007 if (check_quit_flag ())
1008 target_pass_ctrlc ();
1009 }
1010
1011 /* Switch terminal state to DESIRED_STATE, either is_ours, or
1012 is_ours_for_output. */
1013
1014 static void
1015 target_terminal_is_ours_kind (target_terminal_state desired_state)
1016 {
1017 scoped_restore_current_inferior restore_inferior;
1018
1019 /* Must do this in two passes. First, have all inferiors save the
1020 current terminal settings. Then, after all inferiors have had a
1021 chance to safely save the terminal settings, restore GDB's
1022 terminal settings. */
1023
1024 for (inferior *inf : all_inferiors ())
1025 {
1026 if (inf->terminal_state == target_terminal_state::is_inferior)
1027 {
1028 set_current_inferior (inf);
1029 current_inferior ()->top_target ()->terminal_save_inferior ();
1030 }
1031 }
1032
1033 for (inferior *inf : all_inferiors ())
1034 {
1035 /* Note we don't check is_inferior here like above because we
1036 need to handle 'is_ours_for_output -> is_ours' too. Careful
1037 to never transition from 'is_ours' to 'is_ours_for_output',
1038 though. */
1039 if (inf->terminal_state != target_terminal_state::is_ours
1040 && inf->terminal_state != desired_state)
1041 {
1042 set_current_inferior (inf);
1043 if (desired_state == target_terminal_state::is_ours)
1044 current_inferior ()->top_target ()->terminal_ours ();
1045 else if (desired_state == target_terminal_state::is_ours_for_output)
1046 current_inferior ()->top_target ()->terminal_ours_for_output ();
1047 else
1048 gdb_assert_not_reached ("unhandled desired state");
1049 inf->terminal_state = desired_state;
1050 }
1051 }
1052 }
1053
1054 /* See target/target.h. */
1055
1056 void
1057 target_terminal::ours ()
1058 {
1059 struct ui *ui = current_ui;
1060
1061 /* See target_terminal::inferior. */
1062 if (ui != main_ui)
1063 return;
1064
1065 if (m_terminal_state == target_terminal_state::is_ours)
1066 return;
1067
1068 target_terminal_is_ours_kind (target_terminal_state::is_ours);
1069 m_terminal_state = target_terminal_state::is_ours;
1070 }
1071
1072 /* See target/target.h. */
1073
1074 void
1075 target_terminal::ours_for_output ()
1076 {
1077 struct ui *ui = current_ui;
1078
1079 /* See target_terminal::inferior. */
1080 if (ui != main_ui)
1081 return;
1082
1083 if (!target_terminal::is_inferior ())
1084 return;
1085
1086 target_terminal_is_ours_kind (target_terminal_state::is_ours_for_output);
1087 target_terminal::m_terminal_state = target_terminal_state::is_ours_for_output;
1088 }
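/* Rough call sequence, sketching how the states above are meant to be
   used (not a prescription for new callers):

     target_terminal::inferior ();          // resume in the foreground
     target_terminal::ours_for_output ();   // print while it runs
     target_terminal::restore_inferior ();  // hand the terminal back
     target_terminal::ours ();              // inferior stopped; GDB is
                                            // back at the prompt
*/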
1089
1090 /* See target/target.h. */
1091
1092 void
1093 target_terminal::info (const char *arg, int from_tty)
1094 {
1095 current_inferior ()->top_target ()->terminal_info (arg, from_tty);
1096 }
1097
1098 /* See target.h. */
1099
1100 bool
1101 target_supports_terminal_ours (void)
1102 {
1103 /* The current top target is the target at the top of the target
1104 stack of the current inferior. While normally there's always an
1105 inferior, we must check for nullptr here because we can get here
1106 very early during startup, before the initial inferior is first
1107 created. */
1108 inferior *inf = current_inferior ();
1109
1110 if (inf == nullptr)
1111 return false;
1112 return inf->top_target ()->supports_terminal_ours ();
1113 }
1114
1115 static void
1116 tcomplain (void)
1117 {
1118 error (_("You can't do that when your target is `%s'"),
1119 current_inferior ()->top_target ()->shortname ());
1120 }
1121
1122 void
1123 noprocess (void)
1124 {
1125 error (_("You can't do that without a process to debug."));
1126 }
1127
1128 static void
1129 default_terminal_info (struct target_ops *self, const char *args, int from_tty)
1130 {
1131 gdb_printf (_("No saved terminal information.\n"));
1132 }
1133
1134 /* A default implementation for the to_get_ada_task_ptid target method.
1135
1136 This function builds the returned ptid using LWP and TID for its
1137 lwp and tid fields, and the pid of inferior_ptid for its pid
1138 field. */
1139
1140 static ptid_t
1141 default_get_ada_task_ptid (struct target_ops *self, long lwp, ULONGEST tid)
1142 {
1143 return ptid_t (inferior_ptid.pid (), lwp, tid);
1144 }
1145
1146 static enum exec_direction_kind
1147 default_execution_direction (struct target_ops *self)
1148 {
1149 if (!target_can_execute_reverse ())
1150 return EXEC_FORWARD;
1151 else if (!target_can_async_p ())
1152 return EXEC_FORWARD;
1153 else
1154 gdb_assert_not_reached ("\
1155 to_execution_direction must be implemented for reverse async");
1156 }
1157
1158 /* See target.h. */
1159
1160 void
1161 decref_target (target_ops *t)
1162 {
1163 t->decref ();
1164 if (t->refcount () == 0)
1165 {
1166 if (t->stratum () == process_stratum)
1167 connection_list_remove (as_process_stratum_target (t));
1168 target_close (t);
1169 }
1170 }
1171
1172 /* See target.h. */
1173
1174 void
1175 target_stack::push (target_ops *t)
1176 {
1177 t->incref ();
1178
1179 strata stratum = t->stratum ();
1180
1181 if (stratum == process_stratum)
1182 connection_list_add (as_process_stratum_target (t));
1183
1184 /* If there's already a target at this stratum, remove it. */
1185
1186 if (m_stack[stratum] != NULL)
1187 unpush (m_stack[stratum]);
1188
1189 /* Now add the new one. */
1190 m_stack[stratum] = t;
1191
1192 if (m_top < stratum)
1193 m_top = stratum;
1194 }
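/* Illustrative sketch of the per-stratum slots (the target names here
   are hypothetical):

     inf->push_target (exec_target);    // fills the file_stratum slot
     inf->push_target (remote_target);  // fills the process_stratum slot
                                        // and registers the connection
     inf->push_target (other_target);   // same stratum: remote_target is
                                        // unpushed first, then replaced

   M_TOP always tracks the highest occupied stratum.  */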
1195
1196 /* See target.h. */
1197
1198 bool
1199 target_stack::unpush (target_ops *t)
1200 {
1201 gdb_assert (t != NULL);
1202
1203 strata stratum = t->stratum ();
1204
1205 if (stratum == dummy_stratum)
1206 internal_error (__FILE__, __LINE__,
1207 _("Attempt to unpush the dummy target"));
1208
1209 /* Look for the specified target. Note that a target can only occur
1210 once in the target stack. */
1211
1212 if (m_stack[stratum] != t)
1213 {
1214 /* If T wasn't pushed, quit. Only open targets should be
1215 closed. */
1216 return false;
1217 }
1218
1219 /* Unchain the target. */
1220 m_stack[stratum] = NULL;
1221
1222 if (m_top == stratum)
1223 m_top = this->find_beneath (t)->stratum ();
1224
1225 /* Finally close the target, if there are no inferiors
1226 referencing this target still. Note we do this after unchaining,
1227 so any target method calls from within the target_close
1228 implementation don't end up in T anymore. Do leave the target
1229 open if there are other inferiors referencing this target
1230 still. */
1231 decref_target (t);
1232
1233 return true;
1234 }
1235
1236 /* Unpush TARGET and assert that it worked. */
1237
1238 static void
1239 unpush_target_and_assert (struct target_ops *target)
1240 {
1241 if (!current_inferior ()->unpush_target (target))
1242 {
1243 gdb_printf (gdb_stderr,
1244 "pop_all_targets couldn't find target %s\n",
1245 target->shortname ());
1246 internal_error (__FILE__, __LINE__,
1247 _("failed internal consistency check"));
1248 }
1249 }
1250
1251 void
1252 pop_all_targets_above (enum strata above_stratum)
1253 {
1254 while ((int) (current_inferior ()->top_target ()->stratum ())
1255 > (int) above_stratum)
1256 unpush_target_and_assert (current_inferior ()->top_target ());
1257 }
1258
1259 /* See target.h. */
1260
1261 void
1262 pop_all_targets_at_and_above (enum strata stratum)
1263 {
1264 while ((int) (current_inferior ()->top_target ()->stratum ())
1265 >= (int) stratum)
1266 unpush_target_and_assert (current_inferior ()->top_target ());
1267 }
1268
1269 void
1270 pop_all_targets (void)
1271 {
1272 pop_all_targets_above (dummy_stratum);
1273 }
1274
1275 void
1276 target_unpusher::operator() (struct target_ops *ops) const
1277 {
1278 current_inferior ()->unpush_target (ops);
1279 }
1280
1281 /* Default implementation of to_get_thread_local_address. */
1282
1283 static void
1284 generic_tls_error (void)
1285 {
1286 throw_error (TLS_GENERIC_ERROR,
1287 _("Cannot find thread-local variables on this target"));
1288 }
1289
1290 /* Using the objfile specified in OBJFILE, find the address for the
1291 current thread's thread-local storage with offset OFFSET. */
1292 CORE_ADDR
1293 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
1294 {
1295 volatile CORE_ADDR addr = 0;
1296 struct target_ops *target = current_inferior ()->top_target ();
1297 struct gdbarch *gdbarch = target_gdbarch ();
1298
1299 /* If OBJFILE is a separate debug object file, look for the
1300 original object file. */
1301 if (objfile->separate_debug_objfile_backlink != NULL)
1302 objfile = objfile->separate_debug_objfile_backlink;
1303
1304 if (gdbarch_fetch_tls_load_module_address_p (gdbarch))
1305 {
1306 ptid_t ptid = inferior_ptid;
1307
1308 try
1309 {
1310 CORE_ADDR lm_addr;
1311
1312 /* Fetch the load module address for this objfile. */
1313 lm_addr = gdbarch_fetch_tls_load_module_address (gdbarch,
1314 objfile);
1315
1316 if (gdbarch_get_thread_local_address_p (gdbarch))
1317 addr = gdbarch_get_thread_local_address (gdbarch, ptid, lm_addr,
1318 offset);
1319 else
1320 addr = target->get_thread_local_address (ptid, lm_addr, offset);
1321 }
1322 /* If an error occurred, print TLS related messages here. Otherwise,
1323 throw the error to some higher catcher. */
1324 catch (const gdb_exception &ex)
1325 {
1326 int objfile_is_library = (objfile->flags & OBJF_SHARED);
1327
1328 switch (ex.error)
1329 {
1330 case TLS_NO_LIBRARY_SUPPORT_ERROR:
1331 error (_("Cannot find thread-local variables "
1332 "in this thread library."));
1333 break;
1334 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
1335 if (objfile_is_library)
1336 error (_("Cannot find shared library `%s' in dynamic"
1337 " linker's load module list"), objfile_name (objfile));
1338 else
1339 error (_("Cannot find executable file `%s' in dynamic"
1340 " linker's load module list"), objfile_name (objfile));
1341 break;
1342 case TLS_NOT_ALLOCATED_YET_ERROR:
1343 if (objfile_is_library)
1344 error (_("The inferior has not yet allocated storage for"
1345 " thread-local variables in\n"
1346 "the shared library `%s'\n"
1347 "for %s"),
1348 objfile_name (objfile),
1349 target_pid_to_str (ptid).c_str ());
1350 else
1351 error (_("The inferior has not yet allocated storage for"
1352 " thread-local variables in\n"
1353 "the executable `%s'\n"
1354 "for %s"),
1355 objfile_name (objfile),
1356 target_pid_to_str (ptid).c_str ());
1357 break;
1358 case TLS_GENERIC_ERROR:
1359 if (objfile_is_library)
1360 error (_("Cannot find thread-local storage for %s, "
1361 "shared library %s:\n%s"),
1362 target_pid_to_str (ptid).c_str (),
1363 objfile_name (objfile), ex.what ());
1364 else
1365 error (_("Cannot find thread-local storage for %s, "
1366 "executable file %s:\n%s"),
1367 target_pid_to_str (ptid).c_str (),
1368 objfile_name (objfile), ex.what ());
1369 break;
1370 default:
1371 throw;
1372 break;
1373 }
1374 }
1375 }
1376 else
1377 error (_("Cannot find thread-local variables on this target"));
1378
1379 return addr;
1380 }
1381
1382 const char *
1383 target_xfer_status_to_string (enum target_xfer_status status)
1384 {
1385 #define CASE(X) case X: return #X
1386 switch (status)
1387 {
1388 CASE(TARGET_XFER_E_IO);
1389 CASE(TARGET_XFER_UNAVAILABLE);
1390 default:
1391 return "<unknown>";
1392 }
1393 #undef CASE
1394 };
1395
1396
1397 /* See target.h. */
1398
1399 gdb::unique_xmalloc_ptr<char>
1400 target_read_string (CORE_ADDR memaddr, int len, int *bytes_read)
1401 {
1402 gdb::unique_xmalloc_ptr<gdb_byte> buffer;
1403
1404 int ignore;
1405 if (bytes_read == nullptr)
1406 bytes_read = &ignore;
1407
1408 /* Note that the endian-ness does not matter here. */
1409 int errcode = target_read_string (memaddr, -1, 1, len, &buffer, bytes_read);
1410 if (errcode != 0)
1411 return {};
1412
1413 return gdb::unique_xmalloc_ptr<char> ((char *) buffer.release ());
1414 }
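/* Usage sketch for the overload above (ADDR stands for some inferior
   address of interest):

     int bytes_read;
     gdb::unique_xmalloc_ptr<char> str
       = target_read_string (addr, 256, &bytes_read);
     if (str != nullptr)
       {
         // ... use str.get (); bytes_read says how much was fetched ...
       }
*/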
1415
1416 const target_section_table *
1417 target_get_section_table (struct target_ops *target)
1418 {
1419 return target->get_section_table ();
1420 }
1421
1422 /* Find a section containing ADDR. */
1423
1424 const struct target_section *
1425 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1426 {
1427 const target_section_table *table = target_get_section_table (target);
1428
1429 if (table == NULL)
1430 return NULL;
1431
1432 for (const target_section &secp : *table)
1433 {
1434 if (addr >= secp.addr && addr < secp.endaddr)
1435 return &secp;
1436 }
1437 return NULL;
1438 }
1439
1440 /* See target.h. */
1441
1442 const target_section_table *
1443 default_get_section_table ()
1444 {
1445 return &current_program_space->target_sections ();
1446 }
1447
1448 /* Helper for the memory xfer routines. Checks the attributes of the
1449 memory region of MEMADDR against the read or write being attempted.
1450 If the access is permitted returns true, otherwise returns false.
1451 REGION_P is an optional output parameter. If not-NULL, it is
1452 filled with a pointer to the memory region of MEMADDR. REG_LEN
1453 returns LEN trimmed to the end of the region. This is how much the
1454 caller can continue requesting, if the access is permitted. A
1455 single xfer request must not straddle memory region boundaries. */
1456
1457 static int
1458 memory_xfer_check_region (gdb_byte *readbuf, const gdb_byte *writebuf,
1459 ULONGEST memaddr, ULONGEST len, ULONGEST *reg_len,
1460 struct mem_region **region_p)
1461 {
1462 struct mem_region *region;
1463
1464 region = lookup_mem_region (memaddr);
1465
1466 if (region_p != NULL)
1467 *region_p = region;
1468
1469 switch (region->attrib.mode)
1470 {
1471 case MEM_RO:
1472 if (writebuf != NULL)
1473 return 0;
1474 break;
1475
1476 case MEM_WO:
1477 if (readbuf != NULL)
1478 return 0;
1479 break;
1480
1481 case MEM_FLASH:
1482 /* We only support writing to flash during "load" for now. */
1483 if (writebuf != NULL)
1484 error (_("Writing to flash memory forbidden in this context"));
1485 break;
1486
1487 case MEM_NONE:
1488 return 0;
1489 }
1490
1491 /* region->hi == 0 means there's no upper bound. */
1492 if (memaddr + len < region->hi || region->hi == 0)
1493 *reg_len = len;
1494 else
1495 *reg_len = region->hi - memaddr;
1496
1497 return 1;
1498 }
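/* Worked example for the trimming above: with a region covering
   [0x1000, 0x2000) and a request of MEMADDR == 0x1f00, LEN == 0x400,
   the access is allowed (assuming the region's mode permits it) and
   *REG_LEN comes back as 0x100, so the caller transfers only up to the
   region boundary and must issue a separate request for the rest.  */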
1499
1500 /* Read memory from more than one valid target. A core file, for
1501 instance, could have some of memory but delegate other bits to
1502 the target below it. So, we must manually try all targets. */
1503
1504 enum target_xfer_status
1505 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1506 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1507 ULONGEST *xfered_len)
1508 {
1509 enum target_xfer_status res;
1510
1511 do
1512 {
1513 res = ops->xfer_partial (TARGET_OBJECT_MEMORY, NULL,
1514 readbuf, writebuf, memaddr, len,
1515 xfered_len);
1516 if (res == TARGET_XFER_OK)
1517 break;
1518
1519 /* Stop if the target reports that the memory is not available. */
1520 if (res == TARGET_XFER_UNAVAILABLE)
1521 break;
1522
1523 /* Don't continue past targets which have all the memory.
1524 At one time, this code was necessary to read data from
1525 executables / shared libraries when data for the requested
1526 addresses weren't available in the core file. But now the
1527 core target handles this case itself. */
1528 if (ops->has_all_memory ())
1529 break;
1530
1531 ops = ops->beneath ();
1532 }
1533 while (ops != NULL);
1534
1535 /* The cache works at the raw memory level. Make sure the cache
1536 gets updated with raw contents no matter what kind of memory
1537 object was originally being written. Note we do write-through
1538 first, so that if it fails, we don't update the cache with contents
1539 that never made it to the target. */
1540 if (writebuf != NULL
1541 && inferior_ptid != null_ptid
1542 && target_dcache_init_p ()
1543 && (stack_cache_enabled_p () || code_cache_enabled_p ()))
1544 {
1545 DCACHE *dcache = target_dcache_get ();
1546
1547 /* Note that writing to an area of memory which wasn't present
1548 in the cache doesn't cause it to be loaded in. */
1549 dcache_update (dcache, res, memaddr, writebuf, *xfered_len);
1550 }
1551
1552 return res;
1553 }
1554
1555 /* Perform a partial memory transfer.
1556 For docs see target.h, to_xfer_partial. */
1557
1558 static enum target_xfer_status
1559 memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
1560 gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
1561 ULONGEST len, ULONGEST *xfered_len)
1562 {
1563 enum target_xfer_status res;
1564 ULONGEST reg_len;
1565 struct mem_region *region;
1566 struct inferior *inf;
1567
1568 /* For accesses to unmapped overlay sections, read directly from
1569 files. Must do this first, as MEMADDR may need adjustment. */
1570 if (readbuf != NULL && overlay_debugging)
1571 {
1572 struct obj_section *section = find_pc_overlay (memaddr);
1573
1574 if (pc_in_unmapped_range (memaddr, section))
1575 {
1576 const target_section_table *table = target_get_section_table (ops);
1577 const char *section_name = section->the_bfd_section->name;
1578
1579 memaddr = overlay_mapped_address (memaddr, section);
1580
1581 auto match_cb = [=] (const struct target_section *s)
1582 {
1583 return (strcmp (section_name, s->the_bfd_section->name) == 0);
1584 };
1585
1586 return section_table_xfer_memory_partial (readbuf, writebuf,
1587 memaddr, len, xfered_len,
1588 *table, match_cb);
1589 }
1590 }
1591
1592 /* Try the executable files, if "trust-readonly-sections" is set. */
1593 if (readbuf != NULL && trust_readonly)
1594 {
1595 const struct target_section *secp
1596 = target_section_by_addr (ops, memaddr);
1597 if (secp != NULL
1598 && (bfd_section_flags (secp->the_bfd_section) & SEC_READONLY))
1599 {
1600 const target_section_table *table = target_get_section_table (ops);
1601 return section_table_xfer_memory_partial (readbuf, writebuf,
1602 memaddr, len, xfered_len,
1603 *table);
1604 }
1605 }
1606
1607 /* Try GDB's internal data cache. */
1608
1609 if (!memory_xfer_check_region (readbuf, writebuf, memaddr, len, &reg_len,
1610 &region))
1611 return TARGET_XFER_E_IO;
1612
1613 if (inferior_ptid != null_ptid)
1614 inf = current_inferior ();
1615 else
1616 inf = NULL;
1617
1618 if (inf != NULL
1619 && readbuf != NULL
1620 /* The dcache reads whole cache lines; that doesn't play well
1621 with reading from a trace buffer, because reading outside of
1622 the collected memory range fails. */
1623 && get_traceframe_number () == -1
1624 && (region->attrib.cache
1625 || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
1626 || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
1627 {
1628 DCACHE *dcache = target_dcache_get_or_init ();
1629
1630 return dcache_read_memory_partial (ops, dcache, memaddr, readbuf,
1631 reg_len, xfered_len);
1632 }
1633
1634 /* If none of those methods found the memory we wanted, fall back
1635 to a target partial transfer. Normally a single call to
1636 to_xfer_partial is enough; if it doesn't recognize an object
1637 it will call the to_xfer_partial of the next target down.
1638 But for memory this won't do. Memory is the only target
1639 object which can be read from more than one valid target.
1640 A core file, for instance, could have some of memory but
1641 delegate other bits to the target below it. So, we must
1642 manually try all targets. */
1643
1644 res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
1645 xfered_len);
1646
1647 /* If we still haven't got anything, return the last error. We
1648 give up. */
1649 return res;
1650 }
1651
1652 /* Perform a partial memory transfer. For docs see target.h,
1653 to_xfer_partial. */
1654
1655 static enum target_xfer_status
1656 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1657 gdb_byte *readbuf, const gdb_byte *writebuf,
1658 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1659 {
1660 enum target_xfer_status res;
1661
1662 /* Zero length requests are ok and require no work. */
1663 if (len == 0)
1664 return TARGET_XFER_EOF;
1665
1666 memaddr = address_significant (target_gdbarch (), memaddr);
1667
1668 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1669 breakpoint insns, thus hiding out from higher layers whether
1670 there are software breakpoints inserted in the code stream. */
1671 if (readbuf != NULL)
1672 {
1673 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1674 xfered_len);
1675
1676 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1677 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, *xfered_len);
1678 }
1679 else
1680 {
1681 /* A large write request is likely to be partially satisfied
1682 by memory_xfer_partial_1. We will continually malloc
1683 and free a copy of the entire write request for breakpoint
1684 shadow handling even though we only end up writing a small
1685 subset of it. Cap writes to a limit specified by the target
1686 to mitigate this. */
1687 len = std::min (ops->get_memory_xfer_limit (), len);
1688
1689 gdb::byte_vector buf (writebuf, writebuf + len);
1690 breakpoint_xfer_memory (NULL, buf.data (), writebuf, memaddr, len);
1691 res = memory_xfer_partial_1 (ops, object, NULL, buf.data (), memaddr, len,
1692 xfered_len);
1693 }
1694
1695 return res;
1696 }
1697
1698 scoped_restore_tmpl<int>
1699 make_scoped_restore_show_memory_breakpoints (int show)
1700 {
1701 return make_scoped_restore (&show_memory_breakpoints, show);
1702 }
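/* Usage sketch: temporarily read raw memory contents, including any
   breakpoint instructions GDB has inserted:

     {
       scoped_restore restore_memory
         = make_scoped_restore_show_memory_breakpoints (1);
       // ... reads in this scope see the breakpoint instructions ...
     }
     // previous setting is restored when the scope exits
*/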
1703
1704 /* For docs see target.h, to_xfer_partial. */
1705
1706 enum target_xfer_status
1707 target_xfer_partial (struct target_ops *ops,
1708 enum target_object object, const char *annex,
1709 gdb_byte *readbuf, const gdb_byte *writebuf,
1710 ULONGEST offset, ULONGEST len,
1711 ULONGEST *xfered_len)
1712 {
1713 enum target_xfer_status retval;
1714
1715 /* Transfer is done when LEN is zero. */
1716 if (len == 0)
1717 return TARGET_XFER_EOF;
1718
1719 if (writebuf && !may_write_memory)
1720 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1721 core_addr_to_string_nz (offset), plongest (len));
1722
1723 *xfered_len = 0;
1724
1725 /* If this is a memory transfer, let the memory-specific code
1726 have a look at it instead. Memory transfers are more
1727 complicated. */
1728 if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
1729 || object == TARGET_OBJECT_CODE_MEMORY)
1730 retval = memory_xfer_partial (ops, object, readbuf,
1731 writebuf, offset, len, xfered_len);
1732 else if (object == TARGET_OBJECT_RAW_MEMORY)
1733 {
1734 /* Skip/avoid accessing the target if the memory region
1735 attributes block the access. Check this here instead of in
1736 raw_memory_xfer_partial as otherwise we'd end up checking
1737 this twice in the case where the memory_xfer_partial path is
1738 taken; once before checking the dcache, and again in the
1739 tail call to raw_memory_xfer_partial. */
1740 if (!memory_xfer_check_region (readbuf, writebuf, offset, len, &len,
1741 NULL))
1742 return TARGET_XFER_E_IO;
1743
1744 /* Request the normal memory object from other layers. */
1745 retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
1746 xfered_len);
1747 }
1748 else
1749 retval = ops->xfer_partial (object, annex, readbuf,
1750 writebuf, offset, len, xfered_len);
1751
1752 if (targetdebug)
1753 {
1754 const unsigned char *myaddr = NULL;
1755
1756 gdb_printf (gdb_stdlog,
1757 "%s:target_xfer_partial "
1758 "(%d, %s, %s, %s, %s, %s) = %d, %s",
1759 ops->shortname (),
1760 (int) object,
1761 (annex ? annex : "(null)"),
1762 host_address_to_string (readbuf),
1763 host_address_to_string (writebuf),
1764 core_addr_to_string_nz (offset),
1765 pulongest (len), retval,
1766 pulongest (*xfered_len));
1767
1768 if (readbuf)
1769 myaddr = readbuf;
1770 if (writebuf)
1771 myaddr = writebuf;
1772 if (retval == TARGET_XFER_OK && myaddr != NULL)
1773 {
1774 int i;
1775
1776 gdb_puts (", bytes =", gdb_stdlog);
1777 for (i = 0; i < *xfered_len; i++)
1778 {
1779 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1780 {
1781 if (targetdebug < 2 && i > 0)
1782 {
1783 gdb_printf (gdb_stdlog, " ...");
1784 break;
1785 }
1786 gdb_printf (gdb_stdlog, "\n");
1787 }
1788
1789 gdb_printf (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1790 }
1791 }
1792
1793 gdb_putc ('\n', gdb_stdlog);
1794 }
1795
1796 /* Check implementations of to_xfer_partial update *XFERED_LEN
1797 properly. Do assertion after printing debug messages, so that we
1798 can find more clues on assertion failure from debugging messages. */
1799 if (retval == TARGET_XFER_OK || retval == TARGET_XFER_UNAVAILABLE)
1800 gdb_assert (*xfered_len > 0);
1801
1802 return retval;
1803 }
1804
1805 /* Read LEN bytes of target memory at address MEMADDR, placing the
1806 results in GDB's memory at MYADDR. Returns either 0 for success or
1807 -1 if any error occurs.
1808
1809 If an error occurs, no guarantee is made about the contents of the data at
1810 MYADDR. In particular, the caller should not depend upon partial reads
1811 filling the buffer with good data. There is no way for the caller to know
1812 how much good data might have been transferred anyway. Callers that can
1813 deal with partial reads should call target_read (which will retry until
1814 it makes no progress, and then return how much was transferred). */
1815
1816 int
1817 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1818 {
1819 if (target_read (current_inferior ()->top_target (),
1820 TARGET_OBJECT_MEMORY, NULL,
1821 myaddr, memaddr, len) == len)
1822 return 0;
1823 else
1824 return -1;
1825 }
1826
1827 /* See target/target.h. */
1828
1829 int
1830 target_read_uint32 (CORE_ADDR memaddr, uint32_t *result)
1831 {
1832 gdb_byte buf[4];
1833 int r;
1834
1835 r = target_read_memory (memaddr, buf, sizeof buf);
1836 if (r != 0)
1837 return r;
1838 *result = extract_unsigned_integer (buf, sizeof buf,
1839 gdbarch_byte_order (target_gdbarch ()));
1840 return 0;
1841 }
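/* Usage sketch (ADDR stands for some inferior address of interest):

     uint32_t value;
     if (target_read_uint32 (addr, &value) == 0)
       {
         // VALUE holds the 32-bit word at ADDR, decoded with the
         // target's byte order.
       }
     else
       {
         // The read failed; the nonzero return is the error from
         // target_read_memory.
       }
*/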
1842
1843 /* Like target_read_memory, but specify explicitly that this is a read
1844 from the target's raw memory. That is, this read bypasses the
1845 dcache, breakpoint shadowing, etc. */
1846
1847 int
1848 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1849 {
1850 if (target_read (current_inferior ()->top_target (),
1851 TARGET_OBJECT_RAW_MEMORY, NULL,
1852 myaddr, memaddr, len) == len)
1853 return 0;
1854 else
1855 return -1;
1856 }
1857
1858 /* Like target_read_memory, but specify explicitly that this is a read from
1859 the target's stack. This may trigger different cache behavior. */
1860
1861 int
1862 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1863 {
1864 if (target_read (current_inferior ()->top_target (),
1865 TARGET_OBJECT_STACK_MEMORY, NULL,
1866 myaddr, memaddr, len) == len)
1867 return 0;
1868 else
1869 return -1;
1870 }
1871
1872 /* Like target_read_memory, but specify explicitly that this is a read from
1873 the target's code. This may trigger different cache behavior. */
1874
1875 int
1876 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1877 {
1878 if (target_read (current_inferior ()->top_target (),
1879 TARGET_OBJECT_CODE_MEMORY, NULL,
1880 myaddr, memaddr, len) == len)
1881 return 0;
1882 else
1883 return -1;
1884 }
1885
1886 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1887 Returns either 0 for success or -1 if any error occurs. If an
1888 error occurs, no guarantee is made about how much data got written.
1889 Callers that can deal with partial writes should call
1890 target_write. */
1891
1892 int
1893 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1894 {
1895 if (target_write (current_inferior ()->top_target (),
1896 TARGET_OBJECT_MEMORY, NULL,
1897 myaddr, memaddr, len) == len)
1898 return 0;
1899 else
1900 return -1;
1901 }
1902
1903 /* Write LEN bytes from MYADDR to target raw memory at address
1904 MEMADDR. Returns either 0 for success or -1 if any error occurs.
1905 If an error occurs, no guarantee is made about how much data got
1906 written. Callers that can deal with partial writes should call
1907 target_write. */
1908
1909 int
1910 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1911 {
1912 if (target_write (current_inferior ()->top_target (),
1913 TARGET_OBJECT_RAW_MEMORY, NULL,
1914 myaddr, memaddr, len) == len)
1915 return 0;
1916 else
1917 return -1;
1918 }
1919
1920 /* Fetch the target's memory map. */
1921
1922 std::vector<mem_region>
1923 target_memory_map (void)
1924 {
1925 target_ops *target = current_inferior ()->top_target ();
1926 std::vector<mem_region> result = target->memory_map ();
1927 if (result.empty ())
1928 return result;
1929
1930 std::sort (result.begin (), result.end ());
1931
1932 /* Check that regions do not overlap. Simultaneously assign
1933 a numbering for the "mem" commands to use to refer to
1934 each region. */
1935 mem_region *last_one = NULL;
1936 for (size_t ix = 0; ix < result.size (); ix++)
1937 {
1938 mem_region *this_one = &result[ix];
1939 this_one->number = ix;
1940
1941 if (last_one != NULL && last_one->hi > this_one->lo)
1942 {
1943 warning (_("Overlapping regions in memory map: ignoring"));
1944 return std::vector<mem_region> ();
1945 }
1946
1947 last_one = this_one;
1948 }
1949
1950 return result;
1951 }
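/* Usage sketch: walking the fetched map (the "mem" commands are the
   main consumer of the numbering assigned above):

     for (const mem_region &r : target_memory_map ())
       {
         // r.lo, r.hi and r.attrib describe one region; r.number is
         // the index assigned above.
       }
*/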
1952
1953 void
1954 target_flash_erase (ULONGEST address, LONGEST length)
1955 {
1956 current_inferior ()->top_target ()->flash_erase (address, length);
1957 }
1958
1959 void
1960 target_flash_done (void)
1961 {
1962 current_inferior ()->top_target ()->flash_done ();
1963 }
1964
1965 static void
1966 show_trust_readonly (struct ui_file *file, int from_tty,
1967 struct cmd_list_element *c, const char *value)
1968 {
1969 gdb_printf (file,
1970 _("Mode for reading from readonly sections is %s.\n"),
1971 value);
1972 }
1973
1974 /* Target vector read/write partial wrapper functions. */
1975
1976 static enum target_xfer_status
1977 target_read_partial (struct target_ops *ops,
1978 enum target_object object,
1979 const char *annex, gdb_byte *buf,
1980 ULONGEST offset, ULONGEST len,
1981 ULONGEST *xfered_len)
1982 {
1983 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
1984 xfered_len);
1985 }
1986
1987 static enum target_xfer_status
1988 target_write_partial (struct target_ops *ops,
1989 enum target_object object,
1990 const char *annex, const gdb_byte *buf,
1991 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
1992 {
1993 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
1994 xfered_len);
1995 }
1996
1997 /* Wrappers to perform the full transfer. */
1998
1999 /* For docs on target_read see target.h. */
2000
2001 LONGEST
2002 target_read (struct target_ops *ops,
2003 enum target_object object,
2004 const char *annex, gdb_byte *buf,
2005 ULONGEST offset, LONGEST len)
2006 {
2007 LONGEST xfered_total = 0;
2008 int unit_size = 1;
2009
2010 /* If we are reading from a memory object, find the length of an addressable
2011 unit for that architecture. */
2012 if (object == TARGET_OBJECT_MEMORY
2013 || object == TARGET_OBJECT_STACK_MEMORY
2014 || object == TARGET_OBJECT_CODE_MEMORY
2015 || object == TARGET_OBJECT_RAW_MEMORY)
2016 unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
2017
2018 while (xfered_total < len)
2019 {
2020 ULONGEST xfered_partial;
2021 enum target_xfer_status status;
2022
2023 status = target_read_partial (ops, object, annex,
2024 buf + xfered_total * unit_size,
2025 offset + xfered_total, len - xfered_total,
2026 &xfered_partial);
2027
2028 /* Call an observer, notifying them of the xfer progress? */
2029 if (status == TARGET_XFER_EOF)
2030 return xfered_total;
2031 else if (status == TARGET_XFER_OK)
2032 {
2033 xfered_total += xfered_partial;
2034 QUIT;
2035 }
2036 else
2037 return TARGET_XFER_E_IO;
2038
2039 }
2040 return len;
2041 }
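
/* Editorial example (illustrative sketch, not part of GDB): unlike
   target_xfer_partial, target_read either transfers all LEN units or
   reports failure.  ADDR and LEN below are assumptions.

     gdb::byte_vector buf (len);

     if (target_read (current_inferior ()->top_target (),
		      TARGET_OBJECT_MEMORY, NULL, buf.data (), addr, len)
	 != len)
       memory_error (TARGET_XFER_E_IO, addr);
*/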
2042
2043 /* Assuming that the entire [begin, end) range of memory cannot be
2044 read, try to read whatever subrange is possible to read.
2045
2046 The function returns, in RESULT, either zero or one memory block.
2047 If there's a readable subrange at the beginning, it is completely
2048 read and returned. Any further readable subrange will not be read.
2049 Otherwise, if there's a readable subrange at the end, it will be
2050 completely read and returned. Any readable subranges before it
2051 (obviously, not starting at the beginning) will be ignored. In
2052 other cases -- either no readable subrange, or readable subrange(s)
2053 that are neither at the beginning nor at the end -- nothing is returned.
2054
2055 The purpose of this function is to handle a read across a boundary
2056 of accessible memory in a case when memory map is not available.
2057 The above restrictions are fine for this case, but will give
2058 incorrect results if the memory is 'patchy'. However, supporting
2059 'patchy' memory would require trying to read every single byte,
2060 and that seems an unacceptable solution. An explicit memory map is
2061 recommended for this case -- and target_read_memory_robust will
2062 take care of reading multiple ranges then. */
2063
2064 static void
2065 read_whatever_is_readable (struct target_ops *ops,
2066 const ULONGEST begin, const ULONGEST end,
2067 int unit_size,
2068 std::vector<memory_read_result> *result)
2069 {
2070 ULONGEST current_begin = begin;
2071 ULONGEST current_end = end;
2072 int forward;
2073 ULONGEST xfered_len;
2074
2075 /* If we previously failed to read 1 byte, nothing can be done here. */
2076 if (end - begin <= 1)
2077 return;
2078
2079 gdb::unique_xmalloc_ptr<gdb_byte> buf ((gdb_byte *) xmalloc (end - begin));
2080
2081 /* Check that either first or the last byte is readable, and give up
2082 if not. This heuristic is meant to permit reading accessible memory
2083 at the boundary of accessible region. */
2084 if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2085 buf.get (), begin, 1, &xfered_len) == TARGET_XFER_OK)
2086 {
2087 forward = 1;
2088 ++current_begin;
2089 }
2090 else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2091 buf.get () + (end - begin) - 1, end - 1, 1,
2092 &xfered_len) == TARGET_XFER_OK)
2093 {
2094 forward = 0;
2095 --current_end;
2096 }
2097 else
2098 return;
2099
2100 /* Loop invariant: the [current_begin, current_end) range was previously
2101 found to be not readable as a whole.
2102
2103 Note loop condition -- if the range has 1 byte, we can't divide the range
2104 so there's no point trying further. */
2105 while (current_end - current_begin > 1)
2106 {
2107 ULONGEST first_half_begin, first_half_end;
2108 ULONGEST second_half_begin, second_half_end;
2109 LONGEST xfer;
2110 ULONGEST middle = current_begin + (current_end - current_begin) / 2;
2111
2112 if (forward)
2113 {
2114 first_half_begin = current_begin;
2115 first_half_end = middle;
2116 second_half_begin = middle;
2117 second_half_end = current_end;
2118 }
2119 else
2120 {
2121 first_half_begin = middle;
2122 first_half_end = current_end;
2123 second_half_begin = current_begin;
2124 second_half_end = middle;
2125 }
2126
2127 xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2128 buf.get () + (first_half_begin - begin) * unit_size,
2129 first_half_begin,
2130 first_half_end - first_half_begin);
2131
2132 if (xfer == first_half_end - first_half_begin)
2133 {
2134 /* This half reads up fine. So, the error must be in the
2135 other half. */
2136 current_begin = second_half_begin;
2137 current_end = second_half_end;
2138 }
2139 else
2140 {
2141 /* This half is not readable. Because we've tried one byte, we
2142 know some part of this half is actually readable. Go to the next
2143 iteration to divide again and try to read.
2144
2145 We don't handle the other half, because this function only tries
2146 to read a single readable subrange. */
2147 current_begin = first_half_begin;
2148 current_end = first_half_end;
2149 }
2150 }
2151
2152 if (forward)
2153 {
2154 /* The [begin, current_begin) range has been read. */
2155 result->emplace_back (begin, current_end, std::move (buf));
2156 }
2157 else
2158 {
2159 /* The [current_end, end) range has been read. */
2160 LONGEST region_len = end - current_end;
2161
2162 gdb::unique_xmalloc_ptr<gdb_byte> data
2163 ((gdb_byte *) xmalloc (region_len * unit_size));
2164 memcpy (data.get (), buf.get () + (current_end - begin) * unit_size,
2165 region_len * unit_size);
2166 result->emplace_back (current_end, end, std::move (data));
2167 }
2168 }
2169
2170 std::vector<memory_read_result>
2171 read_memory_robust (struct target_ops *ops,
2172 const ULONGEST offset, const LONGEST len)
2173 {
2174 std::vector<memory_read_result> result;
2175 int unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
2176
2177 LONGEST xfered_total = 0;
2178 while (xfered_total < len)
2179 {
2180 struct mem_region *region = lookup_mem_region (offset + xfered_total);
2181 LONGEST region_len;
2182
2183 /* If there is no explicit region, a fake one should be created. */
2184 gdb_assert (region);
2185
2186 if (region->hi == 0)
2187 region_len = len - xfered_total;
2188 else
2189 region_len = region->hi - offset;
2190
2191 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2192 {
2193 /* Cannot read this region. Note that we can end up here only
2194 if the region is explicitly marked inaccessible, or
2195 'inaccessible-by-default' is in effect. */
2196 xfered_total += region_len;
2197 }
2198 else
2199 {
2200 LONGEST to_read = std::min (len - xfered_total, region_len);
2201 gdb::unique_xmalloc_ptr<gdb_byte> buffer
2202 ((gdb_byte *) xmalloc (to_read * unit_size));
2203
2204 LONGEST xfered_partial =
2205 target_read (ops, TARGET_OBJECT_MEMORY, NULL, buffer.get (),
2206 offset + xfered_total, to_read);
2207 /* Call an observer, notifying them of the xfer progress? */
2208 if (xfered_partial <= 0)
2209 {
2210 /* Got an error reading full chunk. See if maybe we can read
2211 some subrange. */
2212 read_whatever_is_readable (ops, offset + xfered_total,
2213 offset + xfered_total + to_read,
2214 unit_size, &result);
2215 xfered_total += to_read;
2216 }
2217 else
2218 {
2219 result.emplace_back (offset + xfered_total,
2220 offset + xfered_total + xfered_partial,
2221 std::move (buffer));
2222 xfered_total += xfered_partial;
2223 }
2224 QUIT;
2225 }
2226 }
2227
2228 return result;
2229 }
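
/* Editorial example (illustrative sketch, not part of GDB): consumers in
   the style of MI's -data-read-memory-bytes walk the returned chunks;
   unreadable gaps simply have no corresponding memory_read_result.
   ADDR and LEN are assumptions.

     std::vector<memory_read_result> chunks
       = read_memory_robust (current_inferior ()->top_target (), addr, len);

     for (const memory_read_result &chunk : chunks)
       gdb_printf ("readable: [%s, %s)\n",
		   hex_string (chunk.begin), hex_string (chunk.end));
*/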
2230
2231
2232 /* An alternative to target_write with progress callbacks. */
2233
2234 LONGEST
2235 target_write_with_progress (struct target_ops *ops,
2236 enum target_object object,
2237 const char *annex, const gdb_byte *buf,
2238 ULONGEST offset, LONGEST len,
2239 void (*progress) (ULONGEST, void *), void *baton)
2240 {
2241 LONGEST xfered_total = 0;
2242 int unit_size = 1;
2243
2244 /* If we are writing to a memory object, find the length of an addressable
2245 unit for that architecture. */
2246 if (object == TARGET_OBJECT_MEMORY
2247 || object == TARGET_OBJECT_STACK_MEMORY
2248 || object == TARGET_OBJECT_CODE_MEMORY
2249 || object == TARGET_OBJECT_RAW_MEMORY)
2250 unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
2251
2252 /* Give the progress callback a chance to set up. */
2253 if (progress)
2254 (*progress) (0, baton);
2255
2256 while (xfered_total < len)
2257 {
2258 ULONGEST xfered_partial;
2259 enum target_xfer_status status;
2260
2261 status = target_write_partial (ops, object, annex,
2262 buf + xfered_total * unit_size,
2263 offset + xfered_total, len - xfered_total,
2264 &xfered_partial);
2265
2266 if (status != TARGET_XFER_OK)
2267 return status == TARGET_XFER_EOF ? xfered_total : TARGET_XFER_E_IO;
2268
2269 if (progress)
2270 (*progress) (xfered_partial, baton);
2271
2272 xfered_total += xfered_partial;
2273 QUIT;
2274 }
2275 return len;
2276 }
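
/* Editorial example (illustrative sketch, not part of GDB): the progress
   callback is called once with 0 before the first transfer and then with
   the size of each completed chunk, which is how download-style commands
   can drive a progress display.  DATA, ADDR, LEN and the callback are
   assumptions.

     ULONGEST total = 0;
     auto note_progress = [] (ULONGEST xfered, void *baton)
       {
	 *(ULONGEST *) baton += xfered;
       };

     LONGEST written
       = target_write_with_progress (current_inferior ()->top_target (),
				     TARGET_OBJECT_MEMORY, NULL, data, addr,
				     len, note_progress, &total);
*/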
2277
2278 /* For docs on target_write see target.h. */
2279
2280 LONGEST
2281 target_write (struct target_ops *ops,
2282 enum target_object object,
2283 const char *annex, const gdb_byte *buf,
2284 ULONGEST offset, LONGEST len)
2285 {
2286 return target_write_with_progress (ops, object, annex, buf, offset, len,
2287 NULL, NULL);
2288 }
2289
2290 /* Help for target_read_alloc and target_read_stralloc. See their comments
2291 for details. */
2292
2293 template <typename T>
2294 gdb::optional<gdb::def_vector<T>>
2295 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2296 const char *annex)
2297 {
2298 gdb::def_vector<T> buf;
2299 size_t buf_pos = 0;
2300 const int chunk = 4096;
2301
2302 /* This function does not have a length parameter; it reads the
2303 entire OBJECT. Also, it doesn't support objects fetched partly
2304 from one target and partly from another (in a different stratum,
2305 e.g. a core file and an executable). Both reasons make it
2306 unsuitable for reading memory. */
2307 gdb_assert (object != TARGET_OBJECT_MEMORY);
2308
2309 /* Start by reading up to 4K at a time. The target will throttle
2310 this number down if necessary. */
2311 while (1)
2312 {
2313 ULONGEST xfered_len;
2314 enum target_xfer_status status;
2315
2316 buf.resize (buf_pos + chunk);
2317
2318 status = target_read_partial (ops, object, annex,
2319 (gdb_byte *) &buf[buf_pos],
2320 buf_pos, chunk,
2321 &xfered_len);
2322
2323 if (status == TARGET_XFER_EOF)
2324 {
2325 /* Read all there was. */
2326 buf.resize (buf_pos);
2327 return buf;
2328 }
2329 else if (status != TARGET_XFER_OK)
2330 {
2331 /* An error occurred. */
2332 return {};
2333 }
2334
2335 buf_pos += xfered_len;
2336
2337 QUIT;
2338 }
2339 }
2340
2341 /* See target.h. */
2342
2343 gdb::optional<gdb::byte_vector>
2344 target_read_alloc (struct target_ops *ops, enum target_object object,
2345 const char *annex)
2346 {
2347 return target_read_alloc_1<gdb_byte> (ops, object, annex);
2348 }
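
/* Editorial example (illustrative sketch, not part of GDB): special
   objects whose size is not known up front are read whole with
   target_read_alloc, for instance the auxiliary vector.

     gdb::optional<gdb::byte_vector> auxv
       = target_read_alloc (current_inferior ()->top_target (),
			    TARGET_OBJECT_AUXV, NULL);

     if (auxv)
       gdb_printf ("auxv is %s bytes\n", pulongest (auxv->size ()));
*/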
2349
2350 /* See target.h. */
2351
2352 gdb::optional<gdb::char_vector>
2353 target_read_stralloc (struct target_ops *ops, enum target_object object,
2354 const char *annex)
2355 {
2356 gdb::optional<gdb::char_vector> buf
2357 = target_read_alloc_1<char> (ops, object, annex);
2358
2359 if (!buf)
2360 return {};
2361
2362 if (buf->empty () || buf->back () != '\0')
2363 buf->push_back ('\0');
2364
2365 /* Check for embedded NUL bytes; but allow trailing NULs. */
2366 for (auto it = std::find (buf->begin (), buf->end (), '\0');
2367 it != buf->end (); it++)
2368 if (*it != '\0')
2369 {
2370 warning (_("target object %d, annex %s, "
2371 "contained unexpected null characters"),
2372 (int) object, annex ? annex : "(none)");
2373 break;
2374 }
2375
2376 return buf;
2377 }
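
/* Editorial example (illustrative sketch, not part of GDB): string
   objects such as the XML-encoded OS data are usually fetched through
   target_read_stralloc, which guarantees NUL termination.

     gdb::optional<gdb::char_vector> xml
       = target_read_stralloc (current_inferior ()->top_target (),
			       TARGET_OBJECT_OSDATA, "processes");

     if (xml)
       gdb_printf ("%s", xml->data ());
*/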
2378
2379 /* Memory transfer methods. */
2380
2381 void
2382 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2383 LONGEST len)
2384 {
2385 /* This method is used to read from an alternate, non-current
2386 target. This read must bypass the overlay support (as symbols
2387 don't match this target), and GDB's internal cache (wrong cache
2388 for this target). */
2389 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2390 != len)
2391 memory_error (TARGET_XFER_E_IO, addr);
2392 }
2393
2394 ULONGEST
2395 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2396 int len, enum bfd_endian byte_order)
2397 {
2398 gdb_byte buf[sizeof (ULONGEST)];
2399
2400 gdb_assert (len <= sizeof (buf));
2401 get_target_memory (ops, addr, buf, len);
2402 return extract_unsigned_integer (buf, len, byte_order);
2403 }
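
/* Editorial example (illustrative sketch, not part of GDB): reading a
   4-byte unsigned word from an alternate target, using the current
   architecture's byte order.  OPS and ADDR are assumptions.

     enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
     ULONGEST word = get_target_memory_unsigned (ops, addr, 4, byte_order);
*/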
2404
2405 /* See target.h. */
2406
2407 int
2408 target_insert_breakpoint (struct gdbarch *gdbarch,
2409 struct bp_target_info *bp_tgt)
2410 {
2411 if (!may_insert_breakpoints)
2412 {
2413 warning (_("May not insert breakpoints"));
2414 return 1;
2415 }
2416
2417 target_ops *target = current_inferior ()->top_target ();
2418
2419 return target->insert_breakpoint (gdbarch, bp_tgt);
2420 }
2421
2422 /* See target.h. */
2423
2424 int
2425 target_remove_breakpoint (struct gdbarch *gdbarch,
2426 struct bp_target_info *bp_tgt,
2427 enum remove_bp_reason reason)
2428 {
2429 /* This is kind of a weird case to handle, but the permission might
2430 have been changed after breakpoints were inserted - in which case
2431 we should just take the user literally and assume that any
2432 breakpoints should be left in place. */
2433 if (!may_insert_breakpoints)
2434 {
2435 warning (_("May not remove breakpoints"));
2436 return 1;
2437 }
2438
2439 target_ops *target = current_inferior ()->top_target ();
2440
2441 return target->remove_breakpoint (gdbarch, bp_tgt, reason);
2442 }
2443
2444 static void
2445 info_target_command (const char *args, int from_tty)
2446 {
2447 int has_all_mem = 0;
2448
2449 if (current_program_space->symfile_object_file != NULL)
2450 {
2451 objfile *objf = current_program_space->symfile_object_file;
2452 gdb_printf (_("Symbols from \"%s\".\n"),
2453 objfile_name (objf));
2454 }
2455
2456 for (target_ops *t = current_inferior ()->top_target ();
2457 t != NULL;
2458 t = t->beneath ())
2459 {
2460 if (!t->has_memory ())
2461 continue;
2462
2463 if ((int) (t->stratum ()) <= (int) dummy_stratum)
2464 continue;
2465 if (has_all_mem)
2466 gdb_printf (_("\tWhile running this, "
2467 "GDB does not access memory from...\n"));
2468 gdb_printf ("%s:\n", t->longname ());
2469 t->files_info ();
2470 has_all_mem = t->has_all_memory ();
2471 }
2472 }
2473
2474 /* This function is called before any new inferior is created, e.g.
2475 by running a program, attaching, or connecting to a target.
2476 It cleans up any state from previous invocations which might
2477 change between runs. This is a subset of what target_preopen
2478 resets (things which might change between targets). */
2479
2480 void
2481 target_pre_inferior (int from_tty)
2482 {
2483 /* Clear out solib state. Otherwise the solib state of the previous
2484 inferior might have survived and is entirely wrong for the new
2485 target. This has been observed on GNU/Linux using glibc 2.3. How
2486 to reproduce:
2487
2488 bash$ ./foo&
2489 [1] 4711
2490 bash$ ./foo&
2491 [1] 4712
2492 bash$ gdb ./foo
2493 [...]
2494 (gdb) attach 4711
2495 (gdb) detach
2496 (gdb) attach 4712
2497 Cannot access memory at address 0xdeadbeef
2498 */
2499
2500 /* In some OSs, the shared library list is the same/global/shared
2501 across inferiors. If code is shared between processes, so are
2502 memory regions and features. */
2503 if (!gdbarch_has_global_solist (target_gdbarch ()))
2504 {
2505 no_shared_libraries (NULL, from_tty);
2506
2507 invalidate_target_mem_regions ();
2508
2509 target_clear_description ();
2510 }
2511
2512 /* attach_flag may be set if the previous process associated with
2513 the inferior was attached to. */
2514 current_inferior ()->attach_flag = 0;
2515
2516 current_inferior ()->highest_thread_num = 0;
2517
2518 agent_capability_invalidate ();
2519 }
2520
2521 /* This is to be called by the open routine before it does
2522 anything. */
2523
2524 void
2525 target_preopen (int from_tty)
2526 {
2527 dont_repeat ();
2528
2529 if (current_inferior ()->pid != 0)
2530 {
2531 if (!from_tty
2532 || !target_has_execution ()
2533 || query (_("A program is being debugged already. Kill it? ")))
2534 {
2535 /* Core inferiors actually should be detached, not
2536 killed. */
2537 if (target_has_execution ())
2538 target_kill ();
2539 else
2540 target_detach (current_inferior (), 0);
2541 }
2542 else
2543 error (_("Program not killed."));
2544 }
2545
2546 /* Calling target_kill may remove the target from the stack. But if
2547 it doesn't (which seems like a win for UDI), remove it now. */
2548 /* Leave the exec target, though. The user may be switching from a
2549 live process to a core of the same program. */
2550 pop_all_targets_above (file_stratum);
2551
2552 target_pre_inferior (from_tty);
2553 }
2554
2555 /* See target.h. */
2556
2557 void
2558 target_detach (inferior *inf, int from_tty)
2559 {
2560 /* After we have detached, we will clear the register cache for this inferior
2561 by calling registers_changed_ptid. We must save the pid_ptid before
2562 detaching, as the target detach method will clear inf->pid. */
2563 ptid_t save_pid_ptid = ptid_t (inf->pid);
2564
2565 /* As long as some to_detach implementations rely on the current_inferior
2566 (either directly, or indirectly, like through target_gdbarch or by
2567 reading memory), INF needs to be the current inferior. Once that
2568 requirement no longer holds, we can remove this
2569 assertion. */
2570 gdb_assert (inf == current_inferior ());
2571
2572 prepare_for_detach ();
2573
2574 /* Hold a strong reference because detaching may unpush the
2575 target. */
2576 auto proc_target_ref = target_ops_ref::new_reference (inf->process_target ());
2577
2578 current_inferior ()->top_target ()->detach (inf, from_tty);
2579
2580 process_stratum_target *proc_target
2581 = as_process_stratum_target (proc_target_ref.get ());
2582
2583 registers_changed_ptid (proc_target, save_pid_ptid);
2584
2585 /* We have to ensure we have no frame cache left. Normally,
2586 registers_changed_ptid (save_pid_ptid) calls reinit_frame_cache when
2587 inferior_ptid matches save_pid_ptid, but in our case, it does not
2588 call it, as inferior_ptid has been reset. */
2589 reinit_frame_cache ();
2590 }
2591
2592 void
2593 target_disconnect (const char *args, int from_tty)
2594 {
2595 /* If we're in breakpoints-always-inserted mode or if breakpoints
2596 are global across processes, we have to remove them before
2597 disconnecting. */
2598 remove_breakpoints ();
2599
2600 current_inferior ()->top_target ()->disconnect (args, from_tty);
2601 }
2602
2603 /* See target/target.h. */
2604
2605 ptid_t
2606 target_wait (ptid_t ptid, struct target_waitstatus *status,
2607 target_wait_flags options)
2608 {
2609 target_ops *target = current_inferior ()->top_target ();
2610 process_stratum_target *proc_target = current_inferior ()->process_target ();
2611
2612 gdb_assert (!proc_target->commit_resumed_state);
2613
2614 if (!target_can_async_p (target))
2615 gdb_assert ((options & TARGET_WNOHANG) == 0);
2616
2617 try
2618 {
2619 gdb::observers::target_pre_wait.notify (ptid);
2620 ptid_t event_ptid = target->wait (ptid, status, options);
2621 gdb::observers::target_post_wait.notify (event_ptid);
2622 return event_ptid;
2623 }
2624 catch (...)
2625 {
2626 gdb::observers::target_post_wait.notify (null_ptid);
2627 throw;
2628 }
2629 }
2630
2631 /* See target.h. */
2632
2633 ptid_t
2634 default_target_wait (struct target_ops *ops,
2635 ptid_t ptid, struct target_waitstatus *status,
2636 target_wait_flags options)
2637 {
2638 status->set_ignore ();
2639 return minus_one_ptid;
2640 }
2641
2642 std::string
2643 target_pid_to_str (ptid_t ptid)
2644 {
2645 return current_inferior ()->top_target ()->pid_to_str (ptid);
2646 }
2647
2648 const char *
2649 target_thread_name (struct thread_info *info)
2650 {
2651 gdb_assert (info->inf == current_inferior ());
2652
2653 return current_inferior ()->top_target ()->thread_name (info);
2654 }
2655
2656 struct thread_info *
2657 target_thread_handle_to_thread_info (const gdb_byte *thread_handle,
2658 int handle_len,
2659 struct inferior *inf)
2660 {
2661 target_ops *target = current_inferior ()->top_target ();
2662
2663 return target->thread_handle_to_thread_info (thread_handle, handle_len, inf);
2664 }
2665
2666 /* See target.h. */
2667
2668 gdb::byte_vector
2669 target_thread_info_to_thread_handle (struct thread_info *tip)
2670 {
2671 target_ops *target = current_inferior ()->top_target ();
2672
2673 return target->thread_info_to_thread_handle (tip);
2674 }
2675
2676 void
2677 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2678 {
2679 process_stratum_target *curr_target = current_inferior ()->process_target ();
2680 gdb_assert (!curr_target->commit_resumed_state);
2681
2682 target_dcache_invalidate ();
2683
2684 current_inferior ()->top_target ()->resume (ptid, step, signal);
2685
2686 registers_changed_ptid (curr_target, ptid);
2687 /* We only set the internal executing state here. The user/frontend
2688 running state is set at a higher level. This also clears the
2689 thread's stop_pc as side effect. */
2690 set_executing (curr_target, ptid, true);
2691 clear_inline_frame_state (curr_target, ptid);
2692
2693 if (target_can_async_p ())
2694 target_async (1);
2695 }
2696
2697 /* See target.h. */
2698
2699 void
2700 target_commit_resumed ()
2701 {
2702 gdb_assert (current_inferior ()->process_target ()->commit_resumed_state);
2703 current_inferior ()->top_target ()->commit_resumed ();
2704 }
2705
2706 /* See target.h. */
2707
2708 bool
2709 target_has_pending_events ()
2710 {
2711 return current_inferior ()->top_target ()->has_pending_events ();
2712 }
2713
2714 void
2715 target_pass_signals (gdb::array_view<const unsigned char> pass_signals)
2716 {
2717 current_inferior ()->top_target ()->pass_signals (pass_signals);
2718 }
2719
2720 void
2721 target_program_signals (gdb::array_view<const unsigned char> program_signals)
2722 {
2723 current_inferior ()->top_target ()->program_signals (program_signals);
2724 }
2725
2726 static void
2727 default_follow_fork (struct target_ops *self, inferior *child_inf,
2728 ptid_t child_ptid, target_waitkind fork_kind,
2729 bool follow_child, bool detach_fork)
2730 {
2731 /* Some target returned a fork event, but did not know how to follow it. */
2732 internal_error (__FILE__, __LINE__,
2733 _("could not find a target to follow fork"));
2734 }
2735
2736 /* See target.h. */
2737
2738 void
2739 target_follow_fork (inferior *child_inf, ptid_t child_ptid,
2740 target_waitkind fork_kind, bool follow_child,
2741 bool detach_fork)
2742 {
2743 target_ops *target = current_inferior ()->top_target ();
2744
2745 /* Check consistency between CHILD_INF, CHILD_PTID, FOLLOW_CHILD and
2746 DETACH_FORK. */
2747 if (child_inf != nullptr)
2748 {
2749 gdb_assert (follow_child || !detach_fork);
2750 gdb_assert (child_inf->pid == child_ptid.pid ());
2751 }
2752 else
2753 gdb_assert (!follow_child && detach_fork);
2754
2755 return target->follow_fork (child_inf, child_ptid, fork_kind, follow_child,
2756 detach_fork);
2757 }
2758
2759 /* See target.h. */
2760
2761 void
2762 target_follow_exec (inferior *follow_inf, ptid_t ptid,
2763 const char *execd_pathname)
2764 {
2765 current_inferior ()->top_target ()->follow_exec (follow_inf, ptid,
2766 execd_pathname);
2767 }
2768
2769 static void
2770 default_mourn_inferior (struct target_ops *self)
2771 {
2772 internal_error (__FILE__, __LINE__,
2773 _("could not find a target to follow mourn inferior"));
2774 }
2775
2776 void
2777 target_mourn_inferior (ptid_t ptid)
2778 {
2779 gdb_assert (ptid.pid () == inferior_ptid.pid ());
2780 current_inferior ()->top_target ()->mourn_inferior ();
2781
2782 /* We no longer need to keep handles on any of the object files.
2783 Make sure to release them to avoid unnecessarily locking any
2784 of them while we're not actually debugging. */
2785 bfd_cache_close_all ();
2786 }
2787
2788 /* Look for a target which can describe architectural features, starting
2789 from TARGET. If we find one, return its description. */
2790
2791 const struct target_desc *
2792 target_read_description (struct target_ops *target)
2793 {
2794 return target->read_description ();
2795 }
2796
2797
2798 /* Default implementation of memory-searching. */
2799
2800 static int
2801 default_search_memory (struct target_ops *self,
2802 CORE_ADDR start_addr, ULONGEST search_space_len,
2803 const gdb_byte *pattern, ULONGEST pattern_len,
2804 CORE_ADDR *found_addrp)
2805 {
2806 auto read_memory = [=] (CORE_ADDR addr, gdb_byte *result, size_t len)
2807 {
2808 return target_read (current_inferior ()->top_target (),
2809 TARGET_OBJECT_MEMORY, NULL,
2810 result, addr, len) == len;
2811 };
2812
2813 /* Start over from the top of the target stack. */
2814 return simple_search_memory (read_memory, start_addr, search_space_len,
2815 pattern, pattern_len, found_addrp);
2816 }
2817
2818 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2819 sequence of bytes in PATTERN with length PATTERN_LEN.
2820
2821 The result is 1 if found, 0 if not found, and -1 if there was an error
2822 requiring halting of the search (e.g. memory read error).
2823 If the pattern is found the address is recorded in FOUND_ADDRP. */
2824
2825 int
2826 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2827 const gdb_byte *pattern, ULONGEST pattern_len,
2828 CORE_ADDR *found_addrp)
2829 {
2830 target_ops *target = current_inferior ()->top_target ();
2831
2832 return target->search_memory (start_addr, search_space_len, pattern,
2833 pattern_len, found_addrp);
2834 }
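
/* Editorial example (illustrative sketch, not part of GDB): this is the
   primitive used by commands in the style of "find".  Searching the
   first megabyte from an assumed ADDR for the string "GCC:" could look
   like:

     const char *pattern = "GCC:";
     CORE_ADDR found;

     if (target_search_memory (addr, 0x100000, (const gdb_byte *) pattern,
			       strlen (pattern), &found) == 1)
       gdb_printf (_("pattern found at %s\n"),
		   paddress (target_gdbarch (), found));
*/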
2835
2836 /* Look through the currently pushed targets. If none of them will
2837 be able to restart the currently running process, issue an error
2838 message. */
2839
2840 void
2841 target_require_runnable (void)
2842 {
2843 for (target_ops *t = current_inferior ()->top_target ();
2844 t != NULL;
2845 t = t->beneath ())
2846 {
2847 /* If this target knows how to create a new program, then
2848 assume we will still be able to after killing the current
2849 one. Either killing and mourning will not pop T, or else
2850 find_default_run_target will find it again. */
2851 if (t->can_create_inferior ())
2852 return;
2853
2854 /* Do not worry about targets at certain strata that can not
2855 create inferiors. Assume they will be pushed again if
2856 necessary, and continue to the process_stratum. */
2857 if (t->stratum () > process_stratum)
2858 continue;
2859
2860 error (_("The \"%s\" target does not support \"run\". "
2861 "Try \"help target\" or \"continue\"."),
2862 t->shortname ());
2863 }
2864
2865 /* This function is only called if the target is running. In that
2866 case there should have been a process_stratum target and it
2867 should either know how to create inferiors, or not... */
2868 internal_error (__FILE__, __LINE__, _("No targets found"));
2869 }
2870
2871 /* Whether GDB is allowed to fall back to the default run target for
2872 "run", "attach", etc. when no target is connected yet. */
2873 static bool auto_connect_native_target = true;
2874
2875 static void
2876 show_auto_connect_native_target (struct ui_file *file, int from_tty,
2877 struct cmd_list_element *c, const char *value)
2878 {
2879 gdb_printf (file,
2880 _("Whether GDB may automatically connect to the "
2881 "native target is %s.\n"),
2882 value);
2883 }
2884
2885 /* A pointer to the target that can respond to "run" or "attach".
2886 Native targets are always singletons and instantiated early at GDB
2887 startup. */
2888 static target_ops *the_native_target;
2889
2890 /* See target.h. */
2891
2892 void
2893 set_native_target (target_ops *target)
2894 {
2895 if (the_native_target != NULL)
2896 internal_error (__FILE__, __LINE__,
2897 _("native target already set (\"%s\")."),
2898 the_native_target->longname ());
2899
2900 the_native_target = target;
2901 }
2902
2903 /* See target.h. */
2904
2905 target_ops *
2906 get_native_target ()
2907 {
2908 return the_native_target;
2909 }
2910
2911 /* Look through the list of possible targets for a target that can
2912 execute a run or attach command without any other data. This is
2913 used to locate the default process stratum.
2914
2915 If DO_MESG is not NULL, the result is always valid (error() is
2916 called for errors); else, return NULL on error. */
2917
2918 static struct target_ops *
2919 find_default_run_target (const char *do_mesg)
2920 {
2921 if (auto_connect_native_target && the_native_target != NULL)
2922 return the_native_target;
2923
2924 if (do_mesg != NULL)
2925 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2926 return NULL;
2927 }
2928
2929 /* See target.h. */
2930
2931 struct target_ops *
2932 find_attach_target (void)
2933 {
2934 /* If a target on the current stack can attach, use it. */
2935 for (target_ops *t = current_inferior ()->top_target ();
2936 t != NULL;
2937 t = t->beneath ())
2938 {
2939 if (t->can_attach ())
2940 return t;
2941 }
2942
2943 /* Otherwise, use the default run target for attaching. */
2944 return find_default_run_target ("attach");
2945 }
2946
2947 /* See target.h. */
2948
2949 struct target_ops *
2950 find_run_target (void)
2951 {
2952 /* If a target on the current stack can run, use it. */
2953 for (target_ops *t = current_inferior ()->top_target ();
2954 t != NULL;
2955 t = t->beneath ())
2956 {
2957 if (t->can_create_inferior ())
2958 return t;
2959 }
2960
2961 /* Otherwise, use the default run target. */
2962 return find_default_run_target ("run");
2963 }
2964
2965 bool
2966 target_ops::info_proc (const char *args, enum info_proc_what what)
2967 {
2968 return false;
2969 }
2970
2971 /* Implement the "info proc" command. */
2972
2973 int
2974 target_info_proc (const char *args, enum info_proc_what what)
2975 {
2976 struct target_ops *t;
2977
2978 /* If we're already connected to something that can get us OS
2979 related data, use it. Otherwise, try using the native
2980 target. */
2981 t = find_target_at (process_stratum);
2982 if (t == NULL)
2983 t = find_default_run_target (NULL);
2984
2985 for (; t != NULL; t = t->beneath ())
2986 {
2987 if (t->info_proc (args, what))
2988 {
2989 if (targetdebug)
2990 gdb_printf (gdb_stdlog,
2991 "target_info_proc (\"%s\", %d)\n", args, what);
2992
2993 return 1;
2994 }
2995 }
2996
2997 return 0;
2998 }
2999
3000 static int
3001 find_default_supports_disable_randomization (struct target_ops *self)
3002 {
3003 struct target_ops *t;
3004
3005 t = find_default_run_target (NULL);
3006 if (t != NULL)
3007 return t->supports_disable_randomization ();
3008 return 0;
3009 }
3010
3011 int
3012 target_supports_disable_randomization (void)
3013 {
3014 return current_inferior ()->top_target ()->supports_disable_randomization ();
3015 }
3016
3017 /* See target/target.h. */
3018
3019 int
3020 target_supports_multi_process (void)
3021 {
3022 return current_inferior ()->top_target ()->supports_multi_process ();
3023 }
3024
3025 /* See target.h. */
3026
3027 gdb::optional<gdb::char_vector>
3028 target_get_osdata (const char *type)
3029 {
3030 struct target_ops *t;
3031
3032 /* If we're already connected to something that can get us OS
3033 related data, use it. Otherwise, try using the native
3034 target. */
3035 t = find_target_at (process_stratum);
3036 if (t == NULL)
3037 t = find_default_run_target ("get OS data");
3038
3039 if (!t)
3040 return {};
3041
3042 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3043 }
3044
3045 /* Determine the current address space of thread PTID. */
3046
3047 struct address_space *
3048 target_thread_address_space (ptid_t ptid)
3049 {
3050 struct address_space *aspace;
3051
3052 aspace = current_inferior ()->top_target ()->thread_address_space (ptid);
3053 gdb_assert (aspace != NULL);
3054
3055 return aspace;
3056 }
3057
3058 /* See target.h. */
3059
3060 target_ops *
3061 target_ops::beneath () const
3062 {
3063 return current_inferior ()->find_target_beneath (this);
3064 }
3065
3066 void
3067 target_ops::close ()
3068 {
3069 }
3070
3071 bool
3072 target_ops::can_attach ()
3073 {
3074 return false;
3075 }
3076
3077 void
3078 target_ops::attach (const char *, int)
3079 {
3080 gdb_assert_not_reached ("target_ops::attach called");
3081 }
3082
3083 bool
3084 target_ops::can_create_inferior ()
3085 {
3086 return false;
3087 }
3088
3089 void
3090 target_ops::create_inferior (const char *, const std::string &,
3091 char **, int)
3092 {
3093 gdb_assert_not_reached ("target_ops::create_inferior called");
3094 }
3095
3096 bool
3097 target_ops::can_run ()
3098 {
3099 return false;
3100 }
3101
3102 int
3103 target_can_run ()
3104 {
3105 for (target_ops *t = current_inferior ()->top_target ();
3106 t != NULL;
3107 t = t->beneath ())
3108 {
3109 if (t->can_run ())
3110 return 1;
3111 }
3112
3113 return 0;
3114 }
3115
3116 /* Target file operations. */
3117
3118 static struct target_ops *
3119 default_fileio_target (void)
3120 {
3121 struct target_ops *t;
3122
3123 /* If we're already connected to something that can perform
3124 file I/O, use it. Otherwise, try using the native target. */
3125 t = find_target_at (process_stratum);
3126 if (t != NULL)
3127 return t;
3128 return find_default_run_target ("file I/O");
3129 }
3130
3131 /* File handle for target file operations. */
3132
3133 struct fileio_fh_t
3134 {
3135 /* The target on which this file is open. NULL if the target was
3136 closed while the handle was still open. */
3137 target_ops *target;
3138
3139 /* The file descriptor on the target. */
3140 int target_fd;
3141
3142 /* Check whether this fileio_fh_t represents a closed file. */
3143 bool is_closed ()
3144 {
3145 return target_fd < 0;
3146 }
3147 };
3148
3149 /* Vector of currently open file handles. The value returned by
3150 target_fileio_open and passed as the FD argument to other
3151 target_fileio_* functions is an index into this vector. This
3152 vector's entries are never freed; instead, files are marked as
3153 closed, and the handle becomes available for reuse. */
3154 static std::vector<fileio_fh_t> fileio_fhandles;
3155
3156 /* Index into fileio_fhandles of the lowest handle that might be
3157 closed. This permits handle reuse without searching the whole
3158 list each time a new file is opened. */
3159 static int lowest_closed_fd;
3160
3161 /* See target.h. */
3162
3163 void
3164 fileio_handles_invalidate_target (target_ops *targ)
3165 {
3166 for (fileio_fh_t &fh : fileio_fhandles)
3167 if (fh.target == targ)
3168 fh.target = NULL;
3169 }
3170
3171 /* Acquire a target fileio file descriptor. */
3172
3173 static int
3174 acquire_fileio_fd (target_ops *target, int target_fd)
3175 {
3176 /* Search for closed handles to reuse. */
3177 for (; lowest_closed_fd < fileio_fhandles.size (); lowest_closed_fd++)
3178 {
3179 fileio_fh_t &fh = fileio_fhandles[lowest_closed_fd];
3180
3181 if (fh.is_closed ())
3182 break;
3183 }
3184
3185 /* Push a new handle if no closed handles were found. */
3186 if (lowest_closed_fd == fileio_fhandles.size ())
3187 fileio_fhandles.push_back (fileio_fh_t {target, target_fd});
3188 else
3189 fileio_fhandles[lowest_closed_fd] = {target, target_fd};
3190
3191 /* Should no longer be marked closed. */
3192 gdb_assert (!fileio_fhandles[lowest_closed_fd].is_closed ());
3193
3194 /* Return its index, and start the next lookup at
3195 the next index. */
3196 return lowest_closed_fd++;
3197 }
3198
3199 /* Release a target fileio file descriptor. */
3200
3201 static void
3202 release_fileio_fd (int fd, fileio_fh_t *fh)
3203 {
3204 fh->target_fd = -1;
3205 lowest_closed_fd = std::min (lowest_closed_fd, fd);
3206 }
3207
3208 /* Return a pointer to the fileio_fhandle_t corresponding to FD. */
3209
3210 static fileio_fh_t *
3211 fileio_fd_to_fh (int fd)
3212 {
3213 return &fileio_fhandles[fd];
3214 }
3215
3216
3217 /* Default implementations of file i/o methods. We don't want these
3218 to delegate automatically, because we need to know which target
3219 supported the method, in order to call it directly from within
3220 pread/pwrite, etc. */
3221
3222 int
3223 target_ops::fileio_open (struct inferior *inf, const char *filename,
3224 int flags, int mode, int warn_if_slow,
3225 int *target_errno)
3226 {
3227 *target_errno = FILEIO_ENOSYS;
3228 return -1;
3229 }
3230
3231 int
3232 target_ops::fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3233 ULONGEST offset, int *target_errno)
3234 {
3235 *target_errno = FILEIO_ENOSYS;
3236 return -1;
3237 }
3238
3239 int
3240 target_ops::fileio_pread (int fd, gdb_byte *read_buf, int len,
3241 ULONGEST offset, int *target_errno)
3242 {
3243 *target_errno = FILEIO_ENOSYS;
3244 return -1;
3245 }
3246
3247 int
3248 target_ops::fileio_fstat (int fd, struct stat *sb, int *target_errno)
3249 {
3250 *target_errno = FILEIO_ENOSYS;
3251 return -1;
3252 }
3253
3254 int
3255 target_ops::fileio_close (int fd, int *target_errno)
3256 {
3257 *target_errno = FILEIO_ENOSYS;
3258 return -1;
3259 }
3260
3261 int
3262 target_ops::fileio_unlink (struct inferior *inf, const char *filename,
3263 int *target_errno)
3264 {
3265 *target_errno = FILEIO_ENOSYS;
3266 return -1;
3267 }
3268
3269 gdb::optional<std::string>
3270 target_ops::fileio_readlink (struct inferior *inf, const char *filename,
3271 int *target_errno)
3272 {
3273 *target_errno = FILEIO_ENOSYS;
3274 return {};
3275 }
3276
3277 /* See target.h. */
3278
3279 int
3280 target_fileio_open (struct inferior *inf, const char *filename,
3281 int flags, int mode, bool warn_if_slow, int *target_errno)
3282 {
3283 for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
3284 {
3285 int fd = t->fileio_open (inf, filename, flags, mode,
3286 warn_if_slow, target_errno);
3287
3288 if (fd == -1 && *target_errno == FILEIO_ENOSYS)
3289 continue;
3290
3291 if (fd < 0)
3292 fd = -1;
3293 else
3294 fd = acquire_fileio_fd (t, fd);
3295
3296 if (targetdebug)
3297 gdb_printf (gdb_stdlog,
3298 "target_fileio_open (%d,%s,0x%x,0%o,%d)"
3299 " = %d (%d)\n",
3300 inf == NULL ? 0 : inf->num,
3301 filename, flags, mode,
3302 warn_if_slow, fd,
3303 fd != -1 ? 0 : *target_errno);
3304 return fd;
3305 }
3306
3307 *target_errno = FILEIO_ENOSYS;
3308 return -1;
3309 }
3310
3311 /* See target.h. */
3312
3313 int
3314 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3315 ULONGEST offset, int *target_errno)
3316 {
3317 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3318 int ret = -1;
3319
3320 if (fh->is_closed ())
3321 *target_errno = EBADF;
3322 else if (fh->target == NULL)
3323 *target_errno = EIO;
3324 else
3325 ret = fh->target->fileio_pwrite (fh->target_fd, write_buf,
3326 len, offset, target_errno);
3327
3328 if (targetdebug)
3329 gdb_printf (gdb_stdlog,
3330 "target_fileio_pwrite (%d,...,%d,%s) "
3331 "= %d (%d)\n",
3332 fd, len, pulongest (offset),
3333 ret, ret != -1 ? 0 : *target_errno);
3334 return ret;
3335 }
3336
3337 /* See target.h. */
3338
3339 int
3340 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3341 ULONGEST offset, int *target_errno)
3342 {
3343 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3344 int ret = -1;
3345
3346 if (fh->is_closed ())
3347 *target_errno = EBADF;
3348 else if (fh->target == NULL)
3349 *target_errno = EIO;
3350 else
3351 ret = fh->target->fileio_pread (fh->target_fd, read_buf,
3352 len, offset, target_errno);
3353
3354 if (targetdebug)
3355 gdb_printf (gdb_stdlog,
3356 "target_fileio_pread (%d,...,%d,%s) "
3357 "= %d (%d)\n",
3358 fd, len, pulongest (offset),
3359 ret, ret != -1 ? 0 : *target_errno);
3360 return ret;
3361 }
3362
3363 /* See target.h. */
3364
3365 int
3366 target_fileio_fstat (int fd, struct stat *sb, int *target_errno)
3367 {
3368 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3369 int ret = -1;
3370
3371 if (fh->is_closed ())
3372 *target_errno = EBADF;
3373 else if (fh->target == NULL)
3374 *target_errno = EIO;
3375 else
3376 ret = fh->target->fileio_fstat (fh->target_fd, sb, target_errno);
3377
3378 if (targetdebug)
3379 gdb_printf (gdb_stdlog,
3380 "target_fileio_fstat (%d) = %d (%d)\n",
3381 fd, ret, ret != -1 ? 0 : *target_errno);
3382 return ret;
3383 }
3384
3385 /* See target.h. */
3386
3387 int
3388 target_fileio_close (int fd, int *target_errno)
3389 {
3390 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3391 int ret = -1;
3392
3393 if (fh->is_closed ())
3394 *target_errno = EBADF;
3395 else
3396 {
3397 if (fh->target != NULL)
3398 ret = fh->target->fileio_close (fh->target_fd,
3399 target_errno);
3400 else
3401 ret = 0;
3402 release_fileio_fd (fd, fh);
3403 }
3404
3405 if (targetdebug)
3406 gdb_printf (gdb_stdlog,
3407 "target_fileio_close (%d) = %d (%d)\n",
3408 fd, ret, ret != -1 ? 0 : *target_errno);
3409 return ret;
3410 }
3411
3412 /* See target.h. */
3413
3414 int
3415 target_fileio_unlink (struct inferior *inf, const char *filename,
3416 int *target_errno)
3417 {
3418 for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
3419 {
3420 int ret = t->fileio_unlink (inf, filename, target_errno);
3421
3422 if (ret == -1 && *target_errno == FILEIO_ENOSYS)
3423 continue;
3424
3425 if (targetdebug)
3426 gdb_printf (gdb_stdlog,
3427 "target_fileio_unlink (%d,%s)"
3428 " = %d (%d)\n",
3429 inf == NULL ? 0 : inf->num, filename,
3430 ret, ret != -1 ? 0 : *target_errno);
3431 return ret;
3432 }
3433
3434 *target_errno = FILEIO_ENOSYS;
3435 return -1;
3436 }
3437
3438 /* See target.h. */
3439
3440 gdb::optional<std::string>
3441 target_fileio_readlink (struct inferior *inf, const char *filename,
3442 int *target_errno)
3443 {
3444 for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
3445 {
3446 gdb::optional<std::string> ret
3447 = t->fileio_readlink (inf, filename, target_errno);
3448
3449 if (!ret.has_value () && *target_errno == FILEIO_ENOSYS)
3450 continue;
3451
3452 if (targetdebug)
3453 gdb_printf (gdb_stdlog,
3454 "target_fileio_readlink (%d,%s)"
3455 " = %s (%d)\n",
3456 inf == NULL ? 0 : inf->num,
3457 filename, ret ? ret->c_str () : "(nil)",
3458 ret ? 0 : *target_errno);
3459 return ret;
3460 }
3461
3462 *target_errno = FILEIO_ENOSYS;
3463 return {};
3464 }
3465
3466 /* Like scoped_fd, but specific to target fileio. */
3467
3468 class scoped_target_fd
3469 {
3470 public:
3471 explicit scoped_target_fd (int fd) noexcept
3472 : m_fd (fd)
3473 {
3474 }
3475
3476 ~scoped_target_fd ()
3477 {
3478 if (m_fd >= 0)
3479 {
3480 int target_errno;
3481
3482 target_fileio_close (m_fd, &target_errno);
3483 }
3484 }
3485
3486 DISABLE_COPY_AND_ASSIGN (scoped_target_fd);
3487
3488 int get () const noexcept
3489 {
3490 return m_fd;
3491 }
3492
3493 private:
3494 int m_fd;
3495 };
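
/* Editorial example (illustrative sketch, not part of GDB): using the
   RAII wrapper together with the fileio primitives.  INF and the target
   file name are assumptions.

     int target_errno;
     scoped_target_fd fd (target_fileio_open (inf, "/etc/hostname",
					      FILEIO_O_RDONLY, 0,
					      false, &target_errno));

     if (fd.get () != -1)
       {
	 gdb_byte buf[128];
	 int n = target_fileio_pread (fd.get (), buf, sizeof (buf), 0,
				      &target_errno);
	 // Use the N bytes in BUF; FD is closed automatically on scope exit.
       }
*/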
3496
3497 /* Read target file FILENAME, in the filesystem as seen by INF. If
3498 INF is NULL, use the filesystem seen by the debugger (GDB or, for
3499 remote targets, the remote stub). Store the result in *BUF_P and
3500 return the size of the transferred data. PADDING additional bytes
3501 are available in *BUF_P. This is a helper function for
3502 target_fileio_read_alloc; see the declaration of that function for
3503 more information. */
3504
3505 static LONGEST
3506 target_fileio_read_alloc_1 (struct inferior *inf, const char *filename,
3507 gdb_byte **buf_p, int padding)
3508 {
3509 size_t buf_alloc, buf_pos;
3510 gdb_byte *buf;
3511 LONGEST n;
3512 int target_errno;
3513
3514 scoped_target_fd fd (target_fileio_open (inf, filename, FILEIO_O_RDONLY,
3515 0700, false, &target_errno));
3516 if (fd.get () == -1)
3517 return -1;
3518
3519 /* Start by reading up to 4K at a time. The target will throttle
3520 this number down if necessary. */
3521 buf_alloc = 4096;
3522 buf = (gdb_byte *) xmalloc (buf_alloc);
3523 buf_pos = 0;
3524 while (1)
3525 {
3526 n = target_fileio_pread (fd.get (), &buf[buf_pos],
3527 buf_alloc - buf_pos - padding, buf_pos,
3528 &target_errno);
3529 if (n < 0)
3530 {
3531 /* An error occurred. */
3532 xfree (buf);
3533 return -1;
3534 }
3535 else if (n == 0)
3536 {
3537 /* Read all there was. */
3538 if (buf_pos == 0)
3539 xfree (buf);
3540 else
3541 *buf_p = buf;
3542 return buf_pos;
3543 }
3544
3545 buf_pos += n;
3546
3547 /* If the buffer is filling up, expand it. */
3548 if (buf_alloc < buf_pos * 2)
3549 {
3550 buf_alloc *= 2;
3551 buf = (gdb_byte *) xrealloc (buf, buf_alloc);
3552 }
3553
3554 QUIT;
3555 }
3556 }
3557
3558 /* See target.h. */
3559
3560 LONGEST
3561 target_fileio_read_alloc (struct inferior *inf, const char *filename,
3562 gdb_byte **buf_p)
3563 {
3564 return target_fileio_read_alloc_1 (inf, filename, buf_p, 0);
3565 }
3566
3567 /* See target.h. */
3568
3569 gdb::unique_xmalloc_ptr<char>
3570 target_fileio_read_stralloc (struct inferior *inf, const char *filename)
3571 {
3572 gdb_byte *buffer;
3573 char *bufstr;
3574 LONGEST i, transferred;
3575
3576 transferred = target_fileio_read_alloc_1 (inf, filename, &buffer, 1);
3577 bufstr = (char *) buffer;
3578
3579 if (transferred < 0)
3580 return gdb::unique_xmalloc_ptr<char> (nullptr);
3581
3582 if (transferred == 0)
3583 return make_unique_xstrdup ("");
3584
3585 bufstr[transferred] = 0;
3586
3587 /* Check for embedded NUL bytes; but allow trailing NULs. */
3588 for (i = strlen (bufstr); i < transferred; i++)
3589 if (bufstr[i] != 0)
3590 {
3591 warning (_("target file %s "
3592 "contained unexpected null characters"),
3593 filename);
3594 break;
3595 }
3596
3597 return gdb::unique_xmalloc_ptr<char> (bufstr);
3598 }
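
/* Editorial example (illustrative sketch, not part of GDB): reading a
   small text file from the target filesystem in one call; INF and the
   path are assumptions.

     gdb::unique_xmalloc_ptr<char> content
       = target_fileio_read_stralloc (inf, "/proc/cpuinfo");

     if (content != nullptr)
       gdb_printf ("%s", content.get ());
*/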
3599
3600
3601 static int
3602 default_region_ok_for_hw_watchpoint (struct target_ops *self,
3603 CORE_ADDR addr, int len)
3604 {
3605 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3606 }
3607
3608 static int
3609 default_watchpoint_addr_within_range (struct target_ops *target,
3610 CORE_ADDR addr,
3611 CORE_ADDR start, int length)
3612 {
3613 return addr >= start && addr < start + length;
3614 }
3615
3616 /* See target.h. */
3617
3618 target_ops *
3619 target_stack::find_beneath (const target_ops *t) const
3620 {
3621 /* Look for a non-empty slot at stratum levels beneath T's. */
3622 for (int stratum = t->stratum () - 1; stratum >= 0; --stratum)
3623 if (m_stack[stratum] != NULL)
3624 return m_stack[stratum];
3625
3626 return NULL;
3627 }
3628
3629 /* See target.h. */
3630
3631 struct target_ops *
3632 find_target_at (enum strata stratum)
3633 {
3634 return current_inferior ()->target_at (stratum);
3635 }
3636
3637 \f
3638
3639 /* See target.h. */
3640
3641 void
3642 target_announce_detach (int from_tty)
3643 {
3644 pid_t pid;
3645 const char *exec_file;
3646
3647 if (!from_tty)
3648 return;
3649
3650 pid = inferior_ptid.pid ();
3651 exec_file = get_exec_file (0);
3652 if (exec_file == nullptr)
3653 gdb_printf ("Detaching from pid %s\n",
3654 target_pid_to_str (ptid_t (pid)).c_str ());
3655 else
3656 gdb_printf (_("Detaching from program: %s, %s\n"), exec_file,
3657 target_pid_to_str (ptid_t (pid)).c_str ());
3658 }
3659
3660 /* See target.h. */
3661
3662 void
3663 target_announce_attach (int from_tty, int pid)
3664 {
3665 if (!from_tty)
3666 return;
3667
3668 const char *exec_file = get_exec_file (0);
3669
3670 if (exec_file != nullptr)
3671 gdb_printf ("Attaching to program: %s, %s\n", exec_file,
3672 target_pid_to_str (ptid_t (pid)).c_str ());
3673 else
3674 gdb_printf ("Attaching to %s\n",
3675 target_pid_to_str (ptid_t (pid)).c_str ());
3676 }
3677
3678 /* The inferior process has died. Long live the inferior! */
3679
3680 void
3681 generic_mourn_inferior (void)
3682 {
3683 inferior *inf = current_inferior ();
3684
3685 switch_to_no_thread ();
3686
3687 /* Mark breakpoints uninserted in case something tries to delete a
3688 breakpoint while we delete the inferior's threads (which would
3689 fail, since the inferior is long gone). */
3690 mark_breakpoints_out ();
3691
3692 if (inf->pid != 0)
3693 exit_inferior (inf);
3694
3695 /* Note this wipes step-resume breakpoints, so needs to be done
3696 after exit_inferior, which ends up referencing the step-resume
3697 breakpoints through clear_thread_inferior_resources. */
3698 breakpoint_init_inferior (inf_exited);
3699
3700 registers_changed ();
3701
3702 reopen_exec_file ();
3703 reinit_frame_cache ();
3704
3705 if (deprecated_detach_hook)
3706 deprecated_detach_hook ();
3707 }
3708 \f
3709 /* Convert a normal process ID to a string such as "process 1234".
3710 The result is returned as a std::string. */
3711
3712 std::string
3713 normal_pid_to_str (ptid_t ptid)
3714 {
3715 return string_printf ("process %d", ptid.pid ());
3716 }
3717
3718 static std::string
3719 default_pid_to_str (struct target_ops *ops, ptid_t ptid)
3720 {
3721 return normal_pid_to_str (ptid);
3722 }
3723
3724 /* Error-catcher for target_find_memory_regions. */
3725 static int
3726 dummy_find_memory_regions (struct target_ops *self,
3727 find_memory_region_ftype ignore1, void *ignore2)
3728 {
3729 error (_("Command not implemented for this target."));
3730 return 0;
3731 }
3732
3733 /* Error-catcher for target_make_corefile_notes. */
3734 static gdb::unique_xmalloc_ptr<char>
3735 dummy_make_corefile_notes (struct target_ops *self,
3736 bfd *ignore1, int *ignore2)
3737 {
3738 error (_("Command not implemented for this target."));
3739 return NULL;
3740 }
3741
3742 #include "target-delegates.c"
3743
3744 /* The initial current target, so that there is always a semi-valid
3745 current target. */
3746
3747 static dummy_target the_dummy_target;
3748
3749 /* See target.h. */
3750
3751 target_ops *
3752 get_dummy_target ()
3753 {
3754 return &the_dummy_target;
3755 }
3756
3757 static const target_info dummy_target_info = {
3758 "None",
3759 N_("None"),
3760 ""
3761 };
3762
3763 strata
3764 dummy_target::stratum () const
3765 {
3766 return dummy_stratum;
3767 }
3768
3769 strata
3770 debug_target::stratum () const
3771 {
3772 return debug_stratum;
3773 }
3774
3775 const target_info &
3776 dummy_target::info () const
3777 {
3778 return dummy_target_info;
3779 }
3780
3781 const target_info &
3782 debug_target::info () const
3783 {
3784 return beneath ()->info ();
3785 }
3786
3787 \f
3788
3789 void
3790 target_close (struct target_ops *targ)
3791 {
3792 for (inferior *inf : all_inferiors ())
3793 gdb_assert (!inf->target_is_pushed (targ));
3794
3795 fileio_handles_invalidate_target (targ);
3796
3797 targ->close ();
3798
3799 if (targetdebug)
3800 gdb_printf (gdb_stdlog, "target_close ()\n");
3801 }
3802
3803 int
3804 target_thread_alive (ptid_t ptid)
3805 {
3806 return current_inferior ()->top_target ()->thread_alive (ptid);
3807 }
3808
3809 void
3810 target_update_thread_list (void)
3811 {
3812 current_inferior ()->top_target ()->update_thread_list ();
3813 }
3814
3815 void
3816 target_stop (ptid_t ptid)
3817 {
3818 process_stratum_target *proc_target = current_inferior ()->process_target ();
3819
3820 gdb_assert (!proc_target->commit_resumed_state);
3821
3822 if (!may_stop)
3823 {
3824 warning (_("May not interrupt or stop the target, ignoring attempt"));
3825 return;
3826 }
3827
3828 current_inferior ()->top_target ()->stop (ptid);
3829 }
3830
3831 void
3832 target_interrupt ()
3833 {
3834 if (!may_stop)
3835 {
3836 warning (_("May not interrupt or stop the target, ignoring attempt"));
3837 return;
3838 }
3839
3840 current_inferior ()->top_target ()->interrupt ();
3841 }
3842
3843 /* See target.h. */
3844
3845 void
3846 target_pass_ctrlc (void)
3847 {
3848 /* Pass the Ctrl-C to the first target that has a thread
3849 running. */
3850 for (inferior *inf : all_inferiors ())
3851 {
3852 target_ops *proc_target = inf->process_target ();
3853 if (proc_target == NULL)
3854 continue;
3855
3856 for (thread_info *thr : inf->non_exited_threads ())
3857 {
3858 /* A thread can be THREAD_STOPPED and executing, while
3859 running an infcall. */
3860 if (thr->state == THREAD_RUNNING || thr->executing ())
3861 {
3862 /* We can get here quite deep in target layers. Avoid
3863 switching thread context or anything that would
3864 communicate with the target (e.g., to fetch
3865 registers), or flushing e.g., the frame cache. We
3866 just switch inferior in order to be able to call
3867 through the target_stack. */
3868 scoped_restore_current_inferior restore_inferior;
3869 set_current_inferior (inf);
3870 current_inferior ()->top_target ()->pass_ctrlc ();
3871 return;
3872 }
3873 }
3874 }
3875 }
3876
3877 /* See target.h. */
3878
3879 void
3880 default_target_pass_ctrlc (struct target_ops *ops)
3881 {
3882 target_interrupt ();
3883 }
3884
3885 /* See target/target.h. */
3886
3887 void
3888 target_stop_and_wait (ptid_t ptid)
3889 {
3890 struct target_waitstatus status;
3891 bool was_non_stop = non_stop;
3892
3893 non_stop = true;
3894 target_stop (ptid);
3895
3896 target_wait (ptid, &status, 0);
3897
3898 non_stop = was_non_stop;
3899 }
3900
3901 /* See target/target.h. */
3902
3903 void
3904 target_continue_no_signal (ptid_t ptid)
3905 {
3906 target_resume (ptid, 0, GDB_SIGNAL_0);
3907 }
3908
3909 /* See target/target.h. */
3910
3911 void
3912 target_continue (ptid_t ptid, enum gdb_signal signal)
3913 {
3914 target_resume (ptid, 0, signal);
3915 }
3916
3917 /* Concatenate ELEM to LIST, a comma-separated list. */
3918
3919 static void
3920 str_comma_list_concat_elem (std::string *list, const char *elem)
3921 {
3922 if (!list->empty ())
3923 list->append (", ");
3924
3925 list->append (elem);
3926 }
3927
3928 /* Helper for target_options_to_string. If OPT is present in
3929 TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
3930 OPT is removed from TARGET_OPTIONS. */
3931
3932 static void
3933 do_option (target_wait_flags *target_options, std::string *ret,
3934 target_wait_flag opt, const char *opt_str)
3935 {
3936 if ((*target_options & opt) != 0)
3937 {
3938 str_comma_list_concat_elem (ret, opt_str);
3939 *target_options &= ~opt;
3940 }
3941 }
3942
3943 /* See target.h. */
3944
3945 std::string
3946 target_options_to_string (target_wait_flags target_options)
3947 {
3948 std::string ret;
3949
3950 #define DO_TARG_OPTION(OPT) \
3951 do_option (&target_options, &ret, OPT, #OPT)
3952
3953 DO_TARG_OPTION (TARGET_WNOHANG);
3954
3955 if (target_options != 0)
3956 str_comma_list_concat_elem (&ret, "unknown???");
3957
3958 return ret;
3959 }
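
/* Editorial example (illustrative sketch, not part of GDB): debug
   logging paths use this to render wait flags as text.

     std::string s = target_options_to_string (TARGET_WNOHANG);
     // s == "TARGET_WNOHANG"; unknown bits are rendered as "unknown???".
*/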
3960
3961 void
3962 target_fetch_registers (struct regcache *regcache, int regno)
3963 {
3964 current_inferior ()->top_target ()->fetch_registers (regcache, regno);
3965 if (targetdebug)
3966 regcache->debug_print_register ("target_fetch_registers", regno);
3967 }
3968
3969 void
3970 target_store_registers (struct regcache *regcache, int regno)
3971 {
3972 if (!may_write_registers)
3973 error (_("Writing to registers is not allowed (regno %d)"), regno);
3974
3975 current_inferior ()->top_target ()->store_registers (regcache, regno);
3976 if (targetdebug)
3977 {
3978 regcache->debug_print_register ("target_store_registers", regno);
3979 }
3980 }
3981
3982 int
3983 target_core_of_thread (ptid_t ptid)
3984 {
3985 return current_inferior ()->top_target ()->core_of_thread (ptid);
3986 }
3987
3988 int
3989 simple_verify_memory (struct target_ops *ops,
3990 const gdb_byte *data, CORE_ADDR lma, ULONGEST size)
3991 {
3992 LONGEST total_xfered = 0;
3993
3994 while (total_xfered < size)
3995 {
3996 ULONGEST xfered_len;
3997 enum target_xfer_status status;
3998 gdb_byte buf[1024];
3999 ULONGEST howmuch = std::min<ULONGEST> (sizeof (buf), size - total_xfered);
4000
4001 status = target_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
4002 buf, NULL, lma + total_xfered, howmuch,
4003 &xfered_len);
4004 if (status == TARGET_XFER_OK
4005 && memcmp (data + total_xfered, buf, xfered_len) == 0)
4006 {
4007 total_xfered += xfered_len;
4008 QUIT;
4009 }
4010 else
4011 return 0;
4012 }
4013 return 1;
4014 }
4015
4016 /* Default implementation of memory verification. */
4017
4018 static int
4019 default_verify_memory (struct target_ops *self,
4020 const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
4021 {
4022 /* Start over from the top of the target stack. */
4023 return simple_verify_memory (current_inferior ()->top_target (),
4024 data, memaddr, size);
4025 }
4026
4027 int
4028 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
4029 {
4030 target_ops *target = current_inferior ()->top_target ();
4031
4032 return target->verify_memory (data, memaddr, size);
4033 }
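
/* Editorial example (illustrative sketch, not part of GDB): checks in
   the style of "compare-sections" verify a host buffer against target
   memory, letting the target do the comparison itself when it can.
   DATA, ADDR and SIZE are assumptions.

     if (target_verify_memory (data, addr, size) != 1)
       warning (_("verification failed at %s"),
		paddress (target_gdbarch (), addr));
*/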
4034
4035 /* The documentation for this function is in its prototype declaration in
4036 target.h. */
4037
4038 int
4039 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask,
4040 enum target_hw_bp_type rw)
4041 {
4042 target_ops *target = current_inferior ()->top_target ();
4043
4044 return target->insert_mask_watchpoint (addr, mask, rw);
4045 }
4046
4047 /* The documentation for this function is in its prototype declaration in
4048 target.h. */
4049
4050 int
4051 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask,
4052 enum target_hw_bp_type rw)
4053 {
4054 target_ops *target = current_inferior ()->top_target ();
4055
4056 return target->remove_mask_watchpoint (addr, mask, rw);
4057 }
4058
4059 /* The documentation for this function is in its prototype declaration
4060 in target.h. */
4061
4062 int
4063 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
4064 {
4065 target_ops *target = current_inferior ()->top_target ();
4066
4067 return target->masked_watch_num_registers (addr, mask);
4068 }
4069
4070 /* The documentation for this function is in its prototype declaration
4071 in target.h. */
4072
4073 int
4074 target_ranged_break_num_registers (void)
4075 {
4076 return current_inferior ()->top_target ()->ranged_break_num_registers ();
4077 }
4078
4079 /* See target.h. */
4080
4081 struct btrace_target_info *
4082 target_enable_btrace (thread_info *tp, const struct btrace_config *conf)
4083 {
4084 return current_inferior ()->top_target ()->enable_btrace (tp, conf);
4085 }
4086
4087 /* See target.h. */
4088
4089 void
4090 target_disable_btrace (struct btrace_target_info *btinfo)
4091 {
4092 current_inferior ()->top_target ()->disable_btrace (btinfo);
4093 }
4094
4095 /* See target.h. */
4096
4097 void
4098 target_teardown_btrace (struct btrace_target_info *btinfo)
4099 {
4100 current_inferior ()->top_target ()->teardown_btrace (btinfo);
4101 }
4102
4103 /* See target.h. */
4104
4105 enum btrace_error
4106 target_read_btrace (struct btrace_data *btrace,
4107 struct btrace_target_info *btinfo,
4108 enum btrace_read_type type)
4109 {
4110 target_ops *target = current_inferior ()->top_target ();
4111
4112 return target->read_btrace (btrace, btinfo, type);
4113 }
4114
4115 /* See target.h. */
4116
4117 const struct btrace_config *
4118 target_btrace_conf (const struct btrace_target_info *btinfo)
4119 {
4120 return current_inferior ()->top_target ()->btrace_conf (btinfo);
4121 }
4122
4123 /* See target.h. */
4124
4125 void
4126 target_stop_recording (void)
4127 {
4128 current_inferior ()->top_target ()->stop_recording ();
4129 }
4130
4131 /* See target.h. */
4132
4133 void
4134 target_save_record (const char *filename)
4135 {
4136 current_inferior ()->top_target ()->save_record (filename);
4137 }
4138
4139 /* See target.h. */
4140
4141 int
4142 target_supports_delete_record ()
4143 {
4144 return current_inferior ()->top_target ()->supports_delete_record ();
4145 }
4146
4147 /* See target.h. */
4148
4149 void
4150 target_delete_record (void)
4151 {
4152 current_inferior ()->top_target ()->delete_record ();
4153 }
4154
4155 /* See target.h. */
4156
4157 enum record_method
4158 target_record_method (ptid_t ptid)
4159 {
4160 return current_inferior ()->top_target ()->record_method (ptid);
4161 }
4162
4163 /* See target.h. */
4164
4165 int
4166 target_record_is_replaying (ptid_t ptid)
4167 {
4168 return current_inferior ()->top_target ()->record_is_replaying (ptid);
4169 }
4170
4171 /* See target.h. */
4172
4173 int
4174 target_record_will_replay (ptid_t ptid, int dir)
4175 {
4176 return current_inferior ()->top_target ()->record_will_replay (ptid, dir);
4177 }
4178
4179 /* See target.h. */
4180
4181 void
4182 target_record_stop_replaying (void)
4183 {
4184 current_inferior ()->top_target ()->record_stop_replaying ();
4185 }
4186
4187 /* See target.h. */
4188
4189 void
4190 target_goto_record_begin (void)
4191 {
4192 current_inferior ()->top_target ()->goto_record_begin ();
4193 }
4194
4195 /* See target.h. */
4196
4197 void
4198 target_goto_record_end (void)
4199 {
4200 current_inferior ()->top_target ()->goto_record_end ();
4201 }
4202
4203 /* See target.h. */
4204
4205 void
4206 target_goto_record (ULONGEST insn)
4207 {
4208 current_inferior ()->top_target ()->goto_record (insn);
4209 }
4210
4211 /* See target.h. */
4212
4213 void
4214 target_insn_history (int size, gdb_disassembly_flags flags)
4215 {
4216 current_inferior ()->top_target ()->insn_history (size, flags);
4217 }
4218
4219 /* See target.h. */
4220
4221 void
4222 target_insn_history_from (ULONGEST from, int size,
4223 gdb_disassembly_flags flags)
4224 {
4225 current_inferior ()->top_target ()->insn_history_from (from, size, flags);
4226 }
4227
4228 /* See target.h. */
4229
4230 void
4231 target_insn_history_range (ULONGEST begin, ULONGEST end,
4232 gdb_disassembly_flags flags)
4233 {
4234 current_inferior ()->top_target ()->insn_history_range (begin, end, flags);
4235 }
4236
4237 /* See target.h. */
4238
4239 void
4240 target_call_history (int size, record_print_flags flags)
4241 {
4242 current_inferior ()->top_target ()->call_history (size, flags);
4243 }
4244
4245 /* See target.h. */
4246
4247 void
4248 target_call_history_from (ULONGEST begin, int size, record_print_flags flags)
4249 {
4250 current_inferior ()->top_target ()->call_history_from (begin, size, flags);
4251 }
4252
4253 /* See target.h. */
4254
4255 void
4256 target_call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
4257 {
4258 current_inferior ()->top_target ()->call_history_range (begin, end, flags);
4259 }
4260
4261 /* See target.h. */
4262
4263 const struct frame_unwind *
4264 target_get_unwinder (void)
4265 {
4266 return current_inferior ()->top_target ()->get_unwinder ();
4267 }
4268
4269 /* See target.h. */
4270
4271 const struct frame_unwind *
4272 target_get_tailcall_unwinder (void)
4273 {
4274 return current_inferior ()->top_target ()->get_tailcall_unwinder ();
4275 }
4276
4277 /* See target.h. */
4278
4279 void
4280 target_prepare_to_generate_core (void)
4281 {
4282 current_inferior ()->top_target ()->prepare_to_generate_core ();
4283 }
4284
4285 /* See target.h. */
4286
4287 void
4288 target_done_generating_core (void)
4289 {
4290 current_inferior ()->top_target ()->done_generating_core ();
4291 }
4292
4293 \f
4294
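/* Help text shared by the "info target" and "info files" commands,
   which are registered in _initialize_target, below. */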
4295 static char targ_desc[] =
4296 "Names of targets and files being debugged.\nShows the entire \
4297 stack of targets currently in use (including the exec-file,\n\
4298 core-file, and process, if any), as well as the symbol file name.";
4299
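/* Default implementation of the "rcmd" target method, used by targets
   that have no monitor / remote-command support. */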
4300 static void
4301 default_rcmd (struct target_ops *self, const char *command,
4302 struct ui_file *output)
4303 {
4304 error (_("\"monitor\" command not supported by this target."));
4305 }
4306
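/* Implementation of the "monitor" command; forwards CMD to the current
   target via target_rcmd, with output going to gdb_stdtarg. */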
4307 static void
4308 do_monitor_command (const char *cmd, int from_tty)
4309 {
4310 target_rcmd (cmd, gdb_stdtarg);
4311 }
4312
4313 /* Erases all the memory regions marked as flash. CMD and FROM_TTY are
4314 ignored. */
4315
4316 void
4317 flash_erase_command (const char *cmd, int from_tty)
4318 {
4319 /* Used to communicate termination of flash operations to the target. */
4320 bool found_flash_region = false;
4321 struct gdbarch *gdbarch = target_gdbarch ();
4322
4323 std::vector<mem_region> mem_regions = target_memory_map ();
4324
4325 /* Iterate over all memory regions. */
4326 for (const mem_region &m : mem_regions)
4327 {
4328 /* Is this a flash memory region? */
4329 if (m.attrib.mode == MEM_FLASH)
4330 {
4331 found_flash_region = true;
4332 target_flash_erase (m.lo, m.hi - m.lo);
4333
4334 ui_out_emit_tuple tuple_emitter (current_uiout, "erased-regions");
4335
4336 current_uiout->message (_("Erasing flash memory region at address "));
4337 current_uiout->field_core_addr ("address", gdbarch, m.lo);
4338 current_uiout->message (", size = ");
4339 current_uiout->field_string ("size", hex_string (m.hi - m.lo));
4340 current_uiout->message ("\n");
4341 }
4342 }
4343
4344 /* Did we do any flash operations? If so, we need to finalize them. */
4345 if (found_flash_region)
4346 target_flash_done ();
4347 else
4348 current_uiout->message (_("No flash memory regions found.\n"));
4349 }
4350
4351 /* Print the name of each layer of our target stack. */
4352
4353 static void
4354 maintenance_print_target_stack (const char *cmd, int from_tty)
4355 {
4356 gdb_printf (_("The current target stack is:\n"));
4357
4358 for (target_ops *t = current_inferior ()->top_target ();
4359 t != NULL;
4360 t = t->beneath ())
4361 {
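/* Don't list the debug target; it is an internal wrapper rather than
   a real layer. */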
4362 if (t->stratum () == debug_stratum)
4363 continue;
4364 gdb_printf (" - %s (%s)\n", t->shortname (), t->longname ());
4365 }
4366 }
4367
4368 /* See target.h. */
4369
4370 void
4371 target_async (int enable)
4372 {
4373 /* If we are trying to enable async mode, then async mode must be
4374 possible for this target. */
4375 gdb_assert (!enable || target_can_async_p ());
4376 infrun_async (enable);
4377 current_inferior ()->top_target ()->async (enable);
4378 }
4379
4380 /* See target.h. */
4381
4382 void
4383 target_thread_events (int enable)
4384 {
4385 current_inferior ()->top_target ()->thread_events (enable);
4386 }
4387
4388 /* Controls whether targets may report that they can be, or are, async.
4389 This is just for maintainers to use when debugging gdb. */
4390 bool target_async_permitted = true;
4391
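/* Set callback for maint target-async setting. */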
4392 static void
4393 set_maint_target_async (bool permitted)
4394 {
4395 if (have_live_inferiors ())
4396 error (_("Cannot change this setting while the inferior is running."));
4397
4398 target_async_permitted = permitted;
4399 }
4400
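/* Get callback for maint target-async setting. */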
4401 static bool
4402 get_maint_target_async ()
4403 {
4404 return target_async_permitted;
4405 }
4406
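/* Show callback for maint target-async setting. */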
4407 static void
4408 show_maint_target_async (ui_file *file, int from_tty,
4409 cmd_list_element *c, const char *value)
4410 {
4411 gdb_printf (file,
4412 _("Controlling the inferior in "
4413 "asynchronous mode is %s.\n"), value);
4414 }
4415
4416 /* Return true if the target operates in non-stop mode even with "set
4417 non-stop off". */
4418
4419 static int
4420 target_always_non_stop_p (void)
4421 {
4422 return current_inferior ()->top_target ()->always_non_stop_p ();
4423 }
4424
4425 /* See target.h. */
4426
4427 bool
4428 target_is_non_stop_p ()
4429 {
4430 return ((non_stop
4431 || target_non_stop_enabled == AUTO_BOOLEAN_TRUE
4432 || (target_non_stop_enabled == AUTO_BOOLEAN_AUTO
4433 && target_always_non_stop_p ()))
4434 && target_can_async_p ());
4435 }
4436
4437 /* See target.h. */
4438
4439 bool
4440 exists_non_stop_target ()
4441 {
4442 if (target_is_non_stop_p ())
4443 return true;
4444
4445 scoped_restore_current_thread restore_thread;
4446
4447 for (inferior *inf : all_inferiors ())
4448 {
4449 switch_to_inferior_no_thread (inf);
4450 if (target_is_non_stop_p ())
4451 return true;
4452 }
4453
4454 return false;
4455 }
4456
4457 /* Controls whether targets may report that they always run in non-stop
4458 mode. This is just for maintainers to use when debugging gdb. */
4459 enum auto_boolean target_non_stop_enabled = AUTO_BOOLEAN_AUTO;
4460
4461 /* Set callback for maint target-non-stop setting. */
4462
4463 static void
4464 set_maint_target_non_stop (auto_boolean enabled)
4465 {
4466 if (have_live_inferiors ())
4467 error (_("Cannot change this setting while the inferior is running."));
4468
4469 target_non_stop_enabled = enabled;
4470 }
4471
4472 /* Get callback for maint target-non-stop setting. */
4473
4474 static auto_boolean
4475 get_maint_target_non_stop ()
4476 {
4477 return target_non_stop_enabled;
4478 }
4479
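/* Show callback for maint target-non-stop setting. */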
4480 static void
4481 show_maint_target_non_stop (ui_file *file, int from_tty,
4482 cmd_list_element *c, const char *value)
4483 {
4484 if (target_non_stop_enabled == AUTO_BOOLEAN_AUTO)
4485 gdb_printf (file,
4486 _("Whether the target is always in non-stop mode "
4487 "is %s (currently %s).\n"), value,
4488 target_always_non_stop_p () ? "on" : "off");
4489 else
4490 gdb_printf (file,
4491 _("Whether the target is always in non-stop mode "
4492 "is %s.\n"), value);
4493 }
4494
4495 /* Temporary copies of permission settings. */
4496
4497 static bool may_write_registers_1 = true;
4498 static bool may_write_memory_1 = true;
4499 static bool may_insert_breakpoints_1 = true;
4500 static bool may_insert_tracepoints_1 = true;
4501 static bool may_insert_fast_tracepoints_1 = true;
4502 static bool may_stop_1 = true;
4503
4504 /* Make the user-set values match the real values again. */
4505
4506 void
4507 update_target_permissions (void)
4508 {
4509 may_write_registers_1 = may_write_registers;
4510 may_write_memory_1 = may_write_memory;
4511 may_insert_breakpoints_1 = may_insert_breakpoints;
4512 may_insert_tracepoints_1 = may_insert_tracepoints;
4513 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
4514 may_stop_1 = may_stop;
4515 }
4516
4517 /* This single function handles most of the permission flags in the same
4518 way; "may-write-memory" is handled separately, below. */
4519
4520 static void
4521 set_target_permissions (const char *args, int from_tty,
4522 struct cmd_list_element *c)
4523 {
4524 if (target_has_execution ())
4525 {
4526 update_target_permissions ();
4527 error (_("Cannot change this setting while the inferior is running."));
4528 }
4529
4530 /* Make the real values match the user-changed values. */
4531 may_write_registers = may_write_registers_1;
4532 may_insert_breakpoints = may_insert_breakpoints_1;
4533 may_insert_tracepoints = may_insert_tracepoints_1;
4534 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
4535 may_stop = may_stop_1;
4536 update_observer_mode ();
4537 }
4538
4539 /* Set memory write permission independently of observer mode. */
4540
4541 static void
4542 set_write_memory_permission (const char *args, int from_tty,
4543 struct cmd_list_element *c)
4544 {
4545 /* Make the real values match the user-changed values. */
4546 may_write_memory = may_write_memory_1;
4547 update_observer_mode ();
4548 }
4549
4550 void _initialize_target ();
4551
4552 void
4553 _initialize_target ()
4554 {
4555 the_debug_target = new debug_target ();
4556
4557 add_info ("target", info_target_command, targ_desc);
4558 add_info ("files", info_target_command, targ_desc);
4559
4560 add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
4561 Set target debugging."), _("\
4562 Show target debugging."), _("\
4563 When non-zero, target debugging is enabled. Higher numbers are more\n\
4564 verbose."),
4565 set_targetdebug,
4566 show_targetdebug,
4567 &setdebuglist, &showdebuglist);
4568
4569 add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
4570 &trust_readonly, _("\
4571 Set mode for reading from readonly sections."), _("\
4572 Show mode for reading from readonly sections."), _("\
4573 When this mode is on, memory reads from readonly sections (such as .text)\n\
4574 will be read from the object file instead of from the target. This will\n\
4575 result in significant performance improvement for remote targets."),
4576 NULL,
4577 show_trust_readonly,
4578 &setlist, &showlist);
4579
4580 add_com ("monitor", class_obscure, do_monitor_command,
4581 _("Send a command to the remote monitor (remote targets only)."));
4582
4583 add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
4584 _("Print the name of each layer of the internal target stack."),
4585 &maintenanceprintlist);
4586
4587 add_setshow_boolean_cmd ("target-async", no_class,
4588 _("\
4589 Set whether gdb controls the inferior in asynchronous mode."), _("\
4590 Show whether gdb controls the inferior in asynchronous mode."), _("\
4591 Tells gdb whether to control the inferior in asynchronous mode."),
4592 set_maint_target_async,
4593 get_maint_target_async,
4594 show_maint_target_async,
4595 &maintenance_set_cmdlist,
4596 &maintenance_show_cmdlist);
4597
4598 add_setshow_auto_boolean_cmd ("target-non-stop", no_class,
4599 _("\
4600 Set whether gdb always controls the inferior in non-stop mode."), _("\
4601 Show whether gdb always controls the inferior in non-stop mode."), _("\
4602 Tells gdb whether to control the inferior in non-stop mode."),
4603 set_maint_target_non_stop,
4604 get_maint_target_non_stop,
4605 show_maint_target_non_stop,
4606 &maintenance_set_cmdlist,
4607 &maintenance_show_cmdlist);
4608
4609 add_setshow_boolean_cmd ("may-write-registers", class_support,
4610 &may_write_registers_1, _("\
4611 Set permission to write into registers."), _("\
4612 Show permission to write into registers."), _("\
4613 When this permission is on, GDB may write into the target's registers.\n\
4614 Otherwise, any sort of write attempt will result in an error."),
4615 set_target_permissions, NULL,
4616 &setlist, &showlist);
4617
4618 add_setshow_boolean_cmd ("may-write-memory", class_support,
4619 &may_write_memory_1, _("\
4620 Set permission to write into target memory."), _("\
4621 Show permission to write into target memory."), _("\
4622 When this permission is on, GDB may write into the target's memory.\n\
4623 Otherwise, any sort of write attempt will result in an error."),
4624 set_write_memory_permission, NULL,
4625 &setlist, &showlist);
4626
4627 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
4628 &may_insert_breakpoints_1, _("\
4629 Set permission to insert breakpoints in the target."), _("\
4630 Show permission to insert breakpoints in the target."), _("\
4631 When this permission is on, GDB may insert breakpoints in the program.\n\
4632 Otherwise, any sort of insertion attempt will result in an error."),
4633 set_target_permissions, NULL,
4634 &setlist, &showlist);
4635
4636 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
4637 &may_insert_tracepoints_1, _("\
4638 Set permission to insert tracepoints in the target."), _("\
4639 Show permission to insert tracepoints in the target."), _("\
4640 When this permission is on, GDB may insert tracepoints in the program.\n\
4641 Otherwise, any sort of insertion attempt will result in an error."),
4642 set_target_permissions, NULL,
4643 &setlist, &showlist);
4644
4645 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
4646 &may_insert_fast_tracepoints_1, _("\
4647 Set permission to insert fast tracepoints in the target."), _("\
4648 Show permission to insert fast tracepoints in the target."), _("\
4649 When this permission is on, GDB may insert fast tracepoints.\n\
4650 Otherwise, any sort of insertion attempt will result in an error."),
4651 set_target_permissions, NULL,
4652 &setlist, &showlist);
4653
4654 add_setshow_boolean_cmd ("may-interrupt", class_support,
4655 &may_stop_1, _("\
4656 Set permission to interrupt or signal the target."), _("\
4657 Show permission to interrupt or signal the target."), _("\
4658 When this permission is on, GDB may interrupt/stop the target's execution.\n\
4659 Otherwise, any attempt to interrupt or stop will be ignored."),
4660 set_target_permissions, NULL,
4661 &setlist, &showlist);
4662
4663 add_com ("flash-erase", no_class, flash_erase_command,
4664 _("Erase all flash memory regions."));
4665
4666 add_setshow_boolean_cmd ("auto-connect-native-target", class_support,
4667 &auto_connect_native_target, _("\
4668 Set whether GDB may automatically connect to the native target."), _("\
4669 Show whether GDB may automatically connect to the native target."), _("\
4670 When on, and GDB is not connected to a target yet, GDB\n\
4671 attempts \"run\" and other commands with the native target."),
4672 NULL, show_auto_connect_native_target,
4673 &setlist, &showlist);
4674 }