gdb/target.c (from binutils-gdb.git)
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2022 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "target.h"
24 #include "target-dcache.h"
25 #include "gdbcmd.h"
26 #include "symtab.h"
27 #include "inferior.h"
28 #include "infrun.h"
29 #include "observable.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdbcore.h"
37 #include "target-descriptions.h"
38 #include "gdbthread.h"
39 #include "solib.h"
40 #include "exec.h"
41 #include "inline-frame.h"
42 #include "tracepoint.h"
43 #include "gdb/fileio.h"
44 #include "gdbsupport/agent.h"
45 #include "auxv.h"
46 #include "target-debug.h"
47 #include "top.h"
48 #include "event-top.h"
49 #include <algorithm>
50 #include "gdbsupport/byte-vector.h"
51 #include "gdbsupport/search.h"
52 #include "terminal.h"
53 #include <unordered_map>
54 #include "target-connection.h"
55 #include "valprint.h"
56 #include "cli/cli-decode.h"
57
58 static void generic_tls_error (void) ATTRIBUTE_NORETURN;
59
60 static void default_terminal_info (struct target_ops *, const char *, int);
61
62 static int default_watchpoint_addr_within_range (struct target_ops *,
63 CORE_ADDR, CORE_ADDR, int);
64
65 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
66 CORE_ADDR, int);
67
68 static void default_rcmd (struct target_ops *, const char *, struct ui_file *);
69
70 static ptid_t default_get_ada_task_ptid (struct target_ops *self,
71 long lwp, ULONGEST tid);
72
73 static void default_mourn_inferior (struct target_ops *self);
74
75 static int default_search_memory (struct target_ops *ops,
76 CORE_ADDR start_addr,
77 ULONGEST search_space_len,
78 const gdb_byte *pattern,
79 ULONGEST pattern_len,
80 CORE_ADDR *found_addrp);
81
82 static int default_verify_memory (struct target_ops *self,
83 const gdb_byte *data,
84 CORE_ADDR memaddr, ULONGEST size);
85
86 static void tcomplain (void) ATTRIBUTE_NORETURN;
87
88 static struct target_ops *find_default_run_target (const char *);
89
90 static int dummy_find_memory_regions (struct target_ops *self,
91 find_memory_region_ftype ignore1,
92 void *ignore2);
93
94 static gdb::unique_xmalloc_ptr<char> dummy_make_corefile_notes
95 (struct target_ops *self, bfd *ignore1, int *ignore2);
96
97 static std::string default_pid_to_str (struct target_ops *ops, ptid_t ptid);
98
99 static enum exec_direction_kind default_execution_direction
100 (struct target_ops *self);
101
102 /* Mapping between target_info objects (which have address identity)
103 and corresponding open/factory function/callback. Each add_target
104 call adds one entry to this map, and registers a "target
105 TARGET_NAME" command that when invoked calls the factory registered
106 here. The target_info object is associated with the command via
107 the command's context. */
108 static std::unordered_map<const target_info *, target_open_ftype *>
109 target_factories;
110
111 /* The singleton debug target. */
112
113 static struct target_ops *the_debug_target;
114
115 /* Command list for target. */
116
117 static struct cmd_list_element *targetlist = NULL;
118
119 /* True if we should trust readonly sections from the
120 executable when reading memory. */
121
122 static bool trust_readonly = false;
123
124 /* Nonzero if we should show true memory content including
125 memory breakpoint inserted by gdb. */
126
127 static int show_memory_breakpoints = 0;
128
129 /* These globals control whether GDB attempts to perform these
130 operations; they are useful for targets that need to prevent
131 inadvertent disruption, such as in non-stop mode. */
132
133 bool may_write_registers = true;
134
135 bool may_write_memory = true;
136
137 bool may_insert_breakpoints = true;
138
139 bool may_insert_tracepoints = true;
140
141 bool may_insert_fast_tracepoints = true;
142
143 bool may_stop = true;
144
145 /* Non-zero if we want to see trace of target level stuff. */
146
147 static unsigned int targetdebug = 0;
148
149 static void
150 set_targetdebug (const char *args, int from_tty, struct cmd_list_element *c)
151 {
152 if (targetdebug)
153 current_inferior ()->push_target (the_debug_target);
154 else
155 current_inferior ()->unpush_target (the_debug_target);
156 }
157
158 static void
159 show_targetdebug (struct ui_file *file, int from_tty,
160 struct cmd_list_element *c, const char *value)
161 {
162 gdb_printf (file, _("Target debugging is %s.\n"), value);
163 }
164
165 int
166 target_has_memory ()
167 {
168 for (target_ops *t = current_inferior ()->top_target ();
169 t != NULL;
170 t = t->beneath ())
171 if (t->has_memory ())
172 return 1;
173
174 return 0;
175 }
176
177 int
178 target_has_stack ()
179 {
180 for (target_ops *t = current_inferior ()->top_target ();
181 t != NULL;
182 t = t->beneath ())
183 if (t->has_stack ())
184 return 1;
185
186 return 0;
187 }
188
189 int
190 target_has_registers ()
191 {
192 for (target_ops *t = current_inferior ()->top_target ();
193 t != NULL;
194 t = t->beneath ())
195 if (t->has_registers ())
196 return 1;
197
198 return 0;
199 }
200
201 bool
202 target_has_execution (inferior *inf)
203 {
204 if (inf == nullptr)
205 inf = current_inferior ();
206
207 for (target_ops *t = inf->top_target ();
208 t != nullptr;
209 t = inf->find_target_beneath (t))
210 if (t->has_execution (inf))
211 return true;
212
213 return false;
214 }
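/* Editor's note: a minimal usage sketch (not part of this file) showing
   how these predicates are typically consulted before an operation that
   requires a live process; the message is the conventional GDB wording.

     if (!target_has_execution ())
       error (_("The program is not being run."));  */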
215
216 const char *
217 target_shortname ()
218 {
219 return current_inferior ()->top_target ()->shortname ();
220 }
221
222 /* See target.h. */
223
224 bool
225 target_attach_no_wait ()
226 {
227 return current_inferior ()->top_target ()->attach_no_wait ();
228 }
229
230 /* See target.h. */
231
232 void
233 target_post_attach (int pid)
234 {
235 return current_inferior ()->top_target ()->post_attach (pid);
236 }
237
238 /* See target.h. */
239
240 void
241 target_prepare_to_store (regcache *regcache)
242 {
243 return current_inferior ()->top_target ()->prepare_to_store (regcache);
244 }
245
246 /* See target.h. */
247
248 bool
249 target_supports_enable_disable_tracepoint ()
250 {
251 target_ops *target = current_inferior ()->top_target ();
252
253 return target->supports_enable_disable_tracepoint ();
254 }
255
256 bool
257 target_supports_string_tracing ()
258 {
259 return current_inferior ()->top_target ()->supports_string_tracing ();
260 }
261
262 /* See target.h. */
263
264 bool
265 target_supports_evaluation_of_breakpoint_conditions ()
266 {
267 target_ops *target = current_inferior ()->top_target ();
268
269 return target->supports_evaluation_of_breakpoint_conditions ();
270 }
271
272 /* See target.h. */
273
274 bool
275 target_supports_dumpcore ()
276 {
277 return current_inferior ()->top_target ()->supports_dumpcore ();
278 }
279
280 /* See target.h. */
281
282 void
283 target_dumpcore (const char *filename)
284 {
285 return current_inferior ()->top_target ()->dumpcore (filename);
286 }
287
288 /* See target.h. */
289
290 bool
291 target_can_run_breakpoint_commands ()
292 {
293 return current_inferior ()->top_target ()->can_run_breakpoint_commands ();
294 }
295
296 /* See target.h. */
297
298 void
299 target_files_info ()
300 {
301 return current_inferior ()->top_target ()->files_info ();
302 }
303
304 /* See target.h. */
305
306 int
307 target_insert_fork_catchpoint (int pid)
308 {
309 return current_inferior ()->top_target ()->insert_fork_catchpoint (pid);
310 }
311
312 /* See target.h. */
313
314 int
315 target_remove_fork_catchpoint (int pid)
316 {
317 return current_inferior ()->top_target ()->remove_fork_catchpoint (pid);
318 }
319
320 /* See target.h. */
321
322 int
323 target_insert_vfork_catchpoint (int pid)
324 {
325 return current_inferior ()->top_target ()->insert_vfork_catchpoint (pid);
326 }
327
328 /* See target.h. */
329
330 int
331 target_remove_vfork_catchpoint (int pid)
332 {
333 return current_inferior ()->top_target ()->remove_vfork_catchpoint (pid);
334 }
335
336 /* See target.h. */
337
338 int
339 target_insert_exec_catchpoint (int pid)
340 {
341 return current_inferior ()->top_target ()->insert_exec_catchpoint (pid);
342 }
343
344 /* See target.h. */
345
346 int
347 target_remove_exec_catchpoint (int pid)
348 {
349 return current_inferior ()->top_target ()->remove_exec_catchpoint (pid);
350 }
351
352 /* See target.h. */
353
354 int
355 target_set_syscall_catchpoint (int pid, bool needed, int any_count,
356 gdb::array_view<const int> syscall_counts)
357 {
358 target_ops *target = current_inferior ()->top_target ();
359
360 return target->set_syscall_catchpoint (pid, needed, any_count,
361 syscall_counts);
362 }
363
364 /* See target.h. */
365
366 void
367 target_rcmd (const char *command, struct ui_file *outbuf)
368 {
369 return current_inferior ()->top_target ()->rcmd (command, outbuf);
370 }
371
372 /* See target.h. */
373
374 bool
375 target_can_lock_scheduler ()
376 {
377 target_ops *target = current_inferior ()->top_target ();
378
379 return (target->get_thread_control_capabilities () & tc_schedlock) != 0;
380 }
381
382 /* See target.h. */
383
384 bool
385 target_can_async_p ()
386 {
387 return target_can_async_p (current_inferior ()->top_target ());
388 }
389
390 /* See target.h. */
391
392 bool
393 target_can_async_p (struct target_ops *target)
394 {
395 if (!target_async_permitted)
396 return false;
397 return target->can_async_p ();
398 }
399
400 /* See target.h. */
401
402 bool
403 target_is_async_p ()
404 {
405 bool result = current_inferior ()->top_target ()->is_async_p ();
406 gdb_assert (target_async_permitted || !result);
407 return result;
408 }
409
410 exec_direction_kind
411 target_execution_direction ()
412 {
413 return current_inferior ()->top_target ()->execution_direction ();
414 }
415
416 /* See target.h. */
417
418 const char *
419 target_extra_thread_info (thread_info *tp)
420 {
421 return current_inferior ()->top_target ()->extra_thread_info (tp);
422 }
423
424 /* See target.h. */
425
426 char *
427 target_pid_to_exec_file (int pid)
428 {
429 return current_inferior ()->top_target ()->pid_to_exec_file (pid);
430 }
431
432 /* See target.h. */
433
434 gdbarch *
435 target_thread_architecture (ptid_t ptid)
436 {
437 return current_inferior ()->top_target ()->thread_architecture (ptid);
438 }
439
440 /* See target.h. */
441
442 int
443 target_find_memory_regions (find_memory_region_ftype func, void *data)
444 {
445 return current_inferior ()->top_target ()->find_memory_regions (func, data);
446 }
447
448 /* See target.h. */
449
450 gdb::unique_xmalloc_ptr<char>
451 target_make_corefile_notes (bfd *bfd, int *size_p)
452 {
453 return current_inferior ()->top_target ()->make_corefile_notes (bfd, size_p);
454 }
455
456 gdb_byte *
457 target_get_bookmark (const char *args, int from_tty)
458 {
459 return current_inferior ()->top_target ()->get_bookmark (args, from_tty);
460 }
461
462 void
463 target_goto_bookmark (const gdb_byte *arg, int from_tty)
464 {
465 return current_inferior ()->top_target ()->goto_bookmark (arg, from_tty);
466 }
467
468 /* See target.h. */
469
470 bool
471 target_stopped_by_watchpoint ()
472 {
473 return current_inferior ()->top_target ()->stopped_by_watchpoint ();
474 }
475
476 /* See target.h. */
477
478 bool
479 target_stopped_by_sw_breakpoint ()
480 {
481 return current_inferior ()->top_target ()->stopped_by_sw_breakpoint ();
482 }
483
484 bool
485 target_supports_stopped_by_sw_breakpoint ()
486 {
487 target_ops *target = current_inferior ()->top_target ();
488
489 return target->supports_stopped_by_sw_breakpoint ();
490 }
491
492 bool
493 target_stopped_by_hw_breakpoint ()
494 {
495 return current_inferior ()->top_target ()->stopped_by_hw_breakpoint ();
496 }
497
498 bool
499 target_supports_stopped_by_hw_breakpoint ()
500 {
501 target_ops *target = current_inferior ()->top_target ();
502
503 return target->supports_stopped_by_hw_breakpoint ();
504 }
505
506 /* See target.h. */
507
508 bool
509 target_have_steppable_watchpoint ()
510 {
511 return current_inferior ()->top_target ()->have_steppable_watchpoint ();
512 }
513
514 /* See target.h. */
515
516 int
517 target_can_use_hardware_watchpoint (bptype type, int cnt, int othertype)
518 {
519 target_ops *target = current_inferior ()->top_target ();
520
521 return target->can_use_hw_breakpoint (type, cnt, othertype);
522 }
523
524 /* See target.h. */
525
526 int
527 target_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
528 {
529 target_ops *target = current_inferior ()->top_target ();
530
531 return target->region_ok_for_hw_watchpoint (addr, len);
532 }
533
534
535 int
536 target_can_do_single_step ()
537 {
538 return current_inferior ()->top_target ()->can_do_single_step ();
539 }
540
541 /* See target.h. */
542
543 int
544 target_insert_watchpoint (CORE_ADDR addr, int len, target_hw_bp_type type,
545 expression *cond)
546 {
547 target_ops *target = current_inferior ()->top_target ();
548
549 return target->insert_watchpoint (addr, len, type, cond);
550 }
551
552 /* See target.h. */
553
554 int
555 target_remove_watchpoint (CORE_ADDR addr, int len, target_hw_bp_type type,
556 expression *cond)
557 {
558 target_ops *target = current_inferior ()->top_target ();
559
560 return target->remove_watchpoint (addr, len, type, cond);
561 }
562
563 /* See target.h. */
564
565 int
566 target_insert_hw_breakpoint (gdbarch *gdbarch, bp_target_info *bp_tgt)
567 {
568 target_ops *target = current_inferior ()->top_target ();
569
570 return target->insert_hw_breakpoint (gdbarch, bp_tgt);
571 }
572
573 /* See target.h. */
574
575 int
576 target_remove_hw_breakpoint (gdbarch *gdbarch, bp_target_info *bp_tgt)
577 {
578 target_ops *target = current_inferior ()->top_target ();
579
580 return target->remove_hw_breakpoint (gdbarch, bp_tgt);
581 }
582
583 /* See target.h. */
584
585 bool
586 target_can_accel_watchpoint_condition (CORE_ADDR addr, int len, int type,
587 expression *cond)
588 {
589 target_ops *target = current_inferior ()->top_target ();
590
591 return target->can_accel_watchpoint_condition (addr, len, type, cond);
592 }
593
594 /* See target.h. */
595
596 bool
597 target_can_execute_reverse ()
598 {
599 return current_inferior ()->top_target ()->can_execute_reverse ();
600 }
601
602 ptid_t
603 target_get_ada_task_ptid (long lwp, ULONGEST tid)
604 {
605 return current_inferior ()->top_target ()->get_ada_task_ptid (lwp, tid);
606 }
607
608 bool
609 target_filesystem_is_local ()
610 {
611 return current_inferior ()->top_target ()->filesystem_is_local ();
612 }
613
614 void
615 target_trace_init ()
616 {
617 return current_inferior ()->top_target ()->trace_init ();
618 }
619
620 void
621 target_download_tracepoint (bp_location *location)
622 {
623 return current_inferior ()->top_target ()->download_tracepoint (location);
624 }
625
626 bool
627 target_can_download_tracepoint ()
628 {
629 return current_inferior ()->top_target ()->can_download_tracepoint ();
630 }
631
632 void
633 target_download_trace_state_variable (const trace_state_variable &tsv)
634 {
635 target_ops *target = current_inferior ()->top_target ();
636
637 return target->download_trace_state_variable (tsv);
638 }
639
640 void
641 target_enable_tracepoint (bp_location *loc)
642 {
643 return current_inferior ()->top_target ()->enable_tracepoint (loc);
644 }
645
646 void
647 target_disable_tracepoint (bp_location *loc)
648 {
649 return current_inferior ()->top_target ()->disable_tracepoint (loc);
650 }
651
652 void
653 target_trace_start ()
654 {
655 return current_inferior ()->top_target ()->trace_start ();
656 }
657
658 void
659 target_trace_set_readonly_regions ()
660 {
661 return current_inferior ()->top_target ()->trace_set_readonly_regions ();
662 }
663
664 int
665 target_get_trace_status (trace_status *ts)
666 {
667 return current_inferior ()->top_target ()->get_trace_status (ts);
668 }
669
670 void
671 target_get_tracepoint_status (breakpoint *tp, uploaded_tp *utp)
672 {
673 return current_inferior ()->top_target ()->get_tracepoint_status (tp, utp);
674 }
675
676 void
677 target_trace_stop ()
678 {
679 return current_inferior ()->top_target ()->trace_stop ();
680 }
681
682 int
683 target_trace_find (trace_find_type type, int num,
684 CORE_ADDR addr1, CORE_ADDR addr2, int *tpp)
685 {
686 target_ops *target = current_inferior ()->top_target ();
687
688 return target->trace_find (type, num, addr1, addr2, tpp);
689 }
690
691 bool
692 target_get_trace_state_variable_value (int tsv, LONGEST *val)
693 {
694 target_ops *target = current_inferior ()->top_target ();
695
696 return target->get_trace_state_variable_value (tsv, val);
697 }
698
699 int
700 target_save_trace_data (const char *filename)
701 {
702 return current_inferior ()->top_target ()->save_trace_data (filename);
703 }
704
705 int
706 target_upload_tracepoints (uploaded_tp **utpp)
707 {
708 return current_inferior ()->top_target ()->upload_tracepoints (utpp);
709 }
710
711 int
712 target_upload_trace_state_variables (uploaded_tsv **utsvp)
713 {
714 target_ops *target = current_inferior ()->top_target ();
715
716 return target->upload_trace_state_variables (utsvp);
717 }
718
719 LONGEST
720 target_get_raw_trace_data (gdb_byte *buf, ULONGEST offset, LONGEST len)
721 {
722 target_ops *target = current_inferior ()->top_target ();
723
724 return target->get_raw_trace_data (buf, offset, len);
725 }
726
727 int
728 target_get_min_fast_tracepoint_insn_len ()
729 {
730 target_ops *target = current_inferior ()->top_target ();
731
732 return target->get_min_fast_tracepoint_insn_len ();
733 }
734
735 void
736 target_set_disconnected_tracing (int val)
737 {
738 return current_inferior ()->top_target ()->set_disconnected_tracing (val);
739 }
740
741 void
742 target_set_circular_trace_buffer (int val)
743 {
744 return current_inferior ()->top_target ()->set_circular_trace_buffer (val);
745 }
746
747 void
748 target_set_trace_buffer_size (LONGEST val)
749 {
750 return current_inferior ()->top_target ()->set_trace_buffer_size (val);
751 }
752
753 bool
754 target_set_trace_notes (const char *user, const char *notes,
755 const char *stopnotes)
756 {
757 target_ops *target = current_inferior ()->top_target ();
758
759 return target->set_trace_notes (user, notes, stopnotes);
760 }
761
762 bool
763 target_get_tib_address (ptid_t ptid, CORE_ADDR *addr)
764 {
765 return current_inferior ()->top_target ()->get_tib_address (ptid, addr);
766 }
767
768 void
769 target_set_permissions ()
770 {
771 return current_inferior ()->top_target ()->set_permissions ();
772 }
773
774 bool
775 target_static_tracepoint_marker_at (CORE_ADDR addr,
776 static_tracepoint_marker *marker)
777 {
778 target_ops *target = current_inferior ()->top_target ();
779
780 return target->static_tracepoint_marker_at (addr, marker);
781 }
782
783 std::vector<static_tracepoint_marker>
784 target_static_tracepoint_markers_by_strid (const char *marker_id)
785 {
786 target_ops *target = current_inferior ()->top_target ();
787
788 return target->static_tracepoint_markers_by_strid (marker_id);
789 }
790
791 traceframe_info_up
792 target_traceframe_info ()
793 {
794 return current_inferior ()->top_target ()->traceframe_info ();
795 }
796
797 bool
798 target_use_agent (bool use)
799 {
800 return current_inferior ()->top_target ()->use_agent (use);
801 }
802
803 bool
804 target_can_use_agent ()
805 {
806 return current_inferior ()->top_target ()->can_use_agent ();
807 }
808
809 bool
810 target_augmented_libraries_svr4_read ()
811 {
812 return current_inferior ()->top_target ()->augmented_libraries_svr4_read ();
813 }
814
815 bool
816 target_supports_memory_tagging ()
817 {
818 return current_inferior ()->top_target ()->supports_memory_tagging ();
819 }
820
821 bool
822 target_fetch_memtags (CORE_ADDR address, size_t len, gdb::byte_vector &tags,
823 int type)
824 {
825 return current_inferior ()->top_target ()->fetch_memtags (address, len, tags, type);
826 }
827
828 bool
829 target_store_memtags (CORE_ADDR address, size_t len,
830 const gdb::byte_vector &tags, int type)
831 {
832 return current_inferior ()->top_target ()->store_memtags (address, len, tags, type);
833 }
834
835 void
836 target_log_command (const char *p)
837 {
838 return current_inferior ()->top_target ()->log_command (p);
839 }
840
841 /* This is used to implement the various target commands. */
842
843 static void
844 open_target (const char *args, int from_tty, struct cmd_list_element *command)
845 {
846 auto *ti = static_cast<target_info *> (command->context ());
847 target_open_ftype *func = target_factories[ti];
848
849 if (targetdebug)
850 gdb_printf (gdb_stdlog, "-> %s->open (...)\n",
851 ti->shortname);
852
853 func (args, from_tty);
854
855 if (targetdebug)
856 gdb_printf (gdb_stdlog, "<- %s->open (%s, %d)\n",
857 ti->shortname, args, from_tty);
858 }
859
860 /* See target.h. */
861
862 void
863 add_target (const target_info &t, target_open_ftype *func,
864 completer_ftype *completer)
865 {
866 struct cmd_list_element *c;
867
868 auto &func_slot = target_factories[&t];
869 if (func_slot != nullptr)
870 internal_error (__FILE__, __LINE__,
871 _("target already added (\"%s\")."), t.shortname);
872 func_slot = func;
873
874 if (targetlist == NULL)
875 add_basic_prefix_cmd ("target", class_run, _("\
876 Connect to a target machine or process.\n\
877 The first argument is the type or protocol of the target machine.\n\
878 Remaining arguments are interpreted by the target protocol. For more\n\
879 information on the arguments for a particular protocol, type\n\
880 `help target ' followed by the protocol name."),
881 &targetlist, 0, &cmdlist);
882 c = add_cmd (t.shortname, no_class, t.doc, &targetlist);
883 c->set_context ((void *) &t);
884 c->func = open_target;
885 if (completer != NULL)
886 set_cmd_completer (c, completer);
887 }
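/* Editor's note: a hedged sketch, with hypothetical names, of how a
   backend registers itself through add_target.  The backend supplies a
   target_info (shortname, longname, doc) and an open callback that is
   run by the generated "target example" command.

     static const target_info example_target_info = {
       "example",
       N_("Example remote-style target"),
       N_("Use: target example HOST:PORT")
     };

     static void
     example_target_open (const char *args, int from_tty)
     {
       ... parse ARGS, connect, then push the backend's target_ops ...
     }

     void
     _initialize_example ()
     {
       add_target (example_target_info, example_target_open);
     }
*/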
888
889 /* See target.h. */
890
891 void
892 add_deprecated_target_alias (const target_info &tinfo, const char *alias)
893 {
894 struct cmd_list_element *c;
895
896 /* If we use add_alias_cmd, here, we do not get the deprecated warning,
897 see PR cli/15104. */
898 c = add_cmd (alias, no_class, tinfo.doc, &targetlist);
899 c->func = open_target;
900 c->set_context ((void *) &tinfo);
901 gdb::unique_xmalloc_ptr<char> alt
902 = xstrprintf ("target %s", tinfo.shortname);
903 deprecate_cmd (c, alt.release ());
904 }
905
906 /* Stub functions */
907
908 void
909 target_kill (void)
910 {
911 current_inferior ()->top_target ()->kill ();
912 }
913
914 void
915 target_load (const char *arg, int from_tty)
916 {
917 target_dcache_invalidate ();
918 current_inferior ()->top_target ()->load (arg, from_tty);
919 }
920
921 /* Define it. */
922
923 target_terminal_state target_terminal::m_terminal_state
924 = target_terminal_state::is_ours;
925
926 /* See target/target.h. */
927
928 void
929 target_terminal::init (void)
930 {
931 current_inferior ()->top_target ()->terminal_init ();
932
933 m_terminal_state = target_terminal_state::is_ours;
934 }
935
936 /* See target/target.h. */
937
938 void
939 target_terminal::inferior (void)
940 {
941 struct ui *ui = current_ui;
942
943 /* A background resume (``run&'') should leave GDB in control of the
944 terminal. */
945 if (ui->prompt_state != PROMPT_BLOCKED)
946 return;
947
948 /* Since we always run the inferior in the main console (unless "set
949 inferior-tty" is in effect), when some UI other than the main one
950 calls target_terminal::inferior, then we leave the main UI's
951 terminal settings as is. */
952 if (ui != main_ui)
953 return;
954
955 /* If GDB is resuming the inferior in the foreground, install
956 inferior's terminal modes. */
957
958 struct inferior *inf = current_inferior ();
959
960 if (inf->terminal_state != target_terminal_state::is_inferior)
961 {
962 current_inferior ()->top_target ()->terminal_inferior ();
963 inf->terminal_state = target_terminal_state::is_inferior;
964 }
965
966 m_terminal_state = target_terminal_state::is_inferior;
967
968 /* If the user hit C-c before, pretend that it was hit right
969 here. */
970 if (check_quit_flag ())
971 target_pass_ctrlc ();
972 }
973
974 /* See target/target.h. */
975
976 void
977 target_terminal::restore_inferior (void)
978 {
979 struct ui *ui = current_ui;
980
981 /* See target_terminal::inferior(). */
982 if (ui->prompt_state != PROMPT_BLOCKED || ui != main_ui)
983 return;
984
985 /* Restore the terminal settings of inferiors that were in the
986 foreground but are now ours_for_output due to a temporary
987 target_target::ours_for_output() call. */
988
989 {
990 scoped_restore_current_inferior restore_inferior;
991
992 for (::inferior *inf : all_inferiors ())
993 {
994 if (inf->terminal_state == target_terminal_state::is_ours_for_output)
995 {
996 set_current_inferior (inf);
997 current_inferior ()->top_target ()->terminal_inferior ();
998 inf->terminal_state = target_terminal_state::is_inferior;
999 }
1000 }
1001 }
1002
1003 m_terminal_state = target_terminal_state::is_inferior;
1004
1005 /* If the user hit C-c before, pretend that it was hit right
1006 here. */
1007 if (check_quit_flag ())
1008 target_pass_ctrlc ();
1009 }
1010
1011 /* Switch terminal state to DESIRED_STATE, either is_ours, or
1012 is_ours_for_output. */
1013
1014 static void
1015 target_terminal_is_ours_kind (target_terminal_state desired_state)
1016 {
1017 scoped_restore_current_inferior restore_inferior;
1018
1019 /* Must do this in two passes. First, have all inferiors save the
1020 current terminal settings. Then, after all inferiors have had a
1021 chance to safely save the terminal settings, restore GDB's
1022 terminal settings. */
1023
1024 for (inferior *inf : all_inferiors ())
1025 {
1026 if (inf->terminal_state == target_terminal_state::is_inferior)
1027 {
1028 set_current_inferior (inf);
1029 current_inferior ()->top_target ()->terminal_save_inferior ();
1030 }
1031 }
1032
1033 for (inferior *inf : all_inferiors ())
1034 {
1035 /* Note we don't check is_inferior here like above because we
1036 need to handle 'is_ours_for_output -> is_ours' too. Careful
1037 to never transition from 'is_ours' to 'is_ours_for_output',
1038 though. */
1039 if (inf->terminal_state != target_terminal_state::is_ours
1040 && inf->terminal_state != desired_state)
1041 {
1042 set_current_inferior (inf);
1043 if (desired_state == target_terminal_state::is_ours)
1044 current_inferior ()->top_target ()->terminal_ours ();
1045 else if (desired_state == target_terminal_state::is_ours_for_output)
1046 current_inferior ()->top_target ()->terminal_ours_for_output ();
1047 else
1048 gdb_assert_not_reached ("unhandled desired state");
1049 inf->terminal_state = desired_state;
1050 }
1051 }
1052 }
1053
1054 /* See target/target.h. */
1055
1056 void
1057 target_terminal::ours ()
1058 {
1059 struct ui *ui = current_ui;
1060
1061 /* See target_terminal::inferior. */
1062 if (ui != main_ui)
1063 return;
1064
1065 if (m_terminal_state == target_terminal_state::is_ours)
1066 return;
1067
1068 target_terminal_is_ours_kind (target_terminal_state::is_ours);
1069 m_terminal_state = target_terminal_state::is_ours;
1070 }
1071
1072 /* See target/target.h. */
1073
1074 void
1075 target_terminal::ours_for_output ()
1076 {
1077 struct ui *ui = current_ui;
1078
1079 /* See target_terminal::inferior. */
1080 if (ui != main_ui)
1081 return;
1082
1083 if (!target_terminal::is_inferior ())
1084 return;
1085
1086 target_terminal_is_ours_kind (target_terminal_state::is_ours_for_output);
1087 target_terminal::m_terminal_state = target_terminal_state::is_ours_for_output;
1088 }
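/* Editor's note: an illustrative pattern (not a quote from GDB sources)
   for printing output while the inferior owns the terminal; the
   scoped_restore_terminal_state helper is assumed to come from
   target/target.h and hands ownership back on scope exit.

     {
       target_terminal::scoped_restore_terminal_state term_state;
       target_terminal::ours_for_output ();
       gdb_printf (_("an asynchronous notification\n"));
     }
*/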
1089
1090 /* See target/target.h. */
1091
1092 void
1093 target_terminal::info (const char *arg, int from_tty)
1094 {
1095 current_inferior ()->top_target ()->terminal_info (arg, from_tty);
1096 }
1097
1098 /* See target.h. */
1099
1100 bool
1101 target_supports_terminal_ours (void)
1102 {
1103 /* The current top target is the target at the top of the target
1104 stack of the current inferior. While normally there's always an
1105 inferior, we must check for nullptr here because we can get here
1106 very early during startup, before the initial inferior is first
1107 created. */
1108 inferior *inf = current_inferior ();
1109
1110 if (inf == nullptr)
1111 return false;
1112 return inf->top_target ()->supports_terminal_ours ();
1113 }
1114
1115 static void
1116 tcomplain (void)
1117 {
1118 error (_("You can't do that when your target is `%s'"),
1119 current_inferior ()->top_target ()->shortname ());
1120 }
1121
1122 void
1123 noprocess (void)
1124 {
1125 error (_("You can't do that without a process to debug."));
1126 }
1127
1128 static void
1129 default_terminal_info (struct target_ops *self, const char *args, int from_tty)
1130 {
1131 gdb_printf (_("No saved terminal information.\n"));
1132 }
1133
1134 /* A default implementation for the to_get_ada_task_ptid target method.
1135
1136 This function builds the PTID by using both LWP and TID as part of
1137 the PTID lwp and tid elements. The pid used is the pid of the
1138 inferior_ptid. */
1139
1140 static ptid_t
1141 default_get_ada_task_ptid (struct target_ops *self, long lwp, ULONGEST tid)
1142 {
1143 return ptid_t (inferior_ptid.pid (), lwp, tid);
1144 }
1145
1146 static enum exec_direction_kind
1147 default_execution_direction (struct target_ops *self)
1148 {
1149 if (!target_can_execute_reverse ())
1150 return EXEC_FORWARD;
1151 else if (!target_can_async_p ())
1152 return EXEC_FORWARD;
1153 else
1154 gdb_assert_not_reached ("\
1155 to_execution_direction must be implemented for reverse async");
1156 }
1157
1158 /* See target.h. */
1159
1160 void
1161 decref_target (target_ops *t)
1162 {
1163 t->decref ();
1164 if (t->refcount () == 0)
1165 {
1166 if (t->stratum () == process_stratum)
1167 connection_list_remove (as_process_stratum_target (t));
1168 target_close (t);
1169 }
1170 }
1171
1172 /* See target.h. */
1173
1174 void
1175 target_stack::push (target_ops *t)
1176 {
1177 t->incref ();
1178
1179 strata stratum = t->stratum ();
1180
1181 if (stratum == process_stratum)
1182 connection_list_add (as_process_stratum_target (t));
1183
1184 /* If there's already a target at this stratum, remove it. */
1185
1186 if (m_stack[stratum] != NULL)
1187 unpush (m_stack[stratum]);
1188
1189 /* Now add the new one. */
1190 m_stack[stratum] = t;
1191
1192 if (m_top < stratum)
1193 m_top = stratum;
1194 }
1195
1196 /* See target.h. */
1197
1198 bool
1199 target_stack::unpush (target_ops *t)
1200 {
1201 gdb_assert (t != NULL);
1202
1203 strata stratum = t->stratum ();
1204
1205 if (stratum == dummy_stratum)
1206 internal_error (__FILE__, __LINE__,
1207 _("Attempt to unpush the dummy target"));
1208
1209 /* Look for the specified target. Note that a target can only occur
1210 once in the target stack. */
1211
1212 if (m_stack[stratum] != t)
1213 {
1214 /* If T wasn't pushed, quit. Only open targets should be
1215 closed. */
1216 return false;
1217 }
1218
1219 /* Unchain the target. */
1220 m_stack[stratum] = NULL;
1221
1222 if (m_top == stratum)
1223 m_top = this->find_beneath (t)->stratum ();
1224
1225 /* Finally close the target, if there are no inferiors
1226 referencing this target still. Note we do this after unchaining,
1227 so any target method calls from within the target_close
1228 implementation don't end up in T anymore. Do leave the target
1229 open if there are other inferiors referencing this target
1230 still. */
1231 decref_target (t);
1232
1233 return true;
1234 }
1235
1236 /* Unpush TARGET and assert that it worked. */
1237
1238 static void
1239 unpush_target_and_assert (struct target_ops *target)
1240 {
1241 if (!current_inferior ()->unpush_target (target))
1242 {
1243 gdb_printf (gdb_stderr,
1244 "pop_all_targets couldn't find target %s\n",
1245 target->shortname ());
1246 internal_error (__FILE__, __LINE__,
1247 _("failed internal consistency check"));
1248 }
1249 }
1250
1251 void
1252 pop_all_targets_above (enum strata above_stratum)
1253 {
1254 while ((int) (current_inferior ()->top_target ()->stratum ())
1255 > (int) above_stratum)
1256 unpush_target_and_assert (current_inferior ()->top_target ());
1257 }
1258
1259 /* See target.h. */
1260
1261 void
1262 pop_all_targets_at_and_above (enum strata stratum)
1263 {
1264 while ((int) (current_inferior ()->top_target ()->stratum ())
1265 >= (int) stratum)
1266 unpush_target_and_assert (current_inferior ()->top_target ());
1267 }
1268
1269 void
1270 pop_all_targets (void)
1271 {
1272 pop_all_targets_above (dummy_stratum);
1273 }
1274
1275 void
1276 target_unpusher::operator() (struct target_ops *ops) const
1277 {
1278 current_inferior ()->unpush_target (ops);
1279 }
1280
1281 /* Default implementation of to_get_thread_local_address. */
1282
1283 static void
1284 generic_tls_error (void)
1285 {
1286 throw_error (TLS_GENERIC_ERROR,
1287 _("Cannot find thread-local variables on this target"));
1288 }
1289
1290 /* Using the objfile specified in OBJFILE, find the address for the
1291 current thread's thread-local storage with offset OFFSET. */
1292 CORE_ADDR
1293 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
1294 {
1295 volatile CORE_ADDR addr = 0;
1296 struct target_ops *target = current_inferior ()->top_target ();
1297 struct gdbarch *gdbarch = target_gdbarch ();
1298
1299 /* If OBJFILE is a separate debug object file, look for the
1300 original object file. */
1301 if (objfile->separate_debug_objfile_backlink != NULL)
1302 objfile = objfile->separate_debug_objfile_backlink;
1303
1304 if (gdbarch_fetch_tls_load_module_address_p (gdbarch))
1305 {
1306 ptid_t ptid = inferior_ptid;
1307
1308 try
1309 {
1310 CORE_ADDR lm_addr;
1311
1312 /* Fetch the load module address for this objfile. */
1313 lm_addr = gdbarch_fetch_tls_load_module_address (gdbarch,
1314 objfile);
1315
1316 if (gdbarch_get_thread_local_address_p (gdbarch))
1317 addr = gdbarch_get_thread_local_address (gdbarch, ptid, lm_addr,
1318 offset);
1319 else
1320 addr = target->get_thread_local_address (ptid, lm_addr, offset);
1321 }
1322 /* If an error occurred, print TLS related messages here. Otherwise,
1323 throw the error to some higher catcher. */
1324 catch (const gdb_exception &ex)
1325 {
1326 int objfile_is_library = (objfile->flags & OBJF_SHARED);
1327
1328 switch (ex.error)
1329 {
1330 case TLS_NO_LIBRARY_SUPPORT_ERROR:
1331 error (_("Cannot find thread-local variables "
1332 "in this thread library."));
1333 break;
1334 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
1335 if (objfile_is_library)
1336 error (_("Cannot find shared library `%s' in dynamic"
1337 " linker's load module list"), objfile_name (objfile));
1338 else
1339 error (_("Cannot find executable file `%s' in dynamic"
1340 " linker's load module list"), objfile_name (objfile));
1341 break;
1342 case TLS_NOT_ALLOCATED_YET_ERROR:
1343 if (objfile_is_library)
1344 error (_("The inferior has not yet allocated storage for"
1345 " thread-local variables in\n"
1346 "the shared library `%s'\n"
1347 "for %s"),
1348 objfile_name (objfile),
1349 target_pid_to_str (ptid).c_str ());
1350 else
1351 error (_("The inferior has not yet allocated storage for"
1352 " thread-local variables in\n"
1353 "the executable `%s'\n"
1354 "for %s"),
1355 objfile_name (objfile),
1356 target_pid_to_str (ptid).c_str ());
1357 break;
1358 case TLS_GENERIC_ERROR:
1359 if (objfile_is_library)
1360 error (_("Cannot find thread-local storage for %s, "
1361 "shared library %s:\n%s"),
1362 target_pid_to_str (ptid).c_str (),
1363 objfile_name (objfile), ex.what ());
1364 else
1365 error (_("Cannot find thread-local storage for %s, "
1366 "executable file %s:\n%s"),
1367 target_pid_to_str (ptid).c_str (),
1368 objfile_name (objfile), ex.what ());
1369 break;
1370 default:
1371 throw;
1372 break;
1373 }
1374 }
1375 }
1376 else
1377 error (_("Cannot find thread-local variables on this target"));
1378
1379 return addr;
1380 }
1381
1382 const char *
1383 target_xfer_status_to_string (enum target_xfer_status status)
1384 {
1385 #define CASE(X) case X: return #X
1386 switch (status)
1387 {
1388 CASE(TARGET_XFER_E_IO);
1389 CASE(TARGET_XFER_UNAVAILABLE);
1390 default:
1391 return "<unknown>";
1392 }
1393 #undef CASE
1394 };
1395
1396
1397 const target_section_table *
1398 target_get_section_table (struct target_ops *target)
1399 {
1400 return target->get_section_table ();
1401 }
1402
1403 /* Find a section containing ADDR. */
1404
1405 const struct target_section *
1406 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1407 {
1408 const target_section_table *table = target_get_section_table (target);
1409
1410 if (table == NULL)
1411 return NULL;
1412
1413 for (const target_section &secp : *table)
1414 {
1415 if (addr >= secp.addr && addr < secp.endaddr)
1416 return &secp;
1417 }
1418 return NULL;
1419 }
1420
1421 /* See target.h. */
1422
1423 const target_section_table *
1424 default_get_section_table ()
1425 {
1426 return &current_program_space->target_sections ();
1427 }
1428
1429 /* Helper for the memory xfer routines. Checks the attributes of the
1430 memory region of MEMADDR against the read or write being attempted.
1431 If the access is permitted returns true, otherwise returns false.
1432 REGION_P is an optional output parameter. If not-NULL, it is
1433 filled with a pointer to the memory region of MEMADDR. REG_LEN
1434 returns LEN trimmed to the end of the region. This is how much the
1435 caller can continue requesting, if the access is permitted. A
1436 single xfer request must not straddle memory region boundaries. */
1437
1438 static int
1439 memory_xfer_check_region (gdb_byte *readbuf, const gdb_byte *writebuf,
1440 ULONGEST memaddr, ULONGEST len, ULONGEST *reg_len,
1441 struct mem_region **region_p)
1442 {
1443 struct mem_region *region;
1444
1445 region = lookup_mem_region (memaddr);
1446
1447 if (region_p != NULL)
1448 *region_p = region;
1449
1450 switch (region->attrib.mode)
1451 {
1452 case MEM_RO:
1453 if (writebuf != NULL)
1454 return 0;
1455 break;
1456
1457 case MEM_WO:
1458 if (readbuf != NULL)
1459 return 0;
1460 break;
1461
1462 case MEM_FLASH:
1463 /* We only support writing to flash during "load" for now. */
1464 if (writebuf != NULL)
1465 error (_("Writing to flash memory forbidden in this context"));
1466 break;
1467
1468 case MEM_NONE:
1469 return 0;
1470 }
1471
1472 /* region->hi == 0 means there's no upper bound. */
1473 if (memaddr + len < region->hi || region->hi == 0)
1474 *reg_len = len;
1475 else
1476 *reg_len = region->hi - memaddr;
1477
1478 return 1;
1479 }
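/* Editor's note: a worked example of the trimming above (illustrative
   numbers).  For a region spanning [0x1000, 0x2000) and a request of
   0x20 bytes at 0x1ff0, only 0x10 bytes fall inside the region, so
   *REG_LEN is set to 0x10; the caller must issue a separate request
   starting at 0x2000 for the remainder.  */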
1480
1481 /* Read memory from more than one valid target. A core file, for
1482 instance, could have some of the memory but delegate other bits to
1483 the target below it. So, we must manually try all targets. */
1484
1485 enum target_xfer_status
1486 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1487 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1488 ULONGEST *xfered_len)
1489 {
1490 enum target_xfer_status res;
1491
1492 do
1493 {
1494 res = ops->xfer_partial (TARGET_OBJECT_MEMORY, NULL,
1495 readbuf, writebuf, memaddr, len,
1496 xfered_len);
1497 if (res == TARGET_XFER_OK)
1498 break;
1499
1500 /* Stop if the target reports that the memory is not available. */
1501 if (res == TARGET_XFER_UNAVAILABLE)
1502 break;
1503
1504 /* Don't continue past targets which have all the memory.
1505 At one time, this code was necessary to read data from
1506 executables / shared libraries when data for the requested
1507 addresses weren't available in the core file. But now the
1508 core target handles this case itself. */
1509 if (ops->has_all_memory ())
1510 break;
1511
1512 ops = ops->beneath ();
1513 }
1514 while (ops != NULL);
1515
1516 /* The cache works at the raw memory level. Make sure the cache
1517 gets updated with raw contents no matter what kind of memory
1518 object was originally being written. Note we do write-through
1519 first, so that if it fails, we don't write to the cache contents
1520 that never made it to the target. */
1521 if (writebuf != NULL
1522 && inferior_ptid != null_ptid
1523 && target_dcache_init_p ()
1524 && (stack_cache_enabled_p () || code_cache_enabled_p ()))
1525 {
1526 DCACHE *dcache = target_dcache_get ();
1527
1528 /* Note that writing to an area of memory which wasn't present
1529 in the cache doesn't cause it to be loaded in. */
1530 dcache_update (dcache, res, memaddr, writebuf, *xfered_len);
1531 }
1532
1533 return res;
1534 }
1535
1536 /* Perform a partial memory transfer.
1537 For docs see target.h, to_xfer_partial. */
1538
1539 static enum target_xfer_status
1540 memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
1541 gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
1542 ULONGEST len, ULONGEST *xfered_len)
1543 {
1544 enum target_xfer_status res;
1545 ULONGEST reg_len;
1546 struct mem_region *region;
1547 struct inferior *inf;
1548
1549 /* For accesses to unmapped overlay sections, read directly from
1550 files. Must do this first, as MEMADDR may need adjustment. */
1551 if (readbuf != NULL && overlay_debugging)
1552 {
1553 struct obj_section *section = find_pc_overlay (memaddr);
1554
1555 if (pc_in_unmapped_range (memaddr, section))
1556 {
1557 const target_section_table *table = target_get_section_table (ops);
1558 const char *section_name = section->the_bfd_section->name;
1559
1560 memaddr = overlay_mapped_address (memaddr, section);
1561
1562 auto match_cb = [=] (const struct target_section *s)
1563 {
1564 return (strcmp (section_name, s->the_bfd_section->name) == 0);
1565 };
1566
1567 return section_table_xfer_memory_partial (readbuf, writebuf,
1568 memaddr, len, xfered_len,
1569 *table, match_cb);
1570 }
1571 }
1572
1573 /* Try the executable files, if "trust-readonly-sections" is set. */
1574 if (readbuf != NULL && trust_readonly)
1575 {
1576 const struct target_section *secp
1577 = target_section_by_addr (ops, memaddr);
1578 if (secp != NULL
1579 && (bfd_section_flags (secp->the_bfd_section) & SEC_READONLY))
1580 {
1581 const target_section_table *table = target_get_section_table (ops);
1582 return section_table_xfer_memory_partial (readbuf, writebuf,
1583 memaddr, len, xfered_len,
1584 *table);
1585 }
1586 }
1587
1588 /* Try GDB's internal data cache. */
1589
1590 if (!memory_xfer_check_region (readbuf, writebuf, memaddr, len, &reg_len,
1591 &region))
1592 return TARGET_XFER_E_IO;
1593
1594 if (inferior_ptid != null_ptid)
1595 inf = current_inferior ();
1596 else
1597 inf = NULL;
1598
1599 if (inf != NULL
1600 && readbuf != NULL
1601 /* The dcache reads whole cache lines; that doesn't play well
1602 with reading from a trace buffer, because reading outside of
1603 the collected memory range fails. */
1604 && get_traceframe_number () == -1
1605 && (region->attrib.cache
1606 || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
1607 || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
1608 {
1609 DCACHE *dcache = target_dcache_get_or_init ();
1610
1611 return dcache_read_memory_partial (ops, dcache, memaddr, readbuf,
1612 reg_len, xfered_len);
1613 }
1614
1615 /* If none of those methods found the memory we wanted, fall back
1616 to a target partial transfer. Normally a single call to
1617 to_xfer_partial is enough; if it doesn't recognize an object
1618 it will call the to_xfer_partial of the next target down.
1619 But for memory this won't do. Memory is the only target
1620 object which can be read from more than one valid target.
1621 A core file, for instance, could have some of memory but
1622 delegate other bits to the target below it. So, we must
1623 manually try all targets. */
1624
1625 res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
1626 xfered_len);
1627
1628 /* If we still haven't got anything, return the last error. We
1629 give up. */
1630 return res;
1631 }
1632
1633 /* Perform a partial memory transfer. For docs see target.h,
1634 to_xfer_partial. */
1635
1636 static enum target_xfer_status
1637 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1638 gdb_byte *readbuf, const gdb_byte *writebuf,
1639 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1640 {
1641 enum target_xfer_status res;
1642
1643 /* Zero length requests are ok and require no work. */
1644 if (len == 0)
1645 return TARGET_XFER_EOF;
1646
1647 memaddr = address_significant (target_gdbarch (), memaddr);
1648
1649 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1650 breakpoint insns, thus hiding out from higher layers whether
1651 there are software breakpoints inserted in the code stream. */
1652 if (readbuf != NULL)
1653 {
1654 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1655 xfered_len);
1656
1657 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1658 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, *xfered_len);
1659 }
1660 else
1661 {
1662 /* A large write request is likely to be partially satisfied
1663 by memory_xfer_partial_1. We will continually malloc
1664 and free a copy of the entire write request for breakpoint
1665 shadow handling even though we only end up writing a small
1666 subset of it. Cap writes to a limit specified by the target
1667 to mitigate this. */
1668 len = std::min (ops->get_memory_xfer_limit (), len);
1669
1670 gdb::byte_vector buf (writebuf, writebuf + len);
1671 breakpoint_xfer_memory (NULL, buf.data (), writebuf, memaddr, len);
1672 res = memory_xfer_partial_1 (ops, object, NULL, buf.data (), memaddr, len,
1673 xfered_len);
1674 }
1675
1676 return res;
1677 }
1678
1679 scoped_restore_tmpl<int>
1680 make_scoped_restore_show_memory_breakpoints (int show)
1681 {
1682 return make_scoped_restore (&show_memory_breakpoints, show);
1683 }
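/* Editor's note: a minimal usage sketch (not part of this file).  A
   caller that needs to see real memory contents, breakpoint shadows
   included, holds the scoped restore for the duration of the access:

     scoped_restore restore_memory
       = make_scoped_restore_show_memory_breakpoints (1);
     ... perform the memory reads here ...
*/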
1684
1685 /* For docs see target.h, to_xfer_partial. */
1686
1687 enum target_xfer_status
1688 target_xfer_partial (struct target_ops *ops,
1689 enum target_object object, const char *annex,
1690 gdb_byte *readbuf, const gdb_byte *writebuf,
1691 ULONGEST offset, ULONGEST len,
1692 ULONGEST *xfered_len)
1693 {
1694 enum target_xfer_status retval;
1695
1696 /* Transfer is done when LEN is zero. */
1697 if (len == 0)
1698 return TARGET_XFER_EOF;
1699
1700 if (writebuf && !may_write_memory)
1701 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1702 core_addr_to_string_nz (offset), plongest (len));
1703
1704 *xfered_len = 0;
1705
1706 /* If this is a memory transfer, let the memory-specific code
1707 have a look at it instead. Memory transfers are more
1708 complicated. */
1709 if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
1710 || object == TARGET_OBJECT_CODE_MEMORY)
1711 retval = memory_xfer_partial (ops, object, readbuf,
1712 writebuf, offset, len, xfered_len);
1713 else if (object == TARGET_OBJECT_RAW_MEMORY)
1714 {
1715 /* Skip/avoid accessing the target if the memory region
1716 attributes block the access. Check this here instead of in
1717 raw_memory_xfer_partial as otherwise we'd end up checking
1718 this twice in the case of the memory_xfer_partial path is
1719 taken; once before checking the dcache, and another in the
1720 tail call to raw_memory_xfer_partial. */
1721 if (!memory_xfer_check_region (readbuf, writebuf, offset, len, &len,
1722 NULL))
1723 return TARGET_XFER_E_IO;
1724
1725 /* Request the normal memory object from other layers. */
1726 retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
1727 xfered_len);
1728 }
1729 else
1730 retval = ops->xfer_partial (object, annex, readbuf,
1731 writebuf, offset, len, xfered_len);
1732
1733 if (targetdebug)
1734 {
1735 const unsigned char *myaddr = NULL;
1736
1737 gdb_printf (gdb_stdlog,
1738 "%s:target_xfer_partial "
1739 "(%d, %s, %s, %s, %s, %s) = %d, %s",
1740 ops->shortname (),
1741 (int) object,
1742 (annex ? annex : "(null)"),
1743 host_address_to_string (readbuf),
1744 host_address_to_string (writebuf),
1745 core_addr_to_string_nz (offset),
1746 pulongest (len), retval,
1747 pulongest (*xfered_len));
1748
1749 if (readbuf)
1750 myaddr = readbuf;
1751 if (writebuf)
1752 myaddr = writebuf;
1753 if (retval == TARGET_XFER_OK && myaddr != NULL)
1754 {
1755 int i;
1756
1757 gdb_puts (", bytes =", gdb_stdlog);
1758 for (i = 0; i < *xfered_len; i++)
1759 {
1760 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1761 {
1762 if (targetdebug < 2 && i > 0)
1763 {
1764 gdb_printf (gdb_stdlog, " ...");
1765 break;
1766 }
1767 gdb_printf (gdb_stdlog, "\n");
1768 }
1769
1770 gdb_printf (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1771 }
1772 }
1773
1774 gdb_putc ('\n', gdb_stdlog);
1775 }
1776
1777 /* Check implementations of to_xfer_partial update *XFERED_LEN
1778 properly. Do assertion after printing debug messages, so that we
1779 can find more clues on assertion failure from debugging messages. */
1780 if (retval == TARGET_XFER_OK || retval == TARGET_XFER_UNAVAILABLE)
1781 gdb_assert (*xfered_len > 0);
1782
1783 return retval;
1784 }
1785
1786 /* Read LEN bytes of target memory at address MEMADDR, placing the
1787 results in GDB's memory at MYADDR. Returns either 0 for success or
1788 -1 if any error occurs.
1789
1790 If an error occurs, no guarantee is made about the contents of the data at
1791 MYADDR. In particular, the caller should not depend upon partial reads
1792 filling the buffer with good data. There is no way for the caller to know
1793 how much good data might have been transfered anyway. Callers that can
1794 deal with partial reads should call target_read (which will retry until
1795 it makes no progress, and then return how much was transferred). */
1796
1797 int
1798 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1799 {
1800 if (target_read (current_inferior ()->top_target (),
1801 TARGET_OBJECT_MEMORY, NULL,
1802 myaddr, memaddr, len) == len)
1803 return 0;
1804 else
1805 return -1;
1806 }
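/* Editor's note: an illustrative caller (hypothetical, not part of this
   file).  target_read_memory is all-or-nothing; as the comment above
   notes, callers that can cope with partial results should use
   target_read instead.

     gdb_byte buf[4];

     if (target_read_memory (addr, buf, sizeof buf) != 0)
       error (_("Cannot access memory at address %s"),
              core_addr_to_string (addr));
*/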
1807
1808 /* See target/target.h. */
1809
1810 int
1811 target_read_uint32 (CORE_ADDR memaddr, uint32_t *result)
1812 {
1813 gdb_byte buf[4];
1814 int r;
1815
1816 r = target_read_memory (memaddr, buf, sizeof buf);
1817 if (r != 0)
1818 return r;
1819 *result = extract_unsigned_integer (buf, sizeof buf,
1820 gdbarch_byte_order (target_gdbarch ()));
1821 return 0;
1822 }
1823
1824 /* Like target_read_memory, but specify explicitly that this is a read
1825 from the target's raw memory. That is, this read bypasses the
1826 dcache, breakpoint shadowing, etc. */
1827
1828 int
1829 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1830 {
1831 if (target_read (current_inferior ()->top_target (),
1832 TARGET_OBJECT_RAW_MEMORY, NULL,
1833 myaddr, memaddr, len) == len)
1834 return 0;
1835 else
1836 return -1;
1837 }
1838
1839 /* Like target_read_memory, but specify explicitly that this is a read from
1840 the target's stack. This may trigger different cache behavior. */
1841
1842 int
1843 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1844 {
1845 if (target_read (current_inferior ()->top_target (),
1846 TARGET_OBJECT_STACK_MEMORY, NULL,
1847 myaddr, memaddr, len) == len)
1848 return 0;
1849 else
1850 return -1;
1851 }
1852
1853 /* Like target_read_memory, but specify explicitly that this is a read from
1854 the target's code. This may trigger different cache behavior. */
1855
1856 int
1857 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1858 {
1859 if (target_read (current_inferior ()->top_target (),
1860 TARGET_OBJECT_CODE_MEMORY, NULL,
1861 myaddr, memaddr, len) == len)
1862 return 0;
1863 else
1864 return -1;
1865 }
1866
1867 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1868 Returns either 0 for success or -1 if any error occurs. If an
1869 error occurs, no guarantee is made about how much data got written.
1870 Callers that can deal with partial writes should call
1871 target_write. */
1872
1873 int
1874 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1875 {
1876 if (target_write (current_inferior ()->top_target (),
1877 TARGET_OBJECT_MEMORY, NULL,
1878 myaddr, memaddr, len) == len)
1879 return 0;
1880 else
1881 return -1;
1882 }
1883
1884 /* Write LEN bytes from MYADDR to target raw memory at address
1885 MEMADDR. Returns either 0 for success or -1 if any error occurs.
1886 If an error occurs, no guarantee is made about how much data got
1887 written. Callers that can deal with partial writes should call
1888 target_write. */
1889
1890 int
1891 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1892 {
1893 if (target_write (current_inferior ()->top_target (),
1894 TARGET_OBJECT_RAW_MEMORY, NULL,
1895 myaddr, memaddr, len) == len)
1896 return 0;
1897 else
1898 return -1;
1899 }
1900
1901 /* Fetch the target's memory map. */
1902
1903 std::vector<mem_region>
1904 target_memory_map (void)
1905 {
1906 target_ops *target = current_inferior ()->top_target ();
1907 std::vector<mem_region> result = target->memory_map ();
1908 if (result.empty ())
1909 return result;
1910
1911 std::sort (result.begin (), result.end ());
1912
1913 /* Check that regions do not overlap. Simultaneously assign
1914 a numbering for the "mem" commands to use to refer to
1915 each region. */
1916 mem_region *last_one = NULL;
1917 for (size_t ix = 0; ix < result.size (); ix++)
1918 {
1919 mem_region *this_one = &result[ix];
1920 this_one->number = ix;
1921
1922 if (last_one != NULL && last_one->hi > this_one->lo)
1923 {
1924 warning (_("Overlapping regions in memory map: ignoring"));
1925 return std::vector<mem_region> ();
1926 }
1927
1928 last_one = this_one;
1929 }
1930
1931 return result;
1932 }
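/* Editor's note: a short sketch (not part of this file) of consuming the
   sorted, numbered result; the field names follow struct mem_region as
   used above (number, lo, hi).

     for (const mem_region &r : target_memory_map ())
       gdb_printf ("region %d: %s..%s\n", r.number,
                   core_addr_to_string (r.lo),
                   core_addr_to_string (r.hi));
*/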
1933
1934 void
1935 target_flash_erase (ULONGEST address, LONGEST length)
1936 {
1937 current_inferior ()->top_target ()->flash_erase (address, length);
1938 }
1939
1940 void
1941 target_flash_done (void)
1942 {
1943 current_inferior ()->top_target ()->flash_done ();
1944 }
1945
1946 static void
1947 show_trust_readonly (struct ui_file *file, int from_tty,
1948 struct cmd_list_element *c, const char *value)
1949 {
1950 gdb_printf (file,
1951 _("Mode for reading from readonly sections is %s.\n"),
1952 value);
1953 }
1954
1955 /* Target vector read/write partial wrapper functions. */
1956
1957 static enum target_xfer_status
1958 target_read_partial (struct target_ops *ops,
1959 enum target_object object,
1960 const char *annex, gdb_byte *buf,
1961 ULONGEST offset, ULONGEST len,
1962 ULONGEST *xfered_len)
1963 {
1964 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
1965 xfered_len);
1966 }
1967
1968 static enum target_xfer_status
1969 target_write_partial (struct target_ops *ops,
1970 enum target_object object,
1971 const char *annex, const gdb_byte *buf,
1972 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
1973 {
1974 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
1975 xfered_len);
1976 }
1977
1978 /* Wrappers to perform the full transfer. */
1979
1980 /* For docs on target_read see target.h. */
1981
1982 LONGEST
1983 target_read (struct target_ops *ops,
1984 enum target_object object,
1985 const char *annex, gdb_byte *buf,
1986 ULONGEST offset, LONGEST len)
1987 {
1988 LONGEST xfered_total = 0;
1989 int unit_size = 1;
1990
1991 /* If we are reading from a memory object, find the length of an addressable
1992 unit for that architecture. */
1993 if (object == TARGET_OBJECT_MEMORY
1994 || object == TARGET_OBJECT_STACK_MEMORY
1995 || object == TARGET_OBJECT_CODE_MEMORY
1996 || object == TARGET_OBJECT_RAW_MEMORY)
1997 unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
1998
1999 while (xfered_total < len)
2000 {
2001 ULONGEST xfered_partial;
2002 enum target_xfer_status status;
2003
2004 status = target_read_partial (ops, object, annex,
2005 buf + xfered_total * unit_size,
2006 offset + xfered_total, len - xfered_total,
2007 &xfered_partial);
2008
2009 /* Call an observer, notifying them of the xfer progress? */
2010 if (status == TARGET_XFER_EOF)
2011 return xfered_total;
2012 else if (status == TARGET_XFER_OK)
2013 {
2014 xfered_total += xfered_partial;
2015 QUIT;
2016 }
2017 else
2018 return TARGET_XFER_E_IO;
2019
2020 }
2021 return len;
2022 }
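/* Editor's note: an illustrative caller (hypothetical, not part of this
   file) distinguishing the three outcomes above: LEN on full success, a
   smaller count when EOF was hit, and TARGET_XFER_E_IO on error.

     gdb::byte_vector block (256);
     LONGEST got = target_read (current_inferior ()->top_target (),
                                TARGET_OBJECT_MEMORY, NULL,
                                block.data (), addr, block.size ());

     if (got == TARGET_XFER_E_IO)
       error (_("Memory read failed"));
     else if (got < (LONGEST) block.size ())
       warning (_("only %s of %s bytes were readable"),
                plongest (got), pulongest (block.size ()));
*/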
2023
2024 /* Assuming that the entire [begin, end) range of memory cannot be
2025 read, try to read whatever subrange is possible to read.
2026
2027 The function returns, in RESULT, either zero or one memory block.
2028 If there's a readable subrange at the beginning, it is completely
2029 read and returned. Any further readable subrange will not be read.
2030 Otherwise, if there's a readable subrange at the end, it will be
2031 completely read and returned. Any readable subranges before it
2032 (obviously, not starting at the beginning) will be ignored. In
2033 other cases -- either no readable subrange, or readable subranges
2034 that are neither at the beginning nor at the end -- nothing is returned.
2035
2036 The purpose of this function is to handle a read across a boundary
2037 of accessible memory in a case when memory map is not available.
2038 The above restrictions are fine for this case, but will give
2039 incorrect results if the memory is 'patchy'. However, supporting
2040 'patchy' memory would require trying to read every single byte,
2041 and that seems an unacceptable solution. An explicit memory map is
2042 recommended for this case -- and read_memory_robust will
2043 take care of reading multiple ranges then. */
2044
2045 static void
2046 read_whatever_is_readable (struct target_ops *ops,
2047 const ULONGEST begin, const ULONGEST end,
2048 int unit_size,
2049 std::vector<memory_read_result> *result)
2050 {
2051 ULONGEST current_begin = begin;
2052 ULONGEST current_end = end;
2053 int forward;
2054 ULONGEST xfered_len;
2055
2056 /* If we previously failed to read 1 byte, nothing can be done here. */
2057 if (end - begin <= 1)
2058 return;
2059
2060 gdb::unique_xmalloc_ptr<gdb_byte> buf ((gdb_byte *) xmalloc (end - begin));
2061
2062 /* Check that either the first or the last byte is readable, and give up
2063 if not. This heuristic is meant to permit reading accessible memory
2064 at the boundary of an accessible region. */
2065 if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2066 buf.get (), begin, 1, &xfered_len) == TARGET_XFER_OK)
2067 {
2068 forward = 1;
2069 ++current_begin;
2070 }
2071 else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2072 buf.get () + (end - begin) - 1, end - 1, 1,
2073 &xfered_len) == TARGET_XFER_OK)
2074 {
2075 forward = 0;
2076 --current_end;
2077 }
2078 else
2079 return;
2080
2081 /* Loop invariant is that the [current_begin, current_end) was previously
2082 found to be not readable as a whole.
2083
2084 Note loop condition -- if the range has 1 byte, we can't divide the range
2085 so there's no point trying further. */
2086 while (current_end - current_begin > 1)
2087 {
2088 ULONGEST first_half_begin, first_half_end;
2089 ULONGEST second_half_begin, second_half_end;
2090 LONGEST xfer;
2091 ULONGEST middle = current_begin + (current_end - current_begin) / 2;
2092
2093 if (forward)
2094 {
2095 first_half_begin = current_begin;
2096 first_half_end = middle;
2097 second_half_begin = middle;
2098 second_half_end = current_end;
2099 }
2100 else
2101 {
2102 first_half_begin = middle;
2103 first_half_end = current_end;
2104 second_half_begin = current_begin;
2105 second_half_end = middle;
2106 }
2107
2108 xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2109 buf.get () + (first_half_begin - begin) * unit_size,
2110 first_half_begin,
2111 first_half_end - first_half_begin);
2112
2113 if (xfer == first_half_end - first_half_begin)
2114 {
2115 /* This half reads up fine. So, the error must be in the
2116 other half. */
2117 current_begin = second_half_begin;
2118 current_end = second_half_end;
2119 }
2120 else
2121 {
2122 /* This half is not readable. Because we've tried one byte, we
2123 know some part of this half is actually readable. Go to the next
2124 iteration to divide again and try to read.
2125
2126 We don't handle the other half, because this function only tries
2127 to read a single readable subrange. */
2128 current_begin = first_half_begin;
2129 current_end = first_half_end;
2130 }
2131 }
2132
2133 if (forward)
2134 {
2135 /* The [begin, current_begin) range has been read. */
2136 result->emplace_back (begin, current_begin, std::move (buf));
2137 }
2138 else
2139 {
2140 /* The [current_end, end) range has been read. */
2141 LONGEST region_len = end - current_end;
2142
2143 gdb::unique_xmalloc_ptr<gdb_byte> data
2144 ((gdb_byte *) xmalloc (region_len * unit_size));
2145 memcpy (data.get (), buf.get () + (current_end - begin) * unit_size,
2146 region_len * unit_size);
2147 result->emplace_back (current_end, end, std::move (data));
2148 }
2149 }
2150
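/* Worked example for the bisection above (illustrative only): for a
   request covering [0x1000, 0x1800) where only [0x1000, 0x1234) is
   actually readable, the first byte reads successfully, so FORWARD is
   set and CURRENT_BEGIN starts at 0x1001.  Each iteration halves
   [current_begin, current_end) and keeps the half known to contain the
   failure, until the range narrows to the single unreadable byte at
   0x1234.  The readable block [0x1000, 0x1234) is then returned.  */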
2151 std::vector<memory_read_result>
2152 read_memory_robust (struct target_ops *ops,
2153 const ULONGEST offset, const LONGEST len)
2154 {
2155 std::vector<memory_read_result> result;
2156 int unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
2157
2158 LONGEST xfered_total = 0;
2159 while (xfered_total < len)
2160 {
2161 struct mem_region *region = lookup_mem_region (offset + xfered_total);
2162 LONGEST region_len;
2163
2164 /* If there is no explicit region, a fake one should be created. */
2165 gdb_assert (region);
2166
2167 if (region->hi == 0)
2168 region_len = len - xfered_total;
2169 else
2170 region_len = region->hi - offset;
2171
2172 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2173 {
2174 /* Cannot read this region. Note that we can end up here only
2175 if the region is explicitly marked inaccessible, or
2176 'inaccessible-by-default' is in effect. */
2177 xfered_total += region_len;
2178 }
2179 else
2180 {
2181 LONGEST to_read = std::min (len - xfered_total, region_len);
2182 gdb::unique_xmalloc_ptr<gdb_byte> buffer
2183 ((gdb_byte *) xmalloc (to_read * unit_size));
2184
2185 LONGEST xfered_partial =
2186 target_read (ops, TARGET_OBJECT_MEMORY, NULL, buffer.get (),
2187 offset + xfered_total, to_read);
2188 /* Call an observer, notifying them of the xfer progress? */
2189 if (xfered_partial <= 0)
2190 {
2191 /* Got an error reading full chunk. See if maybe we can read
2192 some subrange. */
2193 read_whatever_is_readable (ops, offset + xfered_total,
2194 offset + xfered_total + to_read,
2195 unit_size, &result);
2196 xfered_total += to_read;
2197 }
2198 else
2199 {
2200 result.emplace_back (offset + xfered_total,
2201 offset + xfered_total + xfered_partial,
2202 std::move (buffer));
2203 xfered_total += xfered_partial;
2204 }
2205 QUIT;
2206 }
2207 }
2208
2209 return result;
2210 }
2211
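/* Usage sketch (illustrative only; field names are as declared for
   memory_read_result in target.h): collect whatever subranges were
   readable and report them.

     for (const memory_read_result &r : read_memory_robust (ops, addr, len))
       gdb_printf ("read [%s, %s)\n",
                   pulongest (r.begin), pulongest (r.end));
*/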
2212
2213 /* An alternative to target_write with progress callbacks. */
2214
2215 LONGEST
2216 target_write_with_progress (struct target_ops *ops,
2217 enum target_object object,
2218 const char *annex, const gdb_byte *buf,
2219 ULONGEST offset, LONGEST len,
2220 void (*progress) (ULONGEST, void *), void *baton)
2221 {
2222 LONGEST xfered_total = 0;
2223 int unit_size = 1;
2224
2225 /* If we are writing to a memory object, find the length of an addressable
2226 unit for that architecture. */
2227 if (object == TARGET_OBJECT_MEMORY
2228 || object == TARGET_OBJECT_STACK_MEMORY
2229 || object == TARGET_OBJECT_CODE_MEMORY
2230 || object == TARGET_OBJECT_RAW_MEMORY)
2231 unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
2232
2233 /* Give the progress callback a chance to set up. */
2234 if (progress)
2235 (*progress) (0, baton);
2236
2237 while (xfered_total < len)
2238 {
2239 ULONGEST xfered_partial;
2240 enum target_xfer_status status;
2241
2242 status = target_write_partial (ops, object, annex,
2243 buf + xfered_total * unit_size,
2244 offset + xfered_total, len - xfered_total,
2245 &xfered_partial);
2246
2247 if (status != TARGET_XFER_OK)
2248 return status == TARGET_XFER_EOF ? xfered_total : TARGET_XFER_E_IO;
2249
2250 if (progress)
2251 (*progress) (xfered_partial, baton);
2252
2253 xfered_total += xfered_partial;
2254 QUIT;
2255 }
2256 return len;
2257 }
2258
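/* Usage sketch (illustrative only): a progress callback matching the
   signature above simply accumulates the number of units reported.
   TARGET_OBJECT_FLASH and the other arguments are example values.

     static void
     count_progress (ULONGEST written, void *baton)
     {
       *(ULONGEST *) baton += written;
     }

     ULONGEST total = 0;
     target_write_with_progress (ops, TARGET_OBJECT_FLASH, NULL, data,
                                 address, len, count_progress, &total);
*/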
2259 /* For docs on target_write see target.h. */
2260
2261 LONGEST
2262 target_write (struct target_ops *ops,
2263 enum target_object object,
2264 const char *annex, const gdb_byte *buf,
2265 ULONGEST offset, LONGEST len)
2266 {
2267 return target_write_with_progress (ops, object, annex, buf, offset, len,
2268 NULL, NULL);
2269 }
2270
2271 /* Help for target_read_alloc and target_read_stralloc. See their comments
2272 for details. */
2273
2274 template <typename T>
2275 gdb::optional<gdb::def_vector<T>>
2276 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2277 const char *annex)
2278 {
2279 gdb::def_vector<T> buf;
2280 size_t buf_pos = 0;
2281 const int chunk = 4096;
2282
2283 /* This function does not have a length parameter; it reads the
2284 entire OBJECT. Also, it doesn't support objects fetched partly
2285 from one target and partly from another (in a different stratum,
2286 e.g. a core file and an executable). Both reasons make it
2287 unsuitable for reading memory. */
2288 gdb_assert (object != TARGET_OBJECT_MEMORY);
2289
2290 /* Start by reading up to 4K at a time. The target will throttle
2291 this number down if necessary. */
2292 while (1)
2293 {
2294 ULONGEST xfered_len;
2295 enum target_xfer_status status;
2296
2297 buf.resize (buf_pos + chunk);
2298
2299 status = target_read_partial (ops, object, annex,
2300 (gdb_byte *) &buf[buf_pos],
2301 buf_pos, chunk,
2302 &xfered_len);
2303
2304 if (status == TARGET_XFER_EOF)
2305 {
2306 /* Read all there was. */
2307 buf.resize (buf_pos);
2308 return buf;
2309 }
2310 else if (status != TARGET_XFER_OK)
2311 {
2312 /* An error occurred. */
2313 return {};
2314 }
2315
2316 buf_pos += xfered_len;
2317
2318 QUIT;
2319 }
2320 }
2321
2322 /* See target.h. */
2323
2324 gdb::optional<gdb::byte_vector>
2325 target_read_alloc (struct target_ops *ops, enum target_object object,
2326 const char *annex)
2327 {
2328 return target_read_alloc_1<gdb_byte> (ops, object, annex);
2329 }
2330
2331 /* See target.h. */
2332
2333 gdb::optional<gdb::char_vector>
2334 target_read_stralloc (struct target_ops *ops, enum target_object object,
2335 const char *annex)
2336 {
2337 gdb::optional<gdb::char_vector> buf
2338 = target_read_alloc_1<char> (ops, object, annex);
2339
2340 if (!buf)
2341 return {};
2342
2343 if (buf->empty () || buf->back () != '\0')
2344 buf->push_back ('\0');
2345
2346 /* Check for embedded NUL bytes; but allow trailing NULs. */
2347 for (auto it = std::find (buf->begin (), buf->end (), '\0');
2348 it != buf->end (); it++)
2349 if (*it != '\0')
2350 {
2351 warning (_("target object %d, annex %s, "
2352 "contained unexpected null characters"),
2353 (int) object, annex ? annex : "(none)");
2354 break;
2355 }
2356
2357 return buf;
2358 }
2359
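/* Usage sketch (illustrative only): reading a string object; the result
   is guaranteed to be NUL terminated.  The "processes" annex is just an
   example value.

     gdb::optional<gdb::char_vector> osdata
       = target_read_stralloc (ops, TARGET_OBJECT_OSDATA, "processes");

     if (osdata)
       gdb_printf ("%s", osdata->data ());
*/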
2360 /* Memory transfer methods. */
2361
2362 void
2363 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2364 LONGEST len)
2365 {
2366 /* This method is used to read from an alternate, non-current
2367 target. This read must bypass the overlay support (as symbols
2368 don't match this target), and GDB's internal cache (wrong cache
2369 for this target). */
2370 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2371 != len)
2372 memory_error (TARGET_XFER_E_IO, addr);
2373 }
2374
2375 ULONGEST
2376 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2377 int len, enum bfd_endian byte_order)
2378 {
2379 gdb_byte buf[sizeof (ULONGEST)];
2380
2381 gdb_assert (len <= sizeof (buf));
2382 get_target_memory (ops, addr, buf, len);
2383 return extract_unsigned_integer (buf, len, byte_order);
2384 }
2385
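/* Usage sketch (illustrative only): fetching a 4-byte unsigned value
   from another target's memory in the current architecture's byte
   order.

     ULONGEST word
       = get_target_memory_unsigned (ops, addr, 4,
                                     gdbarch_byte_order (target_gdbarch ()));
*/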
2386 /* See target.h. */
2387
2388 int
2389 target_insert_breakpoint (struct gdbarch *gdbarch,
2390 struct bp_target_info *bp_tgt)
2391 {
2392 if (!may_insert_breakpoints)
2393 {
2394 warning (_("May not insert breakpoints"));
2395 return 1;
2396 }
2397
2398 target_ops *target = current_inferior ()->top_target ();
2399
2400 return target->insert_breakpoint (gdbarch, bp_tgt);
2401 }
2402
2403 /* See target.h. */
2404
2405 int
2406 target_remove_breakpoint (struct gdbarch *gdbarch,
2407 struct bp_target_info *bp_tgt,
2408 enum remove_bp_reason reason)
2409 {
2410 /* This is kind of a weird case to handle, but the permission might
2411 have been changed after breakpoints were inserted - in which case
2412 we should just take the user literally and assume that any
2413 breakpoints should be left in place. */
2414 if (!may_insert_breakpoints)
2415 {
2416 warning (_("May not remove breakpoints"));
2417 return 1;
2418 }
2419
2420 target_ops *target = current_inferior ()->top_target ();
2421
2422 return target->remove_breakpoint (gdbarch, bp_tgt, reason);
2423 }
2424
2425 static void
2426 info_target_command (const char *args, int from_tty)
2427 {
2428 int has_all_mem = 0;
2429
2430 if (current_program_space->symfile_object_file != NULL)
2431 {
2432 objfile *objf = current_program_space->symfile_object_file;
2433 gdb_printf (_("Symbols from \"%s\".\n"),
2434 objfile_name (objf));
2435 }
2436
2437 for (target_ops *t = current_inferior ()->top_target ();
2438 t != NULL;
2439 t = t->beneath ())
2440 {
2441 if (!t->has_memory ())
2442 continue;
2443
2444 if ((int) (t->stratum ()) <= (int) dummy_stratum)
2445 continue;
2446 if (has_all_mem)
2447 gdb_printf (_("\tWhile running this, "
2448 "GDB does not access memory from...\n"));
2449 gdb_printf ("%s:\n", t->longname ());
2450 t->files_info ();
2451 has_all_mem = t->has_all_memory ();
2452 }
2453 }
2454
2455 /* This function is called before any new inferior is created, e.g.
2456 by running a program, attaching, or connecting to a target.
2457 It cleans up any state from previous invocations which might
2458 change between runs. This is a subset of what target_preopen
2459 resets (things which might change between targets). */
2460
2461 void
2462 target_pre_inferior (int from_tty)
2463 {
2464 /* Clear out solib state. Otherwise the solib state of the previous
2465 inferior might have survived and is entirely wrong for the new
2466 target. This has been observed on GNU/Linux using glibc 2.3. How
2467 to reproduce:
2468
2469 bash$ ./foo&
2470 [1] 4711
2471 bash$ ./foo&
2472 [2] 4712
2473 bash$ gdb ./foo
2474 [...]
2475 (gdb) attach 4711
2476 (gdb) detach
2477 (gdb) attach 4712
2478 Cannot access memory at address 0xdeadbeef
2479 */
2480
2481 /* In some OSs, the shared library list is the same/global/shared
2482 across inferiors. If code is shared between processes, so are
2483 memory regions and features. */
2484 if (!gdbarch_has_global_solist (target_gdbarch ()))
2485 {
2486 no_shared_libraries (NULL, from_tty);
2487
2488 invalidate_target_mem_regions ();
2489
2490 target_clear_description ();
2491 }
2492
2493 /* attach_flag may be set if the previous process associated with
2494 the inferior was attached to. */
2495 current_inferior ()->attach_flag = 0;
2496
2497 current_inferior ()->highest_thread_num = 0;
2498
2499 agent_capability_invalidate ();
2500 }
2501
2502 /* This is to be called by the open routine before it does
2503 anything. */
2504
2505 void
2506 target_preopen (int from_tty)
2507 {
2508 dont_repeat ();
2509
2510 if (current_inferior ()->pid != 0)
2511 {
2512 if (!from_tty
2513 || !target_has_execution ()
2514 || query (_("A program is being debugged already. Kill it? ")))
2515 {
2516 /* Core inferiors actually should be detached, not
2517 killed. */
2518 if (target_has_execution ())
2519 target_kill ();
2520 else
2521 target_detach (current_inferior (), 0);
2522 }
2523 else
2524 error (_("Program not killed."));
2525 }
2526
2527 /* Calling target_kill may remove the target from the stack. But if
2528 it doesn't (which seems like a win for UDI), remove it now. */
2529 /* Leave the exec target, though. The user may be switching from a
2530 live process to a core of the same program. */
2531 pop_all_targets_above (file_stratum);
2532
2533 target_pre_inferior (from_tty);
2534 }
2535
2536 /* See target.h. */
2537
2538 void
2539 target_detach (inferior *inf, int from_tty)
2540 {
2541 /* After we have detached, we will clear the register cache for this inferior
2542 by calling registers_changed_ptid. We must save the pid_ptid before
2543 detaching, as the target detach method will clear inf->pid. */
2544 ptid_t save_pid_ptid = ptid_t (inf->pid);
2545
2546 /* As long as some to_detach implementations rely on the current_inferior
2547 (either directly, or indirectly, like through target_gdbarch or by
2548 reading memory), INF needs to be the current inferior. When that
2549 requirement is no longer true, we can remove this
2550 assertion. */
2551 gdb_assert (inf == current_inferior ());
2552
2553 prepare_for_detach ();
2554
2555 /* Hold a strong reference because detaching may unpush the
2556 target. */
2557 auto proc_target_ref = target_ops_ref::new_reference (inf->process_target ());
2558
2559 current_inferior ()->top_target ()->detach (inf, from_tty);
2560
2561 process_stratum_target *proc_target
2562 = as_process_stratum_target (proc_target_ref.get ());
2563
2564 registers_changed_ptid (proc_target, save_pid_ptid);
2565
2566 /* We have to ensure we have no frame cache left. Normally,
2567 registers_changed_ptid (save_pid_ptid) calls reinit_frame_cache when
2568 inferior_ptid matches save_pid_ptid, but in our case, it does not
2569 call it, as inferior_ptid has been reset. */
2570 reinit_frame_cache ();
2571 }
2572
2573 void
2574 target_disconnect (const char *args, int from_tty)
2575 {
2576 /* If we're in breakpoints-always-inserted mode or if breakpoints
2577 are global across processes, we have to remove them before
2578 disconnecting. */
2579 remove_breakpoints ();
2580
2581 current_inferior ()->top_target ()->disconnect (args, from_tty);
2582 }
2583
2584 /* See target/target.h. */
2585
2586 ptid_t
2587 target_wait (ptid_t ptid, struct target_waitstatus *status,
2588 target_wait_flags options)
2589 {
2590 target_ops *target = current_inferior ()->top_target ();
2591 process_stratum_target *proc_target = current_inferior ()->process_target ();
2592
2593 gdb_assert (!proc_target->commit_resumed_state);
2594
2595 if (!target_can_async_p (target))
2596 gdb_assert ((options & TARGET_WNOHANG) == 0);
2597
2598 try
2599 {
2600 gdb::observers::target_pre_wait.notify (ptid);
2601 ptid_t event_ptid = target->wait (ptid, status, options);
2602 gdb::observers::target_post_wait.notify (event_ptid);
2603 return event_ptid;
2604 }
2605 catch (...)
2606 {
2607 gdb::observers::target_post_wait.notify (null_ptid);
2608 throw;
2609 }
2610 }
2611
2612 /* See target.h. */
2613
2614 ptid_t
2615 default_target_wait (struct target_ops *ops,
2616 ptid_t ptid, struct target_waitstatus *status,
2617 target_wait_flags options)
2618 {
2619 status->set_ignore ();
2620 return minus_one_ptid;
2621 }
2622
2623 std::string
2624 target_pid_to_str (ptid_t ptid)
2625 {
2626 return current_inferior ()->top_target ()->pid_to_str (ptid);
2627 }
2628
2629 const char *
2630 target_thread_name (struct thread_info *info)
2631 {
2632 gdb_assert (info->inf == current_inferior ());
2633
2634 return current_inferior ()->top_target ()->thread_name (info);
2635 }
2636
2637 struct thread_info *
2638 target_thread_handle_to_thread_info (const gdb_byte *thread_handle,
2639 int handle_len,
2640 struct inferior *inf)
2641 {
2642 target_ops *target = current_inferior ()->top_target ();
2643
2644 return target->thread_handle_to_thread_info (thread_handle, handle_len, inf);
2645 }
2646
2647 /* See target.h. */
2648
2649 gdb::byte_vector
2650 target_thread_info_to_thread_handle (struct thread_info *tip)
2651 {
2652 target_ops *target = current_inferior ()->top_target ();
2653
2654 return target->thread_info_to_thread_handle (tip);
2655 }
2656
2657 void
2658 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2659 {
2660 process_stratum_target *curr_target = current_inferior ()->process_target ();
2661 gdb_assert (!curr_target->commit_resumed_state);
2662
2663 target_dcache_invalidate ();
2664
2665 current_inferior ()->top_target ()->resume (ptid, step, signal);
2666
2667 registers_changed_ptid (curr_target, ptid);
2668 /* We only set the internal executing state here. The user/frontend
2669 running state is set at a higher level. This also clears the
2670 thread's stop_pc as side effect. */
2671 set_executing (curr_target, ptid, true);
2672 clear_inline_frame_state (curr_target, ptid);
2673
2674 if (target_can_async_p ())
2675 target_async (1);
2676 }
2677
2678 /* See target.h. */
2679
2680 void
2681 target_commit_resumed ()
2682 {
2683 gdb_assert (current_inferior ()->process_target ()->commit_resumed_state);
2684 current_inferior ()->top_target ()->commit_resumed ();
2685 }
2686
2687 /* See target.h. */
2688
2689 bool
2690 target_has_pending_events ()
2691 {
2692 return current_inferior ()->top_target ()->has_pending_events ();
2693 }
2694
2695 void
2696 target_pass_signals (gdb::array_view<const unsigned char> pass_signals)
2697 {
2698 current_inferior ()->top_target ()->pass_signals (pass_signals);
2699 }
2700
2701 void
2702 target_program_signals (gdb::array_view<const unsigned char> program_signals)
2703 {
2704 current_inferior ()->top_target ()->program_signals (program_signals);
2705 }
2706
2707 static void
2708 default_follow_fork (struct target_ops *self, inferior *child_inf,
2709 ptid_t child_ptid, target_waitkind fork_kind,
2710 bool follow_child, bool detach_fork)
2711 {
2712 /* Some target returned a fork event, but did not know how to follow it. */
2713 internal_error (__FILE__, __LINE__,
2714 _("could not find a target to follow fork"));
2715 }
2716
2717 /* See target.h. */
2718
2719 void
2720 target_follow_fork (inferior *child_inf, ptid_t child_ptid,
2721 target_waitkind fork_kind, bool follow_child,
2722 bool detach_fork)
2723 {
2724 target_ops *target = current_inferior ()->top_target ();
2725
2726 /* Check consistency between CHILD_INF, CHILD_PTID, FOLLOW_CHILD and
2727 DETACH_FORK. */
2728 if (child_inf != nullptr)
2729 {
2730 gdb_assert (follow_child || !detach_fork);
2731 gdb_assert (child_inf->pid == child_ptid.pid ());
2732 }
2733 else
2734 gdb_assert (!follow_child && detach_fork);
2735
2736 return target->follow_fork (child_inf, child_ptid, fork_kind, follow_child,
2737 detach_fork);
2738 }
2739
2740 /* See target.h. */
2741
2742 void
2743 target_follow_exec (inferior *follow_inf, ptid_t ptid,
2744 const char *execd_pathname)
2745 {
2746 current_inferior ()->top_target ()->follow_exec (follow_inf, ptid,
2747 execd_pathname);
2748 }
2749
2750 static void
2751 default_mourn_inferior (struct target_ops *self)
2752 {
2753 internal_error (__FILE__, __LINE__,
2754 _("could not find a target to follow mourn inferior"));
2755 }
2756
2757 void
2758 target_mourn_inferior (ptid_t ptid)
2759 {
2760 gdb_assert (ptid.pid () == inferior_ptid.pid ());
2761 current_inferior ()->top_target ()->mourn_inferior ();
2762
2763 /* We no longer need to keep handles on any of the object files.
2764 Make sure to release them to avoid unnecessarily locking any
2765 of them while we're not actually debugging. */
2766 bfd_cache_close_all ();
2767 }
2768
2769 /* Look for a target which can describe architectural features, starting
2770 from TARGET. If we find one, return its description. */
2771
2772 const struct target_desc *
2773 target_read_description (struct target_ops *target)
2774 {
2775 return target->read_description ();
2776 }
2777
2778
2779 /* Default implementation of memory-searching. */
2780
2781 static int
2782 default_search_memory (struct target_ops *self,
2783 CORE_ADDR start_addr, ULONGEST search_space_len,
2784 const gdb_byte *pattern, ULONGEST pattern_len,
2785 CORE_ADDR *found_addrp)
2786 {
2787 auto read_memory = [=] (CORE_ADDR addr, gdb_byte *result, size_t len)
2788 {
2789 return target_read (current_inferior ()->top_target (),
2790 TARGET_OBJECT_MEMORY, NULL,
2791 result, addr, len) == len;
2792 };
2793
2794 /* Start over from the top of the target stack. */
2795 return simple_search_memory (read_memory, start_addr, search_space_len,
2796 pattern, pattern_len, found_addrp);
2797 }
2798
2799 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2800 sequence of bytes in PATTERN with length PATTERN_LEN.
2801
2802 The result is 1 if found, 0 if not found, and -1 if there was an error
2803 requiring halting of the search (e.g. memory read error).
2804 If the pattern is found the address is recorded in FOUND_ADDRP. */
2805
2806 int
2807 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2808 const gdb_byte *pattern, ULONGEST pattern_len,
2809 CORE_ADDR *found_addrp)
2810 {
2811 target_ops *target = current_inferior ()->top_target ();
2812
2813 return target->search_memory (start_addr, search_space_len, pattern,
2814 pattern_len, found_addrp);
2815 }
2816
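/* Usage sketch (illustrative only): searching a 4 KiB window for a
   4-byte pattern; the pattern and window size are example values.

     static const gdb_byte pattern[] = { 0xde, 0xad, 0xbe, 0xef };
     CORE_ADDR found;

     if (target_search_memory (start, 0x1000, pattern, sizeof (pattern),
                               &found) == 1)
       gdb_printf ("pattern found at %s\n", core_addr_to_string (found));
*/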
2817 /* Look through the currently pushed targets. If none of them will
2818 be able to restart the currently running process, issue an error
2819 message. */
2820
2821 void
2822 target_require_runnable (void)
2823 {
2824 for (target_ops *t = current_inferior ()->top_target ();
2825 t != NULL;
2826 t = t->beneath ())
2827 {
2828 /* If this target knows how to create a new program, then
2829 assume we will still be able to after killing the current
2830 one. Either killing and mourning will not pop T, or else
2831 find_default_run_target will find it again. */
2832 if (t->can_create_inferior ())
2833 return;
2834
2835 /* Do not worry about targets at certain strata that cannot
2836 create inferiors. Assume they will be pushed again if
2837 necessary, and continue to the process_stratum. */
2838 if (t->stratum () > process_stratum)
2839 continue;
2840
2841 error (_("The \"%s\" target does not support \"run\". "
2842 "Try \"help target\" or \"continue\"."),
2843 t->shortname ());
2844 }
2845
2846 /* This function is only called if the target is running. In that
2847 case there should have been a process_stratum target and it
2848 should either know how to create inferiors, or not... */
2849 internal_error (__FILE__, __LINE__, _("No targets found"));
2850 }
2851
2852 /* Whether GDB is allowed to fall back to the default run target for
2853 "run", "attach", etc. when no target is connected yet. */
2854 static bool auto_connect_native_target = true;
2855
2856 static void
2857 show_auto_connect_native_target (struct ui_file *file, int from_tty,
2858 struct cmd_list_element *c, const char *value)
2859 {
2860 gdb_printf (file,
2861 _("Whether GDB may automatically connect to the "
2862 "native target is %s.\n"),
2863 value);
2864 }
2865
2866 /* A pointer to the target that can respond to "run" or "attach".
2867 Native targets are always singletons and instantiated early at GDB
2868 startup. */
2869 static target_ops *the_native_target;
2870
2871 /* See target.h. */
2872
2873 void
2874 set_native_target (target_ops *target)
2875 {
2876 if (the_native_target != NULL)
2877 internal_error (__FILE__, __LINE__,
2878 _("native target already set (\"%s\")."),
2879 the_native_target->longname ());
2880
2881 the_native_target = target;
2882 }
2883
2884 /* See target.h. */
2885
2886 target_ops *
2887 get_native_target ()
2888 {
2889 return the_native_target;
2890 }
2891
2892 /* Look through the list of possible targets for a target that can
2893 execute a run or attach command without any other data. This is
2894 used to locate the default process stratum.
2895
2896 If DO_MESG is not NULL, the result is always valid (error() is
2897 called for errors); else, return NULL on error. */
2898
2899 static struct target_ops *
2900 find_default_run_target (const char *do_mesg)
2901 {
2902 if (auto_connect_native_target && the_native_target != NULL)
2903 return the_native_target;
2904
2905 if (do_mesg != NULL)
2906 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2907 return NULL;
2908 }
2909
2910 /* See target.h. */
2911
2912 struct target_ops *
2913 find_attach_target (void)
2914 {
2915 /* If a target on the current stack can attach, use it. */
2916 for (target_ops *t = current_inferior ()->top_target ();
2917 t != NULL;
2918 t = t->beneath ())
2919 {
2920 if (t->can_attach ())
2921 return t;
2922 }
2923
2924 /* Otherwise, use the default run target for attaching. */
2925 return find_default_run_target ("attach");
2926 }
2927
2928 /* See target.h. */
2929
2930 struct target_ops *
2931 find_run_target (void)
2932 {
2933 /* If a target on the current stack can run, use it. */
2934 for (target_ops *t = current_inferior ()->top_target ();
2935 t != NULL;
2936 t = t->beneath ())
2937 {
2938 if (t->can_create_inferior ())
2939 return t;
2940 }
2941
2942 /* Otherwise, use the default run target. */
2943 return find_default_run_target ("run");
2944 }
2945
2946 bool
2947 target_ops::info_proc (const char *args, enum info_proc_what what)
2948 {
2949 return false;
2950 }
2951
2952 /* Implement the "info proc" command. */
2953
2954 int
2955 target_info_proc (const char *args, enum info_proc_what what)
2956 {
2957 struct target_ops *t;
2958
2959 /* If we're already connected to something that can get us OS
2960 related data, use it. Otherwise, try using the native
2961 target. */
2962 t = find_target_at (process_stratum);
2963 if (t == NULL)
2964 t = find_default_run_target (NULL);
2965
2966 for (; t != NULL; t = t->beneath ())
2967 {
2968 if (t->info_proc (args, what))
2969 {
2970 if (targetdebug)
2971 gdb_printf (gdb_stdlog,
2972 "target_info_proc (\"%s\", %d)\n", args, what);
2973
2974 return 1;
2975 }
2976 }
2977
2978 return 0;
2979 }
2980
2981 static int
2982 find_default_supports_disable_randomization (struct target_ops *self)
2983 {
2984 struct target_ops *t;
2985
2986 t = find_default_run_target (NULL);
2987 if (t != NULL)
2988 return t->supports_disable_randomization ();
2989 return 0;
2990 }
2991
2992 int
2993 target_supports_disable_randomization (void)
2994 {
2995 return current_inferior ()->top_target ()->supports_disable_randomization ();
2996 }
2997
2998 /* See target/target.h. */
2999
3000 int
3001 target_supports_multi_process (void)
3002 {
3003 return current_inferior ()->top_target ()->supports_multi_process ();
3004 }
3005
3006 /* See target.h. */
3007
3008 gdb::optional<gdb::char_vector>
3009 target_get_osdata (const char *type)
3010 {
3011 struct target_ops *t;
3012
3013 /* If we're already connected to something that can get us OS
3014 related data, use it. Otherwise, try using the native
3015 target. */
3016 t = find_target_at (process_stratum);
3017 if (t == NULL)
3018 t = find_default_run_target ("get OS data");
3019
3020 if (!t)
3021 return {};
3022
3023 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3024 }
3025
3026 /* Determine the current address space of thread PTID. */
3027
3028 struct address_space *
3029 target_thread_address_space (ptid_t ptid)
3030 {
3031 struct address_space *aspace;
3032
3033 aspace = current_inferior ()->top_target ()->thread_address_space (ptid);
3034 gdb_assert (aspace != NULL);
3035
3036 return aspace;
3037 }
3038
3039 /* See target.h. */
3040
3041 target_ops *
3042 target_ops::beneath () const
3043 {
3044 return current_inferior ()->find_target_beneath (this);
3045 }
3046
3047 void
3048 target_ops::close ()
3049 {
3050 }
3051
3052 bool
3053 target_ops::can_attach ()
3054 {
3055 return false;
3056 }
3057
3058 void
3059 target_ops::attach (const char *, int)
3060 {
3061 gdb_assert_not_reached ("target_ops::attach called");
3062 }
3063
3064 bool
3065 target_ops::can_create_inferior ()
3066 {
3067 return false;
3068 }
3069
3070 void
3071 target_ops::create_inferior (const char *, const std::string &,
3072 char **, int)
3073 {
3074 gdb_assert_not_reached ("target_ops::create_inferior called");
3075 }
3076
3077 bool
3078 target_ops::can_run ()
3079 {
3080 return false;
3081 }
3082
3083 int
3084 target_can_run ()
3085 {
3086 for (target_ops *t = current_inferior ()->top_target ();
3087 t != NULL;
3088 t = t->beneath ())
3089 {
3090 if (t->can_run ())
3091 return 1;
3092 }
3093
3094 return 0;
3095 }
3096
3097 /* Target file operations. */
3098
3099 static struct target_ops *
3100 default_fileio_target (void)
3101 {
3102 struct target_ops *t;
3103
3104 /* If we're already connected to something that can perform
3105 file I/O, use it. Otherwise, try using the native target. */
3106 t = find_target_at (process_stratum);
3107 if (t != NULL)
3108 return t;
3109 return find_default_run_target ("file I/O");
3110 }
3111
3112 /* File handle for target file operations. */
3113
3114 struct fileio_fh_t
3115 {
3116 /* The target on which this file is open. NULL if the target was
3117 closed while the handle was still open. */
3118 target_ops *target;
3119
3120 /* The file descriptor on the target. */
3121 int target_fd;
3122
3123 /* Check whether this fileio_fh_t represents a closed file. */
3124 bool is_closed ()
3125 {
3126 return target_fd < 0;
3127 }
3128 };
3129
3130 /* Vector of currently open file handles. The value returned by
3131 target_fileio_open and passed as the FD argument to other
3132 target_fileio_* functions is an index into this vector. This
3133 vector's entries are never freed; instead, files are marked as
3134 closed, and the handle becomes available for reuse. */
3135 static std::vector<fileio_fh_t> fileio_fhandles;
3136
3137 /* Index into fileio_fhandles of the lowest handle that might be
3138 closed. This permits handle reuse without searching the whole
3139 list each time a new file is opened. */
3140 static int lowest_closed_fd;
3141
3142 /* See target.h. */
3143
3144 void
3145 fileio_handles_invalidate_target (target_ops *targ)
3146 {
3147 for (fileio_fh_t &fh : fileio_fhandles)
3148 if (fh.target == targ)
3149 fh.target = NULL;
3150 }
3151
3152 /* Acquire a target fileio file descriptor. */
3153
3154 static int
3155 acquire_fileio_fd (target_ops *target, int target_fd)
3156 {
3157 /* Search for closed handles to reuse. */
3158 for (; lowest_closed_fd < fileio_fhandles.size (); lowest_closed_fd++)
3159 {
3160 fileio_fh_t &fh = fileio_fhandles[lowest_closed_fd];
3161
3162 if (fh.is_closed ())
3163 break;
3164 }
3165
3166 /* Push a new handle if no closed handles were found. */
3167 if (lowest_closed_fd == fileio_fhandles.size ())
3168 fileio_fhandles.push_back (fileio_fh_t {target, target_fd});
3169 else
3170 fileio_fhandles[lowest_closed_fd] = {target, target_fd};
3171
3172 /* Should no longer be marked closed. */
3173 gdb_assert (!fileio_fhandles[lowest_closed_fd].is_closed ());
3174
3175 /* Return its index, and start the next lookup at
3176 the next index. */
3177 return lowest_closed_fd++;
3178 }
3179
3180 /* Release a target fileio file descriptor. */
3181
3182 static void
3183 release_fileio_fd (int fd, fileio_fh_t *fh)
3184 {
3185 fh->target_fd = -1;
3186 lowest_closed_fd = std::min (lowest_closed_fd, fd);
3187 }
3188
3189 /* Return a pointer to the fileio_fh_t corresponding to FD. */
3190
3191 static fileio_fh_t *
3192 fileio_fd_to_fh (int fd)
3193 {
3194 return &fileio_fhandles[fd];
3195 }
3196
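/* Lifecycle sketch (illustrative only): the descriptor handed back to
   callers is an index into FILEIO_FHANDLES, not the fd used on the
   target side.

     int gdb_fd = acquire_fileio_fd (t, target_fd);
     fileio_fh_t *fh = fileio_fd_to_fh (gdb_fd);   /+ fh->target_fd == target_fd +/
     release_fileio_fd (gdb_fd, fh);               /+ slot becomes reusable +/
*/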
3197
3198 /* Default implementations of file i/o methods. We don't want these
3199 to delegate automatically, because we need to know which target
3200 supported the method, in order to call it directly from within
3201 pread/pwrite, etc. */
3202
3203 int
3204 target_ops::fileio_open (struct inferior *inf, const char *filename,
3205 int flags, int mode, int warn_if_slow,
3206 int *target_errno)
3207 {
3208 *target_errno = FILEIO_ENOSYS;
3209 return -1;
3210 }
3211
3212 int
3213 target_ops::fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3214 ULONGEST offset, int *target_errno)
3215 {
3216 *target_errno = FILEIO_ENOSYS;
3217 return -1;
3218 }
3219
3220 int
3221 target_ops::fileio_pread (int fd, gdb_byte *read_buf, int len,
3222 ULONGEST offset, int *target_errno)
3223 {
3224 *target_errno = FILEIO_ENOSYS;
3225 return -1;
3226 }
3227
3228 int
3229 target_ops::fileio_fstat (int fd, struct stat *sb, int *target_errno)
3230 {
3231 *target_errno = FILEIO_ENOSYS;
3232 return -1;
3233 }
3234
3235 int
3236 target_ops::fileio_close (int fd, int *target_errno)
3237 {
3238 *target_errno = FILEIO_ENOSYS;
3239 return -1;
3240 }
3241
3242 int
3243 target_ops::fileio_unlink (struct inferior *inf, const char *filename,
3244 int *target_errno)
3245 {
3246 *target_errno = FILEIO_ENOSYS;
3247 return -1;
3248 }
3249
3250 gdb::optional<std::string>
3251 target_ops::fileio_readlink (struct inferior *inf, const char *filename,
3252 int *target_errno)
3253 {
3254 *target_errno = FILEIO_ENOSYS;
3255 return {};
3256 }
3257
3258 /* See target.h. */
3259
3260 int
3261 target_fileio_open (struct inferior *inf, const char *filename,
3262 int flags, int mode, bool warn_if_slow, int *target_errno)
3263 {
3264 for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
3265 {
3266 int fd = t->fileio_open (inf, filename, flags, mode,
3267 warn_if_slow, target_errno);
3268
3269 if (fd == -1 && *target_errno == FILEIO_ENOSYS)
3270 continue;
3271
3272 if (fd < 0)
3273 fd = -1;
3274 else
3275 fd = acquire_fileio_fd (t, fd);
3276
3277 if (targetdebug)
3278 gdb_printf (gdb_stdlog,
3279 "target_fileio_open (%d,%s,0x%x,0%o,%d)"
3280 " = %d (%d)\n",
3281 inf == NULL ? 0 : inf->num,
3282 filename, flags, mode,
3283 warn_if_slow, fd,
3284 fd != -1 ? 0 : *target_errno);
3285 return fd;
3286 }
3287
3288 *target_errno = FILEIO_ENOSYS;
3289 return -1;
3290 }
3291
3292 /* See target.h. */
3293
3294 int
3295 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3296 ULONGEST offset, int *target_errno)
3297 {
3298 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3299 int ret = -1;
3300
3301 if (fh->is_closed ())
3302 *target_errno = EBADF;
3303 else if (fh->target == NULL)
3304 *target_errno = EIO;
3305 else
3306 ret = fh->target->fileio_pwrite (fh->target_fd, write_buf,
3307 len, offset, target_errno);
3308
3309 if (targetdebug)
3310 gdb_printf (gdb_stdlog,
3311 "target_fileio_pwrite (%d,...,%d,%s) "
3312 "= %d (%d)\n",
3313 fd, len, pulongest (offset),
3314 ret, ret != -1 ? 0 : *target_errno);
3315 return ret;
3316 }
3317
3318 /* See target.h. */
3319
3320 int
3321 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3322 ULONGEST offset, int *target_errno)
3323 {
3324 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3325 int ret = -1;
3326
3327 if (fh->is_closed ())
3328 *target_errno = EBADF;
3329 else if (fh->target == NULL)
3330 *target_errno = EIO;
3331 else
3332 ret = fh->target->fileio_pread (fh->target_fd, read_buf,
3333 len, offset, target_errno);
3334
3335 if (targetdebug)
3336 gdb_printf (gdb_stdlog,
3337 "target_fileio_pread (%d,...,%d,%s) "
3338 "= %d (%d)\n",
3339 fd, len, pulongest (offset),
3340 ret, ret != -1 ? 0 : *target_errno);
3341 return ret;
3342 }
3343
3344 /* See target.h. */
3345
3346 int
3347 target_fileio_fstat (int fd, struct stat *sb, int *target_errno)
3348 {
3349 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3350 int ret = -1;
3351
3352 if (fh->is_closed ())
3353 *target_errno = EBADF;
3354 else if (fh->target == NULL)
3355 *target_errno = EIO;
3356 else
3357 ret = fh->target->fileio_fstat (fh->target_fd, sb, target_errno);
3358
3359 if (targetdebug)
3360 gdb_printf (gdb_stdlog,
3361 "target_fileio_fstat (%d) = %d (%d)\n",
3362 fd, ret, ret != -1 ? 0 : *target_errno);
3363 return ret;
3364 }
3365
3366 /* See target.h. */
3367
3368 int
3369 target_fileio_close (int fd, int *target_errno)
3370 {
3371 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3372 int ret = -1;
3373
3374 if (fh->is_closed ())
3375 *target_errno = EBADF;
3376 else
3377 {
3378 if (fh->target != NULL)
3379 ret = fh->target->fileio_close (fh->target_fd,
3380 target_errno);
3381 else
3382 ret = 0;
3383 release_fileio_fd (fd, fh);
3384 }
3385
3386 if (targetdebug)
3387 gdb_printf (gdb_stdlog,
3388 "target_fileio_close (%d) = %d (%d)\n",
3389 fd, ret, ret != -1 ? 0 : *target_errno);
3390 return ret;
3391 }
3392
3393 /* See target.h. */
3394
3395 int
3396 target_fileio_unlink (struct inferior *inf, const char *filename,
3397 int *target_errno)
3398 {
3399 for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
3400 {
3401 int ret = t->fileio_unlink (inf, filename, target_errno);
3402
3403 if (ret == -1 && *target_errno == FILEIO_ENOSYS)
3404 continue;
3405
3406 if (targetdebug)
3407 gdb_printf (gdb_stdlog,
3408 "target_fileio_unlink (%d,%s)"
3409 " = %d (%d)\n",
3410 inf == NULL ? 0 : inf->num, filename,
3411 ret, ret != -1 ? 0 : *target_errno);
3412 return ret;
3413 }
3414
3415 *target_errno = FILEIO_ENOSYS;
3416 return -1;
3417 }
3418
3419 /* See target.h. */
3420
3421 gdb::optional<std::string>
3422 target_fileio_readlink (struct inferior *inf, const char *filename,
3423 int *target_errno)
3424 {
3425 for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
3426 {
3427 gdb::optional<std::string> ret
3428 = t->fileio_readlink (inf, filename, target_errno);
3429
3430 if (!ret.has_value () && *target_errno == FILEIO_ENOSYS)
3431 continue;
3432
3433 if (targetdebug)
3434 gdb_printf (gdb_stdlog,
3435 "target_fileio_readlink (%d,%s)"
3436 " = %s (%d)\n",
3437 inf == NULL ? 0 : inf->num,
3438 filename, ret ? ret->c_str () : "(nil)",
3439 ret ? 0 : *target_errno);
3440 return ret;
3441 }
3442
3443 *target_errno = FILEIO_ENOSYS;
3444 return {};
3445 }
3446
3447 /* Like scoped_fd, but specific to target fileio. */
3448
3449 class scoped_target_fd
3450 {
3451 public:
3452 explicit scoped_target_fd (int fd) noexcept
3453 : m_fd (fd)
3454 {
3455 }
3456
3457 ~scoped_target_fd ()
3458 {
3459 if (m_fd >= 0)
3460 {
3461 int target_errno;
3462
3463 target_fileio_close (m_fd, &target_errno);
3464 }
3465 }
3466
3467 DISABLE_COPY_AND_ASSIGN (scoped_target_fd);
3468
3469 int get () const noexcept
3470 {
3471 return m_fd;
3472 }
3473
3474 private:
3475 int m_fd;
3476 };
3477
3478 /* Read target file FILENAME, in the filesystem as seen by INF. If
3479 INF is NULL, use the filesystem seen by the debugger (GDB or, for
3480 remote targets, the remote stub). Store the result in *BUF_P and
3481 return the size of the transferred data. PADDING additional bytes
3482 are available in *BUF_P. This is a helper function for
3483 target_fileio_read_alloc; see the declaration of that function for
3484 more information. */
3485
3486 static LONGEST
3487 target_fileio_read_alloc_1 (struct inferior *inf, const char *filename,
3488 gdb_byte **buf_p, int padding)
3489 {
3490 size_t buf_alloc, buf_pos;
3491 gdb_byte *buf;
3492 LONGEST n;
3493 int target_errno;
3494
3495 scoped_target_fd fd (target_fileio_open (inf, filename, FILEIO_O_RDONLY,
3496 0700, false, &target_errno));
3497 if (fd.get () == -1)
3498 return -1;
3499
3500 /* Start by reading up to 4K at a time. The target will throttle
3501 this number down if necessary. */
3502 buf_alloc = 4096;
3503 buf = (gdb_byte *) xmalloc (buf_alloc);
3504 buf_pos = 0;
3505 while (1)
3506 {
3507 n = target_fileio_pread (fd.get (), &buf[buf_pos],
3508 buf_alloc - buf_pos - padding, buf_pos,
3509 &target_errno);
3510 if (n < 0)
3511 {
3512 /* An error occurred. */
3513 xfree (buf);
3514 return -1;
3515 }
3516 else if (n == 0)
3517 {
3518 /* Read all there was. */
3519 if (buf_pos == 0)
3520 xfree (buf);
3521 else
3522 *buf_p = buf;
3523 return buf_pos;
3524 }
3525
3526 buf_pos += n;
3527
3528 /* If the buffer is filling up, expand it. */
3529 if (buf_alloc < buf_pos * 2)
3530 {
3531 buf_alloc *= 2;
3532 buf = (gdb_byte *) xrealloc (buf, buf_alloc);
3533 }
3534
3535 QUIT;
3536 }
3537 }
3538
3539 /* See target.h. */
3540
3541 LONGEST
3542 target_fileio_read_alloc (struct inferior *inf, const char *filename,
3543 gdb_byte **buf_p)
3544 {
3545 return target_fileio_read_alloc_1 (inf, filename, buf_p, 0);
3546 }
3547
3548 /* See target.h. */
3549
3550 gdb::unique_xmalloc_ptr<char>
3551 target_fileio_read_stralloc (struct inferior *inf, const char *filename)
3552 {
3553 gdb_byte *buffer;
3554 char *bufstr;
3555 LONGEST i, transferred;
3556
3557 transferred = target_fileio_read_alloc_1 (inf, filename, &buffer, 1);
3558 bufstr = (char *) buffer;
3559
3560 if (transferred < 0)
3561 return gdb::unique_xmalloc_ptr<char> (nullptr);
3562
3563 if (transferred == 0)
3564 return make_unique_xstrdup ("");
3565
3566 bufstr[transferred] = 0;
3567
3568 /* Check for embedded NUL bytes; but allow trailing NULs. */
3569 for (i = strlen (bufstr); i < transferred; i++)
3570 if (bufstr[i] != 0)
3571 {
3572 warning (_("target file %s "
3573 "contained unexpected null characters"),
3574 filename);
3575 break;
3576 }
3577
3578 return gdb::unique_xmalloc_ptr<char> (bufstr);
3579 }
3580
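/* Usage sketch (illustrative only): reading a text file from the
   filesystem seen by the inferior; the path is an arbitrary example.

     gdb::unique_xmalloc_ptr<char> contents
       = target_fileio_read_stralloc (inf, "/proc/self/maps");

     if (contents != nullptr)
       gdb_printf ("%s", contents.get ());
*/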
3581
3582 static int
3583 default_region_ok_for_hw_watchpoint (struct target_ops *self,
3584 CORE_ADDR addr, int len)
3585 {
3586 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3587 }
3588
3589 static int
3590 default_watchpoint_addr_within_range (struct target_ops *target,
3591 CORE_ADDR addr,
3592 CORE_ADDR start, int length)
3593 {
3594 return addr >= start && addr < start + length;
3595 }
3596
3597 /* See target.h. */
3598
3599 target_ops *
3600 target_stack::find_beneath (const target_ops *t) const
3601 {
3602 /* Look for a non-empty slot at stratum levels beneath T's. */
3603 for (int stratum = t->stratum () - 1; stratum >= 0; --stratum)
3604 if (m_stack[stratum] != NULL)
3605 return m_stack[stratum];
3606
3607 return NULL;
3608 }
3609
3610 /* See target.h. */
3611
3612 struct target_ops *
3613 find_target_at (enum strata stratum)
3614 {
3615 return current_inferior ()->target_at (stratum);
3616 }
3617
3618 \f
3619
3620 /* See target.h. */
3621
3622 void
3623 target_announce_detach (int from_tty)
3624 {
3625 pid_t pid;
3626 const char *exec_file;
3627
3628 if (!from_tty)
3629 return;
3630
3631 pid = inferior_ptid.pid ();
3632 exec_file = get_exec_file (0);
3633 if (exec_file == nullptr)
3634 gdb_printf ("Detaching from pid %s\n",
3635 target_pid_to_str (ptid_t (pid)).c_str ());
3636 else
3637 gdb_printf (_("Detaching from program: %s, %s\n"), exec_file,
3638 target_pid_to_str (ptid_t (pid)).c_str ());
3639 }
3640
3641 /* See target.h. */
3642
3643 void
3644 target_announce_attach (int from_tty, int pid)
3645 {
3646 if (!from_tty)
3647 return;
3648
3649 const char *exec_file = get_exec_file (0);
3650
3651 if (exec_file != nullptr)
3652 gdb_printf ("Attaching to program: %s, %s\n", exec_file,
3653 target_pid_to_str (ptid_t (pid)).c_str ());
3654 else
3655 gdb_printf ("Attaching to %s\n",
3656 target_pid_to_str (ptid_t (pid)).c_str ());
3657 }
3658
3659 /* The inferior process has died. Long live the inferior! */
3660
3661 void
3662 generic_mourn_inferior (void)
3663 {
3664 inferior *inf = current_inferior ();
3665
3666 switch_to_no_thread ();
3667
3668 /* Mark breakpoints uninserted in case something tries to delete a
3669 breakpoint while we delete the inferior's threads (which would
3670 fail, since the inferior is long gone). */
3671 mark_breakpoints_out ();
3672
3673 if (inf->pid != 0)
3674 exit_inferior (inf);
3675
3676 /* Note this wipes step-resume breakpoints, so needs to be done
3677 after exit_inferior, which ends up referencing the step-resume
3678 breakpoints through clear_thread_inferior_resources. */
3679 breakpoint_init_inferior (inf_exited);
3680
3681 registers_changed ();
3682
3683 reopen_exec_file ();
3684 reinit_frame_cache ();
3685
3686 if (deprecated_detach_hook)
3687 deprecated_detach_hook ();
3688 }
3689 \f
3690 /* Convert a normal process ID to a string. The result is returned
3691 as a std::string. */
3692
3693 std::string
3694 normal_pid_to_str (ptid_t ptid)
3695 {
3696 return string_printf ("process %d", ptid.pid ());
3697 }
3698
3699 static std::string
3700 default_pid_to_str (struct target_ops *ops, ptid_t ptid)
3701 {
3702 return normal_pid_to_str (ptid);
3703 }
3704
3705 /* Error-catcher for target_find_memory_regions. */
3706 static int
3707 dummy_find_memory_regions (struct target_ops *self,
3708 find_memory_region_ftype ignore1, void *ignore2)
3709 {
3710 error (_("Command not implemented for this target."));
3711 return 0;
3712 }
3713
3714 /* Error-catcher for target_make_corefile_notes. */
3715 static gdb::unique_xmalloc_ptr<char>
3716 dummy_make_corefile_notes (struct target_ops *self,
3717 bfd *ignore1, int *ignore2)
3718 {
3719 error (_("Command not implemented for this target."));
3720 return NULL;
3721 }
3722
3723 #include "target-delegates.c"
3724
3725 /* The initial current target, so that there is always a semi-valid
3726 current target. */
3727
3728 static dummy_target the_dummy_target;
3729
3730 /* See target.h. */
3731
3732 target_ops *
3733 get_dummy_target ()
3734 {
3735 return &the_dummy_target;
3736 }
3737
3738 static const target_info dummy_target_info = {
3739 "None",
3740 N_("None"),
3741 ""
3742 };
3743
3744 strata
3745 dummy_target::stratum () const
3746 {
3747 return dummy_stratum;
3748 }
3749
3750 strata
3751 debug_target::stratum () const
3752 {
3753 return debug_stratum;
3754 }
3755
3756 const target_info &
3757 dummy_target::info () const
3758 {
3759 return dummy_target_info;
3760 }
3761
3762 const target_info &
3763 debug_target::info () const
3764 {
3765 return beneath ()->info ();
3766 }
3767
3768 \f
3769
3770 void
3771 target_close (struct target_ops *targ)
3772 {
3773 for (inferior *inf : all_inferiors ())
3774 gdb_assert (!inf->target_is_pushed (targ));
3775
3776 fileio_handles_invalidate_target (targ);
3777
3778 targ->close ();
3779
3780 if (targetdebug)
3781 gdb_printf (gdb_stdlog, "target_close ()\n");
3782 }
3783
3784 int
3785 target_thread_alive (ptid_t ptid)
3786 {
3787 return current_inferior ()->top_target ()->thread_alive (ptid);
3788 }
3789
3790 void
3791 target_update_thread_list (void)
3792 {
3793 current_inferior ()->top_target ()->update_thread_list ();
3794 }
3795
3796 void
3797 target_stop (ptid_t ptid)
3798 {
3799 process_stratum_target *proc_target = current_inferior ()->process_target ();
3800
3801 gdb_assert (!proc_target->commit_resumed_state);
3802
3803 if (!may_stop)
3804 {
3805 warning (_("May not interrupt or stop the target, ignoring attempt"));
3806 return;
3807 }
3808
3809 current_inferior ()->top_target ()->stop (ptid);
3810 }
3811
3812 void
3813 target_interrupt ()
3814 {
3815 if (!may_stop)
3816 {
3817 warning (_("May not interrupt or stop the target, ignoring attempt"));
3818 return;
3819 }
3820
3821 current_inferior ()->top_target ()->interrupt ();
3822 }
3823
3824 /* See target.h. */
3825
3826 void
3827 target_pass_ctrlc (void)
3828 {
3829 /* Pass the Ctrl-C to the first target that has a thread
3830 running. */
3831 for (inferior *inf : all_inferiors ())
3832 {
3833 target_ops *proc_target = inf->process_target ();
3834 if (proc_target == NULL)
3835 continue;
3836
3837 for (thread_info *thr : inf->non_exited_threads ())
3838 {
3839 /* A thread can be THREAD_STOPPED and executing, while
3840 running an infcall. */
3841 if (thr->state == THREAD_RUNNING || thr->executing ())
3842 {
3843 /* We can get here quite deep in target layers. Avoid
3844 switching thread context or anything that would
3845 communicate with the target (e.g., to fetch
3846 registers), or flushing e.g., the frame cache. We
3847 just switch inferior in order to be able to call
3848 through the target_stack. */
3849 scoped_restore_current_inferior restore_inferior;
3850 set_current_inferior (inf);
3851 current_inferior ()->top_target ()->pass_ctrlc ();
3852 return;
3853 }
3854 }
3855 }
3856 }
3857
3858 /* See target.h. */
3859
3860 void
3861 default_target_pass_ctrlc (struct target_ops *ops)
3862 {
3863 target_interrupt ();
3864 }
3865
3866 /* See target/target.h. */
3867
3868 void
3869 target_stop_and_wait (ptid_t ptid)
3870 {
3871 struct target_waitstatus status;
3872 bool was_non_stop = non_stop;
3873
3874 non_stop = true;
3875 target_stop (ptid);
3876
3877 target_wait (ptid, &status, 0);
3878
3879 non_stop = was_non_stop;
3880 }
3881
3882 /* See target/target.h. */
3883
3884 void
3885 target_continue_no_signal (ptid_t ptid)
3886 {
3887 target_resume (ptid, 0, GDB_SIGNAL_0);
3888 }
3889
3890 /* See target/target.h. */
3891
3892 void
3893 target_continue (ptid_t ptid, enum gdb_signal signal)
3894 {
3895 target_resume (ptid, 0, signal);
3896 }
3897
3898 /* Concatenate ELEM to LIST, a comma-separated list. */
3899
3900 static void
3901 str_comma_list_concat_elem (std::string *list, const char *elem)
3902 {
3903 if (!list->empty ())
3904 list->append (", ");
3905
3906 list->append (elem);
3907 }
3908
3909 /* Helper for target_options_to_string. If OPT is present in
3910 TARGET_OPTIONS, append OPT_STR (the string version of OPT) to RET.
3911 OPT is removed from TARGET_OPTIONS. */
3912
3913 static void
3914 do_option (target_wait_flags *target_options, std::string *ret,
3915 target_wait_flag opt, const char *opt_str)
3916 {
3917 if ((*target_options & opt) != 0)
3918 {
3919 str_comma_list_concat_elem (ret, opt_str);
3920 *target_options &= ~opt;
3921 }
3922 }
3923
3924 /* See target.h. */
3925
3926 std::string
3927 target_options_to_string (target_wait_flags target_options)
3928 {
3929 std::string ret;
3930
3931 #define DO_TARG_OPTION(OPT) \
3932 do_option (&target_options, &ret, OPT, #OPT)
3933
3934 DO_TARG_OPTION (TARGET_WNOHANG);
3935
3936 if (target_options != 0)
3937 str_comma_list_concat_elem (&ret, "unknown???");
3938
3939 return ret;
3940 }
3941
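/* Examples (illustrative only):

     target_options_to_string (TARGET_WNOHANG)   => "TARGET_WNOHANG"
     target_options_to_string (0)                => ""
*/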
3942 void
3943 target_fetch_registers (struct regcache *regcache, int regno)
3944 {
3945 current_inferior ()->top_target ()->fetch_registers (regcache, regno);
3946 if (targetdebug)
3947 regcache->debug_print_register ("target_fetch_registers", regno);
3948 }
3949
3950 void
3951 target_store_registers (struct regcache *regcache, int regno)
3952 {
3953 if (!may_write_registers)
3954 error (_("Writing to registers is not allowed (regno %d)"), regno);
3955
3956 current_inferior ()->top_target ()->store_registers (regcache, regno);
3957 if (targetdebug)
3958 {
3959 regcache->debug_print_register ("target_store_registers", regno);
3960 }
3961 }
3962
3963 int
3964 target_core_of_thread (ptid_t ptid)
3965 {
3966 return current_inferior ()->top_target ()->core_of_thread (ptid);
3967 }
3968
3969 int
3970 simple_verify_memory (struct target_ops *ops,
3971 const gdb_byte *data, CORE_ADDR lma, ULONGEST size)
3972 {
3973 LONGEST total_xfered = 0;
3974
3975 while (total_xfered < size)
3976 {
3977 ULONGEST xfered_len;
3978 enum target_xfer_status status;
3979 gdb_byte buf[1024];
3980 ULONGEST howmuch = std::min<ULONGEST> (sizeof (buf), size - total_xfered);
3981
3982 status = target_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
3983 buf, NULL, lma + total_xfered, howmuch,
3984 &xfered_len);
3985 if (status == TARGET_XFER_OK
3986 && memcmp (data + total_xfered, buf, xfered_len) == 0)
3987 {
3988 total_xfered += xfered_len;
3989 QUIT;
3990 }
3991 else
3992 return 0;
3993 }
3994 return 1;
3995 }
3996
3997 /* Default implementation of memory verification. */
3998
3999 static int
4000 default_verify_memory (struct target_ops *self,
4001 const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
4002 {
4003 /* Start over from the top of the target stack. */
4004 return simple_verify_memory (current_inferior ()->top_target (),
4005 data, memaddr, size);
4006 }
4007
4008 int
4009 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
4010 {
4011 target_ops *target = current_inferior ()->top_target ();
4012
4013 return target->verify_memory (data, memaddr, size);
4014 }
4015
4016 /* The documentation for this function is in its prototype declaration in
4017 target.h. */
4018
4019 int
4020 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask,
4021 enum target_hw_bp_type rw)
4022 {
4023 target_ops *target = current_inferior ()->top_target ();
4024
4025 return target->insert_mask_watchpoint (addr, mask, rw);
4026 }
4027
4028 /* The documentation for this function is in its prototype declaration in
4029 target.h. */
4030
4031 int
4032 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask,
4033 enum target_hw_bp_type rw)
4034 {
4035 target_ops *target = current_inferior ()->top_target ();
4036
4037 return target->remove_mask_watchpoint (addr, mask, rw);
4038 }
4039
4040 /* The documentation for this function is in its prototype declaration
4041 in target.h. */
4042
4043 int
4044 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
4045 {
4046 target_ops *target = current_inferior ()->top_target ();
4047
4048 return target->masked_watch_num_registers (addr, mask);
4049 }
4050
4051 /* The documentation for this function is in its prototype declaration
4052 in target.h. */
4053
4054 int
4055 target_ranged_break_num_registers (void)
4056 {
4057 return current_inferior ()->top_target ()->ranged_break_num_registers ();
4058 }
4059
4060 /* See target.h. */
4061
4062 struct btrace_target_info *
4063 target_enable_btrace (thread_info *tp, const struct btrace_config *conf)
4064 {
4065 return current_inferior ()->top_target ()->enable_btrace (tp, conf);
4066 }
4067
4068 /* See target.h. */
4069
4070 void
4071 target_disable_btrace (struct btrace_target_info *btinfo)
4072 {
4073 current_inferior ()->top_target ()->disable_btrace (btinfo);
4074 }
4075
4076 /* See target.h. */
4077
4078 void
4079 target_teardown_btrace (struct btrace_target_info *btinfo)
4080 {
4081 current_inferior ()->top_target ()->teardown_btrace (btinfo);
4082 }
4083
4084 /* See target.h. */
4085
4086 enum btrace_error
4087 target_read_btrace (struct btrace_data *btrace,
4088 struct btrace_target_info *btinfo,
4089 enum btrace_read_type type)
4090 {
4091 target_ops *target = current_inferior ()->top_target ();
4092
4093 return target->read_btrace (btrace, btinfo, type);
4094 }
4095
4096 /* See target.h. */
4097
4098 const struct btrace_config *
4099 target_btrace_conf (const struct btrace_target_info *btinfo)
4100 {
4101 return current_inferior ()->top_target ()->btrace_conf (btinfo);
4102 }
4103
4104 /* See target.h. */
4105
4106 void
4107 target_stop_recording (void)
4108 {
4109 current_inferior ()->top_target ()->stop_recording ();
4110 }
4111
4112 /* See target.h. */
4113
4114 void
4115 target_save_record (const char *filename)
4116 {
4117 current_inferior ()->top_target ()->save_record (filename);
4118 }
4119
4120 /* See target.h. */
4121
4122 int
4123 target_supports_delete_record ()
4124 {
4125 return current_inferior ()->top_target ()->supports_delete_record ();
4126 }
4127
4128 /* See target.h. */
4129
4130 void
4131 target_delete_record (void)
4132 {
4133 current_inferior ()->top_target ()->delete_record ();
4134 }
4135
4136 /* See target.h. */
4137
4138 enum record_method
4139 target_record_method (ptid_t ptid)
4140 {
4141 return current_inferior ()->top_target ()->record_method (ptid);
4142 }
4143
4144 /* See target.h. */
4145
4146 int
4147 target_record_is_replaying (ptid_t ptid)
4148 {
4149 return current_inferior ()->top_target ()->record_is_replaying (ptid);
4150 }
4151
4152 /* See target.h. */
4153
4154 int
4155 target_record_will_replay (ptid_t ptid, int dir)
4156 {
4157 return current_inferior ()->top_target ()->record_will_replay (ptid, dir);
4158 }
4159
4160 /* See target.h. */
4161
4162 void
4163 target_record_stop_replaying (void)
4164 {
4165 current_inferior ()->top_target ()->record_stop_replaying ();
4166 }
4167
4168 /* See target.h. */
4169
4170 void
4171 target_goto_record_begin (void)
4172 {
4173 current_inferior ()->top_target ()->goto_record_begin ();
4174 }
4175
4176 /* See target.h. */
4177
4178 void
4179 target_goto_record_end (void)
4180 {
4181 current_inferior ()->top_target ()->goto_record_end ();
4182 }
4183
4184 /* See target.h. */
4185
4186 void
4187 target_goto_record (ULONGEST insn)
4188 {
4189 current_inferior ()->top_target ()->goto_record (insn);
4190 }
4191
4192 /* See target.h. */
4193
4194 void
4195 target_insn_history (int size, gdb_disassembly_flags flags)
4196 {
4197 current_inferior ()->top_target ()->insn_history (size, flags);
4198 }
4199
4200 /* See target.h. */
4201
4202 void
4203 target_insn_history_from (ULONGEST from, int size,
4204 gdb_disassembly_flags flags)
4205 {
4206 current_inferior ()->top_target ()->insn_history_from (from, size, flags);
4207 }
4208
4209 /* See target.h. */
4210
4211 void
4212 target_insn_history_range (ULONGEST begin, ULONGEST end,
4213 gdb_disassembly_flags flags)
4214 {
4215 current_inferior ()->top_target ()->insn_history_range (begin, end, flags);
4216 }
4217
4218 /* See target.h. */
4219
4220 void
4221 target_call_history (int size, record_print_flags flags)
4222 {
4223 current_inferior ()->top_target ()->call_history (size, flags);
4224 }
4225
4226 /* See target.h. */
4227
4228 void
4229 target_call_history_from (ULONGEST begin, int size, record_print_flags flags)
4230 {
4231 current_inferior ()->top_target ()->call_history_from (begin, size, flags);
4232 }
4233
4234 /* See target.h. */
4235
4236 void
4237 target_call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
4238 {
4239 current_inferior ()->top_target ()->call_history_range (begin, end, flags);
4240 }
4241
4242 /* See target.h. */
4243
4244 const struct frame_unwind *
4245 target_get_unwinder (void)
4246 {
4247 return current_inferior ()->top_target ()->get_unwinder ();
4248 }
4249
4250 /* See target.h. */
4251
4252 const struct frame_unwind *
4253 target_get_tailcall_unwinder (void)
4254 {
4255 return current_inferior ()->top_target ()->get_tailcall_unwinder ();
4256 }
4257
4258 /* See target.h. */
4259
4260 void
4261 target_prepare_to_generate_core (void)
4262 {
4263 current_inferior ()->top_target ()->prepare_to_generate_core ();
4264 }
4265
4266 /* See target.h. */
4267
4268 void
4269 target_done_generating_core (void)
4270 {
4271 current_inferior ()->top_target ()->done_generating_core ();
4272 }
4273
4274 \f
4275
4276 static char targ_desc[] =
4277 "Names of targets and files being debugged.\nShows the entire \
4278 stack of targets currently in use (including the exec-file,\n\
4279 core-file, and process, if any), as well as the symbol file name.";
4280
4281 static void
4282 default_rcmd (struct target_ops *self, const char *command,
4283 struct ui_file *output)
4284 {
4285 error (_("\"monitor\" command not supported by this target."));
4286 }
4287
4288 static void
4289 do_monitor_command (const char *cmd, int from_tty)
4290 {
4291 target_rcmd (cmd, gdb_stdtarg);
4292 }
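
/* Sketch of typical usage, assuming a remote stub that implements the
   qRcmd packet (the available monitor commands are stub-specific):

     (gdb) target remote :1234
     (gdb) monitor help

   The text after "monitor" is handed verbatim to the current target's
   rcmd method; targets without one fall back to default_rcmd above,
   which simply errors out.  */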
4293
4294 /* Erases all the memory regions marked as flash. CMD and FROM_TTY are
4295 ignored. */
4296
4297 void
4298 flash_erase_command (const char *cmd, int from_tty)
4299 {
4300 /* True if we erased any flash region and so must call target_flash_done. */
4301 bool found_flash_region = false;
4302 struct gdbarch *gdbarch = target_gdbarch ();
4303
4304 std::vector<mem_region> mem_regions = target_memory_map ();
4305
4306 /* Iterate over all memory regions. */
4307 for (const mem_region &m : mem_regions)
4308 {
4309 /* Is this a flash memory region? */
4310 if (m.attrib.mode == MEM_FLASH)
4311 {
4312 found_flash_region = true;
4313 target_flash_erase (m.lo, m.hi - m.lo);
4314
4315 ui_out_emit_tuple tuple_emitter (current_uiout, "erased-regions");
4316
4317 current_uiout->message (_("Erasing flash memory region at address "));
4318 current_uiout->field_core_addr ("address", gdbarch, m.lo);
4319 current_uiout->message (", size = ");
4320 current_uiout->field_string ("size", hex_string (m.hi - m.lo));
4321 current_uiout->message ("\n");
4322 }
4323 }
4324
4325 /* Did we do any flash operations? If so, we need to finalize them. */
4326 if (found_flash_region)
4327 target_flash_done ();
4328 else
4329 current_uiout->message (_("No flash memory regions found.\n"));
4330 }
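
/* Example, assuming a remote target whose memory map reports at least one
   MEM_FLASH region:

     (gdb) flash-erase
     Erasing flash memory region at address 0x8000000, size = 0x20000

   If the memory map contains no flash regions, the command just prints
   "No flash memory regions found.".  */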
4331
4332 /* Print the name of each layer of our target stack. */
4333
4334 static void
4335 maintenance_print_target_stack (const char *cmd, int from_tty)
4336 {
4337 gdb_printf (_("The current target stack is:\n"));
4338
4339 for (target_ops *t = current_inferior ()->top_target ();
4340 t != NULL;
4341 t = t->beneath ())
4342 {
4343 if (t->stratum () == debug_stratum)
4344 continue;
4345 gdb_printf (" - %s (%s)\n", t->shortname (), t->longname ());
4346 }
4347 }
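
/* After loading an executable but before connecting to anything, the output
   of "maint print target-stack" might look something like:

     The current target stack is:
       - exec (Local exec file)
       - None (None)

   where each entry is a target's shortname followed by its longname.  */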
4348
4349 /* See target.h. */
4350
4351 void
4352 target_async (int enable)
4353 {
4354 /* If we are trying to enable async mode then it must be the case that
4355 async mode is possible for this target. */
4356 gdb_assert (!enable || target_can_async_p ());
4357 infrun_async (enable);
4358 current_inferior ()->top_target ()->async (enable);
4359 }
4360
4361 /* See target.h. */
4362
4363 void
4364 target_thread_events (int enable)
4365 {
4366 current_inferior ()->top_target ()->thread_events (enable);
4367 }
4368
4369 /* Controls whether targets may report that they can be/are async. This is
4370 just for maintainers to use when debugging gdb. */
4371 bool target_async_permitted = true;
4372
4373 static void
4374 set_maint_target_async (bool permitted)
4375 {
4376 if (have_live_inferiors ())
4377 error (_("Cannot change this setting while the inferior is running."));
4378
4379 target_async_permitted = permitted;
4380 }
4381
4382 static bool
4383 get_maint_target_async ()
4384 {
4385 return target_async_permitted;
4386 }
4387
4388 static void
4389 show_maint_target_async (ui_file *file, int from_tty,
4390 cmd_list_element *c, const char *value)
4391 {
4392 gdb_printf (file,
4393 _("Controlling the inferior in "
4394 "asynchronous mode is %s.\n"), value);
4395 }
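
/* These callbacks back the "maint set/show target-async" setting registered
   in _initialize_target below, e.g.:

     (gdb) maint set target-async off
     (gdb) maint show target-async
     Controlling the inferior in asynchronous mode is off.  */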
4396
4397 /* Return true if the target operates in non-stop mode even with "set
4398 non-stop off". */
4399
4400 static int
4401 target_always_non_stop_p (void)
4402 {
4403 return current_inferior ()->top_target ()->always_non_stop_p ();
4404 }
4405
4406 /* See target.h. */
4407
4408 bool
4409 target_is_non_stop_p ()
4410 {
4411 return ((non_stop
4412 || target_non_stop_enabled == AUTO_BOOLEAN_TRUE
4413 || (target_non_stop_enabled == AUTO_BOOLEAN_AUTO
4414 && target_always_non_stop_p ()))
4415 && target_can_async_p ());
4416 }
4417
4418 /* See target.h. */
4419
4420 bool
4421 exists_non_stop_target ()
4422 {
4423 if (target_is_non_stop_p ())
4424 return true;
4425
4426 scoped_restore_current_thread restore_thread;
4427
4428 for (inferior *inf : all_inferiors ())
4429 {
4430 switch_to_inferior_no_thread (inf);
4431 if (target_is_non_stop_p ())
4432 return true;
4433 }
4434
4435 return false;
4436 }
4437
4438 /* Controls whether targets may report that they always run in non-stop
4439 mode. This is just for maintainers to use when debugging gdb. */
4440 enum auto_boolean target_non_stop_enabled = AUTO_BOOLEAN_AUTO;
4441
4442 /* Set callback for maint target-non-stop setting. */
4443
4444 static void
4445 set_maint_target_non_stop (auto_boolean enabled)
4446 {
4447 if (have_live_inferiors ())
4448 error (_("Cannot change this setting while the inferior is running."));
4449
4450 target_non_stop_enabled = enabled;
4451 }
4452
4453 /* Get callback for maint target-non-stop setting. */
4454
4455 static auto_boolean
4456 get_maint_target_non_stop ()
4457 {
4458 return target_non_stop_enabled;
4459 }
4460
4461 static void
4462 show_maint_target_non_stop (ui_file *file, int from_tty,
4463 cmd_list_element *c, const char *value)
4464 {
4465 if (target_non_stop_enabled == AUTO_BOOLEAN_AUTO)
4466 gdb_printf (file,
4467 _("Whether the target is always in non-stop mode "
4468 "is %s (currently %s).\n"), value,
4469 target_always_non_stop_p () ? "on" : "off");
4470 else
4471 gdb_printf (file,
4472 _("Whether the target is always in non-stop mode "
4473 "is %s.\n"), value);
4474 }
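
/* Likewise, these callbacks back "maint set/show target-non-stop", which
   accepts "on", "off" or "auto" (the default); in the "auto" case the
   target's own always_non_stop_p decides, as shown above.  */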
4475
4476 /* Temporary copies of permission settings. */
4477
4478 static bool may_write_registers_1 = true;
4479 static bool may_write_memory_1 = true;
4480 static bool may_insert_breakpoints_1 = true;
4481 static bool may_insert_tracepoints_1 = true;
4482 static bool may_insert_fast_tracepoints_1 = true;
4483 static bool may_stop_1 = true;
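
/* The "set may-..." commands defined in _initialize_target below write into
   the *_1 shadows above; the setter hooks below then either copy them into
   the real flags or, via update_target_permissions, revert the shadows when
   the change is rejected.  */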
4484
4485 /* Make the user-set values match the real values again. */
4486
4487 void
4488 update_target_permissions (void)
4489 {
4490 may_write_registers_1 = may_write_registers;
4491 may_write_memory_1 = may_write_memory;
4492 may_insert_breakpoints_1 = may_insert_breakpoints;
4493 may_insert_tracepoints_1 = may_insert_tracepoints;
4494 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
4495 may_stop_1 = may_stop;
4496 }
4497
4498 /* This one function handles (most of) the permission flags in the same
4499 way; memory write permission is handled separately below. */
4500
4501 static void
4502 set_target_permissions (const char *args, int from_tty,
4503 struct cmd_list_element *c)
4504 {
4505 if (target_has_execution ())
4506 {
4507 update_target_permissions ();
4508 error (_("Cannot change this setting while the inferior is running."));
4509 }
4510
4511 /* Make the real values match the user-changed values. */
4512 may_write_registers = may_write_registers_1;
4513 may_insert_breakpoints = may_insert_breakpoints_1;
4514 may_insert_tracepoints = may_insert_tracepoints_1;
4515 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
4516 may_stop = may_stop_1;
4517 update_observer_mode ();
4518 }
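
/* For example, while the inferior is running:

     (gdb) set may-insert-breakpoints off
     Cannot change this setting while the inferior is running.

   Note that "set may-write-memory" does not come through here; it uses
   set_write_memory_permission below so that memory write permission can be
   changed independently of observer mode.  */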
4519
4520 /* Set memory write permission independently of observer mode. */
4521
4522 static void
4523 set_write_memory_permission (const char *args, int from_tty,
4524 struct cmd_list_element *c)
4525 {
4526 /* Make the real values match the user-changed values. */
4527 may_write_memory = may_write_memory_1;
4528 update_observer_mode ();
4529 }
4530
4531 void _initialize_target ();
4532
4533 void
4534 _initialize_target ()
4535 {
4536 the_debug_target = new debug_target ();
4537
4538 add_info ("target", info_target_command, targ_desc);
4539 add_info ("files", info_target_command, targ_desc);
4540
4541 add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
4542 Set target debugging."), _("\
4543 Show target debugging."), _("\
4544 When non-zero, target debugging is enabled. Higher numbers are more\n\
4545 verbose."),
4546 set_targetdebug,
4547 show_targetdebug,
4548 &setdebuglist, &showdebuglist);
4549
4550 add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
4551 &trust_readonly, _("\
4552 Set mode for reading from readonly sections."), _("\
4553 Show mode for reading from readonly sections."), _("\
4554 When this mode is on, memory reads from readonly sections (such as .text)\n\
4555 will be read from the object file instead of from the target. This will\n\
4556 result in significant performance improvement for remote targets."),
4557 NULL,
4558 show_trust_readonly,
4559 &setlist, &showlist);
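
/* E.g. "set trust-readonly-sections on" makes reads from sections such as
   .text come from the local object file, which is much faster over a slow
   remote link but can hide target-side patching of those sections.  */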
4560
4561 add_com ("monitor", class_obscure, do_monitor_command,
4562 _("Send a command to the remote monitor (remote targets only)."));
4563
4564 add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
4565 _("Print the name of each layer of the internal target stack."),
4566 &maintenanceprintlist);
4567
4568 add_setshow_boolean_cmd ("target-async", no_class,
4569 _("\
4570 Set whether gdb controls the inferior in asynchronous mode."), _("\
4571 Show whether gdb controls the inferior in asynchronous mode."), _("\
4572 Tells gdb whether to control the inferior in asynchronous mode."),
4573 set_maint_target_async,
4574 get_maint_target_async,
4575 show_maint_target_async,
4576 &maintenance_set_cmdlist,
4577 &maintenance_show_cmdlist);
4578
4579 add_setshow_auto_boolean_cmd ("target-non-stop", no_class,
4580 _("\
4581 Set whether gdb always controls the inferior in non-stop mode."), _("\
4582 Show whether gdb always controls the inferior in non-stop mode."), _("\
4583 Tells gdb whether to control the inferior in non-stop mode."),
4584 set_maint_target_non_stop,
4585 get_maint_target_non_stop,
4586 show_maint_target_non_stop,
4587 &maintenance_set_cmdlist,
4588 &maintenance_show_cmdlist);
4589
4590 add_setshow_boolean_cmd ("may-write-registers", class_support,
4591 &may_write_registers_1, _("\
4592 Set permission to write into registers."), _("\
4593 Show permission to write into registers."), _("\
4594 When this permission is on, GDB may write into the target's registers.\n\
4595 Otherwise, any sort of write attempt will result in an error."),
4596 set_target_permissions, NULL,
4597 &setlist, &showlist);
4598
4599 add_setshow_boolean_cmd ("may-write-memory", class_support,
4600 &may_write_memory_1, _("\
4601 Set permission to write into target memory."), _("\
4602 Show permission to write into target memory."), _("\
4603 When this permission is on, GDB may write into the target's memory.\n\
4604 Otherwise, any sort of write attempt will result in an error."),
4605 set_write_memory_permission, NULL,
4606 &setlist, &showlist);
4607
4608 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
4609 &may_insert_breakpoints_1, _("\
4610 Set permission to insert breakpoints in the target."), _("\
4611 Show permission to insert breakpoints in the target."), _("\
4612 When this permission is on, GDB may insert breakpoints in the program.\n\
4613 Otherwise, any sort of insertion attempt will result in an error."),
4614 set_target_permissions, NULL,
4615 &setlist, &showlist);
4616
4617 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
4618 &may_insert_tracepoints_1, _("\
4619 Set permission to insert tracepoints in the target."), _("\
4620 Show permission to insert tracepoints in the target."), _("\
4621 When this permission is on, GDB may insert tracepoints in the program.\n\
4622 Otherwise, any sort of insertion attempt will result in an error."),
4623 set_target_permissions, NULL,
4624 &setlist, &showlist);
4625
4626 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
4627 &may_insert_fast_tracepoints_1, _("\
4628 Set permission to insert fast tracepoints in the target."), _("\
4629 Show permission to insert fast tracepoints in the target."), _("\
4630 When this permission is on, GDB may insert fast tracepoints.\n\
4631 Otherwise, any sort of insertion attempt will result in an error."),
4632 set_target_permissions, NULL,
4633 &setlist, &showlist);
4634
4635 add_setshow_boolean_cmd ("may-interrupt", class_support,
4636 &may_stop_1, _("\
4637 Set permission to interrupt or signal the target."), _("\
4638 Show permission to interrupt or signal the target."), _("\
4639 When this permission is on, GDB may interrupt/stop the target's execution.\n\
4640 Otherwise, any attempt to interrupt or stop will be ignored."),
4641 set_target_permissions, NULL,
4642 &setlist, &showlist);
4643
4644 add_com ("flash-erase", no_class, flash_erase_command,
4645 _("Erase all flash memory regions."));
4646
4647 add_setshow_boolean_cmd ("auto-connect-native-target", class_support,
4648 &auto_connect_native_target, _("\
4649 Set whether GDB may automatically connect to the native target."), _("\
4650 Show whether GDB may automatically connect to the native target."), _("\
4651 When on, and GDB is not connected to a target yet, GDB\n\
4652 attempts \"run\" and other commands with the native target."),
4653 NULL, show_auto_connect_native_target,
4654 &setlist, &showlist);
4655 }