gdb/target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2022 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "target.h"
24 #include "target-dcache.h"
25 #include "gdbcmd.h"
26 #include "symtab.h"
27 #include "inferior.h"
28 #include "infrun.h"
29 #include "observable.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdbcore.h"
37 #include "target-descriptions.h"
38 #include "gdbthread.h"
39 #include "solib.h"
40 #include "exec.h"
41 #include "inline-frame.h"
42 #include "tracepoint.h"
43 #include "gdb/fileio.h"
44 #include "gdbsupport/agent.h"
45 #include "auxv.h"
46 #include "target-debug.h"
47 #include "top.h"
48 #include "event-top.h"
49 #include <algorithm>
50 #include "gdbsupport/byte-vector.h"
51 #include "gdbsupport/search.h"
52 #include "terminal.h"
53 #include <unordered_map>
54 #include "target-connection.h"
55 #include "valprint.h"
56 #include "cli/cli-decode.h"
57
58 static void generic_tls_error (void) ATTRIBUTE_NORETURN;
59
60 static void default_terminal_info (struct target_ops *, const char *, int);
61
62 static int default_watchpoint_addr_within_range (struct target_ops *,
63 CORE_ADDR, CORE_ADDR, int);
64
65 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
66 CORE_ADDR, int);
67
68 static void default_rcmd (struct target_ops *, const char *, struct ui_file *);
69
70 static ptid_t default_get_ada_task_ptid (struct target_ops *self,
71 long lwp, ULONGEST tid);
72
73 static void default_mourn_inferior (struct target_ops *self);
74
75 static int default_search_memory (struct target_ops *ops,
76 CORE_ADDR start_addr,
77 ULONGEST search_space_len,
78 const gdb_byte *pattern,
79 ULONGEST pattern_len,
80 CORE_ADDR *found_addrp);
81
82 static int default_verify_memory (struct target_ops *self,
83 const gdb_byte *data,
84 CORE_ADDR memaddr, ULONGEST size);
85
86 static void tcomplain (void) ATTRIBUTE_NORETURN;
87
88 static struct target_ops *find_default_run_target (const char *);
89
90 static int dummy_find_memory_regions (struct target_ops *self,
91 find_memory_region_ftype ignore1,
92 void *ignore2);
93
94 static gdb::unique_xmalloc_ptr<char> dummy_make_corefile_notes
95 (struct target_ops *self, bfd *ignore1, int *ignore2);
96
97 static std::string default_pid_to_str (struct target_ops *ops, ptid_t ptid);
98
99 static enum exec_direction_kind default_execution_direction
100 (struct target_ops *self);
101
102 /* Mapping between target_info objects (which have address identity)
103 and corresponding open/factory function/callback. Each add_target
104 call adds one entry to this map, and registers a "target
105 TARGET_NAME" command that when invoked calls the factory registered
106 here. The target_info object is associated with the command via
107 the command's context. */
108 static std::unordered_map<const target_info *, target_open_ftype *>
109 target_factories;
110
111 /* The singleton debug target. */
112
113 static struct target_ops *the_debug_target;
114
115 /* Command list for target. */
116
117 static struct cmd_list_element *targetlist = NULL;
118
119 /* True if we should trust readonly sections from the
120 executable when reading memory. */
121
122 static bool trust_readonly = false;
123
124 /* Nonzero if we should show true memory content including
125 memory breakpoints inserted by GDB. */
126
127 static int show_memory_breakpoints = 0;
128
129 /* These globals control whether GDB attempts to perform these
130 operations; they are useful for targets that need to prevent
131 inadvertent disruption, such as in non-stop mode. */
132
133 bool may_write_registers = true;
134
135 bool may_write_memory = true;
136
137 bool may_insert_breakpoints = true;
138
139 bool may_insert_tracepoints = true;
140
141 bool may_insert_fast_tracepoints = true;
142
143 bool may_stop = true;
144
145 /* Non-zero if we want to see a trace of target-level operations. */
146
147 static unsigned int targetdebug = 0;
148
149 static void
150 set_targetdebug (const char *args, int from_tty, struct cmd_list_element *c)
151 {
152 if (targetdebug)
153 current_inferior ()->push_target (the_debug_target);
154 else
155 current_inferior ()->unpush_target (the_debug_target);
156 }
157
158 static void
159 show_targetdebug (struct ui_file *file, int from_tty,
160 struct cmd_list_element *c, const char *value)
161 {
162 gdb_printf (file, _("Target debugging is %s.\n"), value);
163 }
164
165 int
166 target_has_memory ()
167 {
168 for (target_ops *t = current_inferior ()->top_target ();
169 t != NULL;
170 t = t->beneath ())
171 if (t->has_memory ())
172 return 1;
173
174 return 0;
175 }
176
177 int
178 target_has_stack ()
179 {
180 for (target_ops *t = current_inferior ()->top_target ();
181 t != NULL;
182 t = t->beneath ())
183 if (t->has_stack ())
184 return 1;
185
186 return 0;
187 }
188
189 int
190 target_has_registers ()
191 {
192 for (target_ops *t = current_inferior ()->top_target ();
193 t != NULL;
194 t = t->beneath ())
195 if (t->has_registers ())
196 return 1;
197
198 return 0;
199 }
200
201 bool
202 target_has_execution (inferior *inf)
203 {
204 if (inf == nullptr)
205 inf = current_inferior ();
206
207 for (target_ops *t = inf->top_target ();
208 t != nullptr;
209 t = inf->find_target_beneath (t))
210 if (t->has_execution (inf))
211 return true;
212
213 return false;
214 }
215
216 const char *
217 target_shortname ()
218 {
219 return current_inferior ()->top_target ()->shortname ();
220 }
221
222 /* See target.h. */
223
224 bool
225 target_attach_no_wait ()
226 {
227 return current_inferior ()->top_target ()->attach_no_wait ();
228 }
229
230 /* See target.h. */
231
232 void
233 target_post_attach (int pid)
234 {
235 return current_inferior ()->top_target ()->post_attach (pid);
236 }
237
238 /* See target.h. */
239
240 void
241 target_prepare_to_store (regcache *regcache)
242 {
243 return current_inferior ()->top_target ()->prepare_to_store (regcache);
244 }
245
246 /* See target.h. */
247
248 bool
249 target_supports_enable_disable_tracepoint ()
250 {
251 target_ops *target = current_inferior ()->top_target ();
252
253 return target->supports_enable_disable_tracepoint ();
254 }
255
256 bool
257 target_supports_string_tracing ()
258 {
259 return current_inferior ()->top_target ()->supports_string_tracing ();
260 }
261
262 /* See target.h. */
263
264 bool
265 target_supports_evaluation_of_breakpoint_conditions ()
266 {
267 target_ops *target = current_inferior ()->top_target ();
268
269 return target->supports_evaluation_of_breakpoint_conditions ();
270 }
271
272 /* See target.h. */
273
274 bool
275 target_supports_dumpcore ()
276 {
277 return current_inferior ()->top_target ()->supports_dumpcore ();
278 }
279
280 /* See target.h. */
281
282 void
283 target_dumpcore (const char *filename)
284 {
285 return current_inferior ()->top_target ()->dumpcore (filename);
286 }
287
288 /* See target.h. */
289
290 bool
291 target_can_run_breakpoint_commands ()
292 {
293 return current_inferior ()->top_target ()->can_run_breakpoint_commands ();
294 }
295
296 /* See target.h. */
297
298 void
299 target_files_info ()
300 {
301 return current_inferior ()->top_target ()->files_info ();
302 }
303
304 /* See target.h. */
305
306 int
307 target_insert_fork_catchpoint (int pid)
308 {
309 return current_inferior ()->top_target ()->insert_fork_catchpoint (pid);
310 }
311
312 /* See target.h. */
313
314 int
315 target_remove_fork_catchpoint (int pid)
316 {
317 return current_inferior ()->top_target ()->remove_fork_catchpoint (pid);
318 }
319
320 /* See target.h. */
321
322 int
323 target_insert_vfork_catchpoint (int pid)
324 {
325 return current_inferior ()->top_target ()->insert_vfork_catchpoint (pid);
326 }
327
328 /* See target.h. */
329
330 int
331 target_remove_vfork_catchpoint (int pid)
332 {
333 return current_inferior ()->top_target ()->remove_vfork_catchpoint (pid);
334 }
335
336 /* See target.h. */
337
338 int
339 target_insert_exec_catchpoint (int pid)
340 {
341 return current_inferior ()->top_target ()->insert_exec_catchpoint (pid);
342 }
343
344 /* See target.h. */
345
346 int
347 target_remove_exec_catchpoint (int pid)
348 {
349 return current_inferior ()->top_target ()->remove_exec_catchpoint (pid);
350 }
351
352 /* See target.h. */
353
354 int
355 target_set_syscall_catchpoint (int pid, bool needed, int any_count,
356 gdb::array_view<const int> syscall_counts)
357 {
358 target_ops *target = current_inferior ()->top_target ();
359
360 return target->set_syscall_catchpoint (pid, needed, any_count,
361 syscall_counts);
362 }
363
364 /* See target.h. */
365
366 void
367 target_rcmd (const char *command, struct ui_file *outbuf)
368 {
369 return current_inferior ()->top_target ()->rcmd (command, outbuf);
370 }
371
372 /* See target.h. */
373
374 bool
375 target_can_lock_scheduler ()
376 {
377 target_ops *target = current_inferior ()->top_target ();
378
379 return (target->get_thread_control_capabilities () & tc_schedlock) != 0;
380 }
381
382 /* See target.h. */
383
384 bool
385 target_can_async_p ()
386 {
387 return target_can_async_p (current_inferior ()->top_target ());
388 }
389
390 /* See target.h. */
391
392 bool
393 target_can_async_p (struct target_ops *target)
394 {
395 if (!target_async_permitted)
396 return false;
397 return target->can_async_p ();
398 }
399
400 /* See target.h. */
401
402 bool
403 target_is_async_p ()
404 {
405 bool result = current_inferior ()->top_target ()->is_async_p ();
406 gdb_assert (target_async_permitted || !result);
407 return result;
408 }
409
410 exec_direction_kind
411 target_execution_direction ()
412 {
413 return current_inferior ()->top_target ()->execution_direction ();
414 }
415
416 /* See target.h. */
417
418 const char *
419 target_extra_thread_info (thread_info *tp)
420 {
421 return current_inferior ()->top_target ()->extra_thread_info (tp);
422 }
423
424 /* See target.h. */
425
426 char *
427 target_pid_to_exec_file (int pid)
428 {
429 return current_inferior ()->top_target ()->pid_to_exec_file (pid);
430 }
431
432 /* See target.h. */
433
434 gdbarch *
435 target_thread_architecture (ptid_t ptid)
436 {
437 return current_inferior ()->top_target ()->thread_architecture (ptid);
438 }
439
440 /* See target.h. */
441
442 int
443 target_find_memory_regions (find_memory_region_ftype func, void *data)
444 {
445 return current_inferior ()->top_target ()->find_memory_regions (func, data);
446 }
447
448 /* See target.h. */
449
450 gdb::unique_xmalloc_ptr<char>
451 target_make_corefile_notes (bfd *bfd, int *size_p)
452 {
453 return current_inferior ()->top_target ()->make_corefile_notes (bfd, size_p);
454 }
455
456 gdb_byte *
457 target_get_bookmark (const char *args, int from_tty)
458 {
459 return current_inferior ()->top_target ()->get_bookmark (args, from_tty);
460 }
461
462 void
463 target_goto_bookmark (const gdb_byte *arg, int from_tty)
464 {
465 return current_inferior ()->top_target ()->goto_bookmark (arg, from_tty);
466 }
467
468 /* See target.h. */
469
470 bool
471 target_stopped_by_watchpoint ()
472 {
473 return current_inferior ()->top_target ()->stopped_by_watchpoint ();
474 }
475
476 /* See target.h. */
477
478 bool
479 target_stopped_by_sw_breakpoint ()
480 {
481 return current_inferior ()->top_target ()->stopped_by_sw_breakpoint ();
482 }
483
484 bool
485 target_supports_stopped_by_sw_breakpoint ()
486 {
487 target_ops *target = current_inferior ()->top_target ();
488
489 return target->supports_stopped_by_sw_breakpoint ();
490 }
491
492 bool
493 target_stopped_by_hw_breakpoint ()
494 {
495 return current_inferior ()->top_target ()->stopped_by_hw_breakpoint ();
496 }
497
498 bool
499 target_supports_stopped_by_hw_breakpoint ()
500 {
501 target_ops *target = current_inferior ()->top_target ();
502
503 return target->supports_stopped_by_hw_breakpoint ();
504 }
505
506 /* See target.h. */
507
508 bool
509 target_have_steppable_watchpoint ()
510 {
511 return current_inferior ()->top_target ()->have_steppable_watchpoint ();
512 }
513
514 /* See target.h. */
515
516 int
517 target_can_use_hardware_watchpoint (bptype type, int cnt, int othertype)
518 {
519 target_ops *target = current_inferior ()->top_target ();
520
521 return target->can_use_hw_breakpoint (type, cnt, othertype);
522 }
523
524 /* See target.h. */
525
526 int
527 target_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
528 {
529 target_ops *target = current_inferior ()->top_target ();
530
531 return target->region_ok_for_hw_watchpoint (addr, len);
532 }
533
534
535 int
536 target_can_do_single_step ()
537 {
538 return current_inferior ()->top_target ()->can_do_single_step ();
539 }
540
541 /* See target.h. */
542
543 int
544 target_insert_watchpoint (CORE_ADDR addr, int len, target_hw_bp_type type,
545 expression *cond)
546 {
547 target_ops *target = current_inferior ()->top_target ();
548
549 return target->insert_watchpoint (addr, len, type, cond);
550 }
551
552 /* See target.h. */
553
554 int
555 target_remove_watchpoint (CORE_ADDR addr, int len, target_hw_bp_type type,
556 expression *cond)
557 {
558 target_ops *target = current_inferior ()->top_target ();
559
560 return target->remove_watchpoint (addr, len, type, cond);
561 }
562
563 /* See target.h. */
564
565 int
566 target_insert_hw_breakpoint (gdbarch *gdbarch, bp_target_info *bp_tgt)
567 {
568 target_ops *target = current_inferior ()->top_target ();
569
570 return target->insert_hw_breakpoint (gdbarch, bp_tgt);
571 }
572
573 /* See target.h. */
574
575 int
576 target_remove_hw_breakpoint (gdbarch *gdbarch, bp_target_info *bp_tgt)
577 {
578 target_ops *target = current_inferior ()->top_target ();
579
580 return target->remove_hw_breakpoint (gdbarch, bp_tgt);
581 }
582
583 /* See target.h. */
584
585 bool
586 target_can_accel_watchpoint_condition (CORE_ADDR addr, int len, int type,
587 expression *cond)
588 {
589 target_ops *target = current_inferior ()->top_target ();
590
591 return target->can_accel_watchpoint_condition (addr, len, type, cond);
592 }
593
594 /* See target.h. */
595
596 bool
597 target_can_execute_reverse ()
598 {
599 return current_inferior ()->top_target ()->can_execute_reverse ();
600 }
601
602 ptid_t
603 target_get_ada_task_ptid (long lwp, ULONGEST tid)
604 {
605 return current_inferior ()->top_target ()->get_ada_task_ptid (lwp, tid);
606 }
607
608 bool
609 target_filesystem_is_local ()
610 {
611 return current_inferior ()->top_target ()->filesystem_is_local ();
612 }
613
614 void
615 target_trace_init ()
616 {
617 return current_inferior ()->top_target ()->trace_init ();
618 }
619
620 void
621 target_download_tracepoint (bp_location *location)
622 {
623 return current_inferior ()->top_target ()->download_tracepoint (location);
624 }
625
626 bool
627 target_can_download_tracepoint ()
628 {
629 return current_inferior ()->top_target ()->can_download_tracepoint ();
630 }
631
632 void
633 target_download_trace_state_variable (const trace_state_variable &tsv)
634 {
635 target_ops *target = current_inferior ()->top_target ();
636
637 return target->download_trace_state_variable (tsv);
638 }
639
640 void
641 target_enable_tracepoint (bp_location *loc)
642 {
643 return current_inferior ()->top_target ()->enable_tracepoint (loc);
644 }
645
646 void
647 target_disable_tracepoint (bp_location *loc)
648 {
649 return current_inferior ()->top_target ()->disable_tracepoint (loc);
650 }
651
652 void
653 target_trace_start ()
654 {
655 return current_inferior ()->top_target ()->trace_start ();
656 }
657
658 void
659 target_trace_set_readonly_regions ()
660 {
661 return current_inferior ()->top_target ()->trace_set_readonly_regions ();
662 }
663
664 int
665 target_get_trace_status (trace_status *ts)
666 {
667 return current_inferior ()->top_target ()->get_trace_status (ts);
668 }
669
670 void
671 target_get_tracepoint_status (breakpoint *tp, uploaded_tp *utp)
672 {
673 return current_inferior ()->top_target ()->get_tracepoint_status (tp, utp);
674 }
675
676 void
677 target_trace_stop ()
678 {
679 return current_inferior ()->top_target ()->trace_stop ();
680 }
681
682 int
683 target_trace_find (trace_find_type type, int num,
684 CORE_ADDR addr1, CORE_ADDR addr2, int *tpp)
685 {
686 target_ops *target = current_inferior ()->top_target ();
687
688 return target->trace_find (type, num, addr1, addr2, tpp);
689 }
690
691 bool
692 target_get_trace_state_variable_value (int tsv, LONGEST *val)
693 {
694 target_ops *target = current_inferior ()->top_target ();
695
696 return target->get_trace_state_variable_value (tsv, val);
697 }
698
699 int
700 target_save_trace_data (const char *filename)
701 {
702 return current_inferior ()->top_target ()->save_trace_data (filename);
703 }
704
705 int
706 target_upload_tracepoints (uploaded_tp **utpp)
707 {
708 return current_inferior ()->top_target ()->upload_tracepoints (utpp);
709 }
710
711 int
712 target_upload_trace_state_variables (uploaded_tsv **utsvp)
713 {
714 target_ops *target = current_inferior ()->top_target ();
715
716 return target->upload_trace_state_variables (utsvp);
717 }
718
719 LONGEST
720 target_get_raw_trace_data (gdb_byte *buf, ULONGEST offset, LONGEST len)
721 {
722 target_ops *target = current_inferior ()->top_target ();
723
724 return target->get_raw_trace_data (buf, offset, len);
725 }
726
727 int
728 target_get_min_fast_tracepoint_insn_len ()
729 {
730 target_ops *target = current_inferior ()->top_target ();
731
732 return target->get_min_fast_tracepoint_insn_len ();
733 }
734
735 void
736 target_set_disconnected_tracing (int val)
737 {
738 return current_inferior ()->top_target ()->set_disconnected_tracing (val);
739 }
740
741 void
742 target_set_circular_trace_buffer (int val)
743 {
744 return current_inferior ()->top_target ()->set_circular_trace_buffer (val);
745 }
746
747 void
748 target_set_trace_buffer_size (LONGEST val)
749 {
750 return current_inferior ()->top_target ()->set_trace_buffer_size (val);
751 }
752
753 bool
754 target_set_trace_notes (const char *user, const char *notes,
755 const char *stopnotes)
756 {
757 target_ops *target = current_inferior ()->top_target ();
758
759 return target->set_trace_notes (user, notes, stopnotes);
760 }
761
762 bool
763 target_get_tib_address (ptid_t ptid, CORE_ADDR *addr)
764 {
765 return current_inferior ()->top_target ()->get_tib_address (ptid, addr);
766 }
767
768 void
769 target_set_permissions ()
770 {
771 return current_inferior ()->top_target ()->set_permissions ();
772 }
773
774 bool
775 target_static_tracepoint_marker_at (CORE_ADDR addr,
776 static_tracepoint_marker *marker)
777 {
778 target_ops *target = current_inferior ()->top_target ();
779
780 return target->static_tracepoint_marker_at (addr, marker);
781 }
782
783 std::vector<static_tracepoint_marker>
784 target_static_tracepoint_markers_by_strid (const char *marker_id)
785 {
786 target_ops *target = current_inferior ()->top_target ();
787
788 return target->static_tracepoint_markers_by_strid (marker_id);
789 }
790
791 traceframe_info_up
792 target_traceframe_info ()
793 {
794 return current_inferior ()->top_target ()->traceframe_info ();
795 }
796
797 bool
798 target_use_agent (bool use)
799 {
800 return current_inferior ()->top_target ()->use_agent (use);
801 }
802
803 bool
804 target_can_use_agent ()
805 {
806 return current_inferior ()->top_target ()->can_use_agent ();
807 }
808
809 bool
810 target_augmented_libraries_svr4_read ()
811 {
812 return current_inferior ()->top_target ()->augmented_libraries_svr4_read ();
813 }
814
815 bool
816 target_supports_memory_tagging ()
817 {
818 return current_inferior ()->top_target ()->supports_memory_tagging ();
819 }
820
821 bool
822 target_fetch_memtags (CORE_ADDR address, size_t len, gdb::byte_vector &tags,
823 int type)
824 {
825 return current_inferior ()->top_target ()->fetch_memtags (address, len, tags, type);
826 }
827
828 bool
829 target_store_memtags (CORE_ADDR address, size_t len,
830 const gdb::byte_vector &tags, int type)
831 {
832 return current_inferior ()->top_target ()->store_memtags (address, len, tags, type);
833 }
834
835 void
836 target_log_command (const char *p)
837 {
838 return current_inferior ()->top_target ()->log_command (p);
839 }
840
841 /* This is used to implement the various target commands. */
842
843 static void
844 open_target (const char *args, int from_tty, struct cmd_list_element *command)
845 {
846 auto *ti = static_cast<target_info *> (command->context ());
847 target_open_ftype *func = target_factories[ti];
848
849 if (targetdebug)
850 gdb_printf (gdb_stdlog, "-> %s->open (...)\n",
851 ti->shortname);
852
853 func (args, from_tty);
854
855 if (targetdebug)
856 gdb_printf (gdb_stdlog, "<- %s->open (%s, %d)\n",
857 ti->shortname, args, from_tty);
858 }
859
860 /* See target.h. */
861
862 void
863 add_target (const target_info &t, target_open_ftype *func,
864 completer_ftype *completer)
865 {
866 struct cmd_list_element *c;
867
868 auto &func_slot = target_factories[&t];
869 if (func_slot != nullptr)
870 internal_error (__FILE__, __LINE__,
871 _("target already added (\"%s\")."), t.shortname);
872 func_slot = func;
873
874 if (targetlist == NULL)
875 add_basic_prefix_cmd ("target", class_run, _("\
876 Connect to a target machine or process.\n\
877 The first argument is the type or protocol of the target machine.\n\
878 Remaining arguments are interpreted by the target protocol. For more\n\
879 information on the arguments for a particular protocol, type\n\
880 `help target ' followed by the protocol name."),
881 &targetlist, 0, &cmdlist);
882 c = add_cmd (t.shortname, no_class, t.doc, &targetlist);
883 c->set_context ((void *) &t);
884 c->func = open_target;
885 if (completer != NULL)
886 set_cmd_completer (c, completer);
887 }
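
/* For illustration only: the names below (frob_*) are made up and are
   not part of GDB.  A hypothetical target would typically register
   itself from its _initialize_* function roughly like this:

     static const target_info frob_target_info = {
       "frob",
       N_("Frobnicator remote target"),
       N_("Use: target frob HOST:PORT")
     };

     static void
     frob_target_open (const char *args, int from_tty)
     {
       ...parse ARGS, connect, then push a target_ops onto the stack...
     }

     void
     _initialize_frob_target ()
     {
       add_target (frob_target_info, frob_target_open);
     }

   After registration, "target frob HOST:PORT" reaches frob_target_open
   through open_target above.  */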
888
889 /* See target.h. */
890
891 void
892 add_deprecated_target_alias (const target_info &tinfo, const char *alias)
893 {
894 struct cmd_list_element *c;
895
896 /* If we used add_alias_cmd here, we would not get the deprecated warning;
897 see PR cli/15104. */
898 c = add_cmd (alias, no_class, tinfo.doc, &targetlist);
899 c->func = open_target;
900 c->set_context ((void *) &tinfo);
901 gdb::unique_xmalloc_ptr<char> alt
902 = xstrprintf ("target %s", tinfo.shortname);
903 deprecate_cmd (c, alt.release ());
904 }
905
906 /* Stub functions */
907
908 void
909 target_kill (void)
910 {
911 current_inferior ()->top_target ()->kill ();
912 }
913
914 void
915 target_load (const char *arg, int from_tty)
916 {
917 target_dcache_invalidate ();
918 current_inferior ()->top_target ()->load (arg, from_tty);
919 }
920
921 /* Define it. */
922
923 target_terminal_state target_terminal::m_terminal_state
924 = target_terminal_state::is_ours;
925
926 /* See target/target.h. */
927
928 void
929 target_terminal::init (void)
930 {
931 current_inferior ()->top_target ()->terminal_init ();
932
933 m_terminal_state = target_terminal_state::is_ours;
934 }
935
936 /* See target/target.h. */
937
938 void
939 target_terminal::inferior (void)
940 {
941 struct ui *ui = current_ui;
942
943 /* A background resume (``run&'') should leave GDB in control of the
944 terminal. */
945 if (ui->prompt_state != PROMPT_BLOCKED)
946 return;
947
948 /* Since we always run the inferior in the main console (unless "set
949 inferior-tty" is in effect), when some UI other than the main one
950 calls target_terminal::inferior, then we leave the main UI's
951 terminal settings as is. */
952 if (ui != main_ui)
953 return;
954
955 /* If GDB is resuming the inferior in the foreground, install
956 inferior's terminal modes. */
957
958 struct inferior *inf = current_inferior ();
959
960 if (inf->terminal_state != target_terminal_state::is_inferior)
961 {
962 current_inferior ()->top_target ()->terminal_inferior ();
963 inf->terminal_state = target_terminal_state::is_inferior;
964 }
965
966 m_terminal_state = target_terminal_state::is_inferior;
967
968 /* If the user hit C-c before, pretend that it was hit right
969 here. */
970 if (check_quit_flag ())
971 target_pass_ctrlc ();
972 }
973
974 /* See target/target.h. */
975
976 void
977 target_terminal::restore_inferior (void)
978 {
979 struct ui *ui = current_ui;
980
981 /* See target_terminal::inferior(). */
982 if (ui->prompt_state != PROMPT_BLOCKED || ui != main_ui)
983 return;
984
985 /* Restore the terminal settings of inferiors that were in the
986 foreground but are now ours_for_output due to a temporary
987 target_terminal::ours_for_output() call. */
988
989 {
990 scoped_restore_current_inferior restore_inferior;
991
992 for (::inferior *inf : all_inferiors ())
993 {
994 if (inf->terminal_state == target_terminal_state::is_ours_for_output)
995 {
996 set_current_inferior (inf);
997 current_inferior ()->top_target ()->terminal_inferior ();
998 inf->terminal_state = target_terminal_state::is_inferior;
999 }
1000 }
1001 }
1002
1003 m_terminal_state = target_terminal_state::is_inferior;
1004
1005 /* If the user hit C-c before, pretend that it was hit right
1006 here. */
1007 if (check_quit_flag ())
1008 target_pass_ctrlc ();
1009 }
1010
1011 /* Switch terminal state to DESIRED_STATE, either is_ours, or
1012 is_ours_for_output. */
1013
1014 static void
1015 target_terminal_is_ours_kind (target_terminal_state desired_state)
1016 {
1017 scoped_restore_current_inferior restore_inferior;
1018
1019 /* Must do this in two passes. First, have all inferiors save the
1020 current terminal settings. Then, after all inferiors have had a
1021 chance to safely save the terminal settings, restore GDB's
1022 terminal settings. */
1023
1024 for (inferior *inf : all_inferiors ())
1025 {
1026 if (inf->terminal_state == target_terminal_state::is_inferior)
1027 {
1028 set_current_inferior (inf);
1029 current_inferior ()->top_target ()->terminal_save_inferior ();
1030 }
1031 }
1032
1033 for (inferior *inf : all_inferiors ())
1034 {
1035 /* Note we don't check is_inferior here like above because we
1036 need to handle 'is_ours_for_output -> is_ours' too. Careful
1037 to never transition from 'is_ours' to 'is_ours_for_output',
1038 though. */
1039 if (inf->terminal_state != target_terminal_state::is_ours
1040 && inf->terminal_state != desired_state)
1041 {
1042 set_current_inferior (inf);
1043 if (desired_state == target_terminal_state::is_ours)
1044 current_inferior ()->top_target ()->terminal_ours ();
1045 else if (desired_state == target_terminal_state::is_ours_for_output)
1046 current_inferior ()->top_target ()->terminal_ours_for_output ();
1047 else
1048 gdb_assert_not_reached ("unhandled desired state");
1049 inf->terminal_state = desired_state;
1050 }
1051 }
1052 }
1053
1054 /* See target/target.h. */
1055
1056 void
1057 target_terminal::ours ()
1058 {
1059 struct ui *ui = current_ui;
1060
1061 /* See target_terminal::inferior. */
1062 if (ui != main_ui)
1063 return;
1064
1065 if (m_terminal_state == target_terminal_state::is_ours)
1066 return;
1067
1068 target_terminal_is_ours_kind (target_terminal_state::is_ours);
1069 m_terminal_state = target_terminal_state::is_ours;
1070 }
1071
1072 /* See target/target.h. */
1073
1074 void
1075 target_terminal::ours_for_output ()
1076 {
1077 struct ui *ui = current_ui;
1078
1079 /* See target_terminal::inferior. */
1080 if (ui != main_ui)
1081 return;
1082
1083 if (!target_terminal::is_inferior ())
1084 return;
1085
1086 target_terminal_is_ours_kind (target_terminal_state::is_ours_for_output);
1087 target_terminal::m_terminal_state = target_terminal_state::is_ours_for_output;
1088 }
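
/* Sketch of the usual caller pattern (illustrative only; the scoped
   restore helper is declared alongside target_terminal): grab the
   terminal just for output while the inferior is running, and let an
   RAII object put the previous state back:

     {
       target_terminal::scoped_restore_terminal_state term_state;
       target_terminal::ours_for_output ();
       ...print something...
     }   // TERM_STATE restores the prior terminal state here.  */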
1089
1090 /* See target/target.h. */
1091
1092 void
1093 target_terminal::info (const char *arg, int from_tty)
1094 {
1095 current_inferior ()->top_target ()->terminal_info (arg, from_tty);
1096 }
1097
1098 /* See target.h. */
1099
1100 bool
1101 target_supports_terminal_ours (void)
1102 {
1103 /* The current top target is the target at the top of the target
1104 stack of the current inferior. While normally there's always an
1105 inferior, we must check for nullptr here because we can get here
1106 very early during startup, before the initial inferior is first
1107 created. */
1108 inferior *inf = current_inferior ();
1109
1110 if (inf == nullptr)
1111 return false;
1112 return inf->top_target ()->supports_terminal_ours ();
1113 }
1114
1115 static void
1116 tcomplain (void)
1117 {
1118 error (_("You can't do that when your target is `%s'"),
1119 current_inferior ()->top_target ()->shortname ());
1120 }
1121
1122 void
1123 noprocess (void)
1124 {
1125 error (_("You can't do that without a process to debug."));
1126 }
1127
1128 static void
1129 default_terminal_info (struct target_ops *self, const char *args, int from_tty)
1130 {
1131 gdb_printf (_("No saved terminal information.\n"));
1132 }
1133
1134 /* A default implementation for the to_get_ada_task_ptid target method.
1135
1136 This function builds the PTID by using both LWP and TID as part of
1137 the PTID lwp and tid elements. The pid used is the pid of the
1138 inferior_ptid. */
1139
1140 static ptid_t
1141 default_get_ada_task_ptid (struct target_ops *self, long lwp, ULONGEST tid)
1142 {
1143 return ptid_t (inferior_ptid.pid (), lwp, tid);
1144 }
1145
1146 static enum exec_direction_kind
1147 default_execution_direction (struct target_ops *self)
1148 {
1149 if (!target_can_execute_reverse ())
1150 return EXEC_FORWARD;
1151 else if (!target_can_async_p ())
1152 return EXEC_FORWARD;
1153 else
1154 gdb_assert_not_reached ("\
1155 to_execution_direction must be implemented for reverse async");
1156 }
1157
1158 /* See target.h. */
1159
1160 void
1161 decref_target (target_ops *t)
1162 {
1163 t->decref ();
1164 if (t->refcount () == 0)
1165 {
1166 if (t->stratum () == process_stratum)
1167 connection_list_remove (as_process_stratum_target (t));
1168 target_close (t);
1169 }
1170 }
1171
1172 /* See target.h. */
1173
1174 void
1175 target_stack::push (target_ops *t)
1176 {
1177 t->incref ();
1178
1179 strata stratum = t->stratum ();
1180
1181 if (stratum == process_stratum)
1182 connection_list_add (as_process_stratum_target (t));
1183
1184 /* If there's already a target at this stratum, remove it. */
1185
1186 if (m_stack[stratum] != NULL)
1187 unpush (m_stack[stratum]);
1188
1189 /* Now add the new one. */
1190 m_stack[stratum] = t;
1191
1192 if (m_top < stratum)
1193 m_top = stratum;
1194 }
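
/* Illustration of the replace-at-stratum behavior (hypothetical stack
   contents): if the stack currently holds { dummy, exec, remote } and
   another process_stratum target is pushed, the existing remote target
   at that stratum is unpushed first, so each stratum holds at most one
   target and M_TOP tracks the highest occupied stratum.  */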
1195
1196 /* See target.h. */
1197
1198 bool
1199 target_stack::unpush (target_ops *t)
1200 {
1201 gdb_assert (t != NULL);
1202
1203 strata stratum = t->stratum ();
1204
1205 if (stratum == dummy_stratum)
1206 internal_error (__FILE__, __LINE__,
1207 _("Attempt to unpush the dummy target"));
1208
1209 /* Look for the specified target. Note that a target can only occur
1210 once in the target stack. */
1211
1212 if (m_stack[stratum] != t)
1213 {
1214 /* If T wasn't pushed, quit. Only open targets should be
1215 closed. */
1216 return false;
1217 }
1218
1219 /* Unchain the target. */
1220 m_stack[stratum] = NULL;
1221
1222 if (m_top == stratum)
1223 m_top = this->find_beneath (t)->stratum ();
1224
1225 /* Finally close the target, if there are no inferiors
1226 referencing this target still. Note we do this after unchaining,
1227 so any target method calls from within the target_close
1228 implementation don't end up in T anymore. Do leave the target
1229 open if there are other inferiors still referencing this
1230 target. */
1231 decref_target (t);
1232
1233 return true;
1234 }
1235
1236 /* Unpush TARGET and assert that it worked. */
1237
1238 static void
1239 unpush_target_and_assert (struct target_ops *target)
1240 {
1241 if (!current_inferior ()->unpush_target (target))
1242 {
1243 gdb_printf (gdb_stderr,
1244 "pop_all_targets couldn't find target %s\n",
1245 target->shortname ());
1246 internal_error (__FILE__, __LINE__,
1247 _("failed internal consistency check"));
1248 }
1249 }
1250
1251 void
1252 pop_all_targets_above (enum strata above_stratum)
1253 {
1254 while ((int) (current_inferior ()->top_target ()->stratum ())
1255 > (int) above_stratum)
1256 unpush_target_and_assert (current_inferior ()->top_target ());
1257 }
1258
1259 /* See target.h. */
1260
1261 void
1262 pop_all_targets_at_and_above (enum strata stratum)
1263 {
1264 while ((int) (current_inferior ()->top_target ()->stratum ())
1265 >= (int) stratum)
1266 unpush_target_and_assert (current_inferior ()->top_target ());
1267 }
1268
1269 void
1270 pop_all_targets (void)
1271 {
1272 pop_all_targets_above (dummy_stratum);
1273 }
1274
1275 void
1276 target_unpusher::operator() (struct target_ops *ops) const
1277 {
1278 current_inferior ()->unpush_target (ops);
1279 }
1280
1281 /* Default implementation of to_get_thread_local_address. */
1282
1283 static void
1284 generic_tls_error (void)
1285 {
1286 throw_error (TLS_GENERIC_ERROR,
1287 _("Cannot find thread-local variables on this target"));
1288 }
1289
1290 /* Using the objfile specified in OBJFILE, find the address for the
1291 current thread's thread-local storage with offset OFFSET. */
1292 CORE_ADDR
1293 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
1294 {
1295 volatile CORE_ADDR addr = 0;
1296 struct target_ops *target = current_inferior ()->top_target ();
1297 struct gdbarch *gdbarch = target_gdbarch ();
1298
1299 /* If OBJFILE is a separate debug object file, look for the
1300 original object file. */
1301 if (objfile->separate_debug_objfile_backlink != NULL)
1302 objfile = objfile->separate_debug_objfile_backlink;
1303
1304 if (gdbarch_fetch_tls_load_module_address_p (gdbarch))
1305 {
1306 ptid_t ptid = inferior_ptid;
1307
1308 try
1309 {
1310 CORE_ADDR lm_addr;
1311
1312 /* Fetch the load module address for this objfile. */
1313 lm_addr = gdbarch_fetch_tls_load_module_address (gdbarch,
1314 objfile);
1315
1316 if (gdbarch_get_thread_local_address_p (gdbarch))
1317 addr = gdbarch_get_thread_local_address (gdbarch, ptid, lm_addr,
1318 offset);
1319 else
1320 addr = target->get_thread_local_address (ptid, lm_addr, offset);
1321 }
1322 /* If an error occurred, print TLS related messages here. Otherwise,
1323 throw the error to some higher catcher. */
1324 catch (const gdb_exception &ex)
1325 {
1326 int objfile_is_library = (objfile->flags & OBJF_SHARED);
1327
1328 switch (ex.error)
1329 {
1330 case TLS_NO_LIBRARY_SUPPORT_ERROR:
1331 error (_("Cannot find thread-local variables "
1332 "in this thread library."));
1333 break;
1334 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
1335 if (objfile_is_library)
1336 error (_("Cannot find shared library `%s' in dynamic"
1337 " linker's load module list"), objfile_name (objfile));
1338 else
1339 error (_("Cannot find executable file `%s' in dynamic"
1340 " linker's load module list"), objfile_name (objfile));
1341 break;
1342 case TLS_NOT_ALLOCATED_YET_ERROR:
1343 if (objfile_is_library)
1344 error (_("The inferior has not yet allocated storage for"
1345 " thread-local variables in\n"
1346 "the shared library `%s'\n"
1347 "for %s"),
1348 objfile_name (objfile),
1349 target_pid_to_str (ptid).c_str ());
1350 else
1351 error (_("The inferior has not yet allocated storage for"
1352 " thread-local variables in\n"
1353 "the executable `%s'\n"
1354 "for %s"),
1355 objfile_name (objfile),
1356 target_pid_to_str (ptid).c_str ());
1357 break;
1358 case TLS_GENERIC_ERROR:
1359 if (objfile_is_library)
1360 error (_("Cannot find thread-local storage for %s, "
1361 "shared library %s:\n%s"),
1362 target_pid_to_str (ptid).c_str (),
1363 objfile_name (objfile), ex.what ());
1364 else
1365 error (_("Cannot find thread-local storage for %s, "
1366 "executable file %s:\n%s"),
1367 target_pid_to_str (ptid).c_str (),
1368 objfile_name (objfile), ex.what ());
1369 break;
1370 default:
1371 throw;
1372 break;
1373 }
1374 }
1375 }
1376 else
1377 error (_("Cannot find thread-local variables on this target"));
1378
1379 return addr;
1380 }
1381
1382 const char *
1383 target_xfer_status_to_string (enum target_xfer_status status)
1384 {
1385 #define CASE(X) case X: return #X
1386 switch (status)
1387 {
1388 CASE(TARGET_XFER_E_IO);
1389 CASE(TARGET_XFER_UNAVAILABLE);
1390 default:
1391 return "<unknown>";
1392 }
1393 #undef CASE
1394 };
1395
1396
1397 /* See target.h. */
1398
1399 gdb::unique_xmalloc_ptr<char>
1400 target_read_string (CORE_ADDR memaddr, int len, int *bytes_read)
1401 {
1402 gdb::unique_xmalloc_ptr<gdb_byte> buffer;
1403
1404 int ignore;
1405 if (bytes_read == nullptr)
1406 bytes_read = &ignore;
1407
1408 /* Note that the endian-ness does not matter here. */
1409 int errcode = read_string (memaddr, -1, 1, len, BFD_ENDIAN_LITTLE,
1410 &buffer, bytes_read);
1411 if (errcode != 0)
1412 return {};
1413
1414 return gdb::unique_xmalloc_ptr<char> ((char *) buffer.release ());
1415 }
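
/* Illustrative use (hypothetical caller; ADDR and GDBARCH are assumed
   to be in scope): read a NUL-terminated string of at most 256 bytes
   from the inferior:

     int bytes_read;
     gdb::unique_xmalloc_ptr<char> str
       = target_read_string (addr, 256, &bytes_read);
     if (str == nullptr)
       error (_("cannot read string at %s"), paddress (gdbarch, addr));
*/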
1416
1417 const target_section_table *
1418 target_get_section_table (struct target_ops *target)
1419 {
1420 return target->get_section_table ();
1421 }
1422
1423 /* Find a section containing ADDR. */
1424
1425 const struct target_section *
1426 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1427 {
1428 const target_section_table *table = target_get_section_table (target);
1429
1430 if (table == NULL)
1431 return NULL;
1432
1433 for (const target_section &secp : *table)
1434 {
1435 if (addr >= secp.addr && addr < secp.endaddr)
1436 return &secp;
1437 }
1438 return NULL;
1439 }
1440
1441 /* See target.h. */
1442
1443 const target_section_table *
1444 default_get_section_table ()
1445 {
1446 return &current_program_space->target_sections ();
1447 }
1448
1449 /* Helper for the memory xfer routines. Checks the attributes of the
1450 memory region of MEMADDR against the read or write being attempted.
1451 If the access is permitted returns true, otherwise returns false.
1452 REGION_P is an optional output parameter. If not-NULL, it is
1453 filled with a pointer to the memory region of MEMADDR. REG_LEN
1454 returns LEN trimmed to the end of the region. This is how much the
1455 caller can continue requesting, if the access is permitted. A
1456 single xfer request must not straddle memory region boundaries. */
1457
1458 static int
1459 memory_xfer_check_region (gdb_byte *readbuf, const gdb_byte *writebuf,
1460 ULONGEST memaddr, ULONGEST len, ULONGEST *reg_len,
1461 struct mem_region **region_p)
1462 {
1463 struct mem_region *region;
1464
1465 region = lookup_mem_region (memaddr);
1466
1467 if (region_p != NULL)
1468 *region_p = region;
1469
1470 switch (region->attrib.mode)
1471 {
1472 case MEM_RO:
1473 if (writebuf != NULL)
1474 return 0;
1475 break;
1476
1477 case MEM_WO:
1478 if (readbuf != NULL)
1479 return 0;
1480 break;
1481
1482 case MEM_FLASH:
1483 /* We only support writing to flash during "load" for now. */
1484 if (writebuf != NULL)
1485 error (_("Writing to flash memory forbidden in this context"));
1486 break;
1487
1488 case MEM_NONE:
1489 return 0;
1490 }
1491
1492 /* region->hi == 0 means there's no upper bound. */
1493 if (memaddr + len < region->hi || region->hi == 0)
1494 *reg_len = len;
1495 else
1496 *reg_len = region->hi - memaddr;
1497
1498 return 1;
1499 }
1500
1501 /* Read memory from more than one valid target. A core file, for
1502 instance, could have some of the memory but delegate other bits to
1503 the target below it. So, we must manually try all targets. */
1504
1505 enum target_xfer_status
1506 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1507 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1508 ULONGEST *xfered_len)
1509 {
1510 enum target_xfer_status res;
1511
1512 do
1513 {
1514 res = ops->xfer_partial (TARGET_OBJECT_MEMORY, NULL,
1515 readbuf, writebuf, memaddr, len,
1516 xfered_len);
1517 if (res == TARGET_XFER_OK)
1518 break;
1519
1520 /* Stop if the target reports that the memory is not available. */
1521 if (res == TARGET_XFER_UNAVAILABLE)
1522 break;
1523
1524 /* Don't continue past targets which have all the memory.
1525 At one time, this code was necessary to read data from
1526 executables / shared libraries when data for the requested
1527 addresses weren't available in the core file. But now the
1528 core target handles this case itself. */
1529 if (ops->has_all_memory ())
1530 break;
1531
1532 ops = ops->beneath ();
1533 }
1534 while (ops != NULL);
1535
1536 /* The cache works at the raw memory level. Make sure the cache
1537 gets updated with raw contents no matter what kind of memory
1538 object was originally being written. Note we do write-through
1539 first, so that if it fails, we don't write to the cache contents
1540 that never made it to the target. */
1541 if (writebuf != NULL
1542 && inferior_ptid != null_ptid
1543 && target_dcache_init_p ()
1544 && (stack_cache_enabled_p () || code_cache_enabled_p ()))
1545 {
1546 DCACHE *dcache = target_dcache_get ();
1547
1548 /* Note that writing to an area of memory which wasn't present
1549 in the cache doesn't cause it to be loaded in. */
1550 dcache_update (dcache, res, memaddr, writebuf, *xfered_len);
1551 }
1552
1553 return res;
1554 }
1555
1556 /* Perform a partial memory transfer.
1557 For docs see target.h, to_xfer_partial. */
1558
1559 static enum target_xfer_status
1560 memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
1561 gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
1562 ULONGEST len, ULONGEST *xfered_len)
1563 {
1564 enum target_xfer_status res;
1565 ULONGEST reg_len;
1566 struct mem_region *region;
1567 struct inferior *inf;
1568
1569 /* For accesses to unmapped overlay sections, read directly from
1570 files. Must do this first, as MEMADDR may need adjustment. */
1571 if (readbuf != NULL && overlay_debugging)
1572 {
1573 struct obj_section *section = find_pc_overlay (memaddr);
1574
1575 if (pc_in_unmapped_range (memaddr, section))
1576 {
1577 const target_section_table *table = target_get_section_table (ops);
1578 const char *section_name = section->the_bfd_section->name;
1579
1580 memaddr = overlay_mapped_address (memaddr, section);
1581
1582 auto match_cb = [=] (const struct target_section *s)
1583 {
1584 return (strcmp (section_name, s->the_bfd_section->name) == 0);
1585 };
1586
1587 return section_table_xfer_memory_partial (readbuf, writebuf,
1588 memaddr, len, xfered_len,
1589 *table, match_cb);
1590 }
1591 }
1592
1593 /* Try the executable files, if "trust-readonly-sections" is set. */
1594 if (readbuf != NULL && trust_readonly)
1595 {
1596 const struct target_section *secp
1597 = target_section_by_addr (ops, memaddr);
1598 if (secp != NULL
1599 && (bfd_section_flags (secp->the_bfd_section) & SEC_READONLY))
1600 {
1601 const target_section_table *table = target_get_section_table (ops);
1602 return section_table_xfer_memory_partial (readbuf, writebuf,
1603 memaddr, len, xfered_len,
1604 *table);
1605 }
1606 }
1607
1608 /* Try GDB's internal data cache. */
1609
1610 if (!memory_xfer_check_region (readbuf, writebuf, memaddr, len, &reg_len,
1611 &region))
1612 return TARGET_XFER_E_IO;
1613
1614 if (inferior_ptid != null_ptid)
1615 inf = current_inferior ();
1616 else
1617 inf = NULL;
1618
1619 if (inf != NULL
1620 && readbuf != NULL
1621 /* The dcache reads whole cache lines; that doesn't play well
1622 with reading from a trace buffer, because reading outside of
1623 the collected memory range fails. */
1624 && get_traceframe_number () == -1
1625 && (region->attrib.cache
1626 || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
1627 || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
1628 {
1629 DCACHE *dcache = target_dcache_get_or_init ();
1630
1631 return dcache_read_memory_partial (ops, dcache, memaddr, readbuf,
1632 reg_len, xfered_len);
1633 }
1634
1635 /* If none of those methods found the memory we wanted, fall back
1636 to a target partial transfer. Normally a single call to
1637 to_xfer_partial is enough; if it doesn't recognize an object
1638 it will call the to_xfer_partial of the next target down.
1639 But for memory this won't do. Memory is the only target
1640 object which can be read from more than one valid target.
1641 A core file, for instance, could have some of the memory but
1642 delegate other bits to the target below it. So, we must
1643 manually try all targets. */
1644
1645 res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
1646 xfered_len);
1647
1648 /* If we still haven't got anything, return the last error. We
1649 give up. */
1650 return res;
1651 }
1652
1653 /* Perform a partial memory transfer. For docs see target.h,
1654 to_xfer_partial. */
1655
1656 static enum target_xfer_status
1657 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1658 gdb_byte *readbuf, const gdb_byte *writebuf,
1659 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1660 {
1661 enum target_xfer_status res;
1662
1663 /* Zero length requests are ok and require no work. */
1664 if (len == 0)
1665 return TARGET_XFER_EOF;
1666
1667 memaddr = address_significant (target_gdbarch (), memaddr);
1668
1669 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1670 breakpoint insns, thus hiding out from higher layers whether
1671 there are software breakpoints inserted in the code stream. */
1672 if (readbuf != NULL)
1673 {
1674 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1675 xfered_len);
1676
1677 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1678 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, *xfered_len);
1679 }
1680 else
1681 {
1682 /* A large write request is likely to be partially satisfied
1683 by memory_xfer_partial_1. We will continually malloc
1684 and free a copy of the entire write request for breakpoint
1685 shadow handling even though we only end up writing a small
1686 subset of it. Cap writes to a limit specified by the target
1687 to mitigate this. */
1688 len = std::min (ops->get_memory_xfer_limit (), len);
1689
1690 gdb::byte_vector buf (writebuf, writebuf + len);
1691 breakpoint_xfer_memory (NULL, buf.data (), writebuf, memaddr, len);
1692 res = memory_xfer_partial_1 (ops, object, NULL, buf.data (), memaddr, len,
1693 xfered_len);
1694 }
1695
1696 return res;
1697 }
1698
1699 scoped_restore_tmpl<int>
1700 make_scoped_restore_show_memory_breakpoints (int show)
1701 {
1702 return make_scoped_restore (&show_memory_breakpoints, show);
1703 }
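
/* Illustrative use (hypothetical caller; MEMADDR, BUF and LEN assumed
   in scope): read memory with the breakpoint instructions left visible
   instead of the shadowed original bytes, restoring the setting on
   scope exit:

     {
       scoped_restore restore_show
	 = make_scoped_restore_show_memory_breakpoints (1);
       target_read_memory (memaddr, buf, len);
     }   // show_memory_breakpoints is restored here.  */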
1704
1705 /* For docs see target.h, to_xfer_partial. */
1706
1707 enum target_xfer_status
1708 target_xfer_partial (struct target_ops *ops,
1709 enum target_object object, const char *annex,
1710 gdb_byte *readbuf, const gdb_byte *writebuf,
1711 ULONGEST offset, ULONGEST len,
1712 ULONGEST *xfered_len)
1713 {
1714 enum target_xfer_status retval;
1715
1716 /* Transfer is done when LEN is zero. */
1717 if (len == 0)
1718 return TARGET_XFER_EOF;
1719
1720 if (writebuf && !may_write_memory)
1721 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1722 core_addr_to_string_nz (offset), plongest (len));
1723
1724 *xfered_len = 0;
1725
1726 /* If this is a memory transfer, let the memory-specific code
1727 have a look at it instead. Memory transfers are more
1728 complicated. */
1729 if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
1730 || object == TARGET_OBJECT_CODE_MEMORY)
1731 retval = memory_xfer_partial (ops, object, readbuf,
1732 writebuf, offset, len, xfered_len);
1733 else if (object == TARGET_OBJECT_RAW_MEMORY)
1734 {
1735 /* Skip/avoid accessing the target if the memory region
1736 attributes block the access. Check this here instead of in
1737 raw_memory_xfer_partial as otherwise we'd end up checking
1738 this twice in the case where the memory_xfer_partial path is
1739 taken; once before checking the dcache, and again in the
1740 tail call to raw_memory_xfer_partial. */
1741 if (!memory_xfer_check_region (readbuf, writebuf, offset, len, &len,
1742 NULL))
1743 return TARGET_XFER_E_IO;
1744
1745 /* Request the normal memory object from other layers. */
1746 retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
1747 xfered_len);
1748 }
1749 else
1750 retval = ops->xfer_partial (object, annex, readbuf,
1751 writebuf, offset, len, xfered_len);
1752
1753 if (targetdebug)
1754 {
1755 const unsigned char *myaddr = NULL;
1756
1757 gdb_printf (gdb_stdlog,
1758 "%s:target_xfer_partial "
1759 "(%d, %s, %s, %s, %s, %s) = %d, %s",
1760 ops->shortname (),
1761 (int) object,
1762 (annex ? annex : "(null)"),
1763 host_address_to_string (readbuf),
1764 host_address_to_string (writebuf),
1765 core_addr_to_string_nz (offset),
1766 pulongest (len), retval,
1767 pulongest (*xfered_len));
1768
1769 if (readbuf)
1770 myaddr = readbuf;
1771 if (writebuf)
1772 myaddr = writebuf;
1773 if (retval == TARGET_XFER_OK && myaddr != NULL)
1774 {
1775 int i;
1776
1777 gdb_puts (", bytes =", gdb_stdlog);
1778 for (i = 0; i < *xfered_len; i++)
1779 {
1780 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1781 {
1782 if (targetdebug < 2 && i > 0)
1783 {
1784 gdb_printf (gdb_stdlog, " ...");
1785 break;
1786 }
1787 gdb_printf (gdb_stdlog, "\n");
1788 }
1789
1790 gdb_printf (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1791 }
1792 }
1793
1794 gdb_putc ('\n', gdb_stdlog);
1795 }
1796
1797 /* Check that implementations of to_xfer_partial update *XFERED_LEN
1798 properly. Do the assertion after printing debug messages, so that we
1799 can find more clues about an assertion failure from the debug messages. */
1800 if (retval == TARGET_XFER_OK || retval == TARGET_XFER_UNAVAILABLE)
1801 gdb_assert (*xfered_len > 0);
1802
1803 return retval;
1804 }
1805
1806 /* Read LEN bytes of target memory at address MEMADDR, placing the
1807 results in GDB's memory at MYADDR. Returns either 0 for success or
1808 -1 if any error occurs.
1809
1810 If an error occurs, no guarantee is made about the contents of the data at
1811 MYADDR. In particular, the caller should not depend upon partial reads
1812 filling the buffer with good data. There is no way for the caller to know
1813 how much good data might have been transferred anyway. Callers that can
1814 deal with partial reads should call target_read (which will retry until
1815 it makes no progress, and then return how much was transferred). */
1816
1817 int
1818 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1819 {
1820 if (target_read (current_inferior ()->top_target (),
1821 TARGET_OBJECT_MEMORY, NULL,
1822 myaddr, memaddr, len) == len)
1823 return 0;
1824 else
1825 return -1;
1826 }
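
/* Illustrative use (hypothetical caller; ADDR and GDBARCH assumed in
   scope): an all-or-nothing read as described above.  A caller that
   can make use of partial data should call target_read instead:

     gdb_byte buf[8];
     if (target_read_memory (addr, buf, sizeof buf) != 0)
       error (_("cannot read memory at %s"), paddress (gdbarch, addr));
*/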
1827
1828 /* See target/target.h. */
1829
1830 int
1831 target_read_uint32 (CORE_ADDR memaddr, uint32_t *result)
1832 {
1833 gdb_byte buf[4];
1834 int r;
1835
1836 r = target_read_memory (memaddr, buf, sizeof buf);
1837 if (r != 0)
1838 return r;
1839 *result = extract_unsigned_integer (buf, sizeof buf,
1840 gdbarch_byte_order (target_gdbarch ()));
1841 return 0;
1842 }
1843
1844 /* Like target_read_memory, but specify explicitly that this is a read
1845 from the target's raw memory. That is, this read bypasses the
1846 dcache, breakpoint shadowing, etc. */
1847
1848 int
1849 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1850 {
1851 if (target_read (current_inferior ()->top_target (),
1852 TARGET_OBJECT_RAW_MEMORY, NULL,
1853 myaddr, memaddr, len) == len)
1854 return 0;
1855 else
1856 return -1;
1857 }
1858
1859 /* Like target_read_memory, but specify explicitly that this is a read from
1860 the target's stack. This may trigger different cache behavior. */
1861
1862 int
1863 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1864 {
1865 if (target_read (current_inferior ()->top_target (),
1866 TARGET_OBJECT_STACK_MEMORY, NULL,
1867 myaddr, memaddr, len) == len)
1868 return 0;
1869 else
1870 return -1;
1871 }
1872
1873 /* Like target_read_memory, but specify explicitly that this is a read from
1874 the target's code. This may trigger different cache behavior. */
1875
1876 int
1877 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1878 {
1879 if (target_read (current_inferior ()->top_target (),
1880 TARGET_OBJECT_CODE_MEMORY, NULL,
1881 myaddr, memaddr, len) == len)
1882 return 0;
1883 else
1884 return -1;
1885 }
1886
1887 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1888 Returns either 0 for success or -1 if any error occurs. If an
1889 error occurs, no guarantee is made about how much data got written.
1890 Callers that can deal with partial writes should call
1891 target_write. */
1892
1893 int
1894 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1895 {
1896 if (target_write (current_inferior ()->top_target (),
1897 TARGET_OBJECT_MEMORY, NULL,
1898 myaddr, memaddr, len) == len)
1899 return 0;
1900 else
1901 return -1;
1902 }
1903
1904 /* Write LEN bytes from MYADDR to target raw memory at address
1905 MEMADDR. Returns either 0 for success or -1 if any error occurs.
1906 If an error occurs, no guarantee is made about how much data got
1907 written. Callers that can deal with partial writes should call
1908 target_write. */
1909
1910 int
1911 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1912 {
1913 if (target_write (current_inferior ()->top_target (),
1914 TARGET_OBJECT_RAW_MEMORY, NULL,
1915 myaddr, memaddr, len) == len)
1916 return 0;
1917 else
1918 return -1;
1919 }
1920
1921 /* Fetch the target's memory map. */
1922
1923 std::vector<mem_region>
1924 target_memory_map (void)
1925 {
1926 target_ops *target = current_inferior ()->top_target ();
1927 std::vector<mem_region> result = target->memory_map ();
1928 if (result.empty ())
1929 return result;
1930
1931 std::sort (result.begin (), result.end ());
1932
1933 /* Check that regions do not overlap. Simultaneously assign
1934 a numbering for the "mem" commands to use to refer to
1935 each region. */
1936 mem_region *last_one = NULL;
1937 for (size_t ix = 0; ix < result.size (); ix++)
1938 {
1939 mem_region *this_one = &result[ix];
1940 this_one->number = ix;
1941
1942 if (last_one != NULL && last_one->hi > this_one->lo)
1943 {
1944 warning (_("Overlapping regions in memory map: ignoring"));
1945 return std::vector<mem_region> ();
1946 }
1947
1948 last_one = this_one;
1949 }
1950
1951 return result;
1952 }
1953
1954 void
1955 target_flash_erase (ULONGEST address, LONGEST length)
1956 {
1957 current_inferior ()->top_target ()->flash_erase (address, length);
1958 }
1959
1960 void
1961 target_flash_done (void)
1962 {
1963 current_inferior ()->top_target ()->flash_done ();
1964 }
1965
1966 static void
1967 show_trust_readonly (struct ui_file *file, int from_tty,
1968 struct cmd_list_element *c, const char *value)
1969 {
1970 gdb_printf (file,
1971 _("Mode for reading from readonly sections is %s.\n"),
1972 value);
1973 }
1974
1975 /* Target vector read/write partial wrapper functions. */
1976
1977 static enum target_xfer_status
1978 target_read_partial (struct target_ops *ops,
1979 enum target_object object,
1980 const char *annex, gdb_byte *buf,
1981 ULONGEST offset, ULONGEST len,
1982 ULONGEST *xfered_len)
1983 {
1984 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
1985 xfered_len);
1986 }
1987
1988 static enum target_xfer_status
1989 target_write_partial (struct target_ops *ops,
1990 enum target_object object,
1991 const char *annex, const gdb_byte *buf,
1992 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
1993 {
1994 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
1995 xfered_len);
1996 }
1997
1998 /* Wrappers to perform the full transfer. */
1999
2000 /* For docs on target_read see target.h. */
2001
2002 LONGEST
2003 target_read (struct target_ops *ops,
2004 enum target_object object,
2005 const char *annex, gdb_byte *buf,
2006 ULONGEST offset, LONGEST len)
2007 {
2008 LONGEST xfered_total = 0;
2009 int unit_size = 1;
2010
2011 /* If we are reading from a memory object, find the length of an addressable
2012 unit for that architecture. */
2013 if (object == TARGET_OBJECT_MEMORY
2014 || object == TARGET_OBJECT_STACK_MEMORY
2015 || object == TARGET_OBJECT_CODE_MEMORY
2016 || object == TARGET_OBJECT_RAW_MEMORY)
2017 unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
2018
2019 while (xfered_total < len)
2020 {
2021 ULONGEST xfered_partial;
2022 enum target_xfer_status status;
2023
2024 status = target_read_partial (ops, object, annex,
2025 buf + xfered_total * unit_size,
2026 offset + xfered_total, len - xfered_total,
2027 &xfered_partial);
2028
2029 /* Call an observer, notifying them of the xfer progress? */
2030 if (status == TARGET_XFER_EOF)
2031 return xfered_total;
2032 else if (status == TARGET_XFER_OK)
2033 {
2034 xfered_total += xfered_partial;
2035 QUIT;
2036 }
2037 else
2038 return TARGET_XFER_E_IO;
2039
2040 }
2041 return len;
2042 }
2043
2044 /* Assuming that the entire [begin, end) range of memory cannot be
2045 read, try to read whatever subrange is possible to read.
2046
2047 The function returns, in RESULT, either zero or one memory block.
2048 If there's a readable subrange at the beginning, it is completely
2049 read and returned. Any further readable subrange will not be read.
2050 Otherwise, if there's a readable subrange at the end, it will be
2051 completely read and returned. Any readable subranges before it
2052 (obviously, not starting at the beginning) will be ignored. In
2053 other cases -- either no readable subrange, or readable subrange(s)
2054 that are neither at the beginning nor at the end -- nothing is returned.
2055
2056 The purpose of this function is to handle a read across a boundary
2057 of accessible memory in a case when memory map is not available.
2058 The above restrictions are fine for this case, but will give
2059 incorrect results if the memory is 'patchy'. However, supporting
2060 'patchy' memory would require trying to read every single byte,
2061 and that seems an unacceptable solution. An explicit memory map is
2062 recommended for this case -- and target_read_memory_robust will
2063 take care of reading multiple ranges then. */
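/* Worked example (illustrative): suppose [0x1000, 0x2000) cannot be
   read as a whole but its prefix [0x1000, 0x1800) is readable.  The
   probe of the first byte succeeds, so the bisection below repeatedly
   halves the still-unreadable range until the boundary has been
   narrowed down to roughly 0x1800, and a single block covering the
   readable prefix is appended to RESULT.  */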
2064
2065 static void
2066 read_whatever_is_readable (struct target_ops *ops,
2067 const ULONGEST begin, const ULONGEST end,
2068 int unit_size,
2069 std::vector<memory_read_result> *result)
2070 {
2071 ULONGEST current_begin = begin;
2072 ULONGEST current_end = end;
2073 int forward;
2074 ULONGEST xfered_len;
2075
2076 /* If we previously failed to read 1 byte, nothing can be done here. */
2077 if (end - begin <= 1)
2078 return;
2079
2080 gdb::unique_xmalloc_ptr<gdb_byte> buf ((gdb_byte *) xmalloc (end - begin));
2081
2082 /* Check that either the first or the last byte is readable, and give
2083 up if not. This heuristic is meant to permit reading accessible
2084 memory at the boundary of an accessible region. */
2085 if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2086 buf.get (), begin, 1, &xfered_len) == TARGET_XFER_OK)
2087 {
2088 forward = 1;
2089 ++current_begin;
2090 }
2091 else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2092 buf.get () + (end - begin) - 1, end - 1, 1,
2093 &xfered_len) == TARGET_XFER_OK)
2094 {
2095 forward = 0;
2096 --current_end;
2097 }
2098 else
2099 return;
2100
2101 /* The loop invariant is that the range [current_begin, current_end)
2102 was previously found to be not readable as a whole.
2103
2104 Note the loop condition: if the range has only 1 byte, we can't divide
2105 it, so there's no point in trying further. */
2106 while (current_end - current_begin > 1)
2107 {
2108 ULONGEST first_half_begin, first_half_end;
2109 ULONGEST second_half_begin, second_half_end;
2110 LONGEST xfer;
2111 ULONGEST middle = current_begin + (current_end - current_begin) / 2;
2112
2113 if (forward)
2114 {
2115 first_half_begin = current_begin;
2116 first_half_end = middle;
2117 second_half_begin = middle;
2118 second_half_end = current_end;
2119 }
2120 else
2121 {
2122 first_half_begin = middle;
2123 first_half_end = current_end;
2124 second_half_begin = current_begin;
2125 second_half_end = middle;
2126 }
2127
2128 xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2129 buf.get () + (first_half_begin - begin) * unit_size,
2130 first_half_begin,
2131 first_half_end - first_half_begin);
2132
2133 if (xfer == first_half_end - first_half_begin)
2134 {
2135 /* This half reads fine. So, the error must be in the
2136 other half. */
2137 current_begin = second_half_begin;
2138 current_end = second_half_end;
2139 }
2140 else
2141 {
2142 /* This half is not readable. Because we've tried one byte, we
2143 know some part of this half is actually readable. Go to the next
2144 iteration to divide again and try to read.
2145
2146 We don't handle the other half, because this function only tries
2147 to read a single readable subrange. */
2148 current_begin = first_half_begin;
2149 current_end = first_half_end;
2150 }
2151 }
2152
2153 if (forward)
2154 {
2155 /* The [begin, current_begin) range has been read. */
2156 result->emplace_back (begin, current_end, std::move (buf));
2157 }
2158 else
2159 {
2160 /* The [current_end, end) range has been read. */
2161 LONGEST region_len = end - current_end;
2162
2163 gdb::unique_xmalloc_ptr<gdb_byte> data
2164 ((gdb_byte *) xmalloc (region_len * unit_size));
2165 memcpy (data.get (), buf.get () + (current_end - begin) * unit_size,
2166 region_len * unit_size);
2167 result->emplace_back (current_end, end, std::move (data));
2168 }
2169 }
2170
2171 std::vector<memory_read_result>
2172 read_memory_robust (struct target_ops *ops,
2173 const ULONGEST offset, const LONGEST len)
2174 {
2175 std::vector<memory_read_result> result;
2176 int unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
2177
2178 LONGEST xfered_total = 0;
2179 while (xfered_total < len)
2180 {
2181 struct mem_region *region = lookup_mem_region (offset + xfered_total);
2182 LONGEST region_len;
2183
2184 /* If there is no explicit region, a fake one should be created. */
2185 gdb_assert (region);
2186
2187 if (region->hi == 0)
2188 region_len = len - xfered_total;
2189 else
2190 region_len = region->hi - offset;
2191
2192 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2193 {
2194 /* Cannot read this region. Note that we can end up here only
2195 if the region is explicitly marked inaccessible, or
2196 'inaccessible-by-default' is in effect. */
2197 xfered_total += region_len;
2198 }
2199 else
2200 {
2201 LONGEST to_read = std::min (len - xfered_total, region_len);
2202 gdb::unique_xmalloc_ptr<gdb_byte> buffer
2203 ((gdb_byte *) xmalloc (to_read * unit_size));
2204
2205 LONGEST xfered_partial =
2206 target_read (ops, TARGET_OBJECT_MEMORY, NULL, buffer.get (),
2207 offset + xfered_total, to_read);
2208 /* Call an observer, notifying them of the xfer progress? */
2209 if (xfered_partial <= 0)
2210 {
2211 /* Got an error reading the full chunk. See if we can read
2212 some subrange. */
2213 read_whatever_is_readable (ops, offset + xfered_total,
2214 offset + xfered_total + to_read,
2215 unit_size, &result);
2216 xfered_total += to_read;
2217 }
2218 else
2219 {
2220 result.emplace_back (offset + xfered_total,
2221 offset + xfered_total + xfered_partial,
2222 std::move (buffer));
2223 xfered_total += xfered_partial;
2224 }
2225 QUIT;
2226 }
2227 }
2228
2229 return result;
2230 }
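/* Illustrative sketch, not part of the GDB sources: a consumer of
   read_memory_robust must be prepared for zero or more disjoint
   chunks with holes between them.  Assuming the begin/end/data
   members that memory_read_result declares in target.h:

     std::vector<memory_read_result> chunks
       = read_memory_robust (ops, addr, len);
     for (const memory_read_result &r : chunks)
       consume_bytes (r.begin, r.end - r.begin, r.data.get ());

   where consume_bytes stands in for whatever the caller does with
   each readable block.  */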
2231
2232
2233 /* An alternative to target_write with progress callbacks. */
2234
2235 LONGEST
2236 target_write_with_progress (struct target_ops *ops,
2237 enum target_object object,
2238 const char *annex, const gdb_byte *buf,
2239 ULONGEST offset, LONGEST len,
2240 void (*progress) (ULONGEST, void *), void *baton)
2241 {
2242 LONGEST xfered_total = 0;
2243 int unit_size = 1;
2244
2245 /* If we are writing to a memory object, find the length of an addressable
2246 unit for that architecture. */
2247 if (object == TARGET_OBJECT_MEMORY
2248 || object == TARGET_OBJECT_STACK_MEMORY
2249 || object == TARGET_OBJECT_CODE_MEMORY
2250 || object == TARGET_OBJECT_RAW_MEMORY)
2251 unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
2252
2253 /* Give the progress callback a chance to set up. */
2254 if (progress)
2255 (*progress) (0, baton);
2256
2257 while (xfered_total < len)
2258 {
2259 ULONGEST xfered_partial;
2260 enum target_xfer_status status;
2261
2262 status = target_write_partial (ops, object, annex,
2263 buf + xfered_total * unit_size,
2264 offset + xfered_total, len - xfered_total,
2265 &xfered_partial);
2266
2267 if (status != TARGET_XFER_OK)
2268 return status == TARGET_XFER_EOF ? xfered_total : TARGET_XFER_E_IO;
2269
2270 if (progress)
2271 (*progress) (xfered_partial, baton);
2272
2273 xfered_total += xfered_partial;
2274 QUIT;
2275 }
2276 return len;
2277 }
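/* Illustrative sketch, not part of the GDB sources: a caller can track
   how much has been transferred by supplying a callback; note that the
   callback is invoked once with 0 before the first chunk so it can set
   itself up:

     static void
     show_progress (ULONGEST written, void *baton)
     {
       ...  update a progress meter stored in BATON ...
     }

     target_write_with_progress (ops, TARGET_OBJECT_FLASH, NULL,
                                 buf, offset, len,
                                 show_progress, &meter);

   TARGET_OBJECT_FLASH is just an example object here; any writable
   target object works the same way.  */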
2278
2279 /* For docs on target_write see target.h. */
2280
2281 LONGEST
2282 target_write (struct target_ops *ops,
2283 enum target_object object,
2284 const char *annex, const gdb_byte *buf,
2285 ULONGEST offset, LONGEST len)
2286 {
2287 return target_write_with_progress (ops, object, annex, buf, offset, len,
2288 NULL, NULL);
2289 }
2290
2291 /* Help for target_read_alloc and target_read_stralloc. See their comments
2292 for details. */
2293
2294 template <typename T>
2295 gdb::optional<gdb::def_vector<T>>
2296 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2297 const char *annex)
2298 {
2299 gdb::def_vector<T> buf;
2300 size_t buf_pos = 0;
2301 const int chunk = 4096;
2302
2303 /* This function does not have a length parameter; it reads the
2304 entire OBJECT. Also, it doesn't support objects fetched partly
2305 from one target and partly from another (in a different stratum,
2306 e.g. a core file and an executable). Both reasons make it
2307 unsuitable for reading memory. */
2308 gdb_assert (object != TARGET_OBJECT_MEMORY);
2309
2310 /* Start by reading up to 4K at a time. The target will throttle
2311 this number down if necessary. */
2312 while (1)
2313 {
2314 ULONGEST xfered_len;
2315 enum target_xfer_status status;
2316
2317 buf.resize (buf_pos + chunk);
2318
2319 status = target_read_partial (ops, object, annex,
2320 (gdb_byte *) &buf[buf_pos],
2321 buf_pos, chunk,
2322 &xfered_len);
2323
2324 if (status == TARGET_XFER_EOF)
2325 {
2326 /* Read all there was. */
2327 buf.resize (buf_pos);
2328 return buf;
2329 }
2330 else if (status != TARGET_XFER_OK)
2331 {
2332 /* An error occurred. */
2333 return {};
2334 }
2335
2336 buf_pos += xfered_len;
2337
2338 QUIT;
2339 }
2340 }
2341
2342 /* See target.h. */
2343
2344 gdb::optional<gdb::byte_vector>
2345 target_read_alloc (struct target_ops *ops, enum target_object object,
2346 const char *annex)
2347 {
2348 return target_read_alloc_1<gdb_byte> (ops, object, annex);
2349 }
2350
2351 /* See target.h. */
2352
2353 gdb::optional<gdb::char_vector>
2354 target_read_stralloc (struct target_ops *ops, enum target_object object,
2355 const char *annex)
2356 {
2357 gdb::optional<gdb::char_vector> buf
2358 = target_read_alloc_1<char> (ops, object, annex);
2359
2360 if (!buf)
2361 return {};
2362
2363 if (buf->empty () || buf->back () != '\0')
2364 buf->push_back ('\0');
2365
2366 /* Check for embedded NUL bytes; but allow trailing NULs. */
2367 for (auto it = std::find (buf->begin (), buf->end (), '\0');
2368 it != buf->end (); it++)
2369 if (*it != '\0')
2370 {
2371 warning (_("target object %d, annex %s, "
2372 "contained unexpected null characters"),
2373 (int) object, annex ? annex : "(none)");
2374 break;
2375 }
2376
2377 return buf;
2378 }
2379
2380 /* Memory transfer methods. */
2381
2382 void
2383 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2384 LONGEST len)
2385 {
2386 /* This method is used to read from an alternate, non-current
2387 target. This read must bypass the overlay support (as symbols
2388 don't match this target), and GDB's internal cache (wrong cache
2389 for this target). */
2390 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2391 != len)
2392 memory_error (TARGET_XFER_E_IO, addr);
2393 }
2394
2395 ULONGEST
2396 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2397 int len, enum bfd_endian byte_order)
2398 {
2399 gdb_byte buf[sizeof (ULONGEST)];
2400
2401 gdb_assert (len <= sizeof (buf));
2402 get_target_memory (ops, addr, buf, len);
2403 return extract_unsigned_integer (buf, len, byte_order);
2404 }
2405
2406 /* See target.h. */
2407
2408 int
2409 target_insert_breakpoint (struct gdbarch *gdbarch,
2410 struct bp_target_info *bp_tgt)
2411 {
2412 if (!may_insert_breakpoints)
2413 {
2414 warning (_("May not insert breakpoints"));
2415 return 1;
2416 }
2417
2418 target_ops *target = current_inferior ()->top_target ();
2419
2420 return target->insert_breakpoint (gdbarch, bp_tgt);
2421 }
2422
2423 /* See target.h. */
2424
2425 int
2426 target_remove_breakpoint (struct gdbarch *gdbarch,
2427 struct bp_target_info *bp_tgt,
2428 enum remove_bp_reason reason)
2429 {
2430 /* This is kind of a weird case to handle, but the permission might
2431 have been changed after breakpoints were inserted - in which case
2432 we should just take the user literally and assume that any
2433 breakpoints should be left in place. */
2434 if (!may_insert_breakpoints)
2435 {
2436 warning (_("May not remove breakpoints"));
2437 return 1;
2438 }
2439
2440 target_ops *target = current_inferior ()->top_target ();
2441
2442 return target->remove_breakpoint (gdbarch, bp_tgt, reason);
2443 }
2444
2445 static void
2446 info_target_command (const char *args, int from_tty)
2447 {
2448 int has_all_mem = 0;
2449
2450 if (current_program_space->symfile_object_file != NULL)
2451 {
2452 objfile *objf = current_program_space->symfile_object_file;
2453 gdb_printf (_("Symbols from \"%s\".\n"),
2454 objfile_name (objf));
2455 }
2456
2457 for (target_ops *t = current_inferior ()->top_target ();
2458 t != NULL;
2459 t = t->beneath ())
2460 {
2461 if (!t->has_memory ())
2462 continue;
2463
2464 if ((int) (t->stratum ()) <= (int) dummy_stratum)
2465 continue;
2466 if (has_all_mem)
2467 gdb_printf (_("\tWhile running this, "
2468 "GDB does not access memory from...\n"));
2469 gdb_printf ("%s:\n", t->longname ());
2470 t->files_info ();
2471 has_all_mem = t->has_all_memory ();
2472 }
2473 }
2474
2475 /* This function is called before any new inferior is created, e.g.
2476 by running a program, attaching, or connecting to a target.
2477 It cleans up any state from previous invocations which might
2478 change between runs. This is a subset of what target_preopen
2479 resets (things which might change between targets). */
2480
2481 void
2482 target_pre_inferior (int from_tty)
2483 {
2484 /* Clear out solib state. Otherwise the solib state of the previous
2485 inferior might have survived and is entirely wrong for the new
2486 target. This has been observed on GNU/Linux using glibc 2.3. How
2487 to reproduce:
2488
2489 bash$ ./foo&
2490 [1] 4711
2491 bash$ ./foo&
2492 [2] 4712
2493 bash$ gdb ./foo
2494 [...]
2495 (gdb) attach 4711
2496 (gdb) detach
2497 (gdb) attach 4712
2498 Cannot access memory at address 0xdeadbeef
2499 */
2500
2501 /* In some OSs, the shared library list is the same/global/shared
2502 across inferiors. If code is shared between processes, so are
2503 memory regions and features. */
2504 if (!gdbarch_has_global_solist (target_gdbarch ()))
2505 {
2506 no_shared_libraries (NULL, from_tty);
2507
2508 invalidate_target_mem_regions ();
2509
2510 target_clear_description ();
2511 }
2512
2513 /* attach_flag may be set if the previous process associated with
2514 the inferior was attached to. */
2515 current_inferior ()->attach_flag = 0;
2516
2517 current_inferior ()->highest_thread_num = 0;
2518
2519 agent_capability_invalidate ();
2520 }
2521
2522 /* This is to be called by the open routine before it does
2523 anything. */
2524
2525 void
2526 target_preopen (int from_tty)
2527 {
2528 dont_repeat ();
2529
2530 if (current_inferior ()->pid != 0)
2531 {
2532 if (!from_tty
2533 || !target_has_execution ()
2534 || query (_("A program is being debugged already. Kill it? ")))
2535 {
2536 /* Core inferiors actually should be detached, not
2537 killed. */
2538 if (target_has_execution ())
2539 target_kill ();
2540 else
2541 target_detach (current_inferior (), 0);
2542 }
2543 else
2544 error (_("Program not killed."));
2545 }
2546
2547 /* Calling target_kill may remove the target from the stack. But if
2548 it doesn't (which seems like a win for UDI), remove it now. */
2549 /* Leave the exec target, though. The user may be switching from a
2550 live process to a core of the same program. */
2551 pop_all_targets_above (file_stratum);
2552
2553 target_pre_inferior (from_tty);
2554 }
2555
2556 /* See target.h. */
2557
2558 void
2559 target_detach (inferior *inf, int from_tty)
2560 {
2561 /* After we have detached, we will clear the register cache for this inferior
2562 by calling registers_changed_ptid. We must save the pid_ptid before
2563 detaching, as the target detach method will clear inf->pid. */
2564 ptid_t save_pid_ptid = ptid_t (inf->pid);
2565
2566 /* As long as some to_detach implementations rely on the current_inferior
2567 (either directly, or indirectly, like through target_gdbarch or by
2568 reading memory), INF needs to be the current inferior. When that
2569 requirement is no longer true, we can remove this
2570 assertion. */
2571 gdb_assert (inf == current_inferior ());
2572
2573 prepare_for_detach ();
2574
2575 /* Hold a strong reference because detaching may unpush the
2576 target. */
2577 auto proc_target_ref = target_ops_ref::new_reference (inf->process_target ());
2578
2579 current_inferior ()->top_target ()->detach (inf, from_tty);
2580
2581 process_stratum_target *proc_target
2582 = as_process_stratum_target (proc_target_ref.get ());
2583
2584 registers_changed_ptid (proc_target, save_pid_ptid);
2585
2586 /* We have to ensure we have no frame cache left. Normally,
2587 registers_changed_ptid (save_pid_ptid) calls reinit_frame_cache when
2588 inferior_ptid matches save_pid_ptid, but in our case, it does not
2589 call it, as inferior_ptid has been reset. */
2590 reinit_frame_cache ();
2591 }
2592
2593 void
2594 target_disconnect (const char *args, int from_tty)
2595 {
2596 /* If we're in breakpoints-always-inserted mode or if breakpoints
2597 are global across processes, we have to remove them before
2598 disconnecting. */
2599 remove_breakpoints ();
2600
2601 current_inferior ()->top_target ()->disconnect (args, from_tty);
2602 }
2603
2604 /* See target/target.h. */
2605
2606 ptid_t
2607 target_wait (ptid_t ptid, struct target_waitstatus *status,
2608 target_wait_flags options)
2609 {
2610 target_ops *target = current_inferior ()->top_target ();
2611 process_stratum_target *proc_target = current_inferior ()->process_target ();
2612
2613 gdb_assert (!proc_target->commit_resumed_state);
2614
2615 if (!target_can_async_p (target))
2616 gdb_assert ((options & TARGET_WNOHANG) == 0);
2617
2618 try
2619 {
2620 gdb::observers::target_pre_wait.notify (ptid);
2621 ptid_t event_ptid = target->wait (ptid, status, options);
2622 gdb::observers::target_post_wait.notify (event_ptid);
2623 return event_ptid;
2624 }
2625 catch (...)
2626 {
2627 gdb::observers::target_post_wait.notify (null_ptid);
2628 throw;
2629 }
2630 }
2631
2632 /* See target.h. */
2633
2634 ptid_t
2635 default_target_wait (struct target_ops *ops,
2636 ptid_t ptid, struct target_waitstatus *status,
2637 target_wait_flags options)
2638 {
2639 status->set_ignore ();
2640 return minus_one_ptid;
2641 }
2642
2643 std::string
2644 target_pid_to_str (ptid_t ptid)
2645 {
2646 return current_inferior ()->top_target ()->pid_to_str (ptid);
2647 }
2648
2649 const char *
2650 target_thread_name (struct thread_info *info)
2651 {
2652 gdb_assert (info->inf == current_inferior ());
2653
2654 return current_inferior ()->top_target ()->thread_name (info);
2655 }
2656
2657 struct thread_info *
2658 target_thread_handle_to_thread_info (const gdb_byte *thread_handle,
2659 int handle_len,
2660 struct inferior *inf)
2661 {
2662 target_ops *target = current_inferior ()->top_target ();
2663
2664 return target->thread_handle_to_thread_info (thread_handle, handle_len, inf);
2665 }
2666
2667 /* See target.h. */
2668
2669 gdb::byte_vector
2670 target_thread_info_to_thread_handle (struct thread_info *tip)
2671 {
2672 target_ops *target = current_inferior ()->top_target ();
2673
2674 return target->thread_info_to_thread_handle (tip);
2675 }
2676
2677 void
2678 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2679 {
2680 process_stratum_target *curr_target = current_inferior ()->process_target ();
2681 gdb_assert (!curr_target->commit_resumed_state);
2682
2683 target_dcache_invalidate ();
2684
2685 current_inferior ()->top_target ()->resume (ptid, step, signal);
2686
2687 registers_changed_ptid (curr_target, ptid);
2688 /* We only set the internal executing state here. The user/frontend
2689 running state is set at a higher level. This also clears the
2690 thread's stop_pc as side effect. */
2691 set_executing (curr_target, ptid, true);
2692 clear_inline_frame_state (curr_target, ptid);
2693
2694 if (target_can_async_p ())
2695 target_async (1);
2696 }
2697
2698 /* See target.h. */
2699
2700 void
2701 target_commit_resumed ()
2702 {
2703 gdb_assert (current_inferior ()->process_target ()->commit_resumed_state);
2704 current_inferior ()->top_target ()->commit_resumed ();
2705 }
2706
2707 /* See target.h. */
2708
2709 bool
2710 target_has_pending_events ()
2711 {
2712 return current_inferior ()->top_target ()->has_pending_events ();
2713 }
2714
2715 void
2716 target_pass_signals (gdb::array_view<const unsigned char> pass_signals)
2717 {
2718 current_inferior ()->top_target ()->pass_signals (pass_signals);
2719 }
2720
2721 void
2722 target_program_signals (gdb::array_view<const unsigned char> program_signals)
2723 {
2724 current_inferior ()->top_target ()->program_signals (program_signals);
2725 }
2726
2727 static void
2728 default_follow_fork (struct target_ops *self, inferior *child_inf,
2729 ptid_t child_ptid, target_waitkind fork_kind,
2730 bool follow_child, bool detach_fork)
2731 {
2732 /* Some target returned a fork event, but did not know how to follow it. */
2733 internal_error (__FILE__, __LINE__,
2734 _("could not find a target to follow fork"));
2735 }
2736
2737 /* See target.h. */
2738
2739 void
2740 target_follow_fork (inferior *child_inf, ptid_t child_ptid,
2741 target_waitkind fork_kind, bool follow_child,
2742 bool detach_fork)
2743 {
2744 target_ops *target = current_inferior ()->top_target ();
2745
2746 /* Check consistency between CHILD_INF, CHILD_PTID, FOLLOW_CHILD and
2747 DETACH_FORK. */
2748 if (child_inf != nullptr)
2749 {
2750 gdb_assert (follow_child || !detach_fork);
2751 gdb_assert (child_inf->pid == child_ptid.pid ());
2752 }
2753 else
2754 gdb_assert (!follow_child && detach_fork);
2755
2756 return target->follow_fork (child_inf, child_ptid, fork_kind, follow_child,
2757 detach_fork);
2758 }
2759
2760 /* See target.h. */
2761
2762 void
2763 target_follow_exec (inferior *follow_inf, ptid_t ptid,
2764 const char *execd_pathname)
2765 {
2766 current_inferior ()->top_target ()->follow_exec (follow_inf, ptid,
2767 execd_pathname);
2768 }
2769
2770 static void
2771 default_mourn_inferior (struct target_ops *self)
2772 {
2773 internal_error (__FILE__, __LINE__,
2774 _("could not find a target to follow mourn inferior"));
2775 }
2776
2777 void
2778 target_mourn_inferior (ptid_t ptid)
2779 {
2780 gdb_assert (ptid.pid () == inferior_ptid.pid ());
2781 current_inferior ()->top_target ()->mourn_inferior ();
2782
2783 /* We no longer need to keep handles on any of the object files.
2784 Make sure to release them to avoid unnecessarily locking any
2785 of them while we're not actually debugging. */
2786 bfd_cache_close_all ();
2787 }
2788
2789 /* Look for a target which can describe architectural features, starting
2790 from TARGET. If we find one, return its description. */
2791
2792 const struct target_desc *
2793 target_read_description (struct target_ops *target)
2794 {
2795 return target->read_description ();
2796 }
2797
2798
2799 /* Default implementation of memory-searching. */
2800
2801 static int
2802 default_search_memory (struct target_ops *self,
2803 CORE_ADDR start_addr, ULONGEST search_space_len,
2804 const gdb_byte *pattern, ULONGEST pattern_len,
2805 CORE_ADDR *found_addrp)
2806 {
2807 auto read_memory = [=] (CORE_ADDR addr, gdb_byte *result, size_t len)
2808 {
2809 return target_read (current_inferior ()->top_target (),
2810 TARGET_OBJECT_MEMORY, NULL,
2811 result, addr, len) == len;
2812 };
2813
2814 /* Start over from the top of the target stack. */
2815 return simple_search_memory (read_memory, start_addr, search_space_len,
2816 pattern, pattern_len, found_addrp);
2817 }
2818
2819 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2820 sequence of bytes in PATTERN with length PATTERN_LEN.
2821
2822 The result is 1 if found, 0 if not found, and -1 if there was an error
2823 requiring halting of the search (e.g. memory read error).
2824 If the pattern is found the address is recorded in FOUND_ADDRP. */
2825
2826 int
2827 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2828 const gdb_byte *pattern, ULONGEST pattern_len,
2829 CORE_ADDR *found_addrp)
2830 {
2831 target_ops *target = current_inferior ()->top_target ();
2832
2833 return target->search_memory (start_addr, search_space_len, pattern,
2834 pattern_len, found_addrp);
2835 }
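/* Illustrative sketch, not part of the GDB sources: searching for a
   4-byte pattern and acting on the result codes documented above:

     static const gdb_byte pattern[] = { 0xde, 0xad, 0xbe, 0xef };
     CORE_ADDR found_addr;
     int res = target_search_memory (start_addr, search_space_len,
                                     pattern, sizeof (pattern),
                                     &found_addr);
     if (res == 1)
       ...  FOUND_ADDR holds the address of the first match ...
     else if (res == 0)
       ...  pattern not present in the searched range ...
     else
       ...  res == -1: a memory read error stopped the search ...
*/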
2836
2837 /* Look through the currently pushed targets. If none of them will
2838 be able to restart the currently running process, issue an error
2839 message. */
2840
2841 void
2842 target_require_runnable (void)
2843 {
2844 for (target_ops *t = current_inferior ()->top_target ();
2845 t != NULL;
2846 t = t->beneath ())
2847 {
2848 /* If this target knows how to create a new program, then
2849 assume we will still be able to after killing the current
2850 one. Either killing and mourning will not pop T, or else
2851 find_default_run_target will find it again. */
2852 if (t->can_create_inferior ())
2853 return;
2854
2855 /* Do not worry about targets at certain strata that cannot
2856 create inferiors. Assume they will be pushed again if
2857 necessary, and continue to the process_stratum. */
2858 if (t->stratum () > process_stratum)
2859 continue;
2860
2861 error (_("The \"%s\" target does not support \"run\". "
2862 "Try \"help target\" or \"continue\"."),
2863 t->shortname ());
2864 }
2865
2866 /* This function is only called if the target is running. In that
2867 case there should have been a process_stratum target and it
2868 should either know how to create inferiors, or not... */
2869 internal_error (__FILE__, __LINE__, _("No targets found"));
2870 }
2871
2872 /* Whether GDB is allowed to fall back to the default run target for
2873 "run", "attach", etc. when no target is connected yet. */
2874 static bool auto_connect_native_target = true;
2875
2876 static void
2877 show_auto_connect_native_target (struct ui_file *file, int from_tty,
2878 struct cmd_list_element *c, const char *value)
2879 {
2880 gdb_printf (file,
2881 _("Whether GDB may automatically connect to the "
2882 "native target is %s.\n"),
2883 value);
2884 }
2885
2886 /* A pointer to the target that can respond to "run" or "attach".
2887 Native targets are always singletons and instantiated early at GDB
2888 startup. */
2889 static target_ops *the_native_target;
2890
2891 /* See target.h. */
2892
2893 void
2894 set_native_target (target_ops *target)
2895 {
2896 if (the_native_target != NULL)
2897 internal_error (__FILE__, __LINE__,
2898 _("native target already set (\"%s\")."),
2899 the_native_target->longname ());
2900
2901 the_native_target = target;
2902 }
2903
2904 /* See target.h. */
2905
2906 target_ops *
2907 get_native_target ()
2908 {
2909 return the_native_target;
2910 }
2911
2912 /* Look through the list of possible targets for a target that can
2913 execute a run or attach command without any other data. This is
2914 used to locate the default process stratum.
2915
2916 If DO_MESG is not NULL, the result is always valid (error() is
2917 called for errors); else, return NULL on error. */
2918
2919 static struct target_ops *
2920 find_default_run_target (const char *do_mesg)
2921 {
2922 if (auto_connect_native_target && the_native_target != NULL)
2923 return the_native_target;
2924
2925 if (do_mesg != NULL)
2926 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2927 return NULL;
2928 }
2929
2930 /* See target.h. */
2931
2932 struct target_ops *
2933 find_attach_target (void)
2934 {
2935 /* If a target on the current stack can attach, use it. */
2936 for (target_ops *t = current_inferior ()->top_target ();
2937 t != NULL;
2938 t = t->beneath ())
2939 {
2940 if (t->can_attach ())
2941 return t;
2942 }
2943
2944 /* Otherwise, use the default run target for attaching. */
2945 return find_default_run_target ("attach");
2946 }
2947
2948 /* See target.h. */
2949
2950 struct target_ops *
2951 find_run_target (void)
2952 {
2953 /* If a target on the current stack can run, use it. */
2954 for (target_ops *t = current_inferior ()->top_target ();
2955 t != NULL;
2956 t = t->beneath ())
2957 {
2958 if (t->can_create_inferior ())
2959 return t;
2960 }
2961
2962 /* Otherwise, use the default run target. */
2963 return find_default_run_target ("run");
2964 }
2965
2966 bool
2967 target_ops::info_proc (const char *args, enum info_proc_what what)
2968 {
2969 return false;
2970 }
2971
2972 /* Implement the "info proc" command. */
2973
2974 int
2975 target_info_proc (const char *args, enum info_proc_what what)
2976 {
2977 struct target_ops *t;
2978
2979 /* If we're already connected to something that can get us OS
2980 related data, use it. Otherwise, try using the native
2981 target. */
2982 t = find_target_at (process_stratum);
2983 if (t == NULL)
2984 t = find_default_run_target (NULL);
2985
2986 for (; t != NULL; t = t->beneath ())
2987 {
2988 if (t->info_proc (args, what))
2989 {
2990 if (targetdebug)
2991 gdb_printf (gdb_stdlog,
2992 "target_info_proc (\"%s\", %d)\n", args, what);
2993
2994 return 1;
2995 }
2996 }
2997
2998 return 0;
2999 }
3000
3001 static int
3002 find_default_supports_disable_randomization (struct target_ops *self)
3003 {
3004 struct target_ops *t;
3005
3006 t = find_default_run_target (NULL);
3007 if (t != NULL)
3008 return t->supports_disable_randomization ();
3009 return 0;
3010 }
3011
3012 int
3013 target_supports_disable_randomization (void)
3014 {
3015 return current_inferior ()->top_target ()->supports_disable_randomization ();
3016 }
3017
3018 /* See target/target.h. */
3019
3020 int
3021 target_supports_multi_process (void)
3022 {
3023 return current_inferior ()->top_target ()->supports_multi_process ();
3024 }
3025
3026 /* See target.h. */
3027
3028 gdb::optional<gdb::char_vector>
3029 target_get_osdata (const char *type)
3030 {
3031 struct target_ops *t;
3032
3033 /* If we're already connected to something that can get us OS
3034 related data, use it. Otherwise, try using the native
3035 target. */
3036 t = find_target_at (process_stratum);
3037 if (t == NULL)
3038 t = find_default_run_target ("get OS data");
3039
3040 if (!t)
3041 return {};
3042
3043 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3044 }
3045
3046 /* Determine the current address space of thread PTID. */
3047
3048 struct address_space *
3049 target_thread_address_space (ptid_t ptid)
3050 {
3051 struct address_space *aspace;
3052
3053 aspace = current_inferior ()->top_target ()->thread_address_space (ptid);
3054 gdb_assert (aspace != NULL);
3055
3056 return aspace;
3057 }
3058
3059 /* See target.h. */
3060
3061 target_ops *
3062 target_ops::beneath () const
3063 {
3064 return current_inferior ()->find_target_beneath (this);
3065 }
3066
3067 void
3068 target_ops::close ()
3069 {
3070 }
3071
3072 bool
3073 target_ops::can_attach ()
3074 {
3075 return 0;
3076 }
3077
3078 void
3079 target_ops::attach (const char *, int)
3080 {
3081 gdb_assert_not_reached ("target_ops::attach called");
3082 }
3083
3084 bool
3085 target_ops::can_create_inferior ()
3086 {
3087 return 0;
3088 }
3089
3090 void
3091 target_ops::create_inferior (const char *, const std::string &,
3092 char **, int)
3093 {
3094 gdb_assert_not_reached ("target_ops::create_inferior called");
3095 }
3096
3097 bool
3098 target_ops::can_run ()
3099 {
3100 return false;
3101 }
3102
3103 int
3104 target_can_run ()
3105 {
3106 for (target_ops *t = current_inferior ()->top_target ();
3107 t != NULL;
3108 t = t->beneath ())
3109 {
3110 if (t->can_run ())
3111 return 1;
3112 }
3113
3114 return 0;
3115 }
3116
3117 /* Target file operations. */
3118
3119 static struct target_ops *
3120 default_fileio_target (void)
3121 {
3122 struct target_ops *t;
3123
3124 /* If we're already connected to something that can perform
3125 file I/O, use it. Otherwise, try using the native target. */
3126 t = find_target_at (process_stratum);
3127 if (t != NULL)
3128 return t;
3129 return find_default_run_target ("file I/O");
3130 }
3131
3132 /* File handle for target file operations. */
3133
3134 struct fileio_fh_t
3135 {
3136 /* The target on which this file is open. NULL if the target was
3137 closed while this handle was still open. */
3138 target_ops *target;
3139
3140 /* The file descriptor on the target. */
3141 int target_fd;
3142
3143 /* Check whether this fileio_fh_t represents a closed file. */
3144 bool is_closed ()
3145 {
3146 return target_fd < 0;
3147 }
3148 };
3149
3150 /* Vector of currently open file handles. The value returned by
3151 target_fileio_open and passed as the FD argument to other
3152 target_fileio_* functions is an index into this vector. This
3153 vector's entries are never freed; instead, files are marked as
3154 closed, and the handle becomes available for reuse. */
3155 static std::vector<fileio_fh_t> fileio_fhandles;
3156
3157 /* Index into fileio_fhandles of the lowest handle that might be
3158 closed. This permits handle reuse without searching the whole
3159 list each time a new file is opened. */
3160 static int lowest_closed_fd;
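/* For example (illustrative): after three target_fileio_open calls the
   vector holds handles 0, 1 and 2.  Closing handle 1 marks that slot
   closed and lowers lowest_closed_fd to 1, so the next open reuses
   slot 1 instead of growing the vector.  */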
3161
3162 /* See target.h. */
3163
3164 void
3165 fileio_handles_invalidate_target (target_ops *targ)
3166 {
3167 for (fileio_fh_t &fh : fileio_fhandles)
3168 if (fh.target == targ)
3169 fh.target = NULL;
3170 }
3171
3172 /* Acquire a target fileio file descriptor. */
3173
3174 static int
3175 acquire_fileio_fd (target_ops *target, int target_fd)
3176 {
3177 /* Search for closed handles to reuse. */
3178 for (; lowest_closed_fd < fileio_fhandles.size (); lowest_closed_fd++)
3179 {
3180 fileio_fh_t &fh = fileio_fhandles[lowest_closed_fd];
3181
3182 if (fh.is_closed ())
3183 break;
3184 }
3185
3186 /* Push a new handle if no closed handles were found. */
3187 if (lowest_closed_fd == fileio_fhandles.size ())
3188 fileio_fhandles.push_back (fileio_fh_t {target, target_fd});
3189 else
3190 fileio_fhandles[lowest_closed_fd] = {target, target_fd};
3191
3192 /* Should no longer be marked closed. */
3193 gdb_assert (!fileio_fhandles[lowest_closed_fd].is_closed ());
3194
3195 /* Return its index, and start the next lookup at
3196 the next index. */
3197 return lowest_closed_fd++;
3198 }
3199
3200 /* Release a target fileio file descriptor. */
3201
3202 static void
3203 release_fileio_fd (int fd, fileio_fh_t *fh)
3204 {
3205 fh->target_fd = -1;
3206 lowest_closed_fd = std::min (lowest_closed_fd, fd);
3207 }
3208
3209 /* Return a pointer to the fileio_fhandle_t corresponding to FD. */
3210
3211 static fileio_fh_t *
3212 fileio_fd_to_fh (int fd)
3213 {
3214 return &fileio_fhandles[fd];
3215 }
3216
3217
3218 /* Default implementations of file i/o methods. We don't want these
3219 to delegate automatically, because we need to know which target
3220 supported the method, in order to call it directly from within
3221 pread/pwrite, etc. */
3222
3223 int
3224 target_ops::fileio_open (struct inferior *inf, const char *filename,
3225 int flags, int mode, int warn_if_slow,
3226 int *target_errno)
3227 {
3228 *target_errno = FILEIO_ENOSYS;
3229 return -1;
3230 }
3231
3232 int
3233 target_ops::fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3234 ULONGEST offset, int *target_errno)
3235 {
3236 *target_errno = FILEIO_ENOSYS;
3237 return -1;
3238 }
3239
3240 int
3241 target_ops::fileio_pread (int fd, gdb_byte *read_buf, int len,
3242 ULONGEST offset, int *target_errno)
3243 {
3244 *target_errno = FILEIO_ENOSYS;
3245 return -1;
3246 }
3247
3248 int
3249 target_ops::fileio_fstat (int fd, struct stat *sb, int *target_errno)
3250 {
3251 *target_errno = FILEIO_ENOSYS;
3252 return -1;
3253 }
3254
3255 int
3256 target_ops::fileio_close (int fd, int *target_errno)
3257 {
3258 *target_errno = FILEIO_ENOSYS;
3259 return -1;
3260 }
3261
3262 int
3263 target_ops::fileio_unlink (struct inferior *inf, const char *filename,
3264 int *target_errno)
3265 {
3266 *target_errno = FILEIO_ENOSYS;
3267 return -1;
3268 }
3269
3270 gdb::optional<std::string>
3271 target_ops::fileio_readlink (struct inferior *inf, const char *filename,
3272 int *target_errno)
3273 {
3274 *target_errno = FILEIO_ENOSYS;
3275 return {};
3276 }
3277
3278 /* See target.h. */
3279
3280 int
3281 target_fileio_open (struct inferior *inf, const char *filename,
3282 int flags, int mode, bool warn_if_slow, int *target_errno)
3283 {
3284 for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
3285 {
3286 int fd = t->fileio_open (inf, filename, flags, mode,
3287 warn_if_slow, target_errno);
3288
3289 if (fd == -1 && *target_errno == FILEIO_ENOSYS)
3290 continue;
3291
3292 if (fd < 0)
3293 fd = -1;
3294 else
3295 fd = acquire_fileio_fd (t, fd);
3296
3297 if (targetdebug)
3298 gdb_printf (gdb_stdlog,
3299 "target_fileio_open (%d,%s,0x%x,0%o,%d)"
3300 " = %d (%d)\n",
3301 inf == NULL ? 0 : inf->num,
3302 filename, flags, mode,
3303 warn_if_slow, fd,
3304 fd != -1 ? 0 : *target_errno);
3305 return fd;
3306 }
3307
3308 *target_errno = FILEIO_ENOSYS;
3309 return -1;
3310 }
3311
3312 /* See target.h. */
3313
3314 int
3315 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3316 ULONGEST offset, int *target_errno)
3317 {
3318 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3319 int ret = -1;
3320
3321 if (fh->is_closed ())
3322 *target_errno = EBADF;
3323 else if (fh->target == NULL)
3324 *target_errno = EIO;
3325 else
3326 ret = fh->target->fileio_pwrite (fh->target_fd, write_buf,
3327 len, offset, target_errno);
3328
3329 if (targetdebug)
3330 gdb_printf (gdb_stdlog,
3331 "target_fileio_pwrite (%d,...,%d,%s) "
3332 "= %d (%d)\n",
3333 fd, len, pulongest (offset),
3334 ret, ret != -1 ? 0 : *target_errno);
3335 return ret;
3336 }
3337
3338 /* See target.h. */
3339
3340 int
3341 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3342 ULONGEST offset, int *target_errno)
3343 {
3344 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3345 int ret = -1;
3346
3347 if (fh->is_closed ())
3348 *target_errno = EBADF;
3349 else if (fh->target == NULL)
3350 *target_errno = EIO;
3351 else
3352 ret = fh->target->fileio_pread (fh->target_fd, read_buf,
3353 len, offset, target_errno);
3354
3355 if (targetdebug)
3356 gdb_printf (gdb_stdlog,
3357 "target_fileio_pread (%d,...,%d,%s) "
3358 "= %d (%d)\n",
3359 fd, len, pulongest (offset),
3360 ret, ret != -1 ? 0 : *target_errno);
3361 return ret;
3362 }
3363
3364 /* See target.h. */
3365
3366 int
3367 target_fileio_fstat (int fd, struct stat *sb, int *target_errno)
3368 {
3369 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3370 int ret = -1;
3371
3372 if (fh->is_closed ())
3373 *target_errno = EBADF;
3374 else if (fh->target == NULL)
3375 *target_errno = EIO;
3376 else
3377 ret = fh->target->fileio_fstat (fh->target_fd, sb, target_errno);
3378
3379 if (targetdebug)
3380 gdb_printf (gdb_stdlog,
3381 "target_fileio_fstat (%d) = %d (%d)\n",
3382 fd, ret, ret != -1 ? 0 : *target_errno);
3383 return ret;
3384 }
3385
3386 /* See target.h. */
3387
3388 int
3389 target_fileio_close (int fd, int *target_errno)
3390 {
3391 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3392 int ret = -1;
3393
3394 if (fh->is_closed ())
3395 *target_errno = EBADF;
3396 else
3397 {
3398 if (fh->target != NULL)
3399 ret = fh->target->fileio_close (fh->target_fd,
3400 target_errno);
3401 else
3402 ret = 0;
3403 release_fileio_fd (fd, fh);
3404 }
3405
3406 if (targetdebug)
3407 gdb_printf (gdb_stdlog,
3408 "target_fileio_close (%d) = %d (%d)\n",
3409 fd, ret, ret != -1 ? 0 : *target_errno);
3410 return ret;
3411 }
3412
3413 /* See target.h. */
3414
3415 int
3416 target_fileio_unlink (struct inferior *inf, const char *filename,
3417 int *target_errno)
3418 {
3419 for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
3420 {
3421 int ret = t->fileio_unlink (inf, filename, target_errno);
3422
3423 if (ret == -1 && *target_errno == FILEIO_ENOSYS)
3424 continue;
3425
3426 if (targetdebug)
3427 gdb_printf (gdb_stdlog,
3428 "target_fileio_unlink (%d,%s)"
3429 " = %d (%d)\n",
3430 inf == NULL ? 0 : inf->num, filename,
3431 ret, ret != -1 ? 0 : *target_errno);
3432 return ret;
3433 }
3434
3435 *target_errno = FILEIO_ENOSYS;
3436 return -1;
3437 }
3438
3439 /* See target.h. */
3440
3441 gdb::optional<std::string>
3442 target_fileio_readlink (struct inferior *inf, const char *filename,
3443 int *target_errno)
3444 {
3445 for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
3446 {
3447 gdb::optional<std::string> ret
3448 = t->fileio_readlink (inf, filename, target_errno);
3449
3450 if (!ret.has_value () && *target_errno == FILEIO_ENOSYS)
3451 continue;
3452
3453 if (targetdebug)
3454 gdb_printf (gdb_stdlog,
3455 "target_fileio_readlink (%d,%s)"
3456 " = %s (%d)\n",
3457 inf == NULL ? 0 : inf->num,
3458 filename, ret ? ret->c_str () : "(nil)",
3459 ret ? 0 : *target_errno);
3460 return ret;
3461 }
3462
3463 *target_errno = FILEIO_ENOSYS;
3464 return {};
3465 }
3466
3467 /* Like scoped_fd, but specific to target fileio. */
3468
3469 class scoped_target_fd
3470 {
3471 public:
3472 explicit scoped_target_fd (int fd) noexcept
3473 : m_fd (fd)
3474 {
3475 }
3476
3477 ~scoped_target_fd ()
3478 {
3479 if (m_fd >= 0)
3480 {
3481 int target_errno;
3482
3483 target_fileio_close (m_fd, &target_errno);
3484 }
3485 }
3486
3487 DISABLE_COPY_AND_ASSIGN (scoped_target_fd);
3488
3489 int get () const noexcept
3490 {
3491 return m_fd;
3492 }
3493
3494 private:
3495 int m_fd;
3496 };
3497
3498 /* Read target file FILENAME, in the filesystem as seen by INF. If
3499 INF is NULL, use the filesystem seen by the debugger (GDB or, for
3500 remote targets, the remote stub). Store the result in *BUF_P and
3501 return the size of the transferred data. PADDING additional bytes
3502 are available in *BUF_P. This is a helper function for
3503 target_fileio_read_alloc; see the declaration of that function for
3504 more information. */
3505
3506 static LONGEST
3507 target_fileio_read_alloc_1 (struct inferior *inf, const char *filename,
3508 gdb_byte **buf_p, int padding)
3509 {
3510 size_t buf_alloc, buf_pos;
3511 gdb_byte *buf;
3512 LONGEST n;
3513 int target_errno;
3514
3515 scoped_target_fd fd (target_fileio_open (inf, filename, FILEIO_O_RDONLY,
3516 0700, false, &target_errno));
3517 if (fd.get () == -1)
3518 return -1;
3519
3520 /* Start by reading up to 4K at a time. The target will throttle
3521 this number down if necessary. */
3522 buf_alloc = 4096;
3523 buf = (gdb_byte *) xmalloc (buf_alloc);
3524 buf_pos = 0;
3525 while (1)
3526 {
3527 n = target_fileio_pread (fd.get (), &buf[buf_pos],
3528 buf_alloc - buf_pos - padding, buf_pos,
3529 &target_errno);
3530 if (n < 0)
3531 {
3532 /* An error occurred. */
3533 xfree (buf);
3534 return -1;
3535 }
3536 else if (n == 0)
3537 {
3538 /* Read all there was. */
3539 if (buf_pos == 0)
3540 xfree (buf);
3541 else
3542 *buf_p = buf;
3543 return buf_pos;
3544 }
3545
3546 buf_pos += n;
3547
3548 /* If the buffer is filling up, expand it. */
3549 if (buf_alloc < buf_pos * 2)
3550 {
3551 buf_alloc *= 2;
3552 buf = (gdb_byte *) xrealloc (buf, buf_alloc);
3553 }
3554
3555 QUIT;
3556 }
3557 }
3558
3559 /* See target.h. */
3560
3561 LONGEST
3562 target_fileio_read_alloc (struct inferior *inf, const char *filename,
3563 gdb_byte **buf_p)
3564 {
3565 return target_fileio_read_alloc_1 (inf, filename, buf_p, 0);
3566 }
3567
3568 /* See target.h. */
3569
3570 gdb::unique_xmalloc_ptr<char>
3571 target_fileio_read_stralloc (struct inferior *inf, const char *filename)
3572 {
3573 gdb_byte *buffer;
3574 char *bufstr;
3575 LONGEST i, transferred;
3576
3577 transferred = target_fileio_read_alloc_1 (inf, filename, &buffer, 1);
3578 bufstr = (char *) buffer;
3579
3580 if (transferred < 0)
3581 return gdb::unique_xmalloc_ptr<char> (nullptr);
3582
3583 if (transferred == 0)
3584 return make_unique_xstrdup ("");
3585
3586 bufstr[transferred] = 0;
3587
3588 /* Check for embedded NUL bytes; but allow trailing NULs. */
3589 for (i = strlen (bufstr); i < transferred; i++)
3590 if (bufstr[i] != 0)
3591 {
3592 warning (_("target file %s "
3593 "contained unexpected null characters"),
3594 filename);
3595 break;
3596 }
3597
3598 return gdb::unique_xmalloc_ptr<char> (bufstr);
3599 }
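/* Illustrative sketch, not part of the GDB sources: reading a text
   file from the inferior's filesystem (the path is only an example):

     gdb::unique_xmalloc_ptr<char> text
       = target_fileio_read_stralloc (inf, "/proc/net/dev");
     if (text != nullptr)
       ...  parse the NUL-terminated contents ...
     else
       ...  the open or read failed ...
*/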
3600
3601
3602 static int
3603 default_region_ok_for_hw_watchpoint (struct target_ops *self,
3604 CORE_ADDR addr, int len)
3605 {
3606 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3607 }
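/* For example, on a target where gdbarch_ptr_bit is 64 and
   TARGET_CHAR_BIT is 8, the default above accepts hardware watchpoint
   regions of up to 8 bytes.  */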
3608
3609 static int
3610 default_watchpoint_addr_within_range (struct target_ops *target,
3611 CORE_ADDR addr,
3612 CORE_ADDR start, int length)
3613 {
3614 return addr >= start && addr < start + length;
3615 }
3616
3617 /* See target.h. */
3618
3619 target_ops *
3620 target_stack::find_beneath (const target_ops *t) const
3621 {
3622 /* Look for a non-empty slot at stratum levels beneath T's. */
3623 for (int stratum = t->stratum () - 1; stratum >= 0; --stratum)
3624 if (m_stack[stratum] != NULL)
3625 return m_stack[stratum];
3626
3627 return NULL;
3628 }
3629
3630 /* See target.h. */
3631
3632 struct target_ops *
3633 find_target_at (enum strata stratum)
3634 {
3635 return current_inferior ()->target_at (stratum);
3636 }
3637
3638 \f
3639
3640 /* See target.h. */
3641
3642 void
3643 target_announce_detach (int from_tty)
3644 {
3645 pid_t pid;
3646 const char *exec_file;
3647
3648 if (!from_tty)
3649 return;
3650
3651 pid = inferior_ptid.pid ();
3652 exec_file = get_exec_file (0);
3653 if (exec_file == nullptr)
3654 gdb_printf ("Detaching from pid %s\n",
3655 target_pid_to_str (ptid_t (pid)).c_str ());
3656 else
3657 gdb_printf (_("Detaching from program: %s, %s\n"), exec_file,
3658 target_pid_to_str (ptid_t (pid)).c_str ());
3659 }
3660
3661 /* See target.h. */
3662
3663 void
3664 target_announce_attach (int from_tty, int pid)
3665 {
3666 if (!from_tty)
3667 return;
3668
3669 const char *exec_file = get_exec_file (0);
3670
3671 if (exec_file != nullptr)
3672 gdb_printf ("Attaching to program: %s, %s\n", exec_file,
3673 target_pid_to_str (ptid_t (pid)).c_str ());
3674 else
3675 gdb_printf ("Attaching to %s\n",
3676 target_pid_to_str (ptid_t (pid)).c_str ());
3677 }
3678
3679 /* The inferior process has died. Long live the inferior! */
3680
3681 void
3682 generic_mourn_inferior (void)
3683 {
3684 inferior *inf = current_inferior ();
3685
3686 switch_to_no_thread ();
3687
3688 /* Mark breakpoints uninserted in case something tries to delete a
3689 breakpoint while we delete the inferior's threads (which would
3690 fail, since the inferior is long gone). */
3691 mark_breakpoints_out ();
3692
3693 if (inf->pid != 0)
3694 exit_inferior (inf);
3695
3696 /* Note this wipes step-resume breakpoints, so needs to be done
3697 after exit_inferior, which ends up referencing the step-resume
3698 breakpoints through clear_thread_inferior_resources. */
3699 breakpoint_init_inferior (inf_exited);
3700
3701 registers_changed ();
3702
3703 reopen_exec_file ();
3704 reinit_frame_cache ();
3705
3706 if (deprecated_detach_hook)
3707 deprecated_detach_hook ();
3708 }
3709 \f
3710 /* Convert a normal process ID to a string, e.g. "process 1234". */
3712
3713 std::string
3714 normal_pid_to_str (ptid_t ptid)
3715 {
3716 return string_printf ("process %d", ptid.pid ());
3717 }
3718
3719 static std::string
3720 default_pid_to_str (struct target_ops *ops, ptid_t ptid)
3721 {
3722 return normal_pid_to_str (ptid);
3723 }
3724
3725 /* Error-catcher for target_find_memory_regions. */
3726 static int
3727 dummy_find_memory_regions (struct target_ops *self,
3728 find_memory_region_ftype ignore1, void *ignore2)
3729 {
3730 error (_("Command not implemented for this target."));
3731 return 0;
3732 }
3733
3734 /* Error-catcher for target_make_corefile_notes. */
3735 static gdb::unique_xmalloc_ptr<char>
3736 dummy_make_corefile_notes (struct target_ops *self,
3737 bfd *ignore1, int *ignore2)
3738 {
3739 error (_("Command not implemented for this target."));
3740 return NULL;
3741 }
3742
3743 #include "target-delegates.c"
3744
3745 /* The initial current target, so that there is always a semi-valid
3746 current target. */
3747
3748 static dummy_target the_dummy_target;
3749
3750 /* See target.h. */
3751
3752 target_ops *
3753 get_dummy_target ()
3754 {
3755 return &the_dummy_target;
3756 }
3757
3758 static const target_info dummy_target_info = {
3759 "None",
3760 N_("None"),
3761 ""
3762 };
3763
3764 strata
3765 dummy_target::stratum () const
3766 {
3767 return dummy_stratum;
3768 }
3769
3770 strata
3771 debug_target::stratum () const
3772 {
3773 return debug_stratum;
3774 }
3775
3776 const target_info &
3777 dummy_target::info () const
3778 {
3779 return dummy_target_info;
3780 }
3781
3782 const target_info &
3783 debug_target::info () const
3784 {
3785 return beneath ()->info ();
3786 }
3787
3788 \f
3789
3790 void
3791 target_close (struct target_ops *targ)
3792 {
3793 for (inferior *inf : all_inferiors ())
3794 gdb_assert (!inf->target_is_pushed (targ));
3795
3796 fileio_handles_invalidate_target (targ);
3797
3798 targ->close ();
3799
3800 if (targetdebug)
3801 gdb_printf (gdb_stdlog, "target_close ()\n");
3802 }
3803
3804 int
3805 target_thread_alive (ptid_t ptid)
3806 {
3807 return current_inferior ()->top_target ()->thread_alive (ptid);
3808 }
3809
3810 void
3811 target_update_thread_list (void)
3812 {
3813 current_inferior ()->top_target ()->update_thread_list ();
3814 }
3815
3816 void
3817 target_stop (ptid_t ptid)
3818 {
3819 process_stratum_target *proc_target = current_inferior ()->process_target ();
3820
3821 gdb_assert (!proc_target->commit_resumed_state);
3822
3823 if (!may_stop)
3824 {
3825 warning (_("May not interrupt or stop the target, ignoring attempt"));
3826 return;
3827 }
3828
3829 current_inferior ()->top_target ()->stop (ptid);
3830 }
3831
3832 void
3833 target_interrupt ()
3834 {
3835 if (!may_stop)
3836 {
3837 warning (_("May not interrupt or stop the target, ignoring attempt"));
3838 return;
3839 }
3840
3841 current_inferior ()->top_target ()->interrupt ();
3842 }
3843
3844 /* See target.h. */
3845
3846 void
3847 target_pass_ctrlc (void)
3848 {
3849 /* Pass the Ctrl-C to the first target that has a thread
3850 running. */
3851 for (inferior *inf : all_inferiors ())
3852 {
3853 target_ops *proc_target = inf->process_target ();
3854 if (proc_target == NULL)
3855 continue;
3856
3857 for (thread_info *thr : inf->non_exited_threads ())
3858 {
3859 /* A thread can be THREAD_STOPPED and executing, while
3860 running an infcall. */
3861 if (thr->state == THREAD_RUNNING || thr->executing ())
3862 {
3863 /* We can get here quite deep in target layers. Avoid
3864 switching thread context or anything that would
3865 communicate with the target (e.g., to fetch
3866 registers), or flushing e.g., the frame cache. We
3867 just switch inferior in order to be able to call
3868 through the target_stack. */
3869 scoped_restore_current_inferior restore_inferior;
3870 set_current_inferior (inf);
3871 current_inferior ()->top_target ()->pass_ctrlc ();
3872 return;
3873 }
3874 }
3875 }
3876 }
3877
3878 /* See target.h. */
3879
3880 void
3881 default_target_pass_ctrlc (struct target_ops *ops)
3882 {
3883 target_interrupt ();
3884 }
3885
3886 /* See target/target.h. */
3887
3888 void
3889 target_stop_and_wait (ptid_t ptid)
3890 {
3891 struct target_waitstatus status;
3892 bool was_non_stop = non_stop;
3893
3894 non_stop = true;
3895 target_stop (ptid);
3896
3897 target_wait (ptid, &status, 0);
3898
3899 non_stop = was_non_stop;
3900 }
3901
3902 /* See target/target.h. */
3903
3904 void
3905 target_continue_no_signal (ptid_t ptid)
3906 {
3907 target_resume (ptid, 0, GDB_SIGNAL_0);
3908 }
3909
3910 /* See target/target.h. */
3911
3912 void
3913 target_continue (ptid_t ptid, enum gdb_signal signal)
3914 {
3915 target_resume (ptid, 0, signal);
3916 }
3917
3918 /* Concatenate ELEM to LIST, a comma-separated list. */
3919
3920 static void
3921 str_comma_list_concat_elem (std::string *list, const char *elem)
3922 {
3923 if (!list->empty ())
3924 list->append (", ");
3925
3926 list->append (elem);
3927 }
3928
3929 /* Helper for target_options_to_string. If OPT is present in
3930 TARGET_OPTIONS, append OPT_STR (the string version of OPT) to RET.
3931 OPT is removed from TARGET_OPTIONS. */
3932
3933 static void
3934 do_option (target_wait_flags *target_options, std::string *ret,
3935 target_wait_flag opt, const char *opt_str)
3936 {
3937 if ((*target_options & opt) != 0)
3938 {
3939 str_comma_list_concat_elem (ret, opt_str);
3940 *target_options &= ~opt;
3941 }
3942 }
3943
3944 /* See target.h. */
3945
3946 std::string
3947 target_options_to_string (target_wait_flags target_options)
3948 {
3949 std::string ret;
3950
3951 #define DO_TARG_OPTION(OPT) \
3952 do_option (&target_options, &ret, OPT, #OPT)
3953
3954 DO_TARG_OPTION (TARGET_WNOHANG);
3955
3956 if (target_options != 0)
3957 str_comma_list_concat_elem (&ret, "unknown???");
3958
3959 return ret;
3960 }
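/* Illustrative examples, not part of the GDB sources:

     target_options_to_string (TARGET_WNOHANG)  => "TARGET_WNOHANG"
     target_options_to_string (0)               => ""

   Any bits that do not correspond to a known option are reported as
   "unknown???".  */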
3961
3962 void
3963 target_fetch_registers (struct regcache *regcache, int regno)
3964 {
3965 current_inferior ()->top_target ()->fetch_registers (regcache, regno);
3966 if (targetdebug)
3967 regcache->debug_print_register ("target_fetch_registers", regno);
3968 }
3969
3970 void
3971 target_store_registers (struct regcache *regcache, int regno)
3972 {
3973 if (!may_write_registers)
3974 error (_("Writing to registers is not allowed (regno %d)"), regno);
3975
3976 current_inferior ()->top_target ()->store_registers (regcache, regno);
3977 if (targetdebug)
3978 {
3979 regcache->debug_print_register ("target_store_registers", regno);
3980 }
3981 }
3982
3983 int
3984 target_core_of_thread (ptid_t ptid)
3985 {
3986 return current_inferior ()->top_target ()->core_of_thread (ptid);
3987 }
3988
3989 int
3990 simple_verify_memory (struct target_ops *ops,
3991 const gdb_byte *data, CORE_ADDR lma, ULONGEST size)
3992 {
3993 LONGEST total_xfered = 0;
3994
3995 while (total_xfered < size)
3996 {
3997 ULONGEST xfered_len;
3998 enum target_xfer_status status;
3999 gdb_byte buf[1024];
4000 ULONGEST howmuch = std::min<ULONGEST> (sizeof (buf), size - total_xfered);
4001
4002 status = target_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
4003 buf, NULL, lma + total_xfered, howmuch,
4004 &xfered_len);
4005 if (status == TARGET_XFER_OK
4006 && memcmp (data + total_xfered, buf, xfered_len) == 0)
4007 {
4008 total_xfered += xfered_len;
4009 QUIT;
4010 }
4011 else
4012 return 0;
4013 }
4014 return 1;
4015 }
4016
4017 /* Default implementation of memory verification. */
4018
4019 static int
4020 default_verify_memory (struct target_ops *self,
4021 const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
4022 {
4023 /* Start over from the top of the target stack. */
4024 return simple_verify_memory (current_inferior ()->top_target (),
4025 data, memaddr, size);
4026 }
4027
4028 int
4029 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
4030 {
4031 target_ops *target = current_inferior ()->top_target ();
4032
4033 return target->verify_memory (data, memaddr, size);
4034 }
4035
4036 /* The documentation for this function is in its prototype declaration in
4037 target.h. */
4038
4039 int
4040 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask,
4041 enum target_hw_bp_type rw)
4042 {
4043 target_ops *target = current_inferior ()->top_target ();
4044
4045 return target->insert_mask_watchpoint (addr, mask, rw);
4046 }
4047
4048 /* The documentation for this function is in its prototype declaration in
4049 target.h. */
4050
4051 int
4052 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask,
4053 enum target_hw_bp_type rw)
4054 {
4055 target_ops *target = current_inferior ()->top_target ();
4056
4057 return target->remove_mask_watchpoint (addr, mask, rw);
4058 }
4059
4060 /* The documentation for this function is in its prototype declaration
4061 in target.h. */
4062
4063 int
4064 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
4065 {
4066 target_ops *target = current_inferior ()->top_target ();
4067
4068 return target->masked_watch_num_registers (addr, mask);
4069 }
4070
4071 /* The documentation for this function is in its prototype declaration
4072 in target.h. */
4073
4074 int
4075 target_ranged_break_num_registers (void)
4076 {
4077 return current_inferior ()->top_target ()->ranged_break_num_registers ();
4078 }
4079
4080 /* See target.h. */
4081
4082 struct btrace_target_info *
4083 target_enable_btrace (thread_info *tp, const struct btrace_config *conf)
4084 {
4085 return current_inferior ()->top_target ()->enable_btrace (tp, conf);
4086 }
4087
4088 /* See target.h. */
4089
4090 void
4091 target_disable_btrace (struct btrace_target_info *btinfo)
4092 {
4093 current_inferior ()->top_target ()->disable_btrace (btinfo);
4094 }
4095
4096 /* See target.h. */
4097
4098 void
4099 target_teardown_btrace (struct btrace_target_info *btinfo)
4100 {
4101 current_inferior ()->top_target ()->teardown_btrace (btinfo);
4102 }
4103
4104 /* See target.h. */
4105
4106 enum btrace_error
4107 target_read_btrace (struct btrace_data *btrace,
4108 struct btrace_target_info *btinfo,
4109 enum btrace_read_type type)
4110 {
4111 target_ops *target = current_inferior ()->top_target ();
4112
4113 return target->read_btrace (btrace, btinfo, type);
4114 }
4115
4116 /* See target.h. */
4117
4118 const struct btrace_config *
4119 target_btrace_conf (const struct btrace_target_info *btinfo)
4120 {
4121 return current_inferior ()->top_target ()->btrace_conf (btinfo);
4122 }
4123
4124 /* See target.h. */
4125
4126 void
4127 target_stop_recording (void)
4128 {
4129 current_inferior ()->top_target ()->stop_recording ();
4130 }
4131
4132 /* See target.h. */
4133
4134 void
4135 target_save_record (const char *filename)
4136 {
4137 current_inferior ()->top_target ()->save_record (filename);
4138 }
4139
4140 /* See target.h. */
4141
4142 int
4143 target_supports_delete_record ()
4144 {
4145 return current_inferior ()->top_target ()->supports_delete_record ();
4146 }
4147
4148 /* See target.h. */
4149
4150 void
4151 target_delete_record (void)
4152 {
4153 current_inferior ()->top_target ()->delete_record ();
4154 }
4155
4156 /* See target.h. */
4157
4158 enum record_method
4159 target_record_method (ptid_t ptid)
4160 {
4161 return current_inferior ()->top_target ()->record_method (ptid);
4162 }
4163
4164 /* See target.h. */
4165
4166 int
4167 target_record_is_replaying (ptid_t ptid)
4168 {
4169 return current_inferior ()->top_target ()->record_is_replaying (ptid);
4170 }
4171
4172 /* See target.h. */
4173
4174 int
4175 target_record_will_replay (ptid_t ptid, int dir)
4176 {
4177 return current_inferior ()->top_target ()->record_will_replay (ptid, dir);
4178 }
4179
4180 /* See target.h. */
4181
4182 void
4183 target_record_stop_replaying (void)
4184 {
4185 current_inferior ()->top_target ()->record_stop_replaying ();
4186 }
4187
4188 /* See target.h. */
4189
4190 void
4191 target_goto_record_begin (void)
4192 {
4193 current_inferior ()->top_target ()->goto_record_begin ();
4194 }
4195
4196 /* See target.h. */
4197
4198 void
4199 target_goto_record_end (void)
4200 {
4201 current_inferior ()->top_target ()->goto_record_end ();
4202 }
4203
4204 /* See target.h. */
4205
4206 void
4207 target_goto_record (ULONGEST insn)
4208 {
4209 current_inferior ()->top_target ()->goto_record (insn);
4210 }
4211
4212 /* See target.h. */
4213
4214 void
4215 target_insn_history (int size, gdb_disassembly_flags flags)
4216 {
4217 current_inferior ()->top_target ()->insn_history (size, flags);
4218 }
4219
4220 /* See target.h. */
4221
4222 void
4223 target_insn_history_from (ULONGEST from, int size,
4224 gdb_disassembly_flags flags)
4225 {
4226 current_inferior ()->top_target ()->insn_history_from (from, size, flags);
4227 }
4228
4229 /* See target.h. */
4230
4231 void
4232 target_insn_history_range (ULONGEST begin, ULONGEST end,
4233 gdb_disassembly_flags flags)
4234 {
4235 current_inferior ()->top_target ()->insn_history_range (begin, end, flags);
4236 }
4237
4238 /* See target.h. */
4239
4240 void
4241 target_call_history (int size, record_print_flags flags)
4242 {
4243 current_inferior ()->top_target ()->call_history (size, flags);
4244 }
4245
4246 /* See target.h. */
4247
4248 void
4249 target_call_history_from (ULONGEST begin, int size, record_print_flags flags)
4250 {
4251 current_inferior ()->top_target ()->call_history_from (begin, size, flags);
4252 }
4253
4254 /* See target.h. */
4255
4256 void
4257 target_call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
4258 {
4259 current_inferior ()->top_target ()->call_history_range (begin, end, flags);
4260 }
4261
4262 /* See target.h. */
4263
4264 const struct frame_unwind *
4265 target_get_unwinder (void)
4266 {
4267 return current_inferior ()->top_target ()->get_unwinder ();
4268 }
4269
4270 /* See target.h. */
4271
4272 const struct frame_unwind *
4273 target_get_tailcall_unwinder (void)
4274 {
4275 return current_inferior ()->top_target ()->get_tailcall_unwinder ();
4276 }
4277
4278 /* See target.h. */
4279
4280 void
4281 target_prepare_to_generate_core (void)
4282 {
4283 current_inferior ()->top_target ()->prepare_to_generate_core ();
4284 }
4285
4286 /* See target.h. */
4287
4288 void
4289 target_done_generating_core (void)
4290 {
4291 current_inferior ()->top_target ()->done_generating_core ();
4292 }
4293
4294 \f
4295
4296 static char targ_desc[] =
4297 "Names of targets and files being debugged.\nShows the entire \
4298 stack of targets currently in use (including the exec-file,\n\
4299 core-file, and process, if any), as well as the symbol file name.";
4300
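/* Default implementation of the rcmd target method; reports that the "monitor" command is not supported. */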
4301 static void
4302 default_rcmd (struct target_ops *self, const char *command,
4303 struct ui_file *output)
4304 {
4305 error (_("\"monitor\" command not supported by this target."));
4306 }
4307
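/* Handle the "monitor" command by forwarding CMD to the current target via target_rcmd. */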
4308 static void
4309 do_monitor_command (const char *cmd, int from_tty)
4310 {
4311 target_rcmd (cmd, gdb_stdtarg);
4312 }
4313
4314 /* Erases all the memory regions marked as flash. CMD and FROM_TTY are
4315 ignored. */
4316
4317 void
4318 flash_erase_command (const char *cmd, int from_tty)
4319 {
4320 /* Whether we found at least one flash region; used to decide whether to tell the target that the flash operations are finished. */
4321 bool found_flash_region = false;
4322 struct gdbarch *gdbarch = target_gdbarch ();
4323
4324 std::vector<mem_region> mem_regions = target_memory_map ();
4325
4326 /* Iterate over all memory regions. */
4327 for (const mem_region &m : mem_regions)
4328 {
4329 /* Is this a flash memory region? */
4330 if (m.attrib.mode == MEM_FLASH)
4331 {
4332 found_flash_region = true;
4333 target_flash_erase (m.lo, m.hi - m.lo);
4334
4335 ui_out_emit_tuple tuple_emitter (current_uiout, "erased-regions");
4336
4337 current_uiout->message (_("Erasing flash memory region at address "));
4338 current_uiout->field_core_addr ("address", gdbarch, m.lo);
4339 current_uiout->message (", size = ");
4340 current_uiout->field_string ("size", hex_string (m.hi - m.lo));
4341 current_uiout->message ("\n");
4342 }
4343 }
4344
4345 /* Did we do any flash operations? If so, we need to finalize them. */
4346 if (found_flash_region)
4347 target_flash_done ();
4348 else
4349 current_uiout->message (_("No flash memory regions found.\n"));
4350 }
4351
4352 /* Print the name of each layer of our target stack. */
4353
4354 static void
4355 maintenance_print_target_stack (const char *cmd, int from_tty)
4356 {
4357 gdb_printf (_("The current target stack is:\n"));
4358
4359 for (target_ops *t = current_inferior ()->top_target ();
4360 t != NULL;
4361 t = t->beneath ())
4362 {
4363 if (t->stratum () == debug_stratum)
4364 continue;
4365 gdb_printf (" - %s (%s)\n", t->shortname (), t->longname ());
4366 }
4367 }
4368
4369 /* See target.h. */
4370
4371 void
4372 target_async (int enable)
4373 {
4374 /* If we are trying to enable async mode then it must be the case that
4375 async mode is possible for this target. */
4376 gdb_assert (!enable || target_can_async_p ());
4377 infrun_async (enable);
4378 current_inferior ()->top_target ()->async (enable);
4379 }
4380
4381 /* See target.h. */
4382
4383 void
4384 target_thread_events (int enable)
4385 {
4386 current_inferior ()->top_target ()->thread_events (enable);
4387 }
4388
4389 /* Controls whether targets can report that they can be, or currently
4390 are, asynchronous. This is just for maintainers to use when debugging gdb. */
4391 bool target_async_permitted = true;
4392
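/* Set callback for maint target-async setting. */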
4393 static void
4394 set_maint_target_async (bool permitted)
4395 {
4396 if (have_live_inferiors ())
4397 error (_("Cannot change this setting while the inferior is running."));
4398
4399 target_async_permitted = permitted;
4400 }
4401
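/* Get callback for maint target-async setting. */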
4402 static bool
4403 get_maint_target_async ()
4404 {
4405 return target_async_permitted;
4406 }
4407
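/* Show callback for maint target-async setting. */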
4408 static void
4409 show_maint_target_async (ui_file *file, int from_tty,
4410 cmd_list_element *c, const char *value)
4411 {
4412 gdb_printf (file,
4413 _("Controlling the inferior in "
4414 "asynchronous mode is %s.\n"), value);
4415 }
4416
4417 /* Return true if the target operates in non-stop mode even with "set
4418 non-stop off". */
4419
4420 static int
4421 target_always_non_stop_p (void)
4422 {
4423 return current_inferior ()->top_target ()->always_non_stop_p ();
4424 }
4425
4426 /* See target.h. */
4427
4428 bool
4429 target_is_non_stop_p ()
4430 {
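/* The target is non-stop if the user asked for non-stop, if the maintenance override forces it on, or if the override is auto and the target itself always runs non-stop -- but only when async execution is possible. */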
4431 return ((non_stop
4432 || target_non_stop_enabled == AUTO_BOOLEAN_TRUE
4433 || (target_non_stop_enabled == AUTO_BOOLEAN_AUTO
4434 && target_always_non_stop_p ()))
4435 && target_can_async_p ());
4436 }
4437
4438 /* See target.h. */
4439
4440 bool
4441 exists_non_stop_target ()
4442 {
4443 if (target_is_non_stop_p ())
4444 return true;
4445
4446 scoped_restore_current_thread restore_thread;
4447
4448 for (inferior *inf : all_inferiors ())
4449 {
4450 switch_to_inferior_no_thread (inf);
4451 if (target_is_non_stop_p ())
4452 return true;
4453 }
4454
4455 return false;
4456 }
4457
4458 /* Controls whether targets can report that they always run in non-stop
4459 mode. This is just for maintainers to use when debugging gdb. */
4460 enum auto_boolean target_non_stop_enabled = AUTO_BOOLEAN_AUTO;
4461
4462 /* Set callback for maint target-non-stop setting. */
4463
4464 static void
4465 set_maint_target_non_stop (auto_boolean enabled)
4466 {
4467 if (have_live_inferiors ())
4468 error (_("Cannot change this setting while the inferior is running."));
4469
4470 target_non_stop_enabled = enabled;
4471 }
4472
4473 /* Get callback for maint target-non-stop setting. */
4474
4475 static auto_boolean
4476 get_maint_target_non_stop ()
4477 {
4478 return target_non_stop_enabled;
4479 }
4480
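/* Show callback for maint target-non-stop setting. */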
4481 static void
4482 show_maint_target_non_stop (ui_file *file, int from_tty,
4483 cmd_list_element *c, const char *value)
4484 {
4485 if (target_non_stop_enabled == AUTO_BOOLEAN_AUTO)
4486 gdb_printf (file,
4487 _("Whether the target is always in non-stop mode "
4488 "is %s (currently %s).\n"), value,
4489 target_always_non_stop_p () ? "on" : "off");
4490 else
4491 gdb_printf (file,
4492 _("Whether the target is always in non-stop mode "
4493 "is %s.\n"), value);
4494 }
4495
4496 /* Temporary copies of permission settings. */
4497
4498 static bool may_write_registers_1 = true;
4499 static bool may_write_memory_1 = true;
4500 static bool may_insert_breakpoints_1 = true;
4501 static bool may_insert_tracepoints_1 = true;
4502 static bool may_insert_fast_tracepoints_1 = true;
4503 static bool may_stop_1 = true;
4504
4505 /* Make the user-set values match the real values again. */
4506
4507 void
4508 update_target_permissions (void)
4509 {
4510 may_write_registers_1 = may_write_registers;
4511 may_write_memory_1 = may_write_memory;
4512 may_insert_breakpoints_1 = may_insert_breakpoints;
4513 may_insert_tracepoints_1 = may_insert_tracepoints;
4514 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
4515 may_stop_1 = may_stop;
4516 }
4517
4518 /* This single function handles most of the permission flags in the
4519 same way. */
4520
4521 static void
4522 set_target_permissions (const char *args, int from_tty,
4523 struct cmd_list_element *c)
4524 {
4525 if (target_has_execution ())
4526 {
4527 update_target_permissions ();
4528 error (_("Cannot change this setting while the inferior is running."));
4529 }
4530
4531 /* Make the real values match the user-changed values. */
4532 may_write_registers = may_write_registers_1;
4533 may_insert_breakpoints = may_insert_breakpoints_1;
4534 may_insert_tracepoints = may_insert_tracepoints_1;
4535 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
4536 may_stop = may_stop_1;
4537 update_observer_mode ();
4538 }
4539
4540 /* Set memory write permission independently of observer mode. */
4541
4542 static void
4543 set_write_memory_permission (const char *args, int from_tty,
4544 struct cmd_list_element *c)
4545 {
4546 /* Make the real values match the user-changed values. */
4547 may_write_memory = may_write_memory_1;
4548 update_observer_mode ();
4549 }
4550
4551 void _initialize_target ();
4552
4553 void
4554 _initialize_target ()
4555 {
4556 the_debug_target = new debug_target ();
4557
4558 add_info ("target", info_target_command, targ_desc);
4559 add_info ("files", info_target_command, targ_desc);
4560
4561 add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
4562 Set target debugging."), _("\
4563 Show target debugging."), _("\
4564 When non-zero, target debugging is enabled. Higher numbers are more\n\
4565 verbose."),
4566 set_targetdebug,
4567 show_targetdebug,
4568 &setdebuglist, &showdebuglist);
4569
4570 add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
4571 &trust_readonly, _("\
4572 Set mode for reading from readonly sections."), _("\
4573 Show mode for reading from readonly sections."), _("\
4574 When this mode is on, memory reads from readonly sections (such as .text)\n\
4575 will be read from the object file instead of from the target. This will\n\
4576 result in significant performance improvement for remote targets."),
4577 NULL,
4578 show_trust_readonly,
4579 &setlist, &showlist);
4580
4581 add_com ("monitor", class_obscure, do_monitor_command,
4582 _("Send a command to the remote monitor (remote targets only)."));
4583
4584 add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
4585 _("Print the name of each layer of the internal target stack."),
4586 &maintenanceprintlist);
4587
4588 add_setshow_boolean_cmd ("target-async", no_class,
4589 _("\
4590 Set whether gdb controls the inferior in asynchronous mode."), _("\
4591 Show whether gdb controls the inferior in asynchronous mode."), _("\
4592 Tells gdb whether to control the inferior in asynchronous mode."),
4593 set_maint_target_async,
4594 get_maint_target_async,
4595 show_maint_target_async,
4596 &maintenance_set_cmdlist,
4597 &maintenance_show_cmdlist);
4598
4599 add_setshow_auto_boolean_cmd ("target-non-stop", no_class,
4600 _("\
4601 Set whether gdb always controls the inferior in non-stop mode."), _("\
4602 Show whether gdb always controls the inferior in non-stop mode."), _("\
4603 Tells gdb whether to control the inferior in non-stop mode."),
4604 set_maint_target_non_stop,
4605 get_maint_target_non_stop,
4606 show_maint_target_non_stop,
4607 &maintenance_set_cmdlist,
4608 &maintenance_show_cmdlist);
4609
4610 add_setshow_boolean_cmd ("may-write-registers", class_support,
4611 &may_write_registers_1, _("\
4612 Set permission to write into registers."), _("\
4613 Show permission to write into registers."), _("\
4614 When this permission is on, GDB may write into the target's registers.\n\
4615 Otherwise, any sort of write attempt will result in an error."),
4616 set_target_permissions, NULL,
4617 &setlist, &showlist);
4618
4619 add_setshow_boolean_cmd ("may-write-memory", class_support,
4620 &may_write_memory_1, _("\
4621 Set permission to write into target memory."), _("\
4622 Show permission to write into target memory."), _("\
4623 When this permission is on, GDB may write into the target's memory.\n\
4624 Otherwise, any sort of write attempt will result in an error."),
4625 set_write_memory_permission, NULL,
4626 &setlist, &showlist);
4627
4628 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
4629 &may_insert_breakpoints_1, _("\
4630 Set permission to insert breakpoints in the target."), _("\
4631 Show permission to insert breakpoints in the target."), _("\
4632 When this permission is on, GDB may insert breakpoints in the program.\n\
4633 Otherwise, any sort of insertion attempt will result in an error."),
4634 set_target_permissions, NULL,
4635 &setlist, &showlist);
4636
4637 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
4638 &may_insert_tracepoints_1, _("\
4639 Set permission to insert tracepoints in the target."), _("\
4640 Show permission to insert tracepoints in the target."), _("\
4641 When this permission is on, GDB may insert tracepoints in the program.\n\
4642 Otherwise, any sort of insertion attempt will result in an error."),
4643 set_target_permissions, NULL,
4644 &setlist, &showlist);
4645
4646 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
4647 &may_insert_fast_tracepoints_1, _("\
4648 Set permission to insert fast tracepoints in the target."), _("\
4649 Show permission to insert fast tracepoints in the target."), _("\
4650 When this permission is on, GDB may insert fast tracepoints.\n\
4651 Otherwise, any sort of insertion attempt will result in an error."),
4652 set_target_permissions, NULL,
4653 &setlist, &showlist);
4654
4655 add_setshow_boolean_cmd ("may-interrupt", class_support,
4656 &may_stop_1, _("\
4657 Set permission to interrupt or signal the target."), _("\
4658 Show permission to interrupt or signal the target."), _("\
4659 When this permission is on, GDB may interrupt/stop the target's execution.\n\
4660 Otherwise, any attempt to interrupt or stop will be ignored."),
4661 set_target_permissions, NULL,
4662 &setlist, &showlist);
4663
4664 add_com ("flash-erase", no_class, flash_erase_command,
4665 _("Erase all flash memory regions."));
4666
4667 add_setshow_boolean_cmd ("auto-connect-native-target", class_support,
4668 &auto_connect_native_target, _("\
4669 Set whether GDB may automatically connect to the native target."), _("\
4670 Show whether GDB may automatically connect to the native target."), _("\
4671 When on, and GDB is not connected to a target yet, GDB\n\
4672 attempts \"run\" and other commands with the native target."),
4673 NULL, show_auto_connect_native_target,
4674 &setlist, &showlist);
4675 }