gdb/target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2022 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "target.h"
24 #include "target-dcache.h"
25 #include "gdbcmd.h"
26 #include "symtab.h"
27 #include "inferior.h"
28 #include "infrun.h"
29 #include "observable.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdbcore.h"
37 #include "target-descriptions.h"
38 #include "gdbthread.h"
39 #include "solib.h"
40 #include "exec.h"
41 #include "inline-frame.h"
42 #include "tracepoint.h"
43 #include "gdbsupport/fileio.h"
44 #include "gdbsupport/agent.h"
45 #include "auxv.h"
46 #include "target-debug.h"
47 #include "top.h"
48 #include "event-top.h"
49 #include <algorithm>
50 #include "gdbsupport/byte-vector.h"
51 #include "gdbsupport/search.h"
52 #include "terminal.h"
53 #include <unordered_map>
54 #include "target-connection.h"
55 #include "valprint.h"
56 #include "cli/cli-decode.h"
57
58 static void generic_tls_error (void) ATTRIBUTE_NORETURN;
59
60 static void default_terminal_info (struct target_ops *, const char *, int);
61
62 static int default_watchpoint_addr_within_range (struct target_ops *,
63 CORE_ADDR, CORE_ADDR, int);
64
65 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
66 CORE_ADDR, int);
67
68 static void default_rcmd (struct target_ops *, const char *, struct ui_file *);
69
70 static ptid_t default_get_ada_task_ptid (struct target_ops *self,
71 long lwp, ULONGEST tid);
72
73 static void default_mourn_inferior (struct target_ops *self);
74
75 static int default_search_memory (struct target_ops *ops,
76 CORE_ADDR start_addr,
77 ULONGEST search_space_len,
78 const gdb_byte *pattern,
79 ULONGEST pattern_len,
80 CORE_ADDR *found_addrp);
81
82 static int default_verify_memory (struct target_ops *self,
83 const gdb_byte *data,
84 CORE_ADDR memaddr, ULONGEST size);
85
86 static void tcomplain (void) ATTRIBUTE_NORETURN;
87
88 static struct target_ops *find_default_run_target (const char *);
89
90 static int dummy_find_memory_regions (struct target_ops *self,
91 find_memory_region_ftype ignore1,
92 void *ignore2);
93
94 static gdb::unique_xmalloc_ptr<char> dummy_make_corefile_notes
95 (struct target_ops *self, bfd *ignore1, int *ignore2);
96
97 static std::string default_pid_to_str (struct target_ops *ops, ptid_t ptid);
98
99 static enum exec_direction_kind default_execution_direction
100 (struct target_ops *self);
101
102 /* Mapping between target_info objects (which have address identity)
103 and corresponding open/factory function/callback. Each add_target
104 call adds one entry to this map, and registers a "target
105 TARGET_NAME" command that when invoked calls the factory registered
106 here. The target_info object is associated with the command via
107 the command's context. */
108 static std::unordered_map<const target_info *, target_open_ftype *>
109 target_factories;
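/* For example, "target core FILE" looks up the target_info that the
   core-file target registered via add_target and invokes the factory
   registered here, passing "FILE" as the arguments string.
   (Illustrative only; the set of available target names depends on
   the configured backends.)  */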
110
111 /* The singleton debug target. */
112
113 static struct target_ops *the_debug_target;
114
115 /* Command list for target. */
116
117 static struct cmd_list_element *targetlist = NULL;
118
119 /* True if we should trust readonly sections from the
120 executable when reading memory. */
121
122 static bool trust_readonly = false;
123
124 /* Nonzero if we should show the true memory content, including
125 any memory breakpoints inserted by GDB. */
126
127 static int show_memory_breakpoints = 0;
128
129 /* These globals control whether GDB attempts to perform these
130 operations; they are useful for targets that need to prevent
131 inadvertent disruption, such as in non-stop mode. */
132
133 bool may_write_registers = true;
134
135 bool may_write_memory = true;
136
137 bool may_insert_breakpoints = true;
138
139 bool may_insert_tracepoints = true;
140
141 bool may_insert_fast_tracepoints = true;
142
143 bool may_stop = true;
144
145 /* Nonzero if we want to see a trace of target-level operations. */
146
147 static unsigned int targetdebug = 0;
148
149 static void
150 set_targetdebug (const char *args, int from_tty, struct cmd_list_element *c)
151 {
152 if (targetdebug)
153 current_inferior ()->push_target (the_debug_target);
154 else
155 current_inferior ()->unpush_target (the_debug_target);
156 }
157
158 static void
159 show_targetdebug (struct ui_file *file, int from_tty,
160 struct cmd_list_element *c, const char *value)
161 {
162 gdb_printf (file, _("Target debugging is %s.\n"), value);
163 }
164
165 int
166 target_has_memory ()
167 {
168 for (target_ops *t = current_inferior ()->top_target ();
169 t != NULL;
170 t = t->beneath ())
171 if (t->has_memory ())
172 return 1;
173
174 return 0;
175 }
176
177 int
178 target_has_stack ()
179 {
180 for (target_ops *t = current_inferior ()->top_target ();
181 t != NULL;
182 t = t->beneath ())
183 if (t->has_stack ())
184 return 1;
185
186 return 0;
187 }
188
189 int
190 target_has_registers ()
191 {
192 for (target_ops *t = current_inferior ()->top_target ();
193 t != NULL;
194 t = t->beneath ())
195 if (t->has_registers ())
196 return 1;
197
198 return 0;
199 }
200
201 bool
202 target_has_execution (inferior *inf)
203 {
204 if (inf == nullptr)
205 inf = current_inferior ();
206
207 for (target_ops *t = inf->top_target ();
208 t != nullptr;
209 t = inf->find_target_beneath (t))
210 if (t->has_execution (inf))
211 return true;
212
213 return false;
214 }
215
216 const char *
217 target_shortname ()
218 {
219 return current_inferior ()->top_target ()->shortname ();
220 }
221
222 /* See target.h. */
223
224 bool
225 target_attach_no_wait ()
226 {
227 return current_inferior ()->top_target ()->attach_no_wait ();
228 }
229
230 /* See target.h. */
231
232 void
233 target_post_attach (int pid)
234 {
235 return current_inferior ()->top_target ()->post_attach (pid);
236 }
237
238 /* See target.h. */
239
240 void
241 target_prepare_to_store (regcache *regcache)
242 {
243 return current_inferior ()->top_target ()->prepare_to_store (regcache);
244 }
245
246 /* See target.h. */
247
248 bool
249 target_supports_enable_disable_tracepoint ()
250 {
251 target_ops *target = current_inferior ()->top_target ();
252
253 return target->supports_enable_disable_tracepoint ();
254 }
255
256 bool
257 target_supports_string_tracing ()
258 {
259 return current_inferior ()->top_target ()->supports_string_tracing ();
260 }
261
262 /* See target.h. */
263
264 bool
265 target_supports_evaluation_of_breakpoint_conditions ()
266 {
267 target_ops *target = current_inferior ()->top_target ();
268
269 return target->supports_evaluation_of_breakpoint_conditions ();
270 }
271
272 /* See target.h. */
273
274 bool
275 target_supports_dumpcore ()
276 {
277 return current_inferior ()->top_target ()->supports_dumpcore ();
278 }
279
280 /* See target.h. */
281
282 void
283 target_dumpcore (const char *filename)
284 {
285 return current_inferior ()->top_target ()->dumpcore (filename);
286 }
287
288 /* See target.h. */
289
290 bool
291 target_can_run_breakpoint_commands ()
292 {
293 return current_inferior ()->top_target ()->can_run_breakpoint_commands ();
294 }
295
296 /* See target.h. */
297
298 void
299 target_files_info ()
300 {
301 return current_inferior ()->top_target ()->files_info ();
302 }
303
304 /* See target.h. */
305
306 int
307 target_insert_fork_catchpoint (int pid)
308 {
309 return current_inferior ()->top_target ()->insert_fork_catchpoint (pid);
310 }
311
312 /* See target.h. */
313
314 int
315 target_remove_fork_catchpoint (int pid)
316 {
317 return current_inferior ()->top_target ()->remove_fork_catchpoint (pid);
318 }
319
320 /* See target.h. */
321
322 int
323 target_insert_vfork_catchpoint (int pid)
324 {
325 return current_inferior ()->top_target ()->insert_vfork_catchpoint (pid);
326 }
327
328 /* See target.h. */
329
330 int
331 target_remove_vfork_catchpoint (int pid)
332 {
333 return current_inferior ()->top_target ()->remove_vfork_catchpoint (pid);
334 }
335
336 /* See target.h. */
337
338 int
339 target_insert_exec_catchpoint (int pid)
340 {
341 return current_inferior ()->top_target ()->insert_exec_catchpoint (pid);
342 }
343
344 /* See target.h. */
345
346 int
347 target_remove_exec_catchpoint (int pid)
348 {
349 return current_inferior ()->top_target ()->remove_exec_catchpoint (pid);
350 }
351
352 /* See target.h. */
353
354 int
355 target_set_syscall_catchpoint (int pid, bool needed, int any_count,
356 gdb::array_view<const int> syscall_counts)
357 {
358 target_ops *target = current_inferior ()->top_target ();
359
360 return target->set_syscall_catchpoint (pid, needed, any_count,
361 syscall_counts);
362 }
363
364 /* See target.h. */
365
366 void
367 target_rcmd (const char *command, struct ui_file *outbuf)
368 {
369 return current_inferior ()->top_target ()->rcmd (command, outbuf);
370 }
371
372 /* See target.h. */
373
374 bool
375 target_can_lock_scheduler ()
376 {
377 target_ops *target = current_inferior ()->top_target ();
378
379 return (target->get_thread_control_capabilities () & tc_schedlock) != 0;
380 }
381
382 /* See target.h. */
383
384 bool
385 target_can_async_p ()
386 {
387 return target_can_async_p (current_inferior ()->top_target ());
388 }
389
390 /* See target.h. */
391
392 bool
393 target_can_async_p (struct target_ops *target)
394 {
395 if (!target_async_permitted)
396 return false;
397 return target->can_async_p ();
398 }
399
400 /* See target.h. */
401
402 bool
403 target_is_async_p ()
404 {
405 bool result = current_inferior ()->top_target ()->is_async_p ();
406 gdb_assert (target_async_permitted || !result);
407 return result;
408 }
409
410 exec_direction_kind
411 target_execution_direction ()
412 {
413 return current_inferior ()->top_target ()->execution_direction ();
414 }
415
416 /* See target.h. */
417
418 const char *
419 target_extra_thread_info (thread_info *tp)
420 {
421 return current_inferior ()->top_target ()->extra_thread_info (tp);
422 }
423
424 /* See target.h. */
425
426 const char *
427 target_pid_to_exec_file (int pid)
428 {
429 return current_inferior ()->top_target ()->pid_to_exec_file (pid);
430 }
431
432 /* See target.h. */
433
434 gdbarch *
435 target_thread_architecture (ptid_t ptid)
436 {
437 return current_inferior ()->top_target ()->thread_architecture (ptid);
438 }
439
440 /* See target.h. */
441
442 int
443 target_find_memory_regions (find_memory_region_ftype func, void *data)
444 {
445 return current_inferior ()->top_target ()->find_memory_regions (func, data);
446 }
447
448 /* See target.h. */
449
450 gdb::unique_xmalloc_ptr<char>
451 target_make_corefile_notes (bfd *bfd, int *size_p)
452 {
453 return current_inferior ()->top_target ()->make_corefile_notes (bfd, size_p);
454 }
455
456 gdb_byte *
457 target_get_bookmark (const char *args, int from_tty)
458 {
459 return current_inferior ()->top_target ()->get_bookmark (args, from_tty);
460 }
461
462 void
463 target_goto_bookmark (const gdb_byte *arg, int from_tty)
464 {
465 return current_inferior ()->top_target ()->goto_bookmark (arg, from_tty);
466 }
467
468 /* See target.h. */
469
470 bool
471 target_stopped_by_watchpoint ()
472 {
473 return current_inferior ()->top_target ()->stopped_by_watchpoint ();
474 }
475
476 /* See target.h. */
477
478 bool
479 target_stopped_by_sw_breakpoint ()
480 {
481 return current_inferior ()->top_target ()->stopped_by_sw_breakpoint ();
482 }
483
484 bool
485 target_supports_stopped_by_sw_breakpoint ()
486 {
487 target_ops *target = current_inferior ()->top_target ();
488
489 return target->supports_stopped_by_sw_breakpoint ();
490 }
491
492 bool
493 target_stopped_by_hw_breakpoint ()
494 {
495 return current_inferior ()->top_target ()->stopped_by_hw_breakpoint ();
496 }
497
498 bool
499 target_supports_stopped_by_hw_breakpoint ()
500 {
501 target_ops *target = current_inferior ()->top_target ();
502
503 return target->supports_stopped_by_hw_breakpoint ();
504 }
505
506 /* See target.h. */
507
508 bool
509 target_have_steppable_watchpoint ()
510 {
511 return current_inferior ()->top_target ()->have_steppable_watchpoint ();
512 }
513
514 /* See target.h. */
515
516 int
517 target_can_use_hardware_watchpoint (bptype type, int cnt, int othertype)
518 {
519 target_ops *target = current_inferior ()->top_target ();
520
521 return target->can_use_hw_breakpoint (type, cnt, othertype);
522 }
523
524 /* See target.h. */
525
526 int
527 target_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
528 {
529 target_ops *target = current_inferior ()->top_target ();
530
531 return target->region_ok_for_hw_watchpoint (addr, len);
532 }
533
534
535 int
536 target_can_do_single_step ()
537 {
538 return current_inferior ()->top_target ()->can_do_single_step ();
539 }
540
541 /* See target.h. */
542
543 int
544 target_insert_watchpoint (CORE_ADDR addr, int len, target_hw_bp_type type,
545 expression *cond)
546 {
547 target_ops *target = current_inferior ()->top_target ();
548
549 return target->insert_watchpoint (addr, len, type, cond);
550 }
551
552 /* See target.h. */
553
554 int
555 target_remove_watchpoint (CORE_ADDR addr, int len, target_hw_bp_type type,
556 expression *cond)
557 {
558 target_ops *target = current_inferior ()->top_target ();
559
560 return target->remove_watchpoint (addr, len, type, cond);
561 }
562
563 /* See target.h. */
564
565 int
566 target_insert_hw_breakpoint (gdbarch *gdbarch, bp_target_info *bp_tgt)
567 {
568 target_ops *target = current_inferior ()->top_target ();
569
570 return target->insert_hw_breakpoint (gdbarch, bp_tgt);
571 }
572
573 /* See target.h. */
574
575 int
576 target_remove_hw_breakpoint (gdbarch *gdbarch, bp_target_info *bp_tgt)
577 {
578 target_ops *target = current_inferior ()->top_target ();
579
580 return target->remove_hw_breakpoint (gdbarch, bp_tgt);
581 }
582
583 /* See target.h. */
584
585 bool
586 target_can_accel_watchpoint_condition (CORE_ADDR addr, int len, int type,
587 expression *cond)
588 {
589 target_ops *target = current_inferior ()->top_target ();
590
591 return target->can_accel_watchpoint_condition (addr, len, type, cond);
592 }
593
594 /* See target.h. */
595
596 bool
597 target_can_execute_reverse ()
598 {
599 return current_inferior ()->top_target ()->can_execute_reverse ();
600 }
601
602 ptid_t
603 target_get_ada_task_ptid (long lwp, ULONGEST tid)
604 {
605 return current_inferior ()->top_target ()->get_ada_task_ptid (lwp, tid);
606 }
607
608 bool
609 target_filesystem_is_local ()
610 {
611 return current_inferior ()->top_target ()->filesystem_is_local ();
612 }
613
614 void
615 target_trace_init ()
616 {
617 return current_inferior ()->top_target ()->trace_init ();
618 }
619
620 void
621 target_download_tracepoint (bp_location *location)
622 {
623 return current_inferior ()->top_target ()->download_tracepoint (location);
624 }
625
626 bool
627 target_can_download_tracepoint ()
628 {
629 return current_inferior ()->top_target ()->can_download_tracepoint ();
630 }
631
632 void
633 target_download_trace_state_variable (const trace_state_variable &tsv)
634 {
635 target_ops *target = current_inferior ()->top_target ();
636
637 return target->download_trace_state_variable (tsv);
638 }
639
640 void
641 target_enable_tracepoint (bp_location *loc)
642 {
643 return current_inferior ()->top_target ()->enable_tracepoint (loc);
644 }
645
646 void
647 target_disable_tracepoint (bp_location *loc)
648 {
649 return current_inferior ()->top_target ()->disable_tracepoint (loc);
650 }
651
652 void
653 target_trace_start ()
654 {
655 return current_inferior ()->top_target ()->trace_start ();
656 }
657
658 void
659 target_trace_set_readonly_regions ()
660 {
661 return current_inferior ()->top_target ()->trace_set_readonly_regions ();
662 }
663
664 int
665 target_get_trace_status (trace_status *ts)
666 {
667 return current_inferior ()->top_target ()->get_trace_status (ts);
668 }
669
670 void
671 target_get_tracepoint_status (breakpoint *tp, uploaded_tp *utp)
672 {
673 return current_inferior ()->top_target ()->get_tracepoint_status (tp, utp);
674 }
675
676 void
677 target_trace_stop ()
678 {
679 return current_inferior ()->top_target ()->trace_stop ();
680 }
681
682 int
683 target_trace_find (trace_find_type type, int num,
684 CORE_ADDR addr1, CORE_ADDR addr2, int *tpp)
685 {
686 target_ops *target = current_inferior ()->top_target ();
687
688 return target->trace_find (type, num, addr1, addr2, tpp);
689 }
690
691 bool
692 target_get_trace_state_variable_value (int tsv, LONGEST *val)
693 {
694 target_ops *target = current_inferior ()->top_target ();
695
696 return target->get_trace_state_variable_value (tsv, val);
697 }
698
699 int
700 target_save_trace_data (const char *filename)
701 {
702 return current_inferior ()->top_target ()->save_trace_data (filename);
703 }
704
705 int
706 target_upload_tracepoints (uploaded_tp **utpp)
707 {
708 return current_inferior ()->top_target ()->upload_tracepoints (utpp);
709 }
710
711 int
712 target_upload_trace_state_variables (uploaded_tsv **utsvp)
713 {
714 target_ops *target = current_inferior ()->top_target ();
715
716 return target->upload_trace_state_variables (utsvp);
717 }
718
719 LONGEST
720 target_get_raw_trace_data (gdb_byte *buf, ULONGEST offset, LONGEST len)
721 {
722 target_ops *target = current_inferior ()->top_target ();
723
724 return target->get_raw_trace_data (buf, offset, len);
725 }
726
727 int
728 target_get_min_fast_tracepoint_insn_len ()
729 {
730 target_ops *target = current_inferior ()->top_target ();
731
732 return target->get_min_fast_tracepoint_insn_len ();
733 }
734
735 void
736 target_set_disconnected_tracing (int val)
737 {
738 return current_inferior ()->top_target ()->set_disconnected_tracing (val);
739 }
740
741 void
742 target_set_circular_trace_buffer (int val)
743 {
744 return current_inferior ()->top_target ()->set_circular_trace_buffer (val);
745 }
746
747 void
748 target_set_trace_buffer_size (LONGEST val)
749 {
750 return current_inferior ()->top_target ()->set_trace_buffer_size (val);
751 }
752
753 bool
754 target_set_trace_notes (const char *user, const char *notes,
755 const char *stopnotes)
756 {
757 target_ops *target = current_inferior ()->top_target ();
758
759 return target->set_trace_notes (user, notes, stopnotes);
760 }
761
762 bool
763 target_get_tib_address (ptid_t ptid, CORE_ADDR *addr)
764 {
765 return current_inferior ()->top_target ()->get_tib_address (ptid, addr);
766 }
767
768 void
769 target_set_permissions ()
770 {
771 return current_inferior ()->top_target ()->set_permissions ();
772 }
773
774 bool
775 target_static_tracepoint_marker_at (CORE_ADDR addr,
776 static_tracepoint_marker *marker)
777 {
778 target_ops *target = current_inferior ()->top_target ();
779
780 return target->static_tracepoint_marker_at (addr, marker);
781 }
782
783 std::vector<static_tracepoint_marker>
784 target_static_tracepoint_markers_by_strid (const char *marker_id)
785 {
786 target_ops *target = current_inferior ()->top_target ();
787
788 return target->static_tracepoint_markers_by_strid (marker_id);
789 }
790
791 traceframe_info_up
792 target_traceframe_info ()
793 {
794 return current_inferior ()->top_target ()->traceframe_info ();
795 }
796
797 bool
798 target_use_agent (bool use)
799 {
800 return current_inferior ()->top_target ()->use_agent (use);
801 }
802
803 bool
804 target_can_use_agent ()
805 {
806 return current_inferior ()->top_target ()->can_use_agent ();
807 }
808
809 bool
810 target_augmented_libraries_svr4_read ()
811 {
812 return current_inferior ()->top_target ()->augmented_libraries_svr4_read ();
813 }
814
815 bool
816 target_supports_memory_tagging ()
817 {
818 return current_inferior ()->top_target ()->supports_memory_tagging ();
819 }
820
821 bool
822 target_fetch_memtags (CORE_ADDR address, size_t len, gdb::byte_vector &tags,
823 int type)
824 {
825 return current_inferior ()->top_target ()->fetch_memtags (address, len, tags, type);
826 }
827
828 bool
829 target_store_memtags (CORE_ADDR address, size_t len,
830 const gdb::byte_vector &tags, int type)
831 {
832 return current_inferior ()->top_target ()->store_memtags (address, len, tags, type);
833 }
834
835 void
836 target_log_command (const char *p)
837 {
838 return current_inferior ()->top_target ()->log_command (p);
839 }
840
841 /* This is used to implement the various target commands. */
842
843 static void
844 open_target (const char *args, int from_tty, struct cmd_list_element *command)
845 {
846 auto *ti = static_cast<target_info *> (command->context ());
847 target_open_ftype *func = target_factories[ti];
848
849 if (targetdebug)
850 gdb_printf (gdb_stdlog, "-> %s->open (...)\n",
851 ti->shortname);
852
853 func (args, from_tty);
854
855 if (targetdebug)
856 gdb_printf (gdb_stdlog, "<- %s->open (%s, %d)\n",
857 ti->shortname, args, from_tty);
858 }
859
860 /* See target.h. */
861
862 void
863 add_target (const target_info &t, target_open_ftype *func,
864 completer_ftype *completer)
865 {
866 struct cmd_list_element *c;
867
868 auto &func_slot = target_factories[&t];
869 if (func_slot != nullptr)
870 internal_error (_("target already added (\"%s\")."), t.shortname);
871 func_slot = func;
872
873 if (targetlist == NULL)
874 add_basic_prefix_cmd ("target", class_run, _("\
875 Connect to a target machine or process.\n\
876 The first argument is the type or protocol of the target machine.\n\
877 Remaining arguments are interpreted by the target protocol. For more\n\
878 information on the arguments for a particular protocol, type\n\
879 `help target ' followed by the protocol name."),
880 &targetlist, 0, &cmdlist);
881 c = add_cmd (t.shortname, no_class, t.doc, &targetlist);
882 c->set_context ((void *) &t);
883 c->func = open_target;
884 if (completer != NULL)
885 set_cmd_completer (c, completer);
886 }
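/* Illustrative sketch only: a backend typically registers itself from
   its _initialize_* routine, roughly like

       void
       _initialize_my_target ()
       {
         add_target (my_target_info, my_target_open, my_target_completer);
       }

   where _initialize_my_target, my_target_info, my_target_open and
   my_target_completer are hypothetical names standing in for the
   backend's initializer, target_info object, open callback and
   optional completer.  */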
887
888 /* See target.h. */
889
890 void
891 add_deprecated_target_alias (const target_info &tinfo, const char *alias)
892 {
893 struct cmd_list_element *c;
894
895 /* If we use add_alias_cmd, here, we do not get the deprecated warning,
896 see PR cli/15104. */
897 c = add_cmd (alias, no_class, tinfo.doc, &targetlist);
898 c->func = open_target;
899 c->set_context ((void *) &tinfo);
900 gdb::unique_xmalloc_ptr<char> alt
901 = xstrprintf ("target %s", tinfo.shortname);
902 deprecate_cmd (c, alt.release ());
903 }
904
905 /* Stub functions */
906
907 void
908 target_kill (void)
909 {
910
911 /* If the commit_resume_state of the to-be-killed-inferior's process stratum
912 is true, and this inferior is the last live inferior with resumed threads
913 of that target, then we want to leave commit_resume_state at false, as the
914 target won't have any resumed threads anymore. We achieve this with
915 this scoped_disable_commit_resumed. On construction, it will set the flag
916 to false. On destruction, it will only set it to true if there are resumed
917 threads left. */
918 scoped_disable_commit_resumed disable ("killing");
919 current_inferior ()->top_target ()->kill ();
920 }
921
922 void
923 target_load (const char *arg, int from_tty)
924 {
925 target_dcache_invalidate ();
926 current_inferior ()->top_target ()->load (arg, from_tty);
927 }
928
929 /* Define it. */
930
931 target_terminal_state target_terminal::m_terminal_state
932 = target_terminal_state::is_ours;
933
934 /* See target/target.h. */
935
936 void
937 target_terminal::init (void)
938 {
939 current_inferior ()->top_target ()->terminal_init ();
940
941 m_terminal_state = target_terminal_state::is_ours;
942 }
943
944 /* See target/target.h. */
945
946 void
947 target_terminal::inferior (void)
948 {
949 struct ui *ui = current_ui;
950
951 /* A background resume (``run&'') should leave GDB in control of the
952 terminal. */
953 if (ui->prompt_state != PROMPT_BLOCKED)
954 return;
955
956 /* Since we always run the inferior in the main console (unless "set
957 inferior-tty" is in effect), when some UI other than the main one
958 calls target_terminal::inferior, then we leave the main UI's
959 terminal settings as is. */
960 if (ui != main_ui)
961 return;
962
963 /* If GDB is resuming the inferior in the foreground, install
964 inferior's terminal modes. */
965
966 struct inferior *inf = current_inferior ();
967
968 if (inf->terminal_state != target_terminal_state::is_inferior)
969 {
970 current_inferior ()->top_target ()->terminal_inferior ();
971 inf->terminal_state = target_terminal_state::is_inferior;
972 }
973
974 m_terminal_state = target_terminal_state::is_inferior;
975
976 /* If the user hit C-c before, pretend that it was hit right
977 here. */
978 if (check_quit_flag ())
979 target_pass_ctrlc ();
980 }
981
982 /* See target/target.h. */
983
984 void
985 target_terminal::restore_inferior (void)
986 {
987 struct ui *ui = current_ui;
988
989 /* See target_terminal::inferior(). */
990 if (ui->prompt_state != PROMPT_BLOCKED || ui != main_ui)
991 return;
992
993 /* Restore the terminal settings of inferiors that were in the
994 foreground but are now ours_for_output due to a temporary
995 target_terminal::ours_for_output() call. */
996
997 {
998 scoped_restore_current_inferior restore_inferior;
999
1000 for (::inferior *inf : all_inferiors ())
1001 {
1002 if (inf->terminal_state == target_terminal_state::is_ours_for_output)
1003 {
1004 set_current_inferior (inf);
1005 current_inferior ()->top_target ()->terminal_inferior ();
1006 inf->terminal_state = target_terminal_state::is_inferior;
1007 }
1008 }
1009 }
1010
1011 m_terminal_state = target_terminal_state::is_inferior;
1012
1013 /* If the user hit C-c before, pretend that it was hit right
1014 here. */
1015 if (check_quit_flag ())
1016 target_pass_ctrlc ();
1017 }
1018
1019 /* Switch terminal state to DESIRED_STATE, either is_ours, or
1020 is_ours_for_output. */
1021
1022 static void
1023 target_terminal_is_ours_kind (target_terminal_state desired_state)
1024 {
1025 scoped_restore_current_inferior restore_inferior;
1026
1027 /* Must do this in two passes. First, have all inferiors save the
1028 current terminal settings. Then, after all inferiors have add a
1029 chance to safely save the terminal settings, restore GDB's
1030 terminal settings. */
1031
1032 for (inferior *inf : all_inferiors ())
1033 {
1034 if (inf->terminal_state == target_terminal_state::is_inferior)
1035 {
1036 set_current_inferior (inf);
1037 current_inferior ()->top_target ()->terminal_save_inferior ();
1038 }
1039 }
1040
1041 for (inferior *inf : all_inferiors ())
1042 {
1043 /* Note we don't check is_inferior here like above because we
1044 need to handle 'is_ours_for_output -> is_ours' too. Careful
1045 to never transition from 'is_ours' to 'is_ours_for_output',
1046 though. */
1047 if (inf->terminal_state != target_terminal_state::is_ours
1048 && inf->terminal_state != desired_state)
1049 {
1050 set_current_inferior (inf);
1051 if (desired_state == target_terminal_state::is_ours)
1052 current_inferior ()->top_target ()->terminal_ours ();
1053 else if (desired_state == target_terminal_state::is_ours_for_output)
1054 current_inferior ()->top_target ()->terminal_ours_for_output ();
1055 else
1056 gdb_assert_not_reached ("unhandled desired state");
1057 inf->terminal_state = desired_state;
1058 }
1059 }
1060 }
1061
1062 /* See target/target.h. */
1063
1064 void
1065 target_terminal::ours ()
1066 {
1067 struct ui *ui = current_ui;
1068
1069 /* See target_terminal::inferior. */
1070 if (ui != main_ui)
1071 return;
1072
1073 if (m_terminal_state == target_terminal_state::is_ours)
1074 return;
1075
1076 target_terminal_is_ours_kind (target_terminal_state::is_ours);
1077 m_terminal_state = target_terminal_state::is_ours;
1078 }
1079
1080 /* See target/target.h. */
1081
1082 void
1083 target_terminal::ours_for_output ()
1084 {
1085 struct ui *ui = current_ui;
1086
1087 /* See target_terminal::inferior. */
1088 if (ui != main_ui)
1089 return;
1090
1091 if (!target_terminal::is_inferior ())
1092 return;
1093
1094 target_terminal_is_ours_kind (target_terminal_state::is_ours_for_output);
1095 target_terminal::m_terminal_state = target_terminal_state::is_ours_for_output;
1096 }
1097
1098 /* See target/target.h. */
1099
1100 void
1101 target_terminal::info (const char *arg, int from_tty)
1102 {
1103 current_inferior ()->top_target ()->terminal_info (arg, from_tty);
1104 }
1105
1106 /* See target.h. */
1107
1108 bool
1109 target_supports_terminal_ours (void)
1110 {
1111 /* The current top target is the target at the top of the target
1112 stack of the current inferior. While normally there's always an
1113 inferior, we must check for nullptr here because we can get here
1114 very early during startup, before the initial inferior is first
1115 created. */
1116 inferior *inf = current_inferior ();
1117
1118 if (inf == nullptr)
1119 return false;
1120 return inf->top_target ()->supports_terminal_ours ();
1121 }
1122
1123 static void
1124 tcomplain (void)
1125 {
1126 error (_("You can't do that when your target is `%s'"),
1127 current_inferior ()->top_target ()->shortname ());
1128 }
1129
1130 void
1131 noprocess (void)
1132 {
1133 error (_("You can't do that without a process to debug."));
1134 }
1135
1136 static void
1137 default_terminal_info (struct target_ops *self, const char *args, int from_tty)
1138 {
1139 gdb_printf (_("No saved terminal information.\n"));
1140 }
1141
1142 /* A default implementation for the to_get_ada_task_ptid target method.
1143
1144 This function builds the PTID by using both LWP and TID as part of
1145 the PTID lwp and tid elements. The pid used is the pid of the
1146 inferior_ptid. */
1147
1148 static ptid_t
1149 default_get_ada_task_ptid (struct target_ops *self, long lwp, ULONGEST tid)
1150 {
1151 return ptid_t (inferior_ptid.pid (), lwp, tid);
1152 }
1153
1154 static enum exec_direction_kind
1155 default_execution_direction (struct target_ops *self)
1156 {
1157 if (!target_can_execute_reverse ())
1158 return EXEC_FORWARD;
1159 else if (!target_can_async_p ())
1160 return EXEC_FORWARD;
1161 else
1162 gdb_assert_not_reached ("\
1163 to_execution_direction must be implemented for reverse async");
1164 }
1165
1166 /* See target.h. */
1167
1168 void
1169 decref_target (target_ops *t)
1170 {
1171 t->decref ();
1172 if (t->refcount () == 0)
1173 {
1174 if (t->stratum () == process_stratum)
1175 connection_list_remove (as_process_stratum_target (t));
1176 target_close (t);
1177 }
1178 }
1179
1180 /* See target.h. */
1181
1182 void
1183 target_stack::push (target_ops *t)
1184 {
1185 t->incref ();
1186
1187 strata stratum = t->stratum ();
1188
1189 if (stratum == process_stratum)
1190 connection_list_add (as_process_stratum_target (t));
1191
1192 /* If there's already a target at this stratum, remove it. */
1193
1194 if (m_stack[stratum] != NULL)
1195 unpush (m_stack[stratum]);
1196
1197 /* Now add the new one. */
1198 m_stack[stratum] = t;
1199
1200 if (m_top < stratum)
1201 m_top = stratum;
1202 }
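/* Worked example of the replacement rule above (target names are
   hypothetical): with an exec_stratum target E and a process_stratum
   target P1 pushed, pushing another process_stratum target P2 first
   unpushes P1, leaving the stack as { E, P2 } with m_top at
   process_stratum.  */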
1203
1204 /* See target.h. */
1205
1206 bool
1207 target_stack::unpush (target_ops *t)
1208 {
1209 gdb_assert (t != NULL);
1210
1211 strata stratum = t->stratum ();
1212
1213 if (stratum == dummy_stratum)
1214 internal_error (_("Attempt to unpush the dummy target"));
1215
1216 /* Look for the specified target. Note that a target can only occur
1217 once in the target stack. */
1218
1219 if (m_stack[stratum] != t)
1220 {
1221 /* If T wasn't pushed, quit. Only open targets should be
1222 closed. */
1223 return false;
1224 }
1225
1226 /* Unchain the target. */
1227 m_stack[stratum] = NULL;
1228
1229 if (m_top == stratum)
1230 m_top = this->find_beneath (t)->stratum ();
1231
1232 /* Finally close the target, if there are no inferiors
1233 referencing this target still. Note we do this after unchaining,
1234 so any target method calls from within the target_close
1235 implementation don't end up in T anymore. Do leave the target
1236 open if there are other inferiors referencing this target
1237 still. */
1238 decref_target (t);
1239
1240 return true;
1241 }
1242
1243 /* Unpush TARGET and assert that it worked. */
1244
1245 static void
1246 unpush_target_and_assert (struct target_ops *target)
1247 {
1248 if (!current_inferior ()->unpush_target (target))
1249 {
1250 gdb_printf (gdb_stderr,
1251 "pop_all_targets couldn't find target %s\n",
1252 target->shortname ());
1253 internal_error (_("failed internal consistency check"));
1254 }
1255 }
1256
1257 void
1258 pop_all_targets_above (enum strata above_stratum)
1259 {
1260 while ((int) (current_inferior ()->top_target ()->stratum ())
1261 > (int) above_stratum)
1262 unpush_target_and_assert (current_inferior ()->top_target ());
1263 }
1264
1265 /* See target.h. */
1266
1267 void
1268 pop_all_targets_at_and_above (enum strata stratum)
1269 {
1270 while ((int) (current_inferior ()->top_target ()->stratum ())
1271 >= (int) stratum)
1272 unpush_target_and_assert (current_inferior ()->top_target ());
1273 }
1274
1275 void
1276 pop_all_targets (void)
1277 {
1278 pop_all_targets_above (dummy_stratum);
1279 }
1280
1281 void
1282 target_unpusher::operator() (struct target_ops *ops) const
1283 {
1284 current_inferior ()->unpush_target (ops);
1285 }
1286
1287 /* Default implementation of to_get_thread_local_address. */
1288
1289 static void
1290 generic_tls_error (void)
1291 {
1292 throw_error (TLS_GENERIC_ERROR,
1293 _("Cannot find thread-local variables on this target"));
1294 }
1295
1296 /* Using the objfile specified in OBJFILE, find the address for the
1297 current thread's thread-local storage with offset OFFSET. */
1298 CORE_ADDR
1299 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
1300 {
1301 volatile CORE_ADDR addr = 0;
1302 struct target_ops *target = current_inferior ()->top_target ();
1303 struct gdbarch *gdbarch = target_gdbarch ();
1304
1305 /* If OBJFILE is a separate debug object file, look for the
1306 original object file. */
1307 if (objfile->separate_debug_objfile_backlink != NULL)
1308 objfile = objfile->separate_debug_objfile_backlink;
1309
1310 if (gdbarch_fetch_tls_load_module_address_p (gdbarch))
1311 {
1312 ptid_t ptid = inferior_ptid;
1313
1314 try
1315 {
1316 CORE_ADDR lm_addr;
1317
1318 /* Fetch the load module address for this objfile. */
1319 lm_addr = gdbarch_fetch_tls_load_module_address (gdbarch,
1320 objfile);
1321
1322 if (gdbarch_get_thread_local_address_p (gdbarch))
1323 addr = gdbarch_get_thread_local_address (gdbarch, ptid, lm_addr,
1324 offset);
1325 else
1326 addr = target->get_thread_local_address (ptid, lm_addr, offset);
1327 }
1328 /* If an error occurred, print TLS related messages here. Otherwise,
1329 throw the error to some higher catcher. */
1330 catch (const gdb_exception &ex)
1331 {
1332 int objfile_is_library = (objfile->flags & OBJF_SHARED);
1333
1334 switch (ex.error)
1335 {
1336 case TLS_NO_LIBRARY_SUPPORT_ERROR:
1337 error (_("Cannot find thread-local variables "
1338 "in this thread library."));
1339 break;
1340 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
1341 if (objfile_is_library)
1342 error (_("Cannot find shared library `%s' in dynamic"
1343 " linker's load module list"), objfile_name (objfile));
1344 else
1345 error (_("Cannot find executable file `%s' in dynamic"
1346 " linker's load module list"), objfile_name (objfile));
1347 break;
1348 case TLS_NOT_ALLOCATED_YET_ERROR:
1349 if (objfile_is_library)
1350 error (_("The inferior has not yet allocated storage for"
1351 " thread-local variables in\n"
1352 "the shared library `%s'\n"
1353 "for %s"),
1354 objfile_name (objfile),
1355 target_pid_to_str (ptid).c_str ());
1356 else
1357 error (_("The inferior has not yet allocated storage for"
1358 " thread-local variables in\n"
1359 "the executable `%s'\n"
1360 "for %s"),
1361 objfile_name (objfile),
1362 target_pid_to_str (ptid).c_str ());
1363 break;
1364 case TLS_GENERIC_ERROR:
1365 if (objfile_is_library)
1366 error (_("Cannot find thread-local storage for %s, "
1367 "shared library %s:\n%s"),
1368 target_pid_to_str (ptid).c_str (),
1369 objfile_name (objfile), ex.what ());
1370 else
1371 error (_("Cannot find thread-local storage for %s, "
1372 "executable file %s:\n%s"),
1373 target_pid_to_str (ptid).c_str (),
1374 objfile_name (objfile), ex.what ());
1375 break;
1376 default:
1377 throw;
1378 break;
1379 }
1380 }
1381 }
1382 else
1383 error (_("Cannot find thread-local variables on this target"));
1384
1385 return addr;
1386 }
1387
1388 const char *
1389 target_xfer_status_to_string (enum target_xfer_status status)
1390 {
1391 #define CASE(X) case X: return #X
1392 switch (status)
1393 {
1394 CASE(TARGET_XFER_E_IO);
1395 CASE(TARGET_XFER_UNAVAILABLE);
1396 default:
1397 return "<unknown>";
1398 }
1399 #undef CASE
1400 };
1401
1402
1403 const target_section_table *
1404 target_get_section_table (struct target_ops *target)
1405 {
1406 return target->get_section_table ();
1407 }
1408
1409 /* Find a section containing ADDR. */
1410
1411 const struct target_section *
1412 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1413 {
1414 const target_section_table *table = target_get_section_table (target);
1415
1416 if (table == NULL)
1417 return NULL;
1418
1419 for (const target_section &secp : *table)
1420 {
1421 if (addr >= secp.addr && addr < secp.endaddr)
1422 return &secp;
1423 }
1424 return NULL;
1425 }
1426
1427 /* See target.h. */
1428
1429 const target_section_table *
1430 default_get_section_table ()
1431 {
1432 return &current_program_space->target_sections ();
1433 }
1434
1435 /* Helper for the memory xfer routines. Checks the attributes of the
1436 memory region of MEMADDR against the read or write being attempted.
1437 If the access is permitted returns true, otherwise returns false.
1438 REGION_P is an optional output parameter. If not-NULL, it is
1439 filled with a pointer to the memory region of MEMADDR. REG_LEN
1440 returns LEN trimmed to the end of the region. This is how much the
1441 caller can continue requesting, if the access is permitted. A
1442 single xfer request must not straddle memory region boundaries. */
1443
1444 static int
1445 memory_xfer_check_region (gdb_byte *readbuf, const gdb_byte *writebuf,
1446 ULONGEST memaddr, ULONGEST len, ULONGEST *reg_len,
1447 struct mem_region **region_p)
1448 {
1449 struct mem_region *region;
1450
1451 region = lookup_mem_region (memaddr);
1452
1453 if (region_p != NULL)
1454 *region_p = region;
1455
1456 switch (region->attrib.mode)
1457 {
1458 case MEM_RO:
1459 if (writebuf != NULL)
1460 return 0;
1461 break;
1462
1463 case MEM_WO:
1464 if (readbuf != NULL)
1465 return 0;
1466 break;
1467
1468 case MEM_FLASH:
1469 /* We only support writing to flash during "load" for now. */
1470 if (writebuf != NULL)
1471 error (_("Writing to flash memory forbidden in this context"));
1472 break;
1473
1474 case MEM_NONE:
1475 return 0;
1476 }
1477
1478 /* region->hi == 0 means there's no upper bound. */
1479 if (memaddr + len < region->hi || region->hi == 0)
1480 *reg_len = len;
1481 else
1482 *reg_len = region->hi - memaddr;
1483
1484 return 1;
1485 }
1486
1487 /* Read memory from more than one valid target. A core file, for
1488 instance, could have some of the memory but delegate other bits to
1489 the target below it. So, we must manually try all targets. */
1490
1491 enum target_xfer_status
1492 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1493 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1494 ULONGEST *xfered_len)
1495 {
1496 enum target_xfer_status res;
1497
1498 do
1499 {
1500 res = ops->xfer_partial (TARGET_OBJECT_MEMORY, NULL,
1501 readbuf, writebuf, memaddr, len,
1502 xfered_len);
1503 if (res == TARGET_XFER_OK)
1504 break;
1505
1506 /* Stop if the target reports that the memory is not available. */
1507 if (res == TARGET_XFER_UNAVAILABLE)
1508 break;
1509
1510 /* Don't continue past targets which have all the memory.
1511 At one time, this code was necessary to read data from
1512 executables / shared libraries when data for the requested
1513 addresses weren't available in the core file. But now the
1514 core target handles this case itself. */
1515 if (ops->has_all_memory ())
1516 break;
1517
1518 ops = ops->beneath ();
1519 }
1520 while (ops != NULL);
1521
1522 /* The cache works at the raw memory level. Make sure the cache
1523 gets updated with raw contents no matter what kind of memory
1524 object was originally being written. Note we do write-through
1525 first, so that if it fails, we don't store in the cache contents
1526 that never made it to the target. */
1527 if (writebuf != NULL
1528 && inferior_ptid != null_ptid
1529 && target_dcache_init_p ()
1530 && (stack_cache_enabled_p () || code_cache_enabled_p ()))
1531 {
1532 DCACHE *dcache = target_dcache_get ();
1533
1534 /* Note that writing to an area of memory which wasn't present
1535 in the cache doesn't cause it to be loaded in. */
1536 dcache_update (dcache, res, memaddr, writebuf, *xfered_len);
1537 }
1538
1539 return res;
1540 }
1541
1542 /* Perform a partial memory transfer.
1543 For docs see target.h, to_xfer_partial. */
1544
1545 static enum target_xfer_status
1546 memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
1547 gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
1548 ULONGEST len, ULONGEST *xfered_len)
1549 {
1550 enum target_xfer_status res;
1551 ULONGEST reg_len;
1552 struct mem_region *region;
1553 struct inferior *inf;
1554
1555 /* For accesses to unmapped overlay sections, read directly from
1556 files. Must do this first, as MEMADDR may need adjustment. */
1557 if (readbuf != NULL && overlay_debugging)
1558 {
1559 struct obj_section *section = find_pc_overlay (memaddr);
1560
1561 if (pc_in_unmapped_range (memaddr, section))
1562 {
1563 const target_section_table *table = target_get_section_table (ops);
1564 const char *section_name = section->the_bfd_section->name;
1565
1566 memaddr = overlay_mapped_address (memaddr, section);
1567
1568 auto match_cb = [=] (const struct target_section *s)
1569 {
1570 return (strcmp (section_name, s->the_bfd_section->name) == 0);
1571 };
1572
1573 return section_table_xfer_memory_partial (readbuf, writebuf,
1574 memaddr, len, xfered_len,
1575 *table, match_cb);
1576 }
1577 }
1578
1579 /* Try the executable files, if "trust-readonly-sections" is set. */
1580 if (readbuf != NULL && trust_readonly)
1581 {
1582 const struct target_section *secp
1583 = target_section_by_addr (ops, memaddr);
1584 if (secp != NULL
1585 && (bfd_section_flags (secp->the_bfd_section) & SEC_READONLY))
1586 {
1587 const target_section_table *table = target_get_section_table (ops);
1588 return section_table_xfer_memory_partial (readbuf, writebuf,
1589 memaddr, len, xfered_len,
1590 *table);
1591 }
1592 }
1593
1594 /* Try GDB's internal data cache. */
1595
1596 if (!memory_xfer_check_region (readbuf, writebuf, memaddr, len, &reg_len,
1597 &region))
1598 return TARGET_XFER_E_IO;
1599
1600 if (inferior_ptid != null_ptid)
1601 inf = current_inferior ();
1602 else
1603 inf = NULL;
1604
1605 if (inf != NULL
1606 && readbuf != NULL
1607 /* The dcache reads whole cache lines; that doesn't play well
1608 with reading from a trace buffer, because reading outside of
1609 the collected memory range fails. */
1610 && get_traceframe_number () == -1
1611 && (region->attrib.cache
1612 || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
1613 || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
1614 {
1615 DCACHE *dcache = target_dcache_get_or_init ();
1616
1617 return dcache_read_memory_partial (ops, dcache, memaddr, readbuf,
1618 reg_len, xfered_len);
1619 }
1620
1621 /* If none of those methods found the memory we wanted, fall back
1622 to a target partial transfer. Normally a single call to
1623 to_xfer_partial is enough; if it doesn't recognize an object
1624 it will call the to_xfer_partial of the next target down.
1625 But for memory this won't do. Memory is the only target
1626 object which can be read from more than one valid target.
1627 A core file, for instance, could have some of the memory but
1628 delegate other bits to the target below it. So, we must
1629 manually try all targets. */
1630
1631 res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
1632 xfered_len);
1633
1634 /* If we still haven't got anything, return the last error. We
1635 give up. */
1636 return res;
1637 }
1638
1639 /* Perform a partial memory transfer. For docs see target.h,
1640 to_xfer_partial. */
1641
1642 static enum target_xfer_status
1643 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1644 gdb_byte *readbuf, const gdb_byte *writebuf,
1645 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1646 {
1647 enum target_xfer_status res;
1648
1649 /* Zero length requests are ok and require no work. */
1650 if (len == 0)
1651 return TARGET_XFER_EOF;
1652
1653 memaddr = address_significant (target_gdbarch (), memaddr);
1654
1655 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1656 breakpoint insns, thus hiding out from higher layers whether
1657 there are software breakpoints inserted in the code stream. */
1658 if (readbuf != NULL)
1659 {
1660 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1661 xfered_len);
1662
1663 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1664 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, *xfered_len);
1665 }
1666 else
1667 {
1668 /* A large write request is likely to be partially satisfied
1669 by memory_xfer_partial_1. We will continually malloc
1670 and free a copy of the entire write request for breakpoint
1671 shadow handling even though we only end up writing a small
1672 subset of it. Cap writes to a limit specified by the target
1673 to mitigate this. */
1674 len = std::min (ops->get_memory_xfer_limit (), len);
1675
1676 gdb::byte_vector buf (writebuf, writebuf + len);
1677 breakpoint_xfer_memory (NULL, buf.data (), writebuf, memaddr, len);
1678 res = memory_xfer_partial_1 (ops, object, NULL, buf.data (), memaddr, len,
1679 xfered_len);
1680 }
1681
1682 return res;
1683 }
1684
1685 scoped_restore_tmpl<int>
1686 make_scoped_restore_show_memory_breakpoints (int show)
1687 {
1688 return make_scoped_restore (&show_memory_breakpoints, show);
1689 }
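/* Illustrative use (the caller and ADDR/BUF/LEN are hypothetical):

       scoped_restore restore_show
         = make_scoped_restore_show_memory_breakpoints (1);
       target_read_memory (addr, buf, len);

   While RESTORE_SHOW is in scope, reads return the raw contents of
   memory, including any software breakpoint instructions GDB has
   inserted; the previous setting is restored on scope exit.  */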
1690
1691 /* For docs see target.h, to_xfer_partial. */
1692
1693 enum target_xfer_status
1694 target_xfer_partial (struct target_ops *ops,
1695 enum target_object object, const char *annex,
1696 gdb_byte *readbuf, const gdb_byte *writebuf,
1697 ULONGEST offset, ULONGEST len,
1698 ULONGEST *xfered_len)
1699 {
1700 enum target_xfer_status retval;
1701
1702 /* Transfer is done when LEN is zero. */
1703 if (len == 0)
1704 return TARGET_XFER_EOF;
1705
1706 if (writebuf && !may_write_memory)
1707 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1708 core_addr_to_string_nz (offset), plongest (len));
1709
1710 *xfered_len = 0;
1711
1712 /* If this is a memory transfer, let the memory-specific code
1713 have a look at it instead. Memory transfers are more
1714 complicated. */
1715 if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
1716 || object == TARGET_OBJECT_CODE_MEMORY)
1717 retval = memory_xfer_partial (ops, object, readbuf,
1718 writebuf, offset, len, xfered_len);
1719 else if (object == TARGET_OBJECT_RAW_MEMORY)
1720 {
1721 /* Skip/avoid accessing the target if the memory region
1722 attributes block the access. Check this here instead of in
1723 raw_memory_xfer_partial as otherwise we'd end up checking
1724 this twice in the case of the memory_xfer_partial path is
1725 taken; once before checking the dcache, and another in the
1726 tail call to raw_memory_xfer_partial. */
1727 if (!memory_xfer_check_region (readbuf, writebuf, offset, len, &len,
1728 NULL))
1729 return TARGET_XFER_E_IO;
1730
1731 /* Request the normal memory object from other layers. */
1732 retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
1733 xfered_len);
1734 }
1735 else
1736 retval = ops->xfer_partial (object, annex, readbuf,
1737 writebuf, offset, len, xfered_len);
1738
1739 if (targetdebug)
1740 {
1741 const unsigned char *myaddr = NULL;
1742
1743 gdb_printf (gdb_stdlog,
1744 "%s:target_xfer_partial "
1745 "(%d, %s, %s, %s, %s, %s) = %d, %s",
1746 ops->shortname (),
1747 (int) object,
1748 (annex ? annex : "(null)"),
1749 host_address_to_string (readbuf),
1750 host_address_to_string (writebuf),
1751 core_addr_to_string_nz (offset),
1752 pulongest (len), retval,
1753 pulongest (*xfered_len));
1754
1755 if (readbuf)
1756 myaddr = readbuf;
1757 if (writebuf)
1758 myaddr = writebuf;
1759 if (retval == TARGET_XFER_OK && myaddr != NULL)
1760 {
1761 int i;
1762
1763 gdb_puts (", bytes =", gdb_stdlog);
1764 for (i = 0; i < *xfered_len; i++)
1765 {
1766 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1767 {
1768 if (targetdebug < 2 && i > 0)
1769 {
1770 gdb_printf (gdb_stdlog, " ...");
1771 break;
1772 }
1773 gdb_printf (gdb_stdlog, "\n");
1774 }
1775
1776 gdb_printf (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1777 }
1778 }
1779
1780 gdb_putc ('\n', gdb_stdlog);
1781 }
1782
1783 /* Check implementations of to_xfer_partial update *XFERED_LEN
1784 properly. Do assertion after printing debug messages, so that we
1785 can find more clues on assertion failure from debugging messages. */
1786 if (retval == TARGET_XFER_OK || retval == TARGET_XFER_UNAVAILABLE)
1787 gdb_assert (*xfered_len > 0);
1788
1789 return retval;
1790 }
1791
1792 /* Read LEN bytes of target memory at address MEMADDR, placing the
1793 results in GDB's memory at MYADDR. Returns either 0 for success or
1794 -1 if any error occurs.
1795
1796 If an error occurs, no guarantee is made about the contents of the data at
1797 MYADDR. In particular, the caller should not depend upon partial reads
1798 filling the buffer with good data. There is no way for the caller to know
1799 how much good data might have been transferred anyway. Callers that can
1800 deal with partial reads should call target_read (which will retry until
1801 it makes no progress, and then return how much was transferred). */
1802
1803 int
1804 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1805 {
1806 if (target_read (current_inferior ()->top_target (),
1807 TARGET_OBJECT_MEMORY, NULL,
1808 myaddr, memaddr, len) == len)
1809 return 0;
1810 else
1811 return -1;
1812 }
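/* Illustrative use (ADDR and GDBARCH are hypothetical locals):

       gdb_byte buf[8];

       if (target_read_memory (addr, buf, sizeof buf) != 0)
         error (_("cannot read target memory at %s"),
                paddress (gdbarch, addr));

   Callers that can cope with partial transfers should call
   target_read instead, as described above.  */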
1813
1814 /* See target/target.h. */
1815
1816 int
1817 target_read_uint32 (CORE_ADDR memaddr, uint32_t *result)
1818 {
1819 gdb_byte buf[4];
1820 int r;
1821
1822 r = target_read_memory (memaddr, buf, sizeof buf);
1823 if (r != 0)
1824 return r;
1825 *result = extract_unsigned_integer (buf, sizeof buf,
1826 gdbarch_byte_order (target_gdbarch ()));
1827 return 0;
1828 }
1829
1830 /* Like target_read_memory, but specify explicitly that this is a read
1831 from the target's raw memory. That is, this read bypasses the
1832 dcache, breakpoint shadowing, etc. */
1833
1834 int
1835 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1836 {
1837 if (target_read (current_inferior ()->top_target (),
1838 TARGET_OBJECT_RAW_MEMORY, NULL,
1839 myaddr, memaddr, len) == len)
1840 return 0;
1841 else
1842 return -1;
1843 }
1844
1845 /* Like target_read_memory, but specify explicitly that this is a read from
1846 the target's stack. This may trigger different cache behavior. */
1847
1848 int
1849 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1850 {
1851 if (target_read (current_inferior ()->top_target (),
1852 TARGET_OBJECT_STACK_MEMORY, NULL,
1853 myaddr, memaddr, len) == len)
1854 return 0;
1855 else
1856 return -1;
1857 }
1858
1859 /* Like target_read_memory, but specify explicitly that this is a read from
1860 the target's code. This may trigger different cache behavior. */
1861
1862 int
1863 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1864 {
1865 if (target_read (current_inferior ()->top_target (),
1866 TARGET_OBJECT_CODE_MEMORY, NULL,
1867 myaddr, memaddr, len) == len)
1868 return 0;
1869 else
1870 return -1;
1871 }
1872
1873 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1874 Returns either 0 for success or -1 if any error occurs. If an
1875 error occurs, no guarantee is made about how much data got written.
1876 Callers that can deal with partial writes should call
1877 target_write. */
1878
1879 int
1880 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1881 {
1882 if (target_write (current_inferior ()->top_target (),
1883 TARGET_OBJECT_MEMORY, NULL,
1884 myaddr, memaddr, len) == len)
1885 return 0;
1886 else
1887 return -1;
1888 }
1889
1890 /* Write LEN bytes from MYADDR to target raw memory at address
1891 MEMADDR. Returns either 0 for success or -1 if any error occurs.
1892 If an error occurs, no guarantee is made about how much data got
1893 written. Callers that can deal with partial writes should call
1894 target_write. */
1895
1896 int
1897 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1898 {
1899 if (target_write (current_inferior ()->top_target (),
1900 TARGET_OBJECT_RAW_MEMORY, NULL,
1901 myaddr, memaddr, len) == len)
1902 return 0;
1903 else
1904 return -1;
1905 }
1906
1907 /* Fetch the target's memory map. */
1908
1909 std::vector<mem_region>
1910 target_memory_map (void)
1911 {
1912 target_ops *target = current_inferior ()->top_target ();
1913 std::vector<mem_region> result = target->memory_map ();
1914 if (result.empty ())
1915 return result;
1916
1917 std::sort (result.begin (), result.end ());
1918
1919 /* Check that regions do not overlap. Simultaneously assign
1920 a numbering for the "mem" commands to use to refer to
1921 each region. */
1922 mem_region *last_one = NULL;
1923 for (size_t ix = 0; ix < result.size (); ix++)
1924 {
1925 mem_region *this_one = &result[ix];
1926 this_one->number = ix;
1927
1928 if (last_one != NULL && last_one->hi > this_one->lo)
1929 {
1930 warning (_("Overlapping regions in memory map: ignoring"));
1931 return std::vector<mem_region> ();
1932 }
1933
1934 last_one = this_one;
1935 }
1936
1937 return result;
1938 }
1939
1940 void
1941 target_flash_erase (ULONGEST address, LONGEST length)
1942 {
1943 current_inferior ()->top_target ()->flash_erase (address, length);
1944 }
1945
1946 void
1947 target_flash_done (void)
1948 {
1949 current_inferior ()->top_target ()->flash_done ();
1950 }
1951
1952 static void
1953 show_trust_readonly (struct ui_file *file, int from_tty,
1954 struct cmd_list_element *c, const char *value)
1955 {
1956 gdb_printf (file,
1957 _("Mode for reading from readonly sections is %s.\n"),
1958 value);
1959 }
1960
1961 /* Target vector read/write partial wrapper functions. */
1962
1963 static enum target_xfer_status
1964 target_read_partial (struct target_ops *ops,
1965 enum target_object object,
1966 const char *annex, gdb_byte *buf,
1967 ULONGEST offset, ULONGEST len,
1968 ULONGEST *xfered_len)
1969 {
1970 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
1971 xfered_len);
1972 }
1973
1974 static enum target_xfer_status
1975 target_write_partial (struct target_ops *ops,
1976 enum target_object object,
1977 const char *annex, const gdb_byte *buf,
1978 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
1979 {
1980 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
1981 xfered_len);
1982 }
1983
1984 /* Wrappers to perform the full transfer. */
1985
1986 /* For docs on target_read see target.h. */
1987
1988 LONGEST
1989 target_read (struct target_ops *ops,
1990 enum target_object object,
1991 const char *annex, gdb_byte *buf,
1992 ULONGEST offset, LONGEST len)
1993 {
1994 LONGEST xfered_total = 0;
1995 int unit_size = 1;
1996
1997 /* If we are reading from a memory object, find the length of an addressable
1998 unit for that architecture. */
1999 if (object == TARGET_OBJECT_MEMORY
2000 || object == TARGET_OBJECT_STACK_MEMORY
2001 || object == TARGET_OBJECT_CODE_MEMORY
2002 || object == TARGET_OBJECT_RAW_MEMORY)
2003 unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
2004
2005 while (xfered_total < len)
2006 {
2007 ULONGEST xfered_partial;
2008 enum target_xfer_status status;
2009
2010 status = target_read_partial (ops, object, annex,
2011 buf + xfered_total * unit_size,
2012 offset + xfered_total, len - xfered_total,
2013 &xfered_partial);
2014
2015 /* Call an observer, notifying them of the xfer progress? */
2016 if (status == TARGET_XFER_EOF)
2017 return xfered_total;
2018 else if (status == TARGET_XFER_OK)
2019 {
2020 xfered_total += xfered_partial;
2021 QUIT;
2022 }
2023 else
2024 return TARGET_XFER_E_IO;
2025
2026 }
2027 return len;
2028 }
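
/* Editor's illustration (not part of GDB): reading a whole buffer of
   target memory with the wrapper above, treating anything short of LEN
   as an error.  The helper name `example_read_all' is invented.  */

static void
example_read_all (CORE_ADDR addr, gdb_byte *buf, LONGEST len)
{
  LONGEST got = target_read (current_inferior ()->top_target (),
                             TARGET_OBJECT_MEMORY, NULL, buf, addr, len);

  /* target_read returns LEN on success, a smaller count at EOF, or
     TARGET_XFER_E_IO on error.  */
  if (got != len)
    memory_error (TARGET_XFER_E_IO, addr);
}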
2029
2030 /* Assuming that the entire [begin, end) range of memory cannot be
2031 read, try to read whatever subrange is possible to read.
2032
2033 The function returns, in RESULT, either zero or one memory block.
2034 If there's a readable subrange at the beginning, it is completely
2035 read and returned. Any further readable subrange will not be read.
2036 Otherwise, if there's a readable subrange at the end, it will be
2037 completely read and returned. Any readable subranges before it
2038 (obviously, not starting at the beginning) will be ignored. In
2039 other cases -- either no readable subrange, or readable subrange(s)
2040 that are neither at the beginning nor at the end -- nothing is returned.
2041
2042 The purpose of this function is to handle a read across a boundary
2043 of accessible memory in the case when a memory map is not available.
2044 The above restrictions are fine for this case, but will give
2045 incorrect results if the memory is 'patchy'. However, supporting
2046 'patchy' memory would require trying to read every single byte,
2047 which seems an unacceptable solution. An explicit memory map is
2048 recommended for this case -- and read_memory_robust will then
2049 take care of reading multiple ranges. */
2050
2051 static void
2052 read_whatever_is_readable (struct target_ops *ops,
2053 const ULONGEST begin, const ULONGEST end,
2054 int unit_size,
2055 std::vector<memory_read_result> *result)
2056 {
2057 ULONGEST current_begin = begin;
2058 ULONGEST current_end = end;
2059 int forward;
2060 ULONGEST xfered_len;
2061
2062 /* If we previously failed to read 1 byte, nothing can be done here. */
2063 if (end - begin <= 1)
2064 return;
2065
2066 gdb::unique_xmalloc_ptr<gdb_byte> buf ((gdb_byte *) xmalloc (end - begin));
2067
2068 /* Check that either the first or the last byte is readable, and give up
2069 if not. This heuristic is meant to permit reading accessible memory
2070 at the boundary of an accessible region. */
2071 if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2072 buf.get (), begin, 1, &xfered_len) == TARGET_XFER_OK)
2073 {
2074 forward = 1;
2075 ++current_begin;
2076 }
2077 else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2078 buf.get () + (end - begin) - 1, end - 1, 1,
2079 &xfered_len) == TARGET_XFER_OK)
2080 {
2081 forward = 0;
2082 --current_end;
2083 }
2084 else
2085 return;
2086
2087 /* Loop invariant is that the [current_begin, current_end) was previously
2088 found to be not readable as a whole.
2089
2090 Note loop condition -- if the range has 1 byte, we can't divide the range
2091 so there's no point trying further. */
2092 while (current_end - current_begin > 1)
2093 {
2094 ULONGEST first_half_begin, first_half_end;
2095 ULONGEST second_half_begin, second_half_end;
2096 LONGEST xfer;
2097 ULONGEST middle = current_begin + (current_end - current_begin) / 2;
2098
2099 if (forward)
2100 {
2101 first_half_begin = current_begin;
2102 first_half_end = middle;
2103 second_half_begin = middle;
2104 second_half_end = current_end;
2105 }
2106 else
2107 {
2108 first_half_begin = middle;
2109 first_half_end = current_end;
2110 second_half_begin = current_begin;
2111 second_half_end = middle;
2112 }
2113
2114 xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2115 buf.get () + (first_half_begin - begin) * unit_size,
2116 first_half_begin,
2117 first_half_end - first_half_begin);
2118
2119 if (xfer == first_half_end - first_half_begin)
2120 {
2121 /* This half reads fine. So, the error must be in the
2122 other half. */
2123 current_begin = second_half_begin;
2124 current_end = second_half_end;
2125 }
2126 else
2127 {
2128 /* This half is not readable. Because we've tried one byte, we
2129 know some part of this half is actually readable. Go to the next
2130 iteration to divide again and try to read.
2131
2132 We don't handle the other half, because this function only tries
2133 to read a single readable subrange. */
2134 current_begin = first_half_begin;
2135 current_end = first_half_end;
2136 }
2137 }
2138
2139 if (forward)
2140 {
2141 /* The [begin, current_begin) range has been read. */
2142 result->emplace_back (begin, current_begin, std::move (buf));
2143 }
2144 else
2145 {
2146 /* The [current_end, end) range has been read. */
2147 LONGEST region_len = end - current_end;
2148
2149 gdb::unique_xmalloc_ptr<gdb_byte> data
2150 ((gdb_byte *) xmalloc (region_len * unit_size));
2151 memcpy (data.get (), buf.get () + (current_end - begin) * unit_size,
2152 region_len * unit_size);
2153 result->emplace_back (current_end, end, std::move (data));
2154 }
2155 }
2156
2157 std::vector<memory_read_result>
2158 read_memory_robust (struct target_ops *ops,
2159 const ULONGEST offset, const LONGEST len)
2160 {
2161 std::vector<memory_read_result> result;
2162 int unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
2163
2164 LONGEST xfered_total = 0;
2165 while (xfered_total < len)
2166 {
2167 struct mem_region *region = lookup_mem_region (offset + xfered_total);
2168 LONGEST region_len;
2169
2170 /* If there is no explicit region, a fake one should be created. */
2171 gdb_assert (region);
2172
2173 if (region->hi == 0)
2174 region_len = len - xfered_total;
2175 else
2176 region_len = region->hi - offset;
2177
2178 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2179 {
2180 /* Cannot read this region. Note that we can end up here only
2181 if the region is explicitly marked inaccessible, or
2182 'inaccessible-by-default' is in effect. */
2183 xfered_total += region_len;
2184 }
2185 else
2186 {
2187 LONGEST to_read = std::min (len - xfered_total, region_len);
2188 gdb::unique_xmalloc_ptr<gdb_byte> buffer
2189 ((gdb_byte *) xmalloc (to_read * unit_size));
2190
2191 LONGEST xfered_partial =
2192 target_read (ops, TARGET_OBJECT_MEMORY, NULL, buffer.get (),
2193 offset + xfered_total, to_read);
2194 /* Call an observer, notifying them of the xfer progress? */
2195 if (xfered_partial <= 0)
2196 {
2197 /* Got an error reading the full chunk. See if we can read
2198 some subrange. */
2199 read_whatever_is_readable (ops, offset + xfered_total,
2200 offset + xfered_total + to_read,
2201 unit_size, &result);
2202 xfered_total += to_read;
2203 }
2204 else
2205 {
2206 result.emplace_back (offset + xfered_total,
2207 offset + xfered_total + xfered_partial,
2208 std::move (buffer));
2209 xfered_total += xfered_partial;
2210 }
2211 QUIT;
2212 }
2213 }
2214
2215 return result;
2216 }
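
/* Editor's illustration (not part of GDB): consuming the blocks that
   read_memory_robust returns.  The begin/end/data member names are
   assumed from the memory_read_result declaration in target.h; the
   function name `example_dump_readable' is invented.  */

static void
example_dump_readable (struct target_ops *ops, ULONGEST start, LONGEST len)
{
  std::vector<memory_read_result> blocks
    = read_memory_robust (ops, start, len);

  for (const memory_read_result &block : blocks)
    gdb_printf ("readable: [%s, %s)\n",
                pulongest (block.begin), pulongest (block.end));
}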
2217
2218
2219 /* An alternative to target_write with progress callbacks. */
2220
2221 LONGEST
2222 target_write_with_progress (struct target_ops *ops,
2223 enum target_object object,
2224 const char *annex, const gdb_byte *buf,
2225 ULONGEST offset, LONGEST len,
2226 void (*progress) (ULONGEST, void *), void *baton)
2227 {
2228 LONGEST xfered_total = 0;
2229 int unit_size = 1;
2230
2231 /* If we are writing to a memory object, find the length of an addressable
2232 unit for that architecture. */
2233 if (object == TARGET_OBJECT_MEMORY
2234 || object == TARGET_OBJECT_STACK_MEMORY
2235 || object == TARGET_OBJECT_CODE_MEMORY
2236 || object == TARGET_OBJECT_RAW_MEMORY)
2237 unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
2238
2239 /* Give the progress callback a chance to set up. */
2240 if (progress)
2241 (*progress) (0, baton);
2242
2243 while (xfered_total < len)
2244 {
2245 ULONGEST xfered_partial;
2246 enum target_xfer_status status;
2247
2248 status = target_write_partial (ops, object, annex,
2249 buf + xfered_total * unit_size,
2250 offset + xfered_total, len - xfered_total,
2251 &xfered_partial);
2252
2253 if (status != TARGET_XFER_OK)
2254 return status == TARGET_XFER_EOF ? xfered_total : TARGET_XFER_E_IO;
2255
2256 if (progress)
2257 (*progress) (xfered_partial, baton);
2258
2259 xfered_total += xfered_partial;
2260 QUIT;
2261 }
2262 return len;
2263 }
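
/* Editor's illustration (not part of GDB): a minimal progress callback
   for the routine above.  It merely accumulates the number of units
   written into the baton; both names are invented for this sketch.  */

static void
example_write_progress (ULONGEST written, void *baton)
{
  /* The first call passes 0 so the callback can set itself up.  */
  ULONGEST *total = (ULONGEST *) baton;
  *total += written;
}

/* A caller would pass example_write_progress and a pointer to a
   ULONGEST counter as the last two arguments of
   target_write_with_progress.  */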
2264
2265 /* For docs on target_write see target.h. */
2266
2267 LONGEST
2268 target_write (struct target_ops *ops,
2269 enum target_object object,
2270 const char *annex, const gdb_byte *buf,
2271 ULONGEST offset, LONGEST len)
2272 {
2273 return target_write_with_progress (ops, object, annex, buf, offset, len,
2274 NULL, NULL);
2275 }
2276
2277 /* Help for target_read_alloc and target_read_stralloc. See their comments
2278 for details. */
2279
2280 template <typename T>
2281 gdb::optional<gdb::def_vector<T>>
2282 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2283 const char *annex)
2284 {
2285 gdb::def_vector<T> buf;
2286 size_t buf_pos = 0;
2287 const int chunk = 4096;
2288
2289 /* This function does not have a length parameter; it reads the
2290 entire OBJECT. Also, it doesn't support objects fetched partly
2291 from one target and partly from another (in a different stratum,
2292 e.g. a core file and an executable). Both reasons make it
2293 unsuitable for reading memory. */
2294 gdb_assert (object != TARGET_OBJECT_MEMORY);
2295
2296 /* Start by reading up to 4K at a time. The target will throttle
2297 this number down if necessary. */
2298 while (1)
2299 {
2300 ULONGEST xfered_len;
2301 enum target_xfer_status status;
2302
2303 buf.resize (buf_pos + chunk);
2304
2305 status = target_read_partial (ops, object, annex,
2306 (gdb_byte *) &buf[buf_pos],
2307 buf_pos, chunk,
2308 &xfered_len);
2309
2310 if (status == TARGET_XFER_EOF)
2311 {
2312 /* Read all there was. */
2313 buf.resize (buf_pos);
2314 return buf;
2315 }
2316 else if (status != TARGET_XFER_OK)
2317 {
2318 /* An error occurred. */
2319 return {};
2320 }
2321
2322 buf_pos += xfered_len;
2323
2324 QUIT;
2325 }
2326 }
2327
2328 /* See target.h. */
2329
2330 gdb::optional<gdb::byte_vector>
2331 target_read_alloc (struct target_ops *ops, enum target_object object,
2332 const char *annex)
2333 {
2334 return target_read_alloc_1<gdb_byte> (ops, object, annex);
2335 }
2336
2337 /* See target.h. */
2338
2339 gdb::optional<gdb::char_vector>
2340 target_read_stralloc (struct target_ops *ops, enum target_object object,
2341 const char *annex)
2342 {
2343 gdb::optional<gdb::char_vector> buf
2344 = target_read_alloc_1<char> (ops, object, annex);
2345
2346 if (!buf)
2347 return {};
2348
2349 if (buf->empty () || buf->back () != '\0')
2350 buf->push_back ('\0');
2351
2352 /* Check for embedded NUL bytes; but allow trailing NULs. */
2353 for (auto it = std::find (buf->begin (), buf->end (), '\0');
2354 it != buf->end (); it++)
2355 if (*it != '\0')
2356 {
2357 warning (_("target object %d, annex %s, "
2358 "contained unexpected null characters"),
2359 (int) object, annex ? annex : "(none)");
2360 break;
2361 }
2362
2363 return buf;
2364 }
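
/* Editor's illustration (not part of GDB): reading a string-like object
   with the helper above.  TARGET_OBJECT_OSDATA is used the same way by
   target_get_osdata further down; `example_print_osdata' is invented.  */

static void
example_print_osdata (struct target_ops *ops, const char *type)
{
  gdb::optional<gdb::char_vector> data
    = target_read_stralloc (ops, TARGET_OBJECT_OSDATA, type);

  /* On success the buffer is guaranteed to be NUL-terminated.  */
  if (data)
    gdb_printf ("%s", data->data ());
}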
2365
2366 /* Memory transfer methods. */
2367
2368 void
2369 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2370 LONGEST len)
2371 {
2372 /* This method is used to read from an alternate, non-current
2373 target. This read must bypass the overlay support (as symbols
2374 don't match this target), and GDB's internal cache (wrong cache
2375 for this target). */
2376 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2377 != len)
2378 memory_error (TARGET_XFER_E_IO, addr);
2379 }
2380
2381 ULONGEST
2382 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2383 int len, enum bfd_endian byte_order)
2384 {
2385 gdb_byte buf[sizeof (ULONGEST)];
2386
2387 gdb_assert (len <= sizeof (buf));
2388 get_target_memory (ops, addr, buf, len);
2389 return extract_unsigned_integer (buf, len, byte_order);
2390 }
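
/* Editor's illustration (not part of GDB): fetching a 4-byte big-endian
   value from an alternate target with the helper above.  The function
   name `example_read_be32' is invented.  */

static ULONGEST
example_read_be32 (struct target_ops *ops, CORE_ADDR addr)
{
  /* LEN must not exceed sizeof (ULONGEST); the helper asserts this.  */
  return get_target_memory_unsigned (ops, addr, 4, BFD_ENDIAN_BIG);
}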
2391
2392 /* See target.h. */
2393
2394 int
2395 target_insert_breakpoint (struct gdbarch *gdbarch,
2396 struct bp_target_info *bp_tgt)
2397 {
2398 if (!may_insert_breakpoints)
2399 {
2400 warning (_("May not insert breakpoints"));
2401 return 1;
2402 }
2403
2404 target_ops *target = current_inferior ()->top_target ();
2405
2406 return target->insert_breakpoint (gdbarch, bp_tgt);
2407 }
2408
2409 /* See target.h. */
2410
2411 int
2412 target_remove_breakpoint (struct gdbarch *gdbarch,
2413 struct bp_target_info *bp_tgt,
2414 enum remove_bp_reason reason)
2415 {
2416 /* This is kind of a weird case to handle, but the permission might
2417 have been changed after breakpoints were inserted - in which case
2418 we should just take the user literally and assume that any
2419 breakpoints should be left in place. */
2420 if (!may_insert_breakpoints)
2421 {
2422 warning (_("May not remove breakpoints"));
2423 return 1;
2424 }
2425
2426 target_ops *target = current_inferior ()->top_target ();
2427
2428 return target->remove_breakpoint (gdbarch, bp_tgt, reason);
2429 }
2430
2431 static void
2432 info_target_command (const char *args, int from_tty)
2433 {
2434 int has_all_mem = 0;
2435
2436 if (current_program_space->symfile_object_file != NULL)
2437 {
2438 objfile *objf = current_program_space->symfile_object_file;
2439 gdb_printf (_("Symbols from \"%s\".\n"),
2440 objfile_name (objf));
2441 }
2442
2443 for (target_ops *t = current_inferior ()->top_target ();
2444 t != NULL;
2445 t = t->beneath ())
2446 {
2447 if (!t->has_memory ())
2448 continue;
2449
2450 if ((int) (t->stratum ()) <= (int) dummy_stratum)
2451 continue;
2452 if (has_all_mem)
2453 gdb_printf (_("\tWhile running this, "
2454 "GDB does not access memory from...\n"));
2455 gdb_printf ("%s:\n", t->longname ());
2456 t->files_info ();
2457 has_all_mem = t->has_all_memory ();
2458 }
2459 }
2460
2461 /* This function is called before any new inferior is created, e.g.
2462 by running a program, attaching, or connecting to a target.
2463 It cleans up any state from previous invocations which might
2464 change between runs. This is a subset of what target_preopen
2465 resets (things which might change between targets). */
2466
2467 void
2468 target_pre_inferior (int from_tty)
2469 {
2470 /* Clear out solib state. Otherwise the solib state of the previous
2471 inferior might have survived and is entirely wrong for the new
2472 target. This has been observed on GNU/Linux using glibc 2.3. How
2473 to reproduce:
2474
2475 bash$ ./foo&
2476 [1] 4711
2477 bash$ ./foo&
2478 [1] 4712
2479 bash$ gdb ./foo
2480 [...]
2481 (gdb) attach 4711
2482 (gdb) detach
2483 (gdb) attach 4712
2484 Cannot access memory at address 0xdeadbeef
2485 */
2486
2487 /* In some OSs, the shared library list is the same/global/shared
2488 across inferiors. If code is shared between processes, so are
2489 memory regions and features. */
2490 if (!gdbarch_has_global_solist (target_gdbarch ()))
2491 {
2492 no_shared_libraries (NULL, from_tty);
2493
2494 invalidate_target_mem_regions ();
2495
2496 target_clear_description ();
2497 }
2498
2499 /* attach_flag may be set if the previous process associated with
2500 the inferior was attached to. */
2501 current_inferior ()->attach_flag = false;
2502
2503 current_inferior ()->highest_thread_num = 0;
2504
2505 agent_capability_invalidate ();
2506 }
2507
2508 /* This is to be called by the open routine before it does
2509 anything. */
2510
2511 void
2512 target_preopen (int from_tty)
2513 {
2514 dont_repeat ();
2515
2516 if (current_inferior ()->pid != 0)
2517 {
2518 if (!from_tty
2519 || !target_has_execution ()
2520 || query (_("A program is being debugged already. Kill it? ")))
2521 {
2522 /* Core inferiors actually should be detached, not
2523 killed. */
2524 if (target_has_execution ())
2525 target_kill ();
2526 else
2527 target_detach (current_inferior (), 0);
2528 }
2529 else
2530 error (_("Program not killed."));
2531 }
2532
2533 /* Calling target_kill may remove the target from the stack. But if
2534 it doesn't (which seems like a win for UDI), remove it now. */
2535 /* Leave the exec target, though. The user may be switching from a
2536 live process to a core of the same program. */
2537 pop_all_targets_above (file_stratum);
2538
2539 target_pre_inferior (from_tty);
2540 }
2541
2542 /* See target.h. */
2543
2544 void
2545 target_detach (inferior *inf, int from_tty)
2546 {
2547 /* Threads don't need to be resumed until the end of this function. */
2548 scoped_disable_commit_resumed disable_commit_resumed ("detaching");
2549
2550 /* After we have detached, we will clear the register cache for this inferior
2551 by calling registers_changed_ptid. We must save the pid_ptid before
2552 detaching, as the target detach method will clear inf->pid. */
2553 ptid_t save_pid_ptid = ptid_t (inf->pid);
2554
2555 /* As long as some to_detach implementations rely on the current_inferior
2556 (either directly, or indirectly, like through target_gdbarch or by
2557 reading memory), INF needs to be the current inferior. Once that
2558 requirement no longer holds, this assertion can be
2559 removed. */
2560 gdb_assert (inf == current_inferior ());
2561
2562 prepare_for_detach ();
2563
2564 /* Hold a strong reference because detaching may unpush the
2565 target. */
2566 auto proc_target_ref = target_ops_ref::new_reference (inf->process_target ());
2567
2568 current_inferior ()->top_target ()->detach (inf, from_tty);
2569
2570 process_stratum_target *proc_target
2571 = as_process_stratum_target (proc_target_ref.get ());
2572
2573 registers_changed_ptid (proc_target, save_pid_ptid);
2574
2575 /* We have to ensure we have no frame cache left. Normally,
2576 registers_changed_ptid (save_pid_ptid) calls reinit_frame_cache when
2577 inferior_ptid matches save_pid_ptid, but in our case, it does not
2578 call it, as inferior_ptid has been reset. */
2579 reinit_frame_cache ();
2580
2581 disable_commit_resumed.reset_and_commit ();
2582 }
2583
2584 void
2585 target_disconnect (const char *args, int from_tty)
2586 {
2587 /* If we're in breakpoints-always-inserted mode or if breakpoints
2588 are global across processes, we have to remove them before
2589 disconnecting. */
2590 remove_breakpoints ();
2591
2592 current_inferior ()->top_target ()->disconnect (args, from_tty);
2593 }
2594
2595 /* See target/target.h. */
2596
2597 ptid_t
2598 target_wait (ptid_t ptid, struct target_waitstatus *status,
2599 target_wait_flags options)
2600 {
2601 target_ops *target = current_inferior ()->top_target ();
2602 process_stratum_target *proc_target = current_inferior ()->process_target ();
2603
2604 gdb_assert (!proc_target->commit_resumed_state);
2605
2606 if (!target_can_async_p (target))
2607 gdb_assert ((options & TARGET_WNOHANG) == 0);
2608
2609 try
2610 {
2611 gdb::observers::target_pre_wait.notify (ptid);
2612 ptid_t event_ptid = target->wait (ptid, status, options);
2613 gdb::observers::target_post_wait.notify (event_ptid);
2614 return event_ptid;
2615 }
2616 catch (...)
2617 {
2618 gdb::observers::target_post_wait.notify (null_ptid);
2619 throw;
2620 }
2621 }
2622
2623 /* See target.h. */
2624
2625 ptid_t
2626 default_target_wait (struct target_ops *ops,
2627 ptid_t ptid, struct target_waitstatus *status,
2628 target_wait_flags options)
2629 {
2630 status->set_ignore ();
2631 return minus_one_ptid;
2632 }
2633
2634 std::string
2635 target_pid_to_str (ptid_t ptid)
2636 {
2637 return current_inferior ()->top_target ()->pid_to_str (ptid);
2638 }
2639
2640 const char *
2641 target_thread_name (struct thread_info *info)
2642 {
2643 gdb_assert (info->inf == current_inferior ());
2644
2645 return current_inferior ()->top_target ()->thread_name (info);
2646 }
2647
2648 struct thread_info *
2649 target_thread_handle_to_thread_info (const gdb_byte *thread_handle,
2650 int handle_len,
2651 struct inferior *inf)
2652 {
2653 target_ops *target = current_inferior ()->top_target ();
2654
2655 return target->thread_handle_to_thread_info (thread_handle, handle_len, inf);
2656 }
2657
2658 /* See target.h. */
2659
2660 gdb::byte_vector
2661 target_thread_info_to_thread_handle (struct thread_info *tip)
2662 {
2663 target_ops *target = current_inferior ()->top_target ();
2664
2665 return target->thread_info_to_thread_handle (tip);
2666 }
2667
2668 void
2669 target_resume (ptid_t scope_ptid, int step, enum gdb_signal signal)
2670 {
2671 process_stratum_target *curr_target = current_inferior ()->process_target ();
2672 gdb_assert (!curr_target->commit_resumed_state);
2673
2674 gdb_assert (inferior_ptid != null_ptid);
2675 gdb_assert (inferior_ptid.matches (scope_ptid));
2676
2677 target_dcache_invalidate ();
2678
2679 current_inferior ()->top_target ()->resume (scope_ptid, step, signal);
2680
2681 registers_changed_ptid (curr_target, scope_ptid);
2682 /* We only set the internal executing state here. The user/frontend
2683 running state is set at a higher level. This also clears the
2684 thread's stop_pc as side effect. */
2685 set_executing (curr_target, scope_ptid, true);
2686 clear_inline_frame_state (curr_target, scope_ptid);
2687
2688 if (target_can_async_p ())
2689 target_async (true);
2690 }
2691
2692 /* See target.h. */
2693
2694 void
2695 target_commit_resumed ()
2696 {
2697 gdb_assert (current_inferior ()->process_target ()->commit_resumed_state);
2698 current_inferior ()->top_target ()->commit_resumed ();
2699 }
2700
2701 /* See target.h. */
2702
2703 bool
2704 target_has_pending_events ()
2705 {
2706 return current_inferior ()->top_target ()->has_pending_events ();
2707 }
2708
2709 void
2710 target_pass_signals (gdb::array_view<const unsigned char> pass_signals)
2711 {
2712 current_inferior ()->top_target ()->pass_signals (pass_signals);
2713 }
2714
2715 void
2716 target_program_signals (gdb::array_view<const unsigned char> program_signals)
2717 {
2718 current_inferior ()->top_target ()->program_signals (program_signals);
2719 }
2720
2721 static void
2722 default_follow_fork (struct target_ops *self, inferior *child_inf,
2723 ptid_t child_ptid, target_waitkind fork_kind,
2724 bool follow_child, bool detach_fork)
2725 {
2726 /* Some target returned a fork event, but did not know how to follow it. */
2727 internal_error (_("could not find a target to follow fork"));
2728 }
2729
2730 /* See target.h. */
2731
2732 void
2733 target_follow_fork (inferior *child_inf, ptid_t child_ptid,
2734 target_waitkind fork_kind, bool follow_child,
2735 bool detach_fork)
2736 {
2737 target_ops *target = current_inferior ()->top_target ();
2738
2739 /* Check consistency between CHILD_INF, CHILD_PTID, FOLLOW_CHILD and
2740 DETACH_FORK. */
2741 if (child_inf != nullptr)
2742 {
2743 gdb_assert (follow_child || !detach_fork);
2744 gdb_assert (child_inf->pid == child_ptid.pid ());
2745 }
2746 else
2747 gdb_assert (!follow_child && detach_fork);
2748
2749 return target->follow_fork (child_inf, child_ptid, fork_kind, follow_child,
2750 detach_fork);
2751 }
2752
2753 /* See target.h. */
2754
2755 void
2756 target_follow_exec (inferior *follow_inf, ptid_t ptid,
2757 const char *execd_pathname)
2758 {
2759 current_inferior ()->top_target ()->follow_exec (follow_inf, ptid,
2760 execd_pathname);
2761 }
2762
2763 static void
2764 default_mourn_inferior (struct target_ops *self)
2765 {
2766 internal_error (_("could not find a target to mourn the inferior"));
2767 }
2768
2769 void
2770 target_mourn_inferior (ptid_t ptid)
2771 {
2772 gdb_assert (ptid.pid () == inferior_ptid.pid ());
2773 current_inferior ()->top_target ()->mourn_inferior ();
2774
2775 /* We no longer need to keep handles on any of the object files.
2776 Make sure to release them to avoid unnecessarily locking any
2777 of them while we're not actually debugging. */
2778 bfd_cache_close_all ();
2779 }
2780
2781 /* Look for a target which can describe architectural features, starting
2782 from TARGET. If we find one, return its description. */
2783
2784 const struct target_desc *
2785 target_read_description (struct target_ops *target)
2786 {
2787 return target->read_description ();
2788 }
2789
2790
2791 /* Default implementation of memory-searching. */
2792
2793 static int
2794 default_search_memory (struct target_ops *self,
2795 CORE_ADDR start_addr, ULONGEST search_space_len,
2796 const gdb_byte *pattern, ULONGEST pattern_len,
2797 CORE_ADDR *found_addrp)
2798 {
2799 auto read_memory = [=] (CORE_ADDR addr, gdb_byte *result, size_t len)
2800 {
2801 return target_read (current_inferior ()->top_target (),
2802 TARGET_OBJECT_MEMORY, NULL,
2803 result, addr, len) == len;
2804 };
2805
2806 /* Start over from the top of the target stack. */
2807 return simple_search_memory (read_memory, start_addr, search_space_len,
2808 pattern, pattern_len, found_addrp);
2809 }
2810
2811 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2812 sequence of bytes in PATTERN with length PATTERN_LEN.
2813
2814 The result is 1 if found, 0 if not found, and -1 if there was an error
2815 requiring halting of the search (e.g. memory read error).
2816 If the pattern is found the address is recorded in FOUND_ADDRP. */
2817
2818 int
2819 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2820 const gdb_byte *pattern, ULONGEST pattern_len,
2821 CORE_ADDR *found_addrp)
2822 {
2823 target_ops *target = current_inferior ()->top_target ();
2824
2825 return target->search_memory (start_addr, search_space_len, pattern,
2826 pattern_len, found_addrp);
2827 }
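
/* Editor's illustration (not part of GDB): searching for a byte pattern
   with the wrapper above, using the 1/0/-1 return convention documented
   in the comment before it.  `example_find_magic' is invented.  */

static void
example_find_magic (CORE_ADDR start, ULONGEST space_len)
{
  static const gdb_byte magic[] = { 0x7f, 'E', 'L', 'F' };
  CORE_ADDR found;

  int res = target_search_memory (start, space_len,
                                  magic, sizeof (magic), &found);
  if (res == 1)
    gdb_printf ("pattern found at %s\n", pulongest ((ULONGEST) found));
  else if (res == 0)
    gdb_printf ("pattern not found\n");
  else
    gdb_printf ("error while searching memory\n");
}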
2828
2829 /* Look through the currently pushed targets. If none of them will
2830 be able to restart the currently running process, issue an error
2831 message. */
2832
2833 void
2834 target_require_runnable (void)
2835 {
2836 for (target_ops *t = current_inferior ()->top_target ();
2837 t != NULL;
2838 t = t->beneath ())
2839 {
2840 /* If this target knows how to create a new program, then
2841 assume we will still be able to after killing the current
2842 one. Either killing and mourning will not pop T, or else
2843 find_default_run_target will find it again. */
2844 if (t->can_create_inferior ())
2845 return;
2846
2847 /* Do not worry about targets at certain strata that cannot
2848 create inferiors. Assume they will be pushed again if
2849 necessary, and continue to the process_stratum. */
2850 if (t->stratum () > process_stratum)
2851 continue;
2852
2853 error (_("The \"%s\" target does not support \"run\". "
2854 "Try \"help target\" or \"continue\"."),
2855 t->shortname ());
2856 }
2857
2858 /* This function is only called if the target is running. In that
2859 case there should have been a process_stratum target and it
2860 should either know how to create inferiors, or not... */
2861 internal_error (_("No targets found"));
2862 }
2863
2864 /* Whether GDB is allowed to fall back to the default run target for
2865 "run", "attach", etc. when no target is connected yet. */
2866 static bool auto_connect_native_target = true;
2867
2868 static void
2869 show_auto_connect_native_target (struct ui_file *file, int from_tty,
2870 struct cmd_list_element *c, const char *value)
2871 {
2872 gdb_printf (file,
2873 _("Whether GDB may automatically connect to the "
2874 "native target is %s.\n"),
2875 value);
2876 }
2877
2878 /* A pointer to the target that can respond to "run" or "attach".
2879 Native targets are always singletons and instantiated early at GDB
2880 startup. */
2881 static target_ops *the_native_target;
2882
2883 /* See target.h. */
2884
2885 void
2886 set_native_target (target_ops *target)
2887 {
2888 if (the_native_target != NULL)
2889 internal_error (_("native target already set (\"%s\")."),
2890 the_native_target->longname ());
2891
2892 the_native_target = target;
2893 }
2894
2895 /* See target.h. */
2896
2897 target_ops *
2898 get_native_target ()
2899 {
2900 return the_native_target;
2901 }
2902
2903 /* Look through the list of possible targets for a target that can
2904 execute a run or attach command without any other data. This is
2905 used to locate the default process stratum.
2906
2907 If DO_MESG is not NULL, the result is always valid (error() is
2908 called for errors); else, return NULL on error. */
2909
2910 static struct target_ops *
2911 find_default_run_target (const char *do_mesg)
2912 {
2913 if (auto_connect_native_target && the_native_target != NULL)
2914 return the_native_target;
2915
2916 if (do_mesg != NULL)
2917 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2918 return NULL;
2919 }
2920
2921 /* See target.h. */
2922
2923 struct target_ops *
2924 find_attach_target (void)
2925 {
2926 /* If a target on the current stack can attach, use it. */
2927 for (target_ops *t = current_inferior ()->top_target ();
2928 t != NULL;
2929 t = t->beneath ())
2930 {
2931 if (t->can_attach ())
2932 return t;
2933 }
2934
2935 /* Otherwise, use the default run target for attaching. */
2936 return find_default_run_target ("attach");
2937 }
2938
2939 /* See target.h. */
2940
2941 struct target_ops *
2942 find_run_target (void)
2943 {
2944 /* If a target on the current stack can run, use it. */
2945 for (target_ops *t = current_inferior ()->top_target ();
2946 t != NULL;
2947 t = t->beneath ())
2948 {
2949 if (t->can_create_inferior ())
2950 return t;
2951 }
2952
2953 /* Otherwise, use the default run target. */
2954 return find_default_run_target ("run");
2955 }
2956
2957 bool
2958 target_ops::info_proc (const char *args, enum info_proc_what what)
2959 {
2960 return false;
2961 }
2962
2963 /* Implement the "info proc" command. */
2964
2965 int
2966 target_info_proc (const char *args, enum info_proc_what what)
2967 {
2968 struct target_ops *t;
2969
2970 /* If we're already connected to something that can get us OS
2971 related data, use it. Otherwise, try using the native
2972 target. */
2973 t = find_target_at (process_stratum);
2974 if (t == NULL)
2975 t = find_default_run_target (NULL);
2976
2977 for (; t != NULL; t = t->beneath ())
2978 {
2979 if (t->info_proc (args, what))
2980 {
2981 if (targetdebug)
2982 gdb_printf (gdb_stdlog,
2983 "target_info_proc (\"%s\", %d)\n", args, what);
2984
2985 return 1;
2986 }
2987 }
2988
2989 return 0;
2990 }
2991
2992 static int
2993 find_default_supports_disable_randomization (struct target_ops *self)
2994 {
2995 struct target_ops *t;
2996
2997 t = find_default_run_target (NULL);
2998 if (t != NULL)
2999 return t->supports_disable_randomization ();
3000 return 0;
3001 }
3002
3003 int
3004 target_supports_disable_randomization (void)
3005 {
3006 return current_inferior ()->top_target ()->supports_disable_randomization ();
3007 }
3008
3009 /* See target/target.h. */
3010
3011 int
3012 target_supports_multi_process (void)
3013 {
3014 return current_inferior ()->top_target ()->supports_multi_process ();
3015 }
3016
3017 /* See target.h. */
3018
3019 gdb::optional<gdb::char_vector>
3020 target_get_osdata (const char *type)
3021 {
3022 struct target_ops *t;
3023
3024 /* If we're already connected to something that can get us OS
3025 related data, use it. Otherwise, try using the native
3026 target. */
3027 t = find_target_at (process_stratum);
3028 if (t == NULL)
3029 t = find_default_run_target ("get OS data");
3030
3031 if (!t)
3032 return {};
3033
3034 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3035 }
3036
3037 /* Determine the current address space of thread PTID. */
3038
3039 struct address_space *
3040 target_thread_address_space (ptid_t ptid)
3041 {
3042 struct address_space *aspace;
3043
3044 aspace = current_inferior ()->top_target ()->thread_address_space (ptid);
3045 gdb_assert (aspace != NULL);
3046
3047 return aspace;
3048 }
3049
3050 /* See target.h. */
3051
3052 target_ops *
3053 target_ops::beneath () const
3054 {
3055 return current_inferior ()->find_target_beneath (this);
3056 }
3057
3058 void
3059 target_ops::close ()
3060 {
3061 }
3062
3063 bool
3064 target_ops::can_attach ()
3065 {
3066 return false;
3067 }
3068
3069 void
3070 target_ops::attach (const char *, int)
3071 {
3072 gdb_assert_not_reached ("target_ops::attach called");
3073 }
3074
3075 bool
3076 target_ops::can_create_inferior ()
3077 {
3078 return false;
3079 }
3080
3081 void
3082 target_ops::create_inferior (const char *, const std::string &,
3083 char **, int)
3084 {
3085 gdb_assert_not_reached ("target_ops::create_inferior called");
3086 }
3087
3088 bool
3089 target_ops::can_run ()
3090 {
3091 return false;
3092 }
3093
3094 int
3095 target_can_run ()
3096 {
3097 for (target_ops *t = current_inferior ()->top_target ();
3098 t != NULL;
3099 t = t->beneath ())
3100 {
3101 if (t->can_run ())
3102 return 1;
3103 }
3104
3105 return 0;
3106 }
3107
3108 /* Target file operations. */
3109
3110 static struct target_ops *
3111 default_fileio_target (void)
3112 {
3113 struct target_ops *t;
3114
3115 /* If we're already connected to something that can perform
3116 file I/O, use it. Otherwise, try using the native target. */
3117 t = find_target_at (process_stratum);
3118 if (t != NULL)
3119 return t;
3120 return find_default_run_target ("file I/O");
3121 }
3122
3123 /* File handle for target file operations. */
3124
3125 struct fileio_fh_t
3126 {
3127 /* The target on which this file is open. NULL if the target has
3128 meanwhile been closed while the handle remains open. */
3129 target_ops *target;
3130
3131 /* The file descriptor on the target. */
3132 int target_fd;
3133
3134 /* Check whether this fileio_fh_t represents a closed file. */
3135 bool is_closed ()
3136 {
3137 return target_fd < 0;
3138 }
3139 };
3140
3141 /* Vector of currently open file handles. The value returned by
3142 target_fileio_open and passed as the FD argument to other
3143 target_fileio_* functions is an index into this vector. This
3144 vector's entries are never freed; instead, files are marked as
3145 closed, and the handle becomes available for reuse. */
3146 static std::vector<fileio_fh_t> fileio_fhandles;
3147
3148 /* Index into fileio_fhandles of the lowest handle that might be
3149 closed. This permits handle reuse without searching the whole
3150 list each time a new file is opened. */
3151 static int lowest_closed_fd;
3152
3153 /* See target.h. */
3154
3155 void
3156 fileio_handles_invalidate_target (target_ops *targ)
3157 {
3158 for (fileio_fh_t &fh : fileio_fhandles)
3159 if (fh.target == targ)
3160 fh.target = NULL;
3161 }
3162
3163 /* Acquire a target fileio file descriptor. */
3164
3165 static int
3166 acquire_fileio_fd (target_ops *target, int target_fd)
3167 {
3168 /* Search for closed handles to reuse. */
3169 for (; lowest_closed_fd < fileio_fhandles.size (); lowest_closed_fd++)
3170 {
3171 fileio_fh_t &fh = fileio_fhandles[lowest_closed_fd];
3172
3173 if (fh.is_closed ())
3174 break;
3175 }
3176
3177 /* Push a new handle if no closed handles were found. */
3178 if (lowest_closed_fd == fileio_fhandles.size ())
3179 fileio_fhandles.push_back (fileio_fh_t {target, target_fd});
3180 else
3181 fileio_fhandles[lowest_closed_fd] = {target, target_fd};
3182
3183 /* Should no longer be marked closed. */
3184 gdb_assert (!fileio_fhandles[lowest_closed_fd].is_closed ());
3185
3186 /* Return its index, and start the next lookup at
3187 the next index. */
3188 return lowest_closed_fd++;
3189 }
3190
3191 /* Release a target fileio file descriptor. */
3192
3193 static void
3194 release_fileio_fd (int fd, fileio_fh_t *fh)
3195 {
3196 fh->target_fd = -1;
3197 lowest_closed_fd = std::min (lowest_closed_fd, fd);
3198 }
3199
3200 /* Return a pointer to the fileio_fh_t corresponding to FD. */
3201
3202 static fileio_fh_t *
3203 fileio_fd_to_fh (int fd)
3204 {
3205 return &fileio_fhandles[fd];
3206 }
3207
3208
3209 /* Default implementations of file i/o methods. We don't want these
3210 to delegate automatically, because we need to know which target
3211 supported the method, in order to call it directly from within
3212 pread/pwrite, etc. */
3213
3214 int
3215 target_ops::fileio_open (struct inferior *inf, const char *filename,
3216 int flags, int mode, int warn_if_slow,
3217 fileio_error *target_errno)
3218 {
3219 *target_errno = FILEIO_ENOSYS;
3220 return -1;
3221 }
3222
3223 int
3224 target_ops::fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3225 ULONGEST offset, fileio_error *target_errno)
3226 {
3227 *target_errno = FILEIO_ENOSYS;
3228 return -1;
3229 }
3230
3231 int
3232 target_ops::fileio_pread (int fd, gdb_byte *read_buf, int len,
3233 ULONGEST offset, fileio_error *target_errno)
3234 {
3235 *target_errno = FILEIO_ENOSYS;
3236 return -1;
3237 }
3238
3239 int
3240 target_ops::fileio_fstat (int fd, struct stat *sb, fileio_error *target_errno)
3241 {
3242 *target_errno = FILEIO_ENOSYS;
3243 return -1;
3244 }
3245
3246 int
3247 target_ops::fileio_close (int fd, fileio_error *target_errno)
3248 {
3249 *target_errno = FILEIO_ENOSYS;
3250 return -1;
3251 }
3252
3253 int
3254 target_ops::fileio_unlink (struct inferior *inf, const char *filename,
3255 fileio_error *target_errno)
3256 {
3257 *target_errno = FILEIO_ENOSYS;
3258 return -1;
3259 }
3260
3261 gdb::optional<std::string>
3262 target_ops::fileio_readlink (struct inferior *inf, const char *filename,
3263 fileio_error *target_errno)
3264 {
3265 *target_errno = FILEIO_ENOSYS;
3266 return {};
3267 }
3268
3269 /* See target.h. */
3270
3271 int
3272 target_fileio_open (struct inferior *inf, const char *filename,
3273 int flags, int mode, bool warn_if_slow, fileio_error *target_errno)
3274 {
3275 for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
3276 {
3277 int fd = t->fileio_open (inf, filename, flags, mode,
3278 warn_if_slow, target_errno);
3279
3280 if (fd == -1 && *target_errno == FILEIO_ENOSYS)
3281 continue;
3282
3283 if (fd < 0)
3284 fd = -1;
3285 else
3286 fd = acquire_fileio_fd (t, fd);
3287
3288 if (targetdebug)
3289 gdb_printf (gdb_stdlog,
3290 "target_fileio_open (%d,%s,0x%x,0%o,%d)"
3291 " = %d (%d)\n",
3292 inf == NULL ? 0 : inf->num,
3293 filename, flags, mode,
3294 warn_if_slow, fd,
3295 fd != -1 ? 0 : *target_errno);
3296 return fd;
3297 }
3298
3299 *target_errno = FILEIO_ENOSYS;
3300 return -1;
3301 }
3302
3303 /* See target.h. */
3304
3305 int
3306 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3307 ULONGEST offset, fileio_error *target_errno)
3308 {
3309 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3310 int ret = -1;
3311
3312 if (fh->is_closed ())
3313 *target_errno = FILEIO_EBADF;
3314 else if (fh->target == NULL)
3315 *target_errno = FILEIO_EIO;
3316 else
3317 ret = fh->target->fileio_pwrite (fh->target_fd, write_buf,
3318 len, offset, target_errno);
3319
3320 if (targetdebug)
3321 gdb_printf (gdb_stdlog,
3322 "target_fileio_pwrite (%d,...,%d,%s) "
3323 "= %d (%d)\n",
3324 fd, len, pulongest (offset),
3325 ret, ret != -1 ? 0 : *target_errno);
3326 return ret;
3327 }
3328
3329 /* See target.h. */
3330
3331 int
3332 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3333 ULONGEST offset, fileio_error *target_errno)
3334 {
3335 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3336 int ret = -1;
3337
3338 if (fh->is_closed ())
3339 *target_errno = FILEIO_EBADF;
3340 else if (fh->target == NULL)
3341 *target_errno = FILEIO_EIO;
3342 else
3343 ret = fh->target->fileio_pread (fh->target_fd, read_buf,
3344 len, offset, target_errno);
3345
3346 if (targetdebug)
3347 gdb_printf (gdb_stdlog,
3348 "target_fileio_pread (%d,...,%d,%s) "
3349 "= %d (%d)\n",
3350 fd, len, pulongest (offset),
3351 ret, ret != -1 ? 0 : *target_errno);
3352 return ret;
3353 }
3354
3355 /* See target.h. */
3356
3357 int
3358 target_fileio_fstat (int fd, struct stat *sb, fileio_error *target_errno)
3359 {
3360 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3361 int ret = -1;
3362
3363 if (fh->is_closed ())
3364 *target_errno = FILEIO_EBADF;
3365 else if (fh->target == NULL)
3366 *target_errno = FILEIO_EIO;
3367 else
3368 ret = fh->target->fileio_fstat (fh->target_fd, sb, target_errno);
3369
3370 if (targetdebug)
3371 gdb_printf (gdb_stdlog,
3372 "target_fileio_fstat (%d) = %d (%d)\n",
3373 fd, ret, ret != -1 ? 0 : *target_errno);
3374 return ret;
3375 }
3376
3377 /* See target.h. */
3378
3379 int
3380 target_fileio_close (int fd, fileio_error *target_errno)
3381 {
3382 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3383 int ret = -1;
3384
3385 if (fh->is_closed ())
3386 *target_errno = FILEIO_EBADF;
3387 else
3388 {
3389 if (fh->target != NULL)
3390 ret = fh->target->fileio_close (fh->target_fd,
3391 target_errno);
3392 else
3393 ret = 0;
3394 release_fileio_fd (fd, fh);
3395 }
3396
3397 if (targetdebug)
3398 gdb_printf (gdb_stdlog,
3399 "target_fileio_close (%d) = %d (%d)\n",
3400 fd, ret, ret != -1 ? 0 : *target_errno);
3401 return ret;
3402 }
3403
3404 /* See target.h. */
3405
3406 int
3407 target_fileio_unlink (struct inferior *inf, const char *filename,
3408 fileio_error *target_errno)
3409 {
3410 for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
3411 {
3412 int ret = t->fileio_unlink (inf, filename, target_errno);
3413
3414 if (ret == -1 && *target_errno == FILEIO_ENOSYS)
3415 continue;
3416
3417 if (targetdebug)
3418 gdb_printf (gdb_stdlog,
3419 "target_fileio_unlink (%d,%s)"
3420 " = %d (%d)\n",
3421 inf == NULL ? 0 : inf->num, filename,
3422 ret, ret != -1 ? 0 : *target_errno);
3423 return ret;
3424 }
3425
3426 *target_errno = FILEIO_ENOSYS;
3427 return -1;
3428 }
3429
3430 /* See target.h. */
3431
3432 gdb::optional<std::string>
3433 target_fileio_readlink (struct inferior *inf, const char *filename,
3434 fileio_error *target_errno)
3435 {
3436 for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
3437 {
3438 gdb::optional<std::string> ret
3439 = t->fileio_readlink (inf, filename, target_errno);
3440
3441 if (!ret.has_value () && *target_errno == FILEIO_ENOSYS)
3442 continue;
3443
3444 if (targetdebug)
3445 gdb_printf (gdb_stdlog,
3446 "target_fileio_readlink (%d,%s)"
3447 " = %s (%d)\n",
3448 inf == NULL ? 0 : inf->num,
3449 filename, ret ? ret->c_str () : "(nil)",
3450 ret ? 0 : *target_errno);
3451 return ret;
3452 }
3453
3454 *target_errno = FILEIO_ENOSYS;
3455 return {};
3456 }
3457
3458 /* Like scoped_fd, but specific to target fileio. */
3459
3460 class scoped_target_fd
3461 {
3462 public:
3463 explicit scoped_target_fd (int fd) noexcept
3464 : m_fd (fd)
3465 {
3466 }
3467
3468 ~scoped_target_fd ()
3469 {
3470 if (m_fd >= 0)
3471 {
3472 fileio_error target_errno;
3473
3474 target_fileio_close (m_fd, &target_errno);
3475 }
3476 }
3477
3478 DISABLE_COPY_AND_ASSIGN (scoped_target_fd);
3479
3480 int get () const noexcept
3481 {
3482 return m_fd;
3483 }
3484
3485 private:
3486 int m_fd;
3487 };
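
/* Editor's illustration (not part of GDB): the RAII wrapper above closes
   the target descriptor on scope exit, so an early return cannot leak
   it.  The function name `example_read_first_byte' is invented.  */

static int
example_read_first_byte (struct inferior *inf, const char *filename,
                         gdb_byte *byte)
{
  fileio_error target_errno;
  scoped_target_fd fd (target_fileio_open (inf, filename, FILEIO_O_RDONLY,
                                           0700, false, &target_errno));

  if (fd.get () == -1)
    return -1;

  /* The destructor of FD calls target_fileio_close for us.  */
  return target_fileio_pread (fd.get (), byte, 1, 0, &target_errno);
}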
3488
3489 /* Read target file FILENAME, in the filesystem as seen by INF. If
3490 INF is NULL, use the filesystem seen by the debugger (GDB or, for
3491 remote targets, the remote stub). Store the result in *BUF_P and
3492 return the size of the transferred data. PADDING additional bytes
3493 are available in *BUF_P. This is a helper function for
3494 target_fileio_read_alloc; see the declaration of that function for
3495 more information. */
3496
3497 static LONGEST
3498 target_fileio_read_alloc_1 (struct inferior *inf, const char *filename,
3499 gdb_byte **buf_p, int padding)
3500 {
3501 size_t buf_alloc, buf_pos;
3502 gdb_byte *buf;
3503 LONGEST n;
3504 fileio_error target_errno;
3505
3506 scoped_target_fd fd (target_fileio_open (inf, filename, FILEIO_O_RDONLY,
3507 0700, false, &target_errno));
3508 if (fd.get () == -1)
3509 return -1;
3510
3511 /* Start by reading up to 4K at a time. The target will throttle
3512 this number down if necessary. */
3513 buf_alloc = 4096;
3514 buf = (gdb_byte *) xmalloc (buf_alloc);
3515 buf_pos = 0;
3516 while (1)
3517 {
3518 n = target_fileio_pread (fd.get (), &buf[buf_pos],
3519 buf_alloc - buf_pos - padding, buf_pos,
3520 &target_errno);
3521 if (n < 0)
3522 {
3523 /* An error occurred. */
3524 xfree (buf);
3525 return -1;
3526 }
3527 else if (n == 0)
3528 {
3529 /* Read all there was. */
3530 if (buf_pos == 0)
3531 xfree (buf);
3532 else
3533 *buf_p = buf;
3534 return buf_pos;
3535 }
3536
3537 buf_pos += n;
3538
3539 /* If the buffer is filling up, expand it. */
3540 if (buf_alloc < buf_pos * 2)
3541 {
3542 buf_alloc *= 2;
3543 buf = (gdb_byte *) xrealloc (buf, buf_alloc);
3544 }
3545
3546 QUIT;
3547 }
3548 }
3549
3550 /* See target.h. */
3551
3552 LONGEST
3553 target_fileio_read_alloc (struct inferior *inf, const char *filename,
3554 gdb_byte **buf_p)
3555 {
3556 return target_fileio_read_alloc_1 (inf, filename, buf_p, 0);
3557 }
3558
3559 /* See target.h. */
3560
3561 gdb::unique_xmalloc_ptr<char>
3562 target_fileio_read_stralloc (struct inferior *inf, const char *filename)
3563 {
3564 gdb_byte *buffer;
3565 char *bufstr;
3566 LONGEST i, transferred;
3567
3568 transferred = target_fileio_read_alloc_1 (inf, filename, &buffer, 1);
3569 bufstr = (char *) buffer;
3570
3571 if (transferred < 0)
3572 return gdb::unique_xmalloc_ptr<char> (nullptr);
3573
3574 if (transferred == 0)
3575 return make_unique_xstrdup ("");
3576
3577 bufstr[transferred] = 0;
3578
3579 /* Check for embedded NUL bytes; but allow trailing NULs. */
3580 for (i = strlen (bufstr); i < transferred; i++)
3581 if (bufstr[i] != 0)
3582 {
3583 warning (_("target file %s "
3584 "contained unexpected null characters"),
3585 filename);
3586 break;
3587 }
3588
3589 return gdb::unique_xmalloc_ptr<char> (bufstr);
3590 }
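
/* Editor's illustration (not part of GDB): reading a text file from the
   target filesystem as a NUL-terminated string.  The path is only an
   example; `example_print_target_file' is invented.  */

static void
example_print_target_file (struct inferior *inf)
{
  gdb::unique_xmalloc_ptr<char> text
    = target_fileio_read_stralloc (inf, "/proc/version");

  /* A NULL result means the read failed; an empty string means the
     file was empty.  */
  if (text != nullptr)
    gdb_printf ("%s", text.get ());
}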
3591
3592
3593 static int
3594 default_region_ok_for_hw_watchpoint (struct target_ops *self,
3595 CORE_ADDR addr, int len)
3596 {
3597 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3598 }
3599
3600 static int
3601 default_watchpoint_addr_within_range (struct target_ops *target,
3602 CORE_ADDR addr,
3603 CORE_ADDR start, int length)
3604 {
3605 return addr >= start && addr < start + length;
3606 }
3607
3608 /* See target.h. */
3609
3610 target_ops *
3611 target_stack::find_beneath (const target_ops *t) const
3612 {
3613 /* Look for a non-empty slot at stratum levels beneath T's. */
3614 for (int stratum = t->stratum () - 1; stratum >= 0; --stratum)
3615 if (m_stack[stratum] != NULL)
3616 return m_stack[stratum];
3617
3618 return NULL;
3619 }
3620
3621 /* See target.h. */
3622
3623 struct target_ops *
3624 find_target_at (enum strata stratum)
3625 {
3626 return current_inferior ()->target_at (stratum);
3627 }
3628
3629 \f
3630
3631 /* See target.h. */
3632
3633 void
3634 target_announce_detach (int from_tty)
3635 {
3636 pid_t pid;
3637 const char *exec_file;
3638
3639 if (!from_tty)
3640 return;
3641
3642 pid = inferior_ptid.pid ();
3643 exec_file = get_exec_file (0);
3644 if (exec_file == nullptr)
3645 gdb_printf ("Detaching from pid %s\n",
3646 target_pid_to_str (ptid_t (pid)).c_str ());
3647 else
3648 gdb_printf (_("Detaching from program: %s, %s\n"), exec_file,
3649 target_pid_to_str (ptid_t (pid)).c_str ());
3650 }
3651
3652 /* See target.h */
3653
3654 void
3655 target_announce_attach (int from_tty, int pid)
3656 {
3657 if (!from_tty)
3658 return;
3659
3660 const char *exec_file = get_exec_file (0);
3661
3662 if (exec_file != nullptr)
3663 gdb_printf ("Attaching to program: %s, %s\n", exec_file,
3664 target_pid_to_str (ptid_t (pid)).c_str ());
3665 else
3666 gdb_printf ("Attaching to %s\n",
3667 target_pid_to_str (ptid_t (pid)).c_str ());
3668 }
3669
3670 /* The inferior process has died. Long live the inferior! */
3671
3672 void
3673 generic_mourn_inferior (void)
3674 {
3675 inferior *inf = current_inferior ();
3676
3677 switch_to_no_thread ();
3678
3679 /* Mark breakpoints uninserted in case something tries to delete a
3680 breakpoint while we delete the inferior's threads (which would
3681 fail, since the inferior is long gone). */
3682 mark_breakpoints_out ();
3683
3684 if (inf->pid != 0)
3685 exit_inferior (inf);
3686
3687 /* Note this wipes step-resume breakpoints, so needs to be done
3688 after exit_inferior, which ends up referencing the step-resume
3689 breakpoints through clear_thread_inferior_resources. */
3690 breakpoint_init_inferior (inf_exited);
3691
3692 registers_changed ();
3693
3694 reopen_exec_file ();
3695 reinit_frame_cache ();
3696
3697 if (deprecated_detach_hook)
3698 deprecated_detach_hook ();
3699 }
3700 \f
3701 /* Convert a normal process ID to a string such as "process 1234".
3702 The result is returned as a std::string, not a static buffer. */
3703
3704 std::string
3705 normal_pid_to_str (ptid_t ptid)
3706 {
3707 return string_printf ("process %d", ptid.pid ());
3708 }
3709
3710 static std::string
3711 default_pid_to_str (struct target_ops *ops, ptid_t ptid)
3712 {
3713 return normal_pid_to_str (ptid);
3714 }
3715
3716 /* Error-catcher for target_find_memory_regions. */
3717 static int
3718 dummy_find_memory_regions (struct target_ops *self,
3719 find_memory_region_ftype ignore1, void *ignore2)
3720 {
3721 error (_("Command not implemented for this target."));
3722 return 0;
3723 }
3724
3725 /* Error-catcher for target_make_corefile_notes. */
3726 static gdb::unique_xmalloc_ptr<char>
3727 dummy_make_corefile_notes (struct target_ops *self,
3728 bfd *ignore1, int *ignore2)
3729 {
3730 error (_("Command not implemented for this target."));
3731 return NULL;
3732 }
3733
3734 #include "target-delegates.c"
3735
3736 /* The initial current target, so that there is always a semi-valid
3737 current target. */
3738
3739 static dummy_target the_dummy_target;
3740
3741 /* See target.h. */
3742
3743 target_ops *
3744 get_dummy_target ()
3745 {
3746 return &the_dummy_target;
3747 }
3748
3749 static const target_info dummy_target_info = {
3750 "None",
3751 N_("None"),
3752 ""
3753 };
3754
3755 strata
3756 dummy_target::stratum () const
3757 {
3758 return dummy_stratum;
3759 }
3760
3761 strata
3762 debug_target::stratum () const
3763 {
3764 return debug_stratum;
3765 }
3766
3767 const target_info &
3768 dummy_target::info () const
3769 {
3770 return dummy_target_info;
3771 }
3772
3773 const target_info &
3774 debug_target::info () const
3775 {
3776 return beneath ()->info ();
3777 }
3778
3779 \f
3780
3781 void
3782 target_close (struct target_ops *targ)
3783 {
3784 for (inferior *inf : all_inferiors ())
3785 gdb_assert (!inf->target_is_pushed (targ));
3786
3787 fileio_handles_invalidate_target (targ);
3788
3789 targ->close ();
3790
3791 if (targetdebug)
3792 gdb_printf (gdb_stdlog, "target_close ()\n");
3793 }
3794
3795 int
3796 target_thread_alive (ptid_t ptid)
3797 {
3798 return current_inferior ()->top_target ()->thread_alive (ptid);
3799 }
3800
3801 void
3802 target_update_thread_list (void)
3803 {
3804 current_inferior ()->top_target ()->update_thread_list ();
3805 }
3806
3807 void
3808 target_stop (ptid_t ptid)
3809 {
3810 process_stratum_target *proc_target = current_inferior ()->process_target ();
3811
3812 gdb_assert (!proc_target->commit_resumed_state);
3813
3814 if (!may_stop)
3815 {
3816 warning (_("May not interrupt or stop the target, ignoring attempt"));
3817 return;
3818 }
3819
3820 current_inferior ()->top_target ()->stop (ptid);
3821 }
3822
3823 void
3824 target_interrupt ()
3825 {
3826 if (!may_stop)
3827 {
3828 warning (_("May not interrupt or stop the target, ignoring attempt"));
3829 return;
3830 }
3831
3832 current_inferior ()->top_target ()->interrupt ();
3833 }
3834
3835 /* See target.h. */
3836
3837 void
3838 target_pass_ctrlc (void)
3839 {
3840 /* Pass the Ctrl-C to the first target that has a thread
3841 running. */
3842 for (inferior *inf : all_inferiors ())
3843 {
3844 target_ops *proc_target = inf->process_target ();
3845 if (proc_target == NULL)
3846 continue;
3847
3848 for (thread_info *thr : inf->non_exited_threads ())
3849 {
3850 /* A thread can be THREAD_STOPPED and executing, while
3851 running an infcall. */
3852 if (thr->state == THREAD_RUNNING || thr->executing ())
3853 {
3854 /* We can get here quite deep in target layers. Avoid
3855 switching thread context or anything that would
3856 communicate with the target (e.g., to fetch
3857 registers), or flushing e.g., the frame cache. We
3858 just switch inferior in order to be able to call
3859 through the target_stack. */
3860 scoped_restore_current_inferior restore_inferior;
3861 set_current_inferior (inf);
3862 current_inferior ()->top_target ()->pass_ctrlc ();
3863 return;
3864 }
3865 }
3866 }
3867 }
3868
3869 /* See target.h. */
3870
3871 void
3872 default_target_pass_ctrlc (struct target_ops *ops)
3873 {
3874 target_interrupt ();
3875 }
3876
3877 /* See target/target.h. */
3878
3879 void
3880 target_stop_and_wait (ptid_t ptid)
3881 {
3882 struct target_waitstatus status;
3883 bool was_non_stop = non_stop;
3884
3885 non_stop = true;
3886 target_stop (ptid);
3887
3888 target_wait (ptid, &status, 0);
3889
3890 non_stop = was_non_stop;
3891 }
3892
3893 /* See target/target.h. */
3894
3895 void
3896 target_continue_no_signal (ptid_t ptid)
3897 {
3898 target_resume (ptid, 0, GDB_SIGNAL_0);
3899 }
3900
3901 /* See target/target.h. */
3902
3903 void
3904 target_continue (ptid_t ptid, enum gdb_signal signal)
3905 {
3906 target_resume (ptid, 0, signal);
3907 }
3908
3909 /* Concatenate ELEM to LIST, a comma-separated list. */
3910
3911 static void
3912 str_comma_list_concat_elem (std::string *list, const char *elem)
3913 {
3914 if (!list->empty ())
3915 list->append (", ");
3916
3917 list->append (elem);
3918 }
3919
3920 /* Helper for target_options_to_string. If OPT is present in
3921 TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
3922 OPT is removed from TARGET_OPTIONS. */
3923
3924 static void
3925 do_option (target_wait_flags *target_options, std::string *ret,
3926 target_wait_flag opt, const char *opt_str)
3927 {
3928 if ((*target_options & opt) != 0)
3929 {
3930 str_comma_list_concat_elem (ret, opt_str);
3931 *target_options &= ~opt;
3932 }
3933 }
3934
3935 /* See target.h. */
3936
3937 std::string
3938 target_options_to_string (target_wait_flags target_options)
3939 {
3940 std::string ret;
3941
3942 #define DO_TARG_OPTION(OPT) \
3943 do_option (&target_options, &ret, OPT, #OPT)
3944
3945 DO_TARG_OPTION (TARGET_WNOHANG);
3946
3947 if (target_options != 0)
3948 str_comma_list_concat_elem (&ret, "unknown???");
3949
3950 return ret;
3951 }
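
/* Editor's illustration (not part of GDB): the helper above turns a
   flag mask into a readable list, e.g. TARGET_WNOHANG alone prints as
   "TARGET_WNOHANG" and any unrecognized bit is reported as
   "unknown???".  `example_show_wait_flags' is invented.  */

static void
example_show_wait_flags (target_wait_flags flags)
{
  gdb_printf ("wait options: %s\n",
              target_options_to_string (flags).c_str ());
}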
3952
3953 void
3954 target_fetch_registers (struct regcache *regcache, int regno)
3955 {
3956 current_inferior ()->top_target ()->fetch_registers (regcache, regno);
3957 if (targetdebug)
3958 regcache->debug_print_register ("target_fetch_registers", regno);
3959 }
3960
3961 void
3962 target_store_registers (struct regcache *regcache, int regno)
3963 {
3964 if (!may_write_registers)
3965 error (_("Writing to registers is not allowed (regno %d)"), regno);
3966
3967 current_inferior ()->top_target ()->store_registers (regcache, regno);
3968 if (targetdebug)
3969 {
3970 regcache->debug_print_register ("target_store_registers", regno);
3971 }
3972 }
3973
3974 int
3975 target_core_of_thread (ptid_t ptid)
3976 {
3977 return current_inferior ()->top_target ()->core_of_thread (ptid);
3978 }
3979
3980 int
3981 simple_verify_memory (struct target_ops *ops,
3982 const gdb_byte *data, CORE_ADDR lma, ULONGEST size)
3983 {
3984 LONGEST total_xfered = 0;
3985
3986 while (total_xfered < size)
3987 {
3988 ULONGEST xfered_len;
3989 enum target_xfer_status status;
3990 gdb_byte buf[1024];
3991 ULONGEST howmuch = std::min<ULONGEST> (sizeof (buf), size - total_xfered);
3992
3993 status = target_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
3994 buf, NULL, lma + total_xfered, howmuch,
3995 &xfered_len);
3996 if (status == TARGET_XFER_OK
3997 && memcmp (data + total_xfered, buf, xfered_len) == 0)
3998 {
3999 total_xfered += xfered_len;
4000 QUIT;
4001 }
4002 else
4003 return 0;
4004 }
4005 return 1;
4006 }
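
/* Editor's illustration (not part of GDB): checking that a block of
   target memory matches a host buffer with the helper above (1 means
   identical, 0 means mismatch or read error).  `example_verify_blob'
   is invented.  */

static bool
example_verify_blob (const gdb_byte *host_copy, CORE_ADDR lma, ULONGEST size)
{
  return simple_verify_memory (current_inferior ()->top_target (),
                               host_copy, lma, size) == 1;
}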
4007
4008 /* Default implementation of memory verification. */
4009
4010 static int
4011 default_verify_memory (struct target_ops *self,
4012 const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
4013 {
4014 /* Start over from the top of the target stack. */
4015 return simple_verify_memory (current_inferior ()->top_target (),
4016 data, memaddr, size);
4017 }
4018
4019 int
4020 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
4021 {
4022 target_ops *target = current_inferior ()->top_target ();
4023
4024 return target->verify_memory (data, memaddr, size);
4025 }
4026
4027 /* The documentation for this function is in its prototype declaration in
4028 target.h. */
4029
4030 int
4031 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask,
4032 enum target_hw_bp_type rw)
4033 {
4034 target_ops *target = current_inferior ()->top_target ();
4035
4036 return target->insert_mask_watchpoint (addr, mask, rw);
4037 }
4038
4039 /* The documentation for this function is in its prototype declaration in
4040 target.h. */
4041
4042 int
4043 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask,
4044 enum target_hw_bp_type rw)
4045 {
4046 target_ops *target = current_inferior ()->top_target ();
4047
4048 return target->remove_mask_watchpoint (addr, mask, rw);
4049 }
4050
4051 /* The documentation for this function is in its prototype declaration
4052 in target.h. */
4053
4054 int
4055 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
4056 {
4057 target_ops *target = current_inferior ()->top_target ();
4058
4059 return target->masked_watch_num_registers (addr, mask);
4060 }
4061
4062 /* The documentation for this function is in its prototype declaration
4063 in target.h. */
4064
4065 int
4066 target_ranged_break_num_registers (void)
4067 {
4068 return current_inferior ()->top_target ()->ranged_break_num_registers ();
4069 }
4070
4071 /* See target.h. */
4072
4073 struct btrace_target_info *
4074 target_enable_btrace (thread_info *tp, const struct btrace_config *conf)
4075 {
4076 return current_inferior ()->top_target ()->enable_btrace (tp, conf);
4077 }
4078
4079 /* See target.h. */
4080
4081 void
4082 target_disable_btrace (struct btrace_target_info *btinfo)
4083 {
4084 current_inferior ()->top_target ()->disable_btrace (btinfo);
4085 }
4086
4087 /* See target.h. */
4088
4089 void
4090 target_teardown_btrace (struct btrace_target_info *btinfo)
4091 {
4092 current_inferior ()->top_target ()->teardown_btrace (btinfo);
4093 }
4094
4095 /* See target.h. */
4096
4097 enum btrace_error
4098 target_read_btrace (struct btrace_data *btrace,
4099 struct btrace_target_info *btinfo,
4100 enum btrace_read_type type)
4101 {
4102 target_ops *target = current_inferior ()->top_target ();
4103
4104 return target->read_btrace (btrace, btinfo, type);
4105 }
4106
4107 /* See target.h. */
4108
4109 const struct btrace_config *
4110 target_btrace_conf (const struct btrace_target_info *btinfo)
4111 {
4112 return current_inferior ()->top_target ()->btrace_conf (btinfo);
4113 }
4114
4115 /* See target.h. */
4116
4117 void
4118 target_stop_recording (void)
4119 {
4120 current_inferior ()->top_target ()->stop_recording ();
4121 }
4122
4123 /* See target.h. */
4124
4125 void
4126 target_save_record (const char *filename)
4127 {
4128 current_inferior ()->top_target ()->save_record (filename);
4129 }
4130
4131 /* See target.h. */
4132
4133 int
4134 target_supports_delete_record ()
4135 {
4136 return current_inferior ()->top_target ()->supports_delete_record ();
4137 }
4138
4139 /* See target.h. */
4140
4141 void
4142 target_delete_record (void)
4143 {
4144 current_inferior ()->top_target ()->delete_record ();
4145 }
4146
4147 /* See target.h. */
4148
4149 enum record_method
4150 target_record_method (ptid_t ptid)
4151 {
4152 return current_inferior ()->top_target ()->record_method (ptid);
4153 }
4154
4155 /* See target.h. */
4156
4157 int
4158 target_record_is_replaying (ptid_t ptid)
4159 {
4160 return current_inferior ()->top_target ()->record_is_replaying (ptid);
4161 }
4162
4163 /* See target.h. */
4164
4165 int
4166 target_record_will_replay (ptid_t ptid, int dir)
4167 {
4168 return current_inferior ()->top_target ()->record_will_replay (ptid, dir);
4169 }
4170
4171 /* See target.h. */
4172
4173 void
4174 target_record_stop_replaying (void)
4175 {
4176 current_inferior ()->top_target ()->record_stop_replaying ();
4177 }
4178
4179 /* See target.h. */
4180
4181 void
4182 target_goto_record_begin (void)
4183 {
4184 current_inferior ()->top_target ()->goto_record_begin ();
4185 }
4186
4187 /* See target.h. */
4188
4189 void
4190 target_goto_record_end (void)
4191 {
4192 current_inferior ()->top_target ()->goto_record_end ();
4193 }
4194
4195 /* See target.h. */
4196
4197 void
4198 target_goto_record (ULONGEST insn)
4199 {
4200 current_inferior ()->top_target ()->goto_record (insn);
4201 }
4202
4203 /* See target.h. */
4204
4205 void
4206 target_insn_history (int size, gdb_disassembly_flags flags)
4207 {
4208 current_inferior ()->top_target ()->insn_history (size, flags);
4209 }
4210
4211 /* See target.h. */
4212
4213 void
4214 target_insn_history_from (ULONGEST from, int size,
4215 gdb_disassembly_flags flags)
4216 {
4217 current_inferior ()->top_target ()->insn_history_from (from, size, flags);
4218 }
4219
4220 /* See target.h. */
4221
4222 void
4223 target_insn_history_range (ULONGEST begin, ULONGEST end,
4224 gdb_disassembly_flags flags)
4225 {
4226 current_inferior ()->top_target ()->insn_history_range (begin, end, flags);
4227 }
4228
4229 /* See target.h. */
4230
4231 void
4232 target_call_history (int size, record_print_flags flags)
4233 {
4234 current_inferior ()->top_target ()->call_history (size, flags);
4235 }
4236
4237 /* See target.h. */
4238
4239 void
4240 target_call_history_from (ULONGEST begin, int size, record_print_flags flags)
4241 {
4242 current_inferior ()->top_target ()->call_history_from (begin, size, flags);
4243 }
4244
4245 /* See target.h. */
4246
4247 void
4248 target_call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
4249 {
4250 current_inferior ()->top_target ()->call_history_range (begin, end, flags);
4251 }
4252
4253 /* See target.h. */
4254
4255 const struct frame_unwind *
4256 target_get_unwinder (void)
4257 {
4258 return current_inferior ()->top_target ()->get_unwinder ();
4259 }
4260
4261 /* See target.h. */
4262
4263 const struct frame_unwind *
4264 target_get_tailcall_unwinder (void)
4265 {
4266 return current_inferior ()->top_target ()->get_tailcall_unwinder ();
4267 }
4268
4269 /* See target.h. */
4270
4271 void
4272 target_prepare_to_generate_core (void)
4273 {
4274 current_inferior ()->top_target ()->prepare_to_generate_core ();
4275 }
4276
4277 /* See target.h. */
4278
4279 void
4280 target_done_generating_core (void)
4281 {
4282 current_inferior ()->top_target ()->done_generating_core ();
4283 }
4284
4285 \f
4286
4287 static char targ_desc[] =
4288 "Names of targets and files being debugged.\nShows the entire \
4289 stack of targets currently in use (including the exec-file,\n\
4290 core-file, and process, if any), as well as the symbol file name.";
4291
4292 static void
4293 default_rcmd (struct target_ops *self, const char *command,
4294 struct ui_file *output)
4295 {
4296 error (_("\"monitor\" command not supported by this target."));
4297 }
4298
4299 static void
4300 do_monitor_command (const char *cmd, int from_tty)
4301 {
4302 target_rcmd (cmd, gdb_stdtarg);
4303 }
4304
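/* Illustrative sketch, not part of the original source: from the CLI,
   "monitor" forwards its argument string verbatim to the target's rcmd
   method, with the reply written to gdb_stdtarg, e.g. with a remote
   stub that understands a (hypothetical) "reset" command:

     (gdb) monitor reset

   Targets that do not provide rcmd fall back to default_rcmd above and
   report that "monitor" is not supported.  */
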
4305 /* Erases all the memory regions marked as flash. CMD and FROM_TTY are
4306 ignored. */
4307
4308 void
4309 flash_erase_command (const char *cmd, int from_tty)
4310 {
4311 /* Whether any flash region was erased; used to decide whether to tell the target that flash operations are done. */
4312 bool found_flash_region = false;
4313 struct gdbarch *gdbarch = target_gdbarch ();
4314
4315 std::vector<mem_region> mem_regions = target_memory_map ();
4316
4317 /* Iterate over all memory regions. */
4318 for (const mem_region &m : mem_regions)
4319 {
4320 /* Is this a flash memory region? */
4321 if (m.attrib.mode == MEM_FLASH)
4322 {
4323 found_flash_region = true;
4324 target_flash_erase (m.lo, m.hi - m.lo);
4325
4326 ui_out_emit_tuple tuple_emitter (current_uiout, "erased-regions");
4327
4328 current_uiout->message (_("Erasing flash memory region at address "));
4329 current_uiout->field_core_addr ("address", gdbarch, m.lo);
4330 current_uiout->message (", size = ");
4331 current_uiout->field_string ("size", hex_string (m.hi - m.lo));
4332 current_uiout->message ("\n");
4333 }
4334 }
4335
4336 /* Did we do any flash operations? If so, we need to finalize them. */
4337 if (found_flash_region)
4338 target_flash_done ();
4339 else
4340 current_uiout->message (_("No flash memory regions found.\n"));
4341 }
4342
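/* Illustrative example, not part of the original source: with a memory
   map that marks one region as flash, the command above produces output
   along these lines (the address and size are made up):

     (gdb) flash-erase
     Erasing flash memory region at address 0x8000000, size = 0x20000

   followed by a single target_flash_done call; if no flash regions are
   found it prints "No flash memory regions found." instead.  */
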
4343 /* Print the name of each layer of our target stack. */
4344
4345 static void
4346 maintenance_print_target_stack (const char *cmd, int from_tty)
4347 {
4348 gdb_printf (_("The current target stack is:\n"));
4349
4350 for (target_ops *t = current_inferior ()->top_target ();
4351 t != NULL;
4352 t = t->beneath ())
4353 {
4354 if (t->stratum () == debug_stratum)
4355 continue;
4356 gdb_printf (" - %s (%s)\n", t->shortname (), t->longname ());
4357 }
4358 }
4359
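/* Illustrative example, not part of the original source: the command
   above prints one line per stratum, topmost target first, skipping the
   debug_stratum wrapper.  For a typical native session the output looks
   roughly like:

     (gdb) maint print target-stack
     The current target stack is:
       - native (Native process)
       - exec (Local exec file)
       - None (None)

   (the short and long names depend on the targets actually pushed).  */
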
4360 /* See target.h. */
4361
4362 void
4363 target_async (bool enable)
4364 {
4365 /* If we are trying to enable async mode then it must be the case that
4366 async mode is possible for this target. */
4367 gdb_assert (!enable || target_can_async_p ());
4368 infrun_async (enable);
4369 current_inferior ()->top_target ()->async (enable);
4370 }
4371
4372 /* See target.h. */
4373
4374 void
4375 target_thread_events (int enable)
4376 {
4377 current_inferior ()->top_target ()->thread_events (enable);
4378 }
4379
4380 /* Controls whether targets can report that they can be or are async.
4381 This is just for maintainers to use when debugging gdb. */
4382 bool target_async_permitted = true;
4383
4384 static void
4385 set_maint_target_async (bool permitted)
4386 {
4387 if (have_live_inferiors ())
4388 error (_("Cannot change this setting while the inferior is running."));
4389
4390 target_async_permitted = permitted;
4391 }
4392
4393 static bool
4394 get_maint_target_async ()
4395 {
4396 return target_async_permitted;
4397 }
4398
4399 static void
4400 show_maint_target_async (ui_file *file, int from_tty,
4401 cmd_list_element *c, const char *value)
4402 {
4403 gdb_printf (file,
4404 _("Controlling the inferior in "
4405 "asynchronous mode is %s.\n"), value);
4406 }
4407
4408 /* Return true if the target operates in non-stop mode even with "set
4409 non-stop off". */
4410
4411 static int
4412 target_always_non_stop_p (void)
4413 {
4414 return current_inferior ()->top_target ()->always_non_stop_p ();
4415 }
4416
4417 /* See target.h. */
4418
4419 bool
4420 target_is_non_stop_p ()
4421 {
4422 return ((non_stop
4423 || target_non_stop_enabled == AUTO_BOOLEAN_TRUE
4424 || (target_non_stop_enabled == AUTO_BOOLEAN_AUTO
4425 && target_always_non_stop_p ()))
4426 && target_can_async_p ());
4427 }
4428
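/* Illustrative note, not part of the original source: with the default
   settings ("set non-stop off" and "maint set target-non-stop auto"),
   the expression above reduces to

     target_always_non_stop_p () && target_can_async_p ()

   i.e. a target that always runs non-stop (and can run async) is treated
   as non-stop even though the user-visible mode is all-stop.  */
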
4429 /* See target.h. */
4430
4431 bool
4432 exists_non_stop_target ()
4433 {
4434 if (target_is_non_stop_p ())
4435 return true;
4436
4437 scoped_restore_current_thread restore_thread;
4438
4439 for (inferior *inf : all_inferiors ())
4440 {
4441 switch_to_inferior_no_thread (inf);
4442 if (target_is_non_stop_p ())
4443 return true;
4444 }
4445
4446 return false;
4447 }
4448
4449 /* Controls whether targets can report that they always run in non-stop
4450 mode. This is just for maintainers to use when debugging gdb. */
4451 enum auto_boolean target_non_stop_enabled = AUTO_BOOLEAN_AUTO;
4452
4453 /* Set callback for maint target-non-stop setting. */
4454
4455 static void
4456 set_maint_target_non_stop (auto_boolean enabled)
4457 {
4458 if (have_live_inferiors ())
4459 error (_("Cannot change this setting while the inferior is running."));
4460
4461 target_non_stop_enabled = enabled;
4462 }
4463
4464 /* Get callback for maint target-non-stop setting. */
4465
4466 static auto_boolean
4467 get_maint_target_non_stop ()
4468 {
4469 return target_non_stop_enabled;
4470 }
4471
4472 static void
4473 show_maint_target_non_stop (ui_file *file, int from_tty,
4474 cmd_list_element *c, const char *value)
4475 {
4476 if (target_non_stop_enabled == AUTO_BOOLEAN_AUTO)
4477 gdb_printf (file,
4478 _("Whether the target is always in non-stop mode "
4479 "is %s (currently %s).\n"), value,
4480 target_always_non_stop_p () ? "on" : "off");
4481 else
4482 gdb_printf (file,
4483 _("Whether the target is always in non-stop mode "
4484 "is %s.\n"), value);
4485 }
4486
4487 /* Temporary copies of permission settings. */
4488
4489 static bool may_write_registers_1 = true;
4490 static bool may_write_memory_1 = true;
4491 static bool may_insert_breakpoints_1 = true;
4492 static bool may_insert_tracepoints_1 = true;
4493 static bool may_insert_fast_tracepoints_1 = true;
4494 static bool may_stop_1 = true;
4495
4496 /* Make the user-set values match the real values again. */
4497
4498 void
4499 update_target_permissions (void)
4500 {
4501 may_write_registers_1 = may_write_registers;
4502 may_write_memory_1 = may_write_memory;
4503 may_insert_breakpoints_1 = may_insert_breakpoints;
4504 may_insert_tracepoints_1 = may_insert_tracepoints;
4505 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
4506 may_stop_1 = may_stop;
4507 }
4508
4509 /* This single function handles (most of) the permission flags in the
4510 same way. */
4511
4512 static void
4513 set_target_permissions (const char *args, int from_tty,
4514 struct cmd_list_element *c)
4515 {
4516 if (target_has_execution ())
4517 {
4518 update_target_permissions ();
4519 error (_("Cannot change this setting while the inferior is running."));
4520 }
4521
4522 /* Make the real values match the user-changed values. */
4523 may_write_registers = may_write_registers_1;
4524 may_insert_breakpoints = may_insert_breakpoints_1;
4525 may_insert_tracepoints = may_insert_tracepoints_1;
4526 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
4527 may_stop = may_stop_1;
4528 update_observer_mode ();
4529 }
4530
4531 /* Set memory write permission independently of observer mode. */
4532
4533 static void
4534 set_write_memory_permission (const char *args, int from_tty,
4535 struct cmd_list_element *c)
4536 {
4537 /* Make the real values match the user-changed values. */
4538 may_write_memory = may_write_memory_1;
4539 update_observer_mode ();
4540 }
4541
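/* Illustrative example, not part of the original source: the permission
   flags above surface as ordinary set/show commands (registered in
   _initialize_target below), e.g.:

     (gdb) set may-write-memory off
     (gdb) show may-write-memory

   may-write-memory goes through set_write_memory_permission so that it
   can be toggled independently of observer mode, while the other flags
   share set_target_permissions.  */
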
4542 void _initialize_target ();
4543
4544 void
4545 _initialize_target ()
4546 {
4547 the_debug_target = new debug_target ();
4548
4549 add_info ("target", info_target_command, targ_desc);
4550 add_info ("files", info_target_command, targ_desc);
4551
4552 add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
4553 Set target debugging."), _("\
4554 Show target debugging."), _("\
4555 When non-zero, target debugging is enabled. Higher numbers are more\n\
4556 verbose."),
4557 set_targetdebug,
4558 show_targetdebug,
4559 &setdebuglist, &showdebuglist);
4560
4561 add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
4562 &trust_readonly, _("\
4563 Set mode for reading from readonly sections."), _("\
4564 Show mode for reading from readonly sections."), _("\
4565 When this mode is on, memory reads from readonly sections (such as .text)\n\
4566 will be read from the object file instead of from the target. This will\n\
4567 result in significant performance improvement for remote targets."),
4568 NULL,
4569 show_trust_readonly,
4570 &setlist, &showlist);
4571
4572 add_com ("monitor", class_obscure, do_monitor_command,
4573 _("Send a command to the remote monitor (remote targets only)."));
4574
4575 add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
4576 _("Print the name of each layer of the internal target stack."),
4577 &maintenanceprintlist);
4578
4579 add_setshow_boolean_cmd ("target-async", no_class,
4580 _("\
4581 Set whether gdb controls the inferior in asynchronous mode."), _("\
4582 Show whether gdb controls the inferior in asynchronous mode."), _("\
4583 Tells gdb whether to control the inferior in asynchronous mode."),
4584 set_maint_target_async,
4585 get_maint_target_async,
4586 show_maint_target_async,
4587 &maintenance_set_cmdlist,
4588 &maintenance_show_cmdlist);
4589
4590 add_setshow_auto_boolean_cmd ("target-non-stop", no_class,
4591 _("\
4592 Set whether gdb always controls the inferior in non-stop mode."), _("\
4593 Show whether gdb always controls the inferior in non-stop mode."), _("\
4594 Tells gdb whether to control the inferior in non-stop mode."),
4595 set_maint_target_non_stop,
4596 get_maint_target_non_stop,
4597 show_maint_target_non_stop,
4598 &maintenance_set_cmdlist,
4599 &maintenance_show_cmdlist);
4600
4601 add_setshow_boolean_cmd ("may-write-registers", class_support,
4602 &may_write_registers_1, _("\
4603 Set permission to write into registers."), _("\
4604 Show permission to write into registers."), _("\
4605 When this permission is on, GDB may write into the target's registers.\n\
4606 Otherwise, any sort of write attempt will result in an error."),
4607 set_target_permissions, NULL,
4608 &setlist, &showlist);
4609
4610 add_setshow_boolean_cmd ("may-write-memory", class_support,
4611 &may_write_memory_1, _("\
4612 Set permission to write into target memory."), _("\
4613 Show permission to write into target memory."), _("\
4614 When this permission is on, GDB may write into the target's memory.\n\
4615 Otherwise, any sort of write attempt will result in an error."),
4616 set_write_memory_permission, NULL,
4617 &setlist, &showlist);
4618
4619 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
4620 &may_insert_breakpoints_1, _("\
4621 Set permission to insert breakpoints in the target."), _("\
4622 Show permission to insert breakpoints in the target."), _("\
4623 When this permission is on, GDB may insert breakpoints in the program.\n\
4624 Otherwise, any sort of insertion attempt will result in an error."),
4625 set_target_permissions, NULL,
4626 &setlist, &showlist);
4627
4628 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
4629 &may_insert_tracepoints_1, _("\
4630 Set permission to insert tracepoints in the target."), _("\
4631 Show permission to insert tracepoints in the target."), _("\
4632 When this permission is on, GDB may insert tracepoints in the program.\n\
4633 Otherwise, any sort of insertion attempt will result in an error."),
4634 set_target_permissions, NULL,
4635 &setlist, &showlist);
4636
4637 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
4638 &may_insert_fast_tracepoints_1, _("\
4639 Set permission to insert fast tracepoints in the target."), _("\
4640 Show permission to insert fast tracepoints in the target."), _("\
4641 When this permission is on, GDB may insert fast tracepoints.\n\
4642 Otherwise, any sort of insertion attempt will result in an error."),
4643 set_target_permissions, NULL,
4644 &setlist, &showlist);
4645
4646 add_setshow_boolean_cmd ("may-interrupt", class_support,
4647 &may_stop_1, _("\
4648 Set permission to interrupt or signal the target."), _("\
4649 Show permission to interrupt or signal the target."), _("\
4650 When this permission is on, GDB may interrupt/stop the target's execution.\n\
4651 Otherwise, any attempt to interrupt or stop will be ignored."),
4652 set_target_permissions, NULL,
4653 &setlist, &showlist);
4654
4655 add_com ("flash-erase", no_class, flash_erase_command,
4656 _("Erase all flash memory regions."));
4657
4658 add_setshow_boolean_cmd ("auto-connect-native-target", class_support,
4659 &auto_connect_native_target, _("\
4660 Set whether GDB may automatically connect to the native target."), _("\
4661 Show whether GDB may automatically connect to the native target."), _("\
4662 When on, and GDB is not connected to a target yet, GDB\n\
4663 attempts \"run\" and other commands with the native target."),
4664 NULL, show_auto_connect_native_target,
4665 &setlist, &showlist);
4666 }