Pre-read DWARF section data
[binutils-gdb.git] / gdb / infrun.c
1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986-2022 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include "displaced-stepping.h"
23 #include "infrun.h"
24 #include <ctype.h>
25 #include "symtab.h"
26 #include "frame.h"
27 #include "inferior.h"
28 #include "breakpoint.h"
29 #include "gdbcore.h"
30 #include "gdbcmd.h"
31 #include "target.h"
32 #include "target-connection.h"
33 #include "gdbthread.h"
34 #include "annotate.h"
35 #include "symfile.h"
36 #include "top.h"
37 #include "inf-loop.h"
38 #include "regcache.h"
39 #include "value.h"
40 #include "observable.h"
41 #include "language.h"
42 #include "solib.h"
43 #include "main.h"
44 #include "block.h"
45 #include "mi/mi-common.h"
46 #include "event-top.h"
47 #include "record.h"
48 #include "record-full.h"
49 #include "inline-frame.h"
50 #include "jit.h"
51 #include "tracepoint.h"
52 #include "skip.h"
53 #include "probe.h"
54 #include "objfiles.h"
55 #include "completer.h"
56 #include "target-descriptions.h"
57 #include "target-dcache.h"
58 #include "terminal.h"
59 #include "solist.h"
60 #include "gdbsupport/event-loop.h"
61 #include "thread-fsm.h"
62 #include "gdbsupport/enum-flags.h"
63 #include "progspace-and-thread.h"
64 #include "gdbsupport/gdb_optional.h"
65 #include "arch-utils.h"
66 #include "gdbsupport/scope-exit.h"
67 #include "gdbsupport/forward-scope-exit.h"
68 #include "gdbsupport/gdb_select.h"
69 #include <unordered_map>
70 #include "async-event.h"
71 #include "gdbsupport/selftest.h"
72 #include "scoped-mock-context.h"
73 #include "test-target.h"
74 #include "gdbsupport/common-debug.h"
75 #include "gdbsupport/buildargv.h"
76
77 /* Prototypes for local functions */
78
79 static void sig_print_info (enum gdb_signal);
80
81 static void sig_print_header (void);
82
83 static void follow_inferior_reset_breakpoints (void);
84
85 static bool currently_stepping (struct thread_info *tp);
86
87 static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);
88
89 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
90
91 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
92
93 static bool maybe_software_singlestep (struct gdbarch *gdbarch);
94
95 static void resume (gdb_signal sig);
96
97 static void wait_for_inferior (inferior *inf);
98
99 static void restart_threads (struct thread_info *event_thread,
100 inferior *inf = nullptr);
101
102 static bool start_step_over (void);
103
104 /* Asynchronous signal handler registered as event loop source for
105 when we have pending events ready to be passed to the core. */
106 static struct async_event_handler *infrun_async_inferior_event_token;
107
108 /* Stores whether infrun_async was previously enabled or disabled.
109 Starts off as -1, indicating "never enabled/disabled". */
110 static int infrun_is_async = -1;
111
112 /* See infrun.h. */
113
114 void
115 infrun_async (int enable)
116 {
117 if (infrun_is_async != enable)
118 {
119 infrun_is_async = enable;
120
121 infrun_debug_printf ("enable=%d", enable);
122
123 if (enable)
124 mark_async_event_handler (infrun_async_inferior_event_token);
125 else
126 clear_async_event_handler (infrun_async_inferior_event_token);
127 }
128 }
129
130 /* See infrun.h. */
131
void
mark_infrun_async_event_handler (void)
{
  /* Flag infrun's async event handler as ready so that the event
     loop processes pending inferior events on its next iteration.  */
  mark_async_event_handler (infrun_async_inferior_event_token);
}
137
138 /* When set, stop the 'step' command if we enter a function which has
139 no line number information. The normal behavior is that we step
140 over such function. */
141 bool step_stop_if_no_debug = false;
/* "show" callback for the "step-mode" setting.  */

static void
show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
			    struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Mode of the step operation is %s.\n"), value);
}
148
149 /* proceed and normal_stop use this to notify the user when the
150 inferior stopped in a different thread than it had been running
151 in. */
152
153 static ptid_t previous_inferior_ptid;
154
155 /* If set (default for legacy reasons), when following a fork, GDB
156 will detach from one of the fork branches, child or parent.
157 Exactly which branch is detached depends on 'set follow-fork-mode'
158 setting. */
159
160 static bool detach_fork = true;
161
162 bool debug_infrun = false;
/* "show" callback for the "debug infrun" setting.  */

static void
show_debug_infrun (struct ui_file *file, int from_tty,
		   struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Inferior debugging is %s.\n"), value);
}
169
170 /* Support for disabling address space randomization. */
171
172 bool disable_randomization = true;
173
174 static void
175 show_disable_randomization (struct ui_file *file, int from_tty,
176 struct cmd_list_element *c, const char *value)
177 {
178 if (target_supports_disable_randomization ())
179 gdb_printf (file,
180 _("Disabling randomization of debuggee's "
181 "virtual address space is %s.\n"),
182 value);
183 else
184 gdb_puts (_("Disabling randomization of debuggee's "
185 "virtual address space is unsupported on\n"
186 "this platform.\n"), file);
187 }
188
/* "set" callback for the "disable-randomization" setting.  Rejects
   the change when the target cannot honor it.  */

static void
set_disable_randomization (const char *args, int from_tty,
			   struct cmd_list_element *c)
{
  if (!target_supports_disable_randomization ())
    error (_("Disabling randomization of debuggee's "
	     "virtual address space is unsupported on\n"
	     "this platform."));
}
198
199 /* User interface for non-stop mode. */
200
201 bool non_stop = false;
202 static bool non_stop_1 = false;
203
/* "set" callback for the "non-stop" setting.  The mode cannot be
   changed while the inferior is executing.  */

static void
set_non_stop (const char *args, int from_tty,
	      struct cmd_list_element *c)
{
  if (target_has_execution ())
    {
      /* Roll back the staged value so "show non-stop" stays
	 truthful, then report the error.  */
      non_stop_1 = non_stop;
      error (_("Cannot change this setting while the inferior is running."));
    }

  /* Commit the value staged by the "set" machinery.  */
  non_stop = non_stop_1;
}
216
/* "show" callback for the "non-stop" setting.  */

static void
show_non_stop (struct ui_file *file, int from_tty,
	       struct cmd_list_element *c, const char *value)
{
  gdb_printf (file,
	      _("Controlling the inferior in non-stop mode is %s.\n"),
	      value);
}
225
226 /* "Observer mode" is somewhat like a more extreme version of
227 non-stop, in which all GDB operations that might affect the
228 target's execution have been disabled. */
229
230 static bool observer_mode = false;
231 static bool observer_mode_1 = false;
232
/* "set" callback for the "observer" setting.  Commits the staged
   value and propagates it to the individual target permission
   flags.  */

static void
set_observer_mode (const char *args, int from_tty,
		   struct cmd_list_element *c)
{
  if (target_has_execution ())
    {
      /* Undo the staged value so "show observer" stays accurate.  */
      observer_mode_1 = observer_mode;
      error (_("Cannot change this setting while the inferior is running."));
    }

  observer_mode = observer_mode_1;

  /* Observer mode disallows every operation that could perturb the
     target's execution.  */
  may_write_registers = !observer_mode;
  may_write_memory = !observer_mode;
  may_insert_breakpoints = !observer_mode;
  may_insert_tracepoints = !observer_mode;
  /* We can insert fast tracepoints in or out of observer mode,
     but enable them if we're going into this mode.  */
  if (observer_mode)
    may_insert_fast_tracepoints = true;
  may_stop = !observer_mode;
  update_target_permissions ();

  /* Going *into* observer mode we must force non-stop, then
     going out we leave it that way.  */
  if (observer_mode)
    {
      pagination_enabled = 0;
      non_stop = non_stop_1 = true;
    }

  if (from_tty)
    gdb_printf (_("Observer mode is now %s.\n"),
		(observer_mode ? "on" : "off"));
}
268
/* "show" callback for the "observer" setting.  */

static void
show_observer_mode (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Observer mode is %s.\n"), value);
}
275
276 /* This updates the value of observer mode based on changes in
277 permissions. Note that we are deliberately ignoring the values of
278 may-write-registers and may-write-memory, since the user may have
279 reason to enable these during a session, for instance to turn on a
280 debugging-related global. */
281
282 void
283 update_observer_mode (void)
284 {
285 bool newval = (!may_insert_breakpoints
286 && !may_insert_tracepoints
287 && may_insert_fast_tracepoints
288 && !may_stop
289 && non_stop);
290
291 /* Let the user know if things change. */
292 if (newval != observer_mode)
293 gdb_printf (_("Observer mode is now %s.\n"),
294 (newval ? "on" : "off"));
295
296 observer_mode = observer_mode_1 = newval;
297 }
298
299 /* Tables of how to react to signals; the user sets them. */
300
301 static unsigned char signal_stop[GDB_SIGNAL_LAST];
302 static unsigned char signal_print[GDB_SIGNAL_LAST];
303 static unsigned char signal_program[GDB_SIGNAL_LAST];
304
305 /* Table of signals that are registered with "catch signal". A
306 non-zero entry indicates that the signal is caught by some "catch
307 signal" command. */
308 static unsigned char signal_catch[GDB_SIGNAL_LAST];
309
310 /* Table of signals that the target may silently handle.
311 This is automatically determined from the flags above,
312 and simply cached here. */
313 static unsigned char signal_pass[GDB_SIGNAL_LAST];
314
315 #define SET_SIGS(nsigs,sigs,flags) \
316 do { \
317 int signum = (nsigs); \
318 while (signum-- > 0) \
319 if ((sigs)[signum]) \
320 (flags)[signum] = 1; \
321 } while (0)
322
323 #define UNSET_SIGS(nsigs,sigs,flags) \
324 do { \
325 int signum = (nsigs); \
326 while (signum-- > 0) \
327 if ((sigs)[signum]) \
328 (flags)[signum] = 0; \
329 } while (0)
330
331 /* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
332 this function is to avoid exporting `signal_program'. */
333
void
update_signals_program_target (void)
{
  /* Push GDB's cached signal_program table down to the target, so
     the table itself need not be exported from this file.  */
  target_program_signals (signal_program);
}
339
340 /* Value to pass to target_resume() to cause all threads to resume. */
341
342 #define RESUME_ALL minus_one_ptid
343
344 /* Command list pointer for the "stop" placeholder. */
345
346 static struct cmd_list_element *stop_command;
347
348 /* Nonzero if we want to give control to the user when we're notified
349 of shared library events by the dynamic linker. */
350 int stop_on_solib_events;
351
352 /* Enable or disable optional shared library event breakpoints
353 as appropriate when the above flag is changed. */
354
static void
set_stop_on_solib_events (const char *args,
			  int from_tty, struct cmd_list_element *c)
{
  /* Re-sync the shared library event breakpoints with the new flag
     value.  */
  update_solib_breakpoints ();
}
361
/* "show" callback for the "stop-on-solib-events" setting.  */

static void
show_stop_on_solib_events (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Stopping for shared library events is %s.\n"),
	      value);
}
369
370 /* True after stop if current stack frame should be printed. */
371
372 static bool stop_print_frame;
373
374 /* This is a cached copy of the target/ptid/waitstatus of the last
375 event returned by target_wait().
376 This information is returned by get_last_target_status(). */
377 static process_stratum_target *target_last_proc_target;
378 static ptid_t target_last_wait_ptid;
379 static struct target_waitstatus target_last_waitstatus;
380
381 void init_thread_stepping_state (struct thread_info *tss);
382
383 static const char follow_fork_mode_child[] = "child";
384 static const char follow_fork_mode_parent[] = "parent";
385
386 static const char *const follow_fork_mode_kind_names[] = {
387 follow_fork_mode_child,
388 follow_fork_mode_parent,
389 NULL
390 };
391
392 static const char *follow_fork_mode_string = follow_fork_mode_parent;
/* "show" callback for the "follow-fork-mode" setting.  */

static void
show_follow_fork_mode_string (struct ui_file *file, int from_tty,
			      struct cmd_list_element *c, const char *value)
{
  gdb_printf (file,
	      _("Debugger response to a program "
		"call of fork or vfork is \"%s\".\n"),
	      value);
}
402 \f
403
/* Handle changes to the inferior list based on the type of fork,
   which process is being followed, and whether the other process
   should be detached.  On entry inferior_ptid must be the ptid of
   the fork parent.  At return inferior_ptid is the ptid of the
   followed inferior.  Returns true if the caller should NOT resume
   the inferiors; false if resuming is OK.  */

static bool
follow_fork_inferior (bool follow_child, bool detach_fork)
{
  target_waitkind fork_kind = inferior_thread ()->pending_follow.kind ();
  gdb_assert (fork_kind == TARGET_WAITKIND_FORKED
	      || fork_kind == TARGET_WAITKIND_VFORKED);
  bool has_vforked = fork_kind == TARGET_WAITKIND_VFORKED;
  ptid_t parent_ptid = inferior_ptid;
  ptid_t child_ptid = inferior_thread ()->pending_follow.child_ptid ();

  if (has_vforked
      && !non_stop /* Non-stop always resumes both branches.  */
      && current_ui->prompt_state == PROMPT_BLOCKED
      && !(follow_child || detach_fork || sched_multi))
    {
      /* The parent stays blocked inside the vfork syscall until the
	 child execs or exits.  If we don't let the child run, then
	 the parent stays blocked.  If we're telling the parent to run
	 in the foreground, the user will not be able to ctrl-c to get
	 back the terminal, effectively hanging the debug session.  */
      gdb_printf (gdb_stderr, _("\
Can not resume the parent process over vfork in the foreground while\n\
holding the child stopped.  Try \"set detach-on-fork\" or \
\"set schedule-multiple\".\n"));
      return true;
    }

  inferior *parent_inf = current_inferior ();
  inferior *child_inf = nullptr;

  gdb_assert (parent_inf->thread_waiting_for_vfork_done == nullptr);

  if (!follow_child)
    {
      /* Detach new forked process?  */
      if (detach_fork)
	{
	  /* Before detaching from the child, remove all breakpoints
	     from it.  If we forked, then this has already been taken
	     care of by infrun.c.  If we vforked however, any
	     breakpoint inserted in the parent is visible in the
	     child, even those added while stopped in a vfork
	     catchpoint.  This will remove the breakpoints from the
	     parent also, but they'll be reinserted below.  */
	  if (has_vforked)
	    {
	      /* Keep breakpoints list in sync.  */
	      remove_breakpoints_inf (current_inferior ());
	    }

	  if (print_inferior_events)
	    {
	      /* Ensure that we have a process ptid.  */
	      ptid_t process_ptid = ptid_t (child_ptid.pid ());

	      target_terminal::ours_for_output ();
	      gdb_printf (_("[Detaching after %s from child %s]\n"),
			  has_vforked ? "vfork" : "fork",
			  target_pid_to_str (process_ptid).c_str ());
	    }
	}
      else
	{
	  /* Add process to GDB's tables.  */
	  child_inf = add_inferior (child_ptid.pid ());

	  child_inf->attach_flag = parent_inf->attach_flag;
	  copy_terminal_info (child_inf, parent_inf);
	  child_inf->gdbarch = parent_inf->gdbarch;
	  copy_inferior_target_desc_info (child_inf, parent_inf);

	  /* Symbols will be shared via the program space copy below;
	     don't re-read them for the child.  */
	  child_inf->symfile_flags = SYMFILE_NO_READ;

	  /* If this is a vfork child, then the address-space is
	     shared with the parent.  */
	  if (has_vforked)
	    {
	      child_inf->pspace = parent_inf->pspace;
	      child_inf->aspace = parent_inf->aspace;

	      exec_on_vfork (child_inf);

	      /* The parent will be frozen until the child is done
		 with the shared region.  Keep track of the
		 parent.  */
	      child_inf->vfork_parent = parent_inf;
	      child_inf->pending_detach = 0;
	      parent_inf->vfork_child = child_inf;
	      parent_inf->pending_detach = 0;
	    }
	  else
	    {
	      child_inf->aspace = new_address_space ();
	      child_inf->pspace = new program_space (child_inf->aspace);
	      child_inf->removable = 1;
	      clone_program_space (child_inf->pspace, parent_inf->pspace);
	    }
	}

      if (has_vforked)
	{
	  /* If we detached from the child, then we have to be careful
	     to not insert breakpoints in the parent until the child
	     is done with the shared memory region.  However, if we're
	     staying attached to the child, then we can and should
	     insert breakpoints, so that we can debug it.  A
	     subsequent child exec or exit is enough to know when does
	     the child stops using the parent's address space.  */
	  parent_inf->thread_waiting_for_vfork_done
	    = detach_fork ? inferior_thread () : nullptr;
	  parent_inf->pspace->breakpoints_not_allowed = detach_fork;
	}
    }
  else
    {
      /* Follow the child.  */

      if (print_inferior_events)
	{
	  std::string parent_pid = target_pid_to_str (parent_ptid);
	  std::string child_pid = target_pid_to_str (child_ptid);

	  target_terminal::ours_for_output ();
	  gdb_printf (_("[Attaching after %s %s to child %s]\n"),
		      parent_pid.c_str (),
		      has_vforked ? "vfork" : "fork",
		      child_pid.c_str ());
	}

      /* Add the new inferior first, so that the target_detach below
	 doesn't unpush the target.  */

      child_inf = add_inferior (child_ptid.pid ());

      child_inf->attach_flag = parent_inf->attach_flag;
      copy_terminal_info (child_inf, parent_inf);
      child_inf->gdbarch = parent_inf->gdbarch;
      copy_inferior_target_desc_info (child_inf, parent_inf);

      if (has_vforked)
	{
	  /* If this is a vfork child, then the address-space is shared
	     with the parent.  */
	  child_inf->aspace = parent_inf->aspace;
	  child_inf->pspace = parent_inf->pspace;

	  exec_on_vfork (child_inf);
	}
      else if (detach_fork)
	{
	  /* We follow the child and detach from the parent: move the parent's
	     program space to the child.  This simplifies some things, like
	     doing "next" over fork() and landing on the expected line in the
	     child (note, that is broken with "set detach-on-fork off").

	     Before assigning brand new spaces for the parent, remove
	     breakpoints from it: because the new pspace won't match
	     currently inserted locations, the normal detach procedure
	     wouldn't remove them, and we would leave them inserted when
	     detaching.  */
	  remove_breakpoints_inf (parent_inf);

	  child_inf->aspace = parent_inf->aspace;
	  child_inf->pspace = parent_inf->pspace;
	  parent_inf->aspace = new_address_space ();
	  parent_inf->pspace = new program_space (parent_inf->aspace);
	  clone_program_space (parent_inf->pspace, child_inf->pspace);

	  /* The parent inferior is still the current one, so keep things
	     in sync.  */
	  set_current_program_space (parent_inf->pspace);
	}
      else
	{
	  child_inf->aspace = new_address_space ();
	  child_inf->pspace = new program_space (child_inf->aspace);
	  child_inf->removable = 1;
	  child_inf->symfile_flags = SYMFILE_NO_READ;
	  clone_program_space (child_inf->pspace, parent_inf->pspace);
	}
    }

  gdb_assert (current_inferior () == parent_inf);

  /* If we are setting up an inferior for the child, target_follow_fork is
     responsible for pushing the appropriate targets on the new inferior's
     target stack and adding the initial thread (with ptid CHILD_PTID).

     If we are not setting up an inferior for the child (because following
     the parent and detach_fork is true), it is responsible for detaching
     from CHILD_PTID.  */
  target_follow_fork (child_inf, child_ptid, fork_kind, follow_child,
		      detach_fork);

  /* target_follow_fork must leave the parent as the current inferior.  If we
     want to follow the child, we make it the current one below.  */
  gdb_assert (current_inferior () == parent_inf);

  /* If there is a child inferior, target_follow_fork must have created a thread
     for it.  */
  if (child_inf != nullptr)
    gdb_assert (!child_inf->thread_list.empty ());

  /* Clear the parent thread's pending follow field.  Do this before calling
     target_detach, so that the target can differentiate the two following
     cases:

     - We continue past a fork with "follow-fork-mode == child" &&
       "detach-on-fork on", and therefore detach the parent.  In that
       case the target should not detach the fork child.
     - We run to a fork catchpoint and the user types "detach".  In that
       case, the target should detach the fork child in addition to the
       parent.

     The former case will have pending_follow cleared, the later will have
     pending_follow set.  */
  thread_info *parent_thread = find_thread_ptid (parent_inf, parent_ptid);
  gdb_assert (parent_thread != nullptr);
  parent_thread->pending_follow.set_spurious ();

  /* Detach the parent if needed.  */
  if (follow_child)
    {
      /* If we're vforking, we want to hold on to the parent until
	 the child exits or execs.  At child exec or exit time we
	 can remove the old breakpoints from the parent and detach
	 or resume debugging it.  Otherwise, detach the parent now;
	 we'll want to reuse it's program/address spaces, but we
	 can't set them to the child before removing breakpoints
	 from the parent, otherwise, the breakpoints module could
	 decide to remove breakpoints from the wrong process (since
	 they'd be assigned to the same address space).  */

      if (has_vforked)
	{
	  gdb_assert (child_inf->vfork_parent == NULL);
	  gdb_assert (parent_inf->vfork_child == NULL);
	  child_inf->vfork_parent = parent_inf;
	  child_inf->pending_detach = 0;
	  parent_inf->vfork_child = child_inf;
	  parent_inf->pending_detach = detach_fork;
	}
      else if (detach_fork)
	{
	  if (print_inferior_events)
	    {
	      /* Ensure that we have a process ptid.  */
	      ptid_t process_ptid = ptid_t (parent_ptid.pid ());

	      target_terminal::ours_for_output ();
	      gdb_printf (_("[Detaching after fork from "
			    "parent %s]\n"),
			  target_pid_to_str (process_ptid).c_str ());
	    }

	  target_detach (parent_inf, 0);
	}
    }

  /* If we ended up creating a new inferior, call post_create_inferior to inform
     the various subcomponents.  */
  if (child_inf != nullptr)
    {
      /* If FOLLOW_CHILD, we leave CHILD_INF as the current inferior
	 (do not restore the parent as the current inferior).  */
      gdb::optional<scoped_restore_current_thread> maybe_restore;

      if (!follow_child)
	maybe_restore.emplace ();

      switch_to_thread (*child_inf->threads ().begin ());
      post_create_inferior (0);
    }

  return false;
}
686
687 /* Tell the target to follow the fork we're stopped at. Returns true
688 if the inferior should be resumed; false, if the target for some
689 reason decided it's best not to resume. */
690
691 static bool
692 follow_fork ()
693 {
694 bool follow_child = (follow_fork_mode_string == follow_fork_mode_child);
695 bool should_resume = true;
696
697 /* Copy user stepping state to the new inferior thread. FIXME: the
698 followed fork child thread should have a copy of most of the
699 parent thread structure's run control related fields, not just these.
700 Initialized to avoid "may be used uninitialized" warnings from gcc. */
701 struct breakpoint *step_resume_breakpoint = NULL;
702 struct breakpoint *exception_resume_breakpoint = NULL;
703 CORE_ADDR step_range_start = 0;
704 CORE_ADDR step_range_end = 0;
705 int current_line = 0;
706 symtab *current_symtab = NULL;
707 struct frame_id step_frame_id = { 0 };
708
709 if (!non_stop)
710 {
711 process_stratum_target *wait_target;
712 ptid_t wait_ptid;
713 struct target_waitstatus wait_status;
714
715 /* Get the last target status returned by target_wait(). */
716 get_last_target_status (&wait_target, &wait_ptid, &wait_status);
717
718 /* If not stopped at a fork event, then there's nothing else to
719 do. */
720 if (wait_status.kind () != TARGET_WAITKIND_FORKED
721 && wait_status.kind () != TARGET_WAITKIND_VFORKED)
722 return 1;
723
724 /* Check if we switched over from WAIT_PTID, since the event was
725 reported. */
726 if (wait_ptid != minus_one_ptid
727 && (current_inferior ()->process_target () != wait_target
728 || inferior_ptid != wait_ptid))
729 {
730 /* We did. Switch back to WAIT_PTID thread, to tell the
731 target to follow it (in either direction). We'll
732 afterwards refuse to resume, and inform the user what
733 happened. */
734 thread_info *wait_thread = find_thread_ptid (wait_target, wait_ptid);
735 switch_to_thread (wait_thread);
736 should_resume = false;
737 }
738 }
739
740 thread_info *tp = inferior_thread ();
741
742 /* If there were any forks/vforks that were caught and are now to be
743 followed, then do so now. */
744 switch (tp->pending_follow.kind ())
745 {
746 case TARGET_WAITKIND_FORKED:
747 case TARGET_WAITKIND_VFORKED:
748 {
749 ptid_t parent, child;
750 std::unique_ptr<struct thread_fsm> thread_fsm;
751
752 /* If the user did a next/step, etc, over a fork call,
753 preserve the stepping state in the fork child. */
754 if (follow_child && should_resume)
755 {
756 step_resume_breakpoint = clone_momentary_breakpoint
757 (tp->control.step_resume_breakpoint);
758 step_range_start = tp->control.step_range_start;
759 step_range_end = tp->control.step_range_end;
760 current_line = tp->current_line;
761 current_symtab = tp->current_symtab;
762 step_frame_id = tp->control.step_frame_id;
763 exception_resume_breakpoint
764 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
765 thread_fsm = tp->release_thread_fsm ();
766
767 /* For now, delete the parent's sr breakpoint, otherwise,
768 parent/child sr breakpoints are considered duplicates,
769 and the child version will not be installed. Remove
770 this when the breakpoints module becomes aware of
771 inferiors and address spaces. */
772 delete_step_resume_breakpoint (tp);
773 tp->control.step_range_start = 0;
774 tp->control.step_range_end = 0;
775 tp->control.step_frame_id = null_frame_id;
776 delete_exception_resume_breakpoint (tp);
777 }
778
779 parent = inferior_ptid;
780 child = tp->pending_follow.child_ptid ();
781
782 /* If handling a vfork, stop all the inferior's threads, they will be
783 restarted when the vfork shared region is complete. */
784 if (tp->pending_follow.kind () == TARGET_WAITKIND_VFORKED
785 && target_is_non_stop_p ())
786 stop_all_threads ("handling vfork", tp->inf);
787
788 process_stratum_target *parent_targ = tp->inf->process_target ();
789 /* Set up inferior(s) as specified by the caller, and tell the
790 target to do whatever is necessary to follow either parent
791 or child. */
792 if (follow_fork_inferior (follow_child, detach_fork))
793 {
794 /* Target refused to follow, or there's some other reason
795 we shouldn't resume. */
796 should_resume = 0;
797 }
798 else
799 {
800 /* This makes sure we don't try to apply the "Switched
801 over from WAIT_PID" logic above. */
802 nullify_last_target_wait_ptid ();
803
804 /* If we followed the child, switch to it... */
805 if (follow_child)
806 {
807 thread_info *child_thr = find_thread_ptid (parent_targ, child);
808 switch_to_thread (child_thr);
809
810 /* ... and preserve the stepping state, in case the
811 user was stepping over the fork call. */
812 if (should_resume)
813 {
814 tp = inferior_thread ();
815 tp->control.step_resume_breakpoint
816 = step_resume_breakpoint;
817 tp->control.step_range_start = step_range_start;
818 tp->control.step_range_end = step_range_end;
819 tp->current_line = current_line;
820 tp->current_symtab = current_symtab;
821 tp->control.step_frame_id = step_frame_id;
822 tp->control.exception_resume_breakpoint
823 = exception_resume_breakpoint;
824 tp->set_thread_fsm (std::move (thread_fsm));
825 }
826 else
827 {
828 /* If we get here, it was because we're trying to
829 resume from a fork catchpoint, but, the user
830 has switched threads away from the thread that
831 forked. In that case, the resume command
832 issued is most likely not applicable to the
833 child, so just warn, and refuse to resume. */
834 warning (_("Not resuming: switched threads "
835 "before following fork child."));
836 }
837
838 /* Reset breakpoints in the child as appropriate. */
839 follow_inferior_reset_breakpoints ();
840 }
841 }
842 }
843 break;
844 case TARGET_WAITKIND_SPURIOUS:
845 /* Nothing to follow. */
846 break;
847 default:
848 internal_error (__FILE__, __LINE__,
849 "Unexpected pending_follow.kind %d\n",
850 tp->pending_follow.kind ());
851 break;
852 }
853
854 return should_resume;
855 }
856
/* After following a fork to a new thread, re-associate the current
   thread's step-resume/exception-resume breakpoints with it and
   reinsert all breakpoints there.  */

static void
follow_inferior_reset_breakpoints (void)
{
  struct thread_info *tp = inferior_thread ();

  /* Was there a step_resume breakpoint?  (There was if the user
     did a "next" at the fork() call.)  If so, explicitly reset its
     thread number.  Cloned step_resume breakpoints are disabled on
     creation, so enable it here now that it is associated with the
     correct thread.

     step_resumes are a form of bp that are made to be per-thread.
     Since we created the step_resume bp when the parent process
     was being debugged, and now are switching to the child process,
     from the breakpoint package's viewpoint, that's a switch of
     "threads".  We must update the bp's notion of which thread
     it is for, or it'll be ignored when it triggers.  */

  if (tp->control.step_resume_breakpoint)
    {
      breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
      tp->control.step_resume_breakpoint->loc->enabled = 1;
    }

  /* Treat exception_resume breakpoints like step_resume breakpoints.  */
  if (tp->control.exception_resume_breakpoint)
    {
      breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
      tp->control.exception_resume_breakpoint->loc->enabled = 1;
    }

  /* Reinsert all breakpoints in the child.  The user may have set
     breakpoints after catching the fork, in which case those
     were never set in the child, but only in the parent.  This makes
     sure the inserted breakpoints match the breakpoint list.  */

  breakpoint_re_set ();
  insert_breakpoints ();
}
896
897 /* The child has exited or execed: resume THREAD, a thread of the parent,
898 if it was meant to be executing. */
899
static void
proceed_after_vfork_done (thread_info *thread)
{
  /* Only resume a thread the user meant to have running: it must be
     marked running, not currently executing in the target, with no
     pending stop request and no pending signal to deliver.  */
  if (thread->state == THREAD_RUNNING
      && !thread->executing ()
      && !thread->stop_requested
      && thread->stop_signal () == GDB_SIGNAL_0)
    {
      infrun_debug_printf ("resuming vfork parent thread %s",
			   thread->ptid.to_string ().c_str ());

      switch_to_thread (thread);
      clear_proceed_status (0);
      /* Resume from the thread's current PC with no signal.  */
      proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);
    }
}
916
917 /* Called whenever we notice an exec or exit event, to handle
918 detaching or resuming a vfork parent. */
919
static void
handle_vfork_child_exec_or_exit (int exec)
{
  /* EXEC is non-zero if the vfork child exec'ed, zero if it exited.  */
  struct inferior *inf = current_inferior ();

  if (inf->vfork_parent)
    {
      /* Parent inferior to resume at the end, if any.  */
      inferior *resume_parent = nullptr;

      /* This exec or exit marks the end of the shared memory region
	 between the parent and the child.  Break the bonds.  */
      inferior *vfork_parent = inf->vfork_parent;
      inf->vfork_parent->vfork_child = NULL;
      inf->vfork_parent = NULL;

      /* If the user wanted to detach from the parent, now is the
	 time.  */
      if (vfork_parent->pending_detach)
	{
	  struct program_space *pspace;
	  struct address_space *aspace;

	  /* follow-fork child, detach-on-fork on.  */

	  vfork_parent->pending_detach = 0;

	  scoped_restore_current_pspace_and_thread restore_thread;

	  /* We're letting loose of the parent.  */
	  thread_info *tp = any_live_thread_of_inferior (vfork_parent);
	  switch_to_thread (tp);

	  /* We're about to detach from the parent, which implicitly
	     removes breakpoints from its address space.  There's a
	     catch here: we want to reuse the spaces for the child,
	     but, parent/child are still sharing the pspace at this
	     point, although the exec in reality makes the kernel give
	     the child a fresh set of new pages.  The problem here is
	     that the breakpoints module being unaware of this, would
	     likely chose the child process to write to the parent
	     address space.  Swapping the child temporarily away from
	     the spaces has the desired effect.  Yes, this is "sort
	     of" a hack.  */

	  pspace = inf->pspace;
	  aspace = inf->aspace;
	  inf->aspace = NULL;
	  inf->pspace = NULL;

	  if (print_inferior_events)
	    {
	      std::string pidstr
		= target_pid_to_str (ptid_t (vfork_parent->pid));

	      target_terminal::ours_for_output ();

	      if (exec)
		{
		  gdb_printf (_("[Detaching vfork parent %s "
				"after child exec]\n"), pidstr.c_str ());
		}
	      else
		{
		  gdb_printf (_("[Detaching vfork parent %s "
				"after child exit]\n"), pidstr.c_str ());
		}
	    }

	  target_detach (vfork_parent, 0);

	  /* Put it back.  The scoped restore above will then switch us
	     back to the originally-selected pspace/thread.  */
	  inf->pspace = pspace;
	  inf->aspace = aspace;
	}
      else if (exec)
	{
	  /* We're staying attached to the parent, so, really give the
	     child a new address space.  */
	  inf->pspace = new program_space (maybe_new_address_space ());
	  inf->aspace = inf->pspace->aspace;
	  inf->removable = 1;
	  set_current_program_space (inf->pspace);

	  resume_parent = vfork_parent;
	}
      else
	{
	  /* If this is a vfork child exiting, then the pspace and
	     aspaces were shared with the parent.  Since we're
	     reporting the process exit, we'll be mourning all that is
	     found in the address space, and switching to null_ptid,
	     preparing to start a new inferior.  But, since we don't
	     want to clobber the parent's address/program spaces, we
	     go ahead and create a new one for this exiting
	     inferior.  */

	  /* Switch to no-thread while running clone_program_space, so
	     that clone_program_space doesn't want to read the
	     selected frame of a dead process.  */
	  scoped_restore_current_thread restore_thread;
	  switch_to_no_thread ();

	  inf->pspace = new program_space (maybe_new_address_space ());
	  inf->aspace = inf->pspace->aspace;
	  set_current_program_space (inf->pspace);
	  inf->removable = 1;
	  /* Don't (re)read symbols for the about-to-exit inferior.  */
	  inf->symfile_flags = SYMFILE_NO_READ;
	  clone_program_space (inf->pspace, vfork_parent->pspace);

	  resume_parent = vfork_parent;
	}

      gdb_assert (current_program_space == inf->pspace);

      if (non_stop && resume_parent != nullptr)
	{
	  /* If the user wanted the parent to be running, let it go
	     free now.  */
	  scoped_restore_current_thread restore_thread;

	  infrun_debug_printf ("resuming vfork parent process %d",
			       resume_parent->pid);

	  for (thread_info *thread : resume_parent->threads ())
	    proceed_after_vfork_done (thread);
	}
    }
}
1048
1049 /* Handle TARGET_WAITKIND_VFORK_DONE. */
1050
static void
handle_vfork_done (thread_info *event_thread)
{
  /* We only care about this event if inferior::thread_waiting_for_vfork_done is
     set, that is if we are waiting for a vfork child not under our control
     (because we detached it) to exec or exit.

     If an inferior has vforked and we are debugging the child, we don't use
     the vfork-done event to get notified about the end of the shared address
     space window.  We rely instead on the child's exec or exit event, and the
     inferior::vfork_{parent,child} fields are used instead.  See
     handle_vfork_child_exec_or_exit for that.  */
  if (event_thread->inf->thread_waiting_for_vfork_done == nullptr)
    {
      infrun_debug_printf ("not waiting for a vfork-done event");
      return;
    }

  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  /* We stopped all threads (other than the vforking thread) of the inferior in
     follow_fork and kept them stopped until now.  It should therefore not be
     possible for another thread to have reported a vfork during that window.
     If THREAD_WAITING_FOR_VFORK_DONE is set, it has to be the same thread whose
     vfork-done we are handling right now.  */
  gdb_assert (event_thread->inf->thread_waiting_for_vfork_done == event_thread);

  /* The shared-address-space window is over: clear the wait marker and
     allow breakpoint insertion in this program space again.  */
  event_thread->inf->thread_waiting_for_vfork_done = nullptr;
  event_thread->inf->pspace->breakpoints_not_allowed = 0;

  /* On non-stop targets, we stopped all the inferior's threads in follow_fork,
     resume them now.  On all-stop targets, everything that needs to be resumed
     will be when we resume the event thread.  */
  if (target_is_non_stop_p ())
    {
      /* restart_threads and start_step_over may change the current thread, make
	 sure we leave the event thread as the current thread.  */
      scoped_restore_current_thread restore_thread;

      insert_breakpoints ();
      restart_threads (event_thread, event_thread->inf);
      start_step_over ();
    }
}
1095
/* Enum strings for "set|show follow-exec-mode".  */

/* "new": follow_exec creates a fresh inferior for the post-exec
   program, keeping the old inferior around.  */
static const char follow_exec_mode_new[] = "new";
/* "same": the existing inferior is reused for the post-exec program.  */
static const char follow_exec_mode_same[] = "same";
/* NULL-terminated list of valid values for the setting.  */
static const char *const follow_exec_mode_names[] =
{
  follow_exec_mode_new,
  follow_exec_mode_same,
  NULL,
};

/* The current setting.  Compared by pointer identity against the
   constants above (see follow_exec).  */
static const char *follow_exec_mode_string = follow_exec_mode_same;

/* Implementation of "show follow-exec-mode": print the current
   setting VALUE to FILE.  */

static void
show_follow_exec_mode_string (struct ui_file *file, int from_tty,
			      struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Follow exec mode is \"%s\".\n"), value);
}
1114
1115 /* EXEC_FILE_TARGET is assumed to be non-NULL. */
1116
static void
follow_exec (ptid_t ptid, const char *exec_file_target)
{
  /* PTID is the thread that reported the exec event;
     EXEC_FILE_TARGET is the new executable's path as seen by the
     target (assumed non-NULL, see comment above).  */
  int pid = ptid.pid ();
  ptid_t process_ptid;

  /* Switch terminal for any messages produced e.g. by
     breakpoint_re_set.  */
  target_terminal::ours_for_output ();

  /* This is an exec event that we actually wish to pay attention to.
     Refresh our symbol table to the newly exec'd program, remove any
     momentary bp's, etc.

     If there are breakpoints, they aren't really inserted now,
     since the exec() transformed our inferior into a fresh set
     of instructions.

     We want to preserve symbolic breakpoints on the list, since
     we have hopes that they can be reset after the new a.out's
     symbol table is read.

     However, any "raw" breakpoints must be removed from the list
     (e.g., the solib bp's), since their address is probably invalid
     now.

     And, we DON'T want to call delete_breakpoints() here, since
     that may write the bp's "shadow contents" (the instruction
     value that was overwritten with a TRAP instruction).  Since
     we now have a new a.out, those shadow contents aren't valid.  */

  mark_breakpoints_out ();

  /* The target reports the exec event to the main thread, even if
     some other thread does the exec, and even if the main thread was
     stopped or already gone.  We may still have non-leader threads of
     the process on our list.  E.g., on targets that don't have thread
     exit events (like remote); or on native Linux in non-stop mode if
     there were only two threads in the inferior and the non-leader
     one is the one that execs (and nothing forces an update of the
     thread list up to here).  When debugging remotely, it's best to
     avoid extra traffic, when possible, so avoid syncing the thread
     list with the target, and instead go ahead and delete all threads
     of the process but one that reported the event.  Note this must
     be done before calling update_breakpoints_after_exec, as
     otherwise clearing the threads' resources would reference stale
     thread breakpoints -- it may have been one of these threads that
     stepped across the exec.  We could just clear their stepping
     states, but as long as we're iterating, might as well delete
     them.  Deleting them now rather than at the next user-visible
     stop provides a nicer sequence of events for user and MI
     notifications.  */
  for (thread_info *th : all_threads_safe ())
    if (th->ptid.pid () == pid && th->ptid != ptid)
      delete_thread (th);

  /* We also need to clear any left over stale state for the
     leader/event thread.  E.g., if there was any step-resume
     breakpoint or similar, it's gone now.  We cannot truly
     step-to-next statement through an exec().  */
  thread_info *th = inferior_thread ();
  th->control.step_resume_breakpoint = NULL;
  th->control.exception_resume_breakpoint = NULL;
  th->control.single_step_breakpoints = NULL;
  th->control.step_range_start = 0;
  th->control.step_range_end = 0;

  /* The user may have had the main thread held stopped in the
     previous image (e.g., schedlock on, or non-stop).  Release
     it now.  */
  th->stop_requested = 0;

  update_breakpoints_after_exec ();

  /* What is this a.out's name?  */
  process_ptid = ptid_t (pid);
  gdb_printf (_("%s is executing new program: %s\n"),
	      target_pid_to_str (process_ptid).c_str (),
	      exec_file_target);

  /* We've followed the inferior through an exec.  Therefore, the
     inferior has essentially been killed & reborn.  */

  breakpoint_init_inferior (inf_execd);

  /* Map the target-side pathname onto a host-side one, if possible.  */
  gdb::unique_xmalloc_ptr<char> exec_file_host
    = exec_file_find (exec_file_target, NULL);

  /* If we were unable to map the executable target pathname onto a host
     pathname, tell the user that.  Otherwise GDB's subsequent behavior
     is confusing.  Maybe it would even be better to stop at this point
     so that the user can specify a file manually before continuing.  */
  if (exec_file_host == NULL)
    warning (_("Could not load symbols for executable %s.\n"
	       "Do you need \"set sysroot\"?"),
	     exec_file_target);

  /* Reset the shared library package.  This ensures that we get a
     shlib event when the child reaches "_start", at which point the
     dld will have had a chance to initialize the child.  */
  /* Also, loading a symbol file below may trigger symbol lookups, and
     we don't want those to be satisfied by the libraries of the
     previous incarnation of this process.  */
  no_shared_libraries (NULL, 0);

  struct inferior *inf = current_inferior ();

  /* Pointer-identity comparison; see follow_exec_mode_names above.  */
  if (follow_exec_mode_string == follow_exec_mode_new)
    {
      /* The user wants to keep the old inferior and program spaces
	 around.  Create a new fresh one, and switch to it.  */

      /* Do exit processing for the original inferior before setting the new
	 inferior's pid.  Having two inferiors with the same pid would confuse
	 find_inferior_p(t)id.  Transfer the terminal state and info from the
	 old to the new inferior.  */
      inferior *new_inferior = add_inferior_with_spaces ();

      swap_terminal_info (new_inferior, inf);
      exit_inferior_silent (inf);

      new_inferior->pid = pid;
      target_follow_exec (new_inferior, ptid, exec_file_target);

      /* We continue with the new inferior.  */
      inf = new_inferior;
    }
  else
    {
      /* The old description may no longer be fit for the new image.
	 E.g, a 64-bit process exec'ed a 32-bit process.  Clear the
	 old description; we'll read a new one below.  No need to do
	 this on "follow-exec-mode new", as the old inferior stays
	 around (its description is later cleared/refetched on
	 restart).  */
      target_clear_description ();
      target_follow_exec (inf, ptid, exec_file_target);
    }

  gdb_assert (current_inferior () == inf);
  gdb_assert (current_program_space == inf->pspace);

  /* Attempt to open the exec file.  SYMFILE_DEFER_BP_RESET is used
     because the proper displacement for a PIE (Position Independent
     Executable) main symbol file will only be computed by
     solib_create_inferior_hook below.  breakpoint_re_set would fail
     to insert the breakpoints with the zero displacement.  */
  try_open_exec_file (exec_file_host.get (), inf, SYMFILE_DEFER_BP_RESET);

  /* If the target can specify a description, read it.  Must do this
     after flipping to the new executable (because the target supplied
     description must be compatible with the executable's
     architecture, and the old executable may e.g., be 32-bit, while
     the new one 64-bit), and before anything involving memory or
     registers.  */
  target_find_description ();

  /* Let observers (e.g. infrun_inferior_execd) drop per-inferior state
     tied to the pre-exec image.  */
  gdb::observers::inferior_execd.notify (inf);

  breakpoint_re_set ();

  /* Reinsert all breakpoints.  (Those which were symbolic have
     been reset to the proper address in the new a.out, thanks
     to symbol_file_command...).  */
  insert_breakpoints ();

  /* The next resume of this inferior should bring it to the shlib
     startup breakpoints.  (If the user had also set bp's on
     "main" from the old (parent) process, then they'll auto-
     matically get reset there in the new process.).  */
}
1288
1289 /* The chain of threads that need to do a step-over operation to get
1290 past e.g., a breakpoint. What technique is used to step over the
1291 breakpoint/watchpoint does not matter -- all threads end up in the
1292 same queue, to maintain rough temporal order of execution, in order
1293 to avoid starvation, otherwise, we could e.g., find ourselves
1294 constantly stepping the same couple threads past their breakpoints
1295 over and over, if the single-step finish fast enough. */
1296 thread_step_over_list global_thread_step_over_list;
1297
1298 /* Bit flags indicating what the thread needs to step over. */
1299
enum step_over_what_flag
  {
    /* Step over a breakpoint.  */
    STEP_OVER_BREAKPOINT = 1,

    /* Step past a non-continuable watchpoint, in order to let the
       instruction execute so we can evaluate the watchpoint
       expression.  */
    STEP_OVER_WATCHPOINT = 2
  };
/* step_over_what is a bit mask of the flags above.  */
DEF_ENUM_FLAGS_TYPE (enum step_over_what_flag, step_over_what);
1311
1312 /* Info about an instruction that is being stepped over. */
1313
struct step_over_info
{
  /* If we're stepping past a breakpoint, this is the address space
     and address of the instruction the breakpoint is set at.  We'll
     skip inserting all breakpoints here.  Valid iff ASPACE is
     non-NULL.  */
  const address_space *aspace = nullptr;
  CORE_ADDR address = 0;

  /* The instruction being stepped over triggers a nonsteppable
     watchpoint.  If true, we'll skip inserting watchpoints.  */
  int nonsteppable_watchpoint_p = 0;

  /* The thread's global number.  -1 when no step-over is in progress
     (see clear_step_over_info).  */
  int thread = -1;
};
1330
1331 /* The step-over info of the location that is being stepped over.
1332
1333 Note that with async/breakpoint always-inserted mode, a user might
1334 set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
1335 being stepped over. As setting a new breakpoint inserts all
1336 breakpoints, we need to make sure the breakpoint being stepped over
1337 isn't inserted then. We do that by only clearing the step-over
1338 info when the step-over is actually finished (or aborted).
1339
1340 Presently GDB can only step over one breakpoint at any given time.
1341 Given threads that can't run code in the same address space as the
1342 breakpoint's can't really miss the breakpoint, GDB could be taught
1343 to step-over at most one breakpoint per address space (so this info
1344 could move to the address space object if/when GDB is extended).
1345 The set of breakpoints being stepped over will normally be much
1346 smaller than the set of all breakpoints, so a flag in the
1347 breakpoint location structure would be wasteful. A separate list
1348 also saves complexity and run-time, as otherwise we'd have to go
1349 through all breakpoint locations clearing their flag whenever we
1350 start a new sequence. Similar considerations weigh against storing
1351 this info in the thread object. Plus, not all step overs actually
1352 have breakpoint locations -- e.g., stepping past a single-step
1353 breakpoint, or stepping to complete a non-continuable
1354 watchpoint. */
1355 static struct step_over_info step_over_info;
1356
1357 /* Record the address of the breakpoint/instruction we're currently
1358 stepping over.
1359 N.B. We record the aspace and address now, instead of say just the thread,
1360 because when we need the info later the thread may be running. */
1361
1362 static void
1363 set_step_over_info (const address_space *aspace, CORE_ADDR address,
1364 int nonsteppable_watchpoint_p,
1365 int thread)
1366 {
1367 step_over_info.aspace = aspace;
1368 step_over_info.address = address;
1369 step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
1370 step_over_info.thread = thread;
1371 }
1372
/* Called when we're no longer stepping over a breakpoint / an
   instruction, so all breakpoints are free to be (re)inserted.  */
1375
1376 static void
1377 clear_step_over_info (void)
1378 {
1379 infrun_debug_printf ("clearing step over info");
1380 step_over_info.aspace = NULL;
1381 step_over_info.address = 0;
1382 step_over_info.nonsteppable_watchpoint_p = 0;
1383 step_over_info.thread = -1;
1384 }
1385
1386 /* See infrun.h. */
1387
1388 int
1389 stepping_past_instruction_at (struct address_space *aspace,
1390 CORE_ADDR address)
1391 {
1392 return (step_over_info.aspace != NULL
1393 && breakpoint_address_match (aspace, address,
1394 step_over_info.aspace,
1395 step_over_info.address));
1396 }
1397
1398 /* See infrun.h. */
1399
1400 int
1401 thread_is_stepping_over_breakpoint (int thread)
1402 {
1403 return (step_over_info.thread != -1
1404 && thread == step_over_info.thread);
1405 }
1406
1407 /* See infrun.h. */
1408
int
stepping_past_nonsteppable_watchpoint (void)
{
  /* Non-zero while stepping past an instruction that triggers a
     non-continuable watchpoint; watchpoint insertion is skipped
     meanwhile.  */
  return step_over_info.nonsteppable_watchpoint_p;
}
1414
1415 /* Returns true if step-over info is valid. */
1416
1417 static bool
1418 step_over_info_valid_p (void)
1419 {
1420 return (step_over_info.aspace != NULL
1421 || stepping_past_nonsteppable_watchpoint ());
1422 }
1423
1424 \f
1425 /* Displaced stepping. */
1426
1427 /* In non-stop debugging mode, we must take special care to manage
1428 breakpoints properly; in particular, the traditional strategy for
1429 stepping a thread past a breakpoint it has hit is unsuitable.
1430 'Displaced stepping' is a tactic for stepping one thread past a
1431 breakpoint it has hit while ensuring that other threads running
1432 concurrently will hit the breakpoint as they should.
1433
1434 The traditional way to step a thread T off a breakpoint in a
1435 multi-threaded program in all-stop mode is as follows:
1436
1437 a0) Initially, all threads are stopped, and breakpoints are not
1438 inserted.
1439 a1) We single-step T, leaving breakpoints uninserted.
1440 a2) We insert breakpoints, and resume all threads.
1441
1442 In non-stop debugging, however, this strategy is unsuitable: we
1443 don't want to have to stop all threads in the system in order to
1444 continue or step T past a breakpoint. Instead, we use displaced
1445 stepping:
1446
1447 n0) Initially, T is stopped, other threads are running, and
1448 breakpoints are inserted.
1449 n1) We copy the instruction "under" the breakpoint to a separate
1450 location, outside the main code stream, making any adjustments
1451 to the instruction, register, and memory state as directed by
1452 T's architecture.
1453 n2) We single-step T over the instruction at its new location.
1454 n3) We adjust the resulting register and memory state as directed
1455 by T's architecture. This includes resetting T's PC to point
1456 back into the main instruction stream.
1457 n4) We resume T.
1458
1459 This approach depends on the following gdbarch methods:
1460
1461 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1462 indicate where to copy the instruction, and how much space must
1463 be reserved there. We use these in step n1.
1464
1465 - gdbarch_displaced_step_copy_insn copies a instruction to a new
1466 address, and makes any necessary adjustments to the instruction,
1467 register contents, and memory. We use this in step n1.
1468
1469 - gdbarch_displaced_step_fixup adjusts registers and memory after
1470 we have successfully single-stepped the instruction, to yield the
1471 same effect the instruction would have had if we had executed it
1472 at its original address. We use this in step n3.
1473
1474 The gdbarch_displaced_step_copy_insn and
1475 gdbarch_displaced_step_fixup functions must be written so that
1476 copying an instruction with gdbarch_displaced_step_copy_insn,
1477 single-stepping across the copied instruction, and then applying
1478 gdbarch_displaced_insn_fixup should have the same effects on the
1479 thread's memory and registers as stepping the instruction in place
1480 would have. Exactly which responsibilities fall to the copy and
1481 which fall to the fixup is up to the author of those functions.
1482
1483 See the comments in gdbarch.sh for details.
1484
1485 Note that displaced stepping and software single-step cannot
1486 currently be used in combination, although with some care I think
1487 they could be made to. Software single-step works by placing
1488 breakpoints on all possible subsequent instructions; if the
1489 displaced instruction is a PC-relative jump, those breakpoints
1490 could fall in very strange places --- on pages that aren't
1491 executable, or at addresses that are not proper instruction
1492 boundaries. (We do generally let other threads run while we wait
1493 to hit the software single-step breakpoint, and they might
1494 encounter such a corrupted instruction.) One way to work around
1495 this would be to have gdbarch_displaced_step_copy_insn fully
1496 simulate the effect of PC-relative instructions (and return NULL)
1497 on architectures that use software single-stepping.
1498
1499 In non-stop mode, we can have independent and simultaneous step
1500 requests, so more than one thread may need to simultaneously step
1501 over a breakpoint. The current implementation assumes there is
1502 only one scratch space per process. In this case, we have to
1503 serialize access to the scratch space. If thread A wants to step
1504 over a breakpoint, but we are currently waiting for some other
1505 thread to complete a displaced step, we leave thread A stopped and
1506 place it in the displaced_step_request_queue. Whenever a displaced
1507 step finishes, we pick the next thread in the queue and start a new
1508 displaced step operation on it. See displaced_step_prepare and
1509 displaced_step_finish for details. */
1510
1511 /* Return true if THREAD is doing a displaced step. */
1512
1513 static bool
1514 displaced_step_in_progress_thread (thread_info *thread)
1515 {
1516 gdb_assert (thread != NULL);
1517
1518 return thread->displaced_step_state.in_progress ();
1519 }
1520
1521 /* Return true if INF has a thread doing a displaced step. */
1522
static bool
displaced_step_in_progress (inferior *inf)
{
  /* True iff at least one thread of INF is mid displaced-step (the
     count is maintained in displaced_step_prepare_throw /
     displaced_step_finish).  */
  return inf->displaced_step_state.in_progress_count > 0;
}
1528
1529 /* Return true if any thread is doing a displaced step. */
1530
1531 static bool
1532 displaced_step_in_progress_any_thread ()
1533 {
1534 for (inferior *inf : all_non_exited_inferiors ())
1535 {
1536 if (displaced_step_in_progress (inf))
1537 return true;
1538 }
1539
1540 return false;
1541 }
1542
static void
infrun_inferior_exit (struct inferior *inf)
{
  /* inferior_exit observer: drop infrun state tied to INF, which is
     gone -- any displaced-step bookkeeping and any pending wait for a
     vfork-done event.  */
  inf->displaced_step_state.reset ();
  inf->thread_waiting_for_vfork_done = nullptr;
}
1549
static void
infrun_inferior_execd (inferior *inf)
{
  /* inferior_execd observer: discard displaced-stepping and step-over
     state belonging to INF's pre-exec image.  */

  /* If some threads were doing a displaced step in this inferior at the
     moment of the exec, they no longer exist.  Even if the exec'ing thread
     was doing a displaced step, we don't want to do any fixup nor restore
     displaced stepping buffer bytes.  */
  inf->displaced_step_state.reset ();

  for (thread_info *thread : inf->threads ())
    thread->displaced_step_state.reset ();

  /* Since an in-line step is done with everything else stopped, if there was
     one in progress at the time of the exec, it must have been the exec'ing
     thread.  */
  clear_step_over_info ();

  /* A pending vfork-done wait from the pre-exec image is now moot.  */
  inf->thread_waiting_for_vfork_done = nullptr;
}
1569
1570 /* If ON, and the architecture supports it, GDB will use displaced
1571 stepping to step over breakpoints. If OFF, or if the architecture
1572 doesn't support it, GDB will instead use the traditional
1573 hold-and-step approach. If AUTO (which is the default), GDB will
1574 decide which technique to use to step over breakpoints depending on
1575 whether the target works in a non-stop way (see use_displaced_stepping). */
1576
1577 static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
1578
1579 static void
1580 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1581 struct cmd_list_element *c,
1582 const char *value)
1583 {
1584 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
1585 gdb_printf (file,
1586 _("Debugger's willingness to use displaced stepping "
1587 "to step over breakpoints is %s (currently %s).\n"),
1588 value, target_is_non_stop_p () ? "on" : "off");
1589 else
1590 gdb_printf (file,
1591 _("Debugger's willingness to use displaced stepping "
1592 "to step over breakpoints is %s.\n"), value);
1593 }
1594
1595 /* Return true if the gdbarch implements the required methods to use
1596 displaced stepping. */
1597
static bool
gdbarch_supports_displaced_stepping (gdbarch *arch)
{
  /* Only check for the presence of `prepare`.  The gdbarch verification ensures
     that if `prepare` is provided, so is `finish`.  */
  return gdbarch_displaced_step_prepare_p (arch);
}
1605
1606 /* Return non-zero if displaced stepping can/should be used to step
1607 over breakpoints of thread TP. */
1608
1609 static bool
1610 use_displaced_stepping (thread_info *tp)
1611 {
1612 /* If the user disabled it explicitly, don't use displaced stepping. */
1613 if (can_use_displaced_stepping == AUTO_BOOLEAN_FALSE)
1614 return false;
1615
1616 /* If "auto", only use displaced stepping if the target operates in a non-stop
1617 way. */
1618 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO
1619 && !target_is_non_stop_p ())
1620 return false;
1621
1622 gdbarch *gdbarch = get_thread_regcache (tp)->arch ();
1623
1624 /* If the architecture doesn't implement displaced stepping, don't use
1625 it. */
1626 if (!gdbarch_supports_displaced_stepping (gdbarch))
1627 return false;
1628
1629 /* If recording, don't use displaced stepping. */
1630 if (find_record_target () != nullptr)
1631 return false;
1632
1633 /* If displaced stepping failed before for this inferior, don't bother trying
1634 again. */
1635 if (tp->inf->displaced_step_state.failed_before)
1636 return false;
1637
1638 return true;
1639 }
1640
1641 /* Simple function wrapper around displaced_step_thread_state::reset. */
1642
static void
displaced_step_reset (displaced_step_thread_state *displaced)
{
  /* Plain-function wrapper so FORWARD_SCOPE_EXIT below can bind it.  */
  displaced->reset ();
}
1648
1649 /* A cleanup that wraps displaced_step_reset. We use this instead of, say,
1650 SCOPE_EXIT, because it needs to be discardable with "cleanup.release ()". */
1651
1652 using displaced_step_reset_cleanup = FORWARD_SCOPE_EXIT (displaced_step_reset);
1653
1654 /* See infrun.h. */
1655
1656 std::string
1657 displaced_step_dump_bytes (const gdb_byte *buf, size_t len)
1658 {
1659 std::string ret;
1660
1661 for (size_t i = 0; i < len; i++)
1662 {
1663 if (i == 0)
1664 ret += string_printf ("%02x", buf[i]);
1665 else
1666 ret += string_printf (" %02x", buf[i]);
1667 }
1668
1669 return ret;
1670 }
1671
1672 /* Prepare to single-step, using displaced stepping.
1673
1674 Note that we cannot use displaced stepping when we have a signal to
1675 deliver. If we have a signal to deliver and an instruction to step
1676 over, then after the step, there will be no indication from the
1677 target whether the thread entered a signal handler or ignored the
1678 signal and stepped over the instruction successfully --- both cases
1679 result in a simple SIGTRAP. In the first case we mustn't do a
1680 fixup, and in the second case we must --- but we can't tell which.
1681 Comments in the code for 'random signals' in handle_inferior_event
1682 explain how we handle this case instead.
1683
1684 Returns DISPLACED_STEP_PREPARE_STATUS_OK if preparing was successful -- this
1685 thread is going to be stepped now; DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
1686 if displaced stepping this thread got queued; or
1687 DISPLACED_STEP_PREPARE_STATUS_CANT if this instruction can't be displaced
1688 stepped. */
1689
static displaced_step_prepare_status
displaced_step_prepare_throw (thread_info *tp)
{
  regcache *regcache = get_thread_regcache (tp);
  struct gdbarch *gdbarch = regcache->arch ();
  displaced_step_thread_state &disp_step_thread_state
    = tp->displaced_step_state;

  /* We should never reach this function if the architecture does not
     support displaced stepping.  */
  gdb_assert (gdbarch_supports_displaced_stepping (gdbarch));

  /* Nor if the thread isn't meant to step over a breakpoint.  */
  gdb_assert (tp->control.trap_expected);

  /* Disable range stepping while executing in the scratch pad.  We
     want a single-step even if executing the displaced instruction in
     the scratch buffer lands within the stepping range (e.g., a
     jump/branch).  */
  tp->control.may_range_step = 0;

  /* We are about to start a displaced step for this thread.  If one is already
     in progress, something's wrong.  */
  gdb_assert (!disp_step_thread_state.in_progress ());

  if (tp->inf->displaced_step_state.unavailable)
    {
      /* The gdbarch tells us it's not worth asking to try a prepare because
	 it is likely that it will return unavailable, so don't bother asking.  */

      displaced_debug_printf ("deferring step of %s",
			      tp->ptid.to_string ().c_str ());

      /* Queue the thread; it will be retried when resources free up.  */
      global_thread_step_over_chain_enqueue (tp);
      return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE;
    }

  displaced_debug_printf ("displaced-stepping %s now",
			  tp->ptid.to_string ().c_str ());

  /* Preparing may access TP's memory/registers, which are relative to
     the current thread -- switch to TP, restoring on exit.  */
  scoped_restore_current_thread restore_thread;

  switch_to_thread (tp);

  CORE_ADDR original_pc = regcache_read_pc (regcache);
  CORE_ADDR displaced_pc;

  /* Ask the architecture to set up the displaced step; DISPLACED_PC
     receives the address the thread will actually single-step at.  */
  displaced_step_prepare_status status
    = gdbarch_displaced_step_prepare (gdbarch, tp, displaced_pc);

  if (status == DISPLACED_STEP_PREPARE_STATUS_CANT)
    {
      displaced_debug_printf ("failed to prepare (%s)",
			      tp->ptid.to_string ().c_str ());

      return DISPLACED_STEP_PREPARE_STATUS_CANT;
    }
  else if (status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
    {
      /* Not enough displaced stepping resources available, defer this
	 request by placing it the queue.  */

      displaced_debug_printf ("not enough resources available, "
			      "deferring step of %s",
			      tp->ptid.to_string ().c_str ());

      global_thread_step_over_chain_enqueue (tp);

      return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE;
    }

  gdb_assert (status == DISPLACED_STEP_PREPARE_STATUS_OK);

  /* Save the information we need to fix things up if the step
     succeeds.  */
  disp_step_thread_state.set (gdbarch);

  /* One more displaced step in flight in this inferior (undone in
     displaced_step_finish).  */
  tp->inf->displaced_step_state.in_progress_count++;

  displaced_debug_printf ("prepared successfully thread=%s, "
			  "original_pc=%s, displaced_pc=%s",
			  tp->ptid.to_string ().c_str (),
			  paddress (gdbarch, original_pc),
			  paddress (gdbarch, displaced_pc));

  return DISPLACED_STEP_PREPARE_STATUS_OK;
}
1777
/* Wrapper for displaced_step_prepare_throw that disables further
   attempts at displaced stepping if we get a memory error.  */
1780
static displaced_step_prepare_status
displaced_step_prepare (thread_info *thread)
{
  displaced_step_prepare_status status
    = DISPLACED_STEP_PREPARE_STATUS_CANT;

  try
    {
      status = displaced_step_prepare_throw (thread);
    }
  catch (const gdb_exception_error &ex)
    {
      /* Only memory and not-supported errors mean "give up on
	 displaced stepping for this inferior"; anything else
	 propagates to the caller.  */
      if (ex.error != MEMORY_ERROR
	  && ex.error != NOT_SUPPORTED_ERROR)
	throw;

      infrun_debug_printf ("caught exception, disabling displaced stepping: %s",
			   ex.what ());

      /* Be verbose if "set displaced-stepping" is "on", silent if
	 "auto".  */
      if (can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
	{
	  warning (_("disabling displaced stepping: %s"),
		   ex.what ());
	}

      /* Disable further displaced stepping attempts.  */
      thread->inf->displaced_step_state.failed_before = 1;
    }

  return status;
}
1814
1815 /* If we displaced stepped an instruction successfully, adjust registers and
1816 memory to yield the same effect the instruction would have had if we had
1817 executed it at its original address, and return
1818 DISPLACED_STEP_FINISH_STATUS_OK. If the instruction didn't complete,
1819 relocate the PC and return DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED.
1820
1821 If the thread wasn't displaced stepping, return
1822 DISPLACED_STEP_FINISH_STATUS_OK as well. */
1823
1824 static displaced_step_finish_status
1825 displaced_step_finish (thread_info *event_thread, enum gdb_signal signal)
1826 {
1827 displaced_step_thread_state *displaced = &event_thread->displaced_step_state;
1828
1829 /* Was this thread performing a displaced step? */
1830 if (!displaced->in_progress ())
1831 return DISPLACED_STEP_FINISH_STATUS_OK;
1832
1833 gdb_assert (event_thread->inf->displaced_step_state.in_progress_count > 0);
1834 event_thread->inf->displaced_step_state.in_progress_count--;
1835
1836 /* Fixup may need to read memory/registers. Switch to the thread
1837 that we're fixing up. Also, target_stopped_by_watchpoint checks
1838 the current thread, and displaced_step_restore performs ptid-dependent
1839 memory accesses using current_inferior(). */
1840 switch_to_thread (event_thread);
1841
1842 displaced_step_reset_cleanup cleanup (displaced);
1843
1844 /* Do the fixup, and release the resources acquired to do the displaced
1845 step. */
1846 return gdbarch_displaced_step_finish (displaced->get_original_gdbarch (),
1847 event_thread, signal);
1848 }
1849
/* Data to be passed around while handling an event.  This data is
   discarded between events.  */
struct execution_control_state
{
  execution_control_state ()
  {
    /* Start out with every field in its "no event" state.  */
    this->reset ();
  }

  /* Reset all fields, ready to handle a fresh event.  */
  void reset ()
  {
    this->target = nullptr;
    this->ptid = null_ptid;
    this->event_thread = nullptr;
    ws = target_waitstatus ();
    stop_func_filled_in = 0;
    stop_func_start = 0;
    stop_func_end = 0;
    stop_func_name = nullptr;
    wait_some_more = 0;
    hit_singlestep_breakpoint = 0;
  }

  /* Presumably the target that reported the event; only cleared here,
     filled in by the event handling code elsewhere.  */
  process_stratum_target *target;
  /* The ptid the event refers to.  */
  ptid_t ptid;
  /* The thread that got the event, if this was a thread event; NULL
     otherwise.  */
  struct thread_info *event_thread;

  /* The wait status of the event.  */
  struct target_waitstatus ws;
  /* Nonzero once the stop_func_* fields below have been filled in;
     presumably they are computed lazily elsewhere.  */
  int stop_func_filled_in;
  /* Presumably the bounds and name of the function containing the
     stop PC; filled in elsewhere when stop_func_filled_in is set.  */
  CORE_ADDR stop_func_start;
  CORE_ADDR stop_func_end;
  const char *stop_func_name;
  /* Nonzero if event handling should keep waiting for further events
     rather than presenting a stop (see e.g. start_step_over, which
     aborts when this is left clear).  */
  int wait_some_more;

  /* True if the event thread hit the single-step breakpoint of
     another thread.  Thus the event doesn't cause a stop, the thread
     needs to be single-stepped past the single-step breakpoint before
     we can switch back to the original stepping thread.  */
  int hit_singlestep_breakpoint;
};
1892
/* Clear ECS and set it to point at TP.  */

static void
reset_ecs (struct execution_control_state *ecs, struct thread_info *tp)
{
  /* Wipe all fields first; reset () nulls event_thread and ptid, so
     they must be (re)assigned afterwards.  */
  ecs->reset ();
  ecs->event_thread = tp;
  ecs->ptid = tp->ptid;
}
1902
1903 static void keep_going_pass_signal (struct execution_control_state *ecs);
1904 static void prepare_to_wait (struct execution_control_state *ecs);
1905 static bool keep_going_stepped_thread (struct thread_info *tp);
1906 static step_over_what thread_still_needs_step_over (struct thread_info *tp);
1907
/* Are there any pending step-over requests?  If so, run all we can
   now and return true.  Otherwise, return false.  */

static bool
start_step_over (void)
{
  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  /* Don't start a new step-over if we already have an in-line
     step-over operation ongoing.  */
  if (step_over_info_valid_p ())
    return false;

  /* Steal the global thread step over chain.  As we try to initiate displaced
     steps, threads will be enqueued in the global chain if no buffers are
     available.  If we iterated on the global chain directly, we might iterate
     indefinitely.  */
  thread_step_over_list threads_to_step
    = std::move (global_thread_step_over_list);

  infrun_debug_printf ("stealing global queue of threads to step, length = %d",
		       thread_step_over_chain_length (threads_to_step));

  bool started = false;

  /* On scope exit (whatever the reason, return or exception), if there are
     threads left in the THREADS_TO_STEP chain, put back these threads in the
     global list.  */
  SCOPE_EXIT
    {
      if (threads_to_step.empty ())
	infrun_debug_printf ("step-over queue now empty");
      else
	{
	  infrun_debug_printf ("putting back %d threads to step in global queue",
			       thread_step_over_chain_length (threads_to_step));

	  global_thread_step_over_chain_enqueue_chain
	    (std::move (threads_to_step));
	}
    };

  /* Use a safe range so that erasing the current element inside the
     loop (below) doesn't invalidate the iteration.  */
  thread_step_over_list_safe_range range
    = make_thread_step_over_list_safe_range (threads_to_step);

  for (thread_info *tp : range)
    {
      struct execution_control_state ecss;
      struct execution_control_state *ecs = &ecss;
      step_over_what step_what;
      int must_be_in_line;

      gdb_assert (!tp->stop_requested);

      if (tp->inf->displaced_step_state.unavailable)
	{
	  /* The arch told us to not even try preparing another displaced step
	     for this inferior.  Just leave the thread in THREADS_TO_STEP, it
	     will get moved to the global chain on scope exit.  */
	  continue;
	}

      if (tp->inf->thread_waiting_for_vfork_done != nullptr)
	{
	  /* When we stop all threads, handling a vfork, any thread in the step
	     over chain remains there.  A user could also try to continue a
	     thread stopped at a breakpoint while another thread is waiting for
	     a vfork-done event.  In any case, we don't want to start a step
	     over right now.  */
	  continue;
	}

      /* Remove thread from the THREADS_TO_STEP chain.  If anything goes wrong
	 while we try to prepare the displaced step, we don't add it back to
	 the global step over chain.  This is to avoid a thread staying in the
	 step over chain indefinitely if something goes wrong when resuming
	 it.  If the error is intermittent and it still needs a step over, it
	 will get enqueued again when we try to resume it normally.  */
      threads_to_step.erase (threads_to_step.iterator_to (*tp));

      step_what = thread_still_needs_step_over (tp);
      must_be_in_line = ((step_what & STEP_OVER_WATCHPOINT)
			 || ((step_what & STEP_OVER_BREAKPOINT)
			     && !use_displaced_stepping (tp)));

      /* We currently stop all threads of all processes to step-over
	 in-line.  If we need to start a new in-line step-over, let
	 any pending displaced steps finish first.  */
      if (must_be_in_line && displaced_step_in_progress_any_thread ())
	{
	  global_thread_step_over_chain_enqueue (tp);
	  continue;
	}

      /* A thread in the step-over chain should be stopped and not yet
	 resumed; anything else indicates a bookkeeping bug.  */
      if (tp->control.trap_expected
	  || tp->resumed ()
	  || tp->executing ())
	{
	  internal_error (__FILE__, __LINE__,
			  "[%s] has inconsistent state: "
			  "trap_expected=%d, resumed=%d, executing=%d\n",
			  tp->ptid.to_string ().c_str (),
			  tp->control.trap_expected,
			  tp->resumed (),
			  tp->executing ());
	}

      infrun_debug_printf ("resuming [%s] for step-over",
			   tp->ptid.to_string ().c_str ());

      /* keep_going_pass_signal skips the step-over if the breakpoint
	 is no longer inserted.  In all-stop, we want to keep looking
	 for a thread that needs a step-over instead of resuming TP,
	 because we wouldn't be able to resume anything else until the
	 target stops again.  In non-stop, the resume always resumes
	 only TP, so it's OK to let the thread resume freely.  */
      if (!target_is_non_stop_p () && !step_what)
	continue;

      switch_to_thread (tp);
      reset_ecs (ecs, tp);
      keep_going_pass_signal (ecs);

      if (!ecs->wait_some_more)
	error (_("Command aborted."));

      /* If the thread's step over could not be initiated because no buffers
	 were available, it was re-added to the global step over chain.  */
      if (tp->resumed ())
	{
	  infrun_debug_printf ("[%s] was resumed.",
			       tp->ptid.to_string ().c_str ());
	  gdb_assert (!thread_is_in_step_over_chain (tp));
	}
      else
	{
	  infrun_debug_printf ("[%s] was NOT resumed.",
			       tp->ptid.to_string ().c_str ());
	  gdb_assert (thread_is_in_step_over_chain (tp));
	}

      /* If we started a new in-line step-over, we're done.  */
      if (step_over_info_valid_p ())
	{
	  gdb_assert (tp->control.trap_expected);
	  started = true;
	  break;
	}

      if (!target_is_non_stop_p ())
	{
	  /* On all-stop, shouldn't have resumed unless we needed a
	     step over.  */
	  gdb_assert (tp->control.trap_expected
		      || tp->step_after_step_resume_breakpoint);

	  /* With remote targets (at least), in all-stop, we can't
	     issue any further remote commands until the program stops
	     again.  */
	  started = true;
	  break;
	}

      /* Either the thread no longer needed a step-over, or a new
	 displaced stepping sequence started.  Even in the latter
	 case, continue looking.  Maybe we can also start another
	 displaced step on a thread of other process.  */
    }

  return started;
}
2079
2080 /* Update global variables holding ptids to hold NEW_PTID if they were
2081 holding OLD_PTID. */
2082 static void
2083 infrun_thread_ptid_changed (process_stratum_target *target,
2084 ptid_t old_ptid, ptid_t new_ptid)
2085 {
2086 if (inferior_ptid == old_ptid
2087 && current_inferior ()->process_target () == target)
2088 inferior_ptid = new_ptid;
2089 }
2090
2091 \f
2092
/* The valid values of the "scheduler-locking" setting, and the
   NULL-terminated choice array handed to the setting machinery.  */
static const char schedlock_off[] = "off";
static const char schedlock_on[] = "on";
static const char schedlock_step[] = "step";
static const char schedlock_replay[] = "replay";
static const char *const scheduler_enums[] = {
  schedlock_off,
  schedlock_on,
  schedlock_step,
  schedlock_replay,
  NULL
};
/* The current scheduler-locking mode; defaults to "replay".  */
static const char *scheduler_mode = schedlock_replay;
/* "show scheduler-locking" callback: print VALUE, the current
   scheduler-locking mode, to FILE.  */
static void
show_scheduler_mode (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  gdb_printf (file,
	      _("Mode for locking scheduler "
		"during execution is \"%s\".\n"),
	      value);
}
2114
2115 static void
2116 set_schedlock_func (const char *args, int from_tty, struct cmd_list_element *c)
2117 {
2118 if (!target_can_lock_scheduler ())
2119 {
2120 scheduler_mode = schedlock_off;
2121 error (_("Target '%s' cannot support this command."),
2122 target_shortname ());
2123 }
2124 }
2125
2126 /* True if execution commands resume all threads of all processes by
2127 default; otherwise, resume only threads of the current inferior
2128 process. */
2129 bool sched_multi = false;
2130
2131 /* Try to setup for software single stepping. Return true if target_resume()
2132 should use hardware single step.
2133
2134 GDBARCH the current gdbarch. */
2135
2136 static bool
2137 maybe_software_singlestep (struct gdbarch *gdbarch)
2138 {
2139 bool hw_step = true;
2140
2141 if (execution_direction == EXEC_FORWARD
2142 && gdbarch_software_single_step_p (gdbarch))
2143 hw_step = !insert_single_step_breakpoints (gdbarch);
2144
2145 return hw_step;
2146 }
2147
2148 /* See infrun.h. */
2149
2150 ptid_t
2151 user_visible_resume_ptid (int step)
2152 {
2153 ptid_t resume_ptid;
2154
2155 if (non_stop)
2156 {
2157 /* With non-stop mode on, threads are always handled
2158 individually. */
2159 resume_ptid = inferior_ptid;
2160 }
2161 else if ((scheduler_mode == schedlock_on)
2162 || (scheduler_mode == schedlock_step && step))
2163 {
2164 /* User-settable 'scheduler' mode requires solo thread
2165 resume. */
2166 resume_ptid = inferior_ptid;
2167 }
2168 else if ((scheduler_mode == schedlock_replay)
2169 && target_record_will_replay (minus_one_ptid, execution_direction))
2170 {
2171 /* User-settable 'scheduler' mode requires solo thread resume in replay
2172 mode. */
2173 resume_ptid = inferior_ptid;
2174 }
2175 else if (!sched_multi && target_supports_multi_process ())
2176 {
2177 /* Resume all threads of the current process (and none of other
2178 processes). */
2179 resume_ptid = ptid_t (inferior_ptid.pid ());
2180 }
2181 else
2182 {
2183 /* Resume all threads of all processes. */
2184 resume_ptid = RESUME_ALL;
2185 }
2186
2187 return resume_ptid;
2188 }
2189
2190 /* See infrun.h. */
2191
2192 process_stratum_target *
2193 user_visible_resume_target (ptid_t resume_ptid)
2194 {
2195 return (resume_ptid == minus_one_ptid && sched_multi
2196 ? NULL
2197 : current_inferior ()->process_target ());
2198 }
2199
2200 /* Return a ptid representing the set of threads that we will resume,
2201 in the perspective of the target, assuming run control handling
2202 does not require leaving some threads stopped (e.g., stepping past
2203 breakpoint). USER_STEP indicates whether we're about to start the
2204 target for a stepping command. */
2205
2206 static ptid_t
2207 internal_resume_ptid (int user_step)
2208 {
2209 /* In non-stop, we always control threads individually. Note that
2210 the target may always work in non-stop mode even with "set
2211 non-stop off", in which case user_visible_resume_ptid could
2212 return a wildcard ptid. */
2213 if (target_is_non_stop_p ())
2214 return inferior_ptid;
2215
2216 /* The rest of the function assumes non-stop==off and
2217 target-non-stop==off.
2218
2219 If a thread is waiting for a vfork-done event, it means breakpoints are out
2220 for this inferior (well, program space in fact). We don't want to resume
2221 any thread other than the one waiting for vfork done, otherwise these other
2222 threads could miss breakpoints. So if a thread in the resumption set is
2223 waiting for a vfork-done event, resume only that thread.
2224
2225 The resumption set width depends on whether schedule-multiple is on or off.
2226
2227 Note that if the target_resume interface was more flexible, we could be
2228 smarter here when schedule-multiple is on. For example, imagine 3
2229 inferiors with 2 threads each (1.1, 1.2, 2.1, 2.2, 3.1 and 3.2). Threads
2230 2.1 and 3.2 are both waiting for a vfork-done event. Then we could ask the
2231 target(s) to resume:
2232
2233 - All threads of inferior 1
2234 - Thread 2.1
2235 - Thread 3.2
2236
2237 Since we don't have that flexibility (we can only pass one ptid), just
2238 resume the first thread waiting for a vfork-done event we find (e.g. thread
2239 2.1). */
2240 if (sched_multi)
2241 {
2242 for (inferior *inf : all_non_exited_inferiors ())
2243 if (inf->thread_waiting_for_vfork_done != nullptr)
2244 return inf->thread_waiting_for_vfork_done->ptid;
2245 }
2246 else if (current_inferior ()->thread_waiting_for_vfork_done != nullptr)
2247 return current_inferior ()->thread_waiting_for_vfork_done->ptid;
2248
2249 return user_visible_resume_ptid (user_step);
2250 }
2251
/* Wrapper for target_resume, that handles infrun-specific
   bookkeeping.  RESUME_PTID is the set of threads to resume, STEP is
   true to request a hardware single-step, and SIG is the signal to
   deliver (GDB_SIGNAL_0 for none).  */

static void
do_target_resume (ptid_t resume_ptid, bool step, enum gdb_signal sig)
{
  struct thread_info *tp = inferior_thread ();

  gdb_assert (!tp->stop_requested);

  /* Install inferior's terminal modes.  */
  target_terminal::inferior ();

  /* Avoid confusing the next resume, if the next stop/resume
     happens to apply to another thread.  */
  tp->set_stop_signal (GDB_SIGNAL_0);

  /* Advise target which signals may be handled silently.

     If we have removed breakpoints because we are stepping over one
     in-line (in any thread), we need to receive all signals to avoid
     accidentally skipping a breakpoint during execution of a signal
     handler.

     Likewise if we're displaced stepping, otherwise a trap for a
     breakpoint in a signal handler might be confused with the
     displaced step finishing.  We don't make the displaced_step_finish
     step distinguish the cases instead, because:

     - a backtrace while stopped in the signal handler would show the
       scratch pad as frame older than the signal handler, instead of
       the real mainline code.

     - when the thread is later resumed, the signal handler would
       return to the scratch pad area, which would no longer be
       valid.  */
  if (step_over_info_valid_p ()
      || displaced_step_in_progress (tp->inf))
    target_pass_signals ({});
  else
    target_pass_signals (signal_pass);

  infrun_debug_printf ("resume_ptid=%s, step=%d, sig=%s",
		       resume_ptid.to_string ().c_str (),
		       step, gdb_signal_to_symbol_string (sig));

  /* Hand off to the target; from here on the threads in RESUME_PTID
     are (about to be) running.  */
  target_resume (resume_ptid, step, sig);
}
2300
/* Resume the inferior.  SIG is the signal to give the inferior
   (GDB_SIGNAL_0 for none).  Note: don't call this directly; instead
   call 'resume', which handles exceptions.  */

static void
resume_1 (enum gdb_signal sig)
{
  struct regcache *regcache = get_current_regcache ();
  struct gdbarch *gdbarch = regcache->arch ();
  struct thread_info *tp = inferior_thread ();
  const address_space *aspace = regcache->aspace ();
  ptid_t resume_ptid;
  /* This represents the user's step vs continue request.  When
     deciding whether "set scheduler-locking step" applies, it's the
     user's intention that counts.  */
  const int user_step = tp->control.stepping_command;
  /* This represents what we'll actually request the target to do.
     This can decay from a step to a continue, if e.g., we need to
     implement single-stepping with breakpoints (software
     single-step).  */
  bool step;

  gdb_assert (!tp->stop_requested);
  gdb_assert (!thread_is_in_step_over_chain (tp));

  /* A thread with an event already queued doesn't need to run at all;
     just mark it resumed and let the event loop pick the event up.  */
  if (tp->has_pending_waitstatus ())
    {
      infrun_debug_printf
	("thread %s has pending wait "
	 "status %s (currently_stepping=%d).",
	 tp->ptid.to_string ().c_str (),
	 tp->pending_waitstatus ().to_string ().c_str (),
	 currently_stepping (tp));

      tp->inf->process_target ()->threads_executing = true;
      tp->set_resumed (true);

      /* FIXME: What should we do if we are supposed to resume this
	 thread with a signal?  Maybe we should maintain a queue of
	 pending signals to deliver.  */
      if (sig != GDB_SIGNAL_0)
	{
	  warning (_("Couldn't deliver signal %s to %s."),
		   gdb_signal_to_name (sig),
		   tp->ptid.to_string ().c_str ());
	}

      tp->set_stop_signal (GDB_SIGNAL_0);

      if (target_can_async_p ())
	{
	  target_async (1);
	  /* Tell the event loop we have an event to process.  */
	  mark_async_event_handler (infrun_async_inferior_event_token);
	}
      return;
    }

  tp->stepped_breakpoint = 0;

  /* Depends on stepped_breakpoint.  */
  step = currently_stepping (tp);

  if (current_inferior ()->thread_waiting_for_vfork_done != nullptr)
    {
      /* Don't try to single-step a vfork parent that is waiting for
	 the child to get out of the shared memory region (by exec'ing
	 or exiting).  This is particularly important on software
	 single-step archs, as the child process would trip on the
	 software single step breakpoint inserted for the parent
	 process.  Since the parent will not actually execute any
	 instruction until the child is out of the shared region (such
	 are vfork's semantics), it is safe to simply continue it.
	 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
	 the parent, and tell it to `keep_going', which automatically
	 re-sets it stepping.  */
      infrun_debug_printf ("resume : clear step");
      step = false;
    }

  CORE_ADDR pc = regcache_read_pc (regcache);

  infrun_debug_printf ("step=%d, signal=%s, trap_expected=%d, "
		       "current thread [%s] at %s",
		       step, gdb_signal_to_symbol_string (sig),
		       tp->control.trap_expected,
		       inferior_ptid.to_string ().c_str (),
		       paddress (gdbarch, pc));

  /* Normally, by the time we reach `resume', the breakpoints are either
     removed or inserted, as appropriate.  The exception is if we're sitting
     at a permanent breakpoint; we need to step over it, but permanent
     breakpoints can't be removed.  So we have to test for it here.  */
  if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
    {
      if (sig != GDB_SIGNAL_0)
	{
	  /* We have a signal to pass to the inferior.  The resume
	     may, or may not take us to the signal handler.  If this
	     is a step, we'll need to stop in the signal handler, if
	     there's one, (if the target supports stepping into
	     handlers), or in the next mainline instruction, if
	     there's no handler.  If this is a continue, we need to be
	     sure to run the handler with all breakpoints inserted.
	     In all cases, set a breakpoint at the current address
	     (where the handler returns to), and once that breakpoint
	     is hit, resume skipping the permanent breakpoint.  If
	     that breakpoint isn't hit, then we've stepped into the
	     signal handler (or hit some other event).  We'll delete
	     the step-resume breakpoint then.  */

	  infrun_debug_printf ("resume: skipping permanent breakpoint, "
			       "deliver signal first");

	  clear_step_over_info ();
	  tp->control.trap_expected = 0;

	  if (tp->control.step_resume_breakpoint == NULL)
	    {
	      /* Set a "high-priority" step-resume, as we don't want
		 user breakpoints at PC to trigger (again) when this
		 hits.  */
	      insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
	      gdb_assert (tp->control.step_resume_breakpoint->loc->permanent);

	      tp->step_after_step_resume_breakpoint = step;
	    }

	  insert_breakpoints ();
	}
      else
	{
	  /* There's no signal to pass, we can go ahead and skip the
	     permanent breakpoint manually.  */
	  infrun_debug_printf ("skipping permanent breakpoint");
	  gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
	  /* Update pc to reflect the new address from which we will
	     execute instructions.  */
	  pc = regcache_read_pc (regcache);

	  if (step)
	    {
	      /* We've already advanced the PC, so the stepping part
		 is done.  Now we need to arrange for a trap to be
		 reported to handle_inferior_event.  Set a breakpoint
		 at the current PC, and run to it.  Don't update
		 prev_pc, because if we end in
		 switch_back_to_stepped_thread, we want the "expected
		 thread advanced also" branch to be taken.  IOW, we
		 don't want this thread to step further from PC
		 (overstep).  */
	      gdb_assert (!step_over_info_valid_p ());
	      insert_single_step_breakpoint (gdbarch, aspace, pc);
	      insert_breakpoints ();

	      resume_ptid = internal_resume_ptid (user_step);
	      do_target_resume (resume_ptid, false, GDB_SIGNAL_0);
	      tp->set_resumed (true);
	      return;
	    }
	}
    }

  /* If we have a breakpoint to step over, make sure to do a single
     step only.  Same if we have software watchpoints.  */
  if (tp->control.trap_expected || bpstat_should_step ())
    tp->control.may_range_step = 0;

  /* If displaced stepping is enabled, step over breakpoints by executing a
     copy of the instruction at a different address.

     We can't use displaced stepping when we have a signal to deliver;
     the comments for displaced_step_prepare explain why.  The
     comments in the handle_inferior event for dealing with 'random
     signals' explain what we do instead.

     We can't use displaced stepping when we are waiting for vfork_done
     event, displaced stepping breaks the vfork child similarly as single
     step software breakpoint.  */
  if (tp->control.trap_expected
      && use_displaced_stepping (tp)
      && !step_over_info_valid_p ()
      && sig == GDB_SIGNAL_0
      && current_inferior ()->thread_waiting_for_vfork_done == nullptr)
    {
      displaced_step_prepare_status prepare_status
	= displaced_step_prepare (tp);

      if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
	{
	  infrun_debug_printf ("Got placed in step-over queue");

	  tp->control.trap_expected = 0;
	  return;
	}
      else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_CANT)
	{
	  /* Fallback to stepping over the breakpoint in-line.  */

	  if (target_is_non_stop_p ())
	    stop_all_threads ("displaced stepping falling back on inline stepping");

	  set_step_over_info (regcache->aspace (),
			      regcache_read_pc (regcache), 0, tp->global_num);

	  step = maybe_software_singlestep (gdbarch);

	  insert_breakpoints ();
	}
      else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_OK)
	{
	  /* Update pc to reflect the new address from which we will
	     execute instructions due to displaced stepping.  */
	  pc = regcache_read_pc (get_thread_regcache (tp));

	  step = gdbarch_displaced_step_hw_singlestep (gdbarch);
	}
      else
	gdb_assert_not_reached ("Invalid displaced_step_prepare_status "
				"value.");
    }

  /* Do we need to do it the hard way, w/temp breakpoints?  */
  else if (step)
    step = maybe_software_singlestep (gdbarch);

  /* Currently, our software single-step implementation leads to different
     results than hardware single-stepping in one situation: when stepping
     into delivering a signal which has an associated signal handler,
     hardware single-step will stop at the first instruction of the handler,
     while software single-step will simply skip execution of the handler.

     For now, this difference in behavior is accepted since there is no
     easy way to actually implement single-stepping into a signal handler
     without kernel support.

     However, there is one scenario where this difference leads to follow-on
     problems: if we're stepping off a breakpoint by removing all breakpoints
     and then single-stepping.  In this case, the software single-step
     behavior means that even if there is a *breakpoint* in the signal
     handler, GDB still would not stop.

     Fortunately, we can at least fix this particular issue.  We detect
     here the case where we are about to deliver a signal while software
     single-stepping with breakpoints removed.  In this situation, we
     revert the decisions to remove all breakpoints and insert single-
     step breakpoints, and instead we install a step-resume breakpoint
     at the current address, deliver the signal without stepping, and
     once we arrive back at the step-resume breakpoint, actually step
     over the breakpoint we originally wanted to step over.  */
  if (thread_has_single_step_breakpoints_set (tp)
      && sig != GDB_SIGNAL_0
      && step_over_info_valid_p ())
    {
      /* If we have nested signals or a pending signal is delivered
	 immediately after a handler returns, might already have
	 a step-resume breakpoint set on the earlier handler.  We cannot
	 set another step-resume breakpoint; just continue on until the
	 original breakpoint is hit.  */
      if (tp->control.step_resume_breakpoint == NULL)
	{
	  insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
	  tp->step_after_step_resume_breakpoint = 1;
	}

      delete_single_step_breakpoints (tp);

      clear_step_over_info ();
      tp->control.trap_expected = 0;

      insert_breakpoints ();
    }

  /* If STEP is set, it's a request to use hardware stepping
     facilities.  But in that case, we should never
     use singlestep breakpoint.  */
  gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step));

  /* Decide the set of threads to ask the target to resume.  */
  if (tp->control.trap_expected)
    {
      /* We're allowing a thread to run past a breakpoint it has
	 hit, either by single-stepping the thread with the breakpoint
	 removed, or by displaced stepping, with the breakpoint inserted.
	 In the former case, we need to single-step only this thread,
	 and keep others stopped, as they can miss this breakpoint if
	 allowed to run.  That's not really a problem for displaced
	 stepping, but, we still keep other threads stopped, in case
	 another thread is also stopped for a breakpoint waiting for
	 its turn in the displaced stepping queue.  */
      resume_ptid = inferior_ptid;
    }
  else
    resume_ptid = internal_resume_ptid (user_step);

  if (execution_direction != EXEC_REVERSE
      && step && breakpoint_inserted_here_p (aspace, pc))
    {
      /* There are two cases where we currently need to step a
	 breakpoint instruction when we have a signal to deliver:

	 - See handle_signal_stop where we handle random signals that
	 could take us out of the stepping range.  Normally, in
	 that case we end up continuing (instead of stepping) over the
	 signal handler with a breakpoint at PC, but there are cases
	 where we should _always_ single-step, even if we have a
	 step-resume breakpoint, like when a software watchpoint is
	 set.  Assuming single-stepping and delivering a signal at the
	 same time would take us to the signal handler, then we could
	 have removed the breakpoint at PC to step over it.  However,
	 some hardware step targets (like e.g., Mac OS) can't step
	 into signal handlers, and for those, we need to leave the
	 breakpoint at PC inserted, as otherwise if the handler
	 recurses and executes PC again, it'll miss the breakpoint.
	 So we leave the breakpoint inserted anyway, but we need to
	 record that we tried to step a breakpoint instruction, so
	 that adjust_pc_after_break doesn't end up confused.

	 - In non-stop if we insert a breakpoint (e.g., a step-resume)
	 in one thread after another thread that was stepping had been
	 momentarily paused for a step-over.  When we re-resume the
	 stepping thread, it may be resumed from that address with a
	 breakpoint that hasn't trapped yet.  Seen with
	 gdb.threads/non-stop-fair-events.exp, on targets that don't
	 do displaced stepping.  */

      infrun_debug_printf ("resume: [%s] stepped breakpoint",
			   tp->ptid.to_string ().c_str ());

      tp->stepped_breakpoint = 1;

      /* Most targets can step a breakpoint instruction, thus
	 executing it normally.  But if this one cannot, just
	 continue and we will hit it anyway.  */
      if (gdbarch_cannot_step_breakpoint (gdbarch))
	step = false;
    }

  /* With "set debug displaced" on, dump the bytes about to execute in
     the displaced-stepping scratch pad.  */
  if (debug_displaced
      && tp->control.trap_expected
      && use_displaced_stepping (tp)
      && !step_over_info_valid_p ())
    {
      struct regcache *resume_regcache = get_thread_regcache (tp);
      struct gdbarch *resume_gdbarch = resume_regcache->arch ();
      CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
      gdb_byte buf[4];

      read_memory (actual_pc, buf, sizeof (buf));
      displaced_debug_printf ("run %s: %s",
			      paddress (resume_gdbarch, actual_pc),
			      displaced_step_dump_bytes
				(buf, sizeof (buf)).c_str ());
    }

  if (tp->control.may_range_step)
    {
      /* If we're resuming a thread with the PC out of the step
	 range, then we're doing some nested/finer run control
	 operation, like stepping the thread out of the dynamic
	 linker or the displaced stepping scratch pad.  We
	 shouldn't have allowed a range step then.  */
      gdb_assert (pc_in_thread_step_range (pc, tp));
    }

  do_target_resume (resume_ptid, step, sig);
  tp->set_resumed (true);
}
2669
2670 /* Resume the inferior. SIG is the signal to give the inferior
2671 (GDB_SIGNAL_0 for none). This is a wrapper around 'resume_1' that
2672 rolls back state on error. */
2673
2674 static void
2675 resume (gdb_signal sig)
2676 {
2677 try
2678 {
2679 resume_1 (sig);
2680 }
2681 catch (const gdb_exception &ex)
2682 {
2683 /* If resuming is being aborted for any reason, delete any
2684 single-step breakpoint resume_1 may have created, to avoid
2685 confusing the following resumption, and to avoid leaving
2686 single-step breakpoints perturbing other threads, in case
2687 we're running in non-stop mode. */
2688 if (inferior_ptid != null_ptid)
2689 delete_single_step_breakpoints (inferior_thread ());
2690 throw;
2691 }
2692 }
2693
2694 \f
2695 /* Proceeding. */
2696
/* See infrun.h.  */

/* Counter that tracks number of user visible stops.  This can be used
   to tell whether a command has proceeded the inferior past the
   current location.  This allows e.g., inferior function calls in
   breakpoint commands to not interrupt the command list.  When the
   call finishes successfully, the inferior is standing at the same
   breakpoint as if nothing happened (and so we don't call
   normal_stop).  Incremented by new_stop_id, read via get_stop_id.  */
static ULONGEST current_stop_id;

/* See infrun.h.  */

ULONGEST
get_stop_id (void)
{
  return current_stop_id;
}
2715
2716 /* Called when we report a user visible stop. */
2717
2718 static void
2719 new_stop_id (void)
2720 {
2721 current_stop_id++;
2722 }
2723
/* Clear out all variables saying what to do when inferior is continued.
   First do this, then set the ones you want, then call `proceed'.  */

static void
clear_proceed_status_thread (struct thread_info *tp)
{
  infrun_debug_printf ("%s", tp->ptid.to_string ().c_str ());

  /* If we're starting a new sequence, then the previous finished
     single-step is no longer relevant.  */
  if (tp->has_pending_waitstatus ())
    {
      if (tp->stop_reason () == TARGET_STOPPED_BY_SINGLE_STEP)
	{
	  infrun_debug_printf ("pending event of %s was a finished step. "
			       "Discarding.",
			       tp->ptid.to_string ().c_str ());

	  tp->clear_pending_waitstatus ();
	  tp->set_stop_reason (TARGET_STOPPED_BY_NO_REASON);
	}
      else
	{
	  /* Any other pending event is kept; just note it in the
	     debug log.  */
	  infrun_debug_printf
	    ("thread %s has pending wait status %s (currently_stepping=%d).",
	     tp->ptid.to_string ().c_str (),
	     tp->pending_waitstatus ().to_string ().c_str (),
	     currently_stepping (tp));
	}
    }

  /* If this signal should not be seen by program, give it zero.
     Used for debugging signals.  */
  if (!signal_pass_state (tp->stop_signal ()))
    tp->set_stop_signal (GDB_SIGNAL_0);

  /* Drop the thread's reference to its per-command state machine,
     if any.  */
  tp->release_thread_fsm ();

  /* Reset all run-control knobs to their neutral values.  */
  tp->control.trap_expected = 0;
  tp->control.step_range_start = 0;
  tp->control.step_range_end = 0;
  tp->control.may_range_step = 0;
  tp->control.step_frame_id = null_frame_id;
  tp->control.step_stack_frame_id = null_frame_id;
  tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
  tp->control.step_start_function = NULL;
  tp->stop_requested = 0;

  tp->control.stop_step = 0;

  tp->control.proceed_to_finish = 0;

  tp->control.stepping_command = 0;

  /* Discard any remaining commands or status from previous stop.  */
  bpstat_clear (&tp->control.stop_bpstat);
}
2781
/* Clear per-thread run-control state ahead of a proceed.  STEP is
   forwarded to user_visible_resume_ptid to compute the set of threads
   the coming resume will affect.  */

void
clear_proceed_status (int step)
{
  /* With scheduler-locking replay, stop replaying other threads if we're
     not replaying the user-visible resume ptid.

     This is a convenience feature to not require the user to explicitly
     stop replaying the other threads.  We're assuming that the user's
     intent is to resume tracing the recorded process.  */
  if (!non_stop && scheduler_mode == schedlock_replay
      && target_record_is_replaying (minus_one_ptid)
      && !target_record_will_replay (user_visible_resume_ptid (step),
				     execution_direction))
    target_record_stop_replaying ();

  if (!non_stop && inferior_ptid != null_ptid)
    {
      ptid_t resume_ptid = user_visible_resume_ptid (step);
      process_stratum_target *resume_target
	= user_visible_resume_target (resume_ptid);

      /* In all-stop mode, delete the per-thread status of all threads
	 we're about to resume, implicitly and explicitly.  */
      for (thread_info *tp : all_non_exited_threads (resume_target, resume_ptid))
	clear_proceed_status_thread (tp);
    }

  if (inferior_ptid != null_ptid)
    {
      struct inferior *inferior;

      if (non_stop)
	{
	  /* If in non-stop mode, only delete the per-thread status of
	     the current thread.  */
	  clear_proceed_status_thread (inferior_thread ());
	}

      inferior = current_inferior ();
      inferior->control.stop_soon = NO_STOP_QUIETLY;
    }

  gdb::observers::about_to_proceed.notify ();
}
2826
2827 /* Returns true if TP is still stopped at a breakpoint that needs
2828 stepping-over in order to make progress. If the breakpoint is gone
2829 meanwhile, we can skip the whole step-over dance. */
2830
2831 static bool
2832 thread_still_needs_step_over_bp (struct thread_info *tp)
2833 {
2834 if (tp->stepping_over_breakpoint)
2835 {
2836 struct regcache *regcache = get_thread_regcache (tp);
2837
2838 if (breakpoint_here_p (regcache->aspace (),
2839 regcache_read_pc (regcache))
2840 == ordinary_breakpoint_here)
2841 return true;
2842
2843 tp->stepping_over_breakpoint = 0;
2844 }
2845
2846 return false;
2847 }
2848
2849 /* Check whether thread TP still needs to start a step-over in order
2850 to make progress when resumed. Returns an bitwise or of enum
2851 step_over_what bits, indicating what needs to be stepped over. */
2852
2853 static step_over_what
2854 thread_still_needs_step_over (struct thread_info *tp)
2855 {
2856 step_over_what what = 0;
2857
2858 if (thread_still_needs_step_over_bp (tp))
2859 what |= STEP_OVER_BREAKPOINT;
2860
2861 if (tp->stepping_over_watchpoint
2862 && !target_have_steppable_watchpoint ())
2863 what |= STEP_OVER_WATCHPOINT;
2864
2865 return what;
2866 }
2867
2868 /* Returns true if scheduler locking applies. STEP indicates whether
2869 we're about to do a step/next-like command to a thread. */
2870
2871 static bool
2872 schedlock_applies (struct thread_info *tp)
2873 {
2874 return (scheduler_mode == schedlock_on
2875 || (scheduler_mode == schedlock_step
2876 && tp->control.stepping_command)
2877 || (scheduler_mode == schedlock_replay
2878 && target_record_will_replay (minus_one_ptid,
2879 execution_direction)));
2880 }
2881
/* Set process_stratum_target::COMMIT_RESUMED_STATE in all target
   stacks that have threads executing and don't have threads with
   pending events.  The actual commit_resumed target calls are issued
   separately by maybe_call_commit_resumed_all_targets.  */

static void
maybe_set_commit_resumed_all_targets ()
{
  scoped_restore_current_thread restore_thread;

  for (inferior *inf : all_non_exited_inferiors ())
    {
      process_stratum_target *proc_target = inf->process_target ();

      if (proc_target->commit_resumed_state)
	{
	  /* We already set this in a previous iteration, via another
	     inferior sharing the process_stratum target.  */
	  continue;
	}

      /* If the target has no resumed threads, it would be useless to
	 ask it to commit the resumed threads.  */
      if (!proc_target->threads_executing)
	{
	  infrun_debug_printf ("not requesting commit-resumed for target "
			       "%s, no resumed threads",
			       proc_target->shortname ());
	  continue;
	}

      /* As an optimization, if a thread from this target has some
	 status to report, handle it before requiring the target to
	 commit its resumed threads: handling the status might lead to
	 resuming more threads.  */
      if (proc_target->has_resumed_with_pending_wait_status ())
	{
	  infrun_debug_printf ("not requesting commit-resumed for target %s, a"
			       " thread has a pending waitstatus",
			       proc_target->shortname ());
	  continue;
	}

      switch_to_inferior_no_thread (inf);

      if (target_has_pending_events ())
	{
	  infrun_debug_printf ("not requesting commit-resumed for target %s, "
			       "target has pending events",
			       proc_target->shortname ());
	  continue;
	}

      infrun_debug_printf ("enabling commit-resumed for target %s",
			   proc_target->shortname ());

      proc_target->commit_resumed_state = true;
    }
}
2940
2941 /* See infrun.h. */
2942
2943 void
2944 maybe_call_commit_resumed_all_targets ()
2945 {
2946 scoped_restore_current_thread restore_thread;
2947
2948 for (inferior *inf : all_non_exited_inferiors ())
2949 {
2950 process_stratum_target *proc_target = inf->process_target ();
2951
2952 if (!proc_target->commit_resumed_state)
2953 continue;
2954
2955 switch_to_inferior_no_thread (inf);
2956
2957 infrun_debug_printf ("calling commit_resumed for target %s",
2958 proc_target->shortname());
2959
2960 target_commit_resumed ();
2961 }
2962 }
2963
/* Tracks nesting of scoped_disable_commit_resumed objects, ensuring
   that only the outermost one attempts to re-enable commit-resumed.
   True when no scoped_disable_commit_resumed is live.  */
static bool enable_commit_resumed = true;
2968
2969 /* See infrun.h. */
2970
2971 scoped_disable_commit_resumed::scoped_disable_commit_resumed
2972 (const char *reason)
2973 : m_reason (reason),
2974 m_prev_enable_commit_resumed (enable_commit_resumed)
2975 {
2976 infrun_debug_printf ("reason=%s", m_reason);
2977
2978 enable_commit_resumed = false;
2979
2980 for (inferior *inf : all_non_exited_inferiors ())
2981 {
2982 process_stratum_target *proc_target = inf->process_target ();
2983
2984 if (m_prev_enable_commit_resumed)
2985 {
2986 /* This is the outermost instance: force all
2987 COMMIT_RESUMED_STATE to false. */
2988 proc_target->commit_resumed_state = false;
2989 }
2990 else
2991 {
2992 /* This is not the outermost instance, we expect
2993 COMMIT_RESUMED_STATE to have been cleared by the
2994 outermost instance. */
2995 gdb_assert (!proc_target->commit_resumed_state);
2996 }
2997 }
2998 }
2999
3000 /* See infrun.h. */
3001
3002 void
3003 scoped_disable_commit_resumed::reset ()
3004 {
3005 if (m_reset)
3006 return;
3007 m_reset = true;
3008
3009 infrun_debug_printf ("reason=%s", m_reason);
3010
3011 gdb_assert (!enable_commit_resumed);
3012
3013 enable_commit_resumed = m_prev_enable_commit_resumed;
3014
3015 if (m_prev_enable_commit_resumed)
3016 {
3017 /* This is the outermost instance, re-enable
3018 COMMIT_RESUMED_STATE on the targets where it's possible. */
3019 maybe_set_commit_resumed_all_targets ();
3020 }
3021 else
3022 {
3023 /* This is not the outermost instance, we expect
3024 COMMIT_RESUMED_STATE to still be false. */
3025 for (inferior *inf : all_non_exited_inferiors ())
3026 {
3027 process_stratum_target *proc_target = inf->process_target ();
3028 gdb_assert (!proc_target->commit_resumed_state);
3029 }
3030 }
3031 }
3032
/* See infrun.h.  */

scoped_disable_commit_resumed::~scoped_disable_commit_resumed ()
{
  /* No-op if reset/reset_and_commit already ran.  */
  reset ();
}
3039
/* See infrun.h.  */

void
scoped_disable_commit_resumed::reset_and_commit ()
{
  /* Restore the previous enable state, then immediately ask the
     marked targets to commit their resumed threads.  */
  reset ();
  maybe_call_commit_resumed_all_targets ();
}
3048
/* See infrun.h.  */

scoped_enable_commit_resumed::scoped_enable_commit_resumed
  (const char *reason)
  : m_reason (reason),
    m_prev_enable_commit_resumed (enable_commit_resumed)
{
  infrun_debug_printf ("reason=%s", m_reason);

  /* Only act if commit-resumed is currently disabled; the destructor
     restores the saved state.  */
  if (!enable_commit_resumed)
    {
      enable_commit_resumed = true;

      /* Re-enable COMMIT_RESUMED_STATE on the targets where it's
	 possible.  */
      maybe_set_commit_resumed_all_targets ();

      maybe_call_commit_resumed_all_targets ();
    }
}
3069
/* See infrun.h.  */

scoped_enable_commit_resumed::~scoped_enable_commit_resumed ()
{
  infrun_debug_printf ("reason=%s", m_reason);

  gdb_assert (enable_commit_resumed);

  enable_commit_resumed = m_prev_enable_commit_resumed;

  if (!enable_commit_resumed)
    {
      /* The enclosing scope had commit-resumed disabled; force all
	 COMMIT_RESUMED_STATE back to false.  */
      for (inferior *inf : all_non_exited_inferiors ())
	{
	  process_stratum_target *proc_target = inf->process_target ();
	  proc_target->commit_resumed_state = false;
	}
    }
}
3090
/* Check that all the targets we're about to resume are in non-stop
   mode.  Ideally, we'd only care whether all targets support
   target-async, but we're not there yet.  E.g., stop_all_threads
   doesn't know how to handle all-stop targets.  Also, the remote
   protocol in all-stop mode is synchronous, irrespective of
   target-async, which means that things like a breakpoint re-set
   triggered by one target would try to read memory from all targets
   and fail.  Errors out (via error ()) on violation; otherwise
   returns normally.  */

static void
check_multi_target_resumption (process_stratum_target *resume_target)
{
  /* A non-null RESUME_TARGET means a single connection is being
     resumed; only the wildcard (all connections) case can mix.  */
  if (!non_stop && resume_target == nullptr)
    {
      scoped_restore_current_thread restore_thread;

      /* This is used to track whether we're resuming more than one
	 target.  */
      process_stratum_target *first_connection = nullptr;

      /* The first inferior we see with a target that does not work in
	 always-non-stop mode.  */
      inferior *first_not_non_stop = nullptr;

      for (inferior *inf : all_non_exited_inferiors ())
	{
	  switch_to_inferior_no_thread (inf);

	  if (!target_has_execution ())
	    continue;

	  process_stratum_target *proc_target
	    = current_inferior ()->process_target();

	  if (!target_is_non_stop_p ())
	    first_not_non_stop = inf;

	  if (first_connection == nullptr)
	    first_connection = proc_target;
	  else if (first_connection != proc_target
		   && first_not_non_stop != nullptr)
	    {
	      /* More than one connection, and at least one of them is
		 an all-stop target: report the offending one.  */
	      switch_to_inferior_no_thread (first_not_non_stop);

	      proc_target = current_inferior ()->process_target();

	      error (_("Connection %d (%s) does not support "
		       "multi-target resumption."),
		     proc_target->connection_number,
		     make_target_connection_string (proc_target).c_str ());
	    }
	}
    }
}
3145
/* Basic routine for continuing the program in various fashions.

   ADDR is the address to resume at, or -1 for resume where stopped.
   SIGGNAL is the signal to give it, or GDB_SIGNAL_0 for none,
   or GDB_SIGNAL_DEFAULT for act according to how it stopped.

   You should call clear_proceed_status before calling proceed.  */

void
proceed (CORE_ADDR addr, enum gdb_signal siggnal)
{
  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  struct regcache *regcache;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  struct execution_control_state ecss;
  struct execution_control_state *ecs = &ecss;

  /* If we're stopped at a fork/vfork, follow the branch set by the
     "set follow-fork-mode" command; otherwise, we'll just proceed
     resuming the current thread.  */
  if (!follow_fork ())
    {
      /* The target for some reason decided not to resume.  */
      normal_stop ();
      if (target_can_async_p ())
	inferior_event_handler (INF_EXEC_COMPLETE);
      return;
    }

  /* We'll update this if & when we switch to a new thread.  */
  previous_inferior_ptid = inferior_ptid;

  regcache = get_current_regcache ();
  gdbarch = regcache->arch ();
  const address_space *aspace = regcache->aspace ();

  pc = regcache_read_pc_protected (regcache);

  thread_info *cur_thr = inferior_thread ();

  /* Fill in with reasonable starting values.  */
  init_thread_stepping_state (cur_thr);

  gdb_assert (!thread_is_in_step_over_chain (cur_thr));

  /* Compute the set of threads (and their target) that this command
     resumes, from the user/frontend's point of view.  */
  ptid_t resume_ptid
    = user_visible_resume_ptid (cur_thr->control.stepping_command);
  process_stratum_target *resume_target
    = user_visible_resume_target (resume_ptid);

  check_multi_target_resumption (resume_target);

  if (addr == (CORE_ADDR) -1)
    {
      if (cur_thr->stop_pc_p ()
	  && pc == cur_thr->stop_pc ()
	  && breakpoint_here_p (aspace, pc) == ordinary_breakpoint_here
	  && execution_direction != EXEC_REVERSE)
	/* There is a breakpoint at the address we will resume at,
	   step one instruction before inserting breakpoints so that
	   we do not stop right away (and report a second hit at this
	   breakpoint).

	   Note, we don't do this in reverse, because we won't
	   actually be executing the breakpoint insn anyway.
	   We'll be (un-)executing the previous instruction.  */
	cur_thr->stepping_over_breakpoint = 1;
      else if (gdbarch_single_step_through_delay_p (gdbarch)
	       && gdbarch_single_step_through_delay (gdbarch,
						     get_current_frame ()))
	/* We stepped onto an instruction that needs to be stepped
	   again before re-inserting the breakpoint, do so.  */
	cur_thr->stepping_over_breakpoint = 1;
    }
  else
    {
      regcache_write_pc (regcache, addr);
    }

  if (siggnal != GDB_SIGNAL_DEFAULT)
    cur_thr->set_stop_signal (siggnal);

  /* If an exception is thrown from this point on, make sure to
     propagate GDB's knowledge of the executing state to the
     frontend/user running state.  */
  scoped_finish_thread_state finish_state (resume_target, resume_ptid);

  /* Even if RESUME_PTID is a wildcard, and we end up resuming fewer
     threads (e.g., we might need to set threads stepping over
     breakpoints first), from the user/frontend's point of view, all
     threads in RESUME_PTID are now running.  Unless we're calling an
     inferior function, as in that case we pretend the inferior
     doesn't run at all.  */
  if (!cur_thr->control.in_infcall)
    set_running (resume_target, resume_ptid, true);

  infrun_debug_printf ("addr=%s, signal=%s", paddress (gdbarch, addr),
		       gdb_signal_to_symbol_string (siggnal));

  annotate_starting ();

  /* Make sure that output from GDB appears before output from the
     inferior.  */
  gdb_flush (gdb_stdout);

  /* Since we've marked the inferior running, give it the terminal.  A
     QUIT/Ctrl-C from here on is forwarded to the target (which can
     still detect attempts to unblock a stuck connection with repeated
     Ctrl-C from within target_pass_ctrlc).  */
  target_terminal::inferior ();

  /* In a multi-threaded task we may select another thread and
     then continue or step.

     But if a thread that we're resuming had stopped at a breakpoint,
     it will immediately cause another breakpoint stop without any
     execution (i.e. it will report a breakpoint hit incorrectly).  So
     we must step over it first.

     Look for threads other than the current (CUR_THR) that reported a
     breakpoint hit and haven't been resumed yet since.  */

  /* If scheduler locking applies, we can avoid iterating over all
     threads.  */
  if (!non_stop && !schedlock_applies (cur_thr))
    {
      for (thread_info *tp : all_non_exited_threads (resume_target,
						     resume_ptid))
	{
	  switch_to_thread_no_regs (tp);

	  /* Ignore the current thread here.  It's handled
	     afterwards.  */
	  if (tp == cur_thr)
	    continue;

	  if (!thread_still_needs_step_over (tp))
	    continue;

	  gdb_assert (!thread_is_in_step_over_chain (tp));

	  infrun_debug_printf ("need to step-over [%s] first",
			       tp->ptid.to_string ().c_str ());

	  global_thread_step_over_chain_enqueue (tp);
	}

      switch_to_thread (cur_thr);
    }

  /* Enqueue the current thread last, so that we move all other
     threads over their breakpoints first.  */
  if (cur_thr->stepping_over_breakpoint)
    global_thread_step_over_chain_enqueue (cur_thr);

  /* If the thread isn't started, we'll still need to set its prev_pc,
     so that switch_back_to_stepped_thread knows the thread hasn't
     advanced.  Must do this before resuming any thread, as in
     all-stop/remote, once we resume we can't send any other packet
     until the target stops again.  */
  cur_thr->prev_pc = regcache_read_pc_protected (regcache);

  {
    /* Hold off telling targets to commit their resumed threads until
       all the resumptions below have been issued.  */
    scoped_disable_commit_resumed disable_commit_resumed ("proceeding");
    bool step_over_started = start_step_over ();

    if (step_over_info_valid_p ())
      {
	/* Either this thread started a new in-line step over, or some
	   other thread was already doing one.  In either case, don't
	   resume anything else until the step-over is finished.  */
      }
    else if (step_over_started && !target_is_non_stop_p ())
      {
	/* A new displaced stepping sequence was started.  In all-stop,
	   we can't talk to the target anymore until it next stops.  */
      }
    else if (!non_stop && target_is_non_stop_p ())
      {
	INFRUN_SCOPED_DEBUG_START_END
	  ("resuming threads, all-stop-on-top-of-non-stop");

	/* In all-stop, but the target is always in non-stop mode.
	   Start all other threads that are implicitly resumed too.  */
	for (thread_info *tp : all_non_exited_threads (resume_target,
						       resume_ptid))
	  {
	    switch_to_thread_no_regs (tp);

	    if (!tp->inf->has_execution ())
	      {
		infrun_debug_printf ("[%s] target has no execution",
				     tp->ptid.to_string ().c_str ());
		continue;
	      }

	    if (tp->resumed ())
	      {
		infrun_debug_printf ("[%s] resumed",
				     tp->ptid.to_string ().c_str ());
		gdb_assert (tp->executing () || tp->has_pending_waitstatus ());
		continue;
	      }

	    if (thread_is_in_step_over_chain (tp))
	      {
		infrun_debug_printf ("[%s] needs step-over",
				     tp->ptid.to_string ().c_str ());
		continue;
	      }

	    /* If a thread of that inferior is waiting for a vfork-done
	       (for a detached vfork child to exec or exit), breakpoints are
	       removed.  We must not resume any thread of that inferior, other
	       than the one waiting for the vfork-done.  */
	    if (tp->inf->thread_waiting_for_vfork_done != nullptr
		&& tp != tp->inf->thread_waiting_for_vfork_done)
	      {
		infrun_debug_printf ("[%s] another thread of this inferior is "
				     "waiting for vfork-done",
				     tp->ptid.to_string ().c_str ());
		continue;
	      }

	    infrun_debug_printf ("resuming %s",
				 tp->ptid.to_string ().c_str ());

	    reset_ecs (ecs, tp);
	    switch_to_thread (tp);
	    keep_going_pass_signal (ecs);
	    if (!ecs->wait_some_more)
	      error (_("Command aborted."));
	  }
      }
    else if (!cur_thr->resumed ()
	     && !thread_is_in_step_over_chain (cur_thr)
	     /* In non-stop, forbid resuming a thread if some other thread of
		that inferior is waiting for a vfork-done event (this means
		breakpoints are out for this inferior).  */
	     && !(non_stop
		  && cur_thr->inf->thread_waiting_for_vfork_done != nullptr))
      {
	/* The thread wasn't started, and isn't queued, run it now.  */
	reset_ecs (ecs, cur_thr);
	switch_to_thread (cur_thr);
	keep_going_pass_signal (ecs);
	if (!ecs->wait_some_more)
	  error (_("Command aborted."));
      }

    disable_commit_resumed.reset_and_commit ();
  }

  finish_state.release ();

  /* If we've switched threads above, switch back to the previously
     current thread.  We don't want the user to see a different
     selected thread.  */
  switch_to_thread (cur_thr);

  /* Tell the event loop to wait for it to stop.  If the target
     supports asynchronous execution, it'll do this from within
     target_resume.  */
  if (!target_can_async_p ())
    mark_async_event_handler (infrun_async_inferior_event_token);
}
3414 \f
3415
/* Start remote-debugging of a machine over a serial link.  Waits for
   the (already-opened) remote target to stop, does post-creation
   bookkeeping, then presents the stop to the user.  */

void
start_remote (int from_tty)
{
  inferior *inf = current_inferior ();
  inf->control.stop_soon = STOP_QUIETLY_REMOTE;

  /* Always go on waiting for the target, regardless of the mode.  */
  /* FIXME: cagney/1999-09-23: At present it isn't possible to
     indicate to wait_for_inferior that a target should timeout if
     nothing is returned (instead of just blocking).  Because of this,
     targets expecting an immediate response need to, internally, set
     things up so that the target_wait() is forced to eventually
     timeout.  */
  /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
     differentiate to its caller what the state of the target is after
     the initial open has been performed.  Here we're assuming that
     the target has stopped.  It should be possible to eventually have
     target_open() return to the caller an indication that the target
     is currently running and GDB state should be set to the same as
     for an async run.  */
  wait_for_inferior (inf);

  /* Now that the inferior has stopped, do any bookkeeping like
     loading shared libraries.  We want to do this before normal_stop,
     so that the displayed frame is up to date.  */
  post_create_inferior (from_tty);

  normal_stop ();
}
3447
3448 /* Initialize static vars when a new inferior begins. */
3449
3450 void
3451 init_wait_for_inferior (void)
3452 {
3453 /* These are meaningless until the first time through wait_for_inferior. */
3454
3455 breakpoint_init_inferior (inf_starting);
3456
3457 clear_proceed_status (0);
3458
3459 nullify_last_target_wait_ptid ();
3460
3461 previous_inferior_ptid = inferior_ptid;
3462 }
3463
3464 \f
3465
3466 static void handle_inferior_event (struct execution_control_state *ecs);
3467
3468 static void handle_step_into_function (struct gdbarch *gdbarch,
3469 struct execution_control_state *ecs);
3470 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
3471 struct execution_control_state *ecs);
3472 static void handle_signal_stop (struct execution_control_state *ecs);
3473 static void check_exception_resume (struct execution_control_state *,
3474 struct frame_info *);
3475
3476 static void end_stepping_range (struct execution_control_state *ecs);
3477 static void stop_waiting (struct execution_control_state *ecs);
3478 static void keep_going (struct execution_control_state *ecs);
3479 static void process_event_stop_test (struct execution_control_state *ecs);
3480 static bool switch_back_to_stepped_thread (struct execution_control_state *ecs);
3481
/* This function is attached as a "thread_stop_requested" observer.
   Cleanup local state that assumed the PTID was to be resumed, and
   report the stop to the frontend.  */

static void
infrun_thread_stop_requested (ptid_t ptid)
{
  process_stratum_target *curr_target = current_inferior ()->process_target ();

  /* PTID was requested to stop.  If the thread was already stopped,
     but the user/frontend doesn't know about that yet (e.g., the
     thread had been temporarily paused for some step-over), set up
     for reporting the stop now.  */
  for (thread_info *tp : all_threads (curr_target, ptid))
    {
      /* Only threads GDB believes are running but that are not
	 actually executing on the target are interesting here.  */
      if (tp->state != THREAD_RUNNING)
	continue;
      if (tp->executing ())
	continue;

      /* Remove matching threads from the step-over queue, so
	 start_step_over doesn't try to resume them
	 automatically.  */
      if (thread_is_in_step_over_chain (tp))
	global_thread_step_over_chain_remove (tp);

      /* If the thread is stopped, but the user/frontend doesn't
	 know about that yet, queue a pending event, as if the
	 thread had just stopped now.  Unless the thread already had
	 a pending event.  */
      if (!tp->has_pending_waitstatus ())
	{
	  target_waitstatus ws;
	  ws.set_stopped (GDB_SIGNAL_0);
	  tp->set_pending_waitstatus (ws);
	}

      /* Clear the inline-frame state, since we're re-processing the
	 stop.  */
      clear_inline_frame_state (tp);

      /* If this thread was paused because some other thread was
	 doing an inline-step over, let that finish first.  Once
	 that happens, we'll restart all threads and consume pending
	 stop events then.  */
      if (step_over_info_valid_p ())
	continue;

      /* Otherwise we can process the (new) pending event now.  Set
	 it so this pending event is considered by
	 do_target_wait.  */
      tp->set_resumed (true);
    }
}
3536
3537 static void
3538 infrun_thread_thread_exit (struct thread_info *tp, int silent)
3539 {
3540 if (target_last_proc_target == tp->inf->process_target ()
3541 && target_last_wait_ptid == tp->ptid)
3542 nullify_last_target_wait_ptid ();
3543 }
3544
/* Delete the step resume, single-step and longjmp/exception resume
   breakpoints of TP.  These are all the infrun-internal breakpoints
   tied to a single thread's run control.  */

static void
delete_thread_infrun_breakpoints (struct thread_info *tp)
{
  delete_step_resume_breakpoint (tp);
  delete_exception_resume_breakpoint (tp);
  delete_single_step_breakpoints (tp);
}
3555
3556 /* If the target still has execution, call FUNC for each thread that
3557 just stopped. In all-stop, that's all the non-exited threads; in
3558 non-stop, that's the current thread, only. */
3559
3560 typedef void (*for_each_just_stopped_thread_callback_func)
3561 (struct thread_info *tp);
3562
3563 static void
3564 for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func)
3565 {
3566 if (!target_has_execution () || inferior_ptid == null_ptid)
3567 return;
3568
3569 if (target_is_non_stop_p ())
3570 {
3571 /* If in non-stop mode, only the current thread stopped. */
3572 func (inferior_thread ());
3573 }
3574 else
3575 {
3576 /* In all-stop mode, all threads have stopped. */
3577 for (thread_info *tp : all_non_exited_threads ())
3578 func (tp);
3579 }
3580 }
3581
/* Delete the step resume and longjmp/exception resume breakpoints of
   the threads that just stopped.  (Also deletes their single-step
   breakpoints, via delete_thread_infrun_breakpoints.)  */

static void
delete_just_stopped_threads_infrun_breakpoints (void)
{
  for_each_just_stopped_thread (delete_thread_infrun_breakpoints);
}
3590
/* Delete the single-step breakpoints of the threads that just
   stopped, leaving their other infrun breakpoints in place.  */

static void
delete_just_stopped_threads_single_step_breakpoints (void)
{
  for_each_just_stopped_thread (delete_single_step_breakpoints);
}
3599
/* See infrun.h.  */

void
print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
			   const struct target_waitstatus &ws)
{
  /* Log the ptid we asked to wait on, the ptid the event actually
     came from, and the decoded wait status, as three debug lines.  */
  infrun_debug_printf ("target_wait (%s [%s], status) =",
		       waiton_ptid.to_string ().c_str (),
		       target_pid_to_str (waiton_ptid).c_str ());
  infrun_debug_printf ("  %s [%s],",
		       result_ptid.to_string ().c_str (),
		       target_pid_to_str (result_ptid).c_str ());
  infrun_debug_printf ("  %s", ws.to_string ().c_str ());
}
3614
3615 /* Select a thread at random, out of those which are resumed and have
3616 had events. */
3617
3618 static struct thread_info *
3619 random_pending_event_thread (inferior *inf, ptid_t waiton_ptid)
3620 {
3621 process_stratum_target *proc_target = inf->process_target ();
3622 thread_info *thread
3623 = proc_target->random_resumed_with_pending_wait_status (inf, waiton_ptid);
3624
3625 if (thread == nullptr)
3626 {
3627 infrun_debug_printf ("None found.");
3628 return nullptr;
3629 }
3630
3631 infrun_debug_printf ("Found %s.", thread->ptid.to_string ().c_str ());
3632 gdb_assert (thread->resumed ());
3633 gdb_assert (thread->has_pending_waitstatus ());
3634
3635 return thread;
3636 }
3637
/* Wrapper for target_wait that first checks whether threads have
   pending statuses to report before actually asking the target for
   more events. INF is the inferior we're using to call target_wait
   on.  PTID selects which thread(s) to wait on; minus_one_ptid or a
   pid-only ptid means "any thread".  On return, *STATUS holds the
   reported event and the returned ptid identifies the thread it came
   from.  */

static ptid_t
do_target_wait_1 (inferior *inf, ptid_t ptid,
		  target_waitstatus *status, target_wait_flags options)
{
  struct thread_info *tp;

  /* We know that we are looking for an event in the target of inferior
     INF, but we don't know which thread the event might come from. As
     such we want to make sure that INFERIOR_PTID is reset so that none of
     the wait code relies on it - doing so is always a mistake. */
  switch_to_inferior_no_thread (inf);

  /* First check if there is a resumed thread with a wait status
     pending. */
  if (ptid == minus_one_ptid || ptid.is_pid ())
    {
      tp = random_pending_event_thread (inf, ptid);
    }
  else
    {
      infrun_debug_printf ("Waiting for specific thread %s.",
			   ptid.to_string ().c_str ());

      /* We have a specific thread to check. */
      tp = find_thread_ptid (inf, ptid);
      gdb_assert (tp != NULL);
      if (!tp->has_pending_waitstatus ())
	tp = NULL;
    }

  /* If the pending event was a breakpoint hit, re-validate that the
     breakpoint still explains the stop: the thread's PC may have been
     changed, or the breakpoint removed, since the event was queued.
     If the event no longer makes sense, downgrade it to a spurious
     stop instead of reporting a stale breakpoint hit.  */
  if (tp != NULL
      && (tp->stop_reason () == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || tp->stop_reason () == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct regcache *regcache = get_thread_regcache (tp);
      struct gdbarch *gdbarch = regcache->arch ();
      CORE_ADDR pc;
      int discard = 0;

      pc = regcache_read_pc (regcache);

      if (pc != tp->stop_pc ())
	{
	  infrun_debug_printf ("PC of %s changed. was=%s, now=%s",
			       tp->ptid.to_string ().c_str (),
			       paddress (gdbarch, tp->stop_pc ()),
			       paddress (gdbarch, pc));
	  discard = 1;
	}
      else if (!breakpoint_inserted_here_p (regcache->aspace (), pc))
	{
	  infrun_debug_printf ("previous breakpoint of %s, at %s gone",
			       tp->ptid.to_string ().c_str (),
			       paddress (gdbarch, pc));

	  discard = 1;
	}

      if (discard)
	{
	  infrun_debug_printf ("pending event of %s cancelled.",
			       tp->ptid.to_string ().c_str ());

	  tp->clear_pending_waitstatus ();
	  target_waitstatus ws;
	  ws.set_spurious ();
	  tp->set_pending_waitstatus (ws);
	  tp->set_stop_reason (TARGET_STOPPED_BY_NO_REASON);
	}
    }

  /* A pending event was found -- consume and report it, instead of
     asking the target for new events.  */
  if (tp != NULL)
    {
      infrun_debug_printf ("Using pending wait status %s for %s.",
			   tp->pending_waitstatus ().to_string ().c_str (),
			   tp->ptid.to_string ().c_str ());

      /* Now that we've selected our final event LWP, un-adjust its PC
	 if it was a software breakpoint (and the target doesn't
	 always adjust the PC itself). */
      if (tp->stop_reason () == TARGET_STOPPED_BY_SW_BREAKPOINT
	  && !target_supports_stopped_by_sw_breakpoint ())
	{
	  struct regcache *regcache;
	  struct gdbarch *gdbarch;
	  int decr_pc;

	  regcache = get_thread_regcache (tp);
	  gdbarch = regcache->arch ();

	  decr_pc = gdbarch_decr_pc_after_break (gdbarch);
	  if (decr_pc != 0)
	    {
	      CORE_ADDR pc;

	      pc = regcache_read_pc (regcache);
	      regcache_write_pc (regcache, pc + decr_pc);
	    }
	}

      tp->set_stop_reason (TARGET_STOPPED_BY_NO_REASON);
      *status = tp->pending_waitstatus ();
      tp->clear_pending_waitstatus ();

      /* Wake up the event loop again, until all pending events are
	 processed. */
      if (target_is_async_p ())
	mark_async_event_handler (infrun_async_inferior_event_token);
      return tp->ptid;
    }

  /* But if we don't find one, we'll have to wait. */

  /* We can't ask a non-async target to do a non-blocking wait, so this will be
     a blocking wait. */
  if (!target_can_async_p ())
    options &= ~TARGET_WNOHANG;

  return target_wait (ptid, status, options);
}
3763
/* Wrapper for target_wait that first checks whether threads have
   pending statuses to report before actually asking the target for
   more events. Polls for events from all inferiors/targets.
   Returns true and fills ECS if an event was found; returns false
   (with ECS->ws set to "ignore") if no inferior reported one.  */

static bool
do_target_wait (execution_control_state *ecs, target_wait_flags options)
{
  int num_inferiors = 0;
  int random_selector;

  /* For fairness, we pick the first inferior/target to poll at random
     out of all inferiors that may report events, and then continue
     polling the rest of the inferior list starting from that one in a
     circular fashion until the whole list is polled once. */

  auto inferior_matches = [] (inferior *inf)
  {
    return inf->process_target () != nullptr;
  };

  /* First see how many matching inferiors we have. */
  for (inferior *inf : all_inferiors ())
    if (inferior_matches (inf))
      num_inferiors++;

  if (num_inferiors == 0)
    {
      ecs->ws.set_ignore ();
      return false;
    }

  /* Now randomly pick an inferior out of those that matched. */
  random_selector = (int)
    ((num_inferiors * (double) rand ()) / (RAND_MAX + 1.0));

  if (num_inferiors > 1)
    infrun_debug_printf ("Found %d inferiors, starting at #%d",
			 num_inferiors, random_selector);

  /* Select the Nth inferior that matched. */

  inferior *selected = nullptr;

  for (inferior *inf : all_inferiors ())
    if (inferior_matches (inf))
      if (random_selector-- == 0)
	{
	  selected = inf;
	  break;
	}

  /* Now poll for events out of each of the matching inferior's
     targets, starting from the selected one. */

  auto do_wait = [&] (inferior *inf)
  {
    ecs->ptid = do_target_wait_1 (inf, minus_one_ptid, &ecs->ws, options);
    ecs->target = inf->process_target ();
    return (ecs->ws.kind () != TARGET_WAITKIND_IGNORE);
  };

  /* Needed in 'all-stop + target-non-stop' mode, because we end up
     here spuriously after the target is all stopped and we've already
     reported the stop to the user, polling for events. */
  scoped_restore_current_thread restore_thread;

  intrusive_list_iterator<inferior> start
    = inferior_list.iterator_to (*selected);

  /* Poll from SELECTED to the end of the inferior list...  */
  for (intrusive_list_iterator<inferior> it = start;
       it != inferior_list.end ();
       ++it)
    {
      inferior *inf = &*it;

      if (inferior_matches (inf) && do_wait (inf))
	return true;
    }

  /* ... then wrap around and poll the inferiors before SELECTED, so
     the whole list is covered exactly once.  */
  for (intrusive_list_iterator<inferior> it = inferior_list.begin ();
       it != start;
       ++it)
    {
      inferior *inf = &*it;

      if (inferior_matches (inf) && do_wait (inf))
	return true;
    }

  ecs->ws.set_ignore ();
  return false;
}
3856
/* An event reported by wait_one, bundling the reporting target, the
   thread the event was for, and the wait status itself.  */

struct wait_one_event
{
  /* The target the event came out of. */
  process_stratum_target *target;

  /* The PTID the event was for. */
  ptid_t ptid;

  /* The waitstatus describing the event. */
  target_waitstatus ws;
};
3870
3871 static bool handle_one (const wait_one_event &event);
3872
/* Prepare and stabilize the inferior for detaching it. E.g.,
   detaching while a thread is displaced stepping is a recipe for
   crashing it, as nothing would readjust the PC out of the scratch
   pad.  Cancels pending step-overs for the current inferior and
   waits until no thread of it is displaced stepping anymore.  */

void
prepare_for_detach (void)
{
  struct inferior *inf = current_inferior ();
  ptid_t pid_ptid = ptid_t (inf->pid);
  scoped_restore_current_thread restore_thread;

  /* Mark the inferior as detaching for the duration of this function.  */
  scoped_restore restore_detaching = make_scoped_restore (&inf->detaching, true);

  /* Remove all threads of INF from the global step-over chain. We
     want to stop any ongoing step-over, not start any new one. */
  thread_step_over_list_safe_range range
    = make_thread_step_over_list_safe_range (global_thread_step_over_list);

  for (thread_info *tp : range)
    if (tp->inf == inf)
      {
	infrun_debug_printf ("removing thread %s from global step over chain",
			     tp->ptid.to_string ().c_str ());
	global_thread_step_over_chain_remove (tp);
      }

  /* If we were already in the middle of an inline step-over, and the
     thread stepping belongs to the inferior we're detaching, we need
     to restart the threads of other inferiors. */
  if (step_over_info.thread != -1)
    {
      infrun_debug_printf ("inline step-over in-process while detaching");

      thread_info *thr = find_thread_global_id (step_over_info.thread);
      if (thr->inf == inf)
	{
	  /* Since we removed threads of INF from the step-over chain,
	     we know this won't start a step-over for INF. */
	  clear_step_over_info ();

	  if (target_is_non_stop_p ())
	    {
	      /* Start a new step-over in another thread if there's
		 one that needs it. */
	      start_step_over ();

	      /* Restart all other threads (except the
		 previously-stepping thread, since that one is still
		 running). */
	      if (!step_over_info_valid_p ())
		restart_threads (thr);
	    }
	}
    }

  if (displaced_step_in_progress (inf))
    {
      infrun_debug_printf ("displaced-stepping in-process while detaching");

      /* Stop threads currently displaced stepping, aborting it. */

      for (thread_info *thr : inf->non_exited_threads ())
	{
	  if (thr->displaced_step_state.in_progress ())
	    {
	      if (thr->executing ())
		{
		  if (!thr->stop_requested)
		    {
		      target_stop (thr->ptid);
		      thr->stop_requested = true;
		    }
		}
	      else
		thr->set_resumed (false);
	    }
	}

      /* Pump events until no thread of INF is displaced stepping
	 anymore; handle_one finishes each displaced step as the
	 corresponding stop arrives.  */
      while (displaced_step_in_progress (inf))
	{
	  wait_one_event event;

	  event.target = inf->process_target ();
	  event.ptid = do_target_wait_1 (inf, pid_ptid, &event.ws, 0);

	  if (debug_infrun)
	    print_target_wait_results (pid_ptid, event.ptid, event.ws);

	  handle_one (event);
	}

      /* It's OK to leave some of the threads of INF stopped, since
	 they'll be detached shortly. */
    }
}
3969
/* Wait for control to return from inferior to debugger.

   If inferior gets a signal, we may decide to start it up again
   instead of returning. That is why there is a loop in this function.
   When this function actually returns it means the inferior
   should be left stopped and GDB should read more commands. */

static void
wait_for_inferior (inferior *inf)
{
  infrun_debug_printf ("wait_for_inferior ()");

  /* Clean up infrun breakpoints whether we leave normally or via an
     exception thrown by event handling.  */
  SCOPE_EXIT { delete_just_stopped_threads_infrun_breakpoints (); };

  /* If an error happens while handling the event, propagate GDB's
     knowledge of the executing state to the frontend/user running
     state. */
  scoped_finish_thread_state finish_state
    (inf->process_target (), minus_one_ptid);

  while (1)
    {
      struct execution_control_state ecss;
      struct execution_control_state *ecs = &ecss;

      overlay_cache_invalid = 1;

      /* Flush target cache before starting to handle each event.
	 Target was running and cache could be stale. This is just a
	 heuristic. Running threads may modify target memory, but we
	 don't get any event. */
      target_dcache_invalidate ();

      ecs->ptid = do_target_wait_1 (inf, minus_one_ptid, &ecs->ws, 0);
      ecs->target = inf->process_target ();

      if (debug_infrun)
	print_target_wait_results (minus_one_ptid, ecs->ptid, ecs->ws);

      /* Now figure out what to do with the result. */
      handle_inferior_event (ecs);

      if (!ecs->wait_some_more)
	break;
    }

  /* No error, don't finish the state yet. */
  finish_state.release ();
}
4019
4020 /* Cleanup that reinstalls the readline callback handler, if the
4021 target is running in the background. If while handling the target
4022 event something triggered a secondary prompt, like e.g., a
4023 pagination prompt, we'll have removed the callback handler (see
4024 gdb_readline_wrapper_line). Need to do this as we go back to the
4025 event loop, ready to process further input. Note this has no
4026 effect if the handler hasn't actually been removed, because calling
4027 rl_callback_handler_install resets the line buffer, thus losing
4028 input. */
4029
4030 static void
4031 reinstall_readline_callback_handler_cleanup ()
4032 {
4033 struct ui *ui = current_ui;
4034
4035 if (!ui->async)
4036 {
4037 /* We're not going back to the top level event loop yet. Don't
4038 install the readline callback, as it'd prep the terminal,
4039 readline-style (raw, noecho) (e.g., --batch). We'll install
4040 it the next time the prompt is displayed, when we're ready
4041 for input. */
4042 return;
4043 }
4044
4045 if (ui->command_editing && ui->prompt_state != PROMPT_BLOCKED)
4046 gdb_rl_callback_handler_reinstall ();
4047 }
4048
4049 /* Clean up the FSMs of threads that are now stopped. In non-stop,
4050 that's just the event thread. In all-stop, that's all threads. */
4051
4052 static void
4053 clean_up_just_stopped_threads_fsms (struct execution_control_state *ecs)
4054 {
4055 if (ecs->event_thread != nullptr
4056 && ecs->event_thread->thread_fsm () != nullptr)
4057 ecs->event_thread->thread_fsm ()->clean_up (ecs->event_thread);
4058
4059 if (!non_stop)
4060 {
4061 for (thread_info *thr : all_non_exited_threads ())
4062 {
4063 if (thr->thread_fsm () == nullptr)
4064 continue;
4065 if (thr == ecs->event_thread)
4066 continue;
4067
4068 switch_to_thread (thr);
4069 thr->thread_fsm ()->clean_up (thr);
4070 }
4071
4072 if (ecs->event_thread != nullptr)
4073 switch_to_thread (ecs->event_thread);
4074 }
4075 }
4076
4077 /* Helper for all_uis_check_sync_execution_done that works on the
4078 current UI. */
4079
4080 static void
4081 check_curr_ui_sync_execution_done (void)
4082 {
4083 struct ui *ui = current_ui;
4084
4085 if (ui->prompt_state == PROMPT_NEEDED
4086 && ui->async
4087 && !gdb_in_secondary_prompt_p (ui))
4088 {
4089 target_terminal::ours ();
4090 gdb::observers::sync_execution_done.notify ();
4091 ui_register_input_event_handler (ui);
4092 }
4093 }
4094
/* See infrun.h.  */

void
all_uis_check_sync_execution_done (void)
{
  /* Run the per-UI check with each UI temporarily made current.  */
  SWITCH_THRU_ALL_UIS ()
    {
      check_curr_ui_sync_execution_done ();
    }
}
4105
/* See infrun.h.  */

void
all_uis_on_sync_execution_starting (void)
{
  /* For each UI that is waiting at the prompt, disable stdin while a
     synchronous execution command runs.  */
  SWITCH_THRU_ALL_UIS ()
    {
      if (current_ui->prompt_state == PROMPT_NEEDED)
	async_disable_stdin ();
    }
}
4117
/* Asynchronous version of wait_for_inferior. It is called by the
   event loop whenever a change of state is detected on the file
   descriptor corresponding to the target. It can be called more than
   once to complete a single execution command. In such cases we need
   to keep the state in a global variable ECSS. If it is the last time
   that this function is called for a single execution command, then
   report to the user that the inferior has stopped, and do the
   necessary cleanups. */

void
fetch_inferior_event ()
{
  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  struct execution_control_state ecss;
  struct execution_control_state *ecs = &ecss;
  /* Set once an execution command has fully completed.  */
  int cmd_done = 0;

  /* Events are always processed with the main UI as current UI. This
     way, warnings, debug output, etc. are always consistently sent to
     the main console. */
  scoped_restore save_ui = make_scoped_restore (&current_ui, main_ui);

  /* Temporarily disable pagination. Otherwise, the user would be
     given an option to press 'q' to quit, which would cause an early
     exit and could leave GDB in a half-baked state. */
  scoped_restore save_pagination
    = make_scoped_restore (&pagination_enabled, false);

  /* End up with readline processing input, if necessary. */
  {
    SCOPE_EXIT { reinstall_readline_callback_handler_cleanup (); };

    /* We're handling a live event, so make sure we're doing live
       debugging. If we're looking at traceframes while the target is
       running, we're going to need to get back to that mode after
       handling the event. */
    gdb::optional<scoped_restore_current_traceframe> maybe_restore_traceframe;
    if (non_stop)
      {
	maybe_restore_traceframe.emplace ();
	set_current_traceframe (-1);
      }

    /* The user/frontend should not notice a thread switch due to
       internal events. Make sure we revert to the user selected
       thread and frame after handling the event and running any
       breakpoint commands. */
    scoped_restore_current_thread restore_thread;

    overlay_cache_invalid = 1;
    /* Flush target cache before starting to handle each event. Target
       was running and cache could be stale. This is just a heuristic.
       Running threads may modify target memory, but we don't get any
       event. */
    target_dcache_invalidate ();

    scoped_restore save_exec_dir
      = make_scoped_restore (&execution_direction,
			     target_execution_direction ());

    /* Allow targets to pause their resumed threads while we handle
       the event. */
    scoped_disable_commit_resumed disable_commit_resumed ("handling event");

    if (!do_target_wait (ecs, TARGET_WNOHANG))
      {
	infrun_debug_printf ("do_target_wait returned no event");
	disable_commit_resumed.reset_and_commit ();
	return;
      }

    gdb_assert (ecs->ws.kind () != TARGET_WAITKIND_IGNORE);

    /* Switch to the target that generated the event, so we can do
       target calls. */
    switch_to_target_no_thread (ecs->target);

    if (debug_infrun)
      print_target_wait_results (minus_one_ptid, ecs->ptid, ecs->ws);

    /* If an error happens while handling the event, propagate GDB's
       knowledge of the executing state to the frontend/user running
       state. */
    ptid_t finish_ptid = !target_is_non_stop_p () ? minus_one_ptid : ecs->ptid;
    scoped_finish_thread_state finish_state (ecs->target, finish_ptid);

    /* Get executed before scoped_restore_current_thread above to apply
       still for the thread which has thrown the exception. */
    auto defer_bpstat_clear
      = make_scope_exit (bpstat_clear_actions);
    auto defer_delete_threads
      = make_scope_exit (delete_just_stopped_threads_infrun_breakpoints);

    /* Now figure out what to do with the result. */
    handle_inferior_event (ecs);

    if (!ecs->wait_some_more)
      {
	struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
	bool should_stop = true;
	struct thread_info *thr = ecs->event_thread;

	delete_just_stopped_threads_infrun_breakpoints ();

	if (thr != nullptr && thr->thread_fsm () != nullptr)
	  should_stop = thr->thread_fsm ()->should_stop (thr);

	if (!should_stop)
	  {
	    /* The FSM says the command isn't done yet; resume.  */
	    keep_going (ecs);
	  }
	else
	  {
	    bool should_notify_stop = true;
	    int proceeded = 0;

	    clean_up_just_stopped_threads_fsms (ecs);

	    if (thr != nullptr && thr->thread_fsm () != nullptr)
	      should_notify_stop
		= thr->thread_fsm ()->should_notify_stop ();

	    if (should_notify_stop)
	      {
		/* We may not find an inferior if this was a process exit. */
		if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
		  proceeded = normal_stop ();
	      }

	    if (!proceeded)
	      {
		inferior_event_handler (INF_EXEC_COMPLETE);
		cmd_done = 1;
	      }

	    /* If we got a TARGET_WAITKIND_NO_RESUMED event, then the
	       previously selected thread is gone. We have two
	       choices - switch to no thread selected, or restore the
	       previously selected thread (now exited). We chose the
	       later, just because that's what GDB used to do. After
	       this, "info threads" says "The current thread <Thread
	       ID 2> has terminated." instead of "No thread
	       selected.". */
	    if (!non_stop
		&& cmd_done
		&& ecs->ws.kind () != TARGET_WAITKIND_NO_RESUMED)
	      restore_thread.dont_restore ();
	  }
      }

    defer_delete_threads.release ();
    defer_bpstat_clear.release ();

    /* No error, don't finish the thread states yet. */
    finish_state.release ();

    disable_commit_resumed.reset_and_commit ();

    /* This scope is used to ensure that readline callbacks are
       reinstalled here. */
  }

  /* If a UI was in sync execution mode, and now isn't, restore its
     prompt (a synchronous execution command has finished, and we're
     ready for input). */
  all_uis_check_sync_execution_done ();

  if (cmd_done
      && exec_done_display_p
      && (inferior_ptid == null_ptid
	  || inferior_thread ()->state != THREAD_RUNNING))
    gdb_printf (_("completed.\n"));
}
4292
4293 /* See infrun.h. */
4294
4295 void
4296 set_step_info (thread_info *tp, struct frame_info *frame,
4297 struct symtab_and_line sal)
4298 {
4299 /* This can be removed once this function no longer implicitly relies on the
4300 inferior_ptid value. */
4301 gdb_assert (inferior_ptid == tp->ptid);
4302
4303 tp->control.step_frame_id = get_frame_id (frame);
4304 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
4305
4306 tp->current_symtab = sal.symtab;
4307 tp->current_line = sal.line;
4308
4309 infrun_debug_printf
4310 ("symtab = %s, line = %d, step_frame_id = %s, step_stack_frame_id = %s",
4311 tp->current_symtab != nullptr ? tp->current_symtab->filename : "<null>",
4312 tp->current_line,
4313 tp->control.step_frame_id.to_string ().c_str (),
4314 tp->control.step_stack_frame_id.to_string ().c_str ());
4315 }
4316
/* Clear context switchable stepping state, i.e., reset all per-thread
   flags tracking an in-progress breakpoint/watchpoint step-over.  */

void
init_thread_stepping_state (struct thread_info *tss)
{
  tss->stepped_breakpoint = 0;
  tss->stepping_over_breakpoint = 0;
  tss->stepping_over_watchpoint = 0;
  tss->step_after_step_resume_breakpoint = 0;
}
4327
/* See infrun.h.  */

void
set_last_target_status (process_stratum_target *target, ptid_t ptid,
			const target_waitstatus &status)
{
  /* Cache the last reported wait event; consumed by
     get_last_target_status and reset by nullify_last_target_wait_ptid.  */
  target_last_proc_target = target;
  target_last_wait_ptid = ptid;
  target_last_waitstatus = status;
}
4338
/* See infrun.h.  */

void
get_last_target_status (process_stratum_target **target, ptid_t *ptid,
			target_waitstatus *status)
{
  /* Each out-parameter is optional; pass nullptr for any piece of the
     cached last wait event the caller isn't interested in.  */
  if (target != nullptr)
    *target = target_last_proc_target;
  if (ptid != nullptr)
    *ptid = target_last_wait_ptid;
  if (status != nullptr)
    *status = target_last_waitstatus;
}
4352
/* See infrun.h.  */

void
nullify_last_target_wait_ptid (void)
{
  /* Reset the cached last wait event set by set_last_target_status.  */
  target_last_proc_target = nullptr;
  target_last_wait_ptid = minus_one_ptid;
  target_last_waitstatus = {};
}
4362
4363 /* Switch thread contexts. */
4364
4365 static void
4366 context_switch (execution_control_state *ecs)
4367 {
4368 if (ecs->ptid != inferior_ptid
4369 && (inferior_ptid == null_ptid
4370 || ecs->event_thread != inferior_thread ()))
4371 {
4372 infrun_debug_printf ("Switching context from %s to %s",
4373 inferior_ptid.to_string ().c_str (),
4374 ecs->ptid.to_string ().c_str ());
4375 }
4376
4377 switch_to_thread (ecs->event_thread);
4378 }
4379
/* If the target can't tell whether we've hit breakpoints
   (target_supports_stopped_by_sw_breakpoint), and we got a SIGTRAP,
   check whether that could have been caused by a breakpoint. If so,
   adjust the PC, per gdbarch_decr_pc_after_break. */

static void
adjust_pc_after_break (struct thread_info *thread,
		       const target_waitstatus &ws)
{
  struct regcache *regcache;
  struct gdbarch *gdbarch;
  CORE_ADDR breakpoint_pc, decr_pc;

  /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
     we aren't, just return.

     We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
     affected by gdbarch_decr_pc_after_break. Other waitkinds which are
     implemented by software breakpoints should be handled through the normal
     breakpoint layer.

     NOTE drow/2004-01-31: On some targets, breakpoints may generate
     different signals (SIGILL or SIGEMT for instance), but it is less
     clear where the PC is pointing afterwards. It may not match
     gdbarch_decr_pc_after_break. I don't know any specific target that
     generates these signals at breakpoints (the code has been in GDB since at
     least 1992) so I can not guess how to handle them here.

     In earlier versions of GDB, a target with
     gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
     watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
     target with both of these set in GDB history, and it seems unlikely to be
     correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */

  if (ws.kind () != TARGET_WAITKIND_STOPPED)
    return;

  if (ws.sig () != GDB_SIGNAL_TRAP)
    return;

  /* In reverse execution, when a breakpoint is hit, the instruction
     under it has already been de-executed. The reported PC always
     points at the breakpoint address, so adjusting it further would
     be wrong. E.g., consider this case on a decr_pc_after_break == 1
     architecture:

     B1 0x08000000 : INSN1
     B2 0x08000001 : INSN2
     0x08000002 : INSN3
     PC -> 0x08000003 : INSN4

     Say you're stopped at 0x08000003 as above. Reverse continuing
     from that point should hit B2 as below. Reading the PC when the
     SIGTRAP is reported should read 0x08000001 and INSN2 should have
     been de-executed already.

     B1 0x08000000 : INSN1
     B2 PC -> 0x08000001 : INSN2
     0x08000002 : INSN3
     0x08000003 : INSN4

     We can't apply the same logic as for forward execution, because
     we would wrongly adjust the PC to 0x08000000, since there's a
     breakpoint at PC - 1. We'd then report a hit on B1, although
     INSN1 hadn't been de-executed yet. Doing nothing is the correct
     behaviour. */
  if (execution_direction == EXEC_REVERSE)
    return;

  /* If the target can tell whether the thread hit a SW breakpoint,
     trust it. Targets that can tell also adjust the PC
     themselves. */
  if (target_supports_stopped_by_sw_breakpoint ())
    return;

  /* Note that relying on whether a breakpoint is planted in memory to
     determine this can fail. E.g., the breakpoint could have been
     removed since. Or the thread could have been told to step an
     instruction the size of a breakpoint instruction, and only
     _after_ was a breakpoint inserted at its address. */

  /* If this target does not decrement the PC after breakpoints, then
     we have nothing to do. */
  regcache = get_thread_regcache (thread);
  gdbarch = regcache->arch ();

  decr_pc = gdbarch_decr_pc_after_break (gdbarch);
  if (decr_pc == 0)
    return;

  const address_space *aspace = regcache->aspace ();

  /* Find the location where (if we've hit a breakpoint) the
     breakpoint would be. */
  breakpoint_pc = regcache_read_pc (regcache) - decr_pc;

  /* If the target can't tell whether a software breakpoint triggered,
     fallback to figuring it out based on breakpoints we think were
     inserted in the target, and on whether the thread was stepped or
     continued. */

  /* Check whether there actually is a software breakpoint inserted at
     that location.

     If in non-stop mode, a race condition is possible where we've
     removed a breakpoint, but stop events for that breakpoint were
     already queued and arrive later. To suppress those spurious
     SIGTRAPs, we keep a list of such breakpoint locations for a bit,
     and retire them after a number of stop events are reported. Note
     this is a heuristic and can thus get confused. The real fix is
     to get the "stopped by SW BP and needs adjustment" info out of
     the target/kernel (and thus never reach here; see above). */
  if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
      || (target_is_non_stop_p ()
	  && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
    {
      gdb::optional<scoped_restore_tmpl<int>> restore_operation_disable;

      if (record_full_is_used ())
	restore_operation_disable.emplace
	  (record_full_gdb_operation_disable_set ());

      /* When using hardware single-step, a SIGTRAP is reported for both
	 a completed single-step and a software breakpoint. Need to
	 differentiate between the two, as the latter needs adjusting
	 but the former does not.

	 The SIGTRAP can be due to a completed hardware single-step only if
	  - we didn't insert software single-step breakpoints
	  - this thread is currently being stepped

	 If any of these events did not occur, we must have stopped due
	 to hitting a software breakpoint, and have to back up to the
	 breakpoint address.

	 As a special case, we could have hardware single-stepped a
	 software breakpoint. In this case (prev_pc == breakpoint_pc),
	 we also need to back up to the breakpoint address. */

      if (thread_has_single_step_breakpoints_set (thread)
	  || !currently_stepping (thread)
	  || (thread->stepped_breakpoint
	      && thread->prev_pc == breakpoint_pc))
	regcache_write_pc (regcache, breakpoint_pc);
    }
}
4526
4527 static bool
4528 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
4529 {
4530 for (frame = get_prev_frame (frame);
4531 frame != NULL;
4532 frame = get_prev_frame (frame))
4533 {
4534 if (frame_id_eq (get_frame_id (frame), step_frame_id))
4535 return true;
4536
4537 if (get_frame_type (frame) != INLINE_FRAME)
4538 break;
4539 }
4540
4541 return false;
4542 }
4543
4544 /* Look for an inline frame that is marked for skip.
4545 If PREV_FRAME is TRUE start at the previous frame,
4546 otherwise start at the current frame. Stop at the
4547 first non-inline frame, or at the frame where the
4548 step started. */
4549
4550 static bool
4551 inline_frame_is_marked_for_skip (bool prev_frame, struct thread_info *tp)
4552 {
4553 struct frame_info *frame = get_current_frame ();
4554
4555 if (prev_frame)
4556 frame = get_prev_frame (frame);
4557
4558 for (; frame != NULL; frame = get_prev_frame (frame))
4559 {
4560 const char *fn = NULL;
4561 symtab_and_line sal;
4562 struct symbol *sym;
4563
4564 if (frame_id_eq (get_frame_id (frame), tp->control.step_frame_id))
4565 break;
4566 if (get_frame_type (frame) != INLINE_FRAME)
4567 break;
4568
4569 sal = find_frame_sal (frame);
4570 sym = get_frame_function (frame);
4571
4572 if (sym != NULL)
4573 fn = sym->print_name ();
4574
4575 if (sal.line != 0
4576 && function_name_is_marked_for_skip (fn, sal))
4577 return true;
4578 }
4579
4580 return false;
4581 }
4582
4583 /* If the event thread has the stop requested flag set, pretend it
4584 stopped for a GDB_SIGNAL_0 (i.e., as if it stopped due to
4585 target_stop). */
4586
4587 static bool
4588 handle_stop_requested (struct execution_control_state *ecs)
4589 {
4590 if (ecs->event_thread->stop_requested)
4591 {
4592 ecs->ws.set_stopped (GDB_SIGNAL_0);
4593 handle_signal_stop (ecs);
4594 return true;
4595 }
4596 return false;
4597 }
4598
/* Auxiliary function that handles syscall entry/return events.
   It returns true if the inferior should keep going (and GDB
   should ignore the event), or false if the event deserves to be
   processed. */

static bool
handle_syscall_event (struct execution_control_state *ecs)
{
  struct regcache *regcache;
  int syscall_number;

  context_switch (ecs);

  regcache = get_thread_regcache (ecs->event_thread);
  syscall_number = ecs->ws.syscall_number ();
  ecs->event_thread->set_stop_pc (regcache_read_pc (regcache));

  /* Only build a bpstat if a syscall catchpoint could match; this is
     what decides whether the event causes a user-visible stop.  */
  if (catch_syscall_enabled () > 0
      && catching_syscall_number (syscall_number))
    {
      infrun_debug_printf ("syscall number=%d", syscall_number);

      ecs->event_thread->control.stop_bpstat
	= bpstat_stop_status_nowatch (regcache->aspace (),
				      ecs->event_thread->stop_pc (),
				      ecs->event_thread, ecs->ws);

      /* An explicit stop request takes precedence over the
	 catchpoint check.  */
      if (handle_stop_requested (ecs))
	return false;

      if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
	{
	  /* Catchpoint hit. */
	  return false;
	}
    }

  if (handle_stop_requested (ecs))
    return false;

  /* If no catchpoint triggered for this, then keep going. */
  keep_going (ecs);

  return true;
}
4644
4645 /* Lazily fill in the execution_control_state's stop_func_* fields. */
4646
4647 static void
4648 fill_in_stop_func (struct gdbarch *gdbarch,
4649 struct execution_control_state *ecs)
4650 {
4651 if (!ecs->stop_func_filled_in)
4652 {
4653 const block *block;
4654 const general_symbol_info *gsi;
4655
4656 /* Don't care about return value; stop_func_start and stop_func_name
4657 will both be 0 if it doesn't work. */
4658 find_pc_partial_function_sym (ecs->event_thread->stop_pc (),
4659 &gsi,
4660 &ecs->stop_func_start,
4661 &ecs->stop_func_end,
4662 &block);
4663 ecs->stop_func_name = gsi == nullptr ? nullptr : gsi->print_name ();
4664
4665 /* The call to find_pc_partial_function, above, will set
4666 stop_func_start and stop_func_end to the start and end
4667 of the range containing the stop pc. If this range
4668 contains the entry pc for the block (which is always the
4669 case for contiguous blocks), advance stop_func_start past
4670 the function's start offset and entrypoint. Note that
4671 stop_func_start is NOT advanced when in a range of a
4672 non-contiguous block that does not contain the entry pc. */
4673 if (block != nullptr
4674 && ecs->stop_func_start <= BLOCK_ENTRY_PC (block)
4675 && BLOCK_ENTRY_PC (block) < ecs->stop_func_end)
4676 {
4677 ecs->stop_func_start
4678 += gdbarch_deprecated_function_start_offset (gdbarch);
4679
4680 if (gdbarch_skip_entrypoint_p (gdbarch))
4681 ecs->stop_func_start
4682 = gdbarch_skip_entrypoint (gdbarch, ecs->stop_func_start);
4683 }
4684
4685 ecs->stop_func_filled_in = 1;
4686 }
4687 }
4688
4689
4690 /* Return the STOP_SOON field of the inferior pointed at by ECS. */
4691
4692 static enum stop_kind
4693 get_inferior_stop_soon (execution_control_state *ecs)
4694 {
4695 struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
4696
4697 gdb_assert (inf != NULL);
4698 return inf->control.stop_soon;
4699 }
4700
4701 /* Poll for one event out of the current target. Store the resulting
4702 waitstatus in WS, and return the event ptid. Does not block. */
4703
4704 static ptid_t
4705 poll_one_curr_target (struct target_waitstatus *ws)
4706 {
4707 ptid_t event_ptid;
4708
4709 overlay_cache_invalid = 1;
4710
4711 /* Flush target cache before starting to handle each event.
4712 Target was running and cache could be stale. This is just a
4713 heuristic. Running threads may modify target memory, but we
4714 don't get any event. */
4715 target_dcache_invalidate ();
4716
4717 event_ptid = target_wait (minus_one_ptid, ws, TARGET_WNOHANG);
4718
4719 if (debug_infrun)
4720 print_target_wait_results (minus_one_ptid, event_ptid, *ws);
4721
4722 return event_ptid;
4723 }
4724
/* Wait for one event out of any target.  Poll each async target that
   still has executing threads; if none reports an event, block in
   select on the targets' event file descriptors and try again.
   Returns a TARGET_WAITKIND_NO_RESUMED event when no waitable target
   remains.  */

static wait_one_event
wait_one ()
{
  while (1)
    {
      /* First pass: poll each candidate target once without
	 blocking.  */
      for (inferior *inf : all_inferiors ())
	{
	  process_stratum_target *target = inf->process_target ();
	  if (target == NULL
	      || !target->is_async_p ()
	      || !target->threads_executing)
	    continue;

	  switch_to_inferior_no_thread (inf);

	  wait_one_event event;
	  event.target = target;
	  event.ptid = poll_one_curr_target (&event.ws);

	  if (event.ws.kind () == TARGET_WAITKIND_NO_RESUMED)
	    {
	      /* If nothing is resumed, remove the target from the
		 event loop.  */
	      target_async (0);
	    }
	  else if (event.ws.kind () != TARGET_WAITKIND_IGNORE)
	    return event;
	}

      /* Block waiting for some event.  */

      fd_set readfds;
      int nfds = 0;

      FD_ZERO (&readfds);

      /* Build the select set from each waitable target's event file
	 descriptor.  */
      for (inferior *inf : all_inferiors ())
	{
	  process_stratum_target *target = inf->process_target ();
	  if (target == NULL
	      || !target->is_async_p ()
	      || !target->threads_executing)
	    continue;

	  int fd = target->async_wait_fd ();
	  FD_SET (fd, &readfds);
	  if (nfds <= fd)
	    nfds = fd + 1;
	}

      if (nfds == 0)
	{
	  /* No waitable targets left.  All must be stopped.  */
	  target_waitstatus ws;
	  ws.set_no_resumed ();
	  return {NULL, minus_one_ptid, std::move (ws)};
	}

      QUIT;

      int numfds = interruptible_select (nfds, &readfds, 0, NULL, 0);
      if (numfds < 0)
	{
	  /* EINTR just means the wait was interrupted; retry the
	     whole poll/select loop.  */
	  if (errno == EINTR)
	    continue;
	  else
	    perror_with_name ("interruptible_select");
	}
    }
}
4797
/* Save the thread's event and stop reason to process it later.  */

static void
save_waitstatus (struct thread_info *tp, const target_waitstatus &ws)
{
  infrun_debug_printf ("saving status %s for %s",
		       ws.to_string ().c_str (),
		       tp->ptid.to_string ().c_str ());

  /* Record for later.  */
  tp->set_pending_waitstatus (ws);

  /* For a SIGTRAP stop, also record the thread's stop reason now, so
     that information is not lost by the time the pending event is
     eventually consumed.  */
  if (ws.kind () == TARGET_WAITKIND_STOPPED
      && ws.sig () == GDB_SIGNAL_TRAP)
    {
      struct regcache *regcache = get_thread_regcache (tp);
      const address_space *aspace = regcache->aspace ();
      CORE_ADDR pc = regcache_read_pc (regcache);

      adjust_pc_after_break (tp, tp->pending_waitstatus ());

      /* The target_stopped_by_* queries below operate on the current
	 thread; temporarily switch to TP.  */
      scoped_restore_current_thread restore_thread;
      switch_to_thread (tp);

      /* Classify the trap.  Prefer what the target reports directly;
	 when the target cannot report the stop reason, fall back to
	 checking whether GDB has a breakpoint inserted at PC, or
	 whether we were single-stepping.  The order of these checks
	 matters.  */
      if (target_stopped_by_watchpoint ())
	tp->set_stop_reason (TARGET_STOPPED_BY_WATCHPOINT);
      else if (target_supports_stopped_by_sw_breakpoint ()
	       && target_stopped_by_sw_breakpoint ())
	tp->set_stop_reason (TARGET_STOPPED_BY_SW_BREAKPOINT);
      else if (target_supports_stopped_by_hw_breakpoint ()
	       && target_stopped_by_hw_breakpoint ())
	tp->set_stop_reason (TARGET_STOPPED_BY_HW_BREAKPOINT);
      else if (!target_supports_stopped_by_hw_breakpoint ()
	       && hardware_breakpoint_inserted_here_p (aspace, pc))
	tp->set_stop_reason (TARGET_STOPPED_BY_HW_BREAKPOINT);
      else if (!target_supports_stopped_by_sw_breakpoint ()
	       && software_breakpoint_inserted_here_p (aspace, pc))
	tp->set_stop_reason (TARGET_STOPPED_BY_SW_BREAKPOINT);
      else if (!thread_has_single_step_breakpoints_set (tp)
	       && currently_stepping (tp))
	tp->set_stop_reason (TARGET_STOPPED_BY_SINGLE_STEP);
    }
}
4841
4842 /* Mark the non-executing threads accordingly. In all-stop, all
4843 threads of all processes are stopped when we get any event
4844 reported. In non-stop mode, only the event thread stops. */
4845
4846 static void
4847 mark_non_executing_threads (process_stratum_target *target,
4848 ptid_t event_ptid,
4849 const target_waitstatus &ws)
4850 {
4851 ptid_t mark_ptid;
4852
4853 if (!target_is_non_stop_p ())
4854 mark_ptid = minus_one_ptid;
4855 else if (ws.kind () == TARGET_WAITKIND_SIGNALLED
4856 || ws.kind () == TARGET_WAITKIND_EXITED)
4857 {
4858 /* If we're handling a process exit in non-stop mode, even
4859 though threads haven't been deleted yet, one would think
4860 that there is nothing to do, as threads of the dead process
4861 will be soon deleted, and threads of any other process were
4862 left running. However, on some targets, threads survive a
4863 process exit event. E.g., for the "checkpoint" command,
4864 when the current checkpoint/fork exits, linux-fork.c
4865 automatically switches to another fork from within
4866 target_mourn_inferior, by associating the same
4867 inferior/thread to another fork. We haven't mourned yet at
4868 this point, but we must mark any threads left in the
4869 process as not-executing so that finish_thread_state marks
4870 them stopped (in the user's perspective) if/when we present
4871 the stop to the user. */
4872 mark_ptid = ptid_t (event_ptid.pid ());
4873 }
4874 else
4875 mark_ptid = event_ptid;
4876
4877 set_executing (target, mark_ptid, false);
4878
4879 /* Likewise the resumed flag. */
4880 set_resumed (target, mark_ptid, false);
4881 }
4882
/* Handle one event after stopping threads.  If the eventing thread
   reports back any interesting event, we leave it pending.  If the
   eventing thread was in the middle of a displaced step, we
   cancel/finish it, and unless the thread's inferior is being
   detached, put the thread back in the step-over chain.  Returns true
   if there are no resumed threads left in the target (thus there's no
   point in waiting further), false otherwise.  */

static bool
handle_one (const wait_one_event &event)
{
  infrun_debug_printf
    ("%s %s", event.ws.to_string ().c_str (),
     event.ptid.to_string ().c_str ());

  if (event.ws.kind () == TARGET_WAITKIND_NO_RESUMED)
    {
      /* All resumed threads exited.  */
      return true;
    }
  else if (event.ws.kind () == TARGET_WAITKIND_THREAD_EXITED
	   || event.ws.kind () == TARGET_WAITKIND_EXITED
	   || event.ws.kind () == TARGET_WAITKIND_SIGNALLED)
    {
      /* One thread/process exited/signalled.  */

      thread_info *t = nullptr;

      /* The target may have reported just a pid.  If so, try
	 the first non-exited thread.  */
      if (event.ptid.is_pid ())
	{
	  int pid = event.ptid.pid ();
	  inferior *inf = find_inferior_pid (event.target, pid);
	  for (thread_info *tp : inf->non_exited_threads ())
	    {
	      t = tp;
	      break;
	    }

	  /* If there is no available thread, the event would
	     have to be appended to a per-inferior event list,
	     which does not exist (and if it did, we'd have
	     to adjust run control command to be able to
	     resume such an inferior).  We assert here instead
	     of going into an infinite loop.  */
	  gdb_assert (t != nullptr);

	  infrun_debug_printf
	    ("using %s", t->ptid.to_string ().c_str ());
	}
      else
	{
	  t = find_thread_ptid (event.target, event.ptid);
	  /* Check if this is the first time we see this thread.
	     Don't bother adding if it individually exited.  */
	  if (t == nullptr
	      && event.ws.kind () != TARGET_WAITKIND_THREAD_EXITED)
	    t = add_thread (event.target, event.ptid);
	}

      if (t != nullptr)
	{
	  /* Set the threads as non-executing to avoid
	     another stop attempt on them.  */
	  switch_to_thread_no_regs (t);
	  mark_non_executing_threads (event.target, event.ptid,
				      event.ws);
	  save_waitstatus (t, event.ws);
	  t->stop_requested = false;
	}
    }
  else
    {
      /* A stop (or other) event for a single thread.  */
      thread_info *t = find_thread_ptid (event.target, event.ptid);
      if (t == NULL)
	t = add_thread (event.target, event.ptid);

      t->stop_requested = 0;
      t->set_executing (false);
      t->set_resumed (false);
      t->control.may_range_step = 0;

      /* This may be the first time we see the inferior report
	 a stop.  */
      if (t->inf->needs_setup)
	{
	  switch_to_thread_no_regs (t);
	  setup_inferior (0);
	}

      if (event.ws.kind () == TARGET_WAITKIND_STOPPED
	  && event.ws.sig () == GDB_SIGNAL_0)
	{
	  /* We caught the event that we intended to catch, so
	     there's no event to save as pending.  */

	  if (displaced_step_finish (t, GDB_SIGNAL_0)
	      == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED)
	    {
	      /* Add it back to the step-over queue.  */
	      infrun_debug_printf
		("displaced-step of %s canceled",
		 t->ptid.to_string ().c_str ());

	      t->control.trap_expected = 0;
	      if (!t->inf->detaching)
		global_thread_step_over_chain_enqueue (t);
	    }
	}
      else
	{
	  enum gdb_signal sig;
	  struct regcache *regcache;

	  infrun_debug_printf
	    ("target_wait %s, saving status for %s",
	     event.ws.to_string ().c_str (),
	     t->ptid.to_string ().c_str ());

	  /* Record for later.  */
	  save_waitstatus (t, event.ws);

	  /* Only pass a real signal through to
	     displaced_step_finish if this actually was a signal
	     stop.  */
	  sig = (event.ws.kind () == TARGET_WAITKIND_STOPPED
		 ? event.ws.sig () : GDB_SIGNAL_0);

	  if (displaced_step_finish (t, sig)
	      == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED)
	    {
	      /* Add it back to the step-over queue.  */
	      t->control.trap_expected = 0;
	      if (!t->inf->detaching)
		global_thread_step_over_chain_enqueue (t);
	    }

	  regcache = get_thread_regcache (t);
	  t->set_stop_pc (regcache_read_pc (regcache));

	  infrun_debug_printf ("saved stop_pc=%s for %s "
			       "(currently_stepping=%d)",
			       paddress (target_gdbarch (), t->stop_pc ()),
			       t->ptid.to_string ().c_str (),
			       currently_stepping (t));
	}
    }

  return false;
}
5031
/* See infrun.h.  */

void
stop_all_threads (const char *reason, inferior *inf)
{
  /* We may need multiple passes to discover all threads.  */
  int pass;
  int iterations = 0;

  gdb_assert (exists_non_stop_target ());

  INFRUN_SCOPED_DEBUG_START_END ("reason=%s, inf=%d", reason,
				 inf != nullptr ? inf->num : -1);

  scoped_restore_current_thread restore_thread;

  /* Enable thread events on relevant targets.  */
  for (auto *target : all_non_exited_process_targets ())
    {
      /* If an inferior was given, only touch its target.  */
      if (inf != nullptr && inf->process_target () != target)
	continue;

      switch_to_target_no_thread (target);
      target_thread_events (true);
    }

  SCOPE_EXIT
    {
      /* Disable thread events on relevant targets.  */
      for (auto *target : all_non_exited_process_targets ())
	{
	  if (inf != nullptr && inf->process_target () != target)
	    continue;

	  switch_to_target_no_thread (target);
	  target_thread_events (false);
	}

      /* Use debug_prefixed_printf directly to get a meaningful function
	 name.  */
      if (debug_infrun)
	debug_prefixed_printf ("infrun", "stop_all_threads", "done");
    };

  /* Request threads to stop, and then wait for the stops.  Because
     threads we already know about can spawn more threads while we're
     trying to stop them, and we only learn about new threads when we
     update the thread list, do this in a loop, and keep iterating
     until two passes find no threads that need to be stopped.  */
  for (pass = 0; pass < 2; pass++, iterations++)
    {
      infrun_debug_printf ("pass=%d, iterations=%d", pass, iterations);
      while (1)
	{
	  int waits_needed = 0;

	  /* Refresh thread lists so we learn about threads spawned
	     since the previous iteration.  */
	  for (auto *target : all_non_exited_process_targets ())
	    {
	      if (inf != nullptr && inf->process_target () != target)
		continue;

	      switch_to_target_no_thread (target);
	      update_thread_list ();
	    }

	  /* Go through all threads looking for threads that we need
	     to tell the target to stop.  */
	  for (thread_info *t : all_non_exited_threads ())
	    {
	      if (inf != nullptr && t->inf != inf)
		continue;

	      /* For a single-target setting with an all-stop target,
		 we would not even arrive here.  For a multi-target
		 setting, until GDB is able to handle a mixture of
		 all-stop and non-stop targets, simply skip all-stop
		 targets' threads.  This should be fine due to the
		 protection of 'check_multi_target_resumption'.  */

	      switch_to_thread_no_regs (t);
	      if (!target_is_non_stop_p ())
		continue;

	      if (t->executing ())
		{
		  /* If already stopping, don't request a stop again.
		     We just haven't seen the notification yet.  */
		  if (!t->stop_requested)
		    {
		      infrun_debug_printf (" %s executing, need stop",
					   t->ptid.to_string ().c_str ());
		      target_stop (t->ptid);
		      t->stop_requested = 1;
		    }
		  else
		    {
		      infrun_debug_printf (" %s executing, already stopping",
					   t->ptid.to_string ().c_str ());
		    }

		  if (t->stop_requested)
		    waits_needed++;
		}
	      else
		{
		  infrun_debug_printf (" %s not executing",
				       t->ptid.to_string ().c_str ());

		  /* The thread may be not executing, but still be
		     resumed with a pending status to process.  */
		  t->set_resumed (false);
		}
	    }

	  /* All known threads are stopped; this pass converged.  */
	  if (waits_needed == 0)
	    break;

	  /* If we find new threads on the second iteration, restart
	     over.  We want to see two iterations in a row with all
	     threads stopped.  */
	  if (pass > 0)
	    pass = -1;

	  /* Consume one event per stop we requested.  */
	  for (int i = 0; i < waits_needed; i++)
	    {
	      wait_one_event event = wait_one ();
	      if (handle_one (event))
		break;
	    }
	}
    }
}
5164
/* Handle a TARGET_WAITKIND_NO_RESUMED event.  Returns true if the
   event was consumed here (caller should just keep waiting), false
   if the event should be reported to the user.  */

static bool
handle_no_resumed (struct execution_control_state *ecs)
{
  if (target_can_async_p ())
    {
      bool any_sync = false;

      /* Check whether any UI is synchronously blocked on an
	 execution command.  */
      for (ui *ui : all_uis ())
	{
	  if (ui->prompt_state == PROMPT_BLOCKED)
	    {
	      any_sync = true;
	      break;
	    }
	}
      if (!any_sync)
	{
	  /* There were no unwaited-for children left in the target, but,
	     we're not synchronously waiting for events either.  Just
	     ignore.  */

	  infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED (ignoring: bg)");
	  prepare_to_wait (ecs);
	  return true;
	}
    }

  /* Otherwise, if we were running a synchronous execution command, we
     may need to cancel it and give the user back the terminal.

     In non-stop mode, the target can't tell whether we've already
     consumed previous stop events, so it can end up sending us a
     no-resumed event like so:

       #0 - thread 1 is left stopped

       #1 - thread 2 is resumed and hits breakpoint
	      -> TARGET_WAITKIND_STOPPED

       #2 - thread 3 is resumed and exits
	    this is the last resumed thread, so
	      -> TARGET_WAITKIND_NO_RESUMED

       #3 - gdb processes stop for thread 2 and decides to re-resume
	    it.

       #4 - gdb processes the TARGET_WAITKIND_NO_RESUMED event.
	    thread 2 is now resumed, so the event should be ignored.

     IOW, if the stop for thread 2 doesn't end a foreground command,
     then we need to ignore the following TARGET_WAITKIND_NO_RESUMED
     event.  But it could be that the event meant that thread 2 itself
     (or whatever other thread was the last resumed thread) exited.

     To address this we refresh the thread list and check whether we
     have resumed threads _now_.  In the example above, this removes
     thread 3 from the thread list.  If thread 2 was re-resumed, we
     ignore this event.  If we find no thread resumed, then we cancel
     the synchronous command and show "no unwaited-for " to the
     user.  */

  inferior *curr_inf = current_inferior ();

  scoped_restore_current_thread restore_thread;

  /* Refresh every target's thread list before deciding.  */
  for (auto *target : all_non_exited_process_targets ())
    {
      switch_to_target_no_thread (target);
      update_thread_list ();
    }

  /* If:

       - the current target has no thread executing, and
       - the current inferior is native, and
       - the current inferior is the one which has the terminal, and
       - we did nothing,

     then a Ctrl-C from this point on would remain stuck in the
     kernel, until a thread resumes and dequeues it.  That would
     result in the GDB CLI not reacting to Ctrl-C, not able to
     interrupt the program.  To address this, if the current inferior
     no longer has any thread executing, we give the terminal to some
     other inferior that has at least one thread executing.  */
  bool swap_terminal = true;

  /* Whether to ignore this TARGET_WAITKIND_NO_RESUMED event, or
     whether to report it to the user.  */
  bool ignore_event = false;

  /* One walk over the threads answers both questions: terminal
     ownership and whether any thread is still resumed.  */
  for (thread_info *thread : all_non_exited_threads ())
    {
      if (swap_terminal && thread->executing ())
	{
	  if (thread->inf != curr_inf)
	    {
	      target_terminal::ours ();

	      switch_to_thread (thread);
	      target_terminal::inferior ();
	    }
	  swap_terminal = false;
	}

      if (!ignore_event && thread->resumed ())
	{
	  /* Either there were no unwaited-for children left in the
	     target at some point, but there are now, or some target
	     other than the eventing one has unwaited-for children
	     left.  Just ignore.  */
	  infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED "
			       "(ignoring: found resumed)");

	  ignore_event = true;
	}

      /* Both decisions made; no need to look at more threads.  */
      if (ignore_event && !swap_terminal)
	break;
    }

  if (ignore_event)
    {
      switch_to_inferior_no_thread (curr_inf);
      prepare_to_wait (ecs);
      return true;
    }

  /* Go ahead and report the event.  */
  return false;
}
5297
5298 /* Given an execution control state that has been freshly filled in by
5299 an event from the inferior, figure out what it means and take
5300 appropriate action.
5301
5302 The alternatives are:
5303
5304 1) stop_waiting and return; to really stop and return to the
5305 debugger.
5306
5307 2) keep_going and return; to wait for the next event (set
5308 ecs->event_thread->stepping_over_breakpoint to 1 to single step
5309 once). */
5310
5311 static void
5312 handle_inferior_event (struct execution_control_state *ecs)
5313 {
5314 /* Make sure that all temporary struct value objects that were
5315 created during the handling of the event get deleted at the
5316 end. */
5317 scoped_value_mark free_values;
5318
5319 infrun_debug_printf ("%s", ecs->ws.to_string ().c_str ());
5320
5321 if (ecs->ws.kind () == TARGET_WAITKIND_IGNORE)
5322 {
5323 /* We had an event in the inferior, but we are not interested in
5324 handling it at this level. The lower layers have already
5325 done what needs to be done, if anything.
5326
5327 One of the possible circumstances for this is when the
5328 inferior produces output for the console. The inferior has
5329 not stopped, and we are ignoring the event. Another possible
5330 circumstance is any event which the lower level knows will be
5331 reported multiple times without an intervening resume. */
5332 prepare_to_wait (ecs);
5333 return;
5334 }
5335
5336 if (ecs->ws.kind () == TARGET_WAITKIND_THREAD_EXITED)
5337 {
5338 prepare_to_wait (ecs);
5339 return;
5340 }
5341
5342 if (ecs->ws.kind () == TARGET_WAITKIND_NO_RESUMED
5343 && handle_no_resumed (ecs))
5344 return;
5345
5346 /* Cache the last target/ptid/waitstatus. */
5347 set_last_target_status (ecs->target, ecs->ptid, ecs->ws);
5348
5349 /* Always clear state belonging to the previous time we stopped. */
5350 stop_stack_dummy = STOP_NONE;
5351
5352 if (ecs->ws.kind () == TARGET_WAITKIND_NO_RESUMED)
5353 {
5354 /* No unwaited-for children left. IOW, all resumed children
5355 have exited. */
5356 stop_print_frame = false;
5357 stop_waiting (ecs);
5358 return;
5359 }
5360
5361 if (ecs->ws.kind () != TARGET_WAITKIND_EXITED
5362 && ecs->ws.kind () != TARGET_WAITKIND_SIGNALLED)
5363 {
5364 ecs->event_thread = find_thread_ptid (ecs->target, ecs->ptid);
5365 /* If it's a new thread, add it to the thread database. */
5366 if (ecs->event_thread == NULL)
5367 ecs->event_thread = add_thread (ecs->target, ecs->ptid);
5368
5369 /* Disable range stepping. If the next step request could use a
5370 range, this will be end up re-enabled then. */
5371 ecs->event_thread->control.may_range_step = 0;
5372 }
5373
5374 /* Dependent on valid ECS->EVENT_THREAD. */
5375 adjust_pc_after_break (ecs->event_thread, ecs->ws);
5376
5377 /* Dependent on the current PC value modified by adjust_pc_after_break. */
5378 reinit_frame_cache ();
5379
5380 breakpoint_retire_moribund ();
5381
5382 /* First, distinguish signals caused by the debugger from signals
5383 that have to do with the program's own actions. Note that
5384 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
5385 on the operating system version. Here we detect when a SIGILL or
5386 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
5387 something similar for SIGSEGV, since a SIGSEGV will be generated
5388 when we're trying to execute a breakpoint instruction on a
5389 non-executable stack. This happens for call dummy breakpoints
5390 for architectures like SPARC that place call dummies on the
5391 stack. */
5392 if (ecs->ws.kind () == TARGET_WAITKIND_STOPPED
5393 && (ecs->ws.sig () == GDB_SIGNAL_ILL
5394 || ecs->ws.sig () == GDB_SIGNAL_SEGV
5395 || ecs->ws.sig () == GDB_SIGNAL_EMT))
5396 {
5397 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
5398
5399 if (breakpoint_inserted_here_p (regcache->aspace (),
5400 regcache_read_pc (regcache)))
5401 {
5402 infrun_debug_printf ("Treating signal as SIGTRAP");
5403 ecs->ws.set_stopped (GDB_SIGNAL_TRAP);
5404 }
5405 }
5406
5407 mark_non_executing_threads (ecs->target, ecs->ptid, ecs->ws);
5408
5409 switch (ecs->ws.kind ())
5410 {
5411 case TARGET_WAITKIND_LOADED:
5412 {
5413 context_switch (ecs);
5414 /* Ignore gracefully during startup of the inferior, as it might
5415 be the shell which has just loaded some objects, otherwise
5416 add the symbols for the newly loaded objects. Also ignore at
5417 the beginning of an attach or remote session; we will query
5418 the full list of libraries once the connection is
5419 established. */
5420
5421 stop_kind stop_soon = get_inferior_stop_soon (ecs);
5422 if (stop_soon == NO_STOP_QUIETLY)
5423 {
5424 struct regcache *regcache;
5425
5426 regcache = get_thread_regcache (ecs->event_thread);
5427
5428 handle_solib_event ();
5429
5430 ecs->event_thread->set_stop_pc (regcache_read_pc (regcache));
5431 ecs->event_thread->control.stop_bpstat
5432 = bpstat_stop_status_nowatch (regcache->aspace (),
5433 ecs->event_thread->stop_pc (),
5434 ecs->event_thread, ecs->ws);
5435
5436 if (handle_stop_requested (ecs))
5437 return;
5438
5439 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
5440 {
5441 /* A catchpoint triggered. */
5442 process_event_stop_test (ecs);
5443 return;
5444 }
5445
5446 /* If requested, stop when the dynamic linker notifies
5447 gdb of events. This allows the user to get control
5448 and place breakpoints in initializer routines for
5449 dynamically loaded objects (among other things). */
5450 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
5451 if (stop_on_solib_events)
5452 {
5453 /* Make sure we print "Stopped due to solib-event" in
5454 normal_stop. */
5455 stop_print_frame = true;
5456
5457 stop_waiting (ecs);
5458 return;
5459 }
5460 }
5461
5462 /* If we are skipping through a shell, or through shared library
5463 loading that we aren't interested in, resume the program. If
5464 we're running the program normally, also resume. */
5465 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
5466 {
5467 /* Loading of shared libraries might have changed breakpoint
5468 addresses. Make sure new breakpoints are inserted. */
5469 if (stop_soon == NO_STOP_QUIETLY)
5470 insert_breakpoints ();
5471 resume (GDB_SIGNAL_0);
5472 prepare_to_wait (ecs);
5473 return;
5474 }
5475
5476 /* But stop if we're attaching or setting up a remote
5477 connection. */
5478 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
5479 || stop_soon == STOP_QUIETLY_REMOTE)
5480 {
5481 infrun_debug_printf ("quietly stopped");
5482 stop_waiting (ecs);
5483 return;
5484 }
5485
5486 internal_error (__FILE__, __LINE__,
5487 _("unhandled stop_soon: %d"), (int) stop_soon);
5488 }
5489
5490 case TARGET_WAITKIND_SPURIOUS:
5491 if (handle_stop_requested (ecs))
5492 return;
5493 context_switch (ecs);
5494 resume (GDB_SIGNAL_0);
5495 prepare_to_wait (ecs);
5496 return;
5497
5498 case TARGET_WAITKIND_THREAD_CREATED:
5499 if (handle_stop_requested (ecs))
5500 return;
5501 context_switch (ecs);
5502 if (!switch_back_to_stepped_thread (ecs))
5503 keep_going (ecs);
5504 return;
5505
5506 case TARGET_WAITKIND_EXITED:
5507 case TARGET_WAITKIND_SIGNALLED:
5508 {
5509 /* Depending on the system, ecs->ptid may point to a thread or
5510 to a process. On some targets, target_mourn_inferior may
5511 need to have access to the just-exited thread. That is the
5512 case of GNU/Linux's "checkpoint" support, for example.
5513 Call the switch_to_xxx routine as appropriate. */
5514 thread_info *thr = find_thread_ptid (ecs->target, ecs->ptid);
5515 if (thr != nullptr)
5516 switch_to_thread (thr);
5517 else
5518 {
5519 inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
5520 switch_to_inferior_no_thread (inf);
5521 }
5522 }
5523 handle_vfork_child_exec_or_exit (0);
5524 target_terminal::ours (); /* Must do this before mourn anyway. */
5525
5526 /* Clearing any previous state of convenience variables. */
5527 clear_exit_convenience_vars ();
5528
5529 if (ecs->ws.kind () == TARGET_WAITKIND_EXITED)
5530 {
5531 /* Record the exit code in the convenience variable $_exitcode, so
5532 that the user can inspect this again later. */
5533 set_internalvar_integer (lookup_internalvar ("_exitcode"),
5534 (LONGEST) ecs->ws.exit_status ());
5535
5536 /* Also record this in the inferior itself. */
5537 current_inferior ()->has_exit_code = 1;
5538 current_inferior ()->exit_code = (LONGEST) ecs->ws.exit_status ();
5539
5540 /* Support the --return-child-result option. */
5541 return_child_result_value = ecs->ws.exit_status ();
5542
5543 gdb::observers::exited.notify (ecs->ws.exit_status ());
5544 }
5545 else
5546 {
5547 struct gdbarch *gdbarch = current_inferior ()->gdbarch;
5548
5549 if (gdbarch_gdb_signal_to_target_p (gdbarch))
5550 {
5551 /* Set the value of the internal variable $_exitsignal,
5552 which holds the signal uncaught by the inferior. */
5553 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
5554 gdbarch_gdb_signal_to_target (gdbarch,
5555 ecs->ws.sig ()));
5556 }
5557 else
5558 {
5559 /* We don't have access to the target's method used for
5560 converting between signal numbers (GDB's internal
5561 representation <-> target's representation).
5562 Therefore, we cannot do a good job at displaying this
5563 information to the user. It's better to just warn
5564 her about it (if infrun debugging is enabled), and
5565 give up. */
5566 infrun_debug_printf ("Cannot fill $_exitsignal with the correct "
5567 "signal number.");
5568 }
5569
5570 gdb::observers::signal_exited.notify (ecs->ws.sig ());
5571 }
5572
5573 gdb_flush (gdb_stdout);
5574 target_mourn_inferior (inferior_ptid);
5575 stop_print_frame = false;
5576 stop_waiting (ecs);
5577 return;
5578
5579 case TARGET_WAITKIND_FORKED:
5580 case TARGET_WAITKIND_VFORKED:
5581 /* Check whether the inferior is displaced stepping. */
5582 {
5583 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
5584 struct gdbarch *gdbarch = regcache->arch ();
5585 inferior *parent_inf = find_inferior_ptid (ecs->target, ecs->ptid);
5586
5587 /* If this is a fork (child gets its own address space copy)
5588 and some displaced step buffers were in use at the time of
5589 the fork, restore the displaced step buffer bytes in the
5590 child process.
5591
5592 Architectures which support displaced stepping and fork
5593 events must supply an implementation of
5594 gdbarch_displaced_step_restore_all_in_ptid. This is not
5595 enforced during gdbarch validation to support architectures
5596 which support displaced stepping but not forks. */
5597 if (ecs->ws.kind () == TARGET_WAITKIND_FORKED
5598 && gdbarch_supports_displaced_stepping (gdbarch))
5599 gdbarch_displaced_step_restore_all_in_ptid
5600 (gdbarch, parent_inf, ecs->ws.child_ptid ());
5601
5602 /* If displaced stepping is supported, and thread ecs->ptid is
5603 displaced stepping. */
5604 if (displaced_step_in_progress_thread (ecs->event_thread))
5605 {
5606 struct regcache *child_regcache;
5607 CORE_ADDR parent_pc;
5608
5609 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
5610 indicating that the displaced stepping of syscall instruction
5611 has been done. Perform cleanup for parent process here. Note
5612 that this operation also cleans up the child process for vfork,
5613 because their pages are shared. */
5614 displaced_step_finish (ecs->event_thread, GDB_SIGNAL_TRAP);
5615 /* Start a new step-over in another thread if there's one
5616 that needs it. */
5617 start_step_over ();
5618
5619 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
5620 the child's PC is also within the scratchpad. Set the child's PC
5621 to the parent's PC value, which has already been fixed up.
5622 FIXME: we use the parent's aspace here, although we're touching
5623 the child, because the child hasn't been added to the inferior
5624 list yet at this point. */
5625
5626 child_regcache
5627 = get_thread_arch_aspace_regcache (parent_inf->process_target (),
5628 ecs->ws.child_ptid (),
5629 gdbarch,
5630 parent_inf->aspace);
5631 /* Read PC value of parent process. */
5632 parent_pc = regcache_read_pc (regcache);
5633
5634 displaced_debug_printf ("write child pc from %s to %s",
5635 paddress (gdbarch,
5636 regcache_read_pc (child_regcache)),
5637 paddress (gdbarch, parent_pc));
5638
5639 regcache_write_pc (child_regcache, parent_pc);
5640 }
5641 }
5642
5643 context_switch (ecs);
5644
5645 /* Immediately detach breakpoints from the child before there's
5646 any chance of letting the user delete breakpoints from the
5647 breakpoint lists. If we don't do this early, it's easy to
5648 leave left over traps in the child, vis: "break foo; catch
5649 fork; c; <fork>; del; c; <child calls foo>". We only follow
5650 the fork on the last `continue', and by that time the
5651 breakpoint at "foo" is long gone from the breakpoint table.
5652 If we vforked, then we don't need to unpatch here, since both
5653 parent and child are sharing the same memory pages; we'll
5654 need to unpatch at follow/detach time instead to be certain
5655 that new breakpoints added between catchpoint hit time and
5656 vfork follow are detached. */
5657 if (ecs->ws.kind () != TARGET_WAITKIND_VFORKED)
5658 {
5659 /* This won't actually modify the breakpoint list, but will
5660 physically remove the breakpoints from the child. */
5661 detach_breakpoints (ecs->ws.child_ptid ());
5662 }
5663
5664 delete_just_stopped_threads_single_step_breakpoints ();
5665
5666 /* In case the event is caught by a catchpoint, remember that
5667 the event is to be followed at the next resume of the thread,
5668 and not immediately. */
5669 ecs->event_thread->pending_follow = ecs->ws;
5670
5671 ecs->event_thread->set_stop_pc
5672 (regcache_read_pc (get_thread_regcache (ecs->event_thread)));
5673
5674 ecs->event_thread->control.stop_bpstat
5675 = bpstat_stop_status_nowatch (get_current_regcache ()->aspace (),
5676 ecs->event_thread->stop_pc (),
5677 ecs->event_thread, ecs->ws);
5678
5679 if (handle_stop_requested (ecs))
5680 return;
5681
5682 /* If no catchpoint triggered for this, then keep going. Note
5683 that we're interested in knowing the bpstat actually causes a
5684 stop, not just if it may explain the signal. Software
5685 watchpoints, for example, always appear in the bpstat. */
5686 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
5687 {
5688 bool follow_child
5689 = (follow_fork_mode_string == follow_fork_mode_child);
5690
5691 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
5692
5693 process_stratum_target *targ
5694 = ecs->event_thread->inf->process_target ();
5695
5696 bool should_resume = follow_fork ();
5697
5698 /* Note that one of these may be an invalid pointer,
5699 depending on detach_fork. */
5700 thread_info *parent = ecs->event_thread;
5701 thread_info *child = find_thread_ptid (targ, ecs->ws.child_ptid ());
5702
5703 /* At this point, the parent is marked running, and the
5704 child is marked stopped. */
5705
5706 /* If not resuming the parent, mark it stopped. */
5707 if (follow_child && !detach_fork && !non_stop && !sched_multi)
5708 parent->set_running (false);
5709
5710 /* If resuming the child, mark it running. */
5711 if (follow_child || (!detach_fork && (non_stop || sched_multi)))
5712 child->set_running (true);
5713
5714 /* In non-stop mode, also resume the other branch. */
5715 if (!detach_fork && (non_stop
5716 || (sched_multi && target_is_non_stop_p ())))
5717 {
5718 if (follow_child)
5719 switch_to_thread (parent);
5720 else
5721 switch_to_thread (child);
5722
5723 ecs->event_thread = inferior_thread ();
5724 ecs->ptid = inferior_ptid;
5725 keep_going (ecs);
5726 }
5727
5728 if (follow_child)
5729 switch_to_thread (child);
5730 else
5731 switch_to_thread (parent);
5732
5733 ecs->event_thread = inferior_thread ();
5734 ecs->ptid = inferior_ptid;
5735
5736 if (should_resume)
5737 {
5738 /* Never call switch_back_to_stepped_thread if we are waiting for
5739 vfork-done (waiting for an external vfork child to exec or
5740 exit). We will resume only the vforking thread for the purpose
5741 of collecting the vfork-done event, and we will restart any
5742 step once the critical shared address space window is done. */
5743 if ((!follow_child
5744 && detach_fork
5745 && parent->inf->thread_waiting_for_vfork_done != nullptr)
5746 || !switch_back_to_stepped_thread (ecs))
5747 keep_going (ecs);
5748 }
5749 else
5750 stop_waiting (ecs);
5751 return;
5752 }
5753 process_event_stop_test (ecs);
5754 return;
5755
5756 case TARGET_WAITKIND_VFORK_DONE:
5757 /* Done with the shared memory region. Re-insert breakpoints in
5758 the parent, and keep going. */
5759
5760 context_switch (ecs);
5761
5762 handle_vfork_done (ecs->event_thread);
5763 gdb_assert (inferior_thread () == ecs->event_thread);
5764
5765 if (handle_stop_requested (ecs))
5766 return;
5767
5768 if (!switch_back_to_stepped_thread (ecs))
5769 {
5770 gdb_assert (inferior_thread () == ecs->event_thread);
5771 /* This also takes care of reinserting breakpoints in the
5772 previously locked inferior. */
5773 keep_going (ecs);
5774 }
5775 return;
5776
5777 case TARGET_WAITKIND_EXECD:
5778
5779 /* Note we can't read registers yet (the stop_pc), because we
5780 don't yet know the inferior's post-exec architecture.
5781 'stop_pc' is explicitly read below instead. */
5782 switch_to_thread_no_regs (ecs->event_thread);
5783
5784 /* Do whatever is necessary to the parent branch of the vfork. */
5785 handle_vfork_child_exec_or_exit (1);
5786
5787 /* This causes the eventpoints and symbol table to be reset.
5788 Must do this now, before trying to determine whether to
5789 stop. */
5790 follow_exec (inferior_ptid, ecs->ws.execd_pathname ());
5791
5792 /* In follow_exec we may have deleted the original thread and
5793 created a new one. Make sure that the event thread is the
5794 execd thread for that case (this is a nop otherwise). */
5795 ecs->event_thread = inferior_thread ();
5796
5797 ecs->event_thread->set_stop_pc
5798 (regcache_read_pc (get_thread_regcache (ecs->event_thread)));
5799
5800 ecs->event_thread->control.stop_bpstat
5801 = bpstat_stop_status_nowatch (get_current_regcache ()->aspace (),
5802 ecs->event_thread->stop_pc (),
5803 ecs->event_thread, ecs->ws);
5804
5805 if (handle_stop_requested (ecs))
5806 return;
5807
5808 /* If no catchpoint triggered for this, then keep going. */
5809 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
5810 {
5811 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
5812 keep_going (ecs);
5813 return;
5814 }
5815 process_event_stop_test (ecs);
5816 return;
5817
5818 /* Be careful not to try to gather much state about a thread
5819 that's in a syscall. It's frequently a losing proposition. */
5820 case TARGET_WAITKIND_SYSCALL_ENTRY:
5821 /* Getting the current syscall number. */
5822 if (handle_syscall_event (ecs) == 0)
5823 process_event_stop_test (ecs);
5824 return;
5825
5826 /* Before examining the threads further, step this thread to
5827 get it entirely out of the syscall. (We get notice of the
5828 event when the thread is just on the verge of exiting a
5829 syscall. Stepping one instruction seems to get it back
5830 into user code.) */
5831 case TARGET_WAITKIND_SYSCALL_RETURN:
5832 if (handle_syscall_event (ecs) == 0)
5833 process_event_stop_test (ecs);
5834 return;
5835
5836 case TARGET_WAITKIND_STOPPED:
5837 handle_signal_stop (ecs);
5838 return;
5839
5840 case TARGET_WAITKIND_NO_HISTORY:
5841 /* Reverse execution: target ran out of history info. */
5842
5843 /* Switch to the stopped thread. */
5844 context_switch (ecs);
5845 infrun_debug_printf ("stopped");
5846
5847 delete_just_stopped_threads_single_step_breakpoints ();
5848 ecs->event_thread->set_stop_pc
5849 (regcache_read_pc (get_thread_regcache (inferior_thread ())));
5850
5851 if (handle_stop_requested (ecs))
5852 return;
5853
5854 gdb::observers::no_history.notify ();
5855 stop_waiting (ecs);
5856 return;
5857 }
5858 }
5859
5860 /* Restart threads back to what they were trying to do back when we
5861 paused them (because of an in-line step-over or vfork, for example).
5862 The EVENT_THREAD thread is ignored (not restarted).
5863
5864 If INF is non-nullptr, only resume threads from INF. */
5865
static void
restart_threads (struct thread_info *event_thread, inferior *inf)
{
  INFRUN_SCOPED_DEBUG_START_END ("event_thread=%s, inf=%d",
				 event_thread->ptid.to_string ().c_str (),
				 inf != nullptr ? inf->num : -1);

  /* In case the instruction just stepped spawned a new thread.  */
  update_thread_list ();

  for (thread_info *tp : all_non_exited_threads ())
    {
      /* If restricted to a single inferior, skip threads belonging
	 to any other inferior.  */
      if (inf != nullptr && tp->inf != inf)
	continue;

      /* Don't touch threads of an inferior that is in the middle of
	 being detached.  */
      if (tp->inf->detaching)
	{
	  infrun_debug_printf ("restart threads: [%s] inferior detaching",
			       tp->ptid.to_string ().c_str ());
	  continue;
	}

      /* Make TP the current thread (without fetching registers)
	 before any of the per-thread operations below.  Note this is
	 done even for threads we end up skipping.  */
      switch_to_thread_no_regs (tp);

      /* The event thread is the caller's responsibility; never
	 restart it here.  */
      if (tp == event_thread)
	{
	  infrun_debug_printf ("restart threads: [%s] is event thread",
			       tp->ptid.to_string ().c_str ());
	  continue;
	}

      /* Only restart threads that were running (or in an inferior
	 function call) when they were paused.  */
      if (!(tp->state == THREAD_RUNNING || tp->control.in_infcall))
	{
	  infrun_debug_printf ("restart threads: [%s] not meant to be running",
			       tp->ptid.to_string ().c_str ());
	  continue;
	}

      /* A thread already marked resumed needs no restart; it must
	 either be executing on the target or already have an event
	 collected (the assert below enforces that invariant).  */
      if (tp->resumed ())
	{
	  infrun_debug_printf ("restart threads: [%s] resumed",
			       tp->ptid.to_string ().c_str ());
	  gdb_assert (tp->executing () || tp->has_pending_waitstatus ());
	  continue;
	}

      /* Threads queued for a step-over are resumed by the step-over
	 machinery (start_step_over), not here.  */
      if (thread_is_in_step_over_chain (tp))
	{
	  infrun_debug_printf ("restart threads: [%s] needs step-over",
			       tp->ptid.to_string ().c_str ());
	  gdb_assert (!tp->resumed ());
	  continue;
	}


      /* A thread with an event already collected must not be resumed
	 on the target; just mark it resumed so its pending event is
	 eventually consumed.  */
      if (tp->has_pending_waitstatus ())
	{
	  infrun_debug_printf ("restart threads: [%s] has pending status",
			       tp->ptid.to_string ().c_str ());
	  tp->set_resumed (true);
	  continue;
	}

      /* A thread the user asked to stop should not reach the resume
	 paths below.  */
      gdb_assert (!tp->stop_requested);

      /* If some thread needs to start a step-over at this point, it
	 should still be in the step-over queue, and thus skipped
	 above.  */
      if (thread_still_needs_step_over (tp))
	{
	  internal_error (__FILE__, __LINE__,
			  "thread [%s] needs a step-over, but not in "
			  "step-over queue\n",
			  tp->ptid.to_string ().c_str ());
	}

      if (currently_stepping (tp))
	{
	  /* Resume the thread in a way that continues its interrupted
	     step.  */
	  infrun_debug_printf ("restart threads: [%s] was stepping",
			       tp->ptid.to_string ().c_str ());
	  keep_going_stepped_thread (tp);
	}
      else
	{
	  /* Plain continue: build a fresh execution-control state for
	     TP and resume it, delivering any pending signal.  */
	  struct execution_control_state ecss;
	  struct execution_control_state *ecs = &ecss;

	  infrun_debug_printf ("restart threads: [%s] continuing",
			       tp->ptid.to_string ().c_str ());
	  reset_ecs (ecs, tp);
	  switch_to_thread (tp);
	  keep_going_pass_signal (ecs);
	}
    }
}
5961
5962 /* Callback for iterate_over_threads. Find a resumed thread that has
5963 a pending waitstatus. */
5964
5965 static int
5966 resumed_thread_with_pending_status (struct thread_info *tp,
5967 void *arg)
5968 {
5969 return tp->resumed () && tp->has_pending_waitstatus ();
5970 }
5971
5972 /* Called when we get an event that may finish an in-line or
5973 out-of-line (displaced stepping) step-over started previously.
5974 Return true if the event is processed and we should go back to the
5975 event loop; false if the caller should continue processing the
5976 event. */
5977
static int
finish_step_over (struct execution_control_state *ecs)
{
  /* Finish any displaced (out-of-line) step the event thread may
     have been performing; this may adjust its PC, so it is done
     before the caller records stop_pc.  */
  displaced_step_finish (ecs->event_thread, ecs->event_thread->stop_signal ());

  /* Remember whether an in-line step-over was in progress before we
     clear that state below; used further down to decide whether
     other threads need restarting.  */
  bool had_step_over_info = step_over_info_valid_p ();

  if (had_step_over_info)
    {
      /* If we're stepping over a breakpoint with all threads locked,
	 then only the thread that was stepped should be reporting
	 back an event.  */
      gdb_assert (ecs->event_thread->control.trap_expected);

      clear_step_over_info ();
    }

  /* Everything below only applies to non-stop targets; in all-stop,
     tell the caller to continue processing the event.  */
  if (!target_is_non_stop_p ())
    return 0;

  /* Start a new step-over in another thread if there's one that
     needs it.  */
  start_step_over ();

  /* If we were stepping over a breakpoint before, and haven't started
     a new in-line step-over sequence, then restart all other threads
     (except the event thread).  We can't do this in all-stop, as then
     e.g., we wouldn't be able to issue any other remote packet until
     these other threads stop.  */
  if (had_step_over_info && !step_over_info_valid_p ())
    {
      struct thread_info *pending;

      /* If we only have threads with pending statuses, the restart
	 below won't restart any thread and so nothing re-inserts the
	 breakpoint we just stepped over.  But we need it inserted
	 when we later process the pending events, otherwise if
	 another thread has a pending event for this breakpoint too,
	 we'd discard its event (because the breakpoint that
	 originally caused the event was no longer inserted).  */
      context_switch (ecs);
      insert_breakpoints ();

      restart_threads (ecs->event_thread);

      /* If we have events pending, go through handle_inferior_event
	 again, picking up a pending event at random.  This avoids
	 thread starvation.  */

      /* But not if we just stepped over a watchpoint in order to let
	 the instruction execute so we can evaluate its expression.
	 The set of watchpoints that triggered is recorded in the
	 breakpoint objects themselves (see bp->watchpoint_triggered).
	 If we processed another event first, that other event could
	 clobber this info.  */
      if (ecs->event_thread->stepping_over_watchpoint)
	return 0;

      /* Look for some other resumed thread whose event is already
	 collected; if found, defer the current event in its favor.  */
      pending = iterate_over_threads (resumed_thread_with_pending_status,
				      NULL);
      if (pending != NULL)
	{
	  struct thread_info *tp = ecs->event_thread;
	  struct regcache *regcache;

	  infrun_debug_printf ("found resumed threads with "
			       "pending events, saving status");

	  gdb_assert (pending != tp);

	  /* Record the event thread's event for later.  */
	  save_waitstatus (tp, ecs->ws);
	  /* This was cleared early, by handle_inferior_event.  Set it
	     so this pending event is considered by
	     do_target_wait.  */
	  tp->set_resumed (true);

	  gdb_assert (!tp->executing ());

	  /* Cache the stop PC now, since the saved event will only be
	     processed later.  */
	  regcache = get_thread_regcache (tp);
	  tp->set_stop_pc (regcache_read_pc (regcache));

	  infrun_debug_printf ("saved stop_pc=%s for %s "
			       "(currently_stepping=%d)",
			       paddress (target_gdbarch (), tp->stop_pc ()),
			       tp->ptid.to_string ().c_str (),
			       currently_stepping (tp));

	  /* This in-line step-over finished; clear this so we won't
	     start a new one.  This is what handle_signal_stop would
	     do, if we returned false.  */
	  tp->stepping_over_breakpoint = 0;

	  /* Wake up the event loop again.  */
	  mark_async_event_handler (infrun_async_inferior_event_token);

	  /* Return 1: the event was processed here; the caller should
	     go back to the event loop.  */
	  prepare_to_wait (ecs);
	  return 1;
	}
    }

  /* Return 0: the caller should continue processing this event.  */
  return 0;
}
6081
6082 /* Come here when the program has stopped with a signal. */
6083
6084 static void
6085 handle_signal_stop (struct execution_control_state *ecs)
6086 {
6087 struct frame_info *frame;
6088 struct gdbarch *gdbarch;
6089 int stopped_by_watchpoint;
6090 enum stop_kind stop_soon;
6091 int random_signal;
6092
6093 gdb_assert (ecs->ws.kind () == TARGET_WAITKIND_STOPPED);
6094
6095 ecs->event_thread->set_stop_signal (ecs->ws.sig ());
6096
6097 /* Do we need to clean up the state of a thread that has
6098 completed a displaced single-step? (Doing so usually affects
6099 the PC, so do it here, before we set stop_pc.) */
6100 if (finish_step_over (ecs))
6101 return;
6102
6103 /* If we either finished a single-step or hit a breakpoint, but
6104 the user wanted this thread to be stopped, pretend we got a
6105 SIG0 (generic unsignaled stop). */
6106 if (ecs->event_thread->stop_requested
6107 && ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP)
6108 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
6109
6110 ecs->event_thread->set_stop_pc
6111 (regcache_read_pc (get_thread_regcache (ecs->event_thread)));
6112
6113 context_switch (ecs);
6114
6115 if (deprecated_context_hook)
6116 deprecated_context_hook (ecs->event_thread->global_num);
6117
6118 if (debug_infrun)
6119 {
6120 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
6121 struct gdbarch *reg_gdbarch = regcache->arch ();
6122
6123 infrun_debug_printf
6124 ("stop_pc=%s", paddress (reg_gdbarch, ecs->event_thread->stop_pc ()));
6125 if (target_stopped_by_watchpoint ())
6126 {
6127 CORE_ADDR addr;
6128
6129 infrun_debug_printf ("stopped by watchpoint");
6130
6131 if (target_stopped_data_address (current_inferior ()->top_target (),
6132 &addr))
6133 infrun_debug_printf ("stopped data address=%s",
6134 paddress (reg_gdbarch, addr));
6135 else
6136 infrun_debug_printf ("(no data address available)");
6137 }
6138 }
6139
6140 /* This is originated from start_remote(), start_inferior() and
6141 shared libraries hook functions. */
6142 stop_soon = get_inferior_stop_soon (ecs);
6143 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
6144 {
6145 infrun_debug_printf ("quietly stopped");
6146 stop_print_frame = true;
6147 stop_waiting (ecs);
6148 return;
6149 }
6150
6151 /* This originates from attach_command(). We need to overwrite
6152 the stop_signal here, because some kernels don't ignore a
6153 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
6154 See more comments in inferior.h. On the other hand, if we
6155 get a non-SIGSTOP, report it to the user - assume the backend
6156 will handle the SIGSTOP if it should show up later.
6157
6158 Also consider that the attach is complete when we see a
6159 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
6160 target extended-remote report it instead of a SIGSTOP
6161 (e.g. gdbserver). We already rely on SIGTRAP being our
6162 signal, so this is no exception.
6163
6164 Also consider that the attach is complete when we see a
6165 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
6166 the target to stop all threads of the inferior, in case the
6167 low level attach operation doesn't stop them implicitly. If
6168 they weren't stopped implicitly, then the stub will report a
6169 GDB_SIGNAL_0, meaning: stopped for no particular reason
6170 other than GDB's request. */
6171 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
6172 && (ecs->event_thread->stop_signal () == GDB_SIGNAL_STOP
6173 || ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
6174 || ecs->event_thread->stop_signal () == GDB_SIGNAL_0))
6175 {
6176 stop_print_frame = true;
6177 stop_waiting (ecs);
6178 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
6179 return;
6180 }
6181
6182 /* At this point, get hold of the now-current thread's frame. */
6183 frame = get_current_frame ();
6184 gdbarch = get_frame_arch (frame);
6185
6186 /* Pull the single step breakpoints out of the target. */
6187 if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP)
6188 {
6189 struct regcache *regcache;
6190 CORE_ADDR pc;
6191
6192 regcache = get_thread_regcache (ecs->event_thread);
6193 const address_space *aspace = regcache->aspace ();
6194
6195 pc = regcache_read_pc (regcache);
6196
6197 /* However, before doing so, if this single-step breakpoint was
6198 actually for another thread, set this thread up for moving
6199 past it. */
6200 if (!thread_has_single_step_breakpoint_here (ecs->event_thread,
6201 aspace, pc))
6202 {
6203 if (single_step_breakpoint_inserted_here_p (aspace, pc))
6204 {
6205 infrun_debug_printf ("[%s] hit another thread's single-step "
6206 "breakpoint",
6207 ecs->ptid.to_string ().c_str ());
6208 ecs->hit_singlestep_breakpoint = 1;
6209 }
6210 }
6211 else
6212 {
6213 infrun_debug_printf ("[%s] hit its single-step breakpoint",
6214 ecs->ptid.to_string ().c_str ());
6215 }
6216 }
6217 delete_just_stopped_threads_single_step_breakpoints ();
6218
6219 if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
6220 && ecs->event_thread->control.trap_expected
6221 && ecs->event_thread->stepping_over_watchpoint)
6222 stopped_by_watchpoint = 0;
6223 else
6224 stopped_by_watchpoint = watchpoints_triggered (ecs->ws);
6225
6226 /* If necessary, step over this watchpoint. We'll be back to display
6227 it in a moment. */
6228 if (stopped_by_watchpoint
6229 && (target_have_steppable_watchpoint ()
6230 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
6231 {
6232 /* At this point, we are stopped at an instruction which has
6233 attempted to write to a piece of memory under control of
6234 a watchpoint. The instruction hasn't actually executed
6235 yet. If we were to evaluate the watchpoint expression
6236 now, we would get the old value, and therefore no change
6237 would seem to have occurred.
6238
6239 In order to make watchpoints work `right', we really need
6240 to complete the memory write, and then evaluate the
6241 watchpoint expression. We do this by single-stepping the
6242 target.
6243
6244 It may not be necessary to disable the watchpoint to step over
6245 it. For example, the PA can (with some kernel cooperation)
6246 single step over a watchpoint without disabling the watchpoint.
6247
6248 It is far more common to need to disable a watchpoint to step
6249 the inferior over it. If we have non-steppable watchpoints,
6250 we must disable the current watchpoint; it's simplest to
6251 disable all watchpoints.
6252
6253 Any breakpoint at PC must also be stepped over -- if there's
6254 one, it will have already triggered before the watchpoint
6255 triggered, and we either already reported it to the user, or
6256 it didn't cause a stop and we called keep_going. In either
6257 case, if there was a breakpoint at PC, we must be trying to
6258 step past it. */
6259 ecs->event_thread->stepping_over_watchpoint = 1;
6260 keep_going (ecs);
6261 return;
6262 }
6263
6264 ecs->event_thread->stepping_over_breakpoint = 0;
6265 ecs->event_thread->stepping_over_watchpoint = 0;
6266 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
6267 ecs->event_thread->control.stop_step = 0;
6268 stop_print_frame = true;
6269 stopped_by_random_signal = 0;
6270 bpstat *stop_chain = nullptr;
6271
6272 /* Hide inlined functions starting here, unless we just performed stepi or
6273 nexti. After stepi and nexti, always show the innermost frame (not any
6274 inline function call sites). */
6275 if (ecs->event_thread->control.step_range_end != 1)
6276 {
6277 const address_space *aspace
6278 = get_thread_regcache (ecs->event_thread)->aspace ();
6279
6280 /* skip_inline_frames is expensive, so we avoid it if we can
6281 determine that the address is one where functions cannot have
6282 been inlined. This improves performance with inferiors that
6283 load a lot of shared libraries, because the solib event
6284 breakpoint is defined as the address of a function (i.e. not
6285 inline). Note that we have to check the previous PC as well
6286 as the current one to catch cases when we have just
6287 single-stepped off a breakpoint prior to reinstating it.
6288 Note that we're assuming that the code we single-step to is
6289 not inline, but that's not definitive: there's nothing
6290 preventing the event breakpoint function from containing
6291 inlined code, and the single-step ending up there. If the
6292 user had set a breakpoint on that inlined code, the missing
6293 skip_inline_frames call would break things. Fortunately
6294 that's an extremely unlikely scenario. */
6295 if (!pc_at_non_inline_function (aspace,
6296 ecs->event_thread->stop_pc (),
6297 ecs->ws)
6298 && !(ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
6299 && ecs->event_thread->control.trap_expected
6300 && pc_at_non_inline_function (aspace,
6301 ecs->event_thread->prev_pc,
6302 ecs->ws)))
6303 {
6304 stop_chain = build_bpstat_chain (aspace,
6305 ecs->event_thread->stop_pc (),
6306 ecs->ws);
6307 skip_inline_frames (ecs->event_thread, stop_chain);
6308
6309 /* Re-fetch current thread's frame in case that invalidated
6310 the frame cache. */
6311 frame = get_current_frame ();
6312 gdbarch = get_frame_arch (frame);
6313 }
6314 }
6315
6316 if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
6317 && ecs->event_thread->control.trap_expected
6318 && gdbarch_single_step_through_delay_p (gdbarch)
6319 && currently_stepping (ecs->event_thread))
6320 {
6321 /* We're trying to step off a breakpoint. Turns out that we're
6322 also on an instruction that needs to be stepped multiple
6323 times before it's been fully executing. E.g., architectures
6324 with a delay slot. It needs to be stepped twice, once for
6325 the instruction and once for the delay slot. */
6326 int step_through_delay
6327 = gdbarch_single_step_through_delay (gdbarch, frame);
6328
6329 if (step_through_delay)
6330 infrun_debug_printf ("step through delay");
6331
6332 if (ecs->event_thread->control.step_range_end == 0
6333 && step_through_delay)
6334 {
6335 /* The user issued a continue when stopped at a breakpoint.
6336 Set up for another trap and get out of here. */
6337 ecs->event_thread->stepping_over_breakpoint = 1;
6338 keep_going (ecs);
6339 return;
6340 }
6341 else if (step_through_delay)
6342 {
6343 /* The user issued a step when stopped at a breakpoint.
6344 Maybe we should stop, maybe we should not - the delay
6345 slot *might* correspond to a line of source. In any
6346 case, don't decide that here, just set
6347 ecs->stepping_over_breakpoint, making sure we
6348 single-step again before breakpoints are re-inserted. */
6349 ecs->event_thread->stepping_over_breakpoint = 1;
6350 }
6351 }
6352
6353 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
6354 handles this event. */
6355 ecs->event_thread->control.stop_bpstat
6356 = bpstat_stop_status (get_current_regcache ()->aspace (),
6357 ecs->event_thread->stop_pc (),
6358 ecs->event_thread, ecs->ws, stop_chain);
6359
6360 /* Following in case break condition called a
6361 function. */
6362 stop_print_frame = true;
6363
6364 /* This is where we handle "moribund" watchpoints. Unlike
6365 software breakpoints traps, hardware watchpoint traps are
6366 always distinguishable from random traps. If no high-level
6367 watchpoint is associated with the reported stop data address
6368 anymore, then the bpstat does not explain the signal ---
6369 simply make sure to ignore it if `stopped_by_watchpoint' is
6370 set. */
6371
6372 if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
6373 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
6374 GDB_SIGNAL_TRAP)
6375 && stopped_by_watchpoint)
6376 {
6377 infrun_debug_printf ("no user watchpoint explains watchpoint SIGTRAP, "
6378 "ignoring");
6379 }
6380
6381 /* NOTE: cagney/2003-03-29: These checks for a random signal
6382 at one stage in the past included checks for an inferior
6383 function call's call dummy's return breakpoint. The original
6384 comment, that went with the test, read:
6385
6386 ``End of a stack dummy. Some systems (e.g. Sony news) give
6387 another signal besides SIGTRAP, so check here as well as
6388 above.''
6389
6390 If someone ever tries to get call dummys on a
6391 non-executable stack to work (where the target would stop
6392 with something like a SIGSEGV), then those tests might need
6393 to be re-instated. Given, however, that the tests were only
6394 enabled when momentary breakpoints were not being used, I
6395 suspect that it won't be the case.
6396
6397 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
6398 be necessary for call dummies on a non-executable stack on
6399 SPARC. */
6400
6401 /* See if the breakpoints module can explain the signal. */
6402 random_signal
6403 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
6404 ecs->event_thread->stop_signal ());
6405
6406 /* Maybe this was a trap for a software breakpoint that has since
6407 been removed. */
6408 if (random_signal && target_stopped_by_sw_breakpoint ())
6409 {
6410 if (gdbarch_program_breakpoint_here_p (gdbarch,
6411 ecs->event_thread->stop_pc ()))
6412 {
6413 struct regcache *regcache;
6414 int decr_pc;
6415
6416 /* Re-adjust PC to what the program would see if GDB was not
6417 debugging it. */
6418 regcache = get_thread_regcache (ecs->event_thread);
6419 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
6420 if (decr_pc != 0)
6421 {
6422 gdb::optional<scoped_restore_tmpl<int>>
6423 restore_operation_disable;
6424
6425 if (record_full_is_used ())
6426 restore_operation_disable.emplace
6427 (record_full_gdb_operation_disable_set ());
6428
6429 regcache_write_pc (regcache,
6430 ecs->event_thread->stop_pc () + decr_pc);
6431 }
6432 }
6433 else
6434 {
6435 /* A delayed software breakpoint event. Ignore the trap. */
6436 infrun_debug_printf ("delayed software breakpoint trap, ignoring");
6437 random_signal = 0;
6438 }
6439 }
6440
6441 /* Maybe this was a trap for a hardware breakpoint/watchpoint that
6442 has since been removed. */
6443 if (random_signal && target_stopped_by_hw_breakpoint ())
6444 {
6445 /* A delayed hardware breakpoint event. Ignore the trap. */
6446 infrun_debug_printf ("delayed hardware breakpoint/watchpoint "
6447 "trap, ignoring");
6448 random_signal = 0;
6449 }
6450
6451 /* If not, perhaps stepping/nexting can. */
6452 if (random_signal)
6453 random_signal = !(ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
6454 && currently_stepping (ecs->event_thread));
6455
6456 /* Perhaps the thread hit a single-step breakpoint of _another_
6457 thread. Single-step breakpoints are transparent to the
6458 breakpoints module. */
6459 if (random_signal)
6460 random_signal = !ecs->hit_singlestep_breakpoint;
6461
6462 /* No? Perhaps we got a moribund watchpoint. */
6463 if (random_signal)
6464 random_signal = !stopped_by_watchpoint;
6465
6466 /* Always stop if the user explicitly requested this thread to
6467 remain stopped. */
6468 if (ecs->event_thread->stop_requested)
6469 {
6470 random_signal = 1;
6471 infrun_debug_printf ("user-requested stop");
6472 }
6473
6474 /* For the program's own signals, act according to
6475 the signal handling tables. */
6476
6477 if (random_signal)
6478 {
6479 /* Signal not for debugging purposes. */
6480 enum gdb_signal stop_signal = ecs->event_thread->stop_signal ();
6481
6482 infrun_debug_printf ("random signal (%s)",
6483 gdb_signal_to_symbol_string (stop_signal));
6484
6485 stopped_by_random_signal = 1;
6486
6487 /* Always stop on signals if we're either just gaining control
6488 of the program, or the user explicitly requested this thread
6489 to remain stopped. */
6490 if (stop_soon != NO_STOP_QUIETLY
6491 || ecs->event_thread->stop_requested
6492 || signal_stop_state (ecs->event_thread->stop_signal ()))
6493 {
6494 stop_waiting (ecs);
6495 return;
6496 }
6497
6498 /* Notify observers the signal has "handle print" set. Note we
6499 returned early above if stopping; normal_stop handles the
6500 printing in that case. */
6501 if (signal_print[ecs->event_thread->stop_signal ()])
6502 {
6503 /* The signal table tells us to print about this signal. */
6504 target_terminal::ours_for_output ();
6505 gdb::observers::signal_received.notify (ecs->event_thread->stop_signal ());
6506 target_terminal::inferior ();
6507 }
6508
6509 /* Clear the signal if it should not be passed. */
6510 if (signal_program[ecs->event_thread->stop_signal ()] == 0)
6511 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
6512
6513 if (ecs->event_thread->prev_pc == ecs->event_thread->stop_pc ()
6514 && ecs->event_thread->control.trap_expected
6515 && ecs->event_thread->control.step_resume_breakpoint == NULL)
6516 {
6517 /* We were just starting a new sequence, attempting to
6518 single-step off of a breakpoint and expecting a SIGTRAP.
6519 Instead this signal arrives. This signal will take us out
6520 of the stepping range so GDB needs to remember to, when
6521 the signal handler returns, resume stepping off that
6522 breakpoint. */
6523 /* To simplify things, "continue" is forced to use the same
6524 code paths as single-step - set a breakpoint at the
6525 signal return address and then, once hit, step off that
6526 breakpoint. */
6527 infrun_debug_printf ("signal arrived while stepping over breakpoint");
6528
6529 insert_hp_step_resume_breakpoint_at_frame (frame);
6530 ecs->event_thread->step_after_step_resume_breakpoint = 1;
6531 /* Reset trap_expected to ensure breakpoints are re-inserted. */
6532 ecs->event_thread->control.trap_expected = 0;
6533
6534 /* If we were nexting/stepping some other thread, switch to
6535 it, so that we don't continue it, losing control. */
6536 if (!switch_back_to_stepped_thread (ecs))
6537 keep_going (ecs);
6538 return;
6539 }
6540
6541 if (ecs->event_thread->stop_signal () != GDB_SIGNAL_0
6542 && (pc_in_thread_step_range (ecs->event_thread->stop_pc (),
6543 ecs->event_thread)
6544 || ecs->event_thread->control.step_range_end == 1)
6545 && frame_id_eq (get_stack_frame_id (frame),
6546 ecs->event_thread->control.step_stack_frame_id)
6547 && ecs->event_thread->control.step_resume_breakpoint == NULL)
6548 {
6549 /* The inferior is about to take a signal that will take it
6550 out of the single step range. Set a breakpoint at the
6551 current PC (which is presumably where the signal handler
6552 will eventually return) and then allow the inferior to
6553 run free.
6554
6555 Note that this is only needed for a signal delivered
6556 while in the single-step range. Nested signals aren't a
6557 problem as they eventually all return. */
6558 infrun_debug_printf ("signal may take us out of single-step range");
6559
6560 clear_step_over_info ();
6561 insert_hp_step_resume_breakpoint_at_frame (frame);
6562 ecs->event_thread->step_after_step_resume_breakpoint = 1;
6563 /* Reset trap_expected to ensure breakpoints are re-inserted. */
6564 ecs->event_thread->control.trap_expected = 0;
6565 keep_going (ecs);
6566 return;
6567 }
6568
6569 /* Note: step_resume_breakpoint may be non-NULL. This occurs
6570 when either there's a nested signal, or when there's a
6571 pending signal enabled just as the signal handler returns
6572 (leaving the inferior at the step-resume-breakpoint without
6573 actually executing it). Either way continue until the
6574 breakpoint is really hit. */
6575
6576 if (!switch_back_to_stepped_thread (ecs))
6577 {
6578 infrun_debug_printf ("random signal, keep going");
6579
6580 keep_going (ecs);
6581 }
6582 return;
6583 }
6584
6585 process_event_stop_test (ecs);
6586 }
6587
/* Come here when we've got some debug event / signal we can explain
   (IOW, not a random signal), and test whether it should cause a
   stop, or whether we should resume the inferior (transparently).
   E.g., could be a breakpoint whose condition evaluates false; we
   could be still stepping within the line; etc.  */

static void
process_event_stop_test (struct execution_control_state *ecs)
{
  struct symtab_and_line stop_pc_sal;
  struct frame_info *frame;
  struct gdbarch *gdbarch;
  CORE_ADDR jmp_buf_pc;
  struct bpstat_what what;

  /* Handle cases caused by hitting a breakpoint.  */

  frame = get_current_frame ();
  gdbarch = get_frame_arch (frame);

  what = bpstat_what (ecs->event_thread->control.stop_bpstat);

  if (what.call_dummy)
    {
      /* Remember how the call-dummy breakpoint was hit; this global is
	 consumed later during stop processing (outside this function).  */
      stop_stack_dummy = what.call_dummy;
    }

  /* A few breakpoint types have callbacks associated (e.g.,
     bp_jit_event).  Run them now.  */
  bpstat_run_callbacks (ecs->event_thread->control.stop_bpstat);

  /* If we hit an internal event that triggers symbol changes, the
     current frame will be invalidated within bpstat_what (e.g., if we
     hit an internal solib event).  Re-fetch it.  */
  frame = get_current_frame ();
  gdbarch = get_frame_arch (frame);

  /* Dispatch on the action the breakpoints module recommends.  */
  switch (what.main_action)
    {
    case BPSTAT_WHAT_SET_LONGJMP_RESUME:
      /* If we hit the breakpoint at longjmp while stepping, we
	 install a momentary breakpoint at the target of the
	 jmp_buf.  */

      infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME");

      ecs->event_thread->stepping_over_breakpoint = 1;

      if (what.is_longjmp)
	{
	  struct value *arg_value;

	  /* If we set the longjmp breakpoint via a SystemTap probe,
	     then use it to extract the arguments.  The destination PC
	     is the third argument to the probe.  */
	  arg_value = probe_safe_evaluate_at_pc (frame, 2);
	  if (arg_value)
	    {
	      jmp_buf_pc = value_as_address (arg_value);
	      jmp_buf_pc = gdbarch_addr_bits_remove (gdbarch, jmp_buf_pc);
	    }
	  else if (!gdbarch_get_longjmp_target_p (gdbarch)
		   || !gdbarch_get_longjmp_target (gdbarch,
						   frame, &jmp_buf_pc))
	    {
	      /* No way to determine the longjmp target; just resume.  */
	      infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME "
				   "(!gdbarch_get_longjmp_target)");
	      keep_going (ecs);
	      return;
	    }

	  /* Insert a breakpoint at resume address.  */
	  insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
	}
      else
	check_exception_resume (ecs, frame);
      keep_going (ecs);
      return;

    case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
      {
	struct frame_info *init_frame;

	/* There are several cases to consider.

	   1. The initiating frame no longer exists.  In this case we
	   must stop, because the exception or longjmp has gone too
	   far.

	   2. The initiating frame exists, and is the same as the
	   current frame.  We stop, because the exception or longjmp
	   has been caught.

	   3. The initiating frame exists and is different from the
	   current frame.  This means the exception or longjmp has
	   been caught beneath the initiating frame, so keep going.

	   4. longjmp breakpoint has been placed just to protect
	   against stale dummy frames and user is not interested in
	   stopping around longjmps.  */

	infrun_debug_printf ("BPSTAT_WHAT_CLEAR_LONGJMP_RESUME");

	gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
		    != NULL);
	delete_exception_resume_breakpoint (ecs->event_thread);

	if (what.is_longjmp)
	  {
	    check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);

	    if (!frame_id_p (ecs->event_thread->initiating_frame))
	      {
		/* Case 4.  */
		keep_going (ecs);
		return;
	      }
	  }

	init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);

	if (init_frame)
	  {
	    struct frame_id current_id
	      = get_frame_id (get_current_frame ());
	    if (frame_id_eq (current_id,
			     ecs->event_thread->initiating_frame))
	      {
		/* Case 2.  Fall through.  */
	      }
	    else
	      {
		/* Case 3.  */
		keep_going (ecs);
		return;
	      }
	  }

	/* For Cases 1 and 2, remove the step-resume breakpoint, if it
	   exists.  */
	delete_step_resume_breakpoint (ecs->event_thread);

	end_stepping_range (ecs);
      }
      return;

    case BPSTAT_WHAT_SINGLE:
      infrun_debug_printf ("BPSTAT_WHAT_SINGLE");
      ecs->event_thread->stepping_over_breakpoint = 1;
      /* Still need to check other stuff, at least the case where we
	 are stepping and step out of the right range.  */
      break;

    case BPSTAT_WHAT_STEP_RESUME:
      infrun_debug_printf ("BPSTAT_WHAT_STEP_RESUME");

      delete_step_resume_breakpoint (ecs->event_thread);
      if (ecs->event_thread->control.proceed_to_finish
	  && execution_direction == EXEC_REVERSE)
	{
	  struct thread_info *tp = ecs->event_thread;

	  /* We are finishing a function in reverse, and just hit the
	     step-resume breakpoint at the start address of the
	     function, and we're almost there -- just need to back up
	     by one more single-step, which should take us back to the
	     function call.  */
	  tp->control.step_range_start = tp->control.step_range_end = 1;
	  keep_going (ecs);
	  return;
	}
      fill_in_stop_func (gdbarch, ecs);
      if (ecs->event_thread->stop_pc () == ecs->stop_func_start
	  && execution_direction == EXEC_REVERSE)
	{
	  /* We are stepping over a function call in reverse, and just
	     hit the step-resume breakpoint at the start address of
	     the function.  Go back to single-stepping, which should
	     take us back to the function call.  */
	  ecs->event_thread->stepping_over_breakpoint = 1;
	  keep_going (ecs);
	  return;
	}
      break;

    case BPSTAT_WHAT_STOP_NOISY:
      infrun_debug_printf ("BPSTAT_WHAT_STOP_NOISY");
      stop_print_frame = true;

      /* Assume the thread stopped for a breakpoint.  We'll still check
	 whether a/the breakpoint is there when the thread is next
	 resumed.  */
      ecs->event_thread->stepping_over_breakpoint = 1;

      stop_waiting (ecs);
      return;

    case BPSTAT_WHAT_STOP_SILENT:
      infrun_debug_printf ("BPSTAT_WHAT_STOP_SILENT");
      stop_print_frame = false;

      /* Assume the thread stopped for a breakpoint.  We'll still check
	 whether a/the breakpoint is there when the thread is next
	 resumed.  */
      ecs->event_thread->stepping_over_breakpoint = 1;
      stop_waiting (ecs);
      return;

    case BPSTAT_WHAT_HP_STEP_RESUME:
      infrun_debug_printf ("BPSTAT_WHAT_HP_STEP_RESUME");

      delete_step_resume_breakpoint (ecs->event_thread);
      if (ecs->event_thread->step_after_step_resume_breakpoint)
	{
	  /* Back when the step-resume breakpoint was inserted, we
	     were trying to single-step off a breakpoint.  Go back to
	     doing that.  */
	  ecs->event_thread->step_after_step_resume_breakpoint = 0;
	  ecs->event_thread->stepping_over_breakpoint = 1;
	  keep_going (ecs);
	  return;
	}
      break;

    case BPSTAT_WHAT_KEEP_CHECKING:
      break;
    }

  /* If we stepped a permanent breakpoint and we had a high priority
     step-resume breakpoint for the address we stepped, but we didn't
     hit it, then we must have stepped into the signal handler.  The
     step-resume was only necessary to catch the case of _not_
     stepping into the handler, so delete it, and fall through to
     checking whether the step finished.  */
  if (ecs->event_thread->stepped_breakpoint)
    {
      struct breakpoint *sr_bp
	= ecs->event_thread->control.step_resume_breakpoint;

      if (sr_bp != NULL
	  && sr_bp->loc->permanent
	  && sr_bp->type == bp_hp_step_resume
	  && sr_bp->loc->address == ecs->event_thread->prev_pc)
	{
	  infrun_debug_printf ("stepped permanent breakpoint, stopped in handler");
	  delete_step_resume_breakpoint (ecs->event_thread);
	  ecs->event_thread->step_after_step_resume_breakpoint = 0;
	}
    }

  /* We come here if we hit a breakpoint but should not stop for it.
     Possibly we also were stepping and should stop for that.  So fall
     through and test for stepping.  But, if not stepping, do not
     stop.  */

  /* In all-stop mode, if we're currently stepping but have stopped in
     some other thread, we need to switch back to the stepped thread.  */
  if (switch_back_to_stepped_thread (ecs))
    return;

  if (ecs->event_thread->control.step_resume_breakpoint)
    {
      infrun_debug_printf ("step-resume breakpoint is inserted");

      /* Having a step-resume breakpoint overrides anything
	 else having to do with stepping commands until
	 that breakpoint is reached.  */
      keep_going (ecs);
      return;
    }

  if (ecs->event_thread->control.step_range_end == 0)
    {
      infrun_debug_printf ("no stepping, continue");
      /* Likewise if we aren't even stepping.  */
      keep_going (ecs);
      return;
    }

  /* Re-fetch current thread's frame in case the code above caused
     the frame cache to be re-initialized, making our FRAME variable
     a dangling pointer.  */
  frame = get_current_frame ();
  gdbarch = get_frame_arch (frame);
  fill_in_stop_func (gdbarch, ecs);

  /* If stepping through a line, keep going if still within it.

     Note that step_range_end is the address of the first instruction
     beyond the step range, and NOT the address of the last instruction
     within it!

     Note also that during reverse execution, we may be stepping
     through a function epilogue and therefore must detect when
     the current-frame changes in the middle of a line.  */

  if (pc_in_thread_step_range (ecs->event_thread->stop_pc (),
			       ecs->event_thread)
      && (execution_direction != EXEC_REVERSE
	  || frame_id_eq (get_frame_id (frame),
			  ecs->event_thread->control.step_frame_id)))
    {
      infrun_debug_printf
	("stepping inside range [%s-%s]",
	 paddress (gdbarch, ecs->event_thread->control.step_range_start),
	 paddress (gdbarch, ecs->event_thread->control.step_range_end));

      /* Tentatively re-enable range stepping; `resume' disables it if
	 necessary (e.g., if we're stepping over a breakpoint or we
	 have software watchpoints).  */
      ecs->event_thread->control.may_range_step = 1;

      /* When stepping backward, stop at beginning of line range
	 (unless it's the function entry point, in which case
	 keep going back to the call point).  */
      CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
      if (stop_pc == ecs->event_thread->control.step_range_start
	  && stop_pc != ecs->stop_func_start
	  && execution_direction == EXEC_REVERSE)
	end_stepping_range (ecs);
      else
	keep_going (ecs);

      return;
    }

  /* We stepped out of the stepping range.  */

  /* If we are stepping at the source level and entered the runtime
     loader dynamic symbol resolution code...

     EXEC_FORWARD: we keep on single stepping until we exit the run
     time loader code and reach the callee's address.

     EXEC_REVERSE: we've already executed the callee (backward), and
     the runtime loader code is handled just like any other
     undebuggable function call.  Now we need only keep stepping
     backward through the trampoline code, and that's handled further
     down, so there is nothing for us to do here.  */

  if (execution_direction != EXEC_REVERSE
      && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
      && in_solib_dynsym_resolve_code (ecs->event_thread->stop_pc ()))
    {
      CORE_ADDR pc_after_resolver =
	gdbarch_skip_solib_resolver (gdbarch, ecs->event_thread->stop_pc ());

      infrun_debug_printf ("stepped into dynsym resolve code");

      if (pc_after_resolver)
	{
	  /* Set up a step-resume breakpoint at the address
	     indicated by SKIP_SOLIB_RESOLVER.  */
	  symtab_and_line sr_sal;
	  sr_sal.pc = pc_after_resolver;
	  sr_sal.pspace = get_frame_program_space (frame);

	  insert_step_resume_breakpoint_at_sal (gdbarch,
						sr_sal, null_frame_id);
	}

      keep_going (ecs);
      return;
    }

  /* Step through an indirect branch thunk.  */
  if (ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
      && gdbarch_in_indirect_branch_thunk (gdbarch,
					   ecs->event_thread->stop_pc ()))
    {
      infrun_debug_printf ("stepped into indirect branch thunk");
      keep_going (ecs);
      return;
    }

  if (ecs->event_thread->control.step_range_end != 1
      && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
	  || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
      && get_frame_type (frame) == SIGTRAMP_FRAME)
    {
      infrun_debug_printf ("stepped into signal trampoline");
      /* The inferior, while doing a "step" or "next", has ended up in
	 a signal trampoline (either by a signal being delivered or by
	 the signal handler returning).  Just single-step until the
	 inferior leaves the trampoline (either by calling the handler
	 or returning).  */
      keep_going (ecs);
      return;
    }

  /* If we're in the return path from a shared library trampoline,
     we want to proceed through the trampoline when stepping.  */
  /* macro/2012-04-25: This needs to come before the subroutine
     call check below as on some targets return trampolines look
     like subroutine calls (MIPS16 return thunks).  */
  if (gdbarch_in_solib_return_trampoline (gdbarch,
					  ecs->event_thread->stop_pc (),
					  ecs->stop_func_name)
      && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
    {
      /* Determine where this trampoline returns.  */
      CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
      CORE_ADDR real_stop_pc
	= gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);

      infrun_debug_printf ("stepped into solib return tramp");

      /* Only proceed through if we know where it's going.  */
      if (real_stop_pc)
	{
	  /* And put the step-breakpoint there and go until there.  */
	  symtab_and_line sr_sal;
	  sr_sal.pc = real_stop_pc;
	  sr_sal.section = find_pc_overlay (sr_sal.pc);
	  sr_sal.pspace = get_frame_program_space (frame);

	  /* Do not specify what the fp should be when we stop since
	     on some machines the prologue is where the new fp value
	     is established.  */
	  insert_step_resume_breakpoint_at_sal (gdbarch,
						sr_sal, null_frame_id);

	  /* Restart without fiddling with the step ranges or
	     other state.  */
	  keep_going (ecs);
	  return;
	}
    }

  /* Check for subroutine calls.  The check for the current frame
     equalling the step ID is not necessary - the check of the
     previous frame's ID is sufficient - but it is a common case and
     cheaper than checking the previous frame's ID.

     NOTE: frame_id_eq will never report two invalid frame IDs as
     being equal, so to get into this block, both the current and
     previous frame must have valid frame IDs.  */
  /* The outer_frame_id check is a heuristic to detect stepping
     through startup code.  If we step over an instruction which
     sets the stack pointer from an invalid value to a valid value,
     we may detect that as a subroutine call from the mythical
     "outermost" function.  This could be fixed by marking
     outermost frames as !stack_p,code_p,special_p.  Then the
     initial outermost frame, before sp was valid, would
     have code_addr == &_start.  See the comment in frame_id_eq
     for more.  */
  if (!frame_id_eq (get_stack_frame_id (frame),
		    ecs->event_thread->control.step_stack_frame_id)
      && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
		       ecs->event_thread->control.step_stack_frame_id)
	  && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
			    outer_frame_id)
	      || (ecs->event_thread->control.step_start_function
		  != find_pc_function (ecs->event_thread->stop_pc ())))))
    {
      CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
      CORE_ADDR real_stop_pc;

      infrun_debug_printf ("stepped into subroutine");

      if (ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
	{
	  /* I presume that step_over_calls is only 0 when we're
	     supposed to be stepping at the assembly language level
	     ("stepi").  Just stop.  */
	  /* And this works the same backward as frontward.  MVS */
	  end_stepping_range (ecs);
	  return;
	}

      /* Reverse stepping through solib trampolines.  */

      if (execution_direction == EXEC_REVERSE
	  && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
	  && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
	      || (ecs->stop_func_start == 0
		  && in_solib_dynsym_resolve_code (stop_pc))))
	{
	  /* Any solib trampoline code can be handled in reverse
	     by simply continuing to single-step.  We have already
	     executed the solib function (backwards), and a few
	     steps will take us back through the trampoline to the
	     caller.  */
	  keep_going (ecs);
	  return;
	}

      if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
	{
	  /* We're doing a "next".

	     Normal (forward) execution: set a breakpoint at the
	     callee's return address (the address at which the caller
	     will resume).

	     Reverse (backward) execution.  set the step-resume
	     breakpoint at the start of the function that we just
	     stepped into (backwards), and continue to there.  When we
	     get there, we'll need to single-step back to the caller.  */

	  if (execution_direction == EXEC_REVERSE)
	    {
	      /* If we're already at the start of the function, we've either
		 just stepped backward into a single instruction function,
		 or stepped back out of a signal handler to the first instruction
		 of the function.  Just keep going, which will single-step back
		 to the caller.  */
	      if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
		{
		  /* Normal function call return (static or dynamic).  */
		  symtab_and_line sr_sal;
		  sr_sal.pc = ecs->stop_func_start;
		  sr_sal.pspace = get_frame_program_space (frame);
		  insert_step_resume_breakpoint_at_sal (gdbarch,
							sr_sal, null_frame_id);
		}
	    }
	  else
	    insert_step_resume_breakpoint_at_caller (frame);

	  keep_going (ecs);
	  return;
	}

      /* If we are in a function call trampoline (a stub between the
	 calling routine and the real function), locate the real
	 function.  That's what tells us (a) whether we want to step
	 into it at all, and (b) what prologue we want to run to the
	 end of, if we do step into it.  */
      real_stop_pc = skip_language_trampoline (frame, stop_pc);
      if (real_stop_pc == 0)
	real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
      if (real_stop_pc != 0)
	ecs->stop_func_start = real_stop_pc;

      if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
	{
	  /* The trampoline target is in the dynamic linker's resolver;
	     run to it via a step-resume breakpoint instead of stepping
	     through the resolver itself.  */
	  symtab_and_line sr_sal;
	  sr_sal.pc = ecs->stop_func_start;
	  sr_sal.pspace = get_frame_program_space (frame);

	  insert_step_resume_breakpoint_at_sal (gdbarch,
						sr_sal, null_frame_id);
	  keep_going (ecs);
	  return;
	}

      /* If we have line number information for the function we are
	 thinking of stepping into and the function isn't on the skip
	 list, step into it.

	 If there are several symtabs at that PC (e.g. with include
	 files), just want to know whether *any* of them have line
	 numbers.  find_pc_line handles this.  */
      {
	struct symtab_and_line tmp_sal;

	tmp_sal = find_pc_line (ecs->stop_func_start, 0);
	if (tmp_sal.line != 0
	    && !function_name_is_marked_for_skip (ecs->stop_func_name,
						  tmp_sal)
	    && !inline_frame_is_marked_for_skip (true, ecs->event_thread))
	  {
	    if (execution_direction == EXEC_REVERSE)
	      handle_step_into_function_backward (gdbarch, ecs);
	    else
	      handle_step_into_function (gdbarch, ecs);
	    return;
	  }
      }

      /* If we have no line number and the step-stop-if-no-debug is
	 set, we stop the step so that the user has a chance to switch
	 in assembly mode.  */
      if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
	  && step_stop_if_no_debug)
	{
	  end_stepping_range (ecs);
	  return;
	}

      if (execution_direction == EXEC_REVERSE)
	{
	  /* If we're already at the start of the function, we've either just
	     stepped backward into a single instruction function without line
	     number info, or stepped back out of a signal handler to the first
	     instruction of the function without line number info.  Just keep
	     going, which will single-step back to the caller.  */
	  if (ecs->stop_func_start != stop_pc)
	    {
	      /* Set a breakpoint at callee's start address.
		 From there we can step once and be back in the caller.  */
	      symtab_and_line sr_sal;
	      sr_sal.pc = ecs->stop_func_start;
	      sr_sal.pspace = get_frame_program_space (frame);
	      insert_step_resume_breakpoint_at_sal (gdbarch,
						    sr_sal, null_frame_id);
	    }
	}
      else
	/* Set a breakpoint at callee's return address (the address
	   at which the caller will resume).  */
	insert_step_resume_breakpoint_at_caller (frame);

      keep_going (ecs);
      return;
    }

  /* Reverse stepping through solib trampolines.  */

  if (execution_direction == EXEC_REVERSE
      && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
    {
      CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();

      if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
	  || (ecs->stop_func_start == 0
	      && in_solib_dynsym_resolve_code (stop_pc)))
	{
	  /* Any solib trampoline code can be handled in reverse
	     by simply continuing to single-step.  We have already
	     executed the solib function (backwards), and a few
	     steps will take us back through the trampoline to the
	     caller.  */
	  keep_going (ecs);
	  return;
	}
      else if (in_solib_dynsym_resolve_code (stop_pc))
	{
	  /* Stepped backward into the solib dynsym resolver.
	     Set a breakpoint at its start and continue, then
	     one more step will take us out.  */
	  symtab_and_line sr_sal;
	  sr_sal.pc = ecs->stop_func_start;
	  sr_sal.pspace = get_frame_program_space (frame);
	  insert_step_resume_breakpoint_at_sal (gdbarch,
						sr_sal, null_frame_id);
	  keep_going (ecs);
	  return;
	}
    }

  /* This always returns the sal for the inner-most frame when we are in a
     stack of inlined frames, even if GDB actually believes that it is in a
     more outer frame.  This is checked for below by calls to
     inline_skipped_frames.  */
  stop_pc_sal = find_pc_line (ecs->event_thread->stop_pc (), 0);

  /* NOTE: tausq/2004-05-24: This if block used to be done before all
     the trampoline processing logic, however, there are some trampolines
     that have no names, so we should do trampoline handling first.  */
  if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
      && ecs->stop_func_name == NULL
      && stop_pc_sal.line == 0)
    {
      infrun_debug_printf ("stepped into undebuggable function");

      /* The inferior just stepped into, or returned to, an
	 undebuggable function (where there is no debugging information
	 and no line number corresponding to the address where the
	 inferior stopped).  Since we want to skip this kind of code,
	 we keep going until the inferior returns from this
	 function - unless the user has asked us not to (via
	 set step-mode) or we no longer know how to get back
	 to the call site.  */
      if (step_stop_if_no_debug
	  || !frame_id_p (frame_unwind_caller_id (frame)))
	{
	  /* If we have no line number and the step-stop-if-no-debug
	     is set, we stop the step so that the user has a chance to
	     switch in assembly mode.  */
	  end_stepping_range (ecs);
	  return;
	}
      else
	{
	  /* Set a breakpoint at callee's return address (the address
	     at which the caller will resume).  */
	  insert_step_resume_breakpoint_at_caller (frame);
	  keep_going (ecs);
	  return;
	}
    }

  if (ecs->event_thread->control.step_range_end == 1)
    {
      /* It is stepi or nexti.  We always want to stop stepping after
	 one instruction.  */
      infrun_debug_printf ("stepi/nexti");
      end_stepping_range (ecs);
      return;
    }

  if (stop_pc_sal.line == 0)
    {
      /* We have no line number information.  That means to stop
	 stepping (does this always happen right after one instruction,
	 when we do "s" in a function with no line numbers,
	 or can this happen as a result of a return or longjmp?).  */
      infrun_debug_printf ("line number info");
      end_stepping_range (ecs);
      return;
    }

  /* Look for "calls" to inlined functions, part one.  If the inline
     frame machinery detected some skipped call sites, we have entered
     a new inline function.  */

  if (frame_id_eq (get_frame_id (get_current_frame ()),
		   ecs->event_thread->control.step_frame_id)
      && inline_skipped_frames (ecs->event_thread))
    {
      infrun_debug_printf ("stepped into inlined function");

      symtab_and_line call_sal = find_frame_sal (get_current_frame ());

      if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
	{
	  /* For "step", we're going to stop.  But if the call site
	     for this inlined function is on the same source line as
	     we were previously stepping, go down into the function
	     first.  Otherwise stop at the call site.  */

	  if (call_sal.line == ecs->event_thread->current_line
	      && call_sal.symtab == ecs->event_thread->current_symtab)
	    {
	      step_into_inline_frame (ecs->event_thread);
	      if (inline_frame_is_marked_for_skip (false, ecs->event_thread))
		{
		  keep_going (ecs);
		  return;
		}
	    }

	  end_stepping_range (ecs);
	  return;
	}
      else
	{
	  /* For "next", we should stop at the call site if it is on a
	     different source line.  Otherwise continue through the
	     inlined function.  */
	  if (call_sal.line == ecs->event_thread->current_line
	      && call_sal.symtab == ecs->event_thread->current_symtab)
	    keep_going (ecs);
	  else
	    end_stepping_range (ecs);
	  return;
	}
    }

  /* Look for "calls" to inlined functions, part two.  If we are still
     in the same real function we were stepping through, but we have
     to go further up to find the exact frame ID, we are stepping
     through a more inlined call beyond its call site.  */

  if (get_frame_type (get_current_frame ()) == INLINE_FRAME
      && !frame_id_eq (get_frame_id (get_current_frame ()),
		       ecs->event_thread->control.step_frame_id)
      && stepped_in_from (get_current_frame (),
			  ecs->event_thread->control.step_frame_id))
    {
      infrun_debug_printf ("stepping through inlined function");

      if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL
	  || inline_frame_is_marked_for_skip (false, ecs->event_thread))
	keep_going (ecs);
      else
	end_stepping_range (ecs);
      return;
    }

  bool refresh_step_info = true;
  if ((ecs->event_thread->stop_pc () == stop_pc_sal.pc)
      && (ecs->event_thread->current_line != stop_pc_sal.line
	  || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
    {
      /* We are at a different line.  */

      if (stop_pc_sal.is_stmt)
	{
	  /* We are at the start of a statement.

	     So stop.  Note that we don't stop if we step into the middle of a
	     statement.  That is said to make things like for (;;) statements
	     work better.  */
	  infrun_debug_printf ("stepped to a different line");
	  end_stepping_range (ecs);
	  return;
	}
      else if (frame_id_eq (get_frame_id (get_current_frame ()),
			    ecs->event_thread->control.step_frame_id))
	{
	  /* We are not at the start of a statement, and we have not changed
	     frame.

	     We ignore this line table entry, and continue stepping forward,
	     looking for a better place to stop.  */
	  refresh_step_info = false;
	  infrun_debug_printf ("stepped to a different line, but "
			       "it's not the start of a statement");
	}
      else
	{
	  /* We are not the start of a statement, and we have changed frame.

	     We ignore this line table entry, and continue stepping forward,
	     looking for a better place to stop.  Keep refresh_step_info at
	     true to note that the frame has changed, but ignore the line
	     number to make sure we don't ignore a subsequent entry with the
	     same line number.  */
	  stop_pc_sal.line = 0;
	  infrun_debug_printf ("stepped to a different frame, but "
			       "it's not the start of a statement");
	}
    }

  /* We aren't done stepping.

     Optimize by setting the stepping range to the line.
     (We might not be in the original line, but if we entered a
     new line in mid-statement, we continue stepping.  This makes
     things like for(;;) statements work better.)

     If we entered a SAL that indicates a non-statement line table entry,
     then we update the stepping range, but we don't update the step info,
     which includes things like the line number we are stepping away from.
     This means we will stop when we find a line table entry that is marked
     as is-statement, even if it matches the non-statement one we just
     stepped into.  */

  ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
  ecs->event_thread->control.step_range_end = stop_pc_sal.end;
  ecs->event_thread->control.may_range_step = 1;
  infrun_debug_printf
    ("updated step range, start = %s, end = %s, may_range_step = %d",
     paddress (gdbarch, ecs->event_thread->control.step_range_start),
     paddress (gdbarch, ecs->event_thread->control.step_range_end),
     ecs->event_thread->control.may_range_step);
  if (refresh_step_info)
    set_step_info (ecs->event_thread, frame, stop_pc_sal);

  infrun_debug_printf ("keep going");
  keep_going (ecs);
}
7433
7434 static bool restart_stepped_thread (process_stratum_target *resume_target,
7435 ptid_t resume_ptid);
7436
/* In all-stop mode, if we're currently stepping but have stopped in
   some other thread, we may need to switch back to the stepped
   thread.  Returns true if we set the inferior running, false if we
   left it stopped (and the event needs further processing).  */
7441
static bool
switch_back_to_stepped_thread (struct execution_control_state *ecs)
{
  /* In non-stop mode, threads are always handled individually, so
     there is never a stepped thread to switch back to.  */
  if (!target_is_non_stop_p ())
    {
      /* If any thread is blocked on some internal breakpoint, and we
	 simply need to step over that breakpoint to get it going
	 again, do that first.  */

      /* However, if we see an event for the stepping thread, then we
	 know all other threads have been moved past their breakpoints
	 already.  Let the caller check whether the step is finished,
	 etc., before deciding to move it past a breakpoint.  */
      if (ecs->event_thread->control.step_range_end != 0)
	return false;

      /* Check if the current thread is blocked on an incomplete
	 step-over, interrupted by a random signal.  */
      if (ecs->event_thread->control.trap_expected
	  && ecs->event_thread->stop_signal () != GDB_SIGNAL_TRAP)
	{
	  infrun_debug_printf
	    ("need to finish step-over of [%s]",
	     ecs->event_thread->ptid.to_string ().c_str ());
	  keep_going (ecs);
	  return true;
	}

      /* Check if the current thread is blocked by a single-step
	 breakpoint of another thread.  */
      if (ecs->hit_singlestep_breakpoint)
	{
	  infrun_debug_printf ("need to step [%s] over single-step breakpoint",
			       ecs->ptid.to_string ().c_str ());
	  keep_going (ecs);
	  return true;
	}

      /* If this thread needs yet another step-over (e.g., stepping
	 through a delay slot), do it first before moving on to
	 another thread.  */
      if (thread_still_needs_step_over (ecs->event_thread))
	{
	  infrun_debug_printf
	    ("thread [%s] still needs step-over",
	     ecs->event_thread->ptid.to_string ().c_str ());
	  keep_going (ecs);
	  return true;
	}

      /* If scheduler locking applies even if not stepping, there's no
	 need to walk over threads.  Above we've checked whether the
	 current thread is stepping.  If some other thread not the
	 event thread is stepping, then it must be that scheduler
	 locking is not in effect.  */
      if (schedlock_applies (ecs->event_thread))
	return false;

      /* Otherwise, we no longer expect a trap in the current thread.
	 Clear the trap_expected flag before switching back -- this is
	 what keep_going does as well, if we call it.  */
      ecs->event_thread->control.trap_expected = 0;

      /* Likewise, clear the signal if it should not be passed.  */
      if (!signal_program[ecs->event_thread->stop_signal ()])
	ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);

      /* If some thread was stepping, it has now been set running
	 again; just wait for its next event.  */
      if (restart_stepped_thread (ecs->target, ecs->ptid))
	{
	  prepare_to_wait (ecs);
	  return true;
	}

      /* No stepped thread needed restarting; switch back to the
	 event thread and let the caller present the stop.  */
      switch_to_thread (ecs->event_thread);
    }

  return false;
}
7520
7521 /* Look for the thread that was stepping, and resume it.
7522 RESUME_TARGET / RESUME_PTID indicate the set of threads the caller
7523 is resuming. Return true if a thread was started, false
7524 otherwise. */
7525
7526 static bool
7527 restart_stepped_thread (process_stratum_target *resume_target,
7528 ptid_t resume_ptid)
7529 {
7530 /* Do all pending step-overs before actually proceeding with
7531 step/next/etc. */
7532 if (start_step_over ())
7533 return true;
7534
7535 for (thread_info *tp : all_threads_safe ())
7536 {
7537 if (tp->state == THREAD_EXITED)
7538 continue;
7539
7540 if (tp->has_pending_waitstatus ())
7541 continue;
7542
7543 /* Ignore threads of processes the caller is not
7544 resuming. */
7545 if (!sched_multi
7546 && (tp->inf->process_target () != resume_target
7547 || tp->inf->pid != resume_ptid.pid ()))
7548 continue;
7549
7550 if (tp->control.trap_expected)
7551 {
7552 infrun_debug_printf ("switching back to stepped thread (step-over)");
7553
7554 if (keep_going_stepped_thread (tp))
7555 return true;
7556 }
7557 }
7558
7559 for (thread_info *tp : all_threads_safe ())
7560 {
7561 if (tp->state == THREAD_EXITED)
7562 continue;
7563
7564 if (tp->has_pending_waitstatus ())
7565 continue;
7566
7567 /* Ignore threads of processes the caller is not
7568 resuming. */
7569 if (!sched_multi
7570 && (tp->inf->process_target () != resume_target
7571 || tp->inf->pid != resume_ptid.pid ()))
7572 continue;
7573
7574 /* Did we find the stepping thread? */
7575 if (tp->control.step_range_end)
7576 {
7577 infrun_debug_printf ("switching back to stepped thread (stepping)");
7578
7579 if (keep_going_stepped_thread (tp))
7580 return true;
7581 }
7582 }
7583
7584 return false;
7585 }
7586
7587 /* See infrun.h. */
7588
7589 void
7590 restart_after_all_stop_detach (process_stratum_target *proc_target)
7591 {
7592 /* Note we don't check target_is_non_stop_p() here, because the
7593 current inferior may no longer have a process_stratum target
7594 pushed, as we just detached. */
7595
7596 /* See if we have a THREAD_RUNNING thread that need to be
7597 re-resumed. If we have any thread that is already executing,
7598 then we don't need to resume the target -- it is already been
7599 resumed. With the remote target (in all-stop), it's even
7600 impossible to issue another resumption if the target is already
7601 resumed, until the target reports a stop. */
7602 for (thread_info *thr : all_threads (proc_target))
7603 {
7604 if (thr->state != THREAD_RUNNING)
7605 continue;
7606
7607 /* If we have any thread that is already executing, then we
7608 don't need to resume the target -- it is already been
7609 resumed. */
7610 if (thr->executing ())
7611 return;
7612
7613 /* If we have a pending event to process, skip resuming the
7614 target and go straight to processing it. */
7615 if (thr->resumed () && thr->has_pending_waitstatus ())
7616 return;
7617 }
7618
7619 /* Alright, we need to re-resume the target. If a thread was
7620 stepping, we need to restart it stepping. */
7621 if (restart_stepped_thread (proc_target, minus_one_ptid))
7622 return;
7623
7624 /* Otherwise, find the first THREAD_RUNNING thread and resume
7625 it. */
7626 for (thread_info *thr : all_threads (proc_target))
7627 {
7628 if (thr->state != THREAD_RUNNING)
7629 continue;
7630
7631 execution_control_state ecs;
7632 reset_ecs (&ecs, thr);
7633 switch_to_thread (thr);
7634 keep_going (&ecs);
7635 return;
7636 }
7637 }
7638
7639 /* Set a previously stepped thread back to stepping. Returns true on
7640 success, false if the resume is not possible (e.g., the thread
7641 vanished). */
7642
static bool
keep_going_stepped_thread (struct thread_info *tp)
{
  struct frame_info *frame;
  struct execution_control_state ecss;
  struct execution_control_state *ecs = &ecss;

  /* If the stepping thread exited, then don't try to switch back and
     resume it, which could fail in several different ways depending
     on the target.  Instead, just keep going.

     We can find a stepping dead thread in the thread list in two
     cases:

     - The target supports thread exit events, and when the target
       tries to delete the thread from the thread list, inferior_ptid
       pointed at the exiting thread.  In such case, calling
       delete_thread does not really remove the thread from the list;
       instead, the thread is left listed, with 'exited' state.

     - The target's debug interface does not support thread exit
       events, and so we have no idea whatsoever if the previously
       stepping thread is still alive.  For that reason, we need to
       synchronously query the target now.  */

  if (tp->state == THREAD_EXITED || !target_thread_alive (tp->ptid))
    {
      infrun_debug_printf ("not resuming previously stepped thread, it has "
			   "vanished");

      delete_thread (tp);
      return false;
    }

  infrun_debug_printf ("resuming previously stepped thread");

  /* Make TP the current thread before resuming it.  */
  reset_ecs (ecs, tp);
  switch_to_thread (tp);

  /* Refresh the thread's stop PC from the target -- it may have
     moved since we last fetched it (see the comment below).  */
  tp->set_stop_pc (regcache_read_pc (get_thread_regcache (tp)));
  frame = get_current_frame ();

  /* If the PC of the thread we were trying to single-step has
     changed, then that thread has trapped or been signaled, but the
     event has not been reported to GDB yet.  Re-poll the target
     looking for this particular thread's event (i.e. temporarily
     enable schedlock) by:

     - setting a break at the current PC
     - resuming that particular thread, only (by setting trap
       expected)

     This prevents us continuously moving the single-step breakpoint
     forward, one instruction at a time, overstepping.  */

  if (tp->stop_pc () != tp->prev_pc)
    {
      ptid_t resume_ptid;

      infrun_debug_printf ("expected thread advanced also (%s -> %s)",
			   paddress (target_gdbarch (), tp->prev_pc),
			   paddress (target_gdbarch (), tp->stop_pc ()));

      /* Clear the info of the previous step-over, as it's no longer
	 valid (if the thread was trying to step over a breakpoint, it
	 has already succeeded).  It's what keep_going would do too,
	 if we called it.  Do this before trying to insert the sss
	 breakpoint, otherwise if we were previously trying to step
	 over this exact address in another thread, the breakpoint is
	 skipped.  */
      clear_step_over_info ();
      tp->control.trap_expected = 0;

      insert_single_step_breakpoint (get_frame_arch (frame),
				     get_frame_address_space (frame),
				     tp->stop_pc ());

      /* Resume only this thread, bypassing keep_going, so the
	 pending event is fetched before anything else runs.  */
      tp->set_resumed (true);
      resume_ptid = internal_resume_ptid (tp->control.stepping_command);
      do_target_resume (resume_ptid, false, GDB_SIGNAL_0);
    }
  else
    {
      infrun_debug_printf ("expected thread still hasn't advanced");

      /* The thread hasn't moved; resume it normally, preserving any
	 stop signal it may have.  */
      keep_going_pass_signal (ecs);
    }

  return true;
}
7733
7734 /* Is thread TP in the middle of (software or hardware)
7735 single-stepping? (Note the result of this function must never be
7736 passed directly as target_resume's STEP parameter.) */
7737
7738 static bool
7739 currently_stepping (struct thread_info *tp)
7740 {
7741 return ((tp->control.step_range_end
7742 && tp->control.step_resume_breakpoint == NULL)
7743 || tp->control.trap_expected
7744 || tp->stepped_breakpoint
7745 || bpstat_should_step ());
7746 }
7747
7748 /* Inferior has stepped into a subroutine call with source code that
7749 we should not step over. Do step to the first line of code in
7750 it. */
7751
static void
handle_step_into_function (struct gdbarch *gdbarch,
			   struct execution_control_state *ecs)
{
  /* Compute the bounds (stop_func_start/end) of the function we
     stepped into.  */
  fill_in_stop_func (gdbarch, ecs);

  /* Skip the function's prologue, but only if we have debug info
     (i.e., the source isn't assembly).  */
  compunit_symtab *cust
    = find_pc_compunit_symtab (ecs->event_thread->stop_pc ());
  if (cust != NULL && compunit_language (cust) != language_asm)
    ecs->stop_func_start
      = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);

  symtab_and_line stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
  /* Use the step_resume_break to step until the end of the prologue,
     even if that involves jumps (as it seems to on the vax under
     4.2).  */
  /* If the prologue ends in the middle of a source line, continue to
     the end of that source line (if it is still within the function).
     Otherwise, just go to end of prologue.  */
  if (stop_func_sal.end
      && stop_func_sal.pc != ecs->stop_func_start
      && stop_func_sal.end < ecs->stop_func_end)
    ecs->stop_func_start = stop_func_sal.end;

  /* Architectures which require breakpoint adjustment might not be able
     to place a breakpoint at the computed address.  If so, the test
     ``ecs->stop_func_start == stop_pc'' will never succeed.  Adjust
     ecs->stop_func_start to an address at which a breakpoint may be
     legitimately placed.

     Note:  kevinb/2004-01-19: On FR-V, if this adjustment is not
     made, GDB will enter an infinite loop when stepping through
     optimized code consisting of VLIW instructions which contain
     subinstructions corresponding to different source lines.  On
     FR-V, it's not permitted to place a breakpoint on any but the
     first subinstruction of a VLIW instruction.  When a breakpoint is
     set, GDB will adjust the breakpoint address to the beginning of
     the VLIW instruction.  Thus, we need to make the corresponding
     adjustment here when computing the stop address.  */

  if (gdbarch_adjust_breakpoint_address_p (gdbarch))
    {
      ecs->stop_func_start
	= gdbarch_adjust_breakpoint_address (gdbarch,
					     ecs->stop_func_start);
    }

  if (ecs->stop_func_start == ecs->event_thread->stop_pc ())
    {
      /* We are already there: stop now.  */
      end_stepping_range (ecs);
      return;
    }
  else
    {
      /* Put the step-breakpoint there and go until there.  */
      symtab_and_line sr_sal;
      sr_sal.pc = ecs->stop_func_start;
      sr_sal.section = find_pc_overlay (ecs->stop_func_start);
      sr_sal.pspace = get_frame_program_space (get_current_frame ());

      /* Do not specify what the fp should be when we stop since on
	 some machines the prologue is where the new fp value is
	 established.  */
      insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);

      /* And make sure stepping stops right away then.  */
      ecs->event_thread->control.step_range_end
	= ecs->event_thread->control.step_range_start;
    }
  keep_going (ecs);
}
7824
7825 /* Inferior has stepped backward into a subroutine call with source
7826 code that we should not step over. Do step to the beginning of the
7827 last line of code in it. */
7828
7829 static void
7830 handle_step_into_function_backward (struct gdbarch *gdbarch,
7831 struct execution_control_state *ecs)
7832 {
7833 struct compunit_symtab *cust;
7834 struct symtab_and_line stop_func_sal;
7835
7836 fill_in_stop_func (gdbarch, ecs);
7837
7838 cust = find_pc_compunit_symtab (ecs->event_thread->stop_pc ());
7839 if (cust != NULL && compunit_language (cust) != language_asm)
7840 ecs->stop_func_start
7841 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
7842
7843 stop_func_sal = find_pc_line (ecs->event_thread->stop_pc (), 0);
7844
7845 /* OK, we're just going to keep stepping here. */
7846 if (stop_func_sal.pc == ecs->event_thread->stop_pc ())
7847 {
7848 /* We're there already. Just stop stepping now. */
7849 end_stepping_range (ecs);
7850 }
7851 else
7852 {
7853 /* Else just reset the step range and keep going.
7854 No step-resume breakpoint, they don't work for
7855 epilogues, which can have multiple entry paths. */
7856 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
7857 ecs->event_thread->control.step_range_end = stop_func_sal.end;
7858 keep_going (ecs);
7859 }
7860 return;
7861 }
7862
/* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
   This is used both to step over functions and to skip over code.  */
7865
7866 static void
7867 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
7868 struct symtab_and_line sr_sal,
7869 struct frame_id sr_id,
7870 enum bptype sr_type)
7871 {
7872 /* There should never be more than one step-resume or longjmp-resume
7873 breakpoint per thread, so we should never be setting a new
7874 step_resume_breakpoint when one is already active. */
7875 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
7876 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
7877
7878 infrun_debug_printf ("inserting step-resume breakpoint at %s",
7879 paddress (gdbarch, sr_sal.pc));
7880
7881 inferior_thread ()->control.step_resume_breakpoint
7882 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type).release ();
7883 }
7884
7885 void
7886 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
7887 struct symtab_and_line sr_sal,
7888 struct frame_id sr_id)
7889 {
7890 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
7891 sr_sal, sr_id,
7892 bp_step_resume);
7893 }
7894
7895 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
7896 This is used to skip a potential signal handler.
7897
7898 This is called with the interrupted function's frame. The signal
7899 handler, when it returns, will resume the interrupted function at
7900 RETURN_FRAME.pc. */
7901
7902 static void
7903 insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
7904 {
7905 gdb_assert (return_frame != NULL);
7906
7907 struct gdbarch *gdbarch = get_frame_arch (return_frame);
7908
7909 symtab_and_line sr_sal;
7910 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
7911 sr_sal.section = find_pc_overlay (sr_sal.pc);
7912 sr_sal.pspace = get_frame_program_space (return_frame);
7913
7914 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
7915 get_stack_frame_id (return_frame),
7916 bp_hp_step_resume);
7917 }
7918
7919 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
7920 is used to skip a function after stepping into it (for "next" or if
7921 the called function has no debugging information).
7922
7923 The current function has almost always been reached by single
7924 stepping a call or return instruction. NEXT_FRAME belongs to the
7925 current function, and the breakpoint will be set at the caller's
7926 resume address.
7927
7928 This is a separate function rather than reusing
7929 insert_hp_step_resume_breakpoint_at_frame in order to avoid
7930 get_prev_frame, which may stop prematurely (see the implementation
7931 of frame_unwind_caller_id for an example). */
7932
7933 static void
7934 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
7935 {
7936 /* We shouldn't have gotten here if we don't know where the call site
7937 is. */
7938 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
7939
7940 struct gdbarch *gdbarch = frame_unwind_caller_arch (next_frame);
7941
7942 symtab_and_line sr_sal;
7943 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
7944 frame_unwind_caller_pc (next_frame));
7945 sr_sal.section = find_pc_overlay (sr_sal.pc);
7946 sr_sal.pspace = frame_unwind_program_space (next_frame);
7947
7948 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
7949 frame_unwind_caller_id (next_frame));
7950 }
7951
7952 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
7953 new breakpoint at the target of a jmp_buf. The handling of
7954 longjmp-resume uses the same mechanisms used for handling
7955 "step-resume" breakpoints. */
7956
7957 static void
7958 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
7959 {
7960 /* There should never be more than one longjmp-resume breakpoint per
7961 thread, so we should never be setting a new
7962 longjmp_resume_breakpoint when one is already active. */
7963 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);
7964
7965 infrun_debug_printf ("inserting longjmp-resume breakpoint at %s",
7966 paddress (gdbarch, pc));
7967
7968 inferior_thread ()->control.exception_resume_breakpoint =
7969 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume).release ();
7970 }
7971
7972 /* Insert an exception resume breakpoint. TP is the thread throwing
7973 the exception. The block B is the block of the unwinder debug hook
7974 function. FRAME is the frame corresponding to the call to this
7975 function. SYM is the symbol of the function argument holding the
7976 target PC of the exception. */
7977
7978 static void
7979 insert_exception_resume_breakpoint (struct thread_info *tp,
7980 const struct block *b,
7981 struct frame_info *frame,
7982 struct symbol *sym)
7983 {
7984 try
7985 {
7986 struct block_symbol vsym;
7987 struct value *value;
7988 CORE_ADDR handler;
7989 struct breakpoint *bp;
7990
7991 vsym = lookup_symbol_search_name (sym->search_name (),
7992 b, VAR_DOMAIN);
7993 value = read_var_value (vsym.symbol, vsym.block, frame);
7994 /* If the value was optimized out, revert to the old behavior. */
7995 if (! value_optimized_out (value))
7996 {
7997 handler = value_as_address (value);
7998
7999 infrun_debug_printf ("exception resume at %lx",
8000 (unsigned long) handler);
8001
8002 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
8003 handler,
8004 bp_exception_resume).release ();
8005
8006 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
8007 frame = NULL;
8008
8009 bp->thread = tp->global_num;
8010 inferior_thread ()->control.exception_resume_breakpoint = bp;
8011 }
8012 }
8013 catch (const gdb_exception_error &e)
8014 {
8015 /* We want to ignore errors here. */
8016 }
8017 }
8018
8019 /* A helper for check_exception_resume that sets an
8020 exception-breakpoint based on a SystemTap probe. */
8021
8022 static void
8023 insert_exception_resume_from_probe (struct thread_info *tp,
8024 const struct bound_probe *probe,
8025 struct frame_info *frame)
8026 {
8027 struct value *arg_value;
8028 CORE_ADDR handler;
8029 struct breakpoint *bp;
8030
8031 arg_value = probe_safe_evaluate_at_pc (frame, 1);
8032 if (!arg_value)
8033 return;
8034
8035 handler = value_as_address (arg_value);
8036
8037 infrun_debug_printf ("exception resume at %s",
8038 paddress (probe->objfile->arch (), handler));
8039
8040 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
8041 handler, bp_exception_resume).release ();
8042 bp->thread = tp->global_num;
8043 inferior_thread ()->control.exception_resume_breakpoint = bp;
8044 }
8045
8046 /* This is called when an exception has been intercepted. Check to
8047 see whether the exception's destination is of interest, and if so,
8048 set an exception resume breakpoint there. */
8049
static void
check_exception_resume (struct execution_control_state *ecs,
			struct frame_info *frame)
{
  struct bound_probe probe;
  struct symbol *func;

  /* First see if this exception unwinding breakpoint was set via a
     SystemTap probe point.  If so, the probe has two arguments: the
     CFA and the HANDLER.  We ignore the CFA, extract the handler, and
     set a breakpoint there.  */
  probe = find_probe_by_pc (get_frame_pc (frame));
  if (probe.prob)
    {
      insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
      return;
    }

  /* No probe; fall back to inspecting the arguments of the function
     containing FRAME's PC.  */
  func = get_frame_function (frame);
  if (!func)
    return;

  try
    {
      const struct block *b;
      struct block_iterator iter;
      struct symbol *sym;
      int argno = 0;

      /* The exception breakpoint is a thread-specific breakpoint on
	 the unwinder's debug hook, declared as:

	 void _Unwind_DebugHook (void *cfa, void *handler);

	 The CFA argument indicates the frame to which control is
	 about to be transferred.  HANDLER is the destination PC.

	 We ignore the CFA and set a temporary breakpoint at HANDLER.
	 This is not extremely efficient but it avoids issues in gdb
	 with computing the DWARF CFA, and it also works even in weird
	 cases such as throwing an exception from inside a signal
	 handler.  */

      b = func->value_block ();
      ALL_BLOCK_SYMBOLS (b, iter, sym)
	{
	  if (!sym->is_argument ())
	    continue;

	  /* Skip the first argument (the CFA); the second argument
	     is the handler we want.  */
	  if (argno == 0)
	    ++argno;
	  else
	    {
	      insert_exception_resume_breakpoint (ecs->event_thread,
						  b, frame, sym);
	      break;
	    }
	}
    }
  catch (const gdb_exception_error &e)
    {
      /* Errors looking up the handler are not fatal; just don't set
	 the breakpoint.  */
    }
}
8113
8114 static void
8115 stop_waiting (struct execution_control_state *ecs)
8116 {
8117 infrun_debug_printf ("stop_waiting");
8118
8119 /* Let callers know we don't want to wait for the inferior anymore. */
8120 ecs->wait_some_more = 0;
8121
8122 /* If all-stop, but there exists a non-stop target, stop all
8123 threads now that we're presenting the stop to the user. */
8124 if (!non_stop && exists_non_stop_target ())
8125 stop_all_threads ("presenting stop to user in all-stop");
8126 }
8127
8128 /* Like keep_going, but passes the signal to the inferior, even if the
8129 signal is set to nopass. */
8130
static void
keep_going_pass_signal (struct execution_control_state *ecs)
{
  /* The event thread must be the selected thread, and must not
     already have been resumed.  */
  gdb_assert (ecs->event_thread->ptid == inferior_ptid);
  gdb_assert (!ecs->event_thread->resumed ());

  /* Save the pc before execution, to compare with pc after stop.  */
  ecs->event_thread->prev_pc
    = regcache_read_pc_protected (get_thread_regcache (ecs->event_thread));

  if (ecs->event_thread->control.trap_expected)
    {
      struct thread_info *tp = ecs->event_thread;

      infrun_debug_printf ("%s has trap_expected set, "
			   "resuming to collect trap",
			   tp->ptid.to_string ().c_str ());

      /* We haven't yet gotten our trap, and either: intercepted a
	 non-signal event (e.g., a fork); or took a signal which we
	 are supposed to pass through to the inferior.  Simply
	 continue.  */
      resume (ecs->event_thread->stop_signal ());
    }
  else if (step_over_info_valid_p ())
    {
      /* Another thread is stepping over a breakpoint in-line.  If
	 this thread needs a step-over too, queue the request.  In
	 either case, this resume must be deferred for later.  */
      struct thread_info *tp = ecs->event_thread;

      if (ecs->hit_singlestep_breakpoint
	  || thread_still_needs_step_over (tp))
	{
	  infrun_debug_printf ("step-over already in progress: "
			       "step-over for %s deferred",
			       tp->ptid.to_string ().c_str ());
	  global_thread_step_over_chain_enqueue (tp);
	}
      else
	infrun_debug_printf ("step-over in progress: resume of %s deferred",
			     tp->ptid.to_string ().c_str ());
    }
  else
    {
      struct regcache *regcache = get_current_regcache ();
      int remove_bp;
      int remove_wps;
      step_over_what step_what;

      /* Either the trap was not expected, but we are continuing
	 anyway (if we got a signal, the user asked it be passed to
	 the child)
	 -- or --
	 We got our expected trap, but decided we should resume from
	 it.

	 We're going to run this baby now!

	 Note that insert_breakpoints won't try to re-insert
	 already inserted breakpoints.  Therefore, we don't
	 care if breakpoints were already inserted, or not.  */

      /* If we need to step over a breakpoint, and we're not using
	 displaced stepping to do so, insert all breakpoints
	 (watchpoints, etc.) but the one we're stepping over, step one
	 instruction, and then re-insert the breakpoint when that step
	 is finished.  */

      step_what = thread_still_needs_step_over (ecs->event_thread);

      remove_bp = (ecs->hit_singlestep_breakpoint
		   || (step_what & STEP_OVER_BREAKPOINT));
      remove_wps = (step_what & STEP_OVER_WATCHPOINT);

      /* We can't use displaced stepping if we need to step past a
	 watchpoint.  The instruction copied to the scratch pad would
	 still trigger the watchpoint.  */
      if (remove_bp
	  && (remove_wps || !use_displaced_stepping (ecs->event_thread)))
	{
	  /* In-line step-over: record which breakpoint/watchpoints
	     to leave removed, and for which thread.  */
	  set_step_over_info (regcache->aspace (),
			      regcache_read_pc (regcache), remove_wps,
			      ecs->event_thread->global_num);
	}
      else if (remove_wps)
	set_step_over_info (NULL, 0, remove_wps, -1);

      /* If we now need to do an in-line step-over, we need to stop
	 all other threads.  Note this must be done before
	 insert_breakpoints below, because that removes the breakpoint
	 we're about to step over, otherwise other threads could miss
	 it.  */
      if (step_over_info_valid_p () && target_is_non_stop_p ())
	stop_all_threads ("starting in-line step-over");

      /* Stop stepping if inserting breakpoints fails.  */
      try
	{
	  insert_breakpoints ();
	}
      catch (const gdb_exception_error &e)
	{
	  exception_print (gdb_stderr, e);
	  stop_waiting (ecs);
	  clear_step_over_info ();
	  return;
	}

      /* Expect a trap if we're stepping over either a breakpoint or
	 a watchpoint.  */
      ecs->event_thread->control.trap_expected = (remove_bp || remove_wps);

      resume (ecs->event_thread->stop_signal ());
    }

  prepare_to_wait (ecs);
}
8247
8248 /* Called when we should continue running the inferior, because the
8249 current event doesn't cause a user visible stop. This does the
8250 resuming part; waiting for the next event is done elsewhere. */
8251
8252 static void
8253 keep_going (struct execution_control_state *ecs)
8254 {
8255 if (ecs->event_thread->control.trap_expected
8256 && ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP)
8257 ecs->event_thread->control.trap_expected = 0;
8258
8259 if (!signal_program[ecs->event_thread->stop_signal ()])
8260 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
8261 keep_going_pass_signal (ecs);
8262 }
8263
8264 /* This function normally comes after a resume, before
8265 handle_inferior_event exits. It takes care of any last bits of
8266 housekeeping, and sets the all-important wait_some_more flag. */
8267
8268 static void
8269 prepare_to_wait (struct execution_control_state *ecs)
8270 {
8271 infrun_debug_printf ("prepare_to_wait");
8272
8273 ecs->wait_some_more = 1;
8274
8275 /* If the target can't async, emulate it by marking the infrun event
8276 handler such that as soon as we get back to the event-loop, we
8277 immediately end up in fetch_inferior_event again calling
8278 target_wait. */
8279 if (!target_can_async_p ())
8280 mark_infrun_async_event_handler ();
8281 }
8282
8283 /* We are done with the step range of a step/next/si/ni command.
8284 Called once for each n of a "step n" operation. */
8285
8286 static void
8287 end_stepping_range (struct execution_control_state *ecs)
8288 {
8289 ecs->event_thread->control.stop_step = 1;
8290 stop_waiting (ecs);
8291 }
8292
/* Several print_*_reason functions to print why the inferior has stopped.
   We always print something when the inferior exits, or receives a signal.
   The rest of the cases are dealt with later on in normal_stop and
   print_it_typical.  Ideally there should be a call to one of these
   print_*_reason functions from handle_inferior_event each time
   stop_waiting is called.

   Note that we don't call these directly, instead we delegate that to
   the interpreters, through observers.  Interpreters then call these
   with whatever uiout is right.  */
8303
8304 void
8305 print_end_stepping_range_reason (struct ui_out *uiout)
8306 {
8307 /* For CLI-like interpreters, print nothing. */
8308
8309 if (uiout->is_mi_like_p ())
8310 {
8311 uiout->field_string ("reason",
8312 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
8313 }
8314 }
8315
/* Print the reason for a stop where the inferior was terminated by
   signal SIGGNAL.  Interleaves annotation markers with the output, so
   the call order below is significant.  */

void
print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
{
  annotate_signalled ();
  /* MI consumers additionally get a machine-readable "reason" field.  */
  if (uiout->is_mi_like_p ())
    uiout->field_string
      ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
  uiout->text ("\nProgram terminated with signal ");
  annotate_signal_name ();
  uiout->field_string ("signal-name",
		       gdb_signal_to_name (siggnal));
  annotate_signal_name_end ();
  uiout->text (", ");
  annotate_signal_string ();
  uiout->field_string ("signal-meaning",
		       gdb_signal_to_string (siggnal));
  annotate_signal_string_end ();
  uiout->text (".\n");
  uiout->text ("The program no longer exists.\n");
}
8336
/* Print the reason for a stop where the inferior exited with status
   EXITSTATUS (0 means a normal exit).  */

void
print_exited_reason (struct ui_out *uiout, int exitstatus)
{
  struct inferior *inf = current_inferior ();
  std::string pidstr = target_pid_to_str (ptid_t (inf->pid));

  annotate_exited (exitstatus);
  if (exitstatus)
    {
      if (uiout->is_mi_like_p ())
	uiout->field_string ("reason", async_reason_lookup (EXEC_ASYNC_EXITED));
      /* The exit code is shown in octal, with an explicit leading 0
	 ("0%o").  */
      std::string exit_code_str
	= string_printf ("0%o", (unsigned int) exitstatus);
      uiout->message ("[Inferior %s (%s) exited with code %pF]\n",
		      plongest (inf->num), pidstr.c_str (),
		      string_field ("exit-code", exit_code_str.c_str ()));
    }
  else
    {
      if (uiout->is_mi_like_p ())
	uiout->field_string
	  ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
      uiout->message ("[Inferior %s (%s) exited normally]\n",
		      plongest (inf->num), pidstr.c_str ());
    }
}
8363
/* Print the reason for a stop where the inferior received signal
   SIGGNAL.  GDB_SIGNAL_0 means the program simply stopped (no real
   signal).  */

void
print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
{
  struct thread_info *thr = inferior_thread ();

  annotate_signal ();

  /* MI emits no "Thread"/"Program" prefix; the "reason" field is
     emitted further below instead.  */
  if (uiout->is_mi_like_p ())
    ;
  else if (show_thread_that_caused_stop ())
    {
      uiout->text ("\nThread ");
      uiout->field_string ("thread-id", print_thread_id (thr));

      const char *name = thread_name (thr);
      if (name != NULL)
	{
	  uiout->text (" \"");
	  uiout->field_string ("name", name);
	  uiout->text ("\"");
	}
    }
  else
    uiout->text ("\nProgram");

  if (siggnal == GDB_SIGNAL_0 && !uiout->is_mi_like_p ())
    uiout->text (" stopped");
  else
    {
      uiout->text (" received signal ");
      annotate_signal_name ();
      if (uiout->is_mi_like_p ())
	uiout->field_string
	  ("reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
      uiout->field_string ("signal-name", gdb_signal_to_name (siggnal));
      annotate_signal_name_end ();
      uiout->text (", ");
      annotate_signal_string ();
      uiout->field_string ("signal-meaning", gdb_signal_to_string (siggnal));

      /* Let the architecture append extra signal details (e.g. fault
	 info), if it provides a hook for that.  */
      struct regcache *regcache = get_current_regcache ();
      struct gdbarch *gdbarch = regcache->arch ();
      if (gdbarch_report_signal_info_p (gdbarch))
	gdbarch_report_signal_info (gdbarch, uiout, siggnal);

      annotate_signal_string_end ();
    }
  uiout->text (".\n");
}
8413
/* Print the reason for a stop at the end of recorded (reverse)
   execution history.  */

void
print_no_history_reason (struct ui_out *uiout)
{
  uiout->text ("\nNo more reverse-execution history.\n");
}
8419
/* Print current location without a level number, if we have changed
   functions or hit a breakpoint.  Print source line if we have one.
   bpstat_print contains the logic deciding in detail what to print,
   based on the event(s) that just occurred.  */

static void
print_stop_location (const target_waitstatus &ws)
{
  int bpstat_ret;
  enum print_what source_flag;
  /* Whether to call print_stack_frame at all; PRINT_NOTHING below
     clears it.  */
  int do_frame_printing = 1;
  struct thread_info *tp = inferior_thread ();

  /* Let the breakpoint machinery print its part first; its return
     value tells us how much location/source info is still needed.  */
  bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws.kind ());
  switch (bpstat_ret)
    {
    case PRINT_UNKNOWN:
      /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
	 should) carry around the function and does (or should) use
	 that when doing a frame comparison.  */
      if (tp->control.stop_step
	  && frame_id_eq (tp->control.step_frame_id,
			  get_frame_id (get_current_frame ()))
	  && (tp->control.step_start_function
	      == find_pc_function (tp->stop_pc ())))
	{
	  /* Finished step, just print source line.  */
	  source_flag = SRC_LINE;
	}
      else
	{
	  /* Print location and source line.  */
	  source_flag = SRC_AND_LOC;
	}
      break;
    case PRINT_SRC_AND_LOC:
      /* Print location and source line.  */
      source_flag = SRC_AND_LOC;
      break;
    case PRINT_SRC_ONLY:
      source_flag = SRC_LINE;
      break;
    case PRINT_NOTHING:
      /* Something bogus.  */
      source_flag = SRC_LINE;
      do_frame_printing = 0;
      break;
    default:
      internal_error (__FILE__, __LINE__, _("Unknown value."));
    }

  /* The behavior of this routine with respect to the source
     flag is:
     SRC_LINE: Print only source line
     LOCATION: Print only location
     SRC_AND_LOC: Print location and source line.  */
  if (do_frame_printing)
    print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);
}
8479
/* See infrun.h.  Print the stop event to UIOUT: the stop location,
   optionally (when DISPLAYS) the auto-display expressions, and the
   return value of a just-finished thread FSM (e.g. a "finish"
   command), if any.  */

void
print_stop_event (struct ui_out *uiout, bool displays)
{
  struct target_waitstatus last;
  struct thread_info *tp;

  get_last_target_status (nullptr, nullptr, &last);

  {
    /* Temporarily redirect all output to UIOUT for the duration of
       the location/display printing.  */
    scoped_restore save_uiout = make_scoped_restore (&current_uiout, uiout);

    print_stop_location (last);

    /* Display the auto-display expressions.  */
    if (displays)
      do_displays ();
  }

  tp = inferior_thread ();
  if (tp->thread_fsm () != nullptr
      && tp->thread_fsm ()->finished_p ())
    {
      struct return_value_info *rv;

      rv = tp->thread_fsm ()->return_value ();
      if (rv != nullptr)
	print_return_value (uiout, rv);
    }
}
8511
8512 /* See infrun.h. */
8513
8514 void
8515 maybe_remove_breakpoints (void)
8516 {
8517 if (!breakpoints_should_be_inserted_now () && target_has_execution ())
8518 {
8519 if (remove_breakpoints ())
8520 {
8521 target_terminal::ours_for_output ();
8522 gdb_printf (_("Cannot remove breakpoints because "
8523 "program is no longer writable.\nFurther "
8524 "execution is probably impossible.\n"));
8525 }
8526 }
8527 }
8528
/* The execution context that just caused a normal stop.  */

struct stop_context
{
  stop_context ();

  DISABLE_COPY_AND_ASSIGN (stop_context);

  /* Return true if the current context no longer matches this saved
     one.  */
  bool changed () const;

  /* The stop ID.  */
  ULONGEST stop_id;

  /* The event PTID.  */

  ptid_t ptid;

  /* If stopped for a thread event, this is the thread that caused the
     stop.  */
  thread_info_ref thread;

  /* The inferior that caused the stop.  */
  int inf_num;
};
8553
/* Initializes a new stop context.  If stopped for a thread event, this
   takes a strong reference to the thread.  */

stop_context::stop_context ()
{
  /* Snapshot the identifying bits of the current stop.  */
  stop_id = get_stop_id ();
  ptid = inferior_ptid;
  inf_num = current_inferior ()->num;

  if (inferior_ptid != null_ptid)
    {
      /* Take a strong reference so that the thread can't be deleted
	 yet.  */
      thread = thread_info_ref::new_reference (inferior_thread ());
    }
}
8570
8571 /* Return true if the current context no longer matches the saved stop
8572 context. */
8573
8574 bool
8575 stop_context::changed () const
8576 {
8577 if (ptid != inferior_ptid)
8578 return true;
8579 if (inf_num != current_inferior ()->num)
8580 return true;
8581 if (thread != NULL && thread->state != THREAD_STOPPED)
8582 return true;
8583 if (get_stop_id () != stop_id)
8584 return true;
8585 return false;
8586 }
8587
/* See infrun.h.  Present a normal stop to the user: finish thread
   states, emit the various notifications, run the stop hook, and
   notify the stop observers.  Returns nonzero if the stop hook changed
   the execution context (in which case the stop observers are NOT
   notified), zero otherwise.  */

int
normal_stop (void)
{
  struct target_waitstatus last;

  get_last_target_status (nullptr, nullptr, &last);

  new_stop_id ();

  /* If an exception is thrown from this point on, make sure to
     propagate GDB's knowledge of the executing state to the
     frontend/user running state.  A QUIT is an easy exception to see
     here, so do this before any filtered output.  */

  /* The set of threads whose user-visible state must be transitioned
     to "stopped"; null_ptid means none.  */
  ptid_t finish_ptid = null_ptid;

  if (!non_stop)
    finish_ptid = minus_one_ptid;
  else if (last.kind () == TARGET_WAITKIND_SIGNALLED
	   || last.kind () == TARGET_WAITKIND_EXITED)
    {
      /* On some targets, we may still have live threads in the
	 inferior when we get a process exit event.  E.g., for
	 "checkpoint", when the current checkpoint/fork exits,
	 linux-fork.c automatically switches to another fork from
	 within target_mourn_inferior.  */
      if (inferior_ptid != null_ptid)
	finish_ptid = ptid_t (inferior_ptid.pid ());
    }
  else if (last.kind () != TARGET_WAITKIND_NO_RESUMED)
    finish_ptid = inferior_ptid;

  /* Emplaced only when there is something to finish; resetting it
     below performs the actual state transition.  */
  gdb::optional<scoped_finish_thread_state> maybe_finish_thread_state;
  if (finish_ptid != null_ptid)
    {
      maybe_finish_thread_state.emplace
	(user_visible_resume_target (finish_ptid), finish_ptid);
    }

  /* As we're presenting a stop, and potentially removing breakpoints,
     update the thread list so we can tell whether there are threads
     running on the target.  With target remote, for example, we can
     only learn about new threads when we explicitly update the thread
     list.  Do this before notifying the interpreters about signal
     stops, end of stepping ranges, etc., so that the "new thread"
     output is emitted before e.g., "Program received signal FOO",
     instead of after.  */
  update_thread_list ();

  if (last.kind () == TARGET_WAITKIND_STOPPED && stopped_by_random_signal)
    gdb::observers::signal_received.notify (inferior_thread ()->stop_signal ());

  /* As with the notification of thread events, we want to delay
     notifying the user that we've switched thread context until
     the inferior actually stops.

     There's no point in saying anything if the inferior has exited.
     Note that SIGNALLED here means "exited with a signal", not
     "received a signal".

     Also skip saying anything in non-stop mode.  In that mode, as we
     don't want GDB to switch threads behind the user's back, to avoid
     races where the user is typing a command to apply to thread x,
     but GDB switches to thread y before the user finishes entering
     the command, fetch_inferior_event installs a cleanup to restore
     the current thread back to the thread the user had selected right
     after this event is handled, so we're not really switching, only
     informing of a stop.  */
  if (!non_stop
      && previous_inferior_ptid != inferior_ptid
      && target_has_execution ()
      && last.kind () != TARGET_WAITKIND_SIGNALLED
      && last.kind () != TARGET_WAITKIND_EXITED
      && last.kind () != TARGET_WAITKIND_NO_RESUMED)
    {
      SWITCH_THRU_ALL_UIS ()
	{
	  target_terminal::ours_for_output ();
	  gdb_printf (_("[Switching to %s]\n"),
		      target_pid_to_str (inferior_ptid).c_str ());
	  annotate_thread_changed ();
	}
      previous_inferior_ptid = inferior_ptid;
    }

  if (last.kind () == TARGET_WAITKIND_NO_RESUMED)
    {
      SWITCH_THRU_ALL_UIS ()
	if (current_ui->prompt_state == PROMPT_BLOCKED)
	  {
	    target_terminal::ours_for_output ();
	    gdb_printf (_("No unwaited-for children left.\n"));
	  }
    }

  /* Note: this depends on the update_thread_list call above.  */
  maybe_remove_breakpoints ();

  /* If an auto-display called a function and that got a signal,
     delete that auto-display to avoid an infinite recursion.  */

  if (stopped_by_random_signal)
    disable_current_display ();

  SWITCH_THRU_ALL_UIS ()
    {
      async_enable_stdin ();
    }

  /* Let the user/frontend see the threads as stopped.  */
  maybe_finish_thread_state.reset ();

  /* Select innermost stack frame - i.e., current frame is frame 0,
     and current location is based on that.  Handle the case where the
     dummy call is returning after being stopped.  E.g. the dummy call
     previously hit a breakpoint.  (If the dummy call returns
     normally, we won't reach here.)  Do this before the stop hook is
     run, so that it doesn't get to see the temporary dummy frame,
     which is not where we'll present the stop.  */
  if (has_stack_frames ())
    {
      if (stop_stack_dummy == STOP_STACK_DUMMY)
	{
	  /* Pop the empty frame that contains the stack dummy.  This
	     also restores inferior state prior to the call (struct
	     infcall_suspend_state).  */
	  struct frame_info *frame = get_current_frame ();

	  gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
	  frame_pop (frame);
	  /* frame_pop calls reinit_frame_cache as the last thing it
	     does which means there's now no selected frame.  */
	}

      select_frame (get_current_frame ());

      /* Set the current source location.  */
      set_current_sal_from_frame (get_current_frame ());
    }

  /* Look up the hook_stop and run it (CLI internally handles problem
     of stop_command's pre-hook not existing).  */
  stop_context saved_context;

  try
    {
      execute_cmd_pre_hook (stop_command);
    }
  catch (const gdb_exception &ex)
    {
      /* The hook failing must not prevent presenting the stop, so
	 just report the error and carry on.  */
      exception_fprintf (gdb_stderr, ex,
			 "Error while running hook_stop:\n");
    }

  /* If the stop hook resumes the target, then there's no point in
     trying to notify about the previous stop; its context is
     gone.  Likewise if the command switches thread or inferior --
     the observers would print a stop for the wrong
     thread/inferior.  */
  if (saved_context.changed ())
    return 1;

  /* Notify observers about the stop.  This is where the interpreters
     print the stop event.  */
  if (inferior_ptid != null_ptid)
    gdb::observers::normal_stop.notify (inferior_thread ()->control.stop_bpstat,
					stop_print_frame);
  else
    gdb::observers::normal_stop.notify (NULL, stop_print_frame);

  annotate_stopped ();

  if (target_has_execution ())
    {
      if (last.kind () != TARGET_WAITKIND_SIGNALLED
	  && last.kind () != TARGET_WAITKIND_EXITED
	  && last.kind () != TARGET_WAITKIND_NO_RESUMED)
	/* Delete the breakpoint we stopped at, if it wants to be deleted.
	   Delete any breakpoint that is to be deleted at the next stop.  */
	breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
    }

  /* Try to get rid of automatically added inferiors that are no
     longer needed.  Keeping those around slows down things linearly.
     Note that this never removes the current inferior.  */
  prune_inferiors ();

  return 0;
}
8779 \f
/* Return the "stop" setting for signal SIGNO: nonzero if GDB stops
   when the inferior receives it.  */

int
signal_stop_state (int signo)
{
  return signal_stop[signo];
}
8785
/* Return the "print" setting for signal SIGNO: nonzero if GDB
   mentions the signal when the inferior receives it.  */

int
signal_print_state (int signo)
{
  return signal_print[signo];
}
8791
/* Return the "pass" setting for signal SIGNO: nonzero if the signal
   is forwarded to the program.  */

int
signal_pass_state (int signo)
{
  return signal_program[signo];
}
8797
8798 static void
8799 signal_cache_update (int signo)
8800 {
8801 if (signo == -1)
8802 {
8803 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
8804 signal_cache_update (signo);
8805
8806 return;
8807 }
8808
8809 signal_pass[signo] = (signal_stop[signo] == 0
8810 && signal_print[signo] == 0
8811 && signal_program[signo] == 1
8812 && signal_catch[signo] == 0);
8813 }
8814
8815 int
8816 signal_stop_update (int signo, int state)
8817 {
8818 int ret = signal_stop[signo];
8819
8820 signal_stop[signo] = state;
8821 signal_cache_update (signo);
8822 return ret;
8823 }
8824
8825 int
8826 signal_print_update (int signo, int state)
8827 {
8828 int ret = signal_print[signo];
8829
8830 signal_print[signo] = state;
8831 signal_cache_update (signo);
8832 return ret;
8833 }
8834
8835 int
8836 signal_pass_update (int signo, int state)
8837 {
8838 int ret = signal_program[signo];
8839
8840 signal_program[signo] = state;
8841 signal_cache_update (signo);
8842 return ret;
8843 }
8844
8845 /* Update the global 'signal_catch' from INFO and notify the
8846 target. */
8847
8848 void
8849 signal_catch_update (const unsigned int *info)
8850 {
8851 int i;
8852
8853 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
8854 signal_catch[i] = info[i] > 0;
8855 signal_cache_update (-1);
8856 target_pass_signals (signal_pass);
8857 }
8858
/* Print the column headers for the "info signals" / "handle" output
   table.  */

static void
sig_print_header (void)
{
  gdb_printf (_("Signal        Stop\tPrint\tPass "
		"to program\tDescription\n"));
}
8865
8866 static void
8867 sig_print_info (enum gdb_signal oursig)
8868 {
8869 const char *name = gdb_signal_to_name (oursig);
8870 int name_padding = 13 - strlen (name);
8871
8872 if (name_padding <= 0)
8873 name_padding = 0;
8874
8875 gdb_printf ("%s", name);
8876 gdb_printf ("%*.*s ", name_padding, name_padding, " ");
8877 gdb_printf ("%s\t", signal_stop[oursig] ? "Yes" : "No");
8878 gdb_printf ("%s\t", signal_print[oursig] ? "Yes" : "No");
8879 gdb_printf ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
8880 gdb_printf ("%s\n", gdb_signal_to_string (oursig));
8881 }
8882
/* Specify how various signals in the inferior should be handled.
   ARGS mixes signal specifications (names, numbers, or <LOW>-<HIGH>
   ranges) with action keywords (stop/nostop, print/noprint,
   ignore/noignore, pass/nopass, all); actions apply cumulatively to
   all signals mentioned so far.  */

static void
handle_command (const char *args, int from_tty)
{
  int digits, wordlen;
  int sigfirst, siglast;
  enum gdb_signal oursig;
  int allsigs;

  if (args == NULL)
    {
      error_no_arg (_("signal to handle"));
    }

  /* Allocate and zero an array of flags for which signals to handle.  */

  const size_t nsigs = GDB_SIGNAL_LAST;
  unsigned char sigs[nsigs] {};

  /* Break the command line up into args.  */

  gdb_argv built_argv (args);

  /* Walk through the args, looking for signal oursigs, signal names, and
     actions.  Signal numbers and signal names may be interspersed with
     actions, with the actions being performed for all signals cumulatively
     specified.  Signal ranges can be specified as <LOW>-<HIGH>.  */

  for (char *arg : built_argv)
    {
      wordlen = strlen (arg);
      /* DIGITS counts the leading decimal digits of ARG; used below
	 to recognize numeric specs and to find a '-' range separator.  */
      for (digits = 0; isdigit (arg[digits]); digits++)
	{;
	}
      allsigs = 0;
      sigfirst = siglast = -1;

      /* Keywords may be abbreviated; the minimum lengths below
	 (1 for "all"/"stop"/"ignore", 2 for "print"/"pass", ...)
	 resolve the ambiguous prefixes.  NOTE(review): SET_SIGS /
	 UNSET_SIGS are macros defined elsewhere in this file; they
	 appear to apply the action over all flagged signals in SIGS
	 -- confirm at their definition.  */
      if (wordlen >= 1 && !strncmp (arg, "all", wordlen))
	{
	  /* Apply action to all signals except those used by the
	     debugger.  Silently skip those.  */
	  allsigs = 1;
	  sigfirst = 0;
	  siglast = nsigs - 1;
	}
      else if (wordlen >= 1 && !strncmp (arg, "stop", wordlen))
	{
	  SET_SIGS (nsigs, sigs, signal_stop);
	  SET_SIGS (nsigs, sigs, signal_print);
	}
      else if (wordlen >= 1 && !strncmp (arg, "ignore", wordlen))
	{
	  UNSET_SIGS (nsigs, sigs, signal_program);
	}
      else if (wordlen >= 2 && !strncmp (arg, "print", wordlen))
	{
	  SET_SIGS (nsigs, sigs, signal_print);
	}
      else if (wordlen >= 2 && !strncmp (arg, "pass", wordlen))
	{
	  SET_SIGS (nsigs, sigs, signal_program);
	}
      else if (wordlen >= 3 && !strncmp (arg, "nostop", wordlen))
	{
	  UNSET_SIGS (nsigs, sigs, signal_stop);
	}
      else if (wordlen >= 3 && !strncmp (arg, "noignore", wordlen))
	{
	  SET_SIGS (nsigs, sigs, signal_program);
	}
      else if (wordlen >= 4 && !strncmp (arg, "noprint", wordlen))
	{
	  UNSET_SIGS (nsigs, sigs, signal_print);
	  UNSET_SIGS (nsigs, sigs, signal_stop);
	}
      else if (wordlen >= 4 && !strncmp (arg, "nopass", wordlen))
	{
	  UNSET_SIGS (nsigs, sigs, signal_program);
	}
      else if (digits > 0)
	{
	  /* It is numeric.  The numeric signal refers to our own
	     internal signal numbering from target.h, not to host/target
	     signal  number.  This is a feature; users really should be
	     using symbolic names anyway, and the common ones like
	     SIGHUP, SIGINT, SIGALRM, etc. will work right anyway.  */

	  sigfirst = siglast = (int)
	    gdb_signal_from_command (atoi (arg));
	  if (arg[digits] == '-')
	    {
	      siglast = (int)
		gdb_signal_from_command (atoi (arg + digits + 1));
	    }
	  if (sigfirst > siglast)
	    {
	      /* Bet he didn't figure we'd think of this case...  */
	      std::swap (sigfirst, siglast);
	    }
	}
      else
	{
	  oursig = gdb_signal_from_name (arg);
	  if (oursig != GDB_SIGNAL_UNKNOWN)
	    {
	      sigfirst = siglast = (int) oursig;
	    }
	  else
	    {
	      /* Not a number and not a recognized flag word => complain.  */
	      error (_("Unrecognized or ambiguous flag word: \"%s\"."), arg);
	    }
	}

      /* If any signal numbers or symbol names were found, set flags for
	 which signals to apply actions to.  */

      for (int signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
	{
	  switch ((enum gdb_signal) signum)
	    {
	    case GDB_SIGNAL_TRAP:
	    case GDB_SIGNAL_INT:
	      /* SIGTRAP and SIGINT are used internally by the
		 debugger, so changing them requires confirmation
		 (except under an explicit per-signal request).  */
	      if (!allsigs && !sigs[signum])
		{
		  if (query (_("%s is used by the debugger.\n\
Are you sure you want to change it? "),
			     gdb_signal_to_name ((enum gdb_signal) signum)))
		    {
		      sigs[signum] = 1;
		    }
		  else
		    gdb_printf (_("Not confirmed, unchanged.\n"));
		}
	      break;
	    case GDB_SIGNAL_0:
	    case GDB_SIGNAL_DEFAULT:
	    case GDB_SIGNAL_UNKNOWN:
	      /* Make sure that "all" doesn't print these.  */
	      break;
	    default:
	      sigs[signum] = 1;
	      break;
	    }
	}
    }

  /* If anything changed: rebuild the caches, notify the target, and
     (interactively) show the affected rows.  */
  for (int signum = 0; signum < nsigs; signum++)
    if (sigs[signum])
      {
	signal_cache_update (-1);
	target_pass_signals (signal_pass);
	target_program_signals (signal_program);

	if (from_tty)
	  {
	    /* Show the results.  */
	    sig_print_header ();
	    for (; signum < nsigs; signum++)
	      if (sigs[signum])
		sig_print_info ((enum gdb_signal) signum);
	  }

	break;
      }
}
9050
/* Complete the "handle" command.  Offers both signal names and the
   action keywords accepted by handle_command.  */

static void
handle_completer (struct cmd_list_element *ignore,
		  completion_tracker &tracker,
		  const char *text, const char *word)
{
  /* Must match the action keywords recognized in handle_command.  */
  static const char * const keywords[] =
    {
      "all",
      "stop",
      "ignore",
      "print",
      "pass",
      "nostop",
      "noignore",
      "noprint",
      "nopass",
      NULL,
    };

  signal_completer (ignore, tracker, text, word);
  complete_on_enum (tracker, keywords, word, word);
}
9075
9076 enum gdb_signal
9077 gdb_signal_from_command (int num)
9078 {
9079 if (num >= 1 && num <= 15)
9080 return (enum gdb_signal) num;
9081 error (_("Only signals 1-15 are valid as numeric signals.\n\
9082 Use \"info signals\" for a list of symbolic signals."));
9083 }
9084
/* Print current contents of the tables set by the handle command.
   It is possible we should just be printing signals actually used
   by the current target (but for things to work right when switching
   targets, all signals should be in the signal tables).  */

static void
info_signals_command (const char *signum_exp, int from_tty)
{
  enum gdb_signal oursig;

  sig_print_header ();

  /* With an argument, print just that one signal's row.  */
  if (signum_exp)
    {
      /* First see if this is a symbol name.  */
      oursig = gdb_signal_from_name (signum_exp);
      if (oursig == GDB_SIGNAL_UNKNOWN)
	{
	  /* No, try numeric.  */
	  oursig =
	    gdb_signal_from_command (parse_and_eval_long (signum_exp));
	}
      sig_print_info (oursig);
      return;
    }

  gdb_printf ("\n");
  /* These ugly casts brought to you by the native VAX compiler.  */
  for (oursig = GDB_SIGNAL_FIRST;
       (int) oursig < (int) GDB_SIGNAL_LAST;
       oursig = (enum gdb_signal) ((int) oursig + 1))
    {
      QUIT;

      /* Skip the pseudo-signals that have no real handling state.  */
      if (oursig != GDB_SIGNAL_UNKNOWN
	  && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
	sig_print_info (oursig);
    }

  gdb_printf (_("\nUse the \"handle\" command "
		"to change these tables.\n"));
}
9127
9128 /* The $_siginfo convenience variable is a bit special. We don't know
9129 for sure the type of the value until we actually have a chance to
9130 fetch the data. The type can change depending on gdbarch, so it is
   also dependent on which thread you have selected.  This is dealt
   with by a combination of:

     1. making $_siginfo be an internalvar that creates a new value on
     access.

     2. making the value of $_siginfo be an lval_computed value.  */
9137
/* This function implements the lval_computed support for reading a
   $_siginfo value.  Fills V's buffer from the target's
   TARGET_OBJECT_SIGNAL_INFO object; errors out if the full length
   cannot be read.  */

static void
siginfo_value_read (struct value *v)
{
  LONGEST transferred;

  /* If we can access registers, so can we access $_siginfo.  Likewise
     vice versa.  */
  validate_registers_access ();

  transferred =
    target_read (current_inferior ()->top_target (),
		 TARGET_OBJECT_SIGNAL_INFO,
		 NULL,
		 value_contents_all_raw (v).data (),
		 value_offset (v),
		 TYPE_LENGTH (value_type (v)));

  /* Partial reads are treated as failures too.  */
  if (transferred != TYPE_LENGTH (value_type (v)))
    error (_("Unable to read siginfo"));
}
9161
/* This function implements the lval_computed support for writing a
   $_siginfo value.  Writes FROMVAL's contents to the target's
   TARGET_OBJECT_SIGNAL_INFO object at V's offset; errors out if the
   full length cannot be written.  */

static void
siginfo_value_write (struct value *v, struct value *fromval)
{
  LONGEST transferred;

  /* If we can access registers, so can we access $_siginfo.  Likewise
     vice versa.  */
  validate_registers_access ();

  transferred = target_write (current_inferior ()->top_target (),
			      TARGET_OBJECT_SIGNAL_INFO,
			      NULL,
			      value_contents_all_raw (fromval).data (),
			      value_offset (v),
			      TYPE_LENGTH (value_type (fromval)));

  /* Partial writes are treated as failures too.  */
  if (transferred != TYPE_LENGTH (value_type (fromval)))
    error (_("Unable to write siginfo"));
}
9184
/* Computed-value read/write hooks backing the $_siginfo convenience
   variable.  */

static const struct lval_funcs siginfo_value_funcs =
  {
    siginfo_value_read,
    siginfo_value_write
  };
9190
/* Return a new value with the correct type for the siginfo object of
   the current thread using architecture GDBARCH.  Return a void value
   if there's no object available.  */

static struct value *
siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
		    void *ignore)
{
  /* $_siginfo only makes sense with a live thread on an architecture
     that describes its siginfo type.  */
  if (target_has_stack ()
      && inferior_ptid != null_ptid
      && gdbarch_get_siginfo_type_p (gdbarch))
    {
      struct type *type = gdbarch_get_siginfo_type (gdbarch);

      /* Reads/writes go through siginfo_value_funcs above.  */
      return allocate_computed_value (type, &siginfo_value_funcs, NULL);
    }

  return allocate_value (builtin_type (gdbarch)->builtin_void);
}
9210
9211 \f
/* infcall_suspend_state contains state about the program itself like its
   registers and any signal it received when it last stopped.
   This state must be restored regardless of how the inferior function call
   ends (either successfully, or after it hits a breakpoint or signal)
   if the program is to properly continue where it left off.  */

class infcall_suspend_state
{
public:
  /* Capture state from GDBARCH, TP, and REGCACHE that must be restored
     once the inferior function call has finished.  */
  infcall_suspend_state (struct gdbarch *gdbarch,
			 const struct thread_info *tp,
			 struct regcache *regcache)
    : m_registers (new readonly_detached_regcache (*regcache))
  {
    tp->save_suspend_to (m_thread_suspend);

    gdb::unique_xmalloc_ptr<gdb_byte> siginfo_data;

    /* Snapshot the siginfo too, when the architecture describes it.  */
    if (gdbarch_get_siginfo_type_p (gdbarch))
      {
	struct type *type = gdbarch_get_siginfo_type (gdbarch);
	size_t len = TYPE_LENGTH (type);

	siginfo_data.reset ((gdb_byte *) xmalloc (len));

	if (target_read (current_inferior ()->top_target (),
			 TARGET_OBJECT_SIGNAL_INFO, NULL,
			 siginfo_data.get (), 0, len) != len)
	  {
	    /* Errors ignored.  */
	    siginfo_data.reset (nullptr);
	  }
      }

    if (siginfo_data)
      {
	m_siginfo_gdbarch = gdbarch;
	m_siginfo_data = std::move (siginfo_data);
      }
  }

  /* Return a pointer to the stored register state.  */

  readonly_detached_regcache *registers () const
  {
    return m_registers.get ();
  }

  /* Restores the stored state into GDBARCH, TP, and REGCACHE.  */

  void restore (struct gdbarch *gdbarch,
		struct thread_info *tp,
		struct regcache *regcache) const
  {
    tp->restore_suspend_from (m_thread_suspend);

    /* Only write the siginfo back if it was captured for this same
       architecture; see m_siginfo_data's comment below.  */
    if (m_siginfo_gdbarch == gdbarch)
      {
	struct type *type = gdbarch_get_siginfo_type (gdbarch);

	/* Errors ignored.  */
	target_write (current_inferior ()->top_target (),
		      TARGET_OBJECT_SIGNAL_INFO, NULL,
		      m_siginfo_data.get (), 0, TYPE_LENGTH (type));
      }

    /* The inferior can be gone if the user types "print exit(0)"
       (and perhaps other times).  */
    if (target_has_execution ())
      /* NB: The register write goes through to the target.  */
      regcache->restore (registers ());
  }

private:
  /* How the current thread stopped before the inferior function call was
     executed.  */
  struct thread_suspend_state m_thread_suspend;

  /* The registers before the inferior function call was executed.  */
  std::unique_ptr<readonly_detached_regcache> m_registers;

  /* Format of SIGINFO_DATA or NULL if it is not present.  */
  struct gdbarch *m_siginfo_gdbarch = nullptr;

  /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
     TYPE_LENGTH (gdbarch_get_siginfo_type ()).  For different gdbarch the
     content would be invalid.  */
  gdb::unique_xmalloc_ptr<gdb_byte> m_siginfo_data;
};
9303
/* Capture the current thread's suspend state (registers, stop signal,
   siginfo) ahead of an inferior function call.  The caller owns the
   returned state and must restore or discard it.  */

infcall_suspend_state_up
save_infcall_suspend_state ()
{
  struct thread_info *tp = inferior_thread ();
  struct regcache *regcache = get_current_regcache ();
  struct gdbarch *gdbarch = regcache->arch ();

  infcall_suspend_state_up inf_state
    (new struct infcall_suspend_state (gdbarch, tp, regcache));

  /* Having saved the current state, adjust the thread state, discarding
     any stop signal information.  The stop signal is not useful when
     starting an inferior function call, and run_inferior_call will not use
     the signal due to its `proceed' call with GDB_SIGNAL_0.  */
  tp->set_stop_signal (GDB_SIGNAL_0);

  return inf_state;
}
9322
/* Restore inferior session state to INF_STATE.  Also frees INF_STATE;
   the pointer is invalid afterwards.  */

void
restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
{
  struct thread_info *tp = inferior_thread ();
  struct regcache *regcache = get_current_regcache ();
  struct gdbarch *gdbarch = regcache->arch ();

  inf_state->restore (gdbarch, tp, regcache);
  discard_infcall_suspend_state (inf_state);
}
9335
/* Free INF_STATE without restoring it.  */

void
discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
{
  delete inf_state;
}
9341
/* Return the register state captured in INF_STATE.  The regcache
   remains owned by INF_STATE.  */

readonly_detached_regcache *
get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
{
  return inf_state->registers ();
}
9347
/* infcall_control_state contains state regarding gdb's control of the
   inferior itself like stepping control.  It also contains session state like
   the user's currently selected frame.  */

struct infcall_control_state
{
  /* Stepping/breakpoint control state of the thread making the call.  */
  struct thread_control_state thread_control;
  /* Control state of the inferior making the call.  */
  struct inferior_control_state inferior_control;

  /* Other fields:  */
  enum stop_stack_kind stop_stack_dummy = STOP_NONE;
  int stopped_by_random_signal = 0;

  /* ID and level of the selected frame when the inferior function
     call was made.  */
  struct frame_id selected_frame_id {};
  int selected_frame_level = -1;
};
9366
9367 /* Save all of the information associated with the inferior<==>gdb
9368 connection. */
9369
9370 infcall_control_state_up
9371 save_infcall_control_state ()
9372 {
9373 infcall_control_state_up inf_status (new struct infcall_control_state);
9374 struct thread_info *tp = inferior_thread ();
9375 struct inferior *inf = current_inferior ();
9376
9377 inf_status->thread_control = tp->control;
9378 inf_status->inferior_control = inf->control;
9379
9380 tp->control.step_resume_breakpoint = NULL;
9381 tp->control.exception_resume_breakpoint = NULL;
9382
9383 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
9384 chain. If caller's caller is walking the chain, they'll be happier if we
9385 hand them back the original chain when restore_infcall_control_state is
9386 called. */
9387 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
9388
9389 /* Other fields: */
9390 inf_status->stop_stack_dummy = stop_stack_dummy;
9391 inf_status->stopped_by_random_signal = stopped_by_random_signal;
9392
9393 save_selected_frame (&inf_status->selected_frame_id,
9394 &inf_status->selected_frame_level);
9395
9396 return inf_status;
9397 }
9398
/* Restore inferior session state to INF_STATUS.  Takes ownership of
   INF_STATUS and frees it.  */

void
restore_infcall_control_state (struct infcall_control_state *inf_status)
{
  struct thread_info *tp = inferior_thread ();
  struct inferior *inf = current_inferior ();

  /* The current resume breakpoints are about to be replaced by the saved
     ones; mark them for deletion at the next stop rather than leaking
     them.  */
  if (tp->control.step_resume_breakpoint)
    tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;

  if (tp->control.exception_resume_breakpoint)
    tp->control.exception_resume_breakpoint->disposition
      = disp_del_at_next_stop;

  /* Handle the bpstat_copy of the chain.  */
  /* NOTE: this must happen before TP->CONTROL is overwritten below, or
     the copied chain installed by save_infcall_control_state would leak.  */
  bpstat_clear (&tp->control.stop_bpstat);

  tp->control = inf_status->thread_control;
  inf->control = inf_status->inferior_control;

  /* Other fields: */
  stop_stack_dummy = inf_status->stop_stack_dummy;
  stopped_by_random_signal = inf_status->stopped_by_random_signal;

  if (target_has_stack ())
    {
      restore_selected_frame (inf_status->selected_frame_id,
			      inf_status->selected_frame_level);
    }

  delete inf_status;
}
9432
/* Release INF_STATUS without restoring it.  The saved resume breakpoints
   are marked for deletion at the next stop so they do not linger.  */

void
discard_infcall_control_state (struct infcall_control_state *inf_status)
{
  if (inf_status->thread_control.step_resume_breakpoint)
    inf_status->thread_control.step_resume_breakpoint->disposition
      = disp_del_at_next_stop;

  if (inf_status->thread_control.exception_resume_breakpoint)
    inf_status->thread_control.exception_resume_breakpoint->disposition
      = disp_del_at_next_stop;

  /* See save_infcall_control_state for info on stop_bpstat.  */
  bpstat_clear (&inf_status->thread_control.stop_bpstat);

  delete inf_status;
}
9449 \f
9450 /* See infrun.h. */
9451
9452 void
9453 clear_exit_convenience_vars (void)
9454 {
9455 clear_internalvar (lookup_internalvar ("_exitsignal"));
9456 clear_internalvar (lookup_internalvar ("_exitcode"));
9457 }
9458 \f
9459
/* User interface for reverse debugging:
   Set exec-direction / show exec-direction commands
   (returns error unless target implements to_set_exec_direction method).  */

/* Current execution direction, consulted by the run-control code.  */
enum exec_direction_kind execution_direction = EXEC_FORWARD;

/* The two accepted values of the "set exec-direction" command.  */
static const char exec_forward[] = "forward";
static const char exec_reverse[] = "reverse";

/* Backing string for "set/show exec-direction"; mirrored into
   EXECUTION_DIRECTION by set_exec_direction_func.  */
static const char *exec_direction = exec_forward;

/* NULL-terminated value list handed to add_setshow_enum_cmd.  */
static const char *const exec_direction_names[] = {
  exec_forward,
  exec_reverse,
  NULL
};
9473
9474 static void
9475 set_exec_direction_func (const char *args, int from_tty,
9476 struct cmd_list_element *cmd)
9477 {
9478 if (target_can_execute_reverse ())
9479 {
9480 if (!strcmp (exec_direction, exec_forward))
9481 execution_direction = EXEC_FORWARD;
9482 else if (!strcmp (exec_direction, exec_reverse))
9483 execution_direction = EXEC_REVERSE;
9484 }
9485 else
9486 {
9487 exec_direction = exec_forward;
9488 error (_("Target does not support this operation."));
9489 }
9490 }
9491
9492 static void
9493 show_exec_direction_func (struct ui_file *out, int from_tty,
9494 struct cmd_list_element *cmd, const char *value)
9495 {
9496 switch (execution_direction) {
9497 case EXEC_FORWARD:
9498 gdb_printf (out, _("Forward.\n"));
9499 break;
9500 case EXEC_REVERSE:
9501 gdb_printf (out, _("Reverse.\n"));
9502 break;
9503 default:
9504 internal_error (__FILE__, __LINE__,
9505 _("bogus execution_direction value: %d"),
9506 (int) execution_direction);
9507 }
9508 }
9509
/* "show schedule-multiple" command handler.  VALUE is the current
   setting, already rendered as text by the set/show machinery.  */

static void
show_schedule_multiple (struct ui_file *file, int from_tty,
			struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Resuming the execution of threads "
		      "of all processes is %s.\n"), value);
}
9517
/* Implementation of `siginfo' variable.  */

static const struct internalvar_funcs siginfo_funcs =
{
  siginfo_make_value,	/* Lazily compute the variable's value.  */
  NULL,			/* NOTE(review): presumably the destroy hook is
			   unused here -- confirm against the
			   internalvar_funcs declaration.  */
};
9525
/* Callback for infrun's target events source.  This is marked when a
   thread has a pending status to process.  */

static void
infrun_async_inferior_event_handler (gdb_client_data data)
{
  /* Clear the mark before processing; NOTE(review): presumably this lets
     the handler be re-marked while inferior_event_handler runs without
     losing an event -- confirm.  */
  clear_async_event_handler (infrun_async_inferior_event_token);
  inferior_event_handler (INF_REG_EVENT);
}
9535
9536 #if GDB_SELF_TEST
9537 namespace selftests
9538 {
9539
9540 /* Verify that when two threads with the same ptid exist (from two different
9541 targets) and one of them changes ptid, we only update inferior_ptid if
9542 it is appropriate. */
9543
9544 static void
9545 infrun_thread_ptid_changed ()
9546 {
9547 gdbarch *arch = current_inferior ()->gdbarch;
9548
9549 /* The thread which inferior_ptid represents changes ptid. */
9550 {
9551 scoped_restore_current_pspace_and_thread restore;
9552
9553 scoped_mock_context<test_target_ops> target1 (arch);
9554 scoped_mock_context<test_target_ops> target2 (arch);
9555
9556 ptid_t old_ptid (111, 222);
9557 ptid_t new_ptid (111, 333);
9558
9559 target1.mock_inferior.pid = old_ptid.pid ();
9560 target1.mock_thread.ptid = old_ptid;
9561 target1.mock_inferior.ptid_thread_map.clear ();
9562 target1.mock_inferior.ptid_thread_map[old_ptid] = &target1.mock_thread;
9563
9564 target2.mock_inferior.pid = old_ptid.pid ();
9565 target2.mock_thread.ptid = old_ptid;
9566 target2.mock_inferior.ptid_thread_map.clear ();
9567 target2.mock_inferior.ptid_thread_map[old_ptid] = &target2.mock_thread;
9568
9569 auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
9570 set_current_inferior (&target1.mock_inferior);
9571
9572 thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);
9573
9574 gdb_assert (inferior_ptid == new_ptid);
9575 }
9576
9577 /* A thread with the same ptid as inferior_ptid, but from another target,
9578 changes ptid. */
9579 {
9580 scoped_restore_current_pspace_and_thread restore;
9581
9582 scoped_mock_context<test_target_ops> target1 (arch);
9583 scoped_mock_context<test_target_ops> target2 (arch);
9584
9585 ptid_t old_ptid (111, 222);
9586 ptid_t new_ptid (111, 333);
9587
9588 target1.mock_inferior.pid = old_ptid.pid ();
9589 target1.mock_thread.ptid = old_ptid;
9590 target1.mock_inferior.ptid_thread_map.clear ();
9591 target1.mock_inferior.ptid_thread_map[old_ptid] = &target1.mock_thread;
9592
9593 target2.mock_inferior.pid = old_ptid.pid ();
9594 target2.mock_thread.ptid = old_ptid;
9595 target2.mock_inferior.ptid_thread_map.clear ();
9596 target2.mock_inferior.ptid_thread_map[old_ptid] = &target2.mock_thread;
9597
9598 auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
9599 set_current_inferior (&target2.mock_inferior);
9600
9601 thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);
9602
9603 gdb_assert (inferior_ptid == old_ptid);
9604 }
9605 }
9606
9607 } /* namespace selftests */
9608
9609 #endif /* GDB_SELF_TEST */
9610
/* Module initializer: register infrun's event source, commands,
   settings, default signal dispositions, and observers.  */

void _initialize_infrun ();
void
_initialize_infrun ()
{
  struct cmd_list_element *c;

  /* Register extra event sources in the event loop.  */
  infrun_async_inferior_event_token
    = create_async_event_handler (infrun_async_inferior_event_handler, NULL,
				  "infrun");

  /* "info signals" and its alias "info handle".  */
  cmd_list_element *info_signals_cmd
    = add_info ("signals", info_signals_command, _("\
What debugger does when program gets various signals.\n\
Specify a signal as argument to print info on that signal only."));
  add_info_alias ("handle", info_signals_cmd, 0);

  c = add_com ("handle", class_run, handle_command, _("\
Specify how to handle signals.\n\
Usage: handle SIGNAL [ACTIONS]\n\
Args are signals and actions to apply to those signals.\n\
If no actions are specified, the current settings for the specified signals\n\
will be displayed instead.\n\
\n\
Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
from 1-15 are allowed for compatibility with old versions of GDB.\n\
Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
The special arg \"all\" is recognized to mean all signals except those\n\
used by the debugger, typically SIGTRAP and SIGINT.\n\
\n\
Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
\"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
Stop means reenter debugger if this signal happens (implies print).\n\
Print means print a message if this signal happens.\n\
Pass means let program see this signal; otherwise program doesn't know.\n\
Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
Pass and Stop may be combined.\n\
\n\
Multiple signals may be specified. Signal numbers and signal names\n\
may be interspersed with actions, with the actions being performed for\n\
all signals cumulatively specified."));
  set_cmd_completer (c, handle_completer);

  stop_command = add_cmd ("stop", class_obscure,
			  not_just_help_class_command, _("\
There is no `stop' command, but you can set a hook on `stop'.\n\
This allows you to set a list of commands to be run each time execution\n\
of the program stops."), &cmdlist);

  add_setshow_boolean_cmd
    ("infrun", class_maintenance, &debug_infrun,
     _("Set inferior debugging."),
     _("Show inferior debugging."),
     _("When non-zero, inferior specific debugging is enabled."),
     NULL, show_debug_infrun, &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("non-stop", no_class,
			   &non_stop_1, _("\
Set whether gdb controls the inferior in non-stop mode."), _("\
Show whether gdb controls the inferior in non-stop mode."), _("\
When debugging a multi-threaded program and this setting is\n\
off (the default, also called all-stop mode), when one thread stops\n\
(for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
all other threads in the program while you interact with the thread of\n\
interest. When you continue or step a thread, you can allow the other\n\
threads to run, or have them remain stopped, but while you inspect any\n\
thread's state, all threads stop.\n\
\n\
In non-stop mode, when one thread stops, other threads can continue\n\
to run freely. You'll be able to step each thread independently,\n\
leave it stopped or free to run as needed."),
			   set_non_stop,
			   show_non_stop,
			   &setlist,
			   &showlist);

  /* Default every signal to: stop GDB, print a message, and be passed
     to the program; no signals are caught.  Exceptions follow below.  */
  for (size_t i = 0; i < GDB_SIGNAL_LAST; i++)
    {
      signal_stop[i] = 1;
      signal_print[i] = 1;
      signal_program[i] = 1;
      signal_catch[i] = 0;
    }

  /* Signals caused by debugger's own actions should not be given to
     the program afterwards.

     Do not deliver GDB_SIGNAL_TRAP by default, except when the user
     explicitly specifies that it should be delivered to the target
     program. Typically, that would occur when a user is debugging a
     target monitor on a simulator: the target monitor sets a
     breakpoint; the simulator encounters this breakpoint and halts
     the simulation handing control to GDB; GDB, noting that the stop
     address doesn't map to any known breakpoint, returns control back
     to the simulator; the simulator then delivers the hardware
     equivalent of a GDB_SIGNAL_TRAP to the program being
     debugged.  */
  signal_program[GDB_SIGNAL_TRAP] = 0;
  signal_program[GDB_SIGNAL_INT] = 0;

  /* Signals that are not errors should not normally enter the debugger.  */
  signal_stop[GDB_SIGNAL_ALRM] = 0;
  signal_print[GDB_SIGNAL_ALRM] = 0;
  signal_stop[GDB_SIGNAL_VTALRM] = 0;
  signal_print[GDB_SIGNAL_VTALRM] = 0;
  signal_stop[GDB_SIGNAL_PROF] = 0;
  signal_print[GDB_SIGNAL_PROF] = 0;
  signal_stop[GDB_SIGNAL_CHLD] = 0;
  signal_print[GDB_SIGNAL_CHLD] = 0;
  signal_stop[GDB_SIGNAL_IO] = 0;
  signal_print[GDB_SIGNAL_IO] = 0;
  signal_stop[GDB_SIGNAL_POLL] = 0;
  signal_print[GDB_SIGNAL_POLL] = 0;
  signal_stop[GDB_SIGNAL_URG] = 0;
  signal_print[GDB_SIGNAL_URG] = 0;
  signal_stop[GDB_SIGNAL_WINCH] = 0;
  signal_print[GDB_SIGNAL_WINCH] = 0;
  signal_stop[GDB_SIGNAL_PRIO] = 0;
  signal_print[GDB_SIGNAL_PRIO] = 0;

  /* These signals are used internally by user-level thread
     implementations. (See signal(5) on Solaris.) Like the above
     signals, a healthy program receives and handles them as part of
     its normal operation.  */
  signal_stop[GDB_SIGNAL_LWP] = 0;
  signal_print[GDB_SIGNAL_LWP] = 0;
  signal_stop[GDB_SIGNAL_WAITING] = 0;
  signal_print[GDB_SIGNAL_WAITING] = 0;
  signal_stop[GDB_SIGNAL_CANCEL] = 0;
  signal_print[GDB_SIGNAL_CANCEL] = 0;
  signal_stop[GDB_SIGNAL_LIBRT] = 0;
  signal_print[GDB_SIGNAL_LIBRT] = 0;

  /* Update cached state.  */
  signal_cache_update (-1);

  add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
			    &stop_on_solib_events, _("\
Set stopping for shared library events."), _("\
Show stopping for shared library events."), _("\
If nonzero, gdb will give control to the user when the dynamic linker\n\
notifies gdb of shared library events. The most common event of interest\n\
to the user would be loading/unloading of a new library."),
			    set_stop_on_solib_events,
			    show_stop_on_solib_events,
			    &setlist, &showlist);

  add_setshow_enum_cmd ("follow-fork-mode", class_run,
			follow_fork_mode_kind_names,
			&follow_fork_mode_string, _("\
Set debugger response to a program call of fork or vfork."), _("\
Show debugger response to a program call of fork or vfork."), _("\
A fork or vfork creates a new process. follow-fork-mode can be:\n\
parent - the original process is debugged after a fork\n\
child - the new process is debugged after a fork\n\
The unfollowed process will continue to run.\n\
By default, the debugger will follow the parent process."),
			NULL,
			show_follow_fork_mode_string,
			&setlist, &showlist);

  add_setshow_enum_cmd ("follow-exec-mode", class_run,
			follow_exec_mode_names,
			&follow_exec_mode_string, _("\
Set debugger response to a program call of exec."), _("\
Show debugger response to a program call of exec."), _("\
An exec call replaces the program image of a process.\n\
\n\
follow-exec-mode can be:\n\
\n\
new - the debugger creates a new inferior and rebinds the process\n\
to this new inferior. The program the process was running before\n\
the exec call can be restarted afterwards by restarting the original\n\
inferior.\n\
\n\
same - the debugger keeps the process bound to the same inferior.\n\
The new executable image replaces the previous executable loaded in\n\
the inferior. Restarting the inferior after the exec call restarts\n\
the executable the process was running after the exec call.\n\
\n\
By default, the debugger will use the same inferior."),
			NULL,
			show_follow_exec_mode_string,
			&setlist, &showlist);

  add_setshow_enum_cmd ("scheduler-locking", class_run,
			scheduler_enums, &scheduler_mode, _("\
Set mode for locking scheduler during execution."), _("\
Show mode for locking scheduler during execution."), _("\
off == no locking (threads may preempt at any time)\n\
on == full locking (no thread except the current thread may run)\n\
This applies to both normal execution and replay mode.\n\
step == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
In this mode, other threads may run during other commands.\n\
This applies to both normal execution and replay mode.\n\
replay == scheduler locked in replay mode and unlocked during normal execution."),
			set_schedlock_func,	/* traps on target vector */
			show_scheduler_mode,
			&setlist, &showlist);

  add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
Set mode for resuming threads of all processes."), _("\
Show mode for resuming threads of all processes."), _("\
When on, execution commands (such as 'continue' or 'next') resume all\n\
threads of all processes. When off (which is the default), execution\n\
commands only resume the threads of the current process. The set of\n\
threads that are resumed is further refined by the scheduler-locking\n\
mode (see help set scheduler-locking)."),
			   NULL,
			   show_schedule_multiple,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
Set mode of the step operation."), _("\
Show mode of the step operation."), _("\
When set, doing a step over a function without debug line information\n\
will stop at the first instruction of that function. Otherwise, the\n\
function is skipped and the step command stops at a different source line."),
			   NULL,
			   show_step_stop_if_no_debug,
			   &setlist, &showlist);

  add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
				&can_use_displaced_stepping, _("\
Set debugger's willingness to use displaced stepping."), _("\
Show debugger's willingness to use displaced stepping."), _("\
If on, gdb will use displaced stepping to step over breakpoints if it is\n\
supported by the target architecture. If off, gdb will not use displaced\n\
stepping to step over breakpoints, even if such is supported by the target\n\
architecture. If auto (which is the default), gdb will use displaced stepping\n\
if the target architecture supports it and non-stop mode is active, but will not\n\
use it in all-stop mode (see help set non-stop)."),
				NULL,
				show_can_use_displaced_stepping,
				&setlist, &showlist);

  add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
			&exec_direction, _("Set direction of execution.\n\
Options are 'forward' or 'reverse'."),
			_("Show direction of execution (forward/reverse)."),
			_("Tells gdb whether to execute forward or backward."),
			set_exec_direction_func, show_exec_direction_func,
			&setlist, &showlist);

  /* Set/show detach-on-fork: user-settable mode.  */

  add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
Set whether gdb will detach the child of a fork."), _("\
Show whether gdb will detach the child of a fork."), _("\
Tells gdb whether to detach the child of a fork."),
			   NULL, NULL, &setlist, &showlist);

  /* Set/show disable address space randomization mode.  */

  add_setshow_boolean_cmd ("disable-randomization", class_support,
			   &disable_randomization, _("\
Set disabling of debuggee's virtual address space randomization."), _("\
Show disabling of debuggee's virtual address space randomization."), _("\
When this mode is on (which is the default), randomization of the virtual\n\
address space is disabled. Standalone programs run with the randomization\n\
enabled by default on some platforms."),
			   &set_disable_randomization,
			   &show_disable_randomization,
			   &setlist, &showlist);

  /* ptid initializations */
  inferior_ptid = null_ptid;
  target_last_wait_ptid = minus_one_ptid;

  /* Hook infrun's handlers into thread/inferior lifecycle events.  */
  gdb::observers::thread_ptid_changed.attach (infrun_thread_ptid_changed,
					      "infrun");
  gdb::observers::thread_stop_requested.attach (infrun_thread_stop_requested,
						"infrun");
  gdb::observers::thread_exit.attach (infrun_thread_thread_exit, "infrun");
  gdb::observers::inferior_exit.attach (infrun_inferior_exit, "infrun");
  gdb::observers::inferior_execd.attach (infrun_inferior_execd, "infrun");

  /* Explicitly create without lookup, since that tries to create a
     value with a void typed value, and when we get here, gdbarch
     isn't initialized yet. At this point, we're quite sure there
     isn't another convenience variable of the same name.  */
  create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);

  add_setshow_boolean_cmd ("observer", no_class,
			   &observer_mode_1, _("\
Set whether gdb controls the inferior in observer mode."), _("\
Show whether gdb controls the inferior in observer mode."), _("\
In observer mode, GDB can get data from the inferior, but not\n\
affect its execution. Registers and memory may not be changed,\n\
breakpoints may not be set, and the program cannot be interrupted\n\
or signalled."),
			   set_observer_mode,
			   show_observer_mode,
			   &setlist,
			   &showlist);

#if GDB_SELF_TEST
  selftests::register_test ("infrun_thread_ptid_changed",
			    selftests::infrun_thread_ptid_changed);
#endif
}