96346e1f25b15c43761659686d276baecefb004d
[binutils-gdb.git] / gdb / infrun.c
1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986-2022 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include "displaced-stepping.h"
23 #include "infrun.h"
24 #include <ctype.h>
25 #include "symtab.h"
26 #include "frame.h"
27 #include "inferior.h"
28 #include "breakpoint.h"
29 #include "gdbcore.h"
30 #include "gdbcmd.h"
31 #include "target.h"
32 #include "target-connection.h"
33 #include "gdbthread.h"
34 #include "annotate.h"
35 #include "symfile.h"
36 #include "top.h"
37 #include "inf-loop.h"
38 #include "regcache.h"
39 #include "value.h"
40 #include "observable.h"
41 #include "language.h"
42 #include "solib.h"
43 #include "main.h"
44 #include "block.h"
45 #include "mi/mi-common.h"
46 #include "event-top.h"
47 #include "record.h"
48 #include "record-full.h"
49 #include "inline-frame.h"
50 #include "jit.h"
51 #include "tracepoint.h"
52 #include "skip.h"
53 #include "probe.h"
54 #include "objfiles.h"
55 #include "completer.h"
56 #include "target-descriptions.h"
57 #include "target-dcache.h"
58 #include "terminal.h"
59 #include "solist.h"
60 #include "gdbsupport/event-loop.h"
61 #include "thread-fsm.h"
62 #include "gdbsupport/enum-flags.h"
63 #include "progspace-and-thread.h"
64 #include "gdbsupport/gdb_optional.h"
65 #include "arch-utils.h"
66 #include "gdbsupport/scope-exit.h"
67 #include "gdbsupport/forward-scope-exit.h"
68 #include "gdbsupport/gdb_select.h"
69 #include <unordered_map>
70 #include "async-event.h"
71 #include "gdbsupport/selftest.h"
72 #include "scoped-mock-context.h"
73 #include "test-target.h"
74 #include "gdbsupport/common-debug.h"
75 #include "gdbsupport/buildargv.h"
76
77 /* Prototypes for local functions */
78
79 static void sig_print_info (enum gdb_signal);
80
81 static void sig_print_header (void);
82
83 static void follow_inferior_reset_breakpoints (void);
84
85 static bool currently_stepping (struct thread_info *tp);
86
87 static void insert_hp_step_resume_breakpoint_at_frame (frame_info_ptr);
88
89 static void insert_step_resume_breakpoint_at_caller (frame_info_ptr);
90
91 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
92
93 static bool maybe_software_singlestep (struct gdbarch *gdbarch);
94
95 static void resume (gdb_signal sig);
96
97 static void wait_for_inferior (inferior *inf);
98
99 static void restart_threads (struct thread_info *event_thread,
100 inferior *inf = nullptr);
101
102 static bool start_step_over (void);
103
104 static bool step_over_info_valid_p (void);
105
106 /* Asynchronous signal handler registered as event loop source for
107 when we have pending events ready to be passed to the core. */
108 static struct async_event_handler *infrun_async_inferior_event_token;
109
110 /* Stores whether infrun_async was previously enabled or disabled.
111 Starts off as -1, indicating "never enabled/disabled". */
112 static int infrun_is_async = -1;
113
114 /* See infrun.h. */
115
116 void
117 infrun_async (int enable)
118 {
119 if (infrun_is_async != enable)
120 {
121 infrun_is_async = enable;
122
123 infrun_debug_printf ("enable=%d", enable);
124
125 if (enable)
126 mark_async_event_handler (infrun_async_inferior_event_token);
127 else
128 clear_async_event_handler (infrun_async_inferior_event_token);
129 }
130 }
131
132 /* See infrun.h. */
133
void
mark_infrun_async_event_handler (void)
{
  /* Flag infrun's async event source so that the event loop invokes
     the infrun event handler on its next pass.  */
  mark_async_event_handler (infrun_async_inferior_event_token);
}
139
140 /* When set, stop the 'step' command if we enter a function which has
141 no line number information. The normal behavior is that we step
142 over such function. */
143 bool step_stop_if_no_debug = false;
static void
show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
			    struct cmd_list_element *c, const char *value)
{
  /* "show step-mode" callback: report the current setting.  */
  gdb_printf (file, _("Mode of the step operation is %s.\n"), value);
}
150
151 /* proceed and normal_stop use this to notify the user when the
152 inferior stopped in a different thread than it had been running
153 in. */
154
155 static ptid_t previous_inferior_ptid;
156
157 /* If set (default for legacy reasons), when following a fork, GDB
158 will detach from one of the fork branches, child or parent.
159 Exactly which branch is detached depends on 'set follow-fork-mode'
160 setting. */
161
162 static bool detach_fork = true;
163
164 bool debug_infrun = false;
static void
show_debug_infrun (struct ui_file *file, int from_tty,
		   struct cmd_list_element *c, const char *value)
{
  /* "show debug infrun" callback: report the current setting.  */
  gdb_printf (file, _("Inferior debugging is %s.\n"), value);
}
171
172 /* Support for disabling address space randomization. */
173
174 bool disable_randomization = true;
175
176 static void
177 show_disable_randomization (struct ui_file *file, int from_tty,
178 struct cmd_list_element *c, const char *value)
179 {
180 if (target_supports_disable_randomization ())
181 gdb_printf (file,
182 _("Disabling randomization of debuggee's "
183 "virtual address space is %s.\n"),
184 value);
185 else
186 gdb_puts (_("Disabling randomization of debuggee's "
187 "virtual address space is unsupported on\n"
188 "this platform.\n"), file);
189 }
190
static void
set_disable_randomization (const char *args, int from_tty,
			   struct cmd_list_element *c)
{
  /* "set disable-randomization" callback: reject the setting outright
     on targets that cannot control address space randomization.  */
  if (!target_supports_disable_randomization ())
    error (_("Disabling randomization of debuggee's "
	     "virtual address space is unsupported on\n"
	     "this platform."));
}
200
201 /* User interface for non-stop mode. */
202
203 bool non_stop = false;
204 static bool non_stop_1 = false;
205
206 static void
207 set_non_stop (const char *args, int from_tty,
208 struct cmd_list_element *c)
209 {
210 if (target_has_execution ())
211 {
212 non_stop_1 = non_stop;
213 error (_("Cannot change this setting while the inferior is running."));
214 }
215
216 non_stop = non_stop_1;
217 }
218
static void
show_non_stop (struct ui_file *file, int from_tty,
	       struct cmd_list_element *c, const char *value)
{
  /* "show non-stop" callback: report the current setting.  */
  gdb_printf (file,
	      _("Controlling the inferior in non-stop mode is %s.\n"),
	      value);
}
227
228 /* "Observer mode" is somewhat like a more extreme version of
229 non-stop, in which all GDB operations that might affect the
230 target's execution have been disabled. */
231
232 static bool observer_mode = false;
233 static bool observer_mode_1 = false;
234
static void
set_observer_mode (const char *args, int from_tty,
		   struct cmd_list_element *c)
{
  /* "set observer" callback.  Refuse to change the mode while the
     inferior is running; restore the staged value first so a later
     "show observer" still reports the mode actually in effect.  */
  if (target_has_execution ())
    {
      observer_mode_1 = observer_mode;
      error (_("Cannot change this setting while the inferior is running."));
    }

  observer_mode = observer_mode_1;

  /* Propagate the mode to the individual permission flags: observer
     mode disallows everything that could perturb the target.  */
  may_write_registers = !observer_mode;
  may_write_memory = !observer_mode;
  may_insert_breakpoints = !observer_mode;
  may_insert_tracepoints = !observer_mode;
  /* We can insert fast tracepoints in or out of observer mode,
     but enable them if we're going into this mode.  */
  if (observer_mode)
    may_insert_fast_tracepoints = true;
  may_stop = !observer_mode;
  update_target_permissions ();

  /* Going *into* observer mode we must force non-stop, then
     going out we leave it that way.  */
  if (observer_mode)
    {
      pagination_enabled = false;
      non_stop = non_stop_1 = true;
    }

  if (from_tty)
    gdb_printf (_("Observer mode is now %s.\n"),
		(observer_mode ? "on" : "off"));
}
270
static void
show_observer_mode (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  /* "show observer" callback: report the current setting.  */
  gdb_printf (file, _("Observer mode is %s.\n"), value);
}
277
278 /* This updates the value of observer mode based on changes in
279 permissions. Note that we are deliberately ignoring the values of
280 may-write-registers and may-write-memory, since the user may have
281 reason to enable these during a session, for instance to turn on a
282 debugging-related global. */
283
284 void
285 update_observer_mode (void)
286 {
287 bool newval = (!may_insert_breakpoints
288 && !may_insert_tracepoints
289 && may_insert_fast_tracepoints
290 && !may_stop
291 && non_stop);
292
293 /* Let the user know if things change. */
294 if (newval != observer_mode)
295 gdb_printf (_("Observer mode is now %s.\n"),
296 (newval ? "on" : "off"));
297
298 observer_mode = observer_mode_1 = newval;
299 }
300
301 /* Tables of how to react to signals; the user sets them. */
302
303 static unsigned char signal_stop[GDB_SIGNAL_LAST];
304 static unsigned char signal_print[GDB_SIGNAL_LAST];
305 static unsigned char signal_program[GDB_SIGNAL_LAST];
306
307 /* Table of signals that are registered with "catch signal". A
308 non-zero entry indicates that the signal is caught by some "catch
309 signal" command. */
310 static unsigned char signal_catch[GDB_SIGNAL_LAST];
311
312 /* Table of signals that the target may silently handle.
313 This is automatically determined from the flags above,
314 and simply cached here. */
315 static unsigned char signal_pass[GDB_SIGNAL_LAST];
316
/* Set FLAGS[signum] to 1 for every signal whose entry is nonzero in
   SIGS, scanning the first NSIGS entries.  */
#define SET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
	(flags)[signum] = 1; \
  } while (0)

/* Clear FLAGS[signum] for every signal whose entry is nonzero in
   SIGS, scanning the first NSIGS entries.  */
#define UNSET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
	(flags)[signum] = 0; \
  } while (0)
332
333 /* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
334 this function is to avoid exporting `signal_program'. */
335
void
update_signals_program_target (void)
{
  /* Push the cached signal_program table down to the target, which
     uses it to decide which signals to deliver to the inferior.  */
  target_program_signals (signal_program);
}
341
342 /* Value to pass to target_resume() to cause all threads to resume. */
343
344 #define RESUME_ALL minus_one_ptid
345
346 /* Command list pointer for the "stop" placeholder. */
347
348 static struct cmd_list_element *stop_command;
349
350 /* Nonzero if we want to give control to the user when we're notified
351 of shared library events by the dynamic linker. */
352 int stop_on_solib_events;
353
354 /* Enable or disable optional shared library event breakpoints
355 as appropriate when the above flag is changed. */
356
static void
set_stop_on_solib_events (const char *args,
			  int from_tty, struct cmd_list_element *c)
{
  /* "set stop-on-solib-events" callback: sync the optional shared
     library event breakpoints with the new flag value.  */
  update_solib_breakpoints ();
}
363
static void
show_stop_on_solib_events (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c, const char *value)
{
  /* "show stop-on-solib-events" callback: report the current setting.  */
  gdb_printf (file, _("Stopping for shared library events is %s.\n"),
	      value);
}
371
372 /* True after stop if current stack frame should be printed. */
373
374 static bool stop_print_frame;
375
376 /* This is a cached copy of the target/ptid/waitstatus of the last
377 event returned by target_wait().
378 This information is returned by get_last_target_status(). */
379 static process_stratum_target *target_last_proc_target;
380 static ptid_t target_last_wait_ptid;
381 static struct target_waitstatus target_last_waitstatus;
382
383 void init_thread_stepping_state (struct thread_info *tss);
384
385 static const char follow_fork_mode_child[] = "child";
386 static const char follow_fork_mode_parent[] = "parent";
387
388 static const char *const follow_fork_mode_kind_names[] = {
389 follow_fork_mode_child,
390 follow_fork_mode_parent,
391 nullptr
392 };
393
394 static const char *follow_fork_mode_string = follow_fork_mode_parent;
static const char *follow_fork_mode_string = follow_fork_mode_parent;
static void
show_follow_fork_mode_string (struct ui_file *file, int from_tty,
			      struct cmd_list_element *c, const char *value)
{
  /* "show follow-fork-mode" callback: report the current setting.  */
  gdb_printf (file,
	      _("Debugger response to a program "
		"call of fork or vfork is \"%s\".\n"),
	      value);
}
404 \f
405
406 /* Handle changes to the inferior list based on the type of fork,
407 which process is being followed, and whether the other process
408 should be detached. On entry inferior_ptid must be the ptid of
409 the fork parent. At return inferior_ptid is the ptid of the
410 followed inferior. */
411
static bool
follow_fork_inferior (bool follow_child, bool detach_fork)
{
  /* The fork/vfork event must be pending on the current thread.  */
  target_waitkind fork_kind = inferior_thread ()->pending_follow.kind ();
  gdb_assert (fork_kind == TARGET_WAITKIND_FORKED
	      || fork_kind == TARGET_WAITKIND_VFORKED);
  bool has_vforked = fork_kind == TARGET_WAITKIND_VFORKED;
  ptid_t parent_ptid = inferior_ptid;
  ptid_t child_ptid = inferior_thread ()->pending_follow.child_ptid ();

  if (has_vforked
      && !non_stop /* Non-stop always resumes both branches.  */
      && current_ui->prompt_state == PROMPT_BLOCKED
      && !(follow_child || detach_fork || sched_multi))
    {
      /* The parent stays blocked inside the vfork syscall until the
	 child execs or exits.  If we don't let the child run, then
	 the parent stays blocked.  If we're telling the parent to run
	 in the foreground, the user will not be able to ctrl-c to get
	 back the terminal, effectively hanging the debug session.  */
      gdb_printf (gdb_stderr, _("\
Can not resume the parent process over vfork in the foreground while\n\
holding the child stopped.  Try \"set detach-on-fork\" or \
\"set schedule-multiple\".\n"));
      /* Tell the caller not to resume in this situation.  */
      return true;
    }

  inferior *parent_inf = current_inferior ();
  inferior *child_inf = nullptr;

  gdb_assert (parent_inf->thread_waiting_for_vfork_done == nullptr);

  if (!follow_child)
    {
      /* Detach new forked process?  */
      if (detach_fork)
	{
	  /* Before detaching from the child, remove all breakpoints
	     from it.  If we forked, then this has already been taken
	     care of by infrun.c.  If we vforked however, any
	     breakpoint inserted in the parent is visible in the
	     child, even those added while stopped in a vfork
	     catchpoint.  This will remove the breakpoints from the
	     parent also, but they'll be reinserted below.  */
	  if (has_vforked)
	    {
	      /* Keep breakpoints list in sync.  */
	      remove_breakpoints_inf (current_inferior ());
	    }

	  if (print_inferior_events)
	    {
	      /* Ensure that we have a process ptid.  */
	      ptid_t process_ptid = ptid_t (child_ptid.pid ());

	      target_terminal::ours_for_output ();
	      gdb_printf (_("[Detaching after %s from child %s]\n"),
			  has_vforked ? "vfork" : "fork",
			  target_pid_to_str (process_ptid).c_str ());
	    }
	}
      else
	{
	  /* Add process to GDB's tables.  */
	  child_inf = add_inferior (child_ptid.pid ());

	  child_inf->attach_flag = parent_inf->attach_flag;
	  copy_terminal_info (child_inf, parent_inf);
	  child_inf->gdbarch = parent_inf->gdbarch;
	  copy_inferior_target_desc_info (child_inf, parent_inf);

	  child_inf->symfile_flags = SYMFILE_NO_READ;

	  /* If this is a vfork child, then the address-space is
	     shared with the parent.  */
	  if (has_vforked)
	    {
	      child_inf->pspace = parent_inf->pspace;
	      child_inf->aspace = parent_inf->aspace;

	      exec_on_vfork (child_inf);

	      /* The parent will be frozen until the child is done
		 with the shared region.  Keep track of the
		 parent.  */
	      child_inf->vfork_parent = parent_inf;
	      child_inf->pending_detach = 0;
	      parent_inf->vfork_child = child_inf;
	      parent_inf->pending_detach = 0;
	    }
	  else
	    {
	      /* A plain fork child gets its own spaces, cloned from
		 the parent's.  */
	      child_inf->aspace = new address_space ();
	      child_inf->pspace = new program_space (child_inf->aspace);
	      child_inf->removable = 1;
	      clone_program_space (child_inf->pspace, parent_inf->pspace);
	    }
	}

      if (has_vforked)
	{
	  /* If we detached from the child, then we have to be careful
	     to not insert breakpoints in the parent until the child
	     is done with the shared memory region.  However, if we're
	     staying attached to the child, then we can and should
	     insert breakpoints, so that we can debug it.  A
	     subsequent child exec or exit is enough to know when does
	     the child stops using the parent's address space.  */
	  parent_inf->thread_waiting_for_vfork_done
	    = detach_fork ? inferior_thread () : nullptr;
	  parent_inf->pspace->breakpoints_not_allowed = detach_fork;
	}
    }
  else
    {
      /* Follow the child.  */

      if (print_inferior_events)
	{
	  std::string parent_pid = target_pid_to_str (parent_ptid);
	  std::string child_pid = target_pid_to_str (child_ptid);

	  target_terminal::ours_for_output ();
	  gdb_printf (_("[Attaching after %s %s to child %s]\n"),
		      parent_pid.c_str (),
		      has_vforked ? "vfork" : "fork",
		      child_pid.c_str ());
	}

      /* Add the new inferior first, so that the target_detach below
	 doesn't unpush the target.  */

      child_inf = add_inferior (child_ptid.pid ());

      child_inf->attach_flag = parent_inf->attach_flag;
      copy_terminal_info (child_inf, parent_inf);
      child_inf->gdbarch = parent_inf->gdbarch;
      copy_inferior_target_desc_info (child_inf, parent_inf);

      if (has_vforked)
	{
	  /* If this is a vfork child, then the address-space is shared
	     with the parent.  */
	  child_inf->aspace = parent_inf->aspace;
	  child_inf->pspace = parent_inf->pspace;

	  exec_on_vfork (child_inf);
	}
      else if (detach_fork)
	{
	  /* We follow the child and detach from the parent: move the parent's
	     program space to the child.  This simplifies some things, like
	     doing "next" over fork() and landing on the expected line in the
	     child (note, that is broken with "set detach-on-fork off").

	     Before assigning brand new spaces for the parent, remove
	     breakpoints from it: because the new pspace won't match
	     currently inserted locations, the normal detach procedure
	     wouldn't remove them, and we would leave them inserted when
	     detaching.  */
	  remove_breakpoints_inf (parent_inf);

	  child_inf->aspace = parent_inf->aspace;
	  child_inf->pspace = parent_inf->pspace;
	  parent_inf->aspace = new address_space ();
	  parent_inf->pspace = new program_space (parent_inf->aspace);
	  clone_program_space (parent_inf->pspace, child_inf->pspace);

	  /* The parent inferior is still the current one, so keep things
	     in sync.  */
	  set_current_program_space (parent_inf->pspace);
	}
      else
	{
	  /* Staying attached to both: the child gets its own fresh
	     spaces, cloned from the parent's.  */
	  child_inf->aspace = new address_space ();
	  child_inf->pspace = new program_space (child_inf->aspace);
	  child_inf->removable = 1;
	  child_inf->symfile_flags = SYMFILE_NO_READ;
	  clone_program_space (child_inf->pspace, parent_inf->pspace);
	}
    }

  gdb_assert (current_inferior () == parent_inf);

  /* If we are setting up an inferior for the child, target_follow_fork is
     responsible for pushing the appropriate targets on the new inferior's
     target stack and adding the initial thread (with ptid CHILD_PTID).

     If we are not setting up an inferior for the child (because following
     the parent and detach_fork is true), it is responsible for detaching
     from CHILD_PTID.  */
  target_follow_fork (child_inf, child_ptid, fork_kind, follow_child,
		      detach_fork);

  /* target_follow_fork must leave the parent as the current inferior.  If we
     want to follow the child, we make it the current one below.  */
  gdb_assert (current_inferior () == parent_inf);

  /* If there is a child inferior, target_follow_fork must have created a thread
     for it.  */
  if (child_inf != nullptr)
    gdb_assert (!child_inf->thread_list.empty ());

  /* Clear the parent thread's pending follow field.  Do this before calling
     target_detach, so that the target can differentiate the two following
     cases:

     - We continue past a fork with "follow-fork-mode == child" &&
       "detach-on-fork on", and therefore detach the parent.  In that
       case the target should not detach the fork child.
     - We run to a fork catchpoint and the user types "detach".  In that
       case, the target should detach the fork child in addition to the
       parent.

     The former case will have pending_follow cleared, the later will have
     pending_follow set.  */
  thread_info *parent_thread = find_thread_ptid (parent_inf, parent_ptid);
  gdb_assert (parent_thread != nullptr);
  parent_thread->pending_follow.set_spurious ();

  /* Detach the parent if needed.  */
  if (follow_child)
    {
      /* If we're vforking, we want to hold on to the parent until
	 the child exits or execs.  At child exec or exit time we
	 can remove the old breakpoints from the parent and detach
	 or resume debugging it.  Otherwise, detach the parent now;
	 we'll want to reuse it's program/address spaces, but we
	 can't set them to the child before removing breakpoints
	 from the parent, otherwise, the breakpoints module could
	 decide to remove breakpoints from the wrong process (since
	 they'd be assigned to the same address space).  */

      if (has_vforked)
	{
	  gdb_assert (child_inf->vfork_parent == nullptr);
	  gdb_assert (parent_inf->vfork_child == nullptr);
	  child_inf->vfork_parent = parent_inf;
	  child_inf->pending_detach = 0;
	  parent_inf->vfork_child = child_inf;
	  parent_inf->pending_detach = detach_fork;
	}
      else if (detach_fork)
	{
	  if (print_inferior_events)
	    {
	      /* Ensure that we have a process ptid.  */
	      ptid_t process_ptid = ptid_t (parent_ptid.pid ());

	      target_terminal::ours_for_output ();
	      gdb_printf (_("[Detaching after fork from "
			    "parent %s]\n"),
			  target_pid_to_str (process_ptid).c_str ());
	    }

	  target_detach (parent_inf, 0);
	}
    }

  /* If we ended up creating a new inferior, call post_create_inferior to inform
     the various subcomponents.  */
  if (child_inf != nullptr)
    {
      /* If FOLLOW_CHILD, we leave CHILD_INF as the current inferior
	 (do not restore the parent as the current inferior).  */
      gdb::optional<scoped_restore_current_thread> maybe_restore;

      if (!follow_child)
	maybe_restore.emplace ();

      switch_to_thread (*child_inf->threads ().begin ());
      post_create_inferior (0);
    }

  /* Tell the caller it is OK to resume.  */
  return false;
}
688
689 /* Tell the target to follow the fork we're stopped at. Returns true
690 if the inferior should be resumed; false, if the target for some
691 reason decided it's best not to resume. */
692
693 static bool
694 follow_fork ()
695 {
696 bool follow_child = (follow_fork_mode_string == follow_fork_mode_child);
697 bool should_resume = true;
698
699 /* Copy user stepping state to the new inferior thread. FIXME: the
700 followed fork child thread should have a copy of most of the
701 parent thread structure's run control related fields, not just these.
702 Initialized to avoid "may be used uninitialized" warnings from gcc. */
703 struct breakpoint *step_resume_breakpoint = nullptr;
704 struct breakpoint *exception_resume_breakpoint = nullptr;
705 CORE_ADDR step_range_start = 0;
706 CORE_ADDR step_range_end = 0;
707 int current_line = 0;
708 symtab *current_symtab = nullptr;
709 struct frame_id step_frame_id = { 0 };
710
711 if (!non_stop)
712 {
713 process_stratum_target *wait_target;
714 ptid_t wait_ptid;
715 struct target_waitstatus wait_status;
716
717 /* Get the last target status returned by target_wait(). */
718 get_last_target_status (&wait_target, &wait_ptid, &wait_status);
719
720 /* If not stopped at a fork event, then there's nothing else to
721 do. */
722 if (wait_status.kind () != TARGET_WAITKIND_FORKED
723 && wait_status.kind () != TARGET_WAITKIND_VFORKED)
724 return 1;
725
726 /* Check if we switched over from WAIT_PTID, since the event was
727 reported. */
728 if (wait_ptid != minus_one_ptid
729 && (current_inferior ()->process_target () != wait_target
730 || inferior_ptid != wait_ptid))
731 {
732 /* We did. Switch back to WAIT_PTID thread, to tell the
733 target to follow it (in either direction). We'll
734 afterwards refuse to resume, and inform the user what
735 happened. */
736 thread_info *wait_thread = find_thread_ptid (wait_target, wait_ptid);
737 switch_to_thread (wait_thread);
738 should_resume = false;
739 }
740 }
741
742 thread_info *tp = inferior_thread ();
743
744 /* If there were any forks/vforks that were caught and are now to be
745 followed, then do so now. */
746 switch (tp->pending_follow.kind ())
747 {
748 case TARGET_WAITKIND_FORKED:
749 case TARGET_WAITKIND_VFORKED:
750 {
751 ptid_t parent, child;
752 std::unique_ptr<struct thread_fsm> thread_fsm;
753
754 /* If the user did a next/step, etc, over a fork call,
755 preserve the stepping state in the fork child. */
756 if (follow_child && should_resume)
757 {
758 step_resume_breakpoint = clone_momentary_breakpoint
759 (tp->control.step_resume_breakpoint);
760 step_range_start = tp->control.step_range_start;
761 step_range_end = tp->control.step_range_end;
762 current_line = tp->current_line;
763 current_symtab = tp->current_symtab;
764 step_frame_id = tp->control.step_frame_id;
765 exception_resume_breakpoint
766 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
767 thread_fsm = tp->release_thread_fsm ();
768
769 /* For now, delete the parent's sr breakpoint, otherwise,
770 parent/child sr breakpoints are considered duplicates,
771 and the child version will not be installed. Remove
772 this when the breakpoints module becomes aware of
773 inferiors and address spaces. */
774 delete_step_resume_breakpoint (tp);
775 tp->control.step_range_start = 0;
776 tp->control.step_range_end = 0;
777 tp->control.step_frame_id = null_frame_id;
778 delete_exception_resume_breakpoint (tp);
779 }
780
781 parent = inferior_ptid;
782 child = tp->pending_follow.child_ptid ();
783
784 /* If handling a vfork, stop all the inferior's threads, they will be
785 restarted when the vfork shared region is complete. */
786 if (tp->pending_follow.kind () == TARGET_WAITKIND_VFORKED
787 && target_is_non_stop_p ())
788 stop_all_threads ("handling vfork", tp->inf);
789
790 process_stratum_target *parent_targ = tp->inf->process_target ();
791 /* Set up inferior(s) as specified by the caller, and tell the
792 target to do whatever is necessary to follow either parent
793 or child. */
794 if (follow_fork_inferior (follow_child, detach_fork))
795 {
796 /* Target refused to follow, or there's some other reason
797 we shouldn't resume. */
798 should_resume = 0;
799 }
800 else
801 {
802 /* This makes sure we don't try to apply the "Switched
803 over from WAIT_PID" logic above. */
804 nullify_last_target_wait_ptid ();
805
806 /* If we followed the child, switch to it... */
807 if (follow_child)
808 {
809 thread_info *child_thr = find_thread_ptid (parent_targ, child);
810 switch_to_thread (child_thr);
811
812 /* ... and preserve the stepping state, in case the
813 user was stepping over the fork call. */
814 if (should_resume)
815 {
816 tp = inferior_thread ();
817 tp->control.step_resume_breakpoint
818 = step_resume_breakpoint;
819 tp->control.step_range_start = step_range_start;
820 tp->control.step_range_end = step_range_end;
821 tp->current_line = current_line;
822 tp->current_symtab = current_symtab;
823 tp->control.step_frame_id = step_frame_id;
824 tp->control.exception_resume_breakpoint
825 = exception_resume_breakpoint;
826 tp->set_thread_fsm (std::move (thread_fsm));
827 }
828 else
829 {
830 /* If we get here, it was because we're trying to
831 resume from a fork catchpoint, but, the user
832 has switched threads away from the thread that
833 forked. In that case, the resume command
834 issued is most likely not applicable to the
835 child, so just warn, and refuse to resume. */
836 warning (_("Not resuming: switched threads "
837 "before following fork child."));
838 }
839
840 /* Reset breakpoints in the child as appropriate. */
841 follow_inferior_reset_breakpoints ();
842 }
843 }
844 }
845 break;
846 case TARGET_WAITKIND_SPURIOUS:
847 /* Nothing to follow. */
848 break;
849 default:
850 internal_error ("Unexpected pending_follow.kind %d\n",
851 tp->pending_follow.kind ());
852 break;
853 }
854
855 return should_resume;
856 }
857
858 static void
859 follow_inferior_reset_breakpoints (void)
860 {
861 struct thread_info *tp = inferior_thread ();
862
863 /* Was there a step_resume breakpoint? (There was if the user
864 did a "next" at the fork() call.) If so, explicitly reset its
865 thread number. Cloned step_resume breakpoints are disabled on
866 creation, so enable it here now that it is associated with the
867 correct thread.
868
869 step_resumes are a form of bp that are made to be per-thread.
870 Since we created the step_resume bp when the parent process
871 was being debugged, and now are switching to the child process,
872 from the breakpoint package's viewpoint, that's a switch of
873 "threads". We must update the bp's notion of which thread
874 it is for, or it'll be ignored when it triggers. */
875
876 if (tp->control.step_resume_breakpoint)
877 {
878 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
879 tp->control.step_resume_breakpoint->loc->enabled = 1;
880 }
881
882 /* Treat exception_resume breakpoints like step_resume breakpoints. */
883 if (tp->control.exception_resume_breakpoint)
884 {
885 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
886 tp->control.exception_resume_breakpoint->loc->enabled = 1;
887 }
888
889 /* Reinsert all breakpoints in the child. The user may have set
890 breakpoints after catching the fork, in which case those
891 were never set in the child, but only in the parent. This makes
892 sure the inserted breakpoints match the breakpoint list. */
893
894 breakpoint_re_set ();
895 insert_breakpoints ();
896 }
897
898 /* The child has exited or execed: resume THREAD, a thread of the parent,
899 if it was meant to be executing. */
900
901 static void
902 proceed_after_vfork_done (thread_info *thread)
903 {
904 if (thread->state == THREAD_RUNNING
905 && !thread->executing ()
906 && !thread->stop_requested
907 && thread->stop_signal () == GDB_SIGNAL_0)
908 {
909 infrun_debug_printf ("resuming vfork parent thread %s",
910 thread->ptid.to_string ().c_str ());
911
912 switch_to_thread (thread);
913 clear_proceed_status (0);
914 proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);
915 }
916 }
917
/* Called whenever we notice an exec or exit event, to handle
   detaching or resuming a vfork parent.  EXEC is non-zero for an exec
   event, zero for an exit event.  The current inferior is the vfork
   child; does nothing if it has no vfork parent.  */

static void
handle_vfork_child_exec_or_exit (int exec)
{
  struct inferior *inf = current_inferior ();

  if (inf->vfork_parent)
    {
      inferior *resume_parent = nullptr;

      /* This exec or exit marks the end of the shared memory region
	 between the parent and the child.  Break the bonds.  */
      inferior *vfork_parent = inf->vfork_parent;
      inf->vfork_parent->vfork_child = nullptr;
      inf->vfork_parent = nullptr;

      /* If the user wanted to detach from the parent, now is the
	 time.  */
      if (vfork_parent->pending_detach)
	{
	  struct program_space *pspace;
	  struct address_space *aspace;

	  /* follow-fork child, detach-on-fork on.  */

	  vfork_parent->pending_detach = 0;

	  scoped_restore_current_pspace_and_thread restore_thread;

	  /* We're letting loose of the parent.  */
	  thread_info *tp = any_live_thread_of_inferior (vfork_parent);
	  switch_to_thread (tp);

	  /* We're about to detach from the parent, which implicitly
	     removes breakpoints from its address space.  There's a
	     catch here: we want to reuse the spaces for the child,
	     but, parent/child are still sharing the pspace at this
	     point, although the exec in reality makes the kernel give
	     the child a fresh set of new pages.  The problem here is
	     that the breakpoints module being unaware of this, would
	     likely chose the child process to write to the parent
	     address space.  Swapping the child temporarily away from
	     the spaces has the desired effect.  Yes, this is "sort
	     of" a hack.  */

	  pspace = inf->pspace;
	  aspace = inf->aspace;
	  inf->aspace = nullptr;
	  inf->pspace = nullptr;

	  if (print_inferior_events)
	    {
	      std::string pidstr
		= target_pid_to_str (ptid_t (vfork_parent->pid));

	      target_terminal::ours_for_output ();

	      if (exec)
		{
		  gdb_printf (_("[Detaching vfork parent %s "
				"after child exec]\n"), pidstr.c_str ());
		}
	      else
		{
		  gdb_printf (_("[Detaching vfork parent %s "
				"after child exit]\n"), pidstr.c_str ());
		}
	    }

	  target_detach (vfork_parent, 0);

	  /* Put it back.  The child now owns the spaces again.  */
	  inf->pspace = pspace;
	  inf->aspace = aspace;
	}
      else if (exec)
	{
	  /* We're staying attached to the parent, so, really give the
	     child a new address space.  */
	  inf->pspace = new program_space (maybe_new_address_space ());
	  inf->aspace = inf->pspace->aspace;
	  inf->removable = 1;
	  set_current_program_space (inf->pspace);

	  resume_parent = vfork_parent;
	}
      else
	{
	  /* If this is a vfork child exiting, then the pspace and
	     aspaces were shared with the parent.  Since we're
	     reporting the process exit, we'll be mourning all that is
	     found in the address space, and switching to null_ptid,
	     preparing to start a new inferior.  But, since we don't
	     want to clobber the parent's address/program spaces, we
	     go ahead and create a new one for this exiting
	     inferior.  */

	  /* Switch to no-thread while running clone_program_space, so
	     that clone_program_space doesn't want to read the
	     selected frame of a dead process.  */
	  scoped_restore_current_thread restore_thread;
	  switch_to_no_thread ();

	  inf->pspace = new program_space (maybe_new_address_space ());
	  inf->aspace = inf->pspace->aspace;
	  set_current_program_space (inf->pspace);
	  inf->removable = 1;
	  inf->symfile_flags = SYMFILE_NO_READ;
	  clone_program_space (inf->pspace, vfork_parent->pspace);

	  resume_parent = vfork_parent;
	}

      gdb_assert (current_program_space == inf->pspace);

      if (non_stop && resume_parent != nullptr)
	{
	  /* If the user wanted the parent to be running, let it go
	     free now.  */
	  scoped_restore_current_thread restore_thread;

	  infrun_debug_printf ("resuming vfork parent process %d",
			       resume_parent->pid);

	  for (thread_info *thread : resume_parent->threads ())
	    proceed_after_vfork_done (thread);
	}
    }
}
1049
/* Handle TARGET_WAITKIND_VFORK_DONE, reported for EVENT_THREAD.  */

static void
handle_vfork_done (thread_info *event_thread)
{
  /* We only care about this event if inferior::thread_waiting_for_vfork_done is
     set, that is if we are waiting for a vfork child not under our control
     (because we detached it) to exec or exit.

     If an inferior has vforked and we are debugging the child, we don't use
     the vfork-done event to get notified about the end of the shared address
     space window.  We rely instead on the child's exec or exit event, and the
     inferior::vfork_{parent,child} fields are used instead.  See
     handle_vfork_child_exec_or_exit for that.  */
  if (event_thread->inf->thread_waiting_for_vfork_done == nullptr)
    {
      infrun_debug_printf ("not waiting for a vfork-done event");
      return;
    }

  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  /* We stopped all threads (other than the vforking thread) of the inferior in
     follow_fork and kept them stopped until now.  It should therefore not be
     possible for another thread to have reported a vfork during that window.
     If THREAD_WAITING_FOR_VFORK_DONE is set, it has to be the same thread whose
     vfork-done we are handling right now.  */
  gdb_assert (event_thread->inf->thread_waiting_for_vfork_done == event_thread);

  event_thread->inf->thread_waiting_for_vfork_done = nullptr;

  /* The child is gone, so breakpoints may be inserted in this program
     space again.  */
  event_thread->inf->pspace->breakpoints_not_allowed = 0;

  /* On non-stop targets, we stopped all the inferior's threads in follow_fork,
     resume them now.  On all-stop targets, everything that needs to be resumed
     will be when we resume the event thread.  */
  if (target_is_non_stop_p ())
    {
      /* restart_threads and start_step_over may change the current thread, make
	 sure we leave the event thread as the current thread.  */
      scoped_restore_current_thread restore_thread;

      insert_breakpoints ();
      start_step_over ();

      if (!step_over_info_valid_p ())
	restart_threads (event_thread, event_thread->inf);
    }
}
1098
/* Enum strings for "set|show follow-exec-mode".  */

static const char follow_exec_mode_new[] = "new";
static const char follow_exec_mode_same[] = "same";
static const char *const follow_exec_mode_names[] =
{
  follow_exec_mode_new,
  follow_exec_mode_same,
  nullptr,
};

/* The currently selected follow-exec mode.  Compared by pointer
   identity against the constants above (see follow_exec).  */
static const char *follow_exec_mode_string = follow_exec_mode_same;
/* Implementation of "show follow-exec-mode".  */

static void
show_follow_exec_mode_string (struct ui_file *file, int from_tty,
			      struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Follow exec mode is \"%s\".\n"), value);
}
1117
/* Follow an exec event reported for PTID.  EXEC_FILE_TARGET is the
   path of the new executable as reported by the target; it is assumed
   to be non-NULL.  Refreshes symbols, breakpoints and the program
   space, honoring "set follow-exec-mode".  */

static void
follow_exec (ptid_t ptid, const char *exec_file_target)
{
  int pid = ptid.pid ();
  ptid_t process_ptid;

  /* Switch terminal for any messages produced e.g. by
     breakpoint_re_set.  */
  target_terminal::ours_for_output ();

  /* This is an exec event that we actually wish to pay attention to.
     Refresh our symbol table to the newly exec'd program, remove any
     momentary bp's, etc.

     If there are breakpoints, they aren't really inserted now,
     since the exec() transformed our inferior into a fresh set
     of instructions.

     We want to preserve symbolic breakpoints on the list, since
     we have hopes that they can be reset after the new a.out's
     symbol table is read.

     However, any "raw" breakpoints must be removed from the list
     (e.g., the solib bp's), since their address is probably invalid
     now.

     And, we DON'T want to call delete_breakpoints() here, since
     that may write the bp's "shadow contents" (the instruction
     value that was overwritten with a TRAP instruction).  Since
     we now have a new a.out, those shadow contents aren't valid.  */

  mark_breakpoints_out ();

  /* The target reports the exec event to the main thread, even if
     some other thread does the exec, and even if the main thread was
     stopped or already gone.  We may still have non-leader threads of
     the process on our list.  E.g., on targets that don't have thread
     exit events (like remote); or on native Linux in non-stop mode if
     there were only two threads in the inferior and the non-leader
     one is the one that execs (and nothing forces an update of the
     thread list up to here).  When debugging remotely, it's best to
     avoid extra traffic, when possible, so avoid syncing the thread
     list with the target, and instead go ahead and delete all threads
     of the process but one that reported the event.  Note this must
     be done before calling update_breakpoints_after_exec, as
     otherwise clearing the threads' resources would reference stale
     thread breakpoints -- it may have been one of these threads that
     stepped across the exec.  We could just clear their stepping
     states, but as long as we're iterating, might as well delete
     them.  Deleting them now rather than at the next user-visible
     stop provides a nicer sequence of events for user and MI
     notifications.  */
  for (thread_info *th : all_threads_safe ())
    if (th->ptid.pid () == pid && th->ptid != ptid)
      delete_thread (th);

  /* We also need to clear any left over stale state for the
     leader/event thread.  E.g., if there was any step-resume
     breakpoint or similar, it's gone now.  We cannot truly
     step-to-next statement through an exec().  */
  thread_info *th = inferior_thread ();
  th->control.step_resume_breakpoint = nullptr;
  th->control.exception_resume_breakpoint = nullptr;
  th->control.single_step_breakpoints = nullptr;
  th->control.step_range_start = 0;
  th->control.step_range_end = 0;

  /* The user may have had the main thread held stopped in the
     previous image (e.g., schedlock on, or non-stop).  Release
     it now.  */
  th->stop_requested = 0;

  update_breakpoints_after_exec ();

  /* What is this a.out's name?  */
  process_ptid = ptid_t (pid);
  gdb_printf (_("%s is executing new program: %s\n"),
	      target_pid_to_str (process_ptid).c_str (),
	      exec_file_target);

  /* We've followed the inferior through an exec.  Therefore, the
     inferior has essentially been killed & reborn.  */

  breakpoint_init_inferior (inf_execd);

  gdb::unique_xmalloc_ptr<char> exec_file_host
    = exec_file_find (exec_file_target, nullptr);

  /* If we were unable to map the executable target pathname onto a host
     pathname, tell the user that.  Otherwise GDB's subsequent behavior
     is confusing.  Maybe it would even be better to stop at this point
     so that the user can specify a file manually before continuing.  */
  if (exec_file_host == nullptr)
    warning (_("Could not load symbols for executable %s.\n"
	       "Do you need \"set sysroot\"?"),
	     exec_file_target);

  /* Reset the shared library package.  This ensures that we get a
     shlib event when the child reaches "_start", at which point the
     dld will have had a chance to initialize the child.  */
  /* Also, loading a symbol file below may trigger symbol lookups, and
     we don't want those to be satisfied by the libraries of the
     previous incarnation of this process.  */
  no_shared_libraries (nullptr, 0);

  struct inferior *inf = current_inferior ();

  if (follow_exec_mode_string == follow_exec_mode_new)
    {
      /* The user wants to keep the old inferior and program spaces
	 around.  Create a new fresh one, and switch to it.  */

      /* Do exit processing for the original inferior before setting the new
	 inferior's pid.  Having two inferiors with the same pid would confuse
	 find_inferior_p(t)id.  Transfer the terminal state and info from the
	 old to the new inferior.  */
      inferior *new_inferior = add_inferior_with_spaces ();

      swap_terminal_info (new_inferior, inf);
      exit_inferior_silent (inf);

      new_inferior->pid = pid;
      target_follow_exec (new_inferior, ptid, exec_file_target);

      /* We continue with the new inferior.  */
      inf = new_inferior;
    }
  else
    {
      /* The old description may no longer be fit for the new image.
	 E.g, a 64-bit process exec'ed a 32-bit process.  Clear the
	 old description; we'll read a new one below.  No need to do
	 this on "follow-exec-mode new", as the old inferior stays
	 around (its description is later cleared/refetched on
	 restart).  */
      target_clear_description ();
      target_follow_exec (inf, ptid, exec_file_target);
    }

  gdb_assert (current_inferior () == inf);
  gdb_assert (current_program_space == inf->pspace);

  /* Attempt to open the exec file.  SYMFILE_DEFER_BP_RESET is used
     because the proper displacement for a PIE (Position Independent
     Executable) main symbol file will only be computed by
     solib_create_inferior_hook below.  breakpoint_re_set would fail
     to insert the breakpoints with the zero displacement.  */
  try_open_exec_file (exec_file_host.get (), inf, SYMFILE_DEFER_BP_RESET);

  /* If the target can specify a description, read it.  Must do this
     after flipping to the new executable (because the target supplied
     description must be compatible with the executable's
     architecture, and the old executable may e.g., be 32-bit, while
     the new one 64-bit), and before anything involving memory or
     registers.  */
  target_find_description ();

  gdb::observers::inferior_execd.notify (inf);

  breakpoint_re_set ();

  /* Reinsert all breakpoints.  (Those which were symbolic have
     been reset to the proper address in the new a.out, thanks
     to symbol_file_command...).  */
  insert_breakpoints ();

  /* The next resume of this inferior should bring it to the shlib
     startup breakpoints.  (If the user had also set bp's on
     "main" from the old (parent) process, then they'll auto-
     matically get reset there in the new process.).  */
}
1291
/* The chain of threads that need to do a step-over operation to get
   past e.g., a breakpoint.  What technique is used to step over the
   breakpoint/watchpoint does not matter -- all threads end up in the
   same queue, to maintain rough temporal order of execution, in order
   to avoid starvation, otherwise, we could e.g., find ourselves
   constantly stepping the same couple threads past their breakpoints
   over and over, if the single-step finishes fast enough.  */
thread_step_over_list global_thread_step_over_list;
1300
/* Bit flags indicating what the thread needs to step over.  */

enum step_over_what_flag
  {
    /* Step over a breakpoint.  */
    STEP_OVER_BREAKPOINT = 1,

    /* Step past a non-continuable watchpoint, in order to let the
       instruction execute so we can evaluate the watchpoint
       expression.  */
    STEP_OVER_WATCHPOINT = 2
  };
DEF_ENUM_FLAGS_TYPE (enum step_over_what_flag, step_over_what);

/* Info about an instruction that is being stepped over.  */

struct step_over_info
{
  /* If we're stepping past a breakpoint, this is the address space
     and address of the instruction the breakpoint is set at.  We'll
     skip inserting all breakpoints here.  Valid iff ASPACE is
     non-NULL.  */
  const address_space *aspace = nullptr;
  CORE_ADDR address = 0;

  /* The instruction being stepped over triggers a nonsteppable
     watchpoint.  If true, we'll skip inserting watchpoints.  */
  int nonsteppable_watchpoint_p = 0;

  /* The global number of the thread doing the step-over, or -1 if no
     step-over is in progress.  */
  int thread = -1;
};

/* The step-over info of the location that is being stepped over.

   Note that with async/breakpoint always-inserted mode, a user might
   set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
   being stepped over.  As setting a new breakpoint inserts all
   breakpoints, we need to make sure the breakpoint being stepped over
   isn't inserted then.  We do that by only clearing the step-over
   info when the step-over is actually finished (or aborted).

   Presently GDB can only step over one breakpoint at any given time.
   Given threads that can't run code in the same address space as the
   breakpoint's can't really miss the breakpoint, GDB could be taught
   to step-over at most one breakpoint per address space (so this info
   could move to the address space object if/when GDB is extended).
   The set of breakpoints being stepped over will normally be much
   smaller than the set of all breakpoints, so a flag in the
   breakpoint location structure would be wasteful.  A separate list
   also saves complexity and run-time, as otherwise we'd have to go
   through all breakpoint locations clearing their flag whenever we
   start a new sequence.  Similar considerations weigh against storing
   this info in the thread object.  Plus, not all step overs actually
   have breakpoint locations -- e.g., stepping past a single-step
   breakpoint, or stepping to complete a non-continuable
   watchpoint.  */
static struct step_over_info step_over_info;
1359
1360 /* Record the address of the breakpoint/instruction we're currently
1361 stepping over.
1362 N.B. We record the aspace and address now, instead of say just the thread,
1363 because when we need the info later the thread may be running. */
1364
1365 static void
1366 set_step_over_info (const address_space *aspace, CORE_ADDR address,
1367 int nonsteppable_watchpoint_p,
1368 int thread)
1369 {
1370 step_over_info.aspace = aspace;
1371 step_over_info.address = address;
1372 step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
1373 step_over_info.thread = thread;
1374 }
1375
1376 /* Called when we're not longer stepping over a breakpoint / an
1377 instruction, so all breakpoints are free to be (re)inserted. */
1378
1379 static void
1380 clear_step_over_info (void)
1381 {
1382 infrun_debug_printf ("clearing step over info");
1383 step_over_info.aspace = nullptr;
1384 step_over_info.address = 0;
1385 step_over_info.nonsteppable_watchpoint_p = 0;
1386 step_over_info.thread = -1;
1387 }
1388
1389 /* See infrun.h. */
1390
1391 int
1392 stepping_past_instruction_at (struct address_space *aspace,
1393 CORE_ADDR address)
1394 {
1395 return (step_over_info.aspace != nullptr
1396 && breakpoint_address_match (aspace, address,
1397 step_over_info.aspace,
1398 step_over_info.address));
1399 }
1400
1401 /* See infrun.h. */
1402
1403 int
1404 thread_is_stepping_over_breakpoint (int thread)
1405 {
1406 return (step_over_info.thread != -1
1407 && thread == step_over_info.thread);
1408 }
1409
/* See infrun.h.  */

int
stepping_past_nonsteppable_watchpoint (void)
{
  /* Non-zero while stepping past an instruction that triggers a
     non-continuable watchpoint; watchpoints are not inserted then.  */
  return step_over_info.nonsteppable_watchpoint_p;
}
1417
1418 /* Returns true if step-over info is valid. */
1419
1420 static bool
1421 step_over_info_valid_p (void)
1422 {
1423 return (step_over_info.aspace != nullptr
1424 || stepping_past_nonsteppable_watchpoint ());
1425 }
1426
1427 \f
1428 /* Displaced stepping. */
1429
1430 /* In non-stop debugging mode, we must take special care to manage
1431 breakpoints properly; in particular, the traditional strategy for
1432 stepping a thread past a breakpoint it has hit is unsuitable.
1433 'Displaced stepping' is a tactic for stepping one thread past a
1434 breakpoint it has hit while ensuring that other threads running
1435 concurrently will hit the breakpoint as they should.
1436
1437 The traditional way to step a thread T off a breakpoint in a
1438 multi-threaded program in all-stop mode is as follows:
1439
1440 a0) Initially, all threads are stopped, and breakpoints are not
1441 inserted.
1442 a1) We single-step T, leaving breakpoints uninserted.
1443 a2) We insert breakpoints, and resume all threads.
1444
1445 In non-stop debugging, however, this strategy is unsuitable: we
1446 don't want to have to stop all threads in the system in order to
1447 continue or step T past a breakpoint. Instead, we use displaced
1448 stepping:
1449
1450 n0) Initially, T is stopped, other threads are running, and
1451 breakpoints are inserted.
1452 n1) We copy the instruction "under" the breakpoint to a separate
1453 location, outside the main code stream, making any adjustments
1454 to the instruction, register, and memory state as directed by
1455 T's architecture.
1456 n2) We single-step T over the instruction at its new location.
1457 n3) We adjust the resulting register and memory state as directed
1458 by T's architecture. This includes resetting T's PC to point
1459 back into the main instruction stream.
1460 n4) We resume T.
1461
1462 This approach depends on the following gdbarch methods:
1463
1464 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1465 indicate where to copy the instruction, and how much space must
1466 be reserved there. We use these in step n1.
1467
1468 - gdbarch_displaced_step_copy_insn copies a instruction to a new
1469 address, and makes any necessary adjustments to the instruction,
1470 register contents, and memory. We use this in step n1.
1471
1472 - gdbarch_displaced_step_fixup adjusts registers and memory after
1473 we have successfully single-stepped the instruction, to yield the
1474 same effect the instruction would have had if we had executed it
1475 at its original address. We use this in step n3.
1476
1477 The gdbarch_displaced_step_copy_insn and
1478 gdbarch_displaced_step_fixup functions must be written so that
1479 copying an instruction with gdbarch_displaced_step_copy_insn,
1480 single-stepping across the copied instruction, and then applying
   gdbarch_displaced_step_fixup should have the same effects on the
1482 thread's memory and registers as stepping the instruction in place
1483 would have. Exactly which responsibilities fall to the copy and
1484 which fall to the fixup is up to the author of those functions.
1485
1486 See the comments in gdbarch.sh for details.
1487
1488 Note that displaced stepping and software single-step cannot
1489 currently be used in combination, although with some care I think
1490 they could be made to. Software single-step works by placing
1491 breakpoints on all possible subsequent instructions; if the
1492 displaced instruction is a PC-relative jump, those breakpoints
1493 could fall in very strange places --- on pages that aren't
1494 executable, or at addresses that are not proper instruction
1495 boundaries. (We do generally let other threads run while we wait
1496 to hit the software single-step breakpoint, and they might
1497 encounter such a corrupted instruction.) One way to work around
1498 this would be to have gdbarch_displaced_step_copy_insn fully
1499 simulate the effect of PC-relative instructions (and return NULL)
1500 on architectures that use software single-stepping.
1501
1502 In non-stop mode, we can have independent and simultaneous step
1503 requests, so more than one thread may need to simultaneously step
1504 over a breakpoint. The current implementation assumes there is
1505 only one scratch space per process. In this case, we have to
1506 serialize access to the scratch space. If thread A wants to step
1507 over a breakpoint, but we are currently waiting for some other
1508 thread to complete a displaced step, we leave thread A stopped and
1509 place it in the displaced_step_request_queue. Whenever a displaced
1510 step finishes, we pick the next thread in the queue and start a new
1511 displaced step operation on it. See displaced_step_prepare and
1512 displaced_step_finish for details. */
1513
1514 /* Return true if THREAD is doing a displaced step. */
1515
1516 static bool
1517 displaced_step_in_progress_thread (thread_info *thread)
1518 {
1519 gdb_assert (thread != nullptr);
1520
1521 return thread->displaced_step_state.in_progress ();
1522 }
1523
/* Return true if INF has a thread doing a displaced step.  Checked via
   the per-inferior in-progress counter, so no thread iteration is
   needed.  */

static bool
displaced_step_in_progress (inferior *inf)
{
  return inf->displaced_step_state.in_progress_count > 0;
}
1531
1532 /* Return true if any thread is doing a displaced step. */
1533
1534 static bool
1535 displaced_step_in_progress_any_thread ()
1536 {
1537 for (inferior *inf : all_non_exited_inferiors ())
1538 {
1539 if (displaced_step_in_progress (inf))
1540 return true;
1541 }
1542
1543 return false;
1544 }
1545
/* Reset INF's infrun-related state when it exits: discard its
   displaced-stepping state and any pending vfork-done wait.  */

static void
infrun_inferior_exit (struct inferior *inf)
{
  inf->displaced_step_state.reset ();
  inf->thread_waiting_for_vfork_done = nullptr;
}
1552
/* Reset INF's infrun-related state after it execs.  */

static void
infrun_inferior_execd (inferior *inf)
{
  /* If some threads were doing a displaced step in this inferior at the
     moment of the exec, they no longer exist.  Even if the exec'ing thread
     was doing a displaced step, we don't want to do any fixup nor restore
     displaced stepping buffer bytes.  */
  inf->displaced_step_state.reset ();

  for (thread_info *thread : inf->threads ())
    thread->displaced_step_state.reset ();

  /* Since an in-line step is done with everything else stopped, if there was
     one in progress at the time of the exec, it must have been the exec'ing
     thread.  */
  clear_step_over_info ();

  inf->thread_waiting_for_vfork_done = nullptr;
}
1572
/* Backing variable for the "set/show displaced-stepping" setting.

   If ON, and the architecture supports it, GDB will use displaced
   stepping to step over breakpoints.  If OFF, or if the architecture
   doesn't support it, GDB will instead use the traditional
   hold-and-step approach.  If AUTO (which is the default), GDB will
   decide which technique to use to step over breakpoints depending on
   whether the target works in a non-stop way (see use_displaced_stepping).  */

static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
1581
/* Implementation of "show displaced-stepping".  When the setting is
   AUTO, also report the effective value for the current target.  */

static void
show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
				 struct cmd_list_element *c,
				 const char *value)
{
  if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
    gdb_printf (file,
		_("Debugger's willingness to use displaced stepping "
		  "to step over breakpoints is %s (currently %s).\n"),
		value, target_is_non_stop_p () ? "on" : "off");
  else
    gdb_printf (file,
		_("Debugger's willingness to use displaced stepping "
		  "to step over breakpoints is %s.\n"), value);
}
1597
/* Return true if the gdbarch ARCH implements the required methods to use
   displaced stepping.  */

static bool
gdbarch_supports_displaced_stepping (gdbarch *arch)
{
  /* Only check for the presence of `prepare`.  The gdbarch verification ensures
     that if `prepare` is provided, so is `finish`.  */
  return gdbarch_displaced_step_prepare_p (arch);
}
1608
1609 /* Return non-zero if displaced stepping can/should be used to step
1610 over breakpoints of thread TP. */
1611
1612 static bool
1613 use_displaced_stepping (thread_info *tp)
1614 {
1615 /* If the user disabled it explicitly, don't use displaced stepping. */
1616 if (can_use_displaced_stepping == AUTO_BOOLEAN_FALSE)
1617 return false;
1618
1619 /* If "auto", only use displaced stepping if the target operates in a non-stop
1620 way. */
1621 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO
1622 && !target_is_non_stop_p ())
1623 return false;
1624
1625 gdbarch *gdbarch = get_thread_regcache (tp)->arch ();
1626
1627 /* If the architecture doesn't implement displaced stepping, don't use
1628 it. */
1629 if (!gdbarch_supports_displaced_stepping (gdbarch))
1630 return false;
1631
1632 /* If recording, don't use displaced stepping. */
1633 if (find_record_target () != nullptr)
1634 return false;
1635
1636 /* If displaced stepping failed before for this inferior, don't bother trying
1637 again. */
1638 if (tp->inf->displaced_step_state.failed_before)
1639 return false;
1640
1641 return true;
1642 }
1643
/* Simple function wrapper around displaced_step_thread_state::reset.
   Exists so that FORWARD_SCOPE_EXIT below can take the address of a
   free function.  */

static void
displaced_step_reset (displaced_step_thread_state *displaced)
{
  displaced->reset ();
}

/* A cleanup that wraps displaced_step_reset.  We use this instead of, say,
   SCOPE_EXIT, because it needs to be discardable with "cleanup.release ()".  */

using displaced_step_reset_cleanup = FORWARD_SCOPE_EXIT (displaced_step_reset);
1656
1657 /* See infrun.h. */
1658
1659 std::string
1660 displaced_step_dump_bytes (const gdb_byte *buf, size_t len)
1661 {
1662 std::string ret;
1663
1664 for (size_t i = 0; i < len; i++)
1665 {
1666 if (i == 0)
1667 ret += string_printf ("%02x", buf[i]);
1668 else
1669 ret += string_printf (" %02x", buf[i]);
1670 }
1671
1672 return ret;
1673 }
1674
/* Prepare TP to single-step, using displaced stepping.

   Note that we cannot use displaced stepping when we have a signal to
   deliver.  If we have a signal to deliver and an instruction to step
   over, then after the step, there will be no indication from the
   target whether the thread entered a signal handler or ignored the
   signal and stepped over the instruction successfully --- both cases
   result in a simple SIGTRAP.  In the first case we mustn't do a
   fixup, and in the second case we must --- but we can't tell which.
   Comments in the code for 'random signals' in handle_inferior_event
   explain how we handle this case instead.

   Returns DISPLACED_STEP_PREPARE_STATUS_OK if preparing was successful -- this
   thread is going to be stepped now; DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
   if displaced stepping this thread got queued; or
   DISPLACED_STEP_PREPARE_STATUS_CANT if this instruction can't be displaced
   stepped.  */

static displaced_step_prepare_status
displaced_step_prepare_throw (thread_info *tp)
{
  regcache *regcache = get_thread_regcache (tp);
  struct gdbarch *gdbarch = regcache->arch ();
  displaced_step_thread_state &disp_step_thread_state
    = tp->displaced_step_state;

  /* We should never reach this function if the architecture does not
     support displaced stepping.  */
  gdb_assert (gdbarch_supports_displaced_stepping (gdbarch));

  /* Nor if the thread isn't meant to step over a breakpoint.  */
  gdb_assert (tp->control.trap_expected);

  /* Disable range stepping while executing in the scratch pad.  We
     want a single-step even if executing the displaced instruction in
     the scratch buffer lands within the stepping range (e.g., a
     jump/branch).  */
  tp->control.may_range_step = 0;

  /* We are about to start a displaced step for this thread.  If one is already
     in progress, something's wrong.  */
  gdb_assert (!disp_step_thread_state.in_progress ());

  if (tp->inf->displaced_step_state.unavailable)
    {
      /* The gdbarch tells us it's not worth asking to try a prepare because
	 it is likely that it will return unavailable, so don't bother asking.  */

      displaced_debug_printf ("deferring step of %s",
			      tp->ptid.to_string ().c_str ());

      global_thread_step_over_chain_enqueue (tp);
      return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE;
    }

  displaced_debug_printf ("displaced-stepping %s now",
			  tp->ptid.to_string ().c_str ());

  scoped_restore_current_thread restore_thread;

  switch_to_thread (tp);

  CORE_ADDR original_pc = regcache_read_pc (regcache);
  CORE_ADDR displaced_pc;

  /* Ask the architecture to copy the instruction to the scratch buffer
     and tell us where the copy was placed.  */
  displaced_step_prepare_status status
    = gdbarch_displaced_step_prepare (gdbarch, tp, displaced_pc);

  if (status == DISPLACED_STEP_PREPARE_STATUS_CANT)
    {
      displaced_debug_printf ("failed to prepare (%s)",
			      tp->ptid.to_string ().c_str ());

      return DISPLACED_STEP_PREPARE_STATUS_CANT;
    }
  else if (status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
    {
      /* Not enough displaced stepping resources available, defer this
	 request by placing it in the queue.  */

      displaced_debug_printf ("not enough resources available, "
			      "deferring step of %s",
			      tp->ptid.to_string ().c_str ());

      global_thread_step_over_chain_enqueue (tp);

      return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE;
    }

  gdb_assert (status == DISPLACED_STEP_PREPARE_STATUS_OK);

  /* Save the information we need to fix things up if the step
     succeeds.  */
  disp_step_thread_state.set (gdbarch);

  tp->inf->displaced_step_state.in_progress_count++;

  displaced_debug_printf ("prepared successfully thread=%s, "
			  "original_pc=%s, displaced_pc=%s",
			  tp->ptid.to_string ().c_str (),
			  paddress (gdbarch, original_pc),
			  paddress (gdbarch, displaced_pc));

  return DISPLACED_STEP_PREPARE_STATUS_OK;
}
1780
/* Wrapper for displaced_step_prepare_throw that disables further
   attempts at displaced stepping for THREAD's inferior if we get a
   memory (or not-supported) error.  */

static displaced_step_prepare_status
displaced_step_prepare (thread_info *thread)
{
  displaced_step_prepare_status status
    = DISPLACED_STEP_PREPARE_STATUS_CANT;

  try
    {
      status = displaced_step_prepare_throw (thread);
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != MEMORY_ERROR
	  && ex.error != NOT_SUPPORTED_ERROR)
	throw;

      infrun_debug_printf ("caught exception, disabling displaced stepping: %s",
			   ex.what ());

      /* Be verbose if "set displaced-stepping" is "on", silent if
	 "auto".  */
      if (can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
	{
	  warning (_("disabling displaced stepping: %s"),
		   ex.what ());
	}

      /* Disable further displaced stepping attempts.  */
      thread->inf->displaced_step_state.failed_before = 1;
    }

  return status;
}
1817
1818 /* If we displaced stepped an instruction successfully, adjust registers and
1819 memory to yield the same effect the instruction would have had if we had
1820 executed it at its original address, and return
1821 DISPLACED_STEP_FINISH_STATUS_OK. If the instruction didn't complete,
1822 relocate the PC and return DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED.
1823
1824 If the thread wasn't displaced stepping, return
1825 DISPLACED_STEP_FINISH_STATUS_OK as well. */
1826
1827 static displaced_step_finish_status
1828 displaced_step_finish (thread_info *event_thread, enum gdb_signal signal)
1829 {
1830 displaced_step_thread_state *displaced = &event_thread->displaced_step_state;
1831
1832 /* Was this thread performing a displaced step? */
1833 if (!displaced->in_progress ())
1834 return DISPLACED_STEP_FINISH_STATUS_OK;
1835
1836 gdb_assert (event_thread->inf->displaced_step_state.in_progress_count > 0);
1837 event_thread->inf->displaced_step_state.in_progress_count--;
1838
1839 /* Fixup may need to read memory/registers. Switch to the thread
1840 that we're fixing up. Also, target_stopped_by_watchpoint checks
1841 the current thread, and displaced_step_restore performs ptid-dependent
1842 memory accesses using current_inferior(). */
1843 switch_to_thread (event_thread);
1844
1845 displaced_step_reset_cleanup cleanup (displaced);
1846
1847 /* Do the fixup, and release the resources acquired to do the displaced
1848 step. */
1849 return gdbarch_displaced_step_finish (displaced->get_original_gdbarch (),
1850 event_thread, signal);
1851 }
1852
/* Data to be passed around while handling an event.  This data is
   discarded between events.  */
struct execution_control_state
{
  execution_control_state ()
  {
    this->reset ();
  }

  /* Reinitialize all members to their "no event yet" values.  */
  void reset ()
  {
    this->target = nullptr;
    this->ptid = null_ptid;
    this->event_thread = nullptr;
    ws = target_waitstatus ();
    stop_func_filled_in = 0;
    stop_func_start = 0;
    stop_func_end = 0;
    stop_func_name = nullptr;
    wait_some_more = 0;
    hit_singlestep_breakpoint = 0;
  }

  /* The target that reported the event, if any.  */
  process_stratum_target *target;
  /* The ptid the event pertains to (null_ptid if none yet).  */
  ptid_t ptid;
  /* The thread that got the event, if this was a thread event; NULL
     otherwise.  */
  struct thread_info *event_thread;

  /* The wait status associated with the event.  */
  struct target_waitstatus ws;
  /* Nonzero once the stop_func_* fields below have been computed for
     this event (name suggests; confirm at the fill-in site).  */
  int stop_func_filled_in;
  CORE_ADDR stop_func_start;
  CORE_ADDR stop_func_end;
  const char *stop_func_name;
  /* Nonzero when the caller should keep waiting for further events
     instead of stopping.  */
  int wait_some_more;

  /* True if the event thread hit the single-step breakpoint of
     another thread.  Thus the event doesn't cause a stop, the thread
     needs to be single-stepped past the single-step breakpoint before
     we can switch back to the original stepping thread.  */
  int hit_singlestep_breakpoint;
};
1895
1896 /* Clear ECS and set it to point at TP. */
1897
1898 static void
1899 reset_ecs (struct execution_control_state *ecs, struct thread_info *tp)
1900 {
1901 ecs->reset ();
1902 ecs->event_thread = tp;
1903 ecs->ptid = tp->ptid;
1904 }
1905
1906 static void keep_going_pass_signal (struct execution_control_state *ecs);
1907 static void prepare_to_wait (struct execution_control_state *ecs);
1908 static bool keep_going_stepped_thread (struct thread_info *tp);
1909 static step_over_what thread_still_needs_step_over (struct thread_info *tp);
1910
/* Are there any pending step-over requests?  If so, run all we can
   now and return true.  Otherwise, return false.  */

static bool
start_step_over (void)
{
  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  /* Don't start a new step-over if we already have an in-line
     step-over operation ongoing.  */
  if (step_over_info_valid_p ())
    return false;

  /* Steal the global thread step over chain.  As we try to initiate displaced
     steps, threads will be enqueued in the global chain if no buffers are
     available.  If we iterated on the global chain directly, we might iterate
     indefinitely.  */
  thread_step_over_list threads_to_step
    = std::move (global_thread_step_over_list);

  infrun_debug_printf ("stealing global queue of threads to step, length = %d",
		       thread_step_over_chain_length (threads_to_step));

  bool started = false;

  /* On scope exit (whatever the reason, return or exception), if there are
     threads left in the THREADS_TO_STEP chain, put back these threads in the
     global list.  */
  SCOPE_EXIT
    {
      if (threads_to_step.empty ())
	infrun_debug_printf ("step-over queue now empty");
      else
	{
	  infrun_debug_printf ("putting back %d threads to step in global queue",
			       thread_step_over_chain_length (threads_to_step));

	  global_thread_step_over_chain_enqueue_chain
	    (std::move (threads_to_step));
	}
    };

  /* Use a "safe" range so that erasing the current element while
     iterating is allowed.  */
  thread_step_over_list_safe_range range
    = make_thread_step_over_list_safe_range (threads_to_step);

  for (thread_info *tp : range)
    {
      struct execution_control_state ecss;
      struct execution_control_state *ecs = &ecss;
      step_over_what step_what;
      int must_be_in_line;

      gdb_assert (!tp->stop_requested);

      if (tp->inf->displaced_step_state.unavailable)
	{
	  /* The arch told us to not even try preparing another displaced step
	     for this inferior.  Just leave the thread in THREADS_TO_STEP, it
	     will get moved to the global chain on scope exit.  */
	  continue;
	}

      if (tp->inf->thread_waiting_for_vfork_done != nullptr)
	{
	  /* When we stop all threads, handling a vfork, any thread in the step
	     over chain remains there.  A user could also try to continue a
	     thread stopped at a breakpoint while another thread is waiting for
	     a vfork-done event.  In any case, we don't want to start a step
	     over right now.  */
	  continue;
	}

      /* Remove thread from the THREADS_TO_STEP chain.  If anything goes wrong
	 while we try to prepare the displaced step, we don't add it back to
	 the global step over chain.  This is to avoid a thread staying in the
	 step over chain indefinitely if something goes wrong when resuming it.
	 If the error is intermittent and it still needs a step over, it will
	 get enqueued again when we try to resume it normally.  */
      threads_to_step.erase (threads_to_step.iterator_to (*tp));

      step_what = thread_still_needs_step_over (tp);
      must_be_in_line = ((step_what & STEP_OVER_WATCHPOINT)
			 || ((step_what & STEP_OVER_BREAKPOINT)
			     && !use_displaced_stepping (tp)));

      /* We currently stop all threads of all processes to step-over
	 in-line.  If we need to start a new in-line step-over, let
	 any pending displaced steps finish first.  */
      if (must_be_in_line && displaced_step_in_progress_any_thread ())
	{
	  global_thread_step_over_chain_enqueue (tp);
	  continue;
	}

      /* Sanity check: a thread queued for a step-over must not already
	 be running or expecting a trap.  */
      if (tp->control.trap_expected
	  || tp->resumed ()
	  || tp->executing ())
	{
	  internal_error ("[%s] has inconsistent state: "
			  "trap_expected=%d, resumed=%d, executing=%d\n",
			  tp->ptid.to_string ().c_str (),
			  tp->control.trap_expected,
			  tp->resumed (),
			  tp->executing ());
	}

      infrun_debug_printf ("resuming [%s] for step-over",
			   tp->ptid.to_string ().c_str ());

      /* keep_going_pass_signal skips the step-over if the breakpoint
	 is no longer inserted.  In all-stop, we want to keep looking
	 for a thread that needs a step-over instead of resuming TP,
	 because we wouldn't be able to resume anything else until the
	 target stops again.  In non-stop, the resume always resumes
	 only TP, so it's OK to let the thread resume freely.  */
      if (!target_is_non_stop_p () && !step_what)
	continue;

      /* Try to resume TP, possibly initiating its step-over.  */
      switch_to_thread (tp);
      reset_ecs (ecs, tp);
      keep_going_pass_signal (ecs);

      if (!ecs->wait_some_more)
	error (_("Command aborted."));

      /* If the thread's step over could not be initiated because no buffers
	 were available, it was re-added to the global step over chain.  */
      if (tp->resumed ())
	{
	  infrun_debug_printf ("[%s] was resumed.",
			       tp->ptid.to_string ().c_str ());
	  gdb_assert (!thread_is_in_step_over_chain (tp));
	}
      else
	{
	  infrun_debug_printf ("[%s] was NOT resumed.",
			       tp->ptid.to_string ().c_str ());
	  gdb_assert (thread_is_in_step_over_chain (tp));
	}

      /* If we started a new in-line step-over, we're done.  */
      if (step_over_info_valid_p ())
	{
	  gdb_assert (tp->control.trap_expected);
	  started = true;
	  break;
	}

      if (!target_is_non_stop_p ())
	{
	  /* On all-stop, shouldn't have resumed unless we needed a
	     step over.  */
	  gdb_assert (tp->control.trap_expected
		      || tp->step_after_step_resume_breakpoint);

	  /* With remote targets (at least), in all-stop, we can't
	     issue any further remote commands until the program stops
	     again.  */
	  started = true;
	  break;
	}

      /* Either the thread no longer needed a step-over, or a new
	 displaced stepping sequence started.  Even in the latter
	 case, continue looking.  Maybe we can also start another
	 displaced step on a thread of other process.  */
    }

  return started;
}
2081
2082 /* Update global variables holding ptids to hold NEW_PTID if they were
2083 holding OLD_PTID. */
2084 static void
2085 infrun_thread_ptid_changed (process_stratum_target *target,
2086 ptid_t old_ptid, ptid_t new_ptid)
2087 {
2088 if (inferior_ptid == old_ptid
2089 && current_inferior ()->process_target () == target)
2090 inferior_ptid = new_ptid;
2091 }
2092
2093 \f
2094
/* Possible values of the scheduler-locking mode.  */
static const char schedlock_off[] = "off";
static const char schedlock_on[] = "on";
static const char schedlock_step[] = "step";
static const char schedlock_replay[] = "replay";
/* Nullptr-terminated array of the values above, presumably used to
   register the "set scheduler-locking" enum command -- confirm at the
   registration site.  */
static const char *const scheduler_enums[] = {
  schedlock_off,
  schedlock_on,
  schedlock_step,
  schedlock_replay,
  nullptr
};
/* The current scheduler-locking mode; starts out as "replay".  */
static const char *scheduler_mode = schedlock_replay;
2107 static void
2108 show_scheduler_mode (struct ui_file *file, int from_tty,
2109 struct cmd_list_element *c, const char *value)
2110 {
2111 gdb_printf (file,
2112 _("Mode for locking scheduler "
2113 "during execution is \"%s\".\n"),
2114 value);
2115 }
2116
2117 static void
2118 set_schedlock_func (const char *args, int from_tty, struct cmd_list_element *c)
2119 {
2120 if (!target_can_lock_scheduler ())
2121 {
2122 scheduler_mode = schedlock_off;
2123 error (_("Target '%s' cannot support this command."),
2124 target_shortname ());
2125 }
2126 }
2127
/* True if execution commands resume all threads of all processes by
   default; otherwise, resume only threads of the current inferior
   process.  Defaults to false (resume only the current process).  */
bool sched_multi = false;
2132
2133 /* Try to setup for software single stepping. Return true if target_resume()
2134 should use hardware single step.
2135
2136 GDBARCH the current gdbarch. */
2137
2138 static bool
2139 maybe_software_singlestep (struct gdbarch *gdbarch)
2140 {
2141 bool hw_step = true;
2142
2143 if (execution_direction == EXEC_FORWARD
2144 && gdbarch_software_single_step_p (gdbarch))
2145 hw_step = !insert_single_step_breakpoints (gdbarch);
2146
2147 return hw_step;
2148 }
2149
2150 /* See infrun.h. */
2151
2152 ptid_t
2153 user_visible_resume_ptid (int step)
2154 {
2155 ptid_t resume_ptid;
2156
2157 if (non_stop)
2158 {
2159 /* With non-stop mode on, threads are always handled
2160 individually. */
2161 resume_ptid = inferior_ptid;
2162 }
2163 else if ((scheduler_mode == schedlock_on)
2164 || (scheduler_mode == schedlock_step && step))
2165 {
2166 /* User-settable 'scheduler' mode requires solo thread
2167 resume. */
2168 resume_ptid = inferior_ptid;
2169 }
2170 else if ((scheduler_mode == schedlock_replay)
2171 && target_record_will_replay (minus_one_ptid, execution_direction))
2172 {
2173 /* User-settable 'scheduler' mode requires solo thread resume in replay
2174 mode. */
2175 resume_ptid = inferior_ptid;
2176 }
2177 else if (!sched_multi && target_supports_multi_process ())
2178 {
2179 /* Resume all threads of the current process (and none of other
2180 processes). */
2181 resume_ptid = ptid_t (inferior_ptid.pid ());
2182 }
2183 else
2184 {
2185 /* Resume all threads of all processes. */
2186 resume_ptid = RESUME_ALL;
2187 }
2188
2189 return resume_ptid;
2190 }
2191
2192 /* See infrun.h. */
2193
2194 process_stratum_target *
2195 user_visible_resume_target (ptid_t resume_ptid)
2196 {
2197 return (resume_ptid == minus_one_ptid && sched_multi
2198 ? nullptr
2199 : current_inferior ()->process_target ());
2200 }
2201
2202 /* Return a ptid representing the set of threads that we will resume,
2203 in the perspective of the target, assuming run control handling
2204 does not require leaving some threads stopped (e.g., stepping past
2205 breakpoint). USER_STEP indicates whether we're about to start the
2206 target for a stepping command. */
2207
2208 static ptid_t
2209 internal_resume_ptid (int user_step)
2210 {
2211 /* In non-stop, we always control threads individually. Note that
2212 the target may always work in non-stop mode even with "set
2213 non-stop off", in which case user_visible_resume_ptid could
2214 return a wildcard ptid. */
2215 if (target_is_non_stop_p ())
2216 return inferior_ptid;
2217
2218 /* The rest of the function assumes non-stop==off and
2219 target-non-stop==off.
2220
2221 If a thread is waiting for a vfork-done event, it means breakpoints are out
2222 for this inferior (well, program space in fact). We don't want to resume
2223 any thread other than the one waiting for vfork done, otherwise these other
2224 threads could miss breakpoints. So if a thread in the resumption set is
2225 waiting for a vfork-done event, resume only that thread.
2226
2227 The resumption set width depends on whether schedule-multiple is on or off.
2228
2229 Note that if the target_resume interface was more flexible, we could be
2230 smarter here when schedule-multiple is on. For example, imagine 3
2231 inferiors with 2 threads each (1.1, 1.2, 2.1, 2.2, 3.1 and 3.2). Threads
2232 2.1 and 3.2 are both waiting for a vfork-done event. Then we could ask the
2233 target(s) to resume:
2234
2235 - All threads of inferior 1
2236 - Thread 2.1
2237 - Thread 3.2
2238
2239 Since we don't have that flexibility (we can only pass one ptid), just
2240 resume the first thread waiting for a vfork-done event we find (e.g. thread
2241 2.1). */
2242 if (sched_multi)
2243 {
2244 for (inferior *inf : all_non_exited_inferiors ())
2245 if (inf->thread_waiting_for_vfork_done != nullptr)
2246 return inf->thread_waiting_for_vfork_done->ptid;
2247 }
2248 else if (current_inferior ()->thread_waiting_for_vfork_done != nullptr)
2249 return current_inferior ()->thread_waiting_for_vfork_done->ptid;
2250
2251 return user_visible_resume_ptid (user_step);
2252 }
2253
/* Wrapper for target_resume, that handles infrun-specific
   bookkeeping.  RESUME_PTID is the set of threads to resume, STEP
   requests a single-step, and SIG is the signal to deliver
   (GDB_SIGNAL_0 for none).  */

static void
do_target_resume (ptid_t resume_ptid, bool step, enum gdb_signal sig)
{
  struct thread_info *tp = inferior_thread ();

  gdb_assert (!tp->stop_requested);

  /* Install inferior's terminal modes.  */
  target_terminal::inferior ();

  /* Avoid confusing the next resume, if the next stop/resume
     happens to apply to another thread.  */
  tp->set_stop_signal (GDB_SIGNAL_0);

  /* Advise target which signals may be handled silently.

     If we have removed breakpoints because we are stepping over one
     in-line (in any thread), we need to receive all signals to avoid
     accidentally skipping a breakpoint during execution of a signal
     handler.

     Likewise if we're displaced stepping, otherwise a trap for a
     breakpoint in a signal handler might be confused with the
     displaced step finishing.  We don't make the displaced_step_finish
     step distinguish the cases instead, because:

     - a backtrace while stopped in the signal handler would show the
     scratch pad as frame older than the signal handler, instead of
     the real mainline code.

     - when the thread is later resumed, the signal handler would
     return to the scratch pad area, which would no longer be
     valid.  */
  if (step_over_info_valid_p ()
      || displaced_step_in_progress (tp->inf))
    target_pass_signals ({});
  else
    target_pass_signals (signal_pass);

  infrun_debug_printf ("resume_ptid=%s, step=%d, sig=%s",
		       resume_ptid.to_string ().c_str (),
		       step, gdb_signal_to_symbol_string (sig));

  /* Finally, ask the target to actually resume the chosen set of
     threads.  */
  target_resume (resume_ptid, step, sig);
}
2302
/* Resume the inferior.  SIG is the signal to give the inferior
   (GDB_SIGNAL_0 for none).  Note: don't call this directly; instead
   call 'resume', which handles exceptions.  */

static void
resume_1 (enum gdb_signal sig)
{
  struct regcache *regcache = get_current_regcache ();
  struct gdbarch *gdbarch = regcache->arch ();
  struct thread_info *tp = inferior_thread ();
  const address_space *aspace = regcache->aspace ();
  ptid_t resume_ptid;
  /* This represents the user's step vs continue request.  When
     deciding whether "set scheduler-locking step" applies, it's the
     user's intention that counts.  */
  const int user_step = tp->control.stepping_command;
  /* This represents what we'll actually request the target to do.
     This can decay from a step to a continue, if e.g., we need to
     implement single-stepping with breakpoints (software
     single-step).  */
  bool step;

  gdb_assert (!tp->stop_requested);
  gdb_assert (!thread_is_in_step_over_chain (tp));

  if (tp->has_pending_waitstatus ())
    {
      /* The thread already has an event to report; don't resume it on
	 the target.  Just mark it resumed and, in async mode, poke
	 the event loop so the pending event gets consumed.  */
      infrun_debug_printf
	("thread %s has pending wait "
	 "status %s (currently_stepping=%d).",
	 tp->ptid.to_string ().c_str (),
	 tp->pending_waitstatus ().to_string ().c_str (),
	 currently_stepping (tp));

      tp->inf->process_target ()->threads_executing = true;
      tp->set_resumed (true);

      /* FIXME: What should we do if we are supposed to resume this
	 thread with a signal?  Maybe we should maintain a queue of
	 pending signals to deliver.  */
      if (sig != GDB_SIGNAL_0)
	{
	  warning (_("Couldn't deliver signal %s to %s."),
		   gdb_signal_to_name (sig),
		   tp->ptid.to_string ().c_str ());
	}

      tp->set_stop_signal (GDB_SIGNAL_0);

      if (target_can_async_p ())
	{
	  target_async (true);
	  /* Tell the event loop we have an event to process.  */
	  mark_async_event_handler (infrun_async_inferior_event_token);
	}
      return;
    }

  tp->stepped_breakpoint = 0;

  /* Depends on stepped_breakpoint.  */
  step = currently_stepping (tp);

  if (current_inferior ()->thread_waiting_for_vfork_done != nullptr)
    {
      /* Don't try to single-step a vfork parent that is waiting for
	 the child to get out of the shared memory region (by exec'ing
	 or exiting).  This is particularly important on software
	 single-step archs, as the child process would trip on the
	 software single step breakpoint inserted for the parent
	 process.  Since the parent will not actually execute any
	 instruction until the child is out of the shared region (such
	 are vfork's semantics), it is safe to simply continue it.
	 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
	 the parent, and tell it to `keep_going', which automatically
	 re-sets it stepping.  */
      infrun_debug_printf ("resume : clear step");
      step = false;
    }

  CORE_ADDR pc = regcache_read_pc (regcache);

  infrun_debug_printf ("step=%d, signal=%s, trap_expected=%d, "
		       "current thread [%s] at %s",
		       step, gdb_signal_to_symbol_string (sig),
		       tp->control.trap_expected,
		       inferior_ptid.to_string ().c_str (),
		       paddress (gdbarch, pc));

  /* Normally, by the time we reach `resume', the breakpoints are either
     removed or inserted, as appropriate.  The exception is if we're sitting
     at a permanent breakpoint; we need to step over it, but permanent
     breakpoints can't be removed.  So we have to test for it here.  */
  if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
    {
      if (sig != GDB_SIGNAL_0)
	{
	  /* We have a signal to pass to the inferior.  The resume
	     may, or may not take us to the signal handler.  If this
	     is a step, we'll need to stop in the signal handler, if
	     there's one, (if the target supports stepping into
	     handlers), or in the next mainline instruction, if
	     there's no handler.  If this is a continue, we need to be
	     sure to run the handler with all breakpoints inserted.
	     In all cases, set a breakpoint at the current address
	     (where the handler returns to), and once that breakpoint
	     is hit, resume skipping the permanent breakpoint.  If
	     that breakpoint isn't hit, then we've stepped into the
	     signal handler (or hit some other event).  We'll delete
	     the step-resume breakpoint then.  */

	  infrun_debug_printf ("resume: skipping permanent breakpoint, "
			       "deliver signal first");

	  clear_step_over_info ();
	  tp->control.trap_expected = 0;

	  if (tp->control.step_resume_breakpoint == nullptr)
	    {
	      /* Set a "high-priority" step-resume, as we don't want
		 user breakpoints at PC to trigger (again) when this
		 hits.  */
	      insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
	      gdb_assert (tp->control.step_resume_breakpoint->loc->permanent);

	      tp->step_after_step_resume_breakpoint = step;
	    }

	  insert_breakpoints ();
	}
      else
	{
	  /* There's no signal to pass, we can go ahead and skip the
	     permanent breakpoint manually.  */
	  infrun_debug_printf ("skipping permanent breakpoint");
	  gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
	  /* Update pc to reflect the new address from which we will
	     execute instructions.  */
	  pc = regcache_read_pc (regcache);

	  if (step)
	    {
	      /* We've already advanced the PC, so the stepping part
		 is done.  Now we need to arrange for a trap to be
		 reported to handle_inferior_event.  Set a breakpoint
		 at the current PC, and run to it.  Don't update
		 prev_pc, because if we end in
		 switch_back_to_stepped_thread, we want the "expected
		 thread advanced also" branch to be taken.  IOW, we
		 don't want this thread to step further from PC
		 (overstep).  */
	      gdb_assert (!step_over_info_valid_p ());
	      insert_single_step_breakpoint (gdbarch, aspace, pc);
	      insert_breakpoints ();

	      resume_ptid = internal_resume_ptid (user_step);
	      do_target_resume (resume_ptid, false, GDB_SIGNAL_0);
	      tp->set_resumed (true);
	      return;
	    }
	}
    }

  /* If we have a breakpoint to step over, make sure to do a single
     step only.  Same if we have software watchpoints.  */
  if (tp->control.trap_expected || bpstat_should_step ())
    tp->control.may_range_step = 0;

  /* If displaced stepping is enabled, step over breakpoints by executing a
     copy of the instruction at a different address.

     We can't use displaced stepping when we have a signal to deliver;
     the comments for displaced_step_prepare explain why.  The
     comments in the handle_inferior event for dealing with 'random
     signals' explain what we do instead.

     We can't use displaced stepping when we are waiting for vfork_done
     event, displaced stepping breaks the vfork child similarly as single
     step software breakpoint.  */
  if (tp->control.trap_expected
      && use_displaced_stepping (tp)
      && !step_over_info_valid_p ()
      && sig == GDB_SIGNAL_0
      && current_inferior ()->thread_waiting_for_vfork_done == nullptr)
    {
      displaced_step_prepare_status prepare_status
	= displaced_step_prepare (tp);

      if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
	{
	  /* No displaced-stepping resources right now; the thread was
	     queued for later, so don't resume it.  */
	  infrun_debug_printf ("Got placed in step-over queue");

	  tp->control.trap_expected = 0;
	  return;
	}
      else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_CANT)
	{
	  /* Fallback to stepping over the breakpoint in-line.  */

	  if (target_is_non_stop_p ())
	    stop_all_threads ("displaced stepping falling back on inline stepping");

	  set_step_over_info (regcache->aspace (),
			      regcache_read_pc (regcache), 0, tp->global_num);

	  step = maybe_software_singlestep (gdbarch);

	  insert_breakpoints ();
	}
      else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_OK)
	{
	  /* Update pc to reflect the new address from which we will
	     execute instructions due to displaced stepping.  */
	  pc = regcache_read_pc (get_thread_regcache (tp));

	  step = gdbarch_displaced_step_hw_singlestep (gdbarch);
	}
      else
	gdb_assert_not_reached ("Invalid displaced_step_prepare_status "
				"value.");
    }

  /* Do we need to do it the hard way, w/temp breakpoints?  */
  else if (step)
    step = maybe_software_singlestep (gdbarch);

  /* Currently, our software single-step implementation leads to different
     results than hardware single-stepping in one situation: when stepping
     into delivering a signal which has an associated signal handler,
     hardware single-step will stop at the first instruction of the handler,
     while software single-step will simply skip execution of the handler.

     For now, this difference in behavior is accepted since there is no
     easy way to actually implement single-stepping into a signal handler
     without kernel support.

     However, there is one scenario where this difference leads to follow-on
     problems: if we're stepping off a breakpoint by removing all breakpoints
     and then single-stepping.  In this case, the software single-step
     behavior means that even if there is a *breakpoint* in the signal
     handler, GDB still would not stop.

     Fortunately, we can at least fix this particular issue.  We detect
     here the case where we are about to deliver a signal while software
     single-stepping with breakpoints removed.  In this situation, we
     revert the decisions to remove all breakpoints and insert single-
     step breakpoints, and instead we install a step-resume breakpoint
     at the current address, deliver the signal without stepping, and
     once we arrive back at the step-resume breakpoint, actually step
     over the breakpoint we originally wanted to step over.  */
  if (thread_has_single_step_breakpoints_set (tp)
      && sig != GDB_SIGNAL_0
      && step_over_info_valid_p ())
    {
      /* If we have nested signals or a pending signal is delivered
	 immediately after a handler returns, might already have
	 a step-resume breakpoint set on the earlier handler.  We cannot
	 set another step-resume breakpoint; just continue on until the
	 original breakpoint is hit.  */
      if (tp->control.step_resume_breakpoint == nullptr)
	{
	  insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
	  tp->step_after_step_resume_breakpoint = 1;
	}

      delete_single_step_breakpoints (tp);

      clear_step_over_info ();
      tp->control.trap_expected = 0;

      insert_breakpoints ();
    }

  /* If STEP is set, it's a request to use hardware stepping
     facilities.  But in that case, we should never
     use singlestep breakpoint.  */
  gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step));

  /* Decide the set of threads to ask the target to resume.  */
  if (tp->control.trap_expected)
    {
      /* We're allowing a thread to run past a breakpoint it has
	 hit, either by single-stepping the thread with the breakpoint
	 removed, or by displaced stepping, with the breakpoint inserted.
	 In the former case, we need to single-step only this thread,
	 and keep others stopped, as they can miss this breakpoint if
	 allowed to run.  That's not really a problem for displaced
	 stepping, but, we still keep other threads stopped, in case
	 another thread is also stopped for a breakpoint waiting for
	 its turn in the displaced stepping queue.  */
      resume_ptid = inferior_ptid;
    }
  else
    resume_ptid = internal_resume_ptid (user_step);

  if (execution_direction != EXEC_REVERSE
      && step && breakpoint_inserted_here_p (aspace, pc))
    {
      /* There are two cases where we currently need to step a
	 breakpoint instruction when we have a signal to deliver:

	 - See handle_signal_stop where we handle random signals that
	 could take out us out of the stepping range.  Normally, in
	 that case we end up continuing (instead of stepping) over the
	 signal handler with a breakpoint at PC, but there are cases
	 where we should _always_ single-step, even if we have a
	 step-resume breakpoint, like when a software watchpoint is
	 set.  Assuming single-stepping and delivering a signal at the
	 same time would takes us to the signal handler, then we could
	 have removed the breakpoint at PC to step over it.  However,
	 some hardware step targets (like e.g., Mac OS) can't step
	 into signal handlers, and for those, we need to leave the
	 breakpoint at PC inserted, as otherwise if the handler
	 recurses and executes PC again, it'll miss the breakpoint.
	 So we leave the breakpoint inserted anyway, but we need to
	 record that we tried to step a breakpoint instruction, so
	 that adjust_pc_after_break doesn't end up confused.

	 - In non-stop if we insert a breakpoint (e.g., a step-resume)
	 in one thread after another thread that was stepping had been
	 momentarily paused for a step-over.  When we re-resume the
	 stepping thread, it may be resumed from that address with a
	 breakpoint that hasn't trapped yet.  Seen with
	 gdb.threads/non-stop-fair-events.exp, on targets that don't
	 do displaced stepping.  */

      infrun_debug_printf ("resume: [%s] stepped breakpoint",
			   tp->ptid.to_string ().c_str ());

      tp->stepped_breakpoint = 1;

      /* Most targets can step a breakpoint instruction, thus
	 executing it normally.  But if this one cannot, just
	 continue and we will hit it anyway.  */
      if (gdbarch_cannot_step_breakpoint (gdbarch))
	step = false;
    }

  /* Debug aid: dump the first bytes at the to-be-resumed PC when
     displaced-stepping with displaced debugging enabled.  */
  if (debug_displaced
      && tp->control.trap_expected
      && use_displaced_stepping (tp)
      && !step_over_info_valid_p ())
    {
      struct regcache *resume_regcache = get_thread_regcache (tp);
      struct gdbarch *resume_gdbarch = resume_regcache->arch ();
      CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
      gdb_byte buf[4];

      read_memory (actual_pc, buf, sizeof (buf));
      displaced_debug_printf ("run %s: %s",
			      paddress (resume_gdbarch, actual_pc),
			      displaced_step_dump_bytes
				(buf, sizeof (buf)).c_str ());
    }

  if (tp->control.may_range_step)
    {
      /* If we're resuming a thread with the PC out of the step
	 range, then we're doing some nested/finer run control
	 operation, like stepping the thread out of the dynamic
	 linker or the displaced stepping scratch pad.  We
	 shouldn't have allowed a range step then.  */
      gdb_assert (pc_in_thread_step_range (pc, tp));
    }

  /* Finally, resume the chosen set of threads on the target.  */
  do_target_resume (resume_ptid, step, sig);
  tp->set_resumed (true);
}
2671
2672 /* Resume the inferior. SIG is the signal to give the inferior
2673 (GDB_SIGNAL_0 for none). This is a wrapper around 'resume_1' that
2674 rolls back state on error. */
2675
2676 static void
2677 resume (gdb_signal sig)
2678 {
2679 try
2680 {
2681 resume_1 (sig);
2682 }
2683 catch (const gdb_exception &ex)
2684 {
2685 /* If resuming is being aborted for any reason, delete any
2686 single-step breakpoint resume_1 may have created, to avoid
2687 confusing the following resumption, and to avoid leaving
2688 single-step breakpoints perturbing other threads, in case
2689 we're running in non-stop mode. */
2690 if (inferior_ptid != null_ptid)
2691 delete_single_step_breakpoints (inferior_thread ());
2692 throw;
2693 }
2694 }
2695
2696 \f
2697 /* Proceeding. */
2698
2699 /* See infrun.h. */
2700
/* Counter that tracks number of user visible stops.  This can be used
   to tell whether a command has proceeded the inferior past the
   current location.  This allows e.g., inferior function calls in
   breakpoint commands to not interrupt the command list.  When the
   call finishes successfully, the inferior is standing at the same
   breakpoint as if nothing happened (and so we don't call
   normal_stop).  */
static ULONGEST current_stop_id;

/* See infrun.h.  */

ULONGEST
get_stop_id (void)
{
  return current_stop_id;
}

/* Called when we report a user visible stop.  Bumps CURRENT_STOP_ID
   so that callers that sampled get_stop_id earlier can detect that a
   stop happened in between.  */

static void
new_stop_id (void)
{
  current_stop_id++;
}
2725
/* Clear out all variables saying what to do when inferior is continued.
   First do this, then set the ones you want, then call `proceed'.  */

static void
clear_proceed_status_thread (struct thread_info *tp)
{
  infrun_debug_printf ("%s", tp->ptid.to_string ().c_str ());

  /* If we're starting a new sequence, then the previous finished
     single-step is no longer relevant.  */
  if (tp->has_pending_waitstatus ())
    {
      if (tp->stop_reason () == TARGET_STOPPED_BY_SINGLE_STEP)
	{
	  infrun_debug_printf ("pending event of %s was a finished step. "
			       "Discarding.",
			       tp->ptid.to_string ().c_str ());

	  tp->clear_pending_waitstatus ();
	  tp->set_stop_reason (TARGET_STOPPED_BY_NO_REASON);
	}
      else
	{
	  /* Any other pending event is kept; just log it.  */
	  infrun_debug_printf
	    ("thread %s has pending wait status %s (currently_stepping=%d).",
	     tp->ptid.to_string ().c_str (),
	     tp->pending_waitstatus ().to_string ().c_str (),
	     currently_stepping (tp));
	}
    }

  /* If this signal should not be seen by program, give it zero.
     Used for debugging signals.  */
  if (!signal_pass_state (tp->stop_signal ()))
    tp->set_stop_signal (GDB_SIGNAL_0);

  /* Drop the thread's finite-state machine (e.g., the state of an
     in-progress step/finish command), if any.  */
  tp->release_thread_fsm ();

  /* Reset all per-thread run-control state to neutral values.  */
  tp->control.trap_expected = 0;
  tp->control.step_range_start = 0;
  tp->control.step_range_end = 0;
  tp->control.may_range_step = 0;
  tp->control.step_frame_id = null_frame_id;
  tp->control.step_stack_frame_id = null_frame_id;
  tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
  tp->control.step_start_function = nullptr;
  tp->stop_requested = 0;

  tp->control.stop_step = 0;

  tp->control.proceed_to_finish = 0;

  tp->control.stepping_command = 0;

  /* Discard any remaining commands or status from previous stop.  */
  bpstat_clear (&tp->control.stop_bpstat);
}
2783
/* Clear the run-control state of every thread we're about to resume
   (all-stop), or of just the current thread (non-stop).  STEP
   indicates whether the upcoming command is a step/next-like
   command.  */

void
clear_proceed_status (int step)
{
  /* With scheduler-locking replay, stop replaying other threads if we're
     not replaying the user-visible resume ptid.

     This is a convenience feature to not require the user to explicitly
     stop replaying the other threads.  We're assuming that the user's
     intent is to resume tracing the recorded process.  */
  if (!non_stop && scheduler_mode == schedlock_replay
      && target_record_is_replaying (minus_one_ptid)
      && !target_record_will_replay (user_visible_resume_ptid (step),
				     execution_direction))
    target_record_stop_replaying ();

  if (!non_stop && inferior_ptid != null_ptid)
    {
      ptid_t resume_ptid = user_visible_resume_ptid (step);
      process_stratum_target *resume_target
	= user_visible_resume_target (resume_ptid);

      /* In all-stop mode, delete the per-thread status of all threads
	 we're about to resume, implicitly and explicitly.  */
      for (thread_info *tp : all_non_exited_threads (resume_target, resume_ptid))
	clear_proceed_status_thread (tp);
    }

  if (inferior_ptid != null_ptid)
    {
      struct inferior *inferior;

      if (non_stop)
	{
	  /* If in non-stop mode, only delete the per-thread status of
	     the current thread.  */
	  clear_proceed_status_thread (inferior_thread ());
	}

      inferior = current_inferior ();
      inferior->control.stop_soon = NO_STOP_QUIETLY;
    }

  gdb::observers::about_to_proceed.notify ();
}
2828
2829 /* Returns true if TP is still stopped at a breakpoint that needs
2830 stepping-over in order to make progress. If the breakpoint is gone
2831 meanwhile, we can skip the whole step-over dance. */
2832
2833 static bool
2834 thread_still_needs_step_over_bp (struct thread_info *tp)
2835 {
2836 if (tp->stepping_over_breakpoint)
2837 {
2838 struct regcache *regcache = get_thread_regcache (tp);
2839
2840 if (breakpoint_here_p (regcache->aspace (),
2841 regcache_read_pc (regcache))
2842 == ordinary_breakpoint_here)
2843 return true;
2844
2845 tp->stepping_over_breakpoint = 0;
2846 }
2847
2848 return false;
2849 }
2850
2851 /* Check whether thread TP still needs to start a step-over in order
2852 to make progress when resumed. Returns an bitwise or of enum
2853 step_over_what bits, indicating what needs to be stepped over. */
2854
2855 static step_over_what
2856 thread_still_needs_step_over (struct thread_info *tp)
2857 {
2858 step_over_what what = 0;
2859
2860 if (thread_still_needs_step_over_bp (tp))
2861 what |= STEP_OVER_BREAKPOINT;
2862
2863 if (tp->stepping_over_watchpoint
2864 && !target_have_steppable_watchpoint ())
2865 what |= STEP_OVER_WATCHPOINT;
2866
2867 return what;
2868 }
2869
2870 /* Returns true if scheduler locking applies. STEP indicates whether
2871 we're about to do a step/next-like command to a thread. */
2872
2873 static bool
2874 schedlock_applies (struct thread_info *tp)
2875 {
2876 return (scheduler_mode == schedlock_on
2877 || (scheduler_mode == schedlock_step
2878 && tp->control.stepping_command)
2879 || (scheduler_mode == schedlock_replay
2880 && target_record_will_replay (minus_one_ptid,
2881 execution_direction)));
2882 }
2883
/* Set process_stratum_target::COMMIT_RESUMED_STATE in all target
   stacks that have threads executing and don't have threads with
   pending events.  Note: this only flips the flag; the actual
   target_commit_resumed calls are done elsewhere (see
   maybe_call_commit_resumed_all_targets).  */

static void
maybe_set_commit_resumed_all_targets ()
{
  scoped_restore_current_thread restore_thread;

  for (inferior *inf : all_non_exited_inferiors ())
    {
      process_stratum_target *proc_target = inf->process_target ();

      if (proc_target->commit_resumed_state)
	{
	  /* We already set this in a previous iteration, via another
	     inferior sharing the process_stratum target.  */
	  continue;
	}

      /* If the target has no resumed threads, it would be useless to
	 ask it to commit the resumed threads.  */
      if (!proc_target->threads_executing)
	{
	  infrun_debug_printf ("not requesting commit-resumed for target "
			       "%s, no resumed threads",
			       proc_target->shortname ());
	  continue;
	}

      /* As an optimization, if a thread from this target has some
	 status to report, handle it before requiring the target to
	 commit its resumed threads: handling the status might lead to
	 resuming more threads.  */
      if (proc_target->has_resumed_with_pending_wait_status ())
	{
	  infrun_debug_printf ("not requesting commit-resumed for target %s, a"
			       " thread has a pending waitstatus",
			       proc_target->shortname ());
	  continue;
	}

      /* Switch first: the target_has_pending_events check below
	 queries the current inferior's target.  */
      switch_to_inferior_no_thread (inf);

      if (target_has_pending_events ())
	{
	  infrun_debug_printf ("not requesting commit-resumed for target %s, "
			       "target has pending events",
			       proc_target->shortname ());
	  continue;
	}

      infrun_debug_printf ("enabling commit-resumed for target %s",
			   proc_target->shortname ());

      proc_target->commit_resumed_state = true;
    }
}
2942
2943 /* See infrun.h. */
2944
2945 void
2946 maybe_call_commit_resumed_all_targets ()
2947 {
2948 scoped_restore_current_thread restore_thread;
2949
2950 for (inferior *inf : all_non_exited_inferiors ())
2951 {
2952 process_stratum_target *proc_target = inf->process_target ();
2953
2954 if (!proc_target->commit_resumed_state)
2955 continue;
2956
2957 switch_to_inferior_no_thread (inf);
2958
2959 infrun_debug_printf ("calling commit_resumed for target %s",
2960 proc_target->shortname());
2961
2962 target_commit_resumed ();
2963 }
2964 }
2965
2966 /* To track nesting of scoped_disable_commit_resumed objects, ensuring
2967 that only the outermost one attempts to re-enable
2968 commit-resumed. */
2969 static bool enable_commit_resumed = true;
2970
2971 /* See infrun.h. */
2972
2973 scoped_disable_commit_resumed::scoped_disable_commit_resumed
2974 (const char *reason)
2975 : m_reason (reason),
2976 m_prev_enable_commit_resumed (enable_commit_resumed)
2977 {
2978 infrun_debug_printf ("reason=%s", m_reason);
2979
2980 enable_commit_resumed = false;
2981
2982 for (inferior *inf : all_non_exited_inferiors ())
2983 {
2984 process_stratum_target *proc_target = inf->process_target ();
2985
2986 if (m_prev_enable_commit_resumed)
2987 {
2988 /* This is the outermost instance: force all
2989 COMMIT_RESUMED_STATE to false. */
2990 proc_target->commit_resumed_state = false;
2991 }
2992 else
2993 {
2994 /* This is not the outermost instance, we expect
2995 COMMIT_RESUMED_STATE to have been cleared by the
2996 outermost instance. */
2997 gdb_assert (!proc_target->commit_resumed_state);
2998 }
2999 }
3000 }
3001
/* See infrun.h.  */

void
scoped_disable_commit_resumed::reset ()
{
  /* Guard against running twice (e.g., once via reset_and_commit and
     once more from the destructor).  */
  if (m_reset)
    return;
  m_reset = true;

  infrun_debug_printf ("reason=%s", m_reason);

  gdb_assert (!enable_commit_resumed);

  enable_commit_resumed = m_prev_enable_commit_resumed;

  if (m_prev_enable_commit_resumed)
    {
      /* This is the outermost instance, re-enable
	 COMMIT_RESUMED_STATE on the targets where it's possible.  */
      maybe_set_commit_resumed_all_targets ();
    }
  else
    {
      /* This is not the outermost instance, we expect
	 COMMIT_RESUMED_STATE to still be false.  */
      for (inferior *inf : all_non_exited_inferiors ())
	{
	  process_stratum_target *proc_target = inf->process_target ();
	  gdb_assert (!proc_target->commit_resumed_state);
	}
    }
}
3034
/* See infrun.h.  */

scoped_disable_commit_resumed::~scoped_disable_commit_resumed ()
{
  /* reset is a no-op if it already ran (directly or via
     reset_and_commit).  */
  reset ();
}
3041
/* See infrun.h.  */

void
scoped_disable_commit_resumed::reset_and_commit ()
{
  /* Restore the previous commit-resumed state, then immediately ask
     the targets with commit-resumed enabled to commit their resumed
     threads.  */
  reset ();
  maybe_call_commit_resumed_all_targets ();
}
3050
3051 /* See infrun.h. */
3052
3053 scoped_enable_commit_resumed::scoped_enable_commit_resumed
3054 (const char *reason)
3055 : m_reason (reason),
3056 m_prev_enable_commit_resumed (enable_commit_resumed)
3057 {
3058 infrun_debug_printf ("reason=%s", m_reason);
3059
3060 if (!enable_commit_resumed)
3061 {
3062 enable_commit_resumed = true;
3063
3064 /* Re-enable COMMIT_RESUMED_STATE on the targets where it's
3065 possible. */
3066 maybe_set_commit_resumed_all_targets ();
3067
3068 maybe_call_commit_resumed_all_targets ();
3069 }
3070 }
3071
3072 /* See infrun.h. */
3073
3074 scoped_enable_commit_resumed::~scoped_enable_commit_resumed ()
3075 {
3076 infrun_debug_printf ("reason=%s", m_reason);
3077
3078 gdb_assert (enable_commit_resumed);
3079
3080 enable_commit_resumed = m_prev_enable_commit_resumed;
3081
3082 if (!enable_commit_resumed)
3083 {
3084 /* Force all COMMIT_RESUMED_STATE back to false. */
3085 for (inferior *inf : all_non_exited_inferiors ())
3086 {
3087 process_stratum_target *proc_target = inf->process_target ();
3088 proc_target->commit_resumed_state = false;
3089 }
3090 }
3091 }
3092
/* Check that all the targets we're about to resume are in non-stop
   mode.  Ideally, we'd only care whether all targets support
   target-async, but we're not there yet.  E.g., stop_all_threads
   doesn't know how to handle all-stop targets.  Also, the remote
   protocol in all-stop mode is synchronous, irrespective of
   target-async, which means that things like a breakpoint re-set
   triggered by one target would try to read memory from all targets
   and fail.  */

static void
check_multi_target_resumption (process_stratum_target *resume_target)
{
  /* Only a wildcard resume (RESUME_TARGET == nullptr) in all-stop
     mode can span multiple targets; anything else needs no check.  */
  if (!non_stop && resume_target == nullptr)
    {
      scoped_restore_current_thread restore_thread;

      /* This is used to track whether we're resuming more than one
	 target.  */
      process_stratum_target *first_connection = nullptr;

      /* The first inferior we see with a target that does not work in
	 always-non-stop mode.  */
      inferior *first_not_non_stop = nullptr;

      for (inferior *inf : all_non_exited_inferiors ())
	{
	  switch_to_inferior_no_thread (inf);

	  if (!target_has_execution ())
	    continue;

	  process_stratum_target *proc_target
	    = current_inferior ()->process_target();

	  if (!target_is_non_stop_p ())
	    first_not_non_stop = inf;

	  if (first_connection == nullptr)
	    first_connection = proc_target;
	  else if (first_connection != proc_target
		   && first_not_non_stop != nullptr)
	    {
	      /* More than one connection is involved and at least one
		 of them is not always-non-stop: error out, naming the
		 offending connection.  */
	      switch_to_inferior_no_thread (first_not_non_stop);

	      proc_target = current_inferior ()->process_target();

	      error (_("Connection %d (%s) does not support "
		       "multi-target resumption."),
		     proc_target->connection_number,
		     make_target_connection_string (proc_target).c_str ());
	    }
	}
    }
}
3147
/* Basic routine for continuing the program in various fashions.

   ADDR is the address to resume at, or -1 for resume where stopped.
   SIGGNAL is the signal to give it, or GDB_SIGNAL_0 for none,
   or GDB_SIGNAL_DEFAULT for act according to how it stopped.

   You should call clear_proceed_status before calling proceed.  */

void
proceed (CORE_ADDR addr, enum gdb_signal siggnal)
{
  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  struct regcache *regcache;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  /* Scratch event-handling state used when resuming threads via
     keep_going_pass_signal below.  */
  struct execution_control_state ecss;
  struct execution_control_state *ecs = &ecss;

  /* If we're stopped at a fork/vfork, follow the branch set by the
     "set follow-fork-mode" command; otherwise, we'll just proceed
     resuming the current thread.  */
  if (!follow_fork ())
    {
      /* The target for some reason decided not to resume.  */
      normal_stop ();
      if (target_can_async_p ())
	inferior_event_handler (INF_EXEC_COMPLETE);
      return;
    }

  /* We'll update this if & when we switch to a new thread.  */
  previous_inferior_ptid = inferior_ptid;

  regcache = get_current_regcache ();
  gdbarch = regcache->arch ();
  const address_space *aspace = regcache->aspace ();

  pc = regcache_read_pc_protected (regcache);

  thread_info *cur_thr = inferior_thread ();

  /* Fill in with reasonable starting values.  */
  init_thread_stepping_state (cur_thr);

  gdb_assert (!thread_is_in_step_over_chain (cur_thr));

  ptid_t resume_ptid
    = user_visible_resume_ptid (cur_thr->control.stepping_command);
  process_stratum_target *resume_target
    = user_visible_resume_target (resume_ptid);

  check_multi_target_resumption (resume_target);

  if (addr == (CORE_ADDR) -1)
    {
      if (cur_thr->stop_pc_p ()
	  && pc == cur_thr->stop_pc ()
	  && breakpoint_here_p (aspace, pc) == ordinary_breakpoint_here
	  && execution_direction != EXEC_REVERSE)
	/* There is a breakpoint at the address we will resume at,
	   step one instruction before inserting breakpoints so that
	   we do not stop right away (and report a second hit at this
	   breakpoint).

	   Note, we don't do this in reverse, because we won't
	   actually be executing the breakpoint insn anyway.
	   We'll be (un-)executing the previous instruction.  */
	cur_thr->stepping_over_breakpoint = 1;
      else if (gdbarch_single_step_through_delay_p (gdbarch)
	       && gdbarch_single_step_through_delay (gdbarch,
						     get_current_frame ()))
	/* We stepped onto an instruction that needs to be stepped
	   again before re-inserting the breakpoint, do so.  */
	cur_thr->stepping_over_breakpoint = 1;
    }
  else
    {
      regcache_write_pc (regcache, addr);
    }

  if (siggnal != GDB_SIGNAL_DEFAULT)
    cur_thr->set_stop_signal (siggnal);

  /* If an exception is thrown from this point on, make sure to
     propagate GDB's knowledge of the executing state to the
     frontend/user running state.  */
  scoped_finish_thread_state finish_state (resume_target, resume_ptid);

  /* Even if RESUME_PTID is a wildcard, and we end up resuming fewer
     threads (e.g., we might need to set threads stepping over
     breakpoints first), from the user/frontend's point of view, all
     threads in RESUME_PTID are now running.  Unless we're calling an
     inferior function, as in that case we pretend the inferior
     doesn't run at all.  */
  if (!cur_thr->control.in_infcall)
    set_running (resume_target, resume_ptid, true);

  infrun_debug_printf ("addr=%s, signal=%s", paddress (gdbarch, addr),
		       gdb_signal_to_symbol_string (siggnal));

  annotate_starting ();

  /* Make sure that output from GDB appears before output from the
     inferior.  */
  gdb_flush (gdb_stdout);

  /* Since we've marked the inferior running, give it the terminal.  A
     QUIT/Ctrl-C from here on is forwarded to the target (which can
     still detect attempts to unblock a stuck connection with repeated
     Ctrl-C from within target_pass_ctrlc).  */
  target_terminal::inferior ();

  /* In a multi-threaded task we may select another thread and
     then continue or step.

     But if a thread that we're resuming had stopped at a breakpoint,
     it will immediately cause another breakpoint stop without any
     execution (i.e. it will report a breakpoint hit incorrectly).  So
     we must step over it first.

     Look for threads other than the current (TP) that reported a
     breakpoint hit and haven't been resumed yet since.  */

  /* If scheduler locking applies, we can avoid iterating over all
     threads.  */
  if (!non_stop && !schedlock_applies (cur_thr))
    {
      for (thread_info *tp : all_non_exited_threads (resume_target,
						     resume_ptid))
	{
	  switch_to_thread_no_regs (tp);

	  /* Ignore the current thread here.  It's handled
	     afterwards.  */
	  if (tp == cur_thr)
	    continue;

	  if (!thread_still_needs_step_over (tp))
	    continue;

	  gdb_assert (!thread_is_in_step_over_chain (tp));

	  infrun_debug_printf ("need to step-over [%s] first",
			       tp->ptid.to_string ().c_str ());

	  global_thread_step_over_chain_enqueue (tp);
	}

      switch_to_thread (cur_thr);
    }

  /* Enqueue the current thread last, so that we move all other
     threads over their breakpoints first.  */
  if (cur_thr->stepping_over_breakpoint)
    global_thread_step_over_chain_enqueue (cur_thr);

  /* If the thread isn't started, we'll still need to set its prev_pc,
     so that switch_back_to_stepped_thread knows the thread hasn't
     advanced.  Must do this before resuming any thread, as in
     all-stop/remote, once we resume we can't send any other packet
     until the target stops again.  */
  cur_thr->prev_pc = regcache_read_pc_protected (regcache);

  {
    /* Batch the resumptions below: don't let the target commit
       resumed threads until we're done deciding who to resume.  */
    scoped_disable_commit_resumed disable_commit_resumed ("proceeding");
    bool step_over_started = start_step_over ();

    if (step_over_info_valid_p ())
      {
	/* Either this thread started a new in-line step over, or some
	   other thread was already doing one.  In either case, don't
	   resume anything else until the step-over is finished.  */
      }
    else if (step_over_started && !target_is_non_stop_p ())
      {
	/* A new displaced stepping sequence was started.  In all-stop,
	   we can't talk to the target anymore until it next stops.  */
      }
    else if (!non_stop && target_is_non_stop_p ())
      {
	INFRUN_SCOPED_DEBUG_START_END
	  ("resuming threads, all-stop-on-top-of-non-stop");

	/* In all-stop, but the target is always in non-stop mode.
	   Start all other threads that are implicitly resumed too.  */
	for (thread_info *tp : all_non_exited_threads (resume_target,
						       resume_ptid))
	  {
	    switch_to_thread_no_regs (tp);

	    if (!tp->inf->has_execution ())
	      {
		infrun_debug_printf ("[%s] target has no execution",
				     tp->ptid.to_string ().c_str ());
		continue;
	      }

	    if (tp->resumed ())
	      {
		infrun_debug_printf ("[%s] resumed",
				     tp->ptid.to_string ().c_str ());
		gdb_assert (tp->executing () || tp->has_pending_waitstatus ());
		continue;
	      }

	    if (thread_is_in_step_over_chain (tp))
	      {
		infrun_debug_printf ("[%s] needs step-over",
				     tp->ptid.to_string ().c_str ());
		continue;
	      }

	    /* If a thread of that inferior is waiting for a vfork-done
	       (for a detached vfork child to exec or exit), breakpoints are
	       removed.  We must not resume any thread of that inferior, other
	       than the one waiting for the vfork-done.  */
	    if (tp->inf->thread_waiting_for_vfork_done != nullptr
		&& tp != tp->inf->thread_waiting_for_vfork_done)
	      {
		infrun_debug_printf ("[%s] another thread of this inferior is "
				     "waiting for vfork-done",
				     tp->ptid.to_string ().c_str ());
		continue;
	      }

	    infrun_debug_printf ("resuming %s",
				 tp->ptid.to_string ().c_str ());

	    reset_ecs (ecs, tp);
	    switch_to_thread (tp);
	    keep_going_pass_signal (ecs);
	    if (!ecs->wait_some_more)
	      error (_("Command aborted."));
	  }
      }
    else if (!cur_thr->resumed ()
	     && !thread_is_in_step_over_chain (cur_thr)
	     /* In non-stop, forbid resuming a thread if some other thread of
		that inferior is waiting for a vfork-done event (this means
		breakpoints are out for this inferior).  */
	     && !(non_stop
		  && cur_thr->inf->thread_waiting_for_vfork_done != nullptr))
      {
	/* The thread wasn't started, and isn't queued, run it now.  */
	reset_ecs (ecs, cur_thr);
	switch_to_thread (cur_thr);
	keep_going_pass_signal (ecs);
	if (!ecs->wait_some_more)
	  error (_("Command aborted."));
      }

    disable_commit_resumed.reset_and_commit ();
  }

  finish_state.release ();

  /* If we've switched threads above, switch back to the previously
     current thread.  We don't want the user to see a different
     selected thread.  */
  switch_to_thread (cur_thr);

  /* Tell the event loop to wait for it to stop.  If the target
     supports asynchronous execution, it'll do this from within
     target_resume.  */
  if (!target_can_async_p ())
    mark_async_event_handler (infrun_async_inferior_event_token);
}
3416 \f
3417
/* Start remote-debugging of a machine over a serial link.  */

void
start_remote (int from_tty)
{
  inferior *inf = current_inferior ();
  /* Suppress the usual stop announcements while we sync up with the
     remote side.  */
  inf->control.stop_soon = STOP_QUIETLY_REMOTE;

  /* Always go on waiting for the target, regardless of the mode.  */
  /* FIXME: cagney/1999-09-23: At present it isn't possible to
     indicate to wait_for_inferior that a target should timeout if
     nothing is returned (instead of just blocking).  Because of this,
     targets expecting an immediate response need to, internally, set
     things up so that the target_wait() is forced to eventually
     timeout.  */
  /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
     differentiate to its caller what the state of the target is after
     the initial open has been performed.  Here we're assuming that
     the target has stopped.  It should be possible to eventually have
     target_open() return to the caller an indication that the target
     is currently running and GDB state should be set to the same as
     for an async run.  */
  wait_for_inferior (inf);

  /* Now that the inferior has stopped, do any bookkeeping like
     loading shared libraries.  We want to do this before normal_stop,
     so that the displayed frame is up to date.  */
  post_create_inferior (from_tty);

  normal_stop ();
}
3449
/* Initialize static vars when a new inferior begins.  */

void
init_wait_for_inferior (void)
{
  /* These are meaningless until the first time through wait_for_inferior.  */

  /* Reset breakpoint bookkeeping for a freshly-started inferior.  */
  breakpoint_init_inferior (inf_starting);

  /* Start from a clean run-control slate (0 = not a stepping
     command).  */
  clear_proceed_status (0);

  nullify_last_target_wait_ptid ();

  previous_inferior_ptid = inferior_ptid;
}
3465
3466 \f
3467
/* Forward declarations for the event-handling machinery defined
   further down in this file.  */

static void handle_inferior_event (struct execution_control_state *ecs);

static void handle_step_into_function (struct gdbarch *gdbarch,
				       struct execution_control_state *ecs);
static void handle_step_into_function_backward (struct gdbarch *gdbarch,
						struct execution_control_state *ecs);
static void handle_signal_stop (struct execution_control_state *ecs);
static void check_exception_resume (struct execution_control_state *,
				    frame_info_ptr);

static void end_stepping_range (struct execution_control_state *ecs);
static void stop_waiting (struct execution_control_state *ecs);
static void keep_going (struct execution_control_state *ecs);
static void process_event_stop_test (struct execution_control_state *ecs);
static bool switch_back_to_stepped_thread (struct execution_control_state *ecs);
3483
/* This function is attached as a "thread_stop_requested" observer.
   Cleanup local state that assumed the PTID was to be resumed, and
   report the stop to the frontend.  */

static void
infrun_thread_stop_requested (ptid_t ptid)
{
  process_stratum_target *curr_target = current_inferior ()->process_target ();

  /* PTID was requested to stop.  If the thread was already stopped,
     but the user/frontend doesn't know about that yet (e.g., the
     thread had been temporarily paused for some step-over), set up
     for reporting the stop now.  */
  for (thread_info *tp : all_threads (curr_target, ptid))
    {
      /* Threads the frontend already sees as stopped need nothing.  */
      if (tp->state != THREAD_RUNNING)
	continue;
      /* Skip threads still actually executing on the target.  */
      if (tp->executing ())
	continue;

      /* Remove matching threads from the step-over queue, so
	 start_step_over doesn't try to resume them
	 automatically.  */
      if (thread_is_in_step_over_chain (tp))
	global_thread_step_over_chain_remove (tp);

      /* If the thread is stopped, but the user/frontend doesn't
	 know about that yet, queue a pending event, as if the
	 thread had just stopped now.  Unless the thread already had
	 a pending event.  */
      if (!tp->has_pending_waitstatus ())
	{
	  target_waitstatus ws;
	  ws.set_stopped (GDB_SIGNAL_0);
	  tp->set_pending_waitstatus (ws);
	}

      /* Clear the inline-frame state, since we're re-processing the
	 stop.  */
      clear_inline_frame_state (tp);

      /* If this thread was paused because some other thread was
	 doing an inline-step over, let that finish first.  Once
	 that happens, we'll restart all threads and consume pending
	 stop events then.  */
      if (step_over_info_valid_p ())
	continue;

      /* Otherwise we can process the (new) pending event now.  Set
	 it so this pending event is considered by
	 do_target_wait.  */
      tp->set_resumed (true);
    }
}
3538
3539 static void
3540 infrun_thread_thread_exit (struct thread_info *tp, int silent)
3541 {
3542 if (target_last_proc_target == tp->inf->process_target ()
3543 && target_last_wait_ptid == tp->ptid)
3544 nullify_last_target_wait_ptid ();
3545 }
3546
/* Delete the step resume, single-step and longjmp/exception resume
   breakpoints of TP.  These are all the infrun-internal breakpoints
   associated with a thread's run control.  */

static void
delete_thread_infrun_breakpoints (struct thread_info *tp)
{
  delete_step_resume_breakpoint (tp);
  delete_exception_resume_breakpoint (tp);
  delete_single_step_breakpoints (tp);
}
3557
3558 /* If the target still has execution, call FUNC for each thread that
3559 just stopped. In all-stop, that's all the non-exited threads; in
3560 non-stop, that's the current thread, only. */
3561
3562 typedef void (*for_each_just_stopped_thread_callback_func)
3563 (struct thread_info *tp);
3564
3565 static void
3566 for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func)
3567 {
3568 if (!target_has_execution () || inferior_ptid == null_ptid)
3569 return;
3570
3571 if (target_is_non_stop_p ())
3572 {
3573 /* If in non-stop mode, only the current thread stopped. */
3574 func (inferior_thread ());
3575 }
3576 else
3577 {
3578 /* In all-stop mode, all threads have stopped. */
3579 for (thread_info *tp : all_non_exited_threads ())
3580 func (tp);
3581 }
3582 }
3583
/* Delete the step resume and longjmp/exception resume breakpoints of
   the threads that just stopped.  */

static void
delete_just_stopped_threads_infrun_breakpoints (void)
{
  for_each_just_stopped_thread (delete_thread_infrun_breakpoints);
}
3592
/* Delete the single-step breakpoints of the threads that just
   stopped.  Unlike the function above, this leaves step-resume and
   exception-resume breakpoints alone.  */

static void
delete_just_stopped_threads_single_step_breakpoints (void)
{
  for_each_just_stopped_thread (delete_single_step_breakpoints);
}
3601
/* See infrun.h.  */

void
print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
			   const struct target_waitstatus &ws)
{
  /* Log the target_wait call and its result as three debug lines:
     the requested ptid, the resulting ptid, and the waitstatus.  */
  infrun_debug_printf ("target_wait (%s [%s], status) =",
		       waiton_ptid.to_string ().c_str (),
		       target_pid_to_str (waiton_ptid).c_str ());
  infrun_debug_printf (" %s [%s],",
		       result_ptid.to_string ().c_str (),
		       target_pid_to_str (result_ptid).c_str ());
  infrun_debug_printf (" %s", ws.to_string ().c_str ());
}
3616
3617 /* Select a thread at random, out of those which are resumed and have
3618 had events. */
3619
3620 static struct thread_info *
3621 random_pending_event_thread (inferior *inf, ptid_t waiton_ptid)
3622 {
3623 process_stratum_target *proc_target = inf->process_target ();
3624 thread_info *thread
3625 = proc_target->random_resumed_with_pending_wait_status (inf, waiton_ptid);
3626
3627 if (thread == nullptr)
3628 {
3629 infrun_debug_printf ("None found.");
3630 return nullptr;
3631 }
3632
3633 infrun_debug_printf ("Found %s.", thread->ptid.to_string ().c_str ());
3634 gdb_assert (thread->resumed ());
3635 gdb_assert (thread->has_pending_waitstatus ());
3636
3637 return thread;
3638 }
3639
/* Wrapper for target_wait that first checks whether threads have
   pending statuses to report before actually asking the target for
   more events.  INF is the inferior we're using to call target_wait
   on.  Returns the ptid the reported event is for, and stores the
   wait status in *STATUS.  */

static ptid_t
do_target_wait_1 (inferior *inf, ptid_t ptid,
		  target_waitstatus *status, target_wait_flags options)
{
  struct thread_info *tp;

  /* We know that we are looking for an event in the target of inferior
     INF, but we don't know which thread the event might come from.  As
     such we want to make sure that INFERIOR_PTID is reset so that none of
     the wait code relies on it - doing so is always a mistake.  */
  switch_to_inferior_no_thread (inf);

  /* First check if there is a resumed thread with a wait status
     pending.  */
  if (ptid == minus_one_ptid || ptid.is_pid ())
    {
      /* Wildcard wait: pick among all resumed threads with pending
	 events, at random for fairness.  */
      tp = random_pending_event_thread (inf, ptid);
    }
  else
    {
      infrun_debug_printf ("Waiting for specific thread %s.",
			   ptid.to_string ().c_str ());

      /* We have a specific thread to check.  */
      tp = find_thread_ptid (inf, ptid);
      gdb_assert (tp != nullptr);
      if (!tp->has_pending_waitstatus ())
	tp = nullptr;
    }

  /* A pending breakpoint event may have become stale: the thread's PC
     may have moved since the event was recorded, or the breakpoint
     that caused it may have been removed.  Detect that and cancel the
     event, replacing it with a "spurious" stop.  */
  if (tp != nullptr
      && (tp->stop_reason () == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || tp->stop_reason () == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct regcache *regcache = get_thread_regcache (tp);
      struct gdbarch *gdbarch = regcache->arch ();
      CORE_ADDR pc;
      int discard = 0;

      pc = regcache_read_pc (regcache);

      if (pc != tp->stop_pc ())
	{
	  infrun_debug_printf ("PC of %s changed. was=%s, now=%s",
			       tp->ptid.to_string ().c_str (),
			       paddress (gdbarch, tp->stop_pc ()),
			       paddress (gdbarch, pc));
	  discard = 1;
	}
      else if (!breakpoint_inserted_here_p (regcache->aspace (), pc))
	{
	  infrun_debug_printf ("previous breakpoint of %s, at %s gone",
			       tp->ptid.to_string ().c_str (),
			       paddress (gdbarch, pc));

	  discard = 1;
	}

      if (discard)
	{
	  infrun_debug_printf ("pending event of %s cancelled.",
			       tp->ptid.to_string ().c_str ());

	  /* Replace the stale event with a spurious stop, so the
	     thread is still woken up, but the breakpoint hit isn't
	     reported.  */
	  tp->clear_pending_waitstatus ();
	  target_waitstatus ws;
	  ws.set_spurious ();
	  tp->set_pending_waitstatus (ws);
	  tp->set_stop_reason (TARGET_STOPPED_BY_NO_REASON);
	}
    }

  if (tp != nullptr)
    {
      infrun_debug_printf ("Using pending wait status %s for %s.",
			   tp->pending_waitstatus ().to_string ().c_str (),
			   tp->ptid.to_string ().c_str ());

      /* Now that we've selected our final event LWP, un-adjust its PC
	 if it was a software breakpoint (and the target doesn't
	 always adjust the PC itself).  */
      if (tp->stop_reason () == TARGET_STOPPED_BY_SW_BREAKPOINT
	  && !target_supports_stopped_by_sw_breakpoint ())
	{
	  struct regcache *regcache;
	  struct gdbarch *gdbarch;
	  int decr_pc;

	  regcache = get_thread_regcache (tp);
	  gdbarch = regcache->arch ();

	  decr_pc = gdbarch_decr_pc_after_break (gdbarch);
	  if (decr_pc != 0)
	    {
	      CORE_ADDR pc;

	      pc = regcache_read_pc (regcache);
	      regcache_write_pc (regcache, pc + decr_pc);
	    }
	}

      /* Consume the pending status: hand it to the caller and clear
	 it from the thread.  */
      tp->set_stop_reason (TARGET_STOPPED_BY_NO_REASON);
      *status = tp->pending_waitstatus ();
      tp->clear_pending_waitstatus ();

      /* Wake up the event loop again, until all pending events are
	 processed.  */
      if (target_is_async_p ())
	mark_async_event_handler (infrun_async_inferior_event_token);
      return tp->ptid;
    }

  /* But if we don't find one, we'll have to wait.  */

  /* We can't ask a non-async target to do a non-blocking wait, so this will be
     a blocking wait.  */
  if (!target_can_async_p ())
    options &= ~TARGET_WNOHANG;

  return target_wait (ptid, status, options);
}
3765
/* Wrapper for target_wait that first checks whether threads have
   pending statuses to report before actually asking the target for
   more events.  Polls for events from all inferiors/targets.
   Returns true and fills in ECS if an event was found; returns false
   with ECS->ws set to "ignore" otherwise.  */

static bool
do_target_wait (execution_control_state *ecs, target_wait_flags options)
{
  int num_inferiors = 0;
  int random_selector;

  /* For fairness, we pick the first inferior/target to poll at random
     out of all inferiors that may report events, and then continue
     polling the rest of the inferior list starting from that one in a
     circular fashion until the whole list is polled once.  */

  /* An inferior is pollable iff it is bound to a process target.  */
  auto inferior_matches = [] (inferior *inf)
    {
      return inf->process_target () != nullptr;
    };

  /* First see how many matching inferiors we have.  */
  for (inferior *inf : all_inferiors ())
    if (inferior_matches (inf))
      num_inferiors++;

  if (num_inferiors == 0)
    {
      ecs->ws.set_ignore ();
      return false;
    }

  /* Now randomly pick an inferior out of those that matched.  */
  random_selector = (int)
    ((num_inferiors * (double) rand ()) / (RAND_MAX + 1.0));

  if (num_inferiors > 1)
    infrun_debug_printf ("Found %d inferiors, starting at #%d",
			 num_inferiors, random_selector);

  /* Select the Nth inferior that matched.  */

  inferior *selected = nullptr;

  for (inferior *inf : all_inferiors ())
    if (inferior_matches (inf))
      if (random_selector-- == 0)
	{
	  selected = inf;
	  break;
	}

  /* Now poll for events out of each of the matching inferior's
     targets, starting from the selected one.  */

  auto do_wait = [&] (inferior *inf)
  {
    ecs->ptid = do_target_wait_1 (inf, minus_one_ptid, &ecs->ws, options);
    ecs->target = inf->process_target ();
    return (ecs->ws.kind () != TARGET_WAITKIND_IGNORE);
  };

  /* Needed in 'all-stop + target-non-stop' mode, because we end up
     here spuriously after the target is all stopped and we've already
     reported the stop to the user, polling for events.  */
  scoped_restore_current_thread restore_thread;

  intrusive_list_iterator<inferior> start
    = inferior_list.iterator_to (*selected);

  /* Poll from SELECTED through the end of the inferior list...  */
  for (intrusive_list_iterator<inferior> it = start;
       it != inferior_list.end ();
       ++it)
    {
      inferior *inf = &*it;

      if (inferior_matches (inf) && do_wait (inf))
	return true;
    }

  /* ... then wrap around from the beginning up to (but excluding)
     SELECTED, so every inferior is polled exactly once.  */
  for (intrusive_list_iterator<inferior> it = inferior_list.begin ();
       it != start;
       ++it)
    {
      inferior *inf = &*it;

      if (inferior_matches (inf) && do_wait (inf))
	return true;
    }

  /* No inferior had an event to report.  */
  ecs->ws.set_ignore ();
  return false;
}
3858
/* An event reported by wait_one.  Bundles together the source target,
   the ptid the event is for, and the wait status, so the whole event
   can be passed around as one value (see e.g. prepare_for_detach).  */

struct wait_one_event
{
  /* The target the event came out of.  */
  process_stratum_target *target;

  /* The PTID the event was for.  */
  ptid_t ptid;

  /* The waitstatus.  */
  target_waitstatus ws;
};
3872
3873 static bool handle_one (const wait_one_event &event);
3874
/* Prepare and stabilize the inferior for detaching it.  E.g.,
   detaching while a thread is displaced stepping is a recipe for
   crashing it, as nothing would readjust the PC out of the scratch
   pad.  */

void
prepare_for_detach (void)
{
  struct inferior *inf = current_inferior ();
  ptid_t pid_ptid = ptid_t (inf->pid);
  scoped_restore_current_thread restore_thread;

  /* Mark the inferior as detaching for the duration of this function;
     restored automatically on exit.  */
  scoped_restore restore_detaching = make_scoped_restore (&inf->detaching, true);

  /* Remove all threads of INF from the global step-over chain.  We
     want to stop any ongoing step-over, not start any new one.  */
  thread_step_over_list_safe_range range
    = make_thread_step_over_list_safe_range (global_thread_step_over_list);

  for (thread_info *tp : range)
    if (tp->inf == inf)
      {
	infrun_debug_printf ("removing thread %s from global step over chain",
			     tp->ptid.to_string ().c_str ());
	global_thread_step_over_chain_remove (tp);
      }

  /* If we were already in the middle of an inline step-over, and the
     thread stepping belongs to the inferior we're detaching, we need
     to restart the threads of other inferiors.  */
  if (step_over_info.thread != -1)
    {
      infrun_debug_printf ("inline step-over in-process while detaching");

      thread_info *thr = find_thread_global_id (step_over_info.thread);
      if (thr->inf == inf)
	{
	  /* Since we removed threads of INF from the step-over chain,
	     we know this won't start a step-over for INF.  */
	  clear_step_over_info ();

	  if (target_is_non_stop_p ())
	    {
	      /* Start a new step-over in another thread if there's
		 one that needs it.  */
	      start_step_over ();

	      /* Restart all other threads (except the
		 previously-stepping thread, since that one is still
		 running).  */
	      if (!step_over_info_valid_p ())
		restart_threads (thr);
	    }
	}
    }

  if (displaced_step_in_progress (inf))
    {
      infrun_debug_printf ("displaced-stepping in-process while detaching");

      /* Stop threads currently displaced stepping, aborting it.  */

      for (thread_info *thr : inf->non_exited_threads ())
	{
	  if (thr->displaced_step_state.in_progress ())
	    {
	      if (thr->executing ())
		{
		  /* Ask the target to stop the thread; only request
		     it once.  */
		  if (!thr->stop_requested)
		    {
		      target_stop (thr->ptid);
		      thr->stop_requested = true;
		    }
		}
	      else
		thr->set_resumed (false);
	    }
	}

      /* Drain events until no thread of INF is displaced stepping
	 anymore; handle_one finishes off each displaced step as its
	 stop arrives.  */
      while (displaced_step_in_progress (inf))
	{
	  wait_one_event event;

	  event.target = inf->process_target ();
	  event.ptid = do_target_wait_1 (inf, pid_ptid, &event.ws, 0);

	  if (debug_infrun)
	    print_target_wait_results (pid_ptid, event.ptid, event.ws);

	  handle_one (event);
	}

      /* It's OK to leave some of the threads of INF stopped, since
	 they'll be detached shortly.  */
    }
}
3971
3972 /* If all-stop, but there exists a non-stop target, stop all threads
3973 now that we're presenting the stop to the user. */
3974
3975 static void
3976 stop_all_threads_if_all_stop_mode ()
3977 {
3978 if (!non_stop && exists_non_stop_target ())
3979 stop_all_threads ("presenting stop to user in all-stop");
3980 }
3981
/* Wait for control to return from inferior to debugger.

   If inferior gets a signal, we may decide to start it up again
   instead of returning.  That is why there is a loop in this function.
   When this function actually returns it means the inferior
   should be left stopped and GDB should read more commands.  */

static void
wait_for_inferior (inferior *inf)
{
  infrun_debug_printf ("wait_for_inferior ()");

  /* Runs when this function exits, by any path (including via an
     exception being thrown).  */
  SCOPE_EXIT { delete_just_stopped_threads_infrun_breakpoints (); };

  /* If an error happens while handling the event, propagate GDB's
     knowledge of the executing state to the frontend/user running
     state.  */
  scoped_finish_thread_state finish_state
    (inf->process_target (), minus_one_ptid);

  while (1)
    {
      struct execution_control_state ecss;
      struct execution_control_state *ecs = &ecss;

      overlay_cache_invalid = 1;

      /* Flush target cache before starting to handle each event.
	 Target was running and cache could be stale.  This is just a
	 heuristic.  Running threads may modify target memory, but we
	 don't get any event.  */
      target_dcache_invalidate ();

      /* Blocking wait for the next event from INF.  */
      ecs->ptid = do_target_wait_1 (inf, minus_one_ptid, &ecs->ws, 0);
      ecs->target = inf->process_target ();

      if (debug_infrun)
	print_target_wait_results (minus_one_ptid, ecs->ptid, ecs->ws);

      /* Now figure out what to do with the result of the result.  */
      handle_inferior_event (ecs);

      /* handle_inferior_event sets wait_some_more when the event was
	 handled internally and the inferior was resumed again.  */
      if (!ecs->wait_some_more)
	break;
    }

  stop_all_threads_if_all_stop_mode ();

  /* No error, don't finish the state yet.  */
  finish_state.release ();
}
4033
4034 /* Cleanup that reinstalls the readline callback handler, if the
4035 target is running in the background. If while handling the target
4036 event something triggered a secondary prompt, like e.g., a
4037 pagination prompt, we'll have removed the callback handler (see
4038 gdb_readline_wrapper_line). Need to do this as we go back to the
4039 event loop, ready to process further input. Note this has no
4040 effect if the handler hasn't actually been removed, because calling
4041 rl_callback_handler_install resets the line buffer, thus losing
4042 input. */
4043
4044 static void
4045 reinstall_readline_callback_handler_cleanup ()
4046 {
4047 struct ui *ui = current_ui;
4048
4049 if (!ui->async)
4050 {
4051 /* We're not going back to the top level event loop yet. Don't
4052 install the readline callback, as it'd prep the terminal,
4053 readline-style (raw, noecho) (e.g., --batch). We'll install
4054 it the next time the prompt is displayed, when we're ready
4055 for input. */
4056 return;
4057 }
4058
4059 if (ui->command_editing && ui->prompt_state != PROMPT_BLOCKED)
4060 gdb_rl_callback_handler_reinstall ();
4061 }
4062
4063 /* Clean up the FSMs of threads that are now stopped. In non-stop,
4064 that's just the event thread. In all-stop, that's all threads. */
4065
4066 static void
4067 clean_up_just_stopped_threads_fsms (struct execution_control_state *ecs)
4068 {
4069 /* The first clean_up call below assumes the event thread is the current
4070 one. */
4071 if (ecs->event_thread != nullptr)
4072 gdb_assert (ecs->event_thread == inferior_thread ());
4073
4074 if (ecs->event_thread != nullptr
4075 && ecs->event_thread->thread_fsm () != nullptr)
4076 ecs->event_thread->thread_fsm ()->clean_up (ecs->event_thread);
4077
4078 if (!non_stop)
4079 {
4080 scoped_restore_current_thread restore_thread;
4081
4082 for (thread_info *thr : all_non_exited_threads ())
4083 {
4084 if (thr->thread_fsm () == nullptr)
4085 continue;
4086 if (thr == ecs->event_thread)
4087 continue;
4088
4089 switch_to_thread (thr);
4090 thr->thread_fsm ()->clean_up (thr);
4091 }
4092 }
4093 }
4094
4095 /* Helper for all_uis_check_sync_execution_done that works on the
4096 current UI. */
4097
4098 static void
4099 check_curr_ui_sync_execution_done (void)
4100 {
4101 struct ui *ui = current_ui;
4102
4103 if (ui->prompt_state == PROMPT_NEEDED
4104 && ui->async
4105 && !gdb_in_secondary_prompt_p (ui))
4106 {
4107 target_terminal::ours ();
4108 gdb::observers::sync_execution_done.notify ();
4109 ui->register_file_handler ();
4110 }
4111 }
4112
/* See infrun.h.  Iterates over every UI, running the per-UI check
   with that UI temporarily made current.  */

void
all_uis_check_sync_execution_done (void)
{
  SWITCH_THRU_ALL_UIS ()
    {
      check_curr_ui_sync_execution_done ();
    }
}
4123
/* See infrun.h.  For every UI that is waiting to show a prompt,
   disable stdin while a synchronous execution command runs.  */

void
all_uis_on_sync_execution_starting (void)
{
  SWITCH_THRU_ALL_UIS ()
    {
      if (current_ui->prompt_state == PROMPT_NEEDED)
	async_disable_stdin ();
    }
}
4135
/* Asynchronous version of wait_for_inferior.  It is called by the
   event loop whenever a change of state is detected on the file
   descriptor corresponding to the target.  It can be called more than
   once to complete a single execution command.  In such cases we need
   to keep the state in a global variable ECSS.  If it is the last time
   that this function is called for a single execution command, then
   report to the user that the inferior has stopped, and do the
   necessary cleanups.  */

void
fetch_inferior_event ()
{
  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  struct execution_control_state ecss;
  struct execution_control_state *ecs = &ecss;
  int cmd_done = 0;

  /* Events are always processed with the main UI as current UI.  This
     way, warnings, debug output, etc. are always consistently sent to
     the main console.  */
  scoped_restore save_ui = make_scoped_restore (&current_ui, main_ui);

  /* Temporarily disable pagination.  Otherwise, the user would be
     given an option to press 'q' to quit, which would cause an early
     exit and could leave GDB in a half-baked state.  */
  scoped_restore save_pagination
    = make_scoped_restore (&pagination_enabled, false);

  /* End up with readline processing input, if necessary.  */
  {
    SCOPE_EXIT { reinstall_readline_callback_handler_cleanup (); };

    /* We're handling a live event, so make sure we're doing live
       debugging.  If we're looking at traceframes while the target is
       running, we're going to need to get back to that mode after
       handling the event.  */
    gdb::optional<scoped_restore_current_traceframe> maybe_restore_traceframe;
    if (non_stop)
      {
	maybe_restore_traceframe.emplace ();
	set_current_traceframe (-1);
      }

    /* The user/frontend should not notice a thread switch due to
       internal events.  Make sure we revert to the user selected
       thread and frame after handling the event and running any
       breakpoint commands.  */
    scoped_restore_current_thread restore_thread;

    overlay_cache_invalid = 1;
    /* Flush target cache before starting to handle each event.  Target
       was running and cache could be stale.  This is just a heuristic.
       Running threads may modify target memory, but we don't get any
       event.  */
    target_dcache_invalidate ();

    /* Make event handling follow the target's execution direction
       (e.g., reverse execution), restoring on exit.  */
    scoped_restore save_exec_dir
      = make_scoped_restore (&execution_direction,
			     target_execution_direction ());

    /* Allow targets to pause their resumed threads while we handle
       the event.  */
    scoped_disable_commit_resumed disable_commit_resumed ("handling event");

    /* Non-blocking poll; bail out if no target reported anything.  */
    if (!do_target_wait (ecs, TARGET_WNOHANG))
      {
	infrun_debug_printf ("do_target_wait returned no event");
	disable_commit_resumed.reset_and_commit ();
	return;
      }

    gdb_assert (ecs->ws.kind () != TARGET_WAITKIND_IGNORE);

    /* Switch to the target that generated the event, so we can do
       target calls.  */
    switch_to_target_no_thread (ecs->target);

    if (debug_infrun)
      print_target_wait_results (minus_one_ptid, ecs->ptid, ecs->ws);

    /* If an error happens while handling the event, propagate GDB's
       knowledge of the executing state to the frontend/user running
       state.  */
    ptid_t finish_ptid = !target_is_non_stop_p () ? minus_one_ptid : ecs->ptid;
    scoped_finish_thread_state finish_state (ecs->target, finish_ptid);

    /* Get executed before scoped_restore_current_thread above to apply
       still for the thread which has thrown the exception.  */
    auto defer_bpstat_clear
      = make_scope_exit (bpstat_clear_actions);
    auto defer_delete_threads
      = make_scope_exit (delete_just_stopped_threads_infrun_breakpoints);

    /* Now figure out what to do with the result of the result.  */
    handle_inferior_event (ecs);

    if (!ecs->wait_some_more)
      {
	struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
	bool should_stop = true;
	struct thread_info *thr = ecs->event_thread;

	delete_just_stopped_threads_infrun_breakpoints ();

	/* Let the thread's FSM (e.g., for "finish" or "until") decide
	   whether this stop should be presented to the user.  */
	if (thr != nullptr && thr->thread_fsm () != nullptr)
	  should_stop = thr->thread_fsm ()->should_stop (thr);

	if (!should_stop)
	  {
	    keep_going (ecs);
	  }
	else
	  {
	    bool should_notify_stop = true;
	    int proceeded = 0;

	    stop_all_threads_if_all_stop_mode ();

	    clean_up_just_stopped_threads_fsms (ecs);

	    if (thr != nullptr && thr->thread_fsm () != nullptr)
	      should_notify_stop
	       = thr->thread_fsm ()->should_notify_stop ();

	    if (should_notify_stop)
	      {
		/* We may not find an inferior if this was a process exit.  */
		if (inf == nullptr || inf->control.stop_soon == NO_STOP_QUIETLY)
		  proceeded = normal_stop ();
	      }

	    /* normal_stop may have re-proceeded the inferior (e.g.,
	       via breakpoint commands); only declare the command done
	       if it did not.  */
	    if (!proceeded)
	      {
		inferior_event_handler (INF_EXEC_COMPLETE);
		cmd_done = 1;
	      }

	    /* If we got a TARGET_WAITKIND_NO_RESUMED event, then the
	       previously selected thread is gone.  We have two
	       choices - switch to no thread selected, or restore the
	       previously selected thread (now exited).  We chose the
	       later, just because that's what GDB used to do.  After
	       this, "info threads" says "The current thread <Thread
	       ID 2> has terminated." instead of "No thread
	       selected.".  */
	    if (!non_stop
		&& cmd_done
		&& ecs->ws.kind () != TARGET_WAITKIND_NO_RESUMED)
	      restore_thread.dont_restore ();
	  }
      }

    defer_delete_threads.release ();
    defer_bpstat_clear.release ();

    /* No error, don't finish the thread states yet.  */
    finish_state.release ();

    disable_commit_resumed.reset_and_commit ();

    /* This scope is used to ensure that readline callbacks are
       reinstalled here.  */
  }

  /* Handling this event might have caused some inferiors to become prunable.
     For example, the exit of an inferior that was automatically added.  Try
     to get rid of them.  Keeping those around slows down things linearly.

     Note that this never removes the current inferior.  Therefore, call this
     after RESTORE_THREAD went out of scope, in case the event inferior (which was
     temporarily made the current inferior) is meant to be deleted.

     Call this before all_uis_check_sync_execution_done, so that notifications about
     removed inferiors appear before the prompt.  */
  prune_inferiors ();

  /* If a UI was in sync execution mode, and now isn't, restore its
     prompt (a synchronous execution command has finished, and we're
     ready for input).  */
  all_uis_check_sync_execution_done ();

  if (cmd_done
      && exec_done_display_p
      && (inferior_ptid == null_ptid
	  || inferior_thread ()->state != THREAD_RUNNING))
    gdb_printf (_("completed.\n"));
}
4324
/* See infrun.h.  Record in TP the frame and source location the
   current step operation started from: FRAME is the frame the step
   was issued in, SAL its symtab and line.  */

void
set_step_info (thread_info *tp, frame_info_ptr frame,
	       struct symtab_and_line sal)
{
  /* This can be removed once this function no longer implicitly relies on the
     inferior_ptid value.  */
  gdb_assert (inferior_ptid == tp->ptid);

  /* Remember both the stepped frame and its stack frame id, for later
     comparison against the frame we stop in.  */
  tp->control.step_frame_id = get_frame_id (frame);
  tp->control.step_stack_frame_id = get_stack_frame_id (frame);

  tp->current_symtab = sal.symtab;
  tp->current_line = sal.line;

  infrun_debug_printf
    ("symtab = %s, line = %d, step_frame_id = %s, step_stack_frame_id = %s",
     tp->current_symtab != nullptr ? tp->current_symtab->filename : "<null>",
     tp->current_line,
     tp->control.step_frame_id.to_string ().c_str (),
     tp->control.step_stack_frame_id.to_string ().c_str ());
}
4348
4349 /* Clear context switchable stepping state. */
4350
4351 void
4352 init_thread_stepping_state (struct thread_info *tss)
4353 {
4354 tss->stepped_breakpoint = 0;
4355 tss->stepping_over_breakpoint = 0;
4356 tss->stepping_over_watchpoint = 0;
4357 tss->step_after_step_resume_breakpoint = 0;
4358 }
4359
/* See infrun.h.  Record the target, ptid and wait status of the last
   reported event in the corresponding file-scope globals, for later
   retrieval by get_last_target_status.  */

void
set_last_target_status (process_stratum_target *target, ptid_t ptid,
			const target_waitstatus &status)
{
  target_last_proc_target = target;
  target_last_wait_ptid = ptid;
  target_last_waitstatus = status;
}
4370
/* See infrun.h.  Retrieve the last recorded target/ptid/status.  Any
   of the output parameters may be NULL, in which case that piece of
   information is not returned.  */

void
get_last_target_status (process_stratum_target **target, ptid_t *ptid,
			target_waitstatus *status)
{
  if (target != nullptr)
    *target = target_last_proc_target;
  if (ptid != nullptr)
    *ptid = target_last_wait_ptid;
  if (status != nullptr)
    *status = target_last_waitstatus;
}
4384
/* See infrun.h.  Reset the last-target-status globals to their
   "no event recorded" values.  */

void
nullify_last_target_wait_ptid (void)
{
  target_last_proc_target = nullptr;
  target_last_wait_ptid = minus_one_ptid;
  target_last_waitstatus = {};
}
4394
4395 /* Switch thread contexts. */
4396
4397 static void
4398 context_switch (execution_control_state *ecs)
4399 {
4400 if (ecs->ptid != inferior_ptid
4401 && (inferior_ptid == null_ptid
4402 || ecs->event_thread != inferior_thread ()))
4403 {
4404 infrun_debug_printf ("Switching context from %s to %s",
4405 inferior_ptid.to_string ().c_str (),
4406 ecs->ptid.to_string ().c_str ());
4407 }
4408
4409 switch_to_thread (ecs->event_thread);
4410 }
4411
/* If the target can't tell whether we've hit breakpoints
   (target_supports_stopped_by_sw_breakpoint), and we got a SIGTRAP,
   check whether that could have been caused by a breakpoint.  If so,
   adjust the PC, per gdbarch_decr_pc_after_break.  */

static void
adjust_pc_after_break (struct thread_info *thread,
		       const target_waitstatus &ws)
{
  struct regcache *regcache;
  struct gdbarch *gdbarch;
  CORE_ADDR breakpoint_pc, decr_pc;

  /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP.  If
     we aren't, just return.

     We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
     affected by gdbarch_decr_pc_after_break.  Other waitkinds which are
     implemented by software breakpoints should be handled through the normal
     breakpoint layer.

     NOTE drow/2004-01-31: On some targets, breakpoints may generate
     different signals (SIGILL or SIGEMT for instance), but it is less
     clear where the PC is pointing afterwards.  It may not match
     gdbarch_decr_pc_after_break.  I don't know any specific target that
     generates these signals at breakpoints (the code has been in GDB since at
     least 1992) so I can not guess how to handle them here.

     In earlier versions of GDB, a target with
     gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
     watchpoint affected by gdbarch_decr_pc_after_break.  I haven't found any
     target with both of these set in GDB history, and it seems unlikely to be
     correct, so gdbarch_have_nonsteppable_watchpoint is not checked here.  */

  if (ws.kind () != TARGET_WAITKIND_STOPPED)
    return;

  if (ws.sig () != GDB_SIGNAL_TRAP)
    return;

  /* In reverse execution, when a breakpoint is hit, the instruction
     under it has already been de-executed.  The reported PC always
     points at the breakpoint address, so adjusting it further would
     be wrong.  E.g., consider this case on a decr_pc_after_break == 1
     architecture:

       B1         0x08000000 :   INSN1
       B2         0x08000001 :   INSN2
		  0x08000002 :   INSN3
	    PC -> 0x08000003 :   INSN4

     Say you're stopped at 0x08000003 as above.  Reverse continuing
     from that point should hit B2 as below.  Reading the PC when the
     SIGTRAP is reported should read 0x08000001 and INSN2 should have
     been de-executed already.

       B1         0x08000000 :   INSN1
       B2   PC -> 0x08000001 :   INSN2
		  0x08000002 :   INSN3
		  0x08000003 :   INSN4

     We can't apply the same logic as for forward execution, because
     we would wrongly adjust the PC to 0x08000000, since there's a
     breakpoint at PC - 1.  We'd then report a hit on B1, although
     INSN1 hadn't been de-executed yet.  Doing nothing is the correct
     behaviour.  */
  if (execution_direction == EXEC_REVERSE)
    return;

  /* If the target can tell whether the thread hit a SW breakpoint,
     trust it.  Targets that can tell also adjust the PC
     themselves.  */
  if (target_supports_stopped_by_sw_breakpoint ())
    return;

  /* Note that relying on whether a breakpoint is planted in memory to
     determine this can fail.  E.g,. the breakpoint could have been
     removed since.  Or the thread could have been told to step an
     instruction the size of a breakpoint instruction, and only
     _after_ was a breakpoint inserted at its address.  */

  /* If this target does not decrement the PC after breakpoints, then
     we have nothing to do.  */
  regcache = get_thread_regcache (thread);
  gdbarch = regcache->arch ();

  decr_pc = gdbarch_decr_pc_after_break (gdbarch);
  if (decr_pc == 0)
    return;

  const address_space *aspace = regcache->aspace ();

  /* Find the location where (if we've hit a breakpoint) the
     breakpoint would be.  */
  breakpoint_pc = regcache_read_pc (regcache) - decr_pc;

  /* If the target can't tell whether a software breakpoint triggered,
     fallback to figuring it out based on breakpoints we think were
     inserted in the target, and on whether the thread was stepped or
     continued.  */

  /* Check whether there actually is a software breakpoint inserted at
     that location.

     If in non-stop mode, a race condition is possible where we've
     removed a breakpoint, but stop events for that breakpoint were
     already queued and arrive later.  To suppress those spurious
     SIGTRAPs, we keep a list of such breakpoint locations for a bit,
     and retire them after a number of stop events are reported.  Note
     this is an heuristic and can thus get confused.  The real fix is
     to get the "stopped by SW BP and needs adjustment" info out of
     the target/kernel (and thus never reach here; see above).  */
  if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
      || (target_is_non_stop_p ()
	  && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
    {
      gdb::optional<scoped_restore_tmpl<int>> restore_operation_disable;

      /* Don't record the PC write in the full record log; restored on
	 scope exit.  */
      if (record_full_is_used ())
	restore_operation_disable.emplace
	  (record_full_gdb_operation_disable_set ());

      /* When using hardware single-step, a SIGTRAP is reported for both
	 a completed single-step and a software breakpoint.  Need to
	 differentiate between the two, as the latter needs adjusting
	 but the former does not.

	 The SIGTRAP can be due to a completed hardware single-step only if
	  - we didn't insert software single-step breakpoints
	  - this thread is currently being stepped

	 If any of these events did not occur, we must have stopped due
	 to hitting a software breakpoint, and have to back up to the
	 breakpoint address.

	 As a special case, we could have hardware single-stepped a
	 software breakpoint.  In this case (prev_pc == breakpoint_pc),
	 we also need to back up to the breakpoint address.  */

      if (thread_has_single_step_breakpoints_set (thread)
	  || !currently_stepping (thread)
	  || (thread->stepped_breakpoint
	      && thread->prev_pc == breakpoint_pc))
	regcache_write_pc (regcache, breakpoint_pc);
    }
}
4558
4559 static bool
4560 stepped_in_from (frame_info_ptr frame, struct frame_id step_frame_id)
4561 {
4562 for (frame = get_prev_frame (frame);
4563 frame != nullptr;
4564 frame = get_prev_frame (frame))
4565 {
4566 if (get_frame_id (frame) == step_frame_id)
4567 return true;
4568
4569 if (get_frame_type (frame) != INLINE_FRAME)
4570 break;
4571 }
4572
4573 return false;
4574 }
4575
4576 /* Look for an inline frame that is marked for skip.
4577 If PREV_FRAME is TRUE start at the previous frame,
4578 otherwise start at the current frame. Stop at the
4579 first non-inline frame, or at the frame where the
4580 step started. */
4581
4582 static bool
4583 inline_frame_is_marked_for_skip (bool prev_frame, struct thread_info *tp)
4584 {
4585 frame_info_ptr frame = get_current_frame ();
4586
4587 if (prev_frame)
4588 frame = get_prev_frame (frame);
4589
4590 for (; frame != nullptr; frame = get_prev_frame (frame))
4591 {
4592 const char *fn = nullptr;
4593 symtab_and_line sal;
4594 struct symbol *sym;
4595
4596 if (get_frame_id (frame) == tp->control.step_frame_id)
4597 break;
4598 if (get_frame_type (frame) != INLINE_FRAME)
4599 break;
4600
4601 sal = find_frame_sal (frame);
4602 sym = get_frame_function (frame);
4603
4604 if (sym != nullptr)
4605 fn = sym->print_name ();
4606
4607 if (sal.line != 0
4608 && function_name_is_marked_for_skip (fn, sal))
4609 return true;
4610 }
4611
4612 return false;
4613 }
4614
4615 /* If the event thread has the stop requested flag set, pretend it
4616 stopped for a GDB_SIGNAL_0 (i.e., as if it stopped due to
4617 target_stop). */
4618
4619 static bool
4620 handle_stop_requested (struct execution_control_state *ecs)
4621 {
4622 if (ecs->event_thread->stop_requested)
4623 {
4624 ecs->ws.set_stopped (GDB_SIGNAL_0);
4625 handle_signal_stop (ecs);
4626 return true;
4627 }
4628 return false;
4629 }
4630
4631 /* Auxiliary function that handles syscall entry/return events.
4632 It returns true if the inferior should keep going (and GDB
4633 should ignore the event), or false if the event deserves to be
4634 processed. */
4635
4636 static bool
4637 handle_syscall_event (struct execution_control_state *ecs)
4638 {
4639 struct regcache *regcache;
4640 int syscall_number;
4641
4642 context_switch (ecs);
4643
4644 regcache = get_thread_regcache (ecs->event_thread);
4645 syscall_number = ecs->ws.syscall_number ();
4646 ecs->event_thread->set_stop_pc (regcache_read_pc (regcache));
4647
4648 if (catch_syscall_enabled () > 0
4649 && catching_syscall_number (syscall_number))
4650 {
4651 infrun_debug_printf ("syscall number=%d", syscall_number);
4652
4653 ecs->event_thread->control.stop_bpstat
4654 = bpstat_stop_status_nowatch (regcache->aspace (),
4655 ecs->event_thread->stop_pc (),
4656 ecs->event_thread, ecs->ws);
4657
4658 if (handle_stop_requested (ecs))
4659 return false;
4660
4661 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
4662 {
4663 /* Catchpoint hit. */
4664 return false;
4665 }
4666 }
4667
4668 if (handle_stop_requested (ecs))
4669 return false;
4670
4671 /* If no catchpoint triggered for this, then keep going. */
4672 keep_going (ecs);
4673
4674 return true;
4675 }
4676
4677 /* Lazily fill in the execution_control_state's stop_func_* fields. */
4678
4679 static void
4680 fill_in_stop_func (struct gdbarch *gdbarch,
4681 struct execution_control_state *ecs)
4682 {
4683 if (!ecs->stop_func_filled_in)
4684 {
4685 const block *block;
4686 const general_symbol_info *gsi;
4687
4688 /* Don't care about return value; stop_func_start and stop_func_name
4689 will both be 0 if it doesn't work. */
4690 find_pc_partial_function_sym (ecs->event_thread->stop_pc (),
4691 &gsi,
4692 &ecs->stop_func_start,
4693 &ecs->stop_func_end,
4694 &block);
4695 ecs->stop_func_name = gsi == nullptr ? nullptr : gsi->print_name ();
4696
4697 /* The call to find_pc_partial_function, above, will set
4698 stop_func_start and stop_func_end to the start and end
4699 of the range containing the stop pc. If this range
4700 contains the entry pc for the block (which is always the
4701 case for contiguous blocks), advance stop_func_start past
4702 the function's start offset and entrypoint. Note that
4703 stop_func_start is NOT advanced when in a range of a
4704 non-contiguous block that does not contain the entry pc. */
4705 if (block != nullptr
4706 && ecs->stop_func_start <= block->entry_pc ()
4707 && block->entry_pc () < ecs->stop_func_end)
4708 {
4709 ecs->stop_func_start
4710 += gdbarch_deprecated_function_start_offset (gdbarch);
4711
4712 if (gdbarch_skip_entrypoint_p (gdbarch))
4713 ecs->stop_func_start
4714 = gdbarch_skip_entrypoint (gdbarch, ecs->stop_func_start);
4715 }
4716
4717 ecs->stop_func_filled_in = 1;
4718 }
4719 }
4720
4721
4722 /* Return the STOP_SOON field of the inferior pointed at by ECS. */
4723
4724 static enum stop_kind
4725 get_inferior_stop_soon (execution_control_state *ecs)
4726 {
4727 struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
4728
4729 gdb_assert (inf != nullptr);
4730 return inf->control.stop_soon;
4731 }
4732
4733 /* Poll for one event out of the current target. Store the resulting
4734 waitstatus in WS, and return the event ptid. Does not block. */
4735
4736 static ptid_t
4737 poll_one_curr_target (struct target_waitstatus *ws)
4738 {
4739 ptid_t event_ptid;
4740
4741 overlay_cache_invalid = 1;
4742
4743 /* Flush target cache before starting to handle each event.
4744 Target was running and cache could be stale. This is just a
4745 heuristic. Running threads may modify target memory, but we
4746 don't get any event. */
4747 target_dcache_invalidate ();
4748
4749 event_ptid = target_wait (minus_one_ptid, ws, TARGET_WNOHANG);
4750
4751 if (debug_infrun)
4752 print_target_wait_results (minus_one_ptid, event_ptid, *ws);
4753
4754 return event_ptid;
4755 }
4756
/* Wait for one event out of any target.  */

static wait_one_event
wait_one ()
{
  while (1)
    {
      /* First, poll each waitable target once, without blocking.  */
      for (inferior *inf : all_inferiors ())
	{
	  process_stratum_target *target = inf->process_target ();
	  if (target == nullptr
	      || !target->is_async_p ()
	      || !target->threads_executing)
	    continue;

	  switch_to_inferior_no_thread (inf);

	  wait_one_event event;
	  event.target = target;
	  event.ptid = poll_one_curr_target (&event.ws);

	  if (event.ws.kind () == TARGET_WAITKIND_NO_RESUMED)
	    {
	      /* If nothing is resumed, remove the target from the
		 event loop.  */
	      target_async (false);
	    }
	  else if (event.ws.kind () != TARGET_WAITKIND_IGNORE)
	    return event;
	}

      /* Block waiting for some event.  */

      fd_set readfds;
      int nfds = 0;

      FD_ZERO (&readfds);

      /* Collect the event file descriptors of all waitable targets.  */
      for (inferior *inf : all_inferiors ())
	{
	  process_stratum_target *target = inf->process_target ();
	  if (target == nullptr
	      || !target->is_async_p ()
	      || !target->threads_executing)
	    continue;

	  int fd = target->async_wait_fd ();
	  FD_SET (fd, &readfds);
	  if (nfds <= fd)
	    nfds = fd + 1;
	}

      if (nfds == 0)
	{
	  /* No waitable targets left.  All must be stopped.  */
	  target_waitstatus ws;
	  ws.set_no_resumed ();
	  return {nullptr, minus_one_ptid, std::move (ws)};
	}

      QUIT;

      /* Sleep until one of the fds becomes readable, then go around
	 the loop and poll again.  A Ctrl-C interrupting the select is
	 retried (EINTR); any other select failure is fatal.  */
      int numfds = interruptible_select (nfds, &readfds, 0, nullptr, 0);
      if (numfds < 0)
	{
	  if (errno == EINTR)
	    continue;
	  else
	    perror_with_name ("interruptible_select");
	}
    }
}
4829
/* Save the thread's event and stop reason to process it later.  */

static void
save_waitstatus (struct thread_info *tp, const target_waitstatus &ws)
{
  infrun_debug_printf ("saving status %s for %s",
		       ws.to_string ().c_str (),
		       tp->ptid.to_string ().c_str ());

  /* Record for later.  */
  tp->set_pending_waitstatus (ws);

  /* For a SIGTRAP stop, also record a finer-grained stop reason, so
     that later processing doesn't need to re-query the target (which
     may have moved on by then).  */
  if (ws.kind () == TARGET_WAITKIND_STOPPED
      && ws.sig () == GDB_SIGNAL_TRAP)
    {
      struct regcache *regcache = get_thread_regcache (tp);
      const address_space *aspace = regcache->aspace ();
      CORE_ADDR pc = regcache_read_pc (regcache);

      adjust_pc_after_break (tp, tp->pending_waitstatus ());

      /* The target_stopped_by_* queries below refer to the current
	 thread, so temporarily switch to TP.  */
      scoped_restore_current_thread restore_thread;
      switch_to_thread (tp);

      /* The order of these checks matters: prefer what the target
	 itself reports; fall back to checking whether a breakpoint is
	 inserted at PC only when the target can't report the
	 corresponding stop reason; treat the stop as a completed
	 single-step only as a last resort.  */
      if (target_stopped_by_watchpoint ())
	tp->set_stop_reason (TARGET_STOPPED_BY_WATCHPOINT);
      else if (target_supports_stopped_by_sw_breakpoint ()
	       && target_stopped_by_sw_breakpoint ())
	tp->set_stop_reason (TARGET_STOPPED_BY_SW_BREAKPOINT);
      else if (target_supports_stopped_by_hw_breakpoint ()
	       && target_stopped_by_hw_breakpoint ())
	tp->set_stop_reason (TARGET_STOPPED_BY_HW_BREAKPOINT);
      else if (!target_supports_stopped_by_hw_breakpoint ()
	       && hardware_breakpoint_inserted_here_p (aspace, pc))
	tp->set_stop_reason (TARGET_STOPPED_BY_HW_BREAKPOINT);
      else if (!target_supports_stopped_by_sw_breakpoint ()
	       && software_breakpoint_inserted_here_p (aspace, pc))
	tp->set_stop_reason (TARGET_STOPPED_BY_SW_BREAKPOINT);
      else if (!thread_has_single_step_breakpoints_set (tp)
	       && currently_stepping (tp))
	tp->set_stop_reason (TARGET_STOPPED_BY_SINGLE_STEP);
    }
}
4873
4874 /* Mark the non-executing threads accordingly. In all-stop, all
4875 threads of all processes are stopped when we get any event
4876 reported. In non-stop mode, only the event thread stops. */
4877
4878 static void
4879 mark_non_executing_threads (process_stratum_target *target,
4880 ptid_t event_ptid,
4881 const target_waitstatus &ws)
4882 {
4883 ptid_t mark_ptid;
4884
4885 if (!target_is_non_stop_p ())
4886 mark_ptid = minus_one_ptid;
4887 else if (ws.kind () == TARGET_WAITKIND_SIGNALLED
4888 || ws.kind () == TARGET_WAITKIND_EXITED)
4889 {
4890 /* If we're handling a process exit in non-stop mode, even
4891 though threads haven't been deleted yet, one would think
4892 that there is nothing to do, as threads of the dead process
4893 will be soon deleted, and threads of any other process were
4894 left running. However, on some targets, threads survive a
4895 process exit event. E.g., for the "checkpoint" command,
4896 when the current checkpoint/fork exits, linux-fork.c
4897 automatically switches to another fork from within
4898 target_mourn_inferior, by associating the same
4899 inferior/thread to another fork. We haven't mourned yet at
4900 this point, but we must mark any threads left in the
4901 process as not-executing so that finish_thread_state marks
4902 them stopped (in the user's perspective) if/when we present
4903 the stop to the user. */
4904 mark_ptid = ptid_t (event_ptid.pid ());
4905 }
4906 else
4907 mark_ptid = event_ptid;
4908
4909 set_executing (target, mark_ptid, false);
4910
4911 /* Likewise the resumed flag. */
4912 set_resumed (target, mark_ptid, false);
4913 }
4914
/* Handle one event after stopping threads.  If the eventing thread
   reports back any interesting event, we leave it pending.  If the
   eventing thread was in the middle of a displaced step, we
   cancel/finish it, and unless the thread's inferior is being
   detached, put the thread back in the step-over chain.  Returns true
   if there are no resumed threads left in the target (thus there's no
   point in waiting further), false otherwise.  */

static bool
handle_one (const wait_one_event &event)
{
  infrun_debug_printf
    ("%s %s", event.ws.to_string ().c_str (),
     event.ptid.to_string ().c_str ());

  if (event.ws.kind () == TARGET_WAITKIND_NO_RESUMED)
    {
      /* All resumed threads exited.  */
      return true;
    }
  else if (event.ws.kind () == TARGET_WAITKIND_THREAD_EXITED
	   || event.ws.kind () == TARGET_WAITKIND_EXITED
	   || event.ws.kind () == TARGET_WAITKIND_SIGNALLED)
    {
      /* One thread/process exited/signalled.  */

      thread_info *t = nullptr;

      /* The target may have reported just a pid.  If so, try
	 the first non-exited thread.  */
      if (event.ptid.is_pid ())
	{
	  int pid = event.ptid.pid ();
	  inferior *inf = find_inferior_pid (event.target, pid);
	  for (thread_info *tp : inf->non_exited_threads ())
	    {
	      t = tp;
	      break;
	    }

	  /* If there is no available thread, the event would
	     have to be appended to a per-inferior event list,
	     which does not exist (and if it did, we'd have
	     to adjust run control command to be able to
	     resume such an inferior).  We assert here instead
	     of going into an infinite loop.  */
	  gdb_assert (t != nullptr);

	  infrun_debug_printf
	    ("using %s", t->ptid.to_string ().c_str ());
	}
      else
	{
	  t = find_thread_ptid (event.target, event.ptid);
	  /* Check if this is the first time we see this thread.
	     Don't bother adding if it individually exited.  */
	  if (t == nullptr
	      && event.ws.kind () != TARGET_WAITKIND_THREAD_EXITED)
	    t = add_thread (event.target, event.ptid);
	}

      if (t != nullptr)
	{
	  /* Set the threads as non-executing to avoid
	     another stop attempt on them.  */
	  switch_to_thread_no_regs (t);
	  mark_non_executing_threads (event.target, event.ptid,
				      event.ws);
	  /* Leave the exit/signal event pending on the thread.  */
	  save_waitstatus (t, event.ws);
	  t->stop_requested = false;
	}
    }
  else
    {
      thread_info *t = find_thread_ptid (event.target, event.ptid);
      if (t == nullptr)
	t = add_thread (event.target, event.ptid);

      /* The thread has now delivered a stop; clear the pending stop
	 request and mark it no longer executing/resumed.  */
      t->stop_requested = 0;
      t->set_executing (false);
      t->set_resumed (false);
      t->control.may_range_step = 0;

      /* This may be the first time we see the inferior report
	 a stop.  */
      if (t->inf->needs_setup)
	{
	  switch_to_thread_no_regs (t);
	  setup_inferior (0);
	}

      if (event.ws.kind () == TARGET_WAITKIND_STOPPED
	  && event.ws.sig () == GDB_SIGNAL_0)
	{
	  /* We caught the event that we intended to catch, so
	     there's no event to save as pending.  */

	  if (displaced_step_finish (t, GDB_SIGNAL_0)
	      == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED)
	    {
	      /* Add it back to the step-over queue.  */
	      infrun_debug_printf
		("displaced-step of %s canceled",
		 t->ptid.to_string ().c_str ());

	      t->control.trap_expected = 0;
	      if (!t->inf->detaching)
		global_thread_step_over_chain_enqueue (t);
	    }
	}
      else
	{
	  enum gdb_signal sig;
	  struct regcache *regcache;

	  infrun_debug_printf
	    ("target_wait %s, saving status for %s",
	     event.ws.to_string ().c_str (),
	     t->ptid.to_string ().c_str ());

	  /* Record for later.  */
	  save_waitstatus (t, event.ws);

	  /* Pass the stop signal (if any) through to
	     displaced_step_finish; non-signal events map to
	     GDB_SIGNAL_0.  */
	  sig = (event.ws.kind () == TARGET_WAITKIND_STOPPED
		 ? event.ws.sig () : GDB_SIGNAL_0);

	  if (displaced_step_finish (t, sig)
	      == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED)
	    {
	      /* Add it back to the step-over queue.  */
	      t->control.trap_expected = 0;
	      if (!t->inf->detaching)
		global_thread_step_over_chain_enqueue (t);
	    }

	  /* Cache the stop PC for later processing of the pending
	     event.  */
	  regcache = get_thread_regcache (t);
	  t->set_stop_pc (regcache_read_pc (regcache));

	  infrun_debug_printf ("saved stop_pc=%s for %s "
			       "(currently_stepping=%d)",
			       paddress (target_gdbarch (), t->stop_pc ()),
			       t->ptid.to_string ().c_str (),
			       currently_stepping (t));
	}
    }

  return false;
}
5063
/* See infrun.h.  */

void
stop_all_threads (const char *reason, inferior *inf)
{
  /* We may need multiple passes to discover all threads.  */
  int pass;
  int iterations = 0;

  gdb_assert (exists_non_stop_target ());

  INFRUN_SCOPED_DEBUG_START_END ("reason=%s, inf=%d", reason,
				 inf != nullptr ? inf->num : -1);

  infrun_debug_show_threads ("non-exited threads",
			     all_non_exited_threads ());

  /* We switch threads and targets below; restore the user's selection
     when we're done.  */
  scoped_restore_current_thread restore_thread;

  /* Enable thread events on relevant targets.  */
  for (auto *target : all_non_exited_process_targets ())
    {
      if (inf != nullptr && inf->process_target () != target)
	continue;

      switch_to_target_no_thread (target);
      target_thread_events (true);
    }

  SCOPE_EXIT
    {
      /* Disable thread events on relevant targets.  */
      for (auto *target : all_non_exited_process_targets ())
	{
	  if (inf != nullptr && inf->process_target () != target)
	    continue;

	  switch_to_target_no_thread (target);
	  target_thread_events (false);
	}

      /* Use debug_prefixed_printf directly to get a meaningful function
	 name.  */
      if (debug_infrun)
	debug_prefixed_printf ("infrun", "stop_all_threads", "done");
    };

  /* Request threads to stop, and then wait for the stops.  Because
     threads we already know about can spawn more threads while we're
     trying to stop them, and we only learn about new threads when we
     update the thread list, do this in a loop, and keep iterating
     until two passes find no threads that need to be stopped.  */
  for (pass = 0; pass < 2; pass++, iterations++)
    {
      infrun_debug_printf ("pass=%d, iterations=%d", pass, iterations);
      while (1)
	{
	  int waits_needed = 0;

	  /* Refresh the thread list of every relevant target, so new
	     threads spawned since the last iteration are seen.  */
	  for (auto *target : all_non_exited_process_targets ())
	    {
	      if (inf != nullptr && inf->process_target () != target)
		continue;

	      switch_to_target_no_thread (target);
	      update_thread_list ();
	    }

	  /* Go through all threads looking for threads that we need
	     to tell the target to stop.  */
	  for (thread_info *t : all_non_exited_threads ())
	    {
	      if (inf != nullptr && t->inf != inf)
		continue;

	      /* For a single-target setting with an all-stop target,
		 we would not even arrive here.  For a multi-target
		 setting, until GDB is able to handle a mixture of
		 all-stop and non-stop targets, simply skip all-stop
		 targets' threads.  This should be fine due to the
		 protection of 'check_multi_target_resumption'.  */

	      switch_to_thread_no_regs (t);
	      if (!target_is_non_stop_p ())
		continue;

	      if (t->executing ())
		{
		  /* If already stopping, don't request a stop again.
		     We just haven't seen the notification yet.  */
		  if (!t->stop_requested)
		    {
		      infrun_debug_printf (" %s executing, need stop",
					   t->ptid.to_string ().c_str ());
		      target_stop (t->ptid);
		      t->stop_requested = 1;
		    }
		  else
		    {
		      infrun_debug_printf (" %s executing, already stopping",
					   t->ptid.to_string ().c_str ());
		    }

		  if (t->stop_requested)
		    waits_needed++;
		}
	      else
		{
		  infrun_debug_printf (" %s not executing",
				       t->ptid.to_string ().c_str ());

		  /* The thread may be not executing, but still be
		     resumed with a pending status to process.  */
		  t->set_resumed (false);
		}
	    }

	  if (waits_needed == 0)
	    break;

	  /* If we find new threads on the second iteration, restart
	     over.  We want to see two iterations in a row with all
	     threads stopped.  */
	  if (pass > 0)
	    pass = -1;

	  /* Consume one event per requested stop; bail out early if
	     an event tells us there's nothing left resumed.  */
	  for (int i = 0; i < waits_needed; i++)
	    {
	      wait_one_event event = wait_one ();
	      if (handle_one (event))
		break;
	    }
	}
    }
}
5199
/* Handle a TARGET_WAITKIND_NO_RESUMED event.  Return true if the
   event was consumed (GDB should just keep waiting), false if it
   should be reported to the user.  */

static bool
handle_no_resumed (struct execution_control_state *ecs)
{
  if (target_can_async_p ())
    {
      /* Check whether any UI is synchronously waiting for an
	 execution command to finish.  */
      bool any_sync = false;

      for (ui *ui : all_uis ())
	{
	  if (ui->prompt_state == PROMPT_BLOCKED)
	    {
	      any_sync = true;
	      break;
	    }
	}
      if (!any_sync)
	{
	  /* There were no unwaited-for children left in the target, but,
	     we're not synchronously waiting for events either.  Just
	     ignore.  */

	  infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED (ignoring: bg)");
	  prepare_to_wait (ecs);
	  return true;
	}
    }

  /* Otherwise, if we were running a synchronous execution command, we
     may need to cancel it and give the user back the terminal.

     In non-stop mode, the target can't tell whether we've already
     consumed previous stop events, so it can end up sending us a
     no-resumed event like so:

       #0 - thread 1 is left stopped

       #1 - thread 2 is resumed and hits breakpoint
	      -> TARGET_WAITKIND_STOPPED

       #2 - thread 3 is resumed and exits
	    this is the last resumed thread, so
	      -> TARGET_WAITKIND_NO_RESUMED

       #3 - gdb processes stop for thread 2 and decides to re-resume
	    it.

       #4 - gdb processes the TARGET_WAITKIND_NO_RESUMED event.
	    thread 2 is now resumed, so the event should be ignored.

     IOW, if the stop for thread 2 doesn't end a foreground command,
     then we need to ignore the following TARGET_WAITKIND_NO_RESUMED
     event.  But it could be that the event meant that thread 2 itself
     (or whatever other thread was the last resumed thread) exited.

     To address this we refresh the thread list and check whether we
     have resumed threads _now_.  In the example above, this removes
     thread 3 from the thread list.  If thread 2 was re-resumed, we
     ignore this event.  If we find no thread resumed, then we cancel
     the synchronous command and show "no unwaited-for " to the
     user.  */

  /* Remember which inferior was current before we start switching
     around below.  */
  inferior *curr_inf = current_inferior ();

  scoped_restore_current_thread restore_thread;
  update_thread_list ();

  /* If:

       - the current target has no thread executing, and
       - the current inferior is native, and
       - the current inferior is the one which has the terminal, and
       - we did nothing,

     then a Ctrl-C from this point on would remain stuck in the
     kernel, until a thread resumes and dequeues it.  That would
     result in the GDB CLI not reacting to Ctrl-C, not able to
     interrupt the program.  To address this, if the current inferior
     no longer has any thread executing, we give the terminal to some
     other inferior that has at least one thread executing.  */
  bool swap_terminal = true;

  /* Whether to ignore this TARGET_WAITKIND_NO_RESUMED event, or
     whether to report it to the user.  */
  bool ignore_event = false;

  for (thread_info *thread : all_non_exited_threads ())
    {
      if (swap_terminal && thread->executing ())
	{
	  if (thread->inf != curr_inf)
	    {
	      target_terminal::ours ();

	      switch_to_thread (thread);
	      target_terminal::inferior ();
	    }
	  swap_terminal = false;
	}

      if (!ignore_event && thread->resumed ())
	{
	  /* Either there were no unwaited-for children left in the
	     target at some point, but there are now, or some target
	     other than the eventing one has unwaited-for children
	     left.  Just ignore.  */
	  infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED "
			       "(ignoring: found resumed)");

	  ignore_event = true;
	}

      /* Both decisions made; no need to scan further threads.  */
      if (ignore_event && !swap_terminal)
	break;
    }

  if (ignore_event)
    {
      switch_to_inferior_no_thread (curr_inf);
      prepare_to_wait (ecs);
      return true;
    }

  /* Go ahead and report the event.  */
  return false;
}
5327
5328 /* Given an execution control state that has been freshly filled in by
5329 an event from the inferior, figure out what it means and take
5330 appropriate action.
5331
5332 The alternatives are:
5333
5334 1) stop_waiting and return; to really stop and return to the
5335 debugger.
5336
5337 2) keep_going and return; to wait for the next event (set
5338 ecs->event_thread->stepping_over_breakpoint to 1 to single step
5339 once). */
5340
5341 static void
5342 handle_inferior_event (struct execution_control_state *ecs)
5343 {
5344 /* Make sure that all temporary struct value objects that were
5345 created during the handling of the event get deleted at the
5346 end. */
5347 scoped_value_mark free_values;
5348
5349 infrun_debug_printf ("%s", ecs->ws.to_string ().c_str ());
5350
5351 if (ecs->ws.kind () == TARGET_WAITKIND_IGNORE)
5352 {
5353 /* We had an event in the inferior, but we are not interested in
5354 handling it at this level. The lower layers have already
5355 done what needs to be done, if anything.
5356
5357 One of the possible circumstances for this is when the
5358 inferior produces output for the console. The inferior has
5359 not stopped, and we are ignoring the event. Another possible
5360 circumstance is any event which the lower level knows will be
5361 reported multiple times without an intervening resume. */
5362 prepare_to_wait (ecs);
5363 return;
5364 }
5365
5366 if (ecs->ws.kind () == TARGET_WAITKIND_THREAD_EXITED)
5367 {
5368 prepare_to_wait (ecs);
5369 return;
5370 }
5371
5372 if (ecs->ws.kind () == TARGET_WAITKIND_NO_RESUMED
5373 && handle_no_resumed (ecs))
5374 return;
5375
5376 /* Cache the last target/ptid/waitstatus. */
5377 set_last_target_status (ecs->target, ecs->ptid, ecs->ws);
5378
5379 /* Always clear state belonging to the previous time we stopped. */
5380 stop_stack_dummy = STOP_NONE;
5381
5382 if (ecs->ws.kind () == TARGET_WAITKIND_NO_RESUMED)
5383 {
5384 /* No unwaited-for children left. IOW, all resumed children
5385 have exited. */
5386 stop_print_frame = false;
5387 stop_waiting (ecs);
5388 return;
5389 }
5390
5391 if (ecs->ws.kind () != TARGET_WAITKIND_EXITED
5392 && ecs->ws.kind () != TARGET_WAITKIND_SIGNALLED)
5393 {
5394 ecs->event_thread = find_thread_ptid (ecs->target, ecs->ptid);
5395 /* If it's a new thread, add it to the thread database. */
5396 if (ecs->event_thread == nullptr)
5397 ecs->event_thread = add_thread (ecs->target, ecs->ptid);
5398
5399 /* Disable range stepping. If the next step request could use a
5400 range, this will be end up re-enabled then. */
5401 ecs->event_thread->control.may_range_step = 0;
5402 }
5403
5404 /* Dependent on valid ECS->EVENT_THREAD. */
5405 adjust_pc_after_break (ecs->event_thread, ecs->ws);
5406
5407 /* Dependent on the current PC value modified by adjust_pc_after_break. */
5408 reinit_frame_cache ();
5409
5410 breakpoint_retire_moribund ();
5411
5412 /* First, distinguish signals caused by the debugger from signals
5413 that have to do with the program's own actions. Note that
5414 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
5415 on the operating system version. Here we detect when a SIGILL or
5416 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
5417 something similar for SIGSEGV, since a SIGSEGV will be generated
5418 when we're trying to execute a breakpoint instruction on a
5419 non-executable stack. This happens for call dummy breakpoints
5420 for architectures like SPARC that place call dummies on the
5421 stack. */
5422 if (ecs->ws.kind () == TARGET_WAITKIND_STOPPED
5423 && (ecs->ws.sig () == GDB_SIGNAL_ILL
5424 || ecs->ws.sig () == GDB_SIGNAL_SEGV
5425 || ecs->ws.sig () == GDB_SIGNAL_EMT))
5426 {
5427 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
5428
5429 if (breakpoint_inserted_here_p (regcache->aspace (),
5430 regcache_read_pc (regcache)))
5431 {
5432 infrun_debug_printf ("Treating signal as SIGTRAP");
5433 ecs->ws.set_stopped (GDB_SIGNAL_TRAP);
5434 }
5435 }
5436
5437 mark_non_executing_threads (ecs->target, ecs->ptid, ecs->ws);
5438
5439 switch (ecs->ws.kind ())
5440 {
5441 case TARGET_WAITKIND_LOADED:
5442 {
5443 context_switch (ecs);
5444 /* Ignore gracefully during startup of the inferior, as it might
5445 be the shell which has just loaded some objects, otherwise
5446 add the symbols for the newly loaded objects. Also ignore at
5447 the beginning of an attach or remote session; we will query
5448 the full list of libraries once the connection is
5449 established. */
5450
5451 stop_kind stop_soon = get_inferior_stop_soon (ecs);
5452 if (stop_soon == NO_STOP_QUIETLY)
5453 {
5454 struct regcache *regcache;
5455
5456 regcache = get_thread_regcache (ecs->event_thread);
5457
5458 handle_solib_event ();
5459
5460 ecs->event_thread->set_stop_pc (regcache_read_pc (regcache));
5461 ecs->event_thread->control.stop_bpstat
5462 = bpstat_stop_status_nowatch (regcache->aspace (),
5463 ecs->event_thread->stop_pc (),
5464 ecs->event_thread, ecs->ws);
5465
5466 if (handle_stop_requested (ecs))
5467 return;
5468
5469 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
5470 {
5471 /* A catchpoint triggered. */
5472 process_event_stop_test (ecs);
5473 return;
5474 }
5475
5476 /* If requested, stop when the dynamic linker notifies
5477 gdb of events. This allows the user to get control
5478 and place breakpoints in initializer routines for
5479 dynamically loaded objects (among other things). */
5480 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
5481 if (stop_on_solib_events)
5482 {
5483 /* Make sure we print "Stopped due to solib-event" in
5484 normal_stop. */
5485 stop_print_frame = true;
5486
5487 stop_waiting (ecs);
5488 return;
5489 }
5490 }
5491
5492 /* If we are skipping through a shell, or through shared library
5493 loading that we aren't interested in, resume the program. If
5494 we're running the program normally, also resume. */
5495 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
5496 {
5497 /* Loading of shared libraries might have changed breakpoint
5498 addresses. Make sure new breakpoints are inserted. */
5499 if (stop_soon == NO_STOP_QUIETLY)
5500 insert_breakpoints ();
5501 resume (GDB_SIGNAL_0);
5502 prepare_to_wait (ecs);
5503 return;
5504 }
5505
5506 /* But stop if we're attaching or setting up a remote
5507 connection. */
5508 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
5509 || stop_soon == STOP_QUIETLY_REMOTE)
5510 {
5511 infrun_debug_printf ("quietly stopped");
5512 stop_waiting (ecs);
5513 return;
5514 }
5515
5516 internal_error (_("unhandled stop_soon: %d"), (int) stop_soon);
5517 }
5518
5519 case TARGET_WAITKIND_SPURIOUS:
5520 if (handle_stop_requested (ecs))
5521 return;
5522 context_switch (ecs);
5523 resume (GDB_SIGNAL_0);
5524 prepare_to_wait (ecs);
5525 return;
5526
5527 case TARGET_WAITKIND_THREAD_CREATED:
5528 if (handle_stop_requested (ecs))
5529 return;
5530 context_switch (ecs);
5531 if (!switch_back_to_stepped_thread (ecs))
5532 keep_going (ecs);
5533 return;
5534
5535 case TARGET_WAITKIND_EXITED:
5536 case TARGET_WAITKIND_SIGNALLED:
5537 {
5538 /* Depending on the system, ecs->ptid may point to a thread or
5539 to a process. On some targets, target_mourn_inferior may
5540 need to have access to the just-exited thread. That is the
5541 case of GNU/Linux's "checkpoint" support, for example.
5542 Call the switch_to_xxx routine as appropriate. */
5543 thread_info *thr = find_thread_ptid (ecs->target, ecs->ptid);
5544 if (thr != nullptr)
5545 switch_to_thread (thr);
5546 else
5547 {
5548 inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
5549 switch_to_inferior_no_thread (inf);
5550 }
5551 }
5552 handle_vfork_child_exec_or_exit (0);
5553 target_terminal::ours (); /* Must do this before mourn anyway. */
5554
5555 /* Clearing any previous state of convenience variables. */
5556 clear_exit_convenience_vars ();
5557
5558 if (ecs->ws.kind () == TARGET_WAITKIND_EXITED)
5559 {
5560 /* Record the exit code in the convenience variable $_exitcode, so
5561 that the user can inspect this again later. */
5562 set_internalvar_integer (lookup_internalvar ("_exitcode"),
5563 (LONGEST) ecs->ws.exit_status ());
5564
5565 /* Also record this in the inferior itself. */
5566 current_inferior ()->has_exit_code = 1;
5567 current_inferior ()->exit_code = (LONGEST) ecs->ws.exit_status ();
5568
5569 /* Support the --return-child-result option. */
5570 return_child_result_value = ecs->ws.exit_status ();
5571
5572 gdb::observers::exited.notify (ecs->ws.exit_status ());
5573 }
5574 else
5575 {
5576 struct gdbarch *gdbarch = current_inferior ()->gdbarch;
5577
5578 if (gdbarch_gdb_signal_to_target_p (gdbarch))
5579 {
5580 /* Set the value of the internal variable $_exitsignal,
5581 which holds the signal uncaught by the inferior. */
5582 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
5583 gdbarch_gdb_signal_to_target (gdbarch,
5584 ecs->ws.sig ()));
5585 }
5586 else
5587 {
5588 /* We don't have access to the target's method used for
5589 converting between signal numbers (GDB's internal
5590 representation <-> target's representation).
5591 Therefore, we cannot do a good job at displaying this
5592 information to the user. It's better to just warn
5593 her about it (if infrun debugging is enabled), and
5594 give up. */
5595 infrun_debug_printf ("Cannot fill $_exitsignal with the correct "
5596 "signal number.");
5597 }
5598
5599 gdb::observers::signal_exited.notify (ecs->ws.sig ());
5600 }
5601
5602 gdb_flush (gdb_stdout);
5603 target_mourn_inferior (inferior_ptid);
5604 stop_print_frame = false;
5605 stop_waiting (ecs);
5606 return;
5607
5608 case TARGET_WAITKIND_FORKED:
5609 case TARGET_WAITKIND_VFORKED:
5610 /* Check whether the inferior is displaced stepping. */
5611 {
5612 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
5613 struct gdbarch *gdbarch = regcache->arch ();
5614 inferior *parent_inf = find_inferior_ptid (ecs->target, ecs->ptid);
5615
5616 /* If this is a fork (child gets its own address space copy)
5617 and some displaced step buffers were in use at the time of
5618 the fork, restore the displaced step buffer bytes in the
5619 child process.
5620
5621 Architectures which support displaced stepping and fork
5622 events must supply an implementation of
5623 gdbarch_displaced_step_restore_all_in_ptid. This is not
5624 enforced during gdbarch validation to support architectures
5625 which support displaced stepping but not forks. */
5626 if (ecs->ws.kind () == TARGET_WAITKIND_FORKED
5627 && gdbarch_supports_displaced_stepping (gdbarch))
5628 gdbarch_displaced_step_restore_all_in_ptid
5629 (gdbarch, parent_inf, ecs->ws.child_ptid ());
5630
5631 /* If displaced stepping is supported, and thread ecs->ptid is
5632 displaced stepping. */
5633 if (displaced_step_in_progress_thread (ecs->event_thread))
5634 {
5635 struct regcache *child_regcache;
5636 CORE_ADDR parent_pc;
5637
5638 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
5639 indicating that the displaced stepping of syscall instruction
5640 has been done. Perform cleanup for parent process here. Note
5641 that this operation also cleans up the child process for vfork,
5642 because their pages are shared. */
5643 displaced_step_finish (ecs->event_thread, GDB_SIGNAL_TRAP);
5644 /* Start a new step-over in another thread if there's one
5645 that needs it. */
5646 start_step_over ();
5647
5648 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
5649 the child's PC is also within the scratchpad. Set the child's PC
5650 to the parent's PC value, which has already been fixed up.
5651 FIXME: we use the parent's aspace here, although we're touching
5652 the child, because the child hasn't been added to the inferior
5653 list yet at this point. */
5654
5655 child_regcache
5656 = get_thread_arch_aspace_regcache (parent_inf->process_target (),
5657 ecs->ws.child_ptid (),
5658 gdbarch,
5659 parent_inf->aspace);
5660 /* Read PC value of parent process. */
5661 parent_pc = regcache_read_pc (regcache);
5662
5663 displaced_debug_printf ("write child pc from %s to %s",
5664 paddress (gdbarch,
5665 regcache_read_pc (child_regcache)),
5666 paddress (gdbarch, parent_pc));
5667
5668 regcache_write_pc (child_regcache, parent_pc);
5669 }
5670 }
5671
5672 context_switch (ecs);
5673
5674 /* Immediately detach breakpoints from the child before there's
5675 any chance of letting the user delete breakpoints from the
5676 breakpoint lists. If we don't do this early, it's easy to
5677 leave left over traps in the child, vis: "break foo; catch
5678 fork; c; <fork>; del; c; <child calls foo>". We only follow
5679 the fork on the last `continue', and by that time the
5680 breakpoint at "foo" is long gone from the breakpoint table.
5681 If we vforked, then we don't need to unpatch here, since both
5682 parent and child are sharing the same memory pages; we'll
5683 need to unpatch at follow/detach time instead to be certain
5684 that new breakpoints added between catchpoint hit time and
5685 vfork follow are detached. */
5686 if (ecs->ws.kind () != TARGET_WAITKIND_VFORKED)
5687 {
5688 /* This won't actually modify the breakpoint list, but will
5689 physically remove the breakpoints from the child. */
5690 detach_breakpoints (ecs->ws.child_ptid ());
5691 }
5692
5693 delete_just_stopped_threads_single_step_breakpoints ();
5694
5695 /* In case the event is caught by a catchpoint, remember that
5696 the event is to be followed at the next resume of the thread,
5697 and not immediately. */
5698 ecs->event_thread->pending_follow = ecs->ws;
5699
5700 ecs->event_thread->set_stop_pc
5701 (regcache_read_pc (get_thread_regcache (ecs->event_thread)));
5702
5703 ecs->event_thread->control.stop_bpstat
5704 = bpstat_stop_status_nowatch (get_current_regcache ()->aspace (),
5705 ecs->event_thread->stop_pc (),
5706 ecs->event_thread, ecs->ws);
5707
5708 if (handle_stop_requested (ecs))
5709 return;
5710
5711 /* If no catchpoint triggered for this, then keep going. Note
5712 that we're interested in knowing the bpstat actually causes a
5713 stop, not just if it may explain the signal. Software
5714 watchpoints, for example, always appear in the bpstat. */
5715 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
5716 {
5717 bool follow_child
5718 = (follow_fork_mode_string == follow_fork_mode_child);
5719
5720 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
5721
5722 process_stratum_target *targ
5723 = ecs->event_thread->inf->process_target ();
5724
5725 bool should_resume = follow_fork ();
5726
5727 /* Note that one of these may be an invalid pointer,
5728 depending on detach_fork. */
5729 thread_info *parent = ecs->event_thread;
5730 thread_info *child = find_thread_ptid (targ, ecs->ws.child_ptid ());
5731
5732 /* At this point, the parent is marked running, and the
5733 child is marked stopped. */
5734
5735 /* If not resuming the parent, mark it stopped. */
5736 if (follow_child && !detach_fork && !non_stop && !sched_multi)
5737 parent->set_running (false);
5738
5739 /* If resuming the child, mark it running. */
5740 if (follow_child || (!detach_fork && (non_stop || sched_multi)))
5741 child->set_running (true);
5742
5743 /* In non-stop mode, also resume the other branch. */
5744 if (!detach_fork && (non_stop
5745 || (sched_multi && target_is_non_stop_p ())))
5746 {
5747 if (follow_child)
5748 switch_to_thread (parent);
5749 else
5750 switch_to_thread (child);
5751
5752 ecs->event_thread = inferior_thread ();
5753 ecs->ptid = inferior_ptid;
5754 keep_going (ecs);
5755 }
5756
5757 if (follow_child)
5758 switch_to_thread (child);
5759 else
5760 switch_to_thread (parent);
5761
5762 ecs->event_thread = inferior_thread ();
5763 ecs->ptid = inferior_ptid;
5764
5765 if (should_resume)
5766 {
5767 /* Never call switch_back_to_stepped_thread if we are waiting for
5768 vfork-done (waiting for an external vfork child to exec or
5769 exit). We will resume only the vforking thread for the purpose
5770 of collecting the vfork-done event, and we will restart any
5771 step once the critical shared address space window is done. */
5772 if ((!follow_child
5773 && detach_fork
5774 && parent->inf->thread_waiting_for_vfork_done != nullptr)
5775 || !switch_back_to_stepped_thread (ecs))
5776 keep_going (ecs);
5777 }
5778 else
5779 stop_waiting (ecs);
5780 return;
5781 }
5782 process_event_stop_test (ecs);
5783 return;
5784
5785 case TARGET_WAITKIND_VFORK_DONE:
5786 /* Done with the shared memory region. Re-insert breakpoints in
5787 the parent, and keep going. */
5788
5789 context_switch (ecs);
5790
5791 handle_vfork_done (ecs->event_thread);
5792 gdb_assert (inferior_thread () == ecs->event_thread);
5793
5794 if (handle_stop_requested (ecs))
5795 return;
5796
5797 if (!switch_back_to_stepped_thread (ecs))
5798 {
5799 gdb_assert (inferior_thread () == ecs->event_thread);
5800 /* This also takes care of reinserting breakpoints in the
5801 previously locked inferior. */
5802 keep_going (ecs);
5803 }
5804 return;
5805
5806 case TARGET_WAITKIND_EXECD:
5807
5808 /* Note we can't read registers yet (the stop_pc), because we
5809 don't yet know the inferior's post-exec architecture.
5810 'stop_pc' is explicitly read below instead. */
5811 switch_to_thread_no_regs (ecs->event_thread);
5812
5813 /* Do whatever is necessary to the parent branch of the vfork. */
5814 handle_vfork_child_exec_or_exit (1);
5815
5816 /* This causes the eventpoints and symbol table to be reset.
5817 Must do this now, before trying to determine whether to
5818 stop. */
5819 follow_exec (inferior_ptid, ecs->ws.execd_pathname ());
5820
5821 /* In follow_exec we may have deleted the original thread and
5822 created a new one. Make sure that the event thread is the
5823 execd thread for that case (this is a nop otherwise). */
5824 ecs->event_thread = inferior_thread ();
5825
5826 ecs->event_thread->set_stop_pc
5827 (regcache_read_pc (get_thread_regcache (ecs->event_thread)));
5828
5829 ecs->event_thread->control.stop_bpstat
5830 = bpstat_stop_status_nowatch (get_current_regcache ()->aspace (),
5831 ecs->event_thread->stop_pc (),
5832 ecs->event_thread, ecs->ws);
5833
5834 if (handle_stop_requested (ecs))
5835 return;
5836
5837 /* If no catchpoint triggered for this, then keep going. */
5838 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
5839 {
5840 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
5841 keep_going (ecs);
5842 return;
5843 }
5844 process_event_stop_test (ecs);
5845 return;
5846
5847 /* Be careful not to try to gather much state about a thread
5848 that's in a syscall. It's frequently a losing proposition. */
5849 case TARGET_WAITKIND_SYSCALL_ENTRY:
5850 /* Getting the current syscall number. */
5851 if (handle_syscall_event (ecs) == 0)
5852 process_event_stop_test (ecs);
5853 return;
5854
5855 /* Before examining the threads further, step this thread to
5856 get it entirely out of the syscall. (We get notice of the
5857 event when the thread is just on the verge of exiting a
5858 syscall. Stepping one instruction seems to get it back
5859 into user code.) */
5860 case TARGET_WAITKIND_SYSCALL_RETURN:
5861 if (handle_syscall_event (ecs) == 0)
5862 process_event_stop_test (ecs);
5863 return;
5864
5865 case TARGET_WAITKIND_STOPPED:
5866 handle_signal_stop (ecs);
5867 return;
5868
5869 case TARGET_WAITKIND_NO_HISTORY:
5870 /* Reverse execution: target ran out of history info. */
5871
5872 /* Switch to the stopped thread. */
5873 context_switch (ecs);
5874 infrun_debug_printf ("stopped");
5875
5876 delete_just_stopped_threads_single_step_breakpoints ();
5877 ecs->event_thread->set_stop_pc
5878 (regcache_read_pc (get_thread_regcache (inferior_thread ())));
5879
5880 if (handle_stop_requested (ecs))
5881 return;
5882
5883 gdb::observers::no_history.notify ();
5884 stop_waiting (ecs);
5885 return;
5886 }
5887 }
5888
5889 /* Restart threads back to what they were trying to do back when we
5890 paused them (because of an in-line step-over or vfork, for example).
5891 The EVENT_THREAD thread is ignored (not restarted).
5892
5893 If INF is non-nullptr, only resume threads from INF. */
5894
5895 static void
5896 restart_threads (struct thread_info *event_thread, inferior *inf)
5897 {
5898 INFRUN_SCOPED_DEBUG_START_END ("event_thread=%s, inf=%d",
5899 event_thread->ptid.to_string ().c_str (),
5900 inf != nullptr ? inf->num : -1);
5901
5902 gdb_assert (!step_over_info_valid_p ());
5903
5904 /* In case the instruction just stepped spawned a new thread. */
5905 update_thread_list ();
5906
5907 for (thread_info *tp : all_non_exited_threads ())
5908 {
5909 if (inf != nullptr && tp->inf != inf)
5910 continue;
5911
5912 if (tp->inf->detaching)
5913 {
5914 infrun_debug_printf ("restart threads: [%s] inferior detaching",
5915 tp->ptid.to_string ().c_str ());
5916 continue;
5917 }
5918
5919 switch_to_thread_no_regs (tp);
5920
5921 if (tp == event_thread)
5922 {
5923 infrun_debug_printf ("restart threads: [%s] is event thread",
5924 tp->ptid.to_string ().c_str ());
5925 continue;
5926 }
5927
5928 if (!(tp->state == THREAD_RUNNING || tp->control.in_infcall))
5929 {
5930 infrun_debug_printf ("restart threads: [%s] not meant to be running",
5931 tp->ptid.to_string ().c_str ());
5932 continue;
5933 }
5934
5935 if (tp->resumed ())
5936 {
5937 infrun_debug_printf ("restart threads: [%s] resumed",
5938 tp->ptid.to_string ().c_str ());
5939 gdb_assert (tp->executing () || tp->has_pending_waitstatus ());
5940 continue;
5941 }
5942
5943 if (thread_is_in_step_over_chain (tp))
5944 {
5945 infrun_debug_printf ("restart threads: [%s] needs step-over",
5946 tp->ptid.to_string ().c_str ());
5947 gdb_assert (!tp->resumed ());
5948 continue;
5949 }
5950
5951
5952 if (tp->has_pending_waitstatus ())
5953 {
5954 infrun_debug_printf ("restart threads: [%s] has pending status",
5955 tp->ptid.to_string ().c_str ());
5956 tp->set_resumed (true);
5957 continue;
5958 }
5959
5960 gdb_assert (!tp->stop_requested);
5961
5962 /* If some thread needs to start a step-over at this point, it
5963 should still be in the step-over queue, and thus skipped
5964 above. */
5965 if (thread_still_needs_step_over (tp))
5966 {
5967 internal_error ("thread [%s] needs a step-over, but not in "
5968 "step-over queue\n",
5969 tp->ptid.to_string ().c_str ());
5970 }
5971
5972 if (currently_stepping (tp))
5973 {
5974 infrun_debug_printf ("restart threads: [%s] was stepping",
5975 tp->ptid.to_string ().c_str ());
5976 keep_going_stepped_thread (tp);
5977 }
5978 else
5979 {
5980 struct execution_control_state ecss;
5981 struct execution_control_state *ecs = &ecss;
5982
5983 infrun_debug_printf ("restart threads: [%s] continuing",
5984 tp->ptid.to_string ().c_str ());
5985 reset_ecs (ecs, tp);
5986 switch_to_thread (tp);
5987 keep_going_pass_signal (ecs);
5988 }
5989 }
5990 }
5991
5992 /* Callback for iterate_over_threads. Find a resumed thread that has
5993 a pending waitstatus. */
5994
5995 static int
5996 resumed_thread_with_pending_status (struct thread_info *tp,
5997 void *arg)
5998 {
5999 return tp->resumed () && tp->has_pending_waitstatus ();
6000 }
6001
6002 /* Called when we get an event that may finish an in-line or
6003 out-of-line (displaced stepping) step-over started previously.
6004 Return true if the event is processed and we should go back to the
6005 event loop; false if the caller should continue processing the
6006 event. */
6007
6008 static int
6009 finish_step_over (struct execution_control_state *ecs)
6010 {
6011 displaced_step_finish (ecs->event_thread, ecs->event_thread->stop_signal ());
6012
6013 bool had_step_over_info = step_over_info_valid_p ();
6014
6015 if (had_step_over_info)
6016 {
6017 /* If we're stepping over a breakpoint with all threads locked,
6018 then only the thread that was stepped should be reporting
6019 back an event. */
6020 gdb_assert (ecs->event_thread->control.trap_expected);
6021
6022 clear_step_over_info ();
6023 }
6024
6025 if (!target_is_non_stop_p ())
6026 return 0;
6027
6028 /* Start a new step-over in another thread if there's one that
6029 needs it. */
6030 start_step_over ();
6031
6032 /* If we were stepping over a breakpoint before, and haven't started
6033 a new in-line step-over sequence, then restart all other threads
6034 (except the event thread). We can't do this in all-stop, as then
6035 e.g., we wouldn't be able to issue any other remote packet until
6036 these other threads stop. */
6037 if (had_step_over_info && !step_over_info_valid_p ())
6038 {
6039 struct thread_info *pending;
6040
6041 /* If we only have threads with pending statuses, the restart
6042 below won't restart any thread and so nothing re-inserts the
6043 breakpoint we just stepped over. But we need it inserted
6044 when we later process the pending events, otherwise if
6045 another thread has a pending event for this breakpoint too,
6046 we'd discard its event (because the breakpoint that
6047 originally caused the event was no longer inserted). */
6048 context_switch (ecs);
6049 insert_breakpoints ();
6050
6051 restart_threads (ecs->event_thread);
6052
6053 /* If we have events pending, go through handle_inferior_event
6054 again, picking up a pending event at random. This avoids
6055 thread starvation. */
6056
6057 /* But not if we just stepped over a watchpoint in order to let
6058 the instruction execute so we can evaluate its expression.
6059 The set of watchpoints that triggered is recorded in the
6060 breakpoint objects themselves (see bp->watchpoint_triggered).
6061 If we processed another event first, that other event could
6062 clobber this info. */
6063 if (ecs->event_thread->stepping_over_watchpoint)
6064 return 0;
6065
6066 pending = iterate_over_threads (resumed_thread_with_pending_status,
6067 nullptr);
6068 if (pending != nullptr)
6069 {
6070 struct thread_info *tp = ecs->event_thread;
6071 struct regcache *regcache;
6072
6073 infrun_debug_printf ("found resumed threads with "
6074 "pending events, saving status");
6075
6076 gdb_assert (pending != tp);
6077
6078 /* Record the event thread's event for later. */
6079 save_waitstatus (tp, ecs->ws);
6080 /* This was cleared early, by handle_inferior_event. Set it
6081 so this pending event is considered by
6082 do_target_wait. */
6083 tp->set_resumed (true);
6084
6085 gdb_assert (!tp->executing ());
6086
6087 regcache = get_thread_regcache (tp);
6088 tp->set_stop_pc (regcache_read_pc (regcache));
6089
6090 infrun_debug_printf ("saved stop_pc=%s for %s "
6091 "(currently_stepping=%d)",
6092 paddress (target_gdbarch (), tp->stop_pc ()),
6093 tp->ptid.to_string ().c_str (),
6094 currently_stepping (tp));
6095
6096 /* This in-line step-over finished; clear this so we won't
6097 start a new one. This is what handle_signal_stop would
6098 do, if we returned false. */
6099 tp->stepping_over_breakpoint = 0;
6100
6101 /* Wake up the event loop again. */
6102 mark_async_event_handler (infrun_async_inferior_event_token);
6103
6104 prepare_to_wait (ecs);
6105 return 1;
6106 }
6107 }
6108
6109 return 0;
6110 }
6111
6112 /* Come here when the program has stopped with a signal. */
6113
6114 static void
6115 handle_signal_stop (struct execution_control_state *ecs)
6116 {
6117 frame_info_ptr frame;
6118 struct gdbarch *gdbarch;
6119 int stopped_by_watchpoint;
6120 enum stop_kind stop_soon;
6121 int random_signal;
6122
6123 gdb_assert (ecs->ws.kind () == TARGET_WAITKIND_STOPPED);
6124
6125 ecs->event_thread->set_stop_signal (ecs->ws.sig ());
6126
6127 /* Do we need to clean up the state of a thread that has
6128 completed a displaced single-step? (Doing so usually affects
6129 the PC, so do it here, before we set stop_pc.) */
6130 if (finish_step_over (ecs))
6131 return;
6132
6133 /* If we either finished a single-step or hit a breakpoint, but
6134 the user wanted this thread to be stopped, pretend we got a
6135 SIG0 (generic unsignaled stop). */
6136 if (ecs->event_thread->stop_requested
6137 && ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP)
6138 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
6139
6140 ecs->event_thread->set_stop_pc
6141 (regcache_read_pc (get_thread_regcache (ecs->event_thread)));
6142
6143 context_switch (ecs);
6144
6145 if (deprecated_context_hook)
6146 deprecated_context_hook (ecs->event_thread->global_num);
6147
6148 if (debug_infrun)
6149 {
6150 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
6151 struct gdbarch *reg_gdbarch = regcache->arch ();
6152
6153 infrun_debug_printf
6154 ("stop_pc=%s", paddress (reg_gdbarch, ecs->event_thread->stop_pc ()));
6155 if (target_stopped_by_watchpoint ())
6156 {
6157 CORE_ADDR addr;
6158
6159 infrun_debug_printf ("stopped by watchpoint");
6160
6161 if (target_stopped_data_address (current_inferior ()->top_target (),
6162 &addr))
6163 infrun_debug_printf ("stopped data address=%s",
6164 paddress (reg_gdbarch, addr));
6165 else
6166 infrun_debug_printf ("(no data address available)");
6167 }
6168 }
6169
6170 /* This is originated from start_remote(), start_inferior() and
6171 shared libraries hook functions. */
6172 stop_soon = get_inferior_stop_soon (ecs);
6173 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
6174 {
6175 infrun_debug_printf ("quietly stopped");
6176 stop_print_frame = true;
6177 stop_waiting (ecs);
6178 return;
6179 }
6180
6181 /* This originates from attach_command(). We need to overwrite
6182 the stop_signal here, because some kernels don't ignore a
6183 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
6184 See more comments in inferior.h. On the other hand, if we
6185 get a non-SIGSTOP, report it to the user - assume the backend
6186 will handle the SIGSTOP if it should show up later.
6187
6188 Also consider that the attach is complete when we see a
6189 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
6190 target extended-remote report it instead of a SIGSTOP
6191 (e.g. gdbserver). We already rely on SIGTRAP being our
6192 signal, so this is no exception.
6193
6194 Also consider that the attach is complete when we see a
6195 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
6196 the target to stop all threads of the inferior, in case the
6197 low level attach operation doesn't stop them implicitly. If
6198 they weren't stopped implicitly, then the stub will report a
6199 GDB_SIGNAL_0, meaning: stopped for no particular reason
6200 other than GDB's request. */
6201 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
6202 && (ecs->event_thread->stop_signal () == GDB_SIGNAL_STOP
6203 || ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
6204 || ecs->event_thread->stop_signal () == GDB_SIGNAL_0))
6205 {
6206 stop_print_frame = true;
6207 stop_waiting (ecs);
6208 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
6209 return;
6210 }
6211
6212 /* At this point, get hold of the now-current thread's frame. */
6213 frame = get_current_frame ();
6214 gdbarch = get_frame_arch (frame);
6215
6216 /* Pull the single step breakpoints out of the target. */
6217 if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP)
6218 {
6219 struct regcache *regcache;
6220 CORE_ADDR pc;
6221
6222 regcache = get_thread_regcache (ecs->event_thread);
6223 const address_space *aspace = regcache->aspace ();
6224
6225 pc = regcache_read_pc (regcache);
6226
6227 /* However, before doing so, if this single-step breakpoint was
6228 actually for another thread, set this thread up for moving
6229 past it. */
6230 if (!thread_has_single_step_breakpoint_here (ecs->event_thread,
6231 aspace, pc))
6232 {
6233 if (single_step_breakpoint_inserted_here_p (aspace, pc))
6234 {
6235 infrun_debug_printf ("[%s] hit another thread's single-step "
6236 "breakpoint",
6237 ecs->ptid.to_string ().c_str ());
6238 ecs->hit_singlestep_breakpoint = 1;
6239 }
6240 }
6241 else
6242 {
6243 infrun_debug_printf ("[%s] hit its single-step breakpoint",
6244 ecs->ptid.to_string ().c_str ());
6245 }
6246 }
6247 delete_just_stopped_threads_single_step_breakpoints ();
6248
6249 if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
6250 && ecs->event_thread->control.trap_expected
6251 && ecs->event_thread->stepping_over_watchpoint)
6252 stopped_by_watchpoint = 0;
6253 else
6254 stopped_by_watchpoint = watchpoints_triggered (ecs->ws);
6255
6256 /* If necessary, step over this watchpoint. We'll be back to display
6257 it in a moment. */
6258 if (stopped_by_watchpoint
6259 && (target_have_steppable_watchpoint ()
6260 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
6261 {
6262 /* At this point, we are stopped at an instruction which has
6263 attempted to write to a piece of memory under control of
6264 a watchpoint. The instruction hasn't actually executed
6265 yet. If we were to evaluate the watchpoint expression
6266 now, we would get the old value, and therefore no change
6267 would seem to have occurred.
6268
6269 In order to make watchpoints work `right', we really need
6270 to complete the memory write, and then evaluate the
6271 watchpoint expression. We do this by single-stepping the
6272 target.
6273
6274 It may not be necessary to disable the watchpoint to step over
6275 it. For example, the PA can (with some kernel cooperation)
6276 single step over a watchpoint without disabling the watchpoint.
6277
6278 It is far more common to need to disable a watchpoint to step
6279 the inferior over it. If we have non-steppable watchpoints,
6280 we must disable the current watchpoint; it's simplest to
6281 disable all watchpoints.
6282
6283 Any breakpoint at PC must also be stepped over -- if there's
6284 one, it will have already triggered before the watchpoint
6285 triggered, and we either already reported it to the user, or
6286 it didn't cause a stop and we called keep_going. In either
6287 case, if there was a breakpoint at PC, we must be trying to
6288 step past it. */
6289 ecs->event_thread->stepping_over_watchpoint = 1;
6290 keep_going (ecs);
6291 return;
6292 }
6293
6294 ecs->event_thread->stepping_over_breakpoint = 0;
6295 ecs->event_thread->stepping_over_watchpoint = 0;
6296 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
6297 ecs->event_thread->control.stop_step = 0;
6298 stop_print_frame = true;
6299 stopped_by_random_signal = 0;
6300 bpstat *stop_chain = nullptr;
6301
6302 /* Hide inlined functions starting here, unless we just performed stepi or
6303 nexti. After stepi and nexti, always show the innermost frame (not any
6304 inline function call sites). */
6305 if (ecs->event_thread->control.step_range_end != 1)
6306 {
6307 const address_space *aspace
6308 = get_thread_regcache (ecs->event_thread)->aspace ();
6309
6310 /* skip_inline_frames is expensive, so we avoid it if we can
6311 determine that the address is one where functions cannot have
6312 been inlined. This improves performance with inferiors that
6313 load a lot of shared libraries, because the solib event
6314 breakpoint is defined as the address of a function (i.e. not
6315 inline). Note that we have to check the previous PC as well
6316 as the current one to catch cases when we have just
6317 single-stepped off a breakpoint prior to reinstating it.
6318 Note that we're assuming that the code we single-step to is
6319 not inline, but that's not definitive: there's nothing
6320 preventing the event breakpoint function from containing
6321 inlined code, and the single-step ending up there. If the
6322 user had set a breakpoint on that inlined code, the missing
6323 skip_inline_frames call would break things. Fortunately
6324 that's an extremely unlikely scenario. */
6325 if (!pc_at_non_inline_function (aspace,
6326 ecs->event_thread->stop_pc (),
6327 ecs->ws)
6328 && !(ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
6329 && ecs->event_thread->control.trap_expected
6330 && pc_at_non_inline_function (aspace,
6331 ecs->event_thread->prev_pc,
6332 ecs->ws)))
6333 {
6334 stop_chain = build_bpstat_chain (aspace,
6335 ecs->event_thread->stop_pc (),
6336 ecs->ws);
6337 skip_inline_frames (ecs->event_thread, stop_chain);
6338
6339 /* Re-fetch current thread's frame in case that invalidated
6340 the frame cache. */
6341 frame = get_current_frame ();
6342 gdbarch = get_frame_arch (frame);
6343 }
6344 }
6345
6346 if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
6347 && ecs->event_thread->control.trap_expected
6348 && gdbarch_single_step_through_delay_p (gdbarch)
6349 && currently_stepping (ecs->event_thread))
6350 {
6351 /* We're trying to step off a breakpoint. Turns out that we're
6352 also on an instruction that needs to be stepped multiple
6353 times before it's been fully executing. E.g., architectures
6354 with a delay slot. It needs to be stepped twice, once for
6355 the instruction and once for the delay slot. */
6356 int step_through_delay
6357 = gdbarch_single_step_through_delay (gdbarch, frame);
6358
6359 if (step_through_delay)
6360 infrun_debug_printf ("step through delay");
6361
6362 if (ecs->event_thread->control.step_range_end == 0
6363 && step_through_delay)
6364 {
6365 /* The user issued a continue when stopped at a breakpoint.
6366 Set up for another trap and get out of here. */
6367 ecs->event_thread->stepping_over_breakpoint = 1;
6368 keep_going (ecs);
6369 return;
6370 }
6371 else if (step_through_delay)
6372 {
6373 /* The user issued a step when stopped at a breakpoint.
6374 Maybe we should stop, maybe we should not - the delay
6375 slot *might* correspond to a line of source. In any
6376 case, don't decide that here, just set
6377 ecs->stepping_over_breakpoint, making sure we
6378 single-step again before breakpoints are re-inserted. */
6379 ecs->event_thread->stepping_over_breakpoint = 1;
6380 }
6381 }
6382
6383 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
6384 handles this event. */
6385 ecs->event_thread->control.stop_bpstat
6386 = bpstat_stop_status (get_current_regcache ()->aspace (),
6387 ecs->event_thread->stop_pc (),
6388 ecs->event_thread, ecs->ws, stop_chain);
6389
6390 /* Following in case break condition called a
6391 function. */
6392 stop_print_frame = true;
6393
6394 /* This is where we handle "moribund" watchpoints. Unlike
6395 software breakpoints traps, hardware watchpoint traps are
6396 always distinguishable from random traps. If no high-level
6397 watchpoint is associated with the reported stop data address
6398 anymore, then the bpstat does not explain the signal ---
6399 simply make sure to ignore it if `stopped_by_watchpoint' is
6400 set. */
6401
6402 if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
6403 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
6404 GDB_SIGNAL_TRAP)
6405 && stopped_by_watchpoint)
6406 {
6407 infrun_debug_printf ("no user watchpoint explains watchpoint SIGTRAP, "
6408 "ignoring");
6409 }
6410
6411 /* NOTE: cagney/2003-03-29: These checks for a random signal
6412 at one stage in the past included checks for an inferior
6413 function call's call dummy's return breakpoint. The original
6414 comment, that went with the test, read:
6415
6416 ``End of a stack dummy. Some systems (e.g. Sony news) give
6417 another signal besides SIGTRAP, so check here as well as
6418 above.''
6419
6420 If someone ever tries to get call dummys on a
6421 non-executable stack to work (where the target would stop
6422 with something like a SIGSEGV), then those tests might need
6423 to be re-instated. Given, however, that the tests were only
6424 enabled when momentary breakpoints were not being used, I
6425 suspect that it won't be the case.
6426
6427 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
6428 be necessary for call dummies on a non-executable stack on
6429 SPARC. */
6430
6431 /* See if the breakpoints module can explain the signal. */
6432 random_signal
6433 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
6434 ecs->event_thread->stop_signal ());
6435
6436 /* Maybe this was a trap for a software breakpoint that has since
6437 been removed. */
6438 if (random_signal && target_stopped_by_sw_breakpoint ())
6439 {
6440 if (gdbarch_program_breakpoint_here_p (gdbarch,
6441 ecs->event_thread->stop_pc ()))
6442 {
6443 struct regcache *regcache;
6444 int decr_pc;
6445
6446 /* Re-adjust PC to what the program would see if GDB was not
6447 debugging it. */
6448 regcache = get_thread_regcache (ecs->event_thread);
6449 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
6450 if (decr_pc != 0)
6451 {
6452 gdb::optional<scoped_restore_tmpl<int>>
6453 restore_operation_disable;
6454
6455 if (record_full_is_used ())
6456 restore_operation_disable.emplace
6457 (record_full_gdb_operation_disable_set ());
6458
6459 regcache_write_pc (regcache,
6460 ecs->event_thread->stop_pc () + decr_pc);
6461 }
6462 }
6463 else
6464 {
6465 /* A delayed software breakpoint event. Ignore the trap. */
6466 infrun_debug_printf ("delayed software breakpoint trap, ignoring");
6467 random_signal = 0;
6468 }
6469 }
6470
6471 /* Maybe this was a trap for a hardware breakpoint/watchpoint that
6472 has since been removed. */
6473 if (random_signal && target_stopped_by_hw_breakpoint ())
6474 {
6475 /* A delayed hardware breakpoint event. Ignore the trap. */
6476 infrun_debug_printf ("delayed hardware breakpoint/watchpoint "
6477 "trap, ignoring");
6478 random_signal = 0;
6479 }
6480
6481 /* If not, perhaps stepping/nexting can. */
6482 if (random_signal)
6483 random_signal = !(ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
6484 && currently_stepping (ecs->event_thread));
6485
6486 /* Perhaps the thread hit a single-step breakpoint of _another_
6487 thread. Single-step breakpoints are transparent to the
6488 breakpoints module. */
6489 if (random_signal)
6490 random_signal = !ecs->hit_singlestep_breakpoint;
6491
6492 /* No? Perhaps we got a moribund watchpoint. */
6493 if (random_signal)
6494 random_signal = !stopped_by_watchpoint;
6495
6496 /* Always stop if the user explicitly requested this thread to
6497 remain stopped. */
6498 if (ecs->event_thread->stop_requested)
6499 {
6500 random_signal = 1;
6501 infrun_debug_printf ("user-requested stop");
6502 }
6503
6504 /* For the program's own signals, act according to
6505 the signal handling tables. */
6506
6507 if (random_signal)
6508 {
6509 /* Signal not for debugging purposes. */
6510 enum gdb_signal stop_signal = ecs->event_thread->stop_signal ();
6511
6512 infrun_debug_printf ("random signal (%s)",
6513 gdb_signal_to_symbol_string (stop_signal));
6514
6515 stopped_by_random_signal = 1;
6516
6517 /* Always stop on signals if we're either just gaining control
6518 of the program, or the user explicitly requested this thread
6519 to remain stopped. */
6520 if (stop_soon != NO_STOP_QUIETLY
6521 || ecs->event_thread->stop_requested
6522 || signal_stop_state (ecs->event_thread->stop_signal ()))
6523 {
6524 stop_waiting (ecs);
6525 return;
6526 }
6527
6528 /* Notify observers the signal has "handle print" set. Note we
6529 returned early above if stopping; normal_stop handles the
6530 printing in that case. */
6531 if (signal_print[ecs->event_thread->stop_signal ()])
6532 {
6533 /* The signal table tells us to print about this signal. */
6534 target_terminal::ours_for_output ();
6535 gdb::observers::signal_received.notify (ecs->event_thread->stop_signal ());
6536 target_terminal::inferior ();
6537 }
6538
6539 /* Clear the signal if it should not be passed. */
6540 if (signal_program[ecs->event_thread->stop_signal ()] == 0)
6541 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
6542
6543 if (ecs->event_thread->prev_pc == ecs->event_thread->stop_pc ()
6544 && ecs->event_thread->control.trap_expected
6545 && ecs->event_thread->control.step_resume_breakpoint == nullptr)
6546 {
6547 /* We were just starting a new sequence, attempting to
6548 single-step off of a breakpoint and expecting a SIGTRAP.
6549 Instead this signal arrives. This signal will take us out
6550 of the stepping range so GDB needs to remember to, when
6551 the signal handler returns, resume stepping off that
6552 breakpoint. */
6553 /* To simplify things, "continue" is forced to use the same
6554 code paths as single-step - set a breakpoint at the
6555 signal return address and then, once hit, step off that
6556 breakpoint. */
6557 infrun_debug_printf ("signal arrived while stepping over breakpoint");
6558
6559 insert_hp_step_resume_breakpoint_at_frame (frame);
6560 ecs->event_thread->step_after_step_resume_breakpoint = 1;
6561 /* Reset trap_expected to ensure breakpoints are re-inserted. */
6562 ecs->event_thread->control.trap_expected = 0;
6563
6564 /* If we were nexting/stepping some other thread, switch to
6565 it, so that we don't continue it, losing control. */
6566 if (!switch_back_to_stepped_thread (ecs))
6567 keep_going (ecs);
6568 return;
6569 }
6570
6571 if (ecs->event_thread->stop_signal () != GDB_SIGNAL_0
6572 && (pc_in_thread_step_range (ecs->event_thread->stop_pc (),
6573 ecs->event_thread)
6574 || ecs->event_thread->control.step_range_end == 1)
6575 && (get_stack_frame_id (frame)
6576 == ecs->event_thread->control.step_stack_frame_id)
6577 && ecs->event_thread->control.step_resume_breakpoint == nullptr)
6578 {
6579 /* The inferior is about to take a signal that will take it
6580 out of the single step range. Set a breakpoint at the
6581 current PC (which is presumably where the signal handler
6582 will eventually return) and then allow the inferior to
6583 run free.
6584
6585 Note that this is only needed for a signal delivered
6586 while in the single-step range. Nested signals aren't a
6587 problem as they eventually all return. */
6588 infrun_debug_printf ("signal may take us out of single-step range");
6589
6590 clear_step_over_info ();
6591 insert_hp_step_resume_breakpoint_at_frame (frame);
6592 ecs->event_thread->step_after_step_resume_breakpoint = 1;
6593 /* Reset trap_expected to ensure breakpoints are re-inserted. */
6594 ecs->event_thread->control.trap_expected = 0;
6595 keep_going (ecs);
6596 return;
6597 }
6598
6599 /* Note: step_resume_breakpoint may be non-NULL. This occurs
6600 when either there's a nested signal, or when there's a
6601 pending signal enabled just as the signal handler returns
6602 (leaving the inferior at the step-resume-breakpoint without
6603 actually executing it). Either way continue until the
6604 breakpoint is really hit. */
6605
6606 if (!switch_back_to_stepped_thread (ecs))
6607 {
6608 infrun_debug_printf ("random signal, keep going");
6609
6610 keep_going (ecs);
6611 }
6612 return;
6613 }
6614
6615 process_event_stop_test (ecs);
6616 }
6617
6618 /* Come here when we've got some debug event / signal we can explain
6619 (IOW, not a random signal), and test whether it should cause a
6620 stop, or whether we should resume the inferior (transparently).
6621 E.g., could be a breakpoint whose condition evaluates false; we
6622 could be still stepping within the line; etc. */
6623
6624 static void
6625 process_event_stop_test (struct execution_control_state *ecs)
6626 {
6627 struct symtab_and_line stop_pc_sal;
6628 frame_info_ptr frame;
6629 struct gdbarch *gdbarch;
6630 CORE_ADDR jmp_buf_pc;
6631 struct bpstat_what what;
6632
6633 /* Handle cases caused by hitting a breakpoint. */
6634
6635 frame = get_current_frame ();
6636 gdbarch = get_frame_arch (frame);
6637
6638 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
6639
6640 if (what.call_dummy)
6641 {
6642 stop_stack_dummy = what.call_dummy;
6643 }
6644
6645 /* A few breakpoint types have callbacks associated (e.g.,
6646 bp_jit_event). Run them now. */
6647 bpstat_run_callbacks (ecs->event_thread->control.stop_bpstat);
6648
6649 /* If we hit an internal event that triggers symbol changes, the
6650 current frame will be invalidated within bpstat_what (e.g., if we
6651 hit an internal solib event). Re-fetch it. */
6652 frame = get_current_frame ();
6653 gdbarch = get_frame_arch (frame);
6654
6655 switch (what.main_action)
6656 {
6657 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
6658 /* If we hit the breakpoint at longjmp while stepping, we
6659 install a momentary breakpoint at the target of the
6660 jmp_buf. */
6661
6662 infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME");
6663
6664 ecs->event_thread->stepping_over_breakpoint = 1;
6665
6666 if (what.is_longjmp)
6667 {
6668 struct value *arg_value;
6669
6670 /* If we set the longjmp breakpoint via a SystemTap probe,
6671 then use it to extract the arguments. The destination PC
6672 is the third argument to the probe. */
6673 arg_value = probe_safe_evaluate_at_pc (frame, 2);
6674 if (arg_value)
6675 {
6676 jmp_buf_pc = value_as_address (arg_value);
6677 jmp_buf_pc = gdbarch_addr_bits_remove (gdbarch, jmp_buf_pc);
6678 }
6679 else if (!gdbarch_get_longjmp_target_p (gdbarch)
6680 || !gdbarch_get_longjmp_target (gdbarch,
6681 frame, &jmp_buf_pc))
6682 {
6683 infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME "
6684 "(!gdbarch_get_longjmp_target)");
6685 keep_going (ecs);
6686 return;
6687 }
6688
6689 /* Insert a breakpoint at resume address. */
6690 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
6691 }
6692 else
6693 check_exception_resume (ecs, frame);
6694 keep_going (ecs);
6695 return;
6696
6697 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
6698 {
6699 frame_info_ptr init_frame;
6700
6701 /* There are several cases to consider.
6702
6703 1. The initiating frame no longer exists. In this case we
6704 must stop, because the exception or longjmp has gone too
6705 far.
6706
6707 2. The initiating frame exists, and is the same as the
6708 current frame. We stop, because the exception or longjmp
6709 has been caught.
6710
6711 3. The initiating frame exists and is different from the
6712 current frame. This means the exception or longjmp has
6713 been caught beneath the initiating frame, so keep going.
6714
6715 4. longjmp breakpoint has been placed just to protect
6716 against stale dummy frames and user is not interested in
6717 stopping around longjmps. */
6718
6719 infrun_debug_printf ("BPSTAT_WHAT_CLEAR_LONGJMP_RESUME");
6720
6721 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
6722 != nullptr);
6723 delete_exception_resume_breakpoint (ecs->event_thread);
6724
6725 if (what.is_longjmp)
6726 {
6727 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);
6728
6729 if (!frame_id_p (ecs->event_thread->initiating_frame))
6730 {
6731 /* Case 4. */
6732 keep_going (ecs);
6733 return;
6734 }
6735 }
6736
6737 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
6738
6739 if (init_frame)
6740 {
6741 struct frame_id current_id
6742 = get_frame_id (get_current_frame ());
6743 if (current_id == ecs->event_thread->initiating_frame)
6744 {
6745 /* Case 2. Fall through. */
6746 }
6747 else
6748 {
6749 /* Case 3. */
6750 keep_going (ecs);
6751 return;
6752 }
6753 }
6754
6755 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
6756 exists. */
6757 delete_step_resume_breakpoint (ecs->event_thread);
6758
6759 end_stepping_range (ecs);
6760 }
6761 return;
6762
6763 case BPSTAT_WHAT_SINGLE:
6764 infrun_debug_printf ("BPSTAT_WHAT_SINGLE");
6765 ecs->event_thread->stepping_over_breakpoint = 1;
6766 /* Still need to check other stuff, at least the case where we
6767 are stepping and step out of the right range. */
6768 break;
6769
6770 case BPSTAT_WHAT_STEP_RESUME:
6771 infrun_debug_printf ("BPSTAT_WHAT_STEP_RESUME");
6772
6773 delete_step_resume_breakpoint (ecs->event_thread);
6774 if (ecs->event_thread->control.proceed_to_finish
6775 && execution_direction == EXEC_REVERSE)
6776 {
6777 struct thread_info *tp = ecs->event_thread;
6778
6779 /* We are finishing a function in reverse, and just hit the
6780 step-resume breakpoint at the start address of the
6781 function, and we're almost there -- just need to back up
6782 by one more single-step, which should take us back to the
6783 function call. */
6784 tp->control.step_range_start = tp->control.step_range_end = 1;
6785 keep_going (ecs);
6786 return;
6787 }
6788 fill_in_stop_func (gdbarch, ecs);
6789 if (ecs->event_thread->stop_pc () == ecs->stop_func_start
6790 && execution_direction == EXEC_REVERSE)
6791 {
6792 /* We are stepping over a function call in reverse, and just
6793 hit the step-resume breakpoint at the start address of
6794 the function. Go back to single-stepping, which should
6795 take us back to the function call. */
6796 ecs->event_thread->stepping_over_breakpoint = 1;
6797 keep_going (ecs);
6798 return;
6799 }
6800 break;
6801
6802 case BPSTAT_WHAT_STOP_NOISY:
6803 infrun_debug_printf ("BPSTAT_WHAT_STOP_NOISY");
6804 stop_print_frame = true;
6805
6806 /* Assume the thread stopped for a breakpoint. We'll still check
6807 whether a/the breakpoint is there when the thread is next
6808 resumed. */
6809 ecs->event_thread->stepping_over_breakpoint = 1;
6810
6811 stop_waiting (ecs);
6812 return;
6813
6814 case BPSTAT_WHAT_STOP_SILENT:
6815 infrun_debug_printf ("BPSTAT_WHAT_STOP_SILENT");
6816 stop_print_frame = false;
6817
6818 /* Assume the thread stopped for a breakpoint. We'll still check
6819 whether a/the breakpoint is there when the thread is next
6820 resumed. */
6821 ecs->event_thread->stepping_over_breakpoint = 1;
6822 stop_waiting (ecs);
6823 return;
6824
6825 case BPSTAT_WHAT_HP_STEP_RESUME:
6826 infrun_debug_printf ("BPSTAT_WHAT_HP_STEP_RESUME");
6827
6828 delete_step_resume_breakpoint (ecs->event_thread);
6829 if (ecs->event_thread->step_after_step_resume_breakpoint)
6830 {
6831 /* Back when the step-resume breakpoint was inserted, we
6832 were trying to single-step off a breakpoint. Go back to
6833 doing that. */
6834 ecs->event_thread->step_after_step_resume_breakpoint = 0;
6835 ecs->event_thread->stepping_over_breakpoint = 1;
6836 keep_going (ecs);
6837 return;
6838 }
6839 break;
6840
6841 case BPSTAT_WHAT_KEEP_CHECKING:
6842 break;
6843 }
6844
6845 /* If we stepped a permanent breakpoint and we had a high priority
6846 step-resume breakpoint for the address we stepped, but we didn't
6847 hit it, then we must have stepped into the signal handler. The
6848 step-resume was only necessary to catch the case of _not_
6849 stepping into the handler, so delete it, and fall through to
6850 checking whether the step finished. */
6851 if (ecs->event_thread->stepped_breakpoint)
6852 {
6853 struct breakpoint *sr_bp
6854 = ecs->event_thread->control.step_resume_breakpoint;
6855
6856 if (sr_bp != nullptr
6857 && sr_bp->loc->permanent
6858 && sr_bp->type == bp_hp_step_resume
6859 && sr_bp->loc->address == ecs->event_thread->prev_pc)
6860 {
6861 infrun_debug_printf ("stepped permanent breakpoint, stopped in handler");
6862 delete_step_resume_breakpoint (ecs->event_thread);
6863 ecs->event_thread->step_after_step_resume_breakpoint = 0;
6864 }
6865 }
6866
6867 /* We come here if we hit a breakpoint but should not stop for it.
6868 Possibly we also were stepping and should stop for that. So fall
6869 through and test for stepping. But, if not stepping, do not
6870 stop. */
6871
6872 /* In all-stop mode, if we're currently stepping but have stopped in
6873 some other thread, we need to switch back to the stepped thread. */
6874 if (switch_back_to_stepped_thread (ecs))
6875 return;
6876
6877 if (ecs->event_thread->control.step_resume_breakpoint)
6878 {
6879 infrun_debug_printf ("step-resume breakpoint is inserted");
6880
6881 /* Having a step-resume breakpoint overrides anything
6882 else having to do with stepping commands until
6883 that breakpoint is reached. */
6884 keep_going (ecs);
6885 return;
6886 }
6887
6888 if (ecs->event_thread->control.step_range_end == 0)
6889 {
6890 infrun_debug_printf ("no stepping, continue");
6891 /* Likewise if we aren't even stepping. */
6892 keep_going (ecs);
6893 return;
6894 }
6895
6896 /* Re-fetch current thread's frame in case the code above caused
6897 the frame cache to be re-initialized, making our FRAME variable
6898 a dangling pointer. */
6899 frame = get_current_frame ();
6900 gdbarch = get_frame_arch (frame);
6901 fill_in_stop_func (gdbarch, ecs);
6902
6903 /* If stepping through a line, keep going if still within it.
6904
6905 Note that step_range_end is the address of the first instruction
6906 beyond the step range, and NOT the address of the last instruction
6907 within it!
6908
6909 Note also that during reverse execution, we may be stepping
6910 through a function epilogue and therefore must detect when
6911 the current-frame changes in the middle of a line. */
6912
6913 if (pc_in_thread_step_range (ecs->event_thread->stop_pc (),
6914 ecs->event_thread)
6915 && (execution_direction != EXEC_REVERSE
6916 || get_frame_id (frame) == ecs->event_thread->control.step_frame_id))
6917 {
6918 infrun_debug_printf
6919 ("stepping inside range [%s-%s]",
6920 paddress (gdbarch, ecs->event_thread->control.step_range_start),
6921 paddress (gdbarch, ecs->event_thread->control.step_range_end));
6922
6923 /* Tentatively re-enable range stepping; `resume' disables it if
6924 necessary (e.g., if we're stepping over a breakpoint or we
6925 have software watchpoints). */
6926 ecs->event_thread->control.may_range_step = 1;
6927
6928 /* When stepping backward, stop at beginning of line range
6929 (unless it's the function entry point, in which case
6930 keep going back to the call point). */
6931 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
6932 if (stop_pc == ecs->event_thread->control.step_range_start
6933 && stop_pc != ecs->stop_func_start
6934 && execution_direction == EXEC_REVERSE)
6935 end_stepping_range (ecs);
6936 else
6937 keep_going (ecs);
6938
6939 return;
6940 }
6941
6942 /* We stepped out of the stepping range. */
6943
6944 /* If we are stepping at the source level and entered the runtime
6945 loader dynamic symbol resolution code...
6946
6947 EXEC_FORWARD: we keep on single stepping until we exit the run
6948 time loader code and reach the callee's address.
6949
6950 EXEC_REVERSE: we've already executed the callee (backward), and
6951 the runtime loader code is handled just like any other
6952 undebuggable function call. Now we need only keep stepping
6953 backward through the trampoline code, and that's handled further
6954 down, so there is nothing for us to do here. */
6955
6956 if (execution_direction != EXEC_REVERSE
6957 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
6958 && in_solib_dynsym_resolve_code (ecs->event_thread->stop_pc ())
6959 && (ecs->event_thread->control.step_start_function == nullptr
6960 || !in_solib_dynsym_resolve_code (
6961 ecs->event_thread->control.step_start_function->value_block ()
6962 ->entry_pc ())))
6963 {
6964 CORE_ADDR pc_after_resolver =
6965 gdbarch_skip_solib_resolver (gdbarch, ecs->event_thread->stop_pc ());
6966
6967 infrun_debug_printf ("stepped into dynsym resolve code");
6968
6969 if (pc_after_resolver)
6970 {
6971 /* Set up a step-resume breakpoint at the address
6972 indicated by SKIP_SOLIB_RESOLVER. */
6973 symtab_and_line sr_sal;
6974 sr_sal.pc = pc_after_resolver;
6975 sr_sal.pspace = get_frame_program_space (frame);
6976
6977 insert_step_resume_breakpoint_at_sal (gdbarch,
6978 sr_sal, null_frame_id);
6979 }
6980
6981 keep_going (ecs);
6982 return;
6983 }
6984
6985 /* Step through an indirect branch thunk. */
6986 if (ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
6987 && gdbarch_in_indirect_branch_thunk (gdbarch,
6988 ecs->event_thread->stop_pc ()))
6989 {
6990 infrun_debug_printf ("stepped into indirect branch thunk");
6991 keep_going (ecs);
6992 return;
6993 }
6994
6995 if (ecs->event_thread->control.step_range_end != 1
6996 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
6997 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
6998 && get_frame_type (frame) == SIGTRAMP_FRAME)
6999 {
7000 infrun_debug_printf ("stepped into signal trampoline");
7001 /* The inferior, while doing a "step" or "next", has ended up in
7002 a signal trampoline (either by a signal being delivered or by
7003 the signal handler returning). Just single-step until the
7004 inferior leaves the trampoline (either by calling the handler
7005 or returning). */
7006 keep_going (ecs);
7007 return;
7008 }
7009
7010 /* If we're in the return path from a shared library trampoline,
7011 we want to proceed through the trampoline when stepping. */
7012 /* macro/2012-04-25: This needs to come before the subroutine
7013 call check below as on some targets return trampolines look
7014 like subroutine calls (MIPS16 return thunks). */
7015 if (gdbarch_in_solib_return_trampoline (gdbarch,
7016 ecs->event_thread->stop_pc (),
7017 ecs->stop_func_name)
7018 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
7019 {
7020 /* Determine where this trampoline returns. */
7021 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
7022 CORE_ADDR real_stop_pc
7023 = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
7024
7025 infrun_debug_printf ("stepped into solib return tramp");
7026
7027 /* Only proceed through if we know where it's going. */
7028 if (real_stop_pc)
7029 {
7030 /* And put the step-breakpoint there and go until there. */
7031 symtab_and_line sr_sal;
7032 sr_sal.pc = real_stop_pc;
7033 sr_sal.section = find_pc_overlay (sr_sal.pc);
7034 sr_sal.pspace = get_frame_program_space (frame);
7035
7036 /* Do not specify what the fp should be when we stop since
7037 on some machines the prologue is where the new fp value
7038 is established. */
7039 insert_step_resume_breakpoint_at_sal (gdbarch,
7040 sr_sal, null_frame_id);
7041
7042 /* Restart without fiddling with the step ranges or
7043 other state. */
7044 keep_going (ecs);
7045 return;
7046 }
7047 }
7048
7049 /* Check for subroutine calls. The check for the current frame
7050 equalling the step ID is not necessary - the check of the
7051 previous frame's ID is sufficient - but it is a common case and
7052 cheaper than checking the previous frame's ID.
7053
7054 NOTE: frame_id::operator== will never report two invalid frame IDs as
7055 being equal, so to get into this block, both the current and
7056 previous frame must have valid frame IDs. */
7057 /* The outer_frame_id check is a heuristic to detect stepping
7058 through startup code. If we step over an instruction which
7059 sets the stack pointer from an invalid value to a valid value,
7060 we may detect that as a subroutine call from the mythical
7061 "outermost" function. This could be fixed by marking
7062 outermost frames as !stack_p,code_p,special_p. Then the
7063 initial outermost frame, before sp was valid, would
7064 have code_addr == &_start. See the comment in frame_id::operator==
7065 for more. */
7066 if ((get_stack_frame_id (frame)
7067 != ecs->event_thread->control.step_stack_frame_id)
7068 && ((frame_unwind_caller_id (get_current_frame ())
7069 == ecs->event_thread->control.step_stack_frame_id)
7070 && ((ecs->event_thread->control.step_stack_frame_id
7071 != outer_frame_id)
7072 || (ecs->event_thread->control.step_start_function
7073 != find_pc_function (ecs->event_thread->stop_pc ())))))
7074 {
7075 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
7076 CORE_ADDR real_stop_pc;
7077
7078 infrun_debug_printf ("stepped into subroutine");
7079
7080 if (ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
7081 {
7082 /* I presume that step_over_calls is only 0 when we're
7083 supposed to be stepping at the assembly language level
7084 ("stepi"). Just stop. */
7085 /* And this works the same backward as frontward. MVS */
7086 end_stepping_range (ecs);
7087 return;
7088 }
7089
7090 /* Reverse stepping through solib trampolines. */
7091
7092 if (execution_direction == EXEC_REVERSE
7093 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
7094 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
7095 || (ecs->stop_func_start == 0
7096 && in_solib_dynsym_resolve_code (stop_pc))))
7097 {
7098 /* Any solib trampoline code can be handled in reverse
7099 by simply continuing to single-step. We have already
7100 executed the solib function (backwards), and a few
7101 steps will take us back through the trampoline to the
7102 caller. */
7103 keep_going (ecs);
7104 return;
7105 }
7106
7107 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
7108 {
7109 /* We're doing a "next".
7110
7111 Normal (forward) execution: set a breakpoint at the
7112 callee's return address (the address at which the caller
7113 will resume).
7114
7115 Reverse (backward) execution. set the step-resume
7116 breakpoint at the start of the function that we just
7117 stepped into (backwards), and continue to there. When we
7118 get there, we'll need to single-step back to the caller. */
7119
7120 if (execution_direction == EXEC_REVERSE)
7121 {
7122 /* If we're already at the start of the function, we've either
7123 just stepped backward into a single instruction function,
7124 or stepped back out of a signal handler to the first instruction
7125 of the function. Just keep going, which will single-step back
7126 to the caller. */
7127 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
7128 {
7129 /* Normal function call return (static or dynamic). */
7130 symtab_and_line sr_sal;
7131 sr_sal.pc = ecs->stop_func_start;
7132 sr_sal.pspace = get_frame_program_space (frame);
7133 insert_step_resume_breakpoint_at_sal (gdbarch,
7134 sr_sal, get_stack_frame_id (frame));
7135 }
7136 }
7137 else
7138 insert_step_resume_breakpoint_at_caller (frame);
7139
7140 keep_going (ecs);
7141 return;
7142 }
7143
7144 /* If we are in a function call trampoline (a stub between the
7145 calling routine and the real function), locate the real
7146 function. That's what tells us (a) whether we want to step
7147 into it at all, and (b) what prologue we want to run to the
7148 end of, if we do step into it. */
7149 real_stop_pc = skip_language_trampoline (frame, stop_pc);
7150 if (real_stop_pc == 0)
7151 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
7152 if (real_stop_pc != 0)
7153 ecs->stop_func_start = real_stop_pc;
7154
7155 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
7156 {
7157 symtab_and_line sr_sal;
7158 sr_sal.pc = ecs->stop_func_start;
7159 sr_sal.pspace = get_frame_program_space (frame);
7160
7161 insert_step_resume_breakpoint_at_sal (gdbarch,
7162 sr_sal, null_frame_id);
7163 keep_going (ecs);
7164 return;
7165 }
7166
7167 /* If we have line number information for the function we are
7168 thinking of stepping into and the function isn't on the skip
7169 list, step into it.
7170
7171 If there are several symtabs at that PC (e.g. with include
7172 files), just want to know whether *any* of them have line
7173 numbers. find_pc_line handles this. */
7174 {
7175 struct symtab_and_line tmp_sal;
7176
7177 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
7178 if (tmp_sal.line != 0
7179 && !function_name_is_marked_for_skip (ecs->stop_func_name,
7180 tmp_sal)
7181 && !inline_frame_is_marked_for_skip (true, ecs->event_thread))
7182 {
7183 if (execution_direction == EXEC_REVERSE)
7184 handle_step_into_function_backward (gdbarch, ecs);
7185 else
7186 handle_step_into_function (gdbarch, ecs);
7187 return;
7188 }
7189 }
7190
7191 /* If we have no line number and the step-stop-if-no-debug is
7192 set, we stop the step so that the user has a chance to switch
7193 in assembly mode. */
7194 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
7195 && step_stop_if_no_debug)
7196 {
7197 end_stepping_range (ecs);
7198 return;
7199 }
7200
7201 if (execution_direction == EXEC_REVERSE)
7202 {
7203 /* If we're already at the start of the function, we've either just
7204 stepped backward into a single instruction function without line
7205 number info, or stepped back out of a signal handler to the first
7206 instruction of the function without line number info. Just keep
7207 going, which will single-step back to the caller. */
7208 if (ecs->stop_func_start != stop_pc)
7209 {
7210 /* Set a breakpoint at callee's start address.
7211 From there we can step once and be back in the caller. */
7212 symtab_and_line sr_sal;
7213 sr_sal.pc = ecs->stop_func_start;
7214 sr_sal.pspace = get_frame_program_space (frame);
7215 insert_step_resume_breakpoint_at_sal (gdbarch,
7216 sr_sal, null_frame_id);
7217 }
7218 }
7219 else
7220 /* Set a breakpoint at callee's return address (the address
7221 at which the caller will resume). */
7222 insert_step_resume_breakpoint_at_caller (frame);
7223
7224 keep_going (ecs);
7225 return;
7226 }
7227
7228 /* Reverse stepping through solib trampolines. */
7229
7230 if (execution_direction == EXEC_REVERSE
7231 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
7232 {
7233 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
7234
7235 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
7236 || (ecs->stop_func_start == 0
7237 && in_solib_dynsym_resolve_code (stop_pc)))
7238 {
7239 /* Any solib trampoline code can be handled in reverse
7240 by simply continuing to single-step. We have already
7241 executed the solib function (backwards), and a few
7242 steps will take us back through the trampoline to the
7243 caller. */
7244 keep_going (ecs);
7245 return;
7246 }
7247 else if (in_solib_dynsym_resolve_code (stop_pc))
7248 {
7249 /* Stepped backward into the solib dynsym resolver.
7250 Set a breakpoint at its start and continue, then
7251 one more step will take us out. */
7252 symtab_and_line sr_sal;
7253 sr_sal.pc = ecs->stop_func_start;
7254 sr_sal.pspace = get_frame_program_space (frame);
7255 insert_step_resume_breakpoint_at_sal (gdbarch,
7256 sr_sal, null_frame_id);
7257 keep_going (ecs);
7258 return;
7259 }
7260 }
7261
7262 /* This always returns the sal for the inner-most frame when we are in a
7263 stack of inlined frames, even if GDB actually believes that it is in a
7264 more outer frame. This is checked for below by calls to
7265 inline_skipped_frames. */
7266 stop_pc_sal = find_pc_line (ecs->event_thread->stop_pc (), 0);
7267
7268 /* NOTE: tausq/2004-05-24: This if block used to be done before all
7269 the trampoline processing logic, however, there are some trampolines
7270 that have no names, so we should do trampoline handling first. */
7271 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
7272 && ecs->stop_func_name == nullptr
7273 && stop_pc_sal.line == 0)
7274 {
7275 infrun_debug_printf ("stepped into undebuggable function");
7276
7277 /* The inferior just stepped into, or returned to, an
7278 undebuggable function (where there is no debugging information
7279 and no line number corresponding to the address where the
7280 inferior stopped). Since we want to skip this kind of code,
7281 we keep going until the inferior returns from this
7282 function - unless the user has asked us not to (via
7283 set step-mode) or we no longer know how to get back
7284 to the call site. */
7285 if (step_stop_if_no_debug
7286 || !frame_id_p (frame_unwind_caller_id (frame)))
7287 {
7288 /* If we have no line number and the step-stop-if-no-debug
7289 is set, we stop the step so that the user has a chance to
7290 switch in assembly mode. */
7291 end_stepping_range (ecs);
7292 return;
7293 }
7294 else
7295 {
7296 /* Set a breakpoint at callee's return address (the address
7297 at which the caller will resume). */
7298 insert_step_resume_breakpoint_at_caller (frame);
7299 keep_going (ecs);
7300 return;
7301 }
7302 }
7303
7304 if (ecs->event_thread->control.step_range_end == 1)
7305 {
7306 /* It is stepi or nexti. We always want to stop stepping after
7307 one instruction. */
7308 infrun_debug_printf ("stepi/nexti");
7309 end_stepping_range (ecs);
7310 return;
7311 }
7312
7313 if (stop_pc_sal.line == 0)
7314 {
7315 /* We have no line number information. That means to stop
7316 stepping (does this always happen right after one instruction,
7317 when we do "s" in a function with no line numbers,
7318 or can this happen as a result of a return or longjmp?). */
7319 infrun_debug_printf ("line number info");
7320 end_stepping_range (ecs);
7321 return;
7322 }
7323
7324 /* Look for "calls" to inlined functions, part one. If the inline
7325 frame machinery detected some skipped call sites, we have entered
7326 a new inline function. */
7327
7328 if ((get_frame_id (get_current_frame ())
7329 == ecs->event_thread->control.step_frame_id)
7330 && inline_skipped_frames (ecs->event_thread))
7331 {
7332 infrun_debug_printf ("stepped into inlined function");
7333
7334 symtab_and_line call_sal = find_frame_sal (get_current_frame ());
7335
7336 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
7337 {
7338 /* For "step", we're going to stop. But if the call site
7339 for this inlined function is on the same source line as
7340 we were previously stepping, go down into the function
7341 first. Otherwise stop at the call site. */
7342
7343 if (call_sal.line == ecs->event_thread->current_line
7344 && call_sal.symtab == ecs->event_thread->current_symtab)
7345 {
7346 step_into_inline_frame (ecs->event_thread);
7347 if (inline_frame_is_marked_for_skip (false, ecs->event_thread))
7348 {
7349 keep_going (ecs);
7350 return;
7351 }
7352 }
7353
7354 end_stepping_range (ecs);
7355 return;
7356 }
7357 else
7358 {
7359 /* For "next", we should stop at the call site if it is on a
7360 different source line. Otherwise continue through the
7361 inlined function. */
7362 if (call_sal.line == ecs->event_thread->current_line
7363 && call_sal.symtab == ecs->event_thread->current_symtab)
7364 keep_going (ecs);
7365 else
7366 end_stepping_range (ecs);
7367 return;
7368 }
7369 }
7370
7371 /* Look for "calls" to inlined functions, part two. If we are still
7372 in the same real function we were stepping through, but we have
7373 to go further up to find the exact frame ID, we are stepping
7374 through a more inlined call beyond its call site. */
7375
7376 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
7377 && (get_frame_id (get_current_frame ())
7378 != ecs->event_thread->control.step_frame_id)
7379 && stepped_in_from (get_current_frame (),
7380 ecs->event_thread->control.step_frame_id))
7381 {
7382 infrun_debug_printf ("stepping through inlined function");
7383
7384 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL
7385 || inline_frame_is_marked_for_skip (false, ecs->event_thread))
7386 keep_going (ecs);
7387 else
7388 end_stepping_range (ecs);
7389 return;
7390 }
7391
7392 bool refresh_step_info = true;
7393 if ((ecs->event_thread->stop_pc () == stop_pc_sal.pc)
7394 && (ecs->event_thread->current_line != stop_pc_sal.line
7395 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
7396 {
7397 /* We are at a different line. */
7398
7399 if (stop_pc_sal.is_stmt)
7400 {
7401 /* We are at the start of a statement.
7402
7403 So stop. Note that we don't stop if we step into the middle of a
7404 statement. That is said to make things like for (;;) statements
7405 work better. */
7406 infrun_debug_printf ("stepped to a different line");
7407 end_stepping_range (ecs);
7408 return;
7409 }
7410 else if (get_frame_id (get_current_frame ())
7411 == ecs->event_thread->control.step_frame_id)
7412 {
7413 /* We are not at the start of a statement, and we have not changed
7414 frame.
7415
7416 We ignore this line table entry, and continue stepping forward,
7417 looking for a better place to stop. */
7418 refresh_step_info = false;
7419 infrun_debug_printf ("stepped to a different line, but "
7420 "it's not the start of a statement");
7421 }
7422 else
7423 {
7424 /* We are not the start of a statement, and we have changed frame.
7425
7426 We ignore this line table entry, and continue stepping forward,
7427 looking for a better place to stop. Keep refresh_step_info at
7428 true to note that the frame has changed, but ignore the line
7429 number to make sure we don't ignore a subsequent entry with the
7430 same line number. */
7431 stop_pc_sal.line = 0;
7432 infrun_debug_printf ("stepped to a different frame, but "
7433 "it's not the start of a statement");
7434 }
7435 }
7436
7437 /* We aren't done stepping.
7438
7439 Optimize by setting the stepping range to the line.
7440 (We might not be in the original line, but if we entered a
7441 new line in mid-statement, we continue stepping. This makes
7442 things like for(;;) statements work better.)
7443
7444 If we entered a SAL that indicates a non-statement line table entry,
7445 then we update the stepping range, but we don't update the step info,
7446 which includes things like the line number we are stepping away from.
7447 This means we will stop when we find a line table entry that is marked
7448 as is-statement, even if it matches the non-statement one we just
7449 stepped into. */
7450
7451 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
7452 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
7453 ecs->event_thread->control.may_range_step = 1;
7454 infrun_debug_printf
7455 ("updated step range, start = %s, end = %s, may_range_step = %d",
7456 paddress (gdbarch, ecs->event_thread->control.step_range_start),
7457 paddress (gdbarch, ecs->event_thread->control.step_range_end),
7458 ecs->event_thread->control.may_range_step);
7459 if (refresh_step_info)
7460 set_step_info (ecs->event_thread, frame, stop_pc_sal);
7461
7462 infrun_debug_printf ("keep going");
7463 keep_going (ecs);
7464 }
7465
7466 static bool restart_stepped_thread (process_stratum_target *resume_target,
7467 ptid_t resume_ptid);
7468
7469 /* In all-stop mode, if we're currently stepping but have stopped in
7470 some other thread, we may need to switch back to the stepped
   thread.  Returns true if we set the inferior running, false if we left
7472 it stopped (and the event needs further processing). */
7473
static bool
switch_back_to_stepped_thread (struct execution_control_state *ecs)
{
  if (!target_is_non_stop_p ())
    {
      /* If any thread is blocked on some internal breakpoint, and we
	 simply need to step over that breakpoint to get it going
	 again, do that first.  */

      /* However, if we see an event for the stepping thread, then we
	 know all other threads have been moved past their breakpoints
	 already.  Let the caller check whether the step is finished,
	 etc., before deciding to move it past a breakpoint.  */
      if (ecs->event_thread->control.step_range_end != 0)
	return false;

      /* Check if the current thread is blocked on an incomplete
	 step-over, interrupted by a random signal.  */
      if (ecs->event_thread->control.trap_expected
	  && ecs->event_thread->stop_signal () != GDB_SIGNAL_TRAP)
	{
	  infrun_debug_printf
	    ("need to finish step-over of [%s]",
	     ecs->event_thread->ptid.to_string ().c_str ());
	  keep_going (ecs);
	  return true;
	}

      /* Check if the current thread is blocked by a single-step
	 breakpoint of another thread.  */
      if (ecs->hit_singlestep_breakpoint)
	{
	  infrun_debug_printf ("need to step [%s] over single-step breakpoint",
			       ecs->ptid.to_string ().c_str ());
	  keep_going (ecs);
	  return true;
	}

      /* If this thread needs yet another step-over (e.g., stepping
	 through a delay slot), do it first before moving on to
	 another thread.  */
      if (thread_still_needs_step_over (ecs->event_thread))
	{
	  infrun_debug_printf
	    ("thread [%s] still needs step-over",
	     ecs->event_thread->ptid.to_string ().c_str ());
	  keep_going (ecs);
	  return true;
	}

      /* If scheduler locking applies even if not stepping, there's no
	 need to walk over threads.  Above we've checked whether the
	 current thread is stepping.  If some other thread not the
	 event thread is stepping, then it must be that scheduler
	 locking is not in effect.  */
      if (schedlock_applies (ecs->event_thread))
	return false;

      /* Otherwise, we no longer expect a trap in the current thread.
	 Clear the trap_expected flag before switching back -- this is
	 what keep_going does as well, if we call it.  */
      ecs->event_thread->control.trap_expected = 0;

      /* Likewise, clear the signal if it should not be passed.  */
      if (!signal_program[ecs->event_thread->stop_signal ()])
	ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);

      /* If a stepped thread was found and restarted, just wait for
	 its next event; the current stop needs no further user-level
	 processing.  */
      if (restart_stepped_thread (ecs->target, ecs->ptid))
	{
	  prepare_to_wait (ecs);
	  return true;
	}

      /* No stepped thread was restarted; stay with the event thread
	 and let the caller handle the stop.  */
      switch_to_thread (ecs->event_thread);
    }

  return false;
}
7552
7553 /* Look for the thread that was stepping, and resume it.
7554 RESUME_TARGET / RESUME_PTID indicate the set of threads the caller
7555 is resuming. Return true if a thread was started, false
7556 otherwise. */
7557
7558 static bool
7559 restart_stepped_thread (process_stratum_target *resume_target,
7560 ptid_t resume_ptid)
7561 {
7562 /* Do all pending step-overs before actually proceeding with
7563 step/next/etc. */
7564 if (start_step_over ())
7565 return true;
7566
7567 for (thread_info *tp : all_threads_safe ())
7568 {
7569 if (tp->state == THREAD_EXITED)
7570 continue;
7571
7572 if (tp->has_pending_waitstatus ())
7573 continue;
7574
7575 /* Ignore threads of processes the caller is not
7576 resuming. */
7577 if (!sched_multi
7578 && (tp->inf->process_target () != resume_target
7579 || tp->inf->pid != resume_ptid.pid ()))
7580 continue;
7581
7582 if (tp->control.trap_expected)
7583 {
7584 infrun_debug_printf ("switching back to stepped thread (step-over)");
7585
7586 if (keep_going_stepped_thread (tp))
7587 return true;
7588 }
7589 }
7590
7591 for (thread_info *tp : all_threads_safe ())
7592 {
7593 if (tp->state == THREAD_EXITED)
7594 continue;
7595
7596 if (tp->has_pending_waitstatus ())
7597 continue;
7598
7599 /* Ignore threads of processes the caller is not
7600 resuming. */
7601 if (!sched_multi
7602 && (tp->inf->process_target () != resume_target
7603 || tp->inf->pid != resume_ptid.pid ()))
7604 continue;
7605
7606 /* Did we find the stepping thread? */
7607 if (tp->control.step_range_end)
7608 {
7609 infrun_debug_printf ("switching back to stepped thread (stepping)");
7610
7611 if (keep_going_stepped_thread (tp))
7612 return true;
7613 }
7614 }
7615
7616 return false;
7617 }
7618
7619 /* See infrun.h. */
7620
7621 void
7622 restart_after_all_stop_detach (process_stratum_target *proc_target)
7623 {
7624 /* Note we don't check target_is_non_stop_p() here, because the
7625 current inferior may no longer have a process_stratum target
7626 pushed, as we just detached. */
7627
7628 /* See if we have a THREAD_RUNNING thread that need to be
7629 re-resumed. If we have any thread that is already executing,
7630 then we don't need to resume the target -- it is already been
7631 resumed. With the remote target (in all-stop), it's even
7632 impossible to issue another resumption if the target is already
7633 resumed, until the target reports a stop. */
7634 for (thread_info *thr : all_threads (proc_target))
7635 {
7636 if (thr->state != THREAD_RUNNING)
7637 continue;
7638
7639 /* If we have any thread that is already executing, then we
7640 don't need to resume the target -- it is already been
7641 resumed. */
7642 if (thr->executing ())
7643 return;
7644
7645 /* If we have a pending event to process, skip resuming the
7646 target and go straight to processing it. */
7647 if (thr->resumed () && thr->has_pending_waitstatus ())
7648 return;
7649 }
7650
7651 /* Alright, we need to re-resume the target. If a thread was
7652 stepping, we need to restart it stepping. */
7653 if (restart_stepped_thread (proc_target, minus_one_ptid))
7654 return;
7655
7656 /* Otherwise, find the first THREAD_RUNNING thread and resume
7657 it. */
7658 for (thread_info *thr : all_threads (proc_target))
7659 {
7660 if (thr->state != THREAD_RUNNING)
7661 continue;
7662
7663 execution_control_state ecs;
7664 reset_ecs (&ecs, thr);
7665 switch_to_thread (thr);
7666 keep_going (&ecs);
7667 return;
7668 }
7669 }
7670
7671 /* Set a previously stepped thread back to stepping. Returns true on
7672 success, false if the resume is not possible (e.g., the thread
7673 vanished). */
7674
static bool
keep_going_stepped_thread (struct thread_info *tp)
{
  frame_info_ptr frame;
  struct execution_control_state ecss;
  struct execution_control_state *ecs = &ecss;

  /* If the stepping thread exited, then don't try to switch back and
     resume it, which could fail in several different ways depending
     on the target.  Instead, just keep going.

     We can find a stepping dead thread in the thread list in two
     cases:

     - The target supports thread exit events, and when the target
       tries to delete the thread from the thread list, inferior_ptid
       pointed at the exiting thread.  In such case, calling
       delete_thread does not really remove the thread from the list;
       instead, the thread is left listed, with 'exited' state.

     - The target's debug interface does not support thread exit
       events, and so we have no idea whatsoever if the previously
       stepping thread is still alive.  For that reason, we need to
       synchronously query the target now.  */

  if (tp->state == THREAD_EXITED || !target_thread_alive (tp->ptid))
    {
      infrun_debug_printf ("not resuming previously stepped thread, it has "
			   "vanished");

      delete_thread (tp);
      return false;
    }

  infrun_debug_printf ("resuming previously stepped thread");

  reset_ecs (ecs, tp);
  switch_to_thread (tp);

  /* Refresh the cached stop PC; the thread may have moved since the
     event that made us switch away from it.  */
  tp->set_stop_pc (regcache_read_pc (get_thread_regcache (tp)));
  frame = get_current_frame ();

  /* If the PC of the thread we were trying to single-step has
     changed, then that thread has trapped or been signaled, but the
     event has not been reported to GDB yet.  Re-poll the target
     looking for this particular thread's event (i.e. temporarily
     enable schedlock) by:

     - setting a break at the current PC
     - resuming that particular thread, only (by setting trap
       expected)

     This prevents us continuously moving the single-step breakpoint
     forward, one instruction at a time, overstepping.  */

  if (tp->stop_pc () != tp->prev_pc)
    {
      ptid_t resume_ptid;

      infrun_debug_printf ("expected thread advanced also (%s -> %s)",
			   paddress (target_gdbarch (), tp->prev_pc),
			   paddress (target_gdbarch (), tp->stop_pc ()));

      /* Clear the info of the previous step-over, as it's no longer
	 valid (if the thread was trying to step over a breakpoint, it
	 has already succeeded).  It's what keep_going would do too,
	 if we called it.  Do this before trying to insert the sss
	 breakpoint, otherwise if we were previously trying to step
	 over this exact address in another thread, the breakpoint is
	 skipped.  */
      clear_step_over_info ();
      tp->control.trap_expected = 0;

      /* Software single-step breakpoint at the current PC, so the
	 pending event for this thread gets collected.  */
      insert_single_step_breakpoint (get_frame_arch (frame),
				     get_frame_address_space (frame),
				     tp->stop_pc ());

      tp->set_resumed (true);
      resume_ptid = internal_resume_ptid (tp->control.stepping_command);
      do_target_resume (resume_ptid, false, GDB_SIGNAL_0);
    }
  else
    {
      infrun_debug_printf ("expected thread still hasn't advanced");

      /* The thread hasn't moved; set it going again through the
	 normal keep-going path.  */
      keep_going_pass_signal (ecs);
    }

  return true;
}
7765
7766 /* Is thread TP in the middle of (software or hardware)
7767 single-stepping? (Note the result of this function must never be
7768 passed directly as target_resume's STEP parameter.) */
7769
7770 static bool
7771 currently_stepping (struct thread_info *tp)
7772 {
7773 return ((tp->control.step_range_end
7774 && tp->control.step_resume_breakpoint == nullptr)
7775 || tp->control.trap_expected
7776 || tp->stepped_breakpoint
7777 || bpstat_should_step ());
7778 }
7779
7780 /* Inferior has stepped into a subroutine call with source code that
7781 we should not step over. Do step to the first line of code in
7782 it. */
7783
static void
handle_step_into_function (struct gdbarch *gdbarch,
			   struct execution_control_state *ecs)
{
  fill_in_stop_func (gdbarch, ecs);

  /* If the function we stepped into has real (non-assembly) debug
     info, advance the nominal function start past the prologue.  */
  compunit_symtab *cust
    = find_pc_compunit_symtab (ecs->event_thread->stop_pc ());
  if (cust != nullptr && cust->language () != language_asm)
    ecs->stop_func_start
      = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);

  symtab_and_line stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
  /* Use the step_resume_break to step until the end of the prologue,
     even if that involves jumps (as it seems to on the vax under
     4.2).  */
  /* If the prologue ends in the middle of a source line, continue to
     the end of that source line (if it is still within the function).
     Otherwise, just go to end of prologue.  */
  if (stop_func_sal.end
      && stop_func_sal.pc != ecs->stop_func_start
      && stop_func_sal.end < ecs->stop_func_end)
    ecs->stop_func_start = stop_func_sal.end;

  /* Architectures which require breakpoint adjustment might not be able
     to place a breakpoint at the computed address.  If so, the test
     ``ecs->stop_func_start == stop_pc'' will never succeed.  Adjust
     ecs->stop_func_start to an address at which a breakpoint may be
     legitimately placed.

     Note:  kevinb/2004-01-19: On FR-V, if this adjustment is not
     made, GDB will enter an infinite loop when stepping through
     optimized code consisting of VLIW instructions which contain
     subinstructions corresponding to different source lines.  On
     FR-V, it's not permitted to place a breakpoint on any but the
     first subinstruction of a VLIW instruction.  When a breakpoint is
     set, GDB will adjust the breakpoint address to the beginning of
     the VLIW instruction.  Thus, we need to make the corresponding
     adjustment here when computing the stop address.  */

  if (gdbarch_adjust_breakpoint_address_p (gdbarch))
    {
      ecs->stop_func_start
	= gdbarch_adjust_breakpoint_address (gdbarch,
					     ecs->stop_func_start);
    }

  if (ecs->stop_func_start == ecs->event_thread->stop_pc ())
    {
      /* We are already there: stop now.  */
      end_stepping_range (ecs);
      return;
    }
  else
    {
      /* Put the step-breakpoint there and go until there.  */
      symtab_and_line sr_sal;
      sr_sal.pc = ecs->stop_func_start;
      sr_sal.section = find_pc_overlay (ecs->stop_func_start);
      sr_sal.pspace = get_frame_program_space (get_current_frame ());

      /* Do not specify what the fp should be when we stop since on
	 some machines the prologue is where the new fp value is
	 established.  */
      insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);

      /* And make sure stepping stops right away then.  */
      ecs->event_thread->control.step_range_end
	= ecs->event_thread->control.step_range_start;
    }
  keep_going (ecs);
}
7856
7857 /* Inferior has stepped backward into a subroutine call with source
7858 code that we should not step over. Do step to the beginning of the
7859 last line of code in it. */
7860
7861 static void
7862 handle_step_into_function_backward (struct gdbarch *gdbarch,
7863 struct execution_control_state *ecs)
7864 {
7865 struct compunit_symtab *cust;
7866 struct symtab_and_line stop_func_sal;
7867
7868 fill_in_stop_func (gdbarch, ecs);
7869
7870 cust = find_pc_compunit_symtab (ecs->event_thread->stop_pc ());
7871 if (cust != nullptr && cust->language () != language_asm)
7872 ecs->stop_func_start
7873 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
7874
7875 stop_func_sal = find_pc_line (ecs->event_thread->stop_pc (), 0);
7876
7877 /* OK, we're just going to keep stepping here. */
7878 if (stop_func_sal.pc == ecs->event_thread->stop_pc ())
7879 {
7880 /* We're there already. Just stop stepping now. */
7881 end_stepping_range (ecs);
7882 }
7883 else
7884 {
7885 /* Else just reset the step range and keep going.
7886 No step-resume breakpoint, they don't work for
7887 epilogues, which can have multiple entry paths. */
7888 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
7889 ecs->event_thread->control.step_range_end = stop_func_sal.end;
7890 keep_going (ecs);
7891 }
7892 return;
7893 }
7894
7895 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
   This is used both to skip over functions and to skip over code.  */
7897
7898 static void
7899 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
7900 struct symtab_and_line sr_sal,
7901 struct frame_id sr_id,
7902 enum bptype sr_type)
7903 {
7904 /* There should never be more than one step-resume or longjmp-resume
7905 breakpoint per thread, so we should never be setting a new
7906 step_resume_breakpoint when one is already active. */
7907 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == nullptr);
7908 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
7909
7910 infrun_debug_printf ("inserting step-resume breakpoint at %s",
7911 paddress (gdbarch, sr_sal.pc));
7912
7913 inferior_thread ()->control.step_resume_breakpoint
7914 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type).release ();
7915 }
7916
7917 void
7918 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
7919 struct symtab_and_line sr_sal,
7920 struct frame_id sr_id)
7921 {
7922 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
7923 sr_sal, sr_id,
7924 bp_step_resume);
7925 }
7926
7927 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
7928 This is used to skip a potential signal handler.
7929
7930 This is called with the interrupted function's frame. The signal
7931 handler, when it returns, will resume the interrupted function at
7932 RETURN_FRAME.pc. */
7933
7934 static void
7935 insert_hp_step_resume_breakpoint_at_frame (frame_info_ptr return_frame)
7936 {
7937 gdb_assert (return_frame != nullptr);
7938
7939 struct gdbarch *gdbarch = get_frame_arch (return_frame);
7940
7941 symtab_and_line sr_sal;
7942 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
7943 sr_sal.section = find_pc_overlay (sr_sal.pc);
7944 sr_sal.pspace = get_frame_program_space (return_frame);
7945
7946 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
7947 get_stack_frame_id (return_frame),
7948 bp_hp_step_resume);
7949 }
7950
7951 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
7952 is used to skip a function after stepping into it (for "next" or if
7953 the called function has no debugging information).
7954
7955 The current function has almost always been reached by single
7956 stepping a call or return instruction. NEXT_FRAME belongs to the
7957 current function, and the breakpoint will be set at the caller's
7958 resume address.
7959
7960 This is a separate function rather than reusing
7961 insert_hp_step_resume_breakpoint_at_frame in order to avoid
7962 get_prev_frame, which may stop prematurely (see the implementation
7963 of frame_unwind_caller_id for an example). */
7964
7965 static void
7966 insert_step_resume_breakpoint_at_caller (frame_info_ptr next_frame)
7967 {
7968 /* We shouldn't have gotten here if we don't know where the call site
7969 is. */
7970 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
7971
7972 struct gdbarch *gdbarch = frame_unwind_caller_arch (next_frame);
7973
7974 symtab_and_line sr_sal;
7975 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
7976 frame_unwind_caller_pc (next_frame));
7977 sr_sal.section = find_pc_overlay (sr_sal.pc);
7978 sr_sal.pspace = frame_unwind_program_space (next_frame);
7979
7980 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
7981 frame_unwind_caller_id (next_frame));
7982 }
7983
7984 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
7985 new breakpoint at the target of a jmp_buf. The handling of
7986 longjmp-resume uses the same mechanisms used for handling
7987 "step-resume" breakpoints. */
7988
7989 static void
7990 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
7991 {
7992 /* There should never be more than one longjmp-resume breakpoint per
7993 thread, so we should never be setting a new
7994 longjmp_resume_breakpoint when one is already active. */
7995 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == nullptr);
7996
7997 infrun_debug_printf ("inserting longjmp-resume breakpoint at %s",
7998 paddress (gdbarch, pc));
7999
8000 inferior_thread ()->control.exception_resume_breakpoint =
8001 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume).release ();
8002 }
8003
8004 /* Insert an exception resume breakpoint. TP is the thread throwing
8005 the exception. The block B is the block of the unwinder debug hook
8006 function. FRAME is the frame corresponding to the call to this
8007 function. SYM is the symbol of the function argument holding the
8008 target PC of the exception. */
8009
8010 static void
8011 insert_exception_resume_breakpoint (struct thread_info *tp,
8012 const struct block *b,
8013 frame_info_ptr frame,
8014 struct symbol *sym)
8015 {
8016 try
8017 {
8018 struct block_symbol vsym;
8019 struct value *value;
8020 CORE_ADDR handler;
8021 struct breakpoint *bp;
8022
8023 vsym = lookup_symbol_search_name (sym->search_name (),
8024 b, VAR_DOMAIN);
8025 value = read_var_value (vsym.symbol, vsym.block, frame);
8026 /* If the value was optimized out, revert to the old behavior. */
8027 if (! value_optimized_out (value))
8028 {
8029 handler = value_as_address (value);
8030
8031 infrun_debug_printf ("exception resume at %lx",
8032 (unsigned long) handler);
8033
8034 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
8035 handler,
8036 bp_exception_resume).release ();
8037
8038 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
8039 frame = nullptr;
8040
8041 bp->thread = tp->global_num;
8042 inferior_thread ()->control.exception_resume_breakpoint = bp;
8043 }
8044 }
8045 catch (const gdb_exception_error &e)
8046 {
8047 /* We want to ignore errors here. */
8048 }
8049 }
8050
8051 /* A helper for check_exception_resume that sets an
8052 exception-breakpoint based on a SystemTap probe. */
8053
8054 static void
8055 insert_exception_resume_from_probe (struct thread_info *tp,
8056 const struct bound_probe *probe,
8057 frame_info_ptr frame)
8058 {
8059 struct value *arg_value;
8060 CORE_ADDR handler;
8061 struct breakpoint *bp;
8062
8063 arg_value = probe_safe_evaluate_at_pc (frame, 1);
8064 if (!arg_value)
8065 return;
8066
8067 handler = value_as_address (arg_value);
8068
8069 infrun_debug_printf ("exception resume at %s",
8070 paddress (probe->objfile->arch (), handler));
8071
8072 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
8073 handler, bp_exception_resume).release ();
8074 bp->thread = tp->global_num;
8075 inferior_thread ()->control.exception_resume_breakpoint = bp;
8076 }
8077
8078 /* This is called when an exception has been intercepted. Check to
8079 see whether the exception's destination is of interest, and if so,
8080 set an exception resume breakpoint there. */
8081
static void
check_exception_resume (struct execution_control_state *ecs,
			frame_info_ptr frame)
{
  struct bound_probe probe;
  struct symbol *func;

  /* First see if this exception unwinding breakpoint was set via a
     SystemTap probe point.  If so, the probe has two arguments: the
     CFA and the HANDLER.  We ignore the CFA, extract the handler, and
     set a breakpoint there.  */
  probe = find_probe_by_pc (get_frame_pc (frame));
  if (probe.prob)
    {
      insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
      return;
    }

  /* No probe; fall back to inspecting the arguments of the unwinder
     hook function in FRAME.  */
  func = get_frame_function (frame);
  if (!func)
    return;

  try
    {
      const struct block *b;
      struct block_iterator iter;
      struct symbol *sym;
      int argno = 0;

      /* The exception breakpoint is a thread-specific breakpoint on
	 the unwinder's debug hook, declared as:

	 void _Unwind_DebugHook (void *cfa, void *handler);

	 The CFA argument indicates the frame to which control is
	 about to be transferred.  HANDLER is the destination PC.

	 We ignore the CFA and set a temporary breakpoint at HANDLER.
	 This is not extremely efficient but it avoids issues in gdb
	 with computing the DWARF CFA, and it also works even in weird
	 cases such as throwing an exception from inside a signal
	 handler.  */

      b = func->value_block ();
      ALL_BLOCK_SYMBOLS (b, iter, sym)
	{
	  if (!sym->is_argument ())
	    continue;

	  /* Skip the first argument (the CFA); the second argument is
	     the handler.  */
	  if (argno == 0)
	    ++argno;
	  else
	    {
	      insert_exception_resume_breakpoint (ecs->event_thread,
						  b, frame, sym);
	      break;
	    }
	}
    }
  catch (const gdb_exception_error &e)
    {
      /* Lookup/read errors here are non-fatal; we simply don't set
	 the resume breakpoint.  */
    }
}
8145
8146 static void
8147 stop_waiting (struct execution_control_state *ecs)
8148 {
8149 infrun_debug_printf ("stop_waiting");
8150
8151 /* Let callers know we don't want to wait for the inferior anymore. */
8152 ecs->wait_some_more = 0;
8153 }
8154
8155 /* Like keep_going, but passes the signal to the inferior, even if the
8156 signal is set to nopass. */
8157
static void
keep_going_pass_signal (struct execution_control_state *ecs)
{
  gdb_assert (ecs->event_thread->ptid == inferior_ptid);
  gdb_assert (!ecs->event_thread->resumed ());

  /* Save the pc before execution, to compare with pc after stop.  */
  ecs->event_thread->prev_pc
    = regcache_read_pc_protected (get_thread_regcache (ecs->event_thread));

  if (ecs->event_thread->control.trap_expected)
    {
      struct thread_info *tp = ecs->event_thread;

      infrun_debug_printf ("%s has trap_expected set, "
			   "resuming to collect trap",
			   tp->ptid.to_string ().c_str ());

      /* We haven't yet gotten our trap, and either: intercepted a
	 non-signal event (e.g., a fork); or took a signal which we
	 are supposed to pass through to the inferior.  Simply
	 continue.  */
      resume (ecs->event_thread->stop_signal ());
    }
  else if (step_over_info_valid_p ())
    {
      /* Another thread is stepping over a breakpoint in-line.  If
	 this thread needs a step-over too, queue the request.  In
	 either case, this resume must be deferred for later.  */
      struct thread_info *tp = ecs->event_thread;

      if (ecs->hit_singlestep_breakpoint
	  || thread_still_needs_step_over (tp))
	{
	  infrun_debug_printf ("step-over already in progress: "
			       "step-over for %s deferred",
			       tp->ptid.to_string ().c_str ());
	  global_thread_step_over_chain_enqueue (tp);
	}
      else
	infrun_debug_printf ("step-over in progress: resume of %s deferred",
			     tp->ptid.to_string ().c_str ());
    }
  else
    {
      struct regcache *regcache = get_current_regcache ();
      int remove_bp;
      int remove_wps;
      step_over_what step_what;

      /* Either the trap was not expected, but we are continuing
	 anyway (if we got a signal, the user asked it be passed to
	 the child)
	 -- or --
	 We got our expected trap, but decided we should resume from
	 it.

	 We're going to run this baby now!

	 Note that insert_breakpoints won't try to re-insert
	 already inserted breakpoints.  Therefore, we don't
	 care if breakpoints were already inserted, or not.  */

      /* If we need to step over a breakpoint, and we're not using
	 displaced stepping to do so, insert all breakpoints
	 (watchpoints, etc.) but the one we're stepping over, step one
	 instruction, and then re-insert the breakpoint when that step
	 is finished.  */

      step_what = thread_still_needs_step_over (ecs->event_thread);

      remove_bp = (ecs->hit_singlestep_breakpoint
		   || (step_what & STEP_OVER_BREAKPOINT));
      remove_wps = (step_what & STEP_OVER_WATCHPOINT);

      /* We can't use displaced stepping if we need to step past a
	 watchpoint.  The instruction copied to the scratch pad would
	 still trigger the watchpoint.  */
      if (remove_bp
	  && (remove_wps || !use_displaced_stepping (ecs->event_thread)))
	{
	  /* In-line step-over: record which breakpoint/watchpoint to
	     leave removed while this thread steps.  */
	  set_step_over_info (regcache->aspace (),
			      regcache_read_pc (regcache), remove_wps,
			      ecs->event_thread->global_num);
	}
      else if (remove_wps)
	set_step_over_info (nullptr, 0, remove_wps, -1);

      /* If we now need to do an in-line step-over, we need to stop
	 all other threads.  Note this must be done before
	 insert_breakpoints below, because that removes the breakpoint
	 we're about to step over, otherwise other threads could miss
	 it.  */
      if (step_over_info_valid_p () && target_is_non_stop_p ())
	stop_all_threads ("starting in-line step-over");

      /* Stop stepping if inserting breakpoints fails.  */
      try
	{
	  insert_breakpoints ();
	}
      catch (const gdb_exception_error &e)
	{
	  exception_print (gdb_stderr, e);
	  stop_waiting (ecs);
	  clear_step_over_info ();
	  return;
	}

      ecs->event_thread->control.trap_expected = (remove_bp || remove_wps);

      resume (ecs->event_thread->stop_signal ());
    }

  /* In all the resumed/deferred cases above, go back to waiting for
     the next event.  */
  prepare_to_wait (ecs);
}
8274
8275 /* Called when we should continue running the inferior, because the
8276 current event doesn't cause a user visible stop. This does the
8277 resuming part; waiting for the next event is done elsewhere. */
8278
8279 static void
8280 keep_going (struct execution_control_state *ecs)
8281 {
8282 if (ecs->event_thread->control.trap_expected
8283 && ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP)
8284 ecs->event_thread->control.trap_expected = 0;
8285
8286 if (!signal_program[ecs->event_thread->stop_signal ()])
8287 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
8288 keep_going_pass_signal (ecs);
8289 }
8290
8291 /* This function normally comes after a resume, before
8292 handle_inferior_event exits. It takes care of any last bits of
8293 housekeeping, and sets the all-important wait_some_more flag. */
8294
8295 static void
8296 prepare_to_wait (struct execution_control_state *ecs)
8297 {
8298 infrun_debug_printf ("prepare_to_wait");
8299
8300 ecs->wait_some_more = 1;
8301
8302 /* If the target can't async, emulate it by marking the infrun event
8303 handler such that as soon as we get back to the event-loop, we
8304 immediately end up in fetch_inferior_event again calling
8305 target_wait. */
8306 if (!target_can_async_p ())
8307 mark_infrun_async_event_handler ();
8308 }
8309
8310 /* We are done with the step range of a step/next/si/ni command.
8311 Called once for each n of a "step n" operation. */
8312
static void
end_stepping_range (struct execution_control_state *ecs)
{
  /* Record that the range step finished normally, then present the
     stop to the user.  */
  ecs->event_thread->control.stop_step = 1;
  stop_waiting (ecs);
}
8319
8320 /* Several print_*_reason functions to print why the inferior has stopped.
8321 We always print something when the inferior exits, or receives a signal.
8322 The rest of the cases are dealt with later on in normal_stop and
8323 print_it_typical. Ideally there should be a call to one of these
   print_*_reason functions from handle_inferior_event each time
8325 stop_waiting is called.
8326
8327 Note that we don't call these directly, instead we delegate that to
8328 the interpreters, through observers. Interpreters then call these
8329 with whatever uiout is right. */
8330
8331 void
8332 print_end_stepping_range_reason (struct ui_out *uiout)
8333 {
8334 /* For CLI-like interpreters, print nothing. */
8335
8336 if (uiout->is_mi_like_p ())
8337 {
8338 uiout->field_string ("reason",
8339 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
8340 }
8341 }
8342
8343 void
8344 print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
8345 {
8346 annotate_signalled ();
8347 if (uiout->is_mi_like_p ())
8348 uiout->field_string
8349 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
8350 uiout->text ("\nProgram terminated with signal ");
8351 annotate_signal_name ();
8352 uiout->field_string ("signal-name",
8353 gdb_signal_to_name (siggnal));
8354 annotate_signal_name_end ();
8355 uiout->text (", ");
8356 annotate_signal_string ();
8357 uiout->field_string ("signal-meaning",
8358 gdb_signal_to_string (siggnal));
8359 annotate_signal_string_end ();
8360 uiout->text (".\n");
8361 uiout->text ("The program no longer exists.\n");
8362 }
8363
8364 void
8365 print_exited_reason (struct ui_out *uiout, int exitstatus)
8366 {
8367 struct inferior *inf = current_inferior ();
8368 std::string pidstr = target_pid_to_str (ptid_t (inf->pid));
8369
8370 annotate_exited (exitstatus);
8371 if (exitstatus)
8372 {
8373 if (uiout->is_mi_like_p ())
8374 uiout->field_string ("reason", async_reason_lookup (EXEC_ASYNC_EXITED));
8375 std::string exit_code_str
8376 = string_printf ("0%o", (unsigned int) exitstatus);
8377 uiout->message ("[Inferior %s (%s) exited with code %pF]\n",
8378 plongest (inf->num), pidstr.c_str (),
8379 string_field ("exit-code", exit_code_str.c_str ()));
8380 }
8381 else
8382 {
8383 if (uiout->is_mi_like_p ())
8384 uiout->field_string
8385 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
8386 uiout->message ("[Inferior %s (%s) exited normally]\n",
8387 plongest (inf->num), pidstr.c_str ());
8388 }
8389 }
8390
void
print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
{
  struct thread_info *thr = inferior_thread ();

  annotate_signal ();

  /* For MI the "reason" field emitted below already identifies the
     event; the "Thread"/"Program" prefix is CLI-only output.  */
  if (uiout->is_mi_like_p ())
    ;
  else if (show_thread_that_caused_stop ())
    {
      uiout->text ("\nThread ");
      uiout->field_string ("thread-id", print_thread_id (thr));

      const char *name = thread_name (thr);
      if (name != nullptr)
	{
	  uiout->text (" \"");
	  uiout->field_string ("name", name);
	  uiout->text ("\"");
	}
    }
  else
    uiout->text ("\nProgram");

  if (siggnal == GDB_SIGNAL_0 && !uiout->is_mi_like_p ())
    uiout->text (" stopped");
  else
    {
      uiout->text (" received signal ");
      annotate_signal_name ();
      if (uiout->is_mi_like_p ())
	uiout->field_string
	  ("reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
      uiout->field_string ("signal-name", gdb_signal_to_name (siggnal));
      annotate_signal_name_end ();
      uiout->text (", ");
      annotate_signal_string ();
      uiout->field_string ("signal-meaning", gdb_signal_to_string (siggnal));

      /* Let the architecture report additional information about the
	 received signal, if it supports that.  */
      struct regcache *regcache = get_current_regcache ();
      struct gdbarch *gdbarch = regcache->arch ();
      if (gdbarch_report_signal_info_p (gdbarch))
	gdbarch_report_signal_info (gdbarch, uiout, siggnal);

      annotate_signal_string_end ();
    }
  uiout->text (".\n");
}
8440
void
print_no_history_reason (struct ui_out *uiout)
{
  /* Reverse execution ran out of recorded history.  */
  uiout->text ("\nNo more reverse-execution history.\n");
}
8446
8447 /* Print current location without a level number, if we have changed
8448 functions or hit a breakpoint. Print source line if we have one.
8449 bpstat_print contains the logic deciding in detail what to print,
8450 based on the event(s) that just occurred. */
8451
static void
print_stop_location (const target_waitstatus &ws)
{
  int bpstat_ret;
  enum print_what source_flag;
  int do_frame_printing = 1;
  struct thread_info *tp = inferior_thread ();

  /* Ask the breakpoint machinery how much to print; the PRINT_* code
     it returns is mapped to a frame-printing mode below.  */
  bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws.kind ());
  switch (bpstat_ret)
    {
    case PRINT_UNKNOWN:
      /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
	 should) carry around the function and does (or should) use
	 that when doing a frame comparison.  */
      if (tp->control.stop_step
	  && (tp->control.step_frame_id
	      == get_frame_id (get_current_frame ()))
	  && (tp->control.step_start_function
	      == find_pc_function (tp->stop_pc ())))
	{
	  /* Finished step, just print source line.  */
	  source_flag = SRC_LINE;
	}
      else
	{
	  /* Print location and source line.  */
	  source_flag = SRC_AND_LOC;
	}
      break;
    case PRINT_SRC_AND_LOC:
      /* Print location and source line.  */
      source_flag = SRC_AND_LOC;
      break;
    case PRINT_SRC_ONLY:
      source_flag = SRC_LINE;
      break;
    case PRINT_NOTHING:
      /* Something bogus.  */
      source_flag = SRC_LINE;
      do_frame_printing = 0;
      break;
    default:
      internal_error (_("Unknown value."));
    }

  /* The behavior of this routine with respect to the source
     flag is:
     SRC_LINE: Print only source line
     LOCATION: Print only location
     SRC_AND_LOC: Print location and source line.  */
  if (do_frame_printing)
    print_stack_frame (get_selected_frame (nullptr), 0, source_flag, 1);
}
8506
8507 /* See infrun.h. */
8508
void
print_stop_event (struct ui_out *uiout, bool displays)
{
  struct target_waitstatus last;
  struct thread_info *tp;

  get_last_target_status (nullptr, nullptr, &last);

  {
    /* Route the location/display printing through UIOUT.  */
    scoped_restore save_uiout = make_scoped_restore (&current_uiout, uiout);

    print_stop_location (last);

    /* Display the auto-display expressions.  */
    if (displays)
      do_displays ();
  }

  tp = inferior_thread ();
  /* If the thread's command state machine has finished, also print
     the command's return value, if there is one.  */
  if (tp->thread_fsm () != nullptr
      && tp->thread_fsm ()->finished_p ())
    {
      struct return_value_info *rv;

      rv = tp->thread_fsm ()->return_value ();
      if (rv != nullptr)
	print_return_value (uiout, rv);
    }
}
8538
8539 /* See infrun.h. */
8540
8541 void
8542 maybe_remove_breakpoints (void)
8543 {
8544 if (!breakpoints_should_be_inserted_now () && target_has_execution ())
8545 {
8546 if (remove_breakpoints ())
8547 {
8548 target_terminal::ours_for_output ();
8549 gdb_printf (_("Cannot remove breakpoints because "
8550 "program is no longer writable.\nFurther "
8551 "execution is probably impossible.\n"));
8552 }
8553 }
8554 }
8555
8556 /* The execution context that just caused a normal stop. */
8557
struct stop_context
{
  stop_context ();

  DISABLE_COPY_AND_ASSIGN (stop_context);

  /* Return true if the current execution context no longer matches
     the one captured when this object was constructed.  */
  bool changed () const;

  /* The stop ID, as returned by get_stop_id () at capture time.  */
  ULONGEST stop_id;

  /* The event PTID.  */

  ptid_t ptid;

  /* If stopped for a thread event, this is the thread that caused the
     stop.  Held as a strong reference so the thread can't be deleted
     while this context is live.  */
  thread_info_ref thread;

  /* The inferior that caused the stop.  */
  int inf_num;
};
8580
8581 /* Initializes a new stop context. If stopped for a thread event, this
8582 takes a strong reference to the thread. */
8583
8584 stop_context::stop_context ()
8585 {
8586 stop_id = get_stop_id ();
8587 ptid = inferior_ptid;
8588 inf_num = current_inferior ()->num;
8589
8590 if (inferior_ptid != null_ptid)
8591 {
8592 /* Take a strong reference so that the thread can't be deleted
8593 yet. */
8594 thread = thread_info_ref::new_reference (inferior_thread ());
8595 }
8596 }
8597
8598 /* Return true if the current context no longer matches the saved stop
8599 context. */
8600
8601 bool
8602 stop_context::changed () const
8603 {
8604 if (ptid != inferior_ptid)
8605 return true;
8606 if (inf_num != current_inferior ()->num)
8607 return true;
8608 if (thread != nullptr && thread->state != THREAD_STOPPED)
8609 return true;
8610 if (get_stop_id () != stop_id)
8611 return true;
8612 return false;
8613 }
8614
8615 /* See infrun.h. */
8616
int
normal_stop (void)
{
  struct target_waitstatus last;

  get_last_target_status (nullptr, nullptr, &last);

  /* Start a new stop epoch.  stop_context instances (like
     SAVED_CONTEXT below) compare their saved id against
     get_stop_id () to detect whether the target was re-resumed.  */
  new_stop_id ();

  /* If an exception is thrown from this point on, make sure to
     propagate GDB's knowledge of the executing state to the
     frontend/user running state.  A QUIT is an easy exception to see
     here, so do this before any filtered output.  */

  ptid_t finish_ptid = null_ptid;

  if (!non_stop)
    finish_ptid = minus_one_ptid;
  else if (last.kind () == TARGET_WAITKIND_SIGNALLED
	   || last.kind () == TARGET_WAITKIND_EXITED)
    {
      /* On some targets, we may still have live threads in the
	 inferior when we get a process exit event.  E.g., for
	 "checkpoint", when the current checkpoint/fork exits,
	 linux-fork.c automatically switches to another fork from
	 within target_mourn_inferior.  */
      if (inferior_ptid != null_ptid)
	finish_ptid = ptid_t (inferior_ptid.pid ());
    }
  else if (last.kind () != TARGET_WAITKIND_NO_RESUMED)
    finish_ptid = inferior_ptid;

  gdb::optional<scoped_finish_thread_state> maybe_finish_thread_state;
  if (finish_ptid != null_ptid)
    {
      maybe_finish_thread_state.emplace
	(user_visible_resume_target (finish_ptid), finish_ptid);
    }

  /* As we're presenting a stop, and potentially removing breakpoints,
     update the thread list so we can tell whether there are threads
     running on the target.  With target remote, for example, we can
     only learn about new threads when we explicitly update the thread
     list.  Do this before notifying the interpreters about signal
     stops, end of stepping ranges, etc., so that the "new thread"
     output is emitted before e.g., "Program received signal FOO",
     instead of after.  */
  update_thread_list ();

  if (last.kind () == TARGET_WAITKIND_STOPPED && stopped_by_random_signal)
    gdb::observers::signal_received.notify (inferior_thread ()->stop_signal ());

  /* As with the notification of thread events, we want to delay
     notifying the user that we've switched thread context until
     the inferior actually stops.

     There's no point in saying anything if the inferior has exited.
     Note that SIGNALLED here means "exited with a signal", not
     "received a signal".

     Also skip saying anything in non-stop mode.  In that mode, as we
     don't want GDB to switch threads behind the user's back, to avoid
     races where the user is typing a command to apply to thread x,
     but GDB switches to thread y before the user finishes entering
     the command, fetch_inferior_event installs a cleanup to restore
     the current thread back to the thread the user had selected right
     after this event is handled, so we're not really switching, only
     informing of a stop.  */
  if (!non_stop
      && previous_inferior_ptid != inferior_ptid
      && target_has_execution ()
      && last.kind () != TARGET_WAITKIND_SIGNALLED
      && last.kind () != TARGET_WAITKIND_EXITED
      && last.kind () != TARGET_WAITKIND_NO_RESUMED)
    {
      SWITCH_THRU_ALL_UIS ()
	{
	  target_terminal::ours_for_output ();
	  gdb_printf (_("[Switching to %s]\n"),
		      target_pid_to_str (inferior_ptid).c_str ());
	  annotate_thread_changed ();
	}
      previous_inferior_ptid = inferior_ptid;
    }

  if (last.kind () == TARGET_WAITKIND_NO_RESUMED)
    {
      SWITCH_THRU_ALL_UIS ()
	if (current_ui->prompt_state == PROMPT_BLOCKED)
	  {
	    target_terminal::ours_for_output ();
	    gdb_printf (_("No unwaited-for children left.\n"));
	  }
    }

  /* Note: this depends on the update_thread_list call above.  */
  maybe_remove_breakpoints ();

  /* If an auto-display called a function and that got a signal,
     delete that auto-display to avoid an infinite recursion.  */

  if (stopped_by_random_signal)
    disable_current_display ();

  /* Re-enable stdin on all UIs; it was disabled while the target was
     running.  */
  SWITCH_THRU_ALL_UIS ()
    {
      async_enable_stdin ();
    }

  /* Let the user/frontend see the threads as stopped.  */
  maybe_finish_thread_state.reset ();

  /* Select innermost stack frame - i.e., current frame is frame 0,
     and current location is based on that.  Handle the case where the
     dummy call is returning after being stopped.  E.g. the dummy call
     previously hit a breakpoint.  (If the dummy call returns
     normally, we won't reach here.)  Do this before the stop hook is
     run, so that it doesn't get to see the temporary dummy frame,
     which is not where we'll present the stop.  */
  if (has_stack_frames ())
    {
      if (stop_stack_dummy == STOP_STACK_DUMMY)
	{
	  /* Pop the empty frame that contains the stack dummy.  This
	     also restores inferior state prior to the call (struct
	     infcall_suspend_state).  */
	  frame_info_ptr frame = get_current_frame ();

	  gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
	  frame_pop (frame);
	  /* frame_pop calls reinit_frame_cache as the last thing it
	     does which means there's now no selected frame.  */
	}

      select_frame (get_current_frame ());

      /* Set the current source location.  */
      set_current_sal_from_frame (get_current_frame ());
    }

  /* Look up the hook_stop and run it (CLI internally handles problem
     of stop_command's pre-hook not existing).  */
  stop_context saved_context;

  try
    {
      execute_cmd_pre_hook (stop_command);
    }
  catch (const gdb_exception &ex)
    {
      exception_fprintf (gdb_stderr, ex,
			 "Error while running hook_stop:\n");
    }

  /* If the stop hook resumes the target, then there's no point in
     trying to notify about the previous stop; its context is
     gone.  Likewise if the command switches thread or inferior --
     the observers would print a stop for the wrong
     thread/inferior.  */
  if (saved_context.changed ())
    return 1;

  /* Notify observers about the stop.  This is where the interpreters
     print the stop event.  */
  if (inferior_ptid != null_ptid)
    gdb::observers::normal_stop.notify (inferior_thread ()->control.stop_bpstat,
					stop_print_frame);
  else
    gdb::observers::normal_stop.notify (nullptr, stop_print_frame);

  annotate_stopped ();

  if (target_has_execution ())
    {
      if (last.kind () != TARGET_WAITKIND_SIGNALLED
	  && last.kind () != TARGET_WAITKIND_EXITED
	  && last.kind () != TARGET_WAITKIND_NO_RESUMED)
	/* Delete the breakpoint we stopped at, if it wants to be deleted.
	   Delete any breakpoint that is to be deleted at the next stop.  */
	breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
    }

  return 0;
}
8801 \f
/* Return the current "stop" disposition for SIGNO: non-zero if GDB
   stops when the inferior receives that signal.  */

int
signal_stop_state (int signo)
{
  return signal_stop[signo];
}
8807
/* Return the current "print" disposition for SIGNO: non-zero if GDB
   mentions the signal when the inferior receives it.  */

int
signal_print_state (int signo)
{
  return signal_print[signo];
}
8813
/* Return the current "pass" disposition for SIGNO: non-zero if the
   signal is delivered to the program.  */

int
signal_pass_state (int signo)
{
  return signal_program[signo];
}
8819
8820 static void
8821 signal_cache_update (int signo)
8822 {
8823 if (signo == -1)
8824 {
8825 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
8826 signal_cache_update (signo);
8827
8828 return;
8829 }
8830
8831 signal_pass[signo] = (signal_stop[signo] == 0
8832 && signal_print[signo] == 0
8833 && signal_program[signo] == 1
8834 && signal_catch[signo] == 0);
8835 }
8836
8837 int
8838 signal_stop_update (int signo, int state)
8839 {
8840 int ret = signal_stop[signo];
8841
8842 signal_stop[signo] = state;
8843 signal_cache_update (signo);
8844 return ret;
8845 }
8846
8847 int
8848 signal_print_update (int signo, int state)
8849 {
8850 int ret = signal_print[signo];
8851
8852 signal_print[signo] = state;
8853 signal_cache_update (signo);
8854 return ret;
8855 }
8856
8857 int
8858 signal_pass_update (int signo, int state)
8859 {
8860 int ret = signal_program[signo];
8861
8862 signal_program[signo] = state;
8863 signal_cache_update (signo);
8864 return ret;
8865 }
8866
8867 /* Update the global 'signal_catch' from INFO and notify the
8868 target. */
8869
8870 void
8871 signal_catch_update (const unsigned int *info)
8872 {
8873 int i;
8874
8875 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
8876 signal_catch[i] = info[i] > 0;
8877 signal_cache_update (-1);
8878 target_pass_signals (signal_pass);
8879 }
8880
/* Print the column headers for the table emitted by sig_print_info
   (used by "info signals" and "handle").  */

static void
sig_print_header (void)
{
  gdb_printf (_("Signal Stop\tPrint\tPass "
		"to program\tDescription\n"));
}
8887
8888 static void
8889 sig_print_info (enum gdb_signal oursig)
8890 {
8891 const char *name = gdb_signal_to_name (oursig);
8892 int name_padding = 13 - strlen (name);
8893
8894 if (name_padding <= 0)
8895 name_padding = 0;
8896
8897 gdb_printf ("%s", name);
8898 gdb_printf ("%*.*s ", name_padding, name_padding, " ");
8899 gdb_printf ("%s\t", signal_stop[oursig] ? "Yes" : "No");
8900 gdb_printf ("%s\t", signal_print[oursig] ? "Yes" : "No");
8901 gdb_printf ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
8902 gdb_printf ("%s\n", gdb_signal_to_string (oursig));
8903 }
8904
8905 /* Specify how various signals in the inferior should be handled. */
8906
static void
handle_command (const char *args, int from_tty)
{
  int digits, wordlen;
  int sigfirst, siglast;
  enum gdb_signal oursig;
  int allsigs;

  if (args == nullptr)
    {
      error_no_arg (_("signal to handle"));
    }

  /* Allocate and zero an array of flags for which signals to handle.  */

  const size_t nsigs = GDB_SIGNAL_LAST;
  unsigned char sigs[nsigs] {};

  /* Break the command line up into args.  */

  gdb_argv built_argv (args);

  /* Walk through the args, looking for signal oursigs, signal names, and
     actions.  Signal numbers and signal names may be interspersed with
     actions, with the actions being performed for all signals cumulatively
     specified.  Signal ranges can be specified as <LOW>-<HIGH>.  */

  for (char *arg : built_argv)
    {
      wordlen = strlen (arg);
      /* Count the leading digits; DIGITS is used below both to detect
	 numeric arguments and to locate the '-' of a <LOW>-<HIGH>
	 range.  */
      for (digits = 0; isdigit (arg[digits]); digits++)
	{;
	}
      allsigs = 0;
      sigfirst = siglast = -1;

      if (wordlen >= 1 && !strncmp (arg, "all", wordlen))
	{
	  /* Apply action to all signals except those used by the
	     debugger.  Silently skip those.  */
	  allsigs = 1;
	  sigfirst = 0;
	  siglast = nsigs - 1;
	}
      else if (wordlen >= 1 && !strncmp (arg, "stop", wordlen))
	{
	  SET_SIGS (nsigs, sigs, signal_stop);
	  SET_SIGS (nsigs, sigs, signal_print);
	}
      else if (wordlen >= 1 && !strncmp (arg, "ignore", wordlen))
	{
	  UNSET_SIGS (nsigs, sigs, signal_program);
	}
      else if (wordlen >= 2 && !strncmp (arg, "print", wordlen))
	{
	  SET_SIGS (nsigs, sigs, signal_print);
	}
      else if (wordlen >= 2 && !strncmp (arg, "pass", wordlen))
	{
	  SET_SIGS (nsigs, sigs, signal_program);
	}
      else if (wordlen >= 3 && !strncmp (arg, "nostop", wordlen))
	{
	  UNSET_SIGS (nsigs, sigs, signal_stop);
	}
      else if (wordlen >= 3 && !strncmp (arg, "noignore", wordlen))
	{
	  SET_SIGS (nsigs, sigs, signal_program);
	}
      else if (wordlen >= 4 && !strncmp (arg, "noprint", wordlen))
	{
	  UNSET_SIGS (nsigs, sigs, signal_print);
	  UNSET_SIGS (nsigs, sigs, signal_stop);
	}
      else if (wordlen >= 4 && !strncmp (arg, "nopass", wordlen))
	{
	  UNSET_SIGS (nsigs, sigs, signal_program);
	}
      else if (digits > 0)
	{
	  /* It is numeric.  The numeric signal refers to our own
	     internal signal numbering from target.h, not to host/target
	     signal number.  This is a feature; users really should be
	     using symbolic names anyway, and the common ones like
	     SIGHUP, SIGINT, SIGALRM, etc. will work right anyway.  */

	  sigfirst = siglast = (int)
	    gdb_signal_from_command (atoi (arg));
	  if (arg[digits] == '-')
	    {
	      siglast = (int)
		gdb_signal_from_command (atoi (arg + digits + 1));
	    }
	  if (sigfirst > siglast)
	    {
	      /* Bet he didn't figure we'd think of this case...  */
	      std::swap (sigfirst, siglast);
	    }
	}
      else
	{
	  oursig = gdb_signal_from_name (arg);
	  if (oursig != GDB_SIGNAL_UNKNOWN)
	    {
	      sigfirst = siglast = (int) oursig;
	    }
	  else
	    {
	      /* Not a number and not a recognized flag word => complain.  */
	      error (_("Unrecognized or ambiguous flag word: \"%s\"."), arg);
	    }
	}

      /* If any signal numbers or symbol names were found, set flags for
	 which signals to apply actions to.  */

      for (int signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
	{
	  switch ((enum gdb_signal) signum)
	    {
	    case GDB_SIGNAL_TRAP:
	    case GDB_SIGNAL_INT:
	      if (!allsigs && !sigs[signum])
		{
		  if (query (_("%s is used by the debugger.\n\
Are you sure you want to change it? "),
			     gdb_signal_to_name ((enum gdb_signal) signum)))
		    {
		      sigs[signum] = 1;
		    }
		  else
		    gdb_printf (_("Not confirmed, unchanged.\n"));
		}
	      break;
	    case GDB_SIGNAL_0:
	    case GDB_SIGNAL_DEFAULT:
	    case GDB_SIGNAL_UNKNOWN:
	      /* Make sure that "all" doesn't print these.  */
	      break;
	    default:
	      sigs[signum] = 1;
	      break;
	    }
	}
    }

  /* If at least one signal changed, push the new dispositions to the
     target, and, for interactive use, show the entries that changed.
     The inner display loop resumes from the first changed signal.  */
  for (int signum = 0; signum < nsigs; signum++)
    if (sigs[signum])
      {
	signal_cache_update (-1);
	target_pass_signals (signal_pass);
	target_program_signals (signal_program);

	if (from_tty)
	  {
	    /* Show the results.  */
	    sig_print_header ();
	    for (; signum < nsigs; signum++)
	      if (sigs[signum])
		sig_print_info ((enum gdb_signal) signum);
	  }

	break;
      }
}
9072
9073 /* Complete the "handle" command. */
9074
9075 static void
9076 handle_completer (struct cmd_list_element *ignore,
9077 completion_tracker &tracker,
9078 const char *text, const char *word)
9079 {
9080 static const char * const keywords[] =
9081 {
9082 "all",
9083 "stop",
9084 "ignore",
9085 "print",
9086 "pass",
9087 "nostop",
9088 "noignore",
9089 "noprint",
9090 "nopass",
9091 nullptr,
9092 };
9093
9094 signal_completer (ignore, tracker, text, word);
9095 complete_on_enum (tracker, keywords, word, word);
9096 }
9097
9098 enum gdb_signal
9099 gdb_signal_from_command (int num)
9100 {
9101 if (num >= 1 && num <= 15)
9102 return (enum gdb_signal) num;
9103 error (_("Only signals 1-15 are valid as numeric signals.\n\
9104 Use \"info signals\" for a list of symbolic signals."));
9105 }
9106
9107 /* Print current contents of the tables set by the handle command.
9108 It is possible we should just be printing signals actually used
9109 by the current target (but for things to work right when switching
9110 targets, all signals should be in the signal tables). */
9111
9112 static void
9113 info_signals_command (const char *signum_exp, int from_tty)
9114 {
9115 enum gdb_signal oursig;
9116
9117 sig_print_header ();
9118
9119 if (signum_exp)
9120 {
9121 /* First see if this is a symbol name. */
9122 oursig = gdb_signal_from_name (signum_exp);
9123 if (oursig == GDB_SIGNAL_UNKNOWN)
9124 {
9125 /* No, try numeric. */
9126 oursig =
9127 gdb_signal_from_command (parse_and_eval_long (signum_exp));
9128 }
9129 sig_print_info (oursig);
9130 return;
9131 }
9132
9133 gdb_printf ("\n");
9134 /* These ugly casts brought to you by the native VAX compiler. */
9135 for (oursig = GDB_SIGNAL_FIRST;
9136 (int) oursig < (int) GDB_SIGNAL_LAST;
9137 oursig = (enum gdb_signal) ((int) oursig + 1))
9138 {
9139 QUIT;
9140
9141 if (oursig != GDB_SIGNAL_UNKNOWN
9142 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
9143 sig_print_info (oursig);
9144 }
9145
9146 gdb_printf (_("\nUse the \"handle\" command "
9147 "to change these tables.\n"));
9148 }
9149
9150 /* The $_siginfo convenience variable is a bit special. We don't know
9151 for sure the type of the value until we actually have a chance to
9152 fetch the data. The type can change depending on gdbarch, so it is
9153 also dependent on which thread you have selected.
9154
9155 1. making $_siginfo be an internalvar that creates a new value on
9156 access.
9157
9158 2. making the value of $_siginfo be an lval_computed value. */
9159
9160 /* This function implements the lval_computed support for reading a
9161 $_siginfo value. */
9162
9163 static void
9164 siginfo_value_read (struct value *v)
9165 {
9166 LONGEST transferred;
9167
9168 /* If we can access registers, so can we access $_siginfo. Likewise
9169 vice versa. */
9170 validate_registers_access ();
9171
9172 transferred =
9173 target_read (current_inferior ()->top_target (),
9174 TARGET_OBJECT_SIGNAL_INFO,
9175 nullptr,
9176 value_contents_all_raw (v).data (),
9177 value_offset (v),
9178 value_type (v)->length ());
9179
9180 if (transferred != value_type (v)->length ())
9181 error (_("Unable to read siginfo"));
9182 }
9183
9184 /* This function implements the lval_computed support for writing a
9185 $_siginfo value. */
9186
9187 static void
9188 siginfo_value_write (struct value *v, struct value *fromval)
9189 {
9190 LONGEST transferred;
9191
9192 /* If we can access registers, so can we access $_siginfo. Likewise
9193 vice versa. */
9194 validate_registers_access ();
9195
9196 transferred = target_write (current_inferior ()->top_target (),
9197 TARGET_OBJECT_SIGNAL_INFO,
9198 nullptr,
9199 value_contents_all_raw (fromval).data (),
9200 value_offset (v),
9201 value_type (fromval)->length ());
9202
9203 if (transferred != value_type (fromval)->length ())
9204 error (_("Unable to write siginfo"));
9205 }
9206
/* The lval_computed hooks backing $_siginfo values: reads and writes
   are forwarded to the target's TARGET_OBJECT_SIGNAL_INFO object.  */

static const struct lval_funcs siginfo_value_funcs =
  {
    siginfo_value_read,
    siginfo_value_write
  };
9212
9213 /* Return a new value with the correct type for the siginfo object of
9214 the current thread using architecture GDBARCH. Return a void value
9215 if there's no object available. */
9216
9217 static struct value *
9218 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
9219 void *ignore)
9220 {
9221 if (target_has_stack ()
9222 && inferior_ptid != null_ptid
9223 && gdbarch_get_siginfo_type_p (gdbarch))
9224 {
9225 struct type *type = gdbarch_get_siginfo_type (gdbarch);
9226
9227 return allocate_computed_value (type, &siginfo_value_funcs, nullptr);
9228 }
9229
9230 return allocate_value (builtin_type (gdbarch)->builtin_void);
9231 }
9232
9233 \f
9234 /* infcall_suspend_state contains state about the program itself like its
9235 registers and any signal it received when it last stopped.
9236 This state must be restored regardless of how the inferior function call
9237 ends (either successfully, or after it hits a breakpoint or signal)
9238 if the program is to properly continue where it left off. */
9239
class infcall_suspend_state
{
public:
  /* Capture state from GDBARCH, TP, and REGCACHE that must be restored
     once the inferior function call has finished.  */
  infcall_suspend_state (struct gdbarch *gdbarch,
			 const struct thread_info *tp,
			 struct regcache *regcache)
    : m_registers (new readonly_detached_regcache (*regcache))
  {
    tp->save_suspend_to (m_thread_suspend);

    gdb::unique_xmalloc_ptr<gdb_byte> siginfo_data;

    if (gdbarch_get_siginfo_type_p (gdbarch))
      {
	struct type *type = gdbarch_get_siginfo_type (gdbarch);
	size_t len = type->length ();

	siginfo_data.reset ((gdb_byte *) xmalloc (len));

	if (target_read (current_inferior ()->top_target (),
			 TARGET_OBJECT_SIGNAL_INFO, nullptr,
			 siginfo_data.get (), 0, len) != len)
	  {
	    /* Errors ignored.  */
	    siginfo_data.reset (nullptr);
	  }
      }

    /* Only remember the siginfo buffer (and the gdbarch it was read
       under) if the read above succeeded.  */
    if (siginfo_data)
      {
	m_siginfo_gdbarch = gdbarch;
	m_siginfo_data = std::move (siginfo_data);
      }
  }

  /* Return a pointer to the stored register state.  */

  readonly_detached_regcache *registers () const
  {
    return m_registers.get ();
  }

  /* Restores the stored state into GDBARCH, TP, and REGCACHE.  */

  void restore (struct gdbarch *gdbarch,
		struct thread_info *tp,
		struct regcache *regcache) const
  {
    tp->restore_suspend_from (m_thread_suspend);

    /* Only write the siginfo back if GDBARCH matches the one the data
       was captured under; for a different gdbarch the buffer content
       would be invalid (see m_siginfo_data below).  */
    if (m_siginfo_gdbarch == gdbarch)
      {
	struct type *type = gdbarch_get_siginfo_type (gdbarch);

	/* Errors ignored.  */
	target_write (current_inferior ()->top_target (),
		      TARGET_OBJECT_SIGNAL_INFO, nullptr,
		      m_siginfo_data.get (), 0, type->length ());
      }

    /* The inferior can be gone if the user types "print exit(0)"
       (and perhaps other times).  */
    if (target_has_execution ())
      /* NB: The register write goes through to the target.  */
      regcache->restore (registers ());
  }

private:
  /* How the current thread stopped before the inferior function call was
     executed.  */
  struct thread_suspend_state m_thread_suspend;

  /* The registers before the inferior function call was executed.  */
  std::unique_ptr<readonly_detached_regcache> m_registers;

  /* Format of SIGINFO_DATA or NULL if it is not present.  */
  struct gdbarch *m_siginfo_gdbarch = nullptr;

  /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
     gdbarch_get_siginfo_type ()->length ().  For different gdbarch the
     content would be invalid.  */
  gdb::unique_xmalloc_ptr<gdb_byte> m_siginfo_data;
};
9325
9326 infcall_suspend_state_up
9327 save_infcall_suspend_state ()
9328 {
9329 struct thread_info *tp = inferior_thread ();
9330 struct regcache *regcache = get_current_regcache ();
9331 struct gdbarch *gdbarch = regcache->arch ();
9332
9333 infcall_suspend_state_up inf_state
9334 (new struct infcall_suspend_state (gdbarch, tp, regcache));
9335
9336 /* Having saved the current state, adjust the thread state, discarding
9337 any stop signal information. The stop signal is not useful when
9338 starting an inferior function call, and run_inferior_call will not use
9339 the signal due to its `proceed' call with GDB_SIGNAL_0. */
9340 tp->set_stop_signal (GDB_SIGNAL_0);
9341
9342 return inf_state;
9343 }
9344
9345 /* Restore inferior session state to INF_STATE. */
9346
9347 void
9348 restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
9349 {
9350 struct thread_info *tp = inferior_thread ();
9351 struct regcache *regcache = get_current_regcache ();
9352 struct gdbarch *gdbarch = regcache->arch ();
9353
9354 inf_state->restore (gdbarch, tp, regcache);
9355 discard_infcall_suspend_state (inf_state);
9356 }
9357
/* Free INF_STATE without restoring anything from it.  */

void
discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
{
  delete inf_state;
}
9363
/* Return the read-only register cache captured in INF_STATE.  The
   returned regcache remains owned by INF_STATE; callers must not free
   it or use it after INF_STATE is discarded.  */

readonly_detached_regcache *
get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
{
  return inf_state->registers ();
}
9369
/* infcall_control_state contains state regarding gdb's control of the
   inferior itself like stepping control.  It also contains session state like
   the user's currently selected frame.  */

struct infcall_control_state
{
  /* Per-thread run-control state (step ranges, resume breakpoints,
     stop bpstat chain, ...) saved from thread_info::control.  */
  struct thread_control_state thread_control;
  /* Per-inferior run-control state saved from inferior::control.  */
  struct inferior_control_state inferior_control;

  /* Other fields:  */
  /* Whether the stop happened in a call-dummy frame.  */
  enum stop_stack_kind stop_stack_dummy = STOP_NONE;
  /* Non-zero if the last stop was caused by an unexpected signal.  */
  int stopped_by_random_signal = 0;

  /* ID and level of the selected frame when the inferior function
     call was made.  */
  struct frame_id selected_frame_id {};
  int selected_frame_level = -1;
};
9388
9389 /* Save all of the information associated with the inferior<==>gdb
9390 connection. */
9391
9392 infcall_control_state_up
9393 save_infcall_control_state ()
9394 {
9395 infcall_control_state_up inf_status (new struct infcall_control_state);
9396 struct thread_info *tp = inferior_thread ();
9397 struct inferior *inf = current_inferior ();
9398
9399 inf_status->thread_control = tp->control;
9400 inf_status->inferior_control = inf->control;
9401
9402 tp->control.step_resume_breakpoint = nullptr;
9403 tp->control.exception_resume_breakpoint = nullptr;
9404
9405 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
9406 chain. If caller's caller is walking the chain, they'll be happier if we
9407 hand them back the original chain when restore_infcall_control_state is
9408 called. */
9409 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
9410
9411 /* Other fields: */
9412 inf_status->stop_stack_dummy = stop_stack_dummy;
9413 inf_status->stopped_by_random_signal = stopped_by_random_signal;
9414
9415 save_selected_frame (&inf_status->selected_frame_id,
9416 &inf_status->selected_frame_level);
9417
9418 return inf_status;
9419 }
9420
9421 /* Restore inferior session state to INF_STATUS. */
9422
9423 void
9424 restore_infcall_control_state (struct infcall_control_state *inf_status)
9425 {
9426 struct thread_info *tp = inferior_thread ();
9427 struct inferior *inf = current_inferior ();
9428
9429 if (tp->control.step_resume_breakpoint)
9430 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
9431
9432 if (tp->control.exception_resume_breakpoint)
9433 tp->control.exception_resume_breakpoint->disposition
9434 = disp_del_at_next_stop;
9435
9436 /* Handle the bpstat_copy of the chain. */
9437 bpstat_clear (&tp->control.stop_bpstat);
9438
9439 tp->control = inf_status->thread_control;
9440 inf->control = inf_status->inferior_control;
9441
9442 /* Other fields: */
9443 stop_stack_dummy = inf_status->stop_stack_dummy;
9444 stopped_by_random_signal = inf_status->stopped_by_random_signal;
9445
9446 if (target_has_stack ())
9447 {
9448 restore_selected_frame (inf_status->selected_frame_id,
9449 inf_status->selected_frame_level);
9450 }
9451
9452 delete inf_status;
9453 }
9454
9455 void
9456 discard_infcall_control_state (struct infcall_control_state *inf_status)
9457 {
9458 if (inf_status->thread_control.step_resume_breakpoint)
9459 inf_status->thread_control.step_resume_breakpoint->disposition
9460 = disp_del_at_next_stop;
9461
9462 if (inf_status->thread_control.exception_resume_breakpoint)
9463 inf_status->thread_control.exception_resume_breakpoint->disposition
9464 = disp_del_at_next_stop;
9465
9466 /* See save_infcall_control_state for info on stop_bpstat. */
9467 bpstat_clear (&inf_status->thread_control.stop_bpstat);
9468
9469 delete inf_status;
9470 }
9471 \f
9472 /* See infrun.h. */
9473
9474 void
9475 clear_exit_convenience_vars (void)
9476 {
9477 clear_internalvar (lookup_internalvar ("_exitsignal"));
9478 clear_internalvar (lookup_internalvar ("_exitcode"));
9479 }
9480 \f
9481
/* User interface for reverse debugging:
   Set exec-direction / show exec-direction commands
   (returns error unless target implements to_set_exec_direction method).  */

/* Current execution direction, consulted by the run-control code.  */
enum exec_direction_kind execution_direction = EXEC_FORWARD;
static const char exec_forward[] = "forward";
static const char exec_reverse[] = "reverse";
/* Backing variable for the "set exec-direction" enum command; kept in
   sync with EXECUTION_DIRECTION by set_exec_direction_func.  */
static const char *exec_direction = exec_forward;
static const char *const exec_direction_names[] = {
  exec_forward,
  exec_reverse,
  nullptr
};
9495
9496 static void
9497 set_exec_direction_func (const char *args, int from_tty,
9498 struct cmd_list_element *cmd)
9499 {
9500 if (target_can_execute_reverse ())
9501 {
9502 if (!strcmp (exec_direction, exec_forward))
9503 execution_direction = EXEC_FORWARD;
9504 else if (!strcmp (exec_direction, exec_reverse))
9505 execution_direction = EXEC_REVERSE;
9506 }
9507 else
9508 {
9509 exec_direction = exec_forward;
9510 error (_("Target does not support this operation."));
9511 }
9512 }
9513
9514 static void
9515 show_exec_direction_func (struct ui_file *out, int from_tty,
9516 struct cmd_list_element *cmd, const char *value)
9517 {
9518 switch (execution_direction) {
9519 case EXEC_FORWARD:
9520 gdb_printf (out, _("Forward.\n"));
9521 break;
9522 case EXEC_REVERSE:
9523 gdb_printf (out, _("Reverse.\n"));
9524 break;
9525 default:
9526 internal_error (_("bogus execution_direction value: %d"),
9527 (int) execution_direction);
9528 }
9529 }
9530
/* "show schedule-multiple" callback.  */

static void
show_schedule_multiple (struct ui_file *file, int from_tty,
			struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Resuming the execution of threads "
		      "of all processes is %s.\n"), value);
}
9538
/* Implementation of `siginfo' variable.  */

static const struct internalvar_funcs siginfo_funcs =
{
  /* Computes $_siginfo on demand; registered lazily in
     _initialize_infrun via create_internalvar_type_lazy.  */
  siginfo_make_value,
  nullptr,
};
9546
/* Callback for infrun's target events source.  This is marked when a
   thread has a pending status to process.  */

static void
infrun_async_inferior_event_handler (gdb_client_data data)
{
  /* Consume the mark first, then process the pending event.  */
  clear_async_event_handler (infrun_async_inferior_event_token);
  inferior_event_handler (INF_REG_EVENT);
}
9556
#if GDB_SELF_TEST
namespace selftests
{

/* Helper for infrun_thread_ptid_changed.  Create two mock targets
   which both contain a thread with ptid (111, 222), make current the
   inferior of target1 if TARGET1_IS_CURRENT, else the inferior of
   target2, then change the ptid of target1's thread to (111, 333) and
   check whether inferior_ptid followed: it must be updated only when
   the thread it denotes belongs to the target whose thread changed
   ptid.  */

static void
check_thread_ptid_change (gdbarch *arch, bool target1_is_current)
{
  scoped_restore_current_pspace_and_thread restore;

  scoped_mock_context<test_target_ops> target1 (arch);
  scoped_mock_context<test_target_ops> target2 (arch);

  ptid_t old_ptid (111, 222);
  ptid_t new_ptid (111, 333);

  /* Both targets start with a single thread at OLD_PTID.  */
  for (auto *mock : { &target1, &target2 })
    {
      mock->mock_inferior.pid = old_ptid.pid ();
      mock->mock_thread.ptid = old_ptid;
      mock->mock_inferior.ptid_thread_map.clear ();
      mock->mock_inferior.ptid_thread_map[old_ptid] = &mock->mock_thread;
    }

  auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
  set_current_inferior (target1_is_current
			? &target1.mock_inferior
			: &target2.mock_inferior);

  thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);

  /* inferior_ptid follows the change only if it referred to a thread
     of target1; a same-ptid thread of another target must not be
     affected.  */
  gdb_assert (inferior_ptid == (target1_is_current ? new_ptid : old_ptid));
}

/* Verify that when two threads with the same ptid exist (from two different
   targets) and one of them changes ptid, we only update inferior_ptid if
   it is appropriate.  */

static void
infrun_thread_ptid_changed ()
{
  gdbarch *arch = current_inferior ()->gdbarch;

  /* The thread which inferior_ptid represents changes ptid.  */
  check_thread_ptid_change (arch, true);

  /* A thread with the same ptid as inferior_ptid, but from another target,
     changes ptid.  */
  check_thread_ptid_change (arch, false);
}

} /* namespace selftests */

#endif /* GDB_SELF_TEST */
9631
/* Module initializer: register infrun's async event source, its
   commands and settings, the default signal dispositions, and its
   observers.  Called once at GDB startup.  */

void _initialize_infrun ();
void
_initialize_infrun ()
{
  struct cmd_list_element *c;

  /* Register extra event sources in the event loop.  */
  infrun_async_inferior_event_token
    = create_async_event_handler (infrun_async_inferior_event_handler, nullptr,
				  "infrun");

  cmd_list_element *info_signals_cmd
    = add_info ("signals", info_signals_command, _("\
What debugger does when program gets various signals.\n\
Specify a signal as argument to print info on that signal only."));
  add_info_alias ("handle", info_signals_cmd, 0);

  c = add_com ("handle", class_run, handle_command, _("\
Specify how to handle signals.\n\
Usage: handle SIGNAL [ACTIONS]\n\
Args are signals and actions to apply to those signals.\n\
If no actions are specified, the current settings for the specified signals\n\
will be displayed instead.\n\
\n\
Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
from 1-15 are allowed for compatibility with old versions of GDB.\n\
Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
The special arg \"all\" is recognized to mean all signals except those\n\
used by the debugger, typically SIGTRAP and SIGINT.\n\
\n\
Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
\"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
Stop means reenter debugger if this signal happens (implies print).\n\
Print means print a message if this signal happens.\n\
Pass means let program see this signal; otherwise program doesn't know.\n\
Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
Pass and Stop may be combined.\n\
\n\
Multiple signals may be specified.  Signal numbers and signal names\n\
may be interspersed with actions, with the actions being performed for\n\
all signals cumulatively specified."));
  set_cmd_completer (c, handle_completer);

  stop_command = add_cmd ("stop", class_obscure,
			  not_just_help_class_command, _("\
There is no `stop' command, but you can set a hook on `stop'.\n\
This allows you to set a list of commands to be run each time execution\n\
of the program stops."), &cmdlist);

  add_setshow_boolean_cmd
    ("infrun", class_maintenance, &debug_infrun,
     _("Set inferior debugging."),
     _("Show inferior debugging."),
     _("When non-zero, inferior specific debugging is enabled."),
     nullptr, show_debug_infrun, &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("non-stop", no_class,
			   &non_stop_1, _("\
Set whether gdb controls the inferior in non-stop mode."), _("\
Show whether gdb controls the inferior in non-stop mode."), _("\
When debugging a multi-threaded program and this setting is\n\
off (the default, also called all-stop mode), when one thread stops\n\
(for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
all other threads in the program while you interact with the thread of\n\
interest.  When you continue or step a thread, you can allow the other\n\
threads to run, or have them remain stopped, but while you inspect any\n\
thread's state, all threads stop.\n\
\n\
In non-stop mode, when one thread stops, other threads can continue\n\
to run freely.  You'll be able to step each thread independently,\n\
leave it stopped or free to run as needed."),
			   set_non_stop,
			   show_non_stop,
			   &setlist,
			   &showlist);

  /* Default signal dispositions: stop on, print, and pass every
     signal, and use no signal for "catch signal".  The tables are
     refined below for signals GDB itself uses or that are part of a
     program's normal operation.  */
  for (size_t i = 0; i < GDB_SIGNAL_LAST; i++)
    {
      signal_stop[i] = 1;
      signal_print[i] = 1;
      signal_program[i] = 1;
      signal_catch[i] = 0;
    }

  /* Signals caused by debugger's own actions should not be given to
     the program afterwards.

     Do not deliver GDB_SIGNAL_TRAP by default, except when the user
     explicitly specifies that it should be delivered to the target
     program.  Typically, that would occur when a user is debugging a
     target monitor on a simulator: the target monitor sets a
     breakpoint; the simulator encounters this breakpoint and halts
     the simulation handing control to GDB; GDB, noting that the stop
     address doesn't map to any known breakpoint, returns control back
     to the simulator; the simulator then delivers the hardware
     equivalent of a GDB_SIGNAL_TRAP to the program being
     debugged.  */
  signal_program[GDB_SIGNAL_TRAP] = 0;
  signal_program[GDB_SIGNAL_INT] = 0;

  /* Signals that are not errors should not normally enter the debugger.  */
  signal_stop[GDB_SIGNAL_ALRM] = 0;
  signal_print[GDB_SIGNAL_ALRM] = 0;
  signal_stop[GDB_SIGNAL_VTALRM] = 0;
  signal_print[GDB_SIGNAL_VTALRM] = 0;
  signal_stop[GDB_SIGNAL_PROF] = 0;
  signal_print[GDB_SIGNAL_PROF] = 0;
  signal_stop[GDB_SIGNAL_CHLD] = 0;
  signal_print[GDB_SIGNAL_CHLD] = 0;
  signal_stop[GDB_SIGNAL_IO] = 0;
  signal_print[GDB_SIGNAL_IO] = 0;
  signal_stop[GDB_SIGNAL_POLL] = 0;
  signal_print[GDB_SIGNAL_POLL] = 0;
  signal_stop[GDB_SIGNAL_URG] = 0;
  signal_print[GDB_SIGNAL_URG] = 0;
  signal_stop[GDB_SIGNAL_WINCH] = 0;
  signal_print[GDB_SIGNAL_WINCH] = 0;
  signal_stop[GDB_SIGNAL_PRIO] = 0;
  signal_print[GDB_SIGNAL_PRIO] = 0;

  /* These signals are used internally by user-level thread
     implementations.  (See signal(5) on Solaris.)  Like the above
     signals, a healthy program receives and handles them as part of
     its normal operation.  */
  signal_stop[GDB_SIGNAL_LWP] = 0;
  signal_print[GDB_SIGNAL_LWP] = 0;
  signal_stop[GDB_SIGNAL_WAITING] = 0;
  signal_print[GDB_SIGNAL_WAITING] = 0;
  signal_stop[GDB_SIGNAL_CANCEL] = 0;
  signal_print[GDB_SIGNAL_CANCEL] = 0;
  signal_stop[GDB_SIGNAL_LIBRT] = 0;
  signal_print[GDB_SIGNAL_LIBRT] = 0;

  /* Update cached state.  */
  signal_cache_update (-1);

  add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
			    &stop_on_solib_events, _("\
Set stopping for shared library events."), _("\
Show stopping for shared library events."), _("\
If nonzero, gdb will give control to the user when the dynamic linker\n\
notifies gdb of shared library events.  The most common event of interest\n\
to the user would be loading/unloading of a new library."),
			    set_stop_on_solib_events,
			    show_stop_on_solib_events,
			    &setlist, &showlist);

  add_setshow_enum_cmd ("follow-fork-mode", class_run,
			follow_fork_mode_kind_names,
			&follow_fork_mode_string, _("\
Set debugger response to a program call of fork or vfork."), _("\
Show debugger response to a program call of fork or vfork."), _("\
A fork or vfork creates a new process.  follow-fork-mode can be:\n\
  parent  - the original process is debugged after a fork\n\
  child   - the new process is debugged after a fork\n\
The unfollowed process will continue to run.\n\
By default, the debugger will follow the parent process."),
			nullptr,
			show_follow_fork_mode_string,
			&setlist, &showlist);

  add_setshow_enum_cmd ("follow-exec-mode", class_run,
			follow_exec_mode_names,
			&follow_exec_mode_string, _("\
Set debugger response to a program call of exec."), _("\
Show debugger response to a program call of exec."), _("\
An exec call replaces the program image of a process.\n\
\n\
follow-exec-mode can be:\n\
\n\
  new - the debugger creates a new inferior and rebinds the process\n\
to this new inferior.  The program the process was running before\n\
the exec call can be restarted afterwards by restarting the original\n\
inferior.\n\
\n\
  same - the debugger keeps the process bound to the same inferior.\n\
The new executable image replaces the previous executable loaded in\n\
the inferior.  Restarting the inferior after the exec call restarts\n\
the executable the process was running after the exec call.\n\
\n\
By default, the debugger will use the same inferior."),
			nullptr,
			show_follow_exec_mode_string,
			&setlist, &showlist);

  add_setshow_enum_cmd ("scheduler-locking", class_run,
			scheduler_enums, &scheduler_mode, _("\
Set mode for locking scheduler during execution."), _("\
Show mode for locking scheduler during execution."), _("\
off    == no locking (threads may preempt at any time)\n\
on     == full locking (no thread except the current thread may run)\n\
          This applies to both normal execution and replay mode.\n\
step   == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
          In this mode, other threads may run during other commands.\n\
          This applies to both normal execution and replay mode.\n\
replay == scheduler locked in replay mode and unlocked during normal execution."),
			set_schedlock_func,	/* traps on target vector */
			show_scheduler_mode,
			&setlist, &showlist);

  add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
Set mode for resuming threads of all processes."), _("\
Show mode for resuming threads of all processes."), _("\
When on, execution commands (such as 'continue' or 'next') resume all\n\
threads of all processes.  When off (which is the default), execution\n\
commands only resume the threads of the current process.  The set of\n\
threads that are resumed is further refined by the scheduler-locking\n\
mode (see help set scheduler-locking)."),
			   nullptr,
			   show_schedule_multiple,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
Set mode of the step operation."), _("\
Show mode of the step operation."), _("\
When set, doing a step over a function without debug line information\n\
will stop at the first instruction of that function. Otherwise, the\n\
function is skipped and the step command stops at a different source line."),
			   nullptr,
			   show_step_stop_if_no_debug,
			   &setlist, &showlist);

  add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
				&can_use_displaced_stepping, _("\
Set debugger's willingness to use displaced stepping."), _("\
Show debugger's willingness to use displaced stepping."), _("\
If on, gdb will use displaced stepping to step over breakpoints if it is\n\
supported by the target architecture.  If off, gdb will not use displaced\n\
stepping to step over breakpoints, even if such is supported by the target\n\
architecture.  If auto (which is the default), gdb will use displaced stepping\n\
if the target architecture supports it and non-stop mode is active, but will not\n\
use it in all-stop mode (see help set non-stop)."),
				nullptr,
				show_can_use_displaced_stepping,
				&setlist, &showlist);

  add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
			&exec_direction, _("Set direction of execution.\n\
Options are 'forward' or 'reverse'."),
			_("Show direction of execution (forward/reverse)."),
			_("Tells gdb whether to execute forward or backward."),
			set_exec_direction_func, show_exec_direction_func,
			&setlist, &showlist);

  /* Set/show detach-on-fork: user-settable mode.  */

  add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
Set whether gdb will detach the child of a fork."), _("\
Show whether gdb will detach the child of a fork."), _("\
Tells gdb whether to detach the child of a fork."),
			   nullptr, nullptr, &setlist, &showlist);

  /* Set/show disable address space randomization mode.  */

  add_setshow_boolean_cmd ("disable-randomization", class_support,
			   &disable_randomization, _("\
Set disabling of debuggee's virtual address space randomization."), _("\
Show disabling of debuggee's virtual address space randomization."), _("\
When this mode is on (which is the default), randomization of the virtual\n\
address space is disabled.  Standalone programs run with the randomization\n\
enabled by default on some platforms."),
			   &set_disable_randomization,
			   &show_disable_randomization,
			   &setlist, &showlist);

  /* ptid initializations */
  inferior_ptid = null_ptid;
  target_last_wait_ptid = minus_one_ptid;

  /* Hook infrun into the thread/inferior lifecycle notifications.  */
  gdb::observers::thread_ptid_changed.attach (infrun_thread_ptid_changed,
					      "infrun");
  gdb::observers::thread_stop_requested.attach (infrun_thread_stop_requested,
						"infrun");
  gdb::observers::thread_exit.attach (infrun_thread_thread_exit, "infrun");
  gdb::observers::inferior_exit.attach (infrun_inferior_exit, "infrun");
  gdb::observers::inferior_execd.attach (infrun_inferior_execd, "infrun");

  /* Explicitly create without lookup, since that tries to create a
     value with a void typed value, and when we get here, gdbarch
     isn't initialized yet.  At this point, we're quite sure there
     isn't another convenience variable of the same name.  */
  create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, nullptr);

  add_setshow_boolean_cmd ("observer", no_class,
			   &observer_mode_1, _("\
Set whether gdb controls the inferior in observer mode."), _("\
Show whether gdb controls the inferior in observer mode."), _("\
In observer mode, GDB can get data from the inferior, but not\n\
affect its execution.  Registers and memory may not be changed,\n\
breakpoints may not be set, and the program cannot be interrupted\n\
or signalled."),
			   set_observer_mode,
			   show_observer_mode,
			   &setlist,
			   &showlist);

#if GDB_SELF_TEST
  selftests::register_test ("infrun_thread_ptid_changed",
			    selftests::infrun_thread_ptid_changed);
#endif
}