1 /* Target-struct-independent code to start (run) and stop an inferior
4 Copyright (C) 1986-2022 Free Software Foundation, Inc.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22 #include "displaced-stepping.h"
28 #include "breakpoint.h"
32 #include "target-connection.h"
33 #include "gdbthread.h"
40 #include "observable.h"
45 #include "mi/mi-common.h"
46 #include "event-top.h"
48 #include "record-full.h"
49 #include "inline-frame.h"
51 #include "tracepoint.h"
55 #include "completer.h"
56 #include "target-descriptions.h"
57 #include "target-dcache.h"
60 #include "gdbsupport/event-loop.h"
61 #include "thread-fsm.h"
62 #include "gdbsupport/enum-flags.h"
63 #include "progspace-and-thread.h"
64 #include "gdbsupport/gdb_optional.h"
65 #include "arch-utils.h"
66 #include "gdbsupport/scope-exit.h"
67 #include "gdbsupport/forward-scope-exit.h"
68 #include "gdbsupport/gdb_select.h"
69 #include <unordered_map>
70 #include "async-event.h"
71 #include "gdbsupport/selftest.h"
72 #include "scoped-mock-context.h"
73 #include "test-target.h"
74 #include "gdbsupport/common-debug.h"
75 #include "gdbsupport/buildargv.h"
77 /* Prototypes for local functions */
79 static void sig_print_info (enum gdb_signal
);
81 static void sig_print_header (void);
83 static void follow_inferior_reset_breakpoints (void);
85 static bool currently_stepping (struct thread_info
*tp
);
87 static void insert_hp_step_resume_breakpoint_at_frame (frame_info_ptr
);
89 static void insert_step_resume_breakpoint_at_caller (frame_info_ptr
);
91 static void insert_longjmp_resume_breakpoint (struct gdbarch
*, CORE_ADDR
);
93 static bool maybe_software_singlestep (struct gdbarch
*gdbarch
);
95 static void resume (gdb_signal sig
);
97 static void wait_for_inferior (inferior
*inf
);
99 static void restart_threads (struct thread_info
*event_thread
,
100 inferior
*inf
= nullptr);
102 static bool start_step_over (void);
104 static bool step_over_info_valid_p (void);
/* Asynchronous signal handler registered as event loop source for
   when we have pending events ready to be passed to the core.  */
static struct async_event_handler *infrun_async_inferior_event_token;

/* Stores whether infrun_async was previously enabled or disabled.
   Starts off as -1, indicating "never enabled/disabled".  */
static int infrun_is_async = -1;
117 infrun_async (int enable
)
119 if (infrun_is_async
!= enable
)
121 infrun_is_async
= enable
;
123 infrun_debug_printf ("enable=%d", enable
);
126 mark_async_event_handler (infrun_async_inferior_event_token
);
128 clear_async_event_handler (infrun_async_inferior_event_token
);
135 mark_infrun_async_event_handler (void)
137 mark_async_event_handler (infrun_async_inferior_event_token
);
/* When set, stop the 'step' command if we enter a function which has
   no line number information.  The normal behavior is that we step
   over such function.  */
bool step_stop_if_no_debug = false;
145 show_step_stop_if_no_debug (struct ui_file
*file
, int from_tty
,
146 struct cmd_list_element
*c
, const char *value
)
148 gdb_printf (file
, _("Mode of the step operation is %s.\n"), value
);
151 /* proceed and normal_stop use this to notify the user when the
152 inferior stopped in a different thread than it had been running
155 static ptid_t previous_inferior_ptid
;
157 /* If set (default for legacy reasons), when following a fork, GDB
158 will detach from one of the fork branches, child or parent.
159 Exactly which branch is detached depends on 'set follow-fork-mode'
162 static bool detach_fork
= true;
164 bool debug_infrun
= false;
166 show_debug_infrun (struct ui_file
*file
, int from_tty
,
167 struct cmd_list_element
*c
, const char *value
)
169 gdb_printf (file
, _("Inferior debugging is %s.\n"), value
);
/* Support for disabling address space randomization.  */

bool disable_randomization = true;
177 show_disable_randomization (struct ui_file
*file
, int from_tty
,
178 struct cmd_list_element
*c
, const char *value
)
180 if (target_supports_disable_randomization ())
182 _("Disabling randomization of debuggee's "
183 "virtual address space is %s.\n"),
186 gdb_puts (_("Disabling randomization of debuggee's "
187 "virtual address space is unsupported on\n"
188 "this platform.\n"), file
);
192 set_disable_randomization (const char *args
, int from_tty
,
193 struct cmd_list_element
*c
)
195 if (!target_supports_disable_randomization ())
196 error (_("Disabling randomization of debuggee's "
197 "virtual address space is unsupported on\n"
/* User interface for non-stop mode.  */

bool non_stop = false;
/* Staging variable for "set non-stop"; only committed to NON_STOP
   once the setting is accepted.  */
static bool non_stop_1 = false;
207 set_non_stop (const char *args
, int from_tty
,
208 struct cmd_list_element
*c
)
210 if (target_has_execution ())
212 non_stop_1
= non_stop
;
213 error (_("Cannot change this setting while the inferior is running."));
216 non_stop
= non_stop_1
;
220 show_non_stop (struct ui_file
*file
, int from_tty
,
221 struct cmd_list_element
*c
, const char *value
)
224 _("Controlling the inferior in non-stop mode is %s.\n"),
/* "Observer mode" is somewhat like a more extreme version of
   non-stop, in which all GDB operations that might affect the
   target's execution have been disabled.  */

static bool observer_mode = false;
/* Staging variable for "set observer"; only committed to
   OBSERVER_MODE once the setting is accepted.  */
static bool observer_mode_1 = false;
236 set_observer_mode (const char *args
, int from_tty
,
237 struct cmd_list_element
*c
)
239 if (target_has_execution ())
241 observer_mode_1
= observer_mode
;
242 error (_("Cannot change this setting while the inferior is running."));
245 observer_mode
= observer_mode_1
;
247 may_write_registers
= !observer_mode
;
248 may_write_memory
= !observer_mode
;
249 may_insert_breakpoints
= !observer_mode
;
250 may_insert_tracepoints
= !observer_mode
;
251 /* We can insert fast tracepoints in or out of observer mode,
252 but enable them if we're going into this mode. */
254 may_insert_fast_tracepoints
= true;
255 may_stop
= !observer_mode
;
256 update_target_permissions ();
258 /* Going *into* observer mode we must force non-stop, then
259 going out we leave it that way. */
262 pagination_enabled
= false;
263 non_stop
= non_stop_1
= true;
267 gdb_printf (_("Observer mode is now %s.\n"),
268 (observer_mode
? "on" : "off"));
272 show_observer_mode (struct ui_file
*file
, int from_tty
,
273 struct cmd_list_element
*c
, const char *value
)
275 gdb_printf (file
, _("Observer mode is %s.\n"), value
);
278 /* This updates the value of observer mode based on changes in
279 permissions. Note that we are deliberately ignoring the values of
280 may-write-registers and may-write-memory, since the user may have
281 reason to enable these during a session, for instance to turn on a
282 debugging-related global. */
285 update_observer_mode (void)
287 bool newval
= (!may_insert_breakpoints
288 && !may_insert_tracepoints
289 && may_insert_fast_tracepoints
293 /* Let the user know if things change. */
294 if (newval
!= observer_mode
)
295 gdb_printf (_("Observer mode is now %s.\n"),
296 (newval
? "on" : "off"));
298 observer_mode
= observer_mode_1
= newval
;
301 /* Tables of how to react to signals; the user sets them. */
303 static unsigned char signal_stop
[GDB_SIGNAL_LAST
];
304 static unsigned char signal_print
[GDB_SIGNAL_LAST
];
305 static unsigned char signal_program
[GDB_SIGNAL_LAST
];
307 /* Table of signals that are registered with "catch signal". A
308 non-zero entry indicates that the signal is caught by some "catch
310 static unsigned char signal_catch
[GDB_SIGNAL_LAST
];
312 /* Table of signals that the target may silently handle.
313 This is automatically determined from the flags above,
314 and simply cached here. */
315 static unsigned char signal_pass
[GDB_SIGNAL_LAST
];
/* Set (SET_SIGS) or clear (UNSET_SIGS) the entries of FLAGS for every
   signal number below NSIGS whose entry in SIGS is non-zero.  Wrapped
   in do/while(0) so the macros behave as single statements.  */

#define SET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
	(flags)[signum] = 1; \
  } while (0)

#define UNSET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
	(flags)[signum] = 0; \
  } while (0)
333 /* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
334 this function is to avoid exporting `signal_program'. */
337 update_signals_program_target (void)
339 target_program_signals (signal_program
);
/* Value to pass to target_resume() to cause all threads to resume.  */

#define RESUME_ALL minus_one_ptid

/* Command list pointer for the "stop" placeholder.  */

static struct cmd_list_element *stop_command;

/* Nonzero if we want to give control to the user when we're notified
   of shared library events by the dynamic linker.  */
int stop_on_solib_events;
354 /* Enable or disable optional shared library event breakpoints
355 as appropriate when the above flag is changed. */
358 set_stop_on_solib_events (const char *args
,
359 int from_tty
, struct cmd_list_element
*c
)
361 update_solib_breakpoints ();
365 show_stop_on_solib_events (struct ui_file
*file
, int from_tty
,
366 struct cmd_list_element
*c
, const char *value
)
368 gdb_printf (file
, _("Stopping for shared library events is %s.\n"),
372 /* True after stop if current stack frame should be printed. */
374 static bool stop_print_frame
;
376 /* This is a cached copy of the target/ptid/waitstatus of the last
377 event returned by target_wait().
378 This information is returned by get_last_target_status(). */
379 static process_stratum_target
*target_last_proc_target
;
380 static ptid_t target_last_wait_ptid
;
381 static struct target_waitstatus target_last_waitstatus
;
383 void init_thread_stepping_state (struct thread_info
*tss
);
/* Possible values for "set follow-fork-mode", and the currently
   selected one.  */

static const char follow_fork_mode_child[] = "child";
static const char follow_fork_mode_parent[] = "parent";

static const char *const follow_fork_mode_kind_names[] = {
  follow_fork_mode_child,
  follow_fork_mode_parent,
  nullptr
};

static const char *follow_fork_mode_string = follow_fork_mode_parent;
396 show_follow_fork_mode_string (struct ui_file
*file
, int from_tty
,
397 struct cmd_list_element
*c
, const char *value
)
400 _("Debugger response to a program "
401 "call of fork or vfork is \"%s\".\n"),
406 /* Handle changes to the inferior list based on the type of fork,
407 which process is being followed, and whether the other process
408 should be detached. On entry inferior_ptid must be the ptid of
409 the fork parent. At return inferior_ptid is the ptid of the
410 followed inferior. */
413 follow_fork_inferior (bool follow_child
, bool detach_fork
)
415 target_waitkind fork_kind
= inferior_thread ()->pending_follow
.kind ();
416 gdb_assert (fork_kind
== TARGET_WAITKIND_FORKED
417 || fork_kind
== TARGET_WAITKIND_VFORKED
);
418 bool has_vforked
= fork_kind
== TARGET_WAITKIND_VFORKED
;
419 ptid_t parent_ptid
= inferior_ptid
;
420 ptid_t child_ptid
= inferior_thread ()->pending_follow
.child_ptid ();
423 && !non_stop
/* Non-stop always resumes both branches. */
424 && current_ui
->prompt_state
== PROMPT_BLOCKED
425 && !(follow_child
|| detach_fork
|| sched_multi
))
427 /* The parent stays blocked inside the vfork syscall until the
428 child execs or exits. If we don't let the child run, then
429 the parent stays blocked. If we're telling the parent to run
430 in the foreground, the user will not be able to ctrl-c to get
431 back the terminal, effectively hanging the debug session. */
432 gdb_printf (gdb_stderr
, _("\
433 Can not resume the parent process over vfork in the foreground while\n\
434 holding the child stopped. Try \"set detach-on-fork\" or \
435 \"set schedule-multiple\".\n"));
439 inferior
*parent_inf
= current_inferior ();
440 inferior
*child_inf
= nullptr;
442 gdb_assert (parent_inf
->thread_waiting_for_vfork_done
== nullptr);
446 /* Detach new forked process? */
449 /* Before detaching from the child, remove all breakpoints
450 from it. If we forked, then this has already been taken
451 care of by infrun.c. If we vforked however, any
452 breakpoint inserted in the parent is visible in the
453 child, even those added while stopped in a vfork
454 catchpoint. This will remove the breakpoints from the
455 parent also, but they'll be reinserted below. */
458 /* Keep breakpoints list in sync. */
459 remove_breakpoints_inf (current_inferior ());
462 if (print_inferior_events
)
464 /* Ensure that we have a process ptid. */
465 ptid_t process_ptid
= ptid_t (child_ptid
.pid ());
467 target_terminal::ours_for_output ();
468 gdb_printf (_("[Detaching after %s from child %s]\n"),
469 has_vforked
? "vfork" : "fork",
470 target_pid_to_str (process_ptid
).c_str ());
475 /* Add process to GDB's tables. */
476 child_inf
= add_inferior (child_ptid
.pid ());
478 child_inf
->attach_flag
= parent_inf
->attach_flag
;
479 copy_terminal_info (child_inf
, parent_inf
);
480 child_inf
->gdbarch
= parent_inf
->gdbarch
;
481 copy_inferior_target_desc_info (child_inf
, parent_inf
);
483 child_inf
->symfile_flags
= SYMFILE_NO_READ
;
485 /* If this is a vfork child, then the address-space is
486 shared with the parent. */
489 child_inf
->pspace
= parent_inf
->pspace
;
490 child_inf
->aspace
= parent_inf
->aspace
;
492 exec_on_vfork (child_inf
);
494 /* The parent will be frozen until the child is done
495 with the shared region. Keep track of the
497 child_inf
->vfork_parent
= parent_inf
;
498 child_inf
->pending_detach
= 0;
499 parent_inf
->vfork_child
= child_inf
;
500 parent_inf
->pending_detach
= 0;
504 child_inf
->aspace
= new address_space ();
505 child_inf
->pspace
= new program_space (child_inf
->aspace
);
506 child_inf
->removable
= 1;
507 clone_program_space (child_inf
->pspace
, parent_inf
->pspace
);
513 /* If we detached from the child, then we have to be careful
514 to not insert breakpoints in the parent until the child
515 is done with the shared memory region. However, if we're
516 staying attached to the child, then we can and should
517 insert breakpoints, so that we can debug it. A
518 subsequent child exec or exit is enough to know when does
519 the child stops using the parent's address space. */
520 parent_inf
->thread_waiting_for_vfork_done
521 = detach_fork
? inferior_thread () : nullptr;
522 parent_inf
->pspace
->breakpoints_not_allowed
= detach_fork
;
527 /* Follow the child. */
529 if (print_inferior_events
)
531 std::string parent_pid
= target_pid_to_str (parent_ptid
);
532 std::string child_pid
= target_pid_to_str (child_ptid
);
534 target_terminal::ours_for_output ();
535 gdb_printf (_("[Attaching after %s %s to child %s]\n"),
537 has_vforked
? "vfork" : "fork",
541 /* Add the new inferior first, so that the target_detach below
542 doesn't unpush the target. */
544 child_inf
= add_inferior (child_ptid
.pid ());
546 child_inf
->attach_flag
= parent_inf
->attach_flag
;
547 copy_terminal_info (child_inf
, parent_inf
);
548 child_inf
->gdbarch
= parent_inf
->gdbarch
;
549 copy_inferior_target_desc_info (child_inf
, parent_inf
);
553 /* If this is a vfork child, then the address-space is shared
555 child_inf
->aspace
= parent_inf
->aspace
;
556 child_inf
->pspace
= parent_inf
->pspace
;
558 exec_on_vfork (child_inf
);
560 else if (detach_fork
)
562 /* We follow the child and detach from the parent: move the parent's
563 program space to the child. This simplifies some things, like
564 doing "next" over fork() and landing on the expected line in the
565 child (note, that is broken with "set detach-on-fork off").
567 Before assigning brand new spaces for the parent, remove
568 breakpoints from it: because the new pspace won't match
569 currently inserted locations, the normal detach procedure
570 wouldn't remove them, and we would leave them inserted when
572 remove_breakpoints_inf (parent_inf
);
574 child_inf
->aspace
= parent_inf
->aspace
;
575 child_inf
->pspace
= parent_inf
->pspace
;
576 parent_inf
->aspace
= new address_space ();
577 parent_inf
->pspace
= new program_space (parent_inf
->aspace
);
578 clone_program_space (parent_inf
->pspace
, child_inf
->pspace
);
580 /* The parent inferior is still the current one, so keep things
582 set_current_program_space (parent_inf
->pspace
);
586 child_inf
->aspace
= new address_space ();
587 child_inf
->pspace
= new program_space (child_inf
->aspace
);
588 child_inf
->removable
= 1;
589 child_inf
->symfile_flags
= SYMFILE_NO_READ
;
590 clone_program_space (child_inf
->pspace
, parent_inf
->pspace
);
594 gdb_assert (current_inferior () == parent_inf
);
596 /* If we are setting up an inferior for the child, target_follow_fork is
597 responsible for pushing the appropriate targets on the new inferior's
598 target stack and adding the initial thread (with ptid CHILD_PTID).
600 If we are not setting up an inferior for the child (because following
601 the parent and detach_fork is true), it is responsible for detaching
603 target_follow_fork (child_inf
, child_ptid
, fork_kind
, follow_child
,
606 /* target_follow_fork must leave the parent as the current inferior. If we
607 want to follow the child, we make it the current one below. */
608 gdb_assert (current_inferior () == parent_inf
);
610 /* If there is a child inferior, target_follow_fork must have created a thread
612 if (child_inf
!= nullptr)
613 gdb_assert (!child_inf
->thread_list
.empty ());
615 /* Clear the parent thread's pending follow field. Do this before calling
616 target_detach, so that the target can differentiate the two following
619 - We continue past a fork with "follow-fork-mode == child" &&
620 "detach-on-fork on", and therefore detach the parent. In that
621 case the target should not detach the fork child.
622 - We run to a fork catchpoint and the user types "detach". In that
623 case, the target should detach the fork child in addition to the
626 The former case will have pending_follow cleared, the later will have
627 pending_follow set. */
628 thread_info
*parent_thread
= find_thread_ptid (parent_inf
, parent_ptid
);
629 gdb_assert (parent_thread
!= nullptr);
630 parent_thread
->pending_follow
.set_spurious ();
632 /* Detach the parent if needed. */
635 /* If we're vforking, we want to hold on to the parent until
636 the child exits or execs. At child exec or exit time we
637 can remove the old breakpoints from the parent and detach
638 or resume debugging it. Otherwise, detach the parent now;
639 we'll want to reuse it's program/address spaces, but we
640 can't set them to the child before removing breakpoints
641 from the parent, otherwise, the breakpoints module could
642 decide to remove breakpoints from the wrong process (since
643 they'd be assigned to the same address space). */
647 gdb_assert (child_inf
->vfork_parent
== nullptr);
648 gdb_assert (parent_inf
->vfork_child
== nullptr);
649 child_inf
->vfork_parent
= parent_inf
;
650 child_inf
->pending_detach
= 0;
651 parent_inf
->vfork_child
= child_inf
;
652 parent_inf
->pending_detach
= detach_fork
;
654 else if (detach_fork
)
656 if (print_inferior_events
)
658 /* Ensure that we have a process ptid. */
659 ptid_t process_ptid
= ptid_t (parent_ptid
.pid ());
661 target_terminal::ours_for_output ();
662 gdb_printf (_("[Detaching after fork from "
664 target_pid_to_str (process_ptid
).c_str ());
667 target_detach (parent_inf
, 0);
671 /* If we ended up creating a new inferior, call post_create_inferior to inform
672 the various subcomponents. */
673 if (child_inf
!= nullptr)
675 /* If FOLLOW_CHILD, we leave CHILD_INF as the current inferior
676 (do not restore the parent as the current inferior). */
677 gdb::optional
<scoped_restore_current_thread
> maybe_restore
;
680 maybe_restore
.emplace ();
682 switch_to_thread (*child_inf
->threads ().begin ());
683 post_create_inferior (0);
689 /* Tell the target to follow the fork we're stopped at. Returns true
690 if the inferior should be resumed; false, if the target for some
691 reason decided it's best not to resume. */
696 bool follow_child
= (follow_fork_mode_string
== follow_fork_mode_child
);
697 bool should_resume
= true;
699 /* Copy user stepping state to the new inferior thread. FIXME: the
700 followed fork child thread should have a copy of most of the
701 parent thread structure's run control related fields, not just these.
702 Initialized to avoid "may be used uninitialized" warnings from gcc. */
703 struct breakpoint
*step_resume_breakpoint
= nullptr;
704 struct breakpoint
*exception_resume_breakpoint
= nullptr;
705 CORE_ADDR step_range_start
= 0;
706 CORE_ADDR step_range_end
= 0;
707 int current_line
= 0;
708 symtab
*current_symtab
= nullptr;
709 struct frame_id step_frame_id
= { 0 };
713 process_stratum_target
*wait_target
;
715 struct target_waitstatus wait_status
;
717 /* Get the last target status returned by target_wait(). */
718 get_last_target_status (&wait_target
, &wait_ptid
, &wait_status
);
720 /* If not stopped at a fork event, then there's nothing else to
722 if (wait_status
.kind () != TARGET_WAITKIND_FORKED
723 && wait_status
.kind () != TARGET_WAITKIND_VFORKED
)
726 /* Check if we switched over from WAIT_PTID, since the event was
728 if (wait_ptid
!= minus_one_ptid
729 && (current_inferior ()->process_target () != wait_target
730 || inferior_ptid
!= wait_ptid
))
732 /* We did. Switch back to WAIT_PTID thread, to tell the
733 target to follow it (in either direction). We'll
734 afterwards refuse to resume, and inform the user what
736 thread_info
*wait_thread
= find_thread_ptid (wait_target
, wait_ptid
);
737 switch_to_thread (wait_thread
);
738 should_resume
= false;
742 thread_info
*tp
= inferior_thread ();
744 /* If there were any forks/vforks that were caught and are now to be
745 followed, then do so now. */
746 switch (tp
->pending_follow
.kind ())
748 case TARGET_WAITKIND_FORKED
:
749 case TARGET_WAITKIND_VFORKED
:
751 ptid_t parent
, child
;
752 std::unique_ptr
<struct thread_fsm
> thread_fsm
;
754 /* If the user did a next/step, etc, over a fork call,
755 preserve the stepping state in the fork child. */
756 if (follow_child
&& should_resume
)
758 step_resume_breakpoint
= clone_momentary_breakpoint
759 (tp
->control
.step_resume_breakpoint
);
760 step_range_start
= tp
->control
.step_range_start
;
761 step_range_end
= tp
->control
.step_range_end
;
762 current_line
= tp
->current_line
;
763 current_symtab
= tp
->current_symtab
;
764 step_frame_id
= tp
->control
.step_frame_id
;
765 exception_resume_breakpoint
766 = clone_momentary_breakpoint (tp
->control
.exception_resume_breakpoint
);
767 thread_fsm
= tp
->release_thread_fsm ();
769 /* For now, delete the parent's sr breakpoint, otherwise,
770 parent/child sr breakpoints are considered duplicates,
771 and the child version will not be installed. Remove
772 this when the breakpoints module becomes aware of
773 inferiors and address spaces. */
774 delete_step_resume_breakpoint (tp
);
775 tp
->control
.step_range_start
= 0;
776 tp
->control
.step_range_end
= 0;
777 tp
->control
.step_frame_id
= null_frame_id
;
778 delete_exception_resume_breakpoint (tp
);
781 parent
= inferior_ptid
;
782 child
= tp
->pending_follow
.child_ptid ();
784 /* If handling a vfork, stop all the inferior's threads, they will be
785 restarted when the vfork shared region is complete. */
786 if (tp
->pending_follow
.kind () == TARGET_WAITKIND_VFORKED
787 && target_is_non_stop_p ())
788 stop_all_threads ("handling vfork", tp
->inf
);
790 process_stratum_target
*parent_targ
= tp
->inf
->process_target ();
791 /* Set up inferior(s) as specified by the caller, and tell the
792 target to do whatever is necessary to follow either parent
794 if (follow_fork_inferior (follow_child
, detach_fork
))
796 /* Target refused to follow, or there's some other reason
797 we shouldn't resume. */
802 /* This makes sure we don't try to apply the "Switched
803 over from WAIT_PID" logic above. */
804 nullify_last_target_wait_ptid ();
806 /* If we followed the child, switch to it... */
809 thread_info
*child_thr
= find_thread_ptid (parent_targ
, child
);
810 switch_to_thread (child_thr
);
812 /* ... and preserve the stepping state, in case the
813 user was stepping over the fork call. */
816 tp
= inferior_thread ();
817 tp
->control
.step_resume_breakpoint
818 = step_resume_breakpoint
;
819 tp
->control
.step_range_start
= step_range_start
;
820 tp
->control
.step_range_end
= step_range_end
;
821 tp
->current_line
= current_line
;
822 tp
->current_symtab
= current_symtab
;
823 tp
->control
.step_frame_id
= step_frame_id
;
824 tp
->control
.exception_resume_breakpoint
825 = exception_resume_breakpoint
;
826 tp
->set_thread_fsm (std::move (thread_fsm
));
830 /* If we get here, it was because we're trying to
831 resume from a fork catchpoint, but, the user
832 has switched threads away from the thread that
833 forked. In that case, the resume command
834 issued is most likely not applicable to the
835 child, so just warn, and refuse to resume. */
836 warning (_("Not resuming: switched threads "
837 "before following fork child."));
840 /* Reset breakpoints in the child as appropriate. */
841 follow_inferior_reset_breakpoints ();
846 case TARGET_WAITKIND_SPURIOUS
:
847 /* Nothing to follow. */
850 internal_error ("Unexpected pending_follow.kind %d\n",
851 tp
->pending_follow
.kind ());
855 return should_resume
;
859 follow_inferior_reset_breakpoints (void)
861 struct thread_info
*tp
= inferior_thread ();
863 /* Was there a step_resume breakpoint? (There was if the user
864 did a "next" at the fork() call.) If so, explicitly reset its
865 thread number. Cloned step_resume breakpoints are disabled on
866 creation, so enable it here now that it is associated with the
869 step_resumes are a form of bp that are made to be per-thread.
870 Since we created the step_resume bp when the parent process
871 was being debugged, and now are switching to the child process,
872 from the breakpoint package's viewpoint, that's a switch of
873 "threads". We must update the bp's notion of which thread
874 it is for, or it'll be ignored when it triggers. */
876 if (tp
->control
.step_resume_breakpoint
)
878 breakpoint_re_set_thread (tp
->control
.step_resume_breakpoint
);
879 tp
->control
.step_resume_breakpoint
->loc
->enabled
= 1;
882 /* Treat exception_resume breakpoints like step_resume breakpoints. */
883 if (tp
->control
.exception_resume_breakpoint
)
885 breakpoint_re_set_thread (tp
->control
.exception_resume_breakpoint
);
886 tp
->control
.exception_resume_breakpoint
->loc
->enabled
= 1;
889 /* Reinsert all breakpoints in the child. The user may have set
890 breakpoints after catching the fork, in which case those
891 were never set in the child, but only in the parent. This makes
892 sure the inserted breakpoints match the breakpoint list. */
894 breakpoint_re_set ();
895 insert_breakpoints ();
898 /* The child has exited or execed: resume THREAD, a thread of the parent,
899 if it was meant to be executing. */
902 proceed_after_vfork_done (thread_info
*thread
)
904 if (thread
->state
== THREAD_RUNNING
905 && !thread
->executing ()
906 && !thread
->stop_requested
907 && thread
->stop_signal () == GDB_SIGNAL_0
)
909 infrun_debug_printf ("resuming vfork parent thread %s",
910 thread
->ptid
.to_string ().c_str ());
912 switch_to_thread (thread
);
913 clear_proceed_status (0);
914 proceed ((CORE_ADDR
) -1, GDB_SIGNAL_DEFAULT
);
918 /* Called whenever we notice an exec or exit event, to handle
919 detaching or resuming a vfork parent. */
922 handle_vfork_child_exec_or_exit (int exec
)
924 struct inferior
*inf
= current_inferior ();
926 if (inf
->vfork_parent
)
928 inferior
*resume_parent
= nullptr;
930 /* This exec or exit marks the end of the shared memory region
931 between the parent and the child. Break the bonds. */
932 inferior
*vfork_parent
= inf
->vfork_parent
;
933 inf
->vfork_parent
->vfork_child
= nullptr;
934 inf
->vfork_parent
= nullptr;
936 /* If the user wanted to detach from the parent, now is the
938 if (vfork_parent
->pending_detach
)
940 struct program_space
*pspace
;
941 struct address_space
*aspace
;
943 /* follow-fork child, detach-on-fork on. */
945 vfork_parent
->pending_detach
= 0;
947 scoped_restore_current_pspace_and_thread restore_thread
;
949 /* We're letting loose of the parent. */
950 thread_info
*tp
= any_live_thread_of_inferior (vfork_parent
);
951 switch_to_thread (tp
);
953 /* We're about to detach from the parent, which implicitly
954 removes breakpoints from its address space. There's a
955 catch here: we want to reuse the spaces for the child,
956 but, parent/child are still sharing the pspace at this
957 point, although the exec in reality makes the kernel give
958 the child a fresh set of new pages. The problem here is
959 that the breakpoints module being unaware of this, would
960 likely chose the child process to write to the parent
961 address space. Swapping the child temporarily away from
962 the spaces has the desired effect. Yes, this is "sort
965 pspace
= inf
->pspace
;
966 aspace
= inf
->aspace
;
967 inf
->aspace
= nullptr;
968 inf
->pspace
= nullptr;
970 if (print_inferior_events
)
973 = target_pid_to_str (ptid_t (vfork_parent
->pid
));
975 target_terminal::ours_for_output ();
979 gdb_printf (_("[Detaching vfork parent %s "
980 "after child exec]\n"), pidstr
.c_str ());
984 gdb_printf (_("[Detaching vfork parent %s "
985 "after child exit]\n"), pidstr
.c_str ());
989 target_detach (vfork_parent
, 0);
992 inf
->pspace
= pspace
;
993 inf
->aspace
= aspace
;
997 /* We're staying attached to the parent, so, really give the
998 child a new address space. */
999 inf
->pspace
= new program_space (maybe_new_address_space ());
1000 inf
->aspace
= inf
->pspace
->aspace
;
1002 set_current_program_space (inf
->pspace
);
1004 resume_parent
= vfork_parent
;
1008 /* If this is a vfork child exiting, then the pspace and
1009 aspaces were shared with the parent. Since we're
1010 reporting the process exit, we'll be mourning all that is
1011 found in the address space, and switching to null_ptid,
1012 preparing to start a new inferior. But, since we don't
1013 want to clobber the parent's address/program spaces, we
1014 go ahead and create a new one for this exiting
1017 /* Switch to no-thread while running clone_program_space, so
1018 that clone_program_space doesn't want to read the
1019 selected frame of a dead process. */
1020 scoped_restore_current_thread restore_thread
;
1021 switch_to_no_thread ();
1023 inf
->pspace
= new program_space (maybe_new_address_space ());
1024 inf
->aspace
= inf
->pspace
->aspace
;
1025 set_current_program_space (inf
->pspace
);
1027 inf
->symfile_flags
= SYMFILE_NO_READ
;
1028 clone_program_space (inf
->pspace
, vfork_parent
->pspace
);
1030 resume_parent
= vfork_parent
;
1033 gdb_assert (current_program_space
== inf
->pspace
);
1035 if (non_stop
&& resume_parent
!= nullptr)
1037 /* If the user wanted the parent to be running, let it go
1039 scoped_restore_current_thread restore_thread
;
1041 infrun_debug_printf ("resuming vfork parent process %d",
1042 resume_parent
->pid
);
1044 for (thread_info
*thread
: resume_parent
->threads ())
1045 proceed_after_vfork_done (thread
);
1050 /* Handle TARGET_WAITKIND_VFORK_DONE. */
/* NOTE(review): this text is a line-mangled extraction; the braces, the
   "static void" return-type line, an early "return" after the debug
   printf below, and (per original line 1093) a start_step_over () call
   before the step_over_info_valid_p check all appear to have been lost.
   Verify against upstream infrun.c before compiling.  */
1053 handle_vfork_done (thread_info
*event_thread
)
1055 /* We only care about this event if inferior::thread_waiting_for_vfork_done is
1056 set, that is if we are waiting for a vfork child not under our control
1057 (because we detached it) to exec or exit.
1059 If an inferior has vforked and we are debugging the child, we don't use
1060 the vfork-done event to get notified about the end of the shared address
1061 space window. We rely instead on the child's exec or exit event, and the
1062 inferior::vfork_{parent,child} fields are used instead. See
1063 handle_vfork_child_exec_or_exit for that. */
1064 if (event_thread
->inf
->thread_waiting_for_vfork_done
== nullptr)
1066 infrun_debug_printf ("not waiting for a vfork-done event");
1070 INFRUN_SCOPED_DEBUG_ENTER_EXIT
;
1072 /* We stopped all threads (other than the vforking thread) of the inferior in
1073 follow_fork and kept them stopped until now. It should therefore not be
1074 possible for another thread to have reported a vfork during that window.
1075 If THREAD_WAITING_FOR_VFORK_DONE is set, it has to be the same thread whose
1076 vfork-done we are handling right now. */
1077 gdb_assert (event_thread
->inf
->thread_waiting_for_vfork_done
== event_thread
);
/* The wait is over: clear the marker and allow breakpoint insertion in
   the (previously shared) program space again.  */
1079 event_thread
->inf
->thread_waiting_for_vfork_done
= nullptr;
1080 event_thread
->inf
->pspace
->breakpoints_not_allowed
= 0;
1082 /* On non-stop targets, we stopped all the inferior's threads in follow_fork,
1083 resume them now. On all-stop targets, everything that needs to be resumed
1084 will be when we resume the event thread. */
1085 if (target_is_non_stop_p ())
1087 /* restart_threads and start_step_over may change the current thread, make
1088 sure we leave the event thread as the current thread. */
1089 scoped_restore_current_thread restore_thread
;
1091 insert_breakpoints ();
1094 if (!step_over_info_valid_p ())
1095 restart_threads (event_thread
, event_thread
->inf
);
1099 /* Enum strings for "set|show follow-exec-mode". */
1101 static const char follow_exec_mode_new
[] = "new";
1102 static const char follow_exec_mode_same
[] = "same";
/* NOTE(review): the array initializer's braces and its terminating
   nullptr entry appear to have been lost in this extraction -- verify
   against upstream before compiling.  */
1103 static const char *const follow_exec_mode_names
[] =
1105 follow_exec_mode_new
,
1106 follow_exec_mode_same
,
/* The default "follow-exec-mode" is "same": keep using the current
   inferior and program space after an exec.  */
1110 static const char *follow_exec_mode_string
= follow_exec_mode_same
;
1112 show_follow_exec_mode_string (struct ui_file
*file
, int from_tty
,
1113 struct cmd_list_element
*c
, const char *value
)
1115 gdb_printf (file
, _("Follow exec mode is \"%s\".\n"), value
);
1118 /* EXEC_FILE_TARGET is assumed to be non-NULL. */
/* NOTE(review): line-mangled extraction.  Braces, the "static void"
   return-type line, the delete_thread call in the thread-pruning loop,
   and several trailing call arguments (e.g. the exec_file_target
   argument to the gdb_printf and warning calls below) appear to have
   been dropped -- verify against upstream infrun.c before compiling.
   Follows an inferior through an exec: prunes stale threads and
   breakpoint state, loads the new executable's symbols, and either
   reuses the current inferior or (follow-exec-mode "new") creates a
   fresh one.  */
1121 follow_exec (ptid_t ptid
, const char *exec_file_target
)
1123 int pid
= ptid
.pid ();
1124 ptid_t process_ptid
;
1126 /* Switch terminal for any messages produced e.g. by
1127 breakpoint_re_set. */
1128 target_terminal::ours_for_output ();
1130 /* This is an exec event that we actually wish to pay attention to.
1131 Refresh our symbol table to the newly exec'd program, remove any
1132 momentary bp's, etc.
1134 If there are breakpoints, they aren't really inserted now,
1135 since the exec() transformed our inferior into a fresh set
1138 We want to preserve symbolic breakpoints on the list, since
1139 we have hopes that they can be reset after the new a.out's
1140 symbol table is read.
1142 However, any "raw" breakpoints must be removed from the list
1143 (e.g., the solib bp's), since their address is probably invalid
1146 And, we DON'T want to call delete_breakpoints() here, since
1147 that may write the bp's "shadow contents" (the instruction
1148 value that was overwritten with a TRAP instruction). Since
1149 we now have a new a.out, those shadow contents aren't valid. */
1151 mark_breakpoints_out ();
1153 /* The target reports the exec event to the main thread, even if
1154 some other thread does the exec, and even if the main thread was
1155 stopped or already gone. We may still have non-leader threads of
1156 the process on our list. E.g., on targets that don't have thread
1157 exit events (like remote); or on native Linux in non-stop mode if
1158 there were only two threads in the inferior and the non-leader
1159 one is the one that execs (and nothing forces an update of the
1160 thread list up to here). When debugging remotely, it's best to
1161 avoid extra traffic, when possible, so avoid syncing the thread
1162 list with the target, and instead go ahead and delete all threads
1163 of the process but one that reported the event. Note this must
1164 be done before calling update_breakpoints_after_exec, as
1165 otherwise clearing the threads' resources would reference stale
1166 thread breakpoints -- it may have been one of these threads that
1167 stepped across the exec. We could just clear their stepping
1168 states, but as long as we're iterating, might as well delete
1169 them. Deleting them now rather than at the next user-visible
1170 stop provides a nicer sequence of events for user and MI
1172 for (thread_info
*th
: all_threads_safe ())
1173 if (th
->ptid
.pid () == pid
&& th
->ptid
!= ptid
)
/* NOTE(review): the loop body (a delete_thread call) appears to have
   been dropped here by the extraction.  */
1176 /* We also need to clear any left over stale state for the
1177 leader/event thread. E.g., if there was any step-resume
1178 breakpoint or similar, it's gone now. We cannot truly
1179 step-to-next statement through an exec(). */
1180 thread_info
*th
= inferior_thread ();
1181 th
->control
.step_resume_breakpoint
= nullptr;
1182 th
->control
.exception_resume_breakpoint
= nullptr;
1183 th
->control
.single_step_breakpoints
= nullptr;
1184 th
->control
.step_range_start
= 0;
1185 th
->control
.step_range_end
= 0;
1187 /* The user may have had the main thread held stopped in the
1188 previous image (e.g., schedlock on, or non-stop). Release
1190 th
->stop_requested
= 0;
1192 update_breakpoints_after_exec ();
1194 /* What is this a.out's name? */
1195 process_ptid
= ptid_t (pid
);
1196 gdb_printf (_("%s is executing new program: %s\n"),
1197 target_pid_to_str (process_ptid
).c_str (),
1200 /* We've followed the inferior through an exec. Therefore, the
1201 inferior has essentially been killed & reborn. */
1203 breakpoint_init_inferior (inf_execd
);
1205 gdb::unique_xmalloc_ptr
<char> exec_file_host
1206 = exec_file_find (exec_file_target
, nullptr);
1208 /* If we were unable to map the executable target pathname onto a host
1209 pathname, tell the user that. Otherwise GDB's subsequent behavior
1210 is confusing. Maybe it would even be better to stop at this point
1211 so that the user can specify a file manually before continuing. */
1212 if (exec_file_host
== nullptr)
1213 warning (_("Could not load symbols for executable %s.\n"
1214 "Do you need \"set sysroot\"?"),
1217 /* Reset the shared library package. This ensures that we get a
1218 shlib event when the child reaches "_start", at which point the
1219 dld will have had a chance to initialize the child. */
1220 /* Also, loading a symbol file below may trigger symbol lookups, and
1221 we don't want those to be satisfied by the libraries of the
1222 previous incarnation of this process. */
1223 no_shared_libraries (nullptr, 0);
1225 struct inferior
*inf
= current_inferior ();
1227 if (follow_exec_mode_string
== follow_exec_mode_new
)
1229 /* The user wants to keep the old inferior and program spaces
1230 around. Create a new fresh one, and switch to it. */
1232 /* Do exit processing for the original inferior before setting the new
1233 inferior's pid. Having two inferiors with the same pid would confuse
1234 find_inferior_p(t)id. Transfer the terminal state and info from the
1235 old to the new inferior. */
1236 inferior
*new_inferior
= add_inferior_with_spaces ();
1238 swap_terminal_info (new_inferior
, inf
);
1239 exit_inferior_silent (inf
);
1241 new_inferior
->pid
= pid
;
1242 target_follow_exec (new_inferior
, ptid
, exec_file_target
);
1244 /* We continue with the new inferior. */
1249 /* The old description may no longer be fit for the new image.
1250 E.g, a 64-bit process exec'ed a 32-bit process. Clear the
1251 old description; we'll read a new one below. No need to do
1252 this on "follow-exec-mode new", as the old inferior stays
1253 around (its description is later cleared/refetched on
1255 target_clear_description ();
1256 target_follow_exec (inf
, ptid
, exec_file_target
);
1259 gdb_assert (current_inferior () == inf
);
1260 gdb_assert (current_program_space
== inf
->pspace
);
1262 /* Attempt to open the exec file. SYMFILE_DEFER_BP_RESET is used
1263 because the proper displacement for a PIE (Position Independent
1264 Executable) main symbol file will only be computed by
1265 solib_create_inferior_hook below. breakpoint_re_set would fail
1266 to insert the breakpoints with the zero displacement. */
1267 try_open_exec_file (exec_file_host
.get (), inf
, SYMFILE_DEFER_BP_RESET
);
1269 /* If the target can specify a description, read it. Must do this
1270 after flipping to the new executable (because the target supplied
1271 description must be compatible with the executable's
1272 architecture, and the old executable may e.g., be 32-bit, while
1273 the new one 64-bit), and before anything involving memory or
1275 target_find_description ();
1277 gdb::observers::inferior_execd
.notify (inf
);
1279 breakpoint_re_set ();
1281 /* Reinsert all breakpoints. (Those which were symbolic have
1282 been reset to the proper address in the new a.out, thanks
1283 to symbol_file_command...). */
1284 insert_breakpoints ();
1286 /* The next resume of this inferior should bring it to the shlib
1287 startup breakpoints. (If the user had also set bp's on
1288 "main" from the old (parent) process, then they'll auto-
1289 matically get reset there in the new process.). */
1292 /* The chain of threads that need to do a step-over operation to get
1293 past e.g., a breakpoint. What technique is used to step over the
1294 breakpoint/watchpoint does not matter -- all threads end up in the
1295 same queue, to maintain rough temporal order of execution, in order
1296 to avoid starvation, otherwise, we could e.g., find ourselves
1297 constantly stepping the same couple threads past their breakpoints
1298 over and over, if the single-step finishes fast enough. */
1299 thread_step_over_list global_thread_step_over_list
;
1301 /* Bit flags indicating what the thread needs to step over. */
/* NOTE(review): the enum's opening/closing braces appear to have been
   dropped by this extraction; the truncated comment before
   STEP_OVER_WATCHPOINT likewise lost its final words.  */
1303 enum step_over_what_flag
1305 /* Step over a breakpoint. */
1306 STEP_OVER_BREAKPOINT
= 1,
1308 /* Step past a non-continuable watchpoint, in order to let the
1309 instruction execute so we can evaluate the watchpoint
1311 STEP_OVER_WATCHPOINT
= 2
/* Make the flag values above combinable as a bit mask.  */
1313 DEF_ENUM_FLAGS_TYPE (enum step_over_what_flag
, step_over_what
);
1315 /* Info about an instruction that is being stepped over. */
1317 struct step_over_info
1319 /* If we're stepping past a breakpoint, this is the address space
1320 and address of the instruction the breakpoint is set at. We'll
1321 skip inserting all breakpoints here. Valid iff ASPACE is
1323 const address_space
*aspace
= nullptr;
1324 CORE_ADDR address
= 0;
1326 /* The instruction being stepped over triggers a nonsteppable
1327 watchpoint. If true, we'll skip inserting watchpoints. */
1328 int nonsteppable_watchpoint_p
= 0;
1330 /* The thread's global number. */
/* NOTE(review): an "int thread = -1;" member and the struct's closing
   "};" appear to have been dropped by this extraction --
   set_step_over_info and clear_step_over_info below both assign
   step_over_info.thread.  */
1334 /* The step-over info of the location that is being stepped over.
1336 Note that with async/breakpoint always-inserted mode, a user might
1337 set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
1338 being stepped over. As setting a new breakpoint inserts all
1339 breakpoints, we need to make sure the breakpoint being stepped over
1340 isn't inserted then. We do that by only clearing the step-over
1341 info when the step-over is actually finished (or aborted).
1343 Presently GDB can only step over one breakpoint at any given time.
1344 Given threads that can't run code in the same address space as the
1345 breakpoint's can't really miss the breakpoint, GDB could be taught
1346 to step-over at most one breakpoint per address space (so this info
1347 could move to the address space object if/when GDB is extended).
1348 The set of breakpoints being stepped over will normally be much
1349 smaller than the set of all breakpoints, so a flag in the
1350 breakpoint location structure would be wasteful. A separate list
1351 also saves complexity and run-time, as otherwise we'd have to go
1352 through all breakpoint locations clearing their flag whenever we
1353 start a new sequence. Similar considerations weigh against storing
1354 this info in the thread object. Plus, not all step overs actually
1355 have breakpoint locations -- e.g., stepping past a single-step
1356 breakpoint, or stepping to complete a non-continuable
1358 static struct step_over_info step_over_info
;
1360 /* Record the address of the breakpoint/instruction we're currently
1362 N.B. We record the aspace and address now, instead of say just the thread,
1363 because when we need the info later the thread may be running. */
1366 set_step_over_info (const address_space
*aspace
, CORE_ADDR address
,
1367 int nonsteppable_watchpoint_p
,
1370 step_over_info
.aspace
= aspace
;
1371 step_over_info
.address
= address
;
1372 step_over_info
.nonsteppable_watchpoint_p
= nonsteppable_watchpoint_p
;
1373 step_over_info
.thread
= thread
;
1376 /* Called when we're not longer stepping over a breakpoint / an
1377 instruction, so all breakpoints are free to be (re)inserted. */
1380 clear_step_over_info (void)
1382 infrun_debug_printf ("clearing step over info");
1383 step_over_info
.aspace
= nullptr;
1384 step_over_info
.address
= 0;
1385 step_over_info
.nonsteppable_watchpoint_p
= 0;
1386 step_over_info
.thread
= -1;
1392 stepping_past_instruction_at (struct address_space
*aspace
,
1395 return (step_over_info
.aspace
!= nullptr
1396 && breakpoint_address_match (aspace
, address
,
1397 step_over_info
.aspace
,
1398 step_over_info
.address
));
1404 thread_is_stepping_over_breakpoint (int thread
)
1406 return (step_over_info
.thread
!= -1
1407 && thread
== step_over_info
.thread
);
1413 stepping_past_nonsteppable_watchpoint (void)
1415 return step_over_info
.nonsteppable_watchpoint_p
;
1418 /* Returns true if step-over info is valid. */
1421 step_over_info_valid_p (void)
1423 return (step_over_info
.aspace
!= nullptr
1424 || stepping_past_nonsteppable_watchpoint ());
1428 /* Displaced stepping. */
1430 /* In non-stop debugging mode, we must take special care to manage
1431 breakpoints properly; in particular, the traditional strategy for
1432 stepping a thread past a breakpoint it has hit is unsuitable.
1433 'Displaced stepping' is a tactic for stepping one thread past a
1434 breakpoint it has hit while ensuring that other threads running
1435 concurrently will hit the breakpoint as they should.
1437 The traditional way to step a thread T off a breakpoint in a
1438 multi-threaded program in all-stop mode is as follows:
1440 a0) Initially, all threads are stopped, and breakpoints are not
1442 a1) We single-step T, leaving breakpoints uninserted.
1443 a2) We insert breakpoints, and resume all threads.
1445 In non-stop debugging, however, this strategy is unsuitable: we
1446 don't want to have to stop all threads in the system in order to
1447 continue or step T past a breakpoint. Instead, we use displaced
1450 n0) Initially, T is stopped, other threads are running, and
1451 breakpoints are inserted.
1452 n1) We copy the instruction "under" the breakpoint to a separate
1453 location, outside the main code stream, making any adjustments
1454 to the instruction, register, and memory state as directed by
1456 n2) We single-step T over the instruction at its new location.
1457 n3) We adjust the resulting register and memory state as directed
1458 by T's architecture. This includes resetting T's PC to point
1459 back into the main instruction stream.
1462 This approach depends on the following gdbarch methods:
1464 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1465 indicate where to copy the instruction, and how much space must
1466 be reserved there. We use these in step n1.
1468 - gdbarch_displaced_step_copy_insn copies an instruction to a new
1469 address, and makes any necessary adjustments to the instruction,
1470 register contents, and memory. We use this in step n1.
1472 - gdbarch_displaced_step_fixup adjusts registers and memory after
1473 we have successfully single-stepped the instruction, to yield the
1474 same effect the instruction would have had if we had executed it
1475 at its original address. We use this in step n3.
1477 The gdbarch_displaced_step_copy_insn and
1478 gdbarch_displaced_step_fixup functions must be written so that
1479 copying an instruction with gdbarch_displaced_step_copy_insn,
1480 single-stepping across the copied instruction, and then applying
1481 gdbarch_displaced_step_fixup should have the same effects on the
1482 thread's memory and registers as stepping the instruction in place
1483 would have. Exactly which responsibilities fall to the copy and
1484 which fall to the fixup is up to the author of those functions.
1486 See the comments in gdbarch.sh for details.
1488 Note that displaced stepping and software single-step cannot
1489 currently be used in combination, although with some care I think
1490 they could be made to. Software single-step works by placing
1491 breakpoints on all possible subsequent instructions; if the
1492 displaced instruction is a PC-relative jump, those breakpoints
1493 could fall in very strange places --- on pages that aren't
1494 executable, or at addresses that are not proper instruction
1495 boundaries. (We do generally let other threads run while we wait
1496 to hit the software single-step breakpoint, and they might
1497 encounter such a corrupted instruction.) One way to work around
1498 this would be to have gdbarch_displaced_step_copy_insn fully
1499 simulate the effect of PC-relative instructions (and return NULL)
1500 on architectures that use software single-stepping.
1502 In non-stop mode, we can have independent and simultaneous step
1503 requests, so more than one thread may need to simultaneously step
1504 over a breakpoint. The current implementation assumes there is
1505 only one scratch space per process. In this case, we have to
1506 serialize access to the scratch space. If thread A wants to step
1507 over a breakpoint, but we are currently waiting for some other
1508 thread to complete a displaced step, we leave thread A stopped and
1509 place it in the displaced_step_request_queue. Whenever a displaced
1510 step finishes, we pick the next thread in the queue and start a new
1511 displaced step operation on it. See displaced_step_prepare and
1512 displaced_step_finish for details. */
1514 /* Return true if THREAD is doing a displaced step. */
1517 displaced_step_in_progress_thread (thread_info
*thread
)
1519 gdb_assert (thread
!= nullptr);
1521 return thread
->displaced_step_state
.in_progress ();
1524 /* Return true if INF has a thread doing a displaced step. */
1527 displaced_step_in_progress (inferior
*inf
)
1529 return inf
->displaced_step_state
.in_progress_count
> 0;
1532 /* Return true if any thread is doing a displaced step. */
1535 displaced_step_in_progress_any_thread ()
1537 for (inferior
*inf
: all_non_exited_inferiors ())
1539 if (displaced_step_in_progress (inf
))
1547 infrun_inferior_exit (struct inferior
*inf
)
1549 inf
->displaced_step_state
.reset ();
1550 inf
->thread_waiting_for_vfork_done
= nullptr;
1554 infrun_inferior_execd (inferior
*inf
)
1556 /* If some threads where was doing a displaced step in this inferior at the
1557 moment of the exec, they no longer exist. Even if the exec'ing thread
1558 doing a displaced step, we don't want to to any fixup nor restore displaced
1559 stepping buffer bytes. */
1560 inf
->displaced_step_state
.reset ();
1562 for (thread_info
*thread
: inf
->threads ())
1563 thread
->displaced_step_state
.reset ();
1565 /* Since an in-line step is done with everything else stopped, if there was
1566 one in progress at the time of the exec, it must have been the exec'ing
1568 clear_step_over_info ();
1570 inf
->thread_waiting_for_vfork_done
= nullptr;
1573 /* If ON, and the architecture supports it, GDB will use displaced
1574 stepping to step over breakpoints. If OFF, or if the architecture
1575 doesn't support it, GDB will instead use the traditional
1576 hold-and-step approach. If AUTO (which is the default), GDB will
1577 decide which technique to use to step over breakpoints depending on
1578 whether the target works in a non-stop way (see use_displaced_stepping). */
/* Backing store for the "set/show displaced-stepping" tri-state setting
   (see show_can_use_displaced_stepping below).  */
1580 static enum auto_boolean can_use_displaced_stepping
= AUTO_BOOLEAN_AUTO
;
1583 show_can_use_displaced_stepping (struct ui_file
*file
, int from_tty
,
1584 struct cmd_list_element
*c
,
1587 if (can_use_displaced_stepping
== AUTO_BOOLEAN_AUTO
)
1589 _("Debugger's willingness to use displaced stepping "
1590 "to step over breakpoints is %s (currently %s).\n"),
1591 value
, target_is_non_stop_p () ? "on" : "off");
1594 _("Debugger's willingness to use displaced stepping "
1595 "to step over breakpoints is %s.\n"), value
);
1598 /* Return true if the gdbarch implements the required methods to use
1599 displaced stepping. */
1602 gdbarch_supports_displaced_stepping (gdbarch
*arch
)
1604 /* Only check for the presence of `prepare`. The gdbarch verification ensures
1605 that if `prepare` is provided, so is `finish`. */
1606 return gdbarch_displaced_step_prepare_p (arch
);
1609 /* Return non-zero if displaced stepping can/should be used to step
1610 over breakpoints of thread TP. */
1613 use_displaced_stepping (thread_info
*tp
)
1615 /* If the user disabled it explicitly, don't use displaced stepping. */
1616 if (can_use_displaced_stepping
== AUTO_BOOLEAN_FALSE
)
1619 /* If "auto", only use displaced stepping if the target operates in a non-stop
1621 if (can_use_displaced_stepping
== AUTO_BOOLEAN_AUTO
1622 && !target_is_non_stop_p ())
1625 gdbarch
*gdbarch
= get_thread_regcache (tp
)->arch ();
1627 /* If the architecture doesn't implement displaced stepping, don't use
1629 if (!gdbarch_supports_displaced_stepping (gdbarch
))
1632 /* If recording, don't use displaced stepping. */
1633 if (find_record_target () != nullptr)
1636 /* If displaced stepping failed before for this inferior, don't bother trying
1638 if (tp
->inf
->displaced_step_state
.failed_before
)
1644 /* Simple function wrapper around displaced_step_thread_state::reset. */
1647 displaced_step_reset (displaced_step_thread_state
*displaced
)
1649 displaced
->reset ();
1652 /* A cleanup that wraps displaced_step_reset. We use this instead of, say,
1653 SCOPE_EXIT, because it needs to be discardable with "cleanup.release ()". */
1655 using displaced_step_reset_cleanup
= FORWARD_SCOPE_EXIT (displaced_step_reset
);
1660 displaced_step_dump_bytes (const gdb_byte
*buf
, size_t len
)
1664 for (size_t i
= 0; i
< len
; i
++)
1667 ret
+= string_printf ("%02x", buf
[i
]);
1669 ret
+= string_printf (" %02x", buf
[i
]);
1675 /* Prepare to single-step, using displaced stepping.
1677 Note that we cannot use displaced stepping when we have a signal to
1678 deliver. If we have a signal to deliver and an instruction to step
1679 over, then after the step, there will be no indication from the
1680 target whether the thread entered a signal handler or ignored the
1681 signal and stepped over the instruction successfully --- both cases
1682 result in a simple SIGTRAP. In the first case we mustn't do a
1683 fixup, and in the second case we must --- but we can't tell which.
1684 Comments in the code for 'random signals' in handle_inferior_event
1685 explain how we handle this case instead.
1687 Returns DISPLACED_STEP_PREPARE_STATUS_OK if preparing was successful -- this
1688 thread is going to be stepped now; DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
1689 if displaced stepping this thread got queued; or
1690 DISPLACED_STEP_PREPARE_STATUS_CANT if this instruction can't be displaced
/* NOTE(review): line-mangled extraction -- braces and blank lines
   appear lost throughout this function; verify against upstream
   infrun.c before compiling.  */
1693 static displaced_step_prepare_status
1694 displaced_step_prepare_throw (thread_info
*tp
)
1696 regcache
*regcache
= get_thread_regcache (tp
);
1697 struct gdbarch
*gdbarch
= regcache
->arch ();
1698 displaced_step_thread_state
&disp_step_thread_state
1699 = tp
->displaced_step_state
;
1701 /* We should never reach this function if the architecture does not
1702 support displaced stepping. */
1703 gdb_assert (gdbarch_supports_displaced_stepping (gdbarch
));
1705 /* Nor if the thread isn't meant to step over a breakpoint. */
1706 gdb_assert (tp
->control
.trap_expected
);
1708 /* Disable range stepping while executing in the scratch pad. We
1709 want a single-step even if executing the displaced instruction in
1710 the scratch buffer lands within the stepping range (e.g., a
1712 tp
->control
.may_range_step
= 0;
1714 /* We are about to start a displaced step for this thread. If one is already
1715 in progress, something's wrong. */
1716 gdb_assert (!disp_step_thread_state
.in_progress ());
1718 if (tp
->inf
->displaced_step_state
.unavailable
)
1720 /* The gdbarch tells us it's not worth asking to try a prepare because
1721 it is likely that it will return unavailable, so don't bother asking. */
1723 displaced_debug_printf ("deferring step of %s",
1724 tp
->ptid
.to_string ().c_str ());
1726 global_thread_step_over_chain_enqueue (tp
);
1727 return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
;
1730 displaced_debug_printf ("displaced-stepping %s now",
1731 tp
->ptid
.to_string ().c_str ());
/* Temporarily make TP current: the prepare hook below may need to read
   TP's registers/memory.  */
1733 scoped_restore_current_thread restore_thread
;
1735 switch_to_thread (tp
);
1737 CORE_ADDR original_pc
= regcache_read_pc (regcache
);
1738 CORE_ADDR displaced_pc
;
1740 displaced_step_prepare_status status
1741 = gdbarch_displaced_step_prepare (gdbarch
, tp
, displaced_pc
);
1743 if (status
== DISPLACED_STEP_PREPARE_STATUS_CANT
)
1745 displaced_debug_printf ("failed to prepare (%s)",
1746 tp
->ptid
.to_string ().c_str ());
1748 return DISPLACED_STEP_PREPARE_STATUS_CANT
;
1750 else if (status
== DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
)
1752 /* Not enough displaced stepping resources available, defer this
1753 request by placing it in the queue. */
1755 displaced_debug_printf ("not enough resources available, "
1756 "deferring step of %s",
1757 tp
->ptid
.to_string ().c_str ());
1759 global_thread_step_over_chain_enqueue (tp
);
1761 return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
;
1764 gdb_assert (status
== DISPLACED_STEP_PREPARE_STATUS_OK
);
1766 /* Save the information we need to fix things up if the step
1768 disp_step_thread_state
.set (gdbarch
);
1770 tp
->inf
->displaced_step_state
.in_progress_count
++;
1772 displaced_debug_printf ("prepared successfully thread=%s, "
1773 "original_pc=%s, displaced_pc=%s",
1774 tp
->ptid
.to_string ().c_str (),
1775 paddress (gdbarch
, original_pc
),
1776 paddress (gdbarch
, displaced_pc
));
1778 return DISPLACED_STEP_PREPARE_STATUS_OK
;
1781 /* Wrapper for displaced_step_prepare_throw that disables further
1782 attempts at displaced stepping if we get a memory error. */
1784 static displaced_step_prepare_status
1785 displaced_step_prepare (thread_info
*thread
)
/* NOTE(review): this extraction appears to have lost the try/catch
   braces, the rethrow ("throw;") of non-memory/non-unsupported errors,
   the ex.what () arguments of the two messages below, and a trailing
   "return status;" -- verify against upstream infrun.c before
   compiling.  */
1787 displaced_step_prepare_status status
1788 = DISPLACED_STEP_PREPARE_STATUS_CANT
;
1792 status
= displaced_step_prepare_throw (thread
);
1794 catch (const gdb_exception_error
&ex
)
1796 if (ex
.error
!= MEMORY_ERROR
1797 && ex
.error
!= NOT_SUPPORTED_ERROR
)
1800 infrun_debug_printf ("caught exception, disabling displaced stepping: %s",
1803 /* Be verbose if "set displaced-stepping" is "on", silent if
1805 if (can_use_displaced_stepping
== AUTO_BOOLEAN_TRUE
)
1807 warning (_("disabling displaced stepping: %s"),
1811 /* Disable further displaced stepping attempts. */
/* Remembered per-inferior; use_displaced_stepping consults
   failed_before and refuses displaced stepping from now on.  */
1812 thread
->inf
->displaced_step_state
.failed_before
= 1;
1818 /* If we displaced stepped an instruction successfully, adjust registers and
1819 memory to yield the same effect the instruction would have had if we had
1820 executed it at its original address, and return
1821 DISPLACED_STEP_FINISH_STATUS_OK. If the instruction didn't complete,
1822 relocate the PC and return DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED.
1824 If the thread wasn't displaced stepping, return
1825 DISPLACED_STEP_FINISH_STATUS_OK as well. */
1827 static displaced_step_finish_status
1828 displaced_step_finish (thread_info
*event_thread
, enum gdb_signal signal
)
1830 displaced_step_thread_state
*displaced
= &event_thread
->displaced_step_state
;
1832 /* Was this thread performing a displaced step? */
1833 if (!displaced
->in_progress ())
1834 return DISPLACED_STEP_FINISH_STATUS_OK
;
/* Bookkeeping: one fewer displaced step in flight for this inferior.  */
1836 gdb_assert (event_thread
->inf
->displaced_step_state
.in_progress_count
> 0);
1837 event_thread
->inf
->displaced_step_state
.in_progress_count
--;
1839 /* Fixup may need to read memory/registers. Switch to the thread
1840 that we're fixing up. Also, target_stopped_by_watchpoint checks
1841 the current thread, and displaced_step_restore performs ptid-dependent
1842 memory accesses using current_inferior(). */
1843 switch_to_thread (event_thread
);
/* Reset the per-thread displaced-step state on every exit path,
   including if the finish hook below throws.  */
1845 displaced_step_reset_cleanup
cleanup (displaced
);
1847 /* Do the fixup, and release the resources acquired to do the displaced
1849 return gdbarch_displaced_step_finish (displaced
->get_original_gdbarch (),
1850 event_thread
, signal
);
1853 /* Data to be passed around while handling an event. This data is
1854 discarded between events. */
/* NOTE(review): line-mangled extraction -- the struct's braces, a
   "ptid_t ptid;" member declaration, the "stop_func_end = 0;"
   constructor initializer (original line 1870), and the closing "};"
   appear to have been dropped; verify against upstream infrun.c.  */
1855 struct execution_control_state
/* Default-construct with everything cleared/null.  */
1857 execution_control_state ()
1864 this->target
= nullptr;
1865 this->ptid
= null_ptid
;
1866 this->event_thread
= nullptr;
1867 ws
= target_waitstatus ();
1868 stop_func_filled_in
= 0;
1869 stop_func_start
= 0;
1871 stop_func_name
= nullptr;
1873 hit_singlestep_breakpoint
= 0;
/* The target that reported the event.  */
1876 process_stratum_target
*target
;
1878 /* The thread that got the event, if this was a thread event; NULL
1880 struct thread_info
*event_thread
;
/* The reported wait status, and the cached bounds/name of the function
   the thread stopped in (filled in lazily; see stop_func_filled_in).  */
1882 struct target_waitstatus ws
;
1883 int stop_func_filled_in
;
1884 CORE_ADDR stop_func_start
;
1885 CORE_ADDR stop_func_end
;
1886 const char *stop_func_name
;
1889 /* True if the event thread hit the single-step breakpoint of
1890 another thread. Thus the event doesn't cause a stop, the thread
1891 needs to be single-stepped past the single-step breakpoint before
1892 we can switch back to the original stepping thread. */
1893 int hit_singlestep_breakpoint
;
1896 /* Clear ECS and set it to point at TP. */
/* NOTE(review): the comment above says "Clear ECS", but the clearing
   statement (original line ~1901, presumably re-assigning a
   default-constructed execution_control_state) appears to have been
   dropped by this extraction, along with the "static void" line and
   braces -- verify against upstream infrun.c.  */
1899 reset_ecs (struct execution_control_state
*ecs
, struct thread_info
*tp
)
1902 ecs
->event_thread
= tp
;
1903 ecs
->ptid
= tp
->ptid
;
1906 static void keep_going_pass_signal (struct execution_control_state
*ecs
);
1907 static void prepare_to_wait (struct execution_control_state
*ecs
);
1908 static bool keep_going_stepped_thread (struct thread_info
*tp
);
1909 static step_over_what
thread_still_needs_step_over (struct thread_info
*tp
);
1911 /* Are there any pending step-over requests? If so, run all we can
1912 now and return true. Otherwise, return false. */
/* NOTE(review): lossy extraction -- the function's return-type line and
   several brace/return lines are missing from this view; the embedded
   numbering gaps (e.g. 1921 -> 1924) mark the dropped lines.  */
1915 start_step_over (void)
1917 INFRUN_SCOPED_DEBUG_ENTER_EXIT
;
1919 /* Don't start a new step-over if we already have an in-line
1920 step-over operation ongoing. */
1921 if (step_over_info_valid_p ())
1924 /* Steal the global thread step over chain. As we try to initiate displaced
1925 steps, threads will be enqueued in the global chain if no buffers are
1926 available. If we iterated on the global chain directly, we might iterate
1928 thread_step_over_list threads_to_step
1929 = std::move (global_thread_step_over_list
);
1931 infrun_debug_printf ("stealing global queue of threads to step, length = %d",
1932 thread_step_over_chain_length (threads_to_step
));
1934 bool started
= false;
1936 /* On scope exit (whatever the reason, return or exception), if there are
1937 threads left in the THREADS_TO_STEP chain, put back these threads in the
1941 if (threads_to_step
.empty ())
1942 infrun_debug_printf ("step-over queue now empty");
1945 infrun_debug_printf ("putting back %d threads to step in global queue",
1946 thread_step_over_chain_length (threads_to_step
));
1948 global_thread_step_over_chain_enqueue_chain
1949 (std::move (threads_to_step
));
/* Iterate over a "safe" range so erasing the current thread from the
   chain (below) does not invalidate the iteration.  */
1953 thread_step_over_list_safe_range range
1954 = make_thread_step_over_list_safe_range (threads_to_step
);
1956 for (thread_info
*tp
: range
)
1958 struct execution_control_state ecss
;
1959 struct execution_control_state
*ecs
= &ecss
;
1960 step_over_what step_what
;
1961 int must_be_in_line
;
1963 gdb_assert (!tp
->stop_requested
);
1965 if (tp
->inf
->displaced_step_state
.unavailable
)
1967 /* The arch told us to not even try preparing another displaced step
1968 for this inferior. Just leave the thread in THREADS_TO_STEP, it
1969 will get moved to the global chain on scope exit. */
1973 if (tp
->inf
->thread_waiting_for_vfork_done
!= nullptr)
1975 /* When we stop all threads, handling a vfork, any thread in the step
1976 over chain remains there. A user could also try to continue a
1977 thread stopped at a breakpoint while another thread is waiting for
1978 a vfork-done event. In any case, we don't want to start a step
1983 /* Remove thread from the THREADS_TO_STEP chain. If anything goes wrong
1984 while we try to prepare the displaced step, we don't add it back to
1985 the global step over chain. This is to avoid a thread staying in the
1986 step over chain indefinitely if something goes wrong when resuming it
1987 If the error is intermittent and it still needs a step over, it will
1988 get enqueued again when we try to resume it normally. */
1989 threads_to_step
.erase (threads_to_step
.iterator_to (*tp
));
1991 step_what
= thread_still_needs_step_over (tp
);
/* An in-line step-over is forced for watchpoints, and for breakpoints
   when displaced stepping is not usable for this thread.  */
1992 must_be_in_line
= ((step_what
& STEP_OVER_WATCHPOINT
)
1993 || ((step_what
& STEP_OVER_BREAKPOINT
)
1994 && !use_displaced_stepping (tp
)));
1996 /* We currently stop all threads of all processes to step-over
1997 in-line. If we need to start a new in-line step-over, let
1998 any pending displaced steps finish first. */
1999 if (must_be_in_line
&& displaced_step_in_progress_any_thread ())
2001 global_thread_step_over_chain_enqueue (tp
);
2005 if (tp
->control
.trap_expected
2007 || tp
->executing ())
2009 internal_error ("[%s] has inconsistent state: "
2010 "trap_expected=%d, resumed=%d, executing=%d\n",
2011 tp
->ptid
.to_string ().c_str (),
2012 tp
->control
.trap_expected
,
2017 infrun_debug_printf ("resuming [%s] for step-over",
2018 tp
->ptid
.to_string ().c_str ());
2020 /* keep_going_pass_signal skips the step-over if the breakpoint
2021 is no longer inserted. In all-stop, we want to keep looking
2022 for a thread that needs a step-over instead of resuming TP,
2023 because we wouldn't be able to resume anything else until the
2024 target stops again. In non-stop, the resume always resumes
2025 only TP, so it's OK to let the thread resume freely. */
2026 if (!target_is_non_stop_p () && !step_what
)
2029 switch_to_thread (tp
);
2030 reset_ecs (ecs
, tp
);
2031 keep_going_pass_signal (ecs
);
2033 if (!ecs
->wait_some_more
)
2034 error (_("Command aborted."));
2036 /* If the thread's step over could not be initiated because no buffers
2037 were available, it was re-added to the global step over chain. */
2040 infrun_debug_printf ("[%s] was resumed.",
2041 tp
->ptid
.to_string ().c_str ());
2042 gdb_assert (!thread_is_in_step_over_chain (tp
));
2046 infrun_debug_printf ("[%s] was NOT resumed.",
2047 tp
->ptid
.to_string ().c_str ());
2048 gdb_assert (thread_is_in_step_over_chain (tp
));
2051 /* If we started a new in-line step-over, we're done. */
2052 if (step_over_info_valid_p ())
2054 gdb_assert (tp
->control
.trap_expected
);
2059 if (!target_is_non_stop_p ())
2061 /* On all-stop, shouldn't have resumed unless we needed a
2063 gdb_assert (tp
->control
.trap_expected
2064 || tp
->step_after_step_resume_breakpoint
);
2066 /* With remote targets (at least), in all-stop, we can't
2067 issue any further remote commands until the program stops
2073 /* Either the thread no longer needed a step-over, or a new
2074 displaced stepping sequence started. Even in the latter
2075 case, continue looking. Maybe we can also start another
2076 displaced step on a thread of other process. */
2082 /* Update global variables holding ptids to hold NEW_PTID if they were
2083 holding OLD_PTID. */
/* NOTE(review): the return-type line and braces are missing from this
   view.  The visible body only rewrites INFERIOR_PTID, and only when it
   both equals OLD_PTID and belongs to TARGET.  */
2085 infrun_thread_ptid_changed (process_stratum_target
*target
,
2086 ptid_t old_ptid
, ptid_t new_ptid
)
2088 if (inferior_ptid
== old_ptid
2089 && current_inferior ()->process_target () == target
)
2090 inferior_ptid
= new_ptid
;
/* Valid values of the "set scheduler-locking" user setting; the enum
   array's initializer body is missing from this view (extraction gap
   between lines 2099 and 2106).  */
2095 static const char schedlock_off
[] = "off";
2096 static const char schedlock_on
[] = "on";
2097 static const char schedlock_step
[] = "step";
2098 static const char schedlock_replay
[] = "replay";
2099 static const char *const scheduler_enums
[] = {
/* Current scheduler-locking mode; defaults to "replay".  */
2106 static const char *scheduler_mode
= schedlock_replay
;
/* "show scheduler-locking" callback: print the current mode.  The
   return-type line and the printing call's opening are missing from
   this view.  */
2108 show_scheduler_mode (struct ui_file
*file
, int from_tty
,
2109 struct cmd_list_element
*c
, const char *value
)
2112 _("Mode for locking scheduler "
2113 "during execution is \"%s\".\n"),
/* "set scheduler-locking" callback.  If the target cannot lock the
   scheduler, revert the mode to "off" before reporting the error, so
   the setting never shows a value the target cannot honor.  */
2118 set_schedlock_func (const char *args
, int from_tty
, struct cmd_list_element
*c
)
2120 if (!target_can_lock_scheduler ())
2122 scheduler_mode
= schedlock_off
;
2123 error (_("Target '%s' cannot support this command."),
2124 target_shortname ());
2128 /* True if execution commands resume all threads of all processes by
2129 default; otherwise, resume only threads of the current inferior
2131 bool sched_multi
= false;
2133 /* Try to setup for software single stepping. Return true if target_resume()
2134 should use hardware single step.
2136 GDBARCH the current gdbarch. */
/* NOTE(review): the "return hw_step;" line is missing from this view
   (gap between lines 2145 and 2153).  Software single-step breakpoints
   are only attempted when executing forward and the arch supports them;
   if they were inserted, hardware stepping is not requested.  */
2139 maybe_software_singlestep (struct gdbarch
*gdbarch
)
2141 bool hw_step
= true;
2143 if (execution_direction
== EXEC_FORWARD
2144 && gdbarch_software_single_step_p (gdbarch
))
2145 hw_step
= !insert_single_step_breakpoints (gdbarch
);
/* Compute the set of threads an execution command resumes, from the
   user's perspective.  The return-type line, braces and final "return"
   are missing from this view.  Cases, in priority order: non-stop mode
   resumes only the current thread; scheduler-locking "on" (or "step"
   during a stepping command, or "replay" while replaying) also resumes
   only the current thread; otherwise all threads of the current process
   (when the target supports multi-process and sched_multi is off), or
   all threads of all processes.  */
2153 user_visible_resume_ptid (int step
)
2159 /* With non-stop mode on, threads are always handled
2161 resume_ptid
= inferior_ptid
;
2163 else if ((scheduler_mode
== schedlock_on
)
2164 || (scheduler_mode
== schedlock_step
&& step
))
2166 /* User-settable 'scheduler' mode requires solo thread
2168 resume_ptid
= inferior_ptid
;
2170 else if ((scheduler_mode
== schedlock_replay
)
2171 && target_record_will_replay (minus_one_ptid
, execution_direction
))
2173 /* User-settable 'scheduler' mode requires solo thread resume in replay
2175 resume_ptid
= inferior_ptid
;
2177 else if (!sched_multi
&& target_supports_multi_process ())
2179 /* Resume all threads of the current process (and none of other
2181 resume_ptid
= ptid_t (inferior_ptid
.pid ());
2185 /* Resume all threads of all processes. */
2186 resume_ptid
= RESUME_ALL
;
/* Return the process-stratum target for a resumption set.  NOTE(review):
   one arm of the conditional expression is missing from this view (gap
   between lines 2197 and 2199); as written it yields the current
   inferior's process target unless the wildcard-with-sched_multi case
   selects the other (unseen) value.  */
2194 process_stratum_target
*
2195 user_visible_resume_target (ptid_t resume_ptid
)
2197 return (resume_ptid
== minus_one_ptid
&& sched_multi
2199 : current_inferior ()->process_target ());
2202 /* Return a ptid representing the set of threads that we will resume,
2203 in the perspective of the target, assuming run control handling
2204 does not require leaving some threads stopped (e.g., stepping past
2205 breakpoint). USER_STEP indicates whether we're about to start the
2206 target for a stepping command. */
/* NOTE(review): return-type line and braces are missing from this
   view.  */
2209 internal_resume_ptid (int user_step
)
2211 /* In non-stop, we always control threads individually. Note that
2212 the target may always work in non-stop mode even with "set
2213 non-stop off", in which case user_visible_resume_ptid could
2214 return a wildcard ptid. */
2215 if (target_is_non_stop_p ())
2216 return inferior_ptid
;
2218 /* The rest of the function assumes non-stop==off and
2219 target-non-stop==off.
2221 If a thread is waiting for a vfork-done event, it means breakpoints are out
2222 for this inferior (well, program space in fact). We don't want to resume
2223 any thread other than the one waiting for vfork done, otherwise these other
2224 threads could miss breakpoints. So if a thread in the resumption set is
2225 waiting for a vfork-done event, resume only that thread.
2227 The resumption set width depends on whether schedule-multiple is on or off.
2229 Note that if the target_resume interface was more flexible, we could be
2230 smarter here when schedule-multiple is on. For example, imagine 3
2231 inferiors with 2 threads each (1.1, 1.2, 2.1, 2.2, 3.1 and 3.2). Threads
2232 2.1 and 3.2 are both waiting for a vfork-done event. Then we could ask the
2233 target(s) to resume:
2235 - All threads of inferior 1
2239 Since we don't have that flexibility (we can only pass one ptid), just
2240 resume the first thread waiting for a vfork-done event we find (e.g. thread
2244 for (inferior
*inf
: all_non_exited_inferiors ())
2245 if (inf
->thread_waiting_for_vfork_done
!= nullptr)
2246 return inf
->thread_waiting_for_vfork_done
->ptid
;
2248 else if (current_inferior ()->thread_waiting_for_vfork_done
!= nullptr)
2249 return current_inferior ()->thread_waiting_for_vfork_done
->ptid
;
/* No vfork special-case applies; fall back to the user-visible set.  */
2251 return user_visible_resume_ptid (user_step
);
2254 /* Wrapper for target_resume, that handles infrun-specific
/* Performs the infrun bookkeeping around target_resume: installs the
   inferior's terminal modes, clears the thread's pending stop signal,
   advises the target which signals to handle silently (none while an
   in-line or displaced step-over is in progress, so no SIGTRAP is
   mistaken for the step-over finishing), then resumes.  */
2258 do_target_resume (ptid_t resume_ptid
, bool step
, enum gdb_signal sig
)
2260 struct thread_info
*tp
= inferior_thread ();
2262 gdb_assert (!tp
->stop_requested
);
2264 /* Install inferior's terminal modes. */
2265 target_terminal::inferior ();
2267 /* Avoid confusing the next resume, if the next stop/resume
2268 happens to apply to another thread. */
2269 tp
->set_stop_signal (GDB_SIGNAL_0
);
2271 /* Advise target which signals may be handled silently.
2273 If we have removed breakpoints because we are stepping over one
2274 in-line (in any thread), we need to receive all signals to avoid
2275 accidentally skipping a breakpoint during execution of a signal
2278 Likewise if we're displaced stepping, otherwise a trap for a
2279 breakpoint in a signal handler might be confused with the
2280 displaced step finishing. We don't make the displaced_step_finish
2281 step distinguish the cases instead, because:
2283 - a backtrace while stopped in the signal handler would show the
2284 scratch pad as frame older than the signal handler, instead of
2285 the real mainline code.
2287 - when the thread is later resumed, the signal handler would
2288 return to the scratch pad area, which would no longer be
2290 if (step_over_info_valid_p ()
2291 || displaced_step_in_progress (tp
->inf
))
2292 target_pass_signals ({});
2294 target_pass_signals (signal_pass
);
2296 infrun_debug_printf ("resume_ptid=%s, step=%d, sig=%s",
2297 resume_ptid
.to_string ().c_str (),
2298 step
, gdb_signal_to_symbol_string (sig
));
2300 target_resume (resume_ptid
, step
, sig
);
2303 /* Resume the inferior. SIG is the signal to give the inferior
2304 (GDB_SIGNAL_0 for none). Note: don't call this directly; instead
2305 call 'resume', which handles exceptions. */
/* NOTE(review): lossy extraction -- braces, several declarations (e.g.
   of STEP and RESUME_PTID) and early-return lines are missing from this
   view; the embedded numbering gaps mark the dropped lines.  The code
   below is kept byte-identical.  */
2308 resume_1 (enum gdb_signal sig
)
2310 struct regcache
*regcache
= get_current_regcache ();
2311 struct gdbarch
*gdbarch
= regcache
->arch ();
2312 struct thread_info
*tp
= inferior_thread ();
2313 const address_space
*aspace
= regcache
->aspace ();
2315 /* This represents the user's step vs continue request. When
2316 deciding whether "set scheduler-locking step" applies, it's the
2317 user's intention that counts. */
2318 const int user_step
= tp
->control
.stepping_command
;
2319 /* This represents what we'll actually request the target to do.
2320 This can decay from a step to a continue, if e.g., we need to
2321 implement single-stepping with breakpoints (software
2325 gdb_assert (!tp
->stop_requested
);
2326 gdb_assert (!thread_is_in_step_over_chain (tp
));
/* A thread with an unconsumed wait status is not actually resumed on
   the target; it is just marked resumed so the event is reported.  */
2328 if (tp
->has_pending_waitstatus ())
2331 ("thread %s has pending wait "
2332 "status %s (currently_stepping=%d).",
2333 tp
->ptid
.to_string ().c_str (),
2334 tp
->pending_waitstatus ().to_string ().c_str (),
2335 currently_stepping (tp
));
2337 tp
->inf
->process_target ()->threads_executing
= true;
2338 tp
->set_resumed (true);
2340 /* FIXME: What should we do if we are supposed to resume this
2341 thread with a signal? Maybe we should maintain a queue of
2342 pending signals to deliver. */
2343 if (sig
!= GDB_SIGNAL_0
)
2345 warning (_("Couldn't deliver signal %s to %s."),
2346 gdb_signal_to_name (sig
),
2347 tp
->ptid
.to_string ().c_str ());
2350 tp
->set_stop_signal (GDB_SIGNAL_0
);
2352 if (target_can_async_p ())
2354 target_async (true);
2355 /* Tell the event loop we have an event to process. */
2356 mark_async_event_handler (infrun_async_inferior_event_token
);
2361 tp
->stepped_breakpoint
= 0;
2363 /* Depends on stepped_breakpoint. */
2364 step
= currently_stepping (tp
);
2366 if (current_inferior ()->thread_waiting_for_vfork_done
!= nullptr)
2368 /* Don't try to single-step a vfork parent that is waiting for
2369 the child to get out of the shared memory region (by exec'ing
2370 or exiting). This is particularly important on software
2371 single-step archs, as the child process would trip on the
2372 software single step breakpoint inserted for the parent
2373 process. Since the parent will not actually execute any
2374 instruction until the child is out of the shared region (such
2375 are vfork's semantics), it is safe to simply continue it.
2376 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
2377 the parent, and tell it to `keep_going', which automatically
2378 re-sets it stepping. */
2379 infrun_debug_printf ("resume : clear step");
2383 CORE_ADDR pc
= regcache_read_pc (regcache
);
2385 infrun_debug_printf ("step=%d, signal=%s, trap_expected=%d, "
2386 "current thread [%s] at %s",
2387 step
, gdb_signal_to_symbol_string (sig
),
2388 tp
->control
.trap_expected
,
2389 inferior_ptid
.to_string ().c_str (),
2390 paddress (gdbarch
, pc
));
2392 /* Normally, by the time we reach `resume', the breakpoints are either
2393 removed or inserted, as appropriate. The exception is if we're sitting
2394 at a permanent breakpoint; we need to step over it, but permanent
2395 breakpoints can't be removed. So we have to test for it here. */
2396 if (breakpoint_here_p (aspace
, pc
) == permanent_breakpoint_here
)
2398 if (sig
!= GDB_SIGNAL_0
)
2400 /* We have a signal to pass to the inferior. The resume
2401 may, or may not take us to the signal handler. If this
2402 is a step, we'll need to stop in the signal handler, if
2403 there's one, (if the target supports stepping into
2404 handlers), or in the next mainline instruction, if
2405 there's no handler. If this is a continue, we need to be
2406 sure to run the handler with all breakpoints inserted.
2407 In all cases, set a breakpoint at the current address
2408 (where the handler returns to), and once that breakpoint
2409 is hit, resume skipping the permanent breakpoint. If
2410 that breakpoint isn't hit, then we've stepped into the
2411 signal handler (or hit some other event). We'll delete
2412 the step-resume breakpoint then. */
2414 infrun_debug_printf ("resume: skipping permanent breakpoint, "
2415 "deliver signal first");
2417 clear_step_over_info ();
2418 tp
->control
.trap_expected
= 0;
2420 if (tp
->control
.step_resume_breakpoint
== nullptr)
2422 /* Set a "high-priority" step-resume, as we don't want
2423 user breakpoints at PC to trigger (again) when this
2425 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2426 gdb_assert (tp
->control
.step_resume_breakpoint
->loc
->permanent
);
2428 tp
->step_after_step_resume_breakpoint
= step
;
2431 insert_breakpoints ();
2435 /* There's no signal to pass, we can go ahead and skip the
2436 permanent breakpoint manually. */
2437 infrun_debug_printf ("skipping permanent breakpoint");
2438 gdbarch_skip_permanent_breakpoint (gdbarch
, regcache
);
2439 /* Update pc to reflect the new address from which we will
2440 execute instructions. */
2441 pc
= regcache_read_pc (regcache
);
2445 /* We've already advanced the PC, so the stepping part
2446 is done. Now we need to arrange for a trap to be
2447 reported to handle_inferior_event. Set a breakpoint
2448 at the current PC, and run to it. Don't update
2449 prev_pc, because if we end in
2450 switch_back_to_stepped_thread, we want the "expected
2451 thread advanced also" branch to be taken. IOW, we
2452 don't want this thread to step further from PC
2454 gdb_assert (!step_over_info_valid_p ());
2455 insert_single_step_breakpoint (gdbarch
, aspace
, pc
);
2456 insert_breakpoints ();
2458 resume_ptid
= internal_resume_ptid (user_step
);
2459 do_target_resume (resume_ptid
, false, GDB_SIGNAL_0
);
2460 tp
->set_resumed (true);
2466 /* If we have a breakpoint to step over, make sure to do a single
2467 step only. Same if we have software watchpoints. */
2468 if (tp
->control
.trap_expected
|| bpstat_should_step ())
2469 tp
->control
.may_range_step
= 0;
2471 /* If displaced stepping is enabled, step over breakpoints by executing a
2472 copy of the instruction at a different address.
2474 We can't use displaced stepping when we have a signal to deliver;
2475 the comments for displaced_step_prepare explain why. The
2476 comments in the handle_inferior event for dealing with 'random
2477 signals' explain what we do instead.
2479 We can't use displaced stepping when we are waiting for vfork_done
2480 event, displaced stepping breaks the vfork child similarly as single
2481 step software breakpoint. */
2482 if (tp
->control
.trap_expected
2483 && use_displaced_stepping (tp
)
2484 && !step_over_info_valid_p ()
2485 && sig
== GDB_SIGNAL_0
2486 && current_inferior ()->thread_waiting_for_vfork_done
== nullptr)
2488 displaced_step_prepare_status prepare_status
2489 = displaced_step_prepare (tp
);
2491 if (prepare_status
== DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
)
2493 infrun_debug_printf ("Got placed in step-over queue");
2495 tp
->control
.trap_expected
= 0;
2498 else if (prepare_status
== DISPLACED_STEP_PREPARE_STATUS_CANT
)
2500 /* Fallback to stepping over the breakpoint in-line. */
2502 if (target_is_non_stop_p ())
2503 stop_all_threads ("displaced stepping falling back on inline stepping");
2505 set_step_over_info (regcache
->aspace (),
2506 regcache_read_pc (regcache
), 0, tp
->global_num
);
2508 step
= maybe_software_singlestep (gdbarch
);
2510 insert_breakpoints ();
2512 else if (prepare_status
== DISPLACED_STEP_PREPARE_STATUS_OK
)
2514 /* Update pc to reflect the new address from which we will
2515 execute instructions due to displaced stepping. */
2516 pc
= regcache_read_pc (get_thread_regcache (tp
));
2518 step
= gdbarch_displaced_step_hw_singlestep (gdbarch
);
2521 gdb_assert_not_reached ("Invalid displaced_step_prepare_status "
2525 /* Do we need to do it the hard way, w/temp breakpoints? */
2527 step
= maybe_software_singlestep (gdbarch
);
2529 /* Currently, our software single-step implementation leads to different
2530 results than hardware single-stepping in one situation: when stepping
2531 into delivering a signal which has an associated signal handler,
2532 hardware single-step will stop at the first instruction of the handler,
2533 while software single-step will simply skip execution of the handler.
2535 For now, this difference in behavior is accepted since there is no
2536 easy way to actually implement single-stepping into a signal handler
2537 without kernel support.
2539 However, there is one scenario where this difference leads to follow-on
2540 problems: if we're stepping off a breakpoint by removing all breakpoints
2541 and then single-stepping. In this case, the software single-step
2542 behavior means that even if there is a *breakpoint* in the signal
2543 handler, GDB still would not stop.
2545 Fortunately, we can at least fix this particular issue. We detect
2546 here the case where we are about to deliver a signal while software
2547 single-stepping with breakpoints removed. In this situation, we
2548 revert the decisions to remove all breakpoints and insert single-
2549 step breakpoints, and instead we install a step-resume breakpoint
2550 at the current address, deliver the signal without stepping, and
2551 once we arrive back at the step-resume breakpoint, actually step
2552 over the breakpoint we originally wanted to step over. */
2553 if (thread_has_single_step_breakpoints_set (tp
)
2554 && sig
!= GDB_SIGNAL_0
2555 && step_over_info_valid_p ())
2557 /* If we have nested signals or a pending signal is delivered
2558 immediately after a handler returns, might already have
2559 a step-resume breakpoint set on the earlier handler. We cannot
2560 set another step-resume breakpoint; just continue on until the
2561 original breakpoint is hit. */
2562 if (tp
->control
.step_resume_breakpoint
== nullptr)
2564 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2565 tp
->step_after_step_resume_breakpoint
= 1;
2568 delete_single_step_breakpoints (tp
);
2570 clear_step_over_info ();
2571 tp
->control
.trap_expected
= 0;
2573 insert_breakpoints ();
2576 /* If STEP is set, it's a request to use hardware stepping
2577 facilities. But in that case, we should never
2578 use singlestep breakpoint. */
2579 gdb_assert (!(thread_has_single_step_breakpoints_set (tp
) && step
));
2581 /* Decide the set of threads to ask the target to resume. */
2582 if (tp
->control
.trap_expected
)
2584 /* We're allowing a thread to run past a breakpoint it has
2585 hit, either by single-stepping the thread with the breakpoint
2586 removed, or by displaced stepping, with the breakpoint inserted.
2587 In the former case, we need to single-step only this thread,
2588 and keep others stopped, as they can miss this breakpoint if
2589 allowed to run. That's not really a problem for displaced
2590 stepping, but, we still keep other threads stopped, in case
2591 another thread is also stopped for a breakpoint waiting for
2592 its turn in the displaced stepping queue. */
2593 resume_ptid
= inferior_ptid
;
2596 resume_ptid
= internal_resume_ptid (user_step
);
2598 if (execution_direction
!= EXEC_REVERSE
2599 && step
&& breakpoint_inserted_here_p (aspace
, pc
))
2601 /* There are two cases where we currently need to step a
2602 breakpoint instruction when we have a signal to deliver:
2604 - See handle_signal_stop where we handle random signals that
2605 could take out us out of the stepping range. Normally, in
2606 that case we end up continuing (instead of stepping) over the
2607 signal handler with a breakpoint at PC, but there are cases
2608 where we should _always_ single-step, even if we have a
2609 step-resume breakpoint, like when a software watchpoint is
2610 set. Assuming single-stepping and delivering a signal at the
2611 same time would takes us to the signal handler, then we could
2612 have removed the breakpoint at PC to step over it. However,
2613 some hardware step targets (like e.g., Mac OS) can't step
2614 into signal handlers, and for those, we need to leave the
2615 breakpoint at PC inserted, as otherwise if the handler
2616 recurses and executes PC again, it'll miss the breakpoint.
2617 So we leave the breakpoint inserted anyway, but we need to
2618 record that we tried to step a breakpoint instruction, so
2619 that adjust_pc_after_break doesn't end up confused.
2621 - In non-stop if we insert a breakpoint (e.g., a step-resume)
2622 in one thread after another thread that was stepping had been
2623 momentarily paused for a step-over. When we re-resume the
2624 stepping thread, it may be resumed from that address with a
2625 breakpoint that hasn't trapped yet. Seen with
2626 gdb.threads/non-stop-fair-events.exp, on targets that don't
2627 do displaced stepping. */
2629 infrun_debug_printf ("resume: [%s] stepped breakpoint",
2630 tp
->ptid
.to_string ().c_str ());
2632 tp
->stepped_breakpoint
= 1;
2634 /* Most targets can step a breakpoint instruction, thus
2635 executing it normally. But if this one cannot, just
2636 continue and we will hit it anyway. */
2637 if (gdbarch_cannot_step_breakpoint (gdbarch
))
2642 && tp
->control
.trap_expected
2643 && use_displaced_stepping (tp
)
2644 && !step_over_info_valid_p ())
2646 struct regcache
*resume_regcache
= get_thread_regcache (tp
);
2647 struct gdbarch
*resume_gdbarch
= resume_regcache
->arch ();
2648 CORE_ADDR actual_pc
= regcache_read_pc (resume_regcache
);
2651 read_memory (actual_pc
, buf
, sizeof (buf
));
2652 displaced_debug_printf ("run %s: %s",
2653 paddress (resume_gdbarch
, actual_pc
),
2654 displaced_step_dump_bytes
2655 (buf
, sizeof (buf
)).c_str ());
2658 if (tp
->control
.may_range_step
)
2660 /* If we're resuming a thread with the PC out of the step
2661 range, then we're doing some nested/finer run control
2662 operation, like stepping the thread out of the dynamic
2663 linker or the displaced stepping scratch pad. We
2664 shouldn't have allowed a range step then. */
2665 gdb_assert (pc_in_thread_step_range (pc
, tp
));
2668 do_target_resume (resume_ptid
, step
, sig
);
2669 tp
->set_resumed (true);
2672 /* Resume the inferior. SIG is the signal to give the inferior
2673 (GDB_SIGNAL_0 for none). This is a wrapper around 'resume_1' that
2674 rolls back state on error. */
/* NOTE(review): the try-block that calls resume_1 and the rethrow are
   missing from this view (gaps 2677->2683 and after 2691).  */
2677 resume (gdb_signal sig
)
2683 catch (const gdb_exception
&ex
)
2685 /* If resuming is being aborted for any reason, delete any
2686 single-step breakpoint resume_1 may have created, to avoid
2687 confusing the following resumption, and to avoid leaving
2688 single-step breakpoints perturbing other threads, in case
2689 we're running in non-stop mode. */
2690 if (inferior_ptid
!= null_ptid
)
2691 delete_single_step_breakpoints (inferior_thread ());
2701 /* Counter that tracks number of user visible stops. This can be used
2702 to tell whether a command has proceeded the inferior past the
2703 current location. This allows e.g., inferior function calls in
2704 breakpoint commands to not interrupt the command list. When the
2705 call finishes successfully, the inferior is standing at the same
2706 breakpoint as if nothing happened (and so we don't call
2708 static ULONGEST current_stop_id
;
/* Accessor returning the counter above; its signature line is missing
   from this view (gap 2708 -> 2715).  */
2715 return current_stop_id
;
2718 /* Called when we report a user visible stop. */
2726 /* Clear out all variables saying what to do when inferior is continued.
2727 First do this, then set the ones you want, then call `proceed'. */
/* Resets TP's per-thread run-control state: discards a stale pending
   single-step event, squelches pass-filtered stop signals, releases the
   thread FSM, zeroes step ranges/frames/flags, and clears the stop
   bpstat.  NOTE(review): braces and some lines are missing from this
   lossy view; code kept byte-identical.  */
2730 clear_proceed_status_thread (struct thread_info
*tp
)
2732 infrun_debug_printf ("%s", tp
->ptid
.to_string ().c_str ());
2734 /* If we're starting a new sequence, then the previous finished
2735 single-step is no longer relevant. */
2736 if (tp
->has_pending_waitstatus ())
2738 if (tp
->stop_reason () == TARGET_STOPPED_BY_SINGLE_STEP
)
2740 infrun_debug_printf ("pending event of %s was a finished step. "
2742 tp
->ptid
.to_string ().c_str ());
2744 tp
->clear_pending_waitstatus ();
2745 tp
->set_stop_reason (TARGET_STOPPED_BY_NO_REASON
);
2750 ("thread %s has pending wait status %s (currently_stepping=%d).",
2751 tp
->ptid
.to_string ().c_str (),
2752 tp
->pending_waitstatus ().to_string ().c_str (),
2753 currently_stepping (tp
));
2757 /* If this signal should not be seen by program, give it zero.
2758 Used for debugging signals. */
2759 if (!signal_pass_state (tp
->stop_signal ()))
2760 tp
->set_stop_signal (GDB_SIGNAL_0
);
2762 tp
->release_thread_fsm ();
2764 tp
->control
.trap_expected
= 0;
2765 tp
->control
.step_range_start
= 0;
2766 tp
->control
.step_range_end
= 0;
2767 tp
->control
.may_range_step
= 0;
2768 tp
->control
.step_frame_id
= null_frame_id
;
2769 tp
->control
.step_stack_frame_id
= null_frame_id
;
2770 tp
->control
.step_over_calls
= STEP_OVER_UNDEBUGGABLE
;
2771 tp
->control
.step_start_function
= nullptr;
2772 tp
->stop_requested
= 0;
2774 tp
->control
.stop_step
= 0;
2776 tp
->control
.proceed_to_finish
= 0;
2778 tp
->control
.stepping_command
= 0;
2780 /* Discard any remaining commands or status from previous stop. */
2781 bpstat_clear (&tp
->control
.stop_bpstat
);
/* Clear proceed state for every thread about to be resumed (all-stop)
   or just the current thread (non-stop), stop needless replaying under
   scheduler-locking "replay", reset the inferior's stop_soon, and
   notify the about_to_proceed observers.  STEP says whether this is a
   stepping command (affects the user-visible resume set).  */
2785 clear_proceed_status (int step
)
2787 /* With scheduler-locking replay, stop replaying other threads if we're
2788 not replaying the user-visible resume ptid.
2790 This is a convenience feature to not require the user to explicitly
2791 stop replaying the other threads. We're assuming that the user's
2792 intent is to resume tracing the recorded process. */
2793 if (!non_stop
&& scheduler_mode
== schedlock_replay
2794 && target_record_is_replaying (minus_one_ptid
)
2795 && !target_record_will_replay (user_visible_resume_ptid (step
),
2796 execution_direction
))
2797 target_record_stop_replaying ();
2799 if (!non_stop
&& inferior_ptid
!= null_ptid
)
2801 ptid_t resume_ptid
= user_visible_resume_ptid (step
);
2802 process_stratum_target
*resume_target
2803 = user_visible_resume_target (resume_ptid
);
2805 /* In all-stop mode, delete the per-thread status of all threads
2806 we're about to resume, implicitly and explicitly. */
2807 for (thread_info
*tp
: all_non_exited_threads (resume_target
, resume_ptid
))
2808 clear_proceed_status_thread (tp
);
2811 if (inferior_ptid
!= null_ptid
)
2813 struct inferior
*inferior
;
2817 /* If in non-stop mode, only delete the per-thread status of
2818 the current thread. */
2819 clear_proceed_status_thread (inferior_thread ());
2822 inferior
= current_inferior ();
2823 inferior
->control
.stop_soon
= NO_STOP_QUIETLY
;
2826 gdb::observers::about_to_proceed
.notify ();
2829 /* Returns true if TP is still stopped at a breakpoint that needs
2830 stepping-over in order to make progress. If the breakpoint is gone
2831 meanwhile, we can skip the whole step-over dance. */
/* NOTE(review): the return-type line and the "return true/false" lines
   are missing from this view.  The visible logic re-checks for an
   ordinary breakpoint at the thread's current PC and clears the stale
   stepping_over_breakpoint flag when the breakpoint is gone.  */
2834 thread_still_needs_step_over_bp (struct thread_info
*tp
)
2836 if (tp
->stepping_over_breakpoint
)
2838 struct regcache
*regcache
= get_thread_regcache (tp
);
2840 if (breakpoint_here_p (regcache
->aspace (),
2841 regcache_read_pc (regcache
))
2842 == ordinary_breakpoint_here
)
2845 tp
->stepping_over_breakpoint
= 0;
2851 /* Check whether thread TP still needs to start a step-over in order
2852 to make progress when resumed. Returns a bitwise or of enum
2853 step_over_what bits, indicating what needs to be stepped over. */
/* NOTE(review): the final "return what;" line is missing from this
   view.  */
2855 static step_over_what
2856 thread_still_needs_step_over (struct thread_info
*tp
)
2858 step_over_what what
= 0;
2860 if (thread_still_needs_step_over_bp (tp
))
2861 what
|= STEP_OVER_BREAKPOINT
;
/* A watchpoint only needs a step-over when the target cannot step
   over it by itself.  */
2863 if (tp
->stepping_over_watchpoint
2864 && !target_have_steppable_watchpoint ())
2865 what
|= STEP_OVER_WATCHPOINT
;
2870 /* Returns true if scheduler locking applies. STEP indicates whether
2871 we're about to do a step/next-like command to a thread. */
/* NOTE(review): the stale comment above mentions a STEP parameter, but
   the visible signature takes TP and reads TP->control.stepping_command
   instead.  Applies when mode is "on", "step" during a stepping
   command, or "replay" while the recording will replay.  */
2874 schedlock_applies (struct thread_info
*tp
)
2876 return (scheduler_mode
== schedlock_on
2877 || (scheduler_mode
== schedlock_step
2878 && tp
->control
.stepping_command
)
2879 || (scheduler_mode
== schedlock_replay
2880 && target_record_will_replay (minus_one_ptid
,
2881 execution_direction
)));
2884 /* Set process_stratum_target::COMMIT_RESUMED_STATE in all target
2885 stacks that have threads executing and don't have threads with
/* Walks all live inferiors once per process target, skipping targets
   already enabled this pass, targets with nothing executing, targets
   with a pending wait status, and targets with pending events; enables
   commit-resumed for the rest.  */
2889 maybe_set_commit_resumed_all_targets ()
2891 scoped_restore_current_thread restore_thread
;
2893 for (inferior
*inf
: all_non_exited_inferiors ())
2895 process_stratum_target
*proc_target
= inf
->process_target ();
2897 if (proc_target
->commit_resumed_state
)
2899 /* We already set this in a previous iteration, via another
2900 inferior sharing the process_stratum target. */
2904 /* If the target has no resumed threads, it would be useless to
2905 ask it to commit the resumed threads. */
2906 if (!proc_target
->threads_executing
)
2908 infrun_debug_printf ("not requesting commit-resumed for target "
2909 "%s, no resumed threads",
2910 proc_target
->shortname ());
2914 /* As an optimization, if a thread from this target has some
2915 status to report, handle it before requiring the target to
2916 commit its resumed threads: handling the status might lead to
2917 resuming more threads. */
2918 if (proc_target
->has_resumed_with_pending_wait_status ())
2920 infrun_debug_printf ("not requesting commit-resumed for target %s, a"
2921 " thread has a pending waitstatus",
2922 proc_target
->shortname ());
2926 switch_to_inferior_no_thread (inf
);
2928 if (target_has_pending_events ())
2930 infrun_debug_printf ("not requesting commit-resumed for target %s, "
2931 "target has pending events",
2932 proc_target
->shortname ());
2936 infrun_debug_printf ("enabling commit-resumed for target %s",
2937 proc_target
->shortname ());
2939 proc_target
->commit_resumed_state
= true;
/* For every process target whose commit_resumed_state is set, switch
   to one of its inferiors and call target_commit_resumed, so pending
   resumptions are actually committed to the target.  */
2946 maybe_call_commit_resumed_all_targets ()
2948 scoped_restore_current_thread restore_thread
;
2950 for (inferior
*inf
: all_non_exited_inferiors ())
2952 process_stratum_target
*proc_target
= inf
->process_target ();
2954 if (!proc_target
->commit_resumed_state
)
2957 switch_to_inferior_no_thread (inf
);
2959 infrun_debug_printf ("calling commit_resumed for target %s",
2960 proc_target
->shortname());
2962 target_commit_resumed ();
2966 /* To track nesting of scoped_disable_commit_resumed objects, ensuring
2967 that only the outermost one attempts to re-enable
2969 static bool enable_commit_resumed
= true;
2973 scoped_disable_commit_resumed::scoped_disable_commit_resumed
2974 (const char *reason
)
2975 : m_reason (reason
),
2976 m_prev_enable_commit_resumed (enable_commit_resumed
)
2978 infrun_debug_printf ("reason=%s", m_reason
);
2980 enable_commit_resumed
= false;
2982 for (inferior
*inf
: all_non_exited_inferiors ())
2984 process_stratum_target
*proc_target
= inf
->process_target ();
2986 if (m_prev_enable_commit_resumed
)
2988 /* This is the outermost instance: force all
2989 COMMIT_RESUMED_STATE to false. */
2990 proc_target
->commit_resumed_state
= false;
2994 /* This is not the outermost instance, we expect
2995 COMMIT_RESUMED_STATE to have been cleared by the
2996 outermost instance. */
2997 gdb_assert (!proc_target
->commit_resumed_state
);
3005 scoped_disable_commit_resumed::reset ()
3011 infrun_debug_printf ("reason=%s", m_reason
);
3013 gdb_assert (!enable_commit_resumed
);
3015 enable_commit_resumed
= m_prev_enable_commit_resumed
;
3017 if (m_prev_enable_commit_resumed
)
3019 /* This is the outermost instance, re-enable
3020 COMMIT_RESUMED_STATE on the targets where it's possible. */
3021 maybe_set_commit_resumed_all_targets ();
3025 /* This is not the outermost instance, we expect
3026 COMMIT_RESUMED_STATE to still be false. */
3027 for (inferior
*inf
: all_non_exited_inferiors ())
3029 process_stratum_target
*proc_target
= inf
->process_target ();
3030 gdb_assert (!proc_target
->commit_resumed_state
);
3037 scoped_disable_commit_resumed::~scoped_disable_commit_resumed ()
3045 scoped_disable_commit_resumed::reset_and_commit ()
3048 maybe_call_commit_resumed_all_targets ();
3053 scoped_enable_commit_resumed::scoped_enable_commit_resumed
3054 (const char *reason
)
3055 : m_reason (reason
),
3056 m_prev_enable_commit_resumed (enable_commit_resumed
)
3058 infrun_debug_printf ("reason=%s", m_reason
);
3060 if (!enable_commit_resumed
)
3062 enable_commit_resumed
= true;
3064 /* Re-enable COMMIT_RESUMED_STATE on the targets where it's
3066 maybe_set_commit_resumed_all_targets ();
3068 maybe_call_commit_resumed_all_targets ();
3074 scoped_enable_commit_resumed::~scoped_enable_commit_resumed ()
3076 infrun_debug_printf ("reason=%s", m_reason
);
3078 gdb_assert (enable_commit_resumed
);
3080 enable_commit_resumed
= m_prev_enable_commit_resumed
;
3082 if (!enable_commit_resumed
)
3084 /* Force all COMMIT_RESUMED_STATE back to false. */
3085 for (inferior
*inf
: all_non_exited_inferiors ())
3087 process_stratum_target
*proc_target
= inf
->process_target ();
3088 proc_target
->commit_resumed_state
= false;
3093 /* Check that all the targets we're about to resume are in non-stop
3094 mode. Ideally, we'd only care whether all targets support
3095 target-async, but we're not there yet. E.g., stop_all_threads
3096 doesn't know how to handle all-stop targets. Also, the remote
3097 protocol in all-stop mode is synchronous, irrespective of
3098 target-async, which means that things like a breakpoint re-set
3099 triggered by one target would try to read memory from all targets
3103 check_multi_target_resumption (process_stratum_target
*resume_target
)
3105 if (!non_stop
&& resume_target
== nullptr)
3107 scoped_restore_current_thread restore_thread
;
3109 /* This is used to track whether we're resuming more than one
3111 process_stratum_target
*first_connection
= nullptr;
3113 /* The first inferior we see with a target that does not work in
3114 always-non-stop mode. */
3115 inferior
*first_not_non_stop
= nullptr;
3117 for (inferior
*inf
: all_non_exited_inferiors ())
3119 switch_to_inferior_no_thread (inf
);
3121 if (!target_has_execution ())
3124 process_stratum_target
*proc_target
3125 = current_inferior ()->process_target();
3127 if (!target_is_non_stop_p ())
3128 first_not_non_stop
= inf
;
3130 if (first_connection
== nullptr)
3131 first_connection
= proc_target
;
3132 else if (first_connection
!= proc_target
3133 && first_not_non_stop
!= nullptr)
3135 switch_to_inferior_no_thread (first_not_non_stop
);
3137 proc_target
= current_inferior ()->process_target();
3139 error (_("Connection %d (%s) does not support "
3140 "multi-target resumption."),
3141 proc_target
->connection_number
,
3142 make_target_connection_string (proc_target
).c_str ());
3148 /* Basic routine for continuing the program in various fashions.
3150 ADDR is the address to resume at, or -1 for resume where stopped.
3151 SIGGNAL is the signal to give it, or GDB_SIGNAL_0 for none,
3152 or GDB_SIGNAL_DEFAULT for act according to how it stopped.
3154 You should call clear_proceed_status before calling proceed. */
3157 proceed (CORE_ADDR addr
, enum gdb_signal siggnal
)
3159 INFRUN_SCOPED_DEBUG_ENTER_EXIT
;
3161 struct regcache
*regcache
;
3162 struct gdbarch
*gdbarch
;
3164 struct execution_control_state ecss
;
3165 struct execution_control_state
*ecs
= &ecss
;
3167 /* If we're stopped at a fork/vfork, follow the branch set by the
3168 "set follow-fork-mode" command; otherwise, we'll just proceed
3169 resuming the current thread. */
3170 if (!follow_fork ())
3172 /* The target for some reason decided not to resume. */
3174 if (target_can_async_p ())
3175 inferior_event_handler (INF_EXEC_COMPLETE
);
3179 /* We'll update this if & when we switch to a new thread. */
3180 previous_inferior_ptid
= inferior_ptid
;
3182 regcache
= get_current_regcache ();
3183 gdbarch
= regcache
->arch ();
3184 const address_space
*aspace
= regcache
->aspace ();
3186 pc
= regcache_read_pc_protected (regcache
);
3188 thread_info
*cur_thr
= inferior_thread ();
3190 /* Fill in with reasonable starting values. */
3191 init_thread_stepping_state (cur_thr
);
3193 gdb_assert (!thread_is_in_step_over_chain (cur_thr
));
3196 = user_visible_resume_ptid (cur_thr
->control
.stepping_command
);
3197 process_stratum_target
*resume_target
3198 = user_visible_resume_target (resume_ptid
);
3200 check_multi_target_resumption (resume_target
);
3202 if (addr
== (CORE_ADDR
) -1)
3204 if (cur_thr
->stop_pc_p ()
3205 && pc
== cur_thr
->stop_pc ()
3206 && breakpoint_here_p (aspace
, pc
) == ordinary_breakpoint_here
3207 && execution_direction
!= EXEC_REVERSE
)
3208 /* There is a breakpoint at the address we will resume at,
3209 step one instruction before inserting breakpoints so that
3210 we do not stop right away (and report a second hit at this
3213 Note, we don't do this in reverse, because we won't
3214 actually be executing the breakpoint insn anyway.
3215 We'll be (un-)executing the previous instruction. */
3216 cur_thr
->stepping_over_breakpoint
= 1;
3217 else if (gdbarch_single_step_through_delay_p (gdbarch
)
3218 && gdbarch_single_step_through_delay (gdbarch
,
3219 get_current_frame ()))
3220 /* We stepped onto an instruction that needs to be stepped
3221 again before re-inserting the breakpoint, do so. */
3222 cur_thr
->stepping_over_breakpoint
= 1;
3226 regcache_write_pc (regcache
, addr
);
3229 if (siggnal
!= GDB_SIGNAL_DEFAULT
)
3230 cur_thr
->set_stop_signal (siggnal
);
3232 /* If an exception is thrown from this point on, make sure to
3233 propagate GDB's knowledge of the executing state to the
3234 frontend/user running state. */
3235 scoped_finish_thread_state
finish_state (resume_target
, resume_ptid
);
3237 /* Even if RESUME_PTID is a wildcard, and we end up resuming fewer
3238 threads (e.g., we might need to set threads stepping over
3239 breakpoints first), from the user/frontend's point of view, all
3240 threads in RESUME_PTID are now running. Unless we're calling an
3241 inferior function, as in that case we pretend the inferior
3242 doesn't run at all. */
3243 if (!cur_thr
->control
.in_infcall
)
3244 set_running (resume_target
, resume_ptid
, true);
3246 infrun_debug_printf ("addr=%s, signal=%s", paddress (gdbarch
, addr
),
3247 gdb_signal_to_symbol_string (siggnal
));
3249 annotate_starting ();
3251 /* Make sure that output from GDB appears before output from the
3253 gdb_flush (gdb_stdout
);
3255 /* Since we've marked the inferior running, give it the terminal. A
3256 QUIT/Ctrl-C from here on is forwarded to the target (which can
3257 still detect attempts to unblock a stuck connection with repeated
3258 Ctrl-C from within target_pass_ctrlc). */
3259 target_terminal::inferior ();
3261 /* In a multi-threaded task we may select another thread and
3262 then continue or step.
3264 But if a thread that we're resuming had stopped at a breakpoint,
3265 it will immediately cause another breakpoint stop without any
3266 execution (i.e. it will report a breakpoint hit incorrectly). So
3267 we must step over it first.
3269 Look for threads other than the current (TP) that reported a
3270 breakpoint hit and haven't been resumed yet since. */
3272 /* If scheduler locking applies, we can avoid iterating over all
3274 if (!non_stop
&& !schedlock_applies (cur_thr
))
3276 for (thread_info
*tp
: all_non_exited_threads (resume_target
,
3279 switch_to_thread_no_regs (tp
);
3281 /* Ignore the current thread here. It's handled
3286 if (!thread_still_needs_step_over (tp
))
3289 gdb_assert (!thread_is_in_step_over_chain (tp
));
3291 infrun_debug_printf ("need to step-over [%s] first",
3292 tp
->ptid
.to_string ().c_str ());
3294 global_thread_step_over_chain_enqueue (tp
);
3297 switch_to_thread (cur_thr
);
3300 /* Enqueue the current thread last, so that we move all other
3301 threads over their breakpoints first. */
3302 if (cur_thr
->stepping_over_breakpoint
)
3303 global_thread_step_over_chain_enqueue (cur_thr
);
3305 /* If the thread isn't started, we'll still need to set its prev_pc,
3306 so that switch_back_to_stepped_thread knows the thread hasn't
3307 advanced. Must do this before resuming any thread, as in
3308 all-stop/remote, once we resume we can't send any other packet
3309 until the target stops again. */
3310 cur_thr
->prev_pc
= regcache_read_pc_protected (regcache
);
3313 scoped_disable_commit_resumed
disable_commit_resumed ("proceeding");
3314 bool step_over_started
= start_step_over ();
3316 if (step_over_info_valid_p ())
3318 /* Either this thread started a new in-line step over, or some
3319 other thread was already doing one. In either case, don't
3320 resume anything else until the step-over is finished. */
3322 else if (step_over_started
&& !target_is_non_stop_p ())
3324 /* A new displaced stepping sequence was started. In all-stop,
3325 we can't talk to the target anymore until it next stops. */
3327 else if (!non_stop
&& target_is_non_stop_p ())
3329 INFRUN_SCOPED_DEBUG_START_END
3330 ("resuming threads, all-stop-on-top-of-non-stop");
3332 /* In all-stop, but the target is always in non-stop mode.
3333 Start all other threads that are implicitly resumed too. */
3334 for (thread_info
*tp
: all_non_exited_threads (resume_target
,
3337 switch_to_thread_no_regs (tp
);
3339 if (!tp
->inf
->has_execution ())
3341 infrun_debug_printf ("[%s] target has no execution",
3342 tp
->ptid
.to_string ().c_str ());
3348 infrun_debug_printf ("[%s] resumed",
3349 tp
->ptid
.to_string ().c_str ());
3350 gdb_assert (tp
->executing () || tp
->has_pending_waitstatus ());
3354 if (thread_is_in_step_over_chain (tp
))
3356 infrun_debug_printf ("[%s] needs step-over",
3357 tp
->ptid
.to_string ().c_str ());
3361 /* If a thread of that inferior is waiting for a vfork-done
3362 (for a detached vfork child to exec or exit), breakpoints are
3363 removed. We must not resume any thread of that inferior, other
3364 than the one waiting for the vfork-done. */
3365 if (tp
->inf
->thread_waiting_for_vfork_done
!= nullptr
3366 && tp
!= tp
->inf
->thread_waiting_for_vfork_done
)
3368 infrun_debug_printf ("[%s] another thread of this inferior is "
3369 "waiting for vfork-done",
3370 tp
->ptid
.to_string ().c_str ());
3374 infrun_debug_printf ("resuming %s",
3375 tp
->ptid
.to_string ().c_str ());
3377 reset_ecs (ecs
, tp
);
3378 switch_to_thread (tp
);
3379 keep_going_pass_signal (ecs
);
3380 if (!ecs
->wait_some_more
)
3381 error (_("Command aborted."));
3384 else if (!cur_thr
->resumed ()
3385 && !thread_is_in_step_over_chain (cur_thr
)
3386 /* In non-stop, forbid resuming a thread if some other thread of
3387 that inferior is waiting for a vfork-done event (this means
3388 breakpoints are out for this inferior). */
3390 && cur_thr
->inf
->thread_waiting_for_vfork_done
!= nullptr))
3392 /* The thread wasn't started, and isn't queued, run it now. */
3393 reset_ecs (ecs
, cur_thr
);
3394 switch_to_thread (cur_thr
);
3395 keep_going_pass_signal (ecs
);
3396 if (!ecs
->wait_some_more
)
3397 error (_("Command aborted."));
3400 disable_commit_resumed
.reset_and_commit ();
3403 finish_state
.release ();
3405 /* If we've switched threads above, switch back to the previously
3406 current thread. We don't want the user to see a different
3408 switch_to_thread (cur_thr
);
3410 /* Tell the event loop to wait for it to stop. If the target
3411 supports asynchronous execution, it'll do this from within
3413 if (!target_can_async_p ())
3414 mark_async_event_handler (infrun_async_inferior_event_token
);
3418 /* Start remote-debugging of a machine over a serial link. */
3421 start_remote (int from_tty
)
3423 inferior
*inf
= current_inferior ();
3424 inf
->control
.stop_soon
= STOP_QUIETLY_REMOTE
;
3426 /* Always go on waiting for the target, regardless of the mode. */
3427 /* FIXME: cagney/1999-09-23: At present it isn't possible to
3428 indicate to wait_for_inferior that a target should timeout if
3429 nothing is returned (instead of just blocking). Because of this,
3430 targets expecting an immediate response need to, internally, set
3431 things up so that the target_wait() is forced to eventually
3433 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
3434 differentiate to its caller what the state of the target is after
3435 the initial open has been performed. Here we're assuming that
3436 the target has stopped. It should be possible to eventually have
3437 target_open() return to the caller an indication that the target
3438 is currently running and GDB state should be set to the same as
3439 for an async run. */
3440 wait_for_inferior (inf
);
3442 /* Now that the inferior has stopped, do any bookkeeping like
3443 loading shared libraries. We want to do this before normal_stop,
3444 so that the displayed frame is up to date. */
3445 post_create_inferior (from_tty
);
3450 /* Initialize static vars when a new inferior begins. */
3453 init_wait_for_inferior (void)
3455 /* These are meaningless until the first time through wait_for_inferior. */
3457 breakpoint_init_inferior (inf_starting
);
3459 clear_proceed_status (0);
3461 nullify_last_target_wait_ptid ();
3463 previous_inferior_ptid
= inferior_ptid
;
3468 static void handle_inferior_event (struct execution_control_state
*ecs
);
3470 static void handle_step_into_function (struct gdbarch
*gdbarch
,
3471 struct execution_control_state
*ecs
);
3472 static void handle_step_into_function_backward (struct gdbarch
*gdbarch
,
3473 struct execution_control_state
*ecs
);
3474 static void handle_signal_stop (struct execution_control_state
*ecs
);
3475 static void check_exception_resume (struct execution_control_state
*,
3478 static void end_stepping_range (struct execution_control_state
*ecs
);
3479 static void stop_waiting (struct execution_control_state
*ecs
);
3480 static void keep_going (struct execution_control_state
*ecs
);
3481 static void process_event_stop_test (struct execution_control_state
*ecs
);
3482 static bool switch_back_to_stepped_thread (struct execution_control_state
*ecs
);
3484 /* This function is attached as a "thread_stop_requested" observer.
3485 Cleanup local state that assumed the PTID was to be resumed, and
3486 report the stop to the frontend. */
3489 infrun_thread_stop_requested (ptid_t ptid
)
3491 process_stratum_target
*curr_target
= current_inferior ()->process_target ();
3493 /* PTID was requested to stop. If the thread was already stopped,
3494 but the user/frontend doesn't know about that yet (e.g., the
3495 thread had been temporarily paused for some step-over), set up
3496 for reporting the stop now. */
3497 for (thread_info
*tp
: all_threads (curr_target
, ptid
))
3499 if (tp
->state
!= THREAD_RUNNING
)
3501 if (tp
->executing ())
3504 /* Remove matching threads from the step-over queue, so
3505 start_step_over doesn't try to resume them
3507 if (thread_is_in_step_over_chain (tp
))
3508 global_thread_step_over_chain_remove (tp
);
3510 /* If the thread is stopped, but the user/frontend doesn't
3511 know about that yet, queue a pending event, as if the
3512 thread had just stopped now. Unless the thread already had
3514 if (!tp
->has_pending_waitstatus ())
3516 target_waitstatus ws
;
3517 ws
.set_stopped (GDB_SIGNAL_0
);
3518 tp
->set_pending_waitstatus (ws
);
3521 /* Clear the inline-frame state, since we're re-processing the
3523 clear_inline_frame_state (tp
);
3525 /* If this thread was paused because some other thread was
3526 doing an inline-step over, let that finish first. Once
3527 that happens, we'll restart all threads and consume pending
3528 stop events then. */
3529 if (step_over_info_valid_p ())
3532 /* Otherwise we can process the (new) pending event now. Set
3533 it so this pending event is considered by
3535 tp
->set_resumed (true);
3540 infrun_thread_thread_exit (struct thread_info
*tp
, int silent
)
3542 if (target_last_proc_target
== tp
->inf
->process_target ()
3543 && target_last_wait_ptid
== tp
->ptid
)
3544 nullify_last_target_wait_ptid ();
3547 /* Delete the step resume, single-step and longjmp/exception resume
3548 breakpoints of TP. */
3551 delete_thread_infrun_breakpoints (struct thread_info
*tp
)
3553 delete_step_resume_breakpoint (tp
);
3554 delete_exception_resume_breakpoint (tp
);
3555 delete_single_step_breakpoints (tp
);
3558 /* If the target still has execution, call FUNC for each thread that
3559 just stopped. In all-stop, that's all the non-exited threads; in
3560 non-stop, that's the current thread, only. */
3562 typedef void (*for_each_just_stopped_thread_callback_func
)
3563 (struct thread_info
*tp
);
3566 for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func
)
3568 if (!target_has_execution () || inferior_ptid
== null_ptid
)
3571 if (target_is_non_stop_p ())
3573 /* If in non-stop mode, only the current thread stopped. */
3574 func (inferior_thread ());
3578 /* In all-stop mode, all threads have stopped. */
3579 for (thread_info
*tp
: all_non_exited_threads ())
3584 /* Delete the step resume and longjmp/exception resume breakpoints of
3585 the threads that just stopped. */
3588 delete_just_stopped_threads_infrun_breakpoints (void)
3590 for_each_just_stopped_thread (delete_thread_infrun_breakpoints
);
3593 /* Delete the single-step breakpoints of the threads that just
3597 delete_just_stopped_threads_single_step_breakpoints (void)
3599 for_each_just_stopped_thread (delete_single_step_breakpoints
);
3605 print_target_wait_results (ptid_t waiton_ptid
, ptid_t result_ptid
,
3606 const struct target_waitstatus
&ws
)
3608 infrun_debug_printf ("target_wait (%s [%s], status) =",
3609 waiton_ptid
.to_string ().c_str (),
3610 target_pid_to_str (waiton_ptid
).c_str ());
3611 infrun_debug_printf (" %s [%s],",
3612 result_ptid
.to_string ().c_str (),
3613 target_pid_to_str (result_ptid
).c_str ());
3614 infrun_debug_printf (" %s", ws
.to_string ().c_str ());
3617 /* Select a thread at random, out of those which are resumed and have
3620 static struct thread_info
*
3621 random_pending_event_thread (inferior
*inf
, ptid_t waiton_ptid
)
3623 process_stratum_target
*proc_target
= inf
->process_target ();
3625 = proc_target
->random_resumed_with_pending_wait_status (inf
, waiton_ptid
);
3627 if (thread
== nullptr)
3629 infrun_debug_printf ("None found.");
3633 infrun_debug_printf ("Found %s.", thread
->ptid
.to_string ().c_str ());
3634 gdb_assert (thread
->resumed ());
3635 gdb_assert (thread
->has_pending_waitstatus ());
3640 /* Wrapper for target_wait that first checks whether threads have
3641 pending statuses to report before actually asking the target for
3642 more events. INF is the inferior we're using to call target_wait
3646 do_target_wait_1 (inferior
*inf
, ptid_t ptid
,
3647 target_waitstatus
*status
, target_wait_flags options
)
3649 struct thread_info
*tp
;
3651 /* We know that we are looking for an event in the target of inferior
3652 INF, but we don't know which thread the event might come from. As
3653 such we want to make sure that INFERIOR_PTID is reset so that none of
3654 the wait code relies on it - doing so is always a mistake. */
3655 switch_to_inferior_no_thread (inf
);
3657 /* First check if there is a resumed thread with a wait status
3659 if (ptid
== minus_one_ptid
|| ptid
.is_pid ())
3661 tp
= random_pending_event_thread (inf
, ptid
);
3665 infrun_debug_printf ("Waiting for specific thread %s.",
3666 ptid
.to_string ().c_str ());
3668 /* We have a specific thread to check. */
3669 tp
= find_thread_ptid (inf
, ptid
);
3670 gdb_assert (tp
!= nullptr);
3671 if (!tp
->has_pending_waitstatus ())
3676 && (tp
->stop_reason () == TARGET_STOPPED_BY_SW_BREAKPOINT
3677 || tp
->stop_reason () == TARGET_STOPPED_BY_HW_BREAKPOINT
))
3679 struct regcache
*regcache
= get_thread_regcache (tp
);
3680 struct gdbarch
*gdbarch
= regcache
->arch ();
3684 pc
= regcache_read_pc (regcache
);
3686 if (pc
!= tp
->stop_pc ())
3688 infrun_debug_printf ("PC of %s changed. was=%s, now=%s",
3689 tp
->ptid
.to_string ().c_str (),
3690 paddress (gdbarch
, tp
->stop_pc ()),
3691 paddress (gdbarch
, pc
));
3694 else if (!breakpoint_inserted_here_p (regcache
->aspace (), pc
))
3696 infrun_debug_printf ("previous breakpoint of %s, at %s gone",
3697 tp
->ptid
.to_string ().c_str (),
3698 paddress (gdbarch
, pc
));
3705 infrun_debug_printf ("pending event of %s cancelled.",
3706 tp
->ptid
.to_string ().c_str ());
3708 tp
->clear_pending_waitstatus ();
3709 target_waitstatus ws
;
3711 tp
->set_pending_waitstatus (ws
);
3712 tp
->set_stop_reason (TARGET_STOPPED_BY_NO_REASON
);
3718 infrun_debug_printf ("Using pending wait status %s for %s.",
3719 tp
->pending_waitstatus ().to_string ().c_str (),
3720 tp
->ptid
.to_string ().c_str ());
3722 /* Now that we've selected our final event LWP, un-adjust its PC
3723 if it was a software breakpoint (and the target doesn't
3724 always adjust the PC itself). */
3725 if (tp
->stop_reason () == TARGET_STOPPED_BY_SW_BREAKPOINT
3726 && !target_supports_stopped_by_sw_breakpoint ())
3728 struct regcache
*regcache
;
3729 struct gdbarch
*gdbarch
;
3732 regcache
= get_thread_regcache (tp
);
3733 gdbarch
= regcache
->arch ();
3735 decr_pc
= gdbarch_decr_pc_after_break (gdbarch
);
3740 pc
= regcache_read_pc (regcache
);
3741 regcache_write_pc (regcache
, pc
+ decr_pc
);
3745 tp
->set_stop_reason (TARGET_STOPPED_BY_NO_REASON
);
3746 *status
= tp
->pending_waitstatus ();
3747 tp
->clear_pending_waitstatus ();
3749 /* Wake up the event loop again, until all pending events are
3751 if (target_is_async_p ())
3752 mark_async_event_handler (infrun_async_inferior_event_token
);
3756 /* But if we don't find one, we'll have to wait. */
3758 /* We can't ask a non-async target to do a non-blocking wait, so this will be
3760 if (!target_can_async_p ())
3761 options
&= ~TARGET_WNOHANG
;
3763 return target_wait (ptid
, status
, options
);
3766 /* Wrapper for target_wait that first checks whether threads have
3767 pending statuses to report before actually asking the target for
3768 more events. Polls for events from all inferiors/targets. */
3771 do_target_wait (execution_control_state
*ecs
, target_wait_flags options
)
3773 int num_inferiors
= 0;
3774 int random_selector
;
3776 /* For fairness, we pick the first inferior/target to poll at random
3777 out of all inferiors that may report events, and then continue
3778 polling the rest of the inferior list starting from that one in a
3779 circular fashion until the whole list is polled once. */
3781 auto inferior_matches
= [] (inferior
*inf
)
3783 return inf
->process_target () != nullptr;
3786 /* First see how many matching inferiors we have. */
3787 for (inferior
*inf
: all_inferiors ())
3788 if (inferior_matches (inf
))
3791 if (num_inferiors
== 0)
3793 ecs
->ws
.set_ignore ();
3797 /* Now randomly pick an inferior out of those that matched. */
3798 random_selector
= (int)
3799 ((num_inferiors
* (double) rand ()) / (RAND_MAX
+ 1.0));
3801 if (num_inferiors
> 1)
3802 infrun_debug_printf ("Found %d inferiors, starting at #%d",
3803 num_inferiors
, random_selector
);
3805 /* Select the Nth inferior that matched. */
3807 inferior
*selected
= nullptr;
3809 for (inferior
*inf
: all_inferiors ())
3810 if (inferior_matches (inf
))
3811 if (random_selector
-- == 0)
3817 /* Now poll for events out of each of the matching inferior's
3818 targets, starting from the selected one. */
3820 auto do_wait
= [&] (inferior
*inf
)
3822 ecs
->ptid
= do_target_wait_1 (inf
, minus_one_ptid
, &ecs
->ws
, options
);
3823 ecs
->target
= inf
->process_target ();
3824 return (ecs
->ws
.kind () != TARGET_WAITKIND_IGNORE
);
3827 /* Needed in 'all-stop + target-non-stop' mode, because we end up
3828 here spuriously after the target is all stopped and we've already
3829 reported the stop to the user, polling for events. */
3830 scoped_restore_current_thread restore_thread
;
3832 intrusive_list_iterator
<inferior
> start
3833 = inferior_list
.iterator_to (*selected
);
3835 for (intrusive_list_iterator
<inferior
> it
= start
;
3836 it
!= inferior_list
.end ();
3839 inferior
*inf
= &*it
;
3841 if (inferior_matches (inf
) && do_wait (inf
))
3845 for (intrusive_list_iterator
<inferior
> it
= inferior_list
.begin ();
3849 inferior
*inf
= &*it
;
3851 if (inferior_matches (inf
) && do_wait (inf
))
3855 ecs
->ws
.set_ignore ();
3859 /* An event reported by wait_one. */
3861 struct wait_one_event
3863 /* The target the event came out of. */
3864 process_stratum_target
*target
;
3866 /* The PTID the event was for. */
3869 /* The waitstatus. */
3870 target_waitstatus ws
;
3873 static bool handle_one (const wait_one_event
&event
);
3875 /* Prepare and stabilize the inferior for detaching it. E.g.,
3876 detaching while a thread is displaced stepping is a recipe for
3877 crashing it, as nothing would readjust the PC out of the scratch
3881 prepare_for_detach (void)
3883 struct inferior
*inf
= current_inferior ();
3884 ptid_t pid_ptid
= ptid_t (inf
->pid
);
3885 scoped_restore_current_thread restore_thread
;
3887 scoped_restore restore_detaching
= make_scoped_restore (&inf
->detaching
, true);
3889 /* Remove all threads of INF from the global step-over chain. We
3890 want to stop any ongoing step-over, not start any new one. */
3891 thread_step_over_list_safe_range range
3892 = make_thread_step_over_list_safe_range (global_thread_step_over_list
);
3894 for (thread_info
*tp
: range
)
3897 infrun_debug_printf ("removing thread %s from global step over chain",
3898 tp
->ptid
.to_string ().c_str ());
3899 global_thread_step_over_chain_remove (tp
);
3902 /* If we were already in the middle of an inline step-over, and the
3903 thread stepping belongs to the inferior we're detaching, we need
3904 to restart the threads of other inferiors. */
3905 if (step_over_info
.thread
!= -1)
3907 infrun_debug_printf ("inline step-over in-process while detaching");
3909 thread_info
*thr
= find_thread_global_id (step_over_info
.thread
);
3910 if (thr
->inf
== inf
)
3912 /* Since we removed threads of INF from the step-over chain,
3913 we know this won't start a step-over for INF. */
3914 clear_step_over_info ();
3916 if (target_is_non_stop_p ())
3918 /* Start a new step-over in another thread if there's
3919 one that needs it. */
3922 /* Restart all other threads (except the
3923 previously-stepping thread, since that one is still
3925 if (!step_over_info_valid_p ())
3926 restart_threads (thr
);
3931 if (displaced_step_in_progress (inf
))
3933 infrun_debug_printf ("displaced-stepping in-process while detaching");
3935 /* Stop threads currently displaced stepping, aborting it. */
3937 for (thread_info
*thr
: inf
->non_exited_threads ())
3939 if (thr
->displaced_step_state
.in_progress ())
3941 if (thr
->executing ())
3943 if (!thr
->stop_requested
)
3945 target_stop (thr
->ptid
);
3946 thr
->stop_requested
= true;
3950 thr
->set_resumed (false);
3954 while (displaced_step_in_progress (inf
))
3956 wait_one_event event
;
3958 event
.target
= inf
->process_target ();
3959 event
.ptid
= do_target_wait_1 (inf
, pid_ptid
, &event
.ws
, 0);
3962 print_target_wait_results (pid_ptid
, event
.ptid
, event
.ws
);
3967 /* It's OK to leave some of the threads of INF stopped, since
3968 they'll be detached shortly. */
3972 /* If all-stop, but there exists a non-stop target, stop all threads
3973 now that we're presenting the stop to the user. */
3976 stop_all_threads_if_all_stop_mode ()
3978 if (!non_stop
&& exists_non_stop_target ())
3979 stop_all_threads ("presenting stop to user in all-stop");
3982 /* Wait for control to return from inferior to debugger.
3984 If inferior gets a signal, we may decide to start it up again
3985 instead of returning. That is why there is a loop in this function.
3986 When this function actually returns it means the inferior
3987 should be left stopped and GDB should read more commands. */
3990 wait_for_inferior (inferior
*inf
)
3992 infrun_debug_printf ("wait_for_inferior ()");
3994 SCOPE_EXIT
{ delete_just_stopped_threads_infrun_breakpoints (); };
3996 /* If an error happens while handling the event, propagate GDB's
3997 knowledge of the executing state to the frontend/user running
3999 scoped_finish_thread_state finish_state
4000 (inf
->process_target (), minus_one_ptid
);
4004 struct execution_control_state ecss
;
4005 struct execution_control_state
*ecs
= &ecss
;
4007 overlay_cache_invalid
= 1;
4009 /* Flush target cache before starting to handle each event.
4010 Target was running and cache could be stale. This is just a
4011 heuristic. Running threads may modify target memory, but we
4012 don't get any event. */
4013 target_dcache_invalidate ();
4015 ecs
->ptid
= do_target_wait_1 (inf
, minus_one_ptid
, &ecs
->ws
, 0);
4016 ecs
->target
= inf
->process_target ();
4019 print_target_wait_results (minus_one_ptid
, ecs
->ptid
, ecs
->ws
);
4021 /* Now figure out what to do with the result of the result. */
4022 handle_inferior_event (ecs
);
4024 if (!ecs
->wait_some_more
)
4028 stop_all_threads_if_all_stop_mode ();
4030 /* No error, don't finish the state yet. */
4031 finish_state
.release ();
4034 /* Cleanup that reinstalls the readline callback handler, if the
4035 target is running in the background. If while handling the target
4036 event something triggered a secondary prompt, like e.g., a
4037 pagination prompt, we'll have removed the callback handler (see
4038 gdb_readline_wrapper_line). Need to do this as we go back to the
4039 event loop, ready to process further input. Note this has no
4040 effect if the handler hasn't actually been removed, because calling
4041 rl_callback_handler_install resets the line buffer, thus losing
4045 reinstall_readline_callback_handler_cleanup ()
4047 struct ui
*ui
= current_ui
;
4051 /* We're not going back to the top level event loop yet. Don't
4052 install the readline callback, as it'd prep the terminal,
4053 readline-style (raw, noecho) (e.g., --batch). We'll install
4054 it the next time the prompt is displayed, when we're ready
4059 if (ui
->command_editing
&& ui
->prompt_state
!= PROMPT_BLOCKED
)
4060 gdb_rl_callback_handler_reinstall ();
4063 /* Clean up the FSMs of threads that are now stopped. In non-stop,
4064 that's just the event thread. In all-stop, that's all threads. */
4067 clean_up_just_stopped_threads_fsms (struct execution_control_state
*ecs
)
4069 /* The first clean_up call below assumes the event thread is the current
4071 if (ecs
->event_thread
!= nullptr)
4072 gdb_assert (ecs
->event_thread
== inferior_thread ());
4074 if (ecs
->event_thread
!= nullptr
4075 && ecs
->event_thread
->thread_fsm () != nullptr)
4076 ecs
->event_thread
->thread_fsm ()->clean_up (ecs
->event_thread
);
4080 scoped_restore_current_thread restore_thread
;
4082 for (thread_info
*thr
: all_non_exited_threads ())
4084 if (thr
->thread_fsm () == nullptr)
4086 if (thr
== ecs
->event_thread
)
4089 switch_to_thread (thr
);
4090 thr
->thread_fsm ()->clean_up (thr
);
4095 /* Helper for all_uis_check_sync_execution_done that works on the
4099 check_curr_ui_sync_execution_done (void)
4101 struct ui
*ui
= current_ui
;
4103 if (ui
->prompt_state
== PROMPT_NEEDED
4105 && !gdb_in_secondary_prompt_p (ui
))
4107 target_terminal::ours ();
4108 gdb::observers::sync_execution_done
.notify ();
4109 ui
->register_file_handler ();
4116 all_uis_check_sync_execution_done (void)
4118 SWITCH_THRU_ALL_UIS ()
4120 check_curr_ui_sync_execution_done ();
4127 all_uis_on_sync_execution_starting (void)
4129 SWITCH_THRU_ALL_UIS ()
4131 if (current_ui
->prompt_state
== PROMPT_NEEDED
)
4132 async_disable_stdin ();
4136 /* Asynchronous version of wait_for_inferior. It is called by the
4137 event loop whenever a change of state is detected on the file
4138 descriptor corresponding to the target. It can be called more than
4139 once to complete a single execution command. In such cases we need
4140 to keep the state in a global variable ECSS. If it is the last time
4141 that this function is called for a single execution command, then
4142 report to the user that the inferior has stopped, and do the
4143 necessary cleanups. */
4146 fetch_inferior_event ()
4148 INFRUN_SCOPED_DEBUG_ENTER_EXIT
;
4150 struct execution_control_state ecss
;
4151 struct execution_control_state
*ecs
= &ecss
;
4154 /* Events are always processed with the main UI as current UI. This
4155 way, warnings, debug output, etc. are always consistently sent to
4156 the main console. */
4157 scoped_restore save_ui
= make_scoped_restore (¤t_ui
, main_ui
);
4159 /* Temporarily disable pagination. Otherwise, the user would be
4160 given an option to press 'q' to quit, which would cause an early
4161 exit and could leave GDB in a half-baked state. */
4162 scoped_restore save_pagination
4163 = make_scoped_restore (&pagination_enabled
, false);
4165 /* End up with readline processing input, if necessary. */
4167 SCOPE_EXIT
{ reinstall_readline_callback_handler_cleanup (); };
4169 /* We're handling a live event, so make sure we're doing live
4170 debugging. If we're looking at traceframes while the target is
4171 running, we're going to need to get back to that mode after
4172 handling the event. */
4173 gdb::optional
<scoped_restore_current_traceframe
> maybe_restore_traceframe
;
4176 maybe_restore_traceframe
.emplace ();
4177 set_current_traceframe (-1);
4180 /* The user/frontend should not notice a thread switch due to
4181 internal events. Make sure we revert to the user selected
4182 thread and frame after handling the event and running any
4183 breakpoint commands. */
4184 scoped_restore_current_thread restore_thread
;
4186 overlay_cache_invalid
= 1;
4187 /* Flush target cache before starting to handle each event. Target
4188 was running and cache could be stale. This is just a heuristic.
4189 Running threads may modify target memory, but we don't get any
4191 target_dcache_invalidate ();
4193 scoped_restore save_exec_dir
4194 = make_scoped_restore (&execution_direction
,
4195 target_execution_direction ());
4197 /* Allow targets to pause their resumed threads while we handle
4199 scoped_disable_commit_resumed
disable_commit_resumed ("handling event");
4201 if (!do_target_wait (ecs
, TARGET_WNOHANG
))
4203 infrun_debug_printf ("do_target_wait returned no event");
4204 disable_commit_resumed
.reset_and_commit ();
4208 gdb_assert (ecs
->ws
.kind () != TARGET_WAITKIND_IGNORE
);
4210 /* Switch to the target that generated the event, so we can do
4212 switch_to_target_no_thread (ecs
->target
);
4215 print_target_wait_results (minus_one_ptid
, ecs
->ptid
, ecs
->ws
);
4217 /* If an error happens while handling the event, propagate GDB's
4218 knowledge of the executing state to the frontend/user running
4220 ptid_t finish_ptid
= !target_is_non_stop_p () ? minus_one_ptid
: ecs
->ptid
;
4221 scoped_finish_thread_state
finish_state (ecs
->target
, finish_ptid
);
4223 /* Get executed before scoped_restore_current_thread above to apply
4224 still for the thread which has thrown the exception. */
4225 auto defer_bpstat_clear
4226 = make_scope_exit (bpstat_clear_actions
);
4227 auto defer_delete_threads
4228 = make_scope_exit (delete_just_stopped_threads_infrun_breakpoints
);
4230 /* Now figure out what to do with the result of the result. */
4231 handle_inferior_event (ecs
);
4233 if (!ecs
->wait_some_more
)
4235 struct inferior
*inf
= find_inferior_ptid (ecs
->target
, ecs
->ptid
);
4236 bool should_stop
= true;
4237 struct thread_info
*thr
= ecs
->event_thread
;
4239 delete_just_stopped_threads_infrun_breakpoints ();
4241 if (thr
!= nullptr && thr
->thread_fsm () != nullptr)
4242 should_stop
= thr
->thread_fsm ()->should_stop (thr
);
4250 bool should_notify_stop
= true;
4253 stop_all_threads_if_all_stop_mode ();
4255 clean_up_just_stopped_threads_fsms (ecs
);
4257 if (thr
!= nullptr && thr
->thread_fsm () != nullptr)
4259 = thr
->thread_fsm ()->should_notify_stop ();
4261 if (should_notify_stop
)
4263 /* We may not find an inferior if this was a process exit. */
4264 if (inf
== nullptr || inf
->control
.stop_soon
== NO_STOP_QUIETLY
)
4265 proceeded
= normal_stop ();
4270 inferior_event_handler (INF_EXEC_COMPLETE
);
4274 /* If we got a TARGET_WAITKIND_NO_RESUMED event, then the
4275 previously selected thread is gone. We have two
4276 choices - switch to no thread selected, or restore the
4277 previously selected thread (now exited). We chose the
4278 later, just because that's what GDB used to do. After
4279 this, "info threads" says "The current thread <Thread
4280 ID 2> has terminated." instead of "No thread
4284 && ecs
->ws
.kind () != TARGET_WAITKIND_NO_RESUMED
)
4285 restore_thread
.dont_restore ();
4289 defer_delete_threads
.release ();
4290 defer_bpstat_clear
.release ();
4292 /* No error, don't finish the thread states yet. */
4293 finish_state
.release ();
4295 disable_commit_resumed
.reset_and_commit ();
4297 /* This scope is used to ensure that readline callbacks are
4298 reinstalled here. */
4301 /* Handling this event might have caused some inferiors to become prunable.
4302 For example, the exit of an inferior that was automatically added. Try
4303 to get rid of them. Keeping those around slows down things linearly.
4305 Note that this never removes the current inferior. Therefore, call this
4306 after RESTORE_THREAD went out of scope, in case the event inferior (which was
4307 temporarily made the current inferior) is meant to be deleted.
4309 Call this before all_uis_check_sync_execution_done, so that notifications about
4310 removed inferiors appear before the prompt. */
4313 /* If a UI was in sync execution mode, and now isn't, restore its
4314 prompt (a synchronous execution command has finished, and we're
4315 ready for input). */
4316 all_uis_check_sync_execution_done ();
4319 && exec_done_display_p
4320 && (inferior_ptid
== null_ptid
4321 || inferior_thread ()->state
!= THREAD_RUNNING
))
4322 gdb_printf (_("completed.\n"));
4328 set_step_info (thread_info
*tp
, frame_info_ptr frame
,
4329 struct symtab_and_line sal
)
4331 /* This can be removed once this function no longer implicitly relies on the
4332 inferior_ptid value. */
4333 gdb_assert (inferior_ptid
== tp
->ptid
);
4335 tp
->control
.step_frame_id
= get_frame_id (frame
);
4336 tp
->control
.step_stack_frame_id
= get_stack_frame_id (frame
);
4338 tp
->current_symtab
= sal
.symtab
;
4339 tp
->current_line
= sal
.line
;
4342 ("symtab = %s, line = %d, step_frame_id = %s, step_stack_frame_id = %s",
4343 tp
->current_symtab
!= nullptr ? tp
->current_symtab
->filename
: "<null>",
4345 tp
->control
.step_frame_id
.to_string ().c_str (),
4346 tp
->control
.step_stack_frame_id
.to_string ().c_str ());
4349 /* Clear context switchable stepping state. */
4352 init_thread_stepping_state (struct thread_info
*tss
)
4354 tss
->stepped_breakpoint
= 0;
4355 tss
->stepping_over_breakpoint
= 0;
4356 tss
->stepping_over_watchpoint
= 0;
4357 tss
->step_after_step_resume_breakpoint
= 0;
4363 set_last_target_status (process_stratum_target
*target
, ptid_t ptid
,
4364 const target_waitstatus
&status
)
4366 target_last_proc_target
= target
;
4367 target_last_wait_ptid
= ptid
;
4368 target_last_waitstatus
= status
;
4374 get_last_target_status (process_stratum_target
**target
, ptid_t
*ptid
,
4375 target_waitstatus
*status
)
4377 if (target
!= nullptr)
4378 *target
= target_last_proc_target
;
4379 if (ptid
!= nullptr)
4380 *ptid
= target_last_wait_ptid
;
4381 if (status
!= nullptr)
4382 *status
= target_last_waitstatus
;
4388 nullify_last_target_wait_ptid (void)
4390 target_last_proc_target
= nullptr;
4391 target_last_wait_ptid
= minus_one_ptid
;
4392 target_last_waitstatus
= {};
4395 /* Switch thread contexts. */
4398 context_switch (execution_control_state
*ecs
)
4400 if (ecs
->ptid
!= inferior_ptid
4401 && (inferior_ptid
== null_ptid
4402 || ecs
->event_thread
!= inferior_thread ()))
4404 infrun_debug_printf ("Switching context from %s to %s",
4405 inferior_ptid
.to_string ().c_str (),
4406 ecs
->ptid
.to_string ().c_str ());
4409 switch_to_thread (ecs
->event_thread
);
4412 /* If the target can't tell whether we've hit breakpoints
4413 (target_supports_stopped_by_sw_breakpoint), and we got a SIGTRAP,
4414 check whether that could have been caused by a breakpoint. If so,
4415 adjust the PC, per gdbarch_decr_pc_after_break. */
4418 adjust_pc_after_break (struct thread_info
*thread
,
4419 const target_waitstatus
&ws
)
4421 struct regcache
*regcache
;
4422 struct gdbarch
*gdbarch
;
4423 CORE_ADDR breakpoint_pc
, decr_pc
;
4425 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
4426 we aren't, just return.
4428 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
4429 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
4430 implemented by software breakpoints should be handled through the normal
4433 NOTE drow/2004-01-31: On some targets, breakpoints may generate
4434 different signals (SIGILL or SIGEMT for instance), but it is less
4435 clear where the PC is pointing afterwards. It may not match
4436 gdbarch_decr_pc_after_break. I don't know any specific target that
4437 generates these signals at breakpoints (the code has been in GDB since at
4438 least 1992) so I can not guess how to handle them here.
4440 In earlier versions of GDB, a target with
4441 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
4442 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
4443 target with both of these set in GDB history, and it seems unlikely to be
4444 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
4446 if (ws
.kind () != TARGET_WAITKIND_STOPPED
)
4449 if (ws
.sig () != GDB_SIGNAL_TRAP
)
4452 /* In reverse execution, when a breakpoint is hit, the instruction
4453 under it has already been de-executed. The reported PC always
4454 points at the breakpoint address, so adjusting it further would
4455 be wrong. E.g., consider this case on a decr_pc_after_break == 1
4458 B1 0x08000000 : INSN1
4459 B2 0x08000001 : INSN2
4461 PC -> 0x08000003 : INSN4
4463 Say you're stopped at 0x08000003 as above. Reverse continuing
4464 from that point should hit B2 as below. Reading the PC when the
4465 SIGTRAP is reported should read 0x08000001 and INSN2 should have
4466 been de-executed already.
4468 B1 0x08000000 : INSN1
4469 B2 PC -> 0x08000001 : INSN2
4473 We can't apply the same logic as for forward execution, because
4474 we would wrongly adjust the PC to 0x08000000, since there's a
4475 breakpoint at PC - 1. We'd then report a hit on B1, although
4476 INSN1 hadn't been de-executed yet. Doing nothing is the correct
4478 if (execution_direction
== EXEC_REVERSE
)
4481 /* If the target can tell whether the thread hit a SW breakpoint,
4482 trust it. Targets that can tell also adjust the PC
4484 if (target_supports_stopped_by_sw_breakpoint ())
4487 /* Note that relying on whether a breakpoint is planted in memory to
4488 determine this can fail. E.g,. the breakpoint could have been
4489 removed since. Or the thread could have been told to step an
4490 instruction the size of a breakpoint instruction, and only
4491 _after_ was a breakpoint inserted at its address. */
4493 /* If this target does not decrement the PC after breakpoints, then
4494 we have nothing to do. */
4495 regcache
= get_thread_regcache (thread
);
4496 gdbarch
= regcache
->arch ();
4498 decr_pc
= gdbarch_decr_pc_after_break (gdbarch
);
4502 const address_space
*aspace
= regcache
->aspace ();
4504 /* Find the location where (if we've hit a breakpoint) the
4505 breakpoint would be. */
4506 breakpoint_pc
= regcache_read_pc (regcache
) - decr_pc
;
4508 /* If the target can't tell whether a software breakpoint triggered,
4509 fallback to figuring it out based on breakpoints we think were
4510 inserted in the target, and on whether the thread was stepped or
4513 /* Check whether there actually is a software breakpoint inserted at
4516 If in non-stop mode, a race condition is possible where we've
4517 removed a breakpoint, but stop events for that breakpoint were
4518 already queued and arrive later. To suppress those spurious
4519 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
4520 and retire them after a number of stop events are reported. Note
4521 this is an heuristic and can thus get confused. The real fix is
4522 to get the "stopped by SW BP and needs adjustment" info out of
4523 the target/kernel (and thus never reach here; see above). */
4524 if (software_breakpoint_inserted_here_p (aspace
, breakpoint_pc
)
4525 || (target_is_non_stop_p ()
4526 && moribund_breakpoint_here_p (aspace
, breakpoint_pc
)))
4528 gdb::optional
<scoped_restore_tmpl
<int>> restore_operation_disable
;
4530 if (record_full_is_used ())
4531 restore_operation_disable
.emplace
4532 (record_full_gdb_operation_disable_set ());
4534 /* When using hardware single-step, a SIGTRAP is reported for both
4535 a completed single-step and a software breakpoint. Need to
4536 differentiate between the two, as the latter needs adjusting
4537 but the former does not.
4539 The SIGTRAP can be due to a completed hardware single-step only if
4540 - we didn't insert software single-step breakpoints
4541 - this thread is currently being stepped
4543 If any of these events did not occur, we must have stopped due
4544 to hitting a software breakpoint, and have to back up to the
4547 As a special case, we could have hardware single-stepped a
4548 software breakpoint. In this case (prev_pc == breakpoint_pc),
4549 we also need to back up to the breakpoint address. */
4551 if (thread_has_single_step_breakpoints_set (thread
)
4552 || !currently_stepping (thread
)
4553 || (thread
->stepped_breakpoint
4554 && thread
->prev_pc
== breakpoint_pc
))
4555 regcache_write_pc (regcache
, breakpoint_pc
);
4560 stepped_in_from (frame_info_ptr frame
, struct frame_id step_frame_id
)
4562 for (frame
= get_prev_frame (frame
);
4564 frame
= get_prev_frame (frame
))
4566 if (get_frame_id (frame
) == step_frame_id
)
4569 if (get_frame_type (frame
) != INLINE_FRAME
)
4576 /* Look for an inline frame that is marked for skip.
4577 If PREV_FRAME is TRUE start at the previous frame,
4578 otherwise start at the current frame. Stop at the
4579 first non-inline frame, or at the frame where the
4583 inline_frame_is_marked_for_skip (bool prev_frame
, struct thread_info
*tp
)
4585 frame_info_ptr frame
= get_current_frame ();
4588 frame
= get_prev_frame (frame
);
4590 for (; frame
!= nullptr; frame
= get_prev_frame (frame
))
4592 const char *fn
= nullptr;
4593 symtab_and_line sal
;
4596 if (get_frame_id (frame
) == tp
->control
.step_frame_id
)
4598 if (get_frame_type (frame
) != INLINE_FRAME
)
4601 sal
= find_frame_sal (frame
);
4602 sym
= get_frame_function (frame
);
4605 fn
= sym
->print_name ();
4608 && function_name_is_marked_for_skip (fn
, sal
))
4615 /* If the event thread has the stop requested flag set, pretend it
4616 stopped for a GDB_SIGNAL_0 (i.e., as if it stopped due to
4620 handle_stop_requested (struct execution_control_state
*ecs
)
4622 if (ecs
->event_thread
->stop_requested
)
4624 ecs
->ws
.set_stopped (GDB_SIGNAL_0
);
4625 handle_signal_stop (ecs
);
4631 /* Auxiliary function that handles syscall entry/return events.
4632 It returns true if the inferior should keep going (and GDB
4633 should ignore the event), or false if the event deserves to be
4637 handle_syscall_event (struct execution_control_state
*ecs
)
4639 struct regcache
*regcache
;
4642 context_switch (ecs
);
4644 regcache
= get_thread_regcache (ecs
->event_thread
);
4645 syscall_number
= ecs
->ws
.syscall_number ();
4646 ecs
->event_thread
->set_stop_pc (regcache_read_pc (regcache
));
4648 if (catch_syscall_enabled () > 0
4649 && catching_syscall_number (syscall_number
))
4651 infrun_debug_printf ("syscall number=%d", syscall_number
);
4653 ecs
->event_thread
->control
.stop_bpstat
4654 = bpstat_stop_status_nowatch (regcache
->aspace (),
4655 ecs
->event_thread
->stop_pc (),
4656 ecs
->event_thread
, ecs
->ws
);
4658 if (handle_stop_requested (ecs
))
4661 if (bpstat_causes_stop (ecs
->event_thread
->control
.stop_bpstat
))
4663 /* Catchpoint hit. */
4668 if (handle_stop_requested (ecs
))
4671 /* If no catchpoint triggered for this, then keep going. */
4677 /* Lazily fill in the execution_control_state's stop_func_* fields. */
4680 fill_in_stop_func (struct gdbarch
*gdbarch
,
4681 struct execution_control_state
*ecs
)
4683 if (!ecs
->stop_func_filled_in
)
4686 const general_symbol_info
*gsi
;
4688 /* Don't care about return value; stop_func_start and stop_func_name
4689 will both be 0 if it doesn't work. */
4690 find_pc_partial_function_sym (ecs
->event_thread
->stop_pc (),
4692 &ecs
->stop_func_start
,
4693 &ecs
->stop_func_end
,
4695 ecs
->stop_func_name
= gsi
== nullptr ? nullptr : gsi
->print_name ();
4697 /* The call to find_pc_partial_function, above, will set
4698 stop_func_start and stop_func_end to the start and end
4699 of the range containing the stop pc. If this range
4700 contains the entry pc for the block (which is always the
4701 case for contiguous blocks), advance stop_func_start past
4702 the function's start offset and entrypoint. Note that
4703 stop_func_start is NOT advanced when in a range of a
4704 non-contiguous block that does not contain the entry pc. */
4705 if (block
!= nullptr
4706 && ecs
->stop_func_start
<= block
->entry_pc ()
4707 && block
->entry_pc () < ecs
->stop_func_end
)
4709 ecs
->stop_func_start
4710 += gdbarch_deprecated_function_start_offset (gdbarch
);
4712 if (gdbarch_skip_entrypoint_p (gdbarch
))
4713 ecs
->stop_func_start
4714 = gdbarch_skip_entrypoint (gdbarch
, ecs
->stop_func_start
);
4717 ecs
->stop_func_filled_in
= 1;
4722 /* Return the STOP_SOON field of the inferior pointed at by ECS. */
4724 static enum stop_kind
4725 get_inferior_stop_soon (execution_control_state
*ecs
)
4727 struct inferior
*inf
= find_inferior_ptid (ecs
->target
, ecs
->ptid
);
4729 gdb_assert (inf
!= nullptr);
4730 return inf
->control
.stop_soon
;
4733 /* Poll for one event out of the current target. Store the resulting
4734 waitstatus in WS, and return the event ptid. Does not block. */
4737 poll_one_curr_target (struct target_waitstatus
*ws
)
4741 overlay_cache_invalid
= 1;
4743 /* Flush target cache before starting to handle each event.
4744 Target was running and cache could be stale. This is just a
4745 heuristic. Running threads may modify target memory, but we
4746 don't get any event. */
4747 target_dcache_invalidate ();
4749 event_ptid
= target_wait (minus_one_ptid
, ws
, TARGET_WNOHANG
);
4752 print_target_wait_results (minus_one_ptid
, event_ptid
, *ws
);
4757 /* Wait for one event out of any target. */
4759 static wait_one_event
4764 for (inferior
*inf
: all_inferiors ())
4766 process_stratum_target
*target
= inf
->process_target ();
4767 if (target
== nullptr
4768 || !target
->is_async_p ()
4769 || !target
->threads_executing
)
4772 switch_to_inferior_no_thread (inf
);
4774 wait_one_event event
;
4775 event
.target
= target
;
4776 event
.ptid
= poll_one_curr_target (&event
.ws
);
4778 if (event
.ws
.kind () == TARGET_WAITKIND_NO_RESUMED
)
4780 /* If nothing is resumed, remove the target from the
4782 target_async (false);
4784 else if (event
.ws
.kind () != TARGET_WAITKIND_IGNORE
)
4788 /* Block waiting for some event. */
4795 for (inferior
*inf
: all_inferiors ())
4797 process_stratum_target
*target
= inf
->process_target ();
4798 if (target
== nullptr
4799 || !target
->is_async_p ()
4800 || !target
->threads_executing
)
4803 int fd
= target
->async_wait_fd ();
4804 FD_SET (fd
, &readfds
);
4811 /* No waitable targets left. All must be stopped. */
4812 target_waitstatus ws
;
4813 ws
.set_no_resumed ();
4814 return {nullptr, minus_one_ptid
, std::move (ws
)};
4819 int numfds
= interruptible_select (nfds
, &readfds
, 0, nullptr, 0);
4825 perror_with_name ("interruptible_select");
4830 /* Save the thread's event and stop reason to process it later. */
4833 save_waitstatus (struct thread_info
*tp
, const target_waitstatus
&ws
)
4835 infrun_debug_printf ("saving status %s for %s",
4836 ws
.to_string ().c_str (),
4837 tp
->ptid
.to_string ().c_str ());
4839 /* Record for later. */
4840 tp
->set_pending_waitstatus (ws
);
4842 if (ws
.kind () == TARGET_WAITKIND_STOPPED
4843 && ws
.sig () == GDB_SIGNAL_TRAP
)
4845 struct regcache
*regcache
= get_thread_regcache (tp
);
4846 const address_space
*aspace
= regcache
->aspace ();
4847 CORE_ADDR pc
= regcache_read_pc (regcache
);
4849 adjust_pc_after_break (tp
, tp
->pending_waitstatus ());
4851 scoped_restore_current_thread restore_thread
;
4852 switch_to_thread (tp
);
4854 if (target_stopped_by_watchpoint ())
4855 tp
->set_stop_reason (TARGET_STOPPED_BY_WATCHPOINT
);
4856 else if (target_supports_stopped_by_sw_breakpoint ()
4857 && target_stopped_by_sw_breakpoint ())
4858 tp
->set_stop_reason (TARGET_STOPPED_BY_SW_BREAKPOINT
);
4859 else if (target_supports_stopped_by_hw_breakpoint ()
4860 && target_stopped_by_hw_breakpoint ())
4861 tp
->set_stop_reason (TARGET_STOPPED_BY_HW_BREAKPOINT
);
4862 else if (!target_supports_stopped_by_hw_breakpoint ()
4863 && hardware_breakpoint_inserted_here_p (aspace
, pc
))
4864 tp
->set_stop_reason (TARGET_STOPPED_BY_HW_BREAKPOINT
);
4865 else if (!target_supports_stopped_by_sw_breakpoint ()
4866 && software_breakpoint_inserted_here_p (aspace
, pc
))
4867 tp
->set_stop_reason (TARGET_STOPPED_BY_SW_BREAKPOINT
);
4868 else if (!thread_has_single_step_breakpoints_set (tp
)
4869 && currently_stepping (tp
))
4870 tp
->set_stop_reason (TARGET_STOPPED_BY_SINGLE_STEP
);
4874 /* Mark the non-executing threads accordingly. In all-stop, all
4875 threads of all processes are stopped when we get any event
4876 reported. In non-stop mode, only the event thread stops. */
4879 mark_non_executing_threads (process_stratum_target
*target
,
4881 const target_waitstatus
&ws
)
4885 if (!target_is_non_stop_p ())
4886 mark_ptid
= minus_one_ptid
;
4887 else if (ws
.kind () == TARGET_WAITKIND_SIGNALLED
4888 || ws
.kind () == TARGET_WAITKIND_EXITED
)
4890 /* If we're handling a process exit in non-stop mode, even
4891 though threads haven't been deleted yet, one would think
4892 that there is nothing to do, as threads of the dead process
4893 will be soon deleted, and threads of any other process were
4894 left running. However, on some targets, threads survive a
4895 process exit event. E.g., for the "checkpoint" command,
4896 when the current checkpoint/fork exits, linux-fork.c
4897 automatically switches to another fork from within
4898 target_mourn_inferior, by associating the same
4899 inferior/thread to another fork. We haven't mourned yet at
4900 this point, but we must mark any threads left in the
4901 process as not-executing so that finish_thread_state marks
4902 them stopped (in the user's perspective) if/when we present
4903 the stop to the user. */
4904 mark_ptid
= ptid_t (event_ptid
.pid ());
4907 mark_ptid
= event_ptid
;
4909 set_executing (target
, mark_ptid
, false);
4911 /* Likewise the resumed flag. */
4912 set_resumed (target
, mark_ptid
, false);
4915 /* Handle one event after stopping threads. If the eventing thread
4916 reports back any interesting event, we leave it pending. If the
4917 eventing thread was in the middle of a displaced step, we
4918 cancel/finish it, and unless the thread's inferior is being
4919 detached, put the thread back in the step-over chain. Returns true
4920 if there are no resumed threads left in the target (thus there's no
4921 point in waiting further), false otherwise. */
4924 handle_one (const wait_one_event
&event
)
4927 ("%s %s", event
.ws
.to_string ().c_str (),
4928 event
.ptid
.to_string ().c_str ());
4930 if (event
.ws
.kind () == TARGET_WAITKIND_NO_RESUMED
)
4932 /* All resumed threads exited. */
4935 else if (event
.ws
.kind () == TARGET_WAITKIND_THREAD_EXITED
4936 || event
.ws
.kind () == TARGET_WAITKIND_EXITED
4937 || event
.ws
.kind () == TARGET_WAITKIND_SIGNALLED
)
4939 /* One thread/process exited/signalled. */
4941 thread_info
*t
= nullptr;
4943 /* The target may have reported just a pid. If so, try
4944 the first non-exited thread. */
4945 if (event
.ptid
.is_pid ())
4947 int pid
= event
.ptid
.pid ();
4948 inferior
*inf
= find_inferior_pid (event
.target
, pid
);
4949 for (thread_info
*tp
: inf
->non_exited_threads ())
4955 /* If there is no available thread, the event would
4956 have to be appended to a per-inferior event list,
4957 which does not exist (and if it did, we'd have
4958 to adjust run control command to be able to
4959 resume such an inferior). We assert here instead
4960 of going into an infinite loop. */
4961 gdb_assert (t
!= nullptr);
4964 ("using %s", t
->ptid
.to_string ().c_str ());
4968 t
= find_thread_ptid (event
.target
, event
.ptid
);
4969 /* Check if this is the first time we see this thread.
4970 Don't bother adding if it individually exited. */
4972 && event
.ws
.kind () != TARGET_WAITKIND_THREAD_EXITED
)
4973 t
= add_thread (event
.target
, event
.ptid
);
4978 /* Set the threads as non-executing to avoid
4979 another stop attempt on them. */
4980 switch_to_thread_no_regs (t
);
4981 mark_non_executing_threads (event
.target
, event
.ptid
,
4983 save_waitstatus (t
, event
.ws
);
4984 t
->stop_requested
= false;
4989 thread_info
*t
= find_thread_ptid (event
.target
, event
.ptid
);
4991 t
= add_thread (event
.target
, event
.ptid
);
4993 t
->stop_requested
= 0;
4994 t
->set_executing (false);
4995 t
->set_resumed (false);
4996 t
->control
.may_range_step
= 0;
4998 /* This may be the first time we see the inferior report
5000 if (t
->inf
->needs_setup
)
5002 switch_to_thread_no_regs (t
);
5006 if (event
.ws
.kind () == TARGET_WAITKIND_STOPPED
5007 && event
.ws
.sig () == GDB_SIGNAL_0
)
5009 /* We caught the event that we intended to catch, so
5010 there's no event to save as pending. */
5012 if (displaced_step_finish (t
, GDB_SIGNAL_0
)
5013 == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED
)
5015 /* Add it back to the step-over queue. */
5017 ("displaced-step of %s canceled",
5018 t
->ptid
.to_string ().c_str ());
5020 t
->control
.trap_expected
= 0;
5021 if (!t
->inf
->detaching
)
5022 global_thread_step_over_chain_enqueue (t
);
5027 enum gdb_signal sig
;
5028 struct regcache
*regcache
;
5031 ("target_wait %s, saving status for %s",
5032 event
.ws
.to_string ().c_str (),
5033 t
->ptid
.to_string ().c_str ());
5035 /* Record for later. */
5036 save_waitstatus (t
, event
.ws
);
5038 sig
= (event
.ws
.kind () == TARGET_WAITKIND_STOPPED
5039 ? event
.ws
.sig () : GDB_SIGNAL_0
);
5041 if (displaced_step_finish (t
, sig
)
5042 == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED
)
5044 /* Add it back to the step-over queue. */
5045 t
->control
.trap_expected
= 0;
5046 if (!t
->inf
->detaching
)
5047 global_thread_step_over_chain_enqueue (t
);
5050 regcache
= get_thread_regcache (t
);
5051 t
->set_stop_pc (regcache_read_pc (regcache
));
5053 infrun_debug_printf ("saved stop_pc=%s for %s "
5054 "(currently_stepping=%d)",
5055 paddress (target_gdbarch (), t
->stop_pc ()),
5056 t
->ptid
.to_string ().c_str (),
5057 currently_stepping (t
));
5067 stop_all_threads (const char *reason
, inferior
*inf
)
5069 /* We may need multiple passes to discover all threads. */
5073 gdb_assert (exists_non_stop_target ());
5075 INFRUN_SCOPED_DEBUG_START_END ("reason=%s, inf=%d", reason
,
5076 inf
!= nullptr ? inf
->num
: -1);
5078 infrun_debug_show_threads ("non-exited threads",
5079 all_non_exited_threads ());
5081 scoped_restore_current_thread restore_thread
;
5083 /* Enable thread events on relevant targets. */
5084 for (auto *target
: all_non_exited_process_targets ())
5086 if (inf
!= nullptr && inf
->process_target () != target
)
5089 switch_to_target_no_thread (target
);
5090 target_thread_events (true);
5095 /* Disable thread events on relevant targets. */
5096 for (auto *target
: all_non_exited_process_targets ())
5098 if (inf
!= nullptr && inf
->process_target () != target
)
5101 switch_to_target_no_thread (target
);
5102 target_thread_events (false);
5105 /* Use debug_prefixed_printf directly to get a meaningful function
5108 debug_prefixed_printf ("infrun", "stop_all_threads", "done");
5111 /* Request threads to stop, and then wait for the stops. Because
5112 threads we already know about can spawn more threads while we're
5113 trying to stop them, and we only learn about new threads when we
5114 update the thread list, do this in a loop, and keep iterating
5115 until two passes find no threads that need to be stopped. */
5116 for (pass
= 0; pass
< 2; pass
++, iterations
++)
5118 infrun_debug_printf ("pass=%d, iterations=%d", pass
, iterations
);
5121 int waits_needed
= 0;
5123 for (auto *target
: all_non_exited_process_targets ())
5125 if (inf
!= nullptr && inf
->process_target () != target
)
5128 switch_to_target_no_thread (target
);
5129 update_thread_list ();
5132 /* Go through all threads looking for threads that we need
5133 to tell the target to stop. */
5134 for (thread_info
*t
: all_non_exited_threads ())
5136 if (inf
!= nullptr && t
->inf
!= inf
)
5139 /* For a single-target setting with an all-stop target,
5140 we would not even arrive here. For a multi-target
5141 setting, until GDB is able to handle a mixture of
5142 all-stop and non-stop targets, simply skip all-stop
5143 targets' threads. This should be fine due to the
5144 protection of 'check_multi_target_resumption'. */
5146 switch_to_thread_no_regs (t
);
5147 if (!target_is_non_stop_p ())
5150 if (t
->executing ())
5152 /* If already stopping, don't request a stop again.
5153 We just haven't seen the notification yet. */
5154 if (!t
->stop_requested
)
5156 infrun_debug_printf (" %s executing, need stop",
5157 t
->ptid
.to_string ().c_str ());
5158 target_stop (t
->ptid
);
5159 t
->stop_requested
= 1;
5163 infrun_debug_printf (" %s executing, already stopping",
5164 t
->ptid
.to_string ().c_str ());
5167 if (t
->stop_requested
)
5172 infrun_debug_printf (" %s not executing",
5173 t
->ptid
.to_string ().c_str ());
5175 /* The thread may be not executing, but still be
5176 resumed with a pending status to process. */
5177 t
->set_resumed (false);
5181 if (waits_needed
== 0)
5184 /* If we find new threads on the second iteration, restart
5185 over. We want to see two iterations in a row with all
5190 for (int i
= 0; i
< waits_needed
; i
++)
5192 wait_one_event event
= wait_one ();
5193 if (handle_one (event
))
5200 /* Handle a TARGET_WAITKIND_NO_RESUMED event. */
/* NOTE(review): this excerpt has lost brace-only lines and some
   statements (e.g. the function's return statements and its return
   type).  Judging by the visible caller in handle_inferior_event
   ("&& handle_no_resumed (ecs)" followed by an early return), a
   true-ish result presumably means "event handled/ignored, go back to
   waiting" -- confirm against the complete source.  Code below is kept
   verbatim.  */
5203 handle_no_resumed (struct execution_control_state
*ecs
)
/* Async case: nothing is synchronously waiting for events, so the
   event can simply be ignored (unless some UI has a blocked prompt,
   i.e. a synchronous command in flight -- that check is visible just
   below).  */
5205 if (target_can_async_p ())
5207 bool any_sync
= false;
5209 for (ui
*ui
: all_uis ())
5211 if (ui
->prompt_state
== PROMPT_BLOCKED
)
5219 /* There were no unwaited-for children left in the target, but,
5220 we're not synchronously waiting for events either. Just
5223 infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED (ignoring: bg)");
5224 prepare_to_wait (ecs
);
5229 /* Otherwise, if we were running a synchronous execution command, we
5230 may need to cancel it and give the user back the terminal.
5232 In non-stop mode, the target can't tell whether we've already
5233 consumed previous stop events, so it can end up sending us a
5234 no-resumed event like so:
5236 #0 - thread 1 is left stopped
5238 #1 - thread 2 is resumed and hits breakpoint
5239 -> TARGET_WAITKIND_STOPPED
5241 #2 - thread 3 is resumed and exits
5242 this is the last resumed thread, so
5243 -> TARGET_WAITKIND_NO_RESUMED
5245 #3 - gdb processes stop for thread 2 and decides to re-resume
5248 #4 - gdb processes the TARGET_WAITKIND_NO_RESUMED event.
5249 thread 2 is now resumed, so the event should be ignored.
5251 IOW, if the stop for thread 2 doesn't end a foreground command,
5252 then we need to ignore the following TARGET_WAITKIND_NO_RESUMED
5253 event. But it could be that the event meant that thread 2 itself
5254 (or whatever other thread was the last resumed thread) exited.
5256 To address this we refresh the thread list and check whether we
5257 have resumed threads _now_. In the example above, this removes
5258 thread 3 from the thread list. If thread 2 was re-resumed, we
5259 ignore this event. If we find no thread resumed, then we cancel
5260 the synchronous command and show "no unwaited-for " to the
5263 inferior
*curr_inf
= current_inferior ();
5265 scoped_restore_current_thread restore_thread
;
5266 update_thread_list ();
5270 - the current target has no thread executing, and
5271 - the current inferior is native, and
5272 - the current inferior is the one which has the terminal, and
5275 then a Ctrl-C from this point on would remain stuck in the
5276 kernel, until a thread resumes and dequeues it. That would
5277 result in the GDB CLI not reacting to Ctrl-C, not able to
5278 interrupt the program. To address this, if the current inferior
5279 no longer has any thread executing, we give the terminal to some
5280 other inferior that has at least one thread executing. */
5281 bool swap_terminal
= true;
5283 /* Whether to ignore this TARGET_WAITKIND_NO_RESUMED event, or
5284 whether to report it to the user. */
5285 bool ignore_event
= false;
5287 for (thread_info
*thread
: all_non_exited_threads ())
/* Hand the terminal to the first executing thread's inferior, if it
   is not the current one (which no longer has executing threads).  */
5289 if (swap_terminal
&& thread
->executing ())
5291 if (thread
->inf
!= curr_inf
)
5293 target_terminal::ours ();
5295 switch_to_thread (thread
);
5296 target_terminal::inferior ();
5298 swap_terminal
= false;
5301 if (!ignore_event
&& thread
->resumed ())
5303 /* Either there were no unwaited-for children left in the
5304 target at some point, but there are now, or some target
5305 other than the eventing one has unwaited-for children
5306 left. Just ignore. */
5307 infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED "
5308 "(ignoring: found resumed)");
5310 ignore_event
= true;
/* NOTE(review): once the event is to be ignored and the terminal has
   been handed off, the thread scan presumably stops early -- the
   break/continue statements are not visible in this excerpt.  */
5313 if (ignore_event
&& !swap_terminal
)
5319 switch_to_inferior_no_thread (curr_inf
);
5320 prepare_to_wait (ecs
);
5324 /* Go ahead and report the event. */
5328 /* Given an execution control state that has been freshly filled in by
5329 an event from the inferior, figure out what it means and take
5332 The alternatives are:
5334 1) stop_waiting and return; to really stop and return to the
5337 2) keep_going and return; to wait for the next event (set
5338 ecs->event_thread->stepping_over_breakpoint to 1 to single step
5342 handle_inferior_event (struct execution_control_state
*ecs
)
/* NOTE(review): central event dispatcher -- after some common
   preprocessing it switches on ecs->ws.kind ().  This excerpt has lost
   brace-only/blank lines and some statements (returns, breaks); code
   is kept verbatim below.  */
5344 /* Make sure that all temporary struct value objects that were
5345 created during the handling of the event get deleted at the
5347 scoped_value_mark free_values
;
5349 infrun_debug_printf ("%s", ecs
->ws
.to_string ().c_str ());
5351 if (ecs
->ws
.kind () == TARGET_WAITKIND_IGNORE
)
5353 /* We had an event in the inferior, but we are not interested in
5354 handling it at this level. The lower layers have already
5355 done what needs to be done, if anything.
5357 One of the possible circumstances for this is when the
5358 inferior produces output for the console. The inferior has
5359 not stopped, and we are ignoring the event. Another possible
5360 circumstance is any event which the lower level knows will be
5361 reported multiple times without an intervening resume. */
5362 prepare_to_wait (ecs
);
/* Exited threads need no further processing either; just wait for
   the next event.  */
5366 if (ecs
->ws
.kind () == TARGET_WAITKIND_THREAD_EXITED
)
5368 prepare_to_wait (ecs
);
/* A NO_RESUMED event may need to be ignored entirely -- see
   handle_no_resumed.  */
5372 if (ecs
->ws
.kind () == TARGET_WAITKIND_NO_RESUMED
5373 && handle_no_resumed (ecs
))
5376 /* Cache the last target/ptid/waitstatus. */
5377 set_last_target_status (ecs
->target
, ecs
->ptid
, ecs
->ws
);
5379 /* Always clear state belonging to the previous time we stopped. */
5380 stop_stack_dummy
= STOP_NONE
;
5382 if (ecs
->ws
.kind () == TARGET_WAITKIND_NO_RESUMED
)
5384 /* No unwaited-for children left. IOW, all resumed children
5386 stop_print_frame
= false;
/* Look up (or create) the event thread, except for process-wide exit
   events which may not correspond to any one thread.  */
5391 if (ecs
->ws
.kind () != TARGET_WAITKIND_EXITED
5392 && ecs
->ws
.kind () != TARGET_WAITKIND_SIGNALLED
)
5394 ecs
->event_thread
= find_thread_ptid (ecs
->target
, ecs
->ptid
);
5395 /* If it's a new thread, add it to the thread database. */
5396 if (ecs
->event_thread
== nullptr)
5397 ecs
->event_thread
= add_thread (ecs
->target
, ecs
->ptid
);
5399 /* Disable range stepping. If the next step request could use a
5400 range, this will be end up re-enabled then. */
5401 ecs
->event_thread
->control
.may_range_step
= 0;
5404 /* Dependent on valid ECS->EVENT_THREAD. */
5405 adjust_pc_after_break (ecs
->event_thread
, ecs
->ws
);
5407 /* Dependent on the current PC value modified by adjust_pc_after_break. */
5408 reinit_frame_cache ();
5410 breakpoint_retire_moribund ();
5412 /* First, distinguish signals caused by the debugger from signals
5413 that have to do with the program's own actions. Note that
5414 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
5415 on the operating system version. Here we detect when a SIGILL or
5416 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
5417 something similar for SIGSEGV, since a SIGSEGV will be generated
5418 when we're trying to execute a breakpoint instruction on a
5419 non-executable stack. This happens for call dummy breakpoints
5420 for architectures like SPARC that place call dummies on the
5422 if (ecs
->ws
.kind () == TARGET_WAITKIND_STOPPED
5423 && (ecs
->ws
.sig () == GDB_SIGNAL_ILL
5424 || ecs
->ws
.sig () == GDB_SIGNAL_SEGV
5425 || ecs
->ws
.sig () == GDB_SIGNAL_EMT
))
5427 struct regcache
*regcache
= get_thread_regcache (ecs
->event_thread
);
5429 if (breakpoint_inserted_here_p (regcache
->aspace (),
5430 regcache_read_pc (regcache
)))
5432 infrun_debug_printf ("Treating signal as SIGTRAP");
5433 ecs
->ws
.set_stopped (GDB_SIGNAL_TRAP
);
5437 mark_non_executing_threads (ecs
->target
, ecs
->ptid
, ecs
->ws
);
/* Main per-kind dispatch.  */
5439 switch (ecs
->ws
.kind ())
5441 case TARGET_WAITKIND_LOADED
:
5443 context_switch (ecs
);
5444 /* Ignore gracefully during startup of the inferior, as it might
5445 be the shell which has just loaded some objects, otherwise
5446 add the symbols for the newly loaded objects. Also ignore at
5447 the beginning of an attach or remote session; we will query
5448 the full list of libraries once the connection is
5451 stop_kind stop_soon
= get_inferior_stop_soon (ecs
);
5452 if (stop_soon
== NO_STOP_QUIETLY
)
5454 struct regcache
*regcache
;
5456 regcache
= get_thread_regcache (ecs
->event_thread
);
5458 handle_solib_event ();
5460 ecs
->event_thread
->set_stop_pc (regcache_read_pc (regcache
));
5461 ecs
->event_thread
->control
.stop_bpstat
5462 = bpstat_stop_status_nowatch (regcache
->aspace (),
5463 ecs
->event_thread
->stop_pc (),
5464 ecs
->event_thread
, ecs
->ws
);
5466 if (handle_stop_requested (ecs
))
5469 if (bpstat_causes_stop (ecs
->event_thread
->control
.stop_bpstat
))
5471 /* A catchpoint triggered. */
5472 process_event_stop_test (ecs
);
5476 /* If requested, stop when the dynamic linker notifies
5477 gdb of events. This allows the user to get control
5478 and place breakpoints in initializer routines for
5479 dynamically loaded objects (among other things). */
5480 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
5481 if (stop_on_solib_events
)
5483 /* Make sure we print "Stopped due to solib-event" in
5485 stop_print_frame
= true;
5492 /* If we are skipping through a shell, or through shared library
5493 loading that we aren't interested in, resume the program. If
5494 we're running the program normally, also resume. */
5495 if (stop_soon
== STOP_QUIETLY
|| stop_soon
== NO_STOP_QUIETLY
)
5497 /* Loading of shared libraries might have changed breakpoint
5498 addresses. Make sure new breakpoints are inserted. */
5499 if (stop_soon
== NO_STOP_QUIETLY
)
5500 insert_breakpoints ();
5501 resume (GDB_SIGNAL_0
);
5502 prepare_to_wait (ecs
);
5506 /* But stop if we're attaching or setting up a remote
5508 if (stop_soon
== STOP_QUIETLY_NO_SIGSTOP
5509 || stop_soon
== STOP_QUIETLY_REMOTE
)
5511 infrun_debug_printf ("quietly stopped");
5516 internal_error (_("unhandled stop_soon: %d"), (int) stop_soon
);
5519 case TARGET_WAITKIND_SPURIOUS
:
5520 if (handle_stop_requested (ecs
))
5522 context_switch (ecs
);
5523 resume (GDB_SIGNAL_0
);
5524 prepare_to_wait (ecs
);
5527 case TARGET_WAITKIND_THREAD_CREATED
:
5528 if (handle_stop_requested (ecs
))
5530 context_switch (ecs
);
5531 if (!switch_back_to_stepped_thread (ecs
))
5535 case TARGET_WAITKIND_EXITED
:
5536 case TARGET_WAITKIND_SIGNALLED
:
5538 /* Depending on the system, ecs->ptid may point to a thread or
5539 to a process. On some targets, target_mourn_inferior may
5540 need to have access to the just-exited thread. That is the
5541 case of GNU/Linux's "checkpoint" support, for example.
5542 Call the switch_to_xxx routine as appropriate. */
5543 thread_info
*thr
= find_thread_ptid (ecs
->target
, ecs
->ptid
);
5545 switch_to_thread (thr
);
5548 inferior
*inf
= find_inferior_ptid (ecs
->target
, ecs
->ptid
);
5549 switch_to_inferior_no_thread (inf
);
5552 handle_vfork_child_exec_or_exit (0);
5553 target_terminal::ours (); /* Must do this before mourn anyway. */
5555 /* Clearing any previous state of convenience variables. */
5556 clear_exit_convenience_vars ();
5558 if (ecs
->ws
.kind () == TARGET_WAITKIND_EXITED
)
5560 /* Record the exit code in the convenience variable $_exitcode, so
5561 that the user can inspect this again later. */
5562 set_internalvar_integer (lookup_internalvar ("_exitcode"),
5563 (LONGEST
) ecs
->ws
.exit_status ());
5565 /* Also record this in the inferior itself. */
5566 current_inferior ()->has_exit_code
= 1;
5567 current_inferior ()->exit_code
= (LONGEST
) ecs
->ws
.exit_status ();
5569 /* Support the --return-child-result option. */
5570 return_child_result_value
= ecs
->ws
.exit_status ();
5572 gdb::observers::exited
.notify (ecs
->ws
.exit_status ());
/* Signalled (killed-by-signal) branch: record $_exitsignal when the
   gdbarch can translate the signal number for the target.  */
5576 struct gdbarch
*gdbarch
= current_inferior ()->gdbarch
;
5578 if (gdbarch_gdb_signal_to_target_p (gdbarch
))
5580 /* Set the value of the internal variable $_exitsignal,
5581 which holds the signal uncaught by the inferior. */
5582 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
5583 gdbarch_gdb_signal_to_target (gdbarch
,
5588 /* We don't have access to the target's method used for
5589 converting between signal numbers (GDB's internal
5590 representation <-> target's representation).
5591 Therefore, we cannot do a good job at displaying this
5592 information to the user. It's better to just warn
5593 her about it (if infrun debugging is enabled), and
5595 infrun_debug_printf ("Cannot fill $_exitsignal with the correct "
5599 gdb::observers::signal_exited
.notify (ecs
->ws
.sig ());
5602 gdb_flush (gdb_stdout
);
5603 target_mourn_inferior (inferior_ptid
);
5604 stop_print_frame
= false;
5608 case TARGET_WAITKIND_FORKED
:
5609 case TARGET_WAITKIND_VFORKED
:
5610 /* Check whether the inferior is displaced stepping. */
5612 struct regcache
*regcache
= get_thread_regcache (ecs
->event_thread
);
5613 struct gdbarch
*gdbarch
= regcache
->arch ();
5614 inferior
*parent_inf
= find_inferior_ptid (ecs
->target
, ecs
->ptid
);
5616 /* If this is a fork (child gets its own address space copy)
5617 and some displaced step buffers were in use at the time of
5618 the fork, restore the displaced step buffer bytes in the
5621 Architectures which support displaced stepping and fork
5622 events must supply an implementation of
5623 gdbarch_displaced_step_restore_all_in_ptid. This is not
5624 enforced during gdbarch validation to support architectures
5625 which support displaced stepping but not forks. */
5626 if (ecs
->ws
.kind () == TARGET_WAITKIND_FORKED
5627 && gdbarch_supports_displaced_stepping (gdbarch
))
5628 gdbarch_displaced_step_restore_all_in_ptid
5629 (gdbarch
, parent_inf
, ecs
->ws
.child_ptid ());
5631 /* If displaced stepping is supported, and thread ecs->ptid is
5632 displaced stepping. */
5633 if (displaced_step_in_progress_thread (ecs
->event_thread
))
5635 struct regcache
*child_regcache
;
5636 CORE_ADDR parent_pc
;
5638 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
5639 indicating that the displaced stepping of syscall instruction
5640 has been done. Perform cleanup for parent process here. Note
5641 that this operation also cleans up the child process for vfork,
5642 because their pages are shared. */
5643 displaced_step_finish (ecs
->event_thread
, GDB_SIGNAL_TRAP
);
5644 /* Start a new step-over in another thread if there's one
5648 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
5649 the child's PC is also within the scratchpad. Set the child's PC
5650 to the parent's PC value, which has already been fixed up.
5651 FIXME: we use the parent's aspace here, although we're touching
5652 the child, because the child hasn't been added to the inferior
5653 list yet at this point. */
5656 = get_thread_arch_aspace_regcache (parent_inf
->process_target (),
5657 ecs
->ws
.child_ptid (),
5659 parent_inf
->aspace
);
5660 /* Read PC value of parent process. */
5661 parent_pc
= regcache_read_pc (regcache
);
5663 displaced_debug_printf ("write child pc from %s to %s",
5665 regcache_read_pc (child_regcache
)),
5666 paddress (gdbarch
, parent_pc
));
5668 regcache_write_pc (child_regcache
, parent_pc
);
5672 context_switch (ecs
);
5674 /* Immediately detach breakpoints from the child before there's
5675 any chance of letting the user delete breakpoints from the
5676 breakpoint lists. If we don't do this early, it's easy to
5677 leave left over traps in the child, vis: "break foo; catch
5678 fork; c; <fork>; del; c; <child calls foo>". We only follow
5679 the fork on the last `continue', and by that time the
5680 breakpoint at "foo" is long gone from the breakpoint table.
5681 If we vforked, then we don't need to unpatch here, since both
5682 parent and child are sharing the same memory pages; we'll
5683 need to unpatch at follow/detach time instead to be certain
5684 that new breakpoints added between catchpoint hit time and
5685 vfork follow are detached. */
5686 if (ecs
->ws
.kind () != TARGET_WAITKIND_VFORKED
)
5688 /* This won't actually modify the breakpoint list, but will
5689 physically remove the breakpoints from the child. */
5690 detach_breakpoints (ecs
->ws
.child_ptid ());
5693 delete_just_stopped_threads_single_step_breakpoints ();
5695 /* In case the event is caught by a catchpoint, remember that
5696 the event is to be followed at the next resume of the thread,
5697 and not immediately. */
5698 ecs
->event_thread
->pending_follow
= ecs
->ws
;
5700 ecs
->event_thread
->set_stop_pc
5701 (regcache_read_pc (get_thread_regcache (ecs
->event_thread
)));
5703 ecs
->event_thread
->control
.stop_bpstat
5704 = bpstat_stop_status_nowatch (get_current_regcache ()->aspace (),
5705 ecs
->event_thread
->stop_pc (),
5706 ecs
->event_thread
, ecs
->ws
);
5708 if (handle_stop_requested (ecs
))
5711 /* If no catchpoint triggered for this, then keep going. Note
5712 that we're interested in knowing the bpstat actually causes a
5713 stop, not just if it may explain the signal. Software
5714 watchpoints, for example, always appear in the bpstat. */
5715 if (!bpstat_causes_stop (ecs
->event_thread
->control
.stop_bpstat
))
/* NOTE(review): the declaration that this initializer belongs to
   (presumably a bool 'follow_child') is missing from this excerpt.  */
5718 = (follow_fork_mode_string
== follow_fork_mode_child
);
5720 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
5722 process_stratum_target
*targ
5723 = ecs
->event_thread
->inf
->process_target ();
5725 bool should_resume
= follow_fork ();
5727 /* Note that one of these may be an invalid pointer,
5728 depending on detach_fork. */
5729 thread_info
*parent
= ecs
->event_thread
;
5730 thread_info
*child
= find_thread_ptid (targ
, ecs
->ws
.child_ptid ());
5732 /* At this point, the parent is marked running, and the
5733 child is marked stopped. */
5735 /* If not resuming the parent, mark it stopped. */
5736 if (follow_child
&& !detach_fork
&& !non_stop
&& !sched_multi
)
5737 parent
->set_running (false);
5739 /* If resuming the child, mark it running. */
5740 if (follow_child
|| (!detach_fork
&& (non_stop
|| sched_multi
)))
5741 child
->set_running (true);
5743 /* In non-stop mode, also resume the other branch. */
5744 if (!detach_fork
&& (non_stop
5745 || (sched_multi
&& target_is_non_stop_p ())))
5748 switch_to_thread (parent
);
5750 switch_to_thread (child
);
5752 ecs
->event_thread
= inferior_thread ();
5753 ecs
->ptid
= inferior_ptid
;
/* Alternate (non-resume-both) branch: switch to whichever of
   parent/child we are following.  NOTE(review): the surrounding
   if/else structure is partially missing here.  */
5758 switch_to_thread (child
);
5760 switch_to_thread (parent
);
5762 ecs
->event_thread
= inferior_thread ();
5763 ecs
->ptid
= inferior_ptid
;
5767 /* Never call switch_back_to_stepped_thread if we are waiting for
5768 vfork-done (waiting for an external vfork child to exec or
5769 exit). We will resume only the vforking thread for the purpose
5770 of collecting the vfork-done event, and we will restart any
5771 step once the critical shared address space window is done. */
5774 && parent
->inf
->thread_waiting_for_vfork_done
!= nullptr)
5775 || !switch_back_to_stepped_thread (ecs
))
5782 process_event_stop_test (ecs
);
5785 case TARGET_WAITKIND_VFORK_DONE
:
5786 /* Done with the shared memory region. Re-insert breakpoints in
5787 the parent, and keep going. */
5789 context_switch (ecs
);
5791 handle_vfork_done (ecs
->event_thread
);
5792 gdb_assert (inferior_thread () == ecs
->event_thread
);
5794 if (handle_stop_requested (ecs
))
5797 if (!switch_back_to_stepped_thread (ecs
))
5799 gdb_assert (inferior_thread () == ecs
->event_thread
);
5800 /* This also takes care of reinserting breakpoints in the
5801 previously locked inferior. */
5806 case TARGET_WAITKIND_EXECD
:
5808 /* Note we can't read registers yet (the stop_pc), because we
5809 don't yet know the inferior's post-exec architecture.
5810 'stop_pc' is explicitly read below instead. */
5811 switch_to_thread_no_regs (ecs
->event_thread
);
5813 /* Do whatever is necessary to the parent branch of the vfork. */
5814 handle_vfork_child_exec_or_exit (1);
5816 /* This causes the eventpoints and symbol table to be reset.
5817 Must do this now, before trying to determine whether to
5819 follow_exec (inferior_ptid
, ecs
->ws
.execd_pathname ());
5821 /* In follow_exec we may have deleted the original thread and
5822 created a new one. Make sure that the event thread is the
5823 execd thread for that case (this is a nop otherwise). */
5824 ecs
->event_thread
= inferior_thread ();
5826 ecs
->event_thread
->set_stop_pc
5827 (regcache_read_pc (get_thread_regcache (ecs
->event_thread
)));
5829 ecs
->event_thread
->control
.stop_bpstat
5830 = bpstat_stop_status_nowatch (get_current_regcache ()->aspace (),
5831 ecs
->event_thread
->stop_pc (),
5832 ecs
->event_thread
, ecs
->ws
);
5834 if (handle_stop_requested (ecs
))
5837 /* If no catchpoint triggered for this, then keep going. */
5838 if (!bpstat_causes_stop (ecs
->event_thread
->control
.stop_bpstat
))
5840 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
5844 process_event_stop_test (ecs
);
5847 /* Be careful not to try to gather much state about a thread
5848 that's in a syscall. It's frequently a losing proposition. */
5849 case TARGET_WAITKIND_SYSCALL_ENTRY
:
5850 /* Getting the current syscall number. */
5851 if (handle_syscall_event (ecs
) == 0)
5852 process_event_stop_test (ecs
);
5855 /* Before examining the threads further, step this thread to
5856 get it entirely out of the syscall. (We get notice of the
5857 event when the thread is just on the verge of exiting a
5858 syscall. Stepping one instruction seems to get it back
5860 case TARGET_WAITKIND_SYSCALL_RETURN
:
5861 if (handle_syscall_event (ecs
) == 0)
5862 process_event_stop_test (ecs
);
5865 case TARGET_WAITKIND_STOPPED
:
5866 handle_signal_stop (ecs
);
5869 case TARGET_WAITKIND_NO_HISTORY
:
5870 /* Reverse execution: target ran out of history info. */
5872 /* Switch to the stopped thread. */
5873 context_switch (ecs
);
5874 infrun_debug_printf ("stopped");
5876 delete_just_stopped_threads_single_step_breakpoints ();
5877 ecs
->event_thread
->set_stop_pc
5878 (regcache_read_pc (get_thread_regcache (inferior_thread ())));
5880 if (handle_stop_requested (ecs
))
5883 gdb::observers::no_history
.notify ();
5889 /* Restart threads back to what they were trying to do back when we
5890 paused them (because of an in-line step-over or vfork, for example).
5891 The EVENT_THREAD thread is ignored (not restarted).
5893 If INF is non-nullptr, only resume threads from INF. */
/* NOTE(review): excerpt is missing brace-only lines and the loop's
   continue statements; each infrun_debug_printf below is presumably
   followed by a continue that skips that thread.  Code kept verbatim.  */
5896 restart_threads (struct thread_info
*event_thread
, inferior
*inf
)
5898 INFRUN_SCOPED_DEBUG_START_END ("event_thread=%s, inf=%d",
5899 event_thread
->ptid
.to_string ().c_str (),
5900 inf
!= nullptr ? inf
->num
: -1);
/* A step-over must not be pending while we blanket-restart threads.  */
5902 gdb_assert (!step_over_info_valid_p ());
5904 /* In case the instruction just stepped spawned a new thread. */
5905 update_thread_list ();
5907 for (thread_info
*tp
: all_non_exited_threads ())
/* Filter: wrong inferior (when INF was given).  */
5909 if (inf
!= nullptr && tp
->inf
!= inf
)
/* Filter: inferior in the middle of detaching.  */
5912 if (tp
->inf
->detaching
)
5914 infrun_debug_printf ("restart threads: [%s] inferior detaching",
5915 tp
->ptid
.to_string ().c_str ());
5919 switch_to_thread_no_regs (tp
);
5921 if (tp
== event_thread
)
5923 infrun_debug_printf ("restart threads: [%s] is event thread",
5924 tp
->ptid
.to_string ().c_str ());
5928 if (!(tp
->state
== THREAD_RUNNING
|| tp
->control
.in_infcall
))
5930 infrun_debug_printf ("restart threads: [%s] not meant to be running",
5931 tp
->ptid
.to_string ().c_str ());
5937 infrun_debug_printf ("restart threads: [%s] resumed",
5938 tp
->ptid
.to_string ().c_str ());
5939 gdb_assert (tp
->executing () || tp
->has_pending_waitstatus ());
5943 if (thread_is_in_step_over_chain (tp
))
5945 infrun_debug_printf ("restart threads: [%s] needs step-over",
5946 tp
->ptid
.to_string ().c_str ());
5947 gdb_assert (!tp
->resumed ());
/* A thread with a pending status only needs to be marked resumed so
   its event gets picked up later; no actual target resumption.  */
5952 if (tp
->has_pending_waitstatus ())
5954 infrun_debug_printf ("restart threads: [%s] has pending status",
5955 tp
->ptid
.to_string ().c_str ());
5956 tp
->set_resumed (true);
5960 gdb_assert (!tp
->stop_requested
);
5962 /* If some thread needs to start a step-over at this point, it
5963 should still be in the step-over queue, and thus skipped
5965 if (thread_still_needs_step_over (tp
))
5967 internal_error ("thread [%s] needs a step-over, but not in "
5968 "step-over queue\n",
5969 tp
->ptid
.to_string ().c_str ());
5972 if (currently_stepping (tp
))
5974 infrun_debug_printf ("restart threads: [%s] was stepping",
5975 tp
->ptid
.to_string ().c_str ());
5976 keep_going_stepped_thread (tp
);
/* Otherwise just continue the thread with no signal.  */
5980 struct execution_control_state ecss
;
5981 struct execution_control_state
*ecs
= &ecss
;
5983 infrun_debug_printf ("restart threads: [%s] continuing",
5984 tp
->ptid
.to_string ().c_str ());
5985 reset_ecs (ecs
, tp
);
5986 switch_to_thread (tp
);
5987 keep_going_pass_signal (ecs
);
5992 /* Callback for iterate_over_threads. Find a resumed thread that has
5993 a pending waitstatus. */
/* Returns nonzero iff TP is resumed and has a pending wait status.
   NOTE(review): the return-type line and the second (callback data)
   parameter are not visible in this excerpt.  */
5996 resumed_thread_with_pending_status (struct thread_info
*tp
,
5999 return tp
->resumed () && tp
->has_pending_waitstatus ();
6002 /* Called when we get an event that may finish an in-line or
6003 out-of-line (displaced stepping) step-over started previously.
6004 Return true if the event is processed and we should go back to the
6005 event loop; false if the caller should continue processing the
/* NOTE(review): excerpt is missing brace-only lines and the early
   return statements (e.g. "return false" in the all-stop path); code
   kept verbatim.  */
6009 finish_step_over (struct execution_control_state
*ecs
)
/* Clean up any displaced-stepping state for the event thread first;
   this may adjust its PC.  */
6011 displaced_step_finish (ecs
->event_thread
, ecs
->event_thread
->stop_signal ());
6013 bool had_step_over_info
= step_over_info_valid_p ();
6015 if (had_step_over_info
)
6017 /* If we're stepping over a breakpoint with all threads locked,
6018 then only the thread that was stepped should be reporting
6020 gdb_assert (ecs
->event_thread
->control
.trap_expected
);
6022 clear_step_over_info ();
6025 if (!target_is_non_stop_p ())
6028 /* Start a new step-over in another thread if there's one that
6032 /* If we were stepping over a breakpoint before, and haven't started
6033 a new in-line step-over sequence, then restart all other threads
6034 (except the event thread). We can't do this in all-stop, as then
6035 e.g., we wouldn't be able to issue any other remote packet until
6036 these other threads stop. */
6037 if (had_step_over_info
&& !step_over_info_valid_p ())
6039 struct thread_info
*pending
;
6041 /* If we only have threads with pending statuses, the restart
6042 below won't restart any thread and so nothing re-inserts the
6043 breakpoint we just stepped over. But we need it inserted
6044 when we later process the pending events, otherwise if
6045 another thread has a pending event for this breakpoint too,
6046 we'd discard its event (because the breakpoint that
6047 originally caused the event was no longer inserted). */
6048 context_switch (ecs
);
6049 insert_breakpoints ();
6051 restart_threads (ecs
->event_thread
);
6053 /* If we have events pending, go through handle_inferior_event
6054 again, picking up a pending event at random. This avoids
6055 thread starvation. */
6057 /* But not if we just stepped over a watchpoint in order to let
6058 the instruction execute so we can evaluate its expression.
6059 The set of watchpoints that triggered is recorded in the
6060 breakpoint objects themselves (see bp->watchpoint_triggered).
6061 If we processed another event first, that other event could
6062 clobber this info. */
6063 if (ecs
->event_thread
->stepping_over_watchpoint
)
6066 pending
= iterate_over_threads (resumed_thread_with_pending_status
,
6068 if (pending
!= nullptr)
6070 struct thread_info
*tp
= ecs
->event_thread
;
6071 struct regcache
*regcache
;
6073 infrun_debug_printf ("found resumed threads with "
6074 "pending events, saving status");
6076 gdb_assert (pending
!= tp
);
6078 /* Record the event thread's event for later. */
6079 save_waitstatus (tp
, ecs
->ws
);
6080 /* This was cleared early, by handle_inferior_event. Set it
6081 so this pending event is considered by
6083 tp
->set_resumed (true);
6085 gdb_assert (!tp
->executing ());
6087 regcache
= get_thread_regcache (tp
);
6088 tp
->set_stop_pc (regcache_read_pc (regcache
));
6090 infrun_debug_printf ("saved stop_pc=%s for %s "
6091 "(currently_stepping=%d)",
6092 paddress (target_gdbarch (), tp
->stop_pc ()),
6093 tp
->ptid
.to_string ().c_str (),
6094 currently_stepping (tp
));
6096 /* This in-line step-over finished; clear this so we won't
6097 start a new one. This is what handle_signal_stop would
6098 do, if we returned false. */
6099 tp
->stepping_over_breakpoint
= 0;
6101 /* Wake up the event loop again. */
6102 mark_async_event_handler (infrun_async_inferior_event_token
);
6104 prepare_to_wait (ecs
);
6112 /* Come here when the program has stopped with a signal. */
6115 handle_signal_stop (struct execution_control_state
*ecs
)
6117 frame_info_ptr frame
;
6118 struct gdbarch
*gdbarch
;
6119 int stopped_by_watchpoint
;
6120 enum stop_kind stop_soon
;
6123 gdb_assert (ecs
->ws
.kind () == TARGET_WAITKIND_STOPPED
);
6125 ecs
->event_thread
->set_stop_signal (ecs
->ws
.sig ());
6127 /* Do we need to clean up the state of a thread that has
6128 completed a displaced single-step? (Doing so usually affects
6129 the PC, so do it here, before we set stop_pc.) */
6130 if (finish_step_over (ecs
))
6133 /* If we either finished a single-step or hit a breakpoint, but
6134 the user wanted this thread to be stopped, pretend we got a
6135 SIG0 (generic unsignaled stop). */
6136 if (ecs
->event_thread
->stop_requested
6137 && ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
)
6138 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
6140 ecs
->event_thread
->set_stop_pc
6141 (regcache_read_pc (get_thread_regcache (ecs
->event_thread
)));
6143 context_switch (ecs
);
6145 if (deprecated_context_hook
)
6146 deprecated_context_hook (ecs
->event_thread
->global_num
);
6150 struct regcache
*regcache
= get_thread_regcache (ecs
->event_thread
);
6151 struct gdbarch
*reg_gdbarch
= regcache
->arch ();
6154 ("stop_pc=%s", paddress (reg_gdbarch
, ecs
->event_thread
->stop_pc ()));
6155 if (target_stopped_by_watchpoint ())
6159 infrun_debug_printf ("stopped by watchpoint");
6161 if (target_stopped_data_address (current_inferior ()->top_target (),
6163 infrun_debug_printf ("stopped data address=%s",
6164 paddress (reg_gdbarch
, addr
));
6166 infrun_debug_printf ("(no data address available)");
6170 /* This is originated from start_remote(), start_inferior() and
6171 shared libraries hook functions. */
6172 stop_soon
= get_inferior_stop_soon (ecs
);
6173 if (stop_soon
== STOP_QUIETLY
|| stop_soon
== STOP_QUIETLY_REMOTE
)
6175 infrun_debug_printf ("quietly stopped");
6176 stop_print_frame
= true;
6181 /* This originates from attach_command(). We need to overwrite
6182 the stop_signal here, because some kernels don't ignore a
6183 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
6184 See more comments in inferior.h. On the other hand, if we
6185 get a non-SIGSTOP, report it to the user - assume the backend
6186 will handle the SIGSTOP if it should show up later.
6188 Also consider that the attach is complete when we see a
6189 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
6190 target extended-remote report it instead of a SIGSTOP
6191 (e.g. gdbserver). We already rely on SIGTRAP being our
6192 signal, so this is no exception.
6194 Also consider that the attach is complete when we see a
6195 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
6196 the target to stop all threads of the inferior, in case the
6197 low level attach operation doesn't stop them implicitly. If
6198 they weren't stopped implicitly, then the stub will report a
6199 GDB_SIGNAL_0, meaning: stopped for no particular reason
6200 other than GDB's request. */
6201 if (stop_soon
== STOP_QUIETLY_NO_SIGSTOP
6202 && (ecs
->event_thread
->stop_signal () == GDB_SIGNAL_STOP
6203 || ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
6204 || ecs
->event_thread
->stop_signal () == GDB_SIGNAL_0
))
6206 stop_print_frame
= true;
6208 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
6212 /* At this point, get hold of the now-current thread's frame. */
6213 frame
= get_current_frame ();
6214 gdbarch
= get_frame_arch (frame
);
6216 /* Pull the single step breakpoints out of the target. */
6217 if (ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
)
6219 struct regcache
*regcache
;
6222 regcache
= get_thread_regcache (ecs
->event_thread
);
6223 const address_space
*aspace
= regcache
->aspace ();
6225 pc
= regcache_read_pc (regcache
);
6227 /* However, before doing so, if this single-step breakpoint was
6228 actually for another thread, set this thread up for moving
6230 if (!thread_has_single_step_breakpoint_here (ecs
->event_thread
,
6233 if (single_step_breakpoint_inserted_here_p (aspace
, pc
))
6235 infrun_debug_printf ("[%s] hit another thread's single-step "
6237 ecs
->ptid
.to_string ().c_str ());
6238 ecs
->hit_singlestep_breakpoint
= 1;
6243 infrun_debug_printf ("[%s] hit its single-step breakpoint",
6244 ecs
->ptid
.to_string ().c_str ());
6247 delete_just_stopped_threads_single_step_breakpoints ();
6249 if (ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
6250 && ecs
->event_thread
->control
.trap_expected
6251 && ecs
->event_thread
->stepping_over_watchpoint
)
6252 stopped_by_watchpoint
= 0;
6254 stopped_by_watchpoint
= watchpoints_triggered (ecs
->ws
);
6256 /* If necessary, step over this watchpoint. We'll be back to display
6258 if (stopped_by_watchpoint
6259 && (target_have_steppable_watchpoint ()
6260 || gdbarch_have_nonsteppable_watchpoint (gdbarch
)))
6262 /* At this point, we are stopped at an instruction which has
6263 attempted to write to a piece of memory under control of
6264 a watchpoint. The instruction hasn't actually executed
6265 yet. If we were to evaluate the watchpoint expression
6266 now, we would get the old value, and therefore no change
6267 would seem to have occurred.
6269 In order to make watchpoints work `right', we really need
6270 to complete the memory write, and then evaluate the
6271 watchpoint expression. We do this by single-stepping the
6274 It may not be necessary to disable the watchpoint to step over
6275 it. For example, the PA can (with some kernel cooperation)
6276 single step over a watchpoint without disabling the watchpoint.
6278 It is far more common to need to disable a watchpoint to step
6279 the inferior over it. If we have non-steppable watchpoints,
6280 we must disable the current watchpoint; it's simplest to
6281 disable all watchpoints.
6283 Any breakpoint at PC must also be stepped over -- if there's
6284 one, it will have already triggered before the watchpoint
6285 triggered, and we either already reported it to the user, or
6286 it didn't cause a stop and we called keep_going. In either
6287 case, if there was a breakpoint at PC, we must be trying to
6289 ecs
->event_thread
->stepping_over_watchpoint
= 1;
6294 ecs
->event_thread
->stepping_over_breakpoint
= 0;
6295 ecs
->event_thread
->stepping_over_watchpoint
= 0;
6296 bpstat_clear (&ecs
->event_thread
->control
.stop_bpstat
);
6297 ecs
->event_thread
->control
.stop_step
= 0;
6298 stop_print_frame
= true;
6299 stopped_by_random_signal
= 0;
6300 bpstat
*stop_chain
= nullptr;
6302 /* Hide inlined functions starting here, unless we just performed stepi or
6303 nexti. After stepi and nexti, always show the innermost frame (not any
6304 inline function call sites). */
6305 if (ecs
->event_thread
->control
.step_range_end
!= 1)
6307 const address_space
*aspace
6308 = get_thread_regcache (ecs
->event_thread
)->aspace ();
6310 /* skip_inline_frames is expensive, so we avoid it if we can
6311 determine that the address is one where functions cannot have
6312 been inlined. This improves performance with inferiors that
6313 load a lot of shared libraries, because the solib event
6314 breakpoint is defined as the address of a function (i.e. not
6315 inline). Note that we have to check the previous PC as well
6316 as the current one to catch cases when we have just
6317 single-stepped off a breakpoint prior to reinstating it.
6318 Note that we're assuming that the code we single-step to is
6319 not inline, but that's not definitive: there's nothing
6320 preventing the event breakpoint function from containing
6321 inlined code, and the single-step ending up there. If the
6322 user had set a breakpoint on that inlined code, the missing
6323 skip_inline_frames call would break things. Fortunately
6324 that's an extremely unlikely scenario. */
6325 if (!pc_at_non_inline_function (aspace
,
6326 ecs
->event_thread
->stop_pc (),
6328 && !(ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
6329 && ecs
->event_thread
->control
.trap_expected
6330 && pc_at_non_inline_function (aspace
,
6331 ecs
->event_thread
->prev_pc
,
6334 stop_chain
= build_bpstat_chain (aspace
,
6335 ecs
->event_thread
->stop_pc (),
6337 skip_inline_frames (ecs
->event_thread
, stop_chain
);
6339 /* Re-fetch current thread's frame in case that invalidated
6341 frame
= get_current_frame ();
6342 gdbarch
= get_frame_arch (frame
);
6346 if (ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
6347 && ecs
->event_thread
->control
.trap_expected
6348 && gdbarch_single_step_through_delay_p (gdbarch
)
6349 && currently_stepping (ecs
->event_thread
))
6351 /* We're trying to step off a breakpoint. Turns out that we're
6352 also on an instruction that needs to be stepped multiple
6353 times before it's been fully executing. E.g., architectures
6354 with a delay slot. It needs to be stepped twice, once for
6355 the instruction and once for the delay slot. */
6356 int step_through_delay
6357 = gdbarch_single_step_through_delay (gdbarch
, frame
);
6359 if (step_through_delay
)
6360 infrun_debug_printf ("step through delay");
6362 if (ecs
->event_thread
->control
.step_range_end
== 0
6363 && step_through_delay
)
6365 /* The user issued a continue when stopped at a breakpoint.
6366 Set up for another trap and get out of here. */
6367 ecs
->event_thread
->stepping_over_breakpoint
= 1;
6371 else if (step_through_delay
)
6373 /* The user issued a step when stopped at a breakpoint.
6374 Maybe we should stop, maybe we should not - the delay
6375 slot *might* correspond to a line of source. In any
6376 case, don't decide that here, just set
6377 ecs->stepping_over_breakpoint, making sure we
6378 single-step again before breakpoints are re-inserted. */
6379 ecs
->event_thread
->stepping_over_breakpoint
= 1;
6383 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
6384 handles this event. */
6385 ecs
->event_thread
->control
.stop_bpstat
6386 = bpstat_stop_status (get_current_regcache ()->aspace (),
6387 ecs
->event_thread
->stop_pc (),
6388 ecs
->event_thread
, ecs
->ws
, stop_chain
);
6390 /* Following in case break condition called a
6392 stop_print_frame
= true;
6394 /* This is where we handle "moribund" watchpoints. Unlike
6395 software breakpoints traps, hardware watchpoint traps are
6396 always distinguishable from random traps. If no high-level
6397 watchpoint is associated with the reported stop data address
6398 anymore, then the bpstat does not explain the signal ---
6399 simply make sure to ignore it if `stopped_by_watchpoint' is
6402 if (ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
6403 && !bpstat_explains_signal (ecs
->event_thread
->control
.stop_bpstat
,
6405 && stopped_by_watchpoint
)
6407 infrun_debug_printf ("no user watchpoint explains watchpoint SIGTRAP, "
6411 /* NOTE: cagney/2003-03-29: These checks for a random signal
6412 at one stage in the past included checks for an inferior
6413 function call's call dummy's return breakpoint. The original
6414 comment, that went with the test, read:
6416 ``End of a stack dummy. Some systems (e.g. Sony news) give
6417 another signal besides SIGTRAP, so check here as well as
6420 If someone ever tries to get call dummys on a
6421 non-executable stack to work (where the target would stop
6422 with something like a SIGSEGV), then those tests might need
6423 to be re-instated. Given, however, that the tests were only
6424 enabled when momentary breakpoints were not being used, I
6425 suspect that it won't be the case.
6427 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
6428 be necessary for call dummies on a non-executable stack on
6431 /* See if the breakpoints module can explain the signal. */
6433 = !bpstat_explains_signal (ecs
->event_thread
->control
.stop_bpstat
,
6434 ecs
->event_thread
->stop_signal ());
6436 /* Maybe this was a trap for a software breakpoint that has since
6438 if (random_signal
&& target_stopped_by_sw_breakpoint ())
6440 if (gdbarch_program_breakpoint_here_p (gdbarch
,
6441 ecs
->event_thread
->stop_pc ()))
6443 struct regcache
*regcache
;
6446 /* Re-adjust PC to what the program would see if GDB was not
6448 regcache
= get_thread_regcache (ecs
->event_thread
);
6449 decr_pc
= gdbarch_decr_pc_after_break (gdbarch
);
6452 gdb::optional
<scoped_restore_tmpl
<int>>
6453 restore_operation_disable
;
6455 if (record_full_is_used ())
6456 restore_operation_disable
.emplace
6457 (record_full_gdb_operation_disable_set ());
6459 regcache_write_pc (regcache
,
6460 ecs
->event_thread
->stop_pc () + decr_pc
);
6465 /* A delayed software breakpoint event. Ignore the trap. */
6466 infrun_debug_printf ("delayed software breakpoint trap, ignoring");
6471 /* Maybe this was a trap for a hardware breakpoint/watchpoint that
6472 has since been removed. */
6473 if (random_signal
&& target_stopped_by_hw_breakpoint ())
6475 /* A delayed hardware breakpoint event. Ignore the trap. */
6476 infrun_debug_printf ("delayed hardware breakpoint/watchpoint "
6481 /* If not, perhaps stepping/nexting can. */
6483 random_signal
= !(ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
6484 && currently_stepping (ecs
->event_thread
));
6486 /* Perhaps the thread hit a single-step breakpoint of _another_
6487 thread. Single-step breakpoints are transparent to the
6488 breakpoints module. */
6490 random_signal
= !ecs
->hit_singlestep_breakpoint
;
6492 /* No? Perhaps we got a moribund watchpoint. */
6494 random_signal
= !stopped_by_watchpoint
;
6496 /* Always stop if the user explicitly requested this thread to
6498 if (ecs
->event_thread
->stop_requested
)
6501 infrun_debug_printf ("user-requested stop");
6504 /* For the program's own signals, act according to
6505 the signal handling tables. */
6509 /* Signal not for debugging purposes. */
6510 enum gdb_signal stop_signal
= ecs
->event_thread
->stop_signal ();
6512 infrun_debug_printf ("random signal (%s)",
6513 gdb_signal_to_symbol_string (stop_signal
));
6515 stopped_by_random_signal
= 1;
6517 /* Always stop on signals if we're either just gaining control
6518 of the program, or the user explicitly requested this thread
6519 to remain stopped. */
6520 if (stop_soon
!= NO_STOP_QUIETLY
6521 || ecs
->event_thread
->stop_requested
6522 || signal_stop_state (ecs
->event_thread
->stop_signal ()))
6528 /* Notify observers the signal has "handle print" set. Note we
6529 returned early above if stopping; normal_stop handles the
6530 printing in that case. */
6531 if (signal_print
[ecs
->event_thread
->stop_signal ()])
6533 /* The signal table tells us to print about this signal. */
6534 target_terminal::ours_for_output ();
6535 gdb::observers::signal_received
.notify (ecs
->event_thread
->stop_signal ());
6536 target_terminal::inferior ();
6539 /* Clear the signal if it should not be passed. */
6540 if (signal_program
[ecs
->event_thread
->stop_signal ()] == 0)
6541 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
6543 if (ecs
->event_thread
->prev_pc
== ecs
->event_thread
->stop_pc ()
6544 && ecs
->event_thread
->control
.trap_expected
6545 && ecs
->event_thread
->control
.step_resume_breakpoint
== nullptr)
6547 /* We were just starting a new sequence, attempting to
6548 single-step off of a breakpoint and expecting a SIGTRAP.
6549 Instead this signal arrives. This signal will take us out
6550 of the stepping range so GDB needs to remember to, when
6551 the signal handler returns, resume stepping off that
6553 /* To simplify things, "continue" is forced to use the same
6554 code paths as single-step - set a breakpoint at the
6555 signal return address and then, once hit, step off that
6557 infrun_debug_printf ("signal arrived while stepping over breakpoint");
6559 insert_hp_step_resume_breakpoint_at_frame (frame
);
6560 ecs
->event_thread
->step_after_step_resume_breakpoint
= 1;
6561 /* Reset trap_expected to ensure breakpoints are re-inserted. */
6562 ecs
->event_thread
->control
.trap_expected
= 0;
6564 /* If we were nexting/stepping some other thread, switch to
6565 it, so that we don't continue it, losing control. */
6566 if (!switch_back_to_stepped_thread (ecs
))
6571 if (ecs
->event_thread
->stop_signal () != GDB_SIGNAL_0
6572 && (pc_in_thread_step_range (ecs
->event_thread
->stop_pc (),
6574 || ecs
->event_thread
->control
.step_range_end
== 1)
6575 && (get_stack_frame_id (frame
)
6576 == ecs
->event_thread
->control
.step_stack_frame_id
)
6577 && ecs
->event_thread
->control
.step_resume_breakpoint
== nullptr)
6579 /* The inferior is about to take a signal that will take it
6580 out of the single step range. Set a breakpoint at the
6581 current PC (which is presumably where the signal handler
6582 will eventually return) and then allow the inferior to
6585 Note that this is only needed for a signal delivered
6586 while in the single-step range. Nested signals aren't a
6587 problem as they eventually all return. */
6588 infrun_debug_printf ("signal may take us out of single-step range");
6590 clear_step_over_info ();
6591 insert_hp_step_resume_breakpoint_at_frame (frame
);
6592 ecs
->event_thread
->step_after_step_resume_breakpoint
= 1;
6593 /* Reset trap_expected to ensure breakpoints are re-inserted. */
6594 ecs
->event_thread
->control
.trap_expected
= 0;
6599 /* Note: step_resume_breakpoint may be non-NULL. This occurs
6600 when either there's a nested signal, or when there's a
6601 pending signal enabled just as the signal handler returns
6602 (leaving the inferior at the step-resume-breakpoint without
6603 actually executing it). Either way continue until the
6604 breakpoint is really hit. */
6606 if (!switch_back_to_stepped_thread (ecs
))
6608 infrun_debug_printf ("random signal, keep going");
6615 process_event_stop_test (ecs
);
6618 /* Come here when we've got some debug event / signal we can explain
6619 (IOW, not a random signal), and test whether it should cause a
6620 stop, or whether we should resume the inferior (transparently).
6621 E.g., could be a breakpoint whose condition evaluates false; we
6622 could be still stepping within the line; etc. */
6625 process_event_stop_test (struct execution_control_state
*ecs
)
6627 struct symtab_and_line stop_pc_sal
;
6628 frame_info_ptr frame
;
6629 struct gdbarch
*gdbarch
;
6630 CORE_ADDR jmp_buf_pc
;
6631 struct bpstat_what what
;
6633 /* Handle cases caused by hitting a breakpoint. */
6635 frame
= get_current_frame ();
6636 gdbarch
= get_frame_arch (frame
);
6638 what
= bpstat_what (ecs
->event_thread
->control
.stop_bpstat
);
6640 if (what
.call_dummy
)
6642 stop_stack_dummy
= what
.call_dummy
;
6645 /* A few breakpoint types have callbacks associated (e.g.,
6646 bp_jit_event). Run them now. */
6647 bpstat_run_callbacks (ecs
->event_thread
->control
.stop_bpstat
);
6649 /* If we hit an internal event that triggers symbol changes, the
6650 current frame will be invalidated within bpstat_what (e.g., if we
6651 hit an internal solib event). Re-fetch it. */
6652 frame
= get_current_frame ();
6653 gdbarch
= get_frame_arch (frame
);
6655 switch (what
.main_action
)
6657 case BPSTAT_WHAT_SET_LONGJMP_RESUME
:
6658 /* If we hit the breakpoint at longjmp while stepping, we
6659 install a momentary breakpoint at the target of the
6662 infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME");
6664 ecs
->event_thread
->stepping_over_breakpoint
= 1;
6666 if (what
.is_longjmp
)
6668 struct value
*arg_value
;
6670 /* If we set the longjmp breakpoint via a SystemTap probe,
6671 then use it to extract the arguments. The destination PC
6672 is the third argument to the probe. */
6673 arg_value
= probe_safe_evaluate_at_pc (frame
, 2);
6676 jmp_buf_pc
= value_as_address (arg_value
);
6677 jmp_buf_pc
= gdbarch_addr_bits_remove (gdbarch
, jmp_buf_pc
);
6679 else if (!gdbarch_get_longjmp_target_p (gdbarch
)
6680 || !gdbarch_get_longjmp_target (gdbarch
,
6681 frame
, &jmp_buf_pc
))
6683 infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME "
6684 "(!gdbarch_get_longjmp_target)");
6689 /* Insert a breakpoint at resume address. */
6690 insert_longjmp_resume_breakpoint (gdbarch
, jmp_buf_pc
);
6693 check_exception_resume (ecs
, frame
);
6697 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME
:
6699 frame_info_ptr init_frame
;
6701 /* There are several cases to consider.
6703 1. The initiating frame no longer exists. In this case we
6704 must stop, because the exception or longjmp has gone too
6707 2. The initiating frame exists, and is the same as the
6708 current frame. We stop, because the exception or longjmp
6711 3. The initiating frame exists and is different from the
6712 current frame. This means the exception or longjmp has
6713 been caught beneath the initiating frame, so keep going.
6715 4. longjmp breakpoint has been placed just to protect
6716 against stale dummy frames and user is not interested in
6717 stopping around longjmps. */
6719 infrun_debug_printf ("BPSTAT_WHAT_CLEAR_LONGJMP_RESUME");
6721 gdb_assert (ecs
->event_thread
->control
.exception_resume_breakpoint
6723 delete_exception_resume_breakpoint (ecs
->event_thread
);
6725 if (what
.is_longjmp
)
6727 check_longjmp_breakpoint_for_call_dummy (ecs
->event_thread
);
6729 if (!frame_id_p (ecs
->event_thread
->initiating_frame
))
6737 init_frame
= frame_find_by_id (ecs
->event_thread
->initiating_frame
);
6741 struct frame_id current_id
6742 = get_frame_id (get_current_frame ());
6743 if (current_id
== ecs
->event_thread
->initiating_frame
)
6745 /* Case 2. Fall through. */
6755 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
6757 delete_step_resume_breakpoint (ecs
->event_thread
);
6759 end_stepping_range (ecs
);
6763 case BPSTAT_WHAT_SINGLE
:
6764 infrun_debug_printf ("BPSTAT_WHAT_SINGLE");
6765 ecs
->event_thread
->stepping_over_breakpoint
= 1;
6766 /* Still need to check other stuff, at least the case where we
6767 are stepping and step out of the right range. */
6770 case BPSTAT_WHAT_STEP_RESUME
:
6771 infrun_debug_printf ("BPSTAT_WHAT_STEP_RESUME");
6773 delete_step_resume_breakpoint (ecs
->event_thread
);
6774 if (ecs
->event_thread
->control
.proceed_to_finish
6775 && execution_direction
== EXEC_REVERSE
)
6777 struct thread_info
*tp
= ecs
->event_thread
;
6779 /* We are finishing a function in reverse, and just hit the
6780 step-resume breakpoint at the start address of the
6781 function, and we're almost there -- just need to back up
6782 by one more single-step, which should take us back to the
6784 tp
->control
.step_range_start
= tp
->control
.step_range_end
= 1;
6788 fill_in_stop_func (gdbarch
, ecs
);
6789 if (ecs
->event_thread
->stop_pc () == ecs
->stop_func_start
6790 && execution_direction
== EXEC_REVERSE
)
6792 /* We are stepping over a function call in reverse, and just
6793 hit the step-resume breakpoint at the start address of
6794 the function. Go back to single-stepping, which should
6795 take us back to the function call. */
6796 ecs
->event_thread
->stepping_over_breakpoint
= 1;
6802 case BPSTAT_WHAT_STOP_NOISY
:
6803 infrun_debug_printf ("BPSTAT_WHAT_STOP_NOISY");
6804 stop_print_frame
= true;
6806 /* Assume the thread stopped for a breakpoint. We'll still check
6807 whether a/the breakpoint is there when the thread is next
6809 ecs
->event_thread
->stepping_over_breakpoint
= 1;
6814 case BPSTAT_WHAT_STOP_SILENT
:
6815 infrun_debug_printf ("BPSTAT_WHAT_STOP_SILENT");
6816 stop_print_frame
= false;
6818 /* Assume the thread stopped for a breakpoint. We'll still check
6819 whether a/the breakpoint is there when the thread is next
6821 ecs
->event_thread
->stepping_over_breakpoint
= 1;
6825 case BPSTAT_WHAT_HP_STEP_RESUME
:
6826 infrun_debug_printf ("BPSTAT_WHAT_HP_STEP_RESUME");
6828 delete_step_resume_breakpoint (ecs
->event_thread
);
6829 if (ecs
->event_thread
->step_after_step_resume_breakpoint
)
6831 /* Back when the step-resume breakpoint was inserted, we
6832 were trying to single-step off a breakpoint. Go back to
6834 ecs
->event_thread
->step_after_step_resume_breakpoint
= 0;
6835 ecs
->event_thread
->stepping_over_breakpoint
= 1;
6841 case BPSTAT_WHAT_KEEP_CHECKING
:
6845 /* If we stepped a permanent breakpoint and we had a high priority
6846 step-resume breakpoint for the address we stepped, but we didn't
6847 hit it, then we must have stepped into the signal handler. The
6848 step-resume was only necessary to catch the case of _not_
6849 stepping into the handler, so delete it, and fall through to
6850 checking whether the step finished. */
6851 if (ecs
->event_thread
->stepped_breakpoint
)
6853 struct breakpoint
*sr_bp
6854 = ecs
->event_thread
->control
.step_resume_breakpoint
;
6856 if (sr_bp
!= nullptr
6857 && sr_bp
->loc
->permanent
6858 && sr_bp
->type
== bp_hp_step_resume
6859 && sr_bp
->loc
->address
== ecs
->event_thread
->prev_pc
)
6861 infrun_debug_printf ("stepped permanent breakpoint, stopped in handler");
6862 delete_step_resume_breakpoint (ecs
->event_thread
);
6863 ecs
->event_thread
->step_after_step_resume_breakpoint
= 0;
6867 /* We come here if we hit a breakpoint but should not stop for it.
6868 Possibly we also were stepping and should stop for that. So fall
6869 through and test for stepping. But, if not stepping, do not
6872 /* In all-stop mode, if we're currently stepping but have stopped in
6873 some other thread, we need to switch back to the stepped thread. */
6874 if (switch_back_to_stepped_thread (ecs
))
6877 if (ecs
->event_thread
->control
.step_resume_breakpoint
)
6879 infrun_debug_printf ("step-resume breakpoint is inserted");
6881 /* Having a step-resume breakpoint overrides anything
6882 else having to do with stepping commands until
6883 that breakpoint is reached. */
6888 if (ecs
->event_thread
->control
.step_range_end
== 0)
6890 infrun_debug_printf ("no stepping, continue");
6891 /* Likewise if we aren't even stepping. */
6896 /* Re-fetch current thread's frame in case the code above caused
6897 the frame cache to be re-initialized, making our FRAME variable
6898 a dangling pointer. */
6899 frame
= get_current_frame ();
6900 gdbarch
= get_frame_arch (frame
);
6901 fill_in_stop_func (gdbarch
, ecs
);
6903 /* If stepping through a line, keep going if still within it.
6905 Note that step_range_end is the address of the first instruction
6906 beyond the step range, and NOT the address of the last instruction
6909 Note also that during reverse execution, we may be stepping
6910 through a function epilogue and therefore must detect when
6911 the current-frame changes in the middle of a line. */
6913 if (pc_in_thread_step_range (ecs
->event_thread
->stop_pc (),
6915 && (execution_direction
!= EXEC_REVERSE
6916 || get_frame_id (frame
) == ecs
->event_thread
->control
.step_frame_id
))
6919 ("stepping inside range [%s-%s]",
6920 paddress (gdbarch
, ecs
->event_thread
->control
.step_range_start
),
6921 paddress (gdbarch
, ecs
->event_thread
->control
.step_range_end
));
6923 /* Tentatively re-enable range stepping; `resume' disables it if
6924 necessary (e.g., if we're stepping over a breakpoint or we
6925 have software watchpoints). */
6926 ecs
->event_thread
->control
.may_range_step
= 1;
6928 /* When stepping backward, stop at beginning of line range
6929 (unless it's the function entry point, in which case
6930 keep going back to the call point). */
6931 CORE_ADDR stop_pc
= ecs
->event_thread
->stop_pc ();
6932 if (stop_pc
== ecs
->event_thread
->control
.step_range_start
6933 && stop_pc
!= ecs
->stop_func_start
6934 && execution_direction
== EXEC_REVERSE
)
6935 end_stepping_range (ecs
);
6942 /* We stepped out of the stepping range. */
6944 /* If we are stepping at the source level and entered the runtime
6945 loader dynamic symbol resolution code...
6947 EXEC_FORWARD: we keep on single stepping until we exit the run
6948 time loader code and reach the callee's address.
6950 EXEC_REVERSE: we've already executed the callee (backward), and
6951 the runtime loader code is handled just like any other
6952 undebuggable function call. Now we need only keep stepping
6953 backward through the trampoline code, and that's handled further
6954 down, so there is nothing for us to do here. */
6956 if (execution_direction
!= EXEC_REVERSE
6957 && ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_UNDEBUGGABLE
6958 && in_solib_dynsym_resolve_code (ecs
->event_thread
->stop_pc ())
6959 && (ecs
->event_thread
->control
.step_start_function
== nullptr
6960 || !in_solib_dynsym_resolve_code (
6961 ecs
->event_thread
->control
.step_start_function
->value_block ()
6964 CORE_ADDR pc_after_resolver
=
6965 gdbarch_skip_solib_resolver (gdbarch
, ecs
->event_thread
->stop_pc ());
6967 infrun_debug_printf ("stepped into dynsym resolve code");
6969 if (pc_after_resolver
)
6971 /* Set up a step-resume breakpoint at the address
6972 indicated by SKIP_SOLIB_RESOLVER. */
6973 symtab_and_line sr_sal
;
6974 sr_sal
.pc
= pc_after_resolver
;
6975 sr_sal
.pspace
= get_frame_program_space (frame
);
6977 insert_step_resume_breakpoint_at_sal (gdbarch
,
6978 sr_sal
, null_frame_id
);
6985 /* Step through an indirect branch thunk. */
6986 if (ecs
->event_thread
->control
.step_over_calls
!= STEP_OVER_NONE
6987 && gdbarch_in_indirect_branch_thunk (gdbarch
,
6988 ecs
->event_thread
->stop_pc ()))
6990 infrun_debug_printf ("stepped into indirect branch thunk");
6995 if (ecs
->event_thread
->control
.step_range_end
!= 1
6996 && (ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_UNDEBUGGABLE
6997 || ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_ALL
)
6998 && get_frame_type (frame
) == SIGTRAMP_FRAME
)
7000 infrun_debug_printf ("stepped into signal trampoline");
7001 /* The inferior, while doing a "step" or "next", has ended up in
7002 a signal trampoline (either by a signal being delivered or by
7003 the signal handler returning). Just single-step until the
7004 inferior leaves the trampoline (either by calling the handler
7010 /* If we're in the return path from a shared library trampoline,
7011 we want to proceed through the trampoline when stepping. */
7012 /* macro/2012-04-25: This needs to come before the subroutine
7013 call check below as on some targets return trampolines look
7014 like subroutine calls (MIPS16 return thunks). */
7015 if (gdbarch_in_solib_return_trampoline (gdbarch
,
7016 ecs
->event_thread
->stop_pc (),
7017 ecs
->stop_func_name
)
7018 && ecs
->event_thread
->control
.step_over_calls
!= STEP_OVER_NONE
)
7020 /* Determine where this trampoline returns. */
7021 CORE_ADDR stop_pc
= ecs
->event_thread
->stop_pc ();
7022 CORE_ADDR real_stop_pc
7023 = gdbarch_skip_trampoline_code (gdbarch
, frame
, stop_pc
);
7025 infrun_debug_printf ("stepped into solib return tramp");
7027 /* Only proceed through if we know where it's going. */
7030 /* And put the step-breakpoint there and go until there. */
7031 symtab_and_line sr_sal
;
7032 sr_sal
.pc
= real_stop_pc
;
7033 sr_sal
.section
= find_pc_overlay (sr_sal
.pc
);
7034 sr_sal
.pspace
= get_frame_program_space (frame
);
7036 /* Do not specify what the fp should be when we stop since
7037 on some machines the prologue is where the new fp value
7039 insert_step_resume_breakpoint_at_sal (gdbarch
,
7040 sr_sal
, null_frame_id
);
7042 /* Restart without fiddling with the step ranges or
7049 /* Check for subroutine calls. The check for the current frame
7050 equalling the step ID is not necessary - the check of the
7051 previous frame's ID is sufficient - but it is a common case and
7052 cheaper than checking the previous frame's ID.
7054 NOTE: frame_id::operator== will never report two invalid frame IDs as
7055 being equal, so to get into this block, both the current and
7056 previous frame must have valid frame IDs. */
7057 /* The outer_frame_id check is a heuristic to detect stepping
7058 through startup code. If we step over an instruction which
7059 sets the stack pointer from an invalid value to a valid value,
7060 we may detect that as a subroutine call from the mythical
7061 "outermost" function. This could be fixed by marking
7062 outermost frames as !stack_p,code_p,special_p. Then the
7063 initial outermost frame, before sp was valid, would
7064 have code_addr == &_start. See the comment in frame_id::operator==
7066 if ((get_stack_frame_id (frame
)
7067 != ecs
->event_thread
->control
.step_stack_frame_id
)
7068 && ((frame_unwind_caller_id (get_current_frame ())
7069 == ecs
->event_thread
->control
.step_stack_frame_id
)
7070 && ((ecs
->event_thread
->control
.step_stack_frame_id
7072 || (ecs
->event_thread
->control
.step_start_function
7073 != find_pc_function (ecs
->event_thread
->stop_pc ())))))
7075 CORE_ADDR stop_pc
= ecs
->event_thread
->stop_pc ();
7076 CORE_ADDR real_stop_pc
;
7078 infrun_debug_printf ("stepped into subroutine");
7080 if (ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_NONE
)
7082 /* I presume that step_over_calls is only 0 when we're
7083 supposed to be stepping at the assembly language level
7084 ("stepi"). Just stop. */
7085 /* And this works the same backward as frontward. MVS */
7086 end_stepping_range (ecs
);
7090 /* Reverse stepping through solib trampolines. */
7092 if (execution_direction
== EXEC_REVERSE
7093 && ecs
->event_thread
->control
.step_over_calls
!= STEP_OVER_NONE
7094 && (gdbarch_skip_trampoline_code (gdbarch
, frame
, stop_pc
)
7095 || (ecs
->stop_func_start
== 0
7096 && in_solib_dynsym_resolve_code (stop_pc
))))
7098 /* Any solib trampoline code can be handled in reverse
7099 by simply continuing to single-step. We have already
7100 executed the solib function (backwards), and a few
7101 steps will take us back through the trampoline to the
7107 if (ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_ALL
)
7109 /* We're doing a "next".
7111 Normal (forward) execution: set a breakpoint at the
7112 callee's return address (the address at which the caller
7115 Reverse (backward) execution. set the step-resume
7116 breakpoint at the start of the function that we just
7117 stepped into (backwards), and continue to there. When we
7118 get there, we'll need to single-step back to the caller. */
7120 if (execution_direction
== EXEC_REVERSE
)
7122 /* If we're already at the start of the function, we've either
7123 just stepped backward into a single instruction function,
7124 or stepped back out of a signal handler to the first instruction
7125 of the function. Just keep going, which will single-step back
7127 if (ecs
->stop_func_start
!= stop_pc
&& ecs
->stop_func_start
!= 0)
7129 /* Normal function call return (static or dynamic). */
7130 symtab_and_line sr_sal
;
7131 sr_sal
.pc
= ecs
->stop_func_start
;
7132 sr_sal
.pspace
= get_frame_program_space (frame
);
7133 insert_step_resume_breakpoint_at_sal (gdbarch
,
7134 sr_sal
, get_stack_frame_id (frame
));
7138 insert_step_resume_breakpoint_at_caller (frame
);
7144 /* If we are in a function call trampoline (a stub between the
7145 calling routine and the real function), locate the real
7146 function. That's what tells us (a) whether we want to step
7147 into it at all, and (b) what prologue we want to run to the
7148 end of, if we do step into it. */
7149 real_stop_pc
= skip_language_trampoline (frame
, stop_pc
);
7150 if (real_stop_pc
== 0)
7151 real_stop_pc
= gdbarch_skip_trampoline_code (gdbarch
, frame
, stop_pc
);
7152 if (real_stop_pc
!= 0)
7153 ecs
->stop_func_start
= real_stop_pc
;
7155 if (real_stop_pc
!= 0 && in_solib_dynsym_resolve_code (real_stop_pc
))
7157 symtab_and_line sr_sal
;
7158 sr_sal
.pc
= ecs
->stop_func_start
;
7159 sr_sal
.pspace
= get_frame_program_space (frame
);
7161 insert_step_resume_breakpoint_at_sal (gdbarch
,
7162 sr_sal
, null_frame_id
);
7167 /* If we have line number information for the function we are
7168 thinking of stepping into and the function isn't on the skip
7171 If there are several symtabs at that PC (e.g. with include
7172 files), just want to know whether *any* of them have line
7173 numbers. find_pc_line handles this. */
7175 struct symtab_and_line tmp_sal
;
7177 tmp_sal
= find_pc_line (ecs
->stop_func_start
, 0);
7178 if (tmp_sal
.line
!= 0
7179 && !function_name_is_marked_for_skip (ecs
->stop_func_name
,
7181 && !inline_frame_is_marked_for_skip (true, ecs
->event_thread
))
7183 if (execution_direction
== EXEC_REVERSE
)
7184 handle_step_into_function_backward (gdbarch
, ecs
);
7186 handle_step_into_function (gdbarch
, ecs
);
7191 /* If we have no line number and the step-stop-if-no-debug is
7192 set, we stop the step so that the user has a chance to switch
7193 in assembly mode. */
7194 if (ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_UNDEBUGGABLE
7195 && step_stop_if_no_debug
)
7197 end_stepping_range (ecs
);
7201 if (execution_direction
== EXEC_REVERSE
)
7203 /* If we're already at the start of the function, we've either just
7204 stepped backward into a single instruction function without line
7205 number info, or stepped back out of a signal handler to the first
7206 instruction of the function without line number info. Just keep
7207 going, which will single-step back to the caller. */
7208 if (ecs
->stop_func_start
!= stop_pc
)
7210 /* Set a breakpoint at callee's start address.
7211 From there we can step once and be back in the caller. */
7212 symtab_and_line sr_sal
;
7213 sr_sal
.pc
= ecs
->stop_func_start
;
7214 sr_sal
.pspace
= get_frame_program_space (frame
);
7215 insert_step_resume_breakpoint_at_sal (gdbarch
,
7216 sr_sal
, null_frame_id
);
7220 /* Set a breakpoint at callee's return address (the address
7221 at which the caller will resume). */
7222 insert_step_resume_breakpoint_at_caller (frame
);
7228 /* Reverse stepping through solib trampolines. */
7230 if (execution_direction
== EXEC_REVERSE
7231 && ecs
->event_thread
->control
.step_over_calls
!= STEP_OVER_NONE
)
7233 CORE_ADDR stop_pc
= ecs
->event_thread
->stop_pc ();
7235 if (gdbarch_skip_trampoline_code (gdbarch
, frame
, stop_pc
)
7236 || (ecs
->stop_func_start
== 0
7237 && in_solib_dynsym_resolve_code (stop_pc
)))
7239 /* Any solib trampoline code can be handled in reverse
7240 by simply continuing to single-step. We have already
7241 executed the solib function (backwards), and a few
7242 steps will take us back through the trampoline to the
7247 else if (in_solib_dynsym_resolve_code (stop_pc
))
7249 /* Stepped backward into the solib dynsym resolver.
7250 Set a breakpoint at its start and continue, then
7251 one more step will take us out. */
7252 symtab_and_line sr_sal
;
7253 sr_sal
.pc
= ecs
->stop_func_start
;
7254 sr_sal
.pspace
= get_frame_program_space (frame
);
7255 insert_step_resume_breakpoint_at_sal (gdbarch
,
7256 sr_sal
, null_frame_id
);
7262 /* This always returns the sal for the inner-most frame when we are in a
7263 stack of inlined frames, even if GDB actually believes that it is in a
7264 more outer frame. This is checked for below by calls to
7265 inline_skipped_frames. */
7266 stop_pc_sal
= find_pc_line (ecs
->event_thread
->stop_pc (), 0);
7268 /* NOTE: tausq/2004-05-24: This if block used to be done before all
7269 the trampoline processing logic, however, there are some trampolines
7270 that have no names, so we should do trampoline handling first. */
7271 if (ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_UNDEBUGGABLE
7272 && ecs
->stop_func_name
== nullptr
7273 && stop_pc_sal
.line
== 0)
7275 infrun_debug_printf ("stepped into undebuggable function");
7277 /* The inferior just stepped into, or returned to, an
7278 undebuggable function (where there is no debugging information
7279 and no line number corresponding to the address where the
7280 inferior stopped). Since we want to skip this kind of code,
7281 we keep going until the inferior returns from this
7282 function - unless the user has asked us not to (via
7283 set step-mode) or we no longer know how to get back
7284 to the call site. */
7285 if (step_stop_if_no_debug
7286 || !frame_id_p (frame_unwind_caller_id (frame
)))
7288 /* If we have no line number and the step-stop-if-no-debug
7289 is set, we stop the step so that the user has a chance to
7290 switch in assembly mode. */
7291 end_stepping_range (ecs
);
7296 /* Set a breakpoint at callee's return address (the address
7297 at which the caller will resume). */
7298 insert_step_resume_breakpoint_at_caller (frame
);
7304 if (ecs
->event_thread
->control
.step_range_end
== 1)
7306 /* It is stepi or nexti. We always want to stop stepping after
7308 infrun_debug_printf ("stepi/nexti");
7309 end_stepping_range (ecs
);
7313 if (stop_pc_sal
.line
== 0)
7315 /* We have no line number information. That means to stop
7316 stepping (does this always happen right after one instruction,
7317 when we do "s" in a function with no line numbers,
7318 or can this happen as a result of a return or longjmp?). */
7319 infrun_debug_printf ("line number info");
7320 end_stepping_range (ecs
);
7324 /* Look for "calls" to inlined functions, part one. If the inline
7325 frame machinery detected some skipped call sites, we have entered
7326 a new inline function. */
7328 if ((get_frame_id (get_current_frame ())
7329 == ecs
->event_thread
->control
.step_frame_id
)
7330 && inline_skipped_frames (ecs
->event_thread
))
7332 infrun_debug_printf ("stepped into inlined function");
7334 symtab_and_line call_sal
= find_frame_sal (get_current_frame ());
7336 if (ecs
->event_thread
->control
.step_over_calls
!= STEP_OVER_ALL
)
7338 /* For "step", we're going to stop. But if the call site
7339 for this inlined function is on the same source line as
7340 we were previously stepping, go down into the function
7341 first. Otherwise stop at the call site. */
7343 if (call_sal
.line
== ecs
->event_thread
->current_line
7344 && call_sal
.symtab
== ecs
->event_thread
->current_symtab
)
7346 step_into_inline_frame (ecs
->event_thread
);
7347 if (inline_frame_is_marked_for_skip (false, ecs
->event_thread
))
7354 end_stepping_range (ecs
);
7359 /* For "next", we should stop at the call site if it is on a
7360 different source line. Otherwise continue through the
7361 inlined function. */
7362 if (call_sal
.line
== ecs
->event_thread
->current_line
7363 && call_sal
.symtab
== ecs
->event_thread
->current_symtab
)
7366 end_stepping_range (ecs
);
7371 /* Look for "calls" to inlined functions, part two. If we are still
7372 in the same real function we were stepping through, but we have
7373 to go further up to find the exact frame ID, we are stepping
7374 through a more inlined call beyond its call site. */
7376 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
7377 && (get_frame_id (get_current_frame ())
7378 != ecs
->event_thread
->control
.step_frame_id
)
7379 && stepped_in_from (get_current_frame (),
7380 ecs
->event_thread
->control
.step_frame_id
))
7382 infrun_debug_printf ("stepping through inlined function");
7384 if (ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_ALL
7385 || inline_frame_is_marked_for_skip (false, ecs
->event_thread
))
7388 end_stepping_range (ecs
);
7392 bool refresh_step_info
= true;
7393 if ((ecs
->event_thread
->stop_pc () == stop_pc_sal
.pc
)
7394 && (ecs
->event_thread
->current_line
!= stop_pc_sal
.line
7395 || ecs
->event_thread
->current_symtab
!= stop_pc_sal
.symtab
))
7397 /* We are at a different line. */
7399 if (stop_pc_sal
.is_stmt
)
7401 /* We are at the start of a statement.
7403 So stop. Note that we don't stop if we step into the middle of a
7404 statement. That is said to make things like for (;;) statements
7406 infrun_debug_printf ("stepped to a different line");
7407 end_stepping_range (ecs
);
7410 else if (get_frame_id (get_current_frame ())
7411 == ecs
->event_thread
->control
.step_frame_id
)
7413 /* We are not at the start of a statement, and we have not changed
7416 We ignore this line table entry, and continue stepping forward,
7417 looking for a better place to stop. */
7418 refresh_step_info
= false;
7419 infrun_debug_printf ("stepped to a different line, but "
7420 "it's not the start of a statement");
7424 /* We are not the start of a statement, and we have changed frame.
7426 We ignore this line table entry, and continue stepping forward,
7427 looking for a better place to stop. Keep refresh_step_info at
7428 true to note that the frame has changed, but ignore the line
7429 number to make sure we don't ignore a subsequent entry with the
7430 same line number. */
7431 stop_pc_sal
.line
= 0;
7432 infrun_debug_printf ("stepped to a different frame, but "
7433 "it's not the start of a statement");
7437 /* We aren't done stepping.
7439 Optimize by setting the stepping range to the line.
7440 (We might not be in the original line, but if we entered a
7441 new line in mid-statement, we continue stepping. This makes
7442 things like for(;;) statements work better.)
7444 If we entered a SAL that indicates a non-statement line table entry,
7445 then we update the stepping range, but we don't update the step info,
7446 which includes things like the line number we are stepping away from.
7447 This means we will stop when we find a line table entry that is marked
7448 as is-statement, even if it matches the non-statement one we just
7451 ecs
->event_thread
->control
.step_range_start
= stop_pc_sal
.pc
;
7452 ecs
->event_thread
->control
.step_range_end
= stop_pc_sal
.end
;
7453 ecs
->event_thread
->control
.may_range_step
= 1;
7455 ("updated step range, start = %s, end = %s, may_range_step = %d",
7456 paddress (gdbarch
, ecs
->event_thread
->control
.step_range_start
),
7457 paddress (gdbarch
, ecs
->event_thread
->control
.step_range_end
),
7458 ecs
->event_thread
->control
.may_range_step
);
7459 if (refresh_step_info
)
7460 set_step_info (ecs
->event_thread
, frame
, stop_pc_sal
);
7462 infrun_debug_printf ("keep going");
7466 static bool restart_stepped_thread (process_stratum_target
*resume_target
,
7467 ptid_t resume_ptid
);
7469 /* In all-stop mode, if we're currently stepping but have stopped in
7470 some other thread, we may need to switch back to the stepped
7471 thread. Returns true we set the inferior running, false if we left
7472 it stopped (and the event needs further processing). */
7475 switch_back_to_stepped_thread (struct execution_control_state
*ecs
)
7477 if (!target_is_non_stop_p ())
7479 /* If any thread is blocked on some internal breakpoint, and we
7480 simply need to step over that breakpoint to get it going
7481 again, do that first. */
7483 /* However, if we see an event for the stepping thread, then we
7484 know all other threads have been moved past their breakpoints
7485 already. Let the caller check whether the step is finished,
7486 etc., before deciding to move it past a breakpoint. */
7487 if (ecs
->event_thread
->control
.step_range_end
!= 0)
7490 /* Check if the current thread is blocked on an incomplete
7491 step-over, interrupted by a random signal. */
7492 if (ecs
->event_thread
->control
.trap_expected
7493 && ecs
->event_thread
->stop_signal () != GDB_SIGNAL_TRAP
)
7496 ("need to finish step-over of [%s]",
7497 ecs
->event_thread
->ptid
.to_string ().c_str ());
7502 /* Check if the current thread is blocked by a single-step
7503 breakpoint of another thread. */
7504 if (ecs
->hit_singlestep_breakpoint
)
7506 infrun_debug_printf ("need to step [%s] over single-step breakpoint",
7507 ecs
->ptid
.to_string ().c_str ());
7512 /* If this thread needs yet another step-over (e.g., stepping
7513 through a delay slot), do it first before moving on to
7515 if (thread_still_needs_step_over (ecs
->event_thread
))
7518 ("thread [%s] still needs step-over",
7519 ecs
->event_thread
->ptid
.to_string ().c_str ());
7524 /* If scheduler locking applies even if not stepping, there's no
7525 need to walk over threads. Above we've checked whether the
7526 current thread is stepping. If some other thread not the
7527 event thread is stepping, then it must be that scheduler
7528 locking is not in effect. */
7529 if (schedlock_applies (ecs
->event_thread
))
7532 /* Otherwise, we no longer expect a trap in the current thread.
7533 Clear the trap_expected flag before switching back -- this is
7534 what keep_going does as well, if we call it. */
7535 ecs
->event_thread
->control
.trap_expected
= 0;
7537 /* Likewise, clear the signal if it should not be passed. */
7538 if (!signal_program
[ecs
->event_thread
->stop_signal ()])
7539 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
7541 if (restart_stepped_thread (ecs
->target
, ecs
->ptid
))
7543 prepare_to_wait (ecs
);
7547 switch_to_thread (ecs
->event_thread
);
7553 /* Look for the thread that was stepping, and resume it.
7554 RESUME_TARGET / RESUME_PTID indicate the set of threads the caller
7555 is resuming. Return true if a thread was started, false
7559 restart_stepped_thread (process_stratum_target
*resume_target
,
7562 /* Do all pending step-overs before actually proceeding with
7564 if (start_step_over ())
7567 for (thread_info
*tp
: all_threads_safe ())
7569 if (tp
->state
== THREAD_EXITED
)
7572 if (tp
->has_pending_waitstatus ())
7575 /* Ignore threads of processes the caller is not
7578 && (tp
->inf
->process_target () != resume_target
7579 || tp
->inf
->pid
!= resume_ptid
.pid ()))
7582 if (tp
->control
.trap_expected
)
7584 infrun_debug_printf ("switching back to stepped thread (step-over)");
7586 if (keep_going_stepped_thread (tp
))
7591 for (thread_info
*tp
: all_threads_safe ())
7593 if (tp
->state
== THREAD_EXITED
)
7596 if (tp
->has_pending_waitstatus ())
7599 /* Ignore threads of processes the caller is not
7602 && (tp
->inf
->process_target () != resume_target
7603 || tp
->inf
->pid
!= resume_ptid
.pid ()))
7606 /* Did we find the stepping thread? */
7607 if (tp
->control
.step_range_end
)
7609 infrun_debug_printf ("switching back to stepped thread (stepping)");
7611 if (keep_going_stepped_thread (tp
))
7622 restart_after_all_stop_detach (process_stratum_target
*proc_target
)
7624 /* Note we don't check target_is_non_stop_p() here, because the
7625 current inferior may no longer have a process_stratum target
7626 pushed, as we just detached. */
7628 /* See if we have a THREAD_RUNNING thread that need to be
7629 re-resumed. If we have any thread that is already executing,
7630 then we don't need to resume the target -- it is already been
7631 resumed. With the remote target (in all-stop), it's even
7632 impossible to issue another resumption if the target is already
7633 resumed, until the target reports a stop. */
7634 for (thread_info
*thr
: all_threads (proc_target
))
7636 if (thr
->state
!= THREAD_RUNNING
)
7639 /* If we have any thread that is already executing, then we
7640 don't need to resume the target -- it is already been
7642 if (thr
->executing ())
7645 /* If we have a pending event to process, skip resuming the
7646 target and go straight to processing it. */
7647 if (thr
->resumed () && thr
->has_pending_waitstatus ())
7651 /* Alright, we need to re-resume the target. If a thread was
7652 stepping, we need to restart it stepping. */
7653 if (restart_stepped_thread (proc_target
, minus_one_ptid
))
7656 /* Otherwise, find the first THREAD_RUNNING thread and resume
7658 for (thread_info
*thr
: all_threads (proc_target
))
7660 if (thr
->state
!= THREAD_RUNNING
)
7663 execution_control_state ecs
;
7664 reset_ecs (&ecs
, thr
);
7665 switch_to_thread (thr
);
7671 /* Set a previously stepped thread back to stepping. Returns true on
7672 success, false if the resume is not possible (e.g., the thread
7676 keep_going_stepped_thread (struct thread_info
*tp
)
7678 frame_info_ptr frame
;
7679 struct execution_control_state ecss
;
7680 struct execution_control_state
*ecs
= &ecss
;
7682 /* If the stepping thread exited, then don't try to switch back and
7683 resume it, which could fail in several different ways depending
7684 on the target. Instead, just keep going.
7686 We can find a stepping dead thread in the thread list in two
7689 - The target supports thread exit events, and when the target
7690 tries to delete the thread from the thread list, inferior_ptid
7691 pointed at the exiting thread. In such case, calling
7692 delete_thread does not really remove the thread from the list;
7693 instead, the thread is left listed, with 'exited' state.
7695 - The target's debug interface does not support thread exit
7696 events, and so we have no idea whatsoever if the previously
7697 stepping thread is still alive. For that reason, we need to
7698 synchronously query the target now. */
7700 if (tp
->state
== THREAD_EXITED
|| !target_thread_alive (tp
->ptid
))
7702 infrun_debug_printf ("not resuming previously stepped thread, it has "
7709 infrun_debug_printf ("resuming previously stepped thread");
7711 reset_ecs (ecs
, tp
);
7712 switch_to_thread (tp
);
7714 tp
->set_stop_pc (regcache_read_pc (get_thread_regcache (tp
)));
7715 frame
= get_current_frame ();
7717 /* If the PC of the thread we were trying to single-step has
7718 changed, then that thread has trapped or been signaled, but the
7719 event has not been reported to GDB yet. Re-poll the target
7720 looking for this particular thread's event (i.e. temporarily
7721 enable schedlock) by:
7723 - setting a break at the current PC
7724 - resuming that particular thread, only (by setting trap
7727 This prevents us continuously moving the single-step breakpoint
7728 forward, one instruction at a time, overstepping. */
7730 if (tp
->stop_pc () != tp
->prev_pc
)
7734 infrun_debug_printf ("expected thread advanced also (%s -> %s)",
7735 paddress (target_gdbarch (), tp
->prev_pc
),
7736 paddress (target_gdbarch (), tp
->stop_pc ()));
7738 /* Clear the info of the previous step-over, as it's no longer
7739 valid (if the thread was trying to step over a breakpoint, it
7740 has already succeeded). It's what keep_going would do too,
7741 if we called it. Do this before trying to insert the sss
7742 breakpoint, otherwise if we were previously trying to step
7743 over this exact address in another thread, the breakpoint is
7745 clear_step_over_info ();
7746 tp
->control
.trap_expected
= 0;
7748 insert_single_step_breakpoint (get_frame_arch (frame
),
7749 get_frame_address_space (frame
),
7752 tp
->set_resumed (true);
7753 resume_ptid
= internal_resume_ptid (tp
->control
.stepping_command
);
7754 do_target_resume (resume_ptid
, false, GDB_SIGNAL_0
);
7758 infrun_debug_printf ("expected thread still hasn't advanced");
7760 keep_going_pass_signal (ecs
);
7766 /* Is thread TP in the middle of (software or hardware)
7767 single-stepping? (Note the result of this function must never be
7768 passed directly as target_resume's STEP parameter.) */
7771 currently_stepping (struct thread_info
*tp
)
7773 return ((tp
->control
.step_range_end
7774 && tp
->control
.step_resume_breakpoint
== nullptr)
7775 || tp
->control
.trap_expected
7776 || tp
->stepped_breakpoint
7777 || bpstat_should_step ());
7780 /* Inferior has stepped into a subroutine call with source code that
7781 we should not step over. Do step to the first line of code in
7785 handle_step_into_function (struct gdbarch
*gdbarch
,
7786 struct execution_control_state
*ecs
)
7788 fill_in_stop_func (gdbarch
, ecs
);
7790 compunit_symtab
*cust
7791 = find_pc_compunit_symtab (ecs
->event_thread
->stop_pc ());
7792 if (cust
!= nullptr && cust
->language () != language_asm
)
7793 ecs
->stop_func_start
7794 = gdbarch_skip_prologue_noexcept (gdbarch
, ecs
->stop_func_start
);
7796 symtab_and_line stop_func_sal
= find_pc_line (ecs
->stop_func_start
, 0);
7797 /* Use the step_resume_break to step until the end of the prologue,
7798 even if that involves jumps (as it seems to on the vax under
7800 /* If the prologue ends in the middle of a source line, continue to
7801 the end of that source line (if it is still within the function).
7802 Otherwise, just go to end of prologue. */
7803 if (stop_func_sal
.end
7804 && stop_func_sal
.pc
!= ecs
->stop_func_start
7805 && stop_func_sal
.end
< ecs
->stop_func_end
)
7806 ecs
->stop_func_start
= stop_func_sal
.end
;
7808 /* Architectures which require breakpoint adjustment might not be able
7809 to place a breakpoint at the computed address. If so, the test
7810 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
7811 ecs->stop_func_start to an address at which a breakpoint may be
7812 legitimately placed.
7814 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
7815 made, GDB will enter an infinite loop when stepping through
7816 optimized code consisting of VLIW instructions which contain
7817 subinstructions corresponding to different source lines. On
7818 FR-V, it's not permitted to place a breakpoint on any but the
7819 first subinstruction of a VLIW instruction. When a breakpoint is
7820 set, GDB will adjust the breakpoint address to the beginning of
7821 the VLIW instruction. Thus, we need to make the corresponding
7822 adjustment here when computing the stop address. */
7824 if (gdbarch_adjust_breakpoint_address_p (gdbarch
))
7826 ecs
->stop_func_start
7827 = gdbarch_adjust_breakpoint_address (gdbarch
,
7828 ecs
->stop_func_start
);
7831 if (ecs
->stop_func_start
== ecs
->event_thread
->stop_pc ())
7833 /* We are already there: stop now. */
7834 end_stepping_range (ecs
);
7839 /* Put the step-breakpoint there and go until there. */
7840 symtab_and_line sr_sal
;
7841 sr_sal
.pc
= ecs
->stop_func_start
;
7842 sr_sal
.section
= find_pc_overlay (ecs
->stop_func_start
);
7843 sr_sal
.pspace
= get_frame_program_space (get_current_frame ());
7845 /* Do not specify what the fp should be when we stop since on
7846 some machines the prologue is where the new fp value is
7848 insert_step_resume_breakpoint_at_sal (gdbarch
, sr_sal
, null_frame_id
);
7850 /* And make sure stepping stops right away then. */
7851 ecs
->event_thread
->control
.step_range_end
7852 = ecs
->event_thread
->control
.step_range_start
;
7857 /* Inferior has stepped backward into a subroutine call with source
7858 code that we should not step over. Do step to the beginning of the
7859 last line of code in it. */
7862 handle_step_into_function_backward (struct gdbarch
*gdbarch
,
7863 struct execution_control_state
*ecs
)
7865 struct compunit_symtab
*cust
;
7866 struct symtab_and_line stop_func_sal
;
7868 fill_in_stop_func (gdbarch
, ecs
);
7870 cust
= find_pc_compunit_symtab (ecs
->event_thread
->stop_pc ());
7871 if (cust
!= nullptr && cust
->language () != language_asm
)
7872 ecs
->stop_func_start
7873 = gdbarch_skip_prologue_noexcept (gdbarch
, ecs
->stop_func_start
);
7875 stop_func_sal
= find_pc_line (ecs
->event_thread
->stop_pc (), 0);
7877 /* OK, we're just going to keep stepping here. */
7878 if (stop_func_sal
.pc
== ecs
->event_thread
->stop_pc ())
7880 /* We're there already. Just stop stepping now. */
7881 end_stepping_range (ecs
);
7885 /* Else just reset the step range and keep going.
7886 No step-resume breakpoint, they don't work for
7887 epilogues, which can have multiple entry paths. */
7888 ecs
->event_thread
->control
.step_range_start
= stop_func_sal
.pc
;
7889 ecs
->event_thread
->control
.step_range_end
= stop_func_sal
.end
;
7895 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
7896 This is used to both functions and to skip over code. */
7899 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch
*gdbarch
,
7900 struct symtab_and_line sr_sal
,
7901 struct frame_id sr_id
,
7902 enum bptype sr_type
)
7904 /* There should never be more than one step-resume or longjmp-resume
7905 breakpoint per thread, so we should never be setting a new
7906 step_resume_breakpoint when one is already active. */
7907 gdb_assert (inferior_thread ()->control
.step_resume_breakpoint
== nullptr);
7908 gdb_assert (sr_type
== bp_step_resume
|| sr_type
== bp_hp_step_resume
);
7910 infrun_debug_printf ("inserting step-resume breakpoint at %s",
7911 paddress (gdbarch
, sr_sal
.pc
));
7913 inferior_thread ()->control
.step_resume_breakpoint
7914 = set_momentary_breakpoint (gdbarch
, sr_sal
, sr_id
, sr_type
).release ();
7918 insert_step_resume_breakpoint_at_sal (struct gdbarch
*gdbarch
,
7919 struct symtab_and_line sr_sal
,
7920 struct frame_id sr_id
)
7922 insert_step_resume_breakpoint_at_sal_1 (gdbarch
,
7927 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
7928 This is used to skip a potential signal handler.
7930 This is called with the interrupted function's frame. The signal
7931 handler, when it returns, will resume the interrupted function at
7935 insert_hp_step_resume_breakpoint_at_frame (frame_info_ptr return_frame
)
7937 gdb_assert (return_frame
!= nullptr);
7939 struct gdbarch
*gdbarch
= get_frame_arch (return_frame
);
7941 symtab_and_line sr_sal
;
7942 sr_sal
.pc
= gdbarch_addr_bits_remove (gdbarch
, get_frame_pc (return_frame
));
7943 sr_sal
.section
= find_pc_overlay (sr_sal
.pc
);
7944 sr_sal
.pspace
= get_frame_program_space (return_frame
);
7946 insert_step_resume_breakpoint_at_sal_1 (gdbarch
, sr_sal
,
7947 get_stack_frame_id (return_frame
),
7951 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
7952 is used to skip a function after stepping into it (for "next" or if
7953 the called function has no debugging information).
7955 The current function has almost always been reached by single
7956 stepping a call or return instruction. NEXT_FRAME belongs to the
7957 current function, and the breakpoint will be set at the caller's
7960 This is a separate function rather than reusing
7961 insert_hp_step_resume_breakpoint_at_frame in order to avoid
7962 get_prev_frame, which may stop prematurely (see the implementation
7963 of frame_unwind_caller_id for an example). */
7966 insert_step_resume_breakpoint_at_caller (frame_info_ptr next_frame
)
7968 /* We shouldn't have gotten here if we don't know where the call site
7970 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame
)));
7972 struct gdbarch
*gdbarch
= frame_unwind_caller_arch (next_frame
);
7974 symtab_and_line sr_sal
;
7975 sr_sal
.pc
= gdbarch_addr_bits_remove (gdbarch
,
7976 frame_unwind_caller_pc (next_frame
));
7977 sr_sal
.section
= find_pc_overlay (sr_sal
.pc
);
7978 sr_sal
.pspace
= frame_unwind_program_space (next_frame
);
7980 insert_step_resume_breakpoint_at_sal (gdbarch
, sr_sal
,
7981 frame_unwind_caller_id (next_frame
));
7984 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
7985 new breakpoint at the target of a jmp_buf. The handling of
7986 longjmp-resume uses the same mechanisms used for handling
7987 "step-resume" breakpoints. */
7990 insert_longjmp_resume_breakpoint (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
7992 /* There should never be more than one longjmp-resume breakpoint per
7993 thread, so we should never be setting a new
7994 longjmp_resume_breakpoint when one is already active. */
7995 gdb_assert (inferior_thread ()->control
.exception_resume_breakpoint
== nullptr);
7997 infrun_debug_printf ("inserting longjmp-resume breakpoint at %s",
7998 paddress (gdbarch
, pc
));
8000 inferior_thread ()->control
.exception_resume_breakpoint
=
8001 set_momentary_breakpoint_at_pc (gdbarch
, pc
, bp_longjmp_resume
).release ();
8004 /* Insert an exception resume breakpoint. TP is the thread throwing
8005 the exception. The block B is the block of the unwinder debug hook
8006 function. FRAME is the frame corresponding to the call to this
8007 function. SYM is the symbol of the function argument holding the
8008 target PC of the exception. */
8011 insert_exception_resume_breakpoint (struct thread_info
*tp
,
8012 const struct block
*b
,
8013 frame_info_ptr frame
,
8018 struct block_symbol vsym
;
8019 struct value
*value
;
8021 struct breakpoint
*bp
;
8023 vsym
= lookup_symbol_search_name (sym
->search_name (),
8025 value
= read_var_value (vsym
.symbol
, vsym
.block
, frame
);
8026 /* If the value was optimized out, revert to the old behavior. */
8027 if (! value_optimized_out (value
))
8029 handler
= value_as_address (value
);
8031 infrun_debug_printf ("exception resume at %lx",
8032 (unsigned long) handler
);
8034 bp
= set_momentary_breakpoint_at_pc (get_frame_arch (frame
),
8036 bp_exception_resume
).release ();
8038 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
8041 bp
->thread
= tp
->global_num
;
8042 inferior_thread ()->control
.exception_resume_breakpoint
= bp
;
8045 catch (const gdb_exception_error
&e
)
8047 /* We want to ignore errors here. */
8051 /* A helper for check_exception_resume that sets an
8052 exception-breakpoint based on a SystemTap probe. */
8055 insert_exception_resume_from_probe (struct thread_info
*tp
,
8056 const struct bound_probe
*probe
,
8057 frame_info_ptr frame
)
8059 struct value
*arg_value
;
8061 struct breakpoint
*bp
;
8063 arg_value
= probe_safe_evaluate_at_pc (frame
, 1);
8067 handler
= value_as_address (arg_value
);
8069 infrun_debug_printf ("exception resume at %s",
8070 paddress (probe
->objfile
->arch (), handler
));
8072 bp
= set_momentary_breakpoint_at_pc (get_frame_arch (frame
),
8073 handler
, bp_exception_resume
).release ();
8074 bp
->thread
= tp
->global_num
;
8075 inferior_thread ()->control
.exception_resume_breakpoint
= bp
;
8078 /* This is called when an exception has been intercepted. Check to
8079 see whether the exception's destination is of interest, and if so,
8080 set an exception resume breakpoint there. */
8083 check_exception_resume (struct execution_control_state
*ecs
,
8084 frame_info_ptr frame
)
8086 struct bound_probe probe
;
8087 struct symbol
*func
;
8089 /* First see if this exception unwinding breakpoint was set via a
8090 SystemTap probe point. If so, the probe has two arguments: the
8091 CFA and the HANDLER. We ignore the CFA, extract the handler, and
8092 set a breakpoint there. */
8093 probe
= find_probe_by_pc (get_frame_pc (frame
));
8096 insert_exception_resume_from_probe (ecs
->event_thread
, &probe
, frame
);
8100 func
= get_frame_function (frame
);
8106 const struct block
*b
;
8107 struct block_iterator iter
;
8111 /* The exception breakpoint is a thread-specific breakpoint on
8112 the unwinder's debug hook, declared as:
8114 void _Unwind_DebugHook (void *cfa, void *handler);
8116 The CFA argument indicates the frame to which control is
8117 about to be transferred. HANDLER is the destination PC.
8119 We ignore the CFA and set a temporary breakpoint at HANDLER.
8120 This is not extremely efficient but it avoids issues in gdb
8121 with computing the DWARF CFA, and it also works even in weird
8122 cases such as throwing an exception from inside a signal
8125 b
= func
->value_block ();
8126 ALL_BLOCK_SYMBOLS (b
, iter
, sym
)
8128 if (!sym
->is_argument ())
8135 insert_exception_resume_breakpoint (ecs
->event_thread
,
8141 catch (const gdb_exception_error
&e
)
8147 stop_waiting (struct execution_control_state
*ecs
)
8149 infrun_debug_printf ("stop_waiting");
8151 /* Let callers know we don't want to wait for the inferior anymore. */
8152 ecs
->wait_some_more
= 0;
8155 /* Like keep_going, but passes the signal to the inferior, even if the
8156 signal is set to nopass. */
8159 keep_going_pass_signal (struct execution_control_state
*ecs
)
8161 gdb_assert (ecs
->event_thread
->ptid
== inferior_ptid
);
8162 gdb_assert (!ecs
->event_thread
->resumed ());
8164 /* Save the pc before execution, to compare with pc after stop. */
8165 ecs
->event_thread
->prev_pc
8166 = regcache_read_pc_protected (get_thread_regcache (ecs
->event_thread
));
8168 if (ecs
->event_thread
->control
.trap_expected
)
8170 struct thread_info
*tp
= ecs
->event_thread
;
8172 infrun_debug_printf ("%s has trap_expected set, "
8173 "resuming to collect trap",
8174 tp
->ptid
.to_string ().c_str ());
8176 /* We haven't yet gotten our trap, and either: intercepted a
8177 non-signal event (e.g., a fork); or took a signal which we
8178 are supposed to pass through to the inferior. Simply
8180 resume (ecs
->event_thread
->stop_signal ());
8182 else if (step_over_info_valid_p ())
8184 /* Another thread is stepping over a breakpoint in-line. If
8185 this thread needs a step-over too, queue the request. In
8186 either case, this resume must be deferred for later. */
8187 struct thread_info
*tp
= ecs
->event_thread
;
8189 if (ecs
->hit_singlestep_breakpoint
8190 || thread_still_needs_step_over (tp
))
8192 infrun_debug_printf ("step-over already in progress: "
8193 "step-over for %s deferred",
8194 tp
->ptid
.to_string ().c_str ());
8195 global_thread_step_over_chain_enqueue (tp
);
8198 infrun_debug_printf ("step-over in progress: resume of %s deferred",
8199 tp
->ptid
.to_string ().c_str ());
8203 struct regcache
*regcache
= get_current_regcache ();
8206 step_over_what step_what
;
8208 /* Either the trap was not expected, but we are continuing
8209 anyway (if we got a signal, the user asked it be passed to
8212 We got our expected trap, but decided we should resume from
8215 We're going to run this baby now!
8217 Note that insert_breakpoints won't try to re-insert
8218 already inserted breakpoints. Therefore, we don't
8219 care if breakpoints were already inserted, or not. */
8221 /* If we need to step over a breakpoint, and we're not using
8222 displaced stepping to do so, insert all breakpoints
8223 (watchpoints, etc.) but the one we're stepping over, step one
8224 instruction, and then re-insert the breakpoint when that step
8227 step_what
= thread_still_needs_step_over (ecs
->event_thread
);
8229 remove_bp
= (ecs
->hit_singlestep_breakpoint
8230 || (step_what
& STEP_OVER_BREAKPOINT
));
8231 remove_wps
= (step_what
& STEP_OVER_WATCHPOINT
);
8233 /* We can't use displaced stepping if we need to step past a
8234 watchpoint. The instruction copied to the scratch pad would
8235 still trigger the watchpoint. */
8237 && (remove_wps
|| !use_displaced_stepping (ecs
->event_thread
)))
8239 set_step_over_info (regcache
->aspace (),
8240 regcache_read_pc (regcache
), remove_wps
,
8241 ecs
->event_thread
->global_num
);
8243 else if (remove_wps
)
8244 set_step_over_info (nullptr, 0, remove_wps
, -1);
8246 /* If we now need to do an in-line step-over, we need to stop
8247 all other threads. Note this must be done before
8248 insert_breakpoints below, because that removes the breakpoint
8249 we're about to step over, otherwise other threads could miss
8251 if (step_over_info_valid_p () && target_is_non_stop_p ())
8252 stop_all_threads ("starting in-line step-over");
8254 /* Stop stepping if inserting breakpoints fails. */
8257 insert_breakpoints ();
8259 catch (const gdb_exception_error
&e
)
8261 exception_print (gdb_stderr
, e
);
8263 clear_step_over_info ();
8267 ecs
->event_thread
->control
.trap_expected
= (remove_bp
|| remove_wps
);
8269 resume (ecs
->event_thread
->stop_signal ());
8272 prepare_to_wait (ecs
);
8275 /* Called when we should continue running the inferior, because the
8276 current event doesn't cause a user visible stop. This does the
8277 resuming part; waiting for the next event is done elsewhere. */
8280 keep_going (struct execution_control_state
*ecs
)
8282 if (ecs
->event_thread
->control
.trap_expected
8283 && ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
)
8284 ecs
->event_thread
->control
.trap_expected
= 0;
8286 if (!signal_program
[ecs
->event_thread
->stop_signal ()])
8287 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
8288 keep_going_pass_signal (ecs
);
8291 /* This function normally comes after a resume, before
8292 handle_inferior_event exits. It takes care of any last bits of
8293 housekeeping, and sets the all-important wait_some_more flag. */
8296 prepare_to_wait (struct execution_control_state
*ecs
)
8298 infrun_debug_printf ("prepare_to_wait");
8300 ecs
->wait_some_more
= 1;
8302 /* If the target can't async, emulate it by marking the infrun event
8303 handler such that as soon as we get back to the event-loop, we
8304 immediately end up in fetch_inferior_event again calling
8306 if (!target_can_async_p ())
8307 mark_infrun_async_event_handler ();
8310 /* We are done with the step range of a step/next/si/ni command.
8311 Called once for each n of a "step n" operation. */
8314 end_stepping_range (struct execution_control_state
*ecs
)
8316 ecs
->event_thread
->control
.stop_step
= 1;
8320 /* Several print_*_reason functions to print why the inferior has stopped.
8321 We always print something when the inferior exits, or receives a signal.
8322 The rest of the cases are dealt with later on in normal_stop and
8323 print_it_typical. Ideally there should be a call to one of these
8324 print_*_reason functions functions from handle_inferior_event each time
8325 stop_waiting is called.
8327 Note that we don't call these directly, instead we delegate that to
8328 the interpreters, through observers. Interpreters then call these
8329 with whatever uiout is right. */
8332 print_end_stepping_range_reason (struct ui_out
*uiout
)
8334 /* For CLI-like interpreters, print nothing. */
8336 if (uiout
->is_mi_like_p ())
8338 uiout
->field_string ("reason",
8339 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE
));
8344 print_signal_exited_reason (struct ui_out
*uiout
, enum gdb_signal siggnal
)
8346 annotate_signalled ();
8347 if (uiout
->is_mi_like_p ())
8349 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED
));
8350 uiout
->text ("\nProgram terminated with signal ");
8351 annotate_signal_name ();
8352 uiout
->field_string ("signal-name",
8353 gdb_signal_to_name (siggnal
));
8354 annotate_signal_name_end ();
8356 annotate_signal_string ();
8357 uiout
->field_string ("signal-meaning",
8358 gdb_signal_to_string (siggnal
));
8359 annotate_signal_string_end ();
8360 uiout
->text (".\n");
8361 uiout
->text ("The program no longer exists.\n");
8365 print_exited_reason (struct ui_out
*uiout
, int exitstatus
)
8367 struct inferior
*inf
= current_inferior ();
8368 std::string pidstr
= target_pid_to_str (ptid_t (inf
->pid
));
8370 annotate_exited (exitstatus
);
8373 if (uiout
->is_mi_like_p ())
8374 uiout
->field_string ("reason", async_reason_lookup (EXEC_ASYNC_EXITED
));
8375 std::string exit_code_str
8376 = string_printf ("0%o", (unsigned int) exitstatus
);
8377 uiout
->message ("[Inferior %s (%s) exited with code %pF]\n",
8378 plongest (inf
->num
), pidstr
.c_str (),
8379 string_field ("exit-code", exit_code_str
.c_str ()));
8383 if (uiout
->is_mi_like_p ())
8385 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY
));
8386 uiout
->message ("[Inferior %s (%s) exited normally]\n",
8387 plongest (inf
->num
), pidstr
.c_str ());
8392 print_signal_received_reason (struct ui_out
*uiout
, enum gdb_signal siggnal
)
8394 struct thread_info
*thr
= inferior_thread ();
8398 if (uiout
->is_mi_like_p ())
8400 else if (show_thread_that_caused_stop ())
8402 uiout
->text ("\nThread ");
8403 uiout
->field_string ("thread-id", print_thread_id (thr
));
8405 const char *name
= thread_name (thr
);
8406 if (name
!= nullptr)
8408 uiout
->text (" \"");
8409 uiout
->field_string ("name", name
);
8414 uiout
->text ("\nProgram");
8416 if (siggnal
== GDB_SIGNAL_0
&& !uiout
->is_mi_like_p ())
8417 uiout
->text (" stopped");
8420 uiout
->text (" received signal ");
8421 annotate_signal_name ();
8422 if (uiout
->is_mi_like_p ())
8424 ("reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED
));
8425 uiout
->field_string ("signal-name", gdb_signal_to_name (siggnal
));
8426 annotate_signal_name_end ();
8428 annotate_signal_string ();
8429 uiout
->field_string ("signal-meaning", gdb_signal_to_string (siggnal
));
8431 struct regcache
*regcache
= get_current_regcache ();
8432 struct gdbarch
*gdbarch
= regcache
->arch ();
8433 if (gdbarch_report_signal_info_p (gdbarch
))
8434 gdbarch_report_signal_info (gdbarch
, uiout
, siggnal
);
8436 annotate_signal_string_end ();
8438 uiout
->text (".\n");
8442 print_no_history_reason (struct ui_out
*uiout
)
8444 uiout
->text ("\nNo more reverse-execution history.\n");
8447 /* Print current location without a level number, if we have changed
8448 functions or hit a breakpoint. Print source line if we have one.
8449 bpstat_print contains the logic deciding in detail what to print,
8450 based on the event(s) that just occurred. */
8453 print_stop_location (const target_waitstatus
&ws
)
8456 enum print_what source_flag
;
8457 int do_frame_printing
= 1;
8458 struct thread_info
*tp
= inferior_thread ();
8460 bpstat_ret
= bpstat_print (tp
->control
.stop_bpstat
, ws
.kind ());
8464 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
8465 should) carry around the function and does (or should) use
8466 that when doing a frame comparison. */
8467 if (tp
->control
.stop_step
8468 && (tp
->control
.step_frame_id
8469 == get_frame_id (get_current_frame ()))
8470 && (tp
->control
.step_start_function
8471 == find_pc_function (tp
->stop_pc ())))
8473 /* Finished step, just print source line. */
8474 source_flag
= SRC_LINE
;
8478 /* Print location and source line. */
8479 source_flag
= SRC_AND_LOC
;
8482 case PRINT_SRC_AND_LOC
:
8483 /* Print location and source line. */
8484 source_flag
= SRC_AND_LOC
;
8486 case PRINT_SRC_ONLY
:
8487 source_flag
= SRC_LINE
;
8490 /* Something bogus. */
8491 source_flag
= SRC_LINE
;
8492 do_frame_printing
= 0;
8495 internal_error (_("Unknown value."));
8498 /* The behavior of this routine with respect to the source
8500 SRC_LINE: Print only source line
8501 LOCATION: Print only location
8502 SRC_AND_LOC: Print location and source line. */
8503 if (do_frame_printing
)
8504 print_stack_frame (get_selected_frame (nullptr), 0, source_flag
, 1);
8510 print_stop_event (struct ui_out
*uiout
, bool displays
)
8512 struct target_waitstatus last
;
8513 struct thread_info
*tp
;
8515 get_last_target_status (nullptr, nullptr, &last
);
8518 scoped_restore save_uiout
= make_scoped_restore (¤t_uiout
, uiout
);
8520 print_stop_location (last
);
8522 /* Display the auto-display expressions. */
8527 tp
= inferior_thread ();
8528 if (tp
->thread_fsm () != nullptr
8529 && tp
->thread_fsm ()->finished_p ())
8531 struct return_value_info
*rv
;
8533 rv
= tp
->thread_fsm ()->return_value ();
8535 print_return_value (uiout
, rv
);
8542 maybe_remove_breakpoints (void)
8544 if (!breakpoints_should_be_inserted_now () && target_has_execution ())
8546 if (remove_breakpoints ())
8548 target_terminal::ours_for_output ();
8549 gdb_printf (_("Cannot remove breakpoints because "
8550 "program is no longer writable.\nFurther "
8551 "execution is probably impossible.\n"));
8556 /* The execution context that just caused a normal stop. */
8562 DISABLE_COPY_AND_ASSIGN (stop_context
);
8564 bool changed () const;
8569 /* The event PTID. */
8573 /* If stopp for a thread event, this is the thread that caused the
8575 thread_info_ref thread
;
8577 /* The inferior that caused the stop. */
8581 /* Initializes a new stop context. If stopped for a thread event, this
8582 takes a strong reference to the thread. */
8584 stop_context::stop_context ()
8586 stop_id
= get_stop_id ();
8587 ptid
= inferior_ptid
;
8588 inf_num
= current_inferior ()->num
;
8590 if (inferior_ptid
!= null_ptid
)
8592 /* Take a strong reference so that the thread can't be deleted
8594 thread
= thread_info_ref::new_reference (inferior_thread ());
8598 /* Return true if the current context no longer matches the saved stop
8602 stop_context::changed () const
8604 if (ptid
!= inferior_ptid
)
8606 if (inf_num
!= current_inferior ()->num
)
8608 if (thread
!= nullptr && thread
->state
!= THREAD_STOPPED
)
8610 if (get_stop_id () != stop_id
)
8620 struct target_waitstatus last
;
8622 get_last_target_status (nullptr, nullptr, &last
);
8626 /* If an exception is thrown from this point on, make sure to
8627 propagate GDB's knowledge of the executing state to the
8628 frontend/user running state. A QUIT is an easy exception to see
8629 here, so do this before any filtered output. */
8631 ptid_t finish_ptid
= null_ptid
;
8634 finish_ptid
= minus_one_ptid
;
8635 else if (last
.kind () == TARGET_WAITKIND_SIGNALLED
8636 || last
.kind () == TARGET_WAITKIND_EXITED
)
8638 /* On some targets, we may still have live threads in the
8639 inferior when we get a process exit event. E.g., for
8640 "checkpoint", when the current checkpoint/fork exits,
8641 linux-fork.c automatically switches to another fork from
8642 within target_mourn_inferior. */
8643 if (inferior_ptid
!= null_ptid
)
8644 finish_ptid
= ptid_t (inferior_ptid
.pid ());
8646 else if (last
.kind () != TARGET_WAITKIND_NO_RESUMED
)
8647 finish_ptid
= inferior_ptid
;
8649 gdb::optional
<scoped_finish_thread_state
> maybe_finish_thread_state
;
8650 if (finish_ptid
!= null_ptid
)
8652 maybe_finish_thread_state
.emplace
8653 (user_visible_resume_target (finish_ptid
), finish_ptid
);
8656 /* As we're presenting a stop, and potentially removing breakpoints,
8657 update the thread list so we can tell whether there are threads
8658 running on the target. With target remote, for example, we can
8659 only learn about new threads when we explicitly update the thread
8660 list. Do this before notifying the interpreters about signal
8661 stops, end of stepping ranges, etc., so that the "new thread"
8662 output is emitted before e.g., "Program received signal FOO",
8663 instead of after. */
8664 update_thread_list ();
8666 if (last
.kind () == TARGET_WAITKIND_STOPPED
&& stopped_by_random_signal
)
8667 gdb::observers::signal_received
.notify (inferior_thread ()->stop_signal ());
8669 /* As with the notification of thread events, we want to delay
8670 notifying the user that we've switched thread context until
8671 the inferior actually stops.
8673 There's no point in saying anything if the inferior has exited.
8674 Note that SIGNALLED here means "exited with a signal", not
8675 "received a signal".
8677 Also skip saying anything in non-stop mode. In that mode, as we
8678 don't want GDB to switch threads behind the user's back, to avoid
8679 races where the user is typing a command to apply to thread x,
8680 but GDB switches to thread y before the user finishes entering
8681 the command, fetch_inferior_event installs a cleanup to restore
8682 the current thread back to the thread the user had selected right
8683 after this event is handled, so we're not really switching, only
8684 informing of a stop. */
8686 && previous_inferior_ptid
!= inferior_ptid
8687 && target_has_execution ()
8688 && last
.kind () != TARGET_WAITKIND_SIGNALLED
8689 && last
.kind () != TARGET_WAITKIND_EXITED
8690 && last
.kind () != TARGET_WAITKIND_NO_RESUMED
)
8692 SWITCH_THRU_ALL_UIS ()
8694 target_terminal::ours_for_output ();
8695 gdb_printf (_("[Switching to %s]\n"),
8696 target_pid_to_str (inferior_ptid
).c_str ());
8697 annotate_thread_changed ();
8699 previous_inferior_ptid
= inferior_ptid
;
8702 if (last
.kind () == TARGET_WAITKIND_NO_RESUMED
)
8704 SWITCH_THRU_ALL_UIS ()
8705 if (current_ui
->prompt_state
== PROMPT_BLOCKED
)
8707 target_terminal::ours_for_output ();
8708 gdb_printf (_("No unwaited-for children left.\n"));
8712 /* Note: this depends on the update_thread_list call above. */
8713 maybe_remove_breakpoints ();
8715 /* If an auto-display called a function and that got a signal,
8716 delete that auto-display to avoid an infinite recursion. */
8718 if (stopped_by_random_signal
)
8719 disable_current_display ();
8721 SWITCH_THRU_ALL_UIS ()
8723 async_enable_stdin ();
8726 /* Let the user/frontend see the threads as stopped. */
8727 maybe_finish_thread_state
.reset ();
8729 /* Select innermost stack frame - i.e., current frame is frame 0,
8730 and current location is based on that. Handle the case where the
8731 dummy call is returning after being stopped. E.g. the dummy call
8732 previously hit a breakpoint. (If the dummy call returns
8733 normally, we won't reach here.) Do this before the stop hook is
8734 run, so that it doesn't get to see the temporary dummy frame,
8735 which is not where we'll present the stop. */
8736 if (has_stack_frames ())
8738 if (stop_stack_dummy
== STOP_STACK_DUMMY
)
8740 /* Pop the empty frame that contains the stack dummy. This
8741 also restores inferior state prior to the call (struct
8742 infcall_suspend_state). */
8743 frame_info_ptr frame
= get_current_frame ();
8745 gdb_assert (get_frame_type (frame
) == DUMMY_FRAME
);
8747 /* frame_pop calls reinit_frame_cache as the last thing it
8748 does which means there's now no selected frame. */
8751 select_frame (get_current_frame ());
8753 /* Set the current source location. */
8754 set_current_sal_from_frame (get_current_frame ());
8757 /* Look up the hook_stop and run it (CLI internally handles problem
8758 of stop_command's pre-hook not existing). */
8759 stop_context saved_context
;
8763 execute_cmd_pre_hook (stop_command
);
8765 catch (const gdb_exception
&ex
)
8767 exception_fprintf (gdb_stderr
, ex
,
8768 "Error while running hook_stop:\n");
8771 /* If the stop hook resumes the target, then there's no point in
8772 trying to notify about the previous stop; its context is
8773 gone. Likewise if the command switches thread or inferior --
8774 the observers would print a stop for the wrong
8776 if (saved_context
.changed ())
8779 /* Notify observers about the stop. This is where the interpreters
8780 print the stop event. */
8781 if (inferior_ptid
!= null_ptid
)
8782 gdb::observers::normal_stop
.notify (inferior_thread ()->control
.stop_bpstat
,
8785 gdb::observers::normal_stop
.notify (nullptr, stop_print_frame
);
8787 annotate_stopped ();
8789 if (target_has_execution ())
8791 if (last
.kind () != TARGET_WAITKIND_SIGNALLED
8792 && last
.kind () != TARGET_WAITKIND_EXITED
8793 && last
.kind () != TARGET_WAITKIND_NO_RESUMED
)
8794 /* Delete the breakpoint we stopped at, if it wants to be deleted.
8795 Delete any breakpoint that is to be deleted at the next stop. */
8796 breakpoint_auto_delete (inferior_thread ()->control
.stop_bpstat
);
8803 signal_stop_state (int signo
)
8805 return signal_stop
[signo
];
8809 signal_print_state (int signo
)
8811 return signal_print
[signo
];
8815 signal_pass_state (int signo
)
8817 return signal_program
[signo
];
8821 signal_cache_update (int signo
)
8825 for (signo
= 0; signo
< (int) GDB_SIGNAL_LAST
; signo
++)
8826 signal_cache_update (signo
);
8831 signal_pass
[signo
] = (signal_stop
[signo
] == 0
8832 && signal_print
[signo
] == 0
8833 && signal_program
[signo
] == 1
8834 && signal_catch
[signo
] == 0);
8838 signal_stop_update (int signo
, int state
)
8840 int ret
= signal_stop
[signo
];
8842 signal_stop
[signo
] = state
;
8843 signal_cache_update (signo
);
8848 signal_print_update (int signo
, int state
)
8850 int ret
= signal_print
[signo
];
8852 signal_print
[signo
] = state
;
8853 signal_cache_update (signo
);
8858 signal_pass_update (int signo
, int state
)
8860 int ret
= signal_program
[signo
];
8862 signal_program
[signo
] = state
;
8863 signal_cache_update (signo
);
8867 /* Update the global 'signal_catch' from INFO and notify the
8871 signal_catch_update (const unsigned int *info
)
8875 for (i
= 0; i
< GDB_SIGNAL_LAST
; ++i
)
8876 signal_catch
[i
] = info
[i
] > 0;
8877 signal_cache_update (-1);
8878 target_pass_signals (signal_pass
);
8882 sig_print_header (void)
8884 gdb_printf (_("Signal Stop\tPrint\tPass "
8885 "to program\tDescription\n"));
8889 sig_print_info (enum gdb_signal oursig
)
8891 const char *name
= gdb_signal_to_name (oursig
);
8892 int name_padding
= 13 - strlen (name
);
8894 if (name_padding
<= 0)
8897 gdb_printf ("%s", name
);
8898 gdb_printf ("%*.*s ", name_padding
, name_padding
, " ");
8899 gdb_printf ("%s\t", signal_stop
[oursig
] ? "Yes" : "No");
8900 gdb_printf ("%s\t", signal_print
[oursig
] ? "Yes" : "No");
8901 gdb_printf ("%s\t\t", signal_program
[oursig
] ? "Yes" : "No");
8902 gdb_printf ("%s\n", gdb_signal_to_string (oursig
));
8905 /* Specify how various signals in the inferior should be handled. */
8908 handle_command (const char *args
, int from_tty
)
8910 int digits
, wordlen
;
8911 int sigfirst
, siglast
;
8912 enum gdb_signal oursig
;
8915 if (args
== nullptr)
8917 error_no_arg (_("signal to handle"));
8920 /* Allocate and zero an array of flags for which signals to handle. */
8922 const size_t nsigs
= GDB_SIGNAL_LAST
;
8923 unsigned char sigs
[nsigs
] {};
8925 /* Break the command line up into args. */
8927 gdb_argv
built_argv (args
);
8929 /* Walk through the args, looking for signal oursigs, signal names, and
8930 actions. Signal numbers and signal names may be interspersed with
8931 actions, with the actions being performed for all signals cumulatively
8932 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
8934 for (char *arg
: built_argv
)
8936 wordlen
= strlen (arg
);
8937 for (digits
= 0; isdigit (arg
[digits
]); digits
++)
8941 sigfirst
= siglast
= -1;
8943 if (wordlen
>= 1 && !strncmp (arg
, "all", wordlen
))
8945 /* Apply action to all signals except those used by the
8946 debugger. Silently skip those. */
8949 siglast
= nsigs
- 1;
8951 else if (wordlen
>= 1 && !strncmp (arg
, "stop", wordlen
))
8953 SET_SIGS (nsigs
, sigs
, signal_stop
);
8954 SET_SIGS (nsigs
, sigs
, signal_print
);
8956 else if (wordlen
>= 1 && !strncmp (arg
, "ignore", wordlen
))
8958 UNSET_SIGS (nsigs
, sigs
, signal_program
);
8960 else if (wordlen
>= 2 && !strncmp (arg
, "print", wordlen
))
8962 SET_SIGS (nsigs
, sigs
, signal_print
);
8964 else if (wordlen
>= 2 && !strncmp (arg
, "pass", wordlen
))
8966 SET_SIGS (nsigs
, sigs
, signal_program
);
8968 else if (wordlen
>= 3 && !strncmp (arg
, "nostop", wordlen
))
8970 UNSET_SIGS (nsigs
, sigs
, signal_stop
);
8972 else if (wordlen
>= 3 && !strncmp (arg
, "noignore", wordlen
))
8974 SET_SIGS (nsigs
, sigs
, signal_program
);
8976 else if (wordlen
>= 4 && !strncmp (arg
, "noprint", wordlen
))
8978 UNSET_SIGS (nsigs
, sigs
, signal_print
);
8979 UNSET_SIGS (nsigs
, sigs
, signal_stop
);
8981 else if (wordlen
>= 4 && !strncmp (arg
, "nopass", wordlen
))
8983 UNSET_SIGS (nsigs
, sigs
, signal_program
);
8985 else if (digits
> 0)
8987 /* It is numeric. The numeric signal refers to our own
8988 internal signal numbering from target.h, not to host/target
8989 signal number. This is a feature; users really should be
8990 using symbolic names anyway, and the common ones like
8991 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
8993 sigfirst
= siglast
= (int)
8994 gdb_signal_from_command (atoi (arg
));
8995 if (arg
[digits
] == '-')
8998 gdb_signal_from_command (atoi (arg
+ digits
+ 1));
9000 if (sigfirst
> siglast
)
9002 /* Bet he didn't figure we'd think of this case... */
9003 std::swap (sigfirst
, siglast
);
9008 oursig
= gdb_signal_from_name (arg
);
9009 if (oursig
!= GDB_SIGNAL_UNKNOWN
)
9011 sigfirst
= siglast
= (int) oursig
;
9015 /* Not a number and not a recognized flag word => complain. */
9016 error (_("Unrecognized or ambiguous flag word: \"%s\"."), arg
);
9020 /* If any signal numbers or symbol names were found, set flags for
9021 which signals to apply actions to. */
9023 for (int signum
= sigfirst
; signum
>= 0 && signum
<= siglast
; signum
++)
9025 switch ((enum gdb_signal
) signum
)
9027 case GDB_SIGNAL_TRAP
:
9028 case GDB_SIGNAL_INT
:
9029 if (!allsigs
&& !sigs
[signum
])
9031 if (query (_("%s is used by the debugger.\n\
9032 Are you sure you want to change it? "),
9033 gdb_signal_to_name ((enum gdb_signal
) signum
)))
9038 gdb_printf (_("Not confirmed, unchanged.\n"));
9042 case GDB_SIGNAL_DEFAULT
:
9043 case GDB_SIGNAL_UNKNOWN
:
9044 /* Make sure that "all" doesn't print these. */
9053 for (int signum
= 0; signum
< nsigs
; signum
++)
9056 signal_cache_update (-1);
9057 target_pass_signals (signal_pass
);
9058 target_program_signals (signal_program
);
9062 /* Show the results. */
9063 sig_print_header ();
9064 for (; signum
< nsigs
; signum
++)
9066 sig_print_info ((enum gdb_signal
) signum
);
9073 /* Complete the "handle" command. */
9076 handle_completer (struct cmd_list_element
*ignore
,
9077 completion_tracker
&tracker
,
9078 const char *text
, const char *word
)
9080 static const char * const keywords
[] =
9094 signal_completer (ignore
, tracker
, text
, word
);
9095 complete_on_enum (tracker
, keywords
, word
, word
);
9099 gdb_signal_from_command (int num
)
9101 if (num
>= 1 && num
<= 15)
9102 return (enum gdb_signal
) num
;
9103 error (_("Only signals 1-15 are valid as numeric signals.\n\
9104 Use \"info signals\" for a list of symbolic signals."));
9107 /* Print current contents of the tables set by the handle command.
9108 It is possible we should just be printing signals actually used
9109 by the current target (but for things to work right when switching
9110 targets, all signals should be in the signal tables). */
9113 info_signals_command (const char *signum_exp
, int from_tty
)
9115 enum gdb_signal oursig
;
9117 sig_print_header ();
9121 /* First see if this is a symbol name. */
9122 oursig
= gdb_signal_from_name (signum_exp
);
9123 if (oursig
== GDB_SIGNAL_UNKNOWN
)
9125 /* No, try numeric. */
9127 gdb_signal_from_command (parse_and_eval_long (signum_exp
));
9129 sig_print_info (oursig
);
9134 /* These ugly casts brought to you by the native VAX compiler. */
9135 for (oursig
= GDB_SIGNAL_FIRST
;
9136 (int) oursig
< (int) GDB_SIGNAL_LAST
;
9137 oursig
= (enum gdb_signal
) ((int) oursig
+ 1))
9141 if (oursig
!= GDB_SIGNAL_UNKNOWN
9142 && oursig
!= GDB_SIGNAL_DEFAULT
&& oursig
!= GDB_SIGNAL_0
)
9143 sig_print_info (oursig
);
9146 gdb_printf (_("\nUse the \"handle\" command "
9147 "to change these tables.\n"));
9150 /* The $_siginfo convenience variable is a bit special. We don't know
9151 for sure the type of the value until we actually have a chance to
9152 fetch the data. The type can change depending on gdbarch, so it is
9153 also dependent on which thread you have selected.
9155 1. making $_siginfo be an internalvar that creates a new value on
9158 2. making the value of $_siginfo be an lval_computed value. */
9160 /* This function implements the lval_computed support for reading a
9164 siginfo_value_read (struct value
*v
)
9166 LONGEST transferred
;
9168 /* If we can access registers, so can we access $_siginfo. Likewise
9170 validate_registers_access ();
9173 target_read (current_inferior ()->top_target (),
9174 TARGET_OBJECT_SIGNAL_INFO
,
9176 value_contents_all_raw (v
).data (),
9178 value_type (v
)->length ());
9180 if (transferred
!= value_type (v
)->length ())
9181 error (_("Unable to read siginfo"));
9184 /* This function implements the lval_computed support for writing a
9188 siginfo_value_write (struct value
*v
, struct value
*fromval
)
9190 LONGEST transferred
;
9192 /* If we can access registers, so can we access $_siginfo. Likewise
9194 validate_registers_access ();
9196 transferred
= target_write (current_inferior ()->top_target (),
9197 TARGET_OBJECT_SIGNAL_INFO
,
9199 value_contents_all_raw (fromval
).data (),
9201 value_type (fromval
)->length ());
9203 if (transferred
!= value_type (fromval
)->length ())
9204 error (_("Unable to write siginfo"));
9207 static const struct lval_funcs siginfo_value_funcs
=
9213 /* Return a new value with the correct type for the siginfo object of
9214 the current thread using architecture GDBARCH. Return a void value
9215 if there's no object available. */
9217 static struct value
*
9218 siginfo_make_value (struct gdbarch
*gdbarch
, struct internalvar
*var
,
9221 if (target_has_stack ()
9222 && inferior_ptid
!= null_ptid
9223 && gdbarch_get_siginfo_type_p (gdbarch
))
9225 struct type
*type
= gdbarch_get_siginfo_type (gdbarch
);
9227 return allocate_computed_value (type
, &siginfo_value_funcs
, nullptr);
9230 return allocate_value (builtin_type (gdbarch
)->builtin_void
);
9234 /* infcall_suspend_state contains state about the program itself like its
9235 registers and any signal it received when it last stopped.
9236 This state must be restored regardless of how the inferior function call
9237 ends (either successfully, or after it hits a breakpoint or signal)
9238 if the program is to properly continue where it left off. */
9240 class infcall_suspend_state
9243 /* Capture state from GDBARCH, TP, and REGCACHE that must be restored
9244 once the inferior function call has finished. */
9245 infcall_suspend_state (struct gdbarch
*gdbarch
,
9246 const struct thread_info
*tp
,
9247 struct regcache
*regcache
)
9248 : m_registers (new readonly_detached_regcache (*regcache
))
9250 tp
->save_suspend_to (m_thread_suspend
);
9252 gdb::unique_xmalloc_ptr
<gdb_byte
> siginfo_data
;
9254 if (gdbarch_get_siginfo_type_p (gdbarch
))
9256 struct type
*type
= gdbarch_get_siginfo_type (gdbarch
);
9257 size_t len
= type
->length ();
9259 siginfo_data
.reset ((gdb_byte
*) xmalloc (len
));
9261 if (target_read (current_inferior ()->top_target (),
9262 TARGET_OBJECT_SIGNAL_INFO
, nullptr,
9263 siginfo_data
.get (), 0, len
) != len
)
9265 /* Errors ignored. */
9266 siginfo_data
.reset (nullptr);
9272 m_siginfo_gdbarch
= gdbarch
;
9273 m_siginfo_data
= std::move (siginfo_data
);
9277 /* Return a pointer to the stored register state. */
9279 readonly_detached_regcache
*registers () const
9281 return m_registers
.get ();
9284 /* Restores the stored state into GDBARCH, TP, and REGCACHE. */
9286 void restore (struct gdbarch
*gdbarch
,
9287 struct thread_info
*tp
,
9288 struct regcache
*regcache
) const
9290 tp
->restore_suspend_from (m_thread_suspend
);
9292 if (m_siginfo_gdbarch
== gdbarch
)
9294 struct type
*type
= gdbarch_get_siginfo_type (gdbarch
);
9296 /* Errors ignored. */
9297 target_write (current_inferior ()->top_target (),
9298 TARGET_OBJECT_SIGNAL_INFO
, nullptr,
9299 m_siginfo_data
.get (), 0, type
->length ());
9302 /* The inferior can be gone if the user types "print exit(0)"
9303 (and perhaps other times). */
9304 if (target_has_execution ())
9305 /* NB: The register write goes through to the target. */
9306 regcache
->restore (registers ());
9310 /* How the current thread stopped before the inferior function call was
9312 struct thread_suspend_state m_thread_suspend
;
9314 /* The registers before the inferior function call was executed. */
9315 std::unique_ptr
<readonly_detached_regcache
> m_registers
;
9317 /* Format of SIGINFO_DATA or NULL if it is not present. */
9318 struct gdbarch
*m_siginfo_gdbarch
= nullptr;
9320 /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
9321 gdbarch_get_siginfo_type ()->length (). For different gdbarch the
9322 content would be invalid. */
9323 gdb::unique_xmalloc_ptr
<gdb_byte
> m_siginfo_data
;
9326 infcall_suspend_state_up
9327 save_infcall_suspend_state ()
9329 struct thread_info
*tp
= inferior_thread ();
9330 struct regcache
*regcache
= get_current_regcache ();
9331 struct gdbarch
*gdbarch
= regcache
->arch ();
9333 infcall_suspend_state_up inf_state
9334 (new struct infcall_suspend_state (gdbarch
, tp
, regcache
));
9336 /* Having saved the current state, adjust the thread state, discarding
9337 any stop signal information. The stop signal is not useful when
9338 starting an inferior function call, and run_inferior_call will not use
9339 the signal due to its `proceed' call with GDB_SIGNAL_0. */
9340 tp
->set_stop_signal (GDB_SIGNAL_0
);
9345 /* Restore inferior session state to INF_STATE. */
9348 restore_infcall_suspend_state (struct infcall_suspend_state
*inf_state
)
9350 struct thread_info
*tp
= inferior_thread ();
9351 struct regcache
*regcache
= get_current_regcache ();
9352 struct gdbarch
*gdbarch
= regcache
->arch ();
9354 inf_state
->restore (gdbarch
, tp
, regcache
);
9355 discard_infcall_suspend_state (inf_state
);
9359 discard_infcall_suspend_state (struct infcall_suspend_state
*inf_state
)
9364 readonly_detached_regcache
*
9365 get_infcall_suspend_state_regcache (struct infcall_suspend_state
*inf_state
)
9367 return inf_state
->registers ();
9370 /* infcall_control_state contains state regarding gdb's control of the
9371 inferior itself like stepping control. It also contains session state like
9372 the user's currently selected frame. */
9374 struct infcall_control_state
9376 struct thread_control_state thread_control
;
9377 struct inferior_control_state inferior_control
;
9380 enum stop_stack_kind stop_stack_dummy
= STOP_NONE
;
9381 int stopped_by_random_signal
= 0;
9383 /* ID and level of the selected frame when the inferior function
9385 struct frame_id selected_frame_id
{};
9386 int selected_frame_level
= -1;
9389 /* Save all of the information associated with the inferior<==>gdb
9392 infcall_control_state_up
9393 save_infcall_control_state ()
9395 infcall_control_state_up
inf_status (new struct infcall_control_state
);
9396 struct thread_info
*tp
= inferior_thread ();
9397 struct inferior
*inf
= current_inferior ();
9399 inf_status
->thread_control
= tp
->control
;
9400 inf_status
->inferior_control
= inf
->control
;
9402 tp
->control
.step_resume_breakpoint
= nullptr;
9403 tp
->control
.exception_resume_breakpoint
= nullptr;
9405 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
9406 chain. If caller's caller is walking the chain, they'll be happier if we
9407 hand them back the original chain when restore_infcall_control_state is
9409 tp
->control
.stop_bpstat
= bpstat_copy (tp
->control
.stop_bpstat
);
9412 inf_status
->stop_stack_dummy
= stop_stack_dummy
;
9413 inf_status
->stopped_by_random_signal
= stopped_by_random_signal
;
9415 save_selected_frame (&inf_status
->selected_frame_id
,
9416 &inf_status
->selected_frame_level
);
9421 /* Restore inferior session state to INF_STATUS. */
9424 restore_infcall_control_state (struct infcall_control_state
*inf_status
)
9426 struct thread_info
*tp
= inferior_thread ();
9427 struct inferior
*inf
= current_inferior ();
9429 if (tp
->control
.step_resume_breakpoint
)
9430 tp
->control
.step_resume_breakpoint
->disposition
= disp_del_at_next_stop
;
9432 if (tp
->control
.exception_resume_breakpoint
)
9433 tp
->control
.exception_resume_breakpoint
->disposition
9434 = disp_del_at_next_stop
;
9436 /* Handle the bpstat_copy of the chain. */
9437 bpstat_clear (&tp
->control
.stop_bpstat
);
9439 tp
->control
= inf_status
->thread_control
;
9440 inf
->control
= inf_status
->inferior_control
;
9443 stop_stack_dummy
= inf_status
->stop_stack_dummy
;
9444 stopped_by_random_signal
= inf_status
->stopped_by_random_signal
;
9446 if (target_has_stack ())
9448 restore_selected_frame (inf_status
->selected_frame_id
,
9449 inf_status
->selected_frame_level
);
9456 discard_infcall_control_state (struct infcall_control_state
*inf_status
)
9458 if (inf_status
->thread_control
.step_resume_breakpoint
)
9459 inf_status
->thread_control
.step_resume_breakpoint
->disposition
9460 = disp_del_at_next_stop
;
9462 if (inf_status
->thread_control
.exception_resume_breakpoint
)
9463 inf_status
->thread_control
.exception_resume_breakpoint
->disposition
9464 = disp_del_at_next_stop
;
9466 /* See save_infcall_control_state for info on stop_bpstat. */
9467 bpstat_clear (&inf_status
->thread_control
.stop_bpstat
);
9475 clear_exit_convenience_vars (void)
9477 clear_internalvar (lookup_internalvar ("_exitsignal"));
9478 clear_internalvar (lookup_internalvar ("_exitcode"));
9482 /* User interface for reverse debugging:
9483 Set exec-direction / show exec-direction commands
9484 (returns error unless target implements to_set_exec_direction method). */
9486 enum exec_direction_kind execution_direction
= EXEC_FORWARD
;
9487 static const char exec_forward
[] = "forward";
9488 static const char exec_reverse
[] = "reverse";
9489 static const char *exec_direction
= exec_forward
;
9490 static const char *const exec_direction_names
[] = {
9497 set_exec_direction_func (const char *args
, int from_tty
,
9498 struct cmd_list_element
*cmd
)
9500 if (target_can_execute_reverse ())
9502 if (!strcmp (exec_direction
, exec_forward
))
9503 execution_direction
= EXEC_FORWARD
;
9504 else if (!strcmp (exec_direction
, exec_reverse
))
9505 execution_direction
= EXEC_REVERSE
;
9509 exec_direction
= exec_forward
;
9510 error (_("Target does not support this operation."));
9515 show_exec_direction_func (struct ui_file
*out
, int from_tty
,
9516 struct cmd_list_element
*cmd
, const char *value
)
9518 switch (execution_direction
) {
9520 gdb_printf (out
, _("Forward.\n"));
9523 gdb_printf (out
, _("Reverse.\n"));
9526 internal_error (_("bogus execution_direction value: %d"),
9527 (int) execution_direction
);
9532 show_schedule_multiple (struct ui_file
*file
, int from_tty
,
9533 struct cmd_list_element
*c
, const char *value
)
9535 gdb_printf (file
, _("Resuming the execution of threads "
9536 "of all processes is %s.\n"), value
);
9539 /* Implementation of `siginfo' variable. */
9541 static const struct internalvar_funcs siginfo_funcs
=
9547 /* Callback for infrun's target events source. This is marked when a
9548 thread has a pending status to process. */
9551 infrun_async_inferior_event_handler (gdb_client_data data
)
9553 clear_async_event_handler (infrun_async_inferior_event_token
);
9554 inferior_event_handler (INF_REG_EVENT
);
9561 /* Verify that when two threads with the same ptid exist (from two different
9562 targets) and one of them changes ptid, we only update inferior_ptid if
9563 it is appropriate. */
9566 infrun_thread_ptid_changed ()
9568 gdbarch
*arch
= current_inferior ()->gdbarch
;
9570 /* The thread which inferior_ptid represents changes ptid. */
9572 scoped_restore_current_pspace_and_thread restore
;
9574 scoped_mock_context
<test_target_ops
> target1 (arch
);
9575 scoped_mock_context
<test_target_ops
> target2 (arch
);
9577 ptid_t
old_ptid (111, 222);
9578 ptid_t
new_ptid (111, 333);
9580 target1
.mock_inferior
.pid
= old_ptid
.pid ();
9581 target1
.mock_thread
.ptid
= old_ptid
;
9582 target1
.mock_inferior
.ptid_thread_map
.clear ();
9583 target1
.mock_inferior
.ptid_thread_map
[old_ptid
] = &target1
.mock_thread
;
9585 target2
.mock_inferior
.pid
= old_ptid
.pid ();
9586 target2
.mock_thread
.ptid
= old_ptid
;
9587 target2
.mock_inferior
.ptid_thread_map
.clear ();
9588 target2
.mock_inferior
.ptid_thread_map
[old_ptid
] = &target2
.mock_thread
;
9590 auto restore_inferior_ptid
= make_scoped_restore (&inferior_ptid
, old_ptid
);
9591 set_current_inferior (&target1
.mock_inferior
);
9593 thread_change_ptid (&target1
.mock_target
, old_ptid
, new_ptid
);
9595 gdb_assert (inferior_ptid
== new_ptid
);
9598 /* A thread with the same ptid as inferior_ptid, but from another target,
9601 scoped_restore_current_pspace_and_thread restore
;
9603 scoped_mock_context
<test_target_ops
> target1 (arch
);
9604 scoped_mock_context
<test_target_ops
> target2 (arch
);
9606 ptid_t
old_ptid (111, 222);
9607 ptid_t
new_ptid (111, 333);
9609 target1
.mock_inferior
.pid
= old_ptid
.pid ();
9610 target1
.mock_thread
.ptid
= old_ptid
;
9611 target1
.mock_inferior
.ptid_thread_map
.clear ();
9612 target1
.mock_inferior
.ptid_thread_map
[old_ptid
] = &target1
.mock_thread
;
9614 target2
.mock_inferior
.pid
= old_ptid
.pid ();
9615 target2
.mock_thread
.ptid
= old_ptid
;
9616 target2
.mock_inferior
.ptid_thread_map
.clear ();
9617 target2
.mock_inferior
.ptid_thread_map
[old_ptid
] = &target2
.mock_thread
;
9619 auto restore_inferior_ptid
= make_scoped_restore (&inferior_ptid
, old_ptid
);
9620 set_current_inferior (&target2
.mock_inferior
);
9622 thread_change_ptid (&target1
.mock_target
, old_ptid
, new_ptid
);
9624 gdb_assert (inferior_ptid
== old_ptid
);
9628 } /* namespace selftests */
9630 #endif /* GDB_SELF_TEST */
9632 void _initialize_infrun ();
9634 _initialize_infrun ()
9636 struct cmd_list_element
*c
;
9638 /* Register extra event sources in the event loop. */
9639 infrun_async_inferior_event_token
9640 = create_async_event_handler (infrun_async_inferior_event_handler
, nullptr,
9643 cmd_list_element
*info_signals_cmd
9644 = add_info ("signals", info_signals_command
, _("\
9645 What debugger does when program gets various signals.\n\
9646 Specify a signal as argument to print info on that signal only."));
9647 add_info_alias ("handle", info_signals_cmd
, 0);
9649 c
= add_com ("handle", class_run
, handle_command
, _("\
9650 Specify how to handle signals.\n\
9651 Usage: handle SIGNAL [ACTIONS]\n\
9652 Args are signals and actions to apply to those signals.\n\
9653 If no actions are specified, the current settings for the specified signals\n\
9654 will be displayed instead.\n\
9656 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
9657 from 1-15 are allowed for compatibility with old versions of GDB.\n\
9658 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
9659 The special arg \"all\" is recognized to mean all signals except those\n\
9660 used by the debugger, typically SIGTRAP and SIGINT.\n\
9662 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
9663 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
9664 Stop means reenter debugger if this signal happens (implies print).\n\
9665 Print means print a message if this signal happens.\n\
9666 Pass means let program see this signal; otherwise program doesn't know.\n\
9667 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
9668 Pass and Stop may be combined.\n\
9670 Multiple signals may be specified. Signal numbers and signal names\n\
9671 may be interspersed with actions, with the actions being performed for\n\
9672 all signals cumulatively specified."));
9673 set_cmd_completer (c
, handle_completer
);
9675 stop_command
= add_cmd ("stop", class_obscure
,
9676 not_just_help_class_command
, _("\
9677 There is no `stop' command, but you can set a hook on `stop'.\n\
9678 This allows you to set a list of commands to be run each time execution\n\
9679 of the program stops."), &cmdlist
);
9681 add_setshow_boolean_cmd
9682 ("infrun", class_maintenance
, &debug_infrun
,
9683 _("Set inferior debugging."),
9684 _("Show inferior debugging."),
9685 _("When non-zero, inferior specific debugging is enabled."),
9686 nullptr, show_debug_infrun
, &setdebuglist
, &showdebuglist
);
9688 add_setshow_boolean_cmd ("non-stop", no_class
,
9690 Set whether gdb controls the inferior in non-stop mode."), _("\
9691 Show whether gdb controls the inferior in non-stop mode."), _("\
9692 When debugging a multi-threaded program and this setting is\n\
9693 off (the default, also called all-stop mode), when one thread stops\n\
9694 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
9695 all other threads in the program while you interact with the thread of\n\
9696 interest. When you continue or step a thread, you can allow the other\n\
9697 threads to run, or have them remain stopped, but while you inspect any\n\
9698 thread's state, all threads stop.\n\
9700 In non-stop mode, when one thread stops, other threads can continue\n\
9701 to run freely. You'll be able to step each thread independently,\n\
9702 leave it stopped or free to run as needed."),
9708 for (size_t i
= 0; i
< GDB_SIGNAL_LAST
; i
++)
9711 signal_print
[i
] = 1;
9712 signal_program
[i
] = 1;
9713 signal_catch
[i
] = 0;
9716 /* Signals caused by debugger's own actions should not be given to
9717 the program afterwards.
9719 Do not deliver GDB_SIGNAL_TRAP by default, except when the user
9720 explicitly specifies that it should be delivered to the target
9721 program. Typically, that would occur when a user is debugging a
9722 target monitor on a simulator: the target monitor sets a
9723 breakpoint; the simulator encounters this breakpoint and halts
9724 the simulation handing control to GDB; GDB, noting that the stop
9725 address doesn't map to any known breakpoint, returns control back
9726 to the simulator; the simulator then delivers the hardware
9727 equivalent of a GDB_SIGNAL_TRAP to the program being
9729 signal_program
[GDB_SIGNAL_TRAP
] = 0;
9730 signal_program
[GDB_SIGNAL_INT
] = 0;
9732 /* Signals that are not errors should not normally enter the debugger. */
9733 signal_stop
[GDB_SIGNAL_ALRM
] = 0;
9734 signal_print
[GDB_SIGNAL_ALRM
] = 0;
9735 signal_stop
[GDB_SIGNAL_VTALRM
] = 0;
9736 signal_print
[GDB_SIGNAL_VTALRM
] = 0;
9737 signal_stop
[GDB_SIGNAL_PROF
] = 0;
9738 signal_print
[GDB_SIGNAL_PROF
] = 0;
9739 signal_stop
[GDB_SIGNAL_CHLD
] = 0;
9740 signal_print
[GDB_SIGNAL_CHLD
] = 0;
9741 signal_stop
[GDB_SIGNAL_IO
] = 0;
9742 signal_print
[GDB_SIGNAL_IO
] = 0;
9743 signal_stop
[GDB_SIGNAL_POLL
] = 0;
9744 signal_print
[GDB_SIGNAL_POLL
] = 0;
9745 signal_stop
[GDB_SIGNAL_URG
] = 0;
9746 signal_print
[GDB_SIGNAL_URG
] = 0;
9747 signal_stop
[GDB_SIGNAL_WINCH
] = 0;
9748 signal_print
[GDB_SIGNAL_WINCH
] = 0;
9749 signal_stop
[GDB_SIGNAL_PRIO
] = 0;
9750 signal_print
[GDB_SIGNAL_PRIO
] = 0;
9752 /* These signals are used internally by user-level thread
9753 implementations. (See signal(5) on Solaris.) Like the above
9754 signals, a healthy program receives and handles them as part of
9755 its normal operation. */
9756 signal_stop
[GDB_SIGNAL_LWP
] = 0;
9757 signal_print
[GDB_SIGNAL_LWP
] = 0;
9758 signal_stop
[GDB_SIGNAL_WAITING
] = 0;
9759 signal_print
[GDB_SIGNAL_WAITING
] = 0;
9760 signal_stop
[GDB_SIGNAL_CANCEL
] = 0;
9761 signal_print
[GDB_SIGNAL_CANCEL
] = 0;
9762 signal_stop
[GDB_SIGNAL_LIBRT
] = 0;
9763 signal_print
[GDB_SIGNAL_LIBRT
] = 0;
9765 /* Update cached state. */
9766 signal_cache_update (-1);
9768 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support
,
9769 &stop_on_solib_events
, _("\
9770 Set stopping for shared library events."), _("\
9771 Show stopping for shared library events."), _("\
9772 If nonzero, gdb will give control to the user when the dynamic linker\n\
9773 notifies gdb of shared library events. The most common event of interest\n\
9774 to the user would be loading/unloading of a new library."),
9775 set_stop_on_solib_events
,
9776 show_stop_on_solib_events
,
9777 &setlist
, &showlist
);
9779 add_setshow_enum_cmd ("follow-fork-mode", class_run
,
9780 follow_fork_mode_kind_names
,
9781 &follow_fork_mode_string
, _("\
9782 Set debugger response to a program call of fork or vfork."), _("\
9783 Show debugger response to a program call of fork or vfork."), _("\
9784 A fork or vfork creates a new process. follow-fork-mode can be:\n\
9785 parent - the original process is debugged after a fork\n\
9786 child - the new process is debugged after a fork\n\
9787 The unfollowed process will continue to run.\n\
9788 By default, the debugger will follow the parent process."),
9790 show_follow_fork_mode_string
,
9791 &setlist
, &showlist
);
9793 add_setshow_enum_cmd ("follow-exec-mode", class_run
,
9794 follow_exec_mode_names
,
9795 &follow_exec_mode_string
, _("\
9796 Set debugger response to a program call of exec."), _("\
9797 Show debugger response to a program call of exec."), _("\
9798 An exec call replaces the program image of a process.\n\
9800 follow-exec-mode can be:\n\
9802 new - the debugger creates a new inferior and rebinds the process\n\
9803 to this new inferior. The program the process was running before\n\
9804 the exec call can be restarted afterwards by restarting the original\n\
9807 same - the debugger keeps the process bound to the same inferior.\n\
9808 The new executable image replaces the previous executable loaded in\n\
9809 the inferior. Restarting the inferior after the exec call restarts\n\
9810 the executable the process was running after the exec call.\n\
9812 By default, the debugger will use the same inferior."),
9814 show_follow_exec_mode_string
,
9815 &setlist
, &showlist
);
9817 add_setshow_enum_cmd ("scheduler-locking", class_run
,
9818 scheduler_enums
, &scheduler_mode
, _("\
9819 Set mode for locking scheduler during execution."), _("\
9820 Show mode for locking scheduler during execution."), _("\
9821 off == no locking (threads may preempt at any time)\n\
9822 on == full locking (no thread except the current thread may run)\n\
9823 This applies to both normal execution and replay mode.\n\
9824 step == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
9825 In this mode, other threads may run during other commands.\n\
9826 This applies to both normal execution and replay mode.\n\
9827 replay == scheduler locked in replay mode and unlocked during normal execution."),
9828 set_schedlock_func
, /* traps on target vector */
9829 show_scheduler_mode
,
9830 &setlist
, &showlist
);
9832 add_setshow_boolean_cmd ("schedule-multiple", class_run
, &sched_multi
, _("\
9833 Set mode for resuming threads of all processes."), _("\
9834 Show mode for resuming threads of all processes."), _("\
9835 When on, execution commands (such as 'continue' or 'next') resume all\n\
9836 threads of all processes. When off (which is the default), execution\n\
9837 commands only resume the threads of the current process. The set of\n\
9838 threads that are resumed is further refined by the scheduler-locking\n\
9839 mode (see help set scheduler-locking)."),
9841 show_schedule_multiple
,
9842 &setlist
, &showlist
);
9844 add_setshow_boolean_cmd ("step-mode", class_run
, &step_stop_if_no_debug
, _("\
9845 Set mode of the step operation."), _("\
9846 Show mode of the step operation."), _("\
9847 When set, doing a step over a function without debug line information\n\
9848 will stop at the first instruction of that function. Otherwise, the\n\
9849 function is skipped and the step command stops at a different source line."),
9851 show_step_stop_if_no_debug
,
9852 &setlist
, &showlist
);
9854 add_setshow_auto_boolean_cmd ("displaced-stepping", class_run
,
9855 &can_use_displaced_stepping
, _("\
9856 Set debugger's willingness to use displaced stepping."), _("\
9857 Show debugger's willingness to use displaced stepping."), _("\
9858 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
9859 supported by the target architecture. If off, gdb will not use displaced\n\
9860 stepping to step over breakpoints, even if such is supported by the target\n\
9861 architecture. If auto (which is the default), gdb will use displaced stepping\n\
9862 if the target architecture supports it and non-stop mode is active, but will not\n\
9863 use it in all-stop mode (see help set non-stop)."),
9865 show_can_use_displaced_stepping
,
9866 &setlist
, &showlist
);
9868 add_setshow_enum_cmd ("exec-direction", class_run
, exec_direction_names
,
9869 &exec_direction
, _("Set direction of execution.\n\
9870 Options are 'forward' or 'reverse'."),
9871 _("Show direction of execution (forward/reverse)."),
9872 _("Tells gdb whether to execute forward or backward."),
9873 set_exec_direction_func
, show_exec_direction_func
,
9874 &setlist
, &showlist
);
9876 /* Set/show detach-on-fork: user-settable mode. */
9878 add_setshow_boolean_cmd ("detach-on-fork", class_run
, &detach_fork
, _("\
9879 Set whether gdb will detach the child of a fork."), _("\
9880 Show whether gdb will detach the child of a fork."), _("\
9881 Tells gdb whether to detach the child of a fork."),
9882 nullptr, nullptr, &setlist
, &showlist
);
9884 /* Set/show disable address space randomization mode. */
9886 add_setshow_boolean_cmd ("disable-randomization", class_support
,
9887 &disable_randomization
, _("\
9888 Set disabling of debuggee's virtual address space randomization."), _("\
9889 Show disabling of debuggee's virtual address space randomization."), _("\
9890 When this mode is on (which is the default), randomization of the virtual\n\
9891 address space is disabled. Standalone programs run with the randomization\n\
9892 enabled by default on some platforms."),
9893 &set_disable_randomization
,
9894 &show_disable_randomization
,
9895 &setlist
, &showlist
);
9897 /* ptid initializations */
9898 inferior_ptid
= null_ptid
;
9899 target_last_wait_ptid
= minus_one_ptid
;
9901 gdb::observers::thread_ptid_changed
.attach (infrun_thread_ptid_changed
,
9903 gdb::observers::thread_stop_requested
.attach (infrun_thread_stop_requested
,
9905 gdb::observers::thread_exit
.attach (infrun_thread_thread_exit
, "infrun");
9906 gdb::observers::inferior_exit
.attach (infrun_inferior_exit
, "infrun");
9907 gdb::observers::inferior_execd
.attach (infrun_inferior_execd
, "infrun");
9909 /* Explicitly create without lookup, since that tries to create a
9910 value with a void typed value, and when we get here, gdbarch
9911 isn't initialized yet. At this point, we're quite sure there
9912 isn't another convenience variable of the same name. */
9913 create_internalvar_type_lazy ("_siginfo", &siginfo_funcs
, nullptr);
9915 add_setshow_boolean_cmd ("observer", no_class
,
9916 &observer_mode_1
, _("\
9917 Set whether gdb controls the inferior in observer mode."), _("\
9918 Show whether gdb controls the inferior in observer mode."), _("\
9919 In observer mode, GDB can get data from the inferior, but not\n\
9920 affect its execution. Registers and memory may not be changed,\n\
9921 breakpoints may not be set, and the program cannot be interrupted\n\
9929 selftests::register_test ("infrun_thread_ptid_changed",
9930 selftests::infrun_thread_ptid_changed
);