1 /* Target-struct-independent code to start (run) and stop an inferior
4 Copyright (C) 1986-2022 Free Software Foundation, Inc.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22 #include "displaced-stepping.h"
28 #include "breakpoint.h"
32 #include "target-connection.h"
33 #include "gdbthread.h"
40 #include "observable.h"
45 #include "mi/mi-common.h"
46 #include "event-top.h"
48 #include "record-full.h"
49 #include "inline-frame.h"
51 #include "tracepoint.h"
55 #include "completer.h"
56 #include "target-descriptions.h"
57 #include "target-dcache.h"
60 #include "gdbsupport/event-loop.h"
61 #include "thread-fsm.h"
62 #include "gdbsupport/enum-flags.h"
63 #include "progspace-and-thread.h"
64 #include "gdbsupport/gdb_optional.h"
65 #include "arch-utils.h"
66 #include "gdbsupport/scope-exit.h"
67 #include "gdbsupport/forward-scope-exit.h"
68 #include "gdbsupport/gdb_select.h"
69 #include <unordered_map>
70 #include "async-event.h"
71 #include "gdbsupport/selftest.h"
72 #include "scoped-mock-context.h"
73 #include "test-target.h"
74 #include "gdbsupport/common-debug.h"
75 #include "gdbsupport/buildargv.h"
77 /* Prototypes for local functions */
79 static void sig_print_info (enum gdb_signal
);
81 static void sig_print_header (void);
83 static void follow_inferior_reset_breakpoints (void);
85 static bool currently_stepping (struct thread_info
*tp
);
87 static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info
*);
89 static void insert_step_resume_breakpoint_at_caller (struct frame_info
*);
91 static void insert_longjmp_resume_breakpoint (struct gdbarch
*, CORE_ADDR
);
93 static bool maybe_software_singlestep (struct gdbarch
*gdbarch
);
95 static void resume (gdb_signal sig
);
97 static void wait_for_inferior (inferior
*inf
);
99 static void restart_threads (struct thread_info
*event_thread
,
100 inferior
*inf
= nullptr);
102 static bool start_step_over (void);
104 /* Asynchronous signal handler registered as event loop source for
105 when we have pending events ready to be passed to the core. */
106 static struct async_event_handler
*infrun_async_inferior_event_token
;
108 /* Stores whether infrun_async was previously enabled or disabled.
109 Starts off as -1, indicating "never enabled/disabled". */
110 static int infrun_is_async
= -1;
115 infrun_async (int enable
)
117 if (infrun_is_async
!= enable
)
119 infrun_is_async
= enable
;
121 infrun_debug_printf ("enable=%d", enable
);
124 mark_async_event_handler (infrun_async_inferior_event_token
);
126 clear_async_event_handler (infrun_async_inferior_event_token
);
133 mark_infrun_async_event_handler (void)
135 mark_async_event_handler (infrun_async_inferior_event_token
);
138 /* When set, stop the 'step' command if we enter a function which has
139 no line number information. The normal behavior is that we step
140 over such function. */
141 bool step_stop_if_no_debug
= false;
143 show_step_stop_if_no_debug (struct ui_file
*file
, int from_tty
,
144 struct cmd_list_element
*c
, const char *value
)
146 gdb_printf (file
, _("Mode of the step operation is %s.\n"), value
);
149 /* proceed and normal_stop use this to notify the user when the
150 inferior stopped in a different thread than it had been running
153 static ptid_t previous_inferior_ptid
;
155 /* If set (default for legacy reasons), when following a fork, GDB
156 will detach from one of the fork branches, child or parent.
157 Exactly which branch is detached depends on 'set follow-fork-mode'
160 static bool detach_fork
= true;
162 bool debug_infrun
= false;
164 show_debug_infrun (struct ui_file
*file
, int from_tty
,
165 struct cmd_list_element
*c
, const char *value
)
167 gdb_printf (file
, _("Inferior debugging is %s.\n"), value
);
170 /* Support for disabling address space randomization. */
172 bool disable_randomization
= true;
175 show_disable_randomization (struct ui_file
*file
, int from_tty
,
176 struct cmd_list_element
*c
, const char *value
)
178 if (target_supports_disable_randomization ())
180 _("Disabling randomization of debuggee's "
181 "virtual address space is %s.\n"),
184 gdb_puts (_("Disabling randomization of debuggee's "
185 "virtual address space is unsupported on\n"
186 "this platform.\n"), file
);
190 set_disable_randomization (const char *args
, int from_tty
,
191 struct cmd_list_element
*c
)
193 if (!target_supports_disable_randomization ())
194 error (_("Disabling randomization of debuggee's "
195 "virtual address space is unsupported on\n"
199 /* User interface for non-stop mode. */
201 bool non_stop
= false;
202 static bool non_stop_1
= false;
205 set_non_stop (const char *args
, int from_tty
,
206 struct cmd_list_element
*c
)
208 if (target_has_execution ())
210 non_stop_1
= non_stop
;
211 error (_("Cannot change this setting while the inferior is running."));
214 non_stop
= non_stop_1
;
218 show_non_stop (struct ui_file
*file
, int from_tty
,
219 struct cmd_list_element
*c
, const char *value
)
222 _("Controlling the inferior in non-stop mode is %s.\n"),
226 /* "Observer mode" is somewhat like a more extreme version of
227 non-stop, in which all GDB operations that might affect the
228 target's execution have been disabled. */
230 static bool observer_mode
= false;
231 static bool observer_mode_1
= false;
234 set_observer_mode (const char *args
, int from_tty
,
235 struct cmd_list_element
*c
)
237 if (target_has_execution ())
239 observer_mode_1
= observer_mode
;
240 error (_("Cannot change this setting while the inferior is running."));
243 observer_mode
= observer_mode_1
;
245 may_write_registers
= !observer_mode
;
246 may_write_memory
= !observer_mode
;
247 may_insert_breakpoints
= !observer_mode
;
248 may_insert_tracepoints
= !observer_mode
;
249 /* We can insert fast tracepoints in or out of observer mode,
250 but enable them if we're going into this mode. */
252 may_insert_fast_tracepoints
= true;
253 may_stop
= !observer_mode
;
254 update_target_permissions ();
256 /* Going *into* observer mode we must force non-stop, then
257 going out we leave it that way. */
260 pagination_enabled
= 0;
261 non_stop
= non_stop_1
= true;
265 gdb_printf (_("Observer mode is now %s.\n"),
266 (observer_mode
? "on" : "off"));
270 show_observer_mode (struct ui_file
*file
, int from_tty
,
271 struct cmd_list_element
*c
, const char *value
)
273 gdb_printf (file
, _("Observer mode is %s.\n"), value
);
276 /* This updates the value of observer mode based on changes in
277 permissions. Note that we are deliberately ignoring the values of
278 may-write-registers and may-write-memory, since the user may have
279 reason to enable these during a session, for instance to turn on a
280 debugging-related global. */
283 update_observer_mode (void)
285 bool newval
= (!may_insert_breakpoints
286 && !may_insert_tracepoints
287 && may_insert_fast_tracepoints
291 /* Let the user know if things change. */
292 if (newval
!= observer_mode
)
293 gdb_printf (_("Observer mode is now %s.\n"),
294 (newval
? "on" : "off"));
296 observer_mode
= observer_mode_1
= newval
;
299 /* Tables of how to react to signals; the user sets them. */
301 static unsigned char signal_stop
[GDB_SIGNAL_LAST
];
302 static unsigned char signal_print
[GDB_SIGNAL_LAST
];
303 static unsigned char signal_program
[GDB_SIGNAL_LAST
];
305 /* Table of signals that are registered with "catch signal". A
306 non-zero entry indicates that the signal is caught by some "catch
308 static unsigned char signal_catch
[GDB_SIGNAL_LAST
];
310 /* Table of signals that the target may silently handle.
311 This is automatically determined from the flags above,
312 and simply cached here. */
313 static unsigned char signal_pass
[GDB_SIGNAL_LAST
];
/* Set FLAGS[signum] = 1 for every signal number below NSIGS whose entry
   in SIGS is nonzero.  Wrapped in do/while(0) so the macro behaves as a
   single statement.  */
#define SET_SIGS(nsigs,sigs,flags) \
  do \
    { \
      int signum = (nsigs); \
      while (signum-- > 0) \
	if ((sigs)[signum]) \
	  (flags)[signum] = 1; \
    } \
  while (0)

/* Clear FLAGS[signum] for every signal number below NSIGS whose entry
   in SIGS is nonzero.  Counterpart of SET_SIGS.  */
#define UNSET_SIGS(nsigs,sigs,flags) \
  do \
    { \
      int signum = (nsigs); \
      while (signum-- > 0) \
	if ((sigs)[signum]) \
	  (flags)[signum] = 0; \
    } \
  while (0)
331 /* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
332 this function is to avoid exporting `signal_program'. */
335 update_signals_program_target (void)
337 target_program_signals (signal_program
);
340 /* Value to pass to target_resume() to cause all threads to resume. */
342 #define RESUME_ALL minus_one_ptid
344 /* Command list pointer for the "stop" placeholder. */
346 static struct cmd_list_element
*stop_command
;
348 /* Nonzero if we want to give control to the user when we're notified
349 of shared library events by the dynamic linker. */
350 int stop_on_solib_events
;
352 /* Enable or disable optional shared library event breakpoints
353 as appropriate when the above flag is changed. */
356 set_stop_on_solib_events (const char *args
,
357 int from_tty
, struct cmd_list_element
*c
)
359 update_solib_breakpoints ();
363 show_stop_on_solib_events (struct ui_file
*file
, int from_tty
,
364 struct cmd_list_element
*c
, const char *value
)
366 gdb_printf (file
, _("Stopping for shared library events is %s.\n"),
370 /* True after stop if current stack frame should be printed. */
372 static bool stop_print_frame
;
374 /* This is a cached copy of the target/ptid/waitstatus of the last
375 event returned by target_wait().
376 This information is returned by get_last_target_status(). */
377 static process_stratum_target
*target_last_proc_target
;
378 static ptid_t target_last_wait_ptid
;
379 static struct target_waitstatus target_last_waitstatus
;
381 void init_thread_stepping_state (struct thread_info
*tss
);
383 static const char follow_fork_mode_child
[] = "child";
384 static const char follow_fork_mode_parent
[] = "parent";
386 static const char *const follow_fork_mode_kind_names
[] = {
387 follow_fork_mode_child
,
388 follow_fork_mode_parent
,
392 static const char *follow_fork_mode_string
= follow_fork_mode_parent
;
394 show_follow_fork_mode_string (struct ui_file
*file
, int from_tty
,
395 struct cmd_list_element
*c
, const char *value
)
398 _("Debugger response to a program "
399 "call of fork or vfork is \"%s\".\n"),
404 /* Handle changes to the inferior list based on the type of fork,
405 which process is being followed, and whether the other process
406 should be detached. On entry inferior_ptid must be the ptid of
407 the fork parent. At return inferior_ptid is the ptid of the
408 followed inferior. */
411 follow_fork_inferior (bool follow_child
, bool detach_fork
)
413 target_waitkind fork_kind
= inferior_thread ()->pending_follow
.kind ();
414 gdb_assert (fork_kind
== TARGET_WAITKIND_FORKED
415 || fork_kind
== TARGET_WAITKIND_VFORKED
);
416 bool has_vforked
= fork_kind
== TARGET_WAITKIND_VFORKED
;
417 ptid_t parent_ptid
= inferior_ptid
;
418 ptid_t child_ptid
= inferior_thread ()->pending_follow
.child_ptid ();
421 && !non_stop
/* Non-stop always resumes both branches. */
422 && current_ui
->prompt_state
== PROMPT_BLOCKED
423 && !(follow_child
|| detach_fork
|| sched_multi
))
425 /* The parent stays blocked inside the vfork syscall until the
426 child execs or exits. If we don't let the child run, then
427 the parent stays blocked. If we're telling the parent to run
428 in the foreground, the user will not be able to ctrl-c to get
429 back the terminal, effectively hanging the debug session. */
430 gdb_printf (gdb_stderr
, _("\
431 Can not resume the parent process over vfork in the foreground while\n\
432 holding the child stopped. Try \"set detach-on-fork\" or \
433 \"set schedule-multiple\".\n"));
437 inferior
*parent_inf
= current_inferior ();
438 inferior
*child_inf
= nullptr;
440 gdb_assert (parent_inf
->thread_waiting_for_vfork_done
== nullptr);
444 /* Detach new forked process? */
447 /* Before detaching from the child, remove all breakpoints
448 from it. If we forked, then this has already been taken
449 care of by infrun.c. If we vforked however, any
450 breakpoint inserted in the parent is visible in the
451 child, even those added while stopped in a vfork
452 catchpoint. This will remove the breakpoints from the
453 parent also, but they'll be reinserted below. */
456 /* Keep breakpoints list in sync. */
457 remove_breakpoints_inf (current_inferior ());
460 if (print_inferior_events
)
462 /* Ensure that we have a process ptid. */
463 ptid_t process_ptid
= ptid_t (child_ptid
.pid ());
465 target_terminal::ours_for_output ();
466 gdb_printf (_("[Detaching after %s from child %s]\n"),
467 has_vforked
? "vfork" : "fork",
468 target_pid_to_str (process_ptid
).c_str ());
473 /* Add process to GDB's tables. */
474 child_inf
= add_inferior (child_ptid
.pid ());
476 child_inf
->attach_flag
= parent_inf
->attach_flag
;
477 copy_terminal_info (child_inf
, parent_inf
);
478 child_inf
->gdbarch
= parent_inf
->gdbarch
;
479 copy_inferior_target_desc_info (child_inf
, parent_inf
);
481 child_inf
->symfile_flags
= SYMFILE_NO_READ
;
483 /* If this is a vfork child, then the address-space is
484 shared with the parent. */
487 child_inf
->pspace
= parent_inf
->pspace
;
488 child_inf
->aspace
= parent_inf
->aspace
;
490 exec_on_vfork (child_inf
);
492 /* The parent will be frozen until the child is done
493 with the shared region. Keep track of the
495 child_inf
->vfork_parent
= parent_inf
;
496 child_inf
->pending_detach
= 0;
497 parent_inf
->vfork_child
= child_inf
;
498 parent_inf
->pending_detach
= 0;
502 child_inf
->aspace
= new_address_space ();
503 child_inf
->pspace
= new program_space (child_inf
->aspace
);
504 child_inf
->removable
= 1;
505 clone_program_space (child_inf
->pspace
, parent_inf
->pspace
);
511 /* If we detached from the child, then we have to be careful
512 to not insert breakpoints in the parent until the child
513 is done with the shared memory region. However, if we're
514 staying attached to the child, then we can and should
515 insert breakpoints, so that we can debug it. A
516 subsequent child exec or exit is enough to know when does
517 the child stops using the parent's address space. */
518 parent_inf
->thread_waiting_for_vfork_done
519 = detach_fork
? inferior_thread () : nullptr;
520 parent_inf
->pspace
->breakpoints_not_allowed
= detach_fork
;
525 /* Follow the child. */
527 if (print_inferior_events
)
529 std::string parent_pid
= target_pid_to_str (parent_ptid
);
530 std::string child_pid
= target_pid_to_str (child_ptid
);
532 target_terminal::ours_for_output ();
533 gdb_printf (_("[Attaching after %s %s to child %s]\n"),
535 has_vforked
? "vfork" : "fork",
539 /* Add the new inferior first, so that the target_detach below
540 doesn't unpush the target. */
542 child_inf
= add_inferior (child_ptid
.pid ());
544 child_inf
->attach_flag
= parent_inf
->attach_flag
;
545 copy_terminal_info (child_inf
, parent_inf
);
546 child_inf
->gdbarch
= parent_inf
->gdbarch
;
547 copy_inferior_target_desc_info (child_inf
, parent_inf
);
551 /* If this is a vfork child, then the address-space is shared
553 child_inf
->aspace
= parent_inf
->aspace
;
554 child_inf
->pspace
= parent_inf
->pspace
;
556 exec_on_vfork (child_inf
);
558 else if (detach_fork
)
560 /* We follow the child and detach from the parent: move the parent's
561 program space to the child. This simplifies some things, like
562 doing "next" over fork() and landing on the expected line in the
563 child (note, that is broken with "set detach-on-fork off").
565 Before assigning brand new spaces for the parent, remove
566 breakpoints from it: because the new pspace won't match
567 currently inserted locations, the normal detach procedure
568 wouldn't remove them, and we would leave them inserted when
570 remove_breakpoints_inf (parent_inf
);
572 child_inf
->aspace
= parent_inf
->aspace
;
573 child_inf
->pspace
= parent_inf
->pspace
;
574 parent_inf
->aspace
= new_address_space ();
575 parent_inf
->pspace
= new program_space (parent_inf
->aspace
);
576 clone_program_space (parent_inf
->pspace
, child_inf
->pspace
);
578 /* The parent inferior is still the current one, so keep things
580 set_current_program_space (parent_inf
->pspace
);
584 child_inf
->aspace
= new_address_space ();
585 child_inf
->pspace
= new program_space (child_inf
->aspace
);
586 child_inf
->removable
= 1;
587 child_inf
->symfile_flags
= SYMFILE_NO_READ
;
588 clone_program_space (child_inf
->pspace
, parent_inf
->pspace
);
592 gdb_assert (current_inferior () == parent_inf
);
594 /* If we are setting up an inferior for the child, target_follow_fork is
595 responsible for pushing the appropriate targets on the new inferior's
596 target stack and adding the initial thread (with ptid CHILD_PTID).
598 If we are not setting up an inferior for the child (because following
599 the parent and detach_fork is true), it is responsible for detaching
601 target_follow_fork (child_inf
, child_ptid
, fork_kind
, follow_child
,
604 /* target_follow_fork must leave the parent as the current inferior. If we
605 want to follow the child, we make it the current one below. */
606 gdb_assert (current_inferior () == parent_inf
);
608 /* If there is a child inferior, target_follow_fork must have created a thread
610 if (child_inf
!= nullptr)
611 gdb_assert (!child_inf
->thread_list
.empty ());
613 /* Clear the parent thread's pending follow field. Do this before calling
614 target_detach, so that the target can differentiate the two following
617 - We continue past a fork with "follow-fork-mode == child" &&
618 "detach-on-fork on", and therefore detach the parent. In that
619 case the target should not detach the fork child.
620 - We run to a fork catchpoint and the user types "detach". In that
621 case, the target should detach the fork child in addition to the
624 The former case will have pending_follow cleared, the later will have
625 pending_follow set. */
626 thread_info
*parent_thread
= find_thread_ptid (parent_inf
, parent_ptid
);
627 gdb_assert (parent_thread
!= nullptr);
628 parent_thread
->pending_follow
.set_spurious ();
630 /* Detach the parent if needed. */
633 /* If we're vforking, we want to hold on to the parent until
634 the child exits or execs. At child exec or exit time we
635 can remove the old breakpoints from the parent and detach
636 or resume debugging it. Otherwise, detach the parent now;
637 we'll want to reuse it's program/address spaces, but we
638 can't set them to the child before removing breakpoints
639 from the parent, otherwise, the breakpoints module could
640 decide to remove breakpoints from the wrong process (since
641 they'd be assigned to the same address space). */
645 gdb_assert (child_inf
->vfork_parent
== NULL
);
646 gdb_assert (parent_inf
->vfork_child
== NULL
);
647 child_inf
->vfork_parent
= parent_inf
;
648 child_inf
->pending_detach
= 0;
649 parent_inf
->vfork_child
= child_inf
;
650 parent_inf
->pending_detach
= detach_fork
;
652 else if (detach_fork
)
654 if (print_inferior_events
)
656 /* Ensure that we have a process ptid. */
657 ptid_t process_ptid
= ptid_t (parent_ptid
.pid ());
659 target_terminal::ours_for_output ();
660 gdb_printf (_("[Detaching after fork from "
662 target_pid_to_str (process_ptid
).c_str ());
665 target_detach (parent_inf
, 0);
669 /* If we ended up creating a new inferior, call post_create_inferior to inform
670 the various subcomponents. */
671 if (child_inf
!= nullptr)
673 /* If FOLLOW_CHILD, we leave CHILD_INF as the current inferior
674 (do not restore the parent as the current inferior). */
675 gdb::optional
<scoped_restore_current_thread
> maybe_restore
;
678 maybe_restore
.emplace ();
680 switch_to_thread (*child_inf
->threads ().begin ());
681 post_create_inferior (0);
687 /* Tell the target to follow the fork we're stopped at. Returns true
688 if the inferior should be resumed; false, if the target for some
689 reason decided it's best not to resume. */
694 bool follow_child
= (follow_fork_mode_string
== follow_fork_mode_child
);
695 bool should_resume
= true;
697 /* Copy user stepping state to the new inferior thread. FIXME: the
698 followed fork child thread should have a copy of most of the
699 parent thread structure's run control related fields, not just these.
700 Initialized to avoid "may be used uninitialized" warnings from gcc. */
701 struct breakpoint
*step_resume_breakpoint
= NULL
;
702 struct breakpoint
*exception_resume_breakpoint
= NULL
;
703 CORE_ADDR step_range_start
= 0;
704 CORE_ADDR step_range_end
= 0;
705 int current_line
= 0;
706 symtab
*current_symtab
= NULL
;
707 struct frame_id step_frame_id
= { 0 };
711 process_stratum_target
*wait_target
;
713 struct target_waitstatus wait_status
;
715 /* Get the last target status returned by target_wait(). */
716 get_last_target_status (&wait_target
, &wait_ptid
, &wait_status
);
718 /* If not stopped at a fork event, then there's nothing else to
720 if (wait_status
.kind () != TARGET_WAITKIND_FORKED
721 && wait_status
.kind () != TARGET_WAITKIND_VFORKED
)
724 /* Check if we switched over from WAIT_PTID, since the event was
726 if (wait_ptid
!= minus_one_ptid
727 && (current_inferior ()->process_target () != wait_target
728 || inferior_ptid
!= wait_ptid
))
730 /* We did. Switch back to WAIT_PTID thread, to tell the
731 target to follow it (in either direction). We'll
732 afterwards refuse to resume, and inform the user what
734 thread_info
*wait_thread
= find_thread_ptid (wait_target
, wait_ptid
);
735 switch_to_thread (wait_thread
);
736 should_resume
= false;
740 thread_info
*tp
= inferior_thread ();
742 /* If there were any forks/vforks that were caught and are now to be
743 followed, then do so now. */
744 switch (tp
->pending_follow
.kind ())
746 case TARGET_WAITKIND_FORKED
:
747 case TARGET_WAITKIND_VFORKED
:
749 ptid_t parent
, child
;
750 std::unique_ptr
<struct thread_fsm
> thread_fsm
;
752 /* If the user did a next/step, etc, over a fork call,
753 preserve the stepping state in the fork child. */
754 if (follow_child
&& should_resume
)
756 step_resume_breakpoint
= clone_momentary_breakpoint
757 (tp
->control
.step_resume_breakpoint
);
758 step_range_start
= tp
->control
.step_range_start
;
759 step_range_end
= tp
->control
.step_range_end
;
760 current_line
= tp
->current_line
;
761 current_symtab
= tp
->current_symtab
;
762 step_frame_id
= tp
->control
.step_frame_id
;
763 exception_resume_breakpoint
764 = clone_momentary_breakpoint (tp
->control
.exception_resume_breakpoint
);
765 thread_fsm
= tp
->release_thread_fsm ();
767 /* For now, delete the parent's sr breakpoint, otherwise,
768 parent/child sr breakpoints are considered duplicates,
769 and the child version will not be installed. Remove
770 this when the breakpoints module becomes aware of
771 inferiors and address spaces. */
772 delete_step_resume_breakpoint (tp
);
773 tp
->control
.step_range_start
= 0;
774 tp
->control
.step_range_end
= 0;
775 tp
->control
.step_frame_id
= null_frame_id
;
776 delete_exception_resume_breakpoint (tp
);
779 parent
= inferior_ptid
;
780 child
= tp
->pending_follow
.child_ptid ();
782 /* If handling a vfork, stop all the inferior's threads, they will be
783 restarted when the vfork shared region is complete. */
784 if (tp
->pending_follow
.kind () == TARGET_WAITKIND_VFORKED
785 && target_is_non_stop_p ())
786 stop_all_threads ("handling vfork", tp
->inf
);
788 process_stratum_target
*parent_targ
= tp
->inf
->process_target ();
789 /* Set up inferior(s) as specified by the caller, and tell the
790 target to do whatever is necessary to follow either parent
792 if (follow_fork_inferior (follow_child
, detach_fork
))
794 /* Target refused to follow, or there's some other reason
795 we shouldn't resume. */
800 /* This makes sure we don't try to apply the "Switched
801 over from WAIT_PID" logic above. */
802 nullify_last_target_wait_ptid ();
804 /* If we followed the child, switch to it... */
807 thread_info
*child_thr
= find_thread_ptid (parent_targ
, child
);
808 switch_to_thread (child_thr
);
810 /* ... and preserve the stepping state, in case the
811 user was stepping over the fork call. */
814 tp
= inferior_thread ();
815 tp
->control
.step_resume_breakpoint
816 = step_resume_breakpoint
;
817 tp
->control
.step_range_start
= step_range_start
;
818 tp
->control
.step_range_end
= step_range_end
;
819 tp
->current_line
= current_line
;
820 tp
->current_symtab
= current_symtab
;
821 tp
->control
.step_frame_id
= step_frame_id
;
822 tp
->control
.exception_resume_breakpoint
823 = exception_resume_breakpoint
;
824 tp
->set_thread_fsm (std::move (thread_fsm
));
828 /* If we get here, it was because we're trying to
829 resume from a fork catchpoint, but, the user
830 has switched threads away from the thread that
831 forked. In that case, the resume command
832 issued is most likely not applicable to the
833 child, so just warn, and refuse to resume. */
834 warning (_("Not resuming: switched threads "
835 "before following fork child."));
838 /* Reset breakpoints in the child as appropriate. */
839 follow_inferior_reset_breakpoints ();
844 case TARGET_WAITKIND_SPURIOUS
:
845 /* Nothing to follow. */
848 internal_error (__FILE__
, __LINE__
,
849 "Unexpected pending_follow.kind %d\n",
850 tp
->pending_follow
.kind ());
854 return should_resume
;
858 follow_inferior_reset_breakpoints (void)
860 struct thread_info
*tp
= inferior_thread ();
862 /* Was there a step_resume breakpoint? (There was if the user
863 did a "next" at the fork() call.) If so, explicitly reset its
864 thread number. Cloned step_resume breakpoints are disabled on
865 creation, so enable it here now that it is associated with the
868 step_resumes are a form of bp that are made to be per-thread.
869 Since we created the step_resume bp when the parent process
870 was being debugged, and now are switching to the child process,
871 from the breakpoint package's viewpoint, that's a switch of
872 "threads". We must update the bp's notion of which thread
873 it is for, or it'll be ignored when it triggers. */
875 if (tp
->control
.step_resume_breakpoint
)
877 breakpoint_re_set_thread (tp
->control
.step_resume_breakpoint
);
878 tp
->control
.step_resume_breakpoint
->loc
->enabled
= 1;
881 /* Treat exception_resume breakpoints like step_resume breakpoints. */
882 if (tp
->control
.exception_resume_breakpoint
)
884 breakpoint_re_set_thread (tp
->control
.exception_resume_breakpoint
);
885 tp
->control
.exception_resume_breakpoint
->loc
->enabled
= 1;
888 /* Reinsert all breakpoints in the child. The user may have set
889 breakpoints after catching the fork, in which case those
890 were never set in the child, but only in the parent. This makes
891 sure the inserted breakpoints match the breakpoint list. */
893 breakpoint_re_set ();
894 insert_breakpoints ();
897 /* The child has exited or execed: resume THREAD, a thread of the parent,
898 if it was meant to be executing. */
901 proceed_after_vfork_done (thread_info
*thread
)
903 if (thread
->state
== THREAD_RUNNING
904 && !thread
->executing ()
905 && !thread
->stop_requested
906 && thread
->stop_signal () == GDB_SIGNAL_0
)
908 infrun_debug_printf ("resuming vfork parent thread %s",
909 thread
->ptid
.to_string ().c_str ());
911 switch_to_thread (thread
);
912 clear_proceed_status (0);
913 proceed ((CORE_ADDR
) -1, GDB_SIGNAL_DEFAULT
);
917 /* Called whenever we notice an exec or exit event, to handle
918 detaching or resuming a vfork parent. */
921 handle_vfork_child_exec_or_exit (int exec
)
923 struct inferior
*inf
= current_inferior ();
925 if (inf
->vfork_parent
)
927 inferior
*resume_parent
= nullptr;
929 /* This exec or exit marks the end of the shared memory region
930 between the parent and the child. Break the bonds. */
931 inferior
*vfork_parent
= inf
->vfork_parent
;
932 inf
->vfork_parent
->vfork_child
= NULL
;
933 inf
->vfork_parent
= NULL
;
935 /* If the user wanted to detach from the parent, now is the
937 if (vfork_parent
->pending_detach
)
939 struct program_space
*pspace
;
940 struct address_space
*aspace
;
942 /* follow-fork child, detach-on-fork on. */
944 vfork_parent
->pending_detach
= 0;
946 scoped_restore_current_pspace_and_thread restore_thread
;
948 /* We're letting loose of the parent. */
949 thread_info
*tp
= any_live_thread_of_inferior (vfork_parent
);
950 switch_to_thread (tp
);
952 /* We're about to detach from the parent, which implicitly
953 removes breakpoints from its address space. There's a
954 catch here: we want to reuse the spaces for the child,
955 but, parent/child are still sharing the pspace at this
956 point, although the exec in reality makes the kernel give
957 the child a fresh set of new pages. The problem here is
958 that the breakpoints module being unaware of this, would
959 likely chose the child process to write to the parent
960 address space. Swapping the child temporarily away from
961 the spaces has the desired effect. Yes, this is "sort
964 pspace
= inf
->pspace
;
965 aspace
= inf
->aspace
;
969 if (print_inferior_events
)
972 = target_pid_to_str (ptid_t (vfork_parent
->pid
));
974 target_terminal::ours_for_output ();
978 gdb_printf (_("[Detaching vfork parent %s "
979 "after child exec]\n"), pidstr
.c_str ());
983 gdb_printf (_("[Detaching vfork parent %s "
984 "after child exit]\n"), pidstr
.c_str ());
988 target_detach (vfork_parent
, 0);
991 inf
->pspace
= pspace
;
992 inf
->aspace
= aspace
;
996 /* We're staying attached to the parent, so, really give the
997 child a new address space. */
998 inf
->pspace
= new program_space (maybe_new_address_space ());
999 inf
->aspace
= inf
->pspace
->aspace
;
1001 set_current_program_space (inf
->pspace
);
1003 resume_parent
= vfork_parent
;
1007 /* If this is a vfork child exiting, then the pspace and
1008 aspaces were shared with the parent. Since we're
1009 reporting the process exit, we'll be mourning all that is
1010 found in the address space, and switching to null_ptid,
1011 preparing to start a new inferior. But, since we don't
1012 want to clobber the parent's address/program spaces, we
1013 go ahead and create a new one for this exiting
1016 /* Switch to no-thread while running clone_program_space, so
1017 that clone_program_space doesn't want to read the
1018 selected frame of a dead process. */
1019 scoped_restore_current_thread restore_thread
;
1020 switch_to_no_thread ();
1022 inf
->pspace
= new program_space (maybe_new_address_space ());
1023 inf
->aspace
= inf
->pspace
->aspace
;
1024 set_current_program_space (inf
->pspace
);
1026 inf
->symfile_flags
= SYMFILE_NO_READ
;
1027 clone_program_space (inf
->pspace
, vfork_parent
->pspace
);
1029 resume_parent
= vfork_parent
;
1032 gdb_assert (current_program_space
== inf
->pspace
);
1034 if (non_stop
&& resume_parent
!= nullptr)
1036 /* If the user wanted the parent to be running, let it go
1038 scoped_restore_current_thread restore_thread
;
1040 infrun_debug_printf ("resuming vfork parent process %d",
1041 resume_parent
->pid
);
1043 for (thread_info
*thread
: resume_parent
->threads ())
1044 proceed_after_vfork_done (thread
);
/* Handle TARGET_WAITKIND_VFORK_DONE.

   EVENT_THREAD is the (parent's) thread that reported the vfork-done
   event.  Clears the inferior's wait-for-vfork-done state, re-allows
   breakpoints in its program space, and (on non-stop targets) restarts
   the threads that were held stopped during the shared-address-space
   window.  */

static void
handle_vfork_done (thread_info *event_thread)
{
  /* We only care about this event if inferior::thread_waiting_for_vfork_done is
     set, that is if we are waiting for a vfork child not under our control
     (because we detached it) to exec or exit.

     If an inferior has vforked and we are debugging the child, we don't use
     the vfork-done event to get notified about the end of the shared address
     space window.  We rely instead on the child's exec or exit event, and the
     inferior::vfork_{parent,child} fields are used instead.  See
     handle_vfork_child_exec_or_exit for that.  */
  if (event_thread->inf->thread_waiting_for_vfork_done == nullptr)
    {
      infrun_debug_printf ("not waiting for a vfork-done event");
      return;
    }

  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  /* We stopped all threads (other than the vforking thread) of the inferior in
     follow_fork and kept them stopped until now.  It should therefore not be
     possible for another thread to have reported a vfork during that window.
     If THREAD_WAITING_FOR_VFORK_DONE is set, it has to be the same thread whose
     vfork-done we are handling right now.  */
  gdb_assert (event_thread->inf->thread_waiting_for_vfork_done == event_thread);

  event_thread->inf->thread_waiting_for_vfork_done = nullptr;
  /* The shared-address-space window is over; breakpoints may be inserted
     again in this program space.  */
  event_thread->inf->pspace->breakpoints_not_allowed = 0;

  /* On non-stop targets, we stopped all the inferior's threads in follow_fork,
     resume them now.  On all-stop targets, everything that needs to be resumed
     will be when we resume the event thread.  */
  if (target_is_non_stop_p ())
    {
      /* restart_threads and start_step_over may change the current thread, make
         sure we leave the event thread as the current thread.  */
      scoped_restore_current_thread restore_thread;

      insert_breakpoints ();
      restart_threads (event_thread, event_thread->inf);
      start_step_over ();
    }
}
1096 /* Enum strings for "set|show follow-exec-mode". */
1098 static const char follow_exec_mode_new
[] = "new";
1099 static const char follow_exec_mode_same
[] = "same";
1100 static const char *const follow_exec_mode_names
[] =
1102 follow_exec_mode_new
,
1103 follow_exec_mode_same
,
1107 static const char *follow_exec_mode_string
= follow_exec_mode_same
;
/* "show follow-exec-mode" command callback: print VALUE (the current
   setting) to FILE.  FROM_TTY and C are unused, as is conventional for
   set/show callbacks.  */

static void
show_follow_exec_mode_string (struct ui_file *file, int from_tty,
                              struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Follow exec mode is \"%s\".\n"), value);
}
1115 /* EXEC_FILE_TARGET is assumed to be non-NULL. */
1118 follow_exec (ptid_t ptid
, const char *exec_file_target
)
1120 int pid
= ptid
.pid ();
1121 ptid_t process_ptid
;
1123 /* Switch terminal for any messages produced e.g. by
1124 breakpoint_re_set. */
1125 target_terminal::ours_for_output ();
1127 /* This is an exec event that we actually wish to pay attention to.
1128 Refresh our symbol table to the newly exec'd program, remove any
1129 momentary bp's, etc.
1131 If there are breakpoints, they aren't really inserted now,
1132 since the exec() transformed our inferior into a fresh set
1135 We want to preserve symbolic breakpoints on the list, since
1136 we have hopes that they can be reset after the new a.out's
1137 symbol table is read.
1139 However, any "raw" breakpoints must be removed from the list
1140 (e.g., the solib bp's), since their address is probably invalid
1143 And, we DON'T want to call delete_breakpoints() here, since
1144 that may write the bp's "shadow contents" (the instruction
1145 value that was overwritten with a TRAP instruction). Since
1146 we now have a new a.out, those shadow contents aren't valid. */
1148 mark_breakpoints_out ();
1150 /* The target reports the exec event to the main thread, even if
1151 some other thread does the exec, and even if the main thread was
1152 stopped or already gone. We may still have non-leader threads of
1153 the process on our list. E.g., on targets that don't have thread
1154 exit events (like remote); or on native Linux in non-stop mode if
1155 there were only two threads in the inferior and the non-leader
1156 one is the one that execs (and nothing forces an update of the
1157 thread list up to here). When debugging remotely, it's best to
1158 avoid extra traffic, when possible, so avoid syncing the thread
1159 list with the target, and instead go ahead and delete all threads
1160 of the process but one that reported the event. Note this must
1161 be done before calling update_breakpoints_after_exec, as
1162 otherwise clearing the threads' resources would reference stale
1163 thread breakpoints -- it may have been one of these threads that
1164 stepped across the exec. We could just clear their stepping
1165 states, but as long as we're iterating, might as well delete
1166 them. Deleting them now rather than at the next user-visible
1167 stop provides a nicer sequence of events for user and MI
1169 for (thread_info
*th
: all_threads_safe ())
1170 if (th
->ptid
.pid () == pid
&& th
->ptid
!= ptid
)
1173 /* We also need to clear any left over stale state for the
1174 leader/event thread. E.g., if there was any step-resume
1175 breakpoint or similar, it's gone now. We cannot truly
1176 step-to-next statement through an exec(). */
1177 thread_info
*th
= inferior_thread ();
1178 th
->control
.step_resume_breakpoint
= NULL
;
1179 th
->control
.exception_resume_breakpoint
= NULL
;
1180 th
->control
.single_step_breakpoints
= NULL
;
1181 th
->control
.step_range_start
= 0;
1182 th
->control
.step_range_end
= 0;
1184 /* The user may have had the main thread held stopped in the
1185 previous image (e.g., schedlock on, or non-stop). Release
1187 th
->stop_requested
= 0;
1189 update_breakpoints_after_exec ();
1191 /* What is this a.out's name? */
1192 process_ptid
= ptid_t (pid
);
1193 gdb_printf (_("%s is executing new program: %s\n"),
1194 target_pid_to_str (process_ptid
).c_str (),
1197 /* We've followed the inferior through an exec. Therefore, the
1198 inferior has essentially been killed & reborn. */
1200 breakpoint_init_inferior (inf_execd
);
1202 gdb::unique_xmalloc_ptr
<char> exec_file_host
1203 = exec_file_find (exec_file_target
, NULL
);
1205 /* If we were unable to map the executable target pathname onto a host
1206 pathname, tell the user that. Otherwise GDB's subsequent behavior
1207 is confusing. Maybe it would even be better to stop at this point
1208 so that the user can specify a file manually before continuing. */
1209 if (exec_file_host
== NULL
)
1210 warning (_("Could not load symbols for executable %s.\n"
1211 "Do you need \"set sysroot\"?"),
1214 /* Reset the shared library package. This ensures that we get a
1215 shlib event when the child reaches "_start", at which point the
1216 dld will have had a chance to initialize the child. */
1217 /* Also, loading a symbol file below may trigger symbol lookups, and
1218 we don't want those to be satisfied by the libraries of the
1219 previous incarnation of this process. */
1220 no_shared_libraries (NULL
, 0);
1222 struct inferior
*inf
= current_inferior ();
1224 if (follow_exec_mode_string
== follow_exec_mode_new
)
1226 /* The user wants to keep the old inferior and program spaces
1227 around. Create a new fresh one, and switch to it. */
1229 /* Do exit processing for the original inferior before setting the new
1230 inferior's pid. Having two inferiors with the same pid would confuse
1231 find_inferior_p(t)id. Transfer the terminal state and info from the
1232 old to the new inferior. */
1233 inferior
*new_inferior
= add_inferior_with_spaces ();
1235 swap_terminal_info (new_inferior
, inf
);
1236 exit_inferior_silent (inf
);
1238 new_inferior
->pid
= pid
;
1239 target_follow_exec (new_inferior
, ptid
, exec_file_target
);
1241 /* We continue with the new inferior. */
1246 /* The old description may no longer be fit for the new image.
1247 E.g, a 64-bit process exec'ed a 32-bit process. Clear the
1248 old description; we'll read a new one below. No need to do
1249 this on "follow-exec-mode new", as the old inferior stays
1250 around (its description is later cleared/refetched on
1252 target_clear_description ();
1253 target_follow_exec (inf
, ptid
, exec_file_target
);
1256 gdb_assert (current_inferior () == inf
);
1257 gdb_assert (current_program_space
== inf
->pspace
);
1259 /* Attempt to open the exec file. SYMFILE_DEFER_BP_RESET is used
1260 because the proper displacement for a PIE (Position Independent
1261 Executable) main symbol file will only be computed by
1262 solib_create_inferior_hook below. breakpoint_re_set would fail
1263 to insert the breakpoints with the zero displacement. */
1264 try_open_exec_file (exec_file_host
.get (), inf
, SYMFILE_DEFER_BP_RESET
);
1266 /* If the target can specify a description, read it. Must do this
1267 after flipping to the new executable (because the target supplied
1268 description must be compatible with the executable's
1269 architecture, and the old executable may e.g., be 32-bit, while
1270 the new one 64-bit), and before anything involving memory or
1272 target_find_description ();
1274 gdb::observers::inferior_execd
.notify (inf
);
1276 breakpoint_re_set ();
1278 /* Reinsert all breakpoints. (Those which were symbolic have
1279 been reset to the proper address in the new a.out, thanks
1280 to symbol_file_command...). */
1281 insert_breakpoints ();
1283 /* The next resume of this inferior should bring it to the shlib
1284 startup breakpoints. (If the user had also set bp's on
1285 "main" from the old (parent) process, then they'll auto-
1286 matically get reset there in the new process.). */
1289 /* The chain of threads that need to do a step-over operation to get
1290 past e.g., a breakpoint. What technique is used to step over the
1291 breakpoint/watchpoint does not matter -- all threads end up in the
1292 same queue, to maintain rough temporal order of execution, in order
1293 to avoid starvation, otherwise, we could e.g., find ourselves
1294 constantly stepping the same couple threads past their breakpoints
1295 over and over, if the single-step finishes fast enough. */
1296 thread_step_over_list global_thread_step_over_list
;
/* Bit flags indicating what the thread needs to step over.  */

enum step_over_what_flag
  {
    /* Step over a breakpoint.  */
    STEP_OVER_BREAKPOINT = 1,

    /* Step past a non-continuable watchpoint, in order to let the
       instruction execute so we can evaluate the watchpoint
       expression.  */
    STEP_OVER_WATCHPOINT = 2
  };
/* Make the flags combinable with the usual bitwise operators.  */
DEF_ENUM_FLAGS_TYPE (enum step_over_what_flag, step_over_what);
1312 /* Info about an instruction that is being stepped over. */
1314 struct step_over_info
1316 /* If we're stepping past a breakpoint, this is the address space
1317 and address of the instruction the breakpoint is set at. We'll
1318 skip inserting all breakpoints here. Valid iff ASPACE is
1320 const address_space
*aspace
= nullptr;
1321 CORE_ADDR address
= 0;
1323 /* The instruction being stepped over triggers a nonsteppable
1324 watchpoint. If true, we'll skip inserting watchpoints. */
1325 int nonsteppable_watchpoint_p
= 0;
1327 /* The thread's global number. */
1331 /* The step-over info of the location that is being stepped over.
1333 Note that with async/breakpoint always-inserted mode, a user might
1334 set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
1335 being stepped over. As setting a new breakpoint inserts all
1336 breakpoints, we need to make sure the breakpoint being stepped over
1337 isn't inserted then. We do that by only clearing the step-over
1338 info when the step-over is actually finished (or aborted).
1340 Presently GDB can only step over one breakpoint at any given time.
1341 Given threads that can't run code in the same address space as the
1342 breakpoint's can't really miss the breakpoint, GDB could be taught
1343 to step-over at most one breakpoint per address space (so this info
1344 could move to the address space object if/when GDB is extended).
1345 The set of breakpoints being stepped over will normally be much
1346 smaller than the set of all breakpoints, so a flag in the
1347 breakpoint location structure would be wasteful. A separate list
1348 also saves complexity and run-time, as otherwise we'd have to go
1349 through all breakpoint locations clearing their flag whenever we
1350 start a new sequence. Similar considerations weigh against storing
1351 this info in the thread object. Plus, not all step overs actually
1352 have breakpoint locations -- e.g., stepping past a single-step
1353 breakpoint, or stepping to complete a non-continuable
1355 static struct step_over_info step_over_info
;
/* Record the address of the breakpoint/instruction we're currently
   stepping over.

   N.B.  We record the aspace and address now, instead of say just the thread,
   because when we need the info later the thread may be running.  */

static void
set_step_over_info (const address_space *aspace, CORE_ADDR address,
                    int nonsteppable_watchpoint_p,
                    int thread)
{
  step_over_info.aspace = aspace;
  step_over_info.address = address;
  step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
  /* Global thread number of the thread doing the step-over.  */
  step_over_info.thread = thread;
}
/* Called when we're no longer stepping over a breakpoint / an
   instruction, so all breakpoints are free to be (re)inserted.  */

static void
clear_step_over_info (void)
{
  infrun_debug_printf ("clearing step over info");
  step_over_info.aspace = NULL;
  step_over_info.address = 0;
  step_over_info.nonsteppable_watchpoint_p = 0;
  /* -1 means "no thread is stepping over a breakpoint".  */
  step_over_info.thread = -1;
}
/* Return true if we're stepping over the instruction at ADDRESS in
   ASPACE, i.e., it matches the location recorded by
   set_step_over_info.  Used to decide which breakpoints to skip
   (re)inserting while the step-over is in flight.  */

int
stepping_past_instruction_at (struct address_space *aspace,
                              CORE_ADDR address)
{
  return (step_over_info.aspace != NULL
          && breakpoint_address_match (aspace, address,
                                       step_over_info.aspace,
                                       step_over_info.address));
}
1401 thread_is_stepping_over_breakpoint (int thread
)
1403 return (step_over_info
.thread
!= -1
1404 && thread
== step_over_info
.thread
);
/* Return true if the instruction currently being stepped over triggers
   a non-steppable watchpoint; watchpoint insertion is skipped while
   this holds.  */

int
stepping_past_nonsteppable_watchpoint (void)
{
  return step_over_info.nonsteppable_watchpoint_p;
}
/* Returns true if step-over info is valid.  A step-over is in progress
   either when an aspace/address pair was recorded, or when we're
   stepping past a non-steppable watchpoint.  */

static bool
step_over_info_valid_p (void)
{
  return (step_over_info.aspace != NULL
          || stepping_past_nonsteppable_watchpoint ());
}
1425 /* Displaced stepping. */
1427 /* In non-stop debugging mode, we must take special care to manage
1428 breakpoints properly; in particular, the traditional strategy for
1429 stepping a thread past a breakpoint it has hit is unsuitable.
1430 'Displaced stepping' is a tactic for stepping one thread past a
1431 breakpoint it has hit while ensuring that other threads running
1432 concurrently will hit the breakpoint as they should.
1434 The traditional way to step a thread T off a breakpoint in a
1435 multi-threaded program in all-stop mode is as follows:
1437 a0) Initially, all threads are stopped, and breakpoints are not
1439 a1) We single-step T, leaving breakpoints uninserted.
1440 a2) We insert breakpoints, and resume all threads.
1442 In non-stop debugging, however, this strategy is unsuitable: we
1443 don't want to have to stop all threads in the system in order to
1444 continue or step T past a breakpoint. Instead, we use displaced
1447 n0) Initially, T is stopped, other threads are running, and
1448 breakpoints are inserted.
1449 n1) We copy the instruction "under" the breakpoint to a separate
1450 location, outside the main code stream, making any adjustments
1451 to the instruction, register, and memory state as directed by
1453 n2) We single-step T over the instruction at its new location.
1454 n3) We adjust the resulting register and memory state as directed
1455 by T's architecture. This includes resetting T's PC to point
1456 back into the main instruction stream.
1459 This approach depends on the following gdbarch methods:
1461 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1462 indicate where to copy the instruction, and how much space must
1463 be reserved there. We use these in step n1.
1465 - gdbarch_displaced_step_copy_insn copies an instruction to a new
1466 address, and makes any necessary adjustments to the instruction,
1467 register contents, and memory. We use this in step n1.
1469 - gdbarch_displaced_step_fixup adjusts registers and memory after
1470 we have successfully single-stepped the instruction, to yield the
1471 same effect the instruction would have had if we had executed it
1472 at its original address. We use this in step n3.
1474 The gdbarch_displaced_step_copy_insn and
1475 gdbarch_displaced_step_fixup functions must be written so that
1476 copying an instruction with gdbarch_displaced_step_copy_insn,
1477 single-stepping across the copied instruction, and then applying
1478 gdbarch_displaced_step_fixup should have the same effects on the
1479 thread's memory and registers as stepping the instruction in place
1480 would have. Exactly which responsibilities fall to the copy and
1481 which fall to the fixup is up to the author of those functions.
1483 See the comments in gdbarch.sh for details.
1485 Note that displaced stepping and software single-step cannot
1486 currently be used in combination, although with some care I think
1487 they could be made to. Software single-step works by placing
1488 breakpoints on all possible subsequent instructions; if the
1489 displaced instruction is a PC-relative jump, those breakpoints
1490 could fall in very strange places --- on pages that aren't
1491 executable, or at addresses that are not proper instruction
1492 boundaries. (We do generally let other threads run while we wait
1493 to hit the software single-step breakpoint, and they might
1494 encounter such a corrupted instruction.) One way to work around
1495 this would be to have gdbarch_displaced_step_copy_insn fully
1496 simulate the effect of PC-relative instructions (and return NULL)
1497 on architectures that use software single-stepping.
1499 In non-stop mode, we can have independent and simultaneous step
1500 requests, so more than one thread may need to simultaneously step
1501 over a breakpoint. The current implementation assumes there is
1502 only one scratch space per process. In this case, we have to
1503 serialize access to the scratch space. If thread A wants to step
1504 over a breakpoint, but we are currently waiting for some other
1505 thread to complete a displaced step, we leave thread A stopped and
1506 place it in the displaced_step_request_queue. Whenever a displaced
1507 step finishes, we pick the next thread in the queue and start a new
1508 displaced step operation on it. See displaced_step_prepare and
1509 displaced_step_finish for details. */
1511 /* Return true if THREAD is doing a displaced step. */
1514 displaced_step_in_progress_thread (thread_info
*thread
)
1516 gdb_assert (thread
!= NULL
);
1518 return thread
->displaced_step_state
.in_progress ();
1521 /* Return true if INF has a thread doing a displaced step. */
1524 displaced_step_in_progress (inferior
*inf
)
1526 return inf
->displaced_step_state
.in_progress_count
> 0;
/* Return true if any thread is doing a displaced step.  */

static bool
displaced_step_in_progress_any_thread ()
{
  /* Scan every live inferior; a per-inferior count tracks in-flight
     displaced steps.  */
  for (inferior *inf : all_non_exited_inferiors ())
    {
      if (displaced_step_in_progress (inf))
        return true;
    }

  return false;
}
/* Reset infrun's per-inferior state when INF exits: any displaced-step
   bookkeeping and any pending wait for a vfork-done event are moot once
   the process is gone.  */

static void
infrun_inferior_exit (struct inferior *inf)
{
  inf->displaced_step_state.reset ();
  inf->thread_waiting_for_vfork_done = nullptr;
}
/* Reset infrun's per-inferior state when INF execs.  */

static void
infrun_inferior_execd (inferior *inf)
{
  /* If some threads were doing a displaced step in this inferior at the
     moment of the exec, they no longer exist.  Even if the exec'ing thread
     was doing a displaced step, we don't want to do any fixup nor restore
     displaced stepping buffer bytes.  */
  inf->displaced_step_state.reset ();

  for (thread_info *thread : inf->threads ())
    thread->displaced_step_state.reset ();

  /* Since an in-line step is done with everything else stopped, if there was
     one in progress at the time of the exec, it must have been the exec'ing
     thread.  */
  clear_step_over_info ();

  /* The exec ends the shared-address-space window, so stop waiting for
     a vfork-done event.  */
  inf->thread_waiting_for_vfork_done = nullptr;
}
1570 /* If ON, and the architecture supports it, GDB will use displaced
1571 stepping to step over breakpoints. If OFF, or if the architecture
1572 doesn't support it, GDB will instead use the traditional
1573 hold-and-step approach. If AUTO (which is the default), GDB will
1574 decide which technique to use to step over breakpoints depending on
1575 whether the target works in a non-stop way (see use_displaced_stepping). */
1577 static enum auto_boolean can_use_displaced_stepping
= AUTO_BOOLEAN_AUTO
;
/* "show displaced-stepping" command callback: print VALUE to FILE.
   When the setting is "auto", also show what it currently resolves to,
   which depends on whether the target is non-stop.  */

static void
show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
                                 struct cmd_list_element *c,
                                 const char *value)
{
  if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
    gdb_printf (file,
                _("Debugger's willingness to use displaced stepping "
                  "to step over breakpoints is %s (currently %s).\n"),
                value, target_is_non_stop_p () ? "on" : "off");
  else
    gdb_printf (file,
                _("Debugger's willingness to use displaced stepping "
                  "to step over breakpoints is %s.\n"), value);
}
1595 /* Return true if the gdbarch implements the required methods to use
1596 displaced stepping. */
1599 gdbarch_supports_displaced_stepping (gdbarch
*arch
)
1601 /* Only check for the presence of `prepare`. The gdbarch verification ensures
1602 that if `prepare` is provided, so is `finish`. */
1603 return gdbarch_displaced_step_prepare_p (arch
);
/* Return non-zero if displaced stepping can/should be used to step
   over breakpoints of thread TP.  */

static bool
use_displaced_stepping (thread_info *tp)
{
  /* If the user disabled it explicitly, don't use displaced stepping.  */
  if (can_use_displaced_stepping == AUTO_BOOLEAN_FALSE)
    return false;

  /* If "auto", only use displaced stepping if the target operates in a non-stop
     way.  */
  if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO
      && !target_is_non_stop_p ())
    return false;

  gdbarch *gdbarch = get_thread_regcache (tp)->arch ();

  /* If the architecture doesn't implement displaced stepping, don't use
     it.  */
  if (!gdbarch_supports_displaced_stepping (gdbarch))
    return false;

  /* If recording, don't use displaced stepping.  */
  if (find_record_target () != nullptr)
    return false;

  /* If displaced stepping failed before for this inferior, don't bother
     trying again.  */
  if (tp->inf->displaced_step_state.failed_before)
    return false;

  return true;
}
/* Simple function wrapper around displaced_step_thread_state::reset,
   so it can be used as the callback of a forward-scope-exit cleanup
   (see displaced_step_reset_cleanup below).  */

static void
displaced_step_reset (displaced_step_thread_state *displaced)
{
  displaced->reset ();
}
1649 /* A cleanup that wraps displaced_step_reset. We use this instead of, say,
1650 SCOPE_EXIT, because it needs to be discardable with "cleanup.release ()". */
1652 using displaced_step_reset_cleanup
= FORWARD_SCOPE_EXIT (displaced_step_reset
);
/* Return a string of LEN bytes from BUF rendered in hex, two digits per
   byte, separated by single spaces (e.g. "0f 05").  Used for displaced
   stepping debug output.  */

std::string
displaced_step_dump_bytes (const gdb_byte *buf, size_t len)
{
  std::string ret;

  for (size_t i = 0; i < len; i++)
    {
      if (i == 0)
        ret += string_printf ("%02x", buf[i]);
      else
        ret += string_printf (" %02x", buf[i]);
    }

  return ret;
}
/* Prepare to single-step, using displaced stepping.

   Note that we cannot use displaced stepping when we have a signal to
   deliver.  If we have a signal to deliver and an instruction to step
   over, then after the step, there will be no indication from the
   target whether the thread entered a signal handler or ignored the
   signal and stepped over the instruction successfully --- both cases
   result in a simple SIGTRAP.  In the first case we mustn't do a
   fixup, and in the second case we must --- but we can't tell which.
   Comments in the code for 'random signals' in handle_inferior_event
   explain how we handle this case instead.

   Returns DISPLACED_STEP_PREPARE_STATUS_OK if preparing was successful -- this
   thread is going to be stepped now; DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
   if displaced stepping this thread got queued; or
   DISPLACED_STEP_PREPARE_STATUS_CANT if this instruction can't be displaced
   stepped.  */

static displaced_step_prepare_status
displaced_step_prepare_throw (thread_info *tp)
{
  regcache *regcache = get_thread_regcache (tp);
  struct gdbarch *gdbarch = regcache->arch ();
  displaced_step_thread_state &disp_step_thread_state
    = tp->displaced_step_state;

  /* We should never reach this function if the architecture does not
     support displaced stepping.  */
  gdb_assert (gdbarch_supports_displaced_stepping (gdbarch));

  /* Nor if the thread isn't meant to step over a breakpoint.  */
  gdb_assert (tp->control.trap_expected);

  /* Disable range stepping while executing in the scratch pad.  We
     want a single-step even if executing the displaced instruction in
     the scratch buffer lands within the stepping range (e.g., a
     jump/branch).  */
  tp->control.may_range_step = 0;

  /* We are about to start a displaced step for this thread.  If one is already
     in progress, something's wrong.  */
  gdb_assert (!disp_step_thread_state.in_progress ());

  if (tp->inf->displaced_step_state.unavailable)
    {
      /* The gdbarch tells us it's not worth asking to try a prepare because
         it is likely that it will return unavailable, so don't bother
         asking.  */

      displaced_debug_printf ("deferring step of %s",
                              tp->ptid.to_string ().c_str ());

      global_thread_step_over_chain_enqueue (tp);
      return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE;
    }

  displaced_debug_printf ("displaced-stepping %s now",
                          tp->ptid.to_string ().c_str ());

  /* The prepare below may need to read memory/registers of TP; make
     sure TP is the current thread while doing so, and restore the
     previous selection afterwards.  */
  scoped_restore_current_thread restore_thread;

  switch_to_thread (tp);

  CORE_ADDR original_pc = regcache_read_pc (regcache);
  CORE_ADDR displaced_pc;

  /* Ask the architecture to copy the instruction to the scratch buffer
     and point the thread at it; DISPLACED_PC receives the buffer
     address on success.  */
  displaced_step_prepare_status status
    = gdbarch_displaced_step_prepare (gdbarch, tp, displaced_pc);

  if (status == DISPLACED_STEP_PREPARE_STATUS_CANT)
    {
      displaced_debug_printf ("failed to prepare (%s)",
                              tp->ptid.to_string ().c_str ());

      return DISPLACED_STEP_PREPARE_STATUS_CANT;
    }
  else if (status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
    {
      /* Not enough displaced stepping resources available, defer this
         request by placing it the queue.  */

      displaced_debug_printf ("not enough resources available, "
                              "deferring step of %s",
                              tp->ptid.to_string ().c_str ());

      global_thread_step_over_chain_enqueue (tp);

      return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE;
    }

  gdb_assert (status == DISPLACED_STEP_PREPARE_STATUS_OK);

  /* Save the information we need to fix things up if the step
     succeeds.  */
  disp_step_thread_state.set (gdbarch);

  tp->inf->displaced_step_state.in_progress_count++;

  displaced_debug_printf ("prepared successfully thread=%s, "
                          "original_pc=%s, displaced_pc=%s",
                          tp->ptid.to_string ().c_str (),
                          paddress (gdbarch, original_pc),
                          paddress (gdbarch, displaced_pc));

  return DISPLACED_STEP_PREPARE_STATUS_OK;
}
/* Wrapper for displaced_step_prepare_throw that disables further
   attempts at displaced stepping if we get a memory error.  */

static displaced_step_prepare_status
displaced_step_prepare (thread_info *thread)
{
  displaced_step_prepare_status status
    = DISPLACED_STEP_PREPARE_STATUS_CANT;

  try
    {
      status = displaced_step_prepare_throw (thread);
    }
  catch (const gdb_exception_error &ex)
    {
      /* Only memory / not-supported errors mean "give up on displaced
         stepping for this inferior"; anything else propagates.  */
      if (ex.error != MEMORY_ERROR
          && ex.error != NOT_SUPPORTED_ERROR)
        throw;

      infrun_debug_printf ("caught exception, disabling displaced stepping: %s",
                           ex.what ());

      /* Be verbose if "set displaced-stepping" is "on", silent if
         "auto".  */
      if (can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
        {
          warning (_("disabling displaced stepping: %s"),
                   ex.what ());
        }

      /* Disable further displaced stepping attempts.  */
      thread->inf->displaced_step_state.failed_before = 1;
    }

  return status;
}
/* If we displaced stepped an instruction successfully, adjust registers and
   memory to yield the same effect the instruction would have had if we had
   executed it at its original address, and return
   DISPLACED_STEP_FINISH_STATUS_OK.  If the instruction didn't complete,
   relocate the PC and return DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED.

   If the thread wasn't displaced stepping, return
   DISPLACED_STEP_FINISH_STATUS_OK as well.

   SIGNAL is the signal the thread stopped with, forwarded to the
   architecture's finish hook.  */

static displaced_step_finish_status
displaced_step_finish (thread_info *event_thread, enum gdb_signal signal)
{
  displaced_step_thread_state *displaced = &event_thread->displaced_step_state;

  /* Was this thread performing a displaced step?  */
  if (!displaced->in_progress ())
    return DISPLACED_STEP_FINISH_STATUS_OK;

  /* Keep the inferior-wide in-flight count in sync with the per-thread
     state.  */
  gdb_assert (event_thread->inf->displaced_step_state.in_progress_count > 0);
  event_thread->inf->displaced_step_state.in_progress_count--;

  /* Fixup may need to read memory/registers.  Switch to the thread
     that we're fixing up.  Also, target_stopped_by_watchpoint checks
     the current thread, and displaced_step_restore performs ptid-dependent
     memory accesses using current_inferior().  */
  switch_to_thread (event_thread);

  /* Reset the per-thread displaced-step state on every exit path
     (including exceptions thrown by the finish hook).  */
  displaced_step_reset_cleanup cleanup (displaced);

  /* Do the fixup, and release the resources acquired to do the displaced
     step.  */
  return gdbarch_displaced_step_finish (displaced->get_original_gdbarch (),
                                        event_thread, signal);
}
1850 /* Data to be passed around while handling an event. This data is
1851 discarded between events. */
1852 struct execution_control_state
1854 execution_control_state ()
1861 this->target
= nullptr;
1862 this->ptid
= null_ptid
;
1863 this->event_thread
= nullptr;
1864 ws
= target_waitstatus ();
1865 stop_func_filled_in
= 0;
1866 stop_func_start
= 0;
1868 stop_func_name
= nullptr;
1870 hit_singlestep_breakpoint
= 0;
1873 process_stratum_target
*target
;
1875 /* The thread that got the event, if this was a thread event; NULL
1877 struct thread_info
*event_thread
;
1879 struct target_waitstatus ws
;
1880 int stop_func_filled_in
;
1881 CORE_ADDR stop_func_start
;
1882 CORE_ADDR stop_func_end
;
1883 const char *stop_func_name
;
1886 /* True if the event thread hit the single-step breakpoint of
1887 another thread. Thus the event doesn't cause a stop, the thread
1888 needs to be single-stepped past the single-step breakpoint before
1889 we can switch back to the original stepping thread. */
1890 int hit_singlestep_breakpoint
;
/* Clear ECS and set it to point at TP.  */

static void
reset_ecs (struct execution_control_state *ecs, struct thread_info *tp)
{
  ecs->reset ();
  ecs->event_thread = tp;
  ecs->ptid = tp->ptid;
}
1903 static void keep_going_pass_signal (struct execution_control_state
*ecs
);
1904 static void prepare_to_wait (struct execution_control_state
*ecs
);
1905 static bool keep_going_stepped_thread (struct thread_info
*tp
);
1906 static step_over_what
thread_still_needs_step_over (struct thread_info
*tp
);
/* NOTE(review): extraction has dropped lines throughout this function
   (gaps in the embedded original-line numbering, e.g. 1910-1911,
   1919-1920, 1999-2001); the text below is preserved byte-for-byte.
   Drives all pending step-over requests it can start right now:
   steals the global step-over queue, tries displaced stepping per
   thread, and falls back to (or defers to) in-line stepping.  */
1908 /* Are there any pending step-over requests?  If so, run all we can
1909    now and return true.  Otherwise, return false.  */
1912 start_step_over (void)
1914   INFRUN_SCOPED_DEBUG_ENTER_EXIT
;
1916   /* Don't start a new step-over if we already have an in-line
1917      step-over operation ongoing.  */
1918   if (step_over_info_valid_p ())
1921   /* Steal the global thread step over chain.  As we try to initiate displaced
1922      steps, threads will be enqueued in the global chain if no buffers are
1923      available.  If we iterated on the global chain directly, we might iterate
  /* Take ownership of the whole queue in one move; threads not started
     below are re-enqueued globally on scope exit.  */
1925   thread_step_over_list threads_to_step
1926     = std::move (global_thread_step_over_list
);
1928   infrun_debug_printf ("stealing global queue of threads to step, length = %d",
1929 		       thread_step_over_chain_length (threads_to_step
));
1931   bool started
 = false;
1933   /* On scope exit (whatever the reason, return or exception), if there are
1934      threads left in the THREADS_TO_STEP chain, put back these threads in the
1938     if (threads_to_step
.empty ())
1939       infrun_debug_printf ("step-over queue now empty");
1942 	infrun_debug_printf ("putting back %d threads to step in global queue",
1943 			     thread_step_over_chain_length (threads_to_step
));
1945 	global_thread_step_over_chain_enqueue_chain
1946 	  (std::move (threads_to_step
));
  /* A "safe" range: the loop erases the current element while
     iterating.  */
1950   thread_step_over_list_safe_range range
1951     = make_thread_step_over_list_safe_range (threads_to_step
);
1953   for (thread_info
 *tp
 : range
)
1955       struct execution_control_state ecss
;
1956       struct execution_control_state
 *ecs
 = &ecss
;
1957       step_over_what step_what
;
1958       int must_be_in_line
;
1960       gdb_assert (!tp
->stop_requested
);
1962       if (tp
->inf
->displaced_step_state
.unavailable
)
1964 	  /* The arch told us to not even try preparing another displaced step
1965 	     for this inferior.  Just leave the thread in THREADS_TO_STEP, it
1966 	     will get moved to the global chain on scope exit.  */
1970       if (tp
->inf
->thread_waiting_for_vfork_done
 != nullptr)
1972 	  /* When we stop all threads, handling a vfork, any thread in the step
1973 	     over chain remains there.  A user could also try to continue a
1974 	     thread stopped at a breakpoint while another thread is waiting for
1975 	     a vfork-done event.  In any case, we don't want to start a step
1980       /* Remove thread from the THREADS_TO_STEP chain.  If anything goes wrong
1981 	 while we try to prepare the displaced step, we don't add it back to
1982 	 the global step over chain.  This is to avoid a thread staying in the
1983 	 step over chain indefinitely if something goes wrong when resuming it
1984 	 If the error is intermittent and it still needs a step over, it will
1985 	 get enqueued again when we try to resume it normally.  */
1986       threads_to_step
.erase (threads_to_step
.iterator_to (*tp
));
1988       step_what
 = thread_still_needs_step_over (tp
);
1989       must_be_in_line
 = ((step_what
 & STEP_OVER_WATCHPOINT
)
1990 			 || ((step_what
 & STEP_OVER_BREAKPOINT
)
1991 			     && !use_displaced_stepping (tp
)));
1993       /* We currently stop all threads of all processes to step-over
1994 	 in-line.  If we need to start a new in-line step-over, let
1995 	 any pending displaced steps finish first.  */
1996       if (must_be_in_line
 && displaced_step_in_progress_any_thread ())
1998 	  global_thread_step_over_chain_enqueue (tp
);
      /* Sanity check: a thread queued for a step-over must not already
	 be mid-step-over or running.  */
2002       if (tp
->control
.trap_expected
2004 	  || tp
->executing ())
2006 	internal_error (__FILE__
, __LINE__
,
2007 			"[%s] has inconsistent state: "
2008 			"trap_expected=%d, resumed=%d, executing=%d\n",
2009 			tp
->ptid
.to_string ().c_str (),
2010 			tp
->control
.trap_expected
,
2015       infrun_debug_printf ("resuming [%s] for step-over",
2016 			   tp
->ptid
.to_string ().c_str ());
2018       /* keep_going_pass_signal skips the step-over if the breakpoint
2019 	 is no longer inserted.  In all-stop, we want to keep looking
2020 	 for a thread that needs a step-over instead of resuming TP,
2021 	 because we wouldn't be able to resume anything else until the
2022 	 target stops again.  In non-stop, the resume always resumes
2023 	 only TP, so it's OK to let the thread resume freely.  */
2024       if (!target_is_non_stop_p () && !step_what
)
2027       switch_to_thread (tp
);
2028       reset_ecs (ecs
, tp
);
2029       keep_going_pass_signal (ecs
);
2031       if (!ecs
->wait_some_more
)
2032 	error (_("Command aborted."));
2034       /* If the thread's step over could not be initiated because no buffers
2035 	 were available, it was re-added to the global step over chain.  */
2038 	  infrun_debug_printf ("[%s] was resumed.",
2039 			       tp
->ptid
.to_string ().c_str ());
2040 	  gdb_assert (!thread_is_in_step_over_chain (tp
));
2044 	  infrun_debug_printf ("[%s] was NOT resumed.",
2045 			       tp
->ptid
.to_string ().c_str ());
2046 	  gdb_assert (thread_is_in_step_over_chain (tp
));
2049       /* If we started a new in-line step-over, we're done.  */
2050       if (step_over_info_valid_p ())
2052 	  gdb_assert (tp
->control
.trap_expected
);
2057       if (!target_is_non_stop_p ())
2059 	  /* On all-stop, shouldn't have resumed unless we needed a
2061 	  gdb_assert (tp
->control
.trap_expected
2062 		      || tp
->step_after_step_resume_breakpoint
);
2064 	  /* With remote targets (at least), in all-stop, we can't
2065 	     issue any further remote commands until the program stops
2071       /* Either the thread no longer needed a step-over, or a new
2072 	 displaced stepping sequence started.  Even in the latter
2073 	 case, continue looking.  Maybe we can also start another
2074 	 displaced step on a thread of other process.  */
2080 /* Update global variables holding ptids to hold NEW_PTID if they were
2081    holding OLD_PTID.  */
/* Observer callback: only INFERIOR_PTID is visibly updated here, and
   only when it matches both OLD_PTID and the changing TARGET (so a
   same-valued ptid on another target is left alone).
   NOTE(review): declaration line(s) before "2083" are not visible in
   this extraction; preserved verbatim.  */
2083 infrun_thread_ptid_changed (process_stratum_target
 *target
,
2084 			    ptid_t old_ptid
, ptid_t new_ptid
)
2086   if (inferior_ptid
 == old_ptid
2087       && current_inferior ()->process_target () == target
)
2088     inferior_ptid
 = new_ptid
;
/* Possible values of the "scheduler-locking" user setting; the string
   constants double as enum-like identity tokens (compared by pointer
   elsewhere, e.g. scheduler_mode == schedlock_on).  */
2093 static const char schedlock_off
[] = "off";
2094 static const char schedlock_on
[] = "on";
2095 static const char schedlock_step
[] = "step";
2096 static const char schedlock_replay
[] = "replay";
/* NOTE(review): the initializer list's elements (original lines
   2098-2103) are missing from this extraction.  */
2097 static const char *const scheduler_enums
[] = {
/* Current scheduler-locking mode; defaults to "replay".  */
2104 static const char *scheduler_mode
 = schedlock_replay
;
/* "show scheduler-locking" callback: prints the current mode.
   NOTE(review): the printing call's opening line (original ~2109) is
   missing from this extraction; fragments preserved verbatim.  */
2106 show_scheduler_mode (struct ui_file
 *file
, int from_tty
,
2107 		     struct cmd_list_element
 *c
, const char *value
)
2110 		    _("Mode for locking scheduler "
2111 		      "during execution is \"%s\".\n"),
/* "set scheduler-locking" callback: if the target cannot lock the
   scheduler, revert the mode to "off" before raising the error, so
   the stored setting stays consistent with what the target supports.  */
2116 set_schedlock_func (const char *args
, int from_tty
, struct cmd_list_element
 *c
)
2118   if (!target_can_lock_scheduler ())
2120       scheduler_mode
 = schedlock_off
;
2121       error (_("Target '%s' cannot support this command."),
2122 	     target_shortname ());
2126 /* True if execution commands resume all threads of all processes by
2127    default; otherwise, resume only threads of the current inferior
/* Backing storage for the "schedule-multiple" setting; off by default.  */
2129 bool sched_multi
 = false;
2131 /* Try to setup for software single stepping.  Return true if target_resume()
2132    should use hardware single step.
2134    GDBARCH the current gdbarch.  */
/* Software single-step is only attempted when executing forward and the
   arch provides it; on success (breakpoints inserted) hardware stepping
   is not needed.  NOTE(review): the "return hw_step;" line is missing
   from this extraction.  */
2137 maybe_software_singlestep (struct gdbarch
 *gdbarch
)
2139   bool hw_step
 = true;
2141   if (execution_direction
 == EXEC_FORWARD
2142       && gdbarch_software_single_step_p (gdbarch
))
2143     hw_step
 = !insert_single_step_breakpoints (gdbarch
);
/* Compute the set of threads an execution command resumes, from the
   user's perspective.  STEP is whether this is a stepping command
   (matters for "scheduler-locking step").  Selection order: non-stop
   -> solo thread; schedlock on/step -> solo thread; schedlock replay
   while replaying -> solo thread; single-process scheduling -> whole
   current process; otherwise all threads everywhere.
   NOTE(review): declaration/return lines are missing from this
   extraction; fragments preserved verbatim.  */
2151 user_visible_resume_ptid (int step
)
2157       /* With non-stop mode on, threads are always handled
2159       resume_ptid
 = inferior_ptid
;
2161   else if ((scheduler_mode
 == schedlock_on
)
2162 	   || (scheduler_mode
 == schedlock_step
 && step
))
2164       /* User-settable 'scheduler' mode requires solo thread
2166       resume_ptid
 = inferior_ptid
;
2168   else if ((scheduler_mode
 == schedlock_replay
)
2169 	   && target_record_will_replay (minus_one_ptid
, execution_direction
))
2171       /* User-settable 'scheduler' mode requires solo thread resume in replay
2173       resume_ptid
 = inferior_ptid
;
2175   else if (!sched_multi
 && target_supports_multi_process ())
2177       /* Resume all threads of the current process (and none of other
2179       resume_ptid
 = ptid_t (inferior_ptid
.pid ());
2183       /* Resume all threads of all processes.  */
2184       resume_ptid
 = RESUME_ALL
;
/* Return the process-stratum target RESUME_PTID applies to: when the
   wildcard ptid is combined with schedule-multiple, the resumption
   spans all targets (NOTE(review): the ternary's middle operand,
   original line 2196, is missing from this extraction — presumably
   nullptr meaning "all targets"; verify against upstream); otherwise
   it is the current inferior's target.  */
2192 process_stratum_target
 *
2193 user_visible_resume_target (ptid_t resume_ptid
)
2195   return (resume_ptid
 == minus_one_ptid
 && sched_multi
2197 	  : current_inferior ()->process_target ());
2200 /* Return a ptid representing the set of threads that we will resume,
2201    in the perspective of the target, assuming run control handling
2202    does not require leaving some threads stopped (e.g., stepping past
2203    breakpoint).  USER_STEP indicates whether we're about to start the
2204    target for a stepping command.  */
2207 internal_resume_ptid (int user_step
)
2209   /* In non-stop, we always control threads individually.  Note that
2210      the target may always work in non-stop mode even with "set
2211      non-stop off", in which case user_visible_resume_ptid could
2212      return a wildcard ptid.  */
2213   if (target_is_non_stop_p ())
2214     return inferior_ptid
;
2216   /* The rest of the function assumes non-stop==off and
2217      target-non-stop==off.
2219      If a thread is waiting for a vfork-done event, it means breakpoints are out
2220      for this inferior (well, program space in fact).  We don't want to resume
2221      any thread other than the one waiting for vfork done, otherwise these other
2222      threads could miss breakpoints.  So if a thread in the resumption set is
2223      waiting for a vfork-done event, resume only that thread.
2225      The resumption set width depends on whether schedule-multiple is on or off.
2227      Note that if the target_resume interface was more flexible, we could be
2228      smarter here when schedule-multiple is on.  For example, imagine 3
2229      inferiors with 2 threads each (1.1, 1.2, 2.1, 2.2, 3.1 and 3.2).  Threads
2230      2.1 and 3.2 are both waiting for a vfork-done event.  Then we could ask the
2231      target(s) to resume:
2233      - All threads of inferior 1
2237      Since we don't have that flexibility (we can only pass one ptid), just
2238      resume the first thread waiting for a vfork-done event we find (e.g. thread
  /* NOTE(review): the guard before this loop (which of the schedule-
     multiple branches it belongs to) is partly missing from this
     extraction; fragments preserved verbatim.  */
2242       for (inferior
 *inf
 : all_non_exited_inferiors ())
2243 	if (inf
->thread_waiting_for_vfork_done
 != nullptr)
2244 	  return inf
->thread_waiting_for_vfork_done
->ptid
;
2246   else if (current_inferior ()->thread_waiting_for_vfork_done
 != nullptr)
2247     return current_inferior ()->thread_waiting_for_vfork_done
->ptid
;
2249   return user_visible_resume_ptid (user_step
);
2252 /* Wrapper for target_resume, that handles infrun-specific
/* Performs the infrun bookkeeping around the actual target_resume call:
   give the inferior the terminal, clear the thread's stop signal,
   choose the pass-signals set, then resume RESUME_PTID with STEP/SIG.  */
2256 do_target_resume (ptid_t resume_ptid
, bool step
, enum gdb_signal sig
)
2258   struct thread_info
 *tp
 = inferior_thread ();
2260   gdb_assert (!tp
->stop_requested
);
2262   /* Install inferior's terminal modes.  */
2263   target_terminal::inferior ();
2265   /* Avoid confusing the next resume, if the next stop/resume
2266      happens to apply to another thread.  */
2267   tp
->set_stop_signal (GDB_SIGNAL_0
);
2269   /* Advise target which signals may be handled silently.
2271      If we have removed breakpoints because we are stepping over one
2272      in-line (in any thread), we need to receive all signals to avoid
2273      accidentally skipping a breakpoint during execution of a signal
2276      Likewise if we're displaced stepping, otherwise a trap for a
2277      breakpoint in a signal handler might be confused with the
2278      displaced step finishing.  We don't make the displaced_step_finish
2279      step distinguish the cases instead, because:
2281      - a backtrace while stopped in the signal handler would show the
2282        scratch pad as frame older than the signal handler, instead of
2283        the real mainline code.
2285      - when the thread is later resumed, the signal handler would
2286        return to the scratch pad area, which would no longer be
2288   if (step_over_info_valid_p ()
2289       || displaced_step_in_progress (tp
->inf
))
2290     target_pass_signals ({});
  /* Normal case: pass the user-configured set of signals silently.  */
2292     target_pass_signals (signal_pass
);
2294   infrun_debug_printf ("resume_ptid=%s, step=%d, sig=%s",
2295 		       resume_ptid
.to_string ().c_str (),
2296 		       step
, gdb_signal_to_symbol_string (sig
));
2298   target_resume (resume_ptid
, step
, sig
);
2301 /* Resume the inferior.  SIG is the signal to give the inferior
2302    (GDB_SIGNAL_0 for none).  Note: don't call this directly; instead
2303    call 'resume', which handles exceptions.  */
/* NOTE(review): extraction has dropped many lines within this function
   (gaps in the embedded numbering, e.g. 2320-2322, 2327-2328, 2342,
   2425-2432, 2636-2639).  The surviving fragments are preserved
   byte-for-byte; added comments below are grounded only in the visible
   text.  Overall shape: gather thread/regcache state, handle the
   pending-waitstatus and vfork-parent special cases, step over
   permanent breakpoints, try displaced stepping, fall back to software
   single-step, then call do_target_resume.  */
2306 resume_1 (enum gdb_signal sig
)
2308   struct regcache
 *regcache
 = get_current_regcache ();
2309   struct gdbarch
 *gdbarch
 = regcache
->arch ();
2310   struct thread_info
 *tp
 = inferior_thread ();
2311   const address_space
 *aspace
 = regcache
->aspace ();
2313   /* This represents the user's step vs continue request.  When
2314      deciding whether "set scheduler-locking step" applies, it's the
2315      user's intention that counts.  */
2316   const int user_step
 = tp
->control
.stepping_command
;
2317   /* This represents what we'll actually request the target to do.
2318      This can decay from a step to a continue, if e.g., we need to
2319      implement single-stepping with breakpoints (software
2323   gdb_assert (!tp
->stop_requested
);
2324   gdb_assert (!thread_is_in_step_over_chain (tp
));
  /* A thread with an unconsumed stop event is not re-resumed on the
     target; just mark it resumed and kick the event loop so the
     pending event gets processed.  */
2326   if (tp
->has_pending_waitstatus ())
2329 	("thread %s has pending wait "
2330 	 "status %s (currently_stepping=%d).",
2331 	 tp
->ptid
.to_string ().c_str (),
2332 	 tp
->pending_waitstatus ().to_string ().c_str (),
2333 	 currently_stepping (tp
));
2335       tp
->inf
->process_target ()->threads_executing
 = true;
2336       tp
->set_resumed (true);
2338       /* FIXME: What should we do if we are supposed to resume this
2339 	 thread with a signal?  Maybe we should maintain a queue of
2340 	 pending signals to deliver.  */
2341       if (sig
 != GDB_SIGNAL_0
)
2343 	  warning (_("Couldn't deliver signal %s to %s."),
2344 		   gdb_signal_to_name (sig
),
2345 		   tp
->ptid
.to_string ().c_str ());
2348       tp
->set_stop_signal (GDB_SIGNAL_0
);
2350       if (target_can_async_p ())
2353 	  /* Tell the event loop we have an event to process.  */
2354 	  mark_async_event_handler (infrun_async_inferior_event_token
);
2359   tp
->stepped_breakpoint
 = 0;
2361   /* Depends on stepped_breakpoint.  */
2362   step
 = currently_stepping (tp
);
2364   if (current_inferior ()->thread_waiting_for_vfork_done
 != nullptr)
2366       /* Don't try to single-step a vfork parent that is waiting for
2367 	 the child to get out of the shared memory region (by exec'ing
2368 	 or exiting).  This is particularly important on software
2369 	 single-step archs, as the child process would trip on the
2370 	 software single step breakpoint inserted for the parent
2371 	 process.  Since the parent will not actually execute any
2372 	 instruction until the child is out of the shared region (such
2373 	 are vfork's semantics), it is safe to simply continue it.
2374 	 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
2375 	 the parent, and tell it to `keep_going', which automatically
2376 	 re-sets it stepping.  */
2377       infrun_debug_printf ("resume : clear step");
2381   CORE_ADDR pc
 = regcache_read_pc (regcache
);
2383   infrun_debug_printf ("step=%d, signal=%s, trap_expected=%d, "
2384 		       "current thread [%s] at %s",
2385 		       step
, gdb_signal_to_symbol_string (sig
),
2386 		       tp
->control
.trap_expected
,
2387 		       inferior_ptid
.to_string ().c_str (),
2388 		       paddress (gdbarch
, pc
));
2390   /* Normally, by the time we reach `resume', the breakpoints are either
2391      removed or inserted, as appropriate.  The exception is if we're sitting
2392      at a permanent breakpoint; we need to step over it, but permanent
2393      breakpoints can't be removed.  So we have to test for it here.  */
2394   if (breakpoint_here_p (aspace
, pc
) == permanent_breakpoint_here
)
2396       if (sig
 != GDB_SIGNAL_0
)
2398 	  /* We have a signal to pass to the inferior.  The resume
2399 	     may, or may not take us to the signal handler.  If this
2400 	     is a step, we'll need to stop in the signal handler, if
2401 	     there's one, (if the target supports stepping into
2402 	     handlers), or in the next mainline instruction, if
2403 	     there's no handler.  If this is a continue, we need to be
2404 	     sure to run the handler with all breakpoints inserted.
2405 	     In all cases, set a breakpoint at the current address
2406 	     (where the handler returns to), and once that breakpoint
2407 	     is hit, resume skipping the permanent breakpoint.  If
2408 	     that breakpoint isn't hit, then we've stepped into the
2409 	     signal handler (or hit some other event).  We'll delete
2410 	     the step-resume breakpoint then.  */
2412 	  infrun_debug_printf ("resume: skipping permanent breakpoint, "
2413 			       "deliver signal first");
2415 	  clear_step_over_info ();
2416 	  tp
->control
.trap_expected
 = 0;
2418 	  if (tp
->control
.step_resume_breakpoint
 == NULL
)
2420 	      /* Set a "high-priority" step-resume, as we don't want
2421 		 user breakpoints at PC to trigger (again) when this
2423 	      insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2424 	      gdb_assert (tp
->control
.step_resume_breakpoint
->loc
->permanent
);
2426 	      tp
->step_after_step_resume_breakpoint
 = step
;
2429 	  insert_breakpoints ();
2433 	  /* There's no signal to pass, we can go ahead and skip the
2434 	     permanent breakpoint manually.  */
2435 	  infrun_debug_printf ("skipping permanent breakpoint");
2436 	  gdbarch_skip_permanent_breakpoint (gdbarch
, regcache
);
2437 	  /* Update pc to reflect the new address from which we will
2438 	     execute instructions.  */
2439 	  pc
 = regcache_read_pc (regcache
);
2443 	      /* We've already advanced the PC, so the stepping part
2444 		 is done.  Now we need to arrange for a trap to be
2445 		 reported to handle_inferior_event.  Set a breakpoint
2446 		 at the current PC, and run to it.  Don't update
2447 		 prev_pc, because if we end in
2448 		 switch_back_to_stepped_thread, we want the "expected
2449 		 thread advanced also" branch to be taken.  IOW, we
2450 		 don't want this thread to step further from PC
2452 	      gdb_assert (!step_over_info_valid_p ());
2453 	      insert_single_step_breakpoint (gdbarch
, aspace
, pc
);
2454 	      insert_breakpoints ();
2456 	      resume_ptid
 = internal_resume_ptid (user_step
);
2457 	      do_target_resume (resume_ptid
, false, GDB_SIGNAL_0
);
2458 	      tp
->set_resumed (true);
2464   /* If we have a breakpoint to step over, make sure to do a single
2465      step only.  Same if we have software watchpoints.  */
2466   if (tp
->control
.trap_expected
 || bpstat_should_step ())
2467     tp
->control
.may_range_step
 = 0;
2469   /* If displaced stepping is enabled, step over breakpoints by executing a
2470      copy of the instruction at a different address.
2472      We can't use displaced stepping when we have a signal to deliver;
2473      the comments for displaced_step_prepare explain why.  The
2474      comments in the handle_inferior event for dealing with 'random
2475      signals' explain what we do instead.
2477      We can't use displaced stepping when we are waiting for vfork_done
2478      event, displaced stepping breaks the vfork child similarly as single
2479      step software breakpoint.  */
2480   if (tp
->control
.trap_expected
2481       && use_displaced_stepping (tp
)
2482       && !step_over_info_valid_p ()
2483       && sig
 == GDB_SIGNAL_0
2484       && current_inferior ()->thread_waiting_for_vfork_done
 == nullptr)
2486       displaced_step_prepare_status prepare_status
2487 	= displaced_step_prepare (tp
);
2489       if (prepare_status
 == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
)
2491 	  infrun_debug_printf ("Got placed in step-over queue");
	  /* No buffer available now; cancel the expected trap, the
	     thread will be retried from the step-over queue.  */
2493 	  tp
->control
.trap_expected
 = 0;
2496       else if (prepare_status
 == DISPLACED_STEP_PREPARE_STATUS_CANT
)
2498 	  /* Fallback to stepping over the breakpoint in-line.  */
2500 	  if (target_is_non_stop_p ())
2501 	    stop_all_threads ("displaced stepping falling back on inline stepping");
2503 	  set_step_over_info (regcache
->aspace (),
2504 			      regcache_read_pc (regcache
), 0, tp
->global_num
);
2506 	  step
 = maybe_software_singlestep (gdbarch
);
2508 	  insert_breakpoints ();
2510       else if (prepare_status
 == DISPLACED_STEP_PREPARE_STATUS_OK
)
2512 	  /* Update pc to reflect the new address from which we will
2513 	     execute instructions due to displaced stepping.  */
2514 	  pc
 = regcache_read_pc (get_thread_regcache (tp
));
2516 	  step
 = gdbarch_displaced_step_hw_singlestep (gdbarch
);
2519 	gdb_assert_not_reached ("Invalid displaced_step_prepare_status "
2523   /* Do we need to do it the hard way, w/temp breakpoints?  */
2525     step
 = maybe_software_singlestep (gdbarch
);
2527   /* Currently, our software single-step implementation leads to different
2528      results than hardware single-stepping in one situation: when stepping
2529      into delivering a signal which has an associated signal handler,
2530      hardware single-step will stop at the first instruction of the handler,
2531      while software single-step will simply skip execution of the handler.
2533      For now, this difference in behavior is accepted since there is no
2534      easy way to actually implement single-stepping into a signal handler
2535      without kernel support.
2537      However, there is one scenario where this difference leads to follow-on
2538      problems: if we're stepping off a breakpoint by removing all breakpoints
2539      and then single-stepping.  In this case, the software single-step
2540      behavior means that even if there is a *breakpoint* in the signal
2541      handler, GDB still would not stop.
2543      Fortunately, we can at least fix this particular issue.  We detect
2544      here the case where we are about to deliver a signal while software
2545      single-stepping with breakpoints removed.  In this situation, we
2546      revert the decisions to remove all breakpoints and insert single-
2547      step breakpoints, and instead we install a step-resume breakpoint
2548      at the current address, deliver the signal without stepping, and
2549      once we arrive back at the step-resume breakpoint, actually step
2550      over the breakpoint we originally wanted to step over.  */
2551   if (thread_has_single_step_breakpoints_set (tp
)
2552       && sig
 != GDB_SIGNAL_0
2553       && step_over_info_valid_p ())
2555       /* If we have nested signals or a pending signal is delivered
2556 	 immediately after a handler returns, might already have
2557 	 a step-resume breakpoint set on the earlier handler.  We cannot
2558 	 set another step-resume breakpoint; just continue on until the
2559 	 original breakpoint is hit.  */
2560       if (tp
->control
.step_resume_breakpoint
 == NULL
)
2562 	  insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2563 	  tp
->step_after_step_resume_breakpoint
 = 1;
2566       delete_single_step_breakpoints (tp
);
2568       clear_step_over_info ();
2569       tp
->control
.trap_expected
 = 0;
2571       insert_breakpoints ();
2574   /* If STEP is set, it's a request to use hardware stepping
2575      facilities.  But in that case, we should never
2576      use singlestep breakpoint.  */
2577   gdb_assert (!(thread_has_single_step_breakpoints_set (tp
) && step
));
2579   /* Decide the set of threads to ask the target to resume.  */
2580   if (tp
->control
.trap_expected
)
2582       /* We're allowing a thread to run past a breakpoint it has
2583 	 hit, either by single-stepping the thread with the breakpoint
2584 	 removed, or by displaced stepping, with the breakpoint inserted.
2585 	 In the former case, we need to single-step only this thread,
2586 	 and keep others stopped, as they can miss this breakpoint if
2587 	 allowed to run.  That's not really a problem for displaced
2588 	 stepping, but, we still keep other threads stopped, in case
2589 	 another thread is also stopped for a breakpoint waiting for
2590 	 its turn in the displaced stepping queue.  */
2591       resume_ptid
 = inferior_ptid
;
2594     resume_ptid
 = internal_resume_ptid (user_step
);
2596   if (execution_direction
 != EXEC_REVERSE
2597       && step
 && breakpoint_inserted_here_p (aspace
, pc
))
2599       /* There are two cases where we currently need to step a
2600 	 breakpoint instruction when we have a signal to deliver:
2602 	 - See handle_signal_stop where we handle random signals that
2603 	 could take out us out of the stepping range.  Normally, in
2604 	 that case we end up continuing (instead of stepping) over the
2605 	 signal handler with a breakpoint at PC, but there are cases
2606 	 where we should _always_ single-step, even if we have a
2607 	 step-resume breakpoint, like when a software watchpoint is
2608 	 set.  Assuming single-stepping and delivering a signal at the
2609 	 same time would takes us to the signal handler, then we could
2610 	 have removed the breakpoint at PC to step over it.  However,
2611 	 some hardware step targets (like e.g., Mac OS) can't step
2612 	 into signal handlers, and for those, we need to leave the
2613 	 breakpoint at PC inserted, as otherwise if the handler
2614 	 recurses and executes PC again, it'll miss the breakpoint.
2615 	 So we leave the breakpoint inserted anyway, but we need to
2616 	 record that we tried to step a breakpoint instruction, so
2617 	 that adjust_pc_after_break doesn't end up confused.
2619 	 - In non-stop if we insert a breakpoint (e.g., a step-resume)
2620 	 in one thread after another thread that was stepping had been
2621 	 momentarily paused for a step-over.  When we re-resume the
2622 	 stepping thread, it may be resumed from that address with a
2623 	 breakpoint that hasn't trapped yet.  Seen with
2624 	 gdb.threads/non-stop-fair-events.exp, on targets that don't
2625 	 do displaced stepping.  */
2627       infrun_debug_printf ("resume: [%s] stepped breakpoint",
2628 			   tp
->ptid
.to_string ().c_str ());
2630       tp
->stepped_breakpoint
 = 1;
2632       /* Most targets can step a breakpoint instruction, thus
2633 	 executing it normally.  But if this one cannot, just
2634 	 continue and we will hit it anyway.  */
2635       if (gdbarch_cannot_step_breakpoint (gdbarch
))
  /* Debug dump of the scratch-pad bytes about to run, when displaced
     stepping.  NOTE(review): this condition's opening line(s)
     (original 2638-2639) are missing from this extraction.  */
2640       && tp
->control
.trap_expected
2641       && use_displaced_stepping (tp
)
2642       && !step_over_info_valid_p ())
2644       struct regcache
 *resume_regcache
 = get_thread_regcache (tp
);
2645       struct gdbarch
 *resume_gdbarch
 = resume_regcache
->arch ();
2646       CORE_ADDR actual_pc
 = regcache_read_pc (resume_regcache
);
2649       read_memory (actual_pc
, buf
, sizeof (buf
));
2650       displaced_debug_printf ("run %s: %s",
2651 			      paddress (resume_gdbarch
, actual_pc
),
2652 			      displaced_step_dump_bytes
2653 				(buf
, sizeof (buf
)).c_str ());
2656   if (tp
->control
.may_range_step
)
2658       /* If we're resuming a thread with the PC out of the step
2659 	 range, then we're doing some nested/finer run control
2660 	 operation, like stepping the thread out of the dynamic
2661 	 linker or the displaced stepping scratch pad.  We
2662 	 shouldn't have allowed a range step then.  */
2663       gdb_assert (pc_in_thread_step_range (pc
, tp
));
2666   do_target_resume (resume_ptid
, step
, sig
);
2667   tp
->set_resumed (true);
2670 /* Resume the inferior.  SIG is the signal to give the inferior
2671    (GDB_SIGNAL_0 for none).  This is a wrapper around 'resume_1' that
2672    rolls back state on error.  */
/* NOTE(review): the try block calling resume_1 (original ~2677-2680)
   is missing from this extraction; only the catch handler survives.  */
2675 resume (gdb_signal sig
)
2681   catch (const gdb_exception
 &ex
)
2683       /* If resuming is being aborted for any reason, delete any
2684 	 single-step breakpoint resume_1 may have created, to avoid
2685 	 confusing the following resumption, and to avoid leaving
2686 	 single-step breakpoints perturbing other threads, in case
2687 	 we're running in non-stop mode.  */
2688       if (inferior_ptid
 != null_ptid
)
2689 	delete_single_step_breakpoints (inferior_thread ());
2699 /* Counter that tracks number of user visible stops.  This can be used
2700    to tell whether a command has proceeded the inferior past the
2701    current location.  This allows e.g., inferior function calls in
2702    breakpoint commands to not interrupt the command list.  When the
2703    call finishes successfully, the inferior is standing at the same
2704    breakpoint as if nothing happened (and so we don't call
2706 static ULONGEST current_stop_id
;
/* NOTE(review): the enclosing accessor's declaration (get_stop_id,
   original ~2711-2712) is missing from this extraction; only its
   return statement survives.  */
2713   return current_stop_id
;
2716 /* Called when we report a user visible stop.  */
2724 /* Clear out all variables saying what to do when inferior is continued.
2725    First do this, then set the ones you want, then call `proceed'.  */
/* Per-thread reset: drops a stale finished-single-step event, filters
   the stop signal through the pass/nopass table, and zeroes all the
   thread's run-control state (step ranges, frame ids, fsm, bpstat).  */
2728 clear_proceed_status_thread (struct thread_info
 *tp
)
2730   infrun_debug_printf ("%s", tp
->ptid
.to_string ().c_str ());
2732   /* If we're starting a new sequence, then the previous finished
2733      single-step is no longer relevant.  */
2734   if (tp
->has_pending_waitstatus ())
2736       if (tp
->stop_reason () == TARGET_STOPPED_BY_SINGLE_STEP
)
2738 	  infrun_debug_printf ("pending event of %s was a finished step. "
2740 			       tp
->ptid
.to_string ().c_str ());
2742 	  tp
->clear_pending_waitstatus ();
2743 	  tp
->set_stop_reason (TARGET_STOPPED_BY_NO_REASON
);
2748 	    ("thread %s has pending wait status %s (currently_stepping=%d).",
2749 	     tp
->ptid
.to_string ().c_str (),
2750 	     tp
->pending_waitstatus ().to_string ().c_str (),
2751 	     currently_stepping (tp
));
2755   /* If this signal should not be seen by program, give it zero.
2756      Used for debugging signals.  */
2757   if (!signal_pass_state (tp
->stop_signal ()))
2758     tp
->set_stop_signal (GDB_SIGNAL_0
);
2760   tp
->release_thread_fsm ();
2762   tp
->control
.trap_expected
 = 0;
2763   tp
->control
.step_range_start
 = 0;
2764   tp
->control
.step_range_end
 = 0;
2765   tp
->control
.may_range_step
 = 0;
2766   tp
->control
.step_frame_id
 = null_frame_id
;
2767   tp
->control
.step_stack_frame_id
 = null_frame_id
;
2768   tp
->control
.step_over_calls
 = STEP_OVER_UNDEBUGGABLE
;
2769   tp
->control
.step_start_function
 = NULL
;
2770   tp
->stop_requested
 = 0;
2772   tp
->control
.stop_step
 = 0;
2774   tp
->control
.proceed_to_finish
 = 0;
2776   tp
->control
.stepping_command
 = 0;
2778   /* Discard any remaining commands or status from previous stop.  */
2779   bpstat_clear (&tp
->control
.stop_bpstat
);
/* File-wide proceed-status reset before a resume: optionally stops
   record/replay of other threads, clears the per-thread status of
   every thread about to be resumed (all-stop) or just the current one
   (non-stop), resets the inferior's stop_soon, and notifies the
   about_to_proceed observers.  */
2783 clear_proceed_status (int step
)
2785   /* With scheduler-locking replay, stop replaying other threads if we're
2786      not replaying the user-visible resume ptid.
2788      This is a convenience feature to not require the user to explicitly
2789      stop replaying the other threads.  We're assuming that the user's
2790      intent is to resume tracing the recorded process.  */
2791   if (!non_stop
 && scheduler_mode
 == schedlock_replay
2792       && target_record_is_replaying (minus_one_ptid
)
2793       && !target_record_will_replay (user_visible_resume_ptid (step
),
2794 				     execution_direction
))
2795     target_record_stop_replaying ();
2797   if (!non_stop
 && inferior_ptid
 != null_ptid
)
2799       ptid_t resume_ptid
 = user_visible_resume_ptid (step
);
2800       process_stratum_target
 *resume_target
2801 	= user_visible_resume_target (resume_ptid
);
2803       /* In all-stop mode, delete the per-thread status of all threads
2804 	 we're about to resume, implicitly and explicitly.  */
2805       for (thread_info
 *tp
 : all_non_exited_threads (resume_target
, resume_ptid
))
2806 	clear_proceed_status_thread (tp
);
2809   if (inferior_ptid
 != null_ptid
)
2811       struct inferior
 *inferior
;
2815 	  /* If in non-stop mode, only delete the per-thread status of
2816 	     the current thread.  */
2817 	  clear_proceed_status_thread (inferior_thread ());
2820       inferior
 = current_inferior ();
2821       inferior
->control
.stop_soon
 = NO_STOP_QUIETLY
;
2824   gdb::observers::about_to_proceed
.notify ();
2827 /* Returns true if TP is still stopped at a breakpoint that needs
2828    stepping-over in order to make progress.  If the breakpoint is gone
2829    meanwhile, we can skip the whole step-over dance.  */
/* NOTE(review): the return statements (true/false, original ~2841,
   2844-2846) are missing from this extraction.  Side effect visible
   below: clears tp->stepping_over_breakpoint when no ordinary
   breakpoint remains at the thread's PC.  */
2832 thread_still_needs_step_over_bp (struct thread_info
 *tp
)
2834   if (tp
->stepping_over_breakpoint
)
2836       struct regcache
 *regcache
 = get_thread_regcache (tp
);
2838       if (breakpoint_here_p (regcache
->aspace (),
2839 			     regcache_read_pc (regcache
))
2840 	  == ordinary_breakpoint_here
)
2843       tp
->stepping_over_breakpoint
 = 0;
2849 /* Check whether thread TP still needs to start a step-over in order
2850    to make progress when resumed.  Returns an bitwise or of enum
2851    step_over_what bits, indicating what needs to be stepped over.  */
2853 static step_over_what
2854 thread_still_needs_step_over (struct thread_info
 *tp
)
2856   step_over_what what
 = 0;
2858   if (thread_still_needs_step_over_bp (tp
))
2859     what
 |= STEP_OVER_BREAKPOINT
;
  /* A watchpoint only needs a step-over if the target cannot step
     over it in hardware.  */
2861   if (tp
->stepping_over_watchpoint
2862       && !target_have_steppable_watchpoint ())
2863     what
 |= STEP_OVER_WATCHPOINT
;
2868 /* Returns true if scheduler locking applies.  STEP indicates whether
2869    we're about to do a step/next-like command to a thread.  */
/* "on" always locks; "step" locks only for stepping commands (judged
   by TP's stepping_command, i.e. the user's intent); "replay" locks
   while the target would replay in the current execution direction.  */
2872 schedlock_applies (struct thread_info
 *tp
)
2874   return (scheduler_mode
 == schedlock_on
2875 	  || (scheduler_mode
 == schedlock_step
2876 	      && tp
->control
.stepping_command
)
2877 	  || (scheduler_mode
 == schedlock_replay
2878 	      && target_record_will_replay (minus_one_ptid
,
2879 					    execution_direction
)));
2882 /* Set process_stratum_target::COMMIT_RESUMED_STATE in all target
2883    stacks that have threads executing and don't have threads with
/* For each live inferior's process target, decide whether asking it to
   commit resumed threads is worthwhile; skipped when the target has no
   executing threads, has a pending wait status, or has pending events.  */
2887 maybe_set_commit_resumed_all_targets ()
2889   scoped_restore_current_thread restore_thread
;
2891   for (inferior
 *inf
 : all_non_exited_inferiors ())
2893       process_stratum_target
 *proc_target
 = inf
->process_target ();
2895       if (proc_target
->commit_resumed_state
)
2897 	  /* We already set this in a previous iteration, via another
2898 	     inferior sharing the process_stratum target.  */
2902       /* If the target has no resumed threads, it would be useless to
2903 	 ask it to commit the resumed threads.  */
2904       if (!proc_target
->threads_executing
)
2906 	  infrun_debug_printf ("not requesting commit-resumed for target "
2907 			       "%s, no resumed threads",
2908 			       proc_target
->shortname ());
2912       /* As an optimization, if a thread from this target has some
2913 	 status to report, handle it before requiring the target to
2914 	 commit its resumed threads: handling the status might lead to
2915 	 resuming more threads.  */
2916       if (proc_target
->has_resumed_with_pending_wait_status ())
2918 	  infrun_debug_printf ("not requesting commit-resumed for target %s, a"
2919 			       " thread has a pending waitstatus",
2920 			       proc_target
->shortname ());
2924       switch_to_inferior_no_thread (inf
);
2926       if (target_has_pending_events ())
2928 	  infrun_debug_printf ("not requesting commit-resumed for target %s, "
2929 			       "target has pending events",
2930 			       proc_target
->shortname ());
2934       infrun_debug_printf ("enabling commit-resumed for target %s",
2935 			   proc_target
->shortname ());
2937       proc_target
->commit_resumed_state
 = true;
/* Call target_commit_resumed on every process target whose
   commit_resumed_state flag was set by
   maybe_set_commit_resumed_all_targets.  */
2944 maybe_call_commit_resumed_all_targets ()
2946   scoped_restore_current_thread restore_thread
;
2948   for (inferior
 *inf
 : all_non_exited_inferiors ())
2950       process_stratum_target
 *proc_target
 = inf
->process_target ();
2952       if (!proc_target
->commit_resumed_state
)
2955       switch_to_inferior_no_thread (inf
);
2957       infrun_debug_printf ("calling commit_resumed for target %s",
2958 			   proc_target
->shortname());
2960       target_commit_resumed ();
2964 /* To track nesting of scoped_disable_commit_resumed objects, ensuring
2965    that only the outermost one attempts to re-enable
/* True when no scoped_disable_commit_resumed is currently active.  */
2967 static bool enable_commit_resumed
 = true;
/* Constructor: records the previous enable state, globally disables
   commit-resumed, and force-clears (outermost instance) or verifies
   already-cleared (nested instance) COMMIT_RESUMED_STATE on every
   live process target.  */
2971 scoped_disable_commit_resumed::scoped_disable_commit_resumed
2972   (const char *reason
)
2973   : m_reason (reason
),
2974     m_prev_enable_commit_resumed (enable_commit_resumed
)
2976   infrun_debug_printf ("reason=%s", m_reason
);
2978   enable_commit_resumed
 = false;
2980   for (inferior
 *inf
 : all_non_exited_inferiors ())
2982       process_stratum_target
 *proc_target
 = inf
->process_target ();
2984       if (m_prev_enable_commit_resumed
)
2986 	  /* This is the outermost instance: force all
2987 	     COMMIT_RESUMED_STATE to false.  */
2988 	  proc_target
->commit_resumed_state
 = false;
2992 	  /* This is not the outermost instance, we expect
2993 	     COMMIT_RESUMED_STATE to have been cleared by the
2994 	     outermost instance.  */
2995 	  gdb_assert (!proc_target
->commit_resumed_state
);
3003 scoped_disable_commit_resumed::reset ()
3009 infrun_debug_printf ("reason=%s", m_reason
);
3011 gdb_assert (!enable_commit_resumed
);
3013 enable_commit_resumed
= m_prev_enable_commit_resumed
;
3015 if (m_prev_enable_commit_resumed
)
3017 /* This is the outermost instance, re-enable
3018 COMMIT_RESUMED_STATE on the targets where it's possible. */
3019 maybe_set_commit_resumed_all_targets ();
3023 /* This is not the outermost instance, we expect
3024 COMMIT_RESUMED_STATE to still be false. */
3025 for (inferior
*inf
: all_non_exited_inferiors ())
3027 process_stratum_target
*proc_target
= inf
->process_target ();
3028 gdb_assert (!proc_target
->commit_resumed_state
);
3035 scoped_disable_commit_resumed::~scoped_disable_commit_resumed ()
3043 scoped_disable_commit_resumed::reset_and_commit ()
3046 maybe_call_commit_resumed_all_targets ();
3051 scoped_enable_commit_resumed::scoped_enable_commit_resumed
3052 (const char *reason
)
3053 : m_reason (reason
),
3054 m_prev_enable_commit_resumed (enable_commit_resumed
)
3056 infrun_debug_printf ("reason=%s", m_reason
);
3058 if (!enable_commit_resumed
)
3060 enable_commit_resumed
= true;
3062 /* Re-enable COMMIT_RESUMED_STATE on the targets where it's
3064 maybe_set_commit_resumed_all_targets ();
3066 maybe_call_commit_resumed_all_targets ();
3072 scoped_enable_commit_resumed::~scoped_enable_commit_resumed ()
3074 infrun_debug_printf ("reason=%s", m_reason
);
3076 gdb_assert (enable_commit_resumed
);
3078 enable_commit_resumed
= m_prev_enable_commit_resumed
;
3080 if (!enable_commit_resumed
)
3082 /* Force all COMMIT_RESUMED_STATE back to false. */
3083 for (inferior
*inf
: all_non_exited_inferiors ())
3085 process_stratum_target
*proc_target
= inf
->process_target ();
3086 proc_target
->commit_resumed_state
= false;
3091 /* Check that all the targets we're about to resume are in non-stop
3092 mode. Ideally, we'd only care whether all targets support
3093 target-async, but we're not there yet. E.g., stop_all_threads
3094 doesn't know how to handle all-stop targets. Also, the remote
3095 protocol in all-stop mode is synchronous, irrespective of
3096 target-async, which means that things like a breakpoint re-set
3097 triggered by one target would try to read memory from all targets
3101 check_multi_target_resumption (process_stratum_target
*resume_target
)
3103 if (!non_stop
&& resume_target
== nullptr)
3105 scoped_restore_current_thread restore_thread
;
3107 /* This is used to track whether we're resuming more than one
3109 process_stratum_target
*first_connection
= nullptr;
3111 /* The first inferior we see with a target that does not work in
3112 always-non-stop mode. */
3113 inferior
*first_not_non_stop
= nullptr;
3115 for (inferior
*inf
: all_non_exited_inferiors ())
3117 switch_to_inferior_no_thread (inf
);
3119 if (!target_has_execution ())
3122 process_stratum_target
*proc_target
3123 = current_inferior ()->process_target();
3125 if (!target_is_non_stop_p ())
3126 first_not_non_stop
= inf
;
3128 if (first_connection
== nullptr)
3129 first_connection
= proc_target
;
3130 else if (first_connection
!= proc_target
3131 && first_not_non_stop
!= nullptr)
3133 switch_to_inferior_no_thread (first_not_non_stop
);
3135 proc_target
= current_inferior ()->process_target();
3137 error (_("Connection %d (%s) does not support "
3138 "multi-target resumption."),
3139 proc_target
->connection_number
,
3140 make_target_connection_string (proc_target
).c_str ());
3146 /* Basic routine for continuing the program in various fashions.
3148 ADDR is the address to resume at, or -1 for resume where stopped.
3149 SIGGNAL is the signal to give it, or GDB_SIGNAL_0 for none,
3150 or GDB_SIGNAL_DEFAULT for act according to how it stopped.
3152 You should call clear_proceed_status before calling proceed. */
3155 proceed (CORE_ADDR addr
, enum gdb_signal siggnal
)
3157 INFRUN_SCOPED_DEBUG_ENTER_EXIT
;
3159 struct regcache
*regcache
;
3160 struct gdbarch
*gdbarch
;
3162 struct execution_control_state ecss
;
3163 struct execution_control_state
*ecs
= &ecss
;
3165 /* If we're stopped at a fork/vfork, follow the branch set by the
3166 "set follow-fork-mode" command; otherwise, we'll just proceed
3167 resuming the current thread. */
3168 if (!follow_fork ())
3170 /* The target for some reason decided not to resume. */
3172 if (target_can_async_p ())
3173 inferior_event_handler (INF_EXEC_COMPLETE
);
3177 /* We'll update this if & when we switch to a new thread. */
3178 previous_inferior_ptid
= inferior_ptid
;
3180 regcache
= get_current_regcache ();
3181 gdbarch
= regcache
->arch ();
3182 const address_space
*aspace
= regcache
->aspace ();
3184 pc
= regcache_read_pc_protected (regcache
);
3186 thread_info
*cur_thr
= inferior_thread ();
3188 /* Fill in with reasonable starting values. */
3189 init_thread_stepping_state (cur_thr
);
3191 gdb_assert (!thread_is_in_step_over_chain (cur_thr
));
3194 = user_visible_resume_ptid (cur_thr
->control
.stepping_command
);
3195 process_stratum_target
*resume_target
3196 = user_visible_resume_target (resume_ptid
);
3198 check_multi_target_resumption (resume_target
);
3200 if (addr
== (CORE_ADDR
) -1)
3202 if (cur_thr
->stop_pc_p ()
3203 && pc
== cur_thr
->stop_pc ()
3204 && breakpoint_here_p (aspace
, pc
) == ordinary_breakpoint_here
3205 && execution_direction
!= EXEC_REVERSE
)
3206 /* There is a breakpoint at the address we will resume at,
3207 step one instruction before inserting breakpoints so that
3208 we do not stop right away (and report a second hit at this
3211 Note, we don't do this in reverse, because we won't
3212 actually be executing the breakpoint insn anyway.
3213 We'll be (un-)executing the previous instruction. */
3214 cur_thr
->stepping_over_breakpoint
= 1;
3215 else if (gdbarch_single_step_through_delay_p (gdbarch
)
3216 && gdbarch_single_step_through_delay (gdbarch
,
3217 get_current_frame ()))
3218 /* We stepped onto an instruction that needs to be stepped
3219 again before re-inserting the breakpoint, do so. */
3220 cur_thr
->stepping_over_breakpoint
= 1;
3224 regcache_write_pc (regcache
, addr
);
3227 if (siggnal
!= GDB_SIGNAL_DEFAULT
)
3228 cur_thr
->set_stop_signal (siggnal
);
3230 /* If an exception is thrown from this point on, make sure to
3231 propagate GDB's knowledge of the executing state to the
3232 frontend/user running state. */
3233 scoped_finish_thread_state
finish_state (resume_target
, resume_ptid
);
3235 /* Even if RESUME_PTID is a wildcard, and we end up resuming fewer
3236 threads (e.g., we might need to set threads stepping over
3237 breakpoints first), from the user/frontend's point of view, all
3238 threads in RESUME_PTID are now running. Unless we're calling an
3239 inferior function, as in that case we pretend the inferior
3240 doesn't run at all. */
3241 if (!cur_thr
->control
.in_infcall
)
3242 set_running (resume_target
, resume_ptid
, true);
3244 infrun_debug_printf ("addr=%s, signal=%s", paddress (gdbarch
, addr
),
3245 gdb_signal_to_symbol_string (siggnal
));
3247 annotate_starting ();
3249 /* Make sure that output from GDB appears before output from the
3251 gdb_flush (gdb_stdout
);
3253 /* Since we've marked the inferior running, give it the terminal. A
3254 QUIT/Ctrl-C from here on is forwarded to the target (which can
3255 still detect attempts to unblock a stuck connection with repeated
3256 Ctrl-C from within target_pass_ctrlc). */
3257 target_terminal::inferior ();
3259 /* In a multi-threaded task we may select another thread and
3260 then continue or step.
3262 But if a thread that we're resuming had stopped at a breakpoint,
3263 it will immediately cause another breakpoint stop without any
3264 execution (i.e. it will report a breakpoint hit incorrectly). So
3265 we must step over it first.
3267 Look for threads other than the current (TP) that reported a
3268 breakpoint hit and haven't been resumed yet since. */
3270 /* If scheduler locking applies, we can avoid iterating over all
3272 if (!non_stop
&& !schedlock_applies (cur_thr
))
3274 for (thread_info
*tp
: all_non_exited_threads (resume_target
,
3277 switch_to_thread_no_regs (tp
);
3279 /* Ignore the current thread here. It's handled
3284 if (!thread_still_needs_step_over (tp
))
3287 gdb_assert (!thread_is_in_step_over_chain (tp
));
3289 infrun_debug_printf ("need to step-over [%s] first",
3290 tp
->ptid
.to_string ().c_str ());
3292 global_thread_step_over_chain_enqueue (tp
);
3295 switch_to_thread (cur_thr
);
3298 /* Enqueue the current thread last, so that we move all other
3299 threads over their breakpoints first. */
3300 if (cur_thr
->stepping_over_breakpoint
)
3301 global_thread_step_over_chain_enqueue (cur_thr
);
3303 /* If the thread isn't started, we'll still need to set its prev_pc,
3304 so that switch_back_to_stepped_thread knows the thread hasn't
3305 advanced. Must do this before resuming any thread, as in
3306 all-stop/remote, once we resume we can't send any other packet
3307 until the target stops again. */
3308 cur_thr
->prev_pc
= regcache_read_pc_protected (regcache
);
3311 scoped_disable_commit_resumed
disable_commit_resumed ("proceeding");
3312 bool step_over_started
= start_step_over ();
3314 if (step_over_info_valid_p ())
3316 /* Either this thread started a new in-line step over, or some
3317 other thread was already doing one. In either case, don't
3318 resume anything else until the step-over is finished. */
3320 else if (step_over_started
&& !target_is_non_stop_p ())
3322 /* A new displaced stepping sequence was started. In all-stop,
3323 we can't talk to the target anymore until it next stops. */
3325 else if (!non_stop
&& target_is_non_stop_p ())
3327 INFRUN_SCOPED_DEBUG_START_END
3328 ("resuming threads, all-stop-on-top-of-non-stop");
3330 /* In all-stop, but the target is always in non-stop mode.
3331 Start all other threads that are implicitly resumed too. */
3332 for (thread_info
*tp
: all_non_exited_threads (resume_target
,
3335 switch_to_thread_no_regs (tp
);
3337 if (!tp
->inf
->has_execution ())
3339 infrun_debug_printf ("[%s] target has no execution",
3340 tp
->ptid
.to_string ().c_str ());
3346 infrun_debug_printf ("[%s] resumed",
3347 tp
->ptid
.to_string ().c_str ());
3348 gdb_assert (tp
->executing () || tp
->has_pending_waitstatus ());
3352 if (thread_is_in_step_over_chain (tp
))
3354 infrun_debug_printf ("[%s] needs step-over",
3355 tp
->ptid
.to_string ().c_str ());
3359 /* If a thread of that inferior is waiting for a vfork-done
3360 (for a detached vfork child to exec or exit), breakpoints are
3361 removed. We must not resume any thread of that inferior, other
3362 than the one waiting for the vfork-done. */
3363 if (tp
->inf
->thread_waiting_for_vfork_done
!= nullptr
3364 && tp
!= tp
->inf
->thread_waiting_for_vfork_done
)
3366 infrun_debug_printf ("[%s] another thread of this inferior is "
3367 "waiting for vfork-done",
3368 tp
->ptid
.to_string ().c_str ());
3372 infrun_debug_printf ("resuming %s",
3373 tp
->ptid
.to_string ().c_str ());
3375 reset_ecs (ecs
, tp
);
3376 switch_to_thread (tp
);
3377 keep_going_pass_signal (ecs
);
3378 if (!ecs
->wait_some_more
)
3379 error (_("Command aborted."));
3382 else if (!cur_thr
->resumed ()
3383 && !thread_is_in_step_over_chain (cur_thr
)
3384 /* In non-stop, forbid resuming a thread if some other thread of
3385 that inferior is waiting for a vfork-done event (this means
3386 breakpoints are out for this inferior). */
3388 && cur_thr
->inf
->thread_waiting_for_vfork_done
!= nullptr))
3390 /* The thread wasn't started, and isn't queued, run it now. */
3391 reset_ecs (ecs
, cur_thr
);
3392 switch_to_thread (cur_thr
);
3393 keep_going_pass_signal (ecs
);
3394 if (!ecs
->wait_some_more
)
3395 error (_("Command aborted."));
3398 disable_commit_resumed
.reset_and_commit ();
3401 finish_state
.release ();
3403 /* If we've switched threads above, switch back to the previously
3404 current thread. We don't want the user to see a different
3406 switch_to_thread (cur_thr
);
3408 /* Tell the event loop to wait for it to stop. If the target
3409 supports asynchronous execution, it'll do this from within
3411 if (!target_can_async_p ())
3412 mark_async_event_handler (infrun_async_inferior_event_token
);
3416 /* Start remote-debugging of a machine over a serial link. */
3419 start_remote (int from_tty
)
3421 inferior
*inf
= current_inferior ();
3422 inf
->control
.stop_soon
= STOP_QUIETLY_REMOTE
;
3424 /* Always go on waiting for the target, regardless of the mode. */
3425 /* FIXME: cagney/1999-09-23: At present it isn't possible to
3426 indicate to wait_for_inferior that a target should timeout if
3427 nothing is returned (instead of just blocking). Because of this,
3428 targets expecting an immediate response need to, internally, set
3429 things up so that the target_wait() is forced to eventually
3431 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
3432 differentiate to its caller what the state of the target is after
3433 the initial open has been performed. Here we're assuming that
3434 the target has stopped. It should be possible to eventually have
3435 target_open() return to the caller an indication that the target
3436 is currently running and GDB state should be set to the same as
3437 for an async run. */
3438 wait_for_inferior (inf
);
3440 /* Now that the inferior has stopped, do any bookkeeping like
3441 loading shared libraries. We want to do this before normal_stop,
3442 so that the displayed frame is up to date. */
3443 post_create_inferior (from_tty
);
3448 /* Initialize static vars when a new inferior begins. */
3451 init_wait_for_inferior (void)
3453 /* These are meaningless until the first time through wait_for_inferior. */
3455 breakpoint_init_inferior (inf_starting
);
3457 clear_proceed_status (0);
3459 nullify_last_target_wait_ptid ();
3461 previous_inferior_ptid
= inferior_ptid
;
3466 static void handle_inferior_event (struct execution_control_state
*ecs
);
3468 static void handle_step_into_function (struct gdbarch
*gdbarch
,
3469 struct execution_control_state
*ecs
);
3470 static void handle_step_into_function_backward (struct gdbarch
*gdbarch
,
3471 struct execution_control_state
*ecs
);
3472 static void handle_signal_stop (struct execution_control_state
*ecs
);
3473 static void check_exception_resume (struct execution_control_state
*,
3474 struct frame_info
*);
3476 static void end_stepping_range (struct execution_control_state
*ecs
);
3477 static void stop_waiting (struct execution_control_state
*ecs
);
3478 static void keep_going (struct execution_control_state
*ecs
);
3479 static void process_event_stop_test (struct execution_control_state
*ecs
);
3480 static bool switch_back_to_stepped_thread (struct execution_control_state
*ecs
);
3482 /* This function is attached as a "thread_stop_requested" observer.
3483 Cleanup local state that assumed the PTID was to be resumed, and
3484 report the stop to the frontend. */
3487 infrun_thread_stop_requested (ptid_t ptid
)
3489 process_stratum_target
*curr_target
= current_inferior ()->process_target ();
3491 /* PTID was requested to stop. If the thread was already stopped,
3492 but the user/frontend doesn't know about that yet (e.g., the
3493 thread had been temporarily paused for some step-over), set up
3494 for reporting the stop now. */
3495 for (thread_info
*tp
: all_threads (curr_target
, ptid
))
3497 if (tp
->state
!= THREAD_RUNNING
)
3499 if (tp
->executing ())
3502 /* Remove matching threads from the step-over queue, so
3503 start_step_over doesn't try to resume them
3505 if (thread_is_in_step_over_chain (tp
))
3506 global_thread_step_over_chain_remove (tp
);
3508 /* If the thread is stopped, but the user/frontend doesn't
3509 know about that yet, queue a pending event, as if the
3510 thread had just stopped now. Unless the thread already had
3512 if (!tp
->has_pending_waitstatus ())
3514 target_waitstatus ws
;
3515 ws
.set_stopped (GDB_SIGNAL_0
);
3516 tp
->set_pending_waitstatus (ws
);
3519 /* Clear the inline-frame state, since we're re-processing the
3521 clear_inline_frame_state (tp
);
3523 /* If this thread was paused because some other thread was
3524 doing an inline-step over, let that finish first. Once
3525 that happens, we'll restart all threads and consume pending
3526 stop events then. */
3527 if (step_over_info_valid_p ())
3530 /* Otherwise we can process the (new) pending event now. Set
3531 it so this pending event is considered by
3533 tp
->set_resumed (true);
3538 infrun_thread_thread_exit (struct thread_info
*tp
, int silent
)
3540 if (target_last_proc_target
== tp
->inf
->process_target ()
3541 && target_last_wait_ptid
== tp
->ptid
)
3542 nullify_last_target_wait_ptid ();
3545 /* Delete the step resume, single-step and longjmp/exception resume
3546 breakpoints of TP. */
3549 delete_thread_infrun_breakpoints (struct thread_info
*tp
)
3551 delete_step_resume_breakpoint (tp
);
3552 delete_exception_resume_breakpoint (tp
);
3553 delete_single_step_breakpoints (tp
);
3556 /* If the target still has execution, call FUNC for each thread that
3557 just stopped. In all-stop, that's all the non-exited threads; in
3558 non-stop, that's the current thread, only. */
3560 typedef void (*for_each_just_stopped_thread_callback_func
)
3561 (struct thread_info
*tp
);
3564 for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func
)
3566 if (!target_has_execution () || inferior_ptid
== null_ptid
)
3569 if (target_is_non_stop_p ())
3571 /* If in non-stop mode, only the current thread stopped. */
3572 func (inferior_thread ());
3576 /* In all-stop mode, all threads have stopped. */
3577 for (thread_info
*tp
: all_non_exited_threads ())
3582 /* Delete the step resume and longjmp/exception resume breakpoints of
3583 the threads that just stopped. */
3586 delete_just_stopped_threads_infrun_breakpoints (void)
3588 for_each_just_stopped_thread (delete_thread_infrun_breakpoints
);
3591 /* Delete the single-step breakpoints of the threads that just
3595 delete_just_stopped_threads_single_step_breakpoints (void)
3597 for_each_just_stopped_thread (delete_single_step_breakpoints
);
3603 print_target_wait_results (ptid_t waiton_ptid
, ptid_t result_ptid
,
3604 const struct target_waitstatus
&ws
)
3606 infrun_debug_printf ("target_wait (%s [%s], status) =",
3607 waiton_ptid
.to_string ().c_str (),
3608 target_pid_to_str (waiton_ptid
).c_str ());
3609 infrun_debug_printf (" %s [%s],",
3610 result_ptid
.to_string ().c_str (),
3611 target_pid_to_str (result_ptid
).c_str ());
3612 infrun_debug_printf (" %s", ws
.to_string ().c_str ());
3615 /* Select a thread at random, out of those which are resumed and have
3618 static struct thread_info
*
3619 random_pending_event_thread (inferior
*inf
, ptid_t waiton_ptid
)
3621 process_stratum_target
*proc_target
= inf
->process_target ();
3623 = proc_target
->random_resumed_with_pending_wait_status (inf
, waiton_ptid
);
3625 if (thread
== nullptr)
3627 infrun_debug_printf ("None found.");
3631 infrun_debug_printf ("Found %s.", thread
->ptid
.to_string ().c_str ());
3632 gdb_assert (thread
->resumed ());
3633 gdb_assert (thread
->has_pending_waitstatus ());
3638 /* Wrapper for target_wait that first checks whether threads have
3639 pending statuses to report before actually asking the target for
3640 more events. INF is the inferior we're using to call target_wait
3644 do_target_wait_1 (inferior
*inf
, ptid_t ptid
,
3645 target_waitstatus
*status
, target_wait_flags options
)
3647 struct thread_info
*tp
;
3649 /* We know that we are looking for an event in the target of inferior
3650 INF, but we don't know which thread the event might come from. As
3651 such we want to make sure that INFERIOR_PTID is reset so that none of
3652 the wait code relies on it - doing so is always a mistake. */
3653 switch_to_inferior_no_thread (inf
);
3655 /* First check if there is a resumed thread with a wait status
3657 if (ptid
== minus_one_ptid
|| ptid
.is_pid ())
3659 tp
= random_pending_event_thread (inf
, ptid
);
3663 infrun_debug_printf ("Waiting for specific thread %s.",
3664 ptid
.to_string ().c_str ());
3666 /* We have a specific thread to check. */
3667 tp
= find_thread_ptid (inf
, ptid
);
3668 gdb_assert (tp
!= NULL
);
3669 if (!tp
->has_pending_waitstatus ())
3674 && (tp
->stop_reason () == TARGET_STOPPED_BY_SW_BREAKPOINT
3675 || tp
->stop_reason () == TARGET_STOPPED_BY_HW_BREAKPOINT
))
3677 struct regcache
*regcache
= get_thread_regcache (tp
);
3678 struct gdbarch
*gdbarch
= regcache
->arch ();
3682 pc
= regcache_read_pc (regcache
);
3684 if (pc
!= tp
->stop_pc ())
3686 infrun_debug_printf ("PC of %s changed. was=%s, now=%s",
3687 tp
->ptid
.to_string ().c_str (),
3688 paddress (gdbarch
, tp
->stop_pc ()),
3689 paddress (gdbarch
, pc
));
3692 else if (!breakpoint_inserted_here_p (regcache
->aspace (), pc
))
3694 infrun_debug_printf ("previous breakpoint of %s, at %s gone",
3695 tp
->ptid
.to_string ().c_str (),
3696 paddress (gdbarch
, pc
));
3703 infrun_debug_printf ("pending event of %s cancelled.",
3704 tp
->ptid
.to_string ().c_str ());
3706 tp
->clear_pending_waitstatus ();
3707 target_waitstatus ws
;
3709 tp
->set_pending_waitstatus (ws
);
3710 tp
->set_stop_reason (TARGET_STOPPED_BY_NO_REASON
);
3716 infrun_debug_printf ("Using pending wait status %s for %s.",
3717 tp
->pending_waitstatus ().to_string ().c_str (),
3718 tp
->ptid
.to_string ().c_str ());
3720 /* Now that we've selected our final event LWP, un-adjust its PC
3721 if it was a software breakpoint (and the target doesn't
3722 always adjust the PC itself). */
3723 if (tp
->stop_reason () == TARGET_STOPPED_BY_SW_BREAKPOINT
3724 && !target_supports_stopped_by_sw_breakpoint ())
3726 struct regcache
*regcache
;
3727 struct gdbarch
*gdbarch
;
3730 regcache
= get_thread_regcache (tp
);
3731 gdbarch
= regcache
->arch ();
3733 decr_pc
= gdbarch_decr_pc_after_break (gdbarch
);
3738 pc
= regcache_read_pc (regcache
);
3739 regcache_write_pc (regcache
, pc
+ decr_pc
);
3743 tp
->set_stop_reason (TARGET_STOPPED_BY_NO_REASON
);
3744 *status
= tp
->pending_waitstatus ();
3745 tp
->clear_pending_waitstatus ();
3747 /* Wake up the event loop again, until all pending events are
3749 if (target_is_async_p ())
3750 mark_async_event_handler (infrun_async_inferior_event_token
);
3754 /* But if we don't find one, we'll have to wait. */
3756 /* We can't ask a non-async target to do a non-blocking wait, so this will be
3758 if (!target_can_async_p ())
3759 options
&= ~TARGET_WNOHANG
;
3761 return target_wait (ptid
, status
, options
);
3764 /* Wrapper for target_wait that first checks whether threads have
3765 pending statuses to report before actually asking the target for
3766 more events. Polls for events from all inferiors/targets. */
3769 do_target_wait (execution_control_state
*ecs
, target_wait_flags options
)
3771 int num_inferiors
= 0;
3772 int random_selector
;
3774 /* For fairness, we pick the first inferior/target to poll at random
3775 out of all inferiors that may report events, and then continue
3776 polling the rest of the inferior list starting from that one in a
3777 circular fashion until the whole list is polled once. */
3779 auto inferior_matches
= [] (inferior
*inf
)
3781 return inf
->process_target () != nullptr;
3784 /* First see how many matching inferiors we have. */
3785 for (inferior
*inf
: all_inferiors ())
3786 if (inferior_matches (inf
))
3789 if (num_inferiors
== 0)
3791 ecs
->ws
.set_ignore ();
3795 /* Now randomly pick an inferior out of those that matched. */
3796 random_selector
= (int)
3797 ((num_inferiors
* (double) rand ()) / (RAND_MAX
+ 1.0));
3799 if (num_inferiors
> 1)
3800 infrun_debug_printf ("Found %d inferiors, starting at #%d",
3801 num_inferiors
, random_selector
);
3803 /* Select the Nth inferior that matched. */
3805 inferior
*selected
= nullptr;
3807 for (inferior
*inf
: all_inferiors ())
3808 if (inferior_matches (inf
))
3809 if (random_selector
-- == 0)
3815 /* Now poll for events out of each of the matching inferior's
3816 targets, starting from the selected one. */
3818 auto do_wait
= [&] (inferior
*inf
)
3820 ecs
->ptid
= do_target_wait_1 (inf
, minus_one_ptid
, &ecs
->ws
, options
);
3821 ecs
->target
= inf
->process_target ();
3822 return (ecs
->ws
.kind () != TARGET_WAITKIND_IGNORE
);
3825 /* Needed in 'all-stop + target-non-stop' mode, because we end up
3826 here spuriously after the target is all stopped and we've already
3827 reported the stop to the user, polling for events. */
3828 scoped_restore_current_thread restore_thread
;
3830 intrusive_list_iterator
<inferior
> start
3831 = inferior_list
.iterator_to (*selected
);
3833 for (intrusive_list_iterator
<inferior
> it
= start
;
3834 it
!= inferior_list
.end ();
3837 inferior
*inf
= &*it
;
3839 if (inferior_matches (inf
) && do_wait (inf
))
3843 for (intrusive_list_iterator
<inferior
> it
= inferior_list
.begin ();
3847 inferior
*inf
= &*it
;
3849 if (inferior_matches (inf
) && do_wait (inf
))
3853 ecs
->ws
.set_ignore ();
3857 /* An event reported by wait_one. */
3859 struct wait_one_event
3861 /* The target the event came out of. */
3862 process_stratum_target
*target
;
3864 /* The PTID the event was for. */
3867 /* The waitstatus. */
3868 target_waitstatus ws
;
3871 static bool handle_one (const wait_one_event
&event
);
3873 /* Prepare and stabilize the inferior for detaching it. E.g.,
3874 detaching while a thread is displaced stepping is a recipe for
3875 crashing it, as nothing would readjust the PC out of the scratch
3879 prepare_for_detach (void)
3881 struct inferior
*inf
= current_inferior ();
3882 ptid_t pid_ptid
= ptid_t (inf
->pid
);
3883 scoped_restore_current_thread restore_thread
;
3885 scoped_restore restore_detaching
= make_scoped_restore (&inf
->detaching
, true);
3887 /* Remove all threads of INF from the global step-over chain. We
3888 want to stop any ongoing step-over, not start any new one. */
3889 thread_step_over_list_safe_range range
3890 = make_thread_step_over_list_safe_range (global_thread_step_over_list
);
3892 for (thread_info
*tp
: range
)
3895 infrun_debug_printf ("removing thread %s from global step over chain",
3896 tp
->ptid
.to_string ().c_str ());
3897 global_thread_step_over_chain_remove (tp
);
3900 /* If we were already in the middle of an inline step-over, and the
3901 thread stepping belongs to the inferior we're detaching, we need
3902 to restart the threads of other inferiors. */
3903 if (step_over_info
.thread
!= -1)
3905 infrun_debug_printf ("inline step-over in-process while detaching");
3907 thread_info
*thr
= find_thread_global_id (step_over_info
.thread
);
3908 if (thr
->inf
== inf
)
3910 /* Since we removed threads of INF from the step-over chain,
3911 we know this won't start a step-over for INF. */
3912 clear_step_over_info ();
3914 if (target_is_non_stop_p ())
3916 /* Start a new step-over in another thread if there's
3917 one that needs it. */
3920 /* Restart all other threads (except the
3921 previously-stepping thread, since that one is still
3923 if (!step_over_info_valid_p ())
3924 restart_threads (thr
);
3929 if (displaced_step_in_progress (inf
))
3931 infrun_debug_printf ("displaced-stepping in-process while detaching");
3933 /* Stop threads currently displaced stepping, aborting it. */
3935 for (thread_info
*thr
: inf
->non_exited_threads ())
3937 if (thr
->displaced_step_state
.in_progress ())
3939 if (thr
->executing ())
3941 if (!thr
->stop_requested
)
3943 target_stop (thr
->ptid
);
3944 thr
->stop_requested
= true;
3948 thr
->set_resumed (false);
3952 while (displaced_step_in_progress (inf
))
3954 wait_one_event event
;
3956 event
.target
= inf
->process_target ();
3957 event
.ptid
= do_target_wait_1 (inf
, pid_ptid
, &event
.ws
, 0);
3960 print_target_wait_results (pid_ptid
, event
.ptid
, event
.ws
);
3965 /* It's OK to leave some of the threads of INF stopped, since
3966 they'll be detached shortly. */
3970 /* Wait for control to return from inferior to debugger.
3972 If inferior gets a signal, we may decide to start it up again
3973 instead of returning. That is why there is a loop in this function.
3974 When this function actually returns it means the inferior
3975 should be left stopped and GDB should read more commands. */
3978 wait_for_inferior (inferior
*inf
)
3980 infrun_debug_printf ("wait_for_inferior ()");
3982 SCOPE_EXIT
{ delete_just_stopped_threads_infrun_breakpoints (); };
3984 /* If an error happens while handling the event, propagate GDB's
3985 knowledge of the executing state to the frontend/user running
3987 scoped_finish_thread_state finish_state
3988 (inf
->process_target (), minus_one_ptid
);
3992 struct execution_control_state ecss
;
3993 struct execution_control_state
*ecs
= &ecss
;
3995 overlay_cache_invalid
= 1;
3997 /* Flush target cache before starting to handle each event.
3998 Target was running and cache could be stale. This is just a
3999 heuristic. Running threads may modify target memory, but we
4000 don't get any event. */
4001 target_dcache_invalidate ();
4003 ecs
->ptid
= do_target_wait_1 (inf
, minus_one_ptid
, &ecs
->ws
, 0);
4004 ecs
->target
= inf
->process_target ();
4007 print_target_wait_results (minus_one_ptid
, ecs
->ptid
, ecs
->ws
);
4009 /* Now figure out what to do with the result of the result. */
4010 handle_inferior_event (ecs
);
4012 if (!ecs
->wait_some_more
)
4016 /* No error, don't finish the state yet. */
4017 finish_state
.release ();
4020 /* Cleanup that reinstalls the readline callback handler, if the
4021 target is running in the background. If while handling the target
4022 event something triggered a secondary prompt, like e.g., a
4023 pagination prompt, we'll have removed the callback handler (see
4024 gdb_readline_wrapper_line). Need to do this as we go back to the
4025 event loop, ready to process further input. Note this has no
4026 effect if the handler hasn't actually been removed, because calling
4027 rl_callback_handler_install resets the line buffer, thus losing
4031 reinstall_readline_callback_handler_cleanup ()
4033 struct ui
*ui
= current_ui
;
4037 /* We're not going back to the top level event loop yet. Don't
4038 install the readline callback, as it'd prep the terminal,
4039 readline-style (raw, noecho) (e.g., --batch). We'll install
4040 it the next time the prompt is displayed, when we're ready
4045 if (ui
->command_editing
&& ui
->prompt_state
!= PROMPT_BLOCKED
)
4046 gdb_rl_callback_handler_reinstall ();
4049 /* Clean up the FSMs of threads that are now stopped. In non-stop,
4050 that's just the event thread. In all-stop, that's all threads. */
4053 clean_up_just_stopped_threads_fsms (struct execution_control_state
*ecs
)
4055 if (ecs
->event_thread
!= nullptr
4056 && ecs
->event_thread
->thread_fsm () != nullptr)
4057 ecs
->event_thread
->thread_fsm ()->clean_up (ecs
->event_thread
);
4061 for (thread_info
*thr
: all_non_exited_threads ())
4063 if (thr
->thread_fsm () == nullptr)
4065 if (thr
== ecs
->event_thread
)
4068 switch_to_thread (thr
);
4069 thr
->thread_fsm ()->clean_up (thr
);
4072 if (ecs
->event_thread
!= nullptr)
4073 switch_to_thread (ecs
->event_thread
);
4077 /* Helper for all_uis_check_sync_execution_done that works on the
4081 check_curr_ui_sync_execution_done (void)
4083 struct ui
*ui
= current_ui
;
4085 if (ui
->prompt_state
== PROMPT_NEEDED
4087 && !gdb_in_secondary_prompt_p (ui
))
4089 target_terminal::ours ();
4090 gdb::observers::sync_execution_done
.notify ();
4091 ui_register_input_event_handler (ui
);
4098 all_uis_check_sync_execution_done (void)
4100 SWITCH_THRU_ALL_UIS ()
4102 check_curr_ui_sync_execution_done ();
4109 all_uis_on_sync_execution_starting (void)
4111 SWITCH_THRU_ALL_UIS ()
4113 if (current_ui
->prompt_state
== PROMPT_NEEDED
)
4114 async_disable_stdin ();
4118 /* Asynchronous version of wait_for_inferior. It is called by the
4119 event loop whenever a change of state is detected on the file
4120 descriptor corresponding to the target. It can be called more than
4121 once to complete a single execution command. In such cases we need
4122 to keep the state in a global variable ECSS. If it is the last time
4123 that this function is called for a single execution command, then
4124 report to the user that the inferior has stopped, and do the
4125 necessary cleanups. */
4128 fetch_inferior_event ()
4130 INFRUN_SCOPED_DEBUG_ENTER_EXIT
;
4132 struct execution_control_state ecss
;
4133 struct execution_control_state
*ecs
= &ecss
;
4136 /* Events are always processed with the main UI as current UI. This
4137 way, warnings, debug output, etc. are always consistently sent to
4138 the main console. */
4139 scoped_restore save_ui
= make_scoped_restore (¤t_ui
, main_ui
);
4141 /* Temporarily disable pagination. Otherwise, the user would be
4142 given an option to press 'q' to quit, which would cause an early
4143 exit and could leave GDB in a half-baked state. */
4144 scoped_restore save_pagination
4145 = make_scoped_restore (&pagination_enabled
, false);
4147 /* End up with readline processing input, if necessary. */
4149 SCOPE_EXIT
{ reinstall_readline_callback_handler_cleanup (); };
4151 /* We're handling a live event, so make sure we're doing live
4152 debugging. If we're looking at traceframes while the target is
4153 running, we're going to need to get back to that mode after
4154 handling the event. */
4155 gdb::optional
<scoped_restore_current_traceframe
> maybe_restore_traceframe
;
4158 maybe_restore_traceframe
.emplace ();
4159 set_current_traceframe (-1);
4162 /* The user/frontend should not notice a thread switch due to
4163 internal events. Make sure we revert to the user selected
4164 thread and frame after handling the event and running any
4165 breakpoint commands. */
4166 scoped_restore_current_thread restore_thread
;
4168 overlay_cache_invalid
= 1;
4169 /* Flush target cache before starting to handle each event. Target
4170 was running and cache could be stale. This is just a heuristic.
4171 Running threads may modify target memory, but we don't get any
4173 target_dcache_invalidate ();
4175 scoped_restore save_exec_dir
4176 = make_scoped_restore (&execution_direction
,
4177 target_execution_direction ());
4179 /* Allow targets to pause their resumed threads while we handle
4181 scoped_disable_commit_resumed
disable_commit_resumed ("handling event");
4183 if (!do_target_wait (ecs
, TARGET_WNOHANG
))
4185 infrun_debug_printf ("do_target_wait returned no event");
4186 disable_commit_resumed
.reset_and_commit ();
4190 gdb_assert (ecs
->ws
.kind () != TARGET_WAITKIND_IGNORE
);
4192 /* Switch to the target that generated the event, so we can do
4194 switch_to_target_no_thread (ecs
->target
);
4197 print_target_wait_results (minus_one_ptid
, ecs
->ptid
, ecs
->ws
);
4199 /* If an error happens while handling the event, propagate GDB's
4200 knowledge of the executing state to the frontend/user running
4202 ptid_t finish_ptid
= !target_is_non_stop_p () ? minus_one_ptid
: ecs
->ptid
;
4203 scoped_finish_thread_state
finish_state (ecs
->target
, finish_ptid
);
4205 /* Get executed before scoped_restore_current_thread above to apply
4206 still for the thread which has thrown the exception. */
4207 auto defer_bpstat_clear
4208 = make_scope_exit (bpstat_clear_actions
);
4209 auto defer_delete_threads
4210 = make_scope_exit (delete_just_stopped_threads_infrun_breakpoints
);
4212 /* Now figure out what to do with the result of the result. */
4213 handle_inferior_event (ecs
);
4215 if (!ecs
->wait_some_more
)
4217 struct inferior
*inf
= find_inferior_ptid (ecs
->target
, ecs
->ptid
);
4218 bool should_stop
= true;
4219 struct thread_info
*thr
= ecs
->event_thread
;
4221 delete_just_stopped_threads_infrun_breakpoints ();
4223 if (thr
!= nullptr && thr
->thread_fsm () != nullptr)
4224 should_stop
= thr
->thread_fsm ()->should_stop (thr
);
4232 bool should_notify_stop
= true;
4235 clean_up_just_stopped_threads_fsms (ecs
);
4237 if (thr
!= nullptr && thr
->thread_fsm () != nullptr)
4239 = thr
->thread_fsm ()->should_notify_stop ();
4241 if (should_notify_stop
)
4243 /* We may not find an inferior if this was a process exit. */
4244 if (inf
== NULL
|| inf
->control
.stop_soon
== NO_STOP_QUIETLY
)
4245 proceeded
= normal_stop ();
4250 inferior_event_handler (INF_EXEC_COMPLETE
);
4254 /* If we got a TARGET_WAITKIND_NO_RESUMED event, then the
4255 previously selected thread is gone. We have two
4256 choices - switch to no thread selected, or restore the
4257 previously selected thread (now exited). We chose the
4258 later, just because that's what GDB used to do. After
4259 this, "info threads" says "The current thread <Thread
4260 ID 2> has terminated." instead of "No thread
4264 && ecs
->ws
.kind () != TARGET_WAITKIND_NO_RESUMED
)
4265 restore_thread
.dont_restore ();
4269 defer_delete_threads
.release ();
4270 defer_bpstat_clear
.release ();
4272 /* No error, don't finish the thread states yet. */
4273 finish_state
.release ();
4275 disable_commit_resumed
.reset_and_commit ();
4277 /* This scope is used to ensure that readline callbacks are
4278 reinstalled here. */
4281 /* If a UI was in sync execution mode, and now isn't, restore its
4282 prompt (a synchronous execution command has finished, and we're
4283 ready for input). */
4284 all_uis_check_sync_execution_done ();
4287 && exec_done_display_p
4288 && (inferior_ptid
== null_ptid
4289 || inferior_thread ()->state
!= THREAD_RUNNING
))
4290 gdb_printf (_("completed.\n"));
4296 set_step_info (thread_info
*tp
, struct frame_info
*frame
,
4297 struct symtab_and_line sal
)
4299 /* This can be removed once this function no longer implicitly relies on the
4300 inferior_ptid value. */
4301 gdb_assert (inferior_ptid
== tp
->ptid
);
4303 tp
->control
.step_frame_id
= get_frame_id (frame
);
4304 tp
->control
.step_stack_frame_id
= get_stack_frame_id (frame
);
4306 tp
->current_symtab
= sal
.symtab
;
4307 tp
->current_line
= sal
.line
;
4310 ("symtab = %s, line = %d, step_frame_id = %s, step_stack_frame_id = %s",
4311 tp
->current_symtab
!= nullptr ? tp
->current_symtab
->filename
: "<null>",
4313 tp
->control
.step_frame_id
.to_string ().c_str (),
4314 tp
->control
.step_stack_frame_id
.to_string ().c_str ());
4317 /* Clear context switchable stepping state. */
4320 init_thread_stepping_state (struct thread_info
*tss
)
4322 tss
->stepped_breakpoint
= 0;
4323 tss
->stepping_over_breakpoint
= 0;
4324 tss
->stepping_over_watchpoint
= 0;
4325 tss
->step_after_step_resume_breakpoint
= 0;
4331 set_last_target_status (process_stratum_target
*target
, ptid_t ptid
,
4332 const target_waitstatus
&status
)
4334 target_last_proc_target
= target
;
4335 target_last_wait_ptid
= ptid
;
4336 target_last_waitstatus
= status
;
4342 get_last_target_status (process_stratum_target
**target
, ptid_t
*ptid
,
4343 target_waitstatus
*status
)
4345 if (target
!= nullptr)
4346 *target
= target_last_proc_target
;
4347 if (ptid
!= nullptr)
4348 *ptid
= target_last_wait_ptid
;
4349 if (status
!= nullptr)
4350 *status
= target_last_waitstatus
;
4356 nullify_last_target_wait_ptid (void)
4358 target_last_proc_target
= nullptr;
4359 target_last_wait_ptid
= minus_one_ptid
;
4360 target_last_waitstatus
= {};
4363 /* Switch thread contexts. */
4366 context_switch (execution_control_state
*ecs
)
4368 if (ecs
->ptid
!= inferior_ptid
4369 && (inferior_ptid
== null_ptid
4370 || ecs
->event_thread
!= inferior_thread ()))
4372 infrun_debug_printf ("Switching context from %s to %s",
4373 inferior_ptid
.to_string ().c_str (),
4374 ecs
->ptid
.to_string ().c_str ());
4377 switch_to_thread (ecs
->event_thread
);
4380 /* If the target can't tell whether we've hit breakpoints
4381 (target_supports_stopped_by_sw_breakpoint), and we got a SIGTRAP,
4382 check whether that could have been caused by a breakpoint. If so,
4383 adjust the PC, per gdbarch_decr_pc_after_break. */
4386 adjust_pc_after_break (struct thread_info
*thread
,
4387 const target_waitstatus
&ws
)
4389 struct regcache
*regcache
;
4390 struct gdbarch
*gdbarch
;
4391 CORE_ADDR breakpoint_pc
, decr_pc
;
4393 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
4394 we aren't, just return.
4396 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
4397 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
4398 implemented by software breakpoints should be handled through the normal
4401 NOTE drow/2004-01-31: On some targets, breakpoints may generate
4402 different signals (SIGILL or SIGEMT for instance), but it is less
4403 clear where the PC is pointing afterwards. It may not match
4404 gdbarch_decr_pc_after_break. I don't know any specific target that
4405 generates these signals at breakpoints (the code has been in GDB since at
4406 least 1992) so I can not guess how to handle them here.
4408 In earlier versions of GDB, a target with
4409 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
4410 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
4411 target with both of these set in GDB history, and it seems unlikely to be
4412 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
4414 if (ws
.kind () != TARGET_WAITKIND_STOPPED
)
4417 if (ws
.sig () != GDB_SIGNAL_TRAP
)
4420 /* In reverse execution, when a breakpoint is hit, the instruction
4421 under it has already been de-executed. The reported PC always
4422 points at the breakpoint address, so adjusting it further would
4423 be wrong. E.g., consider this case on a decr_pc_after_break == 1
4426 B1 0x08000000 : INSN1
4427 B2 0x08000001 : INSN2
4429 PC -> 0x08000003 : INSN4
4431 Say you're stopped at 0x08000003 as above. Reverse continuing
4432 from that point should hit B2 as below. Reading the PC when the
4433 SIGTRAP is reported should read 0x08000001 and INSN2 should have
4434 been de-executed already.
4436 B1 0x08000000 : INSN1
4437 B2 PC -> 0x08000001 : INSN2
4441 We can't apply the same logic as for forward execution, because
4442 we would wrongly adjust the PC to 0x08000000, since there's a
4443 breakpoint at PC - 1. We'd then report a hit on B1, although
4444 INSN1 hadn't been de-executed yet. Doing nothing is the correct
4446 if (execution_direction
== EXEC_REVERSE
)
4449 /* If the target can tell whether the thread hit a SW breakpoint,
4450 trust it. Targets that can tell also adjust the PC
4452 if (target_supports_stopped_by_sw_breakpoint ())
4455 /* Note that relying on whether a breakpoint is planted in memory to
4456 determine this can fail. E.g,. the breakpoint could have been
4457 removed since. Or the thread could have been told to step an
4458 instruction the size of a breakpoint instruction, and only
4459 _after_ was a breakpoint inserted at its address. */
4461 /* If this target does not decrement the PC after breakpoints, then
4462 we have nothing to do. */
4463 regcache
= get_thread_regcache (thread
);
4464 gdbarch
= regcache
->arch ();
4466 decr_pc
= gdbarch_decr_pc_after_break (gdbarch
);
4470 const address_space
*aspace
= regcache
->aspace ();
4472 /* Find the location where (if we've hit a breakpoint) the
4473 breakpoint would be. */
4474 breakpoint_pc
= regcache_read_pc (regcache
) - decr_pc
;
4476 /* If the target can't tell whether a software breakpoint triggered,
4477 fallback to figuring it out based on breakpoints we think were
4478 inserted in the target, and on whether the thread was stepped or
4481 /* Check whether there actually is a software breakpoint inserted at
4484 If in non-stop mode, a race condition is possible where we've
4485 removed a breakpoint, but stop events for that breakpoint were
4486 already queued and arrive later. To suppress those spurious
4487 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
4488 and retire them after a number of stop events are reported. Note
4489 this is an heuristic and can thus get confused. The real fix is
4490 to get the "stopped by SW BP and needs adjustment" info out of
4491 the target/kernel (and thus never reach here; see above). */
4492 if (software_breakpoint_inserted_here_p (aspace
, breakpoint_pc
)
4493 || (target_is_non_stop_p ()
4494 && moribund_breakpoint_here_p (aspace
, breakpoint_pc
)))
4496 gdb::optional
<scoped_restore_tmpl
<int>> restore_operation_disable
;
4498 if (record_full_is_used ())
4499 restore_operation_disable
.emplace
4500 (record_full_gdb_operation_disable_set ());
4502 /* When using hardware single-step, a SIGTRAP is reported for both
4503 a completed single-step and a software breakpoint. Need to
4504 differentiate between the two, as the latter needs adjusting
4505 but the former does not.
4507 The SIGTRAP can be due to a completed hardware single-step only if
4508 - we didn't insert software single-step breakpoints
4509 - this thread is currently being stepped
4511 If any of these events did not occur, we must have stopped due
4512 to hitting a software breakpoint, and have to back up to the
4515 As a special case, we could have hardware single-stepped a
4516 software breakpoint. In this case (prev_pc == breakpoint_pc),
4517 we also need to back up to the breakpoint address. */
4519 if (thread_has_single_step_breakpoints_set (thread
)
4520 || !currently_stepping (thread
)
4521 || (thread
->stepped_breakpoint
4522 && thread
->prev_pc
== breakpoint_pc
))
4523 regcache_write_pc (regcache
, breakpoint_pc
);
4528 stepped_in_from (struct frame_info
*frame
, struct frame_id step_frame_id
)
4530 for (frame
= get_prev_frame (frame
);
4532 frame
= get_prev_frame (frame
))
4534 if (frame_id_eq (get_frame_id (frame
), step_frame_id
))
4537 if (get_frame_type (frame
) != INLINE_FRAME
)
4544 /* Look for an inline frame that is marked for skip.
4545 If PREV_FRAME is TRUE start at the previous frame,
4546 otherwise start at the current frame. Stop at the
4547 first non-inline frame, or at the frame where the
4551 inline_frame_is_marked_for_skip (bool prev_frame
, struct thread_info
*tp
)
4553 struct frame_info
*frame
= get_current_frame ();
4556 frame
= get_prev_frame (frame
);
4558 for (; frame
!= NULL
; frame
= get_prev_frame (frame
))
4560 const char *fn
= NULL
;
4561 symtab_and_line sal
;
4564 if (frame_id_eq (get_frame_id (frame
), tp
->control
.step_frame_id
))
4566 if (get_frame_type (frame
) != INLINE_FRAME
)
4569 sal
= find_frame_sal (frame
);
4570 sym
= get_frame_function (frame
);
4573 fn
= sym
->print_name ();
4576 && function_name_is_marked_for_skip (fn
, sal
))
4583 /* If the event thread has the stop requested flag set, pretend it
4584 stopped for a GDB_SIGNAL_0 (i.e., as if it stopped due to
4588 handle_stop_requested (struct execution_control_state
*ecs
)
4590 if (ecs
->event_thread
->stop_requested
)
4592 ecs
->ws
.set_stopped (GDB_SIGNAL_0
);
4593 handle_signal_stop (ecs
);
4599 /* Auxiliary function that handles syscall entry/return events.
4600 It returns true if the inferior should keep going (and GDB
4601 should ignore the event), or false if the event deserves to be
4605 handle_syscall_event (struct execution_control_state
*ecs
)
4607 struct regcache
*regcache
;
4610 context_switch (ecs
);
4612 regcache
= get_thread_regcache (ecs
->event_thread
);
4613 syscall_number
= ecs
->ws
.syscall_number ();
4614 ecs
->event_thread
->set_stop_pc (regcache_read_pc (regcache
));
4616 if (catch_syscall_enabled () > 0
4617 && catching_syscall_number (syscall_number
))
4619 infrun_debug_printf ("syscall number=%d", syscall_number
);
4621 ecs
->event_thread
->control
.stop_bpstat
4622 = bpstat_stop_status_nowatch (regcache
->aspace (),
4623 ecs
->event_thread
->stop_pc (),
4624 ecs
->event_thread
, ecs
->ws
);
4626 if (handle_stop_requested (ecs
))
4629 if (bpstat_causes_stop (ecs
->event_thread
->control
.stop_bpstat
))
4631 /* Catchpoint hit. */
4636 if (handle_stop_requested (ecs
))
4639 /* If no catchpoint triggered for this, then keep going. */
4645 /* Lazily fill in the execution_control_state's stop_func_* fields. */
4648 fill_in_stop_func (struct gdbarch
*gdbarch
,
4649 struct execution_control_state
*ecs
)
4651 if (!ecs
->stop_func_filled_in
)
4654 const general_symbol_info
*gsi
;
4656 /* Don't care about return value; stop_func_start and stop_func_name
4657 will both be 0 if it doesn't work. */
4658 find_pc_partial_function_sym (ecs
->event_thread
->stop_pc (),
4660 &ecs
->stop_func_start
,
4661 &ecs
->stop_func_end
,
4663 ecs
->stop_func_name
= gsi
== nullptr ? nullptr : gsi
->print_name ();
4665 /* The call to find_pc_partial_function, above, will set
4666 stop_func_start and stop_func_end to the start and end
4667 of the range containing the stop pc. If this range
4668 contains the entry pc for the block (which is always the
4669 case for contiguous blocks), advance stop_func_start past
4670 the function's start offset and entrypoint. Note that
4671 stop_func_start is NOT advanced when in a range of a
4672 non-contiguous block that does not contain the entry pc. */
4673 if (block
!= nullptr
4674 && ecs
->stop_func_start
<= BLOCK_ENTRY_PC (block
)
4675 && BLOCK_ENTRY_PC (block
) < ecs
->stop_func_end
)
4677 ecs
->stop_func_start
4678 += gdbarch_deprecated_function_start_offset (gdbarch
);
4680 if (gdbarch_skip_entrypoint_p (gdbarch
))
4681 ecs
->stop_func_start
4682 = gdbarch_skip_entrypoint (gdbarch
, ecs
->stop_func_start
);
4685 ecs
->stop_func_filled_in
= 1;
4690 /* Return the STOP_SOON field of the inferior pointed at by ECS. */
4692 static enum stop_kind
4693 get_inferior_stop_soon (execution_control_state
*ecs
)
4695 struct inferior
*inf
= find_inferior_ptid (ecs
->target
, ecs
->ptid
);
4697 gdb_assert (inf
!= NULL
);
4698 return inf
->control
.stop_soon
;
4701 /* Poll for one event out of the current target. Store the resulting
4702 waitstatus in WS, and return the event ptid. Does not block. */
4705 poll_one_curr_target (struct target_waitstatus
*ws
)
4709 overlay_cache_invalid
= 1;
4711 /* Flush target cache before starting to handle each event.
4712 Target was running and cache could be stale. This is just a
4713 heuristic. Running threads may modify target memory, but we
4714 don't get any event. */
4715 target_dcache_invalidate ();
4717 event_ptid
= target_wait (minus_one_ptid
, ws
, TARGET_WNOHANG
);
4720 print_target_wait_results (minus_one_ptid
, event_ptid
, *ws
);
4725 /* Wait for one event out of any target. */
4727 static wait_one_event
4732 for (inferior
*inf
: all_inferiors ())
4734 process_stratum_target
*target
= inf
->process_target ();
4736 || !target
->is_async_p ()
4737 || !target
->threads_executing
)
4740 switch_to_inferior_no_thread (inf
);
4742 wait_one_event event
;
4743 event
.target
= target
;
4744 event
.ptid
= poll_one_curr_target (&event
.ws
);
4746 if (event
.ws
.kind () == TARGET_WAITKIND_NO_RESUMED
)
4748 /* If nothing is resumed, remove the target from the
4752 else if (event
.ws
.kind () != TARGET_WAITKIND_IGNORE
)
4756 /* Block waiting for some event. */
4763 for (inferior
*inf
: all_inferiors ())
4765 process_stratum_target
*target
= inf
->process_target ();
4767 || !target
->is_async_p ()
4768 || !target
->threads_executing
)
4771 int fd
= target
->async_wait_fd ();
4772 FD_SET (fd
, &readfds
);
4779 /* No waitable targets left. All must be stopped. */
4780 target_waitstatus ws
;
4781 ws
.set_no_resumed ();
4782 return {NULL
, minus_one_ptid
, std::move (ws
)};
4787 int numfds
= interruptible_select (nfds
, &readfds
, 0, NULL
, 0);
4793 perror_with_name ("interruptible_select");
4798 /* Save the thread's event and stop reason to process it later. */
4801 save_waitstatus (struct thread_info
*tp
, const target_waitstatus
&ws
)
4803 infrun_debug_printf ("saving status %s for %s",
4804 ws
.to_string ().c_str (),
4805 tp
->ptid
.to_string ().c_str ());
4807 /* Record for later. */
4808 tp
->set_pending_waitstatus (ws
);
4810 if (ws
.kind () == TARGET_WAITKIND_STOPPED
4811 && ws
.sig () == GDB_SIGNAL_TRAP
)
4813 struct regcache
*regcache
= get_thread_regcache (tp
);
4814 const address_space
*aspace
= regcache
->aspace ();
4815 CORE_ADDR pc
= regcache_read_pc (regcache
);
4817 adjust_pc_after_break (tp
, tp
->pending_waitstatus ());
4819 scoped_restore_current_thread restore_thread
;
4820 switch_to_thread (tp
);
4822 if (target_stopped_by_watchpoint ())
4823 tp
->set_stop_reason (TARGET_STOPPED_BY_WATCHPOINT
);
4824 else if (target_supports_stopped_by_sw_breakpoint ()
4825 && target_stopped_by_sw_breakpoint ())
4826 tp
->set_stop_reason (TARGET_STOPPED_BY_SW_BREAKPOINT
);
4827 else if (target_supports_stopped_by_hw_breakpoint ()
4828 && target_stopped_by_hw_breakpoint ())
4829 tp
->set_stop_reason (TARGET_STOPPED_BY_HW_BREAKPOINT
);
4830 else if (!target_supports_stopped_by_hw_breakpoint ()
4831 && hardware_breakpoint_inserted_here_p (aspace
, pc
))
4832 tp
->set_stop_reason (TARGET_STOPPED_BY_HW_BREAKPOINT
);
4833 else if (!target_supports_stopped_by_sw_breakpoint ()
4834 && software_breakpoint_inserted_here_p (aspace
, pc
))
4835 tp
->set_stop_reason (TARGET_STOPPED_BY_SW_BREAKPOINT
);
4836 else if (!thread_has_single_step_breakpoints_set (tp
)
4837 && currently_stepping (tp
))
4838 tp
->set_stop_reason (TARGET_STOPPED_BY_SINGLE_STEP
);
4842 /* Mark the non-executing threads accordingly. In all-stop, all
4843 threads of all processes are stopped when we get any event
4844 reported. In non-stop mode, only the event thread stops. */
4847 mark_non_executing_threads (process_stratum_target
*target
,
4849 const target_waitstatus
&ws
)
4853 if (!target_is_non_stop_p ())
4854 mark_ptid
= minus_one_ptid
;
4855 else if (ws
.kind () == TARGET_WAITKIND_SIGNALLED
4856 || ws
.kind () == TARGET_WAITKIND_EXITED
)
4858 /* If we're handling a process exit in non-stop mode, even
4859 though threads haven't been deleted yet, one would think
4860 that there is nothing to do, as threads of the dead process
4861 will be soon deleted, and threads of any other process were
4862 left running. However, on some targets, threads survive a
4863 process exit event. E.g., for the "checkpoint" command,
4864 when the current checkpoint/fork exits, linux-fork.c
4865 automatically switches to another fork from within
4866 target_mourn_inferior, by associating the same
4867 inferior/thread to another fork. We haven't mourned yet at
4868 this point, but we must mark any threads left in the
4869 process as not-executing so that finish_thread_state marks
4870 them stopped (in the user's perspective) if/when we present
4871 the stop to the user. */
4872 mark_ptid
= ptid_t (event_ptid
.pid ());
4875 mark_ptid
= event_ptid
;
4877 set_executing (target
, mark_ptid
, false);
4879 /* Likewise the resumed flag. */
4880 set_resumed (target
, mark_ptid
, false);
4883 /* Handle one event after stopping threads. If the eventing thread
4884 reports back any interesting event, we leave it pending. If the
4885 eventing thread was in the middle of a displaced step, we
4886 cancel/finish it, and unless the thread's inferior is being
4887 detached, put the thread back in the step-over chain. Returns true
4888 if there are no resumed threads left in the target (thus there's no
4889 point in waiting further), false otherwise. */
4892 handle_one (const wait_one_event
&event
)
4895 ("%s %s", event
.ws
.to_string ().c_str (),
4896 event
.ptid
.to_string ().c_str ());
4898 if (event
.ws
.kind () == TARGET_WAITKIND_NO_RESUMED
)
4900 /* All resumed threads exited. */
4903 else if (event
.ws
.kind () == TARGET_WAITKIND_THREAD_EXITED
4904 || event
.ws
.kind () == TARGET_WAITKIND_EXITED
4905 || event
.ws
.kind () == TARGET_WAITKIND_SIGNALLED
)
4907 /* One thread/process exited/signalled. */
4909 thread_info
*t
= nullptr;
4911 /* The target may have reported just a pid. If so, try
4912 the first non-exited thread. */
4913 if (event
.ptid
.is_pid ())
4915 int pid
= event
.ptid
.pid ();
4916 inferior
*inf
= find_inferior_pid (event
.target
, pid
);
4917 for (thread_info
*tp
: inf
->non_exited_threads ())
4923 /* If there is no available thread, the event would
4924 have to be appended to a per-inferior event list,
4925 which does not exist (and if it did, we'd have
4926 to adjust run control command to be able to
4927 resume such an inferior). We assert here instead
4928 of going into an infinite loop. */
4929 gdb_assert (t
!= nullptr);
4932 ("using %s", t
->ptid
.to_string ().c_str ());
4936 t
= find_thread_ptid (event
.target
, event
.ptid
);
4937 /* Check if this is the first time we see this thread.
4938 Don't bother adding if it individually exited. */
4940 && event
.ws
.kind () != TARGET_WAITKIND_THREAD_EXITED
)
4941 t
= add_thread (event
.target
, event
.ptid
);
4946 /* Set the threads as non-executing to avoid
4947 another stop attempt on them. */
4948 switch_to_thread_no_regs (t
);
4949 mark_non_executing_threads (event
.target
, event
.ptid
,
4951 save_waitstatus (t
, event
.ws
);
4952 t
->stop_requested
= false;
4957 thread_info
*t
= find_thread_ptid (event
.target
, event
.ptid
);
4959 t
= add_thread (event
.target
, event
.ptid
);
4961 t
->stop_requested
= 0;
4962 t
->set_executing (false);
4963 t
->set_resumed (false);
4964 t
->control
.may_range_step
= 0;
4966 /* This may be the first time we see the inferior report
4968 if (t
->inf
->needs_setup
)
4970 switch_to_thread_no_regs (t
);
4974 if (event
.ws
.kind () == TARGET_WAITKIND_STOPPED
4975 && event
.ws
.sig () == GDB_SIGNAL_0
)
4977 /* We caught the event that we intended to catch, so
4978 there's no event to save as pending. */
4980 if (displaced_step_finish (t
, GDB_SIGNAL_0
)
4981 == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED
)
4983 /* Add it back to the step-over queue. */
4985 ("displaced-step of %s canceled",
4986 t
->ptid
.to_string ().c_str ());
4988 t
->control
.trap_expected
= 0;
4989 if (!t
->inf
->detaching
)
4990 global_thread_step_over_chain_enqueue (t
);
4995 enum gdb_signal sig
;
4996 struct regcache
*regcache
;
4999 ("target_wait %s, saving status for %s",
5000 event
.ws
.to_string ().c_str (),
5001 t
->ptid
.to_string ().c_str ());
5003 /* Record for later. */
5004 save_waitstatus (t
, event
.ws
);
5006 sig
= (event
.ws
.kind () == TARGET_WAITKIND_STOPPED
5007 ? event
.ws
.sig () : GDB_SIGNAL_0
);
5009 if (displaced_step_finish (t
, sig
)
5010 == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED
)
5012 /* Add it back to the step-over queue. */
5013 t
->control
.trap_expected
= 0;
5014 if (!t
->inf
->detaching
)
5015 global_thread_step_over_chain_enqueue (t
);
5018 regcache
= get_thread_regcache (t
);
5019 t
->set_stop_pc (regcache_read_pc (regcache
));
5021 infrun_debug_printf ("saved stop_pc=%s for %s "
5022 "(currently_stepping=%d)",
5023 paddress (target_gdbarch (), t
->stop_pc ()),
5024 t
->ptid
.to_string ().c_str (),
5025 currently_stepping (t
));
5035 stop_all_threads (const char *reason
, inferior
*inf
)
5037 /* We may need multiple passes to discover all threads. */
5041 gdb_assert (exists_non_stop_target ());
5043 INFRUN_SCOPED_DEBUG_START_END ("reason=%s, inf=%d", reason
,
5044 inf
!= nullptr ? inf
->num
: -1);
5046 scoped_restore_current_thread restore_thread
;
5048 /* Enable thread events on relevant targets. */
5049 for (auto *target
: all_non_exited_process_targets ())
5051 if (inf
!= nullptr && inf
->process_target () != target
)
5054 switch_to_target_no_thread (target
);
5055 target_thread_events (true);
5060 /* Disable thread events on relevant targets. */
5061 for (auto *target
: all_non_exited_process_targets ())
5063 if (inf
!= nullptr && inf
->process_target () != target
)
5066 switch_to_target_no_thread (target
);
5067 target_thread_events (false);
5070 /* Use debug_prefixed_printf directly to get a meaningful function
5073 debug_prefixed_printf ("infrun", "stop_all_threads", "done");
5076 /* Request threads to stop, and then wait for the stops. Because
5077 threads we already know about can spawn more threads while we're
5078 trying to stop them, and we only learn about new threads when we
5079 update the thread list, do this in a loop, and keep iterating
5080 until two passes find no threads that need to be stopped. */
5081 for (pass
= 0; pass
< 2; pass
++, iterations
++)
5083 infrun_debug_printf ("pass=%d, iterations=%d", pass
, iterations
);
5086 int waits_needed
= 0;
5088 for (auto *target
: all_non_exited_process_targets ())
5090 if (inf
!= nullptr && inf
->process_target () != target
)
5093 switch_to_target_no_thread (target
);
5094 update_thread_list ();
5097 /* Go through all threads looking for threads that we need
5098 to tell the target to stop. */
5099 for (thread_info
*t
: all_non_exited_threads ())
5101 if (inf
!= nullptr && t
->inf
!= inf
)
5104 /* For a single-target setting with an all-stop target,
5105 we would not even arrive here. For a multi-target
5106 setting, until GDB is able to handle a mixture of
5107 all-stop and non-stop targets, simply skip all-stop
5108 targets' threads. This should be fine due to the
5109 protection of 'check_multi_target_resumption'. */
5111 switch_to_thread_no_regs (t
);
5112 if (!target_is_non_stop_p ())
5115 if (t
->executing ())
5117 /* If already stopping, don't request a stop again.
5118 We just haven't seen the notification yet. */
5119 if (!t
->stop_requested
)
5121 infrun_debug_printf (" %s executing, need stop",
5122 t
->ptid
.to_string ().c_str ());
5123 target_stop (t
->ptid
);
5124 t
->stop_requested
= 1;
5128 infrun_debug_printf (" %s executing, already stopping",
5129 t
->ptid
.to_string ().c_str ());
5132 if (t
->stop_requested
)
5137 infrun_debug_printf (" %s not executing",
5138 t
->ptid
.to_string ().c_str ());
5140 /* The thread may be not executing, but still be
5141 resumed with a pending status to process. */
5142 t
->set_resumed (false);
5146 if (waits_needed
== 0)
5149 /* If we find new threads on the second iteration, restart
5150 over. We want to see two iterations in a row with all
5155 for (int i
= 0; i
< waits_needed
; i
++)
5157 wait_one_event event
= wait_one ();
5158 if (handle_one (event
))
5165 /* Handle a TARGET_WAITKIND_NO_RESUMED event. */
/* NOTE(review): this block is a mangled listing extraction -- original
   source lines are wrapped mid-expression, the listing's line numbers are
   fused into the text, and some lines (braces, short statements) were
   dropped.  The code text below is preserved byte-for-byte; only comments
   were added.  Confirm every hedged note against the upstream file.  */
5168 handle_no_resumed (struct execution_control_state
*ecs
)
/* When the target is async, first look at the UIs: if no UI has its
   prompt blocked (i.e. nothing is synchronously waiting for events),
   the event is simply ignored and we go back to waiting.  The lines
   that set the "any_sync" flag appear to have been dropped by the
   extraction -- TODO confirm upstream.  */
5170 if (target_can_async_p ())
5172 bool any_sync
= false;
5174 for (ui
*ui
: all_uis ())
5176 if (ui
->prompt_state
== PROMPT_BLOCKED
)
5184 /* There were no unwaited-for children left in the target, but,
5185 we're not synchronously waiting for events either. Just
5188 infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED (ignoring: bg)");
5189 prepare_to_wait (ecs
);
5194 /* Otherwise, if we were running a synchronous execution command, we
5195 may need to cancel it and give the user back the terminal.
5197 In non-stop mode, the target can't tell whether we've already
5198 consumed previous stop events, so it can end up sending us a
5199 no-resumed event like so:
5201 #0 - thread 1 is left stopped
5203 #1 - thread 2 is resumed and hits breakpoint
5204 -> TARGET_WAITKIND_STOPPED
5206 #2 - thread 3 is resumed and exits
5207 this is the last resumed thread, so
5208 -> TARGET_WAITKIND_NO_RESUMED
5210 #3 - gdb processes stop for thread 2 and decides to re-resume
5213 #4 - gdb processes the TARGET_WAITKIND_NO_RESUMED event.
5214 thread 2 is now resumed, so the event should be ignored.
5216 IOW, if the stop for thread 2 doesn't end a foreground command,
5217 then we need to ignore the following TARGET_WAITKIND_NO_RESUMED
5218 event. But it could be that the event meant that thread 2 itself
5219 (or whatever other thread was the last resumed thread) exited.
5221 To address this we refresh the thread list and check whether we
5222 have resumed threads _now_. In the example above, this removes
5223 thread 3 from the thread list. If thread 2 was re-resumed, we
5224 ignore this event. If we find no thread resumed, then we cancel
5225 the synchronous command and show "no unwaited-for " to the
5228 inferior
*curr_inf
= current_inferior ();
5230 scoped_restore_current_thread restore_thread
;
/* Refresh the thread list of every live process target, so the
   resumed-thread scan below sees current state.  */
5232 for (auto *target
: all_non_exited_process_targets ())
5234 switch_to_target_no_thread (target
);
5235 update_thread_list ();
5240 - the current target has no thread executing, and
5241 - the current inferior is native, and
5242 - the current inferior is the one which has the terminal, and
5245 then a Ctrl-C from this point on would remain stuck in the
5246 kernel, until a thread resumes and dequeues it. That would
5247 result in the GDB CLI not reacting to Ctrl-C, not able to
5248 interrupt the program. To address this, if the current inferior
5249 no longer has any thread executing, we give the terminal to some
5250 other inferior that has at least one thread executing. */
5251 bool swap_terminal
= true;
5253 /* Whether to ignore this TARGET_WAITKIND_NO_RESUMED event, or
5254 whether to report it to the user. */
5255 bool ignore_event
= false;
/* Single pass over all live threads: hand the terminal to the first
   executing thread of another inferior (swap_terminal), and decide
   whether any thread is still resumed (ignore_event).  */
5257 for (thread_info
*thread
: all_non_exited_threads ())
5259 if (swap_terminal
&& thread
->executing ())
5261 if (thread
->inf
!= curr_inf
)
5263 target_terminal::ours ();
5265 switch_to_thread (thread
);
5266 target_terminal::inferior ();
5268 swap_terminal
= false;
5271 if (!ignore_event
&& thread
->resumed ())
5273 /* Either there were no unwaited-for children left in the
5274 target at some point, but there are now, or some target
5275 other than the eventing one has unwaited-for children
5276 left. Just ignore. */
5277 infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED "
5278 "(ignoring: found resumed)");
5280 ignore_event
= true;
/* Presumably the loop breaks early once both decisions are made --
   the break statement was dropped by the extraction; confirm.  */
5283 if (ignore_event
&& !swap_terminal
)
5289 switch_to_inferior_no_thread (curr_inf
);
5290 prepare_to_wait (ecs
);
5294 /* Go ahead and report the event. */
5298 /* Given an execution control state that has been freshly filled in by
5299 an event from the inferior, figure out what it means and take
5302 The alternatives are:
5304 1) stop_waiting and return; to really stop and return to the
5307 2) keep_going and return; to wait for the next event (set
5308 ecs->event_thread->stepping_over_breakpoint to 1 to single step
/* NOTE(review): this function is reproduced from a mangled listing
   extraction -- original lines are wrapped mid-expression, the listing's
   line numbers are fused into the text, and lines containing only braces
   or short statements (break/return) were dropped.  The code text is
   kept byte-for-byte; only comments were added.  Control-flow details
   that depend on the dropped lines are hedged and must be confirmed
   against the upstream file.  */
5312 handle_inferior_event (struct execution_control_state
*ecs
)
5314 /* Make sure that all temporary struct value objects that were
5315 created during the handling of the event get deleted at the
5317 scoped_value_mark free_values
;
5319 infrun_debug_printf ("%s", ecs
->ws
.to_string ().c_str ());
5321 if (ecs
->ws
.kind () == TARGET_WAITKIND_IGNORE
)
5323 /* We had an event in the inferior, but we are not interested in
5324 handling it at this level. The lower layers have already
5325 done what needs to be done, if anything.
5327 One of the possible circumstances for this is when the
5328 inferior produces output for the console. The inferior has
5329 not stopped, and we are ignoring the event. Another possible
5330 circumstance is any event which the lower level knows will be
5331 reported multiple times without an intervening resume. */
5332 prepare_to_wait (ecs
);
5336 if (ecs
->ws
.kind () == TARGET_WAITKIND_THREAD_EXITED
)
5338 prepare_to_wait (ecs
);
5342 if (ecs
->ws
.kind () == TARGET_WAITKIND_NO_RESUMED
5343 && handle_no_resumed (ecs
))
5346 /* Cache the last target/ptid/waitstatus. */
5347 set_last_target_status (ecs
->target
, ecs
->ptid
, ecs
->ws
);
5349 /* Always clear state belonging to the previous time we stopped. */
5350 stop_stack_dummy
= STOP_NONE
;
5352 if (ecs
->ws
.kind () == TARGET_WAITKIND_NO_RESUMED
)
5354 /* No unwaited-for children left. IOW, all resumed children
5356 stop_print_frame
= false;
/* For all event kinds except whole-process exit/signal, resolve (or
   create) the thread_info for the eventing ptid.  */
5361 if (ecs
->ws
.kind () != TARGET_WAITKIND_EXITED
5362 && ecs
->ws
.kind () != TARGET_WAITKIND_SIGNALLED
)
5364 ecs
->event_thread
= find_thread_ptid (ecs
->target
, ecs
->ptid
);
5365 /* If it's a new thread, add it to the thread database. */
5366 if (ecs
->event_thread
== NULL
)
5367 ecs
->event_thread
= add_thread (ecs
->target
, ecs
->ptid
);
5369 /* Disable range stepping. If the next step request could use a
5370 range, this will be end up re-enabled then. */
5371 ecs
->event_thread
->control
.may_range_step
= 0;
5374 /* Dependent on valid ECS->EVENT_THREAD. */
5375 adjust_pc_after_break (ecs
->event_thread
, ecs
->ws
);
5377 /* Dependent on the current PC value modified by adjust_pc_after_break. */
5378 reinit_frame_cache ();
5380 breakpoint_retire_moribund ();
5382 /* First, distinguish signals caused by the debugger from signals
5383 that have to do with the program's own actions. Note that
5384 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
5385 on the operating system version. Here we detect when a SIGILL or
5386 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
5387 something similar for SIGSEGV, since a SIGSEGV will be generated
5388 when we're trying to execute a breakpoint instruction on a
5389 non-executable stack. This happens for call dummy breakpoints
5390 for architectures like SPARC that place call dummies on the
5392 if (ecs
->ws
.kind () == TARGET_WAITKIND_STOPPED
5393 && (ecs
->ws
.sig () == GDB_SIGNAL_ILL
5394 || ecs
->ws
.sig () == GDB_SIGNAL_SEGV
5395 || ecs
->ws
.sig () == GDB_SIGNAL_EMT
))
5397 struct regcache
*regcache
= get_thread_regcache (ecs
->event_thread
);
5399 if (breakpoint_inserted_here_p (regcache
->aspace (),
5400 regcache_read_pc (regcache
)))
5402 infrun_debug_printf ("Treating signal as SIGTRAP");
5403 ecs
->ws
.set_stopped (GDB_SIGNAL_TRAP
);
5407 mark_non_executing_threads (ecs
->target
, ecs
->ptid
, ecs
->ws
);
/* Main dispatch on the wait-status kind.  Each case either stops
   (via process_event_stop_test / handle_signal_stop) or keeps going
   (resume + prepare_to_wait); the break statements between cases were
   dropped by the extraction.  */
5409 switch (ecs
->ws
.kind ())
5411 case TARGET_WAITKIND_LOADED
:
5413 context_switch (ecs
);
5414 /* Ignore gracefully during startup of the inferior, as it might
5415 be the shell which has just loaded some objects, otherwise
5416 add the symbols for the newly loaded objects. Also ignore at
5417 the beginning of an attach or remote session; we will query
5418 the full list of libraries once the connection is
5421 stop_kind stop_soon
= get_inferior_stop_soon (ecs
);
5422 if (stop_soon
== NO_STOP_QUIETLY
)
5424 struct regcache
*regcache
;
5426 regcache
= get_thread_regcache (ecs
->event_thread
);
5428 handle_solib_event ();
5430 ecs
->event_thread
->set_stop_pc (regcache_read_pc (regcache
));
5431 ecs
->event_thread
->control
.stop_bpstat
5432 = bpstat_stop_status_nowatch (regcache
->aspace (),
5433 ecs
->event_thread
->stop_pc (),
5434 ecs
->event_thread
, ecs
->ws
);
5436 if (handle_stop_requested (ecs
))
5439 if (bpstat_causes_stop (ecs
->event_thread
->control
.stop_bpstat
))
5441 /* A catchpoint triggered. */
5442 process_event_stop_test (ecs
);
5446 /* If requested, stop when the dynamic linker notifies
5447 gdb of events. This allows the user to get control
5448 and place breakpoints in initializer routines for
5449 dynamically loaded objects (among other things). */
5450 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
5451 if (stop_on_solib_events
)
5453 /* Make sure we print "Stopped due to solib-event" in
5455 stop_print_frame
= true;
5462 /* If we are skipping through a shell, or through shared library
5463 loading that we aren't interested in, resume the program. If
5464 we're running the program normally, also resume. */
5465 if (stop_soon
== STOP_QUIETLY
|| stop_soon
== NO_STOP_QUIETLY
)
5467 /* Loading of shared libraries might have changed breakpoint
5468 addresses. Make sure new breakpoints are inserted. */
5469 if (stop_soon
== NO_STOP_QUIETLY
)
5470 insert_breakpoints ();
5471 resume (GDB_SIGNAL_0
);
5472 prepare_to_wait (ecs
);
5476 /* But stop if we're attaching or setting up a remote
5478 if (stop_soon
== STOP_QUIETLY_NO_SIGSTOP
5479 || stop_soon
== STOP_QUIETLY_REMOTE
)
5481 infrun_debug_printf ("quietly stopped");
5486 internal_error (__FILE__
, __LINE__
,
5487 _("unhandled stop_soon: %d"), (int) stop_soon
);
5490 case TARGET_WAITKIND_SPURIOUS
:
5491 if (handle_stop_requested (ecs
))
5493 context_switch (ecs
);
5494 resume (GDB_SIGNAL_0
);
5495 prepare_to_wait (ecs
);
5498 case TARGET_WAITKIND_THREAD_CREATED
:
5499 if (handle_stop_requested (ecs
))
5501 context_switch (ecs
);
5502 if (!switch_back_to_stepped_thread (ecs
))
5506 case TARGET_WAITKIND_EXITED
:
5507 case TARGET_WAITKIND_SIGNALLED
:
5509 /* Depending on the system, ecs->ptid may point to a thread or
5510 to a process. On some targets, target_mourn_inferior may
5511 need to have access to the just-exited thread. That is the
5512 case of GNU/Linux's "checkpoint" support, for example.
5513 Call the switch_to_xxx routine as appropriate. */
5514 thread_info
*thr
= find_thread_ptid (ecs
->target
, ecs
->ptid
);
5516 switch_to_thread (thr
);
5519 inferior
*inf
= find_inferior_ptid (ecs
->target
, ecs
->ptid
);
5520 switch_to_inferior_no_thread (inf
);
5523 handle_vfork_child_exec_or_exit (0);
5524 target_terminal::ours (); /* Must do this before mourn anyway. */
5526 /* Clearing any previous state of convenience variables. */
5527 clear_exit_convenience_vars ();
5529 if (ecs
->ws
.kind () == TARGET_WAITKIND_EXITED
)
5531 /* Record the exit code in the convenience variable $_exitcode, so
5532 that the user can inspect this again later. */
5533 set_internalvar_integer (lookup_internalvar ("_exitcode"),
5534 (LONGEST
) ecs
->ws
.exit_status ());
5536 /* Also record this in the inferior itself. */
5537 current_inferior ()->has_exit_code
= 1;
5538 current_inferior ()->exit_code
= (LONGEST
) ecs
->ws
.exit_status ();
5540 /* Support the --return-child-result option. */
5541 return_child_result_value
= ecs
->ws
.exit_status ();
5543 gdb::observers::exited
.notify (ecs
->ws
.exit_status ());
5547 struct gdbarch
*gdbarch
= current_inferior ()->gdbarch
;
5549 if (gdbarch_gdb_signal_to_target_p (gdbarch
))
5551 /* Set the value of the internal variable $_exitsignal,
5552 which holds the signal uncaught by the inferior. */
5553 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
5554 gdbarch_gdb_signal_to_target (gdbarch
,
5559 /* We don't have access to the target's method used for
5560 converting between signal numbers (GDB's internal
5561 representation <-> target's representation).
5562 Therefore, we cannot do a good job at displaying this
5563 information to the user. It's better to just warn
5564 her about it (if infrun debugging is enabled), and
5566 infrun_debug_printf ("Cannot fill $_exitsignal with the correct "
5570 gdb::observers::signal_exited
.notify (ecs
->ws
.sig ());
5573 gdb_flush (gdb_stdout
);
5574 target_mourn_inferior (inferior_ptid
);
5575 stop_print_frame
= false;
5579 case TARGET_WAITKIND_FORKED
:
5580 case TARGET_WAITKIND_VFORKED
:
5581 /* Check whether the inferior is displaced stepping. */
5583 struct regcache
*regcache
= get_thread_regcache (ecs
->event_thread
);
5584 struct gdbarch
*gdbarch
= regcache
->arch ();
5585 inferior
*parent_inf
= find_inferior_ptid (ecs
->target
, ecs
->ptid
);
5587 /* If this is a fork (child gets its own address space copy)
5588 and some displaced step buffers were in use at the time of
5589 the fork, restore the displaced step buffer bytes in the
5592 Architectures which support displaced stepping and fork
5593 events must supply an implementation of
5594 gdbarch_displaced_step_restore_all_in_ptid. This is not
5595 enforced during gdbarch validation to support architectures
5596 which support displaced stepping but not forks. */
5597 if (ecs
->ws
.kind () == TARGET_WAITKIND_FORKED
5598 && gdbarch_supports_displaced_stepping (gdbarch
))
5599 gdbarch_displaced_step_restore_all_in_ptid
5600 (gdbarch
, parent_inf
, ecs
->ws
.child_ptid ());
5602 /* If displaced stepping is supported, and thread ecs->ptid is
5603 displaced stepping. */
5604 if (displaced_step_in_progress_thread (ecs
->event_thread
))
5606 struct regcache
*child_regcache
;
5607 CORE_ADDR parent_pc
;
5609 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
5610 indicating that the displaced stepping of syscall instruction
5611 has been done. Perform cleanup for parent process here. Note
5612 that this operation also cleans up the child process for vfork,
5613 because their pages are shared. */
5614 displaced_step_finish (ecs
->event_thread
, GDB_SIGNAL_TRAP
);
5615 /* Start a new step-over in another thread if there's one
5619 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
5620 the child's PC is also within the scratchpad. Set the child's PC
5621 to the parent's PC value, which has already been fixed up.
5622 FIXME: we use the parent's aspace here, although we're touching
5623 the child, because the child hasn't been added to the inferior
5624 list yet at this point. */
/* NOTE(review): the assignment target (presumably child_regcache) on the
   next line was dropped by the extraction -- confirm upstream.  */
5627 = get_thread_arch_aspace_regcache (parent_inf
->process_target (),
5628 ecs
->ws
.child_ptid (),
5630 parent_inf
->aspace
);
5631 /* Read PC value of parent process. */
5632 parent_pc
= regcache_read_pc (regcache
);
5634 displaced_debug_printf ("write child pc from %s to %s",
5636 regcache_read_pc (child_regcache
)),
5637 paddress (gdbarch
, parent_pc
));
5639 regcache_write_pc (child_regcache
, parent_pc
);
5643 context_switch (ecs
);
5645 /* Immediately detach breakpoints from the child before there's
5646 any chance of letting the user delete breakpoints from the
5647 breakpoint lists. If we don't do this early, it's easy to
5648 leave left over traps in the child, vis: "break foo; catch
5649 fork; c; <fork>; del; c; <child calls foo>". We only follow
5650 the fork on the last `continue', and by that time the
5651 breakpoint at "foo" is long gone from the breakpoint table.
5652 If we vforked, then we don't need to unpatch here, since both
5653 parent and child are sharing the same memory pages; we'll
5654 need to unpatch at follow/detach time instead to be certain
5655 that new breakpoints added between catchpoint hit time and
5656 vfork follow are detached. */
5657 if (ecs
->ws
.kind () != TARGET_WAITKIND_VFORKED
)
5659 /* This won't actually modify the breakpoint list, but will
5660 physically remove the breakpoints from the child. */
5661 detach_breakpoints (ecs
->ws
.child_ptid ());
5664 delete_just_stopped_threads_single_step_breakpoints ();
5666 /* In case the event is caught by a catchpoint, remember that
5667 the event is to be followed at the next resume of the thread,
5668 and not immediately. */
5669 ecs
->event_thread
->pending_follow
= ecs
->ws
;
5671 ecs
->event_thread
->set_stop_pc
5672 (regcache_read_pc (get_thread_regcache (ecs
->event_thread
)));
5674 ecs
->event_thread
->control
.stop_bpstat
5675 = bpstat_stop_status_nowatch (get_current_regcache ()->aspace (),
5676 ecs
->event_thread
->stop_pc (),
5677 ecs
->event_thread
, ecs
->ws
);
5679 if (handle_stop_requested (ecs
))
5682 /* If no catchpoint triggered for this, then keep going. Note
5683 that we're interested in knowing the bpstat actually causes a
5684 stop, not just if it may explain the signal. Software
5685 watchpoints, for example, always appear in the bpstat. */
5686 if (!bpstat_causes_stop (ecs
->event_thread
->control
.stop_bpstat
))
/* NOTE(review): the declaration of follow_child (assigned on the next
   line) was dropped by the extraction -- confirm upstream.  */
5689 = (follow_fork_mode_string
== follow_fork_mode_child
);
5691 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
5693 process_stratum_target
*targ
5694 = ecs
->event_thread
->inf
->process_target ();
5696 bool should_resume
= follow_fork ();
5698 /* Note that one of these may be an invalid pointer,
5699 depending on detach_fork. */
5700 thread_info
*parent
= ecs
->event_thread
;
5701 thread_info
*child
= find_thread_ptid (targ
, ecs
->ws
.child_ptid ());
5703 /* At this point, the parent is marked running, and the
5704 child is marked stopped. */
5706 /* If not resuming the parent, mark it stopped. */
5707 if (follow_child
&& !detach_fork
&& !non_stop
&& !sched_multi
)
5708 parent
->set_running (false);
5710 /* If resuming the child, mark it running. */
5711 if (follow_child
|| (!detach_fork
&& (non_stop
|| sched_multi
)))
5712 child
->set_running (true);
5714 /* In non-stop mode, also resume the other branch. */
5715 if (!detach_fork
&& (non_stop
5716 || (sched_multi
&& target_is_non_stop_p ())))
/* Resume the non-followed branch; the two orderings below appear to
   correspond to follow-child vs. follow-parent (the surrounding
   if/else lines were dropped by the extraction -- confirm).  */
5719 switch_to_thread (parent
);
5721 switch_to_thread (child
);
5723 ecs
->event_thread
= inferior_thread ();
5724 ecs
->ptid
= inferior_ptid
;
5729 switch_to_thread (child
);
5731 switch_to_thread (parent
);
5733 ecs
->event_thread
= inferior_thread ();
5734 ecs
->ptid
= inferior_ptid
;
5738 /* Never call switch_back_to_stepped_thread if we are waiting for
5739 vfork-done (waiting for an external vfork child to exec or
5740 exit). We will resume only the vforking thread for the purpose
5741 of collecting the vfork-done event, and we will restart any
5742 step once the critical shared address space window is done. */
5745 && parent
->inf
->thread_waiting_for_vfork_done
!= nullptr)
5746 || !switch_back_to_stepped_thread (ecs
))
5753 process_event_stop_test (ecs
);
5756 case TARGET_WAITKIND_VFORK_DONE
:
5757 /* Done with the shared memory region. Re-insert breakpoints in
5758 the parent, and keep going. */
5760 context_switch (ecs
);
5762 handle_vfork_done (ecs
->event_thread
);
5763 gdb_assert (inferior_thread () == ecs
->event_thread
);
5765 if (handle_stop_requested (ecs
))
5768 if (!switch_back_to_stepped_thread (ecs
))
5770 gdb_assert (inferior_thread () == ecs
->event_thread
);
5771 /* This also takes care of reinserting breakpoints in the
5772 previously locked inferior. */
5777 case TARGET_WAITKIND_EXECD
:
5779 /* Note we can't read registers yet (the stop_pc), because we
5780 don't yet know the inferior's post-exec architecture.
5781 'stop_pc' is explicitly read below instead. */
5782 switch_to_thread_no_regs (ecs
->event_thread
);
5784 /* Do whatever is necessary to the parent branch of the vfork. */
5785 handle_vfork_child_exec_or_exit (1);
5787 /* This causes the eventpoints and symbol table to be reset.
5788 Must do this now, before trying to determine whether to
5790 follow_exec (inferior_ptid
, ecs
->ws
.execd_pathname ());
5792 /* In follow_exec we may have deleted the original thread and
5793 created a new one. Make sure that the event thread is the
5794 execd thread for that case (this is a nop otherwise). */
5795 ecs
->event_thread
= inferior_thread ();
5797 ecs
->event_thread
->set_stop_pc
5798 (regcache_read_pc (get_thread_regcache (ecs
->event_thread
)));
5800 ecs
->event_thread
->control
.stop_bpstat
5801 = bpstat_stop_status_nowatch (get_current_regcache ()->aspace (),
5802 ecs
->event_thread
->stop_pc (),
5803 ecs
->event_thread
, ecs
->ws
);
5805 if (handle_stop_requested (ecs
))
5808 /* If no catchpoint triggered for this, then keep going. */
5809 if (!bpstat_causes_stop (ecs
->event_thread
->control
.stop_bpstat
))
5811 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
5815 process_event_stop_test (ecs
);
5818 /* Be careful not to try to gather much state about a thread
5819 that's in a syscall. It's frequently a losing proposition. */
5820 case TARGET_WAITKIND_SYSCALL_ENTRY
:
5821 /* Getting the current syscall number. */
5822 if (handle_syscall_event (ecs
) == 0)
5823 process_event_stop_test (ecs
);
5826 /* Before examining the threads further, step this thread to
5827 get it entirely out of the syscall. (We get notice of the
5828 event when the thread is just on the verge of exiting a
5829 syscall. Stepping one instruction seems to get it back
5831 case TARGET_WAITKIND_SYSCALL_RETURN
:
5832 if (handle_syscall_event (ecs
) == 0)
5833 process_event_stop_test (ecs
);
5836 case TARGET_WAITKIND_STOPPED
:
5837 handle_signal_stop (ecs
);
5840 case TARGET_WAITKIND_NO_HISTORY
:
5841 /* Reverse execution: target ran out of history info. */
5843 /* Switch to the stopped thread. */
5844 context_switch (ecs
);
5845 infrun_debug_printf ("stopped");
5847 delete_just_stopped_threads_single_step_breakpoints ();
5848 ecs
->event_thread
->set_stop_pc
5849 (regcache_read_pc (get_thread_regcache (inferior_thread ())));
5851 if (handle_stop_requested (ecs
))
5854 gdb::observers::no_history
.notify ();
5860 /* Restart threads back to what they were trying to do back when we
5861 paused them (because of an in-line step-over or vfork, for example).
5862 The EVENT_THREAD thread is ignored (not restarted).
5864 If INF is non-nullptr, only resume threads from INF. */
/* NOTE(review): reproduced from a mangled listing extraction -- lines
   are wrapped mid-expression, the listing's numbers are fused into the
   text, and the per-case `continue;` statements and braces were dropped.
   Code text is byte-for-byte; only comments were added.  Each arm of the
   loop below appears to skip to the next thread after logging -- confirm
   against the upstream file.  */
5867 restart_threads (struct thread_info
*event_thread
, inferior
*inf
)
5869 INFRUN_SCOPED_DEBUG_START_END ("event_thread=%s, inf=%d",
5870 event_thread
->ptid
.to_string ().c_str (),
5871 inf
!= nullptr ? inf
->num
: -1);
5873 /* In case the instruction just stepped spawned a new thread. */
5874 update_thread_list ();
5876 for (thread_info
*tp
: all_non_exited_threads ())
/* Restrict to INF's threads when INF was given.  */
5878 if (inf
!= nullptr && tp
->inf
!= inf
)
5881 if (tp
->inf
->detaching
)
5883 infrun_debug_printf ("restart threads: [%s] inferior detaching",
5884 tp
->ptid
.to_string ().c_str ());
5888 switch_to_thread_no_regs (tp
);
5890 if (tp
== event_thread
)
5892 infrun_debug_printf ("restart threads: [%s] is event thread",
5893 tp
->ptid
.to_string ().c_str ());
5897 if (!(tp
->state
== THREAD_RUNNING
|| tp
->control
.in_infcall
))
5899 infrun_debug_printf ("restart threads: [%s] not meant to be running",
5900 tp
->ptid
.to_string ().c_str ());
/* Presumably guarded by tp->resumed () -- the condition line was
   dropped by the extraction; confirm upstream.  */
5906 infrun_debug_printf ("restart threads: [%s] resumed",
5907 tp
->ptid
.to_string ().c_str ());
5908 gdb_assert (tp
->executing () || tp
->has_pending_waitstatus ());
5912 if (thread_is_in_step_over_chain (tp
))
5914 infrun_debug_printf ("restart threads: [%s] needs step-over",
5915 tp
->ptid
.to_string ().c_str ());
5916 gdb_assert (!tp
->resumed ());
5921 if (tp
->has_pending_waitstatus ())
5923 infrun_debug_printf ("restart threads: [%s] has pending status",
5924 tp
->ptid
.to_string ().c_str ());
5925 tp
->set_resumed (true);
5929 gdb_assert (!tp
->stop_requested
);
5931 /* If some thread needs to start a step-over at this point, it
5932 should still be in the step-over queue, and thus skipped
5934 if (thread_still_needs_step_over (tp
))
5936 internal_error (__FILE__
, __LINE__
,
5937 "thread [%s] needs a step-over, but not in "
5938 "step-over queue\n",
5939 tp
->ptid
.to_string ().c_str ());
5942 if (currently_stepping (tp
))
5944 infrun_debug_printf ("restart threads: [%s] was stepping",
5945 tp
->ptid
.to_string ().c_str ());
5946 keep_going_stepped_thread (tp
);
/* Otherwise: continue the thread via the normal keep-going path,
   using a fresh local execution_control_state.  */
5950 struct execution_control_state ecss
;
5951 struct execution_control_state
*ecs
= &ecss
;
5953 infrun_debug_printf ("restart threads: [%s] continuing",
5954 tp
->ptid
.to_string ().c_str ());
5955 reset_ecs (ecs
, tp
);
5956 switch_to_thread (tp
);
5957 keep_going_pass_signal (ecs
);
5962 /* Callback for iterate_over_threads. Find a resumed thread that has
5963 a pending waitstatus. */
/* NOTE(review): mangled listing extraction -- the callback's second
   parameter (the opaque data argument iterate_over_threads passes
   through, unused here) was dropped from the listing; confirm its
   declaration against the upstream file.  Code text preserved
   byte-for-byte; only comments added.  */
5966 resumed_thread_with_pending_status (struct thread_info
*tp
,
/* True iff TP is marked resumed and has an event already collected
   but not yet reported.  */
5969 return tp
->resumed () && tp
->has_pending_waitstatus ();
5972 /* Called when we get an event that may finish an in-line or
5973 out-of-line (displaced stepping) step-over started previously.
5974 Return true if the event is processed and we should go back to the
5975 event loop; false if the caller should continue processing the
/* NOTE(review): reproduced from a mangled listing extraction -- lines
   are wrapped mid-expression, the listing's numbers are fused into the
   text, and lines containing only braces or `return true/false;` were
   dropped.  Code text kept byte-for-byte; only comments added.  The
   return statements on each path must be confirmed upstream.  */
5979 finish_step_over (struct execution_control_state
*ecs
)
/* First, let the displaced-stepping machinery clean up (restores the
   original instruction and fixes up the PC if this thread was
   displaced-stepping).  */
5981 displaced_step_finish (ecs
->event_thread
, ecs
->event_thread
->stop_signal ());
5983 bool had_step_over_info
= step_over_info_valid_p ();
5985 if (had_step_over_info
)
5987 /* If we're stepping over a breakpoint with all threads locked,
5988 then only the thread that was stepped should be reporting
5990 gdb_assert (ecs
->event_thread
->control
.trap_expected
);
5992 clear_step_over_info ();
5995 if (!target_is_non_stop_p ())
5998 /* Start a new step-over in another thread if there's one that
6002 /* If we were stepping over a breakpoint before, and haven't started
6003 a new in-line step-over sequence, then restart all other threads
6004 (except the event thread). We can't do this in all-stop, as then
6005 e.g., we wouldn't be able to issue any other remote packet until
6006 these other threads stop. */
6007 if (had_step_over_info
&& !step_over_info_valid_p ())
6009 struct thread_info
*pending
;
6011 /* If we only have threads with pending statuses, the restart
6012 below won't restart any thread and so nothing re-inserts the
6013 breakpoint we just stepped over. But we need it inserted
6014 when we later process the pending events, otherwise if
6015 another thread has a pending event for this breakpoint too,
6016 we'd discard its event (because the breakpoint that
6017 originally caused the event was no longer inserted). */
6018 context_switch (ecs
);
6019 insert_breakpoints ();
6021 restart_threads (ecs
->event_thread
);
6023 /* If we have events pending, go through handle_inferior_event
6024 again, picking up a pending event at random. This avoids
6025 thread starvation. */
6027 /* But not if we just stepped over a watchpoint in order to let
6028 the instruction execute so we can evaluate its expression.
6029 The set of watchpoints that triggered is recorded in the
6030 breakpoint objects themselves (see bp->watchpoint_triggered).
6031 If we processed another event first, that other event could
6032 clobber this info. */
6033 if (ecs
->event_thread
->stepping_over_watchpoint
)
6036 pending
= iterate_over_threads (resumed_thread_with_pending_status
,
6038 if (pending
!= NULL
)
6040 struct thread_info
*tp
= ecs
->event_thread
;
6041 struct regcache
*regcache
;
6043 infrun_debug_printf ("found resumed threads with "
6044 "pending events, saving status");
6046 gdb_assert (pending
!= tp
);
6048 /* Record the event thread's event for later. */
6049 save_waitstatus (tp
, ecs
->ws
);
6050 /* This was cleared early, by handle_inferior_event. Set it
6051 so this pending event is considered by
6053 tp
->set_resumed (true);
6055 gdb_assert (!tp
->executing ());
6057 regcache
= get_thread_regcache (tp
);
6058 tp
->set_stop_pc (regcache_read_pc (regcache
));
6060 infrun_debug_printf ("saved stop_pc=%s for %s "
6061 "(currently_stepping=%d)",
6062 paddress (target_gdbarch (), tp
->stop_pc ()),
6063 tp
->ptid
.to_string ().c_str (),
6064 currently_stepping (tp
));
6066 /* This in-line step-over finished; clear this so we won't
6067 start a new one. This is what handle_signal_stop would
6068 do, if we returned false. */
6069 tp
->stepping_over_breakpoint
= 0;
6071 /* Wake up the event loop again. */
6072 mark_async_event_handler (infrun_async_inferior_event_token
);
6074 prepare_to_wait (ecs
);
6082 /* Come here when the program has stopped with a signal. */
6085 handle_signal_stop (struct execution_control_state
*ecs
)
6087 struct frame_info
*frame
;
6088 struct gdbarch
*gdbarch
;
6089 int stopped_by_watchpoint
;
6090 enum stop_kind stop_soon
;
6093 gdb_assert (ecs
->ws
.kind () == TARGET_WAITKIND_STOPPED
);
6095 ecs
->event_thread
->set_stop_signal (ecs
->ws
.sig ());
6097 /* Do we need to clean up the state of a thread that has
6098 completed a displaced single-step? (Doing so usually affects
6099 the PC, so do it here, before we set stop_pc.) */
6100 if (finish_step_over (ecs
))
6103 /* If we either finished a single-step or hit a breakpoint, but
6104 the user wanted this thread to be stopped, pretend we got a
6105 SIG0 (generic unsignaled stop). */
6106 if (ecs
->event_thread
->stop_requested
6107 && ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
)
6108 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
6110 ecs
->event_thread
->set_stop_pc
6111 (regcache_read_pc (get_thread_regcache (ecs
->event_thread
)));
6113 context_switch (ecs
);
6115 if (deprecated_context_hook
)
6116 deprecated_context_hook (ecs
->event_thread
->global_num
);
6120 struct regcache
*regcache
= get_thread_regcache (ecs
->event_thread
);
6121 struct gdbarch
*reg_gdbarch
= regcache
->arch ();
6124 ("stop_pc=%s", paddress (reg_gdbarch
, ecs
->event_thread
->stop_pc ()));
6125 if (target_stopped_by_watchpoint ())
6129 infrun_debug_printf ("stopped by watchpoint");
6131 if (target_stopped_data_address (current_inferior ()->top_target (),
6133 infrun_debug_printf ("stopped data address=%s",
6134 paddress (reg_gdbarch
, addr
));
6136 infrun_debug_printf ("(no data address available)");
6140 /* This is originated from start_remote(), start_inferior() and
6141 shared libraries hook functions. */
6142 stop_soon
= get_inferior_stop_soon (ecs
);
6143 if (stop_soon
== STOP_QUIETLY
|| stop_soon
== STOP_QUIETLY_REMOTE
)
6145 infrun_debug_printf ("quietly stopped");
6146 stop_print_frame
= true;
6151 /* This originates from attach_command(). We need to overwrite
6152 the stop_signal here, because some kernels don't ignore a
6153 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
6154 See more comments in inferior.h. On the other hand, if we
6155 get a non-SIGSTOP, report it to the user - assume the backend
6156 will handle the SIGSTOP if it should show up later.
6158 Also consider that the attach is complete when we see a
6159 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
6160 target extended-remote report it instead of a SIGSTOP
6161 (e.g. gdbserver). We already rely on SIGTRAP being our
6162 signal, so this is no exception.
6164 Also consider that the attach is complete when we see a
6165 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
6166 the target to stop all threads of the inferior, in case the
6167 low level attach operation doesn't stop them implicitly. If
6168 they weren't stopped implicitly, then the stub will report a
6169 GDB_SIGNAL_0, meaning: stopped for no particular reason
6170 other than GDB's request. */
6171 if (stop_soon
== STOP_QUIETLY_NO_SIGSTOP
6172 && (ecs
->event_thread
->stop_signal () == GDB_SIGNAL_STOP
6173 || ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
6174 || ecs
->event_thread
->stop_signal () == GDB_SIGNAL_0
))
6176 stop_print_frame
= true;
6178 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
6182 /* At this point, get hold of the now-current thread's frame. */
6183 frame
= get_current_frame ();
6184 gdbarch
= get_frame_arch (frame
);
6186 /* Pull the single step breakpoints out of the target. */
6187 if (ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
)
6189 struct regcache
*regcache
;
6192 regcache
= get_thread_regcache (ecs
->event_thread
);
6193 const address_space
*aspace
= regcache
->aspace ();
6195 pc
= regcache_read_pc (regcache
);
6197 /* However, before doing so, if this single-step breakpoint was
6198 actually for another thread, set this thread up for moving
6200 if (!thread_has_single_step_breakpoint_here (ecs
->event_thread
,
6203 if (single_step_breakpoint_inserted_here_p (aspace
, pc
))
6205 infrun_debug_printf ("[%s] hit another thread's single-step "
6207 ecs
->ptid
.to_string ().c_str ());
6208 ecs
->hit_singlestep_breakpoint
= 1;
6213 infrun_debug_printf ("[%s] hit its single-step breakpoint",
6214 ecs
->ptid
.to_string ().c_str ());
6217 delete_just_stopped_threads_single_step_breakpoints ();
6219 if (ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
6220 && ecs
->event_thread
->control
.trap_expected
6221 && ecs
->event_thread
->stepping_over_watchpoint
)
6222 stopped_by_watchpoint
= 0;
6224 stopped_by_watchpoint
= watchpoints_triggered (ecs
->ws
);
6226 /* If necessary, step over this watchpoint. We'll be back to display
6228 if (stopped_by_watchpoint
6229 && (target_have_steppable_watchpoint ()
6230 || gdbarch_have_nonsteppable_watchpoint (gdbarch
)))
6232 /* At this point, we are stopped at an instruction which has
6233 attempted to write to a piece of memory under control of
6234 a watchpoint. The instruction hasn't actually executed
6235 yet. If we were to evaluate the watchpoint expression
6236 now, we would get the old value, and therefore no change
6237 would seem to have occurred.
6239 In order to make watchpoints work `right', we really need
6240 to complete the memory write, and then evaluate the
6241 watchpoint expression. We do this by single-stepping the
6244 It may not be necessary to disable the watchpoint to step over
6245 it. For example, the PA can (with some kernel cooperation)
6246 single step over a watchpoint without disabling the watchpoint.
6248 It is far more common to need to disable a watchpoint to step
6249 the inferior over it. If we have non-steppable watchpoints,
6250 we must disable the current watchpoint; it's simplest to
6251 disable all watchpoints.
6253 Any breakpoint at PC must also be stepped over -- if there's
6254 one, it will have already triggered before the watchpoint
6255 triggered, and we either already reported it to the user, or
6256 it didn't cause a stop and we called keep_going. In either
6257 case, if there was a breakpoint at PC, we must be trying to
6259 ecs
->event_thread
->stepping_over_watchpoint
= 1;
6264 ecs
->event_thread
->stepping_over_breakpoint
= 0;
6265 ecs
->event_thread
->stepping_over_watchpoint
= 0;
6266 bpstat_clear (&ecs
->event_thread
->control
.stop_bpstat
);
6267 ecs
->event_thread
->control
.stop_step
= 0;
6268 stop_print_frame
= true;
6269 stopped_by_random_signal
= 0;
6270 bpstat
*stop_chain
= nullptr;
6272 /* Hide inlined functions starting here, unless we just performed stepi or
6273 nexti. After stepi and nexti, always show the innermost frame (not any
6274 inline function call sites). */
6275 if (ecs
->event_thread
->control
.step_range_end
!= 1)
6277 const address_space
*aspace
6278 = get_thread_regcache (ecs
->event_thread
)->aspace ();
6280 /* skip_inline_frames is expensive, so we avoid it if we can
6281 determine that the address is one where functions cannot have
6282 been inlined. This improves performance with inferiors that
6283 load a lot of shared libraries, because the solib event
6284 breakpoint is defined as the address of a function (i.e. not
6285 inline). Note that we have to check the previous PC as well
6286 as the current one to catch cases when we have just
6287 single-stepped off a breakpoint prior to reinstating it.
6288 Note that we're assuming that the code we single-step to is
6289 not inline, but that's not definitive: there's nothing
6290 preventing the event breakpoint function from containing
6291 inlined code, and the single-step ending up there. If the
6292 user had set a breakpoint on that inlined code, the missing
6293 skip_inline_frames call would break things. Fortunately
6294 that's an extremely unlikely scenario. */
6295 if (!pc_at_non_inline_function (aspace
,
6296 ecs
->event_thread
->stop_pc (),
6298 && !(ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
6299 && ecs
->event_thread
->control
.trap_expected
6300 && pc_at_non_inline_function (aspace
,
6301 ecs
->event_thread
->prev_pc
,
6304 stop_chain
= build_bpstat_chain (aspace
,
6305 ecs
->event_thread
->stop_pc (),
6307 skip_inline_frames (ecs
->event_thread
, stop_chain
);
6309 /* Re-fetch current thread's frame in case that invalidated
6311 frame
= get_current_frame ();
6312 gdbarch
= get_frame_arch (frame
);
6316 if (ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
6317 && ecs
->event_thread
->control
.trap_expected
6318 && gdbarch_single_step_through_delay_p (gdbarch
)
6319 && currently_stepping (ecs
->event_thread
))
6321 /* We're trying to step off a breakpoint. Turns out that we're
6322 also on an instruction that needs to be stepped multiple
6323 times before it's been fully executing. E.g., architectures
6324 with a delay slot. It needs to be stepped twice, once for
6325 the instruction and once for the delay slot. */
6326 int step_through_delay
6327 = gdbarch_single_step_through_delay (gdbarch
, frame
);
6329 if (step_through_delay
)
6330 infrun_debug_printf ("step through delay");
6332 if (ecs
->event_thread
->control
.step_range_end
== 0
6333 && step_through_delay
)
6335 /* The user issued a continue when stopped at a breakpoint.
6336 Set up for another trap and get out of here. */
6337 ecs
->event_thread
->stepping_over_breakpoint
= 1;
6341 else if (step_through_delay
)
6343 /* The user issued a step when stopped at a breakpoint.
6344 Maybe we should stop, maybe we should not - the delay
6345 slot *might* correspond to a line of source. In any
6346 case, don't decide that here, just set
6347 ecs->stepping_over_breakpoint, making sure we
6348 single-step again before breakpoints are re-inserted. */
6349 ecs
->event_thread
->stepping_over_breakpoint
= 1;
6353 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
6354 handles this event. */
6355 ecs
->event_thread
->control
.stop_bpstat
6356 = bpstat_stop_status (get_current_regcache ()->aspace (),
6357 ecs
->event_thread
->stop_pc (),
6358 ecs
->event_thread
, ecs
->ws
, stop_chain
);
6360 /* Following in case break condition called a
6362 stop_print_frame
= true;
6364 /* This is where we handle "moribund" watchpoints. Unlike
6365 software breakpoints traps, hardware watchpoint traps are
6366 always distinguishable from random traps. If no high-level
6367 watchpoint is associated with the reported stop data address
6368 anymore, then the bpstat does not explain the signal ---
6369 simply make sure to ignore it if `stopped_by_watchpoint' is
6372 if (ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
6373 && !bpstat_explains_signal (ecs
->event_thread
->control
.stop_bpstat
,
6375 && stopped_by_watchpoint
)
6377 infrun_debug_printf ("no user watchpoint explains watchpoint SIGTRAP, "
6381 /* NOTE: cagney/2003-03-29: These checks for a random signal
6382 at one stage in the past included checks for an inferior
6383 function call's call dummy's return breakpoint. The original
6384 comment, that went with the test, read:
6386 ``End of a stack dummy. Some systems (e.g. Sony news) give
6387 another signal besides SIGTRAP, so check here as well as
6390 If someone ever tries to get call dummys on a
6391 non-executable stack to work (where the target would stop
6392 with something like a SIGSEGV), then those tests might need
6393 to be re-instated. Given, however, that the tests were only
6394 enabled when momentary breakpoints were not being used, I
6395 suspect that it won't be the case.
6397 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
6398 be necessary for call dummies on a non-executable stack on
6401 /* See if the breakpoints module can explain the signal. */
6403 = !bpstat_explains_signal (ecs
->event_thread
->control
.stop_bpstat
,
6404 ecs
->event_thread
->stop_signal ());
6406 /* Maybe this was a trap for a software breakpoint that has since
6408 if (random_signal
&& target_stopped_by_sw_breakpoint ())
6410 if (gdbarch_program_breakpoint_here_p (gdbarch
,
6411 ecs
->event_thread
->stop_pc ()))
6413 struct regcache
*regcache
;
6416 /* Re-adjust PC to what the program would see if GDB was not
6418 regcache
= get_thread_regcache (ecs
->event_thread
);
6419 decr_pc
= gdbarch_decr_pc_after_break (gdbarch
);
6422 gdb::optional
<scoped_restore_tmpl
<int>>
6423 restore_operation_disable
;
6425 if (record_full_is_used ())
6426 restore_operation_disable
.emplace
6427 (record_full_gdb_operation_disable_set ());
6429 regcache_write_pc (regcache
,
6430 ecs
->event_thread
->stop_pc () + decr_pc
);
6435 /* A delayed software breakpoint event. Ignore the trap. */
6436 infrun_debug_printf ("delayed software breakpoint trap, ignoring");
6441 /* Maybe this was a trap for a hardware breakpoint/watchpoint that
6442 has since been removed. */
6443 if (random_signal
&& target_stopped_by_hw_breakpoint ())
6445 /* A delayed hardware breakpoint event. Ignore the trap. */
6446 infrun_debug_printf ("delayed hardware breakpoint/watchpoint "
6451 /* If not, perhaps stepping/nexting can. */
6453 random_signal
= !(ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
6454 && currently_stepping (ecs
->event_thread
));
6456 /* Perhaps the thread hit a single-step breakpoint of _another_
6457 thread. Single-step breakpoints are transparent to the
6458 breakpoints module. */
6460 random_signal
= !ecs
->hit_singlestep_breakpoint
;
6462 /* No? Perhaps we got a moribund watchpoint. */
6464 random_signal
= !stopped_by_watchpoint
;
6466 /* Always stop if the user explicitly requested this thread to
6468 if (ecs
->event_thread
->stop_requested
)
6471 infrun_debug_printf ("user-requested stop");
6474 /* For the program's own signals, act according to
6475 the signal handling tables. */
6479 /* Signal not for debugging purposes. */
6480 enum gdb_signal stop_signal
= ecs
->event_thread
->stop_signal ();
6482 infrun_debug_printf ("random signal (%s)",
6483 gdb_signal_to_symbol_string (stop_signal
));
6485 stopped_by_random_signal
= 1;
6487 /* Always stop on signals if we're either just gaining control
6488 of the program, or the user explicitly requested this thread
6489 to remain stopped. */
6490 if (stop_soon
!= NO_STOP_QUIETLY
6491 || ecs
->event_thread
->stop_requested
6492 || signal_stop_state (ecs
->event_thread
->stop_signal ()))
6498 /* Notify observers the signal has "handle print" set. Note we
6499 returned early above if stopping; normal_stop handles the
6500 printing in that case. */
6501 if (signal_print
[ecs
->event_thread
->stop_signal ()])
6503 /* The signal table tells us to print about this signal. */
6504 target_terminal::ours_for_output ();
6505 gdb::observers::signal_received
.notify (ecs
->event_thread
->stop_signal ());
6506 target_terminal::inferior ();
6509 /* Clear the signal if it should not be passed. */
6510 if (signal_program
[ecs
->event_thread
->stop_signal ()] == 0)
6511 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
6513 if (ecs
->event_thread
->prev_pc
== ecs
->event_thread
->stop_pc ()
6514 && ecs
->event_thread
->control
.trap_expected
6515 && ecs
->event_thread
->control
.step_resume_breakpoint
== NULL
)
6517 /* We were just starting a new sequence, attempting to
6518 single-step off of a breakpoint and expecting a SIGTRAP.
6519 Instead this signal arrives. This signal will take us out
6520 of the stepping range so GDB needs to remember to, when
6521 the signal handler returns, resume stepping off that
6523 /* To simplify things, "continue" is forced to use the same
6524 code paths as single-step - set a breakpoint at the
6525 signal return address and then, once hit, step off that
6527 infrun_debug_printf ("signal arrived while stepping over breakpoint");
6529 insert_hp_step_resume_breakpoint_at_frame (frame
);
6530 ecs
->event_thread
->step_after_step_resume_breakpoint
= 1;
6531 /* Reset trap_expected to ensure breakpoints are re-inserted. */
6532 ecs
->event_thread
->control
.trap_expected
= 0;
6534 /* If we were nexting/stepping some other thread, switch to
6535 it, so that we don't continue it, losing control. */
6536 if (!switch_back_to_stepped_thread (ecs
))
6541 if (ecs
->event_thread
->stop_signal () != GDB_SIGNAL_0
6542 && (pc_in_thread_step_range (ecs
->event_thread
->stop_pc (),
6544 || ecs
->event_thread
->control
.step_range_end
== 1)
6545 && frame_id_eq (get_stack_frame_id (frame
),
6546 ecs
->event_thread
->control
.step_stack_frame_id
)
6547 && ecs
->event_thread
->control
.step_resume_breakpoint
== NULL
)
6549 /* The inferior is about to take a signal that will take it
6550 out of the single step range. Set a breakpoint at the
6551 current PC (which is presumably where the signal handler
6552 will eventually return) and then allow the inferior to
6555 Note that this is only needed for a signal delivered
6556 while in the single-step range. Nested signals aren't a
6557 problem as they eventually all return. */
6558 infrun_debug_printf ("signal may take us out of single-step range");
6560 clear_step_over_info ();
6561 insert_hp_step_resume_breakpoint_at_frame (frame
);
6562 ecs
->event_thread
->step_after_step_resume_breakpoint
= 1;
6563 /* Reset trap_expected to ensure breakpoints are re-inserted. */
6564 ecs
->event_thread
->control
.trap_expected
= 0;
6569 /* Note: step_resume_breakpoint may be non-NULL. This occurs
6570 when either there's a nested signal, or when there's a
6571 pending signal enabled just as the signal handler returns
6572 (leaving the inferior at the step-resume-breakpoint without
6573 actually executing it). Either way continue until the
6574 breakpoint is really hit. */
6576 if (!switch_back_to_stepped_thread (ecs
))
6578 infrun_debug_printf ("random signal, keep going");
6585 process_event_stop_test (ecs
);
6588 /* Come here when we've got some debug event / signal we can explain
6589 (IOW, not a random signal), and test whether it should cause a
6590 stop, or whether we should resume the inferior (transparently).
6591 E.g., could be a breakpoint whose condition evaluates false; we
6592 could be still stepping within the line; etc. */
6595 process_event_stop_test (struct execution_control_state
*ecs
)
6597 struct symtab_and_line stop_pc_sal
;
6598 struct frame_info
*frame
;
6599 struct gdbarch
*gdbarch
;
6600 CORE_ADDR jmp_buf_pc
;
6601 struct bpstat_what what
;
6603 /* Handle cases caused by hitting a breakpoint. */
6605 frame
= get_current_frame ();
6606 gdbarch
= get_frame_arch (frame
);
6608 what
= bpstat_what (ecs
->event_thread
->control
.stop_bpstat
);
6610 if (what
.call_dummy
)
6612 stop_stack_dummy
= what
.call_dummy
;
6615 /* A few breakpoint types have callbacks associated (e.g.,
6616 bp_jit_event). Run them now. */
6617 bpstat_run_callbacks (ecs
->event_thread
->control
.stop_bpstat
);
6619 /* If we hit an internal event that triggers symbol changes, the
6620 current frame will be invalidated within bpstat_what (e.g., if we
6621 hit an internal solib event). Re-fetch it. */
6622 frame
= get_current_frame ();
6623 gdbarch
= get_frame_arch (frame
);
6625 switch (what
.main_action
)
6627 case BPSTAT_WHAT_SET_LONGJMP_RESUME
:
6628 /* If we hit the breakpoint at longjmp while stepping, we
6629 install a momentary breakpoint at the target of the
6632 infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME");
6634 ecs
->event_thread
->stepping_over_breakpoint
= 1;
6636 if (what
.is_longjmp
)
6638 struct value
*arg_value
;
6640 /* If we set the longjmp breakpoint via a SystemTap probe,
6641 then use it to extract the arguments. The destination PC
6642 is the third argument to the probe. */
6643 arg_value
= probe_safe_evaluate_at_pc (frame
, 2);
6646 jmp_buf_pc
= value_as_address (arg_value
);
6647 jmp_buf_pc
= gdbarch_addr_bits_remove (gdbarch
, jmp_buf_pc
);
6649 else if (!gdbarch_get_longjmp_target_p (gdbarch
)
6650 || !gdbarch_get_longjmp_target (gdbarch
,
6651 frame
, &jmp_buf_pc
))
6653 infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME "
6654 "(!gdbarch_get_longjmp_target)");
6659 /* Insert a breakpoint at resume address. */
6660 insert_longjmp_resume_breakpoint (gdbarch
, jmp_buf_pc
);
6663 check_exception_resume (ecs
, frame
);
6667 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME
:
6669 struct frame_info
*init_frame
;
6671 /* There are several cases to consider.
6673 1. The initiating frame no longer exists. In this case we
6674 must stop, because the exception or longjmp has gone too
6677 2. The initiating frame exists, and is the same as the
6678 current frame. We stop, because the exception or longjmp
6681 3. The initiating frame exists and is different from the
6682 current frame. This means the exception or longjmp has
6683 been caught beneath the initiating frame, so keep going.
6685 4. longjmp breakpoint has been placed just to protect
6686 against stale dummy frames and user is not interested in
6687 stopping around longjmps. */
6689 infrun_debug_printf ("BPSTAT_WHAT_CLEAR_LONGJMP_RESUME");
6691 gdb_assert (ecs
->event_thread
->control
.exception_resume_breakpoint
6693 delete_exception_resume_breakpoint (ecs
->event_thread
);
6695 if (what
.is_longjmp
)
6697 check_longjmp_breakpoint_for_call_dummy (ecs
->event_thread
);
6699 if (!frame_id_p (ecs
->event_thread
->initiating_frame
))
6707 init_frame
= frame_find_by_id (ecs
->event_thread
->initiating_frame
);
6711 struct frame_id current_id
6712 = get_frame_id (get_current_frame ());
6713 if (frame_id_eq (current_id
,
6714 ecs
->event_thread
->initiating_frame
))
6716 /* Case 2. Fall through. */
6726 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
6728 delete_step_resume_breakpoint (ecs
->event_thread
);
6730 end_stepping_range (ecs
);
6734 case BPSTAT_WHAT_SINGLE
:
6735 infrun_debug_printf ("BPSTAT_WHAT_SINGLE");
6736 ecs
->event_thread
->stepping_over_breakpoint
= 1;
6737 /* Still need to check other stuff, at least the case where we
6738 are stepping and step out of the right range. */
6741 case BPSTAT_WHAT_STEP_RESUME
:
6742 infrun_debug_printf ("BPSTAT_WHAT_STEP_RESUME");
6744 delete_step_resume_breakpoint (ecs
->event_thread
);
6745 if (ecs
->event_thread
->control
.proceed_to_finish
6746 && execution_direction
== EXEC_REVERSE
)
6748 struct thread_info
*tp
= ecs
->event_thread
;
6750 /* We are finishing a function in reverse, and just hit the
6751 step-resume breakpoint at the start address of the
6752 function, and we're almost there -- just need to back up
6753 by one more single-step, which should take us back to the
6755 tp
->control
.step_range_start
= tp
->control
.step_range_end
= 1;
6759 fill_in_stop_func (gdbarch
, ecs
);
6760 if (ecs
->event_thread
->stop_pc () == ecs
->stop_func_start
6761 && execution_direction
== EXEC_REVERSE
)
6763 /* We are stepping over a function call in reverse, and just
6764 hit the step-resume breakpoint at the start address of
6765 the function. Go back to single-stepping, which should
6766 take us back to the function call. */
6767 ecs
->event_thread
->stepping_over_breakpoint
= 1;
6773 case BPSTAT_WHAT_STOP_NOISY
:
6774 infrun_debug_printf ("BPSTAT_WHAT_STOP_NOISY");
6775 stop_print_frame
= true;
6777 /* Assume the thread stopped for a breakpoint. We'll still check
6778 whether a/the breakpoint is there when the thread is next
6780 ecs
->event_thread
->stepping_over_breakpoint
= 1;
6785 case BPSTAT_WHAT_STOP_SILENT
:
6786 infrun_debug_printf ("BPSTAT_WHAT_STOP_SILENT");
6787 stop_print_frame
= false;
6789 /* Assume the thread stopped for a breakpoint. We'll still check
6790 whether a/the breakpoint is there when the thread is next
6792 ecs
->event_thread
->stepping_over_breakpoint
= 1;
6796 case BPSTAT_WHAT_HP_STEP_RESUME
:
6797 infrun_debug_printf ("BPSTAT_WHAT_HP_STEP_RESUME");
6799 delete_step_resume_breakpoint (ecs
->event_thread
);
6800 if (ecs
->event_thread
->step_after_step_resume_breakpoint
)
6802 /* Back when the step-resume breakpoint was inserted, we
6803 were trying to single-step off a breakpoint. Go back to
6805 ecs
->event_thread
->step_after_step_resume_breakpoint
= 0;
6806 ecs
->event_thread
->stepping_over_breakpoint
= 1;
6812 case BPSTAT_WHAT_KEEP_CHECKING
:
6816 /* If we stepped a permanent breakpoint and we had a high priority
6817 step-resume breakpoint for the address we stepped, but we didn't
6818 hit it, then we must have stepped into the signal handler. The
6819 step-resume was only necessary to catch the case of _not_
6820 stepping into the handler, so delete it, and fall through to
6821 checking whether the step finished. */
6822 if (ecs
->event_thread
->stepped_breakpoint
)
6824 struct breakpoint
*sr_bp
6825 = ecs
->event_thread
->control
.step_resume_breakpoint
;
6828 && sr_bp
->loc
->permanent
6829 && sr_bp
->type
== bp_hp_step_resume
6830 && sr_bp
->loc
->address
== ecs
->event_thread
->prev_pc
)
6832 infrun_debug_printf ("stepped permanent breakpoint, stopped in handler");
6833 delete_step_resume_breakpoint (ecs
->event_thread
);
6834 ecs
->event_thread
->step_after_step_resume_breakpoint
= 0;
6838 /* We come here if we hit a breakpoint but should not stop for it.
6839 Possibly we also were stepping and should stop for that. So fall
6840 through and test for stepping. But, if not stepping, do not
6843 /* In all-stop mode, if we're currently stepping but have stopped in
6844 some other thread, we need to switch back to the stepped thread. */
6845 if (switch_back_to_stepped_thread (ecs
))
6848 if (ecs
->event_thread
->control
.step_resume_breakpoint
)
6850 infrun_debug_printf ("step-resume breakpoint is inserted");
6852 /* Having a step-resume breakpoint overrides anything
6853 else having to do with stepping commands until
6854 that breakpoint is reached. */
6859 if (ecs
->event_thread
->control
.step_range_end
== 0)
6861 infrun_debug_printf ("no stepping, continue");
6862 /* Likewise if we aren't even stepping. */
6867 /* Re-fetch current thread's frame in case the code above caused
6868 the frame cache to be re-initialized, making our FRAME variable
6869 a dangling pointer. */
6870 frame
= get_current_frame ();
6871 gdbarch
= get_frame_arch (frame
);
6872 fill_in_stop_func (gdbarch
, ecs
);
6874 /* If stepping through a line, keep going if still within it.
6876 Note that step_range_end is the address of the first instruction
6877 beyond the step range, and NOT the address of the last instruction
6880 Note also that during reverse execution, we may be stepping
6881 through a function epilogue and therefore must detect when
6882 the current-frame changes in the middle of a line. */
6884 if (pc_in_thread_step_range (ecs
->event_thread
->stop_pc (),
6886 && (execution_direction
!= EXEC_REVERSE
6887 || frame_id_eq (get_frame_id (frame
),
6888 ecs
->event_thread
->control
.step_frame_id
)))
6891 ("stepping inside range [%s-%s]",
6892 paddress (gdbarch
, ecs
->event_thread
->control
.step_range_start
),
6893 paddress (gdbarch
, ecs
->event_thread
->control
.step_range_end
));
6895 /* Tentatively re-enable range stepping; `resume' disables it if
6896 necessary (e.g., if we're stepping over a breakpoint or we
6897 have software watchpoints). */
6898 ecs
->event_thread
->control
.may_range_step
= 1;
6900 /* When stepping backward, stop at beginning of line range
6901 (unless it's the function entry point, in which case
6902 keep going back to the call point). */
6903 CORE_ADDR stop_pc
= ecs
->event_thread
->stop_pc ();
6904 if (stop_pc
== ecs
->event_thread
->control
.step_range_start
6905 && stop_pc
!= ecs
->stop_func_start
6906 && execution_direction
== EXEC_REVERSE
)
6907 end_stepping_range (ecs
);
6914 /* We stepped out of the stepping range. */
6916 /* If we are stepping at the source level and entered the runtime
6917 loader dynamic symbol resolution code...
6919 EXEC_FORWARD: we keep on single stepping until we exit the run
6920 time loader code and reach the callee's address.
6922 EXEC_REVERSE: we've already executed the callee (backward), and
6923 the runtime loader code is handled just like any other
6924 undebuggable function call. Now we need only keep stepping
6925 backward through the trampoline code, and that's handled further
6926 down, so there is nothing for us to do here. */
6928 if (execution_direction
!= EXEC_REVERSE
6929 && ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_UNDEBUGGABLE
6930 && in_solib_dynsym_resolve_code (ecs
->event_thread
->stop_pc ()))
6932 CORE_ADDR pc_after_resolver
=
6933 gdbarch_skip_solib_resolver (gdbarch
, ecs
->event_thread
->stop_pc ());
6935 infrun_debug_printf ("stepped into dynsym resolve code");
6937 if (pc_after_resolver
)
6939 /* Set up a step-resume breakpoint at the address
6940 indicated by SKIP_SOLIB_RESOLVER. */
6941 symtab_and_line sr_sal
;
6942 sr_sal
.pc
= pc_after_resolver
;
6943 sr_sal
.pspace
= get_frame_program_space (frame
);
6945 insert_step_resume_breakpoint_at_sal (gdbarch
,
6946 sr_sal
, null_frame_id
);
6953 /* Step through an indirect branch thunk. */
6954 if (ecs
->event_thread
->control
.step_over_calls
!= STEP_OVER_NONE
6955 && gdbarch_in_indirect_branch_thunk (gdbarch
,
6956 ecs
->event_thread
->stop_pc ()))
6958 infrun_debug_printf ("stepped into indirect branch thunk");
6963 if (ecs
->event_thread
->control
.step_range_end
!= 1
6964 && (ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_UNDEBUGGABLE
6965 || ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_ALL
)
6966 && get_frame_type (frame
) == SIGTRAMP_FRAME
)
6968 infrun_debug_printf ("stepped into signal trampoline");
6969 /* The inferior, while doing a "step" or "next", has ended up in
6970 a signal trampoline (either by a signal being delivered or by
6971 the signal handler returning). Just single-step until the
6972 inferior leaves the trampoline (either by calling the handler
6978 /* If we're in the return path from a shared library trampoline,
6979 we want to proceed through the trampoline when stepping. */
6980 /* macro/2012-04-25: This needs to come before the subroutine
6981 call check below as on some targets return trampolines look
6982 like subroutine calls (MIPS16 return thunks). */
6983 if (gdbarch_in_solib_return_trampoline (gdbarch
,
6984 ecs
->event_thread
->stop_pc (),
6985 ecs
->stop_func_name
)
6986 && ecs
->event_thread
->control
.step_over_calls
!= STEP_OVER_NONE
)
6988 /* Determine where this trampoline returns. */
6989 CORE_ADDR stop_pc
= ecs
->event_thread
->stop_pc ();
6990 CORE_ADDR real_stop_pc
6991 = gdbarch_skip_trampoline_code (gdbarch
, frame
, stop_pc
);
6993 infrun_debug_printf ("stepped into solib return tramp");
6995 /* Only proceed through if we know where it's going. */
6998 /* And put the step-breakpoint there and go until there. */
6999 symtab_and_line sr_sal
;
7000 sr_sal
.pc
= real_stop_pc
;
7001 sr_sal
.section
= find_pc_overlay (sr_sal
.pc
);
7002 sr_sal
.pspace
= get_frame_program_space (frame
);
7004 /* Do not specify what the fp should be when we stop since
7005 on some machines the prologue is where the new fp value
7007 insert_step_resume_breakpoint_at_sal (gdbarch
,
7008 sr_sal
, null_frame_id
);
7010 /* Restart without fiddling with the step ranges or
7017 /* Check for subroutine calls. The check for the current frame
7018 equalling the step ID is not necessary - the check of the
7019 previous frame's ID is sufficient - but it is a common case and
7020 cheaper than checking the previous frame's ID.
7022 NOTE: frame_id_eq will never report two invalid frame IDs as
7023 being equal, so to get into this block, both the current and
7024 previous frame must have valid frame IDs. */
7025 /* The outer_frame_id check is a heuristic to detect stepping
7026 through startup code. If we step over an instruction which
7027 sets the stack pointer from an invalid value to a valid value,
7028 we may detect that as a subroutine call from the mythical
7029 "outermost" function. This could be fixed by marking
7030 outermost frames as !stack_p,code_p,special_p. Then the
7031 initial outermost frame, before sp was valid, would
7032 have code_addr == &_start. See the comment in frame_id_eq
7034 if (!frame_id_eq (get_stack_frame_id (frame
),
7035 ecs
->event_thread
->control
.step_stack_frame_id
)
7036 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
7037 ecs
->event_thread
->control
.step_stack_frame_id
)
7038 && (!frame_id_eq (ecs
->event_thread
->control
.step_stack_frame_id
,
7040 || (ecs
->event_thread
->control
.step_start_function
7041 != find_pc_function (ecs
->event_thread
->stop_pc ())))))
7043 CORE_ADDR stop_pc
= ecs
->event_thread
->stop_pc ();
7044 CORE_ADDR real_stop_pc
;
7046 infrun_debug_printf ("stepped into subroutine");
7048 if (ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_NONE
)
7050 /* I presume that step_over_calls is only 0 when we're
7051 supposed to be stepping at the assembly language level
7052 ("stepi"). Just stop. */
7053 /* And this works the same backward as frontward. MVS */
7054 end_stepping_range (ecs
);
7058 /* Reverse stepping through solib trampolines. */
7060 if (execution_direction
== EXEC_REVERSE
7061 && ecs
->event_thread
->control
.step_over_calls
!= STEP_OVER_NONE
7062 && (gdbarch_skip_trampoline_code (gdbarch
, frame
, stop_pc
)
7063 || (ecs
->stop_func_start
== 0
7064 && in_solib_dynsym_resolve_code (stop_pc
))))
7066 /* Any solib trampoline code can be handled in reverse
7067 by simply continuing to single-step. We have already
7068 executed the solib function (backwards), and a few
7069 steps will take us back through the trampoline to the
7075 if (ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_ALL
)
7077 /* We're doing a "next".
7079 Normal (forward) execution: set a breakpoint at the
7080 callee's return address (the address at which the caller
7083 Reverse (backward) execution. set the step-resume
7084 breakpoint at the start of the function that we just
7085 stepped into (backwards), and continue to there. When we
7086 get there, we'll need to single-step back to the caller. */
7088 if (execution_direction
== EXEC_REVERSE
)
7090 /* If we're already at the start of the function, we've either
7091 just stepped backward into a single instruction function,
7092 or stepped back out of a signal handler to the first instruction
7093 of the function. Just keep going, which will single-step back
7095 if (ecs
->stop_func_start
!= stop_pc
&& ecs
->stop_func_start
!= 0)
7097 /* Normal function call return (static or dynamic). */
7098 symtab_and_line sr_sal
;
7099 sr_sal
.pc
= ecs
->stop_func_start
;
7100 sr_sal
.pspace
= get_frame_program_space (frame
);
7101 insert_step_resume_breakpoint_at_sal (gdbarch
,
7102 sr_sal
, null_frame_id
);
7106 insert_step_resume_breakpoint_at_caller (frame
);
7112 /* If we are in a function call trampoline (a stub between the
7113 calling routine and the real function), locate the real
7114 function. That's what tells us (a) whether we want to step
7115 into it at all, and (b) what prologue we want to run to the
7116 end of, if we do step into it. */
7117 real_stop_pc
= skip_language_trampoline (frame
, stop_pc
);
7118 if (real_stop_pc
== 0)
7119 real_stop_pc
= gdbarch_skip_trampoline_code (gdbarch
, frame
, stop_pc
);
7120 if (real_stop_pc
!= 0)
7121 ecs
->stop_func_start
= real_stop_pc
;
7123 if (real_stop_pc
!= 0 && in_solib_dynsym_resolve_code (real_stop_pc
))
7125 symtab_and_line sr_sal
;
7126 sr_sal
.pc
= ecs
->stop_func_start
;
7127 sr_sal
.pspace
= get_frame_program_space (frame
);
7129 insert_step_resume_breakpoint_at_sal (gdbarch
,
7130 sr_sal
, null_frame_id
);
7135 /* If we have line number information for the function we are
7136 thinking of stepping into and the function isn't on the skip
7139 If there are several symtabs at that PC (e.g. with include
7140 files), just want to know whether *any* of them have line
7141 numbers. find_pc_line handles this. */
7143 struct symtab_and_line tmp_sal
;
7145 tmp_sal
= find_pc_line (ecs
->stop_func_start
, 0);
7146 if (tmp_sal
.line
!= 0
7147 && !function_name_is_marked_for_skip (ecs
->stop_func_name
,
7149 && !inline_frame_is_marked_for_skip (true, ecs
->event_thread
))
7151 if (execution_direction
== EXEC_REVERSE
)
7152 handle_step_into_function_backward (gdbarch
, ecs
);
7154 handle_step_into_function (gdbarch
, ecs
);
7159 /* If we have no line number and the step-stop-if-no-debug is
7160 set, we stop the step so that the user has a chance to switch
7161 in assembly mode. */
7162 if (ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_UNDEBUGGABLE
7163 && step_stop_if_no_debug
)
7165 end_stepping_range (ecs
);
7169 if (execution_direction
== EXEC_REVERSE
)
7171 /* If we're already at the start of the function, we've either just
7172 stepped backward into a single instruction function without line
7173 number info, or stepped back out of a signal handler to the first
7174 instruction of the function without line number info. Just keep
7175 going, which will single-step back to the caller. */
7176 if (ecs
->stop_func_start
!= stop_pc
)
7178 /* Set a breakpoint at callee's start address.
7179 From there we can step once and be back in the caller. */
7180 symtab_and_line sr_sal
;
7181 sr_sal
.pc
= ecs
->stop_func_start
;
7182 sr_sal
.pspace
= get_frame_program_space (frame
);
7183 insert_step_resume_breakpoint_at_sal (gdbarch
,
7184 sr_sal
, null_frame_id
);
7188 /* Set a breakpoint at callee's return address (the address
7189 at which the caller will resume). */
7190 insert_step_resume_breakpoint_at_caller (frame
);
7196 /* Reverse stepping through solib trampolines. */
7198 if (execution_direction
== EXEC_REVERSE
7199 && ecs
->event_thread
->control
.step_over_calls
!= STEP_OVER_NONE
)
7201 CORE_ADDR stop_pc
= ecs
->event_thread
->stop_pc ();
7203 if (gdbarch_skip_trampoline_code (gdbarch
, frame
, stop_pc
)
7204 || (ecs
->stop_func_start
== 0
7205 && in_solib_dynsym_resolve_code (stop_pc
)))
7207 /* Any solib trampoline code can be handled in reverse
7208 by simply continuing to single-step. We have already
7209 executed the solib function (backwards), and a few
7210 steps will take us back through the trampoline to the
7215 else if (in_solib_dynsym_resolve_code (stop_pc
))
7217 /* Stepped backward into the solib dynsym resolver.
7218 Set a breakpoint at its start and continue, then
7219 one more step will take us out. */
7220 symtab_and_line sr_sal
;
7221 sr_sal
.pc
= ecs
->stop_func_start
;
7222 sr_sal
.pspace
= get_frame_program_space (frame
);
7223 insert_step_resume_breakpoint_at_sal (gdbarch
,
7224 sr_sal
, null_frame_id
);
7230 /* This always returns the sal for the inner-most frame when we are in a
7231 stack of inlined frames, even if GDB actually believes that it is in a
7232 more outer frame. This is checked for below by calls to
7233 inline_skipped_frames. */
7234 stop_pc_sal
= find_pc_line (ecs
->event_thread
->stop_pc (), 0);
7236 /* NOTE: tausq/2004-05-24: This if block used to be done before all
7237 the trampoline processing logic, however, there are some trampolines
7238 that have no names, so we should do trampoline handling first. */
7239 if (ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_UNDEBUGGABLE
7240 && ecs
->stop_func_name
== NULL
7241 && stop_pc_sal
.line
== 0)
7243 infrun_debug_printf ("stepped into undebuggable function");
7245 /* The inferior just stepped into, or returned to, an
7246 undebuggable function (where there is no debugging information
7247 and no line number corresponding to the address where the
7248 inferior stopped). Since we want to skip this kind of code,
7249 we keep going until the inferior returns from this
7250 function - unless the user has asked us not to (via
7251 set step-mode) or we no longer know how to get back
7252 to the call site. */
7253 if (step_stop_if_no_debug
7254 || !frame_id_p (frame_unwind_caller_id (frame
)))
7256 /* If we have no line number and the step-stop-if-no-debug
7257 is set, we stop the step so that the user has a chance to
7258 switch in assembly mode. */
7259 end_stepping_range (ecs
);
7264 /* Set a breakpoint at callee's return address (the address
7265 at which the caller will resume). */
7266 insert_step_resume_breakpoint_at_caller (frame
);
7272 if (ecs
->event_thread
->control
.step_range_end
== 1)
7274 /* It is stepi or nexti. We always want to stop stepping after
7276 infrun_debug_printf ("stepi/nexti");
7277 end_stepping_range (ecs
);
7281 if (stop_pc_sal
.line
== 0)
7283 /* We have no line number information. That means to stop
7284 stepping (does this always happen right after one instruction,
7285 when we do "s" in a function with no line numbers,
7286 or can this happen as a result of a return or longjmp?). */
7287 infrun_debug_printf ("line number info");
7288 end_stepping_range (ecs
);
7292 /* Look for "calls" to inlined functions, part one. If the inline
7293 frame machinery detected some skipped call sites, we have entered
7294 a new inline function. */
7296 if (frame_id_eq (get_frame_id (get_current_frame ()),
7297 ecs
->event_thread
->control
.step_frame_id
)
7298 && inline_skipped_frames (ecs
->event_thread
))
7300 infrun_debug_printf ("stepped into inlined function");
7302 symtab_and_line call_sal
= find_frame_sal (get_current_frame ());
7304 if (ecs
->event_thread
->control
.step_over_calls
!= STEP_OVER_ALL
)
7306 /* For "step", we're going to stop. But if the call site
7307 for this inlined function is on the same source line as
7308 we were previously stepping, go down into the function
7309 first. Otherwise stop at the call site. */
7311 if (call_sal
.line
== ecs
->event_thread
->current_line
7312 && call_sal
.symtab
== ecs
->event_thread
->current_symtab
)
7314 step_into_inline_frame (ecs
->event_thread
);
7315 if (inline_frame_is_marked_for_skip (false, ecs
->event_thread
))
7322 end_stepping_range (ecs
);
7327 /* For "next", we should stop at the call site if it is on a
7328 different source line. Otherwise continue through the
7329 inlined function. */
7330 if (call_sal
.line
== ecs
->event_thread
->current_line
7331 && call_sal
.symtab
== ecs
->event_thread
->current_symtab
)
7334 end_stepping_range (ecs
);
7339 /* Look for "calls" to inlined functions, part two. If we are still
7340 in the same real function we were stepping through, but we have
7341 to go further up to find the exact frame ID, we are stepping
7342 through a more inlined call beyond its call site. */
7344 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
7345 && !frame_id_eq (get_frame_id (get_current_frame ()),
7346 ecs
->event_thread
->control
.step_frame_id
)
7347 && stepped_in_from (get_current_frame (),
7348 ecs
->event_thread
->control
.step_frame_id
))
7350 infrun_debug_printf ("stepping through inlined function");
7352 if (ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_ALL
7353 || inline_frame_is_marked_for_skip (false, ecs
->event_thread
))
7356 end_stepping_range (ecs
);
7360 bool refresh_step_info
= true;
7361 if ((ecs
->event_thread
->stop_pc () == stop_pc_sal
.pc
)
7362 && (ecs
->event_thread
->current_line
!= stop_pc_sal
.line
7363 || ecs
->event_thread
->current_symtab
!= stop_pc_sal
.symtab
))
7365 /* We are at a different line. */
7367 if (stop_pc_sal
.is_stmt
)
7369 /* We are at the start of a statement.
7371 So stop. Note that we don't stop if we step into the middle of a
7372 statement. That is said to make things like for (;;) statements
7374 infrun_debug_printf ("stepped to a different line");
7375 end_stepping_range (ecs
);
7378 else if (frame_id_eq (get_frame_id (get_current_frame ()),
7379 ecs
->event_thread
->control
.step_frame_id
))
7381 /* We are not at the start of a statement, and we have not changed
7384 We ignore this line table entry, and continue stepping forward,
7385 looking for a better place to stop. */
7386 refresh_step_info
= false;
7387 infrun_debug_printf ("stepped to a different line, but "
7388 "it's not the start of a statement");
7392 /* We are not the start of a statement, and we have changed frame.
7394 We ignore this line table entry, and continue stepping forward,
7395 looking for a better place to stop. Keep refresh_step_info at
7396 true to note that the frame has changed, but ignore the line
7397 number to make sure we don't ignore a subsequent entry with the
7398 same line number. */
7399 stop_pc_sal
.line
= 0;
7400 infrun_debug_printf ("stepped to a different frame, but "
7401 "it's not the start of a statement");
7405 /* We aren't done stepping.
7407 Optimize by setting the stepping range to the line.
7408 (We might not be in the original line, but if we entered a
7409 new line in mid-statement, we continue stepping. This makes
7410 things like for(;;) statements work better.)
7412 If we entered a SAL that indicates a non-statement line table entry,
7413 then we update the stepping range, but we don't update the step info,
7414 which includes things like the line number we are stepping away from.
7415 This means we will stop when we find a line table entry that is marked
7416 as is-statement, even if it matches the non-statement one we just
7419 ecs
->event_thread
->control
.step_range_start
= stop_pc_sal
.pc
;
7420 ecs
->event_thread
->control
.step_range_end
= stop_pc_sal
.end
;
7421 ecs
->event_thread
->control
.may_range_step
= 1;
7423 ("updated step range, start = %s, end = %s, may_range_step = %d",
7424 paddress (gdbarch
, ecs
->event_thread
->control
.step_range_start
),
7425 paddress (gdbarch
, ecs
->event_thread
->control
.step_range_end
),
7426 ecs
->event_thread
->control
.may_range_step
);
7427 if (refresh_step_info
)
7428 set_step_info (ecs
->event_thread
, frame
, stop_pc_sal
);
7430 infrun_debug_printf ("keep going");
7434 static bool restart_stepped_thread (process_stratum_target
*resume_target
,
7435 ptid_t resume_ptid
);
7437 /* In all-stop mode, if we're currently stepping but have stopped in
7438 some other thread, we may need to switch back to the stepped
7439 thread. Returns true we set the inferior running, false if we left
7440 it stopped (and the event needs further processing). */
7443 switch_back_to_stepped_thread (struct execution_control_state
*ecs
)
7445 if (!target_is_non_stop_p ())
7447 /* If any thread is blocked on some internal breakpoint, and we
7448 simply need to step over that breakpoint to get it going
7449 again, do that first. */
7451 /* However, if we see an event for the stepping thread, then we
7452 know all other threads have been moved past their breakpoints
7453 already. Let the caller check whether the step is finished,
7454 etc., before deciding to move it past a breakpoint. */
7455 if (ecs
->event_thread
->control
.step_range_end
!= 0)
7458 /* Check if the current thread is blocked on an incomplete
7459 step-over, interrupted by a random signal. */
7460 if (ecs
->event_thread
->control
.trap_expected
7461 && ecs
->event_thread
->stop_signal () != GDB_SIGNAL_TRAP
)
7464 ("need to finish step-over of [%s]",
7465 ecs
->event_thread
->ptid
.to_string ().c_str ());
7470 /* Check if the current thread is blocked by a single-step
7471 breakpoint of another thread. */
7472 if (ecs
->hit_singlestep_breakpoint
)
7474 infrun_debug_printf ("need to step [%s] over single-step breakpoint",
7475 ecs
->ptid
.to_string ().c_str ());
7480 /* If this thread needs yet another step-over (e.g., stepping
7481 through a delay slot), do it first before moving on to
7483 if (thread_still_needs_step_over (ecs
->event_thread
))
7486 ("thread [%s] still needs step-over",
7487 ecs
->event_thread
->ptid
.to_string ().c_str ());
7492 /* If scheduler locking applies even if not stepping, there's no
7493 need to walk over threads. Above we've checked whether the
7494 current thread is stepping. If some other thread not the
7495 event thread is stepping, then it must be that scheduler
7496 locking is not in effect. */
7497 if (schedlock_applies (ecs
->event_thread
))
7500 /* Otherwise, we no longer expect a trap in the current thread.
7501 Clear the trap_expected flag before switching back -- this is
7502 what keep_going does as well, if we call it. */
7503 ecs
->event_thread
->control
.trap_expected
= 0;
7505 /* Likewise, clear the signal if it should not be passed. */
7506 if (!signal_program
[ecs
->event_thread
->stop_signal ()])
7507 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
7509 if (restart_stepped_thread (ecs
->target
, ecs
->ptid
))
7511 prepare_to_wait (ecs
);
7515 switch_to_thread (ecs
->event_thread
);
7521 /* Look for the thread that was stepping, and resume it.
7522 RESUME_TARGET / RESUME_PTID indicate the set of threads the caller
7523 is resuming. Return true if a thread was started, false
7527 restart_stepped_thread (process_stratum_target
*resume_target
,
7530 /* Do all pending step-overs before actually proceeding with
7532 if (start_step_over ())
7535 for (thread_info
*tp
: all_threads_safe ())
7537 if (tp
->state
== THREAD_EXITED
)
7540 if (tp
->has_pending_waitstatus ())
7543 /* Ignore threads of processes the caller is not
7546 && (tp
->inf
->process_target () != resume_target
7547 || tp
->inf
->pid
!= resume_ptid
.pid ()))
7550 if (tp
->control
.trap_expected
)
7552 infrun_debug_printf ("switching back to stepped thread (step-over)");
7554 if (keep_going_stepped_thread (tp
))
7559 for (thread_info
*tp
: all_threads_safe ())
7561 if (tp
->state
== THREAD_EXITED
)
7564 if (tp
->has_pending_waitstatus ())
7567 /* Ignore threads of processes the caller is not
7570 && (tp
->inf
->process_target () != resume_target
7571 || tp
->inf
->pid
!= resume_ptid
.pid ()))
7574 /* Did we find the stepping thread? */
7575 if (tp
->control
.step_range_end
)
7577 infrun_debug_printf ("switching back to stepped thread (stepping)");
7579 if (keep_going_stepped_thread (tp
))
7590 restart_after_all_stop_detach (process_stratum_target
*proc_target
)
7592 /* Note we don't check target_is_non_stop_p() here, because the
7593 current inferior may no longer have a process_stratum target
7594 pushed, as we just detached. */
7596 /* See if we have a THREAD_RUNNING thread that need to be
7597 re-resumed. If we have any thread that is already executing,
7598 then we don't need to resume the target -- it is already been
7599 resumed. With the remote target (in all-stop), it's even
7600 impossible to issue another resumption if the target is already
7601 resumed, until the target reports a stop. */
7602 for (thread_info
*thr
: all_threads (proc_target
))
7604 if (thr
->state
!= THREAD_RUNNING
)
7607 /* If we have any thread that is already executing, then we
7608 don't need to resume the target -- it is already been
7610 if (thr
->executing ())
7613 /* If we have a pending event to process, skip resuming the
7614 target and go straight to processing it. */
7615 if (thr
->resumed () && thr
->has_pending_waitstatus ())
7619 /* Alright, we need to re-resume the target. If a thread was
7620 stepping, we need to restart it stepping. */
7621 if (restart_stepped_thread (proc_target
, minus_one_ptid
))
7624 /* Otherwise, find the first THREAD_RUNNING thread and resume
7626 for (thread_info
*thr
: all_threads (proc_target
))
7628 if (thr
->state
!= THREAD_RUNNING
)
7631 execution_control_state ecs
;
7632 reset_ecs (&ecs
, thr
);
7633 switch_to_thread (thr
);
7639 /* Set a previously stepped thread back to stepping. Returns true on
7640 success, false if the resume is not possible (e.g., the thread
7644 keep_going_stepped_thread (struct thread_info
*tp
)
7646 struct frame_info
*frame
;
7647 struct execution_control_state ecss
;
7648 struct execution_control_state
*ecs
= &ecss
;
7650 /* If the stepping thread exited, then don't try to switch back and
7651 resume it, which could fail in several different ways depending
7652 on the target. Instead, just keep going.
7654 We can find a stepping dead thread in the thread list in two
7657 - The target supports thread exit events, and when the target
7658 tries to delete the thread from the thread list, inferior_ptid
7659 pointed at the exiting thread. In such case, calling
7660 delete_thread does not really remove the thread from the list;
7661 instead, the thread is left listed, with 'exited' state.
7663 - The target's debug interface does not support thread exit
7664 events, and so we have no idea whatsoever if the previously
7665 stepping thread is still alive. For that reason, we need to
7666 synchronously query the target now. */
7668 if (tp
->state
== THREAD_EXITED
|| !target_thread_alive (tp
->ptid
))
7670 infrun_debug_printf ("not resuming previously stepped thread, it has "
7677 infrun_debug_printf ("resuming previously stepped thread");
7679 reset_ecs (ecs
, tp
);
7680 switch_to_thread (tp
);
7682 tp
->set_stop_pc (regcache_read_pc (get_thread_regcache (tp
)));
7683 frame
= get_current_frame ();
7685 /* If the PC of the thread we were trying to single-step has
7686 changed, then that thread has trapped or been signaled, but the
7687 event has not been reported to GDB yet. Re-poll the target
7688 looking for this particular thread's event (i.e. temporarily
7689 enable schedlock) by:
7691 - setting a break at the current PC
7692 - resuming that particular thread, only (by setting trap
7695 This prevents us continuously moving the single-step breakpoint
7696 forward, one instruction at a time, overstepping. */
7698 if (tp
->stop_pc () != tp
->prev_pc
)
7702 infrun_debug_printf ("expected thread advanced also (%s -> %s)",
7703 paddress (target_gdbarch (), tp
->prev_pc
),
7704 paddress (target_gdbarch (), tp
->stop_pc ()));
7706 /* Clear the info of the previous step-over, as it's no longer
7707 valid (if the thread was trying to step over a breakpoint, it
7708 has already succeeded). It's what keep_going would do too,
7709 if we called it. Do this before trying to insert the sss
7710 breakpoint, otherwise if we were previously trying to step
7711 over this exact address in another thread, the breakpoint is
7713 clear_step_over_info ();
7714 tp
->control
.trap_expected
= 0;
7716 insert_single_step_breakpoint (get_frame_arch (frame
),
7717 get_frame_address_space (frame
),
7720 tp
->set_resumed (true);
7721 resume_ptid
= internal_resume_ptid (tp
->control
.stepping_command
);
7722 do_target_resume (resume_ptid
, false, GDB_SIGNAL_0
);
7726 infrun_debug_printf ("expected thread still hasn't advanced");
7728 keep_going_pass_signal (ecs
);
7734 /* Is thread TP in the middle of (software or hardware)
7735 single-stepping? (Note the result of this function must never be
7736 passed directly as target_resume's STEP parameter.) */
7739 currently_stepping (struct thread_info
*tp
)
7741 return ((tp
->control
.step_range_end
7742 && tp
->control
.step_resume_breakpoint
== NULL
)
7743 || tp
->control
.trap_expected
7744 || tp
->stepped_breakpoint
7745 || bpstat_should_step ());
7748 /* Inferior has stepped into a subroutine call with source code that
7749 we should not step over. Do step to the first line of code in
7753 handle_step_into_function (struct gdbarch
*gdbarch
,
7754 struct execution_control_state
*ecs
)
7756 fill_in_stop_func (gdbarch
, ecs
);
7758 compunit_symtab
*cust
7759 = find_pc_compunit_symtab (ecs
->event_thread
->stop_pc ());
7760 if (cust
!= NULL
&& compunit_language (cust
) != language_asm
)
7761 ecs
->stop_func_start
7762 = gdbarch_skip_prologue_noexcept (gdbarch
, ecs
->stop_func_start
);
7764 symtab_and_line stop_func_sal
= find_pc_line (ecs
->stop_func_start
, 0);
7765 /* Use the step_resume_break to step until the end of the prologue,
7766 even if that involves jumps (as it seems to on the vax under
7768 /* If the prologue ends in the middle of a source line, continue to
7769 the end of that source line (if it is still within the function).
7770 Otherwise, just go to end of prologue. */
7771 if (stop_func_sal
.end
7772 && stop_func_sal
.pc
!= ecs
->stop_func_start
7773 && stop_func_sal
.end
< ecs
->stop_func_end
)
7774 ecs
->stop_func_start
= stop_func_sal
.end
;
7776 /* Architectures which require breakpoint adjustment might not be able
7777 to place a breakpoint at the computed address. If so, the test
7778 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
7779 ecs->stop_func_start to an address at which a breakpoint may be
7780 legitimately placed.
7782 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
7783 made, GDB will enter an infinite loop when stepping through
7784 optimized code consisting of VLIW instructions which contain
7785 subinstructions corresponding to different source lines. On
7786 FR-V, it's not permitted to place a breakpoint on any but the
7787 first subinstruction of a VLIW instruction. When a breakpoint is
7788 set, GDB will adjust the breakpoint address to the beginning of
7789 the VLIW instruction. Thus, we need to make the corresponding
7790 adjustment here when computing the stop address. */
7792 if (gdbarch_adjust_breakpoint_address_p (gdbarch
))
7794 ecs
->stop_func_start
7795 = gdbarch_adjust_breakpoint_address (gdbarch
,
7796 ecs
->stop_func_start
);
7799 if (ecs
->stop_func_start
== ecs
->event_thread
->stop_pc ())
7801 /* We are already there: stop now. */
7802 end_stepping_range (ecs
);
7807 /* Put the step-breakpoint there and go until there. */
7808 symtab_and_line sr_sal
;
7809 sr_sal
.pc
= ecs
->stop_func_start
;
7810 sr_sal
.section
= find_pc_overlay (ecs
->stop_func_start
);
7811 sr_sal
.pspace
= get_frame_program_space (get_current_frame ());
7813 /* Do not specify what the fp should be when we stop since on
7814 some machines the prologue is where the new fp value is
7816 insert_step_resume_breakpoint_at_sal (gdbarch
, sr_sal
, null_frame_id
);
7818 /* And make sure stepping stops right away then. */
7819 ecs
->event_thread
->control
.step_range_end
7820 = ecs
->event_thread
->control
.step_range_start
;
7825 /* Inferior has stepped backward into a subroutine call with source
7826 code that we should not step over. Do step to the beginning of the
7827 last line of code in it. */
7830 handle_step_into_function_backward (struct gdbarch
*gdbarch
,
7831 struct execution_control_state
*ecs
)
7833 struct compunit_symtab
*cust
;
7834 struct symtab_and_line stop_func_sal
;
7836 fill_in_stop_func (gdbarch
, ecs
);
7838 cust
= find_pc_compunit_symtab (ecs
->event_thread
->stop_pc ());
7839 if (cust
!= NULL
&& compunit_language (cust
) != language_asm
)
7840 ecs
->stop_func_start
7841 = gdbarch_skip_prologue_noexcept (gdbarch
, ecs
->stop_func_start
);
7843 stop_func_sal
= find_pc_line (ecs
->event_thread
->stop_pc (), 0);
7845 /* OK, we're just going to keep stepping here. */
7846 if (stop_func_sal
.pc
== ecs
->event_thread
->stop_pc ())
7848 /* We're there already. Just stop stepping now. */
7849 end_stepping_range (ecs
);
7853 /* Else just reset the step range and keep going.
7854 No step-resume breakpoint, they don't work for
7855 epilogues, which can have multiple entry paths. */
7856 ecs
->event_thread
->control
.step_range_start
= stop_func_sal
.pc
;
7857 ecs
->event_thread
->control
.step_range_end
= stop_func_sal
.end
;
7863 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
7864 This is used to both functions and to skip over code. */
7867 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch
*gdbarch
,
7868 struct symtab_and_line sr_sal
,
7869 struct frame_id sr_id
,
7870 enum bptype sr_type
)
7872 /* There should never be more than one step-resume or longjmp-resume
7873 breakpoint per thread, so we should never be setting a new
7874 step_resume_breakpoint when one is already active. */
7875 gdb_assert (inferior_thread ()->control
.step_resume_breakpoint
== NULL
);
7876 gdb_assert (sr_type
== bp_step_resume
|| sr_type
== bp_hp_step_resume
);
7878 infrun_debug_printf ("inserting step-resume breakpoint at %s",
7879 paddress (gdbarch
, sr_sal
.pc
));
7881 inferior_thread ()->control
.step_resume_breakpoint
7882 = set_momentary_breakpoint (gdbarch
, sr_sal
, sr_id
, sr_type
).release ();
7886 insert_step_resume_breakpoint_at_sal (struct gdbarch
*gdbarch
,
7887 struct symtab_and_line sr_sal
,
7888 struct frame_id sr_id
)
7890 insert_step_resume_breakpoint_at_sal_1 (gdbarch
,
7895 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
7896 This is used to skip a potential signal handler.
7898 This is called with the interrupted function's frame. The signal
7899 handler, when it returns, will resume the interrupted function at
7903 insert_hp_step_resume_breakpoint_at_frame (struct frame_info
*return_frame
)
7905 gdb_assert (return_frame
!= NULL
);
7907 struct gdbarch
*gdbarch
= get_frame_arch (return_frame
);
7909 symtab_and_line sr_sal
;
7910 sr_sal
.pc
= gdbarch_addr_bits_remove (gdbarch
, get_frame_pc (return_frame
));
7911 sr_sal
.section
= find_pc_overlay (sr_sal
.pc
);
7912 sr_sal
.pspace
= get_frame_program_space (return_frame
);
7914 insert_step_resume_breakpoint_at_sal_1 (gdbarch
, sr_sal
,
7915 get_stack_frame_id (return_frame
),
7919 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
7920 is used to skip a function after stepping into it (for "next" or if
7921 the called function has no debugging information).
7923 The current function has almost always been reached by single
7924 stepping a call or return instruction. NEXT_FRAME belongs to the
7925 current function, and the breakpoint will be set at the caller's
7928 This is a separate function rather than reusing
7929 insert_hp_step_resume_breakpoint_at_frame in order to avoid
7930 get_prev_frame, which may stop prematurely (see the implementation
7931 of frame_unwind_caller_id for an example). */
7934 insert_step_resume_breakpoint_at_caller (struct frame_info
*next_frame
)
7936 /* We shouldn't have gotten here if we don't know where the call site
7938 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame
)));
7940 struct gdbarch
*gdbarch
= frame_unwind_caller_arch (next_frame
);
7942 symtab_and_line sr_sal
;
7943 sr_sal
.pc
= gdbarch_addr_bits_remove (gdbarch
,
7944 frame_unwind_caller_pc (next_frame
));
7945 sr_sal
.section
= find_pc_overlay (sr_sal
.pc
);
7946 sr_sal
.pspace
= frame_unwind_program_space (next_frame
);
7948 insert_step_resume_breakpoint_at_sal (gdbarch
, sr_sal
,
7949 frame_unwind_caller_id (next_frame
));
7952 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
7953 new breakpoint at the target of a jmp_buf. The handling of
7954 longjmp-resume uses the same mechanisms used for handling
7955 "step-resume" breakpoints. */
7958 insert_longjmp_resume_breakpoint (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
7960 /* There should never be more than one longjmp-resume breakpoint per
7961 thread, so we should never be setting a new
7962 longjmp_resume_breakpoint when one is already active. */
7963 gdb_assert (inferior_thread ()->control
.exception_resume_breakpoint
== NULL
);
7965 infrun_debug_printf ("inserting longjmp-resume breakpoint at %s",
7966 paddress (gdbarch
, pc
));
7968 inferior_thread ()->control
.exception_resume_breakpoint
=
7969 set_momentary_breakpoint_at_pc (gdbarch
, pc
, bp_longjmp_resume
).release ();
7972 /* Insert an exception resume breakpoint. TP is the thread throwing
7973 the exception. The block B is the block of the unwinder debug hook
7974 function. FRAME is the frame corresponding to the call to this
7975 function. SYM is the symbol of the function argument holding the
7976 target PC of the exception. */
7979 insert_exception_resume_breakpoint (struct thread_info
*tp
,
7980 const struct block
*b
,
7981 struct frame_info
*frame
,
7986 struct block_symbol vsym
;
7987 struct value
*value
;
7989 struct breakpoint
*bp
;
7991 vsym
= lookup_symbol_search_name (sym
->search_name (),
7993 value
= read_var_value (vsym
.symbol
, vsym
.block
, frame
);
7994 /* If the value was optimized out, revert to the old behavior. */
7995 if (! value_optimized_out (value
))
7997 handler
= value_as_address (value
);
7999 infrun_debug_printf ("exception resume at %lx",
8000 (unsigned long) handler
);
8002 bp
= set_momentary_breakpoint_at_pc (get_frame_arch (frame
),
8004 bp_exception_resume
).release ();
8006 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
8009 bp
->thread
= tp
->global_num
;
8010 inferior_thread ()->control
.exception_resume_breakpoint
= bp
;
8013 catch (const gdb_exception_error
&e
)
8015 /* We want to ignore errors here. */
8019 /* A helper for check_exception_resume that sets an
8020 exception-breakpoint based on a SystemTap probe. */
8023 insert_exception_resume_from_probe (struct thread_info
*tp
,
8024 const struct bound_probe
*probe
,
8025 struct frame_info
*frame
)
8027 struct value
*arg_value
;
8029 struct breakpoint
*bp
;
8031 arg_value
= probe_safe_evaluate_at_pc (frame
, 1);
8035 handler
= value_as_address (arg_value
);
8037 infrun_debug_printf ("exception resume at %s",
8038 paddress (probe
->objfile
->arch (), handler
));
8040 bp
= set_momentary_breakpoint_at_pc (get_frame_arch (frame
),
8041 handler
, bp_exception_resume
).release ();
8042 bp
->thread
= tp
->global_num
;
8043 inferior_thread ()->control
.exception_resume_breakpoint
= bp
;
8046 /* This is called when an exception has been intercepted. Check to
8047 see whether the exception's destination is of interest, and if so,
8048 set an exception resume breakpoint there. */
8051 check_exception_resume (struct execution_control_state
*ecs
,
8052 struct frame_info
*frame
)
8054 struct bound_probe probe
;
8055 struct symbol
*func
;
8057 /* First see if this exception unwinding breakpoint was set via a
8058 SystemTap probe point. If so, the probe has two arguments: the
8059 CFA and the HANDLER. We ignore the CFA, extract the handler, and
8060 set a breakpoint there. */
8061 probe
= find_probe_by_pc (get_frame_pc (frame
));
8064 insert_exception_resume_from_probe (ecs
->event_thread
, &probe
, frame
);
8068 func
= get_frame_function (frame
);
8074 const struct block
*b
;
8075 struct block_iterator iter
;
8079 /* The exception breakpoint is a thread-specific breakpoint on
8080 the unwinder's debug hook, declared as:
8082 void _Unwind_DebugHook (void *cfa, void *handler);
8084 The CFA argument indicates the frame to which control is
8085 about to be transferred. HANDLER is the destination PC.
8087 We ignore the CFA and set a temporary breakpoint at HANDLER.
8088 This is not extremely efficient but it avoids issues in gdb
8089 with computing the DWARF CFA, and it also works even in weird
8090 cases such as throwing an exception from inside a signal
8093 b
= func
->value_block ();
8094 ALL_BLOCK_SYMBOLS (b
, iter
, sym
)
8096 if (!sym
->is_argument ())
8103 insert_exception_resume_breakpoint (ecs
->event_thread
,
8109 catch (const gdb_exception_error
&e
)
8115 stop_waiting (struct execution_control_state
*ecs
)
8117 infrun_debug_printf ("stop_waiting");
8119 /* Let callers know we don't want to wait for the inferior anymore. */
8120 ecs
->wait_some_more
= 0;
8122 /* If all-stop, but there exists a non-stop target, stop all
8123 threads now that we're presenting the stop to the user. */
8124 if (!non_stop
&& exists_non_stop_target ())
8125 stop_all_threads ("presenting stop to user in all-stop");
8128 /* Like keep_going, but passes the signal to the inferior, even if the
8129 signal is set to nopass. */
8132 keep_going_pass_signal (struct execution_control_state
*ecs
)
8134 gdb_assert (ecs
->event_thread
->ptid
== inferior_ptid
);
8135 gdb_assert (!ecs
->event_thread
->resumed ());
8137 /* Save the pc before execution, to compare with pc after stop. */
8138 ecs
->event_thread
->prev_pc
8139 = regcache_read_pc_protected (get_thread_regcache (ecs
->event_thread
));
8141 if (ecs
->event_thread
->control
.trap_expected
)
8143 struct thread_info
*tp
= ecs
->event_thread
;
8145 infrun_debug_printf ("%s has trap_expected set, "
8146 "resuming to collect trap",
8147 tp
->ptid
.to_string ().c_str ());
8149 /* We haven't yet gotten our trap, and either: intercepted a
8150 non-signal event (e.g., a fork); or took a signal which we
8151 are supposed to pass through to the inferior. Simply
8153 resume (ecs
->event_thread
->stop_signal ());
8155 else if (step_over_info_valid_p ())
8157 /* Another thread is stepping over a breakpoint in-line. If
8158 this thread needs a step-over too, queue the request. In
8159 either case, this resume must be deferred for later. */
8160 struct thread_info
*tp
= ecs
->event_thread
;
8162 if (ecs
->hit_singlestep_breakpoint
8163 || thread_still_needs_step_over (tp
))
8165 infrun_debug_printf ("step-over already in progress: "
8166 "step-over for %s deferred",
8167 tp
->ptid
.to_string ().c_str ());
8168 global_thread_step_over_chain_enqueue (tp
);
8171 infrun_debug_printf ("step-over in progress: resume of %s deferred",
8172 tp
->ptid
.to_string ().c_str ());
8176 struct regcache
*regcache
= get_current_regcache ();
8179 step_over_what step_what
;
8181 /* Either the trap was not expected, but we are continuing
8182 anyway (if we got a signal, the user asked it be passed to
8185 We got our expected trap, but decided we should resume from
8188 We're going to run this baby now!
8190 Note that insert_breakpoints won't try to re-insert
8191 already inserted breakpoints. Therefore, we don't
8192 care if breakpoints were already inserted, or not. */
8194 /* If we need to step over a breakpoint, and we're not using
8195 displaced stepping to do so, insert all breakpoints
8196 (watchpoints, etc.) but the one we're stepping over, step one
8197 instruction, and then re-insert the breakpoint when that step
8200 step_what
= thread_still_needs_step_over (ecs
->event_thread
);
8202 remove_bp
= (ecs
->hit_singlestep_breakpoint
8203 || (step_what
& STEP_OVER_BREAKPOINT
));
8204 remove_wps
= (step_what
& STEP_OVER_WATCHPOINT
);
8206 /* We can't use displaced stepping if we need to step past a
8207 watchpoint. The instruction copied to the scratch pad would
8208 still trigger the watchpoint. */
8210 && (remove_wps
|| !use_displaced_stepping (ecs
->event_thread
)))
8212 set_step_over_info (regcache
->aspace (),
8213 regcache_read_pc (regcache
), remove_wps
,
8214 ecs
->event_thread
->global_num
);
8216 else if (remove_wps
)
8217 set_step_over_info (NULL
, 0, remove_wps
, -1);
8219 /* If we now need to do an in-line step-over, we need to stop
8220 all other threads. Note this must be done before
8221 insert_breakpoints below, because that removes the breakpoint
8222 we're about to step over, otherwise other threads could miss
8224 if (step_over_info_valid_p () && target_is_non_stop_p ())
8225 stop_all_threads ("starting in-line step-over");
8227 /* Stop stepping if inserting breakpoints fails. */
8230 insert_breakpoints ();
8232 catch (const gdb_exception_error
&e
)
8234 exception_print (gdb_stderr
, e
);
8236 clear_step_over_info ();
8240 ecs
->event_thread
->control
.trap_expected
= (remove_bp
|| remove_wps
);
8242 resume (ecs
->event_thread
->stop_signal ());
8245 prepare_to_wait (ecs
);
8248 /* Called when we should continue running the inferior, because the
8249 current event doesn't cause a user visible stop. This does the
8250 resuming part; waiting for the next event is done elsewhere. */
8253 keep_going (struct execution_control_state
*ecs
)
8255 if (ecs
->event_thread
->control
.trap_expected
8256 && ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
)
8257 ecs
->event_thread
->control
.trap_expected
= 0;
8259 if (!signal_program
[ecs
->event_thread
->stop_signal ()])
8260 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
8261 keep_going_pass_signal (ecs
);
/* This function normally comes after a resume, before
   handle_inferior_event exits.  It takes care of any last bits of
   housekeeping, and sets the all-important wait_some_more flag.  */

static void
prepare_to_wait (struct execution_control_state *ecs)
{
  infrun_debug_printf ("prepare_to_wait");

  /* Tell the event loop we expect another target event.  */
  ecs->wait_some_more = 1;

  /* If the target can't async, emulate it by marking the infrun event
     handler such that as soon as we get back to the event-loop, we
     immediately end up in fetch_inferior_event again calling
     target_wait.  */
  if (!target_can_async_p ())
    mark_infrun_async_event_handler ();
}
8283 /* We are done with the step range of a step/next/si/ni command.
8284 Called once for each n of a "step n" operation. */
8287 end_stepping_range (struct execution_control_state
*ecs
)
8289 ecs
->event_thread
->control
.stop_step
= 1;
/* Several print_*_reason functions to print why the inferior has stopped.
   We always print something when the inferior exits, or receives a signal.
   The rest of the cases are dealt with later on in normal_stop and
   print_it_typical.  Ideally there should be a call to one of these
   print_*_reason functions from handle_inferior_event each time
   stop_waiting is called.

   Note that we don't call these directly, instead we delegate that to
   the interpreters, through observers.  Interpreters then call these
   with whatever uiout is right.  */

void
print_end_stepping_range_reason (struct ui_out *uiout)
{
  /* For CLI-like interpreters, print nothing.  */

  if (uiout->is_mi_like_p ())
    {
      uiout->field_string ("reason",
			   async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
    }
}
/* Report that the inferior was killed by a signal (the process is
   gone).  Emits both the MI "reason" field and the CLI text.  */

void
print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
{
  annotate_signalled ();
  if (uiout->is_mi_like_p ())
    uiout->field_string
      ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
  uiout->text ("\nProgram terminated with signal ");
  annotate_signal_name ();
  uiout->field_string ("signal-name",
		       gdb_signal_to_name (siggnal));
  annotate_signal_name_end ();
  annotate_signal_string ();
  uiout->field_string ("signal-meaning",
		       gdb_signal_to_string (siggnal));
  annotate_signal_string_end ();
  uiout->text (".\n");
  uiout->text ("The program no longer exists.\n");
}
/* Report that the inferior exited, distinguishing a non-zero exit
   code (printed in octal, matching historical GDB output) from a
   normal (zero) exit.  */

void
print_exited_reason (struct ui_out *uiout, int exitstatus)
{
  struct inferior *inf = current_inferior ();
  std::string pidstr
    = target_pid_to_str (ptid_t (inf->pid));

  annotate_exited (exitstatus);
  if (exitstatus)
    {
      if (uiout->is_mi_like_p ())
	uiout->field_string ("reason", async_reason_lookup (EXEC_ASYNC_EXITED));
      std::string exit_code_str
	= string_printf ("0%o", (unsigned int) exitstatus);
      uiout->message ("[Inferior %s (%s) exited with code %pF]\n",
		      plongest (inf->num), pidstr.c_str (),
		      string_field ("exit-code", exit_code_str.c_str ()));
    }
  else
    {
      if (uiout->is_mi_like_p ())
	uiout->field_string
	  ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
      uiout->message ("[Inferior %s (%s) exited normally]\n",
		      plongest (inf->num), pidstr.c_str ());
    }
}
/* Report that the inferior received a signal and stopped.  For MI the
   thread/program prefix is suppressed; for CLI we name the stopping
   thread when that is enabled.  GDB_SIGNAL_0 means "stopped with no
   signal", printed as just " stopped" on CLI.  */

void
print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
{
  struct thread_info *thr = inferior_thread ();

  if (uiout->is_mi_like_p ())
    ;
  else if (show_thread_that_caused_stop ())
    {
      uiout->text ("\nThread ");
      uiout->field_string ("thread-id", print_thread_id (thr));

      const char *name = thread_name (thr);
      if (name != NULL)
	{
	  uiout->text (" \"");
	  uiout->field_string ("name", name);
	  uiout->text ("\"");
	}
    }
  else
    uiout->text ("\nProgram");

  if (siggnal == GDB_SIGNAL_0 && !uiout->is_mi_like_p ())
    uiout->text (" stopped");
  else
    {
      uiout->text (" received signal ");
      annotate_signal_name ();
      if (uiout->is_mi_like_p ())
	uiout->field_string
	  ("reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
      uiout->field_string ("signal-name", gdb_signal_to_name (siggnal));
      annotate_signal_name_end ();
      annotate_signal_string ();
      uiout->field_string ("signal-meaning", gdb_signal_to_string (siggnal));

      /* Let the architecture append extra signal details (e.g. memory
	 tag faults) when it knows how.  */
      struct regcache *regcache = get_current_regcache ();
      struct gdbarch *gdbarch = regcache->arch ();
      if (gdbarch_report_signal_info_p (gdbarch))
	gdbarch_report_signal_info (gdbarch, uiout, siggnal);

      annotate_signal_string_end ();
    }

  uiout->text (".\n");
}
/* Report that reverse execution ran out of recorded history.  */

void
print_no_history_reason (struct ui_out *uiout)
{
  uiout->text ("\nNo more reverse-execution history.\n");
}
/* Print current location without a level number, if we have changed
   functions or hit a breakpoint.  Print source line if we have one.
   bpstat_print contains the logic deciding in detail what to print,
   based on the event(s) that just occurred.  */

static void
print_stop_location (const target_waitstatus &ws)
{
  int bpstat_ret;
  enum print_what source_flag;
  int do_frame_printing = 1;
  struct thread_info *tp = inferior_thread ();

  /* Let the breakpoint machinery print what it knows; its return
     value tells us how much is left for us to print.  */
  bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws.kind ());
  switch (bpstat_ret)
    {
    case PRINT_UNKNOWN:
      /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
	 should) carry around the function and does (or should) use
	 that when doing a frame comparison.  */
      if (tp->control.stop_step
	  && frame_id_eq (tp->control.step_frame_id,
			  get_frame_id (get_current_frame ()))
	  && (tp->control.step_start_function
	      == find_pc_function (tp->stop_pc ())))
	{
	  /* Finished step, just print source line.  */
	  source_flag = SRC_LINE;
	}
      else
	{
	  /* Print location and source line.  */
	  source_flag = SRC_AND_LOC;
	}
      break;
    case PRINT_SRC_AND_LOC:
      /* Print location and source line.  */
      source_flag = SRC_AND_LOC;
      break;
    case PRINT_SRC_ONLY:
      source_flag = SRC_LINE;
      break;
    case PRINT_NOTHING:
      /* Something bogus.  */
      source_flag = SRC_LINE;
      do_frame_printing = 0;
      break;
    default:
      internal_error (__FILE__, __LINE__, _("Unknown value."));
    }

  /* The behavior of this routine with respect to the source
     flag is:
     SRC_LINE: Print only source line
     LOCATION: Print only location
     SRC_AND_LOC: Print location and source line.  */
  if (do_frame_printing)
    print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);
}
/* Print the stop event to UIOUT: the stop location, optionally the
   auto-display expressions (when DISPLAYS), and, if the thread's FSM
   (e.g. a "finish" command) has completed, its return value.  */

void
print_stop_event (struct ui_out *uiout, bool displays)
{
  struct target_waitstatus last;
  struct thread_info *tp;

  get_last_target_status (nullptr, nullptr, &last);

  {
    /* Route all output produced below to UIOUT.  */
    scoped_restore save_uiout = make_scoped_restore (&current_uiout, uiout);

    print_stop_location (last);

    /* Display the auto-display expressions.  */
    if (displays)
      do_displays ();
  }

  tp = inferior_thread ();
  if (tp->thread_fsm () != nullptr
      && tp->thread_fsm ()->finished_p ())
    {
      struct return_value_info *rv;

      rv = tp->thread_fsm ()->return_value ();
      if (rv != nullptr)
	print_return_value (uiout, rv);
    }
}
8515 maybe_remove_breakpoints (void)
8517 if (!breakpoints_should_be_inserted_now () && target_has_execution ())
8519 if (remove_breakpoints ())
8521 target_terminal::ours_for_output ();
8522 gdb_printf (_("Cannot remove breakpoints because "
8523 "program is no longer writable.\nFurther "
8524 "execution is probably impossible.\n"));
/* The execution context that just caused a normal stop.  */

struct stop_context
{
  stop_context ();

  DISABLE_COPY_AND_ASSIGN (stop_context);

  bool changed () const;

  /* The stop ID at the time this context was captured.  */
  ULONGEST stop_id;

  /* The event PTID.  */
  ptid_t ptid;

  /* If stopped for a thread event, this is the thread that caused the
     stop.  */
  thread_info_ref thread;

  /* The inferior that caused the stop.  */
  int inf_num;
};
/* Initializes a new stop context.  If stopped for a thread event, this
   takes a strong reference to the thread.  */

stop_context::stop_context ()
{
  stop_id = get_stop_id ();
  ptid = inferior_ptid;
  inf_num = current_inferior ()->num;

  if (inferior_ptid != null_ptid)
    {
      /* Take a strong reference so that the thread can't be deleted
	 yet.  */
      thread = thread_info_ref::new_reference (inferior_thread ());
    }
}
8571 /* Return true if the current context no longer matches the saved stop
8575 stop_context::changed () const
8577 if (ptid
!= inferior_ptid
)
8579 if (inf_num
!= current_inferior ()->num
)
8581 if (thread
!= NULL
&& thread
->state
!= THREAD_STOPPED
)
8583 if (get_stop_id () != stop_id
)
/* See infrun.h.  Present a normal stop to the user: finish thread
   states, print the switch-of-thread notice, run the stop hook, and
   notify the normal_stop observers.  Returns nonzero if the stop hook
   invalidated the stop context (in which case no stop is presented).
   NOTE(review): the function header was lost in this copy; signature
   reconstructed — confirm against infrun.h.  */

int
normal_stop (void)
{
  struct target_waitstatus last;

  get_last_target_status (nullptr, nullptr, &last);

  new_stop_id ();

  /* If an exception is thrown from this point on, make sure to
     propagate GDB's knowledge of the executing state to the
     frontend/user running state.  A QUIT is an easy exception to see
     here, so do this before any filtered output.  */

  ptid_t finish_ptid = null_ptid;

  if (!non_stop)
    finish_ptid = minus_one_ptid;
  else if (last.kind () == TARGET_WAITKIND_SIGNALLED
	   || last.kind () == TARGET_WAITKIND_EXITED)
    {
      /* On some targets, we may still have live threads in the
	 inferior when we get a process exit event.  E.g., for
	 "checkpoint", when the current checkpoint/fork exits,
	 linux-fork.c automatically switches to another fork from
	 within target_mourn_inferior.  */
      if (inferior_ptid != null_ptid)
	finish_ptid = ptid_t (inferior_ptid.pid ());
    }
  else if (last.kind () != TARGET_WAITKIND_NO_RESUMED)
    finish_ptid = inferior_ptid;

  gdb::optional<scoped_finish_thread_state> maybe_finish_thread_state;
  if (finish_ptid != null_ptid)
    {
      maybe_finish_thread_state.emplace
	(user_visible_resume_target (finish_ptid), finish_ptid);
    }

  /* As we're presenting a stop, and potentially removing breakpoints,
     update the thread list so we can tell whether there are threads
     running on the target.  With target remote, for example, we can
     only learn about new threads when we explicitly update the thread
     list.  Do this before notifying the interpreters about signal
     stops, end of stepping ranges, etc., so that the "new thread"
     output is emitted before e.g., "Program received signal FOO",
     instead of after.  */
  update_thread_list ();

  if (last.kind () == TARGET_WAITKIND_STOPPED && stopped_by_random_signal)
    gdb::observers::signal_received.notify (inferior_thread ()->stop_signal ());

  /* As with the notification of thread events, we want to delay
     notifying the user that we've switched thread context until
     the inferior actually stops.

     There's no point in saying anything if the inferior has exited.
     Note that SIGNALLED here means "exited with a signal", not
     "received a signal".

     Also skip saying anything in non-stop mode.  In that mode, as we
     don't want GDB to switch threads behind the user's back, to avoid
     races where the user is typing a command to apply to thread x,
     but GDB switches to thread y before the user finishes entering
     the command, fetch_inferior_event installs a cleanup to restore
     the current thread back to the thread the user had selected right
     after this event is handled, so we're not really switching, only
     informing of a stop.  */
  if (!non_stop
      && previous_inferior_ptid != inferior_ptid
      && target_has_execution ()
      && last.kind () != TARGET_WAITKIND_SIGNALLED
      && last.kind () != TARGET_WAITKIND_EXITED
      && last.kind () != TARGET_WAITKIND_NO_RESUMED)
    {
      SWITCH_THRU_ALL_UIS ()
	{
	  target_terminal::ours_for_output ();
	  gdb_printf (_("[Switching to %s]\n"),
		      target_pid_to_str (inferior_ptid).c_str ());
	  annotate_thread_changed ();
	}
      previous_inferior_ptid = inferior_ptid;
    }

  if (last.kind () == TARGET_WAITKIND_NO_RESUMED)
    {
      SWITCH_THRU_ALL_UIS ()
	if (current_ui->prompt_state == PROMPT_BLOCKED)
	  {
	    target_terminal::ours_for_output ();
	    gdb_printf (_("No unwaited-for children left.\n"));
	  }
    }

  /* Note: this depends on the update_thread_list call above.  */
  maybe_remove_breakpoints ();

  /* If an auto-display called a function and that got a signal,
     delete that auto-display to avoid an infinite recursion.  */

  if (stopped_by_random_signal)
    disable_current_display ();

  SWITCH_THRU_ALL_UIS ()
    {
      async_enable_stdin ();
    }

  /* Let the user/frontend see the threads as stopped.  */
  maybe_finish_thread_state.reset ();

  /* Select innermost stack frame - i.e., current frame is frame 0,
     and current location is based on that.  Handle the case where the
     dummy call is returning after being stopped.  E.g. the dummy call
     previously hit a breakpoint.  (If the dummy call returns
     normally, we won't reach here.)  Do this before the stop hook is
     run, so that it doesn't get to see the temporary dummy frame,
     which is not where we'll present the stop.  */
  if (has_stack_frames ())
    {
      if (stop_stack_dummy == STOP_STACK_DUMMY)
	{
	  /* Pop the empty frame that contains the stack dummy.  This
	     also restores inferior state prior to the call (struct
	     infcall_suspend_state).  */
	  struct frame_info *frame = get_current_frame ();

	  gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
	  frame_pop (frame);
	  /* frame_pop calls reinit_frame_cache as the last thing it
	     does which means there's now no selected frame.  */
	}

      select_frame (get_current_frame ());

      /* Set the current source location.  */
      set_current_sal_from_frame (get_current_frame ());
    }

  /* Look up the hook_stop and run it (CLI internally handles problem
     of stop_command's pre-hook not existing).  */
  if (stop_command != NULL)
    {
      stop_context saved_context;

      try
	{
	  execute_cmd_pre_hook (stop_command);
	}
      catch (const gdb_exception &ex)
	{
	  exception_fprintf (gdb_stderr, ex,
			     "Error while running hook_stop:\n");
	}

      /* If the stop hook resumes the target, then there's no point in
	 trying to notify about the previous stop; its context is
	 gone.  Likewise if the command switches thread or inferior --
	 the observers would print a stop for the wrong
	 thread/inferior.  */
      if (saved_context.changed ())
	return 1;
    }

  /* Notify observers about the stop.  This is where the interpreters
     print the stop event.  */
  if (inferior_ptid != null_ptid)
    gdb::observers::normal_stop.notify (inferior_thread ()->control.stop_bpstat,
					stop_print_frame);
  else
    gdb::observers::normal_stop.notify (NULL, stop_print_frame);

  annotate_stopped ();

  if (target_has_execution ())
    {
      if (last.kind () != TARGET_WAITKIND_SIGNALLED
	  && last.kind () != TARGET_WAITKIND_EXITED
	  && last.kind () != TARGET_WAITKIND_NO_RESUMED)
	/* Delete the breakpoint we stopped at, if it wants to be deleted.
	   Delete any breakpoint that is to be deleted at the next stop.  */
	breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
    }

  /* Try to get rid of automatically added inferiors that are no
     longer needed.  Keeping those around slows down things linearly.
     Note that this never removes the current inferior.  */
  prune_inferiors ();

  return 0;
}
/* Return nonzero if GDB should stop when the inferior gets SIGNO.  */

int
signal_stop_state (int signo)
{
  return signal_stop[signo];
}
/* Return nonzero if GDB should print a message when the inferior gets
   SIGNO.  */

int
signal_print_state (int signo)
{
  return signal_print[signo];
}
/* Return nonzero if SIGNO should be passed through to the inferior.  */

int
signal_pass_state (int signo)
{
  return signal_program[signo];
}
/* Recompute the cached signal_pass entry for SIGNO from the stop /
   print / program / catch tables.  SIGNO of -1 means recompute every
   signal.  A signal can be passed through without GDB intervention
   only when it neither stops, prints, is held back, nor is caught.  */

static void
signal_cache_update (int signo)
{
  if (signo == -1)
    {
      for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
	signal_cache_update (signo);

      return;
    }

  signal_pass[signo] = (signal_stop[signo] == 0
			&& signal_print[signo] == 0
			&& signal_program[signo] == 1
			&& signal_catch[signo] == 0);
}
/* Set whether GDB stops on SIGNO to STATE; returns the old setting.  */

int
signal_stop_update (int signo, int state)
{
  int ret = signal_stop[signo];

  signal_stop[signo] = state;
  signal_cache_update (signo);
  return ret;
}
/* Set whether GDB prints on SIGNO to STATE; returns the old setting.  */

int
signal_print_update (int signo, int state)
{
  int ret = signal_print[signo];

  signal_print[signo] = state;
  signal_cache_update (signo);
  return ret;
}
/* Set whether SIGNO is passed to the program to STATE; returns the
   old setting.  */

int
signal_pass_update (int signo, int state)
{
  int ret = signal_program[signo];

  signal_program[signo] = state;
  signal_cache_update (signo);
  return ret;
}
/* Update the global 'signal_catch' from INFO and notify the
   target.  */

void
signal_catch_update (const unsigned int *info)
{
  int i;

  for (i = 0; i < GDB_SIGNAL_LAST; ++i)
    signal_catch[i] = info[i] > 0;
  /* -1: recompute the pass cache for all signals.  */
  signal_cache_update (-1);
  target_pass_signals (signal_pass);
}
/* Print the column header for the "info signals" table.  */

static void
sig_print_header (void)
{
  gdb_printf (_("Signal        Stop\tPrint\tPass "
		"to program\tDescription\n"));
}
8867 sig_print_info (enum gdb_signal oursig
)
8869 const char *name
= gdb_signal_to_name (oursig
);
8870 int name_padding
= 13 - strlen (name
);
8872 if (name_padding
<= 0)
8875 gdb_printf ("%s", name
);
8876 gdb_printf ("%*.*s ", name_padding
, name_padding
, " ");
8877 gdb_printf ("%s\t", signal_stop
[oursig
] ? "Yes" : "No");
8878 gdb_printf ("%s\t", signal_print
[oursig
] ? "Yes" : "No");
8879 gdb_printf ("%s\t\t", signal_program
[oursig
] ? "Yes" : "No");
8880 gdb_printf ("%s\n", gdb_signal_to_string (oursig
));
/* Specify how various signals in the inferior should be handled.
   ARGS is a mix of signal names/numbers/ranges and action keywords
   (stop/nostop, print/noprint, pass/nopass, ignore/noignore, all);
   actions accumulate and apply to every signal mentioned.  */

static void
handle_command (const char *args, int from_tty)
{
  int digits, wordlen;
  int sigfirst, siglast;
  enum gdb_signal oursig;
  int allsigs;

  if (args == NULL)
    {
      error_no_arg (_("signal to handle"));
    }

  /* Allocate and zero an array of flags for which signals to handle.  */

  const size_t nsigs = GDB_SIGNAL_LAST;
  unsigned char sigs[nsigs] {};

  /* Break the command line up into args.  */

  gdb_argv built_argv (args);

  /* Walk through the args, looking for signal oursigs, signal names, and
     actions.  Signal numbers and signal names may be interspersed with
     actions, with the actions being performed for all signals cumulatively
     specified.  Signal ranges can be specified as <LOW>-<HIGH>.  */

  for (char *arg : built_argv)
    {
      wordlen = strlen (arg);
      /* Count leading digits to detect numeric arguments below.  */
      for (digits = 0; isdigit (arg[digits]); digits++)
	{;
	}
      allsigs = 0;
      sigfirst = siglast = -1;

      if (wordlen >= 1 && !strncmp (arg, "all", wordlen))
	{
	  /* Apply action to all signals except those used by the
	     debugger.  Silently skip those.  */
	  allsigs = 1;
	  sigfirst = 0;
	  siglast = nsigs - 1;
	}
      else if (wordlen >= 1 && !strncmp (arg, "stop", wordlen))
	{
	  SET_SIGS (nsigs, sigs, signal_stop);
	  SET_SIGS (nsigs, sigs, signal_print);
	}
      else if (wordlen >= 1 && !strncmp (arg, "ignore", wordlen))
	{
	  UNSET_SIGS (nsigs, sigs, signal_program);
	}
      else if (wordlen >= 2 && !strncmp (arg, "print", wordlen))
	{
	  SET_SIGS (nsigs, sigs, signal_print);
	}
      else if (wordlen >= 2 && !strncmp (arg, "pass", wordlen))
	{
	  SET_SIGS (nsigs, sigs, signal_program);
	}
      else if (wordlen >= 3 && !strncmp (arg, "nostop", wordlen))
	{
	  UNSET_SIGS (nsigs, sigs, signal_stop);
	}
      else if (wordlen >= 3 && !strncmp (arg, "noignore", wordlen))
	{
	  SET_SIGS (nsigs, sigs, signal_program);
	}
      else if (wordlen >= 4 && !strncmp (arg, "noprint", wordlen))
	{
	  UNSET_SIGS (nsigs, sigs, signal_print);
	  UNSET_SIGS (nsigs, sigs, signal_stop);
	}
      else if (wordlen >= 4 && !strncmp (arg, "nopass", wordlen))
	{
	  UNSET_SIGS (nsigs, sigs, signal_program);
	}
      else if (digits > 0)
	{
	  /* It is numeric.  The numeric signal refers to our own
	     internal signal numbering from target.h, not to host/target
	     signal  number.  This is a feature; users really should be
	     using symbolic names anyway, and the common ones like
	     SIGHUP, SIGINT, SIGALRM, etc. will work right anyway.  */

	  sigfirst = siglast = (int)
	    gdb_signal_from_command (atoi (arg));
	  if (arg[digits] == '-')
	    {
	      siglast = (int)
		gdb_signal_from_command (atoi (arg + digits + 1));
	    }
	  if (sigfirst > siglast)
	    {
	      /* Bet he didn't figure we'd think of this case...  */
	      std::swap (sigfirst, siglast);
	    }
	}
      else
	{
	  oursig = gdb_signal_from_name (arg);
	  if (oursig != GDB_SIGNAL_UNKNOWN)
	    {
	      sigfirst = siglast = (int) oursig;
	    }
	  else
	    {
	      /* Not a number and not a recognized flag word => complain.  */
	      error (_("Unrecognized or ambiguous flag word: \"%s\"."), arg);
	    }
	}

      /* If any signal numbers or symbol names were found, set flags for
	 which signals to apply actions to.  */

      for (int signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
	{
	  switch ((enum gdb_signal) signum)
	    {
	    case GDB_SIGNAL_TRAP:
	    case GDB_SIGNAL_INT:
	      /* These are used by GDB itself; require confirmation
		 unless they were swept up by "all".  */
	      if (!allsigs && !sigs[signum])
		{
		  if (query (_("%s is used by the debugger.\n\
Are you sure you want to change it? "),
			     gdb_signal_to_name ((enum gdb_signal) signum)))
		    {
		      sigs[signum] = 1;
		    }
		  else
		    gdb_printf (_("Not confirmed, unchanged.\n"));
		}
	      break;
	    case GDB_SIGNAL_0:
	    case GDB_SIGNAL_DEFAULT:
	    case GDB_SIGNAL_UNKNOWN:
	      /* Make sure that "all" doesn't print these.  */
	      break;
	    default:
	      sigs[signum] = 1;
	      break;
	    }
	}
    }

  for (int signum = 0; signum < nsigs; signum++)
    if (sigs[signum])
      {
	signal_cache_update (-1);
	target_pass_signals (signal_pass);
	target_program_signals (signal_program);

	if (from_tty)
	  {
	    /* Show the results.  */
	    sig_print_header ();
	    for (; signum < nsigs; signum++)
	      if (sigs[signum])
		sig_print_info ((enum gdb_signal) signum);
	  }

	break;
      }
}
/* Complete the "handle" command.  Offers both signal names and the
   action keywords.  */

static void
handle_completer (struct cmd_list_element *ignore,
		  completion_tracker &tracker,
		  const char *text, const char *word)
{
  static const char * const keywords[] =
    {
      "all",
      "stop",
      "ignore",
      "print",
      "pass",
      "nostop",
      "noignore",
      "noprint",
      "nopass",
      NULL,
    };

  signal_completer (ignore, tracker, text, word);
  complete_on_enum (tracker, keywords, word, word);
}
/* Convert a user-supplied numeric signal NUM to the internal
   gdb_signal enum.  Only 1-15 are accepted, since only those have a
   stable traditional meaning; errors out otherwise.  */

enum gdb_signal
gdb_signal_from_command (int num)
{
  if (num >= 1 && num <= 15)
    return (enum gdb_signal) num;
  error (_("Only signals 1-15 are valid as numeric signals.\n\
Use \"info signals\" for a list of symbolic signals."));
}
/* Print current contents of the tables set by the handle command.
   It is possible we should just be printing signals actually used
   by the current target (but for things to work right when switching
   targets, all signals should be in the signal tables).  */

static void
info_signals_command (const char *signum_exp, int from_tty)
{
  enum gdb_signal oursig;

  sig_print_header ();

  if (signum_exp)
    {
      /* First see if this is a symbol name.  */
      oursig = gdb_signal_from_name (signum_exp);
      if (oursig == GDB_SIGNAL_UNKNOWN)
	{
	  /* No, try numeric.  */
	  oursig =
	    gdb_signal_from_command (parse_and_eval_long (signum_exp));
	}
      sig_print_info (oursig);
      return;
    }

  gdb_printf ("\n");
  /* These ugly casts brought to you by the native VAX compiler.  */
  for (oursig = GDB_SIGNAL_FIRST;
       (int) oursig < (int) GDB_SIGNAL_LAST;
       oursig = (enum gdb_signal) ((int) oursig + 1))
    {
      QUIT;

      /* Skip the pseudo-signals that have no handling to show.  */
      if (oursig != GDB_SIGNAL_UNKNOWN
	  && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
	sig_print_info (oursig);
    }

  gdb_printf (_("\nUse the \"handle\" command "
		"to change these tables.\n"));
}
/* The $_siginfo convenience variable is a bit special.  We don't know
   for sure the type of the value until we actually have a chance to
   fetch the data.  The type can change depending on gdbarch, so it is
   also dependent on which thread you have selected.

   There are two possible implementations:

     1. making $_siginfo be an internalvar that creates a new value on
	each fetch; or

     2. making the value of $_siginfo be an lval_computed value.  */
/* This function implements the lval_computed support for reading a
   $_siginfo value.  Reads the raw siginfo object from the target into
   V's contents; errors out on a short read.  */

static void
siginfo_value_read (struct value *v)
{
  LONGEST transferred;

  /* If we can access registers, so can we access $_siginfo.  Likewise
     vice versa.  */
  validate_registers_access ();

  transferred =
    target_read (current_inferior ()->top_target (),
		 TARGET_OBJECT_SIGNAL_INFO,
		 NULL,
		 value_contents_all_raw (v).data (),
		 0,
		 TYPE_LENGTH (value_type (v)));

  if (transferred != TYPE_LENGTH (value_type (v)))
    error (_("Unable to read siginfo"));
}
/* This function implements the lval_computed support for writing a
   $_siginfo value.  Writes FROMVAL's contents back to the target's
   siginfo object; errors out on a short write.  */

static void
siginfo_value_write (struct value *v, struct value *fromval)
{
  LONGEST transferred;

  /* If we can access registers, so can we access $_siginfo.  Likewise
     vice versa.  */
  validate_registers_access ();

  transferred = target_write (current_inferior ()->top_target (),
			      TARGET_OBJECT_SIGNAL_INFO,
			      NULL,
			      value_contents_all_raw (fromval).data (),
			      0,
			      TYPE_LENGTH (value_type (fromval)));

  if (transferred != TYPE_LENGTH (value_type (fromval)))
    error (_("Unable to write siginfo"));
}
/* lval_computed hooks backing $_siginfo: read/write go straight to
   the target's TARGET_OBJECT_SIGNAL_INFO object.  (Initializer list
   lost in this copy; restored from the two functions above.)  */
static const struct lval_funcs siginfo_value_funcs =
  {
    siginfo_value_read,
    siginfo_value_write
  };
/* Return a new value with the correct type for the siginfo object of
   the current thread using architecture GDBARCH.  Return a void value
   if there's no object available.  */

static struct value *
siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
		    void *ignore)
{
  if (target_has_stack ()
      && inferior_ptid != null_ptid
      && gdbarch_get_siginfo_type_p (gdbarch))
    {
      struct type *type = gdbarch_get_siginfo_type (gdbarch);

      /* Lazily computed: reads/writes go through siginfo_value_funcs.  */
      return allocate_computed_value (type, &siginfo_value_funcs, NULL);
    }

  return allocate_value (builtin_type (gdbarch)->builtin_void);
}
/* infcall_suspend_state contains state about the program itself like its
   registers and any signal it received when it last stopped.
   This state must be restored regardless of how the inferior function call
   ends (either successfully, or after it hits a breakpoint or signal)
   if the program is to properly continue where it left off.  */

class infcall_suspend_state
{
public:
  /* Capture state from GDBARCH, TP, and REGCACHE that must be restored
     once the inferior function call has finished.  */
  infcall_suspend_state (struct gdbarch *gdbarch,
			 const struct thread_info *tp,
			 struct regcache *regcache)
    : m_registers (new readonly_detached_regcache (*regcache))
  {
    tp->save_suspend_to (m_thread_suspend);

    gdb::unique_xmalloc_ptr<gdb_byte> siginfo_data;

    if (gdbarch_get_siginfo_type_p (gdbarch))
      {
	struct type *type = gdbarch_get_siginfo_type (gdbarch);
	size_t len = TYPE_LENGTH (type);

	siginfo_data.reset ((gdb_byte *) xmalloc (len));

	if (target_read (current_inferior ()->top_target (),
			 TARGET_OBJECT_SIGNAL_INFO, NULL,
			 siginfo_data.get (), 0, len) != len)
	  {
	    /* Errors ignored.  */
	    siginfo_data.reset (nullptr);
	  }
      }

    if (siginfo_data)
      {
	m_siginfo_gdbarch = gdbarch;
	m_siginfo_data = std::move (siginfo_data);
      }
  }

  /* Return a pointer to the stored register state.  */

  readonly_detached_regcache *registers () const
  {
    return m_registers.get ();
  }

  /* Restores the stored state into GDBARCH, TP, and REGCACHE.  */

  void restore (struct gdbarch *gdbarch,
		struct thread_info *tp,
		struct regcache *regcache) const
  {
    tp->restore_suspend_from (m_thread_suspend);

    /* Saved siginfo is only meaningful for the same architecture it
       was captured on.  */
    if (m_siginfo_gdbarch == gdbarch)
      {
	struct type *type = gdbarch_get_siginfo_type (gdbarch);

	/* Errors ignored.  */
	target_write (current_inferior ()->top_target (),
		      TARGET_OBJECT_SIGNAL_INFO, NULL,
		      m_siginfo_data.get (), 0, TYPE_LENGTH (type));
      }

    /* The inferior can be gone if the user types "print exit(0)"
       (and perhaps other times).  */
    if (target_has_execution ())
      /* NB: The register write goes through to the target.  */
      regcache->restore (registers ());
  }

private:
  /* How the current thread stopped before the inferior function call was
     executed.  */
  struct thread_suspend_state m_thread_suspend;

  /* The registers before the inferior function call was executed.  */
  std::unique_ptr<readonly_detached_regcache> m_registers;

  /* Format of SIGINFO_DATA or NULL if it is not present.  */
  struct gdbarch *m_siginfo_gdbarch = nullptr;

  /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
     TYPE_LENGTH (gdbarch_get_siginfo_type ()).  For different gdbarch the
     content would be invalid.  */
  gdb::unique_xmalloc_ptr<gdb_byte> m_siginfo_data;
};
/* Capture the current thread/register/siginfo state ahead of an
   inferior function call; ownership transfers to the returned
   unique pointer.  */

infcall_suspend_state_up
save_infcall_suspend_state ()
{
  struct thread_info *tp = inferior_thread ();
  struct regcache *regcache = get_current_regcache ();
  struct gdbarch *gdbarch = regcache->arch ();

  infcall_suspend_state_up inf_state
    (new struct infcall_suspend_state (gdbarch, tp, regcache));

  /* Having saved the current state, adjust the thread state, discarding
     any stop signal information.  The stop signal is not useful when
     starting an inferior function call, and run_inferior_call will not use
     the signal due to its `proceed' call with GDB_SIGNAL_0.  */
  tp->set_stop_signal (GDB_SIGNAL_0);

  return inf_state;
}
/* Restore inferior session state to INF_STATE.  Also frees INF_STATE
   (via discard_infcall_suspend_state).  */

void
restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
{
  struct thread_info *tp = inferior_thread ();
  struct regcache *regcache = get_current_regcache ();
  struct gdbarch *gdbarch = regcache->arch ();

  inf_state->restore (gdbarch, tp, regcache);
  discard_infcall_suspend_state (inf_state);
}
/* Free INF_STATE without restoring anything from it.  */

void
discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
{
  delete inf_state;
}
/* Return the register state saved in INF_STATE (still owned by
   INF_STATE).  */

readonly_detached_regcache *
get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
{
  return inf_state->registers ();
}
/* infcall_control_state contains state regarding gdb's control of the
   inferior itself like stepping control.  It also contains session state like
   the user's currently selected frame.  */

struct infcall_control_state
{
  struct thread_control_state thread_control;
  struct inferior_control_state inferior_control;

  /* Other fields:  */
  enum stop_stack_kind stop_stack_dummy = STOP_NONE;
  int stopped_by_random_signal = 0;

  /* ID and level of the selected frame when the inferior function
     call was made.  */
  struct frame_id selected_frame_id {};
  int selected_frame_level = -1;
};
/* Save all of the information associated with the inferior<==>gdb
   connection.  Ownership of the returned state transfers to the
   caller (released by restore/discard_infcall_control_state).  */

infcall_control_state_up
save_infcall_control_state ()
{
  infcall_control_state_up inf_status (new struct infcall_control_state);
  struct thread_info *tp = inferior_thread ();
  struct inferior *inf = current_inferior ();

  inf_status->thread_control = tp->control;
  inf_status->inferior_control = inf->control;

  /* The saved copies above now own these breakpoints; clear the live
     pointers so the call doesn't delete them.  */
  tp->control.step_resume_breakpoint = NULL;
  tp->control.exception_resume_breakpoint = NULL;

  /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
     chain.  If caller's caller is walking the chain, they'll be happier if we
     hand them back the original chain when restore_infcall_control_state is
     called.  */
  tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);

  /* Other fields:  */
  inf_status->stop_stack_dummy = stop_stack_dummy;
  inf_status->stopped_by_random_signal = stopped_by_random_signal;

  save_selected_frame (&inf_status->selected_frame_id,
		       &inf_status->selected_frame_level);

  return inf_status;
}
/* Restore inferior session state to INF_STATUS.  Also frees
   INF_STATUS.  */

void
restore_infcall_control_state (struct infcall_control_state *inf_status)
{
  struct thread_info *tp = inferior_thread ();
  struct inferior *inf = current_inferior ();

  /* Mark any step-resume breakpoints created during the call for
     deletion at the next stop; the saved state below replaces them.  */
  if (tp->control.step_resume_breakpoint)
    tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;

  if (tp->control.exception_resume_breakpoint)
    tp->control.exception_resume_breakpoint->disposition
      = disp_del_at_next_stop;

  /* Handle the bpstat_copy of the chain.  */
  bpstat_clear (&tp->control.stop_bpstat);

  tp->control = inf_status->thread_control;
  inf->control = inf_status->inferior_control;

  /* Other fields:  */
  stop_stack_dummy = inf_status->stop_stack_dummy;
  stopped_by_random_signal = inf_status->stopped_by_random_signal;

  if (target_has_stack ())
    {
      restore_selected_frame (inf_status->selected_frame_id,
			      inf_status->selected_frame_level);
    }

  delete inf_status;
}
9434 discard_infcall_control_state (struct infcall_control_state
*inf_status
)
9436 if (inf_status
->thread_control
.step_resume_breakpoint
)
9437 inf_status
->thread_control
.step_resume_breakpoint
->disposition
9438 = disp_del_at_next_stop
;
9440 if (inf_status
->thread_control
.exception_resume_breakpoint
)
9441 inf_status
->thread_control
.exception_resume_breakpoint
->disposition
9442 = disp_del_at_next_stop
;
9444 /* See save_infcall_control_state for info on stop_bpstat. */
9445 bpstat_clear (&inf_status
->thread_control
.stop_bpstat
);
9453 clear_exit_convenience_vars (void)
9455 clear_internalvar (lookup_internalvar ("_exitsignal"));
9456 clear_internalvar (lookup_internalvar ("_exitcode"));
9460 /* User interface for reverse debugging:
9461 Set exec-direction / show exec-direction commands
9462 (returns error unless target implements to_set_exec_direction method). */
9464 enum exec_direction_kind execution_direction
= EXEC_FORWARD
;
9465 static const char exec_forward
[] = "forward";
9466 static const char exec_reverse
[] = "reverse";
9467 static const char *exec_direction
= exec_forward
;
9468 static const char *const exec_direction_names
[] = {
9475 set_exec_direction_func (const char *args
, int from_tty
,
9476 struct cmd_list_element
*cmd
)
9478 if (target_can_execute_reverse ())
9480 if (!strcmp (exec_direction
, exec_forward
))
9481 execution_direction
= EXEC_FORWARD
;
9482 else if (!strcmp (exec_direction
, exec_reverse
))
9483 execution_direction
= EXEC_REVERSE
;
9487 exec_direction
= exec_forward
;
9488 error (_("Target does not support this operation."));
9493 show_exec_direction_func (struct ui_file
*out
, int from_tty
,
9494 struct cmd_list_element
*cmd
, const char *value
)
9496 switch (execution_direction
) {
9498 gdb_printf (out
, _("Forward.\n"));
9501 gdb_printf (out
, _("Reverse.\n"));
9504 internal_error (__FILE__
, __LINE__
,
9505 _("bogus execution_direction value: %d"),
9506 (int) execution_direction
);
9511 show_schedule_multiple (struct ui_file
*file
, int from_tty
,
9512 struct cmd_list_element
*c
, const char *value
)
9514 gdb_printf (file
, _("Resuming the execution of threads "
9515 "of all processes is %s.\n"), value
);
9518 /* Implementation of `siginfo' variable. */
9520 static const struct internalvar_funcs siginfo_funcs
=
9526 /* Callback for infrun's target events source. This is marked when a
9527 thread has a pending status to process. */
9530 infrun_async_inferior_event_handler (gdb_client_data data
)
9532 clear_async_event_handler (infrun_async_inferior_event_token
);
9533 inferior_event_handler (INF_REG_EVENT
);
9540 /* Verify that when two threads with the same ptid exist (from two different
9541 targets) and one of them changes ptid, we only update inferior_ptid if
9542 it is appropriate. */
9545 infrun_thread_ptid_changed ()
9547 gdbarch
*arch
= current_inferior ()->gdbarch
;
9549 /* The thread which inferior_ptid represents changes ptid. */
9551 scoped_restore_current_pspace_and_thread restore
;
9553 scoped_mock_context
<test_target_ops
> target1 (arch
);
9554 scoped_mock_context
<test_target_ops
> target2 (arch
);
9556 ptid_t
old_ptid (111, 222);
9557 ptid_t
new_ptid (111, 333);
9559 target1
.mock_inferior
.pid
= old_ptid
.pid ();
9560 target1
.mock_thread
.ptid
= old_ptid
;
9561 target1
.mock_inferior
.ptid_thread_map
.clear ();
9562 target1
.mock_inferior
.ptid_thread_map
[old_ptid
] = &target1
.mock_thread
;
9564 target2
.mock_inferior
.pid
= old_ptid
.pid ();
9565 target2
.mock_thread
.ptid
= old_ptid
;
9566 target2
.mock_inferior
.ptid_thread_map
.clear ();
9567 target2
.mock_inferior
.ptid_thread_map
[old_ptid
] = &target2
.mock_thread
;
9569 auto restore_inferior_ptid
= make_scoped_restore (&inferior_ptid
, old_ptid
);
9570 set_current_inferior (&target1
.mock_inferior
);
9572 thread_change_ptid (&target1
.mock_target
, old_ptid
, new_ptid
);
9574 gdb_assert (inferior_ptid
== new_ptid
);
9577 /* A thread with the same ptid as inferior_ptid, but from another target,
9580 scoped_restore_current_pspace_and_thread restore
;
9582 scoped_mock_context
<test_target_ops
> target1 (arch
);
9583 scoped_mock_context
<test_target_ops
> target2 (arch
);
9585 ptid_t
old_ptid (111, 222);
9586 ptid_t
new_ptid (111, 333);
9588 target1
.mock_inferior
.pid
= old_ptid
.pid ();
9589 target1
.mock_thread
.ptid
= old_ptid
;
9590 target1
.mock_inferior
.ptid_thread_map
.clear ();
9591 target1
.mock_inferior
.ptid_thread_map
[old_ptid
] = &target1
.mock_thread
;
9593 target2
.mock_inferior
.pid
= old_ptid
.pid ();
9594 target2
.mock_thread
.ptid
= old_ptid
;
9595 target2
.mock_inferior
.ptid_thread_map
.clear ();
9596 target2
.mock_inferior
.ptid_thread_map
[old_ptid
] = &target2
.mock_thread
;
9598 auto restore_inferior_ptid
= make_scoped_restore (&inferior_ptid
, old_ptid
);
9599 set_current_inferior (&target2
.mock_inferior
);
9601 thread_change_ptid (&target1
.mock_target
, old_ptid
, new_ptid
);
9603 gdb_assert (inferior_ptid
== old_ptid
);
9607 } /* namespace selftests */
9609 #endif /* GDB_SELF_TEST */
9611 void _initialize_infrun ();
9613 _initialize_infrun ()
9615 struct cmd_list_element
*c
;
9617 /* Register extra event sources in the event loop. */
9618 infrun_async_inferior_event_token
9619 = create_async_event_handler (infrun_async_inferior_event_handler
, NULL
,
9622 cmd_list_element
*info_signals_cmd
9623 = add_info ("signals", info_signals_command
, _("\
9624 What debugger does when program gets various signals.\n\
9625 Specify a signal as argument to print info on that signal only."));
9626 add_info_alias ("handle", info_signals_cmd
, 0);
9628 c
= add_com ("handle", class_run
, handle_command
, _("\
9629 Specify how to handle signals.\n\
9630 Usage: handle SIGNAL [ACTIONS]\n\
9631 Args are signals and actions to apply to those signals.\n\
9632 If no actions are specified, the current settings for the specified signals\n\
9633 will be displayed instead.\n\
9635 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
9636 from 1-15 are allowed for compatibility with old versions of GDB.\n\
9637 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
9638 The special arg \"all\" is recognized to mean all signals except those\n\
9639 used by the debugger, typically SIGTRAP and SIGINT.\n\
9641 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
9642 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
9643 Stop means reenter debugger if this signal happens (implies print).\n\
9644 Print means print a message if this signal happens.\n\
9645 Pass means let program see this signal; otherwise program doesn't know.\n\
9646 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
9647 Pass and Stop may be combined.\n\
9649 Multiple signals may be specified. Signal numbers and signal names\n\
9650 may be interspersed with actions, with the actions being performed for\n\
9651 all signals cumulatively specified."));
9652 set_cmd_completer (c
, handle_completer
);
9654 stop_command
= add_cmd ("stop", class_obscure
,
9655 not_just_help_class_command
, _("\
9656 There is no `stop' command, but you can set a hook on `stop'.\n\
9657 This allows you to set a list of commands to be run each time execution\n\
9658 of the program stops."), &cmdlist
);
9660 add_setshow_boolean_cmd
9661 ("infrun", class_maintenance
, &debug_infrun
,
9662 _("Set inferior debugging."),
9663 _("Show inferior debugging."),
9664 _("When non-zero, inferior specific debugging is enabled."),
9665 NULL
, show_debug_infrun
, &setdebuglist
, &showdebuglist
);
9667 add_setshow_boolean_cmd ("non-stop", no_class
,
9669 Set whether gdb controls the inferior in non-stop mode."), _("\
9670 Show whether gdb controls the inferior in non-stop mode."), _("\
9671 When debugging a multi-threaded program and this setting is\n\
9672 off (the default, also called all-stop mode), when one thread stops\n\
9673 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
9674 all other threads in the program while you interact with the thread of\n\
9675 interest. When you continue or step a thread, you can allow the other\n\
9676 threads to run, or have them remain stopped, but while you inspect any\n\
9677 thread's state, all threads stop.\n\
9679 In non-stop mode, when one thread stops, other threads can continue\n\
9680 to run freely. You'll be able to step each thread independently,\n\
9681 leave it stopped or free to run as needed."),
9687 for (size_t i
= 0; i
< GDB_SIGNAL_LAST
; i
++)
9690 signal_print
[i
] = 1;
9691 signal_program
[i
] = 1;
9692 signal_catch
[i
] = 0;
9695 /* Signals caused by debugger's own actions should not be given to
9696 the program afterwards.
9698 Do not deliver GDB_SIGNAL_TRAP by default, except when the user
9699 explicitly specifies that it should be delivered to the target
9700 program. Typically, that would occur when a user is debugging a
9701 target monitor on a simulator: the target monitor sets a
9702 breakpoint; the simulator encounters this breakpoint and halts
9703 the simulation handing control to GDB; GDB, noting that the stop
9704 address doesn't map to any known breakpoint, returns control back
9705 to the simulator; the simulator then delivers the hardware
9706 equivalent of a GDB_SIGNAL_TRAP to the program being
9708 signal_program
[GDB_SIGNAL_TRAP
] = 0;
9709 signal_program
[GDB_SIGNAL_INT
] = 0;
9711 /* Signals that are not errors should not normally enter the debugger. */
9712 signal_stop
[GDB_SIGNAL_ALRM
] = 0;
9713 signal_print
[GDB_SIGNAL_ALRM
] = 0;
9714 signal_stop
[GDB_SIGNAL_VTALRM
] = 0;
9715 signal_print
[GDB_SIGNAL_VTALRM
] = 0;
9716 signal_stop
[GDB_SIGNAL_PROF
] = 0;
9717 signal_print
[GDB_SIGNAL_PROF
] = 0;
9718 signal_stop
[GDB_SIGNAL_CHLD
] = 0;
9719 signal_print
[GDB_SIGNAL_CHLD
] = 0;
9720 signal_stop
[GDB_SIGNAL_IO
] = 0;
9721 signal_print
[GDB_SIGNAL_IO
] = 0;
9722 signal_stop
[GDB_SIGNAL_POLL
] = 0;
9723 signal_print
[GDB_SIGNAL_POLL
] = 0;
9724 signal_stop
[GDB_SIGNAL_URG
] = 0;
9725 signal_print
[GDB_SIGNAL_URG
] = 0;
9726 signal_stop
[GDB_SIGNAL_WINCH
] = 0;
9727 signal_print
[GDB_SIGNAL_WINCH
] = 0;
9728 signal_stop
[GDB_SIGNAL_PRIO
] = 0;
9729 signal_print
[GDB_SIGNAL_PRIO
] = 0;
9731 /* These signals are used internally by user-level thread
9732 implementations. (See signal(5) on Solaris.) Like the above
9733 signals, a healthy program receives and handles them as part of
9734 its normal operation. */
9735 signal_stop
[GDB_SIGNAL_LWP
] = 0;
9736 signal_print
[GDB_SIGNAL_LWP
] = 0;
9737 signal_stop
[GDB_SIGNAL_WAITING
] = 0;
9738 signal_print
[GDB_SIGNAL_WAITING
] = 0;
9739 signal_stop
[GDB_SIGNAL_CANCEL
] = 0;
9740 signal_print
[GDB_SIGNAL_CANCEL
] = 0;
9741 signal_stop
[GDB_SIGNAL_LIBRT
] = 0;
9742 signal_print
[GDB_SIGNAL_LIBRT
] = 0;
9744 /* Update cached state. */
9745 signal_cache_update (-1);
9747 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support
,
9748 &stop_on_solib_events
, _("\
9749 Set stopping for shared library events."), _("\
9750 Show stopping for shared library events."), _("\
9751 If nonzero, gdb will give control to the user when the dynamic linker\n\
9752 notifies gdb of shared library events. The most common event of interest\n\
9753 to the user would be loading/unloading of a new library."),
9754 set_stop_on_solib_events
,
9755 show_stop_on_solib_events
,
9756 &setlist
, &showlist
);
9758 add_setshow_enum_cmd ("follow-fork-mode", class_run
,
9759 follow_fork_mode_kind_names
,
9760 &follow_fork_mode_string
, _("\
9761 Set debugger response to a program call of fork or vfork."), _("\
9762 Show debugger response to a program call of fork or vfork."), _("\
9763 A fork or vfork creates a new process. follow-fork-mode can be:\n\
9764 parent - the original process is debugged after a fork\n\
9765 child - the new process is debugged after a fork\n\
9766 The unfollowed process will continue to run.\n\
9767 By default, the debugger will follow the parent process."),
9769 show_follow_fork_mode_string
,
9770 &setlist
, &showlist
);
9772 add_setshow_enum_cmd ("follow-exec-mode", class_run
,
9773 follow_exec_mode_names
,
9774 &follow_exec_mode_string
, _("\
9775 Set debugger response to a program call of exec."), _("\
9776 Show debugger response to a program call of exec."), _("\
9777 An exec call replaces the program image of a process.\n\
9779 follow-exec-mode can be:\n\
9781 new - the debugger creates a new inferior and rebinds the process\n\
9782 to this new inferior. The program the process was running before\n\
9783 the exec call can be restarted afterwards by restarting the original\n\
9786 same - the debugger keeps the process bound to the same inferior.\n\
9787 The new executable image replaces the previous executable loaded in\n\
9788 the inferior. Restarting the inferior after the exec call restarts\n\
9789 the executable the process was running after the exec call.\n\
9791 By default, the debugger will use the same inferior."),
9793 show_follow_exec_mode_string
,
9794 &setlist
, &showlist
);
9796 add_setshow_enum_cmd ("scheduler-locking", class_run
,
9797 scheduler_enums
, &scheduler_mode
, _("\
9798 Set mode for locking scheduler during execution."), _("\
9799 Show mode for locking scheduler during execution."), _("\
9800 off == no locking (threads may preempt at any time)\n\
9801 on == full locking (no thread except the current thread may run)\n\
9802 This applies to both normal execution and replay mode.\n\
9803 step == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
9804 In this mode, other threads may run during other commands.\n\
9805 This applies to both normal execution and replay mode.\n\
9806 replay == scheduler locked in replay mode and unlocked during normal execution."),
9807 set_schedlock_func
, /* traps on target vector */
9808 show_scheduler_mode
,
9809 &setlist
, &showlist
);
9811 add_setshow_boolean_cmd ("schedule-multiple", class_run
, &sched_multi
, _("\
9812 Set mode for resuming threads of all processes."), _("\
9813 Show mode for resuming threads of all processes."), _("\
9814 When on, execution commands (such as 'continue' or 'next') resume all\n\
9815 threads of all processes. When off (which is the default), execution\n\
9816 commands only resume the threads of the current process. The set of\n\
9817 threads that are resumed is further refined by the scheduler-locking\n\
9818 mode (see help set scheduler-locking)."),
9820 show_schedule_multiple
,
9821 &setlist
, &showlist
);
9823 add_setshow_boolean_cmd ("step-mode", class_run
, &step_stop_if_no_debug
, _("\
9824 Set mode of the step operation."), _("\
9825 Show mode of the step operation."), _("\
9826 When set, doing a step over a function without debug line information\n\
9827 will stop at the first instruction of that function. Otherwise, the\n\
9828 function is skipped and the step command stops at a different source line."),
9830 show_step_stop_if_no_debug
,
9831 &setlist
, &showlist
);
9833 add_setshow_auto_boolean_cmd ("displaced-stepping", class_run
,
9834 &can_use_displaced_stepping
, _("\
9835 Set debugger's willingness to use displaced stepping."), _("\
9836 Show debugger's willingness to use displaced stepping."), _("\
9837 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
9838 supported by the target architecture. If off, gdb will not use displaced\n\
9839 stepping to step over breakpoints, even if such is supported by the target\n\
9840 architecture. If auto (which is the default), gdb will use displaced stepping\n\
9841 if the target architecture supports it and non-stop mode is active, but will not\n\
9842 use it in all-stop mode (see help set non-stop)."),
9844 show_can_use_displaced_stepping
,
9845 &setlist
, &showlist
);
9847 add_setshow_enum_cmd ("exec-direction", class_run
, exec_direction_names
,
9848 &exec_direction
, _("Set direction of execution.\n\
9849 Options are 'forward' or 'reverse'."),
9850 _("Show direction of execution (forward/reverse)."),
9851 _("Tells gdb whether to execute forward or backward."),
9852 set_exec_direction_func
, show_exec_direction_func
,
9853 &setlist
, &showlist
);
9855 /* Set/show detach-on-fork: user-settable mode. */
9857 add_setshow_boolean_cmd ("detach-on-fork", class_run
, &detach_fork
, _("\
9858 Set whether gdb will detach the child of a fork."), _("\
9859 Show whether gdb will detach the child of a fork."), _("\
9860 Tells gdb whether to detach the child of a fork."),
9861 NULL
, NULL
, &setlist
, &showlist
);
9863 /* Set/show disable address space randomization mode. */
9865 add_setshow_boolean_cmd ("disable-randomization", class_support
,
9866 &disable_randomization
, _("\
9867 Set disabling of debuggee's virtual address space randomization."), _("\
9868 Show disabling of debuggee's virtual address space randomization."), _("\
9869 When this mode is on (which is the default), randomization of the virtual\n\
9870 address space is disabled. Standalone programs run with the randomization\n\
9871 enabled by default on some platforms."),
9872 &set_disable_randomization
,
9873 &show_disable_randomization
,
9874 &setlist
, &showlist
);
9876 /* ptid initializations */
9877 inferior_ptid
= null_ptid
;
9878 target_last_wait_ptid
= minus_one_ptid
;
9880 gdb::observers::thread_ptid_changed
.attach (infrun_thread_ptid_changed
,
9882 gdb::observers::thread_stop_requested
.attach (infrun_thread_stop_requested
,
9884 gdb::observers::thread_exit
.attach (infrun_thread_thread_exit
, "infrun");
9885 gdb::observers::inferior_exit
.attach (infrun_inferior_exit
, "infrun");
9886 gdb::observers::inferior_execd
.attach (infrun_inferior_execd
, "infrun");
9888 /* Explicitly create without lookup, since that tries to create a
9889 value with a void typed value, and when we get here, gdbarch
9890 isn't initialized yet. At this point, we're quite sure there
9891 isn't another convenience variable of the same name. */
9892 create_internalvar_type_lazy ("_siginfo", &siginfo_funcs
, NULL
);
9894 add_setshow_boolean_cmd ("observer", no_class
,
9895 &observer_mode_1
, _("\
9896 Set whether gdb controls the inferior in observer mode."), _("\
9897 Show whether gdb controls the inferior in observer mode."), _("\
9898 In observer mode, GDB can get data from the inferior, but not\n\
9899 affect its execution. Registers and memory may not be changed,\n\
9900 breakpoints may not be set, and the program cannot be interrupted\n\
9908 selftests::register_test ("infrun_thread_ptid_changed",
9909 selftests::infrun_thread_ptid_changed
);