1 /* Target-struct-independent code to start (run) and stop an inferior
4 Copyright (C) 1986-2021 Free Software Foundation, Inc.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22 #include "displaced-stepping.h"
28 #include "breakpoint.h"
32 #include "target-connection.h"
33 #include "gdbthread.h"
40 #include "observable.h"
45 #include "mi/mi-common.h"
46 #include "event-top.h"
48 #include "record-full.h"
49 #include "inline-frame.h"
51 #include "tracepoint.h"
55 #include "completer.h"
56 #include "target-descriptions.h"
57 #include "target-dcache.h"
60 #include "gdbsupport/event-loop.h"
61 #include "thread-fsm.h"
62 #include "gdbsupport/enum-flags.h"
63 #include "progspace-and-thread.h"
64 #include "gdbsupport/gdb_optional.h"
65 #include "arch-utils.h"
66 #include "gdbsupport/scope-exit.h"
67 #include "gdbsupport/forward-scope-exit.h"
68 #include "gdbsupport/gdb_select.h"
69 #include <unordered_map>
70 #include "async-event.h"
71 #include "gdbsupport/selftest.h"
72 #include "scoped-mock-context.h"
73 #include "test-target.h"
74 #include "gdbsupport/common-debug.h"
76 /* Prototypes for local functions */
78 static void sig_print_info (enum gdb_signal
);
80 static void sig_print_header (void);
82 static void follow_inferior_reset_breakpoints (void);
84 static bool currently_stepping (struct thread_info
*tp
);
86 static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info
*);
88 static void insert_step_resume_breakpoint_at_caller (struct frame_info
*);
90 static void insert_longjmp_resume_breakpoint (struct gdbarch
*, CORE_ADDR
);
92 static bool maybe_software_singlestep (struct gdbarch
*gdbarch
);
94 static void resume (gdb_signal sig
);
96 static void wait_for_inferior (inferior
*inf
);
98 /* Asynchronous signal handler registered as event loop source for
99 when we have pending events ready to be passed to the core. */
100 static struct async_event_handler
*infrun_async_inferior_event_token
;
102 /* Stores whether infrun_async was previously enabled or disabled.
103 Starts off as -1, indicating "never enabled/disabled". */
104 static int infrun_is_async
= -1;
109 infrun_async (int enable
)
111 if (infrun_is_async
!= enable
)
113 infrun_is_async
= enable
;
115 infrun_debug_printf ("enable=%d", enable
);
118 mark_async_event_handler (infrun_async_inferior_event_token
);
120 clear_async_event_handler (infrun_async_inferior_event_token
);
127 mark_infrun_async_event_handler (void)
129 mark_async_event_handler (infrun_async_inferior_event_token
);
132 /* When set, stop the 'step' command if we enter a function which has
133 no line number information. The normal behavior is that we step
134 over such function. */
135 bool step_stop_if_no_debug
= false;
137 show_step_stop_if_no_debug (struct ui_file
*file
, int from_tty
,
138 struct cmd_list_element
*c
, const char *value
)
140 fprintf_filtered (file
, _("Mode of the step operation is %s.\n"), value
);
143 /* proceed and normal_stop use this to notify the user when the
144 inferior stopped in a different thread than it had been running
147 static ptid_t previous_inferior_ptid
;
149 /* If set (default for legacy reasons), when following a fork, GDB
150 will detach from one of the fork branches, child or parent.
151 Exactly which branch is detached depends on 'set follow-fork-mode'
154 static bool detach_fork
= true;
156 bool debug_infrun
= false;
158 show_debug_infrun (struct ui_file
*file
, int from_tty
,
159 struct cmd_list_element
*c
, const char *value
)
161 fprintf_filtered (file
, _("Inferior debugging is %s.\n"), value
);
164 /* Support for disabling address space randomization. */
166 bool disable_randomization
= true;
169 show_disable_randomization (struct ui_file
*file
, int from_tty
,
170 struct cmd_list_element
*c
, const char *value
)
172 if (target_supports_disable_randomization ())
173 fprintf_filtered (file
,
174 _("Disabling randomization of debuggee's "
175 "virtual address space is %s.\n"),
178 fputs_filtered (_("Disabling randomization of debuggee's "
179 "virtual address space is unsupported on\n"
180 "this platform.\n"), file
);
184 set_disable_randomization (const char *args
, int from_tty
,
185 struct cmd_list_element
*c
)
187 if (!target_supports_disable_randomization ())
188 error (_("Disabling randomization of debuggee's "
189 "virtual address space is unsupported on\n"
193 /* User interface for non-stop mode. */
195 bool non_stop
= false;
196 static bool non_stop_1
= false;
199 set_non_stop (const char *args
, int from_tty
,
200 struct cmd_list_element
*c
)
202 if (target_has_execution ())
204 non_stop_1
= non_stop
;
205 error (_("Cannot change this setting while the inferior is running."));
208 non_stop
= non_stop_1
;
212 show_non_stop (struct ui_file
*file
, int from_tty
,
213 struct cmd_list_element
*c
, const char *value
)
215 fprintf_filtered (file
,
216 _("Controlling the inferior in non-stop mode is %s.\n"),
220 /* "Observer mode" is somewhat like a more extreme version of
221 non-stop, in which all GDB operations that might affect the
222 target's execution have been disabled. */
224 static bool observer_mode
= false;
225 static bool observer_mode_1
= false;
228 set_observer_mode (const char *args
, int from_tty
,
229 struct cmd_list_element
*c
)
231 if (target_has_execution ())
233 observer_mode_1
= observer_mode
;
234 error (_("Cannot change this setting while the inferior is running."));
237 observer_mode
= observer_mode_1
;
239 may_write_registers
= !observer_mode
;
240 may_write_memory
= !observer_mode
;
241 may_insert_breakpoints
= !observer_mode
;
242 may_insert_tracepoints
= !observer_mode
;
243 /* We can insert fast tracepoints in or out of observer mode,
244 but enable them if we're going into this mode. */
246 may_insert_fast_tracepoints
= true;
247 may_stop
= !observer_mode
;
248 update_target_permissions ();
250 /* Going *into* observer mode we must force non-stop, then
251 going out we leave it that way. */
254 pagination_enabled
= 0;
255 non_stop
= non_stop_1
= true;
259 printf_filtered (_("Observer mode is now %s.\n"),
260 (observer_mode
? "on" : "off"));
264 show_observer_mode (struct ui_file
*file
, int from_tty
,
265 struct cmd_list_element
*c
, const char *value
)
267 fprintf_filtered (file
, _("Observer mode is %s.\n"), value
);
270 /* This updates the value of observer mode based on changes in
271 permissions. Note that we are deliberately ignoring the values of
272 may-write-registers and may-write-memory, since the user may have
273 reason to enable these during a session, for instance to turn on a
274 debugging-related global. */
277 update_observer_mode (void)
279 bool newval
= (!may_insert_breakpoints
280 && !may_insert_tracepoints
281 && may_insert_fast_tracepoints
285 /* Let the user know if things change. */
286 if (newval
!= observer_mode
)
287 printf_filtered (_("Observer mode is now %s.\n"),
288 (newval
? "on" : "off"));
290 observer_mode
= observer_mode_1
= newval
;
293 /* Tables of how to react to signals; the user sets them. */
295 static unsigned char signal_stop
[GDB_SIGNAL_LAST
];
296 static unsigned char signal_print
[GDB_SIGNAL_LAST
];
297 static unsigned char signal_program
[GDB_SIGNAL_LAST
];
299 /* Table of signals that are registered with "catch signal". A
300 non-zero entry indicates that the signal is caught by some "catch
302 static unsigned char signal_catch
[GDB_SIGNAL_LAST
];
304 /* Table of signals that the target may silently handle.
305 This is automatically determined from the flags above,
306 and simply cached here. */
307 static unsigned char signal_pass
[GDB_SIGNAL_LAST
];
309 #define SET_SIGS(nsigs,sigs,flags) \
311 int signum = (nsigs); \
312 while (signum-- > 0) \
313 if ((sigs)[signum]) \
314 (flags)[signum] = 1; \
317 #define UNSET_SIGS(nsigs,sigs,flags) \
319 int signum = (nsigs); \
320 while (signum-- > 0) \
321 if ((sigs)[signum]) \
322 (flags)[signum] = 0; \
325 /* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
326 this function is to avoid exporting `signal_program'. */
329 update_signals_program_target (void)
331 target_program_signals (signal_program
);
334 /* Value to pass to target_resume() to cause all threads to resume. */
336 #define RESUME_ALL minus_one_ptid
338 /* Command list pointer for the "stop" placeholder. */
340 static struct cmd_list_element
*stop_command
;
342 /* Nonzero if we want to give control to the user when we're notified
343 of shared library events by the dynamic linker. */
344 int stop_on_solib_events
;
346 /* Enable or disable optional shared library event breakpoints
347 as appropriate when the above flag is changed. */
350 set_stop_on_solib_events (const char *args
,
351 int from_tty
, struct cmd_list_element
*c
)
353 update_solib_breakpoints ();
357 show_stop_on_solib_events (struct ui_file
*file
, int from_tty
,
358 struct cmd_list_element
*c
, const char *value
)
360 fprintf_filtered (file
, _("Stopping for shared library events is %s.\n"),
364 /* True after stop if current stack frame should be printed. */
366 static bool stop_print_frame
;
368 /* This is a cached copy of the target/ptid/waitstatus of the last
369 event returned by target_wait()/deprecated_target_wait_hook().
370 This information is returned by get_last_target_status(). */
371 static process_stratum_target
*target_last_proc_target
;
372 static ptid_t target_last_wait_ptid
;
373 static struct target_waitstatus target_last_waitstatus
;
375 void init_thread_stepping_state (struct thread_info
*tss
);
377 static const char follow_fork_mode_child
[] = "child";
378 static const char follow_fork_mode_parent
[] = "parent";
380 static const char *const follow_fork_mode_kind_names
[] = {
381 follow_fork_mode_child
,
382 follow_fork_mode_parent
,
386 static const char *follow_fork_mode_string
= follow_fork_mode_parent
;
388 show_follow_fork_mode_string (struct ui_file
*file
, int from_tty
,
389 struct cmd_list_element
*c
, const char *value
)
391 fprintf_filtered (file
,
392 _("Debugger response to a program "
393 "call of fork or vfork is \"%s\".\n"),
398 /* Handle changes to the inferior list based on the type of fork,
399 which process is being followed, and whether the other process
400 should be detached. On entry inferior_ptid must be the ptid of
401 the fork parent. At return inferior_ptid is the ptid of the
402 followed inferior. */
405 follow_fork_inferior (bool follow_child
, bool detach_fork
)
408 ptid_t parent_ptid
, child_ptid
;
410 has_vforked
= (inferior_thread ()->pending_follow
.kind
411 == TARGET_WAITKIND_VFORKED
);
412 parent_ptid
= inferior_ptid
;
413 child_ptid
= inferior_thread ()->pending_follow
.value
.related_pid
;
416 && !non_stop
/* Non-stop always resumes both branches. */
417 && current_ui
->prompt_state
== PROMPT_BLOCKED
418 && !(follow_child
|| detach_fork
|| sched_multi
))
420 /* The parent stays blocked inside the vfork syscall until the
421 child execs or exits. If we don't let the child run, then
422 the parent stays blocked. If we're telling the parent to run
423 in the foreground, the user will not be able to ctrl-c to get
424 back the terminal, effectively hanging the debug session. */
425 fprintf_filtered (gdb_stderr
, _("\
426 Can not resume the parent process over vfork in the foreground while\n\
427 holding the child stopped. Try \"set detach-on-fork\" or \
428 \"set schedule-multiple\".\n"));
432 thread_info
*child_thr
= nullptr;
436 /* Detach new forked process? */
439 /* Before detaching from the child, remove all breakpoints
440 from it. If we forked, then this has already been taken
441 care of by infrun.c. If we vforked however, any
442 breakpoint inserted in the parent is visible in the
443 child, even those added while stopped in a vfork
444 catchpoint. This will remove the breakpoints from the
445 parent also, but they'll be reinserted below. */
448 /* Keep breakpoints list in sync. */
449 remove_breakpoints_inf (current_inferior ());
452 if (print_inferior_events
)
454 /* Ensure that we have a process ptid. */
455 ptid_t process_ptid
= ptid_t (child_ptid
.pid ());
457 target_terminal::ours_for_output ();
458 fprintf_filtered (gdb_stdlog
,
459 _("[Detaching after %s from child %s]\n"),
460 has_vforked
? "vfork" : "fork",
461 target_pid_to_str (process_ptid
).c_str ());
466 struct inferior
*parent_inf
, *child_inf
;
468 /* Add process to GDB's tables. */
469 child_inf
= add_inferior (child_ptid
.pid ());
471 parent_inf
= current_inferior ();
472 child_inf
->attach_flag
= parent_inf
->attach_flag
;
473 copy_terminal_info (child_inf
, parent_inf
);
474 child_inf
->gdbarch
= parent_inf
->gdbarch
;
475 copy_inferior_target_desc_info (child_inf
, parent_inf
);
477 scoped_restore_current_pspace_and_thread restore_pspace_thread
;
479 set_current_inferior (child_inf
);
480 switch_to_no_thread ();
481 child_inf
->symfile_flags
= SYMFILE_NO_READ
;
482 child_inf
->push_target (parent_inf
->process_target ());
483 child_thr
= add_thread_silent (child_inf
->process_target (),
486 /* If this is a vfork child, then the address-space is
487 shared with the parent. */
490 child_inf
->pspace
= parent_inf
->pspace
;
491 child_inf
->aspace
= parent_inf
->aspace
;
495 /* The parent will be frozen until the child is done
496 with the shared region. Keep track of the
498 child_inf
->vfork_parent
= parent_inf
;
499 child_inf
->pending_detach
= 0;
500 parent_inf
->vfork_child
= child_inf
;
501 parent_inf
->pending_detach
= 0;
503 /* Now that the inferiors and program spaces are all
504 wired up, we can switch to the child thread (which
505 switches inferior and program space too). */
506 switch_to_thread (child_thr
);
510 child_inf
->aspace
= new_address_space ();
511 child_inf
->pspace
= new program_space (child_inf
->aspace
);
512 child_inf
->removable
= 1;
513 set_current_program_space (child_inf
->pspace
);
514 clone_program_space (child_inf
->pspace
, parent_inf
->pspace
);
516 /* solib_create_inferior_hook relies on the current
518 switch_to_thread (child_thr
);
524 struct inferior
*parent_inf
;
526 parent_inf
= current_inferior ();
528 /* If we detached from the child, then we have to be careful
529 to not insert breakpoints in the parent until the child
530 is done with the shared memory region. However, if we're
531 staying attached to the child, then we can and should
532 insert breakpoints, so that we can debug it. A
533 subsequent child exec or exit is enough to know when does
534 the child stops using the parent's address space. */
535 parent_inf
->waiting_for_vfork_done
= detach_fork
;
536 parent_inf
->pspace
->breakpoints_not_allowed
= detach_fork
;
541 /* Follow the child. */
542 struct inferior
*parent_inf
, *child_inf
;
543 struct program_space
*parent_pspace
;
545 if (print_inferior_events
)
547 std::string parent_pid
= target_pid_to_str (parent_ptid
);
548 std::string child_pid
= target_pid_to_str (child_ptid
);
550 target_terminal::ours_for_output ();
551 fprintf_filtered (gdb_stdlog
,
552 _("[Attaching after %s %s to child %s]\n"),
554 has_vforked
? "vfork" : "fork",
558 /* Add the new inferior first, so that the target_detach below
559 doesn't unpush the target. */
561 child_inf
= add_inferior (child_ptid
.pid ());
563 parent_inf
= current_inferior ();
564 child_inf
->attach_flag
= parent_inf
->attach_flag
;
565 copy_terminal_info (child_inf
, parent_inf
);
566 child_inf
->gdbarch
= parent_inf
->gdbarch
;
567 copy_inferior_target_desc_info (child_inf
, parent_inf
);
569 parent_pspace
= parent_inf
->pspace
;
571 process_stratum_target
*target
= parent_inf
->process_target ();
574 /* Hold a strong reference to the target while (maybe)
575 detaching the parent. Otherwise detaching could close the
577 auto target_ref
= target_ops_ref::new_reference (target
);
579 /* If we're vforking, we want to hold on to the parent until
580 the child exits or execs. At child exec or exit time we
581 can remove the old breakpoints from the parent and detach
582 or resume debugging it. Otherwise, detach the parent now;
583 we'll want to reuse it's program/address spaces, but we
584 can't set them to the child before removing breakpoints
585 from the parent, otherwise, the breakpoints module could
586 decide to remove breakpoints from the wrong process (since
587 they'd be assigned to the same address space). */
591 gdb_assert (child_inf
->vfork_parent
== NULL
);
592 gdb_assert (parent_inf
->vfork_child
== NULL
);
593 child_inf
->vfork_parent
= parent_inf
;
594 child_inf
->pending_detach
= 0;
595 parent_inf
->vfork_child
= child_inf
;
596 parent_inf
->pending_detach
= detach_fork
;
597 parent_inf
->waiting_for_vfork_done
= 0;
599 else if (detach_fork
)
601 if (print_inferior_events
)
603 /* Ensure that we have a process ptid. */
604 ptid_t process_ptid
= ptid_t (parent_ptid
.pid ());
606 target_terminal::ours_for_output ();
607 fprintf_filtered (gdb_stdlog
,
608 _("[Detaching after fork from "
610 target_pid_to_str (process_ptid
).c_str ());
613 target_detach (parent_inf
, 0);
617 /* Note that the detach above makes PARENT_INF dangling. */
619 /* Add the child thread to the appropriate lists, and switch
620 to this new thread, before cloning the program space, and
621 informing the solib layer about this new process. */
623 set_current_inferior (child_inf
);
624 child_inf
->push_target (target
);
627 child_thr
= add_thread_silent (target
, child_ptid
);
629 /* If this is a vfork child, then the address-space is shared
630 with the parent. If we detached from the parent, then we can
631 reuse the parent's program/address spaces. */
632 if (has_vforked
|| detach_fork
)
634 child_inf
->pspace
= parent_pspace
;
635 child_inf
->aspace
= child_inf
->pspace
->aspace
;
641 child_inf
->aspace
= new_address_space ();
642 child_inf
->pspace
= new program_space (child_inf
->aspace
);
643 child_inf
->removable
= 1;
644 child_inf
->symfile_flags
= SYMFILE_NO_READ
;
645 set_current_program_space (child_inf
->pspace
);
646 clone_program_space (child_inf
->pspace
, parent_pspace
);
649 switch_to_thread (child_thr
);
652 target_follow_fork (follow_child
, detach_fork
);
654 /* If we ended up creating a new inferior, call post_create_inferior to inform
655 the various subcomponents. */
656 if (child_thr
!= nullptr)
658 scoped_restore_current_thread restore
;
659 switch_to_thread (child_thr
);
661 post_create_inferior (0);
667 /* Tell the target to follow the fork we're stopped at. Returns true
668 if the inferior should be resumed; false, if the target for some
669 reason decided it's best not to resume. */
674 bool follow_child
= (follow_fork_mode_string
== follow_fork_mode_child
);
675 bool should_resume
= true;
676 struct thread_info
*tp
;
678 /* Copy user stepping state to the new inferior thread. FIXME: the
679 followed fork child thread should have a copy of most of the
680 parent thread structure's run control related fields, not just these.
681 Initialized to avoid "may be used uninitialized" warnings from gcc. */
682 struct breakpoint
*step_resume_breakpoint
= NULL
;
683 struct breakpoint
*exception_resume_breakpoint
= NULL
;
684 CORE_ADDR step_range_start
= 0;
685 CORE_ADDR step_range_end
= 0;
686 int current_line
= 0;
687 symtab
*current_symtab
= NULL
;
688 struct frame_id step_frame_id
= { 0 };
689 struct thread_fsm
*thread_fsm
= NULL
;
693 process_stratum_target
*wait_target
;
695 struct target_waitstatus wait_status
;
697 /* Get the last target status returned by target_wait(). */
698 get_last_target_status (&wait_target
, &wait_ptid
, &wait_status
);
700 /* If not stopped at a fork event, then there's nothing else to
702 if (wait_status
.kind
!= TARGET_WAITKIND_FORKED
703 && wait_status
.kind
!= TARGET_WAITKIND_VFORKED
)
706 /* Check if we switched over from WAIT_PTID, since the event was
708 if (wait_ptid
!= minus_one_ptid
709 && (current_inferior ()->process_target () != wait_target
710 || inferior_ptid
!= wait_ptid
))
712 /* We did. Switch back to WAIT_PTID thread, to tell the
713 target to follow it (in either direction). We'll
714 afterwards refuse to resume, and inform the user what
716 thread_info
*wait_thread
= find_thread_ptid (wait_target
, wait_ptid
);
717 switch_to_thread (wait_thread
);
718 should_resume
= false;
722 tp
= inferior_thread ();
724 /* If there were any forks/vforks that were caught and are now to be
725 followed, then do so now. */
726 switch (tp
->pending_follow
.kind
)
728 case TARGET_WAITKIND_FORKED
:
729 case TARGET_WAITKIND_VFORKED
:
731 ptid_t parent
, child
;
733 /* If the user did a next/step, etc, over a fork call,
734 preserve the stepping state in the fork child. */
735 if (follow_child
&& should_resume
)
737 step_resume_breakpoint
= clone_momentary_breakpoint
738 (tp
->control
.step_resume_breakpoint
);
739 step_range_start
= tp
->control
.step_range_start
;
740 step_range_end
= tp
->control
.step_range_end
;
741 current_line
= tp
->current_line
;
742 current_symtab
= tp
->current_symtab
;
743 step_frame_id
= tp
->control
.step_frame_id
;
744 exception_resume_breakpoint
745 = clone_momentary_breakpoint (tp
->control
.exception_resume_breakpoint
);
746 thread_fsm
= tp
->thread_fsm
;
748 /* For now, delete the parent's sr breakpoint, otherwise,
749 parent/child sr breakpoints are considered duplicates,
750 and the child version will not be installed. Remove
751 this when the breakpoints module becomes aware of
752 inferiors and address spaces. */
753 delete_step_resume_breakpoint (tp
);
754 tp
->control
.step_range_start
= 0;
755 tp
->control
.step_range_end
= 0;
756 tp
->control
.step_frame_id
= null_frame_id
;
757 delete_exception_resume_breakpoint (tp
);
758 tp
->thread_fsm
= NULL
;
761 parent
= inferior_ptid
;
762 child
= tp
->pending_follow
.value
.related_pid
;
764 process_stratum_target
*parent_targ
= tp
->inf
->process_target ();
765 /* Set up inferior(s) as specified by the caller, and tell the
766 target to do whatever is necessary to follow either parent
768 if (follow_fork_inferior (follow_child
, detach_fork
))
770 /* Target refused to follow, or there's some other reason
771 we shouldn't resume. */
776 /* This pending follow fork event is now handled, one way
777 or another. The previous selected thread may be gone
778 from the lists by now, but if it is still around, need
779 to clear the pending follow request. */
780 tp
= find_thread_ptid (parent_targ
, parent
);
782 tp
->pending_follow
.kind
= TARGET_WAITKIND_SPURIOUS
;
784 /* This makes sure we don't try to apply the "Switched
785 over from WAIT_PID" logic above. */
786 nullify_last_target_wait_ptid ();
788 /* If we followed the child, switch to it... */
791 thread_info
*child_thr
= find_thread_ptid (parent_targ
, child
);
792 switch_to_thread (child_thr
);
794 /* ... and preserve the stepping state, in case the
795 user was stepping over the fork call. */
798 tp
= inferior_thread ();
799 tp
->control
.step_resume_breakpoint
800 = step_resume_breakpoint
;
801 tp
->control
.step_range_start
= step_range_start
;
802 tp
->control
.step_range_end
= step_range_end
;
803 tp
->current_line
= current_line
;
804 tp
->current_symtab
= current_symtab
;
805 tp
->control
.step_frame_id
= step_frame_id
;
806 tp
->control
.exception_resume_breakpoint
807 = exception_resume_breakpoint
;
808 tp
->thread_fsm
= thread_fsm
;
812 /* If we get here, it was because we're trying to
813 resume from a fork catchpoint, but, the user
814 has switched threads away from the thread that
815 forked. In that case, the resume command
816 issued is most likely not applicable to the
817 child, so just warn, and refuse to resume. */
818 warning (_("Not resuming: switched threads "
819 "before following fork child."));
822 /* Reset breakpoints in the child as appropriate. */
823 follow_inferior_reset_breakpoints ();
828 case TARGET_WAITKIND_SPURIOUS
:
829 /* Nothing to follow. */
832 internal_error (__FILE__
, __LINE__
,
833 "Unexpected pending_follow.kind %d\n",
834 tp
->pending_follow
.kind
);
838 return should_resume
;
842 follow_inferior_reset_breakpoints (void)
844 struct thread_info
*tp
= inferior_thread ();
846 /* Was there a step_resume breakpoint? (There was if the user
847 did a "next" at the fork() call.) If so, explicitly reset its
848 thread number. Cloned step_resume breakpoints are disabled on
849 creation, so enable it here now that it is associated with the
852 step_resumes are a form of bp that are made to be per-thread.
853 Since we created the step_resume bp when the parent process
854 was being debugged, and now are switching to the child process,
855 from the breakpoint package's viewpoint, that's a switch of
856 "threads". We must update the bp's notion of which thread
857 it is for, or it'll be ignored when it triggers. */
859 if (tp
->control
.step_resume_breakpoint
)
861 breakpoint_re_set_thread (tp
->control
.step_resume_breakpoint
);
862 tp
->control
.step_resume_breakpoint
->loc
->enabled
= 1;
865 /* Treat exception_resume breakpoints like step_resume breakpoints. */
866 if (tp
->control
.exception_resume_breakpoint
)
868 breakpoint_re_set_thread (tp
->control
.exception_resume_breakpoint
);
869 tp
->control
.exception_resume_breakpoint
->loc
->enabled
= 1;
872 /* Reinsert all breakpoints in the child. The user may have set
873 breakpoints after catching the fork, in which case those
874 were never set in the child, but only in the parent. This makes
875 sure the inserted breakpoints match the breakpoint list. */
877 breakpoint_re_set ();
878 insert_breakpoints ();
881 /* The child has exited or execed: resume threads of the parent the
882 user wanted to be executing. */
885 proceed_after_vfork_done (struct thread_info
*thread
,
888 int pid
= * (int *) arg
;
890 if (thread
->ptid
.pid () == pid
891 && thread
->state
== THREAD_RUNNING
892 && !thread
->executing
893 && !thread
->stop_requested
894 && thread
->stop_signal () == GDB_SIGNAL_0
)
896 infrun_debug_printf ("resuming vfork parent thread %s",
897 target_pid_to_str (thread
->ptid
).c_str ());
899 switch_to_thread (thread
);
900 clear_proceed_status (0);
901 proceed ((CORE_ADDR
) -1, GDB_SIGNAL_DEFAULT
);
907 /* Called whenever we notice an exec or exit event, to handle
908 detaching or resuming a vfork parent. */
911 handle_vfork_child_exec_or_exit (int exec
)
913 struct inferior
*inf
= current_inferior ();
915 if (inf
->vfork_parent
)
917 int resume_parent
= -1;
919 /* This exec or exit marks the end of the shared memory region
920 between the parent and the child. Break the bonds. */
921 inferior
*vfork_parent
= inf
->vfork_parent
;
922 inf
->vfork_parent
->vfork_child
= NULL
;
923 inf
->vfork_parent
= NULL
;
925 /* If the user wanted to detach from the parent, now is the
927 if (vfork_parent
->pending_detach
)
929 struct program_space
*pspace
;
930 struct address_space
*aspace
;
932 /* follow-fork child, detach-on-fork on. */
934 vfork_parent
->pending_detach
= 0;
936 scoped_restore_current_pspace_and_thread restore_thread
;
938 /* We're letting loose of the parent. */
939 thread_info
*tp
= any_live_thread_of_inferior (vfork_parent
);
940 switch_to_thread (tp
);
942 /* We're about to detach from the parent, which implicitly
943 removes breakpoints from its address space. There's a
944 catch here: we want to reuse the spaces for the child,
945 but, parent/child are still sharing the pspace at this
946 point, although the exec in reality makes the kernel give
947 the child a fresh set of new pages. The problem here is
948 that the breakpoints module being unaware of this, would
949 likely chose the child process to write to the parent
950 address space. Swapping the child temporarily away from
951 the spaces has the desired effect. Yes, this is "sort
954 pspace
= inf
->pspace
;
955 aspace
= inf
->aspace
;
959 if (print_inferior_events
)
962 = target_pid_to_str (ptid_t (vfork_parent
->pid
));
964 target_terminal::ours_for_output ();
968 fprintf_filtered (gdb_stdlog
,
969 _("[Detaching vfork parent %s "
970 "after child exec]\n"), pidstr
.c_str ());
974 fprintf_filtered (gdb_stdlog
,
975 _("[Detaching vfork parent %s "
976 "after child exit]\n"), pidstr
.c_str ());
980 target_detach (vfork_parent
, 0);
983 inf
->pspace
= pspace
;
984 inf
->aspace
= aspace
;
988 /* We're staying attached to the parent, so, really give the
989 child a new address space. */
990 inf
->pspace
= new program_space (maybe_new_address_space ());
991 inf
->aspace
= inf
->pspace
->aspace
;
993 set_current_program_space (inf
->pspace
);
995 resume_parent
= vfork_parent
->pid
;
999 /* If this is a vfork child exiting, then the pspace and
1000 aspaces were shared with the parent. Since we're
1001 reporting the process exit, we'll be mourning all that is
1002 found in the address space, and switching to null_ptid,
1003 preparing to start a new inferior. But, since we don't
1004 want to clobber the parent's address/program spaces, we
1005 go ahead and create a new one for this exiting
1008 /* Switch to no-thread while running clone_program_space, so
1009 that clone_program_space doesn't want to read the
1010 selected frame of a dead process. */
1011 scoped_restore_current_thread restore_thread
;
1012 switch_to_no_thread ();
1014 inf
->pspace
= new program_space (maybe_new_address_space ());
1015 inf
->aspace
= inf
->pspace
->aspace
;
1016 set_current_program_space (inf
->pspace
);
1018 inf
->symfile_flags
= SYMFILE_NO_READ
;
1019 clone_program_space (inf
->pspace
, vfork_parent
->pspace
);
1021 resume_parent
= vfork_parent
->pid
;
1024 gdb_assert (current_program_space
== inf
->pspace
);
1026 if (non_stop
&& resume_parent
!= -1)
1028 /* If the user wanted the parent to be running, let it go
1030 scoped_restore_current_thread restore_thread
;
1032 infrun_debug_printf ("resuming vfork parent process %d",
1035 iterate_over_threads (proceed_after_vfork_done
, &resume_parent
);
1040 /* Enum strings for "set|show follow-exec-mode". */
1042 static const char follow_exec_mode_new
[] = "new";
1043 static const char follow_exec_mode_same
[] = "same";
1044 static const char *const follow_exec_mode_names
[] =
1046 follow_exec_mode_new
,
1047 follow_exec_mode_same
,
1051 static const char *follow_exec_mode_string
= follow_exec_mode_same
;
1053 show_follow_exec_mode_string (struct ui_file
*file
, int from_tty
,
1054 struct cmd_list_element
*c
, const char *value
)
1056 fprintf_filtered (file
, _("Follow exec mode is \"%s\".\n"), value
);
1059 /* EXEC_FILE_TARGET is assumed to be non-NULL. */
1062 follow_exec (ptid_t ptid
, const char *exec_file_target
)
1064 int pid
= ptid
.pid ();
1065 ptid_t process_ptid
;
1067 /* Switch terminal for any messages produced e.g. by
1068 breakpoint_re_set. */
1069 target_terminal::ours_for_output ();
1071 /* This is an exec event that we actually wish to pay attention to.
1072 Refresh our symbol table to the newly exec'd program, remove any
1073 momentary bp's, etc.
1075 If there are breakpoints, they aren't really inserted now,
1076 since the exec() transformed our inferior into a fresh set
1079 We want to preserve symbolic breakpoints on the list, since
1080 we have hopes that they can be reset after the new a.out's
1081 symbol table is read.
1083 However, any "raw" breakpoints must be removed from the list
1084 (e.g., the solib bp's), since their address is probably invalid
1087 And, we DON'T want to call delete_breakpoints() here, since
1088 that may write the bp's "shadow contents" (the instruction
1089 value that was overwritten with a TRAP instruction). Since
1090 we now have a new a.out, those shadow contents aren't valid. */
1092 mark_breakpoints_out ();
1094 /* The target reports the exec event to the main thread, even if
1095 some other thread does the exec, and even if the main thread was
1096 stopped or already gone. We may still have non-leader threads of
1097 the process on our list. E.g., on targets that don't have thread
1098 exit events (like remote); or on native Linux in non-stop mode if
1099 there were only two threads in the inferior and the non-leader
1100 one is the one that execs (and nothing forces an update of the
1101 thread list up to here). When debugging remotely, it's best to
1102 avoid extra traffic, when possible, so avoid syncing the thread
1103 list with the target, and instead go ahead and delete all threads
1104 of the process but one that reported the event. Note this must
1105 be done before calling update_breakpoints_after_exec, as
1106 otherwise clearing the threads' resources would reference stale
1107 thread breakpoints -- it may have been one of these threads that
1108 stepped across the exec. We could just clear their stepping
1109 states, but as long as we're iterating, might as well delete
1110 them. Deleting them now rather than at the next user-visible
1111 stop provides a nicer sequence of events for user and MI
1113 for (thread_info
*th
: all_threads_safe ())
1114 if (th
->ptid
.pid () == pid
&& th
->ptid
!= ptid
)
1117 /* We also need to clear any left over stale state for the
1118 leader/event thread. E.g., if there was any step-resume
1119 breakpoint or similar, it's gone now. We cannot truly
1120 step-to-next statement through an exec(). */
1121 thread_info
*th
= inferior_thread ();
1122 th
->control
.step_resume_breakpoint
= NULL
;
1123 th
->control
.exception_resume_breakpoint
= NULL
;
1124 th
->control
.single_step_breakpoints
= NULL
;
1125 th
->control
.step_range_start
= 0;
1126 th
->control
.step_range_end
= 0;
1128 /* The user may have had the main thread held stopped in the
1129 previous image (e.g., schedlock on, or non-stop). Release
1131 th
->stop_requested
= 0;
1133 update_breakpoints_after_exec ();
1135 /* What is this a.out's name? */
1136 process_ptid
= ptid_t (pid
);
1137 printf_unfiltered (_("%s is executing new program: %s\n"),
1138 target_pid_to_str (process_ptid
).c_str (),
1141 /* We've followed the inferior through an exec. Therefore, the
1142 inferior has essentially been killed & reborn. */
1144 breakpoint_init_inferior (inf_execd
);
1146 gdb::unique_xmalloc_ptr
<char> exec_file_host
1147 = exec_file_find (exec_file_target
, NULL
);
1149 /* If we were unable to map the executable target pathname onto a host
1150 pathname, tell the user that. Otherwise GDB's subsequent behavior
1151 is confusing. Maybe it would even be better to stop at this point
1152 so that the user can specify a file manually before continuing. */
1153 if (exec_file_host
== NULL
)
1154 warning (_("Could not load symbols for executable %s.\n"
1155 "Do you need \"set sysroot\"?"),
1158 /* Reset the shared library package. This ensures that we get a
1159 shlib event when the child reaches "_start", at which point the
1160 dld will have had a chance to initialize the child. */
1161 /* Also, loading a symbol file below may trigger symbol lookups, and
1162 we don't want those to be satisfied by the libraries of the
1163 previous incarnation of this process. */
1164 no_shared_libraries (NULL
, 0);
1166 struct inferior
*inf
= current_inferior ();
1168 if (follow_exec_mode_string
== follow_exec_mode_new
)
1170 /* The user wants to keep the old inferior and program spaces
1171 around. Create a new fresh one, and switch to it. */
1173 /* Do exit processing for the original inferior before setting the new
1174 inferior's pid. Having two inferiors with the same pid would confuse
1175 find_inferior_p(t)id. Transfer the terminal state and info from the
1176 old to the new inferior. */
1177 inferior
*new_inferior
= add_inferior_with_spaces ();
1179 swap_terminal_info (new_inferior
, inf
);
1180 exit_inferior_silent (inf
);
1182 new_inferior
->pid
= pid
;
1183 target_follow_exec (new_inferior
, ptid
, exec_file_target
);
1185 /* We continue with the new inferior. */
1190 /* The old description may no longer be fit for the new image.
1191 E.g, a 64-bit process exec'ed a 32-bit process. Clear the
1192 old description; we'll read a new one below. No need to do
1193 this on "follow-exec-mode new", as the old inferior stays
1194 around (its description is later cleared/refetched on
1196 target_clear_description ();
1197 target_follow_exec (inf
, ptid
, exec_file_target
);
1200 gdb_assert (current_inferior () == inf
);
1201 gdb_assert (current_program_space
== inf
->pspace
);
1203 /* Attempt to open the exec file. SYMFILE_DEFER_BP_RESET is used
1204 because the proper displacement for a PIE (Position Independent
1205 Executable) main symbol file will only be computed by
1206 solib_create_inferior_hook below. breakpoint_re_set would fail
1207 to insert the breakpoints with the zero displacement. */
1208 try_open_exec_file (exec_file_host
.get (), inf
, SYMFILE_DEFER_BP_RESET
);
1210 /* If the target can specify a description, read it. Must do this
1211 after flipping to the new executable (because the target supplied
1212 description must be compatible with the executable's
1213 architecture, and the old executable may e.g., be 32-bit, while
1214 the new one 64-bit), and before anything involving memory or
1216 target_find_description ();
1218 gdb::observers::inferior_execd
.notify (inf
);
1220 breakpoint_re_set ();
1222 /* Reinsert all breakpoints. (Those which were symbolic have
1223 been reset to the proper address in the new a.out, thanks
1224 to symbol_file_command...). */
1225 insert_breakpoints ();
1227 /* The next resume of this inferior should bring it to the shlib
1228 startup breakpoints. (If the user had also set bp's on
1229 "main" from the old (parent) process, then they'll auto-
1230 matically get reset there in the new process.). */
1233 /* The chain of threads that need to do a step-over operation to get
1234 past e.g., a breakpoint. What technique is used to step over the
1235 breakpoint/watchpoint does not matter -- all threads end up in the
1236 same queue, to maintain rough temporal order of execution, in order
1237 to avoid starvation, otherwise, we could e.g., find ourselves
1238 constantly stepping the same couple threads past their breakpoints
1239 over and over, if the single-step finish fast enough. */
1240 thread_step_over_list global_thread_step_over_list
;
1242 /* Bit flags indicating what the thread needs to step over. */
1244 enum step_over_what_flag
1246 /* Step over a breakpoint. */
1247 STEP_OVER_BREAKPOINT
= 1,
1249 /* Step past a non-continuable watchpoint, in order to let the
1250 instruction execute so we can evaluate the watchpoint
1252 STEP_OVER_WATCHPOINT
= 2
1254 DEF_ENUM_FLAGS_TYPE (enum step_over_what_flag
, step_over_what
);
1256 /* Info about an instruction that is being stepped over. */
1258 struct step_over_info
1260 /* If we're stepping past a breakpoint, this is the address space
1261 and address of the instruction the breakpoint is set at. We'll
1262 skip inserting all breakpoints here. Valid iff ASPACE is
1264 const address_space
*aspace
= nullptr;
1265 CORE_ADDR address
= 0;
1267 /* The instruction being stepped over triggers a nonsteppable
1268 watchpoint. If true, we'll skip inserting watchpoints. */
1269 int nonsteppable_watchpoint_p
= 0;
1271 /* The thread's global number. */
1275 /* The step-over info of the location that is being stepped over.
1277 Note that with async/breakpoint always-inserted mode, a user might
1278 set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
1279 being stepped over. As setting a new breakpoint inserts all
1280 breakpoints, we need to make sure the breakpoint being stepped over
1281 isn't inserted then. We do that by only clearing the step-over
1282 info when the step-over is actually finished (or aborted).
1284 Presently GDB can only step over one breakpoint at any given time.
1285 Given threads that can't run code in the same address space as the
1286 breakpoint's can't really miss the breakpoint, GDB could be taught
1287 to step-over at most one breakpoint per address space (so this info
1288 could move to the address space object if/when GDB is extended).
1289 The set of breakpoints being stepped over will normally be much
1290 smaller than the set of all breakpoints, so a flag in the
1291 breakpoint location structure would be wasteful. A separate list
1292 also saves complexity and run-time, as otherwise we'd have to go
1293 through all breakpoint locations clearing their flag whenever we
1294 start a new sequence. Similar considerations weigh against storing
1295 this info in the thread object. Plus, not all step overs actually
1296 have breakpoint locations -- e.g., stepping past a single-step
1297 breakpoint, or stepping to complete a non-continuable
1299 static struct step_over_info step_over_info
;
1301 /* Record the address of the breakpoint/instruction we're currently
1303 N.B. We record the aspace and address now, instead of say just the thread,
1304 because when we need the info later the thread may be running. */
1307 set_step_over_info (const address_space
*aspace
, CORE_ADDR address
,
1308 int nonsteppable_watchpoint_p
,
1311 step_over_info
.aspace
= aspace
;
1312 step_over_info
.address
= address
;
1313 step_over_info
.nonsteppable_watchpoint_p
= nonsteppable_watchpoint_p
;
1314 step_over_info
.thread
= thread
;
1317 /* Called when we're not longer stepping over a breakpoint / an
1318 instruction, so all breakpoints are free to be (re)inserted. */
1321 clear_step_over_info (void)
1323 infrun_debug_printf ("clearing step over info");
1324 step_over_info
.aspace
= NULL
;
1325 step_over_info
.address
= 0;
1326 step_over_info
.nonsteppable_watchpoint_p
= 0;
1327 step_over_info
.thread
= -1;
1333 stepping_past_instruction_at (struct address_space
*aspace
,
1336 return (step_over_info
.aspace
!= NULL
1337 && breakpoint_address_match (aspace
, address
,
1338 step_over_info
.aspace
,
1339 step_over_info
.address
));
1345 thread_is_stepping_over_breakpoint (int thread
)
1347 return (step_over_info
.thread
!= -1
1348 && thread
== step_over_info
.thread
);
1354 stepping_past_nonsteppable_watchpoint (void)
1356 return step_over_info
.nonsteppable_watchpoint_p
;
1359 /* Returns true if step-over info is valid. */
1362 step_over_info_valid_p (void)
1364 return (step_over_info
.aspace
!= NULL
1365 || stepping_past_nonsteppable_watchpoint ());
1369 /* Displaced stepping. */
1371 /* In non-stop debugging mode, we must take special care to manage
1372 breakpoints properly; in particular, the traditional strategy for
1373 stepping a thread past a breakpoint it has hit is unsuitable.
1374 'Displaced stepping' is a tactic for stepping one thread past a
1375 breakpoint it has hit while ensuring that other threads running
1376 concurrently will hit the breakpoint as they should.
1378 The traditional way to step a thread T off a breakpoint in a
1379 multi-threaded program in all-stop mode is as follows:
1381 a0) Initially, all threads are stopped, and breakpoints are not
1383 a1) We single-step T, leaving breakpoints uninserted.
1384 a2) We insert breakpoints, and resume all threads.
1386 In non-stop debugging, however, this strategy is unsuitable: we
1387 don't want to have to stop all threads in the system in order to
1388 continue or step T past a breakpoint. Instead, we use displaced
1391 n0) Initially, T is stopped, other threads are running, and
1392 breakpoints are inserted.
1393 n1) We copy the instruction "under" the breakpoint to a separate
1394 location, outside the main code stream, making any adjustments
1395 to the instruction, register, and memory state as directed by
1397 n2) We single-step T over the instruction at its new location.
1398 n3) We adjust the resulting register and memory state as directed
1399 by T's architecture. This includes resetting T's PC to point
1400 back into the main instruction stream.
1403 This approach depends on the following gdbarch methods:
1405 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1406 indicate where to copy the instruction, and how much space must
1407 be reserved there. We use these in step n1.
1409 - gdbarch_displaced_step_copy_insn copies a instruction to a new
1410 address, and makes any necessary adjustments to the instruction,
1411 register contents, and memory. We use this in step n1.
1413 - gdbarch_displaced_step_fixup adjusts registers and memory after
1414 we have successfully single-stepped the instruction, to yield the
1415 same effect the instruction would have had if we had executed it
1416 at its original address. We use this in step n3.
1418 The gdbarch_displaced_step_copy_insn and
1419 gdbarch_displaced_step_fixup functions must be written so that
1420 copying an instruction with gdbarch_displaced_step_copy_insn,
1421 single-stepping across the copied instruction, and then applying
1422 gdbarch_displaced_insn_fixup should have the same effects on the
1423 thread's memory and registers as stepping the instruction in place
1424 would have. Exactly which responsibilities fall to the copy and
1425 which fall to the fixup is up to the author of those functions.
1427 See the comments in gdbarch.sh for details.
1429 Note that displaced stepping and software single-step cannot
1430 currently be used in combination, although with some care I think
1431 they could be made to. Software single-step works by placing
1432 breakpoints on all possible subsequent instructions; if the
1433 displaced instruction is a PC-relative jump, those breakpoints
1434 could fall in very strange places --- on pages that aren't
1435 executable, or at addresses that are not proper instruction
1436 boundaries. (We do generally let other threads run while we wait
1437 to hit the software single-step breakpoint, and they might
1438 encounter such a corrupted instruction.) One way to work around
1439 this would be to have gdbarch_displaced_step_copy_insn fully
1440 simulate the effect of PC-relative instructions (and return NULL)
1441 on architectures that use software single-stepping.
1443 In non-stop mode, we can have independent and simultaneous step
1444 requests, so more than one thread may need to simultaneously step
1445 over a breakpoint. The current implementation assumes there is
1446 only one scratch space per process. In this case, we have to
1447 serialize access to the scratch space. If thread A wants to step
1448 over a breakpoint, but we are currently waiting for some other
1449 thread to complete a displaced step, we leave thread A stopped and
1450 place it in the displaced_step_request_queue. Whenever a displaced
1451 step finishes, we pick the next thread in the queue and start a new
1452 displaced step operation on it. See displaced_step_prepare and
1453 displaced_step_finish for details. */
1455 /* Return true if THREAD is doing a displaced step. */
1458 displaced_step_in_progress_thread (thread_info
*thread
)
1460 gdb_assert (thread
!= NULL
);
1462 return thread
->displaced_step_state
.in_progress ();
1465 /* Return true if INF has a thread doing a displaced step. */
1468 displaced_step_in_progress (inferior
*inf
)
1470 return inf
->displaced_step_state
.in_progress_count
> 0;
1473 /* Return true if any thread is doing a displaced step. */
1476 displaced_step_in_progress_any_thread ()
1478 for (inferior
*inf
: all_non_exited_inferiors ())
1480 if (displaced_step_in_progress (inf
))
1488 infrun_inferior_exit (struct inferior
*inf
)
1490 inf
->displaced_step_state
.reset ();
1494 infrun_inferior_execd (inferior
*inf
)
1496 /* If some threads where was doing a displaced step in this inferior at the
1497 moment of the exec, they no longer exist. Even if the exec'ing thread
1498 doing a displaced step, we don't want to to any fixup nor restore displaced
1499 stepping buffer bytes. */
1500 inf
->displaced_step_state
.reset ();
1502 for (thread_info
*thread
: inf
->threads ())
1503 thread
->displaced_step_state
.reset ();
1505 /* Since an in-line step is done with everything else stopped, if there was
1506 one in progress at the time of the exec, it must have been the exec'ing
1508 clear_step_over_info ();
1511 /* If ON, and the architecture supports it, GDB will use displaced
1512 stepping to step over breakpoints. If OFF, or if the architecture
1513 doesn't support it, GDB will instead use the traditional
1514 hold-and-step approach. If AUTO (which is the default), GDB will
1515 decide which technique to use to step over breakpoints depending on
1516 whether the target works in a non-stop way (see use_displaced_stepping). */
1518 static enum auto_boolean can_use_displaced_stepping
= AUTO_BOOLEAN_AUTO
;
1521 show_can_use_displaced_stepping (struct ui_file
*file
, int from_tty
,
1522 struct cmd_list_element
*c
,
1525 if (can_use_displaced_stepping
== AUTO_BOOLEAN_AUTO
)
1526 fprintf_filtered (file
,
1527 _("Debugger's willingness to use displaced stepping "
1528 "to step over breakpoints is %s (currently %s).\n"),
1529 value
, target_is_non_stop_p () ? "on" : "off");
1531 fprintf_filtered (file
,
1532 _("Debugger's willingness to use displaced stepping "
1533 "to step over breakpoints is %s.\n"), value
);
1536 /* Return true if the gdbarch implements the required methods to use
1537 displaced stepping. */
1540 gdbarch_supports_displaced_stepping (gdbarch
*arch
)
1542 /* Only check for the presence of `prepare`. The gdbarch verification ensures
1543 that if `prepare` is provided, so is `finish`. */
1544 return gdbarch_displaced_step_prepare_p (arch
);
1547 /* Return non-zero if displaced stepping can/should be used to step
1548 over breakpoints of thread TP. */
1551 use_displaced_stepping (thread_info
*tp
)
1553 /* If the user disabled it explicitly, don't use displaced stepping. */
1554 if (can_use_displaced_stepping
== AUTO_BOOLEAN_FALSE
)
1557 /* If "auto", only use displaced stepping if the target operates in a non-stop
1559 if (can_use_displaced_stepping
== AUTO_BOOLEAN_AUTO
1560 && !target_is_non_stop_p ())
1563 gdbarch
*gdbarch
= get_thread_regcache (tp
)->arch ();
1565 /* If the architecture doesn't implement displaced stepping, don't use
1567 if (!gdbarch_supports_displaced_stepping (gdbarch
))
1570 /* If recording, don't use displaced stepping. */
1571 if (find_record_target () != nullptr)
1574 /* If displaced stepping failed before for this inferior, don't bother trying
1576 if (tp
->inf
->displaced_step_state
.failed_before
)
1582 /* Simple function wrapper around displaced_step_thread_state::reset. */
1585 displaced_step_reset (displaced_step_thread_state
*displaced
)
1587 displaced
->reset ();
1590 /* A cleanup that wraps displaced_step_reset. We use this instead of, say,
1591 SCOPE_EXIT, because it needs to be discardable with "cleanup.release ()". */
1593 using displaced_step_reset_cleanup
= FORWARD_SCOPE_EXIT (displaced_step_reset
);
1598 displaced_step_dump_bytes (const gdb_byte
*buf
, size_t len
)
1602 for (size_t i
= 0; i
< len
; i
++)
1605 ret
+= string_printf ("%02x", buf
[i
]);
1607 ret
+= string_printf (" %02x", buf
[i
]);
1613 /* Prepare to single-step, using displaced stepping.
1615 Note that we cannot use displaced stepping when we have a signal to
1616 deliver. If we have a signal to deliver and an instruction to step
1617 over, then after the step, there will be no indication from the
1618 target whether the thread entered a signal handler or ignored the
1619 signal and stepped over the instruction successfully --- both cases
1620 result in a simple SIGTRAP. In the first case we mustn't do a
1621 fixup, and in the second case we must --- but we can't tell which.
1622 Comments in the code for 'random signals' in handle_inferior_event
1623 explain how we handle this case instead.
1625 Returns DISPLACED_STEP_PREPARE_STATUS_OK if preparing was successful -- this
1626 thread is going to be stepped now; DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
1627 if displaced stepping this thread got queued; or
1628 DISPLACED_STEP_PREPARE_STATUS_CANT if this instruction can't be displaced
1631 static displaced_step_prepare_status
1632 displaced_step_prepare_throw (thread_info
*tp
)
1634 regcache
*regcache
= get_thread_regcache (tp
);
1635 struct gdbarch
*gdbarch
= regcache
->arch ();
1636 displaced_step_thread_state
&disp_step_thread_state
1637 = tp
->displaced_step_state
;
1639 /* We should never reach this function if the architecture does not
1640 support displaced stepping. */
1641 gdb_assert (gdbarch_supports_displaced_stepping (gdbarch
));
1643 /* Nor if the thread isn't meant to step over a breakpoint. */
1644 gdb_assert (tp
->control
.trap_expected
);
1646 /* Disable range stepping while executing in the scratch pad. We
1647 want a single-step even if executing the displaced instruction in
1648 the scratch buffer lands within the stepping range (e.g., a
1650 tp
->control
.may_range_step
= 0;
1652 /* We are about to start a displaced step for this thread. If one is already
1653 in progress, something's wrong. */
1654 gdb_assert (!disp_step_thread_state
.in_progress ());
1656 if (tp
->inf
->displaced_step_state
.unavailable
)
1658 /* The gdbarch tells us it's not worth asking to try a prepare because
1659 it is likely that it will return unavailable, so don't bother asking. */
1661 displaced_debug_printf ("deferring step of %s",
1662 target_pid_to_str (tp
->ptid
).c_str ());
1664 global_thread_step_over_chain_enqueue (tp
);
1665 return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
;
1668 displaced_debug_printf ("displaced-stepping %s now",
1669 target_pid_to_str (tp
->ptid
).c_str ());
1671 scoped_restore_current_thread restore_thread
;
1673 switch_to_thread (tp
);
1675 CORE_ADDR original_pc
= regcache_read_pc (regcache
);
1676 CORE_ADDR displaced_pc
;
1678 displaced_step_prepare_status status
1679 = gdbarch_displaced_step_prepare (gdbarch
, tp
, displaced_pc
);
1681 if (status
== DISPLACED_STEP_PREPARE_STATUS_CANT
)
1683 displaced_debug_printf ("failed to prepare (%s)",
1684 target_pid_to_str (tp
->ptid
).c_str ());
1686 return DISPLACED_STEP_PREPARE_STATUS_CANT
;
1688 else if (status
== DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
)
1690 /* Not enough displaced stepping resources available, defer this
1691 request by placing it the queue. */
1693 displaced_debug_printf ("not enough resources available, "
1694 "deferring step of %s",
1695 target_pid_to_str (tp
->ptid
).c_str ());
1697 global_thread_step_over_chain_enqueue (tp
);
1699 return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
;
1702 gdb_assert (status
== DISPLACED_STEP_PREPARE_STATUS_OK
);
1704 /* Save the information we need to fix things up if the step
1706 disp_step_thread_state
.set (gdbarch
);
1708 tp
->inf
->displaced_step_state
.in_progress_count
++;
1710 displaced_debug_printf ("prepared successfully thread=%s, "
1711 "original_pc=%s, displaced_pc=%s",
1712 target_pid_to_str (tp
->ptid
).c_str (),
1713 paddress (gdbarch
, original_pc
),
1714 paddress (gdbarch
, displaced_pc
));
1716 return DISPLACED_STEP_PREPARE_STATUS_OK
;
1719 /* Wrapper for displaced_step_prepare_throw that disabled further
1720 attempts at displaced stepping if we get a memory error. */
1722 static displaced_step_prepare_status
1723 displaced_step_prepare (thread_info
*thread
)
1725 displaced_step_prepare_status status
1726 = DISPLACED_STEP_PREPARE_STATUS_CANT
;
1730 status
= displaced_step_prepare_throw (thread
);
1732 catch (const gdb_exception_error
&ex
)
1734 if (ex
.error
!= MEMORY_ERROR
1735 && ex
.error
!= NOT_SUPPORTED_ERROR
)
1738 infrun_debug_printf ("caught exception, disabling displaced stepping: %s",
1741 /* Be verbose if "set displaced-stepping" is "on", silent if
1743 if (can_use_displaced_stepping
== AUTO_BOOLEAN_TRUE
)
1745 warning (_("disabling displaced stepping: %s"),
1749 /* Disable further displaced stepping attempts. */
1750 thread
->inf
->displaced_step_state
.failed_before
= 1;
1756 /* If we displaced stepped an instruction successfully, adjust registers and
1757 memory to yield the same effect the instruction would have had if we had
1758 executed it at its original address, and return
1759 DISPLACED_STEP_FINISH_STATUS_OK. If the instruction didn't complete,
1760 relocate the PC and return DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED.
1762 If the thread wasn't displaced stepping, return
1763 DISPLACED_STEP_FINISH_STATUS_OK as well. */
1765 static displaced_step_finish_status
1766 displaced_step_finish (thread_info
*event_thread
, enum gdb_signal signal
)
1768 displaced_step_thread_state
*displaced
= &event_thread
->displaced_step_state
;
1770 /* Was this thread performing a displaced step? */
1771 if (!displaced
->in_progress ())
1772 return DISPLACED_STEP_FINISH_STATUS_OK
;
1774 gdb_assert (event_thread
->inf
->displaced_step_state
.in_progress_count
> 0);
1775 event_thread
->inf
->displaced_step_state
.in_progress_count
--;
1777 /* Fixup may need to read memory/registers. Switch to the thread
1778 that we're fixing up. Also, target_stopped_by_watchpoint checks
1779 the current thread, and displaced_step_restore performs ptid-dependent
1780 memory accesses using current_inferior(). */
1781 switch_to_thread (event_thread
);
1783 displaced_step_reset_cleanup
cleanup (displaced
);
1785 /* Do the fixup, and release the resources acquired to do the displaced
1787 return gdbarch_displaced_step_finish (displaced
->get_original_gdbarch (),
1788 event_thread
, signal
);
1791 /* Data to be passed around while handling an event. This data is
1792 discarded between events. */
1793 struct execution_control_state
1795 process_stratum_target
*target
;
1797 /* The thread that got the event, if this was a thread event; NULL
1799 struct thread_info
*event_thread
;
1801 struct target_waitstatus ws
;
1802 int stop_func_filled_in
;
1803 CORE_ADDR stop_func_start
;
1804 CORE_ADDR stop_func_end
;
1805 const char *stop_func_name
;
1808 /* True if the event thread hit the single-step breakpoint of
1809 another thread. Thus the event doesn't cause a stop, the thread
1810 needs to be single-stepped past the single-step breakpoint before
1811 we can switch back to the original stepping thread. */
1812 int hit_singlestep_breakpoint
;
1815 /* Clear ECS and set it to point at TP. */
1818 reset_ecs (struct execution_control_state
*ecs
, struct thread_info
*tp
)
1820 memset (ecs
, 0, sizeof (*ecs
));
1821 ecs
->event_thread
= tp
;
1822 ecs
->ptid
= tp
->ptid
;
1825 static void keep_going_pass_signal (struct execution_control_state
*ecs
);
1826 static void prepare_to_wait (struct execution_control_state
*ecs
);
1827 static bool keep_going_stepped_thread (struct thread_info
*tp
);
1828 static step_over_what
thread_still_needs_step_over (struct thread_info
*tp
);
1830 /* Are there any pending step-over requests? If so, run all we can
1831 now and return true. Otherwise, return false. */
1834 start_step_over (void)
1836 INFRUN_SCOPED_DEBUG_ENTER_EXIT
;
1838 /* Don't start a new step-over if we already have an in-line
1839 step-over operation ongoing. */
1840 if (step_over_info_valid_p ())
1843 /* Steal the global thread step over chain. As we try to initiate displaced
1844 steps, threads will be enqueued in the global chain if no buffers are
1845 available. If we iterated on the global chain directly, we might iterate
1847 thread_step_over_list threads_to_step
1848 = std::move (global_thread_step_over_list
);
1850 infrun_debug_printf ("stealing global queue of threads to step, length = %d",
1851 thread_step_over_chain_length (threads_to_step
));
1853 bool started
= false;
1855 /* On scope exit (whatever the reason, return or exception), if there are
1856 threads left in the THREADS_TO_STEP chain, put back these threads in the
1860 if (threads_to_step
.empty ())
1861 infrun_debug_printf ("step-over queue now empty");
1864 infrun_debug_printf ("putting back %d threads to step in global queue",
1865 thread_step_over_chain_length (threads_to_step
));
1867 global_thread_step_over_chain_enqueue_chain
1868 (std::move (threads_to_step
));
1872 thread_step_over_list_safe_range range
1873 = make_thread_step_over_list_safe_range (threads_to_step
);
1875 for (thread_info
*tp
: range
)
1877 struct execution_control_state ecss
;
1878 struct execution_control_state
*ecs
= &ecss
;
1879 step_over_what step_what
;
1880 int must_be_in_line
;
1882 gdb_assert (!tp
->stop_requested
);
1884 if (tp
->inf
->displaced_step_state
.unavailable
)
1886 /* The arch told us to not even try preparing another displaced step
1887 for this inferior. Just leave the thread in THREADS_TO_STEP, it
1888 will get moved to the global chain on scope exit. */
1892 /* Remove thread from the THREADS_TO_STEP chain. If anything goes wrong
1893 while we try to prepare the displaced step, we don't add it back to
1894 the global step over chain. This is to avoid a thread staying in the
1895 step over chain indefinitely if something goes wrong when resuming it
1896 If the error is intermittent and it still needs a step over, it will
1897 get enqueued again when we try to resume it normally. */
1898 threads_to_step
.erase (threads_to_step
.iterator_to (*tp
));
1900 step_what
= thread_still_needs_step_over (tp
);
1901 must_be_in_line
= ((step_what
& STEP_OVER_WATCHPOINT
)
1902 || ((step_what
& STEP_OVER_BREAKPOINT
)
1903 && !use_displaced_stepping (tp
)));
1905 /* We currently stop all threads of all processes to step-over
1906 in-line. If we need to start a new in-line step-over, let
1907 any pending displaced steps finish first. */
1908 if (must_be_in_line
&& displaced_step_in_progress_any_thread ())
1910 global_thread_step_over_chain_enqueue (tp
);
1914 if (tp
->control
.trap_expected
1918 internal_error (__FILE__
, __LINE__
,
1919 "[%s] has inconsistent state: "
1920 "trap_expected=%d, resumed=%d, executing=%d\n",
1921 target_pid_to_str (tp
->ptid
).c_str (),
1922 tp
->control
.trap_expected
,
1927 infrun_debug_printf ("resuming [%s] for step-over",
1928 target_pid_to_str (tp
->ptid
).c_str ());
1930 /* keep_going_pass_signal skips the step-over if the breakpoint
1931 is no longer inserted. In all-stop, we want to keep looking
1932 for a thread that needs a step-over instead of resuming TP,
1933 because we wouldn't be able to resume anything else until the
1934 target stops again. In non-stop, the resume always resumes
1935 only TP, so it's OK to let the thread resume freely. */
1936 if (!target_is_non_stop_p () && !step_what
)
1939 switch_to_thread (tp
);
1940 reset_ecs (ecs
, tp
);
1941 keep_going_pass_signal (ecs
);
1943 if (!ecs
->wait_some_more
)
1944 error (_("Command aborted."));
1946 /* If the thread's step over could not be initiated because no buffers
1947 were available, it was re-added to the global step over chain. */
1950 infrun_debug_printf ("[%s] was resumed.",
1951 target_pid_to_str (tp
->ptid
).c_str ());
1952 gdb_assert (!thread_is_in_step_over_chain (tp
));
1956 infrun_debug_printf ("[%s] was NOT resumed.",
1957 target_pid_to_str (tp
->ptid
).c_str ());
1958 gdb_assert (thread_is_in_step_over_chain (tp
));
1961 /* If we started a new in-line step-over, we're done. */
1962 if (step_over_info_valid_p ())
1964 gdb_assert (tp
->control
.trap_expected
);
1969 if (!target_is_non_stop_p ())
1971 /* On all-stop, shouldn't have resumed unless we needed a
1973 gdb_assert (tp
->control
.trap_expected
1974 || tp
->step_after_step_resume_breakpoint
);
1976 /* With remote targets (at least), in all-stop, we can't
1977 issue any further remote commands until the program stops
1983 /* Either the thread no longer needed a step-over, or a new
1984 displaced stepping sequence started. Even in the latter
1985 case, continue looking. Maybe we can also start another
1986 displaced step on a thread of other process. */
1992 /* Update global variables holding ptids to hold NEW_PTID if they were
1993 holding OLD_PTID. */
1995 infrun_thread_ptid_changed (process_stratum_target
*target
,
1996 ptid_t old_ptid
, ptid_t new_ptid
)
1998 if (inferior_ptid
== old_ptid
1999 && current_inferior ()->process_target () == target
)
2000 inferior_ptid
= new_ptid
;
2005 static const char schedlock_off
[] = "off";
2006 static const char schedlock_on
[] = "on";
2007 static const char schedlock_step
[] = "step";
2008 static const char schedlock_replay
[] = "replay";
2009 static const char *const scheduler_enums
[] = {
2016 static const char *scheduler_mode
= schedlock_replay
;
2018 show_scheduler_mode (struct ui_file
*file
, int from_tty
,
2019 struct cmd_list_element
*c
, const char *value
)
2021 fprintf_filtered (file
,
2022 _("Mode for locking scheduler "
2023 "during execution is \"%s\".\n"),
2028 set_schedlock_func (const char *args
, int from_tty
, struct cmd_list_element
*c
)
2030 if (!target_can_lock_scheduler ())
2032 scheduler_mode
= schedlock_off
;
2033 error (_("Target '%s' cannot support this command."),
2034 target_shortname ());
/* True if execution commands resume all threads of all processes by
   default; otherwise, resume only threads of the current inferior
   process.  */
bool sched_multi = false;
2043 /* Try to setup for software single stepping. Return true if target_resume()
2044 should use hardware single step.
2046 GDBARCH the current gdbarch. */
2049 maybe_software_singlestep (struct gdbarch
*gdbarch
)
2051 bool hw_step
= true;
2053 if (execution_direction
== EXEC_FORWARD
2054 && gdbarch_software_single_step_p (gdbarch
))
2055 hw_step
= !insert_single_step_breakpoints (gdbarch
);
2063 user_visible_resume_ptid (int step
)
2069 /* With non-stop mode on, threads are always handled
2071 resume_ptid
= inferior_ptid
;
2073 else if ((scheduler_mode
== schedlock_on
)
2074 || (scheduler_mode
== schedlock_step
&& step
))
2076 /* User-settable 'scheduler' mode requires solo thread
2078 resume_ptid
= inferior_ptid
;
2080 else if ((scheduler_mode
== schedlock_replay
)
2081 && target_record_will_replay (minus_one_ptid
, execution_direction
))
2083 /* User-settable 'scheduler' mode requires solo thread resume in replay
2085 resume_ptid
= inferior_ptid
;
2087 else if (!sched_multi
&& target_supports_multi_process ())
2089 /* Resume all threads of the current process (and none of other
2091 resume_ptid
= ptid_t (inferior_ptid
.pid ());
2095 /* Resume all threads of all processes. */
2096 resume_ptid
= RESUME_ALL
;
2104 process_stratum_target
*
2105 user_visible_resume_target (ptid_t resume_ptid
)
2107 return (resume_ptid
== minus_one_ptid
&& sched_multi
2109 : current_inferior ()->process_target ());
2112 /* Return a ptid representing the set of threads that we will resume,
2113 in the perspective of the target, assuming run control handling
2114 does not require leaving some threads stopped (e.g., stepping past
2115 breakpoint). USER_STEP indicates whether we're about to start the
2116 target for a stepping command. */
2119 internal_resume_ptid (int user_step
)
2121 /* In non-stop, we always control threads individually. Note that
2122 the target may always work in non-stop mode even with "set
2123 non-stop off", in which case user_visible_resume_ptid could
2124 return a wildcard ptid. */
2125 if (target_is_non_stop_p ())
2126 return inferior_ptid
;
2128 return user_visible_resume_ptid (user_step
);
2131 /* Wrapper for target_resume, that handles infrun-specific
2135 do_target_resume (ptid_t resume_ptid
, bool step
, enum gdb_signal sig
)
2137 struct thread_info
*tp
= inferior_thread ();
2139 gdb_assert (!tp
->stop_requested
);
2141 /* Install inferior's terminal modes. */
2142 target_terminal::inferior ();
2144 /* Avoid confusing the next resume, if the next stop/resume
2145 happens to apply to another thread. */
2146 tp
->set_stop_signal (GDB_SIGNAL_0
);
2148 /* Advise target which signals may be handled silently.
2150 If we have removed breakpoints because we are stepping over one
2151 in-line (in any thread), we need to receive all signals to avoid
2152 accidentally skipping a breakpoint during execution of a signal
2155 Likewise if we're displaced stepping, otherwise a trap for a
2156 breakpoint in a signal handler might be confused with the
2157 displaced step finishing. We don't make the displaced_step_finish
2158 step distinguish the cases instead, because:
2160 - a backtrace while stopped in the signal handler would show the
2161 scratch pad as frame older than the signal handler, instead of
2162 the real mainline code.
2164 - when the thread is later resumed, the signal handler would
2165 return to the scratch pad area, which would no longer be
2167 if (step_over_info_valid_p ()
2168 || displaced_step_in_progress (tp
->inf
))
2169 target_pass_signals ({});
2171 target_pass_signals (signal_pass
);
2173 target_resume (resume_ptid
, step
, sig
);
2175 if (target_can_async_p ())
2179 /* Resume the inferior. SIG is the signal to give the inferior
2180 (GDB_SIGNAL_0 for none). Note: don't call this directly; instead
2181 call 'resume', which handles exceptions. */
2184 resume_1 (enum gdb_signal sig
)
2186 struct regcache
*regcache
= get_current_regcache ();
2187 struct gdbarch
*gdbarch
= regcache
->arch ();
2188 struct thread_info
*tp
= inferior_thread ();
2189 const address_space
*aspace
= regcache
->aspace ();
2191 /* This represents the user's step vs continue request. When
2192 deciding whether "set scheduler-locking step" applies, it's the
2193 user's intention that counts. */
2194 const int user_step
= tp
->control
.stepping_command
;
2195 /* This represents what we'll actually request the target to do.
2196 This can decay from a step to a continue, if e.g., we need to
2197 implement single-stepping with breakpoints (software
2201 gdb_assert (!tp
->stop_requested
);
2202 gdb_assert (!thread_is_in_step_over_chain (tp
));
2204 if (tp
->has_pending_waitstatus ())
2207 ("thread %s has pending wait "
2208 "status %s (currently_stepping=%d).",
2209 target_pid_to_str (tp
->ptid
).c_str (),
2210 target_waitstatus_to_string (&tp
->pending_waitstatus ()).c_str (),
2211 currently_stepping (tp
));
2213 tp
->inf
->process_target ()->threads_executing
= true;
2214 tp
->set_resumed (true);
2216 /* FIXME: What should we do if we are supposed to resume this
2217 thread with a signal? Maybe we should maintain a queue of
2218 pending signals to deliver. */
2219 if (sig
!= GDB_SIGNAL_0
)
2221 warning (_("Couldn't deliver signal %s to %s."),
2222 gdb_signal_to_name (sig
),
2223 target_pid_to_str (tp
->ptid
).c_str ());
2226 tp
->set_stop_signal (GDB_SIGNAL_0
);
2228 if (target_can_async_p ())
2231 /* Tell the event loop we have an event to process. */
2232 mark_async_event_handler (infrun_async_inferior_event_token
);
2237 tp
->stepped_breakpoint
= 0;
2239 /* Depends on stepped_breakpoint. */
2240 step
= currently_stepping (tp
);
2242 if (current_inferior ()->waiting_for_vfork_done
)
2244 /* Don't try to single-step a vfork parent that is waiting for
2245 the child to get out of the shared memory region (by exec'ing
2246 or exiting). This is particularly important on software
2247 single-step archs, as the child process would trip on the
2248 software single step breakpoint inserted for the parent
2249 process. Since the parent will not actually execute any
2250 instruction until the child is out of the shared region (such
2251 are vfork's semantics), it is safe to simply continue it.
2252 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
2253 the parent, and tell it to `keep_going', which automatically
2254 re-sets it stepping. */
2255 infrun_debug_printf ("resume : clear step");
2259 CORE_ADDR pc
= regcache_read_pc (regcache
);
2261 infrun_debug_printf ("step=%d, signal=%s, trap_expected=%d, "
2262 "current thread [%s] at %s",
2263 step
, gdb_signal_to_symbol_string (sig
),
2264 tp
->control
.trap_expected
,
2265 target_pid_to_str (inferior_ptid
).c_str (),
2266 paddress (gdbarch
, pc
));
2268 /* Normally, by the time we reach `resume', the breakpoints are either
2269 removed or inserted, as appropriate. The exception is if we're sitting
2270 at a permanent breakpoint; we need to step over it, but permanent
2271 breakpoints can't be removed. So we have to test for it here. */
2272 if (breakpoint_here_p (aspace
, pc
) == permanent_breakpoint_here
)
2274 if (sig
!= GDB_SIGNAL_0
)
2276 /* We have a signal to pass to the inferior. The resume
2277 may, or may not take us to the signal handler. If this
2278 is a step, we'll need to stop in the signal handler, if
2279 there's one, (if the target supports stepping into
2280 handlers), or in the next mainline instruction, if
2281 there's no handler. If this is a continue, we need to be
2282 sure to run the handler with all breakpoints inserted.
2283 In all cases, set a breakpoint at the current address
2284 (where the handler returns to), and once that breakpoint
2285 is hit, resume skipping the permanent breakpoint. If
2286 that breakpoint isn't hit, then we've stepped into the
2287 signal handler (or hit some other event). We'll delete
2288 the step-resume breakpoint then. */
2290 infrun_debug_printf ("resume: skipping permanent breakpoint, "
2291 "deliver signal first");
2293 clear_step_over_info ();
2294 tp
->control
.trap_expected
= 0;
2296 if (tp
->control
.step_resume_breakpoint
== NULL
)
2298 /* Set a "high-priority" step-resume, as we don't want
2299 user breakpoints at PC to trigger (again) when this
2301 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2302 gdb_assert (tp
->control
.step_resume_breakpoint
->loc
->permanent
);
2304 tp
->step_after_step_resume_breakpoint
= step
;
2307 insert_breakpoints ();
2311 /* There's no signal to pass, we can go ahead and skip the
2312 permanent breakpoint manually. */
2313 infrun_debug_printf ("skipping permanent breakpoint");
2314 gdbarch_skip_permanent_breakpoint (gdbarch
, regcache
);
2315 /* Update pc to reflect the new address from which we will
2316 execute instructions. */
2317 pc
= regcache_read_pc (regcache
);
2321 /* We've already advanced the PC, so the stepping part
2322 is done. Now we need to arrange for a trap to be
2323 reported to handle_inferior_event. Set a breakpoint
2324 at the current PC, and run to it. Don't update
2325 prev_pc, because if we end in
2326 switch_back_to_stepped_thread, we want the "expected
2327 thread advanced also" branch to be taken. IOW, we
2328 don't want this thread to step further from PC
2330 gdb_assert (!step_over_info_valid_p ());
2331 insert_single_step_breakpoint (gdbarch
, aspace
, pc
);
2332 insert_breakpoints ();
2334 resume_ptid
= internal_resume_ptid (user_step
);
2335 do_target_resume (resume_ptid
, false, GDB_SIGNAL_0
);
2336 tp
->set_resumed (true);
2342 /* If we have a breakpoint to step over, make sure to do a single
2343 step only. Same if we have software watchpoints. */
2344 if (tp
->control
.trap_expected
|| bpstat_should_step ())
2345 tp
->control
.may_range_step
= 0;
2347 /* If displaced stepping is enabled, step over breakpoints by executing a
2348 copy of the instruction at a different address.
2350 We can't use displaced stepping when we have a signal to deliver;
2351 the comments for displaced_step_prepare explain why. The
2352 comments in the handle_inferior event for dealing with 'random
2353 signals' explain what we do instead.
2355 We can't use displaced stepping when we are waiting for vfork_done
2356 event, displaced stepping breaks the vfork child similarly as single
2357 step software breakpoint. */
2358 if (tp
->control
.trap_expected
2359 && use_displaced_stepping (tp
)
2360 && !step_over_info_valid_p ()
2361 && sig
== GDB_SIGNAL_0
2362 && !current_inferior ()->waiting_for_vfork_done
)
2364 displaced_step_prepare_status prepare_status
2365 = displaced_step_prepare (tp
);
2367 if (prepare_status
== DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
)
2369 infrun_debug_printf ("Got placed in step-over queue");
2371 tp
->control
.trap_expected
= 0;
2374 else if (prepare_status
== DISPLACED_STEP_PREPARE_STATUS_CANT
)
2376 /* Fallback to stepping over the breakpoint in-line. */
2378 if (target_is_non_stop_p ())
2379 stop_all_threads ();
2381 set_step_over_info (regcache
->aspace (),
2382 regcache_read_pc (regcache
), 0, tp
->global_num
);
2384 step
= maybe_software_singlestep (gdbarch
);
2386 insert_breakpoints ();
2388 else if (prepare_status
== DISPLACED_STEP_PREPARE_STATUS_OK
)
2390 /* Update pc to reflect the new address from which we will
2391 execute instructions due to displaced stepping. */
2392 pc
= regcache_read_pc (get_thread_regcache (tp
));
2394 step
= gdbarch_displaced_step_hw_singlestep (gdbarch
);
2397 gdb_assert_not_reached (_("Invalid displaced_step_prepare_status "
2401 /* Do we need to do it the hard way, w/temp breakpoints? */
2403 step
= maybe_software_singlestep (gdbarch
);
2405 /* Currently, our software single-step implementation leads to different
2406 results than hardware single-stepping in one situation: when stepping
2407 into delivering a signal which has an associated signal handler,
2408 hardware single-step will stop at the first instruction of the handler,
2409 while software single-step will simply skip execution of the handler.
2411 For now, this difference in behavior is accepted since there is no
2412 easy way to actually implement single-stepping into a signal handler
2413 without kernel support.
2415 However, there is one scenario where this difference leads to follow-on
2416 problems: if we're stepping off a breakpoint by removing all breakpoints
2417 and then single-stepping. In this case, the software single-step
2418 behavior means that even if there is a *breakpoint* in the signal
2419 handler, GDB still would not stop.
2421 Fortunately, we can at least fix this particular issue. We detect
2422 here the case where we are about to deliver a signal while software
2423 single-stepping with breakpoints removed. In this situation, we
2424 revert the decisions to remove all breakpoints and insert single-
2425 step breakpoints, and instead we install a step-resume breakpoint
2426 at the current address, deliver the signal without stepping, and
2427 once we arrive back at the step-resume breakpoint, actually step
2428 over the breakpoint we originally wanted to step over. */
2429 if (thread_has_single_step_breakpoints_set (tp
)
2430 && sig
!= GDB_SIGNAL_0
2431 && step_over_info_valid_p ())
2433 /* If we have nested signals or a pending signal is delivered
2434 immediately after a handler returns, might already have
2435 a step-resume breakpoint set on the earlier handler. We cannot
2436 set another step-resume breakpoint; just continue on until the
2437 original breakpoint is hit. */
2438 if (tp
->control
.step_resume_breakpoint
== NULL
)
2440 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2441 tp
->step_after_step_resume_breakpoint
= 1;
2444 delete_single_step_breakpoints (tp
);
2446 clear_step_over_info ();
2447 tp
->control
.trap_expected
= 0;
2449 insert_breakpoints ();
2452 /* If STEP is set, it's a request to use hardware stepping
2453 facilities. But in that case, we should never
2454 use singlestep breakpoint. */
2455 gdb_assert (!(thread_has_single_step_breakpoints_set (tp
) && step
));
2457 /* Decide the set of threads to ask the target to resume. */
2458 if (tp
->control
.trap_expected
)
2460 /* We're allowing a thread to run past a breakpoint it has
2461 hit, either by single-stepping the thread with the breakpoint
2462 removed, or by displaced stepping, with the breakpoint inserted.
2463 In the former case, we need to single-step only this thread,
2464 and keep others stopped, as they can miss this breakpoint if
2465 allowed to run. That's not really a problem for displaced
2466 stepping, but, we still keep other threads stopped, in case
2467 another thread is also stopped for a breakpoint waiting for
2468 its turn in the displaced stepping queue. */
2469 resume_ptid
= inferior_ptid
;
2472 resume_ptid
= internal_resume_ptid (user_step
);
2474 if (execution_direction
!= EXEC_REVERSE
2475 && step
&& breakpoint_inserted_here_p (aspace
, pc
))
2477 /* There are two cases where we currently need to step a
2478 breakpoint instruction when we have a signal to deliver:
2480 - See handle_signal_stop where we handle random signals that
2481 could take out us out of the stepping range. Normally, in
2482 that case we end up continuing (instead of stepping) over the
2483 signal handler with a breakpoint at PC, but there are cases
2484 where we should _always_ single-step, even if we have a
2485 step-resume breakpoint, like when a software watchpoint is
2486 set. Assuming single-stepping and delivering a signal at the
2487 same time would takes us to the signal handler, then we could
2488 have removed the breakpoint at PC to step over it. However,
2489 some hardware step targets (like e.g., Mac OS) can't step
2490 into signal handlers, and for those, we need to leave the
2491 breakpoint at PC inserted, as otherwise if the handler
2492 recurses and executes PC again, it'll miss the breakpoint.
2493 So we leave the breakpoint inserted anyway, but we need to
2494 record that we tried to step a breakpoint instruction, so
2495 that adjust_pc_after_break doesn't end up confused.
2497 - In non-stop if we insert a breakpoint (e.g., a step-resume)
2498 in one thread after another thread that was stepping had been
2499 momentarily paused for a step-over. When we re-resume the
2500 stepping thread, it may be resumed from that address with a
2501 breakpoint that hasn't trapped yet. Seen with
2502 gdb.threads/non-stop-fair-events.exp, on targets that don't
2503 do displaced stepping. */
2505 infrun_debug_printf ("resume: [%s] stepped breakpoint",
2506 target_pid_to_str (tp
->ptid
).c_str ());
2508 tp
->stepped_breakpoint
= 1;
2510 /* Most targets can step a breakpoint instruction, thus
2511 executing it normally. But if this one cannot, just
2512 continue and we will hit it anyway. */
2513 if (gdbarch_cannot_step_breakpoint (gdbarch
))
2518 && tp
->control
.trap_expected
2519 && use_displaced_stepping (tp
)
2520 && !step_over_info_valid_p ())
2522 struct regcache
*resume_regcache
= get_thread_regcache (tp
);
2523 struct gdbarch
*resume_gdbarch
= resume_regcache
->arch ();
2524 CORE_ADDR actual_pc
= regcache_read_pc (resume_regcache
);
2527 read_memory (actual_pc
, buf
, sizeof (buf
));
2528 displaced_debug_printf ("run %s: %s",
2529 paddress (resume_gdbarch
, actual_pc
),
2530 displaced_step_dump_bytes
2531 (buf
, sizeof (buf
)).c_str ());
2534 if (tp
->control
.may_range_step
)
2536 /* If we're resuming a thread with the PC out of the step
2537 range, then we're doing some nested/finer run control
2538 operation, like stepping the thread out of the dynamic
2539 linker or the displaced stepping scratch pad. We
2540 shouldn't have allowed a range step then. */
2541 gdb_assert (pc_in_thread_step_range (pc
, tp
));
2544 do_target_resume (resume_ptid
, step
, sig
);
2545 tp
->set_resumed (true);
2548 /* Resume the inferior. SIG is the signal to give the inferior
2549 (GDB_SIGNAL_0 for none). This is a wrapper around 'resume_1' that
2550 rolls back state on error. */
2553 resume (gdb_signal sig
)
2559 catch (const gdb_exception
&ex
)
2561 /* If resuming is being aborted for any reason, delete any
2562 single-step breakpoint resume_1 may have created, to avoid
2563 confusing the following resumption, and to avoid leaving
2564 single-step breakpoints perturbing other threads, in case
2565 we're running in non-stop mode. */
2566 if (inferior_ptid
!= null_ptid
)
2567 delete_single_step_breakpoints (inferior_thread ());
2577 /* Counter that tracks number of user visible stops. This can be used
2578 to tell whether a command has proceeded the inferior past the
2579 current location. This allows e.g., inferior function calls in
2580 breakpoint commands to not interrupt the command list. When the
2581 call finishes successfully, the inferior is standing at the same
2582 breakpoint as if nothing happened (and so we don't call
2584 static ULONGEST current_stop_id
;
2591 return current_stop_id
;
2594 /* Called when we report a user visible stop. */
2602 /* Clear out all variables saying what to do when inferior is continued.
2603 First do this, then set the ones you want, then call `proceed'. */
2606 clear_proceed_status_thread (struct thread_info
*tp
)
2608 infrun_debug_printf ("%s", target_pid_to_str (tp
->ptid
).c_str ());
2610 /* If we're starting a new sequence, then the previous finished
2611 single-step is no longer relevant. */
2612 if (tp
->has_pending_waitstatus ())
2614 if (tp
->stop_reason () == TARGET_STOPPED_BY_SINGLE_STEP
)
2616 infrun_debug_printf ("pending event of %s was a finished step. "
2618 target_pid_to_str (tp
->ptid
).c_str ());
2620 tp
->clear_pending_waitstatus ();
2621 tp
->set_stop_reason (TARGET_STOPPED_BY_NO_REASON
);
2626 ("thread %s has pending wait status %s (currently_stepping=%d).",
2627 target_pid_to_str (tp
->ptid
).c_str (),
2628 target_waitstatus_to_string (&tp
->pending_waitstatus ()).c_str (),
2629 currently_stepping (tp
));
2633 /* If this signal should not be seen by program, give it zero.
2634 Used for debugging signals. */
2635 if (!signal_pass_state (tp
->stop_signal ()))
2636 tp
->set_stop_signal (GDB_SIGNAL_0
);
2638 delete tp
->thread_fsm
;
2639 tp
->thread_fsm
= NULL
;
2641 tp
->control
.trap_expected
= 0;
2642 tp
->control
.step_range_start
= 0;
2643 tp
->control
.step_range_end
= 0;
2644 tp
->control
.may_range_step
= 0;
2645 tp
->control
.step_frame_id
= null_frame_id
;
2646 tp
->control
.step_stack_frame_id
= null_frame_id
;
2647 tp
->control
.step_over_calls
= STEP_OVER_UNDEBUGGABLE
;
2648 tp
->control
.step_start_function
= NULL
;
2649 tp
->stop_requested
= 0;
2651 tp
->control
.stop_step
= 0;
2653 tp
->control
.proceed_to_finish
= 0;
2655 tp
->control
.stepping_command
= 0;
2657 /* Discard any remaining commands or status from previous stop. */
2658 bpstat_clear (&tp
->control
.stop_bpstat
);
2662 clear_proceed_status (int step
)
2664 /* With scheduler-locking replay, stop replaying other threads if we're
2665 not replaying the user-visible resume ptid.
2667 This is a convenience feature to not require the user to explicitly
2668 stop replaying the other threads. We're assuming that the user's
2669 intent is to resume tracing the recorded process. */
2670 if (!non_stop
&& scheduler_mode
== schedlock_replay
2671 && target_record_is_replaying (minus_one_ptid
)
2672 && !target_record_will_replay (user_visible_resume_ptid (step
),
2673 execution_direction
))
2674 target_record_stop_replaying ();
2676 if (!non_stop
&& inferior_ptid
!= null_ptid
)
2678 ptid_t resume_ptid
= user_visible_resume_ptid (step
);
2679 process_stratum_target
*resume_target
2680 = user_visible_resume_target (resume_ptid
);
2682 /* In all-stop mode, delete the per-thread status of all threads
2683 we're about to resume, implicitly and explicitly. */
2684 for (thread_info
*tp
: all_non_exited_threads (resume_target
, resume_ptid
))
2685 clear_proceed_status_thread (tp
);
2688 if (inferior_ptid
!= null_ptid
)
2690 struct inferior
*inferior
;
2694 /* If in non-stop mode, only delete the per-thread status of
2695 the current thread. */
2696 clear_proceed_status_thread (inferior_thread ());
2699 inferior
= current_inferior ();
2700 inferior
->control
.stop_soon
= NO_STOP_QUIETLY
;
2703 gdb::observers::about_to_proceed
.notify ();
2706 /* Returns true if TP is still stopped at a breakpoint that needs
2707 stepping-over in order to make progress. If the breakpoint is gone
2708 meanwhile, we can skip the whole step-over dance. */
2711 thread_still_needs_step_over_bp (struct thread_info
*tp
)
2713 if (tp
->stepping_over_breakpoint
)
2715 struct regcache
*regcache
= get_thread_regcache (tp
);
2717 if (breakpoint_here_p (regcache
->aspace (),
2718 regcache_read_pc (regcache
))
2719 == ordinary_breakpoint_here
)
2722 tp
->stepping_over_breakpoint
= 0;
2728 /* Check whether thread TP still needs to start a step-over in order
2729 to make progress when resumed. Returns an bitwise or of enum
2730 step_over_what bits, indicating what needs to be stepped over. */
2732 static step_over_what
2733 thread_still_needs_step_over (struct thread_info
*tp
)
2735 step_over_what what
= 0;
2737 if (thread_still_needs_step_over_bp (tp
))
2738 what
|= STEP_OVER_BREAKPOINT
;
2740 if (tp
->stepping_over_watchpoint
2741 && !target_have_steppable_watchpoint ())
2742 what
|= STEP_OVER_WATCHPOINT
;
2747 /* Returns true if scheduler locking applies. STEP indicates whether
2748 we're about to do a step/next-like command to a thread. */
2751 schedlock_applies (struct thread_info
*tp
)
2753 return (scheduler_mode
== schedlock_on
2754 || (scheduler_mode
== schedlock_step
2755 && tp
->control
.stepping_command
)
2756 || (scheduler_mode
== schedlock_replay
2757 && target_record_will_replay (minus_one_ptid
,
2758 execution_direction
)));
2761 /* Set process_stratum_target::COMMIT_RESUMED_STATE in all target
2762 stacks that have threads executing and don't have threads with
2766 maybe_set_commit_resumed_all_targets ()
2768 scoped_restore_current_thread restore_thread
;
2770 for (inferior
*inf
: all_non_exited_inferiors ())
2772 process_stratum_target
*proc_target
= inf
->process_target ();
2774 if (proc_target
->commit_resumed_state
)
2776 /* We already set this in a previous iteration, via another
2777 inferior sharing the process_stratum target. */
2781 /* If the target has no resumed threads, it would be useless to
2782 ask it to commit the resumed threads. */
2783 if (!proc_target
->threads_executing
)
2785 infrun_debug_printf ("not requesting commit-resumed for target "
2786 "%s, no resumed threads",
2787 proc_target
->shortname ());
2791 /* As an optimization, if a thread from this target has some
2792 status to report, handle it before requiring the target to
2793 commit its resumed threads: handling the status might lead to
2794 resuming more threads. */
2795 if (proc_target
->has_resumed_with_pending_wait_status ())
2797 infrun_debug_printf ("not requesting commit-resumed for target %s, a"
2798 " thread has a pending waitstatus",
2799 proc_target
->shortname ());
2803 switch_to_inferior_no_thread (inf
);
2805 if (target_has_pending_events ())
2807 infrun_debug_printf ("not requesting commit-resumed for target %s, "
2808 "target has pending events",
2809 proc_target
->shortname ());
2813 infrun_debug_printf ("enabling commit-resumed for target %s",
2814 proc_target
->shortname ());
2816 proc_target
->commit_resumed_state
= true;
2823 maybe_call_commit_resumed_all_targets ()
2825 scoped_restore_current_thread restore_thread
;
2827 for (inferior
*inf
: all_non_exited_inferiors ())
2829 process_stratum_target
*proc_target
= inf
->process_target ();
2831 if (!proc_target
->commit_resumed_state
)
2834 switch_to_inferior_no_thread (inf
);
2836 infrun_debug_printf ("calling commit_resumed for target %s",
2837 proc_target
->shortname());
2839 target_commit_resumed ();
/* To track nesting of scoped_disable_commit_resumed objects, ensuring
   that only the outermost one attempts to re-enable
   commit-resumed.  */
static bool enable_commit_resumed = true;
2850 scoped_disable_commit_resumed::scoped_disable_commit_resumed
2851 (const char *reason
)
2852 : m_reason (reason
),
2853 m_prev_enable_commit_resumed (enable_commit_resumed
)
2855 infrun_debug_printf ("reason=%s", m_reason
);
2857 enable_commit_resumed
= false;
2859 for (inferior
*inf
: all_non_exited_inferiors ())
2861 process_stratum_target
*proc_target
= inf
->process_target ();
2863 if (m_prev_enable_commit_resumed
)
2865 /* This is the outermost instance: force all
2866 COMMIT_RESUMED_STATE to false. */
2867 proc_target
->commit_resumed_state
= false;
2871 /* This is not the outermost instance, we expect
2872 COMMIT_RESUMED_STATE to have been cleared by the
2873 outermost instance. */
2874 gdb_assert (!proc_target
->commit_resumed_state
);
2882 scoped_disable_commit_resumed::reset ()
2888 infrun_debug_printf ("reason=%s", m_reason
);
2890 gdb_assert (!enable_commit_resumed
);
2892 enable_commit_resumed
= m_prev_enable_commit_resumed
;
2894 if (m_prev_enable_commit_resumed
)
2896 /* This is the outermost instance, re-enable
2897 COMMIT_RESUMED_STATE on the targets where it's possible. */
2898 maybe_set_commit_resumed_all_targets ();
2902 /* This is not the outermost instance, we expect
2903 COMMIT_RESUMED_STATE to still be false. */
2904 for (inferior
*inf
: all_non_exited_inferiors ())
2906 process_stratum_target
*proc_target
= inf
->process_target ();
2907 gdb_assert (!proc_target
->commit_resumed_state
);
2914 scoped_disable_commit_resumed::~scoped_disable_commit_resumed ()
2922 scoped_disable_commit_resumed::reset_and_commit ()
2925 maybe_call_commit_resumed_all_targets ();
2930 scoped_enable_commit_resumed::scoped_enable_commit_resumed
2931 (const char *reason
)
2932 : m_reason (reason
),
2933 m_prev_enable_commit_resumed (enable_commit_resumed
)
2935 infrun_debug_printf ("reason=%s", m_reason
);
2937 if (!enable_commit_resumed
)
2939 enable_commit_resumed
= true;
2941 /* Re-enable COMMIT_RESUMED_STATE on the targets where it's
2943 maybe_set_commit_resumed_all_targets ();
2945 maybe_call_commit_resumed_all_targets ();
2951 scoped_enable_commit_resumed::~scoped_enable_commit_resumed ()
2953 infrun_debug_printf ("reason=%s", m_reason
);
2955 gdb_assert (enable_commit_resumed
);
2957 enable_commit_resumed
= m_prev_enable_commit_resumed
;
2959 if (!enable_commit_resumed
)
2961 /* Force all COMMIT_RESUMED_STATE back to false. */
2962 for (inferior
*inf
: all_non_exited_inferiors ())
2964 process_stratum_target
*proc_target
= inf
->process_target ();
2965 proc_target
->commit_resumed_state
= false;
2970 /* Check that all the targets we're about to resume are in non-stop
2971 mode. Ideally, we'd only care whether all targets support
2972 target-async, but we're not there yet. E.g., stop_all_threads
2973 doesn't know how to handle all-stop targets. Also, the remote
2974 protocol in all-stop mode is synchronous, irrespective of
2975 target-async, which means that things like a breakpoint re-set
2976 triggered by one target would try to read memory from all targets
2980 check_multi_target_resumption (process_stratum_target
*resume_target
)
2982 if (!non_stop
&& resume_target
== nullptr)
2984 scoped_restore_current_thread restore_thread
;
2986 /* This is used to track whether we're resuming more than one
2988 process_stratum_target
*first_connection
= nullptr;
2990 /* The first inferior we see with a target that does not work in
2991 always-non-stop mode. */
2992 inferior
*first_not_non_stop
= nullptr;
2994 for (inferior
*inf
: all_non_exited_inferiors ())
2996 switch_to_inferior_no_thread (inf
);
2998 if (!target_has_execution ())
3001 process_stratum_target
*proc_target
3002 = current_inferior ()->process_target();
3004 if (!target_is_non_stop_p ())
3005 first_not_non_stop
= inf
;
3007 if (first_connection
== nullptr)
3008 first_connection
= proc_target
;
3009 else if (first_connection
!= proc_target
3010 && first_not_non_stop
!= nullptr)
3012 switch_to_inferior_no_thread (first_not_non_stop
);
3014 proc_target
= current_inferior ()->process_target();
3016 error (_("Connection %d (%s) does not support "
3017 "multi-target resumption."),
3018 proc_target
->connection_number
,
3019 make_target_connection_string (proc_target
).c_str ());
3025 /* Basic routine for continuing the program in various fashions.
3027 ADDR is the address to resume at, or -1 for resume where stopped.
3028 SIGGNAL is the signal to give it, or GDB_SIGNAL_0 for none,
3029 or GDB_SIGNAL_DEFAULT for act according to how it stopped.
3031 You should call clear_proceed_status before calling proceed. */
3034 proceed (CORE_ADDR addr
, enum gdb_signal siggnal
)
3036 INFRUN_SCOPED_DEBUG_ENTER_EXIT
;
3038 struct regcache
*regcache
;
3039 struct gdbarch
*gdbarch
;
3041 struct execution_control_state ecss
;
3042 struct execution_control_state
*ecs
= &ecss
;
3045 /* If we're stopped at a fork/vfork, follow the branch set by the
3046 "set follow-fork-mode" command; otherwise, we'll just proceed
3047 resuming the current thread. */
3048 if (!follow_fork ())
3050 /* The target for some reason decided not to resume. */
3052 if (target_can_async_p ())
3053 inferior_event_handler (INF_EXEC_COMPLETE
);
3057 /* We'll update this if & when we switch to a new thread. */
3058 previous_inferior_ptid
= inferior_ptid
;
3060 regcache
= get_current_regcache ();
3061 gdbarch
= regcache
->arch ();
3062 const address_space
*aspace
= regcache
->aspace ();
3064 pc
= regcache_read_pc_protected (regcache
);
3066 thread_info
*cur_thr
= inferior_thread ();
3068 /* Fill in with reasonable starting values. */
3069 init_thread_stepping_state (cur_thr
);
3071 gdb_assert (!thread_is_in_step_over_chain (cur_thr
));
3074 = user_visible_resume_ptid (cur_thr
->control
.stepping_command
);
3075 process_stratum_target
*resume_target
3076 = user_visible_resume_target (resume_ptid
);
3078 check_multi_target_resumption (resume_target
);
3080 if (addr
== (CORE_ADDR
) -1)
3082 if (pc
== cur_thr
->stop_pc ()
3083 && breakpoint_here_p (aspace
, pc
) == ordinary_breakpoint_here
3084 && execution_direction
!= EXEC_REVERSE
)
3085 /* There is a breakpoint at the address we will resume at,
3086 step one instruction before inserting breakpoints so that
3087 we do not stop right away (and report a second hit at this
3090 Note, we don't do this in reverse, because we won't
3091 actually be executing the breakpoint insn anyway.
3092 We'll be (un-)executing the previous instruction. */
3093 cur_thr
->stepping_over_breakpoint
= 1;
3094 else if (gdbarch_single_step_through_delay_p (gdbarch
)
3095 && gdbarch_single_step_through_delay (gdbarch
,
3096 get_current_frame ()))
3097 /* We stepped onto an instruction that needs to be stepped
3098 again before re-inserting the breakpoint, do so. */
3099 cur_thr
->stepping_over_breakpoint
= 1;
3103 regcache_write_pc (regcache
, addr
);
3106 if (siggnal
!= GDB_SIGNAL_DEFAULT
)
3107 cur_thr
->set_stop_signal (siggnal
);
3109 /* If an exception is thrown from this point on, make sure to
3110 propagate GDB's knowledge of the executing state to the
3111 frontend/user running state. */
3112 scoped_finish_thread_state
finish_state (resume_target
, resume_ptid
);
3114 /* Even if RESUME_PTID is a wildcard, and we end up resuming fewer
3115 threads (e.g., we might need to set threads stepping over
3116 breakpoints first), from the user/frontend's point of view, all
3117 threads in RESUME_PTID are now running. Unless we're calling an
3118 inferior function, as in that case we pretend the inferior
3119 doesn't run at all. */
3120 if (!cur_thr
->control
.in_infcall
)
3121 set_running (resume_target
, resume_ptid
, true);
3123 infrun_debug_printf ("addr=%s, signal=%s", paddress (gdbarch
, addr
),
3124 gdb_signal_to_symbol_string (siggnal
));
3126 annotate_starting ();
3128 /* Make sure that output from GDB appears before output from the
3130 gdb_flush (gdb_stdout
);
3132 /* Since we've marked the inferior running, give it the terminal. A
3133 QUIT/Ctrl-C from here on is forwarded to the target (which can
3134 still detect attempts to unblock a stuck connection with repeated
3135 Ctrl-C from within target_pass_ctrlc). */
3136 target_terminal::inferior ();
3138 /* In a multi-threaded task we may select another thread and
3139 then continue or step.
3141 But if a thread that we're resuming had stopped at a breakpoint,
3142 it will immediately cause another breakpoint stop without any
3143 execution (i.e. it will report a breakpoint hit incorrectly). So
3144 we must step over it first.
3146 Look for threads other than the current (TP) that reported a
3147 breakpoint hit and haven't been resumed yet since. */
3149 /* If scheduler locking applies, we can avoid iterating over all
3151 if (!non_stop
&& !schedlock_applies (cur_thr
))
3153 for (thread_info
*tp
: all_non_exited_threads (resume_target
,
3156 switch_to_thread_no_regs (tp
);
3158 /* Ignore the current thread here. It's handled
3163 if (!thread_still_needs_step_over (tp
))
3166 gdb_assert (!thread_is_in_step_over_chain (tp
));
3168 infrun_debug_printf ("need to step-over [%s] first",
3169 target_pid_to_str (tp
->ptid
).c_str ());
3171 global_thread_step_over_chain_enqueue (tp
);
3174 switch_to_thread (cur_thr
);
3177 /* Enqueue the current thread last, so that we move all other
3178 threads over their breakpoints first. */
3179 if (cur_thr
->stepping_over_breakpoint
)
3180 global_thread_step_over_chain_enqueue (cur_thr
);
3182 /* If the thread isn't started, we'll still need to set its prev_pc,
3183 so that switch_back_to_stepped_thread knows the thread hasn't
3184 advanced. Must do this before resuming any thread, as in
3185 all-stop/remote, once we resume we can't send any other packet
3186 until the target stops again. */
3187 cur_thr
->prev_pc
= regcache_read_pc_protected (regcache
);
3190 scoped_disable_commit_resumed
disable_commit_resumed ("proceeding");
3192 started
= start_step_over ();
3194 if (step_over_info_valid_p ())
3196 /* Either this thread started a new in-line step over, or some
3197 other thread was already doing one. In either case, don't
3198 resume anything else until the step-over is finished. */
3200 else if (started
&& !target_is_non_stop_p ())
3202 /* A new displaced stepping sequence was started. In all-stop,
3203 we can't talk to the target anymore until it next stops. */
3205 else if (!non_stop
&& target_is_non_stop_p ())
3207 INFRUN_SCOPED_DEBUG_START_END
3208 ("resuming threads, all-stop-on-top-of-non-stop");
3210 /* In all-stop, but the target is always in non-stop mode.
3211 Start all other threads that are implicitly resumed too. */
3212 for (thread_info
*tp
: all_non_exited_threads (resume_target
,
3215 switch_to_thread_no_regs (tp
);
3217 if (!tp
->inf
->has_execution ())
3219 infrun_debug_printf ("[%s] target has no execution",
3220 target_pid_to_str (tp
->ptid
).c_str ());
3226 infrun_debug_printf ("[%s] resumed",
3227 target_pid_to_str (tp
->ptid
).c_str ());
3228 gdb_assert (tp
->executing
|| tp
->has_pending_waitstatus ());
3232 if (thread_is_in_step_over_chain (tp
))
3234 infrun_debug_printf ("[%s] needs step-over",
3235 target_pid_to_str (tp
->ptid
).c_str ());
3239 infrun_debug_printf ("resuming %s",
3240 target_pid_to_str (tp
->ptid
).c_str ());
3242 reset_ecs (ecs
, tp
);
3243 switch_to_thread (tp
);
3244 keep_going_pass_signal (ecs
);
3245 if (!ecs
->wait_some_more
)
3246 error (_("Command aborted."));
3249 else if (!cur_thr
->resumed () && !thread_is_in_step_over_chain (cur_thr
))
3251 /* The thread wasn't started, and isn't queued, run it now. */
3252 reset_ecs (ecs
, cur_thr
);
3253 switch_to_thread (cur_thr
);
3254 keep_going_pass_signal (ecs
);
3255 if (!ecs
->wait_some_more
)
3256 error (_("Command aborted."));
3259 disable_commit_resumed
.reset_and_commit ();
3262 finish_state
.release ();
3264 /* If we've switched threads above, switch back to the previously
3265 current thread. We don't want the user to see a different
3267 switch_to_thread (cur_thr
);
3269 /* Tell the event loop to wait for it to stop. If the target
3270 supports asynchronous execution, it'll do this from within
3272 if (!target_can_async_p ())
3273 mark_async_event_handler (infrun_async_inferior_event_token
);
3277 /* Start remote-debugging of a machine over a serial link. */
3280 start_remote (int from_tty
)
3282 inferior
*inf
= current_inferior ();
3283 inf
->control
.stop_soon
= STOP_QUIETLY_REMOTE
;
3285 /* Always go on waiting for the target, regardless of the mode. */
3286 /* FIXME: cagney/1999-09-23: At present it isn't possible to
3287 indicate to wait_for_inferior that a target should timeout if
3288 nothing is returned (instead of just blocking). Because of this,
3289 targets expecting an immediate response need to, internally, set
3290 things up so that the target_wait() is forced to eventually
3292 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
3293 differentiate to its caller what the state of the target is after
3294 the initial open has been performed. Here we're assuming that
3295 the target has stopped. It should be possible to eventually have
3296 target_open() return to the caller an indication that the target
3297 is currently running and GDB state should be set to the same as
3298 for an async run. */
3299 wait_for_inferior (inf
);
3301 /* Now that the inferior has stopped, do any bookkeeping like
3302 loading shared libraries. We want to do this before normal_stop,
3303 so that the displayed frame is up to date. */
3304 post_create_inferior (from_tty
);
3309 /* Initialize static vars when a new inferior begins. */
3312 init_wait_for_inferior (void)
3314 /* These are meaningless until the first time through wait_for_inferior. */
3316 breakpoint_init_inferior (inf_starting
);
3318 clear_proceed_status (0);
3320 nullify_last_target_wait_ptid ();
3322 previous_inferior_ptid
= inferior_ptid
;
/* Forward declarations for the event-handling machinery defined
   below.  */

static void handle_inferior_event (struct execution_control_state *ecs);

static void handle_step_into_function (struct gdbarch *gdbarch,
				       struct execution_control_state *ecs);
static void handle_step_into_function_backward (struct gdbarch *gdbarch,
						struct execution_control_state *ecs);
static void handle_signal_stop (struct execution_control_state *ecs);
static void check_exception_resume (struct execution_control_state *,
				    struct frame_info *);

static void end_stepping_range (struct execution_control_state *ecs);
static void stop_waiting (struct execution_control_state *ecs);
static void keep_going (struct execution_control_state *ecs);
static void process_event_stop_test (struct execution_control_state *ecs);
static bool switch_back_to_stepped_thread (struct execution_control_state *ecs);
3343 /* This function is attached as a "thread_stop_requested" observer.
3344 Cleanup local state that assumed the PTID was to be resumed, and
3345 report the stop to the frontend. */
3348 infrun_thread_stop_requested (ptid_t ptid
)
3350 process_stratum_target
*curr_target
= current_inferior ()->process_target ();
3352 /* PTID was requested to stop. If the thread was already stopped,
3353 but the user/frontend doesn't know about that yet (e.g., the
3354 thread had been temporarily paused for some step-over), set up
3355 for reporting the stop now. */
3356 for (thread_info
*tp
: all_threads (curr_target
, ptid
))
3358 if (tp
->state
!= THREAD_RUNNING
)
3363 /* Remove matching threads from the step-over queue, so
3364 start_step_over doesn't try to resume them
3366 if (thread_is_in_step_over_chain (tp
))
3367 global_thread_step_over_chain_remove (tp
);
3369 /* If the thread is stopped, but the user/frontend doesn't
3370 know about that yet, queue a pending event, as if the
3371 thread had just stopped now. Unless the thread already had
3373 if (!tp
->has_pending_waitstatus ())
3375 target_waitstatus ws
;
3376 ws
.kind
= TARGET_WAITKIND_STOPPED
;
3377 ws
.value
.sig
= GDB_SIGNAL_0
;
3378 tp
->set_pending_waitstatus (ws
);
3381 /* Clear the inline-frame state, since we're re-processing the
3383 clear_inline_frame_state (tp
);
3385 /* If this thread was paused because some other thread was
3386 doing an inline-step over, let that finish first. Once
3387 that happens, we'll restart all threads and consume pending
3388 stop events then. */
3389 if (step_over_info_valid_p ())
3392 /* Otherwise we can process the (new) pending event now. Set
3393 it so this pending event is considered by
3395 tp
->set_resumed (true);
3400 infrun_thread_thread_exit (struct thread_info
*tp
, int silent
)
3402 if (target_last_proc_target
== tp
->inf
->process_target ()
3403 && target_last_wait_ptid
== tp
->ptid
)
3404 nullify_last_target_wait_ptid ();
3407 /* Delete the step resume, single-step and longjmp/exception resume
3408 breakpoints of TP. */
3411 delete_thread_infrun_breakpoints (struct thread_info
*tp
)
3413 delete_step_resume_breakpoint (tp
);
3414 delete_exception_resume_breakpoint (tp
);
3415 delete_single_step_breakpoints (tp
);
3418 /* If the target still has execution, call FUNC for each thread that
3419 just stopped. In all-stop, that's all the non-exited threads; in
3420 non-stop, that's the current thread, only. */
3422 typedef void (*for_each_just_stopped_thread_callback_func
)
3423 (struct thread_info
*tp
);
3426 for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func
)
3428 if (!target_has_execution () || inferior_ptid
== null_ptid
)
3431 if (target_is_non_stop_p ())
3433 /* If in non-stop mode, only the current thread stopped. */
3434 func (inferior_thread ());
3438 /* In all-stop mode, all threads have stopped. */
3439 for (thread_info
*tp
: all_non_exited_threads ())
3444 /* Delete the step resume and longjmp/exception resume breakpoints of
3445 the threads that just stopped. */
3448 delete_just_stopped_threads_infrun_breakpoints (void)
3450 for_each_just_stopped_thread (delete_thread_infrun_breakpoints
);
3453 /* Delete the single-step breakpoints of the threads that just
3457 delete_just_stopped_threads_single_step_breakpoints (void)
3459 for_each_just_stopped_thread (delete_single_step_breakpoints
);
3465 print_target_wait_results (ptid_t waiton_ptid
, ptid_t result_ptid
,
3466 const struct target_waitstatus
*ws
)
3468 infrun_debug_printf ("target_wait (%d.%ld.%ld [%s], status) =",
3472 target_pid_to_str (waiton_ptid
).c_str ());
3473 infrun_debug_printf (" %d.%ld.%ld [%s],",
3477 target_pid_to_str (result_ptid
).c_str ());
3478 infrun_debug_printf (" %s", target_waitstatus_to_string (ws
).c_str ());
3481 /* Select a thread at random, out of those which are resumed and have
3484 static struct thread_info
*
3485 random_pending_event_thread (inferior
*inf
, ptid_t waiton_ptid
)
3487 process_stratum_target
*proc_target
= inf
->process_target ();
3489 = proc_target
->random_resumed_with_pending_wait_status (inf
, waiton_ptid
);
3491 if (thread
== nullptr)
3493 infrun_debug_printf ("None found.");
3497 infrun_debug_printf ("Found %s.", target_pid_to_str (thread
->ptid
).c_str ());
3498 gdb_assert (thread
->resumed ());
3499 gdb_assert (thread
->has_pending_waitstatus ());
3504 /* Wrapper for target_wait that first checks whether threads have
3505 pending statuses to report before actually asking the target for
3506 more events. INF is the inferior we're using to call target_wait
3510 do_target_wait_1 (inferior
*inf
, ptid_t ptid
,
3511 target_waitstatus
*status
, target_wait_flags options
)
3514 struct thread_info
*tp
;
3516 /* We know that we are looking for an event in the target of inferior
3517 INF, but we don't know which thread the event might come from. As
3518 such we want to make sure that INFERIOR_PTID is reset so that none of
3519 the wait code relies on it - doing so is always a mistake. */
3520 switch_to_inferior_no_thread (inf
);
3522 /* First check if there is a resumed thread with a wait status
3524 if (ptid
== minus_one_ptid
|| ptid
.is_pid ())
3526 tp
= random_pending_event_thread (inf
, ptid
);
3530 infrun_debug_printf ("Waiting for specific thread %s.",
3531 target_pid_to_str (ptid
).c_str ());
3533 /* We have a specific thread to check. */
3534 tp
= find_thread_ptid (inf
, ptid
);
3535 gdb_assert (tp
!= NULL
);
3536 if (!tp
->has_pending_waitstatus ())
3541 && (tp
->stop_reason () == TARGET_STOPPED_BY_SW_BREAKPOINT
3542 || tp
->stop_reason () == TARGET_STOPPED_BY_HW_BREAKPOINT
))
3544 struct regcache
*regcache
= get_thread_regcache (tp
);
3545 struct gdbarch
*gdbarch
= regcache
->arch ();
3549 pc
= regcache_read_pc (regcache
);
3551 if (pc
!= tp
->stop_pc ())
3553 infrun_debug_printf ("PC of %s changed. was=%s, now=%s",
3554 target_pid_to_str (tp
->ptid
).c_str (),
3555 paddress (gdbarch
, tp
->stop_pc ()),
3556 paddress (gdbarch
, pc
));
3559 else if (!breakpoint_inserted_here_p (regcache
->aspace (), pc
))
3561 infrun_debug_printf ("previous breakpoint of %s, at %s gone",
3562 target_pid_to_str (tp
->ptid
).c_str (),
3563 paddress (gdbarch
, pc
));
3570 infrun_debug_printf ("pending event of %s cancelled.",
3571 target_pid_to_str (tp
->ptid
).c_str ());
3573 tp
->clear_pending_waitstatus ();
3574 target_waitstatus ws
;
3575 ws
.kind
= TARGET_WAITKIND_SPURIOUS
;
3576 tp
->set_pending_waitstatus (ws
);
3577 tp
->set_stop_reason (TARGET_STOPPED_BY_NO_REASON
);
3583 infrun_debug_printf ("Using pending wait status %s for %s.",
3584 target_waitstatus_to_string
3585 (&tp
->pending_waitstatus ()).c_str (),
3586 target_pid_to_str (tp
->ptid
).c_str ());
3588 /* Now that we've selected our final event LWP, un-adjust its PC
3589 if it was a software breakpoint (and the target doesn't
3590 always adjust the PC itself). */
3591 if (tp
->stop_reason () == TARGET_STOPPED_BY_SW_BREAKPOINT
3592 && !target_supports_stopped_by_sw_breakpoint ())
3594 struct regcache
*regcache
;
3595 struct gdbarch
*gdbarch
;
3598 regcache
= get_thread_regcache (tp
);
3599 gdbarch
= regcache
->arch ();
3601 decr_pc
= gdbarch_decr_pc_after_break (gdbarch
);
3606 pc
= regcache_read_pc (regcache
);
3607 regcache_write_pc (regcache
, pc
+ decr_pc
);
3611 tp
->set_stop_reason (TARGET_STOPPED_BY_NO_REASON
);
3612 *status
= tp
->pending_waitstatus ();
3613 tp
->clear_pending_waitstatus ();
3615 /* Wake up the event loop again, until all pending events are
3617 if (target_is_async_p ())
3618 mark_async_event_handler (infrun_async_inferior_event_token
);
3622 /* But if we don't find one, we'll have to wait. */
3624 /* We can't ask a non-async target to do a non-blocking wait, so this will be
3626 if (!target_can_async_p ())
3627 options
&= ~TARGET_WNOHANG
;
3629 if (deprecated_target_wait_hook
)
3630 event_ptid
= deprecated_target_wait_hook (ptid
, status
, options
);
3632 event_ptid
= target_wait (ptid
, status
, options
);
3637 /* Wrapper for target_wait that first checks whether threads have
3638 pending statuses to report before actually asking the target for
3639 more events. Polls for events from all inferiors/targets. */
3642 do_target_wait (execution_control_state
*ecs
, target_wait_flags options
)
3644 int num_inferiors
= 0;
3645 int random_selector
;
3647 /* For fairness, we pick the first inferior/target to poll at random
3648 out of all inferiors that may report events, and then continue
3649 polling the rest of the inferior list starting from that one in a
3650 circular fashion until the whole list is polled once. */
3652 auto inferior_matches
= [] (inferior
*inf
)
3654 return inf
->process_target () != nullptr;
3657 /* First see how many matching inferiors we have. */
3658 for (inferior
*inf
: all_inferiors ())
3659 if (inferior_matches (inf
))
3662 if (num_inferiors
== 0)
3664 ecs
->ws
.kind
= TARGET_WAITKIND_IGNORE
;
3668 /* Now randomly pick an inferior out of those that matched. */
3669 random_selector
= (int)
3670 ((num_inferiors
* (double) rand ()) / (RAND_MAX
+ 1.0));
3672 if (num_inferiors
> 1)
3673 infrun_debug_printf ("Found %d inferiors, starting at #%d",
3674 num_inferiors
, random_selector
);
3676 /* Select the Nth inferior that matched. */
3678 inferior
*selected
= nullptr;
3680 for (inferior
*inf
: all_inferiors ())
3681 if (inferior_matches (inf
))
3682 if (random_selector
-- == 0)
3688 /* Now poll for events out of each of the matching inferior's
3689 targets, starting from the selected one. */
3691 auto do_wait
= [&] (inferior
*inf
)
3693 ecs
->ptid
= do_target_wait_1 (inf
, minus_one_ptid
, &ecs
->ws
, options
);
3694 ecs
->target
= inf
->process_target ();
3695 return (ecs
->ws
.kind
!= TARGET_WAITKIND_IGNORE
);
3698 /* Needed in 'all-stop + target-non-stop' mode, because we end up
3699 here spuriously after the target is all stopped and we've already
3700 reported the stop to the user, polling for events. */
3701 scoped_restore_current_thread restore_thread
;
3703 intrusive_list_iterator
<inferior
> start
3704 = inferior_list
.iterator_to (*selected
);
3706 for (intrusive_list_iterator
<inferior
> it
= start
;
3707 it
!= inferior_list
.end ();
3710 inferior
*inf
= &*it
;
3712 if (inferior_matches (inf
) && do_wait (inf
))
3716 for (intrusive_list_iterator
<inferior
> it
= inferior_list
.begin ();
3720 inferior
*inf
= &*it
;
3722 if (inferior_matches (inf
) && do_wait (inf
))
3726 ecs
->ws
.kind
= TARGET_WAITKIND_IGNORE
;
3730 /* An event reported by wait_one. */
3732 struct wait_one_event
3734 /* The target the event came out of. */
3735 process_stratum_target
*target
;
3737 /* The PTID the event was for. */
3740 /* The waitstatus. */
3741 target_waitstatus ws
;
3744 static bool handle_one (const wait_one_event
&event
);
3745 static void restart_threads (struct thread_info
*event_thread
);
3747 /* Prepare and stabilize the inferior for detaching it. E.g.,
3748 detaching while a thread is displaced stepping is a recipe for
3749 crashing it, as nothing would readjust the PC out of the scratch
3753 prepare_for_detach (void)
3755 struct inferior
*inf
= current_inferior ();
3756 ptid_t pid_ptid
= ptid_t (inf
->pid
);
3757 scoped_restore_current_thread restore_thread
;
3759 scoped_restore restore_detaching
= make_scoped_restore (&inf
->detaching
, true);
3761 /* Remove all threads of INF from the global step-over chain. We
3762 want to stop any ongoing step-over, not start any new one. */
3763 thread_step_over_list_safe_range range
3764 = make_thread_step_over_list_safe_range (global_thread_step_over_list
);
3766 for (thread_info
*tp
: range
)
3769 infrun_debug_printf ("removing thread %s from global step over chain",
3770 target_pid_to_str (tp
->ptid
).c_str ());
3771 global_thread_step_over_chain_remove (tp
);
3774 /* If we were already in the middle of an inline step-over, and the
3775 thread stepping belongs to the inferior we're detaching, we need
3776 to restart the threads of other inferiors. */
3777 if (step_over_info
.thread
!= -1)
3779 infrun_debug_printf ("inline step-over in-process while detaching");
3781 thread_info
*thr
= find_thread_global_id (step_over_info
.thread
);
3782 if (thr
->inf
== inf
)
3784 /* Since we removed threads of INF from the step-over chain,
3785 we know this won't start a step-over for INF. */
3786 clear_step_over_info ();
3788 if (target_is_non_stop_p ())
3790 /* Start a new step-over in another thread if there's
3791 one that needs it. */
3794 /* Restart all other threads (except the
3795 previously-stepping thread, since that one is still
3797 if (!step_over_info_valid_p ())
3798 restart_threads (thr
);
3803 if (displaced_step_in_progress (inf
))
3805 infrun_debug_printf ("displaced-stepping in-process while detaching");
3807 /* Stop threads currently displaced stepping, aborting it. */
3809 for (thread_info
*thr
: inf
->non_exited_threads ())
3811 if (thr
->displaced_step_state
.in_progress ())
3815 if (!thr
->stop_requested
)
3817 target_stop (thr
->ptid
);
3818 thr
->stop_requested
= true;
3822 thr
->set_resumed (false);
3826 while (displaced_step_in_progress (inf
))
3828 wait_one_event event
;
3830 event
.target
= inf
->process_target ();
3831 event
.ptid
= do_target_wait_1 (inf
, pid_ptid
, &event
.ws
, 0);
3834 print_target_wait_results (pid_ptid
, event
.ptid
, &event
.ws
);
3839 /* It's OK to leave some of the threads of INF stopped, since
3840 they'll be detached shortly. */
3844 /* Wait for control to return from inferior to debugger.
3846 If inferior gets a signal, we may decide to start it up again
3847 instead of returning. That is why there is a loop in this function.
3848 When this function actually returns it means the inferior
3849 should be left stopped and GDB should read more commands. */
3852 wait_for_inferior (inferior
*inf
)
3854 infrun_debug_printf ("wait_for_inferior ()");
3856 SCOPE_EXIT
{ delete_just_stopped_threads_infrun_breakpoints (); };
3858 /* If an error happens while handling the event, propagate GDB's
3859 knowledge of the executing state to the frontend/user running
3861 scoped_finish_thread_state finish_state
3862 (inf
->process_target (), minus_one_ptid
);
3866 struct execution_control_state ecss
;
3867 struct execution_control_state
*ecs
= &ecss
;
3869 memset (ecs
, 0, sizeof (*ecs
));
3871 overlay_cache_invalid
= 1;
3873 /* Flush target cache before starting to handle each event.
3874 Target was running and cache could be stale. This is just a
3875 heuristic. Running threads may modify target memory, but we
3876 don't get any event. */
3877 target_dcache_invalidate ();
3879 ecs
->ptid
= do_target_wait_1 (inf
, minus_one_ptid
, &ecs
->ws
, 0);
3880 ecs
->target
= inf
->process_target ();
3883 print_target_wait_results (minus_one_ptid
, ecs
->ptid
, &ecs
->ws
);
3885 /* Now figure out what to do with the result of the result. */
3886 handle_inferior_event (ecs
);
3888 if (!ecs
->wait_some_more
)
3892 /* No error, don't finish the state yet. */
3893 finish_state
.release ();
3896 /* Cleanup that reinstalls the readline callback handler, if the
3897 target is running in the background. If while handling the target
3898 event something triggered a secondary prompt, like e.g., a
3899 pagination prompt, we'll have removed the callback handler (see
3900 gdb_readline_wrapper_line). Need to do this as we go back to the
3901 event loop, ready to process further input. Note this has no
3902 effect if the handler hasn't actually been removed, because calling
3903 rl_callback_handler_install resets the line buffer, thus losing
3907 reinstall_readline_callback_handler_cleanup ()
3909 struct ui
*ui
= current_ui
;
3913 /* We're not going back to the top level event loop yet. Don't
3914 install the readline callback, as it'd prep the terminal,
3915 readline-style (raw, noecho) (e.g., --batch). We'll install
3916 it the next time the prompt is displayed, when we're ready
3921 if (ui
->command_editing
&& ui
->prompt_state
!= PROMPT_BLOCKED
)
3922 gdb_rl_callback_handler_reinstall ();
3925 /* Clean up the FSMs of threads that are now stopped. In non-stop,
3926 that's just the event thread. In all-stop, that's all threads. */
3929 clean_up_just_stopped_threads_fsms (struct execution_control_state
*ecs
)
3931 if (ecs
->event_thread
!= NULL
3932 && ecs
->event_thread
->thread_fsm
!= NULL
)
3933 ecs
->event_thread
->thread_fsm
->clean_up (ecs
->event_thread
);
3937 for (thread_info
*thr
: all_non_exited_threads ())
3939 if (thr
->thread_fsm
== NULL
)
3941 if (thr
== ecs
->event_thread
)
3944 switch_to_thread (thr
);
3945 thr
->thread_fsm
->clean_up (thr
);
3948 if (ecs
->event_thread
!= NULL
)
3949 switch_to_thread (ecs
->event_thread
);
3953 /* Helper for all_uis_check_sync_execution_done that works on the
3957 check_curr_ui_sync_execution_done (void)
3959 struct ui
*ui
= current_ui
;
3961 if (ui
->prompt_state
== PROMPT_NEEDED
3963 && !gdb_in_secondary_prompt_p (ui
))
3965 target_terminal::ours ();
3966 gdb::observers::sync_execution_done
.notify ();
3967 ui_register_input_event_handler (ui
);
3974 all_uis_check_sync_execution_done (void)
3976 SWITCH_THRU_ALL_UIS ()
3978 check_curr_ui_sync_execution_done ();
3985 all_uis_on_sync_execution_starting (void)
3987 SWITCH_THRU_ALL_UIS ()
3989 if (current_ui
->prompt_state
== PROMPT_NEEDED
)
3990 async_disable_stdin ();
3994 /* Asynchronous version of wait_for_inferior. It is called by the
3995 event loop whenever a change of state is detected on the file
3996 descriptor corresponding to the target. It can be called more than
3997 once to complete a single execution command. In such cases we need
3998 to keep the state in a global variable ECSS. If it is the last time
3999 that this function is called for a single execution command, then
4000 report to the user that the inferior has stopped, and do the
4001 necessary cleanups. */
4004 fetch_inferior_event ()
4006 INFRUN_SCOPED_DEBUG_ENTER_EXIT
;
4008 struct execution_control_state ecss
;
4009 struct execution_control_state
*ecs
= &ecss
;
4012 memset (ecs
, 0, sizeof (*ecs
));
4014 /* Events are always processed with the main UI as current UI. This
4015 way, warnings, debug output, etc. are always consistently sent to
4016 the main console. */
4017 scoped_restore save_ui
= make_scoped_restore (¤t_ui
, main_ui
);
4019 /* Temporarily disable pagination. Otherwise, the user would be
4020 given an option to press 'q' to quit, which would cause an early
4021 exit and could leave GDB in a half-baked state. */
4022 scoped_restore save_pagination
4023 = make_scoped_restore (&pagination_enabled
, false);
4025 /* End up with readline processing input, if necessary. */
4027 SCOPE_EXIT
{ reinstall_readline_callback_handler_cleanup (); };
4029 /* We're handling a live event, so make sure we're doing live
4030 debugging. If we're looking at traceframes while the target is
4031 running, we're going to need to get back to that mode after
4032 handling the event. */
4033 gdb::optional
<scoped_restore_current_traceframe
> maybe_restore_traceframe
;
4036 maybe_restore_traceframe
.emplace ();
4037 set_current_traceframe (-1);
4040 /* The user/frontend should not notice a thread switch due to
4041 internal events. Make sure we revert to the user selected
4042 thread and frame after handling the event and running any
4043 breakpoint commands. */
4044 scoped_restore_current_thread restore_thread
;
4046 overlay_cache_invalid
= 1;
4047 /* Flush target cache before starting to handle each event. Target
4048 was running and cache could be stale. This is just a heuristic.
4049 Running threads may modify target memory, but we don't get any
4051 target_dcache_invalidate ();
4053 scoped_restore save_exec_dir
4054 = make_scoped_restore (&execution_direction
,
4055 target_execution_direction ());
4057 /* Allow targets to pause their resumed threads while we handle
4059 scoped_disable_commit_resumed
disable_commit_resumed ("handling event");
4061 if (!do_target_wait (ecs
, TARGET_WNOHANG
))
4063 infrun_debug_printf ("do_target_wait returned no event");
4064 disable_commit_resumed
.reset_and_commit ();
4068 gdb_assert (ecs
->ws
.kind
!= TARGET_WAITKIND_IGNORE
);
4070 /* Switch to the target that generated the event, so we can do
4072 switch_to_target_no_thread (ecs
->target
);
4075 print_target_wait_results (minus_one_ptid
, ecs
->ptid
, &ecs
->ws
);
4077 /* If an error happens while handling the event, propagate GDB's
4078 knowledge of the executing state to the frontend/user running
4080 ptid_t finish_ptid
= !target_is_non_stop_p () ? minus_one_ptid
: ecs
->ptid
;
4081 scoped_finish_thread_state
finish_state (ecs
->target
, finish_ptid
);
4083 /* Get executed before scoped_restore_current_thread above to apply
4084 still for the thread which has thrown the exception. */
4085 auto defer_bpstat_clear
4086 = make_scope_exit (bpstat_clear_actions
);
4087 auto defer_delete_threads
4088 = make_scope_exit (delete_just_stopped_threads_infrun_breakpoints
);
4090 /* Now figure out what to do with the result of the result. */
4091 handle_inferior_event (ecs
);
4093 if (!ecs
->wait_some_more
)
4095 struct inferior
*inf
= find_inferior_ptid (ecs
->target
, ecs
->ptid
);
4096 bool should_stop
= true;
4097 struct thread_info
*thr
= ecs
->event_thread
;
4099 delete_just_stopped_threads_infrun_breakpoints ();
4103 struct thread_fsm
*thread_fsm
= thr
->thread_fsm
;
4105 if (thread_fsm
!= NULL
)
4106 should_stop
= thread_fsm
->should_stop (thr
);
4115 bool should_notify_stop
= true;
4118 clean_up_just_stopped_threads_fsms (ecs
);
4120 if (thr
!= NULL
&& thr
->thread_fsm
!= NULL
)
4121 should_notify_stop
= thr
->thread_fsm
->should_notify_stop ();
4123 if (should_notify_stop
)
4125 /* We may not find an inferior if this was a process exit. */
4126 if (inf
== NULL
|| inf
->control
.stop_soon
== NO_STOP_QUIETLY
)
4127 proceeded
= normal_stop ();
4132 inferior_event_handler (INF_EXEC_COMPLETE
);
4136 /* If we got a TARGET_WAITKIND_NO_RESUMED event, then the
4137 previously selected thread is gone. We have two
4138 choices - switch to no thread selected, or restore the
4139 previously selected thread (now exited). We chose the
4140 later, just because that's what GDB used to do. After
4141 this, "info threads" says "The current thread <Thread
4142 ID 2> has terminated." instead of "No thread
4146 && ecs
->ws
.kind
!= TARGET_WAITKIND_NO_RESUMED
)
4147 restore_thread
.dont_restore ();
4151 defer_delete_threads
.release ();
4152 defer_bpstat_clear
.release ();
4154 /* No error, don't finish the thread states yet. */
4155 finish_state
.release ();
4157 disable_commit_resumed
.reset_and_commit ();
4159 /* This scope is used to ensure that readline callbacks are
4160 reinstalled here. */
4163 /* If a UI was in sync execution mode, and now isn't, restore its
4164 prompt (a synchronous execution command has finished, and we're
4165 ready for input). */
4166 all_uis_check_sync_execution_done ();
4169 && exec_done_display_p
4170 && (inferior_ptid
== null_ptid
4171 || inferior_thread ()->state
!= THREAD_RUNNING
))
4172 printf_unfiltered (_("completed.\n"));
4178 set_step_info (thread_info
*tp
, struct frame_info
*frame
,
4179 struct symtab_and_line sal
)
4181 /* This can be removed once this function no longer implicitly relies on the
4182 inferior_ptid value. */
4183 gdb_assert (inferior_ptid
== tp
->ptid
);
4185 tp
->control
.step_frame_id
= get_frame_id (frame
);
4186 tp
->control
.step_stack_frame_id
= get_stack_frame_id (frame
);
4188 tp
->current_symtab
= sal
.symtab
;
4189 tp
->current_line
= sal
.line
;
4192 /* Clear context switchable stepping state. */
4195 init_thread_stepping_state (struct thread_info
*tss
)
4197 tss
->stepped_breakpoint
= 0;
4198 tss
->stepping_over_breakpoint
= 0;
4199 tss
->stepping_over_watchpoint
= 0;
4200 tss
->step_after_step_resume_breakpoint
= 0;
4206 set_last_target_status (process_stratum_target
*target
, ptid_t ptid
,
4207 target_waitstatus status
)
4209 target_last_proc_target
= target
;
4210 target_last_wait_ptid
= ptid
;
4211 target_last_waitstatus
= status
;
4217 get_last_target_status (process_stratum_target
**target
, ptid_t
*ptid
,
4218 target_waitstatus
*status
)
4220 if (target
!= nullptr)
4221 *target
= target_last_proc_target
;
4222 if (ptid
!= nullptr)
4223 *ptid
= target_last_wait_ptid
;
4224 if (status
!= nullptr)
4225 *status
= target_last_waitstatus
;
4231 nullify_last_target_wait_ptid (void)
4233 target_last_proc_target
= nullptr;
4234 target_last_wait_ptid
= minus_one_ptid
;
4235 target_last_waitstatus
= {};
4238 /* Switch thread contexts. */
4241 context_switch (execution_control_state
*ecs
)
4243 if (ecs
->ptid
!= inferior_ptid
4244 && (inferior_ptid
== null_ptid
4245 || ecs
->event_thread
!= inferior_thread ()))
4247 infrun_debug_printf ("Switching context from %s to %s",
4248 target_pid_to_str (inferior_ptid
).c_str (),
4249 target_pid_to_str (ecs
->ptid
).c_str ());
4252 switch_to_thread (ecs
->event_thread
);
4255 /* If the target can't tell whether we've hit breakpoints
4256 (target_supports_stopped_by_sw_breakpoint), and we got a SIGTRAP,
4257 check whether that could have been caused by a breakpoint. If so,
4258 adjust the PC, per gdbarch_decr_pc_after_break. */
/* NOTE(review): this extract elides brace/return lines (original line
   numbers jump); all code tokens below are kept verbatim.  */
4261 adjust_pc_after_break (struct thread_info
*thread
,
4262 const target_waitstatus
*ws
)
4264 struct regcache
*regcache
;
4265 struct gdbarch
*gdbarch
;
4266 CORE_ADDR breakpoint_pc
, decr_pc
;
4268 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
4269 we aren't, just return.
4271 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
4272 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
4273 implemented by software breakpoints should be handled through the normal
4276 NOTE drow/2004-01-31: On some targets, breakpoints may generate
4277 different signals (SIGILL or SIGEMT for instance), but it is less
4278 clear where the PC is pointing afterwards. It may not match
4279 gdbarch_decr_pc_after_break. I don't know any specific target that
4280 generates these signals at breakpoints (the code has been in GDB since at
4281 least 1992) so I can not guess how to handle them here.
4283 In earlier versions of GDB, a target with
4284 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
4285 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
4286 target with both of these set in GDB history, and it seems unlikely to be
4287 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
/* Bail out (early return in upstream) unless this is a SIGTRAP stop.  */
4289 if (ws
->kind
!= TARGET_WAITKIND_STOPPED
)
4292 if (ws
->value
.sig
!= GDB_SIGNAL_TRAP
)
4295 /* In reverse execution, when a breakpoint is hit, the instruction
4296 under it has already been de-executed. The reported PC always
4297 points at the breakpoint address, so adjusting it further would
4298 be wrong. E.g., consider this case on a decr_pc_after_break == 1
4301 B1 0x08000000 : INSN1
4302 B2 0x08000001 : INSN2
4304 PC -> 0x08000003 : INSN4
4306 Say you're stopped at 0x08000003 as above. Reverse continuing
4307 from that point should hit B2 as below. Reading the PC when the
4308 SIGTRAP is reported should read 0x08000001 and INSN2 should have
4309 been de-executed already.
4311 B1 0x08000000 : INSN1
4312 B2 PC -> 0x08000001 : INSN2
4316 We can't apply the same logic as for forward execution, because
4317 we would wrongly adjust the PC to 0x08000000, since there's a
4318 breakpoint at PC - 1. We'd then report a hit on B1, although
4319 INSN1 hadn't been de-executed yet. Doing nothing is the correct
4321 if (execution_direction
== EXEC_REVERSE
)
4324 /* If the target can tell whether the thread hit a SW breakpoint,
4325 trust it. Targets that can tell also adjust the PC
4327 if (target_supports_stopped_by_sw_breakpoint ())
4330 /* Note that relying on whether a breakpoint is planted in memory to
4331 determine this can fail. E.g,. the breakpoint could have been
4332 removed since. Or the thread could have been told to step an
4333 instruction the size of a breakpoint instruction, and only
4334 _after_ was a breakpoint inserted at its address. */
4336 /* If this target does not decrement the PC after breakpoints, then
4337 we have nothing to do. */
4338 regcache
= get_thread_regcache (thread
);
4339 gdbarch
= regcache
->arch ();
4341 decr_pc
= gdbarch_decr_pc_after_break (gdbarch
);
/* The address space is needed to query for inserted breakpoint
   locations below.  */
4345 const address_space
*aspace
= regcache
->aspace ();
4347 /* Find the location where (if we've hit a breakpoint) the
4348 breakpoint would be. */
4349 breakpoint_pc
= regcache_read_pc (regcache
) - decr_pc
;
4351 /* If the target can't tell whether a software breakpoint triggered,
4352 fallback to figuring it out based on breakpoints we think were
4353 inserted in the target, and on whether the thread was stepped or
4356 /* Check whether there actually is a software breakpoint inserted at
4359 If in non-stop mode, a race condition is possible where we've
4360 removed a breakpoint, but stop events for that breakpoint were
4361 already queued and arrive later. To suppress those spurious
4362 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
4363 and retire them after a number of stop events are reported. Note
4364 this is an heuristic and can thus get confused. The real fix is
4365 to get the "stopped by SW BP and needs adjustment" info out of
4366 the target/kernel (and thus never reach here; see above). */
4367 if (software_breakpoint_inserted_here_p (aspace
, breakpoint_pc
)
4368 || (target_is_non_stop_p ()
4369 && moribund_breakpoint_here_p (aspace
, breakpoint_pc
)))
/* Presumably disables process-record while we rewrite the PC, so the
   adjustment itself is not treated as recorded execution -- confirm
   against upstream record-full.c.  */
4371 gdb::optional
<scoped_restore_tmpl
<int>> restore_operation_disable
;
4373 if (record_full_is_used ())
4374 restore_operation_disable
.emplace
4375 (record_full_gdb_operation_disable_set ());
4377 /* When using hardware single-step, a SIGTRAP is reported for both
4378 a completed single-step and a software breakpoint. Need to
4379 differentiate between the two, as the latter needs adjusting
4380 but the former does not.
4382 The SIGTRAP can be due to a completed hardware single-step only if
4383 - we didn't insert software single-step breakpoints
4384 - this thread is currently being stepped
4386 If any of these events did not occur, we must have stopped due
4387 to hitting a software breakpoint, and have to back up to the
4390 As a special case, we could have hardware single-stepped a
4391 software breakpoint. In this case (prev_pc == breakpoint_pc),
4392 we also need to back up to the breakpoint address. */
4394 if (thread_has_single_step_breakpoints_set (thread
)
4395 || !currently_stepping (thread
)
4396 || (thread
->stepped_breakpoint
4397 && thread
->prev_pc
== breakpoint_pc
))
4398 regcache_write_pc (regcache
, breakpoint_pc
);
4403 stepped_in_from (struct frame_info
*frame
, struct frame_id step_frame_id
)
4405 for (frame
= get_prev_frame (frame
);
4407 frame
= get_prev_frame (frame
))
4409 if (frame_id_eq (get_frame_id (frame
), step_frame_id
))
4412 if (get_frame_type (frame
) != INLINE_FRAME
)
4419 /* Look for an inline frame that is marked for skip.
4420 If PREV_FRAME is TRUE start at the previous frame,
4421 otherwise start at the current frame. Stop at the
4422 first non-inline frame, or at the frame where the
/* NOTE(review): the return type, braces and some guard lines (e.g. the
   'if (prev_frame)' around the initial get_prev_frame call and the
   NULL check on SYM) are elided in this extract; tokens below are
   verbatim.  */
4426 inline_frame_is_marked_for_skip (bool prev_frame
, struct thread_info
*tp
)
4428 struct frame_info
*frame
= get_current_frame ();
/* Optionally start one frame up -- presumably guarded by PREV_FRAME
   upstream; confirm.  */
4431 frame
= get_prev_frame (frame
);
4433 for (; frame
!= NULL
; frame
= get_prev_frame (frame
))
4435 const char *fn
= NULL
;
4436 symtab_and_line sal
;
/* Stop at the frame where the step originated.  */
4439 if (frame_id_eq (get_frame_id (frame
), tp
->control
.step_frame_id
))
/* Only inline frames are candidates; a real frame ends the walk.  */
4441 if (get_frame_type (frame
) != INLINE_FRAME
)
4444 sal
= find_frame_sal (frame
);
4445 sym
= get_frame_function (frame
);
4448 fn
= sym
->print_name ();
/* Check the user's skip list against this inline function's name.  */
4451 && function_name_is_marked_for_skip (fn
, sal
))
4458 /* If the event thread has the stop requested flag set, pretend it
4459 stopped for a GDB_SIGNAL_0 (i.e., as if it stopped due to
4463 handle_stop_requested (struct execution_control_state
*ecs
)
4465 if (ecs
->event_thread
->stop_requested
)
4467 ecs
->ws
.kind
= TARGET_WAITKIND_STOPPED
;
4468 ecs
->ws
.value
.sig
= GDB_SIGNAL_0
;
4469 handle_signal_stop (ecs
);
4475 /* Auxiliary function that handles syscall entry/return events.
4476 It returns true if the inferior should keep going (and GDB
4477 should ignore the event), or false if the event deserves to be
/* NOTE(review): the return type, braces and the 'return'/'keep_going'
   lines are elided in this extract; tokens below are verbatim.  */
4481 handle_syscall_event (struct execution_control_state
*ecs
)
4483 struct regcache
*regcache
;
/* Make the event thread current before touching its state.  */
4486 context_switch (ecs
);
4488 regcache
= get_thread_regcache (ecs
->event_thread
);
4489 syscall_number
= ecs
->ws
.value
.syscall_number
;
4490 ecs
->event_thread
->set_stop_pc (regcache_read_pc (regcache
));
/* Only build a bpstat when the user actually has catchpoints for this
   syscall number; otherwise the event is ignored below.  */
4492 if (catch_syscall_enabled () > 0
4493 && catching_syscall_number (syscall_number
) > 0)
4495 infrun_debug_printf ("syscall number=%d", syscall_number
);
4497 ecs
->event_thread
->control
.stop_bpstat
4498 = bpstat_stop_status (regcache
->aspace (),
4499 ecs
->event_thread
->stop_pc (),
4500 ecs
->event_thread
, &ecs
->ws
);
/* A pending stop request takes precedence over catchpoint handling.  */
4502 if (handle_stop_requested (ecs
))
4505 if (bpstat_causes_stop (ecs
->event_thread
->control
.stop_bpstat
))
4507 /* Catchpoint hit. */
4512 if (handle_stop_requested (ecs
))
4515 /* If no catchpoint triggered for this, then keep going. */
4521 /* Lazily fill in the execution_control_state's stop_func_* fields. */
/* NOTE(review): the return type, braces and the block-lookup line for
   'block' are elided in this extract; tokens below are verbatim.  */
4524 fill_in_stop_func (struct gdbarch
*gdbarch
,
4525 struct execution_control_state
*ecs
)
/* Only compute once per event; subsequent calls are no-ops.  */
4527 if (!ecs
->stop_func_filled_in
)
4530 const general_symbol_info
*gsi
;
4532 /* Don't care about return value; stop_func_start and stop_func_name
4533 will both be 0 if it doesn't work. */
4534 find_pc_partial_function_sym (ecs
->event_thread
->stop_pc (),
4536 &ecs
->stop_func_start
,
4537 &ecs
->stop_func_end
,
4539 ecs
->stop_func_name
= gsi
== nullptr ? nullptr : gsi
->print_name ();
4541 /* The call to find_pc_partial_function, above, will set
4542 stop_func_start and stop_func_end to the start and end
4543 of the range containing the stop pc. If this range
4544 contains the entry pc for the block (which is always the
4545 case for contiguous blocks), advance stop_func_start past
4546 the function's start offset and entrypoint. Note that
4547 stop_func_start is NOT advanced when in a range of a
4548 non-contiguous block that does not contain the entry pc. */
4549 if (block
!= nullptr
4550 && ecs
->stop_func_start
<= BLOCK_ENTRY_PC (block
)
4551 && BLOCK_ENTRY_PC (block
) < ecs
->stop_func_end
)
4553 ecs
->stop_func_start
4554 += gdbarch_deprecated_function_start_offset (gdbarch
);
/* Some architectures have a per-arch hook to skip past additional
   entry-point scaffolding.  */
4556 if (gdbarch_skip_entrypoint_p (gdbarch
))
4557 ecs
->stop_func_start
4558 = gdbarch_skip_entrypoint (gdbarch
, ecs
->stop_func_start
);
4561 ecs
->stop_func_filled_in
= 1;
4566 /* Return the STOP_SOON field of the inferior pointed at by ECS. */
4568 static enum stop_kind
4569 get_inferior_stop_soon (execution_control_state
*ecs
)
4571 struct inferior
*inf
= find_inferior_ptid (ecs
->target
, ecs
->ptid
);
4573 gdb_assert (inf
!= NULL
);
4574 return inf
->control
.stop_soon
;
4577 /* Poll for one event out of the current target. Store the resulting
4578 waitstatus in WS, and return the event ptid. Does not block. */
/* NOTE(review): the return type, the 'event_ptid' declaration, the
   'else' and the final 'return event_ptid;' lines are elided in this
   extract; tokens below are verbatim.  */
4581 poll_one_curr_target (struct target_waitstatus
*ws
)
4585 overlay_cache_invalid
= 1;
4587 /* Flush target cache before starting to handle each event.
4588 Target was running and cache could be stale. This is just a
4589 heuristic. Running threads may modify target memory, but we
4590 don't get any event. */
4591 target_dcache_invalidate ();
/* TARGET_WNOHANG makes both wait calls non-blocking, matching the
   "does not block" contract above.  */
4593 if (deprecated_target_wait_hook
)
4594 event_ptid
= deprecated_target_wait_hook (minus_one_ptid
, ws
, TARGET_WNOHANG
);
4596 event_ptid
= target_wait (minus_one_ptid
, ws
, TARGET_WNOHANG
);
4599 print_target_wait_results (minus_one_ptid
, event_ptid
, ws
);
4604 /* Wait for one event out of any target. */
/* NOTE(review): the function name line, braces, the polling loop's
   outer 'while' and the select() setup lines are elided in this
   extract; tokens below are verbatim.  */
4606 static wait_one_event
/* First pass: poll every async target with executing threads for an
   already-pending event.  */
4611 for (inferior
*inf
: all_inferiors ())
4613 process_stratum_target
*target
= inf
->process_target ();
4615 || !target
->is_async_p ()
4616 || !target
->threads_executing
)
4619 switch_to_inferior_no_thread (inf
);
4621 wait_one_event event
;
4622 event
.target
= target
;
4623 event
.ptid
= poll_one_curr_target (&event
.ws
);
4625 if (event
.ws
.kind
== TARGET_WAITKIND_NO_RESUMED
)
4627 /* If nothing is resumed, remove the target from the
4631 else if (event
.ws
.kind
!= TARGET_WAITKIND_IGNORE
)
4635 /* Block waiting for some event. */
/* Second pass: collect the async wait fds of all still-waitable
   targets into READFDS for the select below.  */
4642 for (inferior
*inf
: all_inferiors ())
4644 process_stratum_target
*target
= inf
->process_target ();
4646 || !target
->is_async_p ()
4647 || !target
->threads_executing
)
4650 int fd
= target
->async_wait_fd ();
4651 FD_SET (fd
, &readfds
);
4658 /* No waitable targets left. All must be stopped. */
4659 return {NULL
, minus_one_ptid
, {TARGET_WAITKIND_NO_RESUMED
}};
/* Block until one of the targets' event fds becomes readable, while
   still allowing Ctrl-C to interrupt.  */
4664 int numfds
= interruptible_select (nfds
, &readfds
, 0, NULL
, 0);
4670 perror_with_name ("interruptible_select");
4675 /* Save the thread's event and stop reason to process it later. */
/* NOTE(review): braces and some lines are elided in this extract;
   tokens below are verbatim.  The else-if cascade is order-sensitive:
   watchpoint beats breakpoint beats single-step.  */
4678 save_waitstatus (struct thread_info
*tp
, const target_waitstatus
*ws
)
4680 infrun_debug_printf ("saving status %s for %d.%ld.%ld",
4681 target_waitstatus_to_string (ws
).c_str (),
4686 /* Record for later. */
4687 tp
->set_pending_waitstatus (*ws
);
/* Only SIGTRAP stops need the stop-reason classification below.  */
4689 if (ws
->kind
== TARGET_WAITKIND_STOPPED
4690 && ws
->value
.sig
== GDB_SIGNAL_TRAP
)
4692 struct regcache
*regcache
= get_thread_regcache (tp
);
4693 const address_space
*aspace
= regcache
->aspace ();
4694 CORE_ADDR pc
= regcache_read_pc (regcache
);
/* Undo any decr_pc_after_break adjustment on the saved status.  */
4696 adjust_pc_after_break (tp
, &tp
->pending_waitstatus ());
/* The target_stopped_by_* queries below operate on the current
   thread, so temporarily make TP current.  */
4698 scoped_restore_current_thread restore_thread
;
4699 switch_to_thread (tp
);
4701 if (target_stopped_by_watchpoint ())
4702 tp
->set_stop_reason (TARGET_STOPPED_BY_WATCHPOINT
);
4703 else if (target_supports_stopped_by_sw_breakpoint ()
4704 && target_stopped_by_sw_breakpoint ())
4705 tp
->set_stop_reason (TARGET_STOPPED_BY_SW_BREAKPOINT
);
4706 else if (target_supports_stopped_by_hw_breakpoint ()
4707 && target_stopped_by_hw_breakpoint ())
4708 tp
->set_stop_reason (TARGET_STOPPED_BY_HW_BREAKPOINT
);
/* For targets that can't report it directly, fall back to checking
   whether a breakpoint is inserted at the stop PC.  */
4709 else if (!target_supports_stopped_by_hw_breakpoint ()
4710 && hardware_breakpoint_inserted_here_p (aspace
, pc
))
4711 tp
->set_stop_reason (TARGET_STOPPED_BY_HW_BREAKPOINT
);
4712 else if (!target_supports_stopped_by_sw_breakpoint ()
4713 && software_breakpoint_inserted_here_p (aspace
, pc
))
4714 tp
->set_stop_reason (TARGET_STOPPED_BY_SW_BREAKPOINT
);
4715 else if (!thread_has_single_step_breakpoints_set (tp
)
4716 && currently_stepping (tp
))
4717 tp
->set_stop_reason (TARGET_STOPPED_BY_SINGLE_STEP
);
4721 /* Mark the non-executing threads accordingly. In all-stop, all
4722 threads of all processes are stopped when we get any event
4723 reported. In non-stop mode, only the event thread stops. */
/* NOTE(review): the return type, the EVENT_PTID parameter line, the
   'ptid_t mark_ptid;' declaration and the final 'else' line are
   elided in this extract; tokens below are verbatim.  */
4726 mark_non_executing_threads (process_stratum_target
*target
,
4728 struct target_waitstatus ws
)
/* All-stop: every thread of every process on TARGET stopped.  */
4732 if (!target_is_non_stop_p ())
4733 mark_ptid
= minus_one_ptid
;
4734 else if (ws
.kind
== TARGET_WAITKIND_SIGNALLED
4735 || ws
.kind
== TARGET_WAITKIND_EXITED
)
4737 /* If we're handling a process exit in non-stop mode, even
4738 though threads haven't been deleted yet, one would think
4739 that there is nothing to do, as threads of the dead process
4740 will be soon deleted, and threads of any other process were
4741 left running. However, on some targets, threads survive a
4742 process exit event. E.g., for the "checkpoint" command,
4743 when the current checkpoint/fork exits, linux-fork.c
4744 automatically switches to another fork from within
4745 target_mourn_inferior, by associating the same
4746 inferior/thread to another fork. We haven't mourned yet at
4747 this point, but we must mark any threads left in the
4748 process as not-executing so that finish_thread_state marks
4749 them stopped (in the user's perspective) if/when we present
4750 the stop to the user. */
4751 mark_ptid
= ptid_t (event_ptid
.pid ());
/* Non-stop, ordinary stop: only the event thread stopped.  */
4754 mark_ptid
= event_ptid
;
4756 set_executing (target
, mark_ptid
, false);
4758 /* Likewise the resumed flag. */
4759 set_resumed (target
, mark_ptid
, false);
4762 /* Handle one event after stopping threads. If the eventing thread
4763 reports back any interesting event, we leave it pending. If the
4764 eventing thread was in the middle of a displaced step, we
4765 cancel/finish it, and unless the thread's inferior is being
4766 detached, put the thread back in the step-over chain. Returns true
4767 if there are no resumed threads left in the target (thus there's no
4768 point in waiting further), false otherwise. */
/* NOTE(review): the return type, braces, 'return true/false' lines
   and several guard lines are elided in this extract; tokens below
   are verbatim.  */
4771 handle_one (const wait_one_event
&event
)
4774 ("%s %s", target_waitstatus_to_string (&event
.ws
).c_str (),
4775 target_pid_to_str (event
.ptid
).c_str ());
/* No resumed threads left anywhere: tell the caller to stop
   waiting.  */
4777 if (event
.ws
.kind
== TARGET_WAITKIND_NO_RESUMED
)
4779 /* All resumed threads exited. */
4782 else if (event
.ws
.kind
== TARGET_WAITKIND_THREAD_EXITED
4783 || event
.ws
.kind
== TARGET_WAITKIND_EXITED
4784 || event
.ws
.kind
== TARGET_WAITKIND_SIGNALLED
)
4786 /* One thread/process exited/signalled. */
4788 thread_info
*t
= nullptr;
4790 /* The target may have reported just a pid. If so, try
4791 the first non-exited thread. */
4792 if (event
.ptid
.is_pid ())
4794 int pid
= event
.ptid
.pid ();
4795 inferior
*inf
= find_inferior_pid (event
.target
, pid
);
4796 for (thread_info
*tp
: inf
->non_exited_threads ())
4802 /* If there is no available thread, the event would
4803 have to be appended to a per-inferior event list,
4804 which does not exist (and if it did, we'd have
4805 to adjust run control command to be able to
4806 resume such an inferior). We assert here instead
4807 of going into an infinite loop. */
4808 gdb_assert (t
!= nullptr);
4811 ("using %s", target_pid_to_str (t
->ptid
).c_str ());
4815 t
= find_thread_ptid (event
.target
, event
.ptid
);
4816 /* Check if this is the first time we see this thread.
4817 Don't bother adding if it individually exited. */
4819 && event
.ws
.kind
!= TARGET_WAITKIND_THREAD_EXITED
)
4820 t
= add_thread (event
.target
, event
.ptid
);
4825 /* Set the threads as non-executing to avoid
4826 another stop attempt on them. */
4827 switch_to_thread_no_regs (t
);
4828 mark_non_executing_threads (event
.target
, event
.ptid
,
/* Keep the exit event pending so the normal event loop reports it.  */
4830 save_waitstatus (t
, &event
.ws
);
4831 t
->stop_requested
= false;
/* Non-exit event path: look the thread up, adding it if this is the
   first time it is seen.  */
4836 thread_info
*t
= find_thread_ptid (event
.target
, event
.ptid
);
4838 t
= add_thread (event
.target
, event
.ptid
);
4840 t
->stop_requested
= 0;
4842 t
->set_resumed (false);
4843 t
->control
.may_range_step
= 0;
4845 /* This may be the first time we see the inferior report
4847 inferior
*inf
= find_inferior_ptid (event
.target
, event
.ptid
);
4848 if (inf
->needs_setup
)
4850 switch_to_thread_no_regs (t
);
/* A GDB_SIGNAL_0 stop is the stop we ourselves requested; it does
   not need to be left pending.  */
4854 if (event
.ws
.kind
== TARGET_WAITKIND_STOPPED
4855 && event
.ws
.value
.sig
== GDB_SIGNAL_0
)
4857 /* We caught the event that we intended to catch, so
4858 there's no event to save as pending. */
4860 if (displaced_step_finish (t
, GDB_SIGNAL_0
)
4861 == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED
)
4863 /* Add it back to the step-over queue. */
4865 ("displaced-step of %s canceled",
4866 target_pid_to_str (t
->ptid
).c_str ());
4868 t
->control
.trap_expected
= 0;
4869 if (!t
->inf
->detaching
)
4870 global_thread_step_over_chain_enqueue (t
);
/* Any other event: record it as pending on the thread.  */
4875 enum gdb_signal sig
;
4876 struct regcache
*regcache
;
4879 ("target_wait %s, saving status for %d.%ld.%ld",
4880 target_waitstatus_to_string (&event
.ws
).c_str (),
4881 t
->ptid
.pid (), t
->ptid
.lwp (), t
->ptid
.tid ());
4883 /* Record for later. */
4884 save_waitstatus (t
, &event
.ws
);
4886 sig
= (event
.ws
.kind
== TARGET_WAITKIND_STOPPED
4887 ? event
.ws
.value
.sig
: GDB_SIGNAL_0
);
4889 if (displaced_step_finish (t
, sig
)
4890 == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED
)
4892 /* Add it back to the step-over queue. */
4893 t
->control
.trap_expected
= 0;
4894 if (!t
->inf
->detaching
)
4895 global_thread_step_over_chain_enqueue (t
);
4898 regcache
= get_thread_regcache (t
);
4899 t
->set_stop_pc (regcache_read_pc (regcache
));
4901 infrun_debug_printf ("saved stop_pc=%s for %s "
4902 "(currently_stepping=%d)",
4903 paddress (target_gdbarch (), t
->stop_pc ()),
4904 target_pid_to_str (t
->ptid
).c_str (),
4905 currently_stepping (t
));
/* Stop all threads of all known non-stop targets.  NOTE(review): the
   return type, braces, the 'pass'/'iterations' declarations and the
   enclosing SCOPE_EXIT wrapper are elided in this extract; the
   "Disable thread events" lines below are presumably the body of a
   scope-exit cleanup that runs on function exit -- confirm against
   upstream.  Tokens below are verbatim.  */
4915 stop_all_threads (void)
4917 /* We may need multiple passes to discover all threads. */
4921 gdb_assert (exists_non_stop_target ());
4923 infrun_debug_printf ("starting");
4925 scoped_restore_current_thread restore_thread
;
4927 /* Enable thread events of all targets. */
4928 for (auto *target
: all_non_exited_process_targets ())
4930 switch_to_target_no_thread (target
);
4931 target_thread_events (true);
4936 /* Disable thread events of all targets. */
4937 for (auto *target
: all_non_exited_process_targets ())
4939 switch_to_target_no_thread (target
);
4940 target_thread_events (false);
4943 /* Use debug_prefixed_printf directly to get a meaningful function
4946 debug_prefixed_printf ("infrun", "stop_all_threads", "done");
4949 /* Request threads to stop, and then wait for the stops. Because
4950 threads we already know about can spawn more threads while we're
4951 trying to stop them, and we only learn about new threads when we
4952 update the thread list, do this in a loop, and keep iterating
4953 until two passes find no threads that need to be stopped. */
4954 for (pass
= 0; pass
< 2; pass
++, iterations
++)
4956 infrun_debug_printf ("pass=%d, iterations=%d", pass
, iterations
);
4959 int waits_needed
= 0;
4961 for (auto *target
: all_non_exited_process_targets ())
4963 switch_to_target_no_thread (target
);
4964 update_thread_list ();
4967 /* Go through all threads looking for threads that we need
4968 to tell the target to stop. */
4969 for (thread_info
*t
: all_non_exited_threads ())
4971 /* For a single-target setting with an all-stop target,
4972 we would not even arrive here. For a multi-target
4973 setting, until GDB is able to handle a mixture of
4974 all-stop and non-stop targets, simply skip all-stop
4975 targets' threads. This should be fine due to the
4976 protection of 'check_multi_target_resumption'. */
4978 switch_to_thread_no_regs (t
);
4979 if (!target_is_non_stop_p ())
4984 /* If already stopping, don't request a stop again.
4985 We just haven't seen the notification yet. */
4986 if (!t
->stop_requested
)
4988 infrun_debug_printf (" %s executing, need stop",
4989 target_pid_to_str (t
->ptid
).c_str ());
4990 target_stop (t
->ptid
);
4991 t
->stop_requested
= 1;
4995 infrun_debug_printf (" %s executing, already stopping",
4996 target_pid_to_str (t
->ptid
).c_str ());
/* Count threads whose stop notification is still outstanding.  */
4999 if (t
->stop_requested
)
5004 infrun_debug_printf (" %s not executing",
5005 target_pid_to_str (t
->ptid
).c_str ());
5007 /* The thread may be not executing, but still be
5008 resumed with a pending status to process. */
5009 t
->set_resumed (false);
5013 if (waits_needed
== 0)
5016 /* If we find new threads on the second iteration, restart
5017 over. We want to see two iterations in a row with all
/* Consume one stop notification per outstanding request.  */
5022 for (int i
= 0; i
< waits_needed
; i
++)
5024 wait_one_event event
= wait_one ();
5025 if (handle_one (event
))
5032 /* Handle a TARGET_WAITKIND_NO_RESUMED event. */
/* NOTE(review): the return type, braces, several 'return' lines and
   some loop/guard lines are elided in this extract; tokens below are
   verbatim.  Presumably returns true when the event was consumed
   (caller should ignore it) -- confirm against upstream.  */
5035 handle_no_resumed (struct execution_control_state
*ecs
)
5037 if (target_can_async_p ())
5039 bool any_sync
= false;
/* Is any UI blocked on a synchronous execution command?  */
5041 for (ui
*ui
: all_uis ())
5043 if (ui
->prompt_state
== PROMPT_BLOCKED
)
5051 /* There were no unwaited-for children left in the target, but,
5052 we're not synchronously waiting for events either. Just
5055 infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED (ignoring: bg)");
5056 prepare_to_wait (ecs
);
5061 /* Otherwise, if we were running a synchronous execution command, we
5062 may need to cancel it and give the user back the terminal.
5064 In non-stop mode, the target can't tell whether we've already
5065 consumed previous stop events, so it can end up sending us a
5066 no-resumed event like so:
5068 #0 - thread 1 is left stopped
5070 #1 - thread 2 is resumed and hits breakpoint
5071 -> TARGET_WAITKIND_STOPPED
5073 #2 - thread 3 is resumed and exits
5074 this is the last resumed thread, so
5075 -> TARGET_WAITKIND_NO_RESUMED
5077 #3 - gdb processes stop for thread 2 and decides to re-resume
5080 #4 - gdb processes the TARGET_WAITKIND_NO_RESUMED event.
5081 thread 2 is now resumed, so the event should be ignored.
5083 IOW, if the stop for thread 2 doesn't end a foreground command,
5084 then we need to ignore the following TARGET_WAITKIND_NO_RESUMED
5085 event. But it could be that the event meant that thread 2 itself
5086 (or whatever other thread was the last resumed thread) exited.
5088 To address this we refresh the thread list and check whether we
5089 have resumed threads _now_. In the example above, this removes
5090 thread 3 from the thread list. If thread 2 was re-resumed, we
5091 ignore this event. If we find no thread resumed, then we cancel
5092 the synchronous command and show "no unwaited-for " to the
5095 inferior
*curr_inf
= current_inferior ();
5097 scoped_restore_current_thread restore_thread
;
/* Refresh every target's thread list before deciding.  */
5099 for (auto *target
: all_non_exited_process_targets ())
5101 switch_to_target_no_thread (target
);
5102 update_thread_list ();
5107 - the current target has no thread executing, and
5108 - the current inferior is native, and
5109 - the current inferior is the one which has the terminal, and
5112 then a Ctrl-C from this point on would remain stuck in the
5113 kernel, until a thread resumes and dequeues it. That would
5114 result in the GDB CLI not reacting to Ctrl-C, not able to
5115 interrupt the program. To address this, if the current inferior
5116 no longer has any thread executing, we give the terminal to some
5117 other inferior that has at least one thread executing. */
5118 bool swap_terminal
= true;
5120 /* Whether to ignore this TARGET_WAITKIND_NO_RESUMED event, or
5121 whether to report it to the user. */
5122 bool ignore_event
= false;
5124 for (thread_info
*thread
: all_non_exited_threads ())
5126 if (swap_terminal
&& thread
->executing
)
5128 if (thread
->inf
!= curr_inf
)
5130 target_terminal::ours ();
5132 switch_to_thread (thread
);
5133 target_terminal::inferior ();
5135 swap_terminal
= false;
/* A thread is still resumed (executing or with a pending status):
   the no-resumed event is stale.  */
5139 && (thread
->executing
|| thread
->has_pending_waitstatus ()))
5141 /* Either there were no unwaited-for children left in the
5142 target at some point, but there are now, or some target
5143 other than the eventing one has unwaited-for children
5144 left. Just ignore. */
5145 infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED "
5146 "(ignoring: found resumed)");
5148 ignore_event
= true;
5151 if (ignore_event
&& !swap_terminal
)
5157 switch_to_inferior_no_thread (curr_inf
);
5158 prepare_to_wait (ecs
);
5162 /* Go ahead and report the event. */
5166 /* Given an execution control state that has been freshly filled in by
5167 an event from the inferior, figure out what it means and take
5170 The alternatives are:
5172 1) stop_waiting and return; to really stop and return to the
5175 2) keep_going and return; to wait for the next event (set
5176 ecs->event_thread->stepping_over_breakpoint to 1 to single step
5180 handle_inferior_event (struct execution_control_state
*ecs
)
5182 /* Make sure that all temporary struct value objects that were
5183 created during the handling of the event get deleted at the
5185 scoped_value_mark free_values
;
5187 infrun_debug_printf ("%s", target_waitstatus_to_string (&ecs
->ws
).c_str ());
5189 if (ecs
->ws
.kind
== TARGET_WAITKIND_IGNORE
)
5191 /* We had an event in the inferior, but we are not interested in
5192 handling it at this level. The lower layers have already
5193 done what needs to be done, if anything.
5195 One of the possible circumstances for this is when the
5196 inferior produces output for the console. The inferior has
5197 not stopped, and we are ignoring the event. Another possible
5198 circumstance is any event which the lower level knows will be
5199 reported multiple times without an intervening resume. */
5200 prepare_to_wait (ecs
);
5204 if (ecs
->ws
.kind
== TARGET_WAITKIND_THREAD_EXITED
)
5206 prepare_to_wait (ecs
);
5210 if (ecs
->ws
.kind
== TARGET_WAITKIND_NO_RESUMED
5211 && handle_no_resumed (ecs
))
5214 /* Cache the last target/ptid/waitstatus. */
5215 set_last_target_status (ecs
->target
, ecs
->ptid
, ecs
->ws
);
5217 /* Always clear state belonging to the previous time we stopped. */
5218 stop_stack_dummy
= STOP_NONE
;
5220 if (ecs
->ws
.kind
== TARGET_WAITKIND_NO_RESUMED
)
5222 /* No unwaited-for children left. IOW, all resumed children
5224 stop_print_frame
= false;
5229 if (ecs
->ws
.kind
!= TARGET_WAITKIND_EXITED
5230 && ecs
->ws
.kind
!= TARGET_WAITKIND_SIGNALLED
)
5232 ecs
->event_thread
= find_thread_ptid (ecs
->target
, ecs
->ptid
);
5233 /* If it's a new thread, add it to the thread database. */
5234 if (ecs
->event_thread
== NULL
)
5235 ecs
->event_thread
= add_thread (ecs
->target
, ecs
->ptid
);
5237 /* Disable range stepping. If the next step request could use a
5238 range, this will be end up re-enabled then. */
5239 ecs
->event_thread
->control
.may_range_step
= 0;
5242 /* Dependent on valid ECS->EVENT_THREAD. */
5243 adjust_pc_after_break (ecs
->event_thread
, &ecs
->ws
);
5245 /* Dependent on the current PC value modified by adjust_pc_after_break. */
5246 reinit_frame_cache ();
5248 breakpoint_retire_moribund ();
5250 /* First, distinguish signals caused by the debugger from signals
5251 that have to do with the program's own actions. Note that
5252 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
5253 on the operating system version. Here we detect when a SIGILL or
5254 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
5255 something similar for SIGSEGV, since a SIGSEGV will be generated
5256 when we're trying to execute a breakpoint instruction on a
5257 non-executable stack. This happens for call dummy breakpoints
5258 for architectures like SPARC that place call dummies on the
5260 if (ecs
->ws
.kind
== TARGET_WAITKIND_STOPPED
5261 && (ecs
->ws
.value
.sig
== GDB_SIGNAL_ILL
5262 || ecs
->ws
.value
.sig
== GDB_SIGNAL_SEGV
5263 || ecs
->ws
.value
.sig
== GDB_SIGNAL_EMT
))
5265 struct regcache
*regcache
= get_thread_regcache (ecs
->event_thread
);
5267 if (breakpoint_inserted_here_p (regcache
->aspace (),
5268 regcache_read_pc (regcache
)))
5270 infrun_debug_printf ("Treating signal as SIGTRAP");
5271 ecs
->ws
.value
.sig
= GDB_SIGNAL_TRAP
;
5275 mark_non_executing_threads (ecs
->target
, ecs
->ptid
, ecs
->ws
);
5277 switch (ecs
->ws
.kind
)
5279 case TARGET_WAITKIND_LOADED
:
5281 context_switch (ecs
);
5282 /* Ignore gracefully during startup of the inferior, as it might
5283 be the shell which has just loaded some objects, otherwise
5284 add the symbols for the newly loaded objects. Also ignore at
5285 the beginning of an attach or remote session; we will query
5286 the full list of libraries once the connection is
5289 stop_kind stop_soon
= get_inferior_stop_soon (ecs
);
5290 if (stop_soon
== NO_STOP_QUIETLY
)
5292 struct regcache
*regcache
;
5294 regcache
= get_thread_regcache (ecs
->event_thread
);
5296 handle_solib_event ();
5298 ecs
->event_thread
->control
.stop_bpstat
5299 = bpstat_stop_status (regcache
->aspace (),
5300 ecs
->event_thread
->stop_pc (),
5301 ecs
->event_thread
, &ecs
->ws
);
5303 if (handle_stop_requested (ecs
))
5306 if (bpstat_causes_stop (ecs
->event_thread
->control
.stop_bpstat
))
5308 /* A catchpoint triggered. */
5309 process_event_stop_test (ecs
);
5313 /* If requested, stop when the dynamic linker notifies
5314 gdb of events. This allows the user to get control
5315 and place breakpoints in initializer routines for
5316 dynamically loaded objects (among other things). */
5317 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
5318 if (stop_on_solib_events
)
5320 /* Make sure we print "Stopped due to solib-event" in
5322 stop_print_frame
= true;
5329 /* If we are skipping through a shell, or through shared library
5330 loading that we aren't interested in, resume the program. If
5331 we're running the program normally, also resume. */
5332 if (stop_soon
== STOP_QUIETLY
|| stop_soon
== NO_STOP_QUIETLY
)
5334 /* Loading of shared libraries might have changed breakpoint
5335 addresses. Make sure new breakpoints are inserted. */
5336 if (stop_soon
== NO_STOP_QUIETLY
)
5337 insert_breakpoints ();
5338 resume (GDB_SIGNAL_0
);
5339 prepare_to_wait (ecs
);
5343 /* But stop if we're attaching or setting up a remote
5345 if (stop_soon
== STOP_QUIETLY_NO_SIGSTOP
5346 || stop_soon
== STOP_QUIETLY_REMOTE
)
5348 infrun_debug_printf ("quietly stopped");
5353 internal_error (__FILE__
, __LINE__
,
5354 _("unhandled stop_soon: %d"), (int) stop_soon
);
5357 case TARGET_WAITKIND_SPURIOUS
:
5358 if (handle_stop_requested (ecs
))
5360 context_switch (ecs
);
5361 resume (GDB_SIGNAL_0
);
5362 prepare_to_wait (ecs
);
5365 case TARGET_WAITKIND_THREAD_CREATED
:
5366 if (handle_stop_requested (ecs
))
5368 context_switch (ecs
);
5369 if (!switch_back_to_stepped_thread (ecs
))
5373 case TARGET_WAITKIND_EXITED
:
5374 case TARGET_WAITKIND_SIGNALLED
:
5376 /* Depending on the system, ecs->ptid may point to a thread or
5377 to a process. On some targets, target_mourn_inferior may
5378 need to have access to the just-exited thread. That is the
5379 case of GNU/Linux's "checkpoint" support, for example.
5380 Call the switch_to_xxx routine as appropriate. */
5381 thread_info
*thr
= find_thread_ptid (ecs
->target
, ecs
->ptid
);
5383 switch_to_thread (thr
);
5386 inferior
*inf
= find_inferior_ptid (ecs
->target
, ecs
->ptid
);
5387 switch_to_inferior_no_thread (inf
);
5390 handle_vfork_child_exec_or_exit (0);
5391 target_terminal::ours (); /* Must do this before mourn anyway. */
5393 /* Clearing any previous state of convenience variables. */
5394 clear_exit_convenience_vars ();
5396 if (ecs
->ws
.kind
== TARGET_WAITKIND_EXITED
)
5398 /* Record the exit code in the convenience variable $_exitcode, so
5399 that the user can inspect this again later. */
5400 set_internalvar_integer (lookup_internalvar ("_exitcode"),
5401 (LONGEST
) ecs
->ws
.value
.integer
);
5403 /* Also record this in the inferior itself. */
5404 current_inferior ()->has_exit_code
= 1;
5405 current_inferior ()->exit_code
= (LONGEST
) ecs
->ws
.value
.integer
;
5407 /* Support the --return-child-result option. */
5408 return_child_result_value
= ecs
->ws
.value
.integer
;
5410 gdb::observers::exited
.notify (ecs
->ws
.value
.integer
);
5414 struct gdbarch
*gdbarch
= current_inferior ()->gdbarch
;
5416 if (gdbarch_gdb_signal_to_target_p (gdbarch
))
5418 /* Set the value of the internal variable $_exitsignal,
5419 which holds the signal uncaught by the inferior. */
5420 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
5421 gdbarch_gdb_signal_to_target (gdbarch
,
5422 ecs
->ws
.value
.sig
));
5426 /* We don't have access to the target's method used for
5427 converting between signal numbers (GDB's internal
5428 representation <-> target's representation).
5429 Therefore, we cannot do a good job at displaying this
5430 information to the user. It's better to just warn
5431 her about it (if infrun debugging is enabled), and
5433 infrun_debug_printf ("Cannot fill $_exitsignal with the correct "
5437 gdb::observers::signal_exited
.notify (ecs
->ws
.value
.sig
);
5440 gdb_flush (gdb_stdout
);
5441 target_mourn_inferior (inferior_ptid
);
5442 stop_print_frame
= false;
5446 case TARGET_WAITKIND_FORKED
:
5447 case TARGET_WAITKIND_VFORKED
:
5448 /* Check whether the inferior is displaced stepping. */
5450 struct regcache
*regcache
= get_thread_regcache (ecs
->event_thread
);
5451 struct gdbarch
*gdbarch
= regcache
->arch ();
5452 inferior
*parent_inf
= find_inferior_ptid (ecs
->target
, ecs
->ptid
);
5454 /* If this is a fork (child gets its own address space copy)
5455 and some displaced step buffers were in use at the time of
5456 the fork, restore the displaced step buffer bytes in the
5459 Architectures which support displaced stepping and fork
5460 events must supply an implementation of
5461 gdbarch_displaced_step_restore_all_in_ptid. This is not
5462 enforced during gdbarch validation to support architectures
5463 which support displaced stepping but not forks. */
5464 if (ecs
->ws
.kind
== TARGET_WAITKIND_FORKED
5465 && gdbarch_supports_displaced_stepping (gdbarch
))
5466 gdbarch_displaced_step_restore_all_in_ptid
5467 (gdbarch
, parent_inf
, ecs
->ws
.value
.related_pid
);
5469 /* If displaced stepping is supported, and thread ecs->ptid is
5470 displaced stepping. */
5471 if (displaced_step_in_progress_thread (ecs
->event_thread
))
5473 struct regcache
*child_regcache
;
5474 CORE_ADDR parent_pc
;
5476 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
5477 indicating that the displaced stepping of syscall instruction
5478 has been done. Perform cleanup for parent process here. Note
5479 that this operation also cleans up the child process for vfork,
5480 because their pages are shared. */
5481 displaced_step_finish (ecs
->event_thread
, GDB_SIGNAL_TRAP
);
5482 /* Start a new step-over in another thread if there's one
5486 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
5487 the child's PC is also within the scratchpad. Set the child's PC
5488 to the parent's PC value, which has already been fixed up.
5489 FIXME: we use the parent's aspace here, although we're touching
5490 the child, because the child hasn't been added to the inferior
5491 list yet at this point. */
5494 = get_thread_arch_aspace_regcache (parent_inf
->process_target (),
5495 ecs
->ws
.value
.related_pid
,
5497 parent_inf
->aspace
);
5498 /* Read PC value of parent process. */
5499 parent_pc
= regcache_read_pc (regcache
);
5501 displaced_debug_printf ("write child pc from %s to %s",
5503 regcache_read_pc (child_regcache
)),
5504 paddress (gdbarch
, parent_pc
));
5506 regcache_write_pc (child_regcache
, parent_pc
);
5510 context_switch (ecs
);
5512 /* Immediately detach breakpoints from the child before there's
5513 any chance of letting the user delete breakpoints from the
5514 breakpoint lists. If we don't do this early, it's easy to
5515 leave left over traps in the child, vis: "break foo; catch
5516 fork; c; <fork>; del; c; <child calls foo>". We only follow
5517 the fork on the last `continue', and by that time the
5518 breakpoint at "foo" is long gone from the breakpoint table.
5519 If we vforked, then we don't need to unpatch here, since both
5520 parent and child are sharing the same memory pages; we'll
5521 need to unpatch at follow/detach time instead to be certain
5522 that new breakpoints added between catchpoint hit time and
5523 vfork follow are detached. */
5524 if (ecs
->ws
.kind
!= TARGET_WAITKIND_VFORKED
)
5526 /* This won't actually modify the breakpoint list, but will
5527 physically remove the breakpoints from the child. */
5528 detach_breakpoints (ecs
->ws
.value
.related_pid
);
5531 delete_just_stopped_threads_single_step_breakpoints ();
5533 /* In case the event is caught by a catchpoint, remember that
5534 the event is to be followed at the next resume of the thread,
5535 and not immediately. */
5536 ecs
->event_thread
->pending_follow
= ecs
->ws
;
5538 ecs
->event_thread
->set_stop_pc
5539 (regcache_read_pc (get_thread_regcache (ecs
->event_thread
)));
5541 ecs
->event_thread
->control
.stop_bpstat
5542 = bpstat_stop_status (get_current_regcache ()->aspace (),
5543 ecs
->event_thread
->stop_pc (),
5544 ecs
->event_thread
, &ecs
->ws
);
5546 if (handle_stop_requested (ecs
))
5549 /* If no catchpoint triggered for this, then keep going. Note
5550 that we're interested in knowing the bpstat actually causes a
5551 stop, not just if it may explain the signal. Software
5552 watchpoints, for example, always appear in the bpstat. */
5553 if (!bpstat_causes_stop (ecs
->event_thread
->control
.stop_bpstat
))
5556 = (follow_fork_mode_string
== follow_fork_mode_child
);
5558 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
5560 process_stratum_target
*targ
5561 = ecs
->event_thread
->inf
->process_target ();
5563 bool should_resume
= follow_fork ();
5565 /* Note that one of these may be an invalid pointer,
5566 depending on detach_fork. */
5567 thread_info
*parent
= ecs
->event_thread
;
5569 = find_thread_ptid (targ
, ecs
->ws
.value
.related_pid
);
5571 /* At this point, the parent is marked running, and the
5572 child is marked stopped. */
5574 /* If not resuming the parent, mark it stopped. */
5575 if (follow_child
&& !detach_fork
&& !non_stop
&& !sched_multi
)
5576 parent
->set_running (false);
5578 /* If resuming the child, mark it running. */
5579 if (follow_child
|| (!detach_fork
&& (non_stop
|| sched_multi
)))
5580 child
->set_running (true);
5582 /* In non-stop mode, also resume the other branch. */
5583 if (!detach_fork
&& (non_stop
5584 || (sched_multi
&& target_is_non_stop_p ())))
5587 switch_to_thread (parent
);
5589 switch_to_thread (child
);
5591 ecs
->event_thread
= inferior_thread ();
5592 ecs
->ptid
= inferior_ptid
;
5597 switch_to_thread (child
);
5599 switch_to_thread (parent
);
5601 ecs
->event_thread
= inferior_thread ();
5602 ecs
->ptid
= inferior_ptid
;
5610 process_event_stop_test (ecs
);
5613 case TARGET_WAITKIND_VFORK_DONE
:
5614 /* Done with the shared memory region. Re-insert breakpoints in
5615 the parent, and keep going. */
5617 context_switch (ecs
);
5619 current_inferior ()->waiting_for_vfork_done
= 0;
5620 current_inferior ()->pspace
->breakpoints_not_allowed
= 0;
5622 if (handle_stop_requested (ecs
))
5625 /* This also takes care of reinserting breakpoints in the
5626 previously locked inferior. */
5630 case TARGET_WAITKIND_EXECD
:
5632 /* Note we can't read registers yet (the stop_pc), because we
5633 don't yet know the inferior's post-exec architecture.
5634 'stop_pc' is explicitly read below instead. */
5635 switch_to_thread_no_regs (ecs
->event_thread
);
5637 /* Do whatever is necessary to the parent branch of the vfork. */
5638 handle_vfork_child_exec_or_exit (1);
5640 /* This causes the eventpoints and symbol table to be reset.
5641 Must do this now, before trying to determine whether to
5643 follow_exec (inferior_ptid
, ecs
->ws
.value
.execd_pathname
);
5645 /* In follow_exec we may have deleted the original thread and
5646 created a new one. Make sure that the event thread is the
5647 execd thread for that case (this is a nop otherwise). */
5648 ecs
->event_thread
= inferior_thread ();
5650 ecs
->event_thread
->set_stop_pc
5651 (regcache_read_pc (get_thread_regcache (ecs
->event_thread
)));
5653 ecs
->event_thread
->control
.stop_bpstat
5654 = bpstat_stop_status (get_current_regcache ()->aspace (),
5655 ecs
->event_thread
->stop_pc (),
5656 ecs
->event_thread
, &ecs
->ws
);
5658 /* Note that this may be referenced from inside
5659 bpstat_stop_status above, through inferior_has_execd. */
5660 xfree (ecs
->ws
.value
.execd_pathname
);
5661 ecs
->ws
.value
.execd_pathname
= NULL
;
5663 if (handle_stop_requested (ecs
))
5666 /* If no catchpoint triggered for this, then keep going. */
5667 if (!bpstat_causes_stop (ecs
->event_thread
->control
.stop_bpstat
))
5669 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
5673 process_event_stop_test (ecs
);
5676 /* Be careful not to try to gather much state about a thread
5677 that's in a syscall. It's frequently a losing proposition. */
5678 case TARGET_WAITKIND_SYSCALL_ENTRY
:
5679 /* Getting the current syscall number. */
5680 if (handle_syscall_event (ecs
) == 0)
5681 process_event_stop_test (ecs
);
5684 /* Before examining the threads further, step this thread to
5685 get it entirely out of the syscall. (We get notice of the
5686 event when the thread is just on the verge of exiting a
5687 syscall. Stepping one instruction seems to get it back
5689 case TARGET_WAITKIND_SYSCALL_RETURN
:
5690 if (handle_syscall_event (ecs
) == 0)
5691 process_event_stop_test (ecs
);
5694 case TARGET_WAITKIND_STOPPED
:
5695 handle_signal_stop (ecs
);
5698 case TARGET_WAITKIND_NO_HISTORY
:
5699 /* Reverse execution: target ran out of history info. */
5701 /* Switch to the stopped thread. */
5702 context_switch (ecs
);
5703 infrun_debug_printf ("stopped");
5705 delete_just_stopped_threads_single_step_breakpoints ();
5706 ecs
->event_thread
->set_stop_pc
5707 (regcache_read_pc (get_thread_regcache (inferior_thread ())));
5709 if (handle_stop_requested (ecs
))
5712 gdb::observers::no_history
.notify ();
5718 /* Restart threads back to what they were trying to do back when we
5719 paused them for an in-line step-over. The EVENT_THREAD thread is
5723 restart_threads (struct thread_info
*event_thread
)
5725 /* In case the instruction just stepped spawned a new thread. */
5726 update_thread_list ();
5728 for (thread_info
*tp
: all_non_exited_threads ())
5730 if (tp
->inf
->detaching
)
5732 infrun_debug_printf ("restart threads: [%s] inferior detaching",
5733 target_pid_to_str (tp
->ptid
).c_str ());
5737 switch_to_thread_no_regs (tp
);
5739 if (tp
== event_thread
)
5741 infrun_debug_printf ("restart threads: [%s] is event thread",
5742 target_pid_to_str (tp
->ptid
).c_str ());
5746 if (!(tp
->state
== THREAD_RUNNING
|| tp
->control
.in_infcall
))
5748 infrun_debug_printf ("restart threads: [%s] not meant to be running",
5749 target_pid_to_str (tp
->ptid
).c_str ());
5755 infrun_debug_printf ("restart threads: [%s] resumed",
5756 target_pid_to_str (tp
->ptid
).c_str ());
5757 gdb_assert (tp
->executing
|| tp
->has_pending_waitstatus ());
5761 if (thread_is_in_step_over_chain (tp
))
5763 infrun_debug_printf ("restart threads: [%s] needs step-over",
5764 target_pid_to_str (tp
->ptid
).c_str ());
5765 gdb_assert (!tp
->resumed ());
5770 if (tp
->has_pending_waitstatus ())
5772 infrun_debug_printf ("restart threads: [%s] has pending status",
5773 target_pid_to_str (tp
->ptid
).c_str ());
5774 tp
->set_resumed (true);
5778 gdb_assert (!tp
->stop_requested
);
5780 /* If some thread needs to start a step-over at this point, it
5781 should still be in the step-over queue, and thus skipped
5783 if (thread_still_needs_step_over (tp
))
5785 internal_error (__FILE__
, __LINE__
,
5786 "thread [%s] needs a step-over, but not in "
5787 "step-over queue\n",
5788 target_pid_to_str (tp
->ptid
).c_str ());
5791 if (currently_stepping (tp
))
5793 infrun_debug_printf ("restart threads: [%s] was stepping",
5794 target_pid_to_str (tp
->ptid
).c_str ());
5795 keep_going_stepped_thread (tp
);
5799 struct execution_control_state ecss
;
5800 struct execution_control_state
*ecs
= &ecss
;
5802 infrun_debug_printf ("restart threads: [%s] continuing",
5803 target_pid_to_str (tp
->ptid
).c_str ());
5804 reset_ecs (ecs
, tp
);
5805 switch_to_thread (tp
);
5806 keep_going_pass_signal (ecs
);
5811 /* Callback for iterate_over_threads. Find a resumed thread that has
5812 a pending waitstatus. */
5815 resumed_thread_with_pending_status (struct thread_info
*tp
,
5818 return tp
->resumed () && tp
->has_pending_waitstatus ();
5821 /* Called when we get an event that may finish an in-line or
5822 out-of-line (displaced stepping) step-over started previously.
5823 Return true if the event is processed and we should go back to the
5824 event loop; false if the caller should continue processing the
5828 finish_step_over (struct execution_control_state
*ecs
)
5830 displaced_step_finish (ecs
->event_thread
, ecs
->event_thread
->stop_signal ());
5832 bool had_step_over_info
= step_over_info_valid_p ();
5834 if (had_step_over_info
)
5836 /* If we're stepping over a breakpoint with all threads locked,
5837 then only the thread that was stepped should be reporting
5839 gdb_assert (ecs
->event_thread
->control
.trap_expected
);
5841 clear_step_over_info ();
5844 if (!target_is_non_stop_p ())
5847 /* Start a new step-over in another thread if there's one that
5851 /* If we were stepping over a breakpoint before, and haven't started
5852 a new in-line step-over sequence, then restart all other threads
5853 (except the event thread). We can't do this in all-stop, as then
5854 e.g., we wouldn't be able to issue any other remote packet until
5855 these other threads stop. */
5856 if (had_step_over_info
&& !step_over_info_valid_p ())
5858 struct thread_info
*pending
;
5860 /* If we only have threads with pending statuses, the restart
5861 below won't restart any thread and so nothing re-inserts the
5862 breakpoint we just stepped over. But we need it inserted
5863 when we later process the pending events, otherwise if
5864 another thread has a pending event for this breakpoint too,
5865 we'd discard its event (because the breakpoint that
5866 originally caused the event was no longer inserted). */
5867 context_switch (ecs
);
5868 insert_breakpoints ();
5870 restart_threads (ecs
->event_thread
);
5872 /* If we have events pending, go through handle_inferior_event
5873 again, picking up a pending event at random. This avoids
5874 thread starvation. */
5876 /* But not if we just stepped over a watchpoint in order to let
5877 the instruction execute so we can evaluate its expression.
5878 The set of watchpoints that triggered is recorded in the
5879 breakpoint objects themselves (see bp->watchpoint_triggered).
5880 If we processed another event first, that other event could
5881 clobber this info. */
5882 if (ecs
->event_thread
->stepping_over_watchpoint
)
5885 pending
= iterate_over_threads (resumed_thread_with_pending_status
,
5887 if (pending
!= NULL
)
5889 struct thread_info
*tp
= ecs
->event_thread
;
5890 struct regcache
*regcache
;
5892 infrun_debug_printf ("found resumed threads with "
5893 "pending events, saving status");
5895 gdb_assert (pending
!= tp
);
5897 /* Record the event thread's event for later. */
5898 save_waitstatus (tp
, &ecs
->ws
);
5899 /* This was cleared early, by handle_inferior_event. Set it
5900 so this pending event is considered by
5902 tp
->set_resumed (true);
5904 gdb_assert (!tp
->executing
);
5906 regcache
= get_thread_regcache (tp
);
5907 tp
->set_stop_pc (regcache_read_pc (regcache
));
5909 infrun_debug_printf ("saved stop_pc=%s for %s "
5910 "(currently_stepping=%d)",
5911 paddress (target_gdbarch (), tp
->stop_pc ()),
5912 target_pid_to_str (tp
->ptid
).c_str (),
5913 currently_stepping (tp
));
5915 /* This in-line step-over finished; clear this so we won't
5916 start a new one. This is what handle_signal_stop would
5917 do, if we returned false. */
5918 tp
->stepping_over_breakpoint
= 0;
5920 /* Wake up the event loop again. */
5921 mark_async_event_handler (infrun_async_inferior_event_token
);
5923 prepare_to_wait (ecs
);
5931 /* Come here when the program has stopped with a signal. */
5934 handle_signal_stop (struct execution_control_state
*ecs
)
5936 struct frame_info
*frame
;
5937 struct gdbarch
*gdbarch
;
5938 int stopped_by_watchpoint
;
5939 enum stop_kind stop_soon
;
5942 gdb_assert (ecs
->ws
.kind
== TARGET_WAITKIND_STOPPED
);
5944 ecs
->event_thread
->set_stop_signal (ecs
->ws
.value
.sig
);
5946 /* Do we need to clean up the state of a thread that has
5947 completed a displaced single-step? (Doing so usually affects
5948 the PC, so do it here, before we set stop_pc.) */
5949 if (finish_step_over (ecs
))
5952 /* If we either finished a single-step or hit a breakpoint, but
5953 the user wanted this thread to be stopped, pretend we got a
5954 SIG0 (generic unsignaled stop). */
5955 if (ecs
->event_thread
->stop_requested
5956 && ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
)
5957 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
5959 ecs
->event_thread
->set_stop_pc
5960 (regcache_read_pc (get_thread_regcache (ecs
->event_thread
)));
5962 context_switch (ecs
);
5964 if (deprecated_context_hook
)
5965 deprecated_context_hook (ecs
->event_thread
->global_num
);
5969 struct regcache
*regcache
= get_thread_regcache (ecs
->event_thread
);
5970 struct gdbarch
*reg_gdbarch
= regcache
->arch ();
5973 ("stop_pc=%s", paddress (reg_gdbarch
, ecs
->event_thread
->stop_pc ()));
5974 if (target_stopped_by_watchpoint ())
5978 infrun_debug_printf ("stopped by watchpoint");
5980 if (target_stopped_data_address (current_inferior ()->top_target (),
5982 infrun_debug_printf ("stopped data address=%s",
5983 paddress (reg_gdbarch
, addr
));
5985 infrun_debug_printf ("(no data address available)");
5989 /* This is originated from start_remote(), start_inferior() and
5990 shared libraries hook functions. */
5991 stop_soon
= get_inferior_stop_soon (ecs
);
5992 if (stop_soon
== STOP_QUIETLY
|| stop_soon
== STOP_QUIETLY_REMOTE
)
5994 infrun_debug_printf ("quietly stopped");
5995 stop_print_frame
= true;
6000 /* This originates from attach_command(). We need to overwrite
6001 the stop_signal here, because some kernels don't ignore a
6002 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
6003 See more comments in inferior.h. On the other hand, if we
6004 get a non-SIGSTOP, report it to the user - assume the backend
6005 will handle the SIGSTOP if it should show up later.
6007 Also consider that the attach is complete when we see a
6008 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
6009 target extended-remote report it instead of a SIGSTOP
6010 (e.g. gdbserver). We already rely on SIGTRAP being our
6011 signal, so this is no exception.
6013 Also consider that the attach is complete when we see a
6014 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
6015 the target to stop all threads of the inferior, in case the
6016 low level attach operation doesn't stop them implicitly. If
6017 they weren't stopped implicitly, then the stub will report a
6018 GDB_SIGNAL_0, meaning: stopped for no particular reason
6019 other than GDB's request. */
6020 if (stop_soon
== STOP_QUIETLY_NO_SIGSTOP
6021 && (ecs
->event_thread
->stop_signal () == GDB_SIGNAL_STOP
6022 || ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
6023 || ecs
->event_thread
->stop_signal () == GDB_SIGNAL_0
))
6025 stop_print_frame
= true;
6027 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
6031 /* At this point, get hold of the now-current thread's frame. */
6032 frame
= get_current_frame ();
6033 gdbarch
= get_frame_arch (frame
);
6035 /* Pull the single step breakpoints out of the target. */
6036 if (ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
)
6038 struct regcache
*regcache
;
6041 regcache
= get_thread_regcache (ecs
->event_thread
);
6042 const address_space
*aspace
= regcache
->aspace ();
6044 pc
= regcache_read_pc (regcache
);
6046 /* However, before doing so, if this single-step breakpoint was
6047 actually for another thread, set this thread up for moving
6049 if (!thread_has_single_step_breakpoint_here (ecs
->event_thread
,
6052 if (single_step_breakpoint_inserted_here_p (aspace
, pc
))
6054 infrun_debug_printf ("[%s] hit another thread's single-step "
6056 target_pid_to_str (ecs
->ptid
).c_str ());
6057 ecs
->hit_singlestep_breakpoint
= 1;
6062 infrun_debug_printf ("[%s] hit its single-step breakpoint",
6063 target_pid_to_str (ecs
->ptid
).c_str ());
6066 delete_just_stopped_threads_single_step_breakpoints ();
6068 if (ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
6069 && ecs
->event_thread
->control
.trap_expected
6070 && ecs
->event_thread
->stepping_over_watchpoint
)
6071 stopped_by_watchpoint
= 0;
6073 stopped_by_watchpoint
= watchpoints_triggered (&ecs
->ws
);
6075 /* If necessary, step over this watchpoint. We'll be back to display
6077 if (stopped_by_watchpoint
6078 && (target_have_steppable_watchpoint ()
6079 || gdbarch_have_nonsteppable_watchpoint (gdbarch
)))
6081 /* At this point, we are stopped at an instruction which has
6082 attempted to write to a piece of memory under control of
6083 a watchpoint. The instruction hasn't actually executed
6084 yet. If we were to evaluate the watchpoint expression
6085 now, we would get the old value, and therefore no change
6086 would seem to have occurred.
6088 In order to make watchpoints work `right', we really need
6089 to complete the memory write, and then evaluate the
6090 watchpoint expression. We do this by single-stepping the
6093 It may not be necessary to disable the watchpoint to step over
6094 it. For example, the PA can (with some kernel cooperation)
6095 single step over a watchpoint without disabling the watchpoint.
6097 It is far more common to need to disable a watchpoint to step
6098 the inferior over it. If we have non-steppable watchpoints,
6099 we must disable the current watchpoint; it's simplest to
6100 disable all watchpoints.
6102 Any breakpoint at PC must also be stepped over -- if there's
6103 one, it will have already triggered before the watchpoint
6104 triggered, and we either already reported it to the user, or
6105 it didn't cause a stop and we called keep_going. In either
6106 case, if there was a breakpoint at PC, we must be trying to
6108 ecs
->event_thread
->stepping_over_watchpoint
= 1;
6113 ecs
->event_thread
->stepping_over_breakpoint
= 0;
6114 ecs
->event_thread
->stepping_over_watchpoint
= 0;
6115 bpstat_clear (&ecs
->event_thread
->control
.stop_bpstat
);
6116 ecs
->event_thread
->control
.stop_step
= 0;
6117 stop_print_frame
= true;
6118 stopped_by_random_signal
= 0;
6119 bpstat stop_chain
= NULL
;
6121 /* Hide inlined functions starting here, unless we just performed stepi or
6122 nexti. After stepi and nexti, always show the innermost frame (not any
6123 inline function call sites). */
6124 if (ecs
->event_thread
->control
.step_range_end
!= 1)
6126 const address_space
*aspace
6127 = get_thread_regcache (ecs
->event_thread
)->aspace ();
6129 /* skip_inline_frames is expensive, so we avoid it if we can
6130 determine that the address is one where functions cannot have
6131 been inlined. This improves performance with inferiors that
6132 load a lot of shared libraries, because the solib event
6133 breakpoint is defined as the address of a function (i.e. not
6134 inline). Note that we have to check the previous PC as well
6135 as the current one to catch cases when we have just
6136 single-stepped off a breakpoint prior to reinstating it.
6137 Note that we're assuming that the code we single-step to is
6138 not inline, but that's not definitive: there's nothing
6139 preventing the event breakpoint function from containing
6140 inlined code, and the single-step ending up there. If the
6141 user had set a breakpoint on that inlined code, the missing
6142 skip_inline_frames call would break things. Fortunately
6143 that's an extremely unlikely scenario. */
6144 if (!pc_at_non_inline_function (aspace
,
6145 ecs
->event_thread
->stop_pc (),
6147 && !(ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
6148 && ecs
->event_thread
->control
.trap_expected
6149 && pc_at_non_inline_function (aspace
,
6150 ecs
->event_thread
->prev_pc
,
6153 stop_chain
= build_bpstat_chain (aspace
,
6154 ecs
->event_thread
->stop_pc (),
6156 skip_inline_frames (ecs
->event_thread
, stop_chain
);
6158 /* Re-fetch current thread's frame in case that invalidated
6160 frame
= get_current_frame ();
6161 gdbarch
= get_frame_arch (frame
);
6165 if (ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
6166 && ecs
->event_thread
->control
.trap_expected
6167 && gdbarch_single_step_through_delay_p (gdbarch
)
6168 && currently_stepping (ecs
->event_thread
))
6170 /* We're trying to step off a breakpoint. Turns out that we're
6171 also on an instruction that needs to be stepped multiple
6172 times before it's been fully executing. E.g., architectures
6173 with a delay slot. It needs to be stepped twice, once for
6174 the instruction and once for the delay slot. */
6175 int step_through_delay
6176 = gdbarch_single_step_through_delay (gdbarch
, frame
);
6178 if (step_through_delay
)
6179 infrun_debug_printf ("step through delay");
6181 if (ecs
->event_thread
->control
.step_range_end
== 0
6182 && step_through_delay
)
6184 /* The user issued a continue when stopped at a breakpoint.
6185 Set up for another trap and get out of here. */
6186 ecs
->event_thread
->stepping_over_breakpoint
= 1;
6190 else if (step_through_delay
)
6192 /* The user issued a step when stopped at a breakpoint.
6193 Maybe we should stop, maybe we should not - the delay
6194 slot *might* correspond to a line of source. In any
6195 case, don't decide that here, just set
6196 ecs->stepping_over_breakpoint, making sure we
6197 single-step again before breakpoints are re-inserted. */
6198 ecs
->event_thread
->stepping_over_breakpoint
= 1;
6202 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
6203 handles this event. */
6204 ecs
->event_thread
->control
.stop_bpstat
6205 = bpstat_stop_status (get_current_regcache ()->aspace (),
6206 ecs
->event_thread
->stop_pc (),
6207 ecs
->event_thread
, &ecs
->ws
, stop_chain
);
6209 /* Following in case break condition called a
6211 stop_print_frame
= true;
6213 /* This is where we handle "moribund" watchpoints. Unlike
6214 software breakpoints traps, hardware watchpoint traps are
6215 always distinguishable from random traps. If no high-level
6216 watchpoint is associated with the reported stop data address
6217 anymore, then the bpstat does not explain the signal ---
6218 simply make sure to ignore it if `stopped_by_watchpoint' is
6221 if (ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
6222 && !bpstat_explains_signal (ecs
->event_thread
->control
.stop_bpstat
,
6224 && stopped_by_watchpoint
)
6226 infrun_debug_printf ("no user watchpoint explains watchpoint SIGTRAP, "
6230 /* NOTE: cagney/2003-03-29: These checks for a random signal
6231 at one stage in the past included checks for an inferior
6232 function call's call dummy's return breakpoint. The original
6233 comment, that went with the test, read:
6235 ``End of a stack dummy. Some systems (e.g. Sony news) give
6236 another signal besides SIGTRAP, so check here as well as
6239 If someone ever tries to get call dummys on a
6240 non-executable stack to work (where the target would stop
6241 with something like a SIGSEGV), then those tests might need
6242 to be re-instated. Given, however, that the tests were only
6243 enabled when momentary breakpoints were not being used, I
6244 suspect that it won't be the case.
6246 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
6247 be necessary for call dummies on a non-executable stack on
6250 /* See if the breakpoints module can explain the signal. */
6252 = !bpstat_explains_signal (ecs
->event_thread
->control
.stop_bpstat
,
6253 ecs
->event_thread
->stop_signal ());
6255 /* Maybe this was a trap for a software breakpoint that has since
6257 if (random_signal
&& target_stopped_by_sw_breakpoint ())
6259 if (gdbarch_program_breakpoint_here_p (gdbarch
,
6260 ecs
->event_thread
->stop_pc ()))
6262 struct regcache
*regcache
;
6265 /* Re-adjust PC to what the program would see if GDB was not
6267 regcache
= get_thread_regcache (ecs
->event_thread
);
6268 decr_pc
= gdbarch_decr_pc_after_break (gdbarch
);
6271 gdb::optional
<scoped_restore_tmpl
<int>>
6272 restore_operation_disable
;
6274 if (record_full_is_used ())
6275 restore_operation_disable
.emplace
6276 (record_full_gdb_operation_disable_set ());
6278 regcache_write_pc (regcache
,
6279 ecs
->event_thread
->stop_pc () + decr_pc
);
6284 /* A delayed software breakpoint event. Ignore the trap. */
6285 infrun_debug_printf ("delayed software breakpoint trap, ignoring");
6290 /* Maybe this was a trap for a hardware breakpoint/watchpoint that
6291 has since been removed. */
6292 if (random_signal
&& target_stopped_by_hw_breakpoint ())
6294 /* A delayed hardware breakpoint event. Ignore the trap. */
6295 infrun_debug_printf ("delayed hardware breakpoint/watchpoint "
6300 /* If not, perhaps stepping/nexting can. */
6302 random_signal
= !(ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
6303 && currently_stepping (ecs
->event_thread
));
6305 /* Perhaps the thread hit a single-step breakpoint of _another_
6306 thread. Single-step breakpoints are transparent to the
6307 breakpoints module. */
6309 random_signal
= !ecs
->hit_singlestep_breakpoint
;
6311 /* No? Perhaps we got a moribund watchpoint. */
6313 random_signal
= !stopped_by_watchpoint
;
6315 /* Always stop if the user explicitly requested this thread to
6317 if (ecs
->event_thread
->stop_requested
)
6320 infrun_debug_printf ("user-requested stop");
6323 /* For the program's own signals, act according to
6324 the signal handling tables. */
6328 /* Signal not for debugging purposes. */
6329 enum gdb_signal stop_signal
= ecs
->event_thread
->stop_signal ();
6331 infrun_debug_printf ("random signal (%s)",
6332 gdb_signal_to_symbol_string (stop_signal
));
6334 stopped_by_random_signal
= 1;
6336 /* Always stop on signals if we're either just gaining control
6337 of the program, or the user explicitly requested this thread
6338 to remain stopped. */
6339 if (stop_soon
!= NO_STOP_QUIETLY
6340 || ecs
->event_thread
->stop_requested
6341 || signal_stop_state (ecs
->event_thread
->stop_signal ()))
6347 /* Notify observers the signal has "handle print" set. Note we
6348 returned early above if stopping; normal_stop handles the
6349 printing in that case. */
6350 if (signal_print
[ecs
->event_thread
->stop_signal ()])
6352 /* The signal table tells us to print about this signal. */
6353 target_terminal::ours_for_output ();
6354 gdb::observers::signal_received
.notify (ecs
->event_thread
->stop_signal ());
6355 target_terminal::inferior ();
6358 /* Clear the signal if it should not be passed. */
6359 if (signal_program
[ecs
->event_thread
->stop_signal ()] == 0)
6360 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
6362 if (ecs
->event_thread
->prev_pc
== ecs
->event_thread
->stop_pc ()
6363 && ecs
->event_thread
->control
.trap_expected
6364 && ecs
->event_thread
->control
.step_resume_breakpoint
== NULL
)
6366 /* We were just starting a new sequence, attempting to
6367 single-step off of a breakpoint and expecting a SIGTRAP.
6368 Instead this signal arrives. This signal will take us out
6369 of the stepping range so GDB needs to remember to, when
6370 the signal handler returns, resume stepping off that
6372 /* To simplify things, "continue" is forced to use the same
6373 code paths as single-step - set a breakpoint at the
6374 signal return address and then, once hit, step off that
6376 infrun_debug_printf ("signal arrived while stepping over breakpoint");
6378 insert_hp_step_resume_breakpoint_at_frame (frame
);
6379 ecs
->event_thread
->step_after_step_resume_breakpoint
= 1;
6380 /* Reset trap_expected to ensure breakpoints are re-inserted. */
6381 ecs
->event_thread
->control
.trap_expected
= 0;
6383 /* If we were nexting/stepping some other thread, switch to
6384 it, so that we don't continue it, losing control. */
6385 if (!switch_back_to_stepped_thread (ecs
))
6390 if (ecs
->event_thread
->stop_signal () != GDB_SIGNAL_0
6391 && (pc_in_thread_step_range (ecs
->event_thread
->stop_pc (),
6393 || ecs
->event_thread
->control
.step_range_end
== 1)
6394 && frame_id_eq (get_stack_frame_id (frame
),
6395 ecs
->event_thread
->control
.step_stack_frame_id
)
6396 && ecs
->event_thread
->control
.step_resume_breakpoint
== NULL
)
6398 /* The inferior is about to take a signal that will take it
6399 out of the single step range. Set a breakpoint at the
6400 current PC (which is presumably where the signal handler
6401 will eventually return) and then allow the inferior to
6404 Note that this is only needed for a signal delivered
6405 while in the single-step range. Nested signals aren't a
6406 problem as they eventually all return. */
6407 infrun_debug_printf ("signal may take us out of single-step range");
6409 clear_step_over_info ();
6410 insert_hp_step_resume_breakpoint_at_frame (frame
);
6411 ecs
->event_thread
->step_after_step_resume_breakpoint
= 1;
6412 /* Reset trap_expected to ensure breakpoints are re-inserted. */
6413 ecs
->event_thread
->control
.trap_expected
= 0;
6418 /* Note: step_resume_breakpoint may be non-NULL. This occurs
6419 when either there's a nested signal, or when there's a
6420 pending signal enabled just as the signal handler returns
6421 (leaving the inferior at the step-resume-breakpoint without
6422 actually executing it). Either way continue until the
6423 breakpoint is really hit. */
6425 if (!switch_back_to_stepped_thread (ecs
))
6427 infrun_debug_printf ("random signal, keep going");
6434 process_event_stop_test (ecs
);
6437 /* Come here when we've got some debug event / signal we can explain
6438 (IOW, not a random signal), and test whether it should cause a
6439 stop, or whether we should resume the inferior (transparently).
6440 E.g., could be a breakpoint whose condition evaluates false; we
6441 could be still stepping within the line; etc. */
6444 process_event_stop_test (struct execution_control_state
*ecs
)
6446 struct symtab_and_line stop_pc_sal
;
6447 struct frame_info
*frame
;
6448 struct gdbarch
*gdbarch
;
6449 CORE_ADDR jmp_buf_pc
;
6450 struct bpstat_what what
;
6452 /* Handle cases caused by hitting a breakpoint. */
6454 frame
= get_current_frame ();
6455 gdbarch
= get_frame_arch (frame
);
6457 what
= bpstat_what (ecs
->event_thread
->control
.stop_bpstat
);
6459 if (what
.call_dummy
)
6461 stop_stack_dummy
= what
.call_dummy
;
6464 /* A few breakpoint types have callbacks associated (e.g.,
6465 bp_jit_event). Run them now. */
6466 bpstat_run_callbacks (ecs
->event_thread
->control
.stop_bpstat
);
6468 /* If we hit an internal event that triggers symbol changes, the
6469 current frame will be invalidated within bpstat_what (e.g., if we
6470 hit an internal solib event). Re-fetch it. */
6471 frame
= get_current_frame ();
6472 gdbarch
= get_frame_arch (frame
);
6474 switch (what
.main_action
)
6476 case BPSTAT_WHAT_SET_LONGJMP_RESUME
:
6477 /* If we hit the breakpoint at longjmp while stepping, we
6478 install a momentary breakpoint at the target of the
6481 infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME");
6483 ecs
->event_thread
->stepping_over_breakpoint
= 1;
6485 if (what
.is_longjmp
)
6487 struct value
*arg_value
;
6489 /* If we set the longjmp breakpoint via a SystemTap probe,
6490 then use it to extract the arguments. The destination PC
6491 is the third argument to the probe. */
6492 arg_value
= probe_safe_evaluate_at_pc (frame
, 2);
6495 jmp_buf_pc
= value_as_address (arg_value
);
6496 jmp_buf_pc
= gdbarch_addr_bits_remove (gdbarch
, jmp_buf_pc
);
6498 else if (!gdbarch_get_longjmp_target_p (gdbarch
)
6499 || !gdbarch_get_longjmp_target (gdbarch
,
6500 frame
, &jmp_buf_pc
))
6502 infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME "
6503 "(!gdbarch_get_longjmp_target)");
6508 /* Insert a breakpoint at resume address. */
6509 insert_longjmp_resume_breakpoint (gdbarch
, jmp_buf_pc
);
6512 check_exception_resume (ecs
, frame
);
6516 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME
:
6518 struct frame_info
*init_frame
;
6520 /* There are several cases to consider.
6522 1. The initiating frame no longer exists. In this case we
6523 must stop, because the exception or longjmp has gone too
6526 2. The initiating frame exists, and is the same as the
6527 current frame. We stop, because the exception or longjmp
6530 3. The initiating frame exists and is different from the
6531 current frame. This means the exception or longjmp has
6532 been caught beneath the initiating frame, so keep going.
6534 4. longjmp breakpoint has been placed just to protect
6535 against stale dummy frames and user is not interested in
6536 stopping around longjmps. */
6538 infrun_debug_printf ("BPSTAT_WHAT_CLEAR_LONGJMP_RESUME");
6540 gdb_assert (ecs
->event_thread
->control
.exception_resume_breakpoint
6542 delete_exception_resume_breakpoint (ecs
->event_thread
);
6544 if (what
.is_longjmp
)
6546 check_longjmp_breakpoint_for_call_dummy (ecs
->event_thread
);
6548 if (!frame_id_p (ecs
->event_thread
->initiating_frame
))
6556 init_frame
= frame_find_by_id (ecs
->event_thread
->initiating_frame
);
6560 struct frame_id current_id
6561 = get_frame_id (get_current_frame ());
6562 if (frame_id_eq (current_id
,
6563 ecs
->event_thread
->initiating_frame
))
6565 /* Case 2. Fall through. */
6575 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
6577 delete_step_resume_breakpoint (ecs
->event_thread
);
6579 end_stepping_range (ecs
);
6583 case BPSTAT_WHAT_SINGLE
:
6584 infrun_debug_printf ("BPSTAT_WHAT_SINGLE");
6585 ecs
->event_thread
->stepping_over_breakpoint
= 1;
6586 /* Still need to check other stuff, at least the case where we
6587 are stepping and step out of the right range. */
6590 case BPSTAT_WHAT_STEP_RESUME
:
6591 infrun_debug_printf ("BPSTAT_WHAT_STEP_RESUME");
6593 delete_step_resume_breakpoint (ecs
->event_thread
);
6594 if (ecs
->event_thread
->control
.proceed_to_finish
6595 && execution_direction
== EXEC_REVERSE
)
6597 struct thread_info
*tp
= ecs
->event_thread
;
6599 /* We are finishing a function in reverse, and just hit the
6600 step-resume breakpoint at the start address of the
6601 function, and we're almost there -- just need to back up
6602 by one more single-step, which should take us back to the
6604 tp
->control
.step_range_start
= tp
->control
.step_range_end
= 1;
6608 fill_in_stop_func (gdbarch
, ecs
);
6609 if (ecs
->event_thread
->stop_pc () == ecs
->stop_func_start
6610 && execution_direction
== EXEC_REVERSE
)
6612 /* We are stepping over a function call in reverse, and just
6613 hit the step-resume breakpoint at the start address of
6614 the function. Go back to single-stepping, which should
6615 take us back to the function call. */
6616 ecs
->event_thread
->stepping_over_breakpoint
= 1;
6622 case BPSTAT_WHAT_STOP_NOISY
:
6623 infrun_debug_printf ("BPSTAT_WHAT_STOP_NOISY");
6624 stop_print_frame
= true;
6626 /* Assume the thread stopped for a breakpoint. We'll still check
6627 whether a/the breakpoint is there when the thread is next
6629 ecs
->event_thread
->stepping_over_breakpoint
= 1;
6634 case BPSTAT_WHAT_STOP_SILENT
:
6635 infrun_debug_printf ("BPSTAT_WHAT_STOP_SILENT");
6636 stop_print_frame
= false;
6638 /* Assume the thread stopped for a breakpoint. We'll still check
6639 whether a/the breakpoint is there when the thread is next
6641 ecs
->event_thread
->stepping_over_breakpoint
= 1;
6645 case BPSTAT_WHAT_HP_STEP_RESUME
:
6646 infrun_debug_printf ("BPSTAT_WHAT_HP_STEP_RESUME");
6648 delete_step_resume_breakpoint (ecs
->event_thread
);
6649 if (ecs
->event_thread
->step_after_step_resume_breakpoint
)
6651 /* Back when the step-resume breakpoint was inserted, we
6652 were trying to single-step off a breakpoint. Go back to
6654 ecs
->event_thread
->step_after_step_resume_breakpoint
= 0;
6655 ecs
->event_thread
->stepping_over_breakpoint
= 1;
6661 case BPSTAT_WHAT_KEEP_CHECKING
:
6665 /* If we stepped a permanent breakpoint and we had a high priority
6666 step-resume breakpoint for the address we stepped, but we didn't
6667 hit it, then we must have stepped into the signal handler. The
6668 step-resume was only necessary to catch the case of _not_
6669 stepping into the handler, so delete it, and fall through to
6670 checking whether the step finished. */
6671 if (ecs
->event_thread
->stepped_breakpoint
)
6673 struct breakpoint
*sr_bp
6674 = ecs
->event_thread
->control
.step_resume_breakpoint
;
6677 && sr_bp
->loc
->permanent
6678 && sr_bp
->type
== bp_hp_step_resume
6679 && sr_bp
->loc
->address
== ecs
->event_thread
->prev_pc
)
6681 infrun_debug_printf ("stepped permanent breakpoint, stopped in handler");
6682 delete_step_resume_breakpoint (ecs
->event_thread
);
6683 ecs
->event_thread
->step_after_step_resume_breakpoint
= 0;
6687 /* We come here if we hit a breakpoint but should not stop for it.
6688 Possibly we also were stepping and should stop for that. So fall
6689 through and test for stepping. But, if not stepping, do not
6692 /* In all-stop mode, if we're currently stepping but have stopped in
6693 some other thread, we need to switch back to the stepped thread. */
6694 if (switch_back_to_stepped_thread (ecs
))
6697 if (ecs
->event_thread
->control
.step_resume_breakpoint
)
6699 infrun_debug_printf ("step-resume breakpoint is inserted");
6701 /* Having a step-resume breakpoint overrides anything
6702 else having to do with stepping commands until
6703 that breakpoint is reached. */
6708 if (ecs
->event_thread
->control
.step_range_end
== 0)
6710 infrun_debug_printf ("no stepping, continue");
6711 /* Likewise if we aren't even stepping. */
6716 /* Re-fetch current thread's frame in case the code above caused
6717 the frame cache to be re-initialized, making our FRAME variable
6718 a dangling pointer. */
6719 frame
= get_current_frame ();
6720 gdbarch
= get_frame_arch (frame
);
6721 fill_in_stop_func (gdbarch
, ecs
);
6723 /* If stepping through a line, keep going if still within it.
6725 Note that step_range_end is the address of the first instruction
6726 beyond the step range, and NOT the address of the last instruction
6729 Note also that during reverse execution, we may be stepping
6730 through a function epilogue and therefore must detect when
6731 the current-frame changes in the middle of a line. */
6733 if (pc_in_thread_step_range (ecs
->event_thread
->stop_pc (),
6735 && (execution_direction
!= EXEC_REVERSE
6736 || frame_id_eq (get_frame_id (frame
),
6737 ecs
->event_thread
->control
.step_frame_id
)))
6740 ("stepping inside range [%s-%s]",
6741 paddress (gdbarch
, ecs
->event_thread
->control
.step_range_start
),
6742 paddress (gdbarch
, ecs
->event_thread
->control
.step_range_end
));
6744 /* Tentatively re-enable range stepping; `resume' disables it if
6745 necessary (e.g., if we're stepping over a breakpoint or we
6746 have software watchpoints). */
6747 ecs
->event_thread
->control
.may_range_step
= 1;
6749 /* When stepping backward, stop at beginning of line range
6750 (unless it's the function entry point, in which case
6751 keep going back to the call point). */
6752 CORE_ADDR stop_pc
= ecs
->event_thread
->stop_pc ();
6753 if (stop_pc
== ecs
->event_thread
->control
.step_range_start
6754 && stop_pc
!= ecs
->stop_func_start
6755 && execution_direction
== EXEC_REVERSE
)
6756 end_stepping_range (ecs
);
6763 /* We stepped out of the stepping range. */
6765 /* If we are stepping at the source level and entered the runtime
6766 loader dynamic symbol resolution code...
6768 EXEC_FORWARD: we keep on single stepping until we exit the run
6769 time loader code and reach the callee's address.
6771 EXEC_REVERSE: we've already executed the callee (backward), and
6772 the runtime loader code is handled just like any other
6773 undebuggable function call. Now we need only keep stepping
6774 backward through the trampoline code, and that's handled further
6775 down, so there is nothing for us to do here. */
6777 if (execution_direction
!= EXEC_REVERSE
6778 && ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_UNDEBUGGABLE
6779 && in_solib_dynsym_resolve_code (ecs
->event_thread
->stop_pc ()))
6781 CORE_ADDR pc_after_resolver
=
6782 gdbarch_skip_solib_resolver (gdbarch
, ecs
->event_thread
->stop_pc ());
6784 infrun_debug_printf ("stepped into dynsym resolve code");
6786 if (pc_after_resolver
)
6788 /* Set up a step-resume breakpoint at the address
6789 indicated by SKIP_SOLIB_RESOLVER. */
6790 symtab_and_line sr_sal
;
6791 sr_sal
.pc
= pc_after_resolver
;
6792 sr_sal
.pspace
= get_frame_program_space (frame
);
6794 insert_step_resume_breakpoint_at_sal (gdbarch
,
6795 sr_sal
, null_frame_id
);
6802 /* Step through an indirect branch thunk. */
6803 if (ecs
->event_thread
->control
.step_over_calls
!= STEP_OVER_NONE
6804 && gdbarch_in_indirect_branch_thunk (gdbarch
,
6805 ecs
->event_thread
->stop_pc ()))
6807 infrun_debug_printf ("stepped into indirect branch thunk");
6812 if (ecs
->event_thread
->control
.step_range_end
!= 1
6813 && (ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_UNDEBUGGABLE
6814 || ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_ALL
)
6815 && get_frame_type (frame
) == SIGTRAMP_FRAME
)
6817 infrun_debug_printf ("stepped into signal trampoline");
6818 /* The inferior, while doing a "step" or "next", has ended up in
6819 a signal trampoline (either by a signal being delivered or by
6820 the signal handler returning). Just single-step until the
6821 inferior leaves the trampoline (either by calling the handler
6827 /* If we're in the return path from a shared library trampoline,
6828 we want to proceed through the trampoline when stepping. */
6829 /* macro/2012-04-25: This needs to come before the subroutine
6830 call check below as on some targets return trampolines look
6831 like subroutine calls (MIPS16 return thunks). */
6832 if (gdbarch_in_solib_return_trampoline (gdbarch
,
6833 ecs
->event_thread
->stop_pc (),
6834 ecs
->stop_func_name
)
6835 && ecs
->event_thread
->control
.step_over_calls
!= STEP_OVER_NONE
)
6837 /* Determine where this trampoline returns. */
6838 CORE_ADDR stop_pc
= ecs
->event_thread
->stop_pc ();
6839 CORE_ADDR real_stop_pc
6840 = gdbarch_skip_trampoline_code (gdbarch
, frame
, stop_pc
);
6842 infrun_debug_printf ("stepped into solib return tramp");
6844 /* Only proceed through if we know where it's going. */
6847 /* And put the step-breakpoint there and go until there. */
6848 symtab_and_line sr_sal
;
6849 sr_sal
.pc
= real_stop_pc
;
6850 sr_sal
.section
= find_pc_overlay (sr_sal
.pc
);
6851 sr_sal
.pspace
= get_frame_program_space (frame
);
6853 /* Do not specify what the fp should be when we stop since
6854 on some machines the prologue is where the new fp value
6856 insert_step_resume_breakpoint_at_sal (gdbarch
,
6857 sr_sal
, null_frame_id
);
6859 /* Restart without fiddling with the step ranges or
6866 /* Check for subroutine calls. The check for the current frame
6867 equalling the step ID is not necessary - the check of the
6868 previous frame's ID is sufficient - but it is a common case and
6869 cheaper than checking the previous frame's ID.
6871 NOTE: frame_id_eq will never report two invalid frame IDs as
6872 being equal, so to get into this block, both the current and
6873 previous frame must have valid frame IDs. */
6874 /* The outer_frame_id check is a heuristic to detect stepping
6875 through startup code. If we step over an instruction which
6876 sets the stack pointer from an invalid value to a valid value,
6877 we may detect that as a subroutine call from the mythical
6878 "outermost" function. This could be fixed by marking
6879 outermost frames as !stack_p,code_p,special_p. Then the
6880 initial outermost frame, before sp was valid, would
6881 have code_addr == &_start. See the comment in frame_id_eq
6883 if (!frame_id_eq (get_stack_frame_id (frame
),
6884 ecs
->event_thread
->control
.step_stack_frame_id
)
6885 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
6886 ecs
->event_thread
->control
.step_stack_frame_id
)
6887 && (!frame_id_eq (ecs
->event_thread
->control
.step_stack_frame_id
,
6889 || (ecs
->event_thread
->control
.step_start_function
6890 != find_pc_function (ecs
->event_thread
->stop_pc ())))))
6892 CORE_ADDR stop_pc
= ecs
->event_thread
->stop_pc ();
6893 CORE_ADDR real_stop_pc
;
6895 infrun_debug_printf ("stepped into subroutine");
6897 if (ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_NONE
)
6899 /* I presume that step_over_calls is only 0 when we're
6900 supposed to be stepping at the assembly language level
6901 ("stepi"). Just stop. */
6902 /* And this works the same backward as frontward. MVS */
6903 end_stepping_range (ecs
);
6907 /* Reverse stepping through solib trampolines. */
6909 if (execution_direction
== EXEC_REVERSE
6910 && ecs
->event_thread
->control
.step_over_calls
!= STEP_OVER_NONE
6911 && (gdbarch_skip_trampoline_code (gdbarch
, frame
, stop_pc
)
6912 || (ecs
->stop_func_start
== 0
6913 && in_solib_dynsym_resolve_code (stop_pc
))))
6915 /* Any solib trampoline code can be handled in reverse
6916 by simply continuing to single-step. We have already
6917 executed the solib function (backwards), and a few
6918 steps will take us back through the trampoline to the
6924 if (ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_ALL
)
6926 /* We're doing a "next".
6928 Normal (forward) execution: set a breakpoint at the
6929 callee's return address (the address at which the caller
6932 Reverse (backward) execution. set the step-resume
6933 breakpoint at the start of the function that we just
6934 stepped into (backwards), and continue to there. When we
6935 get there, we'll need to single-step back to the caller. */
6937 if (execution_direction
== EXEC_REVERSE
)
6939 /* If we're already at the start of the function, we've either
6940 just stepped backward into a single instruction function,
6941 or stepped back out of a signal handler to the first instruction
6942 of the function. Just keep going, which will single-step back
6944 if (ecs
->stop_func_start
!= stop_pc
&& ecs
->stop_func_start
!= 0)
6946 /* Normal function call return (static or dynamic). */
6947 symtab_and_line sr_sal
;
6948 sr_sal
.pc
= ecs
->stop_func_start
;
6949 sr_sal
.pspace
= get_frame_program_space (frame
);
6950 insert_step_resume_breakpoint_at_sal (gdbarch
,
6951 sr_sal
, null_frame_id
);
6955 insert_step_resume_breakpoint_at_caller (frame
);
6961 /* If we are in a function call trampoline (a stub between the
6962 calling routine and the real function), locate the real
6963 function. That's what tells us (a) whether we want to step
6964 into it at all, and (b) what prologue we want to run to the
6965 end of, if we do step into it. */
6966 real_stop_pc
= skip_language_trampoline (frame
, stop_pc
);
6967 if (real_stop_pc
== 0)
6968 real_stop_pc
= gdbarch_skip_trampoline_code (gdbarch
, frame
, stop_pc
);
6969 if (real_stop_pc
!= 0)
6970 ecs
->stop_func_start
= real_stop_pc
;
6972 if (real_stop_pc
!= 0 && in_solib_dynsym_resolve_code (real_stop_pc
))
6974 symtab_and_line sr_sal
;
6975 sr_sal
.pc
= ecs
->stop_func_start
;
6976 sr_sal
.pspace
= get_frame_program_space (frame
);
6978 insert_step_resume_breakpoint_at_sal (gdbarch
,
6979 sr_sal
, null_frame_id
);
6984 /* If we have line number information for the function we are
6985 thinking of stepping into and the function isn't on the skip
6988 If there are several symtabs at that PC (e.g. with include
6989 files), just want to know whether *any* of them have line
6990 numbers. find_pc_line handles this. */
6992 struct symtab_and_line tmp_sal
;
6994 tmp_sal
= find_pc_line (ecs
->stop_func_start
, 0);
6995 if (tmp_sal
.line
!= 0
6996 && !function_name_is_marked_for_skip (ecs
->stop_func_name
,
6998 && !inline_frame_is_marked_for_skip (true, ecs
->event_thread
))
7000 if (execution_direction
== EXEC_REVERSE
)
7001 handle_step_into_function_backward (gdbarch
, ecs
);
7003 handle_step_into_function (gdbarch
, ecs
);
7008 /* If we have no line number and the step-stop-if-no-debug is
7009 set, we stop the step so that the user has a chance to switch
7010 in assembly mode. */
7011 if (ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_UNDEBUGGABLE
7012 && step_stop_if_no_debug
)
7014 end_stepping_range (ecs
);
7018 if (execution_direction
== EXEC_REVERSE
)
7020 /* If we're already at the start of the function, we've either just
7021 stepped backward into a single instruction function without line
7022 number info, or stepped back out of a signal handler to the first
7023 instruction of the function without line number info. Just keep
7024 going, which will single-step back to the caller. */
7025 if (ecs
->stop_func_start
!= stop_pc
)
7027 /* Set a breakpoint at callee's start address.
7028 From there we can step once and be back in the caller. */
7029 symtab_and_line sr_sal
;
7030 sr_sal
.pc
= ecs
->stop_func_start
;
7031 sr_sal
.pspace
= get_frame_program_space (frame
);
7032 insert_step_resume_breakpoint_at_sal (gdbarch
,
7033 sr_sal
, null_frame_id
);
7037 /* Set a breakpoint at callee's return address (the address
7038 at which the caller will resume). */
7039 insert_step_resume_breakpoint_at_caller (frame
);
7045 /* Reverse stepping through solib trampolines. */
7047 if (execution_direction
== EXEC_REVERSE
7048 && ecs
->event_thread
->control
.step_over_calls
!= STEP_OVER_NONE
)
7050 CORE_ADDR stop_pc
= ecs
->event_thread
->stop_pc ();
7052 if (gdbarch_skip_trampoline_code (gdbarch
, frame
, stop_pc
)
7053 || (ecs
->stop_func_start
== 0
7054 && in_solib_dynsym_resolve_code (stop_pc
)))
7056 /* Any solib trampoline code can be handled in reverse
7057 by simply continuing to single-step. We have already
7058 executed the solib function (backwards), and a few
7059 steps will take us back through the trampoline to the
7064 else if (in_solib_dynsym_resolve_code (stop_pc
))
7066 /* Stepped backward into the solib dynsym resolver.
7067 Set a breakpoint at its start and continue, then
7068 one more step will take us out. */
7069 symtab_and_line sr_sal
;
7070 sr_sal
.pc
= ecs
->stop_func_start
;
7071 sr_sal
.pspace
= get_frame_program_space (frame
);
7072 insert_step_resume_breakpoint_at_sal (gdbarch
,
7073 sr_sal
, null_frame_id
);
7079 /* This always returns the sal for the inner-most frame when we are in a
7080 stack of inlined frames, even if GDB actually believes that it is in a
7081 more outer frame. This is checked for below by calls to
7082 inline_skipped_frames. */
7083 stop_pc_sal
= find_pc_line (ecs
->event_thread
->stop_pc (), 0);
7085 /* NOTE: tausq/2004-05-24: This if block used to be done before all
7086 the trampoline processing logic, however, there are some trampolines
7087 that have no names, so we should do trampoline handling first. */
7088 if (ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_UNDEBUGGABLE
7089 && ecs
->stop_func_name
== NULL
7090 && stop_pc_sal
.line
== 0)
7092 infrun_debug_printf ("stepped into undebuggable function");
7094 /* The inferior just stepped into, or returned to, an
7095 undebuggable function (where there is no debugging information
7096 and no line number corresponding to the address where the
7097 inferior stopped). Since we want to skip this kind of code,
7098 we keep going until the inferior returns from this
7099 function - unless the user has asked us not to (via
7100 set step-mode) or we no longer know how to get back
7101 to the call site. */
7102 if (step_stop_if_no_debug
7103 || !frame_id_p (frame_unwind_caller_id (frame
)))
7105 /* If we have no line number and the step-stop-if-no-debug
7106 is set, we stop the step so that the user has a chance to
7107 switch in assembly mode. */
7108 end_stepping_range (ecs
);
7113 /* Set a breakpoint at callee's return address (the address
7114 at which the caller will resume). */
7115 insert_step_resume_breakpoint_at_caller (frame
);
7121 if (ecs
->event_thread
->control
.step_range_end
== 1)
7123 /* It is stepi or nexti. We always want to stop stepping after
7125 infrun_debug_printf ("stepi/nexti");
7126 end_stepping_range (ecs
);
7130 if (stop_pc_sal
.line
== 0)
7132 /* We have no line number information. That means to stop
7133 stepping (does this always happen right after one instruction,
7134 when we do "s" in a function with no line numbers,
7135 or can this happen as a result of a return or longjmp?). */
7136 infrun_debug_printf ("line number info");
7137 end_stepping_range (ecs
);
7141 /* Look for "calls" to inlined functions, part one. If the inline
7142 frame machinery detected some skipped call sites, we have entered
7143 a new inline function. */
7145 if (frame_id_eq (get_frame_id (get_current_frame ()),
7146 ecs
->event_thread
->control
.step_frame_id
)
7147 && inline_skipped_frames (ecs
->event_thread
))
7149 infrun_debug_printf ("stepped into inlined function");
7151 symtab_and_line call_sal
= find_frame_sal (get_current_frame ());
7153 if (ecs
->event_thread
->control
.step_over_calls
!= STEP_OVER_ALL
)
7155 /* For "step", we're going to stop. But if the call site
7156 for this inlined function is on the same source line as
7157 we were previously stepping, go down into the function
7158 first. Otherwise stop at the call site. */
7160 if (call_sal
.line
== ecs
->event_thread
->current_line
7161 && call_sal
.symtab
== ecs
->event_thread
->current_symtab
)
7163 step_into_inline_frame (ecs
->event_thread
);
7164 if (inline_frame_is_marked_for_skip (false, ecs
->event_thread
))
7171 end_stepping_range (ecs
);
7176 /* For "next", we should stop at the call site if it is on a
7177 different source line. Otherwise continue through the
7178 inlined function. */
7179 if (call_sal
.line
== ecs
->event_thread
->current_line
7180 && call_sal
.symtab
== ecs
->event_thread
->current_symtab
)
7183 end_stepping_range (ecs
);
7188 /* Look for "calls" to inlined functions, part two. If we are still
7189 in the same real function we were stepping through, but we have
7190 to go further up to find the exact frame ID, we are stepping
7191 through a more inlined call beyond its call site. */
7193 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
7194 && !frame_id_eq (get_frame_id (get_current_frame ()),
7195 ecs
->event_thread
->control
.step_frame_id
)
7196 && stepped_in_from (get_current_frame (),
7197 ecs
->event_thread
->control
.step_frame_id
))
7199 infrun_debug_printf ("stepping through inlined function");
7201 if (ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_ALL
7202 || inline_frame_is_marked_for_skip (false, ecs
->event_thread
))
7205 end_stepping_range (ecs
);
7209 bool refresh_step_info
= true;
7210 if ((ecs
->event_thread
->stop_pc () == stop_pc_sal
.pc
)
7211 && (ecs
->event_thread
->current_line
!= stop_pc_sal
.line
7212 || ecs
->event_thread
->current_symtab
!= stop_pc_sal
.symtab
))
7214 /* We are at a different line. */
7216 if (stop_pc_sal
.is_stmt
)
7218 /* We are at the start of a statement.
7220 So stop. Note that we don't stop if we step into the middle of a
7221 statement. That is said to make things like for (;;) statements
7223 infrun_debug_printf ("stepped to a different line");
7224 end_stepping_range (ecs
);
7227 else if (frame_id_eq (get_frame_id (get_current_frame ()),
7228 ecs
->event_thread
->control
.step_frame_id
))
7230 /* We are not at the start of a statement, and we have not changed
7233 We ignore this line table entry, and continue stepping forward,
7234 looking for a better place to stop. */
7235 refresh_step_info
= false;
7236 infrun_debug_printf ("stepped to a different line, but "
7237 "it's not the start of a statement");
7241 /* We are not the start of a statement, and we have changed frame.
7243 We ignore this line table entry, and continue stepping forward,
7244 looking for a better place to stop. Keep refresh_step_info at
7245 true to note that the frame has changed, but ignore the line
7246 number to make sure we don't ignore a subsequent entry with the
7247 same line number. */
7248 stop_pc_sal
.line
= 0;
7249 infrun_debug_printf ("stepped to a different frame, but "
7250 "it's not the start of a statement");
7254 /* We aren't done stepping.
7256 Optimize by setting the stepping range to the line.
7257 (We might not be in the original line, but if we entered a
7258 new line in mid-statement, we continue stepping. This makes
7259 things like for(;;) statements work better.)
7261 If we entered a SAL that indicates a non-statement line table entry,
7262 then we update the stepping range, but we don't update the step info,
7263 which includes things like the line number we are stepping away from.
7264 This means we will stop when we find a line table entry that is marked
7265 as is-statement, even if it matches the non-statement one we just
7268 ecs
->event_thread
->control
.step_range_start
= stop_pc_sal
.pc
;
7269 ecs
->event_thread
->control
.step_range_end
= stop_pc_sal
.end
;
7270 ecs
->event_thread
->control
.may_range_step
= 1;
7271 if (refresh_step_info
)
7272 set_step_info (ecs
->event_thread
, frame
, stop_pc_sal
);
7274 infrun_debug_printf ("keep going");
7278 static bool restart_stepped_thread (process_stratum_target
*resume_target
,
7279 ptid_t resume_ptid
);
7281 /* In all-stop mode, if we're currently stepping but have stopped in
7282 some other thread, we may need to switch back to the stepped
7283 thread. Returns true we set the inferior running, false if we left
7284 it stopped (and the event needs further processing). */
7287 switch_back_to_stepped_thread (struct execution_control_state
*ecs
)
7289 if (!target_is_non_stop_p ())
7291 /* If any thread is blocked on some internal breakpoint, and we
7292 simply need to step over that breakpoint to get it going
7293 again, do that first. */
7295 /* However, if we see an event for the stepping thread, then we
7296 know all other threads have been moved past their breakpoints
7297 already. Let the caller check whether the step is finished,
7298 etc., before deciding to move it past a breakpoint. */
7299 if (ecs
->event_thread
->control
.step_range_end
!= 0)
7302 /* Check if the current thread is blocked on an incomplete
7303 step-over, interrupted by a random signal. */
7304 if (ecs
->event_thread
->control
.trap_expected
7305 && ecs
->event_thread
->stop_signal () != GDB_SIGNAL_TRAP
)
7308 ("need to finish step-over of [%s]",
7309 target_pid_to_str (ecs
->event_thread
->ptid
).c_str ());
7314 /* Check if the current thread is blocked by a single-step
7315 breakpoint of another thread. */
7316 if (ecs
->hit_singlestep_breakpoint
)
7318 infrun_debug_printf ("need to step [%s] over single-step breakpoint",
7319 target_pid_to_str (ecs
->ptid
).c_str ());
7324 /* If this thread needs yet another step-over (e.g., stepping
7325 through a delay slot), do it first before moving on to
7327 if (thread_still_needs_step_over (ecs
->event_thread
))
7330 ("thread [%s] still needs step-over",
7331 target_pid_to_str (ecs
->event_thread
->ptid
).c_str ());
7336 /* If scheduler locking applies even if not stepping, there's no
7337 need to walk over threads. Above we've checked whether the
7338 current thread is stepping. If some other thread not the
7339 event thread is stepping, then it must be that scheduler
7340 locking is not in effect. */
7341 if (schedlock_applies (ecs
->event_thread
))
7344 /* Otherwise, we no longer expect a trap in the current thread.
7345 Clear the trap_expected flag before switching back -- this is
7346 what keep_going does as well, if we call it. */
7347 ecs
->event_thread
->control
.trap_expected
= 0;
7349 /* Likewise, clear the signal if it should not be passed. */
7350 if (!signal_program
[ecs
->event_thread
->stop_signal ()])
7351 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
7353 if (restart_stepped_thread (ecs
->target
, ecs
->ptid
))
7355 prepare_to_wait (ecs
);
7359 switch_to_thread (ecs
->event_thread
);
7365 /* Look for the thread that was stepping, and resume it.
7366 RESUME_TARGET / RESUME_PTID indicate the set of threads the caller
7367 is resuming. Return true if a thread was started, false
7371 restart_stepped_thread (process_stratum_target
*resume_target
,
7374 /* Do all pending step-overs before actually proceeding with
7376 if (start_step_over ())
7379 for (thread_info
*tp
: all_threads_safe ())
7381 if (tp
->state
== THREAD_EXITED
)
7384 if (tp
->has_pending_waitstatus ())
7387 /* Ignore threads of processes the caller is not
7390 && (tp
->inf
->process_target () != resume_target
7391 || tp
->inf
->pid
!= resume_ptid
.pid ()))
7394 if (tp
->control
.trap_expected
)
7396 infrun_debug_printf ("switching back to stepped thread (step-over)");
7398 if (keep_going_stepped_thread (tp
))
7403 for (thread_info
*tp
: all_threads_safe ())
7405 if (tp
->state
== THREAD_EXITED
)
7408 if (tp
->has_pending_waitstatus ())
7411 /* Ignore threads of processes the caller is not
7414 && (tp
->inf
->process_target () != resume_target
7415 || tp
->inf
->pid
!= resume_ptid
.pid ()))
7418 /* Did we find the stepping thread? */
7419 if (tp
->control
.step_range_end
)
7421 infrun_debug_printf ("switching back to stepped thread (stepping)");
7423 if (keep_going_stepped_thread (tp
))
7434 restart_after_all_stop_detach (process_stratum_target
*proc_target
)
7436 /* Note we don't check target_is_non_stop_p() here, because the
7437 current inferior may no longer have a process_stratum target
7438 pushed, as we just detached. */
7440 /* See if we have a THREAD_RUNNING thread that need to be
7441 re-resumed. If we have any thread that is already executing,
7442 then we don't need to resume the target -- it is already been
7443 resumed. With the remote target (in all-stop), it's even
7444 impossible to issue another resumption if the target is already
7445 resumed, until the target reports a stop. */
7446 for (thread_info
*thr
: all_threads (proc_target
))
7448 if (thr
->state
!= THREAD_RUNNING
)
7451 /* If we have any thread that is already executing, then we
7452 don't need to resume the target -- it is already been
7457 /* If we have a pending event to process, skip resuming the
7458 target and go straight to processing it. */
7459 if (thr
->resumed () && thr
->has_pending_waitstatus ())
7463 /* Alright, we need to re-resume the target. If a thread was
7464 stepping, we need to restart it stepping. */
7465 if (restart_stepped_thread (proc_target
, minus_one_ptid
))
7468 /* Otherwise, find the first THREAD_RUNNING thread and resume
7470 for (thread_info
*thr
: all_threads (proc_target
))
7472 if (thr
->state
!= THREAD_RUNNING
)
7475 execution_control_state ecs
;
7476 reset_ecs (&ecs
, thr
);
7477 switch_to_thread (thr
);
7483 /* Set a previously stepped thread back to stepping. Returns true on
7484 success, false if the resume is not possible (e.g., the thread
7488 keep_going_stepped_thread (struct thread_info
*tp
)
7490 struct frame_info
*frame
;
7491 struct execution_control_state ecss
;
7492 struct execution_control_state
*ecs
= &ecss
;
7494 /* If the stepping thread exited, then don't try to switch back and
7495 resume it, which could fail in several different ways depending
7496 on the target. Instead, just keep going.
7498 We can find a stepping dead thread in the thread list in two
7501 - The target supports thread exit events, and when the target
7502 tries to delete the thread from the thread list, inferior_ptid
7503 pointed at the exiting thread. In such case, calling
7504 delete_thread does not really remove the thread from the list;
7505 instead, the thread is left listed, with 'exited' state.
7507 - The target's debug interface does not support thread exit
7508 events, and so we have no idea whatsoever if the previously
7509 stepping thread is still alive. For that reason, we need to
7510 synchronously query the target now. */
7512 if (tp
->state
== THREAD_EXITED
|| !target_thread_alive (tp
->ptid
))
7514 infrun_debug_printf ("not resuming previously stepped thread, it has "
7521 infrun_debug_printf ("resuming previously stepped thread");
7523 reset_ecs (ecs
, tp
);
7524 switch_to_thread (tp
);
7526 tp
->set_stop_pc (regcache_read_pc (get_thread_regcache (tp
)));
7527 frame
= get_current_frame ();
7529 /* If the PC of the thread we were trying to single-step has
7530 changed, then that thread has trapped or been signaled, but the
7531 event has not been reported to GDB yet. Re-poll the target
7532 looking for this particular thread's event (i.e. temporarily
7533 enable schedlock) by:
7535 - setting a break at the current PC
7536 - resuming that particular thread, only (by setting trap
7539 This prevents us continuously moving the single-step breakpoint
7540 forward, one instruction at a time, overstepping. */
7542 if (tp
->stop_pc () != tp
->prev_pc
)
7546 infrun_debug_printf ("expected thread advanced also (%s -> %s)",
7547 paddress (target_gdbarch (), tp
->prev_pc
),
7548 paddress (target_gdbarch (), tp
->stop_pc ()));
7550 /* Clear the info of the previous step-over, as it's no longer
7551 valid (if the thread was trying to step over a breakpoint, it
7552 has already succeeded). It's what keep_going would do too,
7553 if we called it. Do this before trying to insert the sss
7554 breakpoint, otherwise if we were previously trying to step
7555 over this exact address in another thread, the breakpoint is
7557 clear_step_over_info ();
7558 tp
->control
.trap_expected
= 0;
7560 insert_single_step_breakpoint (get_frame_arch (frame
),
7561 get_frame_address_space (frame
),
7564 tp
->set_resumed (true);
7565 resume_ptid
= internal_resume_ptid (tp
->control
.stepping_command
);
7566 do_target_resume (resume_ptid
, false, GDB_SIGNAL_0
);
7570 infrun_debug_printf ("expected thread still hasn't advanced");
7572 keep_going_pass_signal (ecs
);
7578 /* Is thread TP in the middle of (software or hardware)
7579 single-stepping? (Note the result of this function must never be
7580 passed directly as target_resume's STEP parameter.) */
7583 currently_stepping (struct thread_info
*tp
)
7585 return ((tp
->control
.step_range_end
7586 && tp
->control
.step_resume_breakpoint
== NULL
)
7587 || tp
->control
.trap_expected
7588 || tp
->stepped_breakpoint
7589 || bpstat_should_step ());
7592 /* Inferior has stepped into a subroutine call with source code that
7593 we should not step over. Do step to the first line of code in
7597 handle_step_into_function (struct gdbarch
*gdbarch
,
7598 struct execution_control_state
*ecs
)
7600 fill_in_stop_func (gdbarch
, ecs
);
7602 compunit_symtab
*cust
7603 = find_pc_compunit_symtab (ecs
->event_thread
->stop_pc ());
7604 if (cust
!= NULL
&& compunit_language (cust
) != language_asm
)
7605 ecs
->stop_func_start
7606 = gdbarch_skip_prologue_noexcept (gdbarch
, ecs
->stop_func_start
);
7608 symtab_and_line stop_func_sal
= find_pc_line (ecs
->stop_func_start
, 0);
7609 /* Use the step_resume_break to step until the end of the prologue,
7610 even if that involves jumps (as it seems to on the vax under
7612 /* If the prologue ends in the middle of a source line, continue to
7613 the end of that source line (if it is still within the function).
7614 Otherwise, just go to end of prologue. */
7615 if (stop_func_sal
.end
7616 && stop_func_sal
.pc
!= ecs
->stop_func_start
7617 && stop_func_sal
.end
< ecs
->stop_func_end
)
7618 ecs
->stop_func_start
= stop_func_sal
.end
;
7620 /* Architectures which require breakpoint adjustment might not be able
7621 to place a breakpoint at the computed address. If so, the test
7622 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
7623 ecs->stop_func_start to an address at which a breakpoint may be
7624 legitimately placed.
7626 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
7627 made, GDB will enter an infinite loop when stepping through
7628 optimized code consisting of VLIW instructions which contain
7629 subinstructions corresponding to different source lines. On
7630 FR-V, it's not permitted to place a breakpoint on any but the
7631 first subinstruction of a VLIW instruction. When a breakpoint is
7632 set, GDB will adjust the breakpoint address to the beginning of
7633 the VLIW instruction. Thus, we need to make the corresponding
7634 adjustment here when computing the stop address. */
7636 if (gdbarch_adjust_breakpoint_address_p (gdbarch
))
7638 ecs
->stop_func_start
7639 = gdbarch_adjust_breakpoint_address (gdbarch
,
7640 ecs
->stop_func_start
);
7643 if (ecs
->stop_func_start
== ecs
->event_thread
->stop_pc ())
7645 /* We are already there: stop now. */
7646 end_stepping_range (ecs
);
7651 /* Put the step-breakpoint there and go until there. */
7652 symtab_and_line sr_sal
;
7653 sr_sal
.pc
= ecs
->stop_func_start
;
7654 sr_sal
.section
= find_pc_overlay (ecs
->stop_func_start
);
7655 sr_sal
.pspace
= get_frame_program_space (get_current_frame ());
7657 /* Do not specify what the fp should be when we stop since on
7658 some machines the prologue is where the new fp value is
7660 insert_step_resume_breakpoint_at_sal (gdbarch
, sr_sal
, null_frame_id
);
7662 /* And make sure stepping stops right away then. */
7663 ecs
->event_thread
->control
.step_range_end
7664 = ecs
->event_thread
->control
.step_range_start
;
7669 /* Inferior has stepped backward into a subroutine call with source
7670 code that we should not step over. Do step to the beginning of the
7671 last line of code in it. */
7674 handle_step_into_function_backward (struct gdbarch
*gdbarch
,
7675 struct execution_control_state
*ecs
)
7677 struct compunit_symtab
*cust
;
7678 struct symtab_and_line stop_func_sal
;
7680 fill_in_stop_func (gdbarch
, ecs
);
7682 cust
= find_pc_compunit_symtab (ecs
->event_thread
->stop_pc ());
7683 if (cust
!= NULL
&& compunit_language (cust
) != language_asm
)
7684 ecs
->stop_func_start
7685 = gdbarch_skip_prologue_noexcept (gdbarch
, ecs
->stop_func_start
);
7687 stop_func_sal
= find_pc_line (ecs
->event_thread
->stop_pc (), 0);
7689 /* OK, we're just going to keep stepping here. */
7690 if (stop_func_sal
.pc
== ecs
->event_thread
->stop_pc ())
7692 /* We're there already. Just stop stepping now. */
7693 end_stepping_range (ecs
);
7697 /* Else just reset the step range and keep going.
7698 No step-resume breakpoint, they don't work for
7699 epilogues, which can have multiple entry paths. */
7700 ecs
->event_thread
->control
.step_range_start
= stop_func_sal
.pc
;
7701 ecs
->event_thread
->control
.step_range_end
= stop_func_sal
.end
;
7707 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
7708 This is used to both functions and to skip over code. */
7711 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch
*gdbarch
,
7712 struct symtab_and_line sr_sal
,
7713 struct frame_id sr_id
,
7714 enum bptype sr_type
)
7716 /* There should never be more than one step-resume or longjmp-resume
7717 breakpoint per thread, so we should never be setting a new
7718 step_resume_breakpoint when one is already active. */
7719 gdb_assert (inferior_thread ()->control
.step_resume_breakpoint
== NULL
);
7720 gdb_assert (sr_type
== bp_step_resume
|| sr_type
== bp_hp_step_resume
);
7722 infrun_debug_printf ("inserting step-resume breakpoint at %s",
7723 paddress (gdbarch
, sr_sal
.pc
));
7725 inferior_thread ()->control
.step_resume_breakpoint
7726 = set_momentary_breakpoint (gdbarch
, sr_sal
, sr_id
, sr_type
).release ();
7730 insert_step_resume_breakpoint_at_sal (struct gdbarch
*gdbarch
,
7731 struct symtab_and_line sr_sal
,
7732 struct frame_id sr_id
)
7734 insert_step_resume_breakpoint_at_sal_1 (gdbarch
,
7739 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
7740 This is used to skip a potential signal handler.
7742 This is called with the interrupted function's frame. The signal
7743 handler, when it returns, will resume the interrupted function at
7747 insert_hp_step_resume_breakpoint_at_frame (struct frame_info
*return_frame
)
7749 gdb_assert (return_frame
!= NULL
);
7751 struct gdbarch
*gdbarch
= get_frame_arch (return_frame
);
7753 symtab_and_line sr_sal
;
7754 sr_sal
.pc
= gdbarch_addr_bits_remove (gdbarch
, get_frame_pc (return_frame
));
7755 sr_sal
.section
= find_pc_overlay (sr_sal
.pc
);
7756 sr_sal
.pspace
= get_frame_program_space (return_frame
);
7758 insert_step_resume_breakpoint_at_sal_1 (gdbarch
, sr_sal
,
7759 get_stack_frame_id (return_frame
),
7763 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
7764 is used to skip a function after stepping into it (for "next" or if
7765 the called function has no debugging information).
7767 The current function has almost always been reached by single
7768 stepping a call or return instruction. NEXT_FRAME belongs to the
7769 current function, and the breakpoint will be set at the caller's
7772 This is a separate function rather than reusing
7773 insert_hp_step_resume_breakpoint_at_frame in order to avoid
7774 get_prev_frame, which may stop prematurely (see the implementation
7775 of frame_unwind_caller_id for an example). */
7778 insert_step_resume_breakpoint_at_caller (struct frame_info
*next_frame
)
7780 /* We shouldn't have gotten here if we don't know where the call site
7782 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame
)));
7784 struct gdbarch
*gdbarch
= frame_unwind_caller_arch (next_frame
);
7786 symtab_and_line sr_sal
;
7787 sr_sal
.pc
= gdbarch_addr_bits_remove (gdbarch
,
7788 frame_unwind_caller_pc (next_frame
));
7789 sr_sal
.section
= find_pc_overlay (sr_sal
.pc
);
7790 sr_sal
.pspace
= frame_unwind_program_space (next_frame
);
7792 insert_step_resume_breakpoint_at_sal (gdbarch
, sr_sal
,
7793 frame_unwind_caller_id (next_frame
));
7796 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
7797 new breakpoint at the target of a jmp_buf. The handling of
7798 longjmp-resume uses the same mechanisms used for handling
7799 "step-resume" breakpoints. */
7802 insert_longjmp_resume_breakpoint (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
7804 /* There should never be more than one longjmp-resume breakpoint per
7805 thread, so we should never be setting a new
7806 longjmp_resume_breakpoint when one is already active. */
7807 gdb_assert (inferior_thread ()->control
.exception_resume_breakpoint
== NULL
);
7809 infrun_debug_printf ("inserting longjmp-resume breakpoint at %s",
7810 paddress (gdbarch
, pc
));
7812 inferior_thread ()->control
.exception_resume_breakpoint
=
7813 set_momentary_breakpoint_at_pc (gdbarch
, pc
, bp_longjmp_resume
).release ();
7816 /* Insert an exception resume breakpoint. TP is the thread throwing
7817 the exception. The block B is the block of the unwinder debug hook
7818 function. FRAME is the frame corresponding to the call to this
7819 function. SYM is the symbol of the function argument holding the
7820 target PC of the exception. */
7823 insert_exception_resume_breakpoint (struct thread_info
*tp
,
7824 const struct block
*b
,
7825 struct frame_info
*frame
,
7830 struct block_symbol vsym
;
7831 struct value
*value
;
7833 struct breakpoint
*bp
;
7835 vsym
= lookup_symbol_search_name (sym
->search_name (),
7837 value
= read_var_value (vsym
.symbol
, vsym
.block
, frame
);
7838 /* If the value was optimized out, revert to the old behavior. */
7839 if (! value_optimized_out (value
))
7841 handler
= value_as_address (value
);
7843 infrun_debug_printf ("exception resume at %lx",
7844 (unsigned long) handler
);
7846 bp
= set_momentary_breakpoint_at_pc (get_frame_arch (frame
),
7848 bp_exception_resume
).release ();
7850 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
7853 bp
->thread
= tp
->global_num
;
7854 inferior_thread ()->control
.exception_resume_breakpoint
= bp
;
7857 catch (const gdb_exception_error
&e
)
7859 /* We want to ignore errors here. */
7863 /* A helper for check_exception_resume that sets an
7864 exception-breakpoint based on a SystemTap probe. */
7867 insert_exception_resume_from_probe (struct thread_info
*tp
,
7868 const struct bound_probe
*probe
,
7869 struct frame_info
*frame
)
7871 struct value
*arg_value
;
7873 struct breakpoint
*bp
;
7875 arg_value
= probe_safe_evaluate_at_pc (frame
, 1);
7879 handler
= value_as_address (arg_value
);
7881 infrun_debug_printf ("exception resume at %s",
7882 paddress (probe
->objfile
->arch (), handler
));
7884 bp
= set_momentary_breakpoint_at_pc (get_frame_arch (frame
),
7885 handler
, bp_exception_resume
).release ();
7886 bp
->thread
= tp
->global_num
;
7887 inferior_thread ()->control
.exception_resume_breakpoint
= bp
;
7890 /* This is called when an exception has been intercepted. Check to
7891 see whether the exception's destination is of interest, and if so,
7892 set an exception resume breakpoint there. */
7895 check_exception_resume (struct execution_control_state
*ecs
,
7896 struct frame_info
*frame
)
7898 struct bound_probe probe
;
7899 struct symbol
*func
;
7901 /* First see if this exception unwinding breakpoint was set via a
7902 SystemTap probe point. If so, the probe has two arguments: the
7903 CFA and the HANDLER. We ignore the CFA, extract the handler, and
7904 set a breakpoint there. */
7905 probe
= find_probe_by_pc (get_frame_pc (frame
));
7908 insert_exception_resume_from_probe (ecs
->event_thread
, &probe
, frame
);
7912 func
= get_frame_function (frame
);
7918 const struct block
*b
;
7919 struct block_iterator iter
;
7923 /* The exception breakpoint is a thread-specific breakpoint on
7924 the unwinder's debug hook, declared as:
7926 void _Unwind_DebugHook (void *cfa, void *handler);
7928 The CFA argument indicates the frame to which control is
7929 about to be transferred. HANDLER is the destination PC.
7931 We ignore the CFA and set a temporary breakpoint at HANDLER.
7932 This is not extremely efficient but it avoids issues in gdb
7933 with computing the DWARF CFA, and it also works even in weird
7934 cases such as throwing an exception from inside a signal
7937 b
= SYMBOL_BLOCK_VALUE (func
);
7938 ALL_BLOCK_SYMBOLS (b
, iter
, sym
)
7940 if (!SYMBOL_IS_ARGUMENT (sym
))
7947 insert_exception_resume_breakpoint (ecs
->event_thread
,
7953 catch (const gdb_exception_error
&e
)
7959 stop_waiting (struct execution_control_state
*ecs
)
7961 infrun_debug_printf ("stop_waiting");
7963 /* Let callers know we don't want to wait for the inferior anymore. */
7964 ecs
->wait_some_more
= 0;
7966 /* If all-stop, but there exists a non-stop target, stop all
7967 threads now that we're presenting the stop to the user. */
7968 if (!non_stop
&& exists_non_stop_target ())
7969 stop_all_threads ();
7972 /* Like keep_going, but passes the signal to the inferior, even if the
7973 signal is set to nopass. */
7976 keep_going_pass_signal (struct execution_control_state
*ecs
)
7978 gdb_assert (ecs
->event_thread
->ptid
== inferior_ptid
);
7979 gdb_assert (!ecs
->event_thread
->resumed ());
7981 /* Save the pc before execution, to compare with pc after stop. */
7982 ecs
->event_thread
->prev_pc
7983 = regcache_read_pc_protected (get_thread_regcache (ecs
->event_thread
));
7985 if (ecs
->event_thread
->control
.trap_expected
)
7987 struct thread_info
*tp
= ecs
->event_thread
;
7989 infrun_debug_printf ("%s has trap_expected set, "
7990 "resuming to collect trap",
7991 target_pid_to_str (tp
->ptid
).c_str ());
7993 /* We haven't yet gotten our trap, and either: intercepted a
7994 non-signal event (e.g., a fork); or took a signal which we
7995 are supposed to pass through to the inferior. Simply
7997 resume (ecs
->event_thread
->stop_signal ());
7999 else if (step_over_info_valid_p ())
8001 /* Another thread is stepping over a breakpoint in-line. If
8002 this thread needs a step-over too, queue the request. In
8003 either case, this resume must be deferred for later. */
8004 struct thread_info
*tp
= ecs
->event_thread
;
8006 if (ecs
->hit_singlestep_breakpoint
8007 || thread_still_needs_step_over (tp
))
8009 infrun_debug_printf ("step-over already in progress: "
8010 "step-over for %s deferred",
8011 target_pid_to_str (tp
->ptid
).c_str ());
8012 global_thread_step_over_chain_enqueue (tp
);
8016 infrun_debug_printf ("step-over in progress: resume of %s deferred",
8017 target_pid_to_str (tp
->ptid
).c_str ());
8022 struct regcache
*regcache
= get_current_regcache ();
8025 step_over_what step_what
;
8027 /* Either the trap was not expected, but we are continuing
8028 anyway (if we got a signal, the user asked it be passed to
8031 We got our expected trap, but decided we should resume from
8034 We're going to run this baby now!
8036 Note that insert_breakpoints won't try to re-insert
8037 already inserted breakpoints. Therefore, we don't
8038 care if breakpoints were already inserted, or not. */
8040 /* If we need to step over a breakpoint, and we're not using
8041 displaced stepping to do so, insert all breakpoints
8042 (watchpoints, etc.) but the one we're stepping over, step one
8043 instruction, and then re-insert the breakpoint when that step
8046 step_what
= thread_still_needs_step_over (ecs
->event_thread
);
8048 remove_bp
= (ecs
->hit_singlestep_breakpoint
8049 || (step_what
& STEP_OVER_BREAKPOINT
));
8050 remove_wps
= (step_what
& STEP_OVER_WATCHPOINT
);
8052 /* We can't use displaced stepping if we need to step past a
8053 watchpoint. The instruction copied to the scratch pad would
8054 still trigger the watchpoint. */
8056 && (remove_wps
|| !use_displaced_stepping (ecs
->event_thread
)))
8058 set_step_over_info (regcache
->aspace (),
8059 regcache_read_pc (regcache
), remove_wps
,
8060 ecs
->event_thread
->global_num
);
8062 else if (remove_wps
)
8063 set_step_over_info (NULL
, 0, remove_wps
, -1);
8065 /* If we now need to do an in-line step-over, we need to stop
8066 all other threads. Note this must be done before
8067 insert_breakpoints below, because that removes the breakpoint
8068 we're about to step over, otherwise other threads could miss
8070 if (step_over_info_valid_p () && target_is_non_stop_p ())
8071 stop_all_threads ();
8073 /* Stop stepping if inserting breakpoints fails. */
8076 insert_breakpoints ();
8078 catch (const gdb_exception_error
&e
)
8080 exception_print (gdb_stderr
, e
);
8082 clear_step_over_info ();
8086 ecs
->event_thread
->control
.trap_expected
= (remove_bp
|| remove_wps
);
8088 resume (ecs
->event_thread
->stop_signal ());
8091 prepare_to_wait (ecs
);
8094 /* Called when we should continue running the inferior, because the
8095 current event doesn't cause a user visible stop. This does the
8096 resuming part; waiting for the next event is done elsewhere. */
8099 keep_going (struct execution_control_state
*ecs
)
8101 if (ecs
->event_thread
->control
.trap_expected
8102 && ecs
->event_thread
->stop_signal () == GDB_SIGNAL_TRAP
)
8103 ecs
->event_thread
->control
.trap_expected
= 0;
8105 if (!signal_program
[ecs
->event_thread
->stop_signal ()])
8106 ecs
->event_thread
->set_stop_signal (GDB_SIGNAL_0
);
8107 keep_going_pass_signal (ecs
);
8110 /* This function normally comes after a resume, before
8111 handle_inferior_event exits. It takes care of any last bits of
8112 housekeeping, and sets the all-important wait_some_more flag. */
8115 prepare_to_wait (struct execution_control_state
*ecs
)
8117 infrun_debug_printf ("prepare_to_wait");
8119 ecs
->wait_some_more
= 1;
8121 /* If the target can't async, emulate it by marking the infrun event
8122 handler such that as soon as we get back to the event-loop, we
8123 immediately end up in fetch_inferior_event again calling
8125 if (!target_can_async_p ())
8126 mark_infrun_async_event_handler ();
8129 /* We are done with the step range of a step/next/si/ni command.
8130 Called once for each n of a "step n" operation. */
8133 end_stepping_range (struct execution_control_state
*ecs
)
8135 ecs
->event_thread
->control
.stop_step
= 1;
8139 /* Several print_*_reason functions to print why the inferior has stopped.
8140 We always print something when the inferior exits, or receives a signal.
8141 The rest of the cases are dealt with later on in normal_stop and
8142 print_it_typical. Ideally there should be a call to one of these
8143 print_*_reason functions functions from handle_inferior_event each time
8144 stop_waiting is called.
8146 Note that we don't call these directly, instead we delegate that to
8147 the interpreters, through observers. Interpreters then call these
8148 with whatever uiout is right. */
8151 print_end_stepping_range_reason (struct ui_out
*uiout
)
8153 /* For CLI-like interpreters, print nothing. */
8155 if (uiout
->is_mi_like_p ())
8157 uiout
->field_string ("reason",
8158 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE
));
8163 print_signal_exited_reason (struct ui_out
*uiout
, enum gdb_signal siggnal
)
8165 annotate_signalled ();
8166 if (uiout
->is_mi_like_p ())
8168 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED
));
8169 uiout
->text ("\nProgram terminated with signal ");
8170 annotate_signal_name ();
8171 uiout
->field_string ("signal-name",
8172 gdb_signal_to_name (siggnal
));
8173 annotate_signal_name_end ();
8175 annotate_signal_string ();
8176 uiout
->field_string ("signal-meaning",
8177 gdb_signal_to_string (siggnal
));
8178 annotate_signal_string_end ();
8179 uiout
->text (".\n");
8180 uiout
->text ("The program no longer exists.\n");
8184 print_exited_reason (struct ui_out
*uiout
, int exitstatus
)
8186 struct inferior
*inf
= current_inferior ();
8187 std::string pidstr
= target_pid_to_str (ptid_t (inf
->pid
));
8189 annotate_exited (exitstatus
);
8192 if (uiout
->is_mi_like_p ())
8193 uiout
->field_string ("reason", async_reason_lookup (EXEC_ASYNC_EXITED
));
8194 std::string exit_code_str
8195 = string_printf ("0%o", (unsigned int) exitstatus
);
8196 uiout
->message ("[Inferior %s (%s) exited with code %pF]\n",
8197 plongest (inf
->num
), pidstr
.c_str (),
8198 string_field ("exit-code", exit_code_str
.c_str ()));
8202 if (uiout
->is_mi_like_p ())
8204 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY
));
8205 uiout
->message ("[Inferior %s (%s) exited normally]\n",
8206 plongest (inf
->num
), pidstr
.c_str ());
8211 print_signal_received_reason (struct ui_out
*uiout
, enum gdb_signal siggnal
)
8213 struct thread_info
*thr
= inferior_thread ();
8217 if (uiout
->is_mi_like_p ())
8219 else if (show_thread_that_caused_stop ())
8223 uiout
->text ("\nThread ");
8224 uiout
->field_string ("thread-id", print_thread_id (thr
));
8226 name
= thr
->name
!= NULL
? thr
->name
: target_thread_name (thr
);
8229 uiout
->text (" \"");
8230 uiout
->field_string ("name", name
);
8235 uiout
->text ("\nProgram");
8237 if (siggnal
== GDB_SIGNAL_0
&& !uiout
->is_mi_like_p ())
8238 uiout
->text (" stopped");
8241 uiout
->text (" received signal ");
8242 annotate_signal_name ();
8243 if (uiout
->is_mi_like_p ())
8245 ("reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED
));
8246 uiout
->field_string ("signal-name", gdb_signal_to_name (siggnal
));
8247 annotate_signal_name_end ();
8249 annotate_signal_string ();
8250 uiout
->field_string ("signal-meaning", gdb_signal_to_string (siggnal
));
8252 struct regcache
*regcache
= get_current_regcache ();
8253 struct gdbarch
*gdbarch
= regcache
->arch ();
8254 if (gdbarch_report_signal_info_p (gdbarch
))
8255 gdbarch_report_signal_info (gdbarch
, uiout
, siggnal
);
8257 annotate_signal_string_end ();
8259 uiout
->text (".\n");
8263 print_no_history_reason (struct ui_out
*uiout
)
8265 uiout
->text ("\nNo more reverse-execution history.\n");
8268 /* Print current location without a level number, if we have changed
8269 functions or hit a breakpoint. Print source line if we have one.
8270 bpstat_print contains the logic deciding in detail what to print,
8271 based on the event(s) that just occurred. */
8274 print_stop_location (struct target_waitstatus
*ws
)
8277 enum print_what source_flag
;
8278 int do_frame_printing
= 1;
8279 struct thread_info
*tp
= inferior_thread ();
8281 bpstat_ret
= bpstat_print (tp
->control
.stop_bpstat
, ws
->kind
);
8285 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
8286 should) carry around the function and does (or should) use
8287 that when doing a frame comparison. */
8288 if (tp
->control
.stop_step
8289 && frame_id_eq (tp
->control
.step_frame_id
,
8290 get_frame_id (get_current_frame ()))
8291 && (tp
->control
.step_start_function
8292 == find_pc_function (tp
->stop_pc ())))
8294 /* Finished step, just print source line. */
8295 source_flag
= SRC_LINE
;
8299 /* Print location and source line. */
8300 source_flag
= SRC_AND_LOC
;
8303 case PRINT_SRC_AND_LOC
:
8304 /* Print location and source line. */
8305 source_flag
= SRC_AND_LOC
;
8307 case PRINT_SRC_ONLY
:
8308 source_flag
= SRC_LINE
;
8311 /* Something bogus. */
8312 source_flag
= SRC_LINE
;
8313 do_frame_printing
= 0;
8316 internal_error (__FILE__
, __LINE__
, _("Unknown value."));
8319 /* The behavior of this routine with respect to the source
8321 SRC_LINE: Print only source line
8322 LOCATION: Print only location
8323 SRC_AND_LOC: Print location and source line. */
8324 if (do_frame_printing
)
8325 print_stack_frame (get_selected_frame (NULL
), 0, source_flag
, 1);
8331 print_stop_event (struct ui_out
*uiout
, bool displays
)
8333 struct target_waitstatus last
;
8334 struct thread_info
*tp
;
8336 get_last_target_status (nullptr, nullptr, &last
);
8339 scoped_restore save_uiout
= make_scoped_restore (¤t_uiout
, uiout
);
8341 print_stop_location (&last
);
8343 /* Display the auto-display expressions. */
8348 tp
= inferior_thread ();
8349 if (tp
->thread_fsm
!= NULL
8350 && tp
->thread_fsm
->finished_p ())
8352 struct return_value_info
*rv
;
8354 rv
= tp
->thread_fsm
->return_value ();
8356 print_return_value (uiout
, rv
);
8363 maybe_remove_breakpoints (void)
8365 if (!breakpoints_should_be_inserted_now () && target_has_execution ())
8367 if (remove_breakpoints ())
8369 target_terminal::ours_for_output ();
8370 printf_filtered (_("Cannot remove breakpoints because "
8371 "program is no longer writable.\nFurther "
8372 "execution is probably impossible.\n"));
8377 /* The execution context that just caused a normal stop. */
8383 DISABLE_COPY_AND_ASSIGN (stop_context
);
8385 bool changed () const;
8390 /* The event PTID. */
8394 /* If stopp for a thread event, this is the thread that caused the
8396 thread_info_ref thread
;
8398 /* The inferior that caused the stop. */
8402 /* Initializes a new stop context. If stopped for a thread event, this
8403 takes a strong reference to the thread. */
8405 stop_context::stop_context ()
8407 stop_id
= get_stop_id ();
8408 ptid
= inferior_ptid
;
8409 inf_num
= current_inferior ()->num
;
8411 if (inferior_ptid
!= null_ptid
)
8413 /* Take a strong reference so that the thread can't be deleted
8415 thread
= thread_info_ref::new_reference (inferior_thread ());
8419 /* Return true if the current context no longer matches the saved stop
8423 stop_context::changed () const
8425 if (ptid
!= inferior_ptid
)
8427 if (inf_num
!= current_inferior ()->num
)
8429 if (thread
!= NULL
&& thread
->state
!= THREAD_STOPPED
)
8431 if (get_stop_id () != stop_id
)
8441 struct target_waitstatus last
;
8443 get_last_target_status (nullptr, nullptr, &last
);
8447 /* If an exception is thrown from this point on, make sure to
8448 propagate GDB's knowledge of the executing state to the
8449 frontend/user running state. A QUIT is an easy exception to see
8450 here, so do this before any filtered output. */
8452 ptid_t finish_ptid
= null_ptid
;
8455 finish_ptid
= minus_one_ptid
;
8456 else if (last
.kind
== TARGET_WAITKIND_SIGNALLED
8457 || last
.kind
== TARGET_WAITKIND_EXITED
)
8459 /* On some targets, we may still have live threads in the
8460 inferior when we get a process exit event. E.g., for
8461 "checkpoint", when the current checkpoint/fork exits,
8462 linux-fork.c automatically switches to another fork from
8463 within target_mourn_inferior. */
8464 if (inferior_ptid
!= null_ptid
)
8465 finish_ptid
= ptid_t (inferior_ptid
.pid ());
8467 else if (last
.kind
!= TARGET_WAITKIND_NO_RESUMED
)
8468 finish_ptid
= inferior_ptid
;
8470 gdb::optional
<scoped_finish_thread_state
> maybe_finish_thread_state
;
8471 if (finish_ptid
!= null_ptid
)
8473 maybe_finish_thread_state
.emplace
8474 (user_visible_resume_target (finish_ptid
), finish_ptid
);
8477 /* As we're presenting a stop, and potentially removing breakpoints,
8478 update the thread list so we can tell whether there are threads
8479 running on the target. With target remote, for example, we can
8480 only learn about new threads when we explicitly update the thread
8481 list. Do this before notifying the interpreters about signal
8482 stops, end of stepping ranges, etc., so that the "new thread"
8483 output is emitted before e.g., "Program received signal FOO",
8484 instead of after. */
8485 update_thread_list ();
8487 if (last
.kind
== TARGET_WAITKIND_STOPPED
&& stopped_by_random_signal
)
8488 gdb::observers::signal_received
.notify (inferior_thread ()->stop_signal ());
8490 /* As with the notification of thread events, we want to delay
8491 notifying the user that we've switched thread context until
8492 the inferior actually stops.
8494 There's no point in saying anything if the inferior has exited.
8495 Note that SIGNALLED here means "exited with a signal", not
8496 "received a signal".
8498 Also skip saying anything in non-stop mode. In that mode, as we
8499 don't want GDB to switch threads behind the user's back, to avoid
8500 races where the user is typing a command to apply to thread x,
8501 but GDB switches to thread y before the user finishes entering
8502 the command, fetch_inferior_event installs a cleanup to restore
8503 the current thread back to the thread the user had selected right
8504 after this event is handled, so we're not really switching, only
8505 informing of a stop. */
8507 && previous_inferior_ptid
!= inferior_ptid
8508 && target_has_execution ()
8509 && last
.kind
!= TARGET_WAITKIND_SIGNALLED
8510 && last
.kind
!= TARGET_WAITKIND_EXITED
8511 && last
.kind
!= TARGET_WAITKIND_NO_RESUMED
)
8513 SWITCH_THRU_ALL_UIS ()
8515 target_terminal::ours_for_output ();
8516 printf_filtered (_("[Switching to %s]\n"),
8517 target_pid_to_str (inferior_ptid
).c_str ());
8518 annotate_thread_changed ();
8520 previous_inferior_ptid
= inferior_ptid
;
8523 if (last
.kind
== TARGET_WAITKIND_NO_RESUMED
)
8525 SWITCH_THRU_ALL_UIS ()
8526 if (current_ui
->prompt_state
== PROMPT_BLOCKED
)
8528 target_terminal::ours_for_output ();
8529 printf_filtered (_("No unwaited-for children left.\n"));
8533 /* Note: this depends on the update_thread_list call above. */
8534 maybe_remove_breakpoints ();
8536 /* If an auto-display called a function and that got a signal,
8537 delete that auto-display to avoid an infinite recursion. */
8539 if (stopped_by_random_signal
)
8540 disable_current_display ();
8542 SWITCH_THRU_ALL_UIS ()
8544 async_enable_stdin ();
8547 /* Let the user/frontend see the threads as stopped. */
8548 maybe_finish_thread_state
.reset ();
8550 /* Select innermost stack frame - i.e., current frame is frame 0,
8551 and current location is based on that. Handle the case where the
8552 dummy call is returning after being stopped. E.g. the dummy call
8553 previously hit a breakpoint. (If the dummy call returns
8554 normally, we won't reach here.) Do this before the stop hook is
8555 run, so that it doesn't get to see the temporary dummy frame,
8556 which is not where we'll present the stop. */
8557 if (has_stack_frames ())
8559 if (stop_stack_dummy
== STOP_STACK_DUMMY
)
8561 /* Pop the empty frame that contains the stack dummy. This
8562 also restores inferior state prior to the call (struct
8563 infcall_suspend_state). */
8564 struct frame_info
*frame
= get_current_frame ();
8566 gdb_assert (get_frame_type (frame
) == DUMMY_FRAME
);
8568 /* frame_pop calls reinit_frame_cache as the last thing it
8569 does which means there's now no selected frame. */
8572 select_frame (get_current_frame ());
8574 /* Set the current source location. */
8575 set_current_sal_from_frame (get_current_frame ());
8578 /* Look up the hook_stop and run it (CLI internally handles problem
8579 of stop_command's pre-hook not existing). */
8580 if (stop_command
!= NULL
)
8582 stop_context saved_context
;
8586 execute_cmd_pre_hook (stop_command
);
8588 catch (const gdb_exception
&ex
)
8590 exception_fprintf (gdb_stderr
, ex
,
8591 "Error while running hook_stop:\n");
8594 /* If the stop hook resumes the target, then there's no point in
8595 trying to notify about the previous stop; its context is
8596 gone. Likewise if the command switches thread or inferior --
8597 the observers would print a stop for the wrong
8599 if (saved_context
.changed ())
8603 /* Notify observers about the stop. This is where the interpreters
8604 print the stop event. */
8605 if (inferior_ptid
!= null_ptid
)
8606 gdb::observers::normal_stop
.notify (inferior_thread ()->control
.stop_bpstat
,
8609 gdb::observers::normal_stop
.notify (NULL
, stop_print_frame
);
8611 annotate_stopped ();
8613 if (target_has_execution ())
8615 if (last
.kind
!= TARGET_WAITKIND_SIGNALLED
8616 && last
.kind
!= TARGET_WAITKIND_EXITED
8617 && last
.kind
!= TARGET_WAITKIND_NO_RESUMED
)
8618 /* Delete the breakpoint we stopped at, if it wants to be deleted.
8619 Delete any breakpoint that is to be deleted at the next stop. */
8620 breakpoint_auto_delete (inferior_thread ()->control
.stop_bpstat
);
8623 /* Try to get rid of automatically added inferiors that are no
8624 longer needed. Keeping those around slows down things linearly.
8625 Note that this never removes the current inferior. */
8632 signal_stop_state (int signo
)
8634 return signal_stop
[signo
];
8638 signal_print_state (int signo
)
8640 return signal_print
[signo
];
8644 signal_pass_state (int signo
)
8646 return signal_program
[signo
];
8650 signal_cache_update (int signo
)
8654 for (signo
= 0; signo
< (int) GDB_SIGNAL_LAST
; signo
++)
8655 signal_cache_update (signo
);
8660 signal_pass
[signo
] = (signal_stop
[signo
] == 0
8661 && signal_print
[signo
] == 0
8662 && signal_program
[signo
] == 1
8663 && signal_catch
[signo
] == 0);
8667 signal_stop_update (int signo
, int state
)
8669 int ret
= signal_stop
[signo
];
8671 signal_stop
[signo
] = state
;
8672 signal_cache_update (signo
);
8677 signal_print_update (int signo
, int state
)
8679 int ret
= signal_print
[signo
];
8681 signal_print
[signo
] = state
;
8682 signal_cache_update (signo
);
8687 signal_pass_update (int signo
, int state
)
8689 int ret
= signal_program
[signo
];
8691 signal_program
[signo
] = state
;
8692 signal_cache_update (signo
);
8696 /* Update the global 'signal_catch' from INFO and notify the
8700 signal_catch_update (const unsigned int *info
)
8704 for (i
= 0; i
< GDB_SIGNAL_LAST
; ++i
)
8705 signal_catch
[i
] = info
[i
] > 0;
8706 signal_cache_update (-1);
8707 target_pass_signals (signal_pass
);
8711 sig_print_header (void)
8713 printf_filtered (_("Signal Stop\tPrint\tPass "
8714 "to program\tDescription\n"));
8718 sig_print_info (enum gdb_signal oursig
)
8720 const char *name
= gdb_signal_to_name (oursig
);
8721 int name_padding
= 13 - strlen (name
);
8723 if (name_padding
<= 0)
8726 printf_filtered ("%s", name
);
8727 printf_filtered ("%*.*s ", name_padding
, name_padding
, " ");
8728 printf_filtered ("%s\t", signal_stop
[oursig
] ? "Yes" : "No");
8729 printf_filtered ("%s\t", signal_print
[oursig
] ? "Yes" : "No");
8730 printf_filtered ("%s\t\t", signal_program
[oursig
] ? "Yes" : "No");
8731 printf_filtered ("%s\n", gdb_signal_to_string (oursig
));
8734 /* Specify how various signals in the inferior should be handled. */
8737 handle_command (const char *args
, int from_tty
)
8739 int digits
, wordlen
;
8740 int sigfirst
, siglast
;
8741 enum gdb_signal oursig
;
8746 error_no_arg (_("signal to handle"));
8749 /* Allocate and zero an array of flags for which signals to handle. */
8751 const size_t nsigs
= GDB_SIGNAL_LAST
;
8752 unsigned char sigs
[nsigs
] {};
8754 /* Break the command line up into args. */
8756 gdb_argv
built_argv (args
);
8758 /* Walk through the args, looking for signal oursigs, signal names, and
8759 actions. Signal numbers and signal names may be interspersed with
8760 actions, with the actions being performed for all signals cumulatively
8761 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
8763 for (char *arg
: built_argv
)
8765 wordlen
= strlen (arg
);
8766 for (digits
= 0; isdigit (arg
[digits
]); digits
++)
8770 sigfirst
= siglast
= -1;
8772 if (wordlen
>= 1 && !strncmp (arg
, "all", wordlen
))
8774 /* Apply action to all signals except those used by the
8775 debugger. Silently skip those. */
8778 siglast
= nsigs
- 1;
8780 else if (wordlen
>= 1 && !strncmp (arg
, "stop", wordlen
))
8782 SET_SIGS (nsigs
, sigs
, signal_stop
);
8783 SET_SIGS (nsigs
, sigs
, signal_print
);
8785 else if (wordlen
>= 1 && !strncmp (arg
, "ignore", wordlen
))
8787 UNSET_SIGS (nsigs
, sigs
, signal_program
);
8789 else if (wordlen
>= 2 && !strncmp (arg
, "print", wordlen
))
8791 SET_SIGS (nsigs
, sigs
, signal_print
);
8793 else if (wordlen
>= 2 && !strncmp (arg
, "pass", wordlen
))
8795 SET_SIGS (nsigs
, sigs
, signal_program
);
8797 else if (wordlen
>= 3 && !strncmp (arg
, "nostop", wordlen
))
8799 UNSET_SIGS (nsigs
, sigs
, signal_stop
);
8801 else if (wordlen
>= 3 && !strncmp (arg
, "noignore", wordlen
))
8803 SET_SIGS (nsigs
, sigs
, signal_program
);
8805 else if (wordlen
>= 4 && !strncmp (arg
, "noprint", wordlen
))
8807 UNSET_SIGS (nsigs
, sigs
, signal_print
);
8808 UNSET_SIGS (nsigs
, sigs
, signal_stop
);
8810 else if (wordlen
>= 4 && !strncmp (arg
, "nopass", wordlen
))
8812 UNSET_SIGS (nsigs
, sigs
, signal_program
);
8814 else if (digits
> 0)
8816 /* It is numeric. The numeric signal refers to our own
8817 internal signal numbering from target.h, not to host/target
8818 signal number. This is a feature; users really should be
8819 using symbolic names anyway, and the common ones like
8820 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
8822 sigfirst
= siglast
= (int)
8823 gdb_signal_from_command (atoi (arg
));
8824 if (arg
[digits
] == '-')
8827 gdb_signal_from_command (atoi (arg
+ digits
+ 1));
8829 if (sigfirst
> siglast
)
8831 /* Bet he didn't figure we'd think of this case... */
8832 std::swap (sigfirst
, siglast
);
8837 oursig
= gdb_signal_from_name (arg
);
8838 if (oursig
!= GDB_SIGNAL_UNKNOWN
)
8840 sigfirst
= siglast
= (int) oursig
;
8844 /* Not a number and not a recognized flag word => complain. */
8845 error (_("Unrecognized or ambiguous flag word: \"%s\"."), arg
);
8849 /* If any signal numbers or symbol names were found, set flags for
8850 which signals to apply actions to. */
8852 for (int signum
= sigfirst
; signum
>= 0 && signum
<= siglast
; signum
++)
8854 switch ((enum gdb_signal
) signum
)
8856 case GDB_SIGNAL_TRAP
:
8857 case GDB_SIGNAL_INT
:
8858 if (!allsigs
&& !sigs
[signum
])
8860 if (query (_("%s is used by the debugger.\n\
8861 Are you sure you want to change it? "),
8862 gdb_signal_to_name ((enum gdb_signal
) signum
)))
8867 printf_unfiltered (_("Not confirmed, unchanged.\n"));
8871 case GDB_SIGNAL_DEFAULT
:
8872 case GDB_SIGNAL_UNKNOWN
:
8873 /* Make sure that "all" doesn't print these. */
8882 for (int signum
= 0; signum
< nsigs
; signum
++)
8885 signal_cache_update (-1);
8886 target_pass_signals (signal_pass
);
8887 target_program_signals (signal_program
);
8891 /* Show the results. */
8892 sig_print_header ();
8893 for (; signum
< nsigs
; signum
++)
8895 sig_print_info ((enum gdb_signal
) signum
);
8902 /* Complete the "handle" command. */
8905 handle_completer (struct cmd_list_element
*ignore
,
8906 completion_tracker
&tracker
,
8907 const char *text
, const char *word
)
8909 static const char * const keywords
[] =
8923 signal_completer (ignore
, tracker
, text
, word
);
8924 complete_on_enum (tracker
, keywords
, word
, word
);
8928 gdb_signal_from_command (int num
)
8930 if (num
>= 1 && num
<= 15)
8931 return (enum gdb_signal
) num
;
8932 error (_("Only signals 1-15 are valid as numeric signals.\n\
8933 Use \"info signals\" for a list of symbolic signals."));
8936 /* Print current contents of the tables set by the handle command.
8937 It is possible we should just be printing signals actually used
8938 by the current target (but for things to work right when switching
8939 targets, all signals should be in the signal tables). */
8942 info_signals_command (const char *signum_exp
, int from_tty
)
8944 enum gdb_signal oursig
;
8946 sig_print_header ();
8950 /* First see if this is a symbol name. */
8951 oursig
= gdb_signal_from_name (signum_exp
);
8952 if (oursig
== GDB_SIGNAL_UNKNOWN
)
8954 /* No, try numeric. */
8956 gdb_signal_from_command (parse_and_eval_long (signum_exp
));
8958 sig_print_info (oursig
);
8962 printf_filtered ("\n");
8963 /* These ugly casts brought to you by the native VAX compiler. */
8964 for (oursig
= GDB_SIGNAL_FIRST
;
8965 (int) oursig
< (int) GDB_SIGNAL_LAST
;
8966 oursig
= (enum gdb_signal
) ((int) oursig
+ 1))
8970 if (oursig
!= GDB_SIGNAL_UNKNOWN
8971 && oursig
!= GDB_SIGNAL_DEFAULT
&& oursig
!= GDB_SIGNAL_0
)
8972 sig_print_info (oursig
);
8975 printf_filtered (_("\nUse the \"handle\" command "
8976 "to change these tables.\n"));
8979 /* The $_siginfo convenience variable is a bit special. We don't know
8980 for sure the type of the value until we actually have a chance to
8981 fetch the data. The type can change depending on gdbarch, so it is
8982 also dependent on which thread you have selected.
8984 1. making $_siginfo be an internalvar that creates a new value on
8987 2. making the value of $_siginfo be an lval_computed value. */
8989 /* This function implements the lval_computed support for reading a
8993 siginfo_value_read (struct value
*v
)
8995 LONGEST transferred
;
8997 /* If we can access registers, so can we access $_siginfo. Likewise
8999 validate_registers_access ();
9002 target_read (current_inferior ()->top_target (),
9003 TARGET_OBJECT_SIGNAL_INFO
,
9005 value_contents_all_raw (v
),
9007 TYPE_LENGTH (value_type (v
)));
9009 if (transferred
!= TYPE_LENGTH (value_type (v
)))
9010 error (_("Unable to read siginfo"));
9013 /* This function implements the lval_computed support for writing a
9017 siginfo_value_write (struct value
*v
, struct value
*fromval
)
9019 LONGEST transferred
;
9021 /* If we can access registers, so can we access $_siginfo. Likewise
9023 validate_registers_access ();
9025 transferred
= target_write (current_inferior ()->top_target (),
9026 TARGET_OBJECT_SIGNAL_INFO
,
9028 value_contents_all_raw (fromval
),
9030 TYPE_LENGTH (value_type (fromval
)));
9032 if (transferred
!= TYPE_LENGTH (value_type (fromval
)))
9033 error (_("Unable to write siginfo"));
9036 static const struct lval_funcs siginfo_value_funcs
=
9042 /* Return a new value with the correct type for the siginfo object of
9043 the current thread using architecture GDBARCH. Return a void value
9044 if there's no object available. */
9046 static struct value
*
9047 siginfo_make_value (struct gdbarch
*gdbarch
, struct internalvar
*var
,
9050 if (target_has_stack ()
9051 && inferior_ptid
!= null_ptid
9052 && gdbarch_get_siginfo_type_p (gdbarch
))
9054 struct type
*type
= gdbarch_get_siginfo_type (gdbarch
);
9056 return allocate_computed_value (type
, &siginfo_value_funcs
, NULL
);
9059 return allocate_value (builtin_type (gdbarch
)->builtin_void
);
9063 /* infcall_suspend_state contains state about the program itself like its
9064 registers and any signal it received when it last stopped.
9065 This state must be restored regardless of how the inferior function call
9066 ends (either successfully, or after it hits a breakpoint or signal)
9067 if the program is to properly continue where it left off. */
9069 class infcall_suspend_state
9072 /* Capture state from GDBARCH, TP, and REGCACHE that must be restored
9073 once the inferior function call has finished. */
9074 infcall_suspend_state (struct gdbarch
*gdbarch
,
9075 const struct thread_info
*tp
,
9076 struct regcache
*regcache
)
9077 : m_registers (new readonly_detached_regcache (*regcache
))
9079 tp
->save_suspend_to (m_thread_suspend
);
9081 gdb::unique_xmalloc_ptr
<gdb_byte
> siginfo_data
;
9083 if (gdbarch_get_siginfo_type_p (gdbarch
))
9085 struct type
*type
= gdbarch_get_siginfo_type (gdbarch
);
9086 size_t len
= TYPE_LENGTH (type
);
9088 siginfo_data
.reset ((gdb_byte
*) xmalloc (len
));
9090 if (target_read (current_inferior ()->top_target (),
9091 TARGET_OBJECT_SIGNAL_INFO
, NULL
,
9092 siginfo_data
.get (), 0, len
) != len
)
9094 /* Errors ignored. */
9095 siginfo_data
.reset (nullptr);
9101 m_siginfo_gdbarch
= gdbarch
;
9102 m_siginfo_data
= std::move (siginfo_data
);
9106 /* Return a pointer to the stored register state. */
9108 readonly_detached_regcache
*registers () const
9110 return m_registers
.get ();
9113 /* Restores the stored state into GDBARCH, TP, and REGCACHE. */
9115 void restore (struct gdbarch
*gdbarch
,
9116 struct thread_info
*tp
,
9117 struct regcache
*regcache
) const
9119 tp
->restore_suspend_from (m_thread_suspend
);
9121 if (m_siginfo_gdbarch
== gdbarch
)
9123 struct type
*type
= gdbarch_get_siginfo_type (gdbarch
);
9125 /* Errors ignored. */
9126 target_write (current_inferior ()->top_target (),
9127 TARGET_OBJECT_SIGNAL_INFO
, NULL
,
9128 m_siginfo_data
.get (), 0, TYPE_LENGTH (type
));
9131 /* The inferior can be gone if the user types "print exit(0)"
9132 (and perhaps other times). */
9133 if (target_has_execution ())
9134 /* NB: The register write goes through to the target. */
9135 regcache
->restore (registers ());
9139 /* How the current thread stopped before the inferior function call was
9141 struct thread_suspend_state m_thread_suspend
;
9143 /* The registers before the inferior function call was executed. */
9144 std::unique_ptr
<readonly_detached_regcache
> m_registers
;
9146 /* Format of SIGINFO_DATA or NULL if it is not present. */
9147 struct gdbarch
*m_siginfo_gdbarch
= nullptr;
9149 /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
9150 TYPE_LENGTH (gdbarch_get_siginfo_type ()). For different gdbarch the
9151 content would be invalid. */
9152 gdb::unique_xmalloc_ptr
<gdb_byte
> m_siginfo_data
;
9155 infcall_suspend_state_up
9156 save_infcall_suspend_state ()
9158 struct thread_info
*tp
= inferior_thread ();
9159 struct regcache
*regcache
= get_current_regcache ();
9160 struct gdbarch
*gdbarch
= regcache
->arch ();
9162 infcall_suspend_state_up inf_state
9163 (new struct infcall_suspend_state (gdbarch
, tp
, regcache
));
9165 /* Having saved the current state, adjust the thread state, discarding
9166 any stop signal information. The stop signal is not useful when
9167 starting an inferior function call, and run_inferior_call will not use
9168 the signal due to its `proceed' call with GDB_SIGNAL_0. */
9169 tp
->set_stop_signal (GDB_SIGNAL_0
);
9174 /* Restore inferior session state to INF_STATE. */
9177 restore_infcall_suspend_state (struct infcall_suspend_state
*inf_state
)
9179 struct thread_info
*tp
= inferior_thread ();
9180 struct regcache
*regcache
= get_current_regcache ();
9181 struct gdbarch
*gdbarch
= regcache
->arch ();
9183 inf_state
->restore (gdbarch
, tp
, regcache
);
9184 discard_infcall_suspend_state (inf_state
);
/* Release INF_STATE without restoring it.  */

void
discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
{
  delete inf_state;
}
9193 readonly_detached_regcache
*
9194 get_infcall_suspend_state_regcache (struct infcall_suspend_state
*inf_state
)
9196 return inf_state
->registers ();
9199 /* infcall_control_state contains state regarding gdb's control of the
9200 inferior itself like stepping control. It also contains session state like
9201 the user's currently selected frame. */
9203 struct infcall_control_state
9205 struct thread_control_state thread_control
;
9206 struct inferior_control_state inferior_control
;
9209 enum stop_stack_kind stop_stack_dummy
= STOP_NONE
;
9210 int stopped_by_random_signal
= 0;
9212 /* ID and level of the selected frame when the inferior function
9214 struct frame_id selected_frame_id
{};
9215 int selected_frame_level
= -1;
9218 /* Save all of the information associated with the inferior<==>gdb
9221 infcall_control_state_up
9222 save_infcall_control_state ()
9224 infcall_control_state_up
inf_status (new struct infcall_control_state
);
9225 struct thread_info
*tp
= inferior_thread ();
9226 struct inferior
*inf
= current_inferior ();
9228 inf_status
->thread_control
= tp
->control
;
9229 inf_status
->inferior_control
= inf
->control
;
9231 tp
->control
.step_resume_breakpoint
= NULL
;
9232 tp
->control
.exception_resume_breakpoint
= NULL
;
9234 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
9235 chain. If caller's caller is walking the chain, they'll be happier if we
9236 hand them back the original chain when restore_infcall_control_state is
9238 tp
->control
.stop_bpstat
= bpstat_copy (tp
->control
.stop_bpstat
);
9241 inf_status
->stop_stack_dummy
= stop_stack_dummy
;
9242 inf_status
->stopped_by_random_signal
= stopped_by_random_signal
;
9244 save_selected_frame (&inf_status
->selected_frame_id
,
9245 &inf_status
->selected_frame_level
);
9250 /* Restore inferior session state to INF_STATUS. */
9253 restore_infcall_control_state (struct infcall_control_state
*inf_status
)
9255 struct thread_info
*tp
= inferior_thread ();
9256 struct inferior
*inf
= current_inferior ();
9258 if (tp
->control
.step_resume_breakpoint
)
9259 tp
->control
.step_resume_breakpoint
->disposition
= disp_del_at_next_stop
;
9261 if (tp
->control
.exception_resume_breakpoint
)
9262 tp
->control
.exception_resume_breakpoint
->disposition
9263 = disp_del_at_next_stop
;
9265 /* Handle the bpstat_copy of the chain. */
9266 bpstat_clear (&tp
->control
.stop_bpstat
);
9268 tp
->control
= inf_status
->thread_control
;
9269 inf
->control
= inf_status
->inferior_control
;
9272 stop_stack_dummy
= inf_status
->stop_stack_dummy
;
9273 stopped_by_random_signal
= inf_status
->stopped_by_random_signal
;
9275 if (target_has_stack ())
9277 restore_selected_frame (inf_status
->selected_frame_id
,
9278 inf_status
->selected_frame_level
);
9285 discard_infcall_control_state (struct infcall_control_state
*inf_status
)
9287 if (inf_status
->thread_control
.step_resume_breakpoint
)
9288 inf_status
->thread_control
.step_resume_breakpoint
->disposition
9289 = disp_del_at_next_stop
;
9291 if (inf_status
->thread_control
.exception_resume_breakpoint
)
9292 inf_status
->thread_control
.exception_resume_breakpoint
->disposition
9293 = disp_del_at_next_stop
;
9295 /* See save_infcall_control_state for info on stop_bpstat. */
9296 bpstat_clear (&inf_status
->thread_control
.stop_bpstat
);
9304 clear_exit_convenience_vars (void)
9306 clear_internalvar (lookup_internalvar ("_exitsignal"));
9307 clear_internalvar (lookup_internalvar ("_exitcode"));
9311 /* User interface for reverse debugging:
9312 Set exec-direction / show exec-direction commands
9313 (returns error unless target implements to_set_exec_direction method). */
9315 enum exec_direction_kind execution_direction
= EXEC_FORWARD
;
9316 static const char exec_forward
[] = "forward";
9317 static const char exec_reverse
[] = "reverse";
9318 static const char *exec_direction
= exec_forward
;
9319 static const char *const exec_direction_names
[] = {
9326 set_exec_direction_func (const char *args
, int from_tty
,
9327 struct cmd_list_element
*cmd
)
9329 if (target_can_execute_reverse ())
9331 if (!strcmp (exec_direction
, exec_forward
))
9332 execution_direction
= EXEC_FORWARD
;
9333 else if (!strcmp (exec_direction
, exec_reverse
))
9334 execution_direction
= EXEC_REVERSE
;
9338 exec_direction
= exec_forward
;
9339 error (_("Target does not support this operation."));
9344 show_exec_direction_func (struct ui_file
*out
, int from_tty
,
9345 struct cmd_list_element
*cmd
, const char *value
)
9347 switch (execution_direction
) {
9349 fprintf_filtered (out
, _("Forward.\n"));
9352 fprintf_filtered (out
, _("Reverse.\n"));
9355 internal_error (__FILE__
, __LINE__
,
9356 _("bogus execution_direction value: %d"),
9357 (int) execution_direction
);
9362 show_schedule_multiple (struct ui_file
*file
, int from_tty
,
9363 struct cmd_list_element
*c
, const char *value
)
9365 fprintf_filtered (file
, _("Resuming the execution of threads "
9366 "of all processes is %s.\n"), value
);
9369 /* Implementation of `siginfo' variable. */
9371 static const struct internalvar_funcs siginfo_funcs
=
9378 /* Callback for infrun's target events source. This is marked when a
9379 thread has a pending status to process. */
9382 infrun_async_inferior_event_handler (gdb_client_data data
)
9384 clear_async_event_handler (infrun_async_inferior_event_token
);
9385 inferior_event_handler (INF_REG_EVENT
);
9392 /* Verify that when two threads with the same ptid exist (from two different
9393 targets) and one of them changes ptid, we only update inferior_ptid if
9394 it is appropriate. */
9397 infrun_thread_ptid_changed ()
9399 gdbarch
*arch
= current_inferior ()->gdbarch
;
9401 /* The thread which inferior_ptid represents changes ptid. */
9403 scoped_restore_current_pspace_and_thread restore
;
9405 scoped_mock_context
<test_target_ops
> target1 (arch
);
9406 scoped_mock_context
<test_target_ops
> target2 (arch
);
9408 ptid_t
old_ptid (111, 222);
9409 ptid_t
new_ptid (111, 333);
9411 target1
.mock_inferior
.pid
= old_ptid
.pid ();
9412 target1
.mock_thread
.ptid
= old_ptid
;
9413 target1
.mock_inferior
.ptid_thread_map
.clear ();
9414 target1
.mock_inferior
.ptid_thread_map
[old_ptid
] = &target1
.mock_thread
;
9416 target2
.mock_inferior
.pid
= old_ptid
.pid ();
9417 target2
.mock_thread
.ptid
= old_ptid
;
9418 target2
.mock_inferior
.ptid_thread_map
.clear ();
9419 target2
.mock_inferior
.ptid_thread_map
[old_ptid
] = &target2
.mock_thread
;
9421 auto restore_inferior_ptid
= make_scoped_restore (&inferior_ptid
, old_ptid
);
9422 set_current_inferior (&target1
.mock_inferior
);
9424 thread_change_ptid (&target1
.mock_target
, old_ptid
, new_ptid
);
9426 gdb_assert (inferior_ptid
== new_ptid
);
9429 /* A thread with the same ptid as inferior_ptid, but from another target,
9432 scoped_restore_current_pspace_and_thread restore
;
9434 scoped_mock_context
<test_target_ops
> target1 (arch
);
9435 scoped_mock_context
<test_target_ops
> target2 (arch
);
9437 ptid_t
old_ptid (111, 222);
9438 ptid_t
new_ptid (111, 333);
9440 target1
.mock_inferior
.pid
= old_ptid
.pid ();
9441 target1
.mock_thread
.ptid
= old_ptid
;
9442 target1
.mock_inferior
.ptid_thread_map
.clear ();
9443 target1
.mock_inferior
.ptid_thread_map
[old_ptid
] = &target1
.mock_thread
;
9445 target2
.mock_inferior
.pid
= old_ptid
.pid ();
9446 target2
.mock_thread
.ptid
= old_ptid
;
9447 target2
.mock_inferior
.ptid_thread_map
.clear ();
9448 target2
.mock_inferior
.ptid_thread_map
[old_ptid
] = &target2
.mock_thread
;
9450 auto restore_inferior_ptid
= make_scoped_restore (&inferior_ptid
, old_ptid
);
9451 set_current_inferior (&target2
.mock_inferior
);
9453 thread_change_ptid (&target1
.mock_target
, old_ptid
, new_ptid
);
9455 gdb_assert (inferior_ptid
== old_ptid
);
9459 } /* namespace selftests */
9461 #endif /* GDB_SELF_TEST */
9463 void _initialize_infrun ();
9465 _initialize_infrun ()
9467 struct cmd_list_element
*c
;
9469 /* Register extra event sources in the event loop. */
9470 infrun_async_inferior_event_token
9471 = create_async_event_handler (infrun_async_inferior_event_handler
, NULL
,
9474 cmd_list_element
*info_signals_cmd
9475 = add_info ("signals", info_signals_command
, _("\
9476 What debugger does when program gets various signals.\n\
9477 Specify a signal as argument to print info on that signal only."));
9478 add_info_alias ("handle", info_signals_cmd
, 0);
9480 c
= add_com ("handle", class_run
, handle_command
, _("\
9481 Specify how to handle signals.\n\
9482 Usage: handle SIGNAL [ACTIONS]\n\
9483 Args are signals and actions to apply to those signals.\n\
9484 If no actions are specified, the current settings for the specified signals\n\
9485 will be displayed instead.\n\
9487 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
9488 from 1-15 are allowed for compatibility with old versions of GDB.\n\
9489 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
9490 The special arg \"all\" is recognized to mean all signals except those\n\
9491 used by the debugger, typically SIGTRAP and SIGINT.\n\
9493 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
9494 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
9495 Stop means reenter debugger if this signal happens (implies print).\n\
9496 Print means print a message if this signal happens.\n\
9497 Pass means let program see this signal; otherwise program doesn't know.\n\
9498 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
9499 Pass and Stop may be combined.\n\
9501 Multiple signals may be specified. Signal numbers and signal names\n\
9502 may be interspersed with actions, with the actions being performed for\n\
9503 all signals cumulatively specified."));
9504 set_cmd_completer (c
, handle_completer
);
9507 stop_command
= add_cmd ("stop", class_obscure
,
9508 not_just_help_class_command
, _("\
9509 There is no `stop' command, but you can set a hook on `stop'.\n\
9510 This allows you to set a list of commands to be run each time execution\n\
9511 of the program stops."), &cmdlist
);
9513 add_setshow_boolean_cmd
9514 ("infrun", class_maintenance
, &debug_infrun
,
9515 _("Set inferior debugging."),
9516 _("Show inferior debugging."),
9517 _("When non-zero, inferior specific debugging is enabled."),
9518 NULL
, show_debug_infrun
, &setdebuglist
, &showdebuglist
);
9520 add_setshow_boolean_cmd ("non-stop", no_class
,
9522 Set whether gdb controls the inferior in non-stop mode."), _("\
9523 Show whether gdb controls the inferior in non-stop mode."), _("\
9524 When debugging a multi-threaded program and this setting is\n\
9525 off (the default, also called all-stop mode), when one thread stops\n\
9526 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
9527 all other threads in the program while you interact with the thread of\n\
9528 interest. When you continue or step a thread, you can allow the other\n\
9529 threads to run, or have them remain stopped, but while you inspect any\n\
9530 thread's state, all threads stop.\n\
9532 In non-stop mode, when one thread stops, other threads can continue\n\
9533 to run freely. You'll be able to step each thread independently,\n\
9534 leave it stopped or free to run as needed."),
9540 for (size_t i
= 0; i
< GDB_SIGNAL_LAST
; i
++)
9543 signal_print
[i
] = 1;
9544 signal_program
[i
] = 1;
9545 signal_catch
[i
] = 0;
9548 /* Signals caused by debugger's own actions should not be given to
9549 the program afterwards.
9551 Do not deliver GDB_SIGNAL_TRAP by default, except when the user
9552 explicitly specifies that it should be delivered to the target
9553 program. Typically, that would occur when a user is debugging a
9554 target monitor on a simulator: the target monitor sets a
9555 breakpoint; the simulator encounters this breakpoint and halts
9556 the simulation handing control to GDB; GDB, noting that the stop
9557 address doesn't map to any known breakpoint, returns control back
9558 to the simulator; the simulator then delivers the hardware
9559 equivalent of a GDB_SIGNAL_TRAP to the program being
9561 signal_program
[GDB_SIGNAL_TRAP
] = 0;
9562 signal_program
[GDB_SIGNAL_INT
] = 0;
9564 /* Signals that are not errors should not normally enter the debugger. */
9565 signal_stop
[GDB_SIGNAL_ALRM
] = 0;
9566 signal_print
[GDB_SIGNAL_ALRM
] = 0;
9567 signal_stop
[GDB_SIGNAL_VTALRM
] = 0;
9568 signal_print
[GDB_SIGNAL_VTALRM
] = 0;
9569 signal_stop
[GDB_SIGNAL_PROF
] = 0;
9570 signal_print
[GDB_SIGNAL_PROF
] = 0;
9571 signal_stop
[GDB_SIGNAL_CHLD
] = 0;
9572 signal_print
[GDB_SIGNAL_CHLD
] = 0;
9573 signal_stop
[GDB_SIGNAL_IO
] = 0;
9574 signal_print
[GDB_SIGNAL_IO
] = 0;
9575 signal_stop
[GDB_SIGNAL_POLL
] = 0;
9576 signal_print
[GDB_SIGNAL_POLL
] = 0;
9577 signal_stop
[GDB_SIGNAL_URG
] = 0;
9578 signal_print
[GDB_SIGNAL_URG
] = 0;
9579 signal_stop
[GDB_SIGNAL_WINCH
] = 0;
9580 signal_print
[GDB_SIGNAL_WINCH
] = 0;
9581 signal_stop
[GDB_SIGNAL_PRIO
] = 0;
9582 signal_print
[GDB_SIGNAL_PRIO
] = 0;
9584 /* These signals are used internally by user-level thread
9585 implementations. (See signal(5) on Solaris.) Like the above
9586 signals, a healthy program receives and handles them as part of
9587 its normal operation. */
9588 signal_stop
[GDB_SIGNAL_LWP
] = 0;
9589 signal_print
[GDB_SIGNAL_LWP
] = 0;
9590 signal_stop
[GDB_SIGNAL_WAITING
] = 0;
9591 signal_print
[GDB_SIGNAL_WAITING
] = 0;
9592 signal_stop
[GDB_SIGNAL_CANCEL
] = 0;
9593 signal_print
[GDB_SIGNAL_CANCEL
] = 0;
9594 signal_stop
[GDB_SIGNAL_LIBRT
] = 0;
9595 signal_print
[GDB_SIGNAL_LIBRT
] = 0;
9597 /* Update cached state. */
9598 signal_cache_update (-1);
9600 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support
,
9601 &stop_on_solib_events
, _("\
9602 Set stopping for shared library events."), _("\
9603 Show stopping for shared library events."), _("\
9604 If nonzero, gdb will give control to the user when the dynamic linker\n\
9605 notifies gdb of shared library events. The most common event of interest\n\
9606 to the user would be loading/unloading of a new library."),
9607 set_stop_on_solib_events
,
9608 show_stop_on_solib_events
,
9609 &setlist
, &showlist
);
9611 add_setshow_enum_cmd ("follow-fork-mode", class_run
,
9612 follow_fork_mode_kind_names
,
9613 &follow_fork_mode_string
, _("\
9614 Set debugger response to a program call of fork or vfork."), _("\
9615 Show debugger response to a program call of fork or vfork."), _("\
9616 A fork or vfork creates a new process. follow-fork-mode can be:\n\
9617 parent - the original process is debugged after a fork\n\
9618 child - the new process is debugged after a fork\n\
9619 The unfollowed process will continue to run.\n\
9620 By default, the debugger will follow the parent process."),
9622 show_follow_fork_mode_string
,
9623 &setlist
, &showlist
);
9625 add_setshow_enum_cmd ("follow-exec-mode", class_run
,
9626 follow_exec_mode_names
,
9627 &follow_exec_mode_string
, _("\
9628 Set debugger response to a program call of exec."), _("\
9629 Show debugger response to a program call of exec."), _("\
9630 An exec call replaces the program image of a process.\n\
9632 follow-exec-mode can be:\n\
9634 new - the debugger creates a new inferior and rebinds the process\n\
9635 to this new inferior. The program the process was running before\n\
9636 the exec call can be restarted afterwards by restarting the original\n\
9639 same - the debugger keeps the process bound to the same inferior.\n\
9640 The new executable image replaces the previous executable loaded in\n\
9641 the inferior. Restarting the inferior after the exec call restarts\n\
9642 the executable the process was running after the exec call.\n\
9644 By default, the debugger will use the same inferior."),
9646 show_follow_exec_mode_string
,
9647 &setlist
, &showlist
);
9649 add_setshow_enum_cmd ("scheduler-locking", class_run
,
9650 scheduler_enums
, &scheduler_mode
, _("\
9651 Set mode for locking scheduler during execution."), _("\
9652 Show mode for locking scheduler during execution."), _("\
9653 off == no locking (threads may preempt at any time)\n\
9654 on == full locking (no thread except the current thread may run)\n\
9655 This applies to both normal execution and replay mode.\n\
9656 step == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
9657 In this mode, other threads may run during other commands.\n\
9658 This applies to both normal execution and replay mode.\n\
9659 replay == scheduler locked in replay mode and unlocked during normal execution."),
9660 set_schedlock_func
, /* traps on target vector */
9661 show_scheduler_mode
,
9662 &setlist
, &showlist
);
9664 add_setshow_boolean_cmd ("schedule-multiple", class_run
, &sched_multi
, _("\
9665 Set mode for resuming threads of all processes."), _("\
9666 Show mode for resuming threads of all processes."), _("\
9667 When on, execution commands (such as 'continue' or 'next') resume all\n\
9668 threads of all processes. When off (which is the default), execution\n\
9669 commands only resume the threads of the current process. The set of\n\
9670 threads that are resumed is further refined by the scheduler-locking\n\
9671 mode (see help set scheduler-locking)."),
9673 show_schedule_multiple
,
9674 &setlist
, &showlist
);
9676 add_setshow_boolean_cmd ("step-mode", class_run
, &step_stop_if_no_debug
, _("\
9677 Set mode of the step operation."), _("\
9678 Show mode of the step operation."), _("\
9679 When set, doing a step over a function without debug line information\n\
9680 will stop at the first instruction of that function. Otherwise, the\n\
9681 function is skipped and the step command stops at a different source line."),
9683 show_step_stop_if_no_debug
,
9684 &setlist
, &showlist
);
9686 add_setshow_auto_boolean_cmd ("displaced-stepping", class_run
,
9687 &can_use_displaced_stepping
, _("\
9688 Set debugger's willingness to use displaced stepping."), _("\
9689 Show debugger's willingness to use displaced stepping."), _("\
9690 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
9691 supported by the target architecture. If off, gdb will not use displaced\n\
9692 stepping to step over breakpoints, even if such is supported by the target\n\
9693 architecture. If auto (which is the default), gdb will use displaced stepping\n\
9694 if the target architecture supports it and non-stop mode is active, but will not\n\
9695 use it in all-stop mode (see help set non-stop)."),
9697 show_can_use_displaced_stepping
,
9698 &setlist
, &showlist
);
9700 add_setshow_enum_cmd ("exec-direction", class_run
, exec_direction_names
,
9701 &exec_direction
, _("Set direction of execution.\n\
9702 Options are 'forward' or 'reverse'."),
9703 _("Show direction of execution (forward/reverse)."),
9704 _("Tells gdb whether to execute forward or backward."),
9705 set_exec_direction_func
, show_exec_direction_func
,
9706 &setlist
, &showlist
);
9708 /* Set/show detach-on-fork: user-settable mode. */
9710 add_setshow_boolean_cmd ("detach-on-fork", class_run
, &detach_fork
, _("\
9711 Set whether gdb will detach the child of a fork."), _("\
9712 Show whether gdb will detach the child of a fork."), _("\
9713 Tells gdb whether to detach the child of a fork."),
9714 NULL
, NULL
, &setlist
, &showlist
);
9716 /* Set/show disable address space randomization mode. */
9718 add_setshow_boolean_cmd ("disable-randomization", class_support
,
9719 &disable_randomization
, _("\
9720 Set disabling of debuggee's virtual address space randomization."), _("\
9721 Show disabling of debuggee's virtual address space randomization."), _("\
9722 When this mode is on (which is the default), randomization of the virtual\n\
9723 address space is disabled. Standalone programs run with the randomization\n\
9724 enabled by default on some platforms."),
9725 &set_disable_randomization
,
9726 &show_disable_randomization
,
9727 &setlist
, &showlist
);
9729 /* ptid initializations */
9730 inferior_ptid
= null_ptid
;
9731 target_last_wait_ptid
= minus_one_ptid
;
9733 gdb::observers::thread_ptid_changed
.attach (infrun_thread_ptid_changed
,
9735 gdb::observers::thread_stop_requested
.attach (infrun_thread_stop_requested
,
9737 gdb::observers::thread_exit
.attach (infrun_thread_thread_exit
, "infrun");
9738 gdb::observers::inferior_exit
.attach (infrun_inferior_exit
, "infrun");
9739 gdb::observers::inferior_execd
.attach (infrun_inferior_execd
, "infrun");
9741 /* Explicitly create without lookup, since that tries to create a
9742 value with a void typed value, and when we get here, gdbarch
9743 isn't initialized yet. At this point, we're quite sure there
9744 isn't another convenience variable of the same name. */
9745 create_internalvar_type_lazy ("_siginfo", &siginfo_funcs
, NULL
);
9747 add_setshow_boolean_cmd ("observer", no_class
,
9748 &observer_mode_1
, _("\
9749 Set whether gdb controls the inferior in observer mode."), _("\
9750 Show whether gdb controls the inferior in observer mode."), _("\
9751 In observer mode, GDB can get data from the inferior, but not\n\
9752 affect its execution. Registers and memory may not be changed,\n\
9753 breakpoints may not be set, and the program cannot be interrupted\n\
9761 selftests::register_test ("infrun_thread_ptid_changed",
9762 selftests::infrun_thread_ptid_changed
);