1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995,
5 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
6 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "gdb_string.h"
25 #include <ctype.h>
26 #include "symtab.h"
27 #include "frame.h"
28 #include "inferior.h"
29 #include "exceptions.h"
30 #include "breakpoint.h"
31 #include "gdb_wait.h"
32 #include "gdbcore.h"
33 #include "gdbcmd.h"
34 #include "cli/cli-script.h"
35 #include "target.h"
36 #include "gdbthread.h"
37 #include "annotate.h"
38 #include "symfile.h"
39 #include "top.h"
40 #include <signal.h>
41 #include "inf-loop.h"
42 #include "regcache.h"
43 #include "value.h"
44 #include "observer.h"
45 #include "language.h"
46 #include "solib.h"
47 #include "main.h"
48 #include "dictionary.h"
49 #include "block.h"
50 #include "gdb_assert.h"
51 #include "mi/mi-common.h"
52 #include "event-top.h"
53 #include "record.h"
54 #include "inline-frame.h"
55 #include "jit.h"
56 #include "tracepoint.h"
57 #include "continuations.h"
58 #include "interps.h"
59
60 /* Prototypes for local functions */
61
62 static void signals_info (char *, int);
63
64 static void handle_command (char *, int);
65
66 static void sig_print_info (enum target_signal);
67
68 static void sig_print_header (void);
69
70 static void resume_cleanups (void *);
71
72 static int hook_stop_stub (void *);
73
74 static int restore_selected_frame (void *);
75
76 static int follow_fork (void);
77
78 static void set_schedlock_func (char *args, int from_tty,
79 struct cmd_list_element *c);
80
81 static int currently_stepping (struct thread_info *tp);
82
83 static int currently_stepping_or_nexting_callback (struct thread_info *tp,
84 void *data);
85
86 static void xdb_handle_command (char *args, int from_tty);
87
88 static int prepare_to_proceed (int);
89
90 static void print_exited_reason (int exitstatus);
91
92 static void print_signal_exited_reason (enum target_signal siggnal);
93
94 static void print_no_history_reason (void);
95
96 static void print_signal_received_reason (enum target_signal siggnal);
97
98 static void print_end_stepping_range_reason (void);
99
100 void _initialize_infrun (void);
101
102 void nullify_last_target_wait_ptid (void);
103
104 static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);
105
106 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
107
108 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
109
110 /* When set, stop the 'step' command if we enter a function which has
111 no line number information. The normal behavior is that we step
112 over such functions. */
113 int step_stop_if_no_debug = 0;
114 static void
115 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
116 struct cmd_list_element *c, const char *value)
117 {
118 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
119 }
120
121 /* In asynchronous mode, but simulating synchronous execution. */
122
123 int sync_execution = 0;
124
125 /* wait_for_inferior and normal_stop use this to notify the user
126 when the inferior stopped in a different thread than it had been
127 running in. */
128
129 static ptid_t previous_inferior_ptid;
130
131 /* Default behavior is to detach newly forked processes (legacy). */
132 int detach_fork = 1;
133
134 int debug_displaced = 0;
135 static void
136 show_debug_displaced (struct ui_file *file, int from_tty,
137 struct cmd_list_element *c, const char *value)
138 {
139 fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
140 }
141
142 int debug_infrun = 0;
143 static void
144 show_debug_infrun (struct ui_file *file, int from_tty,
145 struct cmd_list_element *c, const char *value)
146 {
147 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
148 }
149
150
151 /* Support for disabling address space randomization. */
152
153 int disable_randomization = 1;
154
155 static void
156 show_disable_randomization (struct ui_file *file, int from_tty,
157 struct cmd_list_element *c, const char *value)
158 {
159 if (target_supports_disable_randomization ())
160 fprintf_filtered (file,
161 _("Disabling randomization of debuggee's "
162 "virtual address space is %s.\n"),
163 value);
164 else
165 fputs_filtered (_("Disabling randomization of debuggee's "
166 "virtual address space is unsupported on\n"
167 "this platform.\n"), file);
168 }
169
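/* The "set" hook for the disable-randomization setting: reject the
   change when the target cannot control address space randomization.  */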
170 static void
171 set_disable_randomization (char *args, int from_tty,
172 struct cmd_list_element *c)
173 {
174 if (!target_supports_disable_randomization ())
175 error (_("Disabling randomization of debuggee's "
176 "virtual address space is unsupported on\n"
177 "this platform."));
178 }
179
180
181 /* If the program uses ELF-style shared libraries, then calls to
182 functions in shared libraries go through stubs, which live in a
183 table called the PLT (Procedure Linkage Table). The first time the
184 function is called, the stub sends control to the dynamic linker,
185 which looks up the function's real address, patches the stub so
186 that future calls will go directly to the function, and then passes
187 control to the function.
188
189 If we are stepping at the source level, we don't want to see any of
190 this --- we just want to skip over the stub and the dynamic linker.
191 The simple approach is to single-step until control leaves the
192 dynamic linker.
193
194 However, on some systems (e.g., Red Hat's 5.2 distribution) the
195 dynamic linker calls functions in the shared C library, so you
196 can't tell from the PC alone whether the dynamic linker is still
197 running. In this case, we use a step-resume breakpoint to get us
198 past the dynamic linker, as if we were using "next" to step over a
199 function call.
200
201 in_solib_dynsym_resolve_code() says whether we're in the dynamic
202 linker code or not. Normally, this means we single-step. However,
203 if SKIP_SOLIB_RESOLVER returns non-zero, then its value is an
204 address where we can place a step-resume breakpoint to get past the
205 linker's symbol resolution function.
206
207 in_solib_dynsym_resolve_code() can generally be implemented in a
208 pretty portable way, by comparing the PC against the address ranges
209 of the dynamic linker's sections.
210
211 SKIP_SOLIB_RESOLVER is generally going to be system-specific, since
212 it depends on internal details of the dynamic linker. It's usually
213 not too hard to figure out where to put a breakpoint, but it
214 certainly isn't portable. SKIP_SOLIB_RESOLVER should do plenty of
215 sanity checking. If it can't figure things out, returning zero and
216 getting the (possibly confusing) stepping behavior is better than
217 signalling an error, which will obscure the change in the
218 inferior's state. */
219
220 /* This function returns TRUE if pc is the address of an instruction
221 that lies within the dynamic linker (such as the event hook, or the
222 dld itself).
223
224 This function must be used only when a dynamic linker event has
225 been caught, and the inferior is being stepped out of the hook, or
226 undefined results are guaranteed. */
227
228 #ifndef SOLIB_IN_DYNAMIC_LINKER
229 #define SOLIB_IN_DYNAMIC_LINKER(pid,pc) 0
230 #endif
231
232 /* "Observer mode" is somewhat like a more extreme version of
233 non-stop, in which all GDB operations that might affect the
234 target's execution have been disabled. */
235
236 static int non_stop_1 = 0;
237
238 int observer_mode = 0;
239 static int observer_mode_1 = 0;
240
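/* The "set" hook for the observer-mode setting.  Refuses to change the
   mode while the inferior is running; otherwise derives the individual
   target permissions (may-write-registers, may-write-memory,
   may-insert-breakpoints, etc.) from the new value.  */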
241 static void
242 set_observer_mode (char *args, int from_tty,
243 struct cmd_list_element *c)
244 {
245 extern int pagination_enabled;
246
247 if (target_has_execution)
248 {
249 observer_mode_1 = observer_mode;
250 error (_("Cannot change this setting while the inferior is running."));
251 }
252
253 observer_mode = observer_mode_1;
254
255 may_write_registers = !observer_mode;
256 may_write_memory = !observer_mode;
257 may_insert_breakpoints = !observer_mode;
258 may_insert_tracepoints = !observer_mode;
259 /* We can insert fast tracepoints in or out of observer mode,
260 but make sure they are enabled if we're going into this mode. */
261 if (observer_mode)
262 may_insert_fast_tracepoints = 1;
263 may_stop = !observer_mode;
264 update_target_permissions ();
265
266 /* Going *into* observer mode we must force non-stop, then
267 going out we leave it that way. */
268 if (observer_mode)
269 {
270 target_async_permitted = 1;
271 pagination_enabled = 0;
272 non_stop = non_stop_1 = 1;
273 }
274
275 if (from_tty)
276 printf_filtered (_("Observer mode is now %s.\n"),
277 (observer_mode ? "on" : "off"));
278 }
279
280 static void
281 show_observer_mode (struct ui_file *file, int from_tty,
282 struct cmd_list_element *c, const char *value)
283 {
284 fprintf_filtered (file, _("Observer mode is %s.\n"), value);
285 }
286
287 /* This updates the value of observer mode based on changes in
288 permissions. Note that we are deliberately ignoring the values of
289 may-write-registers and may-write-memory, since the user may have
290 reason to enable these during a session, for instance to turn on a
291 debugging-related global. */
292
293 void
294 update_observer_mode (void)
295 {
296 int newval;
297
298 newval = (!may_insert_breakpoints
299 && !may_insert_tracepoints
300 && may_insert_fast_tracepoints
301 && !may_stop
302 && non_stop);
303
304 /* Let the user know if things change. */
305 if (newval != observer_mode)
306 printf_filtered (_("Observer mode is now %s.\n"),
307 (newval ? "on" : "off"));
308
309 observer_mode = observer_mode_1 = newval;
310 }
311
312 /* Tables of how to react to signals; the user sets them. */
313
314 static unsigned char *signal_stop;
315 static unsigned char *signal_print;
316 static unsigned char *signal_program;
317
318 /* Table of signals that the target may silently handle.
319 This is automatically determined from the flags above,
320 and simply cached here. */
321 static unsigned char *signal_pass;
322
323 #define SET_SIGS(nsigs,sigs,flags) \
324 do { \
325 int signum = (nsigs); \
326 while (signum-- > 0) \
327 if ((sigs)[signum]) \
328 (flags)[signum] = 1; \
329 } while (0)
330
331 #define UNSET_SIGS(nsigs,sigs,flags) \
332 do { \
333 int signum = (nsigs); \
334 while (signum-- > 0) \
335 if ((sigs)[signum]) \
336 (flags)[signum] = 0; \
337 } while (0)
338
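/* For example, the "handle SIG stop" path in handle_command below uses
   these roughly as follows (a sketch, not the exact code):

     SET_SIGS (nsigs, sigs, signal_stop);
     SET_SIGS (nsigs, sigs, signal_print);  */
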
339 /* Value to pass to target_resume() to cause all threads to resume. */
340
341 #define RESUME_ALL minus_one_ptid
342
343 /* Command list pointer for the "stop" placeholder. */
344
345 static struct cmd_list_element *stop_command;
346
347 /* Function inferior was in as of last step command. */
348
349 static struct symbol *step_start_function;
350
351 /* Nonzero if we want to give control to the user when we're notified
352 of shared library events by the dynamic linker. */
353 int stop_on_solib_events;
354 static void
355 show_stop_on_solib_events (struct ui_file *file, int from_tty,
356 struct cmd_list_element *c, const char *value)
357 {
358 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
359 value);
360 }
361
362 /* Nonzero means we are expecting a trace trap
363 and should stop the inferior and return silently when it happens. */
364
365 int stop_after_trap;
366
367 /* Save register contents here when executing a "finish" command or when
368 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
369 Thus this contains the return value from the called function (assuming
370 values are returned in a register). */
371
372 struct regcache *stop_registers;
373
374 /* Nonzero after stop if current stack frame should be printed. */
375
376 static int stop_print_frame;
377
378 /* This is a cached copy of the pid/waitstatus of the last event
379 returned by target_wait()/deprecated_target_wait_hook(). This
380 information is returned by get_last_target_status(). */
381 static ptid_t target_last_wait_ptid;
382 static struct target_waitstatus target_last_waitstatus;
383
384 static void context_switch (ptid_t ptid);
385
386 void init_thread_stepping_state (struct thread_info *tss);
387
388 void init_infwait_state (void);
389
390 static const char follow_fork_mode_child[] = "child";
391 static const char follow_fork_mode_parent[] = "parent";
392
393 static const char *follow_fork_mode_kind_names[] = {
394 follow_fork_mode_child,
395 follow_fork_mode_parent,
396 NULL
397 };
398
399 static const char *follow_fork_mode_string = follow_fork_mode_parent;
400 static void
401 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
402 struct cmd_list_element *c, const char *value)
403 {
404 fprintf_filtered (file,
405 _("Debugger response to a program "
406 "call of fork or vfork is \"%s\".\n"),
407 value);
408 }
409 \f
410
411 /* Tell the target to follow the fork we're stopped at. Returns true
412 if the inferior should be resumed; false, if the target for some
413 reason decided it's best not to resume. */
414
415 static int
416 follow_fork (void)
417 {
418 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
419 int should_resume = 1;
420 struct thread_info *tp;
421
422 /* Copy user stepping state to the new inferior thread. FIXME: the
423 followed fork child thread should have a copy of most of the
424 parent thread structure's run control related fields, not just these.
425 Initialized to avoid "may be used uninitialized" warnings from gcc. */
426 struct breakpoint *step_resume_breakpoint = NULL;
427 struct breakpoint *exception_resume_breakpoint = NULL;
428 CORE_ADDR step_range_start = 0;
429 CORE_ADDR step_range_end = 0;
430 struct frame_id step_frame_id = { 0 };
431
432 if (!non_stop)
433 {
434 ptid_t wait_ptid;
435 struct target_waitstatus wait_status;
436
437 /* Get the last target status returned by target_wait(). */
438 get_last_target_status (&wait_ptid, &wait_status);
439
440 /* If not stopped at a fork event, then there's nothing else to
441 do. */
442 if (wait_status.kind != TARGET_WAITKIND_FORKED
443 && wait_status.kind != TARGET_WAITKIND_VFORKED)
444 return 1;
445
446 /* Check if we switched over from WAIT_PTID, since the event was
447 reported. */
448 if (!ptid_equal (wait_ptid, minus_one_ptid)
449 && !ptid_equal (inferior_ptid, wait_ptid))
450 {
451 /* We did. Switch back to WAIT_PTID thread, to tell the
452 target to follow it (in either direction). We'll
453 afterwards refuse to resume, and inform the user what
454 happened. */
455 switch_to_thread (wait_ptid);
456 should_resume = 0;
457 }
458 }
459
460 tp = inferior_thread ();
461
462 /* If there were any forks/vforks that were caught and are now to be
463 followed, then do so now. */
464 switch (tp->pending_follow.kind)
465 {
466 case TARGET_WAITKIND_FORKED:
467 case TARGET_WAITKIND_VFORKED:
468 {
469 ptid_t parent, child;
470
471 /* If the user did a next/step, etc, over a fork call,
472 preserve the stepping state in the fork child. */
473 if (follow_child && should_resume)
474 {
475 step_resume_breakpoint = clone_momentary_breakpoint
476 (tp->control.step_resume_breakpoint);
477 step_range_start = tp->control.step_range_start;
478 step_range_end = tp->control.step_range_end;
479 step_frame_id = tp->control.step_frame_id;
480 exception_resume_breakpoint
481 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
482
483 /* For now, delete the parent's sr breakpoint, otherwise,
484 parent/child sr breakpoints are considered duplicates,
485 and the child version will not be installed. Remove
486 this when the breakpoints module becomes aware of
487 inferiors and address spaces. */
488 delete_step_resume_breakpoint (tp);
489 tp->control.step_range_start = 0;
490 tp->control.step_range_end = 0;
491 tp->control.step_frame_id = null_frame_id;
492 delete_exception_resume_breakpoint (tp);
493 }
494
495 parent = inferior_ptid;
496 child = tp->pending_follow.value.related_pid;
497
498 /* Tell the target to do whatever is necessary to follow
499 either parent or child. */
500 if (target_follow_fork (follow_child))
501 {
502 /* Target refused to follow, or there's some other reason
503 we shouldn't resume. */
504 should_resume = 0;
505 }
506 else
507 {
508 /* This pending follow fork event is now handled, one way
509 or another. The previously selected thread may be gone
510 from the lists by now, but if it is still around, we need
511 to clear the pending follow request. */
512 tp = find_thread_ptid (parent);
513 if (tp)
514 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
515
516 /* This makes sure we don't try to apply the "Switched
517 over from WAIT_PID" logic above. */
518 nullify_last_target_wait_ptid ();
519
520 /* If we followed the child, switch to it... */
521 if (follow_child)
522 {
523 switch_to_thread (child);
524
525 /* ... and preserve the stepping state, in case the
526 user was stepping over the fork call. */
527 if (should_resume)
528 {
529 tp = inferior_thread ();
530 tp->control.step_resume_breakpoint
531 = step_resume_breakpoint;
532 tp->control.step_range_start = step_range_start;
533 tp->control.step_range_end = step_range_end;
534 tp->control.step_frame_id = step_frame_id;
535 tp->control.exception_resume_breakpoint
536 = exception_resume_breakpoint;
537 }
538 else
539 {
540 /* If we get here, it was because we're trying to
541 resume from a fork catchpoint, but, the user
542 has switched threads away from the thread that
543 forked. In that case, the resume command
544 issued is most likely not applicable to the
545 child, so just warn, and refuse to resume. */
546 warning (_("Not resuming: switched threads "
547 "before following fork child.\n"));
548 }
549
550 /* Reset breakpoints in the child as appropriate. */
551 follow_inferior_reset_breakpoints ();
552 }
553 else
554 switch_to_thread (parent);
555 }
556 }
557 break;
558 case TARGET_WAITKIND_SPURIOUS:
559 /* Nothing to follow. */
560 break;
561 default:
562 internal_error (__FILE__, __LINE__,
563 "Unexpected pending_follow.kind %d\n",
564 tp->pending_follow.kind);
565 break;
566 }
567
568 return should_resume;
569 }
570
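/* After following a fork to a new inferior, update breakpoint state so
   that it matches the inferior now being debugged.  */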
571 void
572 follow_inferior_reset_breakpoints (void)
573 {
574 struct thread_info *tp = inferior_thread ();
575
576 /* Was there a step_resume breakpoint? (There was if the user
577 did a "next" at the fork() call.) If so, explicitly reset its
578 thread number.
579
580 step_resumes are a form of bp that are made to be per-thread.
581 Since we created the step_resume bp when the parent process
582 was being debugged, and now are switching to the child process,
583 from the breakpoint package's viewpoint, that's a switch of
584 "threads". We must update the bp's notion of which thread
585 it is for, or it'll be ignored when it triggers. */
586
587 if (tp->control.step_resume_breakpoint)
588 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
589
590 if (tp->control.exception_resume_breakpoint)
591 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
592
593 /* Reinsert all breakpoints in the child. The user may have set
594 breakpoints after catching the fork, in which case those
595 were never set in the child, but only in the parent. This makes
596 sure the inserted breakpoints match the breakpoint list. */
597
598 breakpoint_re_set ();
599 insert_breakpoints ();
600 }
601
602 /* The child has exited or execed: resume threads of the parent the
603 user wanted to be executing. */
604
605 static int
606 proceed_after_vfork_done (struct thread_info *thread,
607 void *arg)
608 {
609 int pid = * (int *) arg;
610
611 if (ptid_get_pid (thread->ptid) == pid
612 && is_running (thread->ptid)
613 && !is_executing (thread->ptid)
614 && !thread->stop_requested
615 && thread->suspend.stop_signal == TARGET_SIGNAL_0)
616 {
617 if (debug_infrun)
618 fprintf_unfiltered (gdb_stdlog,
619 "infrun: resuming vfork parent thread %s\n",
620 target_pid_to_str (thread->ptid));
621
622 switch_to_thread (thread->ptid);
623 clear_proceed_status ();
624 proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);
625 }
626
627 return 0;
628 }
629
630 /* Called whenever we notice an exec or exit event, to handle
631 detaching or resuming a vfork parent. */
632
633 static void
634 handle_vfork_child_exec_or_exit (int exec)
635 {
636 struct inferior *inf = current_inferior ();
637
638 if (inf->vfork_parent)
639 {
640 int resume_parent = -1;
641
642 /* This exec or exit marks the end of the shared memory region
643 between the parent and the child. If the user wanted to
644 detach from the parent, now is the time. */
645
646 if (inf->vfork_parent->pending_detach)
647 {
648 struct thread_info *tp;
649 struct cleanup *old_chain;
650 struct program_space *pspace;
651 struct address_space *aspace;
652
653 /* follow-fork child, detach-on-fork on. */
654
655 old_chain = make_cleanup_restore_current_thread ();
656
657 /* We're letting loose of the parent. */
658 tp = any_live_thread_of_process (inf->vfork_parent->pid);
659 switch_to_thread (tp->ptid);
660
661 /* We're about to detach from the parent, which implicitly
662 removes breakpoints from its address space. There's a
663 catch here: we want to reuse the spaces for the child,
664 but, parent/child are still sharing the pspace at this
665 point, although the exec in reality makes the kernel give
666 the child a fresh set of new pages. The problem here is
667 that the breakpoints module, being unaware of this, would
668 likely choose the child process to write to the parent
669 address space. Swapping the child temporarily away from
670 the spaces has the desired effect. Yes, this is "sort
671 of" a hack. */
672
673 pspace = inf->pspace;
674 aspace = inf->aspace;
675 inf->aspace = NULL;
676 inf->pspace = NULL;
677
678 if (debug_infrun || info_verbose)
679 {
680 target_terminal_ours ();
681
682 if (exec)
683 fprintf_filtered (gdb_stdlog,
684 "Detaching vfork parent process "
685 "%d after child exec.\n",
686 inf->vfork_parent->pid);
687 else
688 fprintf_filtered (gdb_stdlog,
689 "Detaching vfork parent process "
690 "%d after child exit.\n",
691 inf->vfork_parent->pid);
692 }
693
694 target_detach (NULL, 0);
695
696 /* Put it back. */
697 inf->pspace = pspace;
698 inf->aspace = aspace;
699
700 do_cleanups (old_chain);
701 }
702 else if (exec)
703 {
704 /* We're staying attached to the parent, so, really give the
705 child a new address space. */
706 inf->pspace = add_program_space (maybe_new_address_space ());
707 inf->aspace = inf->pspace->aspace;
708 inf->removable = 1;
709 set_current_program_space (inf->pspace);
710
711 resume_parent = inf->vfork_parent->pid;
712
713 /* Break the bonds. */
714 inf->vfork_parent->vfork_child = NULL;
715 }
716 else
717 {
718 struct cleanup *old_chain;
719 struct program_space *pspace;
720
721 /* If this is a vfork child exiting, then the pspace and
722 aspaces were shared with the parent. Since we're
723 reporting the process exit, we'll be mourning all that is
724 found in the address space, and switching to null_ptid,
725 preparing to start a new inferior. But, since we don't
726 want to clobber the parent's address/program spaces, we
727 go ahead and create a new one for this exiting
728 inferior. */
729
730 /* Switch to null_ptid, so that clone_program_space doesn't want
731 to read the selected frame of a dead process. */
732 old_chain = save_inferior_ptid ();
733 inferior_ptid = null_ptid;
734
735 /* This inferior is dead, so avoid giving the breakpoints
736 module the option to write through to it (cloning a
737 program space resets breakpoints). */
738 inf->aspace = NULL;
739 inf->pspace = NULL;
740 pspace = add_program_space (maybe_new_address_space ());
741 set_current_program_space (pspace);
742 inf->removable = 1;
743 clone_program_space (pspace, inf->vfork_parent->pspace);
744 inf->pspace = pspace;
745 inf->aspace = pspace->aspace;
746
747 /* Put back inferior_ptid. We'll continue mourning this
748 inferior. */
749 do_cleanups (old_chain);
750
751 resume_parent = inf->vfork_parent->pid;
752 /* Break the bonds. */
753 inf->vfork_parent->vfork_child = NULL;
754 }
755
756 inf->vfork_parent = NULL;
757
758 gdb_assert (current_program_space == inf->pspace);
759
760 if (non_stop && resume_parent != -1)
761 {
762 /* If the user wanted the parent to be running, let it go
763 free now. */
764 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
765
766 if (debug_infrun)
767 fprintf_unfiltered (gdb_stdlog,
768 "infrun: resuming vfork parent process %d\n",
769 resume_parent);
770
771 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
772
773 do_cleanups (old_chain);
774 }
775 }
776 }
777
778 /* Enum strings for "set|show follow-exec-mode". */
779
780 static const char follow_exec_mode_new[] = "new";
781 static const char follow_exec_mode_same[] = "same";
782 static const char *follow_exec_mode_names[] =
783 {
784 follow_exec_mode_new,
785 follow_exec_mode_same,
786 NULL,
787 };
788
789 static const char *follow_exec_mode_string = follow_exec_mode_same;
790 static void
791 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
792 struct cmd_list_element *c, const char *value)
793 {
794 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
795 }
796
797 /* EXECD_PATHNAME is assumed to be non-NULL. */
798
799 static void
800 follow_exec (ptid_t pid, char *execd_pathname)
801 {
802 struct thread_info *th = inferior_thread ();
803 struct inferior *inf = current_inferior ();
804
805 /* This is an exec event that we actually wish to pay attention to.
806 Refresh our symbol table to the newly exec'd program, remove any
807 momentary bp's, etc.
808
809 If there are breakpoints, they aren't really inserted now,
810 since the exec() transformed our inferior into a fresh set
811 of instructions.
812
813 We want to preserve symbolic breakpoints on the list, since
814 we have hopes that they can be reset after the new a.out's
815 symbol table is read.
816
817 However, any "raw" breakpoints must be removed from the list
818 (e.g., the solib bp's), since their address is probably invalid
819 now.
820
821 And, we DON'T want to call delete_breakpoints() here, since
822 that may write the bp's "shadow contents" (the instruction
823 value that was overwritten with a TRAP instruction). Since
824 we now have a new a.out, those shadow contents aren't valid. */
825
826 mark_breakpoints_out ();
827
828 update_breakpoints_after_exec ();
829
830 /* If there was one, it's gone now. We cannot truly step-to-next
831 statement through an exec(). */
832 th->control.step_resume_breakpoint = NULL;
833 th->control.exception_resume_breakpoint = NULL;
834 th->control.step_range_start = 0;
835 th->control.step_range_end = 0;
836
837 /* The target reports the exec event to the main thread, even if
838 some other thread does the exec, and even if the main thread was
839 already stopped --- if debugging in non-stop mode, it's possible
840 the user had the main thread held stopped in the previous image
841 --- release it now. This is the same behavior as step-over-exec
842 with scheduler-locking on in all-stop mode. */
843 th->stop_requested = 0;
844
845 /* What is this a.out's name? */
846 printf_unfiltered (_("%s is executing new program: %s\n"),
847 target_pid_to_str (inferior_ptid),
848 execd_pathname);
849
850 /* We've followed the inferior through an exec. Therefore, the
851 inferior has essentially been killed & reborn. */
852
853 gdb_flush (gdb_stdout);
854
855 breakpoint_init_inferior (inf_execd);
856
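/* If a sysroot is in effect, prefix the exec'd pathname with it, so
   that the new executable is looked up under the sysroot.  */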
857 if (gdb_sysroot && *gdb_sysroot)
858 {
859 char *name = alloca (strlen (gdb_sysroot)
860 + strlen (execd_pathname)
861 + 1);
862
863 strcpy (name, gdb_sysroot);
864 strcat (name, execd_pathname);
865 execd_pathname = name;
866 }
867
868 /* Reset the shared library package. This ensures that we get a
869 shlib event when the child reaches "_start", at which point the
870 dld will have had a chance to initialize the child. */
871 /* Also, loading a symbol file below may trigger symbol lookups, and
872 we don't want those to be satisfied by the libraries of the
873 previous incarnation of this process. */
874 no_shared_libraries (NULL, 0);
875
876 if (follow_exec_mode_string == follow_exec_mode_new)
877 {
878 struct program_space *pspace;
879
880 /* The user wants to keep the old inferior and program spaces
881 around. Create a new fresh one, and switch to it. */
882
883 inf = add_inferior (current_inferior ()->pid);
884 pspace = add_program_space (maybe_new_address_space ());
885 inf->pspace = pspace;
886 inf->aspace = pspace->aspace;
887
888 exit_inferior_num_silent (current_inferior ()->num);
889
890 set_current_inferior (inf);
891 set_current_program_space (pspace);
892 }
893
894 gdb_assert (current_program_space == inf->pspace);
895
896 /* That a.out is now the one to use. */
897 exec_file_attach (execd_pathname, 0);
898
899 /* SYMFILE_DEFER_BP_RESET is used so that the proper displacement for a PIE
900 (Position Independent Executable) main symbol file will get applied by
901 solib_create_inferior_hook below. breakpoint_re_set would fail to insert
902 the breakpoints with the zero displacement. */
903
904 symbol_file_add (execd_pathname, SYMFILE_MAINLINE | SYMFILE_DEFER_BP_RESET,
905 NULL, 0);
906
907 set_initial_language ();
908
909 #ifdef SOLIB_CREATE_INFERIOR_HOOK
910 SOLIB_CREATE_INFERIOR_HOOK (PIDGET (inferior_ptid));
911 #else
912 solib_create_inferior_hook (0);
913 #endif
914
915 jit_inferior_created_hook ();
916
917 breakpoint_re_set ();
918
919 /* Reinsert all breakpoints. (Those which were symbolic have
920 been reset to the proper address in the new a.out, thanks
921 to symbol_file_command...). */
922 insert_breakpoints ();
923
924 /* The next resume of this inferior should bring it to the shlib
925 startup breakpoints. (If the user had also set bp's on
926 "main" from the old (parent) process, then they'll
927 automatically get reset there in the new process.) */
928 }
929
930 /* Non-zero if we are just simulating a single-step. This is needed
931 because we cannot remove the breakpoints in the inferior process
932 until after the `wait' in `wait_for_inferior'. */
933 static int singlestep_breakpoints_inserted_p = 0;
934
935 /* The thread we inserted single-step breakpoints for. */
936 static ptid_t singlestep_ptid;
937
938 /* PC when we started this single-step. */
939 static CORE_ADDR singlestep_pc;
940
941 /* If another thread hit the singlestep breakpoint, we save the original
942 thread here so that we can resume single-stepping it later. */
943 static ptid_t saved_singlestep_ptid;
944 static int stepping_past_singlestep_breakpoint;
945
946 /* If not equal to null_ptid, this means that once stepping over a breakpoint
947 is finished, we need to switch to deferred_step_ptid, and step it.
948
949 The use case is when one thread has hit a breakpoint, and then the user
950 has switched to another thread and issued 'step'. We need to step over
951 the breakpoint in the thread which hit it, but then continue
952 stepping the thread the user has selected. */
953 static ptid_t deferred_step_ptid;
954 \f
955 /* Displaced stepping. */
956
957 /* In non-stop debugging mode, we must take special care to manage
958 breakpoints properly; in particular, the traditional strategy for
959 stepping a thread past a breakpoint it has hit is unsuitable.
960 'Displaced stepping' is a tactic for stepping one thread past a
961 breakpoint it has hit while ensuring that other threads running
962 concurrently will hit the breakpoint as they should.
963
964 The traditional way to step a thread T off a breakpoint in a
965 multi-threaded program in all-stop mode is as follows:
966
967 a0) Initially, all threads are stopped, and breakpoints are not
968 inserted.
969 a1) We single-step T, leaving breakpoints uninserted.
970 a2) We insert breakpoints, and resume all threads.
971
972 In non-stop debugging, however, this strategy is unsuitable: we
973 don't want to have to stop all threads in the system in order to
974 continue or step T past a breakpoint. Instead, we use displaced
975 stepping:
976
977 n0) Initially, T is stopped, other threads are running, and
978 breakpoints are inserted.
979 n1) We copy the instruction "under" the breakpoint to a separate
980 location, outside the main code stream, making any adjustments
981 to the instruction, register, and memory state as directed by
982 T's architecture.
983 n2) We single-step T over the instruction at its new location.
984 n3) We adjust the resulting register and memory state as directed
985 by T's architecture. This includes resetting T's PC to point
986 back into the main instruction stream.
987 n4) We resume T.
988
989 This approach depends on the following gdbarch methods:
990
991 - gdbarch_max_insn_length and gdbarch_displaced_step_location
992 indicate where to copy the instruction, and how much space must
993 be reserved there. We use these in step n1.
994
995 - gdbarch_displaced_step_copy_insn copies an instruction to a new
996 address, and makes any necessary adjustments to the instruction,
997 register contents, and memory. We use this in step n1.
998
999 - gdbarch_displaced_step_fixup adjusts registers and memory after
1000 we have successfully single-stepped the instruction, to yield the
1001 same effect the instruction would have had if we had executed it
1002 at its original address. We use this in step n3.
1003
1004 - gdbarch_displaced_step_free_closure provides cleanup.
1005
1006 The gdbarch_displaced_step_copy_insn and
1007 gdbarch_displaced_step_fixup functions must be written so that
1008 copying an instruction with gdbarch_displaced_step_copy_insn,
1009 single-stepping across the copied instruction, and then applying
1010 gdbarch_displaced_step_fixup should have the same effects on the
1011 thread's memory and registers as stepping the instruction in place
1012 would have. Exactly which responsibilities fall to the copy and
1013 which fall to the fixup is up to the author of those functions.
1014
1015 See the comments in gdbarch.sh for details.
1016
1017 Note that displaced stepping and software single-step cannot
1018 currently be used in combination, although with some care I think
1019 they could be made to. Software single-step works by placing
1020 breakpoints on all possible subsequent instructions; if the
1021 displaced instruction is a PC-relative jump, those breakpoints
1022 could fall in very strange places --- on pages that aren't
1023 executable, or at addresses that are not proper instruction
1024 boundaries. (We do generally let other threads run while we wait
1025 to hit the software single-step breakpoint, and they might
1026 encounter such a corrupted instruction.) One way to work around
1027 this would be to have gdbarch_displaced_step_copy_insn fully
1028 simulate the effect of PC-relative instructions (and return NULL)
1029 on architectures that use software single-stepping.
1030
1031 In non-stop mode, we can have independent and simultaneous step
1032 requests, so more than one thread may need to simultaneously step
1033 over a breakpoint. The current implementation assumes there is
1034 only one scratch space per process. In this case, we have to
1035 serialize access to the scratch space. If thread A wants to step
1036 over a breakpoint, but we are currently waiting for some other
1037 thread to complete a displaced step, we leave thread A stopped and
1038 place it in the displaced_step_request_queue. Whenever a displaced
1039 step finishes, we pick the next thread in the queue and start a new
1040 displaced step operation on it. See displaced_step_prepare and
1041 displaced_step_fixup for details. */
1042
1043 struct displaced_step_request
1044 {
1045 ptid_t ptid;
1046 struct displaced_step_request *next;
1047 };
1048
1049 /* Per-inferior displaced stepping state. */
1050 struct displaced_step_inferior_state
1051 {
1052 /* Pointer to next in linked list. */
1053 struct displaced_step_inferior_state *next;
1054
1055 /* The process this displaced step state refers to. */
1056 int pid;
1057
1058 /* A queue of pending displaced stepping requests. One entry per
1059 thread that needs to do a displaced step. */
1060 struct displaced_step_request *step_request_queue;
1061
1062 /* If this is not null_ptid, this is the thread carrying out a
1063 displaced single-step in process PID. This thread's state will
1064 require fixing up once it has completed its step. */
1065 ptid_t step_ptid;
1066
1067 /* The architecture the thread had when we stepped it. */
1068 struct gdbarch *step_gdbarch;
1069
1070 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
1071 for post-step cleanup. */
1072 struct displaced_step_closure *step_closure;
1073
1074 /* The address of the original instruction, and the copy we
1075 made. */
1076 CORE_ADDR step_original, step_copy;
1077
1078 /* Saved contents of copy area. */
1079 gdb_byte *step_saved_copy;
1080 };
1081
1082 /* The list of states of processes involved in displaced stepping
1083 presently. */
1084 static struct displaced_step_inferior_state *displaced_step_inferior_states;
1085
1086 /* Get the displaced stepping state of process PID. */
1087
1088 static struct displaced_step_inferior_state *
1089 get_displaced_stepping_state (int pid)
1090 {
1091 struct displaced_step_inferior_state *state;
1092
1093 for (state = displaced_step_inferior_states;
1094 state != NULL;
1095 state = state->next)
1096 if (state->pid == pid)
1097 return state;
1098
1099 return NULL;
1100 }
1101
1102 /* Add a new displaced stepping state for process PID to the displaced
1103 stepping state list, or return a pointer to an already existing
1104 entry, if it already exists. Never returns NULL. */
1105
1106 static struct displaced_step_inferior_state *
1107 add_displaced_stepping_state (int pid)
1108 {
1109 struct displaced_step_inferior_state *state;
1110
1111 for (state = displaced_step_inferior_states;
1112 state != NULL;
1113 state = state->next)
1114 if (state->pid == pid)
1115 return state;
1116
1117 state = xcalloc (1, sizeof (*state));
1118 state->pid = pid;
1119 state->next = displaced_step_inferior_states;
1120 displaced_step_inferior_states = state;
1121
1122 return state;
1123 }
1124
1125 /* If the inferior is in displaced stepping, and ADDR equals the starting
1126 address of the copy area, return the corresponding displaced_step_closure.
1127 Otherwise, return NULL. */
1128
1129 struct displaced_step_closure*
1130 get_displaced_step_closure_by_addr (CORE_ADDR addr)
1131 {
1132 struct displaced_step_inferior_state *displaced
1133 = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1134
1135 /* If a displaced step is in progress and ADDR is the start of the copy area. */
1136 if (displaced && !ptid_equal (displaced->step_ptid, null_ptid)
1137 && (displaced->step_copy == addr))
1138 return displaced->step_closure;
1139
1140 return NULL;
1141 }
1142
1143 /* Remove the displaced stepping state of process PID. */
1144
1145 static void
1146 remove_displaced_stepping_state (int pid)
1147 {
1148 struct displaced_step_inferior_state *it, **prev_next_p;
1149
1150 gdb_assert (pid != 0);
1151
1152 it = displaced_step_inferior_states;
1153 prev_next_p = &displaced_step_inferior_states;
1154 while (it)
1155 {
1156 if (it->pid == pid)
1157 {
1158 *prev_next_p = it->next;
1159 xfree (it);
1160 return;
1161 }
1162
1163 prev_next_p = &it->next;
1164 it = *prev_next_p;
1165 }
1166 }
1167
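/* Per-inferior teardown: discard any displaced stepping state that was
   being tracked for the exiting inferior INF.  */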
1168 static void
1169 infrun_inferior_exit (struct inferior *inf)
1170 {
1171 remove_displaced_stepping_state (inf->pid);
1172 }
1173
1174 /* Enum strings for "set|show displaced-stepping". */
1175
1176 static const char can_use_displaced_stepping_auto[] = "auto";
1177 static const char can_use_displaced_stepping_on[] = "on";
1178 static const char can_use_displaced_stepping_off[] = "off";
1179 static const char *can_use_displaced_stepping_enum[] =
1180 {
1181 can_use_displaced_stepping_auto,
1182 can_use_displaced_stepping_on,
1183 can_use_displaced_stepping_off,
1184 NULL,
1185 };
1186
1187 /* If ON, and the architecture supports it, GDB will use displaced
1188 stepping to step over breakpoints. If OFF, or if the architecture
1189 doesn't support it, GDB will instead use the traditional
1190 hold-and-step approach. If AUTO (which is the default), GDB will
1191 decide which technique to use to step over breakpoints depending on
1192 which of all-stop or non-stop mode is active --- displaced stepping
1193 in non-stop mode; hold-and-step in all-stop mode. */
1194
1195 static const char *can_use_displaced_stepping =
1196 can_use_displaced_stepping_auto;
1197
1198 static void
1199 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1200 struct cmd_list_element *c,
1201 const char *value)
1202 {
1203 if (can_use_displaced_stepping == can_use_displaced_stepping_auto)
1204 fprintf_filtered (file,
1205 _("Debugger's willingness to use displaced stepping "
1206 "to step over breakpoints is %s (currently %s).\n"),
1207 value, non_stop ? "on" : "off");
1208 else
1209 fprintf_filtered (file,
1210 _("Debugger's willingness to use displaced stepping "
1211 "to step over breakpoints is %s.\n"), value);
1212 }
1213
1214 /* Return non-zero if displaced stepping can/should be used to step
1215 over breakpoints. */
1216
1217 static int
1218 use_displaced_stepping (struct gdbarch *gdbarch)
1219 {
1220 return (((can_use_displaced_stepping == can_use_displaced_stepping_auto
1221 && non_stop)
1222 || can_use_displaced_stepping == can_use_displaced_stepping_on)
1223 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1224 && !RECORD_IS_USED);
1225 }
1226
1227 /* Clean out any stray displaced stepping state. */
1228 static void
1229 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1230 {
1231 /* Indicate that there is no cleanup pending. */
1232 displaced->step_ptid = null_ptid;
1233
1234 if (displaced->step_closure)
1235 {
1236 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1237 displaced->step_closure);
1238 displaced->step_closure = NULL;
1239 }
1240 }
1241
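/* Cleanup wrapper around displaced_step_clear, suitable for use with
   make_cleanup.  ARG is the displaced_step_inferior_state to clear.  */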
1242 static void
1243 displaced_step_clear_cleanup (void *arg)
1244 {
1245 struct displaced_step_inferior_state *state = arg;
1246
1247 displaced_step_clear (state);
1248 }
1249
1250 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1251 void
1252 displaced_step_dump_bytes (struct ui_file *file,
1253 const gdb_byte *buf,
1254 size_t len)
1255 {
1256 int i;
1257
1258 for (i = 0; i < len; i++)
1259 fprintf_unfiltered (file, "%02x ", buf[i]);
1260 fputs_unfiltered ("\n", file);
1261 }
1262
1263 /* Prepare to single-step, using displaced stepping.
1264
1265 Note that we cannot use displaced stepping when we have a signal to
1266 deliver. If we have a signal to deliver and an instruction to step
1267 over, then after the step, there will be no indication from the
1268 target whether the thread entered a signal handler or ignored the
1269 signal and stepped over the instruction successfully --- both cases
1270 result in a simple SIGTRAP. In the first case we mustn't do a
1271 fixup, and in the second case we must --- but we can't tell which.
1272 Comments in the code for 'random signals' in handle_inferior_event
1273 explain how we handle this case instead.
1274
1275 Returns 1 if preparing was successful -- this thread is going to be
1276 stepped now; or 0 if displaced stepping this thread got queued. */
1277 static int
1278 displaced_step_prepare (ptid_t ptid)
1279 {
1280 struct cleanup *old_cleanups, *ignore_cleanups;
1281 struct regcache *regcache = get_thread_regcache (ptid);
1282 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1283 CORE_ADDR original, copy;
1284 ULONGEST len;
1285 struct displaced_step_closure *closure;
1286 struct displaced_step_inferior_state *displaced;
1287
1288 /* We should never reach this function if the architecture does not
1289 support displaced stepping. */
1290 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1291
1292 /* We have to displaced step one thread at a time, as we only have
1293 access to a single scratch space per inferior. */
1294
1295 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1296
1297 if (!ptid_equal (displaced->step_ptid, null_ptid))
1298 {
1299 /* Already waiting for a displaced step to finish. Defer this
1300 request and place in queue. */
1301 struct displaced_step_request *req, *new_req;
1302
1303 if (debug_displaced)
1304 fprintf_unfiltered (gdb_stdlog,
1305 "displaced: deferring step of %s\n",
1306 target_pid_to_str (ptid));
1307
1308 new_req = xmalloc (sizeof (*new_req));
1309 new_req->ptid = ptid;
1310 new_req->next = NULL;
1311
1312 if (displaced->step_request_queue)
1313 {
1314 for (req = displaced->step_request_queue;
1315 req && req->next;
1316 req = req->next)
1317 ;
1318 req->next = new_req;
1319 }
1320 else
1321 displaced->step_request_queue = new_req;
1322
1323 return 0;
1324 }
1325 else
1326 {
1327 if (debug_displaced)
1328 fprintf_unfiltered (gdb_stdlog,
1329 "displaced: stepping %s now\n",
1330 target_pid_to_str (ptid));
1331 }
1332
1333 displaced_step_clear (displaced);
1334
1335 old_cleanups = save_inferior_ptid ();
1336 inferior_ptid = ptid;
1337
1338 original = regcache_read_pc (regcache);
1339
1340 copy = gdbarch_displaced_step_location (gdbarch);
1341 len = gdbarch_max_insn_length (gdbarch);
1342
1343 /* Save the original contents of the copy area. */
1344 displaced->step_saved_copy = xmalloc (len);
1345 ignore_cleanups = make_cleanup (free_current_contents,
1346 &displaced->step_saved_copy);
1347 read_memory (copy, displaced->step_saved_copy, len);
1348 if (debug_displaced)
1349 {
1350 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1351 paddress (gdbarch, copy));
1352 displaced_step_dump_bytes (gdb_stdlog,
1353 displaced->step_saved_copy,
1354 len);
1355 };
1356
1357 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1358 original, copy, regcache);
1359
1360 /* We don't support the fully-simulated case at present. */
1361 gdb_assert (closure);
1362
1363 /* Save the information we need to fix things up if the step
1364 succeeds. */
1365 displaced->step_ptid = ptid;
1366 displaced->step_gdbarch = gdbarch;
1367 displaced->step_closure = closure;
1368 displaced->step_original = original;
1369 displaced->step_copy = copy;
1370
1371 make_cleanup (displaced_step_clear_cleanup, displaced);
1372
1373 /* Resume execution at the copy. */
1374 regcache_write_pc (regcache, copy);
1375
1376 discard_cleanups (ignore_cleanups);
1377
1378 do_cleanups (old_cleanups);
1379
1380 if (debug_displaced)
1381 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1382 paddress (gdbarch, copy));
1383
1384 return 1;
1385 }
1386
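/* Write LEN bytes from MYADDR to the target's memory at MEMADDR, in the
   context of thread PTID (temporarily switching inferior_ptid for the
   duration of the write).  */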
1387 static void
1388 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1389 const gdb_byte *myaddr, int len)
1390 {
1391 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1392
1393 inferior_ptid = ptid;
1394 write_memory (memaddr, myaddr, len);
1395 do_cleanups (ptid_cleanup);
1396 }
1397
1398 /* Restore the contents of the copy area for thread PTID. */
1399
1400 static void
1401 displaced_step_restore (struct displaced_step_inferior_state *displaced,
1402 ptid_t ptid)
1403 {
1404 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1405
1406 write_memory_ptid (ptid, displaced->step_copy,
1407 displaced->step_saved_copy, len);
1408 if (debug_displaced)
1409 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s %s\n",
1410 target_pid_to_str (ptid),
1411 paddress (displaced->step_gdbarch,
1412 displaced->step_copy));
1413 }
1414
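/* Called when an event has been reported for EVENT_PTID with stop signal
   SIGNAL.  If that thread was performing a displaced step, restore the
   scratch area, apply the architecture's fixup (or simply relocate the PC
   if the copied instruction did not complete), and then start the next
   queued displaced stepping request, if there is one.  */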
1415 static void
1416 displaced_step_fixup (ptid_t event_ptid, enum target_signal signal)
1417 {
1418 struct cleanup *old_cleanups;
1419 struct displaced_step_inferior_state *displaced
1420 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1421
1422 /* Was any thread of this process doing a displaced step? */
1423 if (displaced == NULL)
1424 return;
1425
1426 /* Was this event for the pid we displaced? */
1427 if (ptid_equal (displaced->step_ptid, null_ptid)
1428 || ! ptid_equal (displaced->step_ptid, event_ptid))
1429 return;
1430
1431 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1432
1433 displaced_step_restore (displaced, displaced->step_ptid);
1434
1435 /* Did the instruction complete successfully? */
1436 if (signal == TARGET_SIGNAL_TRAP)
1437 {
1438 /* Fix up the resulting state. */
1439 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1440 displaced->step_closure,
1441 displaced->step_original,
1442 displaced->step_copy,
1443 get_thread_regcache (displaced->step_ptid));
1444 }
1445 else
1446 {
1447 /* Since the instruction didn't complete, all we can do is
1448 relocate the PC. */
1449 struct regcache *regcache = get_thread_regcache (event_ptid);
1450 CORE_ADDR pc = regcache_read_pc (regcache);
1451
1452 pc = displaced->step_original + (pc - displaced->step_copy);
1453 regcache_write_pc (regcache, pc);
1454 }
1455
1456 do_cleanups (old_cleanups);
1457
1458 displaced->step_ptid = null_ptid;
1459
1460 /* Are there any pending displaced stepping requests? If so, run
1461 one now. Leave the state object around, since we're likely to
1462 need it again soon. */
1463 while (displaced->step_request_queue)
1464 {
1465 struct displaced_step_request *head;
1466 ptid_t ptid;
1467 struct regcache *regcache;
1468 struct gdbarch *gdbarch;
1469 CORE_ADDR actual_pc;
1470 struct address_space *aspace;
1471
1472 head = displaced->step_request_queue;
1473 ptid = head->ptid;
1474 displaced->step_request_queue = head->next;
1475 xfree (head);
1476
1477 context_switch (ptid);
1478
1479 regcache = get_thread_regcache (ptid);
1480 actual_pc = regcache_read_pc (regcache);
1481 aspace = get_regcache_aspace (regcache);
1482
1483 if (breakpoint_here_p (aspace, actual_pc))
1484 {
1485 if (debug_displaced)
1486 fprintf_unfiltered (gdb_stdlog,
1487 "displaced: stepping queued %s now\n",
1488 target_pid_to_str (ptid));
1489
1490 displaced_step_prepare (ptid);
1491
1492 gdbarch = get_regcache_arch (regcache);
1493
1494 if (debug_displaced)
1495 {
1496 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1497 gdb_byte buf[4];
1498
1499 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1500 paddress (gdbarch, actual_pc));
1501 read_memory (actual_pc, buf, sizeof (buf));
1502 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1503 }
1504
1505 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1506 displaced->step_closure))
1507 target_resume (ptid, 1, TARGET_SIGNAL_0);
1508 else
1509 target_resume (ptid, 0, TARGET_SIGNAL_0);
1510
1511 /* Done, we're stepping a thread. */
1512 break;
1513 }
1514 else
1515 {
1516 int step;
1517 struct thread_info *tp = inferior_thread ();
1518
1519 /* The breakpoint we were sitting under has since been
1520 removed. */
1521 tp->control.trap_expected = 0;
1522
1523 /* Go back to what we were trying to do. */
1524 step = currently_stepping (tp);
1525
1526 if (debug_displaced)
1527 fprintf_unfiltered (gdb_stdlog,
1528 "breakpoint is gone %s: step(%d)\n",
1529 target_pid_to_str (tp->ptid), step);
1530
1531 target_resume (ptid, step, TARGET_SIGNAL_0);
1532 tp->suspend.stop_signal = TARGET_SIGNAL_0;
1533
1534 /* This request was discarded. See if there's any other
1535 thread waiting for its turn. */
1536 }
1537 }
1538 }
1539
1540 /* Update global variables holding ptids to hold NEW_PTID if they were
1541 holding OLD_PTID. */
1542 static void
1543 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1544 {
1545 struct displaced_step_request *it;
1546 struct displaced_step_inferior_state *displaced;
1547
1548 if (ptid_equal (inferior_ptid, old_ptid))
1549 inferior_ptid = new_ptid;
1550
1551 if (ptid_equal (singlestep_ptid, old_ptid))
1552 singlestep_ptid = new_ptid;
1553
1554 if (ptid_equal (deferred_step_ptid, old_ptid))
1555 deferred_step_ptid = new_ptid;
1556
1557 for (displaced = displaced_step_inferior_states;
1558 displaced;
1559 displaced = displaced->next)
1560 {
1561 if (ptid_equal (displaced->step_ptid, old_ptid))
1562 displaced->step_ptid = new_ptid;
1563
1564 for (it = displaced->step_request_queue; it; it = it->next)
1565 if (ptid_equal (it->ptid, old_ptid))
1566 it->ptid = new_ptid;
1567 }
1568 }
1569
1570 \f
1571 /* Resuming. */
1572
1573 /* Things to clean up if we QUIT out of resume (). */
1574 static void
1575 resume_cleanups (void *ignore)
1576 {
1577 normal_stop ();
1578 }
1579
1580 static const char schedlock_off[] = "off";
1581 static const char schedlock_on[] = "on";
1582 static const char schedlock_step[] = "step";
1583 static const char *scheduler_enums[] = {
1584 schedlock_off,
1585 schedlock_on,
1586 schedlock_step,
1587 NULL
1588 };
1589 static const char *scheduler_mode = schedlock_off;
1590 static void
1591 show_scheduler_mode (struct ui_file *file, int from_tty,
1592 struct cmd_list_element *c, const char *value)
1593 {
1594 fprintf_filtered (file,
1595 _("Mode for locking scheduler "
1596 "during execution is \"%s\".\n"),
1597 value);
1598 }
1599
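/* The "set" hook for the scheduler-locking mode: revert to "off" and
   report an error if the target cannot lock the scheduler.  */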
1600 static void
1601 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1602 {
1603 if (!target_can_lock_scheduler)
1604 {
1605 scheduler_mode = schedlock_off;
1606 error (_("Target '%s' cannot support this command."), target_shortname);
1607 }
1608 }
1609
1610 /* True if execution commands resume all threads of all processes by
1611 default; otherwise, resume only threads of the current inferior
1612 process. */
1613 int sched_multi = 0;
1614
1615 /* Try to set up for software single stepping over the specified location.
1616 Return 1 if target_resume() should use hardware single step.
1617
1618 GDBARCH the current gdbarch.
1619 PC the location to step over. */
1620
1621 static int
1622 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1623 {
1624 int hw_step = 1;
1625
1626 if (execution_direction == EXEC_FORWARD
1627 && gdbarch_software_single_step_p (gdbarch)
1628 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1629 {
1630 hw_step = 0;
1631 /* Do not pull these breakpoints until after a `wait' in
1632 `wait_for_inferior'. */
1633 singlestep_breakpoints_inserted_p = 1;
1634 singlestep_ptid = inferior_ptid;
1635 singlestep_pc = pc;
1636 }
1637 return hw_step;
1638 }
1639
1640 /* Return a ptid representing the set of threads that we will proceed with,
1641 from the perspective of the user/frontend. We may actually resume
1642 fewer threads at first, e.g., if a thread is stopped at a
1643 breakpoint that needs stepping-off, but that should not be visible
1644 to the user/frontend, and neither should the frontend/user be
1645 allowed to proceed any of the threads that happen to be stopped for
1646 internal run control handling, if a previous command wanted them
1647 resumed. */
1648
1649 ptid_t
1650 user_visible_resume_ptid (int step)
1651 {
1652 /* By default, resume all threads of all processes. */
1653 ptid_t resume_ptid = RESUME_ALL;
1654
1655 /* Maybe resume only all threads of the current process. */
1656 if (!sched_multi && target_supports_multi_process ())
1657 {
1658 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1659 }
1660
1661 /* Maybe resume a single thread after all. */
1662 if (non_stop)
1663 {
1664 /* With non-stop mode on, threads are always handled
1665 individually. */
1666 resume_ptid = inferior_ptid;
1667 }
1668 else if ((scheduler_mode == schedlock_on)
1669 || (scheduler_mode == schedlock_step
1670 && (step || singlestep_breakpoints_inserted_p)))
1671 {
1672 /* User-settable 'scheduler' mode requires solo thread resume. */
1673 resume_ptid = inferior_ptid;
1674 }
1675
1676 return resume_ptid;
1677 }
1678
1679 /* Resume the inferior, but allow a QUIT. This is useful if the user
1680 wants to interrupt some lengthy single-stepping operation
1681 (for child processes, the SIGINT goes to the inferior, and so
1682 we get a SIGINT random_signal, but for remote debugging and perhaps
1683 other targets, that's not true).
1684
1685 STEP nonzero if we should step (zero to continue instead).
1686 SIG is the signal to give the inferior (zero for none). */
1687 void
1688 resume (int step, enum target_signal sig)
1689 {
1690 int should_resume = 1;
1691 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1692 struct regcache *regcache = get_current_regcache ();
1693 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1694 struct thread_info *tp = inferior_thread ();
1695 CORE_ADDR pc = regcache_read_pc (regcache);
1696 struct address_space *aspace = get_regcache_aspace (regcache);
1697
1698 QUIT;
1699
1700 if (current_inferior ()->waiting_for_vfork_done)
1701 {
1702 /* Don't try to single-step a vfork parent that is waiting for
1703 the child to get out of the shared memory region (by exec'ing
1704 or exiting). This is particularly important on software
1705 single-step archs, as the child process would trip on the
1706 software single step breakpoint inserted for the parent
1707 process. Since the parent will not actually execute any
1708 instruction until the child is out of the shared region (such
1709 are vfork's semantics), it is safe to simply continue it.
1710 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
1711 the parent, and tell it to `keep_going', which automatically
1712 re-sets it to stepping. */
1713 if (debug_infrun)
1714 fprintf_unfiltered (gdb_stdlog,
1715 "infrun: resume : clear step\n");
1716 step = 0;
1717 }
1718
1719 if (debug_infrun)
1720 fprintf_unfiltered (gdb_stdlog,
1721 "infrun: resume (step=%d, signal=%d), "
1722 "trap_expected=%d, current thread [%s] at %s\n",
1723 step, sig, tp->control.trap_expected,
1724 target_pid_to_str (inferior_ptid),
1725 paddress (gdbarch, pc));
1726
1727 /* Normally, by the time we reach `resume', the breakpoints are either
1728 removed or inserted, as appropriate. The exception is if we're sitting
1729 at a permanent breakpoint; we need to step over it, but permanent
1730 breakpoints can't be removed. So we have to test for it here. */
1731 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
1732 {
1733 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1734 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1735 else
1736 error (_("\
1737 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1738 how to step past a permanent breakpoint on this architecture. Try using\n\
1739 a command like `return' or `jump' to continue execution."));
1740 }
1741
1742 /* If enabled, step over breakpoints by executing a copy of the
1743 instruction at a different address.
1744
1745 We can't use displaced stepping when we have a signal to deliver;
1746 the comments for displaced_step_prepare explain why. The
1747 comments in the handle_inferior event for dealing with 'random
1748 signals' explain what we do instead.
1749
1750 We can't use displaced stepping when we are waiting for a vfork_done
1751 event; displaced stepping breaks the vfork child in the same way a
1752 software single-step breakpoint would. */
1753 if (use_displaced_stepping (gdbarch)
1754 && (tp->control.trap_expected
1755 || (step && gdbarch_software_single_step_p (gdbarch)))
1756 && sig == TARGET_SIGNAL_0
1757 && !current_inferior ()->waiting_for_vfork_done)
1758 {
1759 struct displaced_step_inferior_state *displaced;
1760
1761 if (!displaced_step_prepare (inferior_ptid))
1762 {
1763 /* Got placed in displaced stepping queue. Will be resumed
1764 later when all the currently queued displaced stepping
1765 requests finish. The thread is not executing at this point,
1766 and the call to set_executing will be made later. But we
1767 need to call set_running here, since from the frontend's point of view,
1768 the thread is running. */
1769 set_running (inferior_ptid, 1);
1770 discard_cleanups (old_cleanups);
1771 return;
1772 }
1773
1774 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1775 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
1776 displaced->step_closure);
1777 }
1778
1779 /* Do we need to do it the hard way, w/temp breakpoints? */
1780 else if (step)
1781 step = maybe_software_singlestep (gdbarch, pc);
1782
1783 /* Currently, our software single-step implementation leads to different
1784 results than hardware single-stepping in one situation: when stepping
1785 into delivering a signal which has an associated signal handler,
1786 hardware single-step will stop at the first instruction of the handler,
1787 while software single-step will simply skip execution of the handler.
1788
1789 For now, this difference in behavior is accepted since there is no
1790 easy way to actually implement single-stepping into a signal handler
1791 without kernel support.
1792
1793 However, there is one scenario where this difference leads to follow-on
1794 problems: if we're stepping off a breakpoint by removing all breakpoints
1795 and then single-stepping. In this case, the software single-step
1796 behavior means that even if there is a *breakpoint* in the signal
1797 handler, GDB still would not stop.
1798
1799 Fortunately, we can at least fix this particular issue. We detect
1800 here the case where we are about to deliver a signal while software
1801 single-stepping with breakpoints removed. In this situation, we
1802 revert the decisions to remove all breakpoints and insert single-
1803 step breakpoints, and instead we install a step-resume breakpoint
1804 at the current address, deliver the signal without stepping, and
1805 once we arrive back at the step-resume breakpoint, actually step
1806 over the breakpoint we originally wanted to step over. */
1807 if (singlestep_breakpoints_inserted_p
1808 && tp->control.trap_expected && sig != TARGET_SIGNAL_0)
1809 {
1810 /* If we have nested signals or a pending signal is delivered
1811 immediately after a handler returns, we might already have
1812 a step-resume breakpoint set on the earlier handler. We cannot
1813 set another step-resume breakpoint; just continue on until the
1814 original breakpoint is hit. */
1815 if (tp->control.step_resume_breakpoint == NULL)
1816 {
1817 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
1818 tp->step_after_step_resume_breakpoint = 1;
1819 }
1820
1821 remove_single_step_breakpoints ();
1822 singlestep_breakpoints_inserted_p = 0;
1823
1824 insert_breakpoints ();
1825 tp->control.trap_expected = 0;
1826 }
1827
1828 if (should_resume)
1829 {
1830 ptid_t resume_ptid;
1831
1832 /* If STEP is set, it's a request to use hardware stepping
1833 facilities. But in that case, we should never
1834 use a singlestep breakpoint. */
1835 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1836
1837 /* Decide the set of threads to ask the target to resume. Start
1838 by assuming everything will be resumed, then narrow the set
1839 by applying increasingly restricting conditions. */
1840 resume_ptid = user_visible_resume_ptid (step);
1841
1842 /* Maybe resume a single thread after all. */
1843 if (singlestep_breakpoints_inserted_p
1844 && stepping_past_singlestep_breakpoint)
1845 {
1846 /* The situation here is as follows. In thread T1 we wanted to
1847 single-step. Lacking hardware single-stepping we've
1848 set a breakpoint at the PC of the next instruction -- call it
1849 P. After resuming, we've hit that breakpoint in thread T2.
1850 Now we've removed the original breakpoint, inserted a breakpoint
1851 at P+1, and are trying to step to advance T2 past the breakpoint.
1852 We need to step only T2, because if T1 is allowed to run freely,
1853 it can run past P, and if other threads are allowed to run,
1854 they can hit the breakpoint at P+1, and nested hits of single-step
1855 breakpoints are not something we'd want -- that's complicated
1856 to support, and has no value. */
1857 resume_ptid = inferior_ptid;
1858 }
1859 else if ((step || singlestep_breakpoints_inserted_p)
1860 && tp->control.trap_expected)
1861 {
1862 /* We're allowing a thread to run past a breakpoint it has
1863 hit, by single-stepping the thread with the breakpoint
1864 removed. In that case, we need to single-step only this
1865 thread, and keep the others stopped, as they could miss this
1866 breakpoint if allowed to run.
1867
1868 The current code actually removes all breakpoints when
1869 doing this, not just the one being stepped over, so if we
1870 let other threads run, we can actually miss any
1871 breakpoint, not just the one at PC. */
1872 resume_ptid = inferior_ptid;
1873 }
1874
1875 if (gdbarch_cannot_step_breakpoint (gdbarch))
1876 {
1877 /* Most targets can step a breakpoint instruction, thus
1878 executing it normally. But if this one cannot, just
1879 continue and we will hit it anyway. */
1880 if (step && breakpoint_inserted_here_p (aspace, pc))
1881 step = 0;
1882 }
1883
1884 if (debug_displaced
1885 && use_displaced_stepping (gdbarch)
1886 && tp->control.trap_expected)
1887 {
1888 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1889 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1890 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1891 gdb_byte buf[4];
1892
1893 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1894 paddress (resume_gdbarch, actual_pc));
1895 read_memory (actual_pc, buf, sizeof (buf));
1896 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1897 }
1898
1899 /* Install inferior's terminal modes. */
1900 target_terminal_inferior ();
1901
1902 /* Avoid confusing the next resume, if the next stop/resume
1903 happens to apply to another thread. */
1904 tp->suspend.stop_signal = TARGET_SIGNAL_0;
1905
1906 /* Advise target which signals may be handled silently. If we have
1907 removed breakpoints because we are stepping over one (which can
1908 happen only if we are not using displaced stepping), we need to
1909 receive all signals to avoid accidentally skipping a breakpoint
1910 during execution of a signal handler. */
1911 if ((step || singlestep_breakpoints_inserted_p)
1912 && tp->control.trap_expected
1913 && !use_displaced_stepping (gdbarch))
1914 target_pass_signals (0, NULL);
1915 else
1916 target_pass_signals ((int) TARGET_SIGNAL_LAST, signal_pass);
1917
1918 target_resume (resume_ptid, step, sig);
1919 }
1920
1921 discard_cleanups (old_cleanups);
1922 }
1923 \f
1924 /* Proceeding. */
1925
1926 /* Clear out all variables saying what to do when inferior is continued.
1927 First do this, then set the ones you want, then call `proceed'. */
1928
1929 static void
1930 clear_proceed_status_thread (struct thread_info *tp)
1931 {
1932 if (debug_infrun)
1933 fprintf_unfiltered (gdb_stdlog,
1934 "infrun: clear_proceed_status_thread (%s)\n",
1935 target_pid_to_str (tp->ptid));
1936
1937 tp->control.trap_expected = 0;
1938 tp->control.step_range_start = 0;
1939 tp->control.step_range_end = 0;
1940 tp->control.step_frame_id = null_frame_id;
1941 tp->control.step_stack_frame_id = null_frame_id;
1942 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
1943 tp->stop_requested = 0;
1944
1945 tp->control.stop_step = 0;
1946
1947 tp->control.proceed_to_finish = 0;
1948
1949 /* Discard any remaining commands or status from previous stop. */
1950 bpstat_clear (&tp->control.stop_bpstat);
1951 }
1952
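/* Callback for iterate_over_threads.  Clear the proceed status of
   every thread that has not exited.  */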
1953 static int
1954 clear_proceed_status_callback (struct thread_info *tp, void *data)
1955 {
1956 if (is_exited (tp->ptid))
1957 return 0;
1958
1959 clear_proceed_status_thread (tp);
1960 return 0;
1961 }
1962
1963 void
1964 clear_proceed_status (void)
1965 {
1966 if (!non_stop)
1967 {
1968 /* In all-stop mode, delete the per-thread status of all
1969 threads; even if inferior_ptid is null_ptid, there may be
1970 threads on the list. E.g., we may be launching a new
1971 process, while selecting the executable. */
1972 iterate_over_threads (clear_proceed_status_callback, NULL);
1973 }
1974
1975 if (!ptid_equal (inferior_ptid, null_ptid))
1976 {
1977 struct inferior *inferior;
1978
1979 if (non_stop)
1980 {
1981 /* If in non-stop mode, only delete the per-thread status of
1982 the current thread. */
1983 clear_proceed_status_thread (inferior_thread ());
1984 }
1985
1986 inferior = current_inferior ();
1987 inferior->control.stop_soon = NO_STOP_QUIETLY;
1988 }
1989
1990 stop_after_trap = 0;
1991
1992 observer_notify_about_to_proceed ();
1993
1994 if (stop_registers)
1995 {
1996 regcache_xfree (stop_registers);
1997 stop_registers = NULL;
1998 }
1999 }
2000
2001 /* Check the current thread against the thread that reported the most recent
2002 event. If a step-over is required, return TRUE and set the current thread
2003 to the old thread. Otherwise return FALSE.
2004
2005 This should be suitable for any targets that support threads. */
2006
2007 static int
2008 prepare_to_proceed (int step)
2009 {
2010 ptid_t wait_ptid;
2011 struct target_waitstatus wait_status;
2012 int schedlock_enabled;
2013
2014 /* With non-stop mode on, threads are always handled individually. */
2015 gdb_assert (! non_stop);
2016
2017 /* Get the last target status returned by target_wait(). */
2018 get_last_target_status (&wait_ptid, &wait_status);
2019
2020 /* Make sure we were stopped at a breakpoint. */
2021 if (wait_status.kind != TARGET_WAITKIND_STOPPED
2022 || (wait_status.value.sig != TARGET_SIGNAL_TRAP
2023 && wait_status.value.sig != TARGET_SIGNAL_ILL
2024 && wait_status.value.sig != TARGET_SIGNAL_SEGV
2025 && wait_status.value.sig != TARGET_SIGNAL_EMT))
2026 {
2027 return 0;
2028 }
2029
2030 schedlock_enabled = (scheduler_mode == schedlock_on
2031 || (scheduler_mode == schedlock_step
2032 && step));
2033
2034 /* Don't switch over to WAIT_PTID if scheduler locking is on. */
2035 if (schedlock_enabled)
2036 return 0;
2037
2038 /* Don't switch over if we're about to resume some process
2039 other than WAIT_PTID's, and schedule-multiple is off. */
2040 if (!sched_multi
2041 && ptid_get_pid (wait_ptid) != ptid_get_pid (inferior_ptid))
2042 return 0;
2043
2044 /* Switched over from WAIT_PTID. */
2045 if (!ptid_equal (wait_ptid, minus_one_ptid)
2046 && !ptid_equal (inferior_ptid, wait_ptid))
2047 {
2048 struct regcache *regcache = get_thread_regcache (wait_ptid);
2049
2050 if (breakpoint_here_p (get_regcache_aspace (regcache),
2051 regcache_read_pc (regcache)))
2052 {
2053 /* If stepping, remember current thread to switch back to. */
2054 if (step)
2055 deferred_step_ptid = inferior_ptid;
2056
2057 /* Switch back to the WAIT_PTID thread. */
2058 switch_to_thread (wait_ptid);
2059
2060 if (debug_infrun)
2061 fprintf_unfiltered (gdb_stdlog,
2062 "infrun: prepare_to_proceed (step=%d), "
2063 "switched to [%s]\n",
2064 step, target_pid_to_str (inferior_ptid));
2065
2066 /* We return 1 to indicate that there is a breakpoint here,
2067 so we need to step over it before continuing to avoid
2068 hitting it straight away. */
2069 return 1;
2070 }
2071 }
2072
2073 return 0;
2074 }
2075
2076 /* Basic routine for continuing the program in various fashions.
2077
2078 ADDR is the address to resume at, or -1 for resume where stopped.
2079 SIGGNAL is the signal to give it, or 0 for none,
2080 or -1 to act according to how it stopped.
2081 STEP is nonzero if we should trap after one instruction.
2082 -1 means return after that and print nothing.
2083 You should probably set various step_... variables
2084 before calling here, if you are stepping.
2085
2086 You should call clear_proceed_status before calling proceed. */
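
/* For example (an illustrative sketch only), a caller that wants to
   continue the inferior from where it last stopped, keeping whatever
   stop signal handling is already in effect and without
   single-stepping, might do:

     clear_proceed_status ();
     proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);  */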
2087
2088 void
2089 proceed (CORE_ADDR addr, enum target_signal siggnal, int step)
2090 {
2091 struct regcache *regcache;
2092 struct gdbarch *gdbarch;
2093 struct thread_info *tp;
2094 CORE_ADDR pc;
2095 struct address_space *aspace;
2096 int oneproc = 0;
2097
2098 /* If we're stopped at a fork/vfork, follow the branch set by the
2099 "set follow-fork-mode" command; otherwise, we'll just proceed
2100 resuming the current thread. */
2101 if (!follow_fork ())
2102 {
2103 /* The target for some reason decided not to resume. */
2104 normal_stop ();
2105 if (target_can_async_p ())
2106 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2107 return;
2108 }
2109
2110 /* We'll update this if & when we switch to a new thread. */
2111 previous_inferior_ptid = inferior_ptid;
2112
2113 regcache = get_current_regcache ();
2114 gdbarch = get_regcache_arch (regcache);
2115 aspace = get_regcache_aspace (regcache);
2116 pc = regcache_read_pc (regcache);
2117
2118 if (step > 0)
2119 step_start_function = find_pc_function (pc);
2120 if (step < 0)
2121 stop_after_trap = 1;
2122
2123 if (addr == (CORE_ADDR) -1)
2124 {
2125 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
2126 && execution_direction != EXEC_REVERSE)
2127 /* There is a breakpoint at the address we will resume at,
2128 step one instruction before inserting breakpoints so that
2129 we do not stop right away (and report a second hit at this
2130 breakpoint).
2131
2132 Note, we don't do this in reverse, because we won't
2133 actually be executing the breakpoint insn anyway.
2134 We'll be (un-)executing the previous instruction. */
2135
2136 oneproc = 1;
2137 else if (gdbarch_single_step_through_delay_p (gdbarch)
2138 && gdbarch_single_step_through_delay (gdbarch,
2139 get_current_frame ()))
2140 /* We stepped onto an instruction that needs to be stepped
2141 again before re-inserting the breakpoint; do so now. */
2142 oneproc = 1;
2143 }
2144 else
2145 {
2146 regcache_write_pc (regcache, addr);
2147 }
2148
2149 if (debug_infrun)
2150 fprintf_unfiltered (gdb_stdlog,
2151 "infrun: proceed (addr=%s, signal=%d, step=%d)\n",
2152 paddress (gdbarch, addr), siggnal, step);
2153
2154 if (non_stop)
2155 /* In non-stop, each thread is handled individually. The context
2156 must already be set to the right thread here. */
2157 ;
2158 else
2159 {
2160 /* In a multi-threaded task we may select another thread and
2161 then continue or step.
2162
2163 But if the old thread was stopped at a breakpoint, it will
2164 immediately cause another breakpoint stop without any
2165 execution (i.e. it will report a breakpoint hit incorrectly).
2166 So we must step over it first.
2167
2168 prepare_to_proceed checks the current thread against the
2169 thread that reported the most recent event. If a step-over
2170 is required it returns TRUE and sets the current thread to
2171 the old thread. */
2172 if (prepare_to_proceed (step))
2173 oneproc = 1;
2174 }
2175
2176 /* prepare_to_proceed may change the current thread. */
2177 tp = inferior_thread ();
2178
2179 if (oneproc)
2180 {
2181 tp->control.trap_expected = 1;
2182 /* If displaced stepping is enabled, we can step over the
2183 breakpoint without hitting it, so leave all breakpoints
2184 inserted. Otherwise we need to disable all breakpoints, step
2185 one instruction, and then re-add them when that step is
2186 finished. */
2187 if (!use_displaced_stepping (gdbarch))
2188 remove_breakpoints ();
2189 }
2190
2191 /* We can insert breakpoints if we're not trying to step over one,
2192 or if we are stepping over one but we're using displaced stepping
2193 to do so. */
2194 if (! tp->control.trap_expected || use_displaced_stepping (gdbarch))
2195 insert_breakpoints ();
2196
2197 if (!non_stop)
2198 {
2199 /* Pass the last stop signal to the thread we're resuming,
2200 irrespective of whether the current thread is the thread that
2201 got the last event or not. This was historically GDB's
2202 behaviour before keeping a stop_signal per thread. */
2203
2204 struct thread_info *last_thread;
2205 ptid_t last_ptid;
2206 struct target_waitstatus last_status;
2207
2208 get_last_target_status (&last_ptid, &last_status);
2209 if (!ptid_equal (inferior_ptid, last_ptid)
2210 && !ptid_equal (last_ptid, null_ptid)
2211 && !ptid_equal (last_ptid, minus_one_ptid))
2212 {
2213 last_thread = find_thread_ptid (last_ptid);
2214 if (last_thread)
2215 {
2216 tp->suspend.stop_signal = last_thread->suspend.stop_signal;
2217 last_thread->suspend.stop_signal = TARGET_SIGNAL_0;
2218 }
2219 }
2220 }
2221
2222 if (siggnal != TARGET_SIGNAL_DEFAULT)
2223 tp->suspend.stop_signal = siggnal;
2224 /* If this signal should not be seen by program,
2225 give it zero. Used for debugging signals. */
2226 else if (!signal_program[tp->suspend.stop_signal])
2227 tp->suspend.stop_signal = TARGET_SIGNAL_0;
2228
2229 annotate_starting ();
2230
2231 /* Make sure that output from GDB appears before output from the
2232 inferior. */
2233 gdb_flush (gdb_stdout);
2234
2235 /* Refresh prev_pc value just prior to resuming. This used to be
2236 done in stop_stepping; however, setting prev_pc there did not handle
2237 scenarios such as inferior function calls or returning from
2238 a function via the return command. In those cases, the prev_pc
2239 value was not set properly for subsequent commands. The prev_pc value
2240 is used to initialize the starting line number in the ecs. With an
2241 invalid value, the gdb next command ends up stopping at the position
2242 represented by the next line table entry past our start position.
2243 On platforms that generate one line table entry per line, this
2244 is not a problem. However, on the ia64, the compiler generates
2245 extraneous line table entries that do not increase the line number.
2246 When we issue the gdb next command on the ia64 after an inferior call
2247 or a return command, we often end up a few instructions forward, still
2248 within the original line we started.
2249
2250 An attempt was made to refresh the prev_pc at the same time the
2251 execution_control_state is initialized (for instance, just before
2252 waiting for an inferior event). But this approach did not work
2253 because of platforms that use ptrace, where the pc register cannot
2254 be read unless the inferior is stopped. At that point, we are not
2255 guaranteed the inferior is stopped and so the regcache_read_pc() call
2256 can fail. Setting the prev_pc value here ensures the value is updated
2257 correctly when the inferior is stopped. */
2258 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2259
2260 /* Fill in with reasonable starting values. */
2261 init_thread_stepping_state (tp);
2262
2263 /* Reset to normal state. */
2264 init_infwait_state ();
2265
2266 /* Resume inferior. */
2267 resume (oneproc || step || bpstat_should_step (), tp->suspend.stop_signal);
2268
2269 /* Wait for it to stop (if not standalone)
2270 and in any case decode why it stopped, and act accordingly. */
2271 /* Do this only if we are not using the event loop, or if the target
2272 does not support asynchronous execution. */
2273 if (!target_can_async_p ())
2274 {
2275 wait_for_inferior ();
2276 normal_stop ();
2277 }
2278 }
2279 \f
2280
2281 /* Start remote-debugging of a machine over a serial link. */
2282
2283 void
2284 start_remote (int from_tty)
2285 {
2286 struct inferior *inferior;
2287
2288 inferior = current_inferior ();
2289 inferior->control.stop_soon = STOP_QUIETLY_REMOTE;
2290
2291 /* Always go on waiting for the target, regardless of the mode. */
2292 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2293 indicate to wait_for_inferior that a target should timeout if
2294 nothing is returned (instead of just blocking). Because of this,
2295 targets expecting an immediate response need to, internally, set
2296 things up so that the target_wait() is forced to eventually
2297 timeout. */
2298 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2299 differentiate to its caller what the state of the target is after
2300 the initial open has been performed. Here we're assuming that
2301 the target has stopped. It should be possible to eventually have
2302 target_open() return to the caller an indication that the target
2303 is currently running and GDB state should be set to the same as
2304 for an async run. */
2305 wait_for_inferior ();
2306
2307 /* Now that the inferior has stopped, do any bookkeeping like
2308 loading shared libraries. We want to do this before normal_stop,
2309 so that the displayed frame is up to date. */
2310 post_create_inferior (&current_target, from_tty);
2311
2312 normal_stop ();
2313 }
2314
2315 /* Initialize static vars when a new inferior begins. */
2316
2317 void
2318 init_wait_for_inferior (void)
2319 {
2320 /* These are meaningless until the first time through wait_for_inferior. */
2321
2322 breakpoint_init_inferior (inf_starting);
2323
2324 clear_proceed_status ();
2325
2326 stepping_past_singlestep_breakpoint = 0;
2327 deferred_step_ptid = null_ptid;
2328
2329 target_last_wait_ptid = minus_one_ptid;
2330
2331 previous_inferior_ptid = inferior_ptid;
2332 init_infwait_state ();
2333
2334 /* Discard any skipped inlined frames. */
2335 clear_inline_frame_state (minus_one_ptid);
2336 }
2337
2338 \f
2339 /* This enum encodes possible reasons for doing a target_wait, so that
2340 wfi can call target_wait in one place. (Ultimately the call will be
2341 moved out of the infinite loop entirely.) */
2342
2343 enum infwait_states
2344 {
2345 infwait_normal_state,
2346 infwait_thread_hop_state,
2347 infwait_step_watch_state,
2348 infwait_nonstep_watch_state
2349 };
2350
2351 /* The PTID we'll do a target_wait on. */
2352 ptid_t waiton_ptid;
2353
2354 /* Current inferior wait state. */
2355 enum infwait_states infwait_state;
2356
2357 /* Data to be passed around while handling an event. This data is
2358 discarded between events. */
2359 struct execution_control_state
2360 {
2361 ptid_t ptid;
2362 /* The thread that got the event, if this was a thread event; NULL
2363 otherwise. */
2364 struct thread_info *event_thread;
2365
2366 struct target_waitstatus ws;
2367 int random_signal;
2368 int stop_func_filled_in;
2369 CORE_ADDR stop_func_start;
2370 CORE_ADDR stop_func_end;
2371 char *stop_func_name;
2372 int new_thread_event;
2373 int wait_some_more;
2374 };
2375
2376 static void handle_inferior_event (struct execution_control_state *ecs);
2377
2378 static void handle_step_into_function (struct gdbarch *gdbarch,
2379 struct execution_control_state *ecs);
2380 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2381 struct execution_control_state *ecs);
2382 static void check_exception_resume (struct execution_control_state *,
2383 struct frame_info *, struct symbol *);
2384
2385 static void stop_stepping (struct execution_control_state *ecs);
2386 static void prepare_to_wait (struct execution_control_state *ecs);
2387 static void keep_going (struct execution_control_state *ecs);
2388
2389 /* Callback for iterate over threads. If the thread is stopped, but
2390 the user/frontend doesn't know about that yet, go through
2391 normal_stop, as if the thread had just stopped now. ARG points at
2392 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2393 ptid_is_pid(PTID) is true, applies to all threads of the process
2394 pointed at by PTID. Otherwise, applies only to the thread pointed
2395 at by PTID. */
2396
2397 static int
2398 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2399 {
2400 ptid_t ptid = * (ptid_t *) arg;
2401
2402 if ((ptid_equal (info->ptid, ptid)
2403 || ptid_equal (minus_one_ptid, ptid)
2404 || (ptid_is_pid (ptid)
2405 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2406 && is_running (info->ptid)
2407 && !is_executing (info->ptid))
2408 {
2409 struct cleanup *old_chain;
2410 struct execution_control_state ecss;
2411 struct execution_control_state *ecs = &ecss;
2412
2413 memset (ecs, 0, sizeof (*ecs));
2414
2415 old_chain = make_cleanup_restore_current_thread ();
2416
2417 switch_to_thread (info->ptid);
2418
2419 /* Go through handle_inferior_event/normal_stop, so we always
2420 have consistent output as if the stop event had been
2421 reported. */
2422 ecs->ptid = info->ptid;
2423 ecs->event_thread = find_thread_ptid (info->ptid);
2424 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2425 ecs->ws.value.sig = TARGET_SIGNAL_0;
2426
2427 handle_inferior_event (ecs);
2428
2429 if (!ecs->wait_some_more)
2430 {
2431 struct thread_info *tp;
2432
2433 normal_stop ();
2434
2435 /* Finish off the continuations. */
2436 tp = inferior_thread ();
2437 do_all_intermediate_continuations_thread (tp, 1);
2438 do_all_continuations_thread (tp, 1);
2439 }
2440
2441 do_cleanups (old_chain);
2442 }
2443
2444 return 0;
2445 }
2446
2447 /* This function is attached as a "thread_stop_requested" observer.
2448 Cleanup local state that assumed the PTID was to be resumed, and
2449 report the stop to the frontend. */
2450
2451 static void
2452 infrun_thread_stop_requested (ptid_t ptid)
2453 {
2454 struct displaced_step_inferior_state *displaced;
2455
2456 /* PTID was requested to stop. Remove it from the displaced
2457 stepping queue, so we don't try to resume it automatically. */
2458
2459 for (displaced = displaced_step_inferior_states;
2460 displaced;
2461 displaced = displaced->next)
2462 {
2463 struct displaced_step_request *it, **prev_next_p;
2464
2465 it = displaced->step_request_queue;
2466 prev_next_p = &displaced->step_request_queue;
2467 while (it)
2468 {
2469 if (ptid_match (it->ptid, ptid))
2470 {
2471 *prev_next_p = it->next;
2472 it->next = NULL;
2473 xfree (it);
2474 }
2475 else
2476 {
2477 prev_next_p = &it->next;
2478 }
2479
2480 it = *prev_next_p;
2481 }
2482 }
2483
2484 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2485 }
2486
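/* Called when thread TP exits.  If TP is the thread whose wait
   status we cached last, forget the cached ptid.  */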
2487 static void
2488 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2489 {
2490 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2491 nullify_last_target_wait_ptid ();
2492 }
2493
2494 /* Callback for iterate_over_threads. */
2495
2496 static int
2497 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2498 {
2499 if (is_exited (info->ptid))
2500 return 0;
2501
2502 delete_step_resume_breakpoint (info);
2503 delete_exception_resume_breakpoint (info);
2504 return 0;
2505 }
2506
2507 /* In all-stop, delete the step resume breakpoint of any thread that
2508 had one. In non-stop, delete the step resume breakpoint of the
2509 thread that just stopped. */
2510
2511 static void
2512 delete_step_thread_step_resume_breakpoint (void)
2513 {
2514 if (!target_has_execution
2515 || ptid_equal (inferior_ptid, null_ptid))
2516 /* If the inferior has exited, we have already deleted the step
2517 resume breakpoints out of GDB's lists. */
2518 return;
2519
2520 if (non_stop)
2521 {
2522 /* If in non-stop mode, only delete the step-resume or
2523 longjmp-resume breakpoint of the thread that just stopped
2524 stepping. */
2525 struct thread_info *tp = inferior_thread ();
2526
2527 delete_step_resume_breakpoint (tp);
2528 delete_exception_resume_breakpoint (tp);
2529 }
2530 else
2531 /* In all-stop mode, delete all step-resume and longjmp-resume
2532 breakpoints of any thread that had them. */
2533 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2534 }
2535
2536 /* A cleanup wrapper. */
2537
2538 static void
2539 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2540 {
2541 delete_step_thread_step_resume_breakpoint ();
2542 }
2543
2544 /* Pretty print the results of target_wait, for debugging purposes. */
2545
2546 static void
2547 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2548 const struct target_waitstatus *ws)
2549 {
2550 char *status_string = target_waitstatus_to_string (ws);
2551 struct ui_file *tmp_stream = mem_fileopen ();
2552 char *text;
2553
2554 /* The text is split over several lines because it was getting too long.
2555 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2556 output as a unit; we want only one timestamp printed if debug_timestamp
2557 is set. */
2558
2559 fprintf_unfiltered (tmp_stream,
2560 "infrun: target_wait (%d", PIDGET (waiton_ptid));
2561 if (PIDGET (waiton_ptid) != -1)
2562 fprintf_unfiltered (tmp_stream,
2563 " [%s]", target_pid_to_str (waiton_ptid));
2564 fprintf_unfiltered (tmp_stream, ", status) =\n");
2565 fprintf_unfiltered (tmp_stream,
2566 "infrun: %d [%s],\n",
2567 PIDGET (result_ptid), target_pid_to_str (result_ptid));
2568 fprintf_unfiltered (tmp_stream,
2569 "infrun: %s\n",
2570 status_string);
2571
2572 text = ui_file_xstrdup (tmp_stream, NULL);
2573
2574 /* This uses %s in part to handle %'s in the text, but also to avoid
2575 a gcc error: the format attribute requires a string literal. */
2576 fprintf_unfiltered (gdb_stdlog, "%s", text);
2577
2578 xfree (status_string);
2579 xfree (text);
2580 ui_file_delete (tmp_stream);
2581 }
2582
2583 /* Prepare and stabilize the inferior for detaching it. E.g.,
2584 detaching while a thread is displaced stepping is a recipe for
2585 crashing it, as nothing would readjust the PC out of the scratch
2586 pad. */
2587
2588 void
2589 prepare_for_detach (void)
2590 {
2591 struct inferior *inf = current_inferior ();
2592 ptid_t pid_ptid = pid_to_ptid (inf->pid);
2593 struct cleanup *old_chain_1;
2594 struct displaced_step_inferior_state *displaced;
2595
2596 displaced = get_displaced_stepping_state (inf->pid);
2597
2598 /* Is any thread of this process displaced stepping? If not,
2599 there's nothing else to do. */
2600 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
2601 return;
2602
2603 if (debug_infrun)
2604 fprintf_unfiltered (gdb_stdlog,
2605 "displaced-stepping in-process while detaching");
2606
2607 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
2608 inf->detaching = 1;
2609
2610 while (!ptid_equal (displaced->step_ptid, null_ptid))
2611 {
2612 struct cleanup *old_chain_2;
2613 struct execution_control_state ecss;
2614 struct execution_control_state *ecs;
2615
2616 ecs = &ecss;
2617 memset (ecs, 0, sizeof (*ecs));
2618
2619 overlay_cache_invalid = 1;
2620
2621 if (deprecated_target_wait_hook)
2622 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
2623 else
2624 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
2625
2626 if (debug_infrun)
2627 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
2628
2629 /* If an error happens while handling the event, propagate GDB's
2630 knowledge of the executing state to the frontend/user running
2631 state. */
2632 old_chain_2 = make_cleanup (finish_thread_state_cleanup,
2633 &minus_one_ptid);
2634
2635 /* In non-stop mode, each thread is handled individually.
2636 Switch early, so the global state is set correctly for this
2637 thread. */
2638 if (non_stop
2639 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2640 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2641 context_switch (ecs->ptid);
2642
2643 /* Now figure out what to do with the result. */
2644 handle_inferior_event (ecs);
2645
2646 /* No error, don't finish the state yet. */
2647 discard_cleanups (old_chain_2);
2648
2649 /* Breakpoints and watchpoints are not installed on the target
2650 at this point, and signals are passed directly to the
2651 inferior, so this must mean the process is gone. */
2652 if (!ecs->wait_some_more)
2653 {
2654 discard_cleanups (old_chain_1);
2655 error (_("Program exited while detaching"));
2656 }
2657 }
2658
2659 discard_cleanups (old_chain_1);
2660 }
2661
2662 /* Wait for control to return from inferior to debugger.
2663
2664 If inferior gets a signal, we may decide to start it up again
2665 instead of returning. That is why there is a loop in this function.
2666 When this function actually returns it means the inferior
2667 should be left stopped and GDB should read more commands. */
2668
2669 void
2670 wait_for_inferior (void)
2671 {
2672 struct cleanup *old_cleanups;
2673 struct execution_control_state ecss;
2674 struct execution_control_state *ecs;
2675
2676 if (debug_infrun)
2677 fprintf_unfiltered
2678 (gdb_stdlog, "infrun: wait_for_inferior ()\n");
2679
2680 old_cleanups =
2681 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2682
2683 ecs = &ecss;
2684 memset (ecs, 0, sizeof (*ecs));
2685
2686 while (1)
2687 {
2688 struct cleanup *old_chain;
2689
2690 overlay_cache_invalid = 1;
2691
2692 if (deprecated_target_wait_hook)
2693 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2694 else
2695 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2696
2697 if (debug_infrun)
2698 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2699
2700 /* If an error happens while handling the event, propagate GDB's
2701 knowledge of the executing state to the frontend/user running
2702 state. */
2703 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2704
2705 if (ecs->ws.kind == TARGET_WAITKIND_SYSCALL_ENTRY
2706 || ecs->ws.kind == TARGET_WAITKIND_SYSCALL_RETURN)
2707 ecs->ws.value.syscall_number = UNKNOWN_SYSCALL;
2708
2709 /* Now figure out what to do with the result. */
2710 handle_inferior_event (ecs);
2711
2712 /* No error, don't finish the state yet. */
2713 discard_cleanups (old_chain);
2714
2715 if (!ecs->wait_some_more)
2716 break;
2717 }
2718
2719 do_cleanups (old_cleanups);
2720 }
2721
2722 /* Asynchronous version of wait_for_inferior. It is called by the
2723 event loop whenever a change of state is detected on the file
2724 descriptor corresponding to the target. It can be called more than
2725 once to complete a single execution command. In such cases we need
2726 to keep the state in a global variable ECSS. If it is the last time
2727 that this function is called for a single execution command, then
2728 report to the user that the inferior has stopped, and do the
2729 necessary cleanups. */
2730
2731 void
2732 fetch_inferior_event (void *client_data)
2733 {
2734 struct execution_control_state ecss;
2735 struct execution_control_state *ecs = &ecss;
2736 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2737 struct cleanup *ts_old_chain;
2738 int was_sync = sync_execution;
2739 int cmd_done = 0;
2740
2741 memset (ecs, 0, sizeof (*ecs));
2742
2743 /* We're handling a live event, so make sure we're doing live
2744 debugging. If we're looking at traceframes while the target is
2745 running, we're going to need to get back to that mode after
2746 handling the event. */
2747 if (non_stop)
2748 {
2749 make_cleanup_restore_current_traceframe ();
2750 set_current_traceframe (-1);
2751 }
2752
2753 if (non_stop)
2754 /* In non-stop mode, the user/frontend should not notice a thread
2755 switch due to internal events. Make sure we revert to the
2756 user selected thread and frame after handling the event and
2757 running any breakpoint commands. */
2758 make_cleanup_restore_current_thread ();
2759
2760 overlay_cache_invalid = 1;
2761
2762 make_cleanup_restore_integer (&execution_direction);
2763 execution_direction = target_execution_direction ();
2764
2765 if (deprecated_target_wait_hook)
2766 ecs->ptid =
2767 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2768 else
2769 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2770
2771 if (debug_infrun)
2772 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2773
2774 if (non_stop
2775 && ecs->ws.kind != TARGET_WAITKIND_IGNORE
2776 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2777 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2778 /* In non-stop mode, each thread is handled individually. Switch
2779 early, so the global state is set correctly for this
2780 thread. */
2781 context_switch (ecs->ptid);
2782
2783 /* If an error happens while handling the event, propagate GDB's
2784 knowledge of the executing state to the frontend/user running
2785 state. */
2786 if (!non_stop)
2787 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2788 else
2789 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2790
2791 /* This cleanup runs before the make_cleanup_restore_current_thread above,
2792 so that it still applies to the thread which has thrown the exception. */
2793 make_bpstat_clear_actions_cleanup ();
2794
2795 /* Now figure out what to do with the result. */
2796 handle_inferior_event (ecs);
2797
2798 if (!ecs->wait_some_more)
2799 {
2800 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2801
2802 delete_step_thread_step_resume_breakpoint ();
2803
2804 /* We may not find an inferior if this was a process exit. */
2805 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
2806 normal_stop ();
2807
2808 if (target_has_execution
2809 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2810 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2811 && ecs->event_thread->step_multi
2812 && ecs->event_thread->control.stop_step)
2813 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2814 else
2815 {
2816 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2817 cmd_done = 1;
2818 }
2819 }
2820
2821 /* No error, don't finish the thread states yet. */
2822 discard_cleanups (ts_old_chain);
2823
2824 /* Revert thread and frame. */
2825 do_cleanups (old_chain);
2826
2827 /* If the inferior was in sync execution mode, and now isn't,
2828 restore the prompt (a synchronous execution command has finished,
2829 and we're ready for input). */
2830 if (interpreter_async && was_sync && !sync_execution)
2831 display_gdb_prompt (0);
2832
2833 if (cmd_done
2834 && !was_sync
2835 && exec_done_display_p
2836 && (ptid_equal (inferior_ptid, null_ptid)
2837 || !is_running (inferior_ptid)))
2838 printf_unfiltered (_("completed.\n"));
2839 }
2840
2841 /* Record the frame and location we're currently stepping through. */
2842 void
2843 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2844 {
2845 struct thread_info *tp = inferior_thread ();
2846
2847 tp->control.step_frame_id = get_frame_id (frame);
2848 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
2849
2850 tp->current_symtab = sal.symtab;
2851 tp->current_line = sal.line;
2852 }
2853
2854 /* Clear context switchable stepping state. */
2855
2856 void
2857 init_thread_stepping_state (struct thread_info *tss)
2858 {
2859 tss->stepping_over_breakpoint = 0;
2860 tss->step_after_step_resume_breakpoint = 0;
2861 }
2862
2863 /* Return the cached copy of the last pid/waitstatus returned by
2864 target_wait()/deprecated_target_wait_hook(). The data is actually
2865 cached by handle_inferior_event(), which gets called immediately
2866 after target_wait()/deprecated_target_wait_hook(). */
2867
2868 void
2869 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
2870 {
2871 *ptidp = target_last_wait_ptid;
2872 *status = target_last_waitstatus;
2873 }
2874
2875 void
2876 nullify_last_target_wait_ptid (void)
2877 {
2878 target_last_wait_ptid = minus_one_ptid;
2879 }
2880
2881 /* Switch thread contexts. */
2882
2883 static void
2884 context_switch (ptid_t ptid)
2885 {
2886 if (debug_infrun && !ptid_equal (ptid, inferior_ptid))
2887 {
2888 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
2889 target_pid_to_str (inferior_ptid));
2890 fprintf_unfiltered (gdb_stdlog, "to %s\n",
2891 target_pid_to_str (ptid));
2892 }
2893
2894 switch_to_thread (ptid);
2895 }
2896
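/* If the event in ECS is a SIGTRAP caused by a software breakpoint,
   adjust the event thread's PC back to the breakpoint address, to
   compensate for architectures where the reported PC is past the
   breakpoint instruction (gdbarch_decr_pc_after_break).  */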
2897 static void
2898 adjust_pc_after_break (struct execution_control_state *ecs)
2899 {
2900 struct regcache *regcache;
2901 struct gdbarch *gdbarch;
2902 struct address_space *aspace;
2903 CORE_ADDR breakpoint_pc;
2904
2905 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
2906 we aren't, just return.
2907
2908 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
2909 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
2910 implemented by software breakpoints should be handled through the normal
2911 breakpoint layer.
2912
2913 NOTE drow/2004-01-31: On some targets, breakpoints may generate
2914 different signals (SIGILL or SIGEMT for instance), but it is less
2915 clear where the PC is pointing afterwards. It may not match
2916 gdbarch_decr_pc_after_break. I don't know any specific target that
2917 generates these signals at breakpoints (the code has been in GDB since at
2918 least 1992) so I can not guess how to handle them here.
2919
2920 In earlier versions of GDB, a target with
2921 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
2922 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
2923 target with both of these set in GDB history, and it seems unlikely to be
2924 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
2925
2926 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
2927 return;
2928
2929 if (ecs->ws.value.sig != TARGET_SIGNAL_TRAP)
2930 return;
2931
2932 /* In reverse execution, when a breakpoint is hit, the instruction
2933 under it has already been de-executed. The reported PC always
2934 points at the breakpoint address, so adjusting it further would
2935 be wrong. E.g., consider this case on a decr_pc_after_break == 1
2936 architecture:
2937
2938 B1 0x08000000 : INSN1
2939 B2 0x08000001 : INSN2
2940 0x08000002 : INSN3
2941 PC -> 0x08000003 : INSN4
2942
2943 Say you're stopped at 0x08000003 as above. Reverse continuing
2944 from that point should hit B2 as below. Reading the PC when the
2945 SIGTRAP is reported should read 0x08000001 and INSN2 should have
2946 been de-executed already.
2947
2948 B1 0x08000000 : INSN1
2949 B2 PC -> 0x08000001 : INSN2
2950 0x08000002 : INSN3
2951 0x08000003 : INSN4
2952
2953 We can't apply the same logic as for forward execution, because
2954 we would wrongly adjust the PC to 0x08000000, since there's a
2955 breakpoint at PC - 1. We'd then report a hit on B1, although
2956 INSN1 hadn't been de-executed yet. Doing nothing is the correct
2957 behaviour. */
2958 if (execution_direction == EXEC_REVERSE)
2959 return;
2960
2961 /* If this target does not decrement the PC after breakpoints, then
2962 we have nothing to do. */
2963 regcache = get_thread_regcache (ecs->ptid);
2964 gdbarch = get_regcache_arch (regcache);
2965 if (gdbarch_decr_pc_after_break (gdbarch) == 0)
2966 return;
2967
2968 aspace = get_regcache_aspace (regcache);
2969
2970 /* Find the location where (if we've hit a breakpoint) the
2971 breakpoint would be. */
2972 breakpoint_pc = regcache_read_pc (regcache)
2973 - gdbarch_decr_pc_after_break (gdbarch);
2974
2975 /* Check whether there actually is a software breakpoint inserted at
2976 that location.
2977
2978 If in non-stop mode, a race condition is possible where we've
2979 removed a breakpoint, but stop events for that breakpoint were
2980 already queued and arrive later. To suppress those spurious
2981 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
2982 and retire them after a number of stop events are reported. */
2983 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
2984 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
2985 {
2986 struct cleanup *old_cleanups = NULL;
2987
2988 if (RECORD_IS_USED)
2989 old_cleanups = record_gdb_operation_disable_set ();
2990
2991 /* When using hardware single-step, a SIGTRAP is reported for both
2992 a completed single-step and a software breakpoint. We need to
2993 differentiate between the two, as the latter needs adjusting
2994 but the former does not.
2995
2996 The SIGTRAP can be due to a completed hardware single-step only if
2997 - we didn't insert software single-step breakpoints
2998 - the thread to be examined is still the current thread
2999 - this thread is currently being stepped
3000
3001 If any of these conditions does not hold, we must have stopped due
3002 to hitting a software breakpoint, and have to back up to the
3003 breakpoint address.
3004
3005 As a special case, we could have hardware single-stepped a
3006 software breakpoint. In this case (prev_pc == breakpoint_pc),
3007 we also need to back up to the breakpoint address. */
3008
3009 if (singlestep_breakpoints_inserted_p
3010 || !ptid_equal (ecs->ptid, inferior_ptid)
3011 || !currently_stepping (ecs->event_thread)
3012 || ecs->event_thread->prev_pc == breakpoint_pc)
3013 regcache_write_pc (regcache, breakpoint_pc);
3014
3015 if (RECORD_IS_USED)
3016 do_cleanups (old_cleanups);
3017 }
3018 }
3019
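/* Reset the inferior-wait state to its defaults: wait for events
   from any ptid, in the normal wait state.  */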
3020 void
3021 init_infwait_state (void)
3022 {
3023 waiton_ptid = pid_to_ptid (-1);
3024 infwait_state = infwait_normal_state;
3025 }
3026
3027 void
3028 error_is_running (void)
3029 {
3030 error (_("Cannot execute this command while "
3031 "the selected thread is running."));
3032 }
3033
3034 void
3035 ensure_not_running (void)
3036 {
3037 if (is_running (inferior_ptid))
3038 error_is_running ();
3039 }
3040
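/* Return non-zero if the chain of callers of FRAME reaches the frame
   whose id is STEP_FRAME_ID while crossing only inlined frames; that
   is, FRAME was stepped into from that frame via inlined calls.  */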
3041 static int
3042 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
3043 {
3044 for (frame = get_prev_frame (frame);
3045 frame != NULL;
3046 frame = get_prev_frame (frame))
3047 {
3048 if (frame_id_eq (get_frame_id (frame), step_frame_id))
3049 return 1;
3050 if (get_frame_type (frame) != INLINE_FRAME)
3051 break;
3052 }
3053
3054 return 0;
3055 }
3056
3057 /* Auxiliary function that handles syscall entry/return events.
3058 It returns 1 if the inferior should keep going (and GDB
3059 should ignore the event), or 0 if the event deserves to be
3060 processed. */
3061
3062 static int
3063 handle_syscall_event (struct execution_control_state *ecs)
3064 {
3065 struct regcache *regcache;
3066 struct gdbarch *gdbarch;
3067 int syscall_number;
3068
3069 if (!ptid_equal (ecs->ptid, inferior_ptid))
3070 context_switch (ecs->ptid);
3071
3072 regcache = get_thread_regcache (ecs->ptid);
3073 gdbarch = get_regcache_arch (regcache);
3074 syscall_number = gdbarch_get_syscall_number (gdbarch, ecs->ptid);
3075 stop_pc = regcache_read_pc (regcache);
3076
3077 target_last_waitstatus.value.syscall_number = syscall_number;
3078
3079 if (catch_syscall_enabled () > 0
3080 && catching_syscall_number (syscall_number) > 0)
3081 {
3082 if (debug_infrun)
3083 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
3084 syscall_number);
3085
3086 ecs->event_thread->control.stop_bpstat
3087 = bpstat_stop_status (get_regcache_aspace (regcache),
3088 stop_pc, ecs->ptid);
3089 ecs->random_signal
3090 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat);
3091
3092 if (!ecs->random_signal)
3093 {
3094 /* Catchpoint hit. */
3095 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
3096 return 0;
3097 }
3098 }
3099
3100 /* If no catchpoint triggered for this, then keep going. */
3101 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3102 keep_going (ecs);
3103 return 1;
3104 }
3105
3106 /* Clear the supplied execution_control_state's stop_func_* fields. */
3107
3108 static void
3109 clear_stop_func (struct execution_control_state *ecs)
3110 {
3111 ecs->stop_func_filled_in = 0;
3112 ecs->stop_func_start = 0;
3113 ecs->stop_func_end = 0;
3114 ecs->stop_func_name = NULL;
3115 }
3116
3117 /* Lazily fill in the execution_control_state's stop_func_* fields. */
3118
3119 static void
3120 fill_in_stop_func (struct gdbarch *gdbarch,
3121 struct execution_control_state *ecs)
3122 {
3123 if (!ecs->stop_func_filled_in)
3124 {
3125 /* Don't care about return value; stop_func_start and stop_func_name
3126 will both be 0 if it doesn't work. */
3127 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3128 &ecs->stop_func_start, &ecs->stop_func_end);
3129 ecs->stop_func_start
3130 += gdbarch_deprecated_function_start_offset (gdbarch);
3131
3132 ecs->stop_func_filled_in = 1;
3133 }
3134 }
3135
3136 /* Given an execution control state that has been freshly filled in
3137 by an event from the inferior, figure out what it means and take
3138 appropriate action. */
3139
3140 static void
3141 handle_inferior_event (struct execution_control_state *ecs)
3142 {
3143 struct frame_info *frame;
3144 struct gdbarch *gdbarch;
3145 int stopped_by_watchpoint;
3146 int stepped_after_stopped_by_watchpoint = 0;
3147 struct symtab_and_line stop_pc_sal;
3148 enum stop_kind stop_soon;
3149
3150 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
3151 {
3152 /* We had an event in the inferior, but we are not interested in
3153 handling it at this level. The lower layers have already
3154 done what needs to be done, if anything.
3155
3156 One of the possible circumstances for this is when the
3157 inferior produces output for the console. The inferior has
3158 not stopped, and we are ignoring the event. Another possible
3159 circumstance is any event which the lower level knows will be
3160 reported multiple times without an intervening resume. */
3161 if (debug_infrun)
3162 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
3163 prepare_to_wait (ecs);
3164 return;
3165 }
3166
3167 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3168 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
3169 {
3170 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
3171
3172 gdb_assert (inf);
3173 stop_soon = inf->control.stop_soon;
3174 }
3175 else
3176 stop_soon = NO_STOP_QUIETLY;
3177
3178 /* Cache the last pid/waitstatus. */
3179 target_last_wait_ptid = ecs->ptid;
3180 target_last_waitstatus = ecs->ws;
3181
3182 /* Always clear state belonging to the previous time we stopped. */
3183 stop_stack_dummy = STOP_NONE;
3184
3185 /* If it's a new process, add it to the thread database. */
3186
3187 ecs->new_thread_event = (!ptid_equal (ecs->ptid, inferior_ptid)
3188 && !ptid_equal (ecs->ptid, minus_one_ptid)
3189 && !in_thread_list (ecs->ptid));
3190
3191 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3192 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED && ecs->new_thread_event)
3193 add_thread (ecs->ptid);
3194
3195 ecs->event_thread = find_thread_ptid (ecs->ptid);
3196
3197 /* Dependent on valid ECS->EVENT_THREAD. */
3198 adjust_pc_after_break (ecs);
3199
3200 /* Dependent on the current PC value modified by adjust_pc_after_break. */
3201 reinit_frame_cache ();
3202
3203 breakpoint_retire_moribund ();
3204
3205 /* First, distinguish signals caused by the debugger from signals
3206 that have to do with the program's own actions. Note that
3207 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
3208 on the operating system version. Here we detect when a SIGILL or
3209 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
3210 something similar for SIGSEGV, since a SIGSEGV will be generated
3211 when we're trying to execute a breakpoint instruction on a
3212 non-executable stack. This happens for call dummy breakpoints
3213 for architectures like SPARC that place call dummies on the
3214 stack. */
3215 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
3216 && (ecs->ws.value.sig == TARGET_SIGNAL_ILL
3217 || ecs->ws.value.sig == TARGET_SIGNAL_SEGV
3218 || ecs->ws.value.sig == TARGET_SIGNAL_EMT))
3219 {
3220 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3221
3222 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
3223 regcache_read_pc (regcache)))
3224 {
3225 if (debug_infrun)
3226 fprintf_unfiltered (gdb_stdlog,
3227 "infrun: Treating signal as SIGTRAP\n");
3228 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
3229 }
3230 }
3231
3232 /* Mark the non-executing threads accordingly. In all-stop, all
3233 threads of all processes are stopped when we get any event
3234 reported. In non-stop mode, only the event thread stops. If
3235 we're handling a process exit in non-stop mode, there's nothing
3236 to do, as threads of the dead process are gone, and threads of
3237 any other process were left running. */
3238 if (!non_stop)
3239 set_executing (minus_one_ptid, 0);
3240 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3241 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
3242 set_executing (inferior_ptid, 0);
3243
3244 switch (infwait_state)
3245 {
3246 case infwait_thread_hop_state:
3247 if (debug_infrun)
3248 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_thread_hop_state\n");
3249 break;
3250
3251 case infwait_normal_state:
3252 if (debug_infrun)
3253 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
3254 break;
3255
3256 case infwait_step_watch_state:
3257 if (debug_infrun)
3258 fprintf_unfiltered (gdb_stdlog,
3259 "infrun: infwait_step_watch_state\n");
3260
3261 stepped_after_stopped_by_watchpoint = 1;
3262 break;
3263
3264 case infwait_nonstep_watch_state:
3265 if (debug_infrun)
3266 fprintf_unfiltered (gdb_stdlog,
3267 "infrun: infwait_nonstep_watch_state\n");
3268 insert_breakpoints ();
3269
3270 /* FIXME-maybe: is this cleaner than setting a flag? Does it
3271 handle things like signals arriving and other things happening
3272 in combination correctly? */
3273 stepped_after_stopped_by_watchpoint = 1;
3274 break;
3275
3276 default:
3277 internal_error (__FILE__, __LINE__, _("bad switch"));
3278 }
3279
3280 infwait_state = infwait_normal_state;
3281 waiton_ptid = pid_to_ptid (-1);
3282
3283 switch (ecs->ws.kind)
3284 {
3285 case TARGET_WAITKIND_LOADED:
3286 if (debug_infrun)
3287 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
3288 /* Ignore gracefully during startup of the inferior, as it might
3289 be the shell which has just loaded some objects; otherwise,
3290 add the symbols for the newly loaded objects. Also ignore at
3291 the beginning of an attach or remote session; we will query
3292 the full list of libraries once the connection is
3293 established. */
3294 if (stop_soon == NO_STOP_QUIETLY)
3295 {
3296 /* Check for any newly added shared libraries if we're
3297 supposed to be adding them automatically. Switch
3298 terminal for any messages produced by
3299 breakpoint_re_set. */
3300 target_terminal_ours_for_output ();
3301 /* NOTE: cagney/2003-11-25: Make certain that the target
3302 stack's section table is kept up-to-date. Architectures,
3303 (e.g., PPC64), use the section table to perform
3304 operations such as address => section name and hence
3305 require the table to contain all sections (including
3306 those found in shared libraries). */
3307 #ifdef SOLIB_ADD
3308 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
3309 #else
3310 solib_add (NULL, 0, &current_target, auto_solib_add);
3311 #endif
3312 target_terminal_inferior ();
3313
3314 /* If requested, stop when the dynamic linker notifies
3315 gdb of events. This allows the user to get control
3316 and place breakpoints in initializer routines for
3317 dynamically loaded objects (among other things). */
3318 if (stop_on_solib_events)
3319 {
3320 /* Make sure we print "Stopped due to solib-event" in
3321 normal_stop. */
3322 stop_print_frame = 1;
3323
3324 stop_stepping (ecs);
3325 return;
3326 }
3327
3328 /* NOTE drow/2007-05-11: This might be a good place to check
3329 for "catch load". */
3330 }
3331
3332 /* If we are skipping through a shell, or through shared library
3333 loading that we aren't interested in, resume the program. If
3334 we're running the program normally, also resume. But stop if
3335 we're attaching or setting up a remote connection. */
3336 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3337 {
3338 /* Loading of shared libraries might have changed breakpoint
3339 addresses. Make sure new breakpoints are inserted. */
3340 if (stop_soon == NO_STOP_QUIETLY
3341 && !breakpoints_always_inserted_mode ())
3342 insert_breakpoints ();
3343 resume (0, TARGET_SIGNAL_0);
3344 prepare_to_wait (ecs);
3345 return;
3346 }
3347
3348 break;
3349
3350 case TARGET_WAITKIND_SPURIOUS:
3351 if (debug_infrun)
3352 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3353 resume (0, TARGET_SIGNAL_0);
3354 prepare_to_wait (ecs);
3355 return;
3356
3357 case TARGET_WAITKIND_EXITED:
3358 if (debug_infrun)
3359 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXITED\n");
3360 inferior_ptid = ecs->ptid;
3361 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3362 set_current_program_space (current_inferior ()->pspace);
3363 handle_vfork_child_exec_or_exit (0);
3364 target_terminal_ours (); /* Must do this before mourn anyway. */
3365 print_exited_reason (ecs->ws.value.integer);
3366
3367 /* Record the exit code in the convenience variable $_exitcode, so
3368 that the user can inspect this again later. */
3369 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3370 (LONGEST) ecs->ws.value.integer);
3371
3372 /* Also record this in the inferior itself. */
3373 current_inferior ()->has_exit_code = 1;
3374 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
3375
3376 gdb_flush (gdb_stdout);
3377 target_mourn_inferior ();
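      /* The inferior is gone; discard any leftover software single-step
	 breakpoint bookkeeping.  */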
3378 singlestep_breakpoints_inserted_p = 0;
3379 cancel_single_step_breakpoints ();
3380 stop_print_frame = 0;
3381 stop_stepping (ecs);
3382 return;
3383
3384 case TARGET_WAITKIND_SIGNALLED:
3385 if (debug_infrun)
3386 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SIGNALLED\n");
3387 inferior_ptid = ecs->ptid;
3388 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3389 set_current_program_space (current_inferior ()->pspace);
3390 handle_vfork_child_exec_or_exit (0);
3391 stop_print_frame = 0;
3392 target_terminal_ours (); /* Must do this before mourn anyway. */
3393
3394 /* Note: By definition of TARGET_WAITKIND_SIGNALLED, we shouldn't
3395 reach here unless the inferior is dead. However, for years
3396 target_kill() was called here, which hints that fatal signals aren't
3397 really fatal on some systems. If that's true, then some changes
3398 may be needed. */
3399 target_mourn_inferior ();
3400
3401 print_signal_exited_reason (ecs->ws.value.sig);
3402 singlestep_breakpoints_inserted_p = 0;
3403 cancel_single_step_breakpoints ();
3404 stop_stepping (ecs);
3405 return;
3406
3407 /* The following are the only cases in which we keep going;
3408 the above cases end in a continue or goto. */
3409 case TARGET_WAITKIND_FORKED:
3410 case TARGET_WAITKIND_VFORKED:
3411 if (debug_infrun)
3412 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3413
3414 /* Check whether the inferior is displaced stepping. */
3415 {
3416 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3417 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3418 struct displaced_step_inferior_state *displaced
3419 = get_displaced_stepping_state (ptid_get_pid (ecs->ptid));
3420
3421 	/* If displaced stepping is supported, and thread ecs->ptid is
3422 	   displaced stepping, fix up its state before following the fork.  */
3423 if (displaced && ptid_equal (displaced->step_ptid, ecs->ptid))
3424 {
3425 struct inferior *parent_inf
3426 = find_inferior_pid (ptid_get_pid (ecs->ptid));
3427 struct regcache *child_regcache;
3428 CORE_ADDR parent_pc;
3429
3430 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
3431 indicating that the displaced stepping of syscall instruction
3432 has been done. Perform cleanup for parent process here. Note
3433 that this operation also cleans up the child process for vfork,
3434 because their pages are shared. */
3435 displaced_step_fixup (ecs->ptid, TARGET_SIGNAL_TRAP);
3436
3437 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3438 {
3439 /* Restore scratch pad for child process. */
3440 displaced_step_restore (displaced, ecs->ws.value.related_pid);
3441 }
3442
3443 	    /* Since the vfork/fork syscall instruction was executed in
3444 	       the scratchpad, the child's PC is also within the
3445 	       scratchpad.  Set the child's PC to the parent's PC value,
3446 	       which has already been fixed up.  FIXME: we use the
3447 	       parent's aspace here, although we're touching the child,
3448 	       because the child hasn't been added to the inferior list
3449 	       yet at this point.  */
3449
3450 child_regcache
3451 = get_thread_arch_aspace_regcache (ecs->ws.value.related_pid,
3452 gdbarch,
3453 parent_inf->aspace);
3454 /* Read PC value of parent process. */
3455 parent_pc = regcache_read_pc (regcache);
3456
3457 if (debug_displaced)
3458 fprintf_unfiltered (gdb_stdlog,
3459 "displaced: write child pc from %s to %s\n",
3460 paddress (gdbarch,
3461 regcache_read_pc (child_regcache)),
3462 paddress (gdbarch, parent_pc));
3463
3464 regcache_write_pc (child_regcache, parent_pc);
3465 }
3466 }
3467
3468 if (!ptid_equal (ecs->ptid, inferior_ptid))
3469 {
3470 context_switch (ecs->ptid);
3471 reinit_frame_cache ();
3472 }
3473
3474 /* Immediately detach breakpoints from the child before there's
3475 any chance of letting the user delete breakpoints from the
3476 breakpoint lists. If we don't do this early, it's easy to
3477 	 leave left-over traps in the child, viz.: "break foo; catch
3478 fork; c; <fork>; del; c; <child calls foo>". We only follow
3479 the fork on the last `continue', and by that time the
3480 breakpoint at "foo" is long gone from the breakpoint table.
3481 If we vforked, then we don't need to unpatch here, since both
3482 parent and child are sharing the same memory pages; we'll
3483 need to unpatch at follow/detach time instead to be certain
3484 that new breakpoints added between catchpoint hit time and
3485 vfork follow are detached. */
3486 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
3487 {
3488 int child_pid = ptid_get_pid (ecs->ws.value.related_pid);
3489
3490 /* This won't actually modify the breakpoint list, but will
3491 physically remove the breakpoints from the child. */
3492 detach_breakpoints (child_pid);
3493 }
3494
3495 if (singlestep_breakpoints_inserted_p)
3496 {
3497 /* Pull the single step breakpoints out of the target. */
3498 remove_single_step_breakpoints ();
3499 singlestep_breakpoints_inserted_p = 0;
3500 }
3501
3502 /* In case the event is caught by a catchpoint, remember that
3503 the event is to be followed at the next resume of the thread,
3504 and not immediately. */
3505 ecs->event_thread->pending_follow = ecs->ws;
3506
3507 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3508
3509 ecs->event_thread->control.stop_bpstat
3510 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3511 stop_pc, ecs->ptid);
3512
3513 /* Note that we're interested in knowing the bpstat actually
3514 causes a stop, not just if it may explain the signal.
3515 Software watchpoints, for example, always appear in the
3516 bpstat. */
3517 ecs->random_signal
3518 = !bpstat_causes_stop (ecs->event_thread->control.stop_bpstat);
3519
3520 /* If no catchpoint triggered for this, then keep going. */
3521 if (ecs->random_signal)
3522 {
3523 ptid_t parent;
3524 ptid_t child;
3525 int should_resume;
3526 int follow_child
3527 = (follow_fork_mode_string == follow_fork_mode_child);
3528
3529 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3530
3531 should_resume = follow_fork ();
3532
3533 parent = ecs->ptid;
3534 child = ecs->ws.value.related_pid;
3535
3536 /* In non-stop mode, also resume the other branch. */
3537 if (non_stop && !detach_fork)
3538 {
3539 if (follow_child)
3540 switch_to_thread (parent);
3541 else
3542 switch_to_thread (child);
3543
3544 ecs->event_thread = inferior_thread ();
3545 ecs->ptid = inferior_ptid;
3546 keep_going (ecs);
3547 }
3548
3549 if (follow_child)
3550 switch_to_thread (child);
3551 else
3552 switch_to_thread (parent);
3553
3554 ecs->event_thread = inferior_thread ();
3555 ecs->ptid = inferior_ptid;
3556
3557 if (should_resume)
3558 keep_going (ecs);
3559 else
3560 stop_stepping (ecs);
3561 return;
3562 }
3563 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
3564 goto process_event_stop_test;
3565
3566 case TARGET_WAITKIND_VFORK_DONE:
3567 /* Done with the shared memory region. Re-insert breakpoints in
3568 the parent, and keep going. */
3569
3570 if (debug_infrun)
3571 fprintf_unfiltered (gdb_stdlog,
3572 "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3573
3574 if (!ptid_equal (ecs->ptid, inferior_ptid))
3575 context_switch (ecs->ptid);
3576
3577 current_inferior ()->waiting_for_vfork_done = 0;
3578 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3579 /* This also takes care of reinserting breakpoints in the
3580 previously locked inferior. */
3581 keep_going (ecs);
3582 return;
3583
3584 case TARGET_WAITKIND_EXECD:
3585 if (debug_infrun)
3586 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3587
3588 if (!ptid_equal (ecs->ptid, inferior_ptid))
3589 {
3590 context_switch (ecs->ptid);
3591 reinit_frame_cache ();
3592 }
3593
3594 singlestep_breakpoints_inserted_p = 0;
3595 cancel_single_step_breakpoints ();
3596
3597 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3598
3599 /* Do whatever is necessary to the parent branch of the vfork. */
3600 handle_vfork_child_exec_or_exit (1);
3601
3602 /* This causes the eventpoints and symbol table to be reset.
3603 Must do this now, before trying to determine whether to
3604 stop. */
3605 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3606
3607 ecs->event_thread->control.stop_bpstat
3608 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3609 stop_pc, ecs->ptid);
3610 ecs->random_signal
3611 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat);
3612
3613 /* Note that this may be referenced from inside
3614 bpstat_stop_status above, through inferior_has_execd. */
3615 xfree (ecs->ws.value.execd_pathname);
3616 ecs->ws.value.execd_pathname = NULL;
3617
3618 /* If no catchpoint triggered for this, then keep going. */
3619 if (ecs->random_signal)
3620 {
3621 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3622 keep_going (ecs);
3623 return;
3624 }
3625 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
3626 goto process_event_stop_test;
3627
3628 /* Be careful not to try to gather much state about a thread
3629 that's in a syscall. It's frequently a losing proposition. */
3630 case TARGET_WAITKIND_SYSCALL_ENTRY:
3631 if (debug_infrun)
3632 fprintf_unfiltered (gdb_stdlog,
3633 "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3634       /* Get the current syscall number.  */
3635 if (handle_syscall_event (ecs) != 0)
3636 return;
3637 goto process_event_stop_test;
3638
3639 /* Before examining the threads further, step this thread to
3640 get it entirely out of the syscall. (We get notice of the
3641 event when the thread is just on the verge of exiting a
3642 syscall. Stepping one instruction seems to get it back
3643 into user code.) */
3644 case TARGET_WAITKIND_SYSCALL_RETURN:
3645 if (debug_infrun)
3646 fprintf_unfiltered (gdb_stdlog,
3647 "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
3648 if (handle_syscall_event (ecs) != 0)
3649 return;
3650 goto process_event_stop_test;
3651
3652 case TARGET_WAITKIND_STOPPED:
3653 if (debug_infrun)
3654 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
3655 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
3656 break;
3657
3658 case TARGET_WAITKIND_NO_HISTORY:
3659 if (debug_infrun)
3660 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_HISTORY\n");
3661 /* Reverse execution: target ran out of history info. */
3662 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3663 print_no_history_reason ();
3664 stop_stepping (ecs);
3665 return;
3666 }
3667
3668 if (ecs->new_thread_event)
3669 {
3670 if (non_stop)
3671 /* Non-stop assumes that the target handles adding new threads
3672 to the thread list. */
3673 internal_error (__FILE__, __LINE__,
3674 "targets should add new threads to the thread "
3675 "list themselves in non-stop mode.");
3676
3677 /* We may want to consider not doing a resume here in order to
3678 give the user a chance to play with the new thread. It might
3679 be good to make that a user-settable option. */
3680
3681       /* At this point, all threads are stopped (this happens
3682 	 automatically in either the OS or the native code).  Therefore
3683 	 we need to continue all threads in order to make progress.  */
3684
3685 if (!ptid_equal (ecs->ptid, inferior_ptid))
3686 context_switch (ecs->ptid);
3687 target_resume (RESUME_ALL, 0, TARGET_SIGNAL_0);
3688 prepare_to_wait (ecs);
3689 return;
3690 }
3691
3692 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED)
3693 {
3694 /* Do we need to clean up the state of a thread that has
3695 completed a displaced single-step? (Doing so usually affects
3696 the PC, so do it here, before we set stop_pc.) */
3697 displaced_step_fixup (ecs->ptid,
3698 ecs->event_thread->suspend.stop_signal);
3699
3700 /* If we either finished a single-step or hit a breakpoint, but
3701 the user wanted this thread to be stopped, pretend we got a
3702 SIG0 (generic unsignaled stop). */
3703
3704 if (ecs->event_thread->stop_requested
3705 && ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3706 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3707 }
3708
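  /* Cache the event thread's PC; the stop analysis below keys off
     of it.  */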
3709 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3710
3711 if (debug_infrun)
3712 {
3713 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3714 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3715 struct cleanup *old_chain = save_inferior_ptid ();
3716
3717 inferior_ptid = ecs->ptid;
3718
3719 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
3720 paddress (gdbarch, stop_pc));
3721 if (target_stopped_by_watchpoint ())
3722 {
3723 CORE_ADDR addr;
3724
3725 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
3726
3727 if (target_stopped_data_address (&current_target, &addr))
3728 fprintf_unfiltered (gdb_stdlog,
3729 "infrun: stopped data address = %s\n",
3730 paddress (gdbarch, addr));
3731 else
3732 fprintf_unfiltered (gdb_stdlog,
3733 "infrun: (no data address available)\n");
3734 }
3735
3736 do_cleanups (old_chain);
3737 }
3738
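  /* We were single-stepping some other thread past a software
     single-step breakpoint that was meant for a different thread (a
     thread hop, see below).  If that step has completed, switch back
     to the thread we were originally stepping.  */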
3739 if (stepping_past_singlestep_breakpoint)
3740 {
3741 gdb_assert (singlestep_breakpoints_inserted_p);
3742 gdb_assert (ptid_equal (singlestep_ptid, ecs->ptid));
3743 gdb_assert (!ptid_equal (singlestep_ptid, saved_singlestep_ptid));
3744
3745 stepping_past_singlestep_breakpoint = 0;
3746
3747 /* We've either finished single-stepping past the single-step
3748 breakpoint, or stopped for some other reason. It would be nice if
3749 we could tell, but we can't reliably. */
3750 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3751 {
3752 if (debug_infrun)
3753 fprintf_unfiltered (gdb_stdlog,
3754 "infrun: stepping_past_"
3755 "singlestep_breakpoint\n");
3756 /* Pull the single step breakpoints out of the target. */
3757 remove_single_step_breakpoints ();
3758 singlestep_breakpoints_inserted_p = 0;
3759
3760 ecs->random_signal = 0;
3761 ecs->event_thread->control.trap_expected = 0;
3762
3763 context_switch (saved_singlestep_ptid);
3764 if (deprecated_context_hook)
3765 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3766
3767 resume (1, TARGET_SIGNAL_0);
3768 prepare_to_wait (ecs);
3769 return;
3770 }
3771 }
3772
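  /* If the step the user asked for was deferred so that another
     thread could first be stepped over a breakpoint it was stopped
     at, switch back to the user's thread and resume the deferred
     step now (see prepare_to_proceed).  */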
3773 if (!ptid_equal (deferred_step_ptid, null_ptid))
3774 {
3775 /* In non-stop mode, there's never a deferred_step_ptid set. */
3776 gdb_assert (!non_stop);
3777
3778 /* If we stopped for some other reason than single-stepping, ignore
3779 the fact that we were supposed to switch back. */
3780 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3781 {
3782 if (debug_infrun)
3783 fprintf_unfiltered (gdb_stdlog,
3784 "infrun: handling deferred step\n");
3785
3786 /* Pull the single step breakpoints out of the target. */
3787 if (singlestep_breakpoints_inserted_p)
3788 {
3789 remove_single_step_breakpoints ();
3790 singlestep_breakpoints_inserted_p = 0;
3791 }
3792
3793 ecs->event_thread->control.trap_expected = 0;
3794
3795 /* Note: We do not call context_switch at this point, as the
3796 context is already set up for stepping the original thread. */
3797 switch_to_thread (deferred_step_ptid);
3798 deferred_step_ptid = null_ptid;
3799 /* Suppress spurious "Switching to ..." message. */
3800 previous_inferior_ptid = inferior_ptid;
3801
3802 resume (1, TARGET_SIGNAL_0);
3803 prepare_to_wait (ecs);
3804 return;
3805 }
3806
3807 deferred_step_ptid = null_ptid;
3808 }
3809
3810 /* See if a thread hit a thread-specific breakpoint that was meant for
3811 another thread. If so, then step that thread past the breakpoint,
3812 and continue it. */
3813
3814 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3815 {
3816 int thread_hop_needed = 0;
3817 struct address_space *aspace =
3818 get_regcache_aspace (get_thread_regcache (ecs->ptid));
3819
3820 /* Check if a regular breakpoint has been hit before checking
3821 for a potential single step breakpoint. Otherwise, GDB will
3822 not see this breakpoint hit when stepping onto breakpoints. */
3823 if (regular_breakpoint_inserted_here_p (aspace, stop_pc))
3824 {
3825 ecs->random_signal = 0;
3826 if (!breakpoint_thread_match (aspace, stop_pc, ecs->ptid))
3827 thread_hop_needed = 1;
3828 }
3829 else if (singlestep_breakpoints_inserted_p)
3830 {
3831 /* We have not context switched yet, so this should be true
3832 no matter which thread hit the singlestep breakpoint. */
3833 gdb_assert (ptid_equal (inferior_ptid, singlestep_ptid));
3834 if (debug_infrun)
3835 fprintf_unfiltered (gdb_stdlog, "infrun: software single step "
3836 "trap for %s\n",
3837 target_pid_to_str (ecs->ptid));
3838
3839 ecs->random_signal = 0;
3840 /* The call to in_thread_list is necessary because PTIDs sometimes
3841 change when we go from single-threaded to multi-threaded. If
3842 the singlestep_ptid is still in the list, assume that it is
3843 really different from ecs->ptid. */
3844 if (!ptid_equal (singlestep_ptid, ecs->ptid)
3845 && in_thread_list (singlestep_ptid))
3846 {
3847 /* If the PC of the thread we were trying to single-step
3848 has changed, discard this event (which we were going
3849 to ignore anyway), and pretend we saw that thread
3850 trap. This prevents us continuously moving the
3851 single-step breakpoint forward, one instruction at a
3852 time. If the PC has changed, then the thread we were
3853 trying to single-step has trapped or been signalled,
3854 but the event has not been reported to GDB yet.
3855
3856 There might be some cases where this loses signal
3857 information, if a signal has arrived at exactly the
3858 same time that the PC changed, but this is the best
3859 we can do with the information available. Perhaps we
3860 should arrange to report all events for all threads
3861 when they stop, or to re-poll the remote looking for
3862 this particular thread (i.e. temporarily enable
3863 schedlock). */
3864
3865 CORE_ADDR new_singlestep_pc
3866 = regcache_read_pc (get_thread_regcache (singlestep_ptid));
3867
3868 if (new_singlestep_pc != singlestep_pc)
3869 {
3870 enum target_signal stop_signal;
3871
3872 if (debug_infrun)
3873 fprintf_unfiltered (gdb_stdlog, "infrun: unexpected thread,"
3874 " but expected thread advanced also\n");
3875
3876 /* The current context still belongs to
3877 singlestep_ptid. Don't swap here, since that's
3878 the context we want to use. Just fudge our
3879 state and continue. */
3880 stop_signal = ecs->event_thread->suspend.stop_signal;
3881 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3882 ecs->ptid = singlestep_ptid;
3883 ecs->event_thread = find_thread_ptid (ecs->ptid);
3884 ecs->event_thread->suspend.stop_signal = stop_signal;
3885 stop_pc = new_singlestep_pc;
3886 }
3887 else
3888 {
3889 if (debug_infrun)
3890 fprintf_unfiltered (gdb_stdlog,
3891 "infrun: unexpected thread\n");
3892
3893 thread_hop_needed = 1;
3894 stepping_past_singlestep_breakpoint = 1;
3895 saved_singlestep_ptid = singlestep_ptid;
3896 }
3897 }
3898 }
3899
3900 if (thread_hop_needed)
3901 {
3902 struct regcache *thread_regcache;
3903 int remove_status = 0;
3904
3905 if (debug_infrun)
3906 fprintf_unfiltered (gdb_stdlog, "infrun: thread_hop_needed\n");
3907
3908 /* Switch context before touching inferior memory, the
3909 previous thread may have exited. */
3910 if (!ptid_equal (inferior_ptid, ecs->ptid))
3911 context_switch (ecs->ptid);
3912
3913 /* Saw a breakpoint, but it was hit by the wrong thread.
3914 Just continue. */
3915
3916 if (singlestep_breakpoints_inserted_p)
3917 {
3918 /* Pull the single step breakpoints out of the target. */
3919 remove_single_step_breakpoints ();
3920 singlestep_breakpoints_inserted_p = 0;
3921 }
3922
3923 /* If the arch can displace step, don't remove the
3924 breakpoints. */
3925 thread_regcache = get_thread_regcache (ecs->ptid);
3926 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
3927 remove_status = remove_breakpoints ();
3928
3929 /* Did we fail to remove breakpoints? If so, try
3930 to set the PC past the bp. (There's at least
3931 one situation in which we can fail to remove
3932 the bp's: On HP-UX's that use ttrace, we can't
3933 change the address space of a vforking child
3934 process until the child exits (well, okay, not
3935 	     then either :-) or execs.)  */
3936 if (remove_status != 0)
3937 error (_("Cannot step over breakpoint hit in wrong thread"));
3938 else
3939 { /* Single step */
3940 if (!non_stop)
3941 {
3942 /* Only need to require the next event from this
3943 thread in all-stop mode. */
3944 waiton_ptid = ecs->ptid;
3945 infwait_state = infwait_thread_hop_state;
3946 }
3947
3948 ecs->event_thread->stepping_over_breakpoint = 1;
3949 keep_going (ecs);
3950 return;
3951 }
3952 }
3953 else if (singlestep_breakpoints_inserted_p)
3954 {
3955 ecs->random_signal = 0;
3956 }
3957 }
3958 else
3959 ecs->random_signal = 1;
3960
3961 /* See if something interesting happened to the non-current thread. If
3962 so, then switch to that thread. */
3963 if (!ptid_equal (ecs->ptid, inferior_ptid))
3964 {
3965 if (debug_infrun)
3966 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
3967
3968 context_switch (ecs->ptid);
3969
3970 if (deprecated_context_hook)
3971 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3972 }
3973
3974 /* At this point, get hold of the now-current thread's frame. */
3975 frame = get_current_frame ();
3976 gdbarch = get_frame_arch (frame);
3977
3978 if (singlestep_breakpoints_inserted_p)
3979 {
3980 /* Pull the single step breakpoints out of the target. */
3981 remove_single_step_breakpoints ();
3982 singlestep_breakpoints_inserted_p = 0;
3983 }
3984
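  /* Decide whether this stop was caused by a watchpoint triggering,
     unless this event is just the completion of the single step we
     did earlier to move past one.  */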
3985 if (stepped_after_stopped_by_watchpoint)
3986 stopped_by_watchpoint = 0;
3987 else
3988 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
3989
3990 /* If necessary, step over this watchpoint. We'll be back to display
3991 it in a moment. */
3992 if (stopped_by_watchpoint
3993 && (target_have_steppable_watchpoint
3994 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
3995 {
3996 /* At this point, we are stopped at an instruction which has
3997 attempted to write to a piece of memory under control of
3998 a watchpoint. The instruction hasn't actually executed
3999 yet. If we were to evaluate the watchpoint expression
4000 now, we would get the old value, and therefore no change
4001 would seem to have occurred.
4002
4003 In order to make watchpoints work `right', we really need
4004 to complete the memory write, and then evaluate the
4005 watchpoint expression. We do this by single-stepping the
4006 target.
4007
4008 It may not be necessary to disable the watchpoint to stop over
4009 it. For example, the PA can (with some kernel cooperation)
4010 single step over a watchpoint without disabling the watchpoint.
4011
4012 It is far more common to need to disable a watchpoint to step
4013 the inferior over it. If we have non-steppable watchpoints,
4014 we must disable the current watchpoint; it's simplest to
4015 disable all watchpoints and breakpoints. */
4016 int hw_step = 1;
4017
4018 if (!target_have_steppable_watchpoint)
4019 {
4020 remove_breakpoints ();
4021 /* See comment in resume why we need to stop bypassing signals
4022 while breakpoints have been removed. */
4023 target_pass_signals (0, NULL);
4024 }
4025 /* Single step */
4026 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
4027 target_resume (ecs->ptid, hw_step, TARGET_SIGNAL_0);
4028 waiton_ptid = ecs->ptid;
4029 if (target_have_steppable_watchpoint)
4030 infwait_state = infwait_step_watch_state;
4031 else
4032 infwait_state = infwait_nonstep_watch_state;
4033 prepare_to_wait (ecs);
4034 return;
4035 }
4036
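  /* Reset the per-event stop state before analyzing this stop.  */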
4037 clear_stop_func (ecs);
4038 ecs->event_thread->stepping_over_breakpoint = 0;
4039 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
4040 ecs->event_thread->control.stop_step = 0;
4041 stop_print_frame = 1;
4042 ecs->random_signal = 0;
4043 stopped_by_random_signal = 0;
4044
4045 /* Hide inlined functions starting here, unless we just performed stepi or
4046 nexti. After stepi and nexti, always show the innermost frame (not any
4047 inline function call sites). */
4048 if (ecs->event_thread->control.step_range_end != 1)
4049 {
4050 struct address_space *aspace =
4051 get_regcache_aspace (get_thread_regcache (ecs->ptid));
4052
4053 /* skip_inline_frames is expensive, so we avoid it if we can
4054 determine that the address is one where functions cannot have
4055 been inlined. This improves performance with inferiors that
4056 load a lot of shared libraries, because the solib event
4057 breakpoint is defined as the address of a function (i.e. not
4058 inline). Note that we have to check the previous PC as well
4059 as the current one to catch cases when we have just
4060 single-stepped off a breakpoint prior to reinstating it.
4061 Note that we're assuming that the code we single-step to is
4062 not inline, but that's not definitive: there's nothing
4063 preventing the event breakpoint function from containing
4064 inlined code, and the single-step ending up there. If the
4065 user had set a breakpoint on that inlined code, the missing
4066 skip_inline_frames call would break things. Fortunately
4067 that's an extremely unlikely scenario. */
4068 if (!pc_at_non_inline_function (aspace, stop_pc)
4069 && !(ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4070 && ecs->event_thread->control.trap_expected
4071 && pc_at_non_inline_function (aspace,
4072 ecs->event_thread->prev_pc)))
4073 skip_inline_frames (ecs->ptid);
4074 }
4075
4076 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4077 && ecs->event_thread->control.trap_expected
4078 && gdbarch_single_step_through_delay_p (gdbarch)
4079 && currently_stepping (ecs->event_thread))
4080 {
4081 /* We're trying to step off a breakpoint. Turns out that we're
4082 also on an instruction that needs to be stepped multiple
4083 	 times before it has fully executed.  E.g., architectures
4084 with a delay slot. It needs to be stepped twice, once for
4085 the instruction and once for the delay slot. */
4086 int step_through_delay
4087 = gdbarch_single_step_through_delay (gdbarch, frame);
4088
4089 if (debug_infrun && step_through_delay)
4090 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
4091 if (ecs->event_thread->control.step_range_end == 0
4092 && step_through_delay)
4093 {
4094 /* The user issued a continue when stopped at a breakpoint.
4095 Set up for another trap and get out of here. */
4096 ecs->event_thread->stepping_over_breakpoint = 1;
4097 keep_going (ecs);
4098 return;
4099 }
4100 else if (step_through_delay)
4101 {
4102 /* The user issued a step when stopped at a breakpoint.
4103 Maybe we should stop, maybe we should not - the delay
4104 slot *might* correspond to a line of source. In any
4105 	     case, don't decide that here, just set
4106 	     ecs->event_thread->stepping_over_breakpoint, making sure
4107 	     we single-step again before breakpoints are re-inserted.  */
4108 ecs->event_thread->stepping_over_breakpoint = 1;
4109 }
4110 }
4111
4112 /* Look at the cause of the stop, and decide what to do.
4113 The alternatives are:
4114 1) stop_stepping and return; to really stop and return to the debugger,
4115 2) keep_going and return to start up again
4116 (set ecs->event_thread->stepping_over_breakpoint to 1 to single step once)
4117 3) set ecs->random_signal to 1, and the decision between 1 and 2
4118 will be made according to the signal handling tables. */
4119
4120 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4121 || stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_NO_SIGSTOP
4122 || stop_soon == STOP_QUIETLY_REMOTE)
4123 {
4124 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4125 && stop_after_trap)
4126 {
4127 if (debug_infrun)
4128 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
4129 stop_print_frame = 0;
4130 stop_stepping (ecs);
4131 return;
4132 }
4133
4134       /* This originates from start_remote(), start_inferior() and
4135 	 the shared library hook functions.  */
4136 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
4137 {
4138 if (debug_infrun)
4139 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
4140 stop_stepping (ecs);
4141 return;
4142 }
4143
4144 /* This originates from attach_command(). We need to overwrite
4145 the stop_signal here, because some kernels don't ignore a
4146 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
4147 See more comments in inferior.h. On the other hand, if we
4148 get a non-SIGSTOP, report it to the user - assume the backend
4149 will handle the SIGSTOP if it should show up later.
4150
4151 Also consider that the attach is complete when we see a
4152 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
4153 target extended-remote report it instead of a SIGSTOP
4154 (e.g. gdbserver). We already rely on SIGTRAP being our
4155 signal, so this is no exception.
4156
4157 Also consider that the attach is complete when we see a
4158 TARGET_SIGNAL_0. In non-stop mode, GDB will explicitly tell
4159 the target to stop all threads of the inferior, in case the
4160 low level attach operation doesn't stop them implicitly. If
4161 they weren't stopped implicitly, then the stub will report a
4162 TARGET_SIGNAL_0, meaning: stopped for no particular reason
4163 other than GDB's request. */
4164 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
4165 && (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_STOP
4166 || ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4167 || ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_0))
4168 {
4169 stop_stepping (ecs);
4170 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
4171 return;
4172 }
4173
4174 /* See if there is a breakpoint at the current PC. */
4175 ecs->event_thread->control.stop_bpstat
4176 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
4177 stop_pc, ecs->ptid);
4178
4179       /* The following is needed in case a breakpoint condition
4180 	 called a function.  */
4181 stop_print_frame = 1;
4182
4183 /* This is where we handle "moribund" watchpoints. Unlike
4184 software breakpoints traps, hardware watchpoint traps are
4185 always distinguishable from random traps. If no high-level
4186 watchpoint is associated with the reported stop data address
4187 anymore, then the bpstat does not explain the signal ---
4188 simply make sure to ignore it if `stopped_by_watchpoint' is
4189 set. */
4190
4191 if (debug_infrun
4192 && ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4193 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat)
4194 && stopped_by_watchpoint)
4195 fprintf_unfiltered (gdb_stdlog,
4196 "infrun: no user watchpoint explains "
4197 "watchpoint SIGTRAP, ignoring\n");
4198
4199 /* NOTE: cagney/2003-03-29: These two checks for a random signal
4200 at one stage in the past included checks for an inferior
4201 function call's call dummy's return breakpoint. The original
4202 comment, that went with the test, read:
4203
4204 ``End of a stack dummy. Some systems (e.g. Sony news) give
4205 another signal besides SIGTRAP, so check here as well as
4206 above.''
4207
4208 	 If someone ever tries to get call dummies on a
4209 non-executable stack to work (where the target would stop
4210 with something like a SIGSEGV), then those tests might need
4211 to be re-instated. Given, however, that the tests were only
4212 enabled when momentary breakpoints were not being used, I
4213 suspect that it won't be the case.
4214
4215 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
4216 be necessary for call dummies on a non-executable stack on
4217 SPARC. */
4218
4219 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
4220 ecs->random_signal
4221 = !(bpstat_explains_signal (ecs->event_thread->control.stop_bpstat)
4222 || stopped_by_watchpoint
4223 || ecs->event_thread->control.trap_expected
4224 || (ecs->event_thread->control.step_range_end
4225 && (ecs->event_thread->control.step_resume_breakpoint
4226 == NULL)));
4227 else
4228 {
4229 ecs->random_signal = !bpstat_explains_signal
4230 (ecs->event_thread->control.stop_bpstat);
4231 if (!ecs->random_signal)
4232 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
4233 }
4234 }
4235
4236 /* When we reach this point, we've pretty much decided
4237 that the reason for stopping must've been a random
4238 (unexpected) signal. */
4239
4240 else
4241 ecs->random_signal = 1;
4242
4243 process_event_stop_test:
4244
4245 /* Re-fetch current thread's frame in case we did a
4246 "goto process_event_stop_test" above. */
4247 frame = get_current_frame ();
4248 gdbarch = get_frame_arch (frame);
4249
4250 /* For the program's own signals, act according to
4251 the signal handling tables. */
4252
4253 if (ecs->random_signal)
4254 {
4255 /* Signal not for debugging purposes. */
4256 int printed = 0;
4257 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
4258
4259 if (debug_infrun)
4260 fprintf_unfiltered (gdb_stdlog, "infrun: random signal %d\n",
4261 ecs->event_thread->suspend.stop_signal);
4262
4263 stopped_by_random_signal = 1;
4264
4265 if (signal_print[ecs->event_thread->suspend.stop_signal])
4266 {
4267 printed = 1;
4268 target_terminal_ours_for_output ();
4269 print_signal_received_reason
4270 (ecs->event_thread->suspend.stop_signal);
4271 }
4272 /* Always stop on signals if we're either just gaining control
4273 of the program, or the user explicitly requested this thread
4274 to remain stopped. */
4275 if (stop_soon != NO_STOP_QUIETLY
4276 || ecs->event_thread->stop_requested
4277 || (!inf->detaching
4278 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
4279 {
4280 stop_stepping (ecs);
4281 return;
4282 }
4283 /* If not going to stop, give terminal back
4284 if we took it away. */
4285 else if (printed)
4286 target_terminal_inferior ();
4287
4288 /* Clear the signal if it should not be passed. */
4289 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
4290 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
4291
4292 if (ecs->event_thread->prev_pc == stop_pc
4293 && ecs->event_thread->control.trap_expected
4294 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4295 {
4296 /* We were just starting a new sequence, attempting to
4297 single-step off of a breakpoint and expecting a SIGTRAP.
4298 Instead this signal arrives. This signal will take us out
4299 of the stepping range so GDB needs to remember to, when
4300 the signal handler returns, resume stepping off that
4301 breakpoint. */
4302 /* To simplify things, "continue" is forced to use the same
4303 code paths as single-step - set a breakpoint at the
4304 signal return address and then, once hit, step off that
4305 breakpoint. */
4306 if (debug_infrun)
4307 fprintf_unfiltered (gdb_stdlog,
4308 "infrun: signal arrived while stepping over "
4309 "breakpoint\n");
4310
4311 insert_hp_step_resume_breakpoint_at_frame (frame);
4312 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4313 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4314 ecs->event_thread->control.trap_expected = 0;
4315 keep_going (ecs);
4316 return;
4317 }
4318
4319 if (ecs->event_thread->control.step_range_end != 0
4320 && ecs->event_thread->suspend.stop_signal != TARGET_SIGNAL_0
4321 && (ecs->event_thread->control.step_range_start <= stop_pc
4322 && stop_pc < ecs->event_thread->control.step_range_end)
4323 && frame_id_eq (get_stack_frame_id (frame),
4324 ecs->event_thread->control.step_stack_frame_id)
4325 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4326 {
4327 /* The inferior is about to take a signal that will take it
4328 out of the single step range. Set a breakpoint at the
4329 current PC (which is presumably where the signal handler
4330 will eventually return) and then allow the inferior to
4331 run free.
4332
4333 Note that this is only needed for a signal delivered
4334 while in the single-step range. Nested signals aren't a
4335 problem as they eventually all return. */
4336 if (debug_infrun)
4337 fprintf_unfiltered (gdb_stdlog,
4338 "infrun: signal may take us out of "
4339 "single-step range\n");
4340
4341 insert_hp_step_resume_breakpoint_at_frame (frame);
4342 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4343 ecs->event_thread->control.trap_expected = 0;
4344 keep_going (ecs);
4345 return;
4346 }
4347
4348       /* Note: step_resume_breakpoint may be non-NULL.  This occurs
4349 when either there's a nested signal, or when there's a
4350 pending signal enabled just as the signal handler returns
4351 (leaving the inferior at the step-resume-breakpoint without
4352 actually executing it). Either way continue until the
4353 breakpoint is really hit. */
4354 keep_going (ecs);
4355 return;
4356 }
4357
4358 /* Handle cases caused by hitting a breakpoint. */
4359 {
4360 CORE_ADDR jmp_buf_pc;
4361 struct bpstat_what what;
4362
4363 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
4364
4365 if (what.call_dummy)
4366 {
4367 stop_stack_dummy = what.call_dummy;
4368 }
4369
4370 /* If we hit an internal event that triggers symbol changes, the
4371 current frame will be invalidated within bpstat_what (e.g., if
4372 we hit an internal solib event). Re-fetch it. */
4373 frame = get_current_frame ();
4374 gdbarch = get_frame_arch (frame);
4375
4376 switch (what.main_action)
4377 {
4378 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
4379 /* If we hit the breakpoint at longjmp while stepping, we
4380 install a momentary breakpoint at the target of the
4381 jmp_buf. */
4382
4383 if (debug_infrun)
4384 fprintf_unfiltered (gdb_stdlog,
4385 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
4386
4387 ecs->event_thread->stepping_over_breakpoint = 1;
4388
4389 if (what.is_longjmp)
4390 {
4391 if (!gdbarch_get_longjmp_target_p (gdbarch)
4392 || !gdbarch_get_longjmp_target (gdbarch,
4393 frame, &jmp_buf_pc))
4394 {
4395 if (debug_infrun)
4396 fprintf_unfiltered (gdb_stdlog,
4397 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
4398 "(!gdbarch_get_longjmp_target)\n");
4399 keep_going (ecs);
4400 return;
4401 }
4402
4403 /* We're going to replace the current step-resume breakpoint
4404 with a longjmp-resume breakpoint. */
4405 delete_step_resume_breakpoint (ecs->event_thread);
4406
4407 /* Insert a breakpoint at resume address. */
4408 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4409 }
4410 else
4411 {
4412 struct symbol *func = get_frame_function (frame);
4413
4414 if (func)
4415 check_exception_resume (ecs, frame, func);
4416 }
4417 keep_going (ecs);
4418 return;
4419
4420 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4421 if (debug_infrun)
4422 fprintf_unfiltered (gdb_stdlog,
4423 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4424
4425 if (what.is_longjmp)
4426 {
4427 gdb_assert (ecs->event_thread->control.step_resume_breakpoint
4428 != NULL);
4429 delete_step_resume_breakpoint (ecs->event_thread);
4430 }
4431 else
4432 {
4433 /* There are several cases to consider.
4434
4435 1. The initiating frame no longer exists. In this case
4436 we must stop, because the exception has gone too far.
4437
4438 2. The initiating frame exists, and is the same as the
4439 current frame. We stop, because the exception has been
4440 caught.
4441
4442 3. The initiating frame exists and is different from
4443 the current frame. This means the exception has been
4444 caught beneath the initiating frame, so keep going. */
4445 struct frame_info *init_frame
4446 = frame_find_by_id (ecs->event_thread->initiating_frame);
4447
4448 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
4449 != NULL);
4450 delete_exception_resume_breakpoint (ecs->event_thread);
4451
4452 if (init_frame)
4453 {
4454 struct frame_id current_id
4455 = get_frame_id (get_current_frame ());
4456 if (frame_id_eq (current_id,
4457 ecs->event_thread->initiating_frame))
4458 {
4459 /* Case 2. Fall through. */
4460 }
4461 else
4462 {
4463 /* Case 3. */
4464 keep_going (ecs);
4465 return;
4466 }
4467 }
4468
4469 /* For Cases 1 and 2, remove the step-resume breakpoint,
4470 if it exists. */
4471 delete_step_resume_breakpoint (ecs->event_thread);
4472 }
4473
4474 ecs->event_thread->control.stop_step = 1;
4475 print_end_stepping_range_reason ();
4476 stop_stepping (ecs);
4477 return;
4478
4479 case BPSTAT_WHAT_SINGLE:
4480 if (debug_infrun)
4481 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4482 ecs->event_thread->stepping_over_breakpoint = 1;
4483 /* Still need to check other stuff, at least the case
4484 where we are stepping and step out of the right range. */
4485 break;
4486
4487 case BPSTAT_WHAT_STEP_RESUME:
4488 if (debug_infrun)
4489 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4490
4491 delete_step_resume_breakpoint (ecs->event_thread);
4492 if (ecs->event_thread->control.proceed_to_finish
4493 && execution_direction == EXEC_REVERSE)
4494 {
4495 struct thread_info *tp = ecs->event_thread;
4496
4497 /* We are finishing a function in reverse, and just hit
4498 the step-resume breakpoint at the start address of the
4499 function, and we're almost there -- just need to back
4500 up by one more single-step, which should take us back
4501 to the function call. */
4502 tp->control.step_range_start = tp->control.step_range_end = 1;
4503 keep_going (ecs);
4504 return;
4505 }
4506 fill_in_stop_func (gdbarch, ecs);
4507 if (stop_pc == ecs->stop_func_start
4508 && execution_direction == EXEC_REVERSE)
4509 {
4510 /* We are stepping over a function call in reverse, and
4511 just hit the step-resume breakpoint at the start
4512 address of the function. Go back to single-stepping,
4513 which should take us back to the function call. */
4514 ecs->event_thread->stepping_over_breakpoint = 1;
4515 keep_going (ecs);
4516 return;
4517 }
4518 break;
4519
4520 case BPSTAT_WHAT_STOP_NOISY:
4521 if (debug_infrun)
4522 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4523 stop_print_frame = 1;
4524
4525       /* We are about to nuke the step_resume_breakpoint via the
4526 cleanup chain, so no need to worry about it here. */
4527
4528 stop_stepping (ecs);
4529 return;
4530
4531 case BPSTAT_WHAT_STOP_SILENT:
4532 if (debug_infrun)
4533 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4534 stop_print_frame = 0;
4535
4536       /* We are about to nuke the step_resume_breakpoint via the
4537 cleanup chain, so no need to worry about it here. */
4538
4539 stop_stepping (ecs);
4540 return;
4541
4542 case BPSTAT_WHAT_HP_STEP_RESUME:
4543 if (debug_infrun)
4544 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
4545
4546 delete_step_resume_breakpoint (ecs->event_thread);
4547 if (ecs->event_thread->step_after_step_resume_breakpoint)
4548 {
4549 /* Back when the step-resume breakpoint was inserted, we
4550 were trying to single-step off a breakpoint. Go back
4551 to doing that. */
4552 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4553 ecs->event_thread->stepping_over_breakpoint = 1;
4554 keep_going (ecs);
4555 return;
4556 }
4557 break;
4558
4559 case BPSTAT_WHAT_KEEP_CHECKING:
4560 break;
4561 }
4562 }
4563
4564 /* We come here if we hit a breakpoint but should not
4565 stop for it. Possibly we also were stepping
4566 and should stop for that. So fall through and
4567 test for stepping. But, if not stepping,
4568 do not stop. */
4569
4570 /* In all-stop mode, if we're currently stepping but have stopped in
4571 some other thread, we need to switch back to the stepped thread. */
4572 if (!non_stop)
4573 {
4574 struct thread_info *tp;
4575
4576 tp = iterate_over_threads (currently_stepping_or_nexting_callback,
4577 ecs->event_thread);
4578 if (tp)
4579 {
4580 /* However, if the current thread is blocked on some internal
4581 breakpoint, and we simply need to step over that breakpoint
4582 to get it going again, do that first. */
4583 if ((ecs->event_thread->control.trap_expected
4584 && ecs->event_thread->suspend.stop_signal != TARGET_SIGNAL_TRAP)
4585 || ecs->event_thread->stepping_over_breakpoint)
4586 {
4587 keep_going (ecs);
4588 return;
4589 }
4590
4591 /* If the stepping thread exited, then don't try to switch
4592 back and resume it, which could fail in several different
4593 ways depending on the target. Instead, just keep going.
4594
4595 We can find a stepping dead thread in the thread list in
4596 two cases:
4597
4598 - The target supports thread exit events, and when the
4599 target tries to delete the thread from the thread list,
4600 inferior_ptid pointed at the exiting thread. In such
4601 case, calling delete_thread does not really remove the
4602 thread from the list; instead, the thread is left listed,
4603 with 'exited' state.
4604
4605 - The target's debug interface does not support thread
4606 exit events, and so we have no idea whatsoever if the
4607 previously stepping thread is still alive. For that
4608 reason, we need to synchronously query the target
4609 now. */
4610 if (is_exited (tp->ptid)
4611 || !target_thread_alive (tp->ptid))
4612 {
4613 if (debug_infrun)
4614 fprintf_unfiltered (gdb_stdlog,
4615 "infrun: not switching back to "
4616 "stepped thread, it has vanished\n");
4617
4618 delete_thread (tp->ptid);
4619 keep_going (ecs);
4620 return;
4621 }
4622
4623 /* Otherwise, we no longer expect a trap in the current thread.
4624 Clear the trap_expected flag before switching back -- this is
4625 what keep_going would do as well, if we called it. */
4626 ecs->event_thread->control.trap_expected = 0;
4627
4628 if (debug_infrun)
4629 fprintf_unfiltered (gdb_stdlog,
4630 "infrun: switching back to stepped thread\n");
4631
4632 ecs->event_thread = tp;
4633 ecs->ptid = tp->ptid;
4634 context_switch (ecs->ptid);
4635 keep_going (ecs);
4636 return;
4637 }
4638 }
4639
4640 if (ecs->event_thread->control.step_resume_breakpoint)
4641 {
4642 if (debug_infrun)
4643 fprintf_unfiltered (gdb_stdlog,
4644 "infrun: step-resume breakpoint is inserted\n");
4645
4646 /* Having a step-resume breakpoint overrides anything
4647 else having to do with stepping commands until
4648 that breakpoint is reached. */
4649 keep_going (ecs);
4650 return;
4651 }
4652
4653 if (ecs->event_thread->control.step_range_end == 0)
4654 {
4655 if (debug_infrun)
4656 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4657 /* Likewise if we aren't even stepping. */
4658 keep_going (ecs);
4659 return;
4660 }
4661
4662 /* Re-fetch current thread's frame in case the code above caused
4663 the frame cache to be re-initialized, making our FRAME variable
4664 a dangling pointer. */
4665 frame = get_current_frame ();
4666 gdbarch = get_frame_arch (frame);
4667 fill_in_stop_func (gdbarch, ecs);
4668
4669 /* If stepping through a line, keep going if still within it.
4670
4671 Note that step_range_end is the address of the first instruction
4672 beyond the step range, and NOT the address of the last instruction
4673 within it!
4674
4675 Note also that during reverse execution, we may be stepping
4676 through a function epilogue and therefore must detect when
4677 the current-frame changes in the middle of a line. */
4678
4679 if (stop_pc >= ecs->event_thread->control.step_range_start
4680 && stop_pc < ecs->event_thread->control.step_range_end
4681 && (execution_direction != EXEC_REVERSE
4682 || frame_id_eq (get_frame_id (frame),
4683 ecs->event_thread->control.step_frame_id)))
4684 {
4685 if (debug_infrun)
4686 fprintf_unfiltered
4687 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4688 paddress (gdbarch, ecs->event_thread->control.step_range_start),
4689 paddress (gdbarch, ecs->event_thread->control.step_range_end));
4690
4691 /* When stepping backward, stop at beginning of line range
4692 (unless it's the function entry point, in which case
4693 keep going back to the call point). */
4694 if (stop_pc == ecs->event_thread->control.step_range_start
4695 && stop_pc != ecs->stop_func_start
4696 && execution_direction == EXEC_REVERSE)
4697 {
4698 ecs->event_thread->control.stop_step = 1;
4699 print_end_stepping_range_reason ();
4700 stop_stepping (ecs);
4701 }
4702 else
4703 keep_going (ecs);
4704
4705 return;
4706 }
4707
4708 /* We stepped out of the stepping range. */
4709
4710 /* If we are stepping at the source level and entered the runtime
4711 loader dynamic symbol resolution code...
4712
4713 EXEC_FORWARD: we keep on single stepping until we exit the run
4714 time loader code and reach the callee's address.
4715
4716 EXEC_REVERSE: we've already executed the callee (backward), and
4717 the runtime loader code is handled just like any other
4718 undebuggable function call. Now we need only keep stepping
4719 backward through the trampoline code, and that's handled further
4720 down, so there is nothing for us to do here. */
4721
4722 if (execution_direction != EXEC_REVERSE
4723 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4724 && in_solib_dynsym_resolve_code (stop_pc))
4725 {
4726 CORE_ADDR pc_after_resolver =
4727 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4728
4729 if (debug_infrun)
4730 fprintf_unfiltered (gdb_stdlog,
4731 "infrun: stepped into dynsym resolve code\n");
4732
4733 if (pc_after_resolver)
4734 {
4735 /* Set up a step-resume breakpoint at the address
4736 indicated by SKIP_SOLIB_RESOLVER. */
4737 struct symtab_and_line sr_sal;
4738
4739 init_sal (&sr_sal);
4740 sr_sal.pc = pc_after_resolver;
4741 sr_sal.pspace = get_frame_program_space (frame);
4742
4743 insert_step_resume_breakpoint_at_sal (gdbarch,
4744 sr_sal, null_frame_id);
4745 }
4746
4747 keep_going (ecs);
4748 return;
4749 }
4750
4751 if (ecs->event_thread->control.step_range_end != 1
4752 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4753 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4754 && get_frame_type (frame) == SIGTRAMP_FRAME)
4755 {
4756 if (debug_infrun)
4757 fprintf_unfiltered (gdb_stdlog,
4758 "infrun: stepped into signal trampoline\n");
4759 /* The inferior, while doing a "step" or "next", has ended up in
4760 a signal trampoline (either by a signal being delivered or by
4761 the signal handler returning). Just single-step until the
4762 inferior leaves the trampoline (either by calling the handler
4763 or returning). */
4764 keep_going (ecs);
4765 return;
4766 }
4767
4768 /* Check for subroutine calls. The check for the current frame
4769 equalling the step ID is not necessary - the check of the
4770 previous frame's ID is sufficient - but it is a common case and
4771 cheaper than checking the previous frame's ID.
4772
4773 NOTE: frame_id_eq will never report two invalid frame IDs as
4774 being equal, so to get into this block, both the current and
4775 previous frame must have valid frame IDs. */
4776 /* The outer_frame_id check is a heuristic to detect stepping
4777 through startup code. If we step over an instruction which
4778 sets the stack pointer from an invalid value to a valid value,
4779 we may detect that as a subroutine call from the mythical
4780 "outermost" function. This could be fixed by marking
4781 outermost frames as !stack_p,code_p,special_p. Then the
4782 initial outermost frame, before sp was valid, would
4783 have code_addr == &_start. See the comment in frame_id_eq
4784 for more. */
4785 if (!frame_id_eq (get_stack_frame_id (frame),
4786 ecs->event_thread->control.step_stack_frame_id)
4787 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4788 ecs->event_thread->control.step_stack_frame_id)
4789 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
4790 outer_frame_id)
4791 || step_start_function != find_pc_function (stop_pc))))
4792 {
4793 CORE_ADDR real_stop_pc;
4794
4795 if (debug_infrun)
4796 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4797
4798 if ((ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
4799 || ((ecs->event_thread->control.step_range_end == 1)
4800 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4801 ecs->stop_func_start)))
4802 {
4803 /* I presume that step_over_calls is only 0 when we're
4804 supposed to be stepping at the assembly language level
4805 ("stepi"). Just stop. */
4806 	  /* Also, maybe we just did a "nexti" inside a prologue, so we
4807 thought it was a subroutine call but it was not. Stop as
4808 well. FENN */
4809 /* And this works the same backward as frontward. MVS */
4810 ecs->event_thread->control.stop_step = 1;
4811 print_end_stepping_range_reason ();
4812 stop_stepping (ecs);
4813 return;
4814 }
4815
4816 /* Reverse stepping through solib trampolines. */
4817
4818 if (execution_direction == EXEC_REVERSE
4819 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
4820 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4821 || (ecs->stop_func_start == 0
4822 && in_solib_dynsym_resolve_code (stop_pc))))
4823 {
4824 /* Any solib trampoline code can be handled in reverse
4825 by simply continuing to single-step. We have already
4826 executed the solib function (backwards), and a few
4827 steps will take us back through the trampoline to the
4828 caller. */
4829 keep_going (ecs);
4830 return;
4831 }
4832
4833 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4834 {
4835 /* We're doing a "next".
4836
4837 Normal (forward) execution: set a breakpoint at the
4838 callee's return address (the address at which the caller
4839 will resume).
4840
4841 Reverse (backward) execution. set the step-resume
4842 breakpoint at the start of the function that we just
4843 stepped into (backwards), and continue to there. When we
4844 get there, we'll need to single-step back to the caller. */
4845
4846 if (execution_direction == EXEC_REVERSE)
4847 {
4848 struct symtab_and_line sr_sal;
4849
4850 /* Normal function call return (static or dynamic). */
4851 init_sal (&sr_sal);
4852 sr_sal.pc = ecs->stop_func_start;
4853 sr_sal.pspace = get_frame_program_space (frame);
4854 insert_step_resume_breakpoint_at_sal (gdbarch,
4855 sr_sal, null_frame_id);
4856 }
4857 else
4858 insert_step_resume_breakpoint_at_caller (frame);
4859
4860 keep_going (ecs);
4861 return;
4862 }
4863
4864 /* If we are in a function call trampoline (a stub between the
4865 calling routine and the real function), locate the real
4866 function. That's what tells us (a) whether we want to step
4867 into it at all, and (b) what prologue we want to run to the
4868 end of, if we do step into it. */
4869 real_stop_pc = skip_language_trampoline (frame, stop_pc);
4870 if (real_stop_pc == 0)
4871 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4872 if (real_stop_pc != 0)
4873 ecs->stop_func_start = real_stop_pc;
4874
4875 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
4876 {
4877 struct symtab_and_line sr_sal;
4878
4879 init_sal (&sr_sal);
4880 sr_sal.pc = ecs->stop_func_start;
4881 sr_sal.pspace = get_frame_program_space (frame);
4882
4883 insert_step_resume_breakpoint_at_sal (gdbarch,
4884 sr_sal, null_frame_id);
4885 keep_going (ecs);
4886 return;
4887 }
4888
4889 /* If we have line number information for the function we are
4890 thinking of stepping into, step into it.
4891
4892 If there are several symtabs at that PC (e.g. with include
4893 files), we just want to know whether *any* of them have line
4894 numbers. find_pc_line handles this. */
4895 {
4896 struct symtab_and_line tmp_sal;
4897
4898 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
4899 if (tmp_sal.line != 0)
4900 {
4901 if (execution_direction == EXEC_REVERSE)
4902 handle_step_into_function_backward (gdbarch, ecs);
4903 else
4904 handle_step_into_function (gdbarch, ecs);
4905 return;
4906 }
4907 }
4908
4909 /* If we have no line number and the step-stop-if-no-debug is
4910 set, we stop the step so that the user has a chance to switch
4911 to assembly mode. */
4912 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4913 && step_stop_if_no_debug)
4914 {
4915 ecs->event_thread->control.stop_step = 1;
4916 print_end_stepping_range_reason ();
4917 stop_stepping (ecs);
4918 return;
4919 }
4920
4921 if (execution_direction == EXEC_REVERSE)
4922 {
4923 /* Set a breakpoint at callee's start address.
4924 From there we can step once and be back in the caller. */
4925 struct symtab_and_line sr_sal;
4926
4927 init_sal (&sr_sal);
4928 sr_sal.pc = ecs->stop_func_start;
4929 sr_sal.pspace = get_frame_program_space (frame);
4930 insert_step_resume_breakpoint_at_sal (gdbarch,
4931 sr_sal, null_frame_id);
4932 }
4933 else
4934 /* Set a breakpoint at callee's return address (the address
4935 at which the caller will resume). */
4936 insert_step_resume_breakpoint_at_caller (frame);
4937
4938 keep_going (ecs);
4939 return;
4940 }
4941
4942 /* Reverse stepping through solib trampolines. */
4943
4944 if (execution_direction == EXEC_REVERSE
4945 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
4946 {
4947 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4948 || (ecs->stop_func_start == 0
4949 && in_solib_dynsym_resolve_code (stop_pc)))
4950 {
4951 /* Any solib trampoline code can be handled in reverse
4952 by simply continuing to single-step. We have already
4953 executed the solib function (backwards), and a few
4954 steps will take us back through the trampoline to the
4955 caller. */
4956 keep_going (ecs);
4957 return;
4958 }
4959 else if (in_solib_dynsym_resolve_code (stop_pc))
4960 {
4961 /* Stepped backward into the solib dynsym resolver.
4962 Set a breakpoint at its start and continue, then
4963 one more step will take us out. */
4964 struct symtab_and_line sr_sal;
4965
4966 init_sal (&sr_sal);
4967 sr_sal.pc = ecs->stop_func_start;
4968 sr_sal.pspace = get_frame_program_space (frame);
4969 insert_step_resume_breakpoint_at_sal (gdbarch,
4970 sr_sal, null_frame_id);
4971 keep_going (ecs);
4972 return;
4973 }
4974 }
4975
4976 /* If we're in the return path from a shared library trampoline,
4977 we want to proceed through the trampoline when stepping. */
4978 if (gdbarch_in_solib_return_trampoline (gdbarch,
4979 stop_pc, ecs->stop_func_name))
4980 {
4981 /* Determine where this trampoline returns. */
4982 CORE_ADDR real_stop_pc;
4983
4984 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4985
4986 if (debug_infrun)
4987 fprintf_unfiltered (gdb_stdlog,
4988 "infrun: stepped into solib return tramp\n");
4989
4990 /* Only proceed through if we know where it's going. */
4991 if (real_stop_pc)
4992 {
4993 /* And put the step-breakpoint there and go until there. */
4994 struct symtab_and_line sr_sal;
4995
4996 init_sal (&sr_sal); /* initialize to zeroes */
4997 sr_sal.pc = real_stop_pc;
4998 sr_sal.section = find_pc_overlay (sr_sal.pc);
4999 sr_sal.pspace = get_frame_program_space (frame);
5000
5001 /* Do not specify what the fp should be when we stop since
5002 on some machines the prologue is where the new fp value
5003 is established. */
5004 insert_step_resume_breakpoint_at_sal (gdbarch,
5005 sr_sal, null_frame_id);
5006
5007 /* Restart without fiddling with the step ranges or
5008 other state. */
5009 keep_going (ecs);
5010 return;
5011 }
5012 }
5013
5014 stop_pc_sal = find_pc_line (stop_pc, 0);
5015
5016 /* NOTE: tausq/2004-05-24: This if block used to be done before all
5017 the trampoline processing logic; however, there are some trampolines
5018 that have no names, so we should do trampoline handling first. */
5019 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5020 && ecs->stop_func_name == NULL
5021 && stop_pc_sal.line == 0)
5022 {
5023 if (debug_infrun)
5024 fprintf_unfiltered (gdb_stdlog,
5025 "infrun: stepped into undebuggable function\n");
5026
5027 /* The inferior just stepped into, or returned to, an
5028 undebuggable function (where there is no debugging information
5029 and no line number corresponding to the address where the
5030 inferior stopped). Since we want to skip this kind of code,
5031 we keep going until the inferior returns from this
5032 function - unless the user has asked us not to (via
5033 set step-mode) or we no longer know how to get back
5034 to the call site. */
5035 if (step_stop_if_no_debug
5036 || !frame_id_p (frame_unwind_caller_id (frame)))
5037 {
5038 /* If we have no line number and the step-stop-if-no-debug
5039 is set, we stop the step so that the user has a chance to
5040 switch to assembly mode. */
5041 ecs->event_thread->control.stop_step = 1;
5042 print_end_stepping_range_reason ();
5043 stop_stepping (ecs);
5044 return;
5045 }
5046 else
5047 {
5048 /* Set a breakpoint at callee's return address (the address
5049 at which the caller will resume). */
5050 insert_step_resume_breakpoint_at_caller (frame);
5051 keep_going (ecs);
5052 return;
5053 }
5054 }
5055
5056 if (ecs->event_thread->control.step_range_end == 1)
5057 {
5058 /* It is stepi or nexti. We always want to stop stepping after
5059 one instruction. */
5060 if (debug_infrun)
5061 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
5062 ecs->event_thread->control.stop_step = 1;
5063 print_end_stepping_range_reason ();
5064 stop_stepping (ecs);
5065 return;
5066 }
5067
5068 if (stop_pc_sal.line == 0)
5069 {
5070 /* We have no line number information. That means to stop
5071 stepping (does this always happen right after one instruction,
5072 when we do "s" in a function with no line numbers,
5073 or can this happen as a result of a return or longjmp?). */
5074 if (debug_infrun)
5075 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
5076 ecs->event_thread->control.stop_step = 1;
5077 print_end_stepping_range_reason ();
5078 stop_stepping (ecs);
5079 return;
5080 }
5081
5082 /* Look for "calls" to inlined functions, part one. If the inline
5083 frame machinery detected some skipped call sites, we have entered
5084 a new inline function. */
5085
5086 if (frame_id_eq (get_frame_id (get_current_frame ()),
5087 ecs->event_thread->control.step_frame_id)
5088 && inline_skipped_frames (ecs->ptid))
5089 {
5090 struct symtab_and_line call_sal;
5091
5092 if (debug_infrun)
5093 fprintf_unfiltered (gdb_stdlog,
5094 "infrun: stepped into inlined function\n");
5095
5096 find_frame_sal (get_current_frame (), &call_sal);
5097
5098 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
5099 {
5100 /* For "step", we're going to stop. But if the call site
5101 for this inlined function is on the same source line as
5102 we were previously stepping, go down into the function
5103 first. Otherwise stop at the call site. */
5104
5105 if (call_sal.line == ecs->event_thread->current_line
5106 && call_sal.symtab == ecs->event_thread->current_symtab)
5107 step_into_inline_frame (ecs->ptid);
5108
5109 ecs->event_thread->control.stop_step = 1;
5110 print_end_stepping_range_reason ();
5111 stop_stepping (ecs);
5112 return;
5113 }
5114 else
5115 {
5116 /* For "next", we should stop at the call site if it is on a
5117 different source line. Otherwise continue through the
5118 inlined function. */
5119 if (call_sal.line == ecs->event_thread->current_line
5120 && call_sal.symtab == ecs->event_thread->current_symtab)
5121 keep_going (ecs);
5122 else
5123 {
5124 ecs->event_thread->control.stop_step = 1;
5125 print_end_stepping_range_reason ();
5126 stop_stepping (ecs);
5127 }
5128 return;
5129 }
5130 }
5131
5132 /* Look for "calls" to inlined functions, part two. If we are still
5133 in the same real function we were stepping through, but we have
5134 to go further up to find the exact frame ID, we are stepping
5135 through a more inlined call beyond its call site. */
5136
5137 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
5138 && !frame_id_eq (get_frame_id (get_current_frame ()),
5139 ecs->event_thread->control.step_frame_id)
5140 && stepped_in_from (get_current_frame (),
5141 ecs->event_thread->control.step_frame_id))
5142 {
5143 if (debug_infrun)
5144 fprintf_unfiltered (gdb_stdlog,
5145 "infrun: stepping through inlined function\n");
5146
5147 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5148 keep_going (ecs);
5149 else
5150 {
5151 ecs->event_thread->control.stop_step = 1;
5152 print_end_stepping_range_reason ();
5153 stop_stepping (ecs);
5154 }
5155 return;
5156 }
5157
5158 if ((stop_pc == stop_pc_sal.pc)
5159 && (ecs->event_thread->current_line != stop_pc_sal.line
5160 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
5161 {
5162 /* We are at the start of a different line. So stop. Note that
5163 we don't stop if we step into the middle of a different line.
5164 That is said to make things like for (;;) statements work
5165 better. */
5166 if (debug_infrun)
5167 fprintf_unfiltered (gdb_stdlog,
5168 "infrun: stepped to a different line\n");
5169 ecs->event_thread->control.stop_step = 1;
5170 print_end_stepping_range_reason ();
5171 stop_stepping (ecs);
5172 return;
5173 }
5174
5175 /* We aren't done stepping.
5176
5177 Optimize by setting the stepping range to the line.
5178 (We might not be in the original line, but if we entered a
5179 new line in mid-statement, we continue stepping. This makes
5180 things like for(;;) statements work better.) */
5181
5182 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
5183 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
5184 set_step_info (frame, stop_pc_sal);
5185
5186 if (debug_infrun)
5187 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
5188 keep_going (ecs);
5189 }
5190
5191 /* Is thread TP in the middle of single-stepping? */
5192
5193 static int
5194 currently_stepping (struct thread_info *tp)
5195 {
5196 return ((tp->control.step_range_end
5197 && tp->control.step_resume_breakpoint == NULL)
5198 || tp->control.trap_expected
5199 || bpstat_should_step ());
5200 }
5201
5202 /* Returns true if any thread *but* the one passed in "data" is in the
5203 middle of stepping or of handling a "next". */
5204
5205 static int
5206 currently_stepping_or_nexting_callback (struct thread_info *tp, void *data)
5207 {
5208 if (tp == data)
5209 return 0;
5210
5211 return (tp->control.step_range_end
5212 || tp->control.trap_expected);
5213 }
5214
5215 /* Inferior has stepped into a subroutine call with source code that
5216 we should not step over. Step to the first line of code in
5217 it. */
5218
5219 static void
5220 handle_step_into_function (struct gdbarch *gdbarch,
5221 struct execution_control_state *ecs)
5222 {
5223 struct symtab *s;
5224 struct symtab_and_line stop_func_sal, sr_sal;
5225
5226 fill_in_stop_func (gdbarch, ecs);
5227
5228 s = find_pc_symtab (stop_pc);
5229 if (s && s->language != language_asm)
5230 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5231 ecs->stop_func_start);
5232
5233 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
5234 /* Use the step_resume_break to step until the end of the prologue,
5235 even if that involves jumps (as it seems to on the vax under
5236 4.2). */
5237 /* If the prologue ends in the middle of a source line, continue to
5238 the end of that source line (if it is still within the function).
5239 Otherwise, just go to end of prologue. */
5240 if (stop_func_sal.end
5241 && stop_func_sal.pc != ecs->stop_func_start
5242 && stop_func_sal.end < ecs->stop_func_end)
5243 ecs->stop_func_start = stop_func_sal.end;
5244
5245 /* Architectures which require breakpoint adjustment might not be able
5246 to place a breakpoint at the computed address. If so, the test
5247 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
5248 ecs->stop_func_start to an address at which a breakpoint may be
5249 legitimately placed.
5250
5251 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
5252 made, GDB will enter an infinite loop when stepping through
5253 optimized code consisting of VLIW instructions which contain
5254 subinstructions corresponding to different source lines. On
5255 FR-V, it's not permitted to place a breakpoint on any but the
5256 first subinstruction of a VLIW instruction. When a breakpoint is
5257 set, GDB will adjust the breakpoint address to the beginning of
5258 the VLIW instruction. Thus, we need to make the corresponding
5259 adjustment here when computing the stop address. */
5260
5261 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
5262 {
5263 ecs->stop_func_start
5264 = gdbarch_adjust_breakpoint_address (gdbarch,
5265 ecs->stop_func_start);
5266 }
5267
5268 if (ecs->stop_func_start == stop_pc)
5269 {
5270 /* We are already there: stop now. */
5271 ecs->event_thread->control.stop_step = 1;
5272 print_end_stepping_range_reason ();
5273 stop_stepping (ecs);
5274 return;
5275 }
5276 else
5277 {
5278 /* Put the step-breakpoint there and go until there. */
5279 init_sal (&sr_sal); /* initialize to zeroes */
5280 sr_sal.pc = ecs->stop_func_start;
5281 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
5282 sr_sal.pspace = get_frame_program_space (get_current_frame ());
5283
5284 /* Do not specify what the fp should be when we stop since on
5285 some machines the prologue is where the new fp value is
5286 established. */
5287 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
5288
5289 /* And make sure stepping stops right away then. */
5290 ecs->event_thread->control.step_range_end
5291 = ecs->event_thread->control.step_range_start;
5292 }
5293 keep_going (ecs);
5294 }
5295
5296 /* Inferior has stepped backward into a subroutine call with source
5297 code that we should not step over. Step to the beginning of the
5298 last line of code in it. */
5299
5300 static void
5301 handle_step_into_function_backward (struct gdbarch *gdbarch,
5302 struct execution_control_state *ecs)
5303 {
5304 struct symtab *s;
5305 struct symtab_and_line stop_func_sal;
5306
5307 fill_in_stop_func (gdbarch, ecs);
5308
5309 s = find_pc_symtab (stop_pc);
5310 if (s && s->language != language_asm)
5311 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5312 ecs->stop_func_start);
5313
5314 stop_func_sal = find_pc_line (stop_pc, 0);
5315
5316 /* OK, we're just going to keep stepping here. */
5317 if (stop_func_sal.pc == stop_pc)
5318 {
5319 /* We're there already. Just stop stepping now. */
5320 ecs->event_thread->control.stop_step = 1;
5321 print_end_stepping_range_reason ();
5322 stop_stepping (ecs);
5323 }
5324 else
5325 {
5326 /* Else just reset the step range and keep going.
5327 No step-resume breakpoint; they don't work for
5328 epilogues, which can have multiple entry paths. */
5329 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
5330 ecs->event_thread->control.step_range_end = stop_func_sal.end;
5331 keep_going (ecs);
5332 }
5333 return;
5334 }
5335
5336 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
5337 This is used both to skip over function calls and to skip other code (such as trampolines and prologues). */
5338
5339 static void
5340 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
5341 struct symtab_and_line sr_sal,
5342 struct frame_id sr_id,
5343 enum bptype sr_type)
5344 {
5345 /* There should never be more than one step-resume or longjmp-resume
5346 breakpoint per thread, so we should never be setting a new
5347 step_resume_breakpoint when one is already active. */
5348 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5349 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
5350
5351 if (debug_infrun)
5352 fprintf_unfiltered (gdb_stdlog,
5353 "infrun: inserting step-resume breakpoint at %s\n",
5354 paddress (gdbarch, sr_sal.pc));
5355
5356 inferior_thread ()->control.step_resume_breakpoint
5357 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type);
5358 }
5359
5360 void
5361 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
5362 struct symtab_and_line sr_sal,
5363 struct frame_id sr_id)
5364 {
5365 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
5366 sr_sal, sr_id,
5367 bp_step_resume);
5368 }
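/* Illustrative sketch, NOT part of GDB: the pattern the callers above
   use with insert_step_resume_breakpoint_at_sal.  They build a
   symtab_and_line for the resume address, insert the breakpoint
   without pinning it to a particular frame, and then let the thread
   run (keep_going) until it is hit.  Kept under "#if 0" as it is an
   example only.  */
#if 0
static void
example_insert_step_resume (struct gdbarch *gdbarch,
                            struct frame_info *frame,
                            CORE_ADDR resume_pc)
{
  struct symtab_and_line sr_sal;

  init_sal (&sr_sal);		/* initialize to zeroes */
  sr_sal.pc = resume_pc;
  sr_sal.section = find_pc_overlay (sr_sal.pc);
  sr_sal.pspace = get_frame_program_space (frame);

  insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
}
#endif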
5369
5370 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
5371 This is used to skip a potential signal handler.
5372
5373 This is called with the interrupted function's frame. The signal
5374 handler, when it returns, will resume the interrupted function at
5375 RETURN_FRAME.pc. */
5376
5377 static void
5378 insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
5379 {
5380 struct symtab_and_line sr_sal;
5381 struct gdbarch *gdbarch;
5382
5383 gdb_assert (return_frame != NULL);
5384 init_sal (&sr_sal); /* initialize to zeros */
5385
5386 gdbarch = get_frame_arch (return_frame);
5387 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
5388 sr_sal.section = find_pc_overlay (sr_sal.pc);
5389 sr_sal.pspace = get_frame_program_space (return_frame);
5390
5391 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
5392 get_stack_frame_id (return_frame),
5393 bp_hp_step_resume);
5394 }
5395
5396 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
5397 is used to skip a function after stepping into it (for "next" or if
5398 the called function has no debugging information).
5399
5400 The current function has almost always been reached by single
5401 stepping a call or return instruction. NEXT_FRAME belongs to the
5402 current function, and the breakpoint will be set at the caller's
5403 resume address.
5404
5405 This is a separate function rather than reusing
5406 insert_hp_step_resume_breakpoint_at_frame in order to avoid
5407 get_prev_frame, which may stop prematurely (see the implementation
5408 of frame_unwind_caller_id for an example). */
5409
5410 static void
5411 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
5412 {
5413 struct symtab_and_line sr_sal;
5414 struct gdbarch *gdbarch;
5415
5416 /* We shouldn't have gotten here if we don't know where the call site
5417 is. */
5418 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
5419
5420 init_sal (&sr_sal); /* initialize to zeros */
5421
5422 gdbarch = frame_unwind_caller_arch (next_frame);
5423 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
5424 frame_unwind_caller_pc (next_frame));
5425 sr_sal.section = find_pc_overlay (sr_sal.pc);
5426 sr_sal.pspace = frame_unwind_program_space (next_frame);
5427
5428 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5429 frame_unwind_caller_id (next_frame));
5430 }
5431
5432 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
5433 new breakpoint at the target of a jmp_buf. The handling of
5434 longjmp-resume uses the same mechanisms used for handling
5435 "step-resume" breakpoints. */
5436
5437 static void
5438 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
5439 {
5440 /* There should never be more than one step-resume or longjmp-resume
5441 breakpoint per thread, so we should never be setting a new
5442 longjmp_resume_breakpoint when one is already active. */
5443 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5444
5445 if (debug_infrun)
5446 fprintf_unfiltered (gdb_stdlog,
5447 "infrun: inserting longjmp-resume breakpoint at %s\n",
5448 paddress (gdbarch, pc));
5449
5450 inferior_thread ()->control.step_resume_breakpoint =
5451 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
5452 }
5453
5454 /* Insert an exception resume breakpoint. TP is the thread throwing
5455 the exception. The block B is the block of the unwinder debug hook
5456 function. FRAME is the frame corresponding to the call to this
5457 function. SYM is the symbol of the function argument holding the
5458 target PC of the exception. */
5459
5460 static void
5461 insert_exception_resume_breakpoint (struct thread_info *tp,
5462 struct block *b,
5463 struct frame_info *frame,
5464 struct symbol *sym)
5465 {
5466 struct gdb_exception e;
5467
5468 /* We want to ignore errors here. */
5469 TRY_CATCH (e, RETURN_MASK_ERROR)
5470 {
5471 struct symbol *vsym;
5472 struct value *value;
5473 CORE_ADDR handler;
5474 struct breakpoint *bp;
5475
5476 vsym = lookup_symbol (SYMBOL_LINKAGE_NAME (sym), b, VAR_DOMAIN, NULL);
5477 value = read_var_value (vsym, frame);
5478 /* If the value was optimized out, revert to the old behavior. */
5479 if (! value_optimized_out (value))
5480 {
5481 handler = value_as_address (value);
5482
5483 if (debug_infrun)
5484 fprintf_unfiltered (gdb_stdlog,
5485 "infrun: exception resume at %lx\n",
5486 (unsigned long) handler);
5487
5488 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5489 handler, bp_exception_resume);
5490 bp->thread = tp->num;
5491 inferior_thread ()->control.exception_resume_breakpoint = bp;
5492 }
5493 }
5494 }
5495
5496 /* This is called when an exception has been intercepted. Check to
5497 see whether the exception's destination is of interest, and if so,
5498 set an exception resume breakpoint there. */
5499
5500 static void
5501 check_exception_resume (struct execution_control_state *ecs,
5502 struct frame_info *frame, struct symbol *func)
5503 {
5504 struct gdb_exception e;
5505
5506 TRY_CATCH (e, RETURN_MASK_ERROR)
5507 {
5508 struct block *b;
5509 struct dict_iterator iter;
5510 struct symbol *sym;
5511 int argno = 0;
5512
5513 /* The exception breakpoint is a thread-specific breakpoint on
5514 the unwinder's debug hook, declared as:
5515
5516 void _Unwind_DebugHook (void *cfa, void *handler);
5517
5518 The CFA argument indicates the frame to which control is
5519 about to be transferred. HANDLER is the destination PC.
5520
5521 We ignore the CFA and set a temporary breakpoint at HANDLER.
5522 This is not extremely efficient but it avoids issues in gdb
5523 with computing the DWARF CFA, and it also works even in weird
5524 cases such as throwing an exception from inside a signal
5525 handler. */
5526
5527 b = SYMBOL_BLOCK_VALUE (func);
5528 ALL_BLOCK_SYMBOLS (b, iter, sym)
5529 {
5530 if (!SYMBOL_IS_ARGUMENT (sym))
5531 continue;
5532
5533 if (argno == 0)
5534 ++argno;
5535 else
5536 {
5537 insert_exception_resume_breakpoint (ecs->event_thread,
5538 b, frame, sym);
5539 break;
5540 }
5541 }
5542 }
5543 }
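/* Illustrative sketch, NOT part of GDB: roughly what the unwinder-side
   debug hook named in the comment above looks like in the runtime
   library.  Its body is deliberately empty; it exists only so that a
   breakpoint can be set on it, and check_exception_resume reads the
   HANDLER argument from the inferior when that breakpoint is hit.
   Kept under "#if 0" as it is an example only.  */
#if 0
void
_Unwind_DebugHook (void *cfa, void *handler)
{
  /* Nothing to do here; the debugger inspects CFA and HANDLER.  */
  (void) cfa;
  (void) handler;
}
#endif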
5544
5545 static void
5546 stop_stepping (struct execution_control_state *ecs)
5547 {
5548 if (debug_infrun)
5549 fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n");
5550
5551 /* Let callers know we don't want to wait for the inferior anymore. */
5552 ecs->wait_some_more = 0;
5553 }
5554
5555 /* This function handles various cases where we need to continue
5556 waiting for the inferior. */
5557 /* (Used to be the keep_going: label in the old wait_for_inferior). */
5558
5559 static void
5560 keep_going (struct execution_control_state *ecs)
5561 {
5562 /* Make sure normal_stop is called if we get a QUIT handled before
5563 reaching resume. */
5564 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
5565
5566 /* Save the pc before execution, to compare with pc after stop. */
5567 ecs->event_thread->prev_pc
5568 = regcache_read_pc (get_thread_regcache (ecs->ptid));
5569
5570 /* If we did not do break;, it means we should keep running the
5571 inferior and not return to the debugger. */
5572
5573 if (ecs->event_thread->control.trap_expected
5574 && ecs->event_thread->suspend.stop_signal != TARGET_SIGNAL_TRAP)
5575 {
5576 /* We took a signal (which we are supposed to pass through to
5577 the inferior, else we'd not get here) and we haven't yet
5578 gotten our trap. Simply continue. */
5579
5580 discard_cleanups (old_cleanups);
5581 resume (currently_stepping (ecs->event_thread),
5582 ecs->event_thread->suspend.stop_signal);
5583 }
5584 else
5585 {
5586 /* Either the trap was not expected, but we are continuing
5587 anyway (the user asked that this signal be passed to the
5588 child)
5589 -- or --
5590 The signal was SIGTRAP, e.g. it was our signal, but we
5591 decided we should resume from it.
5592
5593 We're going to run this baby now!
5594
5595 Note that insert_breakpoints won't try to re-insert
5596 already inserted breakpoints. Therefore, we don't
5597 care if breakpoints were already inserted, or not. */
5598
5599 if (ecs->event_thread->stepping_over_breakpoint)
5600 {
5601 struct regcache *thread_regcache = get_thread_regcache (ecs->ptid);
5602
5603 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
5604 /* Since we can't do a displaced step, we have to remove
5605 the breakpoint while we step it. To keep things
5606 simple, we remove them all. */
5607 remove_breakpoints ();
5608 }
5609 else
5610 {
5611 struct gdb_exception e;
5612
5613 /* Stop stepping when inserting breakpoints
5614 has failed. */
5615 TRY_CATCH (e, RETURN_MASK_ERROR)
5616 {
5617 insert_breakpoints ();
5618 }
5619 if (e.reason < 0)
5620 {
5621 exception_print (gdb_stderr, e);
5622 stop_stepping (ecs);
5623 return;
5624 }
5625 }
5626
5627 ecs->event_thread->control.trap_expected
5628 = ecs->event_thread->stepping_over_breakpoint;
5629
5630 /* Do not deliver SIGNAL_TRAP (except when the user explicitly
5631 specifies that such a signal should be delivered to the
5632 target program).
5633
5634 Typically, this would occur when a user is debugging a
5635 target monitor on a simulator: the target monitor sets a
5636 breakpoint; the simulator encounters this break-point and
5637 halts the simulation, handing control to GDB; GDB, noting
5638 that the break-point isn't valid, returns control back to the
5639 simulator; the simulator then delivers the hardware
5640 equivalent of a SIGNAL_TRAP to the program being debugged. */
5641
5642 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
5643 && !signal_program[ecs->event_thread->suspend.stop_signal])
5644 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
5645
5646 discard_cleanups (old_cleanups);
5647 resume (currently_stepping (ecs->event_thread),
5648 ecs->event_thread->suspend.stop_signal);
5649 }
5650
5651 prepare_to_wait (ecs);
5652 }
5653
5654 /* This function normally comes after a resume, before
5655 handle_inferior_event exits. It takes care of any last bits of
5656 housekeeping, and sets the all-important wait_some_more flag. */
5657
5658 static void
5659 prepare_to_wait (struct execution_control_state *ecs)
5660 {
5661 if (debug_infrun)
5662 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
5663
5664 /* This is the old end of the while loop. Let everybody know we
5665 want to wait for the inferior some more and get called again
5666 soon. */
5667 ecs->wait_some_more = 1;
5668 }
5669
5670 /* Several print_*_reason functions to print why the inferior has stopped.
5671 We always print something when the inferior exits, or receives a signal.
5672 The rest of the cases are dealt with later on in normal_stop and
5673 print_it_typical. Ideally there should be a call to one of these
5674 print_*_reason functions from handle_inferior_event each time
5675 stop_stepping is called. */
5676
5677 /* Print why the inferior has stopped.
5678 We are done with a step/next/si/ni command, print why the inferior has
5679 stopped. For now print nothing. Print a message only if not in the middle
5680 of doing a "step n" operation for n > 1. */
5681
5682 static void
5683 print_end_stepping_range_reason (void)
5684 {
5685 if ((!inferior_thread ()->step_multi
5686 || !inferior_thread ()->control.stop_step)
5687 && ui_out_is_mi_like_p (current_uiout))
5688 ui_out_field_string (current_uiout, "reason",
5689 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
5690 }
5691
5692 /* The inferior was terminated by a signal, print why it stopped. */
5693
5694 static void
5695 print_signal_exited_reason (enum target_signal siggnal)
5696 {
5697 struct ui_out *uiout = current_uiout;
5698
5699 annotate_signalled ();
5700 if (ui_out_is_mi_like_p (uiout))
5701 ui_out_field_string
5702 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
5703 ui_out_text (uiout, "\nProgram terminated with signal ");
5704 annotate_signal_name ();
5705 ui_out_field_string (uiout, "signal-name",
5706 target_signal_to_name (siggnal));
5707 annotate_signal_name_end ();
5708 ui_out_text (uiout, ", ");
5709 annotate_signal_string ();
5710 ui_out_field_string (uiout, "signal-meaning",
5711 target_signal_to_string (siggnal));
5712 annotate_signal_string_end ();
5713 ui_out_text (uiout, ".\n");
5714 ui_out_text (uiout, "The program no longer exists.\n");
5715 }
5716
5717 /* The inferior program is finished, print why it stopped. */
5718
5719 static void
5720 print_exited_reason (int exitstatus)
5721 {
5722 struct inferior *inf = current_inferior ();
5723 const char *pidstr = target_pid_to_str (pid_to_ptid (inf->pid));
5724 struct ui_out *uiout = current_uiout;
5725
5726 annotate_exited (exitstatus);
5727 if (exitstatus)
5728 {
5729 if (ui_out_is_mi_like_p (uiout))
5730 ui_out_field_string (uiout, "reason",
5731 async_reason_lookup (EXEC_ASYNC_EXITED));
5732 ui_out_text (uiout, "[Inferior ");
5733 ui_out_text (uiout, plongest (inf->num));
5734 ui_out_text (uiout, " (");
5735 ui_out_text (uiout, pidstr);
5736 ui_out_text (uiout, ") exited with code ");
5737 ui_out_field_fmt (uiout, "exit-code", "0%o", (unsigned int) exitstatus);
5738 ui_out_text (uiout, "]\n");
5739 }
5740 else
5741 {
5742 if (ui_out_is_mi_like_p (uiout))
5743 ui_out_field_string
5744 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
5745 ui_out_text (uiout, "[Inferior ");
5746 ui_out_text (uiout, plongest (inf->num));
5747 ui_out_text (uiout, " (");
5748 ui_out_text (uiout, pidstr);
5749 ui_out_text (uiout, ") exited normally]\n");
5750 }
5751 /* Support the --return-child-result option. */
5752 return_child_result_value = exitstatus;
5753 }
5754
5755 /* Signal received, print why the inferior has stopped. The signal table
5756 tells us to print about it. */
5757
5758 static void
5759 print_signal_received_reason (enum target_signal siggnal)
5760 {
5761 struct ui_out *uiout = current_uiout;
5762
5763 annotate_signal ();
5764
5765 if (siggnal == TARGET_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
5766 {
5767 struct thread_info *t = inferior_thread ();
5768
5769 ui_out_text (uiout, "\n[");
5770 ui_out_field_string (uiout, "thread-name",
5771 target_pid_to_str (t->ptid));
5772 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
5773 ui_out_text (uiout, " stopped");
5774 }
5775 else
5776 {
5777 ui_out_text (uiout, "\nProgram received signal ");
5778 annotate_signal_name ();
5779 if (ui_out_is_mi_like_p (uiout))
5780 ui_out_field_string
5781 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
5782 ui_out_field_string (uiout, "signal-name",
5783 target_signal_to_name (siggnal));
5784 annotate_signal_name_end ();
5785 ui_out_text (uiout, ", ");
5786 annotate_signal_string ();
5787 ui_out_field_string (uiout, "signal-meaning",
5788 target_signal_to_string (siggnal));
5789 annotate_signal_string_end ();
5790 }
5791 ui_out_text (uiout, ".\n");
5792 }
5793
5794 /* Reverse execution: target ran out of history info, print why the inferior
5795 has stopped. */
5796
5797 static void
5798 print_no_history_reason (void)
5799 {
5800 ui_out_text (current_uiout, "\nNo more reverse-execution history.\n");
5801 }
5802
5803 /* Here to return control to GDB when the inferior stops for real.
5804 Print appropriate messages, remove breakpoints, give terminal our modes.
5805
5806 STOP_PRINT_FRAME nonzero means print the executing frame
5807 (pc, function, args, file, line number and line text).
5808 BREAKPOINTS_FAILED nonzero means stop was due to error
5809 attempting to insert breakpoints. */
5810
5811 void
5812 normal_stop (void)
5813 {
5814 struct target_waitstatus last;
5815 ptid_t last_ptid;
5816 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
5817
5818 get_last_target_status (&last_ptid, &last);
5819
5820 /* If an exception is thrown from this point on, make sure to
5821 propagate GDB's knowledge of the executing state to the
5822 frontend/user running state. A QUIT is an easy exception to see
5823 here, so do this before any filtered output. */
5824 if (!non_stop)
5825 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
5826 else if (last.kind != TARGET_WAITKIND_SIGNALLED
5827 && last.kind != TARGET_WAITKIND_EXITED)
5828 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
5829
5830 /* In non-stop mode, we don't want GDB to switch threads behind the
5831 user's back, to avoid races where the user is typing a command to
5832 apply to thread x, but GDB switches to thread y before the user
5833 finishes entering the command. */
5834
5835 /* As with the notification of thread events, we want to delay
5836 notifying the user that we've switched thread context until
5837 the inferior actually stops.
5838
5839 There's no point in saying anything if the inferior has exited.
5840 Note that SIGNALLED here means "exited with a signal", not
5841 "received a signal". */
5842 if (!non_stop
5843 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
5844 && target_has_execution
5845 && last.kind != TARGET_WAITKIND_SIGNALLED
5846 && last.kind != TARGET_WAITKIND_EXITED)
5847 {
5848 target_terminal_ours_for_output ();
5849 printf_filtered (_("[Switching to %s]\n"),
5850 target_pid_to_str (inferior_ptid));
5851 annotate_thread_changed ();
5852 previous_inferior_ptid = inferior_ptid;
5853 }
5854
5855 if (!breakpoints_always_inserted_mode () && target_has_execution)
5856 {
5857 if (remove_breakpoints ())
5858 {
5859 target_terminal_ours_for_output ();
5860 printf_filtered (_("Cannot remove breakpoints because "
5861 "program is no longer writable.\nFurther "
5862 "execution is probably impossible.\n"));
5863 }
5864 }
5865
5866 /* If an auto-display called a function and that got a signal,
5867 delete that auto-display to avoid an infinite recursion. */
5868
5869 if (stopped_by_random_signal)
5870 disable_current_display ();
5871
5872 /* Don't print a message if in the middle of doing a "step n"
5873 operation for n > 1 */
5874 if (target_has_execution
5875 && last.kind != TARGET_WAITKIND_SIGNALLED
5876 && last.kind != TARGET_WAITKIND_EXITED
5877 && inferior_thread ()->step_multi
5878 && inferior_thread ()->control.stop_step)
5879 goto done;
5880
5881 target_terminal_ours ();
5882 async_enable_stdin ();
5883
5884 /* Set the current source location. This will also happen if we
5885 display the frame below, but the current SAL will be incorrect
5886 during a user hook-stop function. */
5887 if (has_stack_frames () && !stop_stack_dummy)
5888 set_current_sal_from_frame (get_current_frame (), 1);
5889
5890 /* Let the user/frontend see the threads as stopped. */
5891 do_cleanups (old_chain);
5892
5893 /* Look up the hook_stop and run it (CLI internally handles problem
5894 of stop_command's pre-hook not existing). */
5895 if (stop_command)
5896 catch_errors (hook_stop_stub, stop_command,
5897 "Error while running hook_stop:\n", RETURN_MASK_ALL);
5898
5899 if (!has_stack_frames ())
5900 goto done;
5901
5902 if (last.kind == TARGET_WAITKIND_SIGNALLED
5903 || last.kind == TARGET_WAITKIND_EXITED)
5904 goto done;
5905
5906 /* Select innermost stack frame - i.e., current frame is frame 0,
5907 and current location is based on that.
5908 Don't do this on return from a stack dummy routine,
5909 or if the program has exited. */
5910
5911 if (!stop_stack_dummy)
5912 {
5913 select_frame (get_current_frame ());
5914
5915 /* Print current location without a level number, if
5916 we have changed functions or hit a breakpoint.
5917 Print source line if we have one.
5918 bpstat_print() contains the logic deciding in detail
5919 what to print, based on the event(s) that just occurred. */
5920
5921 /* If --batch-silent is enabled then there's no need to print the current
5922 source location, and trying to do so risks an error message about
5923 missing source files. */
5924 if (stop_print_frame && !batch_silent)
5925 {
5926 int bpstat_ret;
5927 int source_flag;
5928 int do_frame_printing = 1;
5929 struct thread_info *tp = inferior_thread ();
5930
5931 bpstat_ret = bpstat_print (tp->control.stop_bpstat);
5932 switch (bpstat_ret)
5933 {
5934 case PRINT_UNKNOWN:
5935 /* If we had hit a shared library event breakpoint,
5936 bpstat_print would print out this message. If we hit
5937 an OS-level shared library event, do the same
5938 thing. */
5939 if (last.kind == TARGET_WAITKIND_LOADED)
5940 {
5941 printf_filtered (_("Stopped due to shared library event\n"));
5942 source_flag = SRC_LINE; /* something bogus */
5943 do_frame_printing = 0;
5944 break;
5945 }
5946
5947 /* FIXME: cagney/2002-12-01: Given that a frame ID does
5948 (or should) carry around the function and does (or
5949 should) use that when doing a frame comparison. */
5950 if (tp->control.stop_step
5951 && frame_id_eq (tp->control.step_frame_id,
5952 get_frame_id (get_current_frame ()))
5953 && step_start_function == find_pc_function (stop_pc))
5954 source_flag = SRC_LINE; /* Finished step, just
5955 print source line. */
5956 else
5957 source_flag = SRC_AND_LOC; /* Print location and
5958 source line. */
5959 break;
5960 case PRINT_SRC_AND_LOC:
5961 source_flag = SRC_AND_LOC; /* Print location and
5962 source line. */
5963 break;
5964 case PRINT_SRC_ONLY:
5965 source_flag = SRC_LINE;
5966 break;
5967 case PRINT_NOTHING:
5968 source_flag = SRC_LINE; /* something bogus */
5969 do_frame_printing = 0;
5970 break;
5971 default:
5972 internal_error (__FILE__, __LINE__, _("Unknown value."));
5973 }
5974
5975 /* The behavior of this routine with respect to the source
5976 flag is:
5977 SRC_LINE: Print only source line
5978 LOCATION: Print only location
5979 SRC_AND_LOC: Print location and source line. */
5980 if (do_frame_printing)
5981 print_stack_frame (get_selected_frame (NULL), 0, source_flag);
5982
5983 /* Display the auto-display expressions. */
5984 do_displays ();
5985 }
5986 }
5987
5988 /* Save the function value return registers, if we care.
5989 We might be about to restore their previous contents. */
5990 if (inferior_thread ()->control.proceed_to_finish
5991 && execution_direction != EXEC_REVERSE)
5992 {
5993 /* This should not be necessary. */
5994 if (stop_registers)
5995 regcache_xfree (stop_registers);
5996
5997 /* NB: The copy goes through to the target picking up the value of
5998 all the registers. */
5999 stop_registers = regcache_dup (get_current_regcache ());
6000 }
6001
6002 if (stop_stack_dummy == STOP_STACK_DUMMY)
6003 {
6004 /* Pop the empty frame that contains the stack dummy.
6005 This also restores inferior state prior to the call
6006 (struct infcall_suspend_state). */
6007 struct frame_info *frame = get_current_frame ();
6008
6009 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
6010 frame_pop (frame);
6011 /* frame_pop() calls reinit_frame_cache as the last thing it
6012 does which means there's currently no selected frame. We
6013 don't need to re-establish a selected frame if the dummy call
6014 returns normally, that will be done by
6015 restore_infcall_control_state. However, we do have to handle
6016 the case where the dummy call is returning after being
6017 stopped (e.g. the dummy call previously hit a breakpoint).
6018 We can't know which case we have so just always re-establish
6019 a selected frame here. */
6020 select_frame (get_current_frame ());
6021 }
6022
6023 done:
6024 annotate_stopped ();
6025
6026 /* Suppress the stop observer if we're in the middle of:
6027
6028 - a step n (n > 1), as there are still more steps to be done.
6029
6030 - a "finish" command, as the observer will be called in
6031 finish_command_continuation, so it can include the inferior
6032 function's return value.
6033
6034 - calling an inferior function, as we pretend the inferior didn't
6035 run at all. The return value of the call is handled by the
6036 expression evaluator, through call_function_by_hand. */
6037
6038 if (!target_has_execution
6039 || last.kind == TARGET_WAITKIND_SIGNALLED
6040 || last.kind == TARGET_WAITKIND_EXITED
6041 || (!inferior_thread ()->step_multi
6042 && !(inferior_thread ()->control.stop_bpstat
6043 && inferior_thread ()->control.proceed_to_finish)
6044 && !inferior_thread ()->control.in_infcall))
6045 {
6046 if (!ptid_equal (inferior_ptid, null_ptid))
6047 observer_notify_normal_stop (inferior_thread ()->control.stop_bpstat,
6048 stop_print_frame);
6049 else
6050 observer_notify_normal_stop (NULL, stop_print_frame);
6051 }
6052
6053 if (target_has_execution)
6054 {
6055 if (last.kind != TARGET_WAITKIND_SIGNALLED
6056 && last.kind != TARGET_WAITKIND_EXITED)
6057 /* Delete the breakpoint we stopped at, if it wants to be deleted.
6058 Delete any breakpoint that is to be deleted at the next stop. */
6059 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
6060 }
6061
6062 /* Try to get rid of automatically added inferiors that are no
6063 longer needed. Keeping those around slows down things linearly.
6064 Note that this never removes the current inferior. */
6065 prune_inferiors ();
6066 }
6067
6068 static int
6069 hook_stop_stub (void *cmd)
6070 {
6071 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
6072 return (0);
6073 }
6074 \f
6075 int
6076 signal_stop_state (int signo)
6077 {
6078 return signal_stop[signo];
6079 }
6080
6081 int
6082 signal_print_state (int signo)
6083 {
6084 return signal_print[signo];
6085 }
6086
6087 int
6088 signal_pass_state (int signo)
6089 {
6090 return signal_program[signo];
6091 }
6092
6093 static void
6094 signal_cache_update (int signo)
6095 {
6096 if (signo == -1)
6097 {
6098 for (signo = 0; signo < (int) TARGET_SIGNAL_LAST; signo++)
6099 signal_cache_update (signo);
6100
6101 return;
6102 }
6103
6104 signal_pass[signo] = (signal_stop[signo] == 0
6105 && signal_print[signo] == 0
6106 && signal_program[signo] == 1);
6107 }
6108
6109 int
6110 signal_stop_update (int signo, int state)
6111 {
6112 int ret = signal_stop[signo];
6113
6114 signal_stop[signo] = state;
6115 signal_cache_update (signo);
6116 return ret;
6117 }
6118
6119 int
6120 signal_print_update (int signo, int state)
6121 {
6122 int ret = signal_print[signo];
6123
6124 signal_print[signo] = state;
6125 signal_cache_update (signo);
6126 return ret;
6127 }
6128
6129 int
6130 signal_pass_update (int signo, int state)
6131 {
6132 int ret = signal_program[signo];
6133
6134 signal_program[signo] = state;
6135 signal_cache_update (signo);
6136 return ret;
6137 }
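/* Illustrative sketch, NOT part of GDB: how the update functions above
   combine to implement something like "handle SIGUSR1 nostop noprint
   pass".  TARGET_SIGNAL_USR1 is assumed to be the internal number for
   SIGUSR1.  Each *_update call refreshes the cached signal_pass entry
   via signal_cache_update, so only the target still needs to be told
   about the final state.  Kept under "#if 0" as it is an example
   only.  */
#if 0
static void
example_handle_sigusr1 (void)
{
  signal_stop_update (TARGET_SIGNAL_USR1, 0);	/* "nostop" */
  signal_print_update (TARGET_SIGNAL_USR1, 0);	/* "noprint" */
  signal_pass_update (TARGET_SIGNAL_USR1, 1);	/* "pass" */

  /* Tell the target which signals it may now deliver directly.  */
  target_pass_signals ((int) TARGET_SIGNAL_LAST, signal_pass);
}
#endif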
6138
6139 static void
6140 sig_print_header (void)
6141 {
6142 printf_filtered (_("Signal Stop\tPrint\tPass "
6143 "to program\tDescription\n"));
6144 }
6145
6146 static void
6147 sig_print_info (enum target_signal oursig)
6148 {
6149 const char *name = target_signal_to_name (oursig);
6150 int name_padding = 13 - strlen (name);
6151
6152 if (name_padding <= 0)
6153 name_padding = 0;
6154
6155 printf_filtered ("%s", name);
6156 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
6157 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
6158 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
6159 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
6160 printf_filtered ("%s\n", target_signal_to_string (oursig));
6161 }
6162
6163 /* Specify how various signals in the inferior should be handled. */
6164
6165 static void
6166 handle_command (char *args, int from_tty)
6167 {
6168 char **argv;
6169 int digits, wordlen;
6170 int sigfirst, signum, siglast;
6171 enum target_signal oursig;
6172 int allsigs;
6173 int nsigs;
6174 unsigned char *sigs;
6175 struct cleanup *old_chain;
6176
6177 if (args == NULL)
6178 {
6179 error_no_arg (_("signal to handle"));
6180 }
6181
6182 /* Allocate and zero an array of flags for which signals to handle. */
6183
6184 nsigs = (int) TARGET_SIGNAL_LAST;
6185 sigs = (unsigned char *) alloca (nsigs);
6186 memset (sigs, 0, nsigs);
6187
6188 /* Break the command line up into args. */
6189
6190 argv = gdb_buildargv (args);
6191 old_chain = make_cleanup_freeargv (argv);
6192
6193 /* Walk through the args, looking for signal oursigs, signal names, and
6194 actions. Signal numbers and signal names may be interspersed with
6195 actions, with the actions being performed for all signals cumulatively
6196 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
6197
6198 while (*argv != NULL)
6199 {
6200 wordlen = strlen (*argv);
6201 for (digits = 0; isdigit ((*argv)[digits]); digits++)
6202 {;
6203 }
6204 allsigs = 0;
6205 sigfirst = siglast = -1;
6206
6207 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
6208 {
6209 /* Apply action to all signals except those used by the
6210 debugger. Silently skip those. */
6211 allsigs = 1;
6212 sigfirst = 0;
6213 siglast = nsigs - 1;
6214 }
6215 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
6216 {
6217 SET_SIGS (nsigs, sigs, signal_stop);
6218 SET_SIGS (nsigs, sigs, signal_print);
6219 }
6220 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
6221 {
6222 UNSET_SIGS (nsigs, sigs, signal_program);
6223 }
6224 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
6225 {
6226 SET_SIGS (nsigs, sigs, signal_print);
6227 }
6228 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
6229 {
6230 SET_SIGS (nsigs, sigs, signal_program);
6231 }
6232 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
6233 {
6234 UNSET_SIGS (nsigs, sigs, signal_stop);
6235 }
6236 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
6237 {
6238 SET_SIGS (nsigs, sigs, signal_program);
6239 }
6240 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
6241 {
6242 UNSET_SIGS (nsigs, sigs, signal_print);
6243 UNSET_SIGS (nsigs, sigs, signal_stop);
6244 }
6245 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
6246 {
6247 UNSET_SIGS (nsigs, sigs, signal_program);
6248 }
6249 else if (digits > 0)
6250 {
6251 /* It is numeric. The numeric signal refers to our own
6252 internal signal numbering from target.h, not to the host/target
6253 signal number. This is a feature; users really should be
6254 using symbolic names anyway, and the common ones like
6255 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
6256
6257 sigfirst = siglast = (int)
6258 target_signal_from_command (atoi (*argv));
6259 if ((*argv)[digits] == '-')
6260 {
6261 siglast = (int)
6262 target_signal_from_command (atoi ((*argv) + digits + 1));
6263 }
6264 if (sigfirst > siglast)
6265 {
6266 /* Bet he didn't figure we'd think of this case... */
6267 signum = sigfirst;
6268 sigfirst = siglast;
6269 siglast = signum;
6270 }
6271 }
6272 else
6273 {
6274 oursig = target_signal_from_name (*argv);
6275 if (oursig != TARGET_SIGNAL_UNKNOWN)
6276 {
6277 sigfirst = siglast = (int) oursig;
6278 }
6279 else
6280 {
6281 /* Not a number and not a recognized flag word => complain. */
6282 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
6283 }
6284 }
6285
6286 /* If any signal numbers or symbol names were found, set flags for
6287 which signals to apply actions to. */
6288
6289 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
6290 {
6291 switch ((enum target_signal) signum)
6292 {
6293 case TARGET_SIGNAL_TRAP:
6294 case TARGET_SIGNAL_INT:
6295 if (!allsigs && !sigs[signum])
6296 {
6297 if (query (_("%s is used by the debugger.\n\
6298 Are you sure you want to change it? "),
6299 target_signal_to_name ((enum target_signal) signum)))
6300 {
6301 sigs[signum] = 1;
6302 }
6303 else
6304 {
6305 printf_unfiltered (_("Not confirmed, unchanged.\n"));
6306 gdb_flush (gdb_stdout);
6307 }
6308 }
6309 break;
6310 case TARGET_SIGNAL_0:
6311 case TARGET_SIGNAL_DEFAULT:
6312 case TARGET_SIGNAL_UNKNOWN:
6313 /* Make sure that "all" doesn't print these. */
6314 break;
6315 default:
6316 sigs[signum] = 1;
6317 break;
6318 }
6319 }
6320
6321 argv++;
6322 }
6323
6324 for (signum = 0; signum < nsigs; signum++)
6325 if (sigs[signum])
6326 {
6327 signal_cache_update (-1);
6328 target_pass_signals ((int) TARGET_SIGNAL_LAST, signal_pass);
6329
6330 if (from_tty)
6331 {
6332 /* Show the results. */
6333 sig_print_header ();
6334 for (; signum < nsigs; signum++)
6335 if (sigs[signum])
6336 sig_print_info (signum);
6337 }
6338
6339 break;
6340 }
6341
6342 do_cleanups (old_chain);
6343 }
6344
6345 static void
6346 xdb_handle_command (char *args, int from_tty)
6347 {
6348 char **argv;
6349 struct cleanup *old_chain;
6350
6351 if (args == NULL)
6352 error_no_arg (_("xdb command"));
6353
6354 /* Break the command line up into args. */
6355
6356 argv = gdb_buildargv (args);
6357 old_chain = make_cleanup_freeargv (argv);
6358 if (argv[1] != (char *) NULL)
6359 {
6360 char *argBuf;
6361 int bufLen;
6362
6363 bufLen = strlen (argv[0]) + 20;
6364 argBuf = (char *) xmalloc (bufLen);
6365 if (argBuf)
6366 {
6367 int validFlag = 1;
6368 enum target_signal oursig;
6369
6370 oursig = target_signal_from_name (argv[0]);
6371 memset (argBuf, 0, bufLen);
6372 if (strcmp (argv[1], "Q") == 0)
6373 sprintf (argBuf, "%s %s", argv[0], "noprint");
6374 else
6375 {
6376 if (strcmp (argv[1], "s") == 0)
6377 {
6378 if (!signal_stop[oursig])
6379 sprintf (argBuf, "%s %s", argv[0], "stop");
6380 else
6381 sprintf (argBuf, "%s %s", argv[0], "nostop");
6382 }
6383 else if (strcmp (argv[1], "i") == 0)
6384 {
6385 if (!signal_program[oursig])
6386 sprintf (argBuf, "%s %s", argv[0], "pass");
6387 else
6388 sprintf (argBuf, "%s %s", argv[0], "nopass");
6389 }
6390 else if (strcmp (argv[1], "r") == 0)
6391 {
6392 if (!signal_print[oursig])
6393 sprintf (argBuf, "%s %s", argv[0], "print");
6394 else
6395 sprintf (argBuf, "%s %s", argv[0], "noprint");
6396 }
6397 else
6398 validFlag = 0;
6399 }
6400 if (validFlag)
6401 handle_command (argBuf, from_tty);
6402 else
6403 printf_filtered (_("Invalid signal handling flag.\n"));
6404 if (argBuf)
6405 xfree (argBuf);
6406 }
6407 }
6408 do_cleanups (old_chain);
6409 }
6410
6411 /* Print current contents of the tables set by the handle command.
6412 It is possible we should just be printing signals actually used
6413 by the current target (but for things to work right when switching
6414 targets, all signals should be in the signal tables). */
6415
6416 static void
6417 signals_info (char *signum_exp, int from_tty)
6418 {
6419 enum target_signal oursig;
6420
6421 sig_print_header ();
6422
6423 if (signum_exp)
6424 {
6425 /* First see if this is a symbol name. */
6426 oursig = target_signal_from_name (signum_exp);
6427 if (oursig == TARGET_SIGNAL_UNKNOWN)
6428 {
6429 /* No, try numeric. */
6430 oursig =
6431 target_signal_from_command (parse_and_eval_long (signum_exp));
6432 }
6433 sig_print_info (oursig);
6434 return;
6435 }
6436
6437 printf_filtered ("\n");
6438 /* These ugly casts brought to you by the native VAX compiler. */
6439 for (oursig = TARGET_SIGNAL_FIRST;
6440 (int) oursig < (int) TARGET_SIGNAL_LAST;
6441 oursig = (enum target_signal) ((int) oursig + 1))
6442 {
6443 QUIT;
6444
6445 if (oursig != TARGET_SIGNAL_UNKNOWN
6446 && oursig != TARGET_SIGNAL_DEFAULT && oursig != TARGET_SIGNAL_0)
6447 sig_print_info (oursig);
6448 }
6449
6450 printf_filtered (_("\nUse the \"handle\" command "
6451 "to change these tables.\n"));
6452 }
6453
6454 /* Check if it makes sense to read $_siginfo from the current thread
6455 at this point. If not, throw an error. */
6456
6457 static void
6458 validate_siginfo_access (void)
6459 {
6460 /* No current inferior, no siginfo. */
6461 if (ptid_equal (inferior_ptid, null_ptid))
6462 error (_("No thread selected."));
6463
6464 /* Don't try to read from a dead thread. */
6465 if (is_exited (inferior_ptid))
6466 error (_("The current thread has terminated"));
6467
6468 /* ... or from a spinning thread. */
6469 if (is_running (inferior_ptid))
6470 error (_("Selected thread is running."));
6471 }
6472
6473 /* The $_siginfo convenience variable is a bit special. We don't know
6474 for sure the type of the value until we actually have a chance to
6475 fetch the data. The type can change depending on gdbarch, so it is
6476 also dependent on which thread you have selected.
6477
6478 1. making $_siginfo be an internalvar that creates a new value on
6479 access.
6480
6481 2. making the value of $_siginfo be an lval_computed value. */
6482
6483 /* This function implements the lval_computed support for reading a
6484 $_siginfo value. */
6485
6486 static void
6487 siginfo_value_read (struct value *v)
6488 {
6489 LONGEST transferred;
6490
6491 validate_siginfo_access ();
6492
6493 transferred =
6494 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
6495 NULL,
6496 value_contents_all_raw (v),
6497 value_offset (v),
6498 TYPE_LENGTH (value_type (v)));
6499
6500 if (transferred != TYPE_LENGTH (value_type (v)))
6501 error (_("Unable to read siginfo"));
6502 }
6503
6504 /* This function implements the lval_computed support for writing a
6505 $_siginfo value. */
6506
6507 static void
6508 siginfo_value_write (struct value *v, struct value *fromval)
6509 {
6510 LONGEST transferred;
6511
6512 validate_siginfo_access ();
6513
6514 transferred = target_write (&current_target,
6515 TARGET_OBJECT_SIGNAL_INFO,
6516 NULL,
6517 value_contents_all_raw (fromval),
6518 value_offset (v),
6519 TYPE_LENGTH (value_type (fromval)));
6520
6521 if (transferred != TYPE_LENGTH (value_type (fromval)))
6522 error (_("Unable to write siginfo"));
6523 }
6524
6525 static const struct lval_funcs siginfo_value_funcs =
6526 {
6527 siginfo_value_read,
6528 siginfo_value_write
6529 };
6530
6531 /* Return a new value with the correct type for the siginfo object of
6532 the current thread using architecture GDBARCH. Return a void value
6533 if there's no object available. */
6534
6535 static struct value *
6536 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var)
6537 {
6538 if (target_has_stack
6539 && !ptid_equal (inferior_ptid, null_ptid)
6540 && gdbarch_get_siginfo_type_p (gdbarch))
6541 {
6542 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6543
6544 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
6545 }
6546
6547 return allocate_value (builtin_type (gdbarch)->builtin_void);
6548 }
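/* Illustrative sketch, NOT part of this file's code: how a value
   function like siginfo_make_value is typically wired up to the
   convenience variable.  It assumes create_internalvar_type_lazy (from
   value.c) as the registration routine and that the call is made at
   initialization time; the result is that $_siginfo is recomputed on
   each access, so its type can follow the selected thread's gdbarch.
   Kept under "#if 0" as it is an example only.  */
#if 0
static void
example_register_siginfo_variable (void)
{
  create_internalvar_type_lazy ("_siginfo", siginfo_make_value);
}
#endif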
6549
6550 \f
6551 /* infcall_suspend_state contains state about the program itself like its
6552 registers and any signal it received when it last stopped.
6553 This state must be restored regardless of how the inferior function call
6554 ends (either successfully, or after it hits a breakpoint or signal)
6555 if the program is to properly continue where it left off. */
6556
6557 struct infcall_suspend_state
6558 {
6559 struct thread_suspend_state thread_suspend;
6560 struct inferior_suspend_state inferior_suspend;
6561
6562 /* Other fields: */
6563 CORE_ADDR stop_pc;
6564 struct regcache *registers;
6565
6566 /* Format of SIGINFO_DATA or NULL if it is not present. */
6567 struct gdbarch *siginfo_gdbarch;
6568
6569 /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
6570 TYPE_LENGTH (gdbarch_get_siginfo_type ()). For a different gdbarch the
6571 content would be invalid. */
6572 gdb_byte *siginfo_data;
6573 };
6574
6575 struct infcall_suspend_state *
6576 save_infcall_suspend_state (void)
6577 {
6578 struct infcall_suspend_state *inf_state;
6579 struct thread_info *tp = inferior_thread ();
6580 struct inferior *inf = current_inferior ();
6581 struct regcache *regcache = get_current_regcache ();
6582 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6583 gdb_byte *siginfo_data = NULL;
6584
6585 if (gdbarch_get_siginfo_type_p (gdbarch))
6586 {
6587 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6588 size_t len = TYPE_LENGTH (type);
6589 struct cleanup *back_to;
6590
6591 siginfo_data = xmalloc (len);
6592 back_to = make_cleanup (xfree, siginfo_data);
6593
6594 if (target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6595 siginfo_data, 0, len) == len)
6596 discard_cleanups (back_to);
6597 else
6598 {
6599 /* Errors ignored. */
6600 do_cleanups (back_to);
6601 siginfo_data = NULL;
6602 }
6603 }
6604
6605 inf_state = XZALLOC (struct infcall_suspend_state);
6606
6607 if (siginfo_data)
6608 {
6609 inf_state->siginfo_gdbarch = gdbarch;
6610 inf_state->siginfo_data = siginfo_data;
6611 }
6612
6613 inf_state->thread_suspend = tp->suspend;
6614 inf_state->inferior_suspend = inf->suspend;
6615
6616   /* run_inferior_call will not deliver this signal anyway, since it calls
6617      `proceed' with TARGET_SIGNAL_0.  */
6618 tp->suspend.stop_signal = TARGET_SIGNAL_0;
6619
6620 inf_state->stop_pc = stop_pc;
6621
6622 inf_state->registers = regcache_dup (regcache);
6623
6624 return inf_state;
6625 }
6626
6627 /* Restore inferior session state to INF_STATE. */
6628
6629 void
6630 restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6631 {
6632 struct thread_info *tp = inferior_thread ();
6633 struct inferior *inf = current_inferior ();
6634 struct regcache *regcache = get_current_regcache ();
6635 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6636
6637 tp->suspend = inf_state->thread_suspend;
6638 inf->suspend = inf_state->inferior_suspend;
6639
6640 stop_pc = inf_state->stop_pc;
6641
6642 if (inf_state->siginfo_gdbarch == gdbarch)
6643 {
6644 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6645 size_t len = TYPE_LENGTH (type);
6646
6647 /* Errors ignored. */
6648 target_write (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6649 inf_state->siginfo_data, 0, len);
6650 }
6651
6652 /* The inferior can be gone if the user types "print exit(0)"
6653 (and perhaps other times). */
6654 if (target_has_execution)
6655 /* NB: The register write goes through to the target. */
6656 regcache_cpy (regcache, inf_state->registers);
6657
6658 discard_infcall_suspend_state (inf_state);
6659 }
6660
6661 static void
6662 do_restore_infcall_suspend_state_cleanup (void *state)
6663 {
6664 restore_infcall_suspend_state (state);
6665 }
6666
6667 struct cleanup *
6668 make_cleanup_restore_infcall_suspend_state
6669 (struct infcall_suspend_state *inf_state)
6670 {
6671 return make_cleanup (do_restore_infcall_suspend_state_cleanup, inf_state);
6672 }
6673
6674 void
6675 discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6676 {
6677 regcache_xfree (inf_state->registers);
6678 xfree (inf_state->siginfo_data);
6679 xfree (inf_state);
6680 }
6681
6682 struct regcache *
6683 get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
6684 {
6685 return inf_state->registers;
6686 }
6687
6688 /* infcall_control_state contains state regarding gdb's control of the
6689 inferior itself like stepping control. It also contains session state like
6690 the user's currently selected frame. */
6691
6692 struct infcall_control_state
6693 {
6694 struct thread_control_state thread_control;
6695 struct inferior_control_state inferior_control;
6696
6697 /* Other fields: */
6698 enum stop_stack_kind stop_stack_dummy;
6699 int stopped_by_random_signal;
6700 int stop_after_trap;
6701
6702   /* ID of the selected frame when the inferior function call was made. */
6703 struct frame_id selected_frame_id;
6704 };
6705
6706 /* Save all of the information associated with the inferior<==>gdb
6707 connection. */
6708
6709 struct infcall_control_state *
6710 save_infcall_control_state (void)
6711 {
6712 struct infcall_control_state *inf_status = xmalloc (sizeof (*inf_status));
6713 struct thread_info *tp = inferior_thread ();
6714 struct inferior *inf = current_inferior ();
6715
6716 inf_status->thread_control = tp->control;
6717 inf_status->inferior_control = inf->control;
6718
6719 tp->control.step_resume_breakpoint = NULL;
6720 tp->control.exception_resume_breakpoint = NULL;
6721
6722 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
6723 chain. If caller's caller is walking the chain, they'll be happier if we
6724 hand them back the original chain when restore_infcall_control_state is
6725 called. */
6726 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
6727
6728 /* Other fields: */
6729 inf_status->stop_stack_dummy = stop_stack_dummy;
6730 inf_status->stopped_by_random_signal = stopped_by_random_signal;
6731 inf_status->stop_after_trap = stop_after_trap;
6732
6733 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
6734
6735 return inf_status;
6736 }
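
/* Sketch of the intended save/restore/discard pairing (an assumption
   about typical use; the real call sites may differ in detail):

       struct infcall_control_state *status
         = save_infcall_control_state ();
       struct cleanup *chain
         = make_cleanup_restore_infcall_control_state (status);

       ... run the inferior call ...

   On an error path the cleanup runs and restore_infcall_control_state
   puts back the stepping state, the original bpstat chain and the
   selected frame.  On a path where the new state should be kept, the
   caller can instead discard_cleanups (chain) and call
   discard_infcall_control_state (status).  */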
6737
6738 static int
6739 restore_selected_frame (void *args)
6740 {
6741 struct frame_id *fid = (struct frame_id *) args;
6742 struct frame_info *frame;
6743
6744 frame = frame_find_by_id (*fid);
6745
6746   /* If FRAME is NULL, the previously selected frame could not be found
6747      in the current frame chain.  */
6748 if (frame == NULL)
6749 {
6750 warning (_("Unable to restore previously selected frame."));
6751 return 0;
6752 }
6753
6754 select_frame (frame);
6755
6756   return 1;
6757 }
6758
6759 /* Restore inferior session state to INF_STATUS. */
6760
6761 void
6762 restore_infcall_control_state (struct infcall_control_state *inf_status)
6763 {
6764 struct thread_info *tp = inferior_thread ();
6765 struct inferior *inf = current_inferior ();
6766
6767 if (tp->control.step_resume_breakpoint)
6768 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
6769
6770 if (tp->control.exception_resume_breakpoint)
6771 tp->control.exception_resume_breakpoint->disposition
6772 = disp_del_at_next_stop;
6773
6774 /* Handle the bpstat_copy of the chain. */
6775 bpstat_clear (&tp->control.stop_bpstat);
6776
6777 tp->control = inf_status->thread_control;
6778 inf->control = inf_status->inferior_control;
6779
6780 /* Other fields: */
6781 stop_stack_dummy = inf_status->stop_stack_dummy;
6782 stopped_by_random_signal = inf_status->stopped_by_random_signal;
6783 stop_after_trap = inf_status->stop_after_trap;
6784
6785 if (target_has_stack)
6786 {
6787 /* The point of catch_errors is that if the stack is clobbered,
6788 walking the stack might encounter a garbage pointer and
6789 error() trying to dereference it. */
6790 if (catch_errors
6791 (restore_selected_frame, &inf_status->selected_frame_id,
6792 "Unable to restore previously selected frame:\n",
6793 RETURN_MASK_ERROR) == 0)
6794 /* Error in restoring the selected frame. Select the innermost
6795 frame. */
6796 select_frame (get_current_frame ());
6797 }
6798
6799 xfree (inf_status);
6800 }
6801
6802 static void
6803 do_restore_infcall_control_state_cleanup (void *sts)
6804 {
6805 restore_infcall_control_state (sts);
6806 }
6807
6808 struct cleanup *
6809 make_cleanup_restore_infcall_control_state
6810 (struct infcall_control_state *inf_status)
6811 {
6812 return make_cleanup (do_restore_infcall_control_state_cleanup, inf_status);
6813 }
6814
6815 void
6816 discard_infcall_control_state (struct infcall_control_state *inf_status)
6817 {
6818 if (inf_status->thread_control.step_resume_breakpoint)
6819 inf_status->thread_control.step_resume_breakpoint->disposition
6820 = disp_del_at_next_stop;
6821
6822 if (inf_status->thread_control.exception_resume_breakpoint)
6823 inf_status->thread_control.exception_resume_breakpoint->disposition
6824 = disp_del_at_next_stop;
6825
6826 /* See save_infcall_control_state for info on stop_bpstat. */
6827 bpstat_clear (&inf_status->thread_control.stop_bpstat);
6828
6829 xfree (inf_status);
6830 }
6831 \f
6832 int
6833 inferior_has_forked (ptid_t pid, ptid_t *child_pid)
6834 {
6835 struct target_waitstatus last;
6836 ptid_t last_ptid;
6837
6838 get_last_target_status (&last_ptid, &last);
6839
6840 if (last.kind != TARGET_WAITKIND_FORKED)
6841 return 0;
6842
6843 if (!ptid_equal (last_ptid, pid))
6844 return 0;
6845
6846 *child_pid = last.value.related_pid;
6847 return 1;
6848 }
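
/* Example of how a caller might use the predicate above (the variable
   names are illustrative only):

       ptid_t child;

       if (inferior_has_forked (inferior_ptid, &child))
         {
           ... the last reported event for inferior_ptid was a fork,
               and CHILD identifies the new process ...
         }

   The sibling predicates below follow the same pattern, each with its
   own out parameter.  */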
6849
6850 int
6851 inferior_has_vforked (ptid_t pid, ptid_t *child_pid)
6852 {
6853 struct target_waitstatus last;
6854 ptid_t last_ptid;
6855
6856 get_last_target_status (&last_ptid, &last);
6857
6858 if (last.kind != TARGET_WAITKIND_VFORKED)
6859 return 0;
6860
6861 if (!ptid_equal (last_ptid, pid))
6862 return 0;
6863
6864 *child_pid = last.value.related_pid;
6865 return 1;
6866 }
6867
6868 int
6869 inferior_has_execd (ptid_t pid, char **execd_pathname)
6870 {
6871 struct target_waitstatus last;
6872 ptid_t last_ptid;
6873
6874 get_last_target_status (&last_ptid, &last);
6875
6876 if (last.kind != TARGET_WAITKIND_EXECD)
6877 return 0;
6878
6879 if (!ptid_equal (last_ptid, pid))
6880 return 0;
6881
6882 *execd_pathname = xstrdup (last.value.execd_pathname);
6883 return 1;
6884 }
6885
6886 int
6887 inferior_has_called_syscall (ptid_t pid, int *syscall_number)
6888 {
6889 struct target_waitstatus last;
6890 ptid_t last_ptid;
6891
6892 get_last_target_status (&last_ptid, &last);
6893
6894   if (last.kind != TARGET_WAITKIND_SYSCALL_ENTRY
6895       && last.kind != TARGET_WAITKIND_SYSCALL_RETURN)
6896 return 0;
6897
6898 if (!ptid_equal (last_ptid, pid))
6899 return 0;
6900
6901 *syscall_number = last.value.syscall_number;
6902 return 1;
6903 }
6904
6905 int
6906 ptid_match (ptid_t ptid, ptid_t filter)
6907 {
6908 if (ptid_equal (filter, minus_one_ptid))
6909 return 1;
6910 if (ptid_is_pid (filter)
6911 && ptid_get_pid (ptid) == ptid_get_pid (filter))
6912 return 1;
6913 else if (ptid_equal (ptid, filter))
6914 return 1;
6915
6916 return 0;
6917 }
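
/* A few illustrative cases, assuming the usual ptid constructors from
   ptid.h (ptid_build, pid_to_ptid):

       ptid_match (X, minus_one_ptid)                     => 1 for any X
       ptid_match (ptid_build (5, 1, 0), pid_to_ptid (5)) => 1
       ptid_match (ptid_build (5, 1, 0),
                   ptid_build (5, 2, 0))                  => 0

   Note the match is not symmetric: a process-wide PTID such as
   pid_to_ptid (5) does not match a more specific FILTER such as
   ptid_build (5, 1, 0).  */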
6918
6919 /* restore_inferior_ptid() will be used by the cleanup machinery
6920 to restore the inferior_ptid value saved in a call to
6921 save_inferior_ptid(). */
6922
6923 static void
6924 restore_inferior_ptid (void *arg)
6925 {
6926 ptid_t *saved_ptid_ptr = arg;
6927
6928 inferior_ptid = *saved_ptid_ptr;
6929 xfree (arg);
6930 }
6931
6932 /* Save the value of inferior_ptid so that it may be restored by a
6933 later call to do_cleanups(). Returns the struct cleanup pointer
6934 needed for later doing the cleanup. */
6935
6936 struct cleanup *
6937 save_inferior_ptid (void)
6938 {
6939 ptid_t *saved_ptid_ptr;
6940
6941 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
6942 *saved_ptid_ptr = inferior_ptid;
6943 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
6944 }
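
/* Typical (assumed) usage, temporarily switching inferior_ptid:

       struct cleanup *old_chain = save_inferior_ptid ();

       inferior_ptid = some_other_ptid;
       ... operate on that thread ...
       do_cleanups (old_chain);

   The cleanup puts the saved inferior_ptid back and frees the saved
   copy; SOME_OTHER_PTID is just a placeholder name here.  */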
6945 \f
6946
6947 /* User interface for reverse debugging:
6948 Set exec-direction / show exec-direction commands
6949    (returns error unless the target can execute in reverse).  */
6950
6951 int execution_direction = EXEC_FORWARD;
6952 static const char exec_forward[] = "forward";
6953 static const char exec_reverse[] = "reverse";
6954 static const char *exec_direction = exec_forward;
6955 static const char *exec_direction_names[] = {
6956 exec_forward,
6957 exec_reverse,
6958 NULL
6959 };
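
/* From the CLI this surfaces as the documented "set exec-direction"
   and "show exec-direction" commands; for example, against a target
   that can execute in reverse (process record being one such target):

       (gdb) record
       (gdb) set exec-direction reverse
       (gdb) step

   "record" above is only one example of how to obtain a
   reverse-capable target.  */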
6960
6961 static void
6962 set_exec_direction_func (char *args, int from_tty,
6963 struct cmd_list_element *cmd)
6964 {
6965 if (target_can_execute_reverse)
6966 {
6967 if (!strcmp (exec_direction, exec_forward))
6968 execution_direction = EXEC_FORWARD;
6969 else if (!strcmp (exec_direction, exec_reverse))
6970 execution_direction = EXEC_REVERSE;
6971 }
6972 else
6973 {
6974 exec_direction = exec_forward;
6975 error (_("Target does not support this operation."));
6976 }
6977 }
6978
6979 static void
6980 show_exec_direction_func (struct ui_file *out, int from_tty,
6981 struct cmd_list_element *cmd, const char *value)
6982 {
6983 switch (execution_direction) {
6984 case EXEC_FORWARD:
6985 fprintf_filtered (out, _("Forward.\n"));
6986 break;
6987 case EXEC_REVERSE:
6988 fprintf_filtered (out, _("Reverse.\n"));
6989 break;
6990 default:
6991 internal_error (__FILE__, __LINE__,
6992 _("bogus execution_direction value: %d"),
6993 (int) execution_direction);
6994 }
6995 }
6996
6997 /* User interface for non-stop mode. */
6998
6999 int non_stop = 0;
7000
7001 static void
7002 set_non_stop (char *args, int from_tty,
7003 struct cmd_list_element *c)
7004 {
7005 if (target_has_execution)
7006 {
7007 non_stop_1 = non_stop;
7008 error (_("Cannot change this setting while the inferior is running."));
7009 }
7010
7011 non_stop = non_stop_1;
7012 }
7013
7014 static void
7015 show_non_stop (struct ui_file *file, int from_tty,
7016 struct cmd_list_element *c, const char *value)
7017 {
7018 fprintf_filtered (file,
7019 _("Controlling the inferior in non-stop mode is %s.\n"),
7020 value);
7021 }
7022
7023 static void
7024 show_schedule_multiple (struct ui_file *file, int from_tty,
7025 struct cmd_list_element *c, const char *value)
7026 {
7027 fprintf_filtered (file, _("Resuming the execution of threads "
7028 "of all processes is %s.\n"), value);
7029 }
7030
7031 void
7032 _initialize_infrun (void)
7033 {
7034 int i;
7035 int numsigs;
7036
7037 add_info ("signals", signals_info, _("\
7038 What debugger does when program gets various signals.\n\
7039 Specify a signal as argument to print info on that signal only."));
7040 add_info_alias ("handle", "signals", 0);
7041
7042 add_com ("handle", class_run, handle_command, _("\
7043 Specify how to handle a signal.\n\
7044 Args are signals and actions to apply to those signals.\n\
7045 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7046 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7047 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7048 The special arg \"all\" is recognized to mean all signals except those\n\
7049 used by the debugger, typically SIGTRAP and SIGINT.\n\
7050 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
7051 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
7052 Stop means reenter debugger if this signal happens (implies print).\n\
7053 Print means print a message if this signal happens.\n\
7054 Pass means let program see this signal; otherwise program doesn't know.\n\
7055 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7056 Pass and Stop may be combined."));
7057 if (xdb_commands)
7058 {
7059 add_com ("lz", class_info, signals_info, _("\
7060 What debugger does when program gets various signals.\n\
7061 Specify a signal as argument to print info on that signal only."));
7062 add_com ("z", class_run, xdb_handle_command, _("\
7063 Specify how to handle a signal.\n\
7064 Args are signals and actions to apply to those signals.\n\
7065 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7066 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7067 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7068 The special arg \"all\" is recognized to mean all signals except those\n\
7069 used by the debugger, typically SIGTRAP and SIGINT.\n\
7070 Recognized actions include \"s\" (toggles between stop and nostop),\n\
7071 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
7072 nopass), \"Q\" (noprint)\n\
7073 Stop means reenter debugger if this signal happens (implies print).\n\
7074 Print means print a message if this signal happens.\n\
7075 Pass means let program see this signal; otherwise program doesn't know.\n\
7076 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7077 Pass and Stop may be combined."));
7078 }
7079
7080 if (!dbx_commands)
7081 stop_command = add_cmd ("stop", class_obscure,
7082 not_just_help_class_command, _("\
7083 There is no `stop' command, but you can set a hook on `stop'.\n\
7084 This allows you to set a list of commands to be run each time execution\n\
7085 of the program stops."), &cmdlist);
7086
7087 add_setshow_zinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
7088 Set inferior debugging."), _("\
7089 Show inferior debugging."), _("\
7090 When non-zero, inferior specific debugging is enabled."),
7091 NULL,
7092 show_debug_infrun,
7093 &setdebuglist, &showdebuglist);
7094
7095 add_setshow_boolean_cmd ("displaced", class_maintenance,
7096 &debug_displaced, _("\
7097 Set displaced stepping debugging."), _("\
7098 Show displaced stepping debugging."), _("\
7099 When non-zero, displaced stepping specific debugging is enabled."),
7100 NULL,
7101 show_debug_displaced,
7102 &setdebuglist, &showdebuglist);
7103
7104 add_setshow_boolean_cmd ("non-stop", no_class,
7105 &non_stop_1, _("\
7106 Set whether gdb controls the inferior in non-stop mode."), _("\
7107 Show whether gdb controls the inferior in non-stop mode."), _("\
7108 When debugging a multi-threaded program and this setting is\n\
7109 off (the default, also called all-stop mode), when one thread stops\n\
7110 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
7111 all other threads in the program while you interact with the thread of\n\
7112 interest. When you continue or step a thread, you can allow the other\n\
7113 threads to run, or have them remain stopped, but while you inspect any\n\
7114 thread's state, all threads stop.\n\
7115 \n\
7116 In non-stop mode, when one thread stops, other threads can continue\n\
7117 to run freely. You'll be able to step each thread independently,\n\
7118 leave it stopped or free to run as needed."),
7119 set_non_stop,
7120 show_non_stop,
7121 &setlist,
7122 &showlist);
7123
7124 numsigs = (int) TARGET_SIGNAL_LAST;
7125 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
7126 signal_print = (unsigned char *)
7127 xmalloc (sizeof (signal_print[0]) * numsigs);
7128 signal_program = (unsigned char *)
7129 xmalloc (sizeof (signal_program[0]) * numsigs);
7130 signal_pass = (unsigned char *)
7131     xmalloc (sizeof (signal_pass[0]) * numsigs);
7132 for (i = 0; i < numsigs; i++)
7133 {
7134 signal_stop[i] = 1;
7135 signal_print[i] = 1;
7136 signal_program[i] = 1;
7137 }
7138
7139 /* Signals caused by debugger's own actions
7140 should not be given to the program afterwards. */
7141 signal_program[TARGET_SIGNAL_TRAP] = 0;
7142 signal_program[TARGET_SIGNAL_INT] = 0;
7143
7144 /* Signals that are not errors should not normally enter the debugger. */
7145 signal_stop[TARGET_SIGNAL_ALRM] = 0;
7146 signal_print[TARGET_SIGNAL_ALRM] = 0;
7147 signal_stop[TARGET_SIGNAL_VTALRM] = 0;
7148 signal_print[TARGET_SIGNAL_VTALRM] = 0;
7149 signal_stop[TARGET_SIGNAL_PROF] = 0;
7150 signal_print[TARGET_SIGNAL_PROF] = 0;
7151 signal_stop[TARGET_SIGNAL_CHLD] = 0;
7152 signal_print[TARGET_SIGNAL_CHLD] = 0;
7153 signal_stop[TARGET_SIGNAL_IO] = 0;
7154 signal_print[TARGET_SIGNAL_IO] = 0;
7155 signal_stop[TARGET_SIGNAL_POLL] = 0;
7156 signal_print[TARGET_SIGNAL_POLL] = 0;
7157 signal_stop[TARGET_SIGNAL_URG] = 0;
7158 signal_print[TARGET_SIGNAL_URG] = 0;
7159 signal_stop[TARGET_SIGNAL_WINCH] = 0;
7160 signal_print[TARGET_SIGNAL_WINCH] = 0;
7161 signal_stop[TARGET_SIGNAL_PRIO] = 0;
7162 signal_print[TARGET_SIGNAL_PRIO] = 0;
7163
7164 /* These signals are used internally by user-level thread
7165 implementations. (See signal(5) on Solaris.) Like the above
7166 signals, a healthy program receives and handles them as part of
7167 its normal operation. */
7168 signal_stop[TARGET_SIGNAL_LWP] = 0;
7169 signal_print[TARGET_SIGNAL_LWP] = 0;
7170 signal_stop[TARGET_SIGNAL_WAITING] = 0;
7171 signal_print[TARGET_SIGNAL_WAITING] = 0;
7172 signal_stop[TARGET_SIGNAL_CANCEL] = 0;
7173 signal_print[TARGET_SIGNAL_CANCEL] = 0;
7174
7175 /* Update cached state. */
7176 signal_cache_update (-1);
7177
7178 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
7179 &stop_on_solib_events, _("\
7180 Set stopping for shared library events."), _("\
7181 Show stopping for shared library events."), _("\
7182 If nonzero, gdb will give control to the user when the dynamic linker\n\
7183 notifies gdb of shared library events. The most common event of interest\n\
7184 to the user would be loading/unloading of a new library."),
7185 NULL,
7186 show_stop_on_solib_events,
7187 &setlist, &showlist);
7188
7189 add_setshow_enum_cmd ("follow-fork-mode", class_run,
7190 follow_fork_mode_kind_names,
7191 &follow_fork_mode_string, _("\
7192 Set debugger response to a program call of fork or vfork."), _("\
7193 Show debugger response to a program call of fork or vfork."), _("\
7194 A fork or vfork creates a new process. follow-fork-mode can be:\n\
7195 parent - the original process is debugged after a fork\n\
7196 child - the new process is debugged after a fork\n\
7197 The unfollowed process will continue to run.\n\
7198 By default, the debugger will follow the parent process."),
7199 NULL,
7200 show_follow_fork_mode_string,
7201 &setlist, &showlist);
7202
7203 add_setshow_enum_cmd ("follow-exec-mode", class_run,
7204 follow_exec_mode_names,
7205 &follow_exec_mode_string, _("\
7206 Set debugger response to a program call of exec."), _("\
7207 Show debugger response to a program call of exec."), _("\
7208 An exec call replaces the program image of a process.\n\
7209 \n\
7210 follow-exec-mode can be:\n\
7211 \n\
7212 new - the debugger creates a new inferior and rebinds the process\n\
7213 to this new inferior. The program the process was running before\n\
7214 the exec call can be restarted afterwards by restarting the original\n\
7215 inferior.\n\
7216 \n\
7217 same - the debugger keeps the process bound to the same inferior.\n\
7218 The new executable image replaces the previous executable loaded in\n\
7219 the inferior. Restarting the inferior after the exec call restarts\n\
7220 the executable the process was running after the exec call.\n\
7221 \n\
7222 By default, the debugger will use the same inferior."),
7223 NULL,
7224 show_follow_exec_mode_string,
7225 &setlist, &showlist);
7226
7227 add_setshow_enum_cmd ("scheduler-locking", class_run,
7228 scheduler_enums, &scheduler_mode, _("\
7229 Set mode for locking scheduler during execution."), _("\
7230 Show mode for locking scheduler during execution."), _("\
7231 off == no locking (threads may preempt at any time)\n\
7232 on == full locking (no thread except the current thread may run)\n\
7233 step == scheduler locked during every single-step operation.\n\
7234 In this mode, no other thread may run during a step command.\n\
7235 Other threads may run while stepping over a function call ('next')."),
7236 set_schedlock_func, /* traps on target vector */
7237 show_scheduler_mode,
7238 &setlist, &showlist);
7239
7240 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
7241 Set mode for resuming threads of all processes."), _("\
7242 Show mode for resuming threads of all processes."), _("\
7243 When on, execution commands (such as 'continue' or 'next') resume all\n\
7244 threads of all processes. When off (which is the default), execution\n\
7245 commands only resume the threads of the current process. The set of\n\
7246 threads that are resumed is further refined by the scheduler-locking\n\
7247 mode (see help set scheduler-locking)."),
7248 NULL,
7249 show_schedule_multiple,
7250 &setlist, &showlist);
7251
7252 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
7253 Set mode of the step operation."), _("\
7254 Show mode of the step operation."), _("\
7255 When set, doing a step over a function without debug line information\n\
7256 will stop at the first instruction of that function. Otherwise, the\n\
7257 function is skipped and the step command stops at a different source line."),
7258 NULL,
7259 show_step_stop_if_no_debug,
7260 &setlist, &showlist);
7261
7262 add_setshow_enum_cmd ("displaced-stepping", class_run,
7263 can_use_displaced_stepping_enum,
7264 &can_use_displaced_stepping, _("\
7265 Set debugger's willingness to use displaced stepping."), _("\
7266 Show debugger's willingness to use displaced stepping."), _("\
7267 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
7268 supported by the target architecture. If off, gdb will not use displaced\n\
7269 stepping to step over breakpoints, even if such is supported by the target\n\
7270 architecture. If auto (which is the default), gdb will use displaced stepping\n\
7271 if the target architecture supports it and non-stop mode is active, but will not\n\
7272 use it in all-stop mode (see help set non-stop)."),
7273 NULL,
7274 show_can_use_displaced_stepping,
7275 &setlist, &showlist);
7276
7277 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
7278 &exec_direction, _("Set direction of execution.\n\
7279 Options are 'forward' or 'reverse'."),
7280 _("Show direction of execution (forward/reverse)."),
7281 _("Tells gdb whether to execute forward or backward."),
7282 set_exec_direction_func, show_exec_direction_func,
7283 &setlist, &showlist);
7284
7285 /* Set/show detach-on-fork: user-settable mode. */
7286
7287 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
7288 Set whether gdb will detach the child of a fork."), _("\
7289 Show whether gdb will detach the child of a fork."), _("\
7290 Tells gdb whether to detach the child of a fork."),
7291 NULL, NULL, &setlist, &showlist);
7292
7293 /* Set/show disable address space randomization mode. */
7294
7295 add_setshow_boolean_cmd ("disable-randomization", class_support,
7296 &disable_randomization, _("\
7297 Set disabling of debuggee's virtual address space randomization."), _("\
7298 Show disabling of debuggee's virtual address space randomization."), _("\
7299 When this mode is on (which is the default), randomization of the virtual\n\
7300 address space is disabled. Standalone programs run with the randomization\n\
7301 enabled by default on some platforms."),
7302 &set_disable_randomization,
7303 &show_disable_randomization,
7304 &setlist, &showlist);
7305
7306 /* ptid initializations */
7307 inferior_ptid = null_ptid;
7308 target_last_wait_ptid = minus_one_ptid;
7309
7310 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
7311 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
7312 observer_attach_thread_exit (infrun_thread_thread_exit);
7313 observer_attach_inferior_exit (infrun_inferior_exit);
7314
7315   /* Explicitly create without lookup, since a lookup would try to create
7316      a void-typed value, and when we get here, gdbarch isn't initialized
7317      yet.  At this point, we're quite sure there isn't another convenience
7318      variable of the same name.  */
7319 create_internalvar_type_lazy ("_siginfo", siginfo_make_value);
7320
7321 add_setshow_boolean_cmd ("observer", no_class,
7322 &observer_mode_1, _("\
7323 Set whether gdb controls the inferior in observer mode."), _("\
7324 Show whether gdb controls the inferior in observer mode."), _("\
7325 In observer mode, GDB can get data from the inferior, but not\n\
7326 affect its execution. Registers and memory may not be changed,\n\
7327 breakpoints may not be set, and the program cannot be interrupted\n\
7328 or signalled."),
7329 set_observer_mode,
7330 show_observer_mode,
7331 &setlist,
7332 &showlist);
7333 }