1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995,
5 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
6 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "gdb_string.h"
25 #include <ctype.h>
26 #include "symtab.h"
27 #include "frame.h"
28 #include "inferior.h"
29 #include "exceptions.h"
30 #include "breakpoint.h"
31 #include "gdb_wait.h"
32 #include "gdbcore.h"
33 #include "gdbcmd.h"
34 #include "cli/cli-script.h"
35 #include "target.h"
36 #include "gdbthread.h"
37 #include "annotate.h"
38 #include "symfile.h"
39 #include "top.h"
40 #include <signal.h>
41 #include "inf-loop.h"
42 #include "regcache.h"
43 #include "value.h"
44 #include "observer.h"
45 #include "language.h"
46 #include "solib.h"
47 #include "main.h"
48 #include "dictionary.h"
49 #include "block.h"
50 #include "gdb_assert.h"
51 #include "mi/mi-common.h"
52 #include "event-top.h"
53 #include "record.h"
54 #include "inline-frame.h"
55 #include "jit.h"
56 #include "tracepoint.h"
57 #include "continuations.h"
58
59 /* Prototypes for local functions */
60
61 static void signals_info (char *, int);
62
63 static void handle_command (char *, int);
64
65 static void sig_print_info (enum target_signal);
66
67 static void sig_print_header (void);
68
69 static void resume_cleanups (void *);
70
71 static int hook_stop_stub (void *);
72
73 static int restore_selected_frame (void *);
74
75 static int follow_fork (void);
76
77 static void set_schedlock_func (char *args, int from_tty,
78 struct cmd_list_element *c);
79
80 static int currently_stepping (struct thread_info *tp);
81
82 static int currently_stepping_or_nexting_callback (struct thread_info *tp,
83 void *data);
84
85 static void xdb_handle_command (char *args, int from_tty);
86
87 static int prepare_to_proceed (int);
88
89 static void print_exited_reason (int exitstatus);
90
91 static void print_signal_exited_reason (enum target_signal siggnal);
92
93 static void print_no_history_reason (void);
94
95 static void print_signal_received_reason (enum target_signal siggnal);
96
97 static void print_end_stepping_range_reason (void);
98
99 void _initialize_infrun (void);
100
101 void nullify_last_target_wait_ptid (void);
102
103 static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);
104
105 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
106
107 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
108
109 /* When set, stop the 'step' command if we enter a function which has
110 no line number information. The normal behavior is that we step
111 over such functions. */
112 int step_stop_if_no_debug = 0;
113 static void
114 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
115 struct cmd_list_element *c, const char *value)
116 {
117 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
118 }
119
120 /* In asynchronous mode, but simulating synchronous execution. */
121
122 int sync_execution = 0;
123
124 /* wait_for_inferior and normal_stop use this to notify the user
125 when the inferior stopped in a different thread than it had been
126 running in. */
127
128 static ptid_t previous_inferior_ptid;
129
130 /* Default behavior is to detach newly forked processes (legacy). */
131 int detach_fork = 1;
132
133 int debug_displaced = 0;
134 static void
135 show_debug_displaced (struct ui_file *file, int from_tty,
136 struct cmd_list_element *c, const char *value)
137 {
138 fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
139 }
140
141 int debug_infrun = 0;
142 static void
143 show_debug_infrun (struct ui_file *file, int from_tty,
144 struct cmd_list_element *c, const char *value)
145 {
146 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
147 }
148
149 /* If the program uses ELF-style shared libraries, then calls to
150 functions in shared libraries go through stubs, which live in a
151 table called the PLT (Procedure Linkage Table). The first time the
152 function is called, the stub sends control to the dynamic linker,
153 which looks up the function's real address, patches the stub so
154 that future calls will go directly to the function, and then passes
155 control to the function.
156
157 If we are stepping at the source level, we don't want to see any of
158 this --- we just want to skip over the stub and the dynamic linker.
159 The simple approach is to single-step until control leaves the
160 dynamic linker.
161
162 However, on some systems (e.g., Red Hat's 5.2 distribution) the
163 dynamic linker calls functions in the shared C library, so you
164 can't tell from the PC alone whether the dynamic linker is still
165 running. In this case, we use a step-resume breakpoint to get us
166 past the dynamic linker, as if we were using "next" to step over a
167 function call.
168
169 in_solib_dynsym_resolve_code() says whether we're in the dynamic
170 linker code or not. Normally, this means we single-step. However,
171 if SKIP_SOLIB_RESOLVER returns non-zero, then its value is an
172 address where we can place a step-resume breakpoint to get past the
173 linker's symbol resolution function.
174
175 in_solib_dynsym_resolve_code() can generally be implemented in a
176 pretty portable way, by comparing the PC against the address ranges
177 of the dynamic linker's sections.
178
179 SKIP_SOLIB_RESOLVER is generally going to be system-specific, since
180 it depends on internal details of the dynamic linker. It's usually
181 not too hard to figure out where to put a breakpoint, but it
182 certainly isn't portable. SKIP_SOLIB_RESOLVER should do plenty of
183 sanity checking. If it can't figure things out, returning zero and
184 getting the (possibly confusing) stepping behavior is better than
185 signalling an error, which will obscure the change in the
186 inferior's state. */
187
188 /* This function returns TRUE if pc is the address of an instruction
189 that lies within the dynamic linker (such as the event hook, or the
190 dld itself).
191
192 This function must be used only when a dynamic linker event has
193 been caught, and the inferior is being stepped out of the hook, or
194 undefined results are guaranteed. */
195
196 #ifndef SOLIB_IN_DYNAMIC_LINKER
197 #define SOLIB_IN_DYNAMIC_LINKER(pid,pc) 0
198 #endif
199
200 /* "Observer mode" is somewhat like a more extreme version of
201 non-stop, in which all GDB operations that might affect the
202 target's execution have been disabled. */
203
204 static int non_stop_1 = 0;
205
206 int observer_mode = 0;
207 static int observer_mode_1 = 0;
208
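/* The "set observer" hook.  Refuses to change the setting while the
   inferior is running; otherwise propagates the new observer_mode value
   into the individual may-* target permissions, forcing non-stop (and
   async, with pagination off) when the mode is turned on.  */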
209 static void
210 set_observer_mode (char *args, int from_tty,
211 struct cmd_list_element *c)
212 {
213 extern int pagination_enabled;
214
215 if (target_has_execution)
216 {
217 observer_mode_1 = observer_mode;
218 error (_("Cannot change this setting while the inferior is running."));
219 }
220
221 observer_mode = observer_mode_1;
222
223 may_write_registers = !observer_mode;
224 may_write_memory = !observer_mode;
225 may_insert_breakpoints = !observer_mode;
226 may_insert_tracepoints = !observer_mode;
227 /* We can insert fast tracepoints in or out of observer mode,
228 but enable them if we're going into this mode. */
229 if (observer_mode)
230 may_insert_fast_tracepoints = 1;
231 may_stop = !observer_mode;
232 update_target_permissions ();
233
234 /* Going *into* observer mode we must force non-stop, then
235 going out we leave it that way. */
236 if (observer_mode)
237 {
238 target_async_permitted = 1;
239 pagination_enabled = 0;
240 non_stop = non_stop_1 = 1;
241 }
242
243 if (from_tty)
244 printf_filtered (_("Observer mode is now %s.\n"),
245 (observer_mode ? "on" : "off"));
246 }
247
248 static void
249 show_observer_mode (struct ui_file *file, int from_tty,
250 struct cmd_list_element *c, const char *value)
251 {
252 fprintf_filtered (file, _("Observer mode is %s.\n"), value);
253 }
254
255 /* This updates the value of observer mode based on changes in
256 permissions. Note that we are deliberately ignoring the values of
257 may-write-registers and may-write-memory, since the user may have
258 reason to enable these during a session, for instance to turn on a
259 debugging-related global. */
260
261 void
262 update_observer_mode (void)
263 {
264 int newval;
265
266 newval = (!may_insert_breakpoints
267 && !may_insert_tracepoints
268 && may_insert_fast_tracepoints
269 && !may_stop
270 && non_stop);
271
272 /* Let the user know if things change. */
273 if (newval != observer_mode)
274 printf_filtered (_("Observer mode is now %s.\n"),
275 (newval ? "on" : "off"));
276
277 observer_mode = observer_mode_1 = newval;
278 }
279
280 /* Tables of how to react to signals; the user sets them. */
281
282 static unsigned char *signal_stop;
283 static unsigned char *signal_print;
284 static unsigned char *signal_program;
285
286 /* Table of signals that the target may silently handle.
287 This is automatically determined from the flags above,
288 and simply cached here. */
289 static unsigned char *signal_pass;
290
291 #define SET_SIGS(nsigs,sigs,flags) \
292 do { \
293 int signum = (nsigs); \
294 while (signum-- > 0) \
295 if ((sigs)[signum]) \
296 (flags)[signum] = 1; \
297 } while (0)
298
299 #define UNSET_SIGS(nsigs,sigs,flags) \
300 do { \
301 int signum = (nsigs); \
302 while (signum-- > 0) \
303 if ((sigs)[signum]) \
304 (flags)[signum] = 0; \
305 } while (0)
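/* A minimal usage sketch (the array names are illustrative), assuming an
   NSIGS-element selection array SIGS such as the one built by the "handle"
   command: SET_SIGS (nsigs, sigs, signal_stop) marks every selected signal
   as one that stops the inferior, while UNSET_SIGS (nsigs, sigs,
   signal_print) stops announcing the selected signals.  */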
306
307 /* Value to pass to target_resume() to cause all threads to resume. */
308
309 #define RESUME_ALL minus_one_ptid
310
311 /* Command list pointer for the "stop" placeholder. */
312
313 static struct cmd_list_element *stop_command;
314
315 /* Function inferior was in as of last step command. */
316
317 static struct symbol *step_start_function;
318
319 /* Nonzero if we want to give control to the user when we're notified
320 of shared library events by the dynamic linker. */
321 int stop_on_solib_events;
322 static void
323 show_stop_on_solib_events (struct ui_file *file, int from_tty,
324 struct cmd_list_element *c, const char *value)
325 {
326 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
327 value);
328 }
329
330 /* Nonzero means we are expecting a trace trap and should stop the
331 inferior and return silently when it happens. */
332
333 int stop_after_trap;
334
335 /* Save register contents here when executing a "finish" command or when
336 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
337 Thus this contains the return value from the called function (assuming
338 values are returned in a register). */
339
340 struct regcache *stop_registers;
341
342 /* Nonzero after stop if current stack frame should be printed. */
343
344 static int stop_print_frame;
345
346 /* This is a cached copy of the pid/waitstatus of the last event
347 returned by target_wait()/deprecated_target_wait_hook(). This
348 information is returned by get_last_target_status(). */
349 static ptid_t target_last_wait_ptid;
350 static struct target_waitstatus target_last_waitstatus;
351
352 static void context_switch (ptid_t ptid);
353
354 void init_thread_stepping_state (struct thread_info *tss);
355
356 void init_infwait_state (void);
357
358 static const char follow_fork_mode_child[] = "child";
359 static const char follow_fork_mode_parent[] = "parent";
360
361 static const char *follow_fork_mode_kind_names[] = {
362 follow_fork_mode_child,
363 follow_fork_mode_parent,
364 NULL
365 };
366
367 static const char *follow_fork_mode_string = follow_fork_mode_parent;
368 static void
369 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
370 struct cmd_list_element *c, const char *value)
371 {
372 fprintf_filtered (file,
373 _("Debugger response to a program "
374 "call of fork or vfork is \"%s\".\n"),
375 value);
376 }
377 \f
378
379 /* Tell the target to follow the fork we're stopped at. Returns true
380 if the inferior should be resumed; false, if the target for some
381 reason decided it's best not to resume. */
382
383 static int
384 follow_fork (void)
385 {
386 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
387 int should_resume = 1;
388 struct thread_info *tp;
389
390 /* Copy user stepping state to the new inferior thread. FIXME: the
391 followed fork child thread should have a copy of most of the
392 parent thread structure's run control related fields, not just these.
393 Initialized to avoid "may be used uninitialized" warnings from gcc. */
394 struct breakpoint *step_resume_breakpoint = NULL;
395 struct breakpoint *exception_resume_breakpoint = NULL;
396 CORE_ADDR step_range_start = 0;
397 CORE_ADDR step_range_end = 0;
398 struct frame_id step_frame_id = { 0 };
399
400 if (!non_stop)
401 {
402 ptid_t wait_ptid;
403 struct target_waitstatus wait_status;
404
405 /* Get the last target status returned by target_wait(). */
406 get_last_target_status (&wait_ptid, &wait_status);
407
408 /* If not stopped at a fork event, then there's nothing else to
409 do. */
410 if (wait_status.kind != TARGET_WAITKIND_FORKED
411 && wait_status.kind != TARGET_WAITKIND_VFORKED)
412 return 1;
413
414 /* Check if we've switched over from WAIT_PTID since the event was
415 reported. */
416 if (!ptid_equal (wait_ptid, minus_one_ptid)
417 && !ptid_equal (inferior_ptid, wait_ptid))
418 {
419 /* We did. Switch back to WAIT_PTID thread, to tell the
420 target to follow it (in either direction). We'll
421 afterwards refuse to resume, and inform the user what
422 happened. */
423 switch_to_thread (wait_ptid);
424 should_resume = 0;
425 }
426 }
427
428 tp = inferior_thread ();
429
430 /* If there were any forks/vforks that were caught and are now to be
431 followed, then do so now. */
432 switch (tp->pending_follow.kind)
433 {
434 case TARGET_WAITKIND_FORKED:
435 case TARGET_WAITKIND_VFORKED:
436 {
437 ptid_t parent, child;
438
439 /* If the user did a next/step, etc, over a fork call,
440 preserve the stepping state in the fork child. */
441 if (follow_child && should_resume)
442 {
443 step_resume_breakpoint = clone_momentary_breakpoint
444 (tp->control.step_resume_breakpoint);
445 step_range_start = tp->control.step_range_start;
446 step_range_end = tp->control.step_range_end;
447 step_frame_id = tp->control.step_frame_id;
448 exception_resume_breakpoint
449 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
450
451 /* For now, delete the parent's sr breakpoint; otherwise,
452 parent/child sr breakpoints are considered duplicates,
453 and the child version will not be installed. Remove
454 this when the breakpoints module becomes aware of
455 inferiors and address spaces. */
456 delete_step_resume_breakpoint (tp);
457 tp->control.step_range_start = 0;
458 tp->control.step_range_end = 0;
459 tp->control.step_frame_id = null_frame_id;
460 delete_exception_resume_breakpoint (tp);
461 }
462
463 parent = inferior_ptid;
464 child = tp->pending_follow.value.related_pid;
465
466 /* Tell the target to do whatever is necessary to follow
467 either parent or child. */
468 if (target_follow_fork (follow_child))
469 {
470 /* Target refused to follow, or there's some other reason
471 we shouldn't resume. */
472 should_resume = 0;
473 }
474 else
475 {
476 /* This pending follow fork event is now handled, one way
477 or another. The previously selected thread may be gone
478 from the lists by now, but if it is still around, we need
479 to clear the pending follow request. */
480 tp = find_thread_ptid (parent);
481 if (tp)
482 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
483
484 /* This makes sure we don't try to apply the "Switched
485 over from WAIT_PID" logic above. */
486 nullify_last_target_wait_ptid ();
487
488 /* If we followed the child, switch to it... */
489 if (follow_child)
490 {
491 switch_to_thread (child);
492
493 /* ... and preserve the stepping state, in case the
494 user was stepping over the fork call. */
495 if (should_resume)
496 {
497 tp = inferior_thread ();
498 tp->control.step_resume_breakpoint
499 = step_resume_breakpoint;
500 tp->control.step_range_start = step_range_start;
501 tp->control.step_range_end = step_range_end;
502 tp->control.step_frame_id = step_frame_id;
503 tp->control.exception_resume_breakpoint
504 = exception_resume_breakpoint;
505 }
506 else
507 {
508 /* If we get here, it was because we're trying to
509 resume from a fork catchpoint, but the user
510 has switched threads away from the thread that
511 forked. In that case, the resume command
512 issued is most likely not applicable to the
513 child, so just warn, and refuse to resume. */
514 warning (_("Not resuming: switched threads "
515 "before following fork child."));
516 }
517
518 /* Reset breakpoints in the child as appropriate. */
519 follow_inferior_reset_breakpoints ();
520 }
521 else
522 switch_to_thread (parent);
523 }
524 }
525 break;
526 case TARGET_WAITKIND_SPURIOUS:
527 /* Nothing to follow. */
528 break;
529 default:
530 internal_error (__FILE__, __LINE__,
531 "Unexpected pending_follow.kind %d\n",
532 tp->pending_follow.kind);
533 break;
534 }
535
536 return should_resume;
537 }
538
539 void
540 follow_inferior_reset_breakpoints (void)
541 {
542 struct thread_info *tp = inferior_thread ();
543
544 /* Was there a step_resume breakpoint? (There was if the user
545 did a "next" at the fork() call.) If so, explicitly reset its
546 thread number.
547
548 step_resumes are a form of bp that are made to be per-thread.
549 Since we created the step_resume bp when the parent process
550 was being debugged, and now are switching to the child process,
551 from the breakpoint package's viewpoint, that's a switch of
552 "threads". We must update the bp's notion of which thread
553 it is for, or it'll be ignored when it triggers. */
554
555 if (tp->control.step_resume_breakpoint)
556 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
557
558 if (tp->control.exception_resume_breakpoint)
559 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
560
561 /* Reinsert all breakpoints in the child. The user may have set
562 breakpoints after catching the fork, in which case those
563 were never set in the child, but only in the parent. This makes
564 sure the inserted breakpoints match the breakpoint list. */
565
566 breakpoint_re_set ();
567 insert_breakpoints ();
568 }
569
570 /* The child has exited or execed: resume threads of the parent the
571 user wanted to be executing. */
572
573 static int
574 proceed_after_vfork_done (struct thread_info *thread,
575 void *arg)
576 {
577 int pid = * (int *) arg;
578
579 if (ptid_get_pid (thread->ptid) == pid
580 && is_running (thread->ptid)
581 && !is_executing (thread->ptid)
582 && !thread->stop_requested
583 && thread->suspend.stop_signal == TARGET_SIGNAL_0)
584 {
585 if (debug_infrun)
586 fprintf_unfiltered (gdb_stdlog,
587 "infrun: resuming vfork parent thread %s\n",
588 target_pid_to_str (thread->ptid));
589
590 switch_to_thread (thread->ptid);
591 clear_proceed_status ();
592 proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);
593 }
594
595 return 0;
596 }
597
598 /* Called whenever we notice an exec or exit event, to handle
599 detaching or resuming a vfork parent. */
600
601 static void
602 handle_vfork_child_exec_or_exit (int exec)
603 {
604 struct inferior *inf = current_inferior ();
605
606 if (inf->vfork_parent)
607 {
608 int resume_parent = -1;
609
610 /* This exec or exit marks the end of the shared memory region
611 between the parent and the child. If the user wanted to
612 detach from the parent, now is the time. */
613
614 if (inf->vfork_parent->pending_detach)
615 {
616 struct thread_info *tp;
617 struct cleanup *old_chain;
618 struct program_space *pspace;
619 struct address_space *aspace;
620
621 /* follow-fork child, detach-on-fork on. */
622
623 old_chain = make_cleanup_restore_current_thread ();
624
625 /* We're letting go of the parent. */
626 tp = any_live_thread_of_process (inf->vfork_parent->pid);
627 switch_to_thread (tp->ptid);
628
629 /* We're about to detach from the parent, which implicitly
630 removes breakpoints from its address space. There's a
631 catch here: we want to reuse the spaces for the child,
632 but, parent/child are still sharing the pspace at this
633 point, although the exec in reality makes the kernel give
634 the child a fresh set of new pages. The problem here is
635 that the breakpoints module, being unaware of this, would
636 likely choose the child process to write to the parent
637 address space. Swapping the child temporarily away from
638 the spaces has the desired effect. Yes, this is "sort
639 of" a hack. */
640
641 pspace = inf->pspace;
642 aspace = inf->aspace;
643 inf->aspace = NULL;
644 inf->pspace = NULL;
645
646 if (debug_infrun || info_verbose)
647 {
648 target_terminal_ours ();
649
650 if (exec)
651 fprintf_filtered (gdb_stdlog,
652 "Detaching vfork parent process "
653 "%d after child exec.\n",
654 inf->vfork_parent->pid);
655 else
656 fprintf_filtered (gdb_stdlog,
657 "Detaching vfork parent process "
658 "%d after child exit.\n",
659 inf->vfork_parent->pid);
660 }
661
662 target_detach (NULL, 0);
663
664 /* Put it back. */
665 inf->pspace = pspace;
666 inf->aspace = aspace;
667
668 do_cleanups (old_chain);
669 }
670 else if (exec)
671 {
672 /* We're staying attached to the parent, so really give the
673 child a new address space. */
674 inf->pspace = add_program_space (maybe_new_address_space ());
675 inf->aspace = inf->pspace->aspace;
676 inf->removable = 1;
677 set_current_program_space (inf->pspace);
678
679 resume_parent = inf->vfork_parent->pid;
680
681 /* Break the bonds. */
682 inf->vfork_parent->vfork_child = NULL;
683 }
684 else
685 {
686 struct cleanup *old_chain;
687 struct program_space *pspace;
688
689 /* If this is a vfork child exiting, then the pspace and
690 aspaces were shared with the parent. Since we're
691 reporting the process exit, we'll be mourning all that is
692 found in the address space, and switching to null_ptid,
693 preparing to start a new inferior. But, since we don't
694 want to clobber the parent's address/program spaces, we
695 go ahead and create a new one for this exiting
696 inferior. */
697
698 /* Switch to null_ptid, so that clone_program_space doesn't want
699 to read the selected frame of a dead process. */
700 old_chain = save_inferior_ptid ();
701 inferior_ptid = null_ptid;
702
703 /* This inferior is dead, so avoid giving the breakpoints
704 module the option to write through to it (cloning a
705 program space resets breakpoints). */
706 inf->aspace = NULL;
707 inf->pspace = NULL;
708 pspace = add_program_space (maybe_new_address_space ());
709 set_current_program_space (pspace);
710 inf->removable = 1;
711 clone_program_space (pspace, inf->vfork_parent->pspace);
712 inf->pspace = pspace;
713 inf->aspace = pspace->aspace;
714
715 /* Put back inferior_ptid. We'll continue mourning this
716 inferior. */
717 do_cleanups (old_chain);
718
719 resume_parent = inf->vfork_parent->pid;
720 /* Break the bonds. */
721 inf->vfork_parent->vfork_child = NULL;
722 }
723
724 inf->vfork_parent = NULL;
725
726 gdb_assert (current_program_space == inf->pspace);
727
728 if (non_stop && resume_parent != -1)
729 {
730 /* If the user wanted the parent to be running, let it go
731 free now. */
732 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
733
734 if (debug_infrun)
735 fprintf_unfiltered (gdb_stdlog,
736 "infrun: resuming vfork parent process %d\n",
737 resume_parent);
738
739 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
740
741 do_cleanups (old_chain);
742 }
743 }
744 }
745
746 /* Enum strings for "set|show follow-exec-mode". */
747
748 static const char follow_exec_mode_new[] = "new";
749 static const char follow_exec_mode_same[] = "same";
750 static const char *follow_exec_mode_names[] =
751 {
752 follow_exec_mode_new,
753 follow_exec_mode_same,
754 NULL,
755 };
756
757 static const char *follow_exec_mode_string = follow_exec_mode_same;
758 static void
759 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
760 struct cmd_list_element *c, const char *value)
761 {
762 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
763 }
764
765 /* EXECD_PATHNAME is assumed to be non-NULL. */
766
767 static void
768 follow_exec (ptid_t pid, char *execd_pathname)
769 {
770 struct thread_info *th = inferior_thread ();
771 struct inferior *inf = current_inferior ();
772
773 /* This is an exec event that we actually wish to pay attention to.
774 Refresh our symbol table to the newly exec'd program, remove any
775 momentary bp's, etc.
776
777 If there are breakpoints, they aren't really inserted now,
778 since the exec() transformed our inferior into a fresh set
779 of instructions.
780
781 We want to preserve symbolic breakpoints on the list, since
782 we have hopes that they can be reset after the new a.out's
783 symbol table is read.
784
785 However, any "raw" breakpoints must be removed from the list
786 (e.g., the solib bp's), since their address is probably invalid
787 now.
788
789 And, we DON'T want to call delete_breakpoints() here, since
790 that may write the bp's "shadow contents" (the instruction
791 value that was overwritten with a TRAP instruction). Since
792 we now have a new a.out, those shadow contents aren't valid. */
793
794 mark_breakpoints_out ();
795
796 update_breakpoints_after_exec ();
797
798 /* If there was one, it's gone now. We cannot truly step-to-next
799 statement through an exec(). */
800 th->control.step_resume_breakpoint = NULL;
801 th->control.exception_resume_breakpoint = NULL;
802 th->control.step_range_start = 0;
803 th->control.step_range_end = 0;
804
805 /* The target reports the exec event to the main thread, even if
806 some other thread does the exec, and even if the main thread was
807 already stopped --- if debugging in non-stop mode, it's possible
808 the user had the main thread held stopped in the previous image
809 --- release it now. This is the same behavior as step-over-exec
810 with scheduler-locking on in all-stop mode. */
811 th->stop_requested = 0;
812
813 /* What is this a.out's name? */
814 printf_unfiltered (_("%s is executing new program: %s\n"),
815 target_pid_to_str (inferior_ptid),
816 execd_pathname);
817
818 /* We've followed the inferior through an exec. Therefore, the
819 inferior has essentially been killed & reborn. */
820
821 gdb_flush (gdb_stdout);
822
823 breakpoint_init_inferior (inf_execd);
824
825 if (gdb_sysroot && *gdb_sysroot)
826 {
827 char *name = alloca (strlen (gdb_sysroot)
828 + strlen (execd_pathname)
829 + 1);
830
831 strcpy (name, gdb_sysroot);
832 strcat (name, execd_pathname);
833 execd_pathname = name;
834 }
835
836 /* Reset the shared library package. This ensures that we get a
837 shlib event when the child reaches "_start", at which point the
838 dld will have had a chance to initialize the child. */
839 /* Also, loading a symbol file below may trigger symbol lookups, and
840 we don't want those to be satisfied by the libraries of the
841 previous incarnation of this process. */
842 no_shared_libraries (NULL, 0);
843
844 if (follow_exec_mode_string == follow_exec_mode_new)
845 {
846 struct program_space *pspace;
847
848 /* The user wants to keep the old inferior and program spaces
849 around. Create a new fresh one, and switch to it. */
850
851 inf = add_inferior (current_inferior ()->pid);
852 pspace = add_program_space (maybe_new_address_space ());
853 inf->pspace = pspace;
854 inf->aspace = pspace->aspace;
855
856 exit_inferior_num_silent (current_inferior ()->num);
857
858 set_current_inferior (inf);
859 set_current_program_space (pspace);
860 }
861
862 gdb_assert (current_program_space == inf->pspace);
863
864 /* That a.out is now the one to use. */
865 exec_file_attach (execd_pathname, 0);
866
867 /* SYMFILE_DEFER_BP_RESET is used here, because the proper displacement
868 for a PIE (Position Independent Executable) main symbol file will only
869 get applied by solib_create_inferior_hook below. breakpoint_re_set
870 would fail to insert the breakpoints with the zero displacement. */
871
872 symbol_file_add (execd_pathname, SYMFILE_MAINLINE | SYMFILE_DEFER_BP_RESET,
873 NULL, 0);
874
875 set_initial_language ();
876
877 #ifdef SOLIB_CREATE_INFERIOR_HOOK
878 SOLIB_CREATE_INFERIOR_HOOK (PIDGET (inferior_ptid));
879 #else
880 solib_create_inferior_hook (0);
881 #endif
882
883 jit_inferior_created_hook ();
884
885 breakpoint_re_set ();
886
887 /* Reinsert all breakpoints. (Those which were symbolic have
888 been reset to the proper address in the new a.out, thanks
889 to symbol_file_command...). */
890 insert_breakpoints ();
891
892 /* The next resume of this inferior should bring it to the shlib
893 startup breakpoints. (If the user had also set bp's on
894 "main" from the old (parent) process, then they'll auto-
895 matically get reset there in the new process.) */
896 }
897
898 /* Non-zero if we are just simulating a single-step. This is needed
899 because we cannot remove the breakpoints in the inferior process
900 until after the `wait' in `wait_for_inferior'. */
901 static int singlestep_breakpoints_inserted_p = 0;
902
903 /* The thread we inserted single-step breakpoints for. */
904 static ptid_t singlestep_ptid;
905
906 /* PC when we started this single-step. */
907 static CORE_ADDR singlestep_pc;
908
909 /* If another thread hit the singlestep breakpoint, we save the original
910 thread here so that we can resume single-stepping it later. */
911 static ptid_t saved_singlestep_ptid;
912 static int stepping_past_singlestep_breakpoint;
913
914 /* If not equal to null_ptid, this means that after stepping over a
915 breakpoint is finished, we need to switch to deferred_step_ptid, and step it.
916
917 The use case is when one thread has hit a breakpoint, and then the user
918 has switched to another thread and issued 'step'. We need to step over
919 the breakpoint in the thread which hit the breakpoint, but then continue
920 stepping the thread the user has selected. */
921 static ptid_t deferred_step_ptid;
922 \f
923 /* Displaced stepping. */
924
925 /* In non-stop debugging mode, we must take special care to manage
926 breakpoints properly; in particular, the traditional strategy for
927 stepping a thread past a breakpoint it has hit is unsuitable.
928 'Displaced stepping' is a tactic for stepping one thread past a
929 breakpoint it has hit while ensuring that other threads running
930 concurrently will hit the breakpoint as they should.
931
932 The traditional way to step a thread T off a breakpoint in a
933 multi-threaded program in all-stop mode is as follows:
934
935 a0) Initially, all threads are stopped, and breakpoints are not
936 inserted.
937 a1) We single-step T, leaving breakpoints uninserted.
938 a2) We insert breakpoints, and resume all threads.
939
940 In non-stop debugging, however, this strategy is unsuitable: we
941 don't want to have to stop all threads in the system in order to
942 continue or step T past a breakpoint. Instead, we use displaced
943 stepping:
944
945 n0) Initially, T is stopped, other threads are running, and
946 breakpoints are inserted.
947 n1) We copy the instruction "under" the breakpoint to a separate
948 location, outside the main code stream, making any adjustments
949 to the instruction, register, and memory state as directed by
950 T's architecture.
951 n2) We single-step T over the instruction at its new location.
952 n3) We adjust the resulting register and memory state as directed
953 by T's architecture. This includes resetting T's PC to point
954 back into the main instruction stream.
955 n4) We resume T.
956
957 This approach depends on the following gdbarch methods:
958
959 - gdbarch_max_insn_length and gdbarch_displaced_step_location
960 indicate where to copy the instruction, and how much space must
961 be reserved there. We use these in step n1.
962
963 - gdbarch_displaced_step_copy_insn copies an instruction to a new
964 address, and makes any necessary adjustments to the instruction,
965 register contents, and memory. We use this in step n1.
966
967 - gdbarch_displaced_step_fixup adjusts registers and memory after
968 we have successfully single-stepped the instruction, to yield the
969 same effect the instruction would have had if we had executed it
970 at its original address. We use this in step n3.
971
972 - gdbarch_displaced_step_free_closure provides cleanup.
973
974 The gdbarch_displaced_step_copy_insn and
975 gdbarch_displaced_step_fixup functions must be written so that
976 copying an instruction with gdbarch_displaced_step_copy_insn,
977 single-stepping across the copied instruction, and then applying
978 gdbarch_displaced_step_fixup should have the same effects on the
979 thread's memory and registers as stepping the instruction in place
980 would have. Exactly which responsibilities fall to the copy and
981 which fall to the fixup is up to the author of those functions.
982
983 See the comments in gdbarch.sh for details.
984
985 Note that displaced stepping and software single-step cannot
986 currently be used in combination, although with some care I think
987 they could be made to. Software single-step works by placing
988 breakpoints on all possible subsequent instructions; if the
989 displaced instruction is a PC-relative jump, those breakpoints
990 could fall in very strange places --- on pages that aren't
991 executable, or at addresses that are not proper instruction
992 boundaries. (We do generally let other threads run while we wait
993 to hit the software single-step breakpoint, and they might
994 encounter such a corrupted instruction.) One way to work around
995 this would be to have gdbarch_displaced_step_copy_insn fully
996 simulate the effect of PC-relative instructions (and return NULL)
997 on architectures that use software single-stepping.
998
999 In non-stop mode, we can have independent and simultaneous step
1000 requests, so more than one thread may need to simultaneously step
1001 over a breakpoint. The current implementation assumes there is
1002 only one scratch space per process. In this case, we have to
1003 serialize access to the scratch space. If thread A wants to step
1004 over a breakpoint, but we are currently waiting for some other
1005 thread to complete a displaced step, we leave thread A stopped and
1006 place it in the displaced_step_request_queue. Whenever a displaced
1007 step finishes, we pick the next thread in the queue and start a new
1008 displaced step operation on it. See displaced_step_prepare and
1009 displaced_step_fixup for details. */
1010
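/* In outline, a single displaced step boils down to the sequence below.
   This is only a sketch, with illustrative variable names; the
   authoritative flow is in displaced_step_prepare and displaced_step_fixup
   further down:

     copy = gdbarch_displaced_step_location (gdbarch);
     len = gdbarch_max_insn_length (gdbarch);
     read_memory (copy, saved_bytes, len);       -- save the scratch area
     closure = gdbarch_displaced_step_copy_insn (gdbarch, original,
                                                 copy, regcache);
     regcache_write_pc (regcache, copy);
     ... single-step the thread; then, on the resulting SIGTRAP ...
     write_memory (copy, saved_bytes, len);      -- restore the scratch area
     gdbarch_displaced_step_fixup (gdbarch, closure, original, copy,
                                   regcache);
     gdbarch_displaced_step_free_closure (gdbarch, closure);  */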
1011 struct displaced_step_request
1012 {
1013 ptid_t ptid;
1014 struct displaced_step_request *next;
1015 };
1016
1017 /* Per-inferior displaced stepping state. */
1018 struct displaced_step_inferior_state
1019 {
1020 /* Pointer to next in linked list. */
1021 struct displaced_step_inferior_state *next;
1022
1023 /* The process this displaced step state refers to. */
1024 int pid;
1025
1026 /* A queue of pending displaced stepping requests. One entry per
1027 thread that needs to do a displaced step. */
1028 struct displaced_step_request *step_request_queue;
1029
1030 /* If this is not null_ptid, this is the thread carrying out a
1031 displaced single-step in process PID. This thread's state will
1032 require fixing up once it has completed its step. */
1033 ptid_t step_ptid;
1034
1035 /* The architecture the thread had when we stepped it. */
1036 struct gdbarch *step_gdbarch;
1037
1038 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
1039 for post-step cleanup. */
1040 struct displaced_step_closure *step_closure;
1041
1042 /* The address of the original instruction, and the copy we
1043 made. */
1044 CORE_ADDR step_original, step_copy;
1045
1046 /* Saved contents of copy area. */
1047 gdb_byte *step_saved_copy;
1048 };
1049
1050 /* The list of states of processes presently involved in displaced
1051 stepping. */
1052 static struct displaced_step_inferior_state *displaced_step_inferior_states;
1053
1054 /* Get the displaced stepping state of process PID. */
1055
1056 static struct displaced_step_inferior_state *
1057 get_displaced_stepping_state (int pid)
1058 {
1059 struct displaced_step_inferior_state *state;
1060
1061 for (state = displaced_step_inferior_states;
1062 state != NULL;
1063 state = state->next)
1064 if (state->pid == pid)
1065 return state;
1066
1067 return NULL;
1068 }
1069
1070 /* Add a new displaced stepping state for process PID to the displaced
1071 stepping state list, or return a pointer to the existing entry, if
1072 one already exists. Never returns NULL. */
1073
1074 static struct displaced_step_inferior_state *
1075 add_displaced_stepping_state (int pid)
1076 {
1077 struct displaced_step_inferior_state *state;
1078
1079 for (state = displaced_step_inferior_states;
1080 state != NULL;
1081 state = state->next)
1082 if (state->pid == pid)
1083 return state;
1084
1085 state = xcalloc (1, sizeof (*state));
1086 state->pid = pid;
1087 state->next = displaced_step_inferior_states;
1088 displaced_step_inferior_states = state;
1089
1090 return state;
1091 }
1092
1093 /* If the inferior is in displaced stepping, and ADDR equals the starting
1094 address of the copy area, return the corresponding displaced_step_closure.
1095 Otherwise, return NULL. */
1096
1097 struct displaced_step_closure*
1098 get_displaced_step_closure_by_addr (CORE_ADDR addr)
1099 {
1100 struct displaced_step_inferior_state *displaced
1101 = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1102
1103 /* If checking the mode of displaced instruction in copy area. */
1104 if (displaced && !ptid_equal (displaced->step_ptid, null_ptid)
1105 && (displaced->step_copy == addr))
1106 return displaced->step_closure;
1107
1108 return NULL;
1109 }
1110
1111 /* Remove the displaced stepping state of process PID. */
1112
1113 static void
1114 remove_displaced_stepping_state (int pid)
1115 {
1116 struct displaced_step_inferior_state *it, **prev_next_p;
1117
1118 gdb_assert (pid != 0);
1119
1120 it = displaced_step_inferior_states;
1121 prev_next_p = &displaced_step_inferior_states;
1122 while (it)
1123 {
1124 if (it->pid == pid)
1125 {
1126 *prev_next_p = it->next;
1127 xfree (it);
1128 return;
1129 }
1130
1131 prev_next_p = &it->next;
1132 it = *prev_next_p;
1133 }
1134 }
1135
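/* Discard the displaced stepping state of inferior INF's process, e.g.
   when the inferior exits.  */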
1136 static void
1137 infrun_inferior_exit (struct inferior *inf)
1138 {
1139 remove_displaced_stepping_state (inf->pid);
1140 }
1141
1142 /* Enum strings for "set|show displaced-stepping". */
1143
1144 static const char can_use_displaced_stepping_auto[] = "auto";
1145 static const char can_use_displaced_stepping_on[] = "on";
1146 static const char can_use_displaced_stepping_off[] = "off";
1147 static const char *can_use_displaced_stepping_enum[] =
1148 {
1149 can_use_displaced_stepping_auto,
1150 can_use_displaced_stepping_on,
1151 can_use_displaced_stepping_off,
1152 NULL,
1153 };
1154
1155 /* If ON, and the architecture supports it, GDB will use displaced
1156 stepping to step over breakpoints. If OFF, or if the architecture
1157 doesn't support it, GDB will instead use the traditional
1158 hold-and-step approach. If AUTO (which is the default), GDB will
1159 decide which technique to use to step over breakpoints depending on
1160 which of all-stop or non-stop mode is active --- displaced stepping
1161 in non-stop mode; hold-and-step in all-stop mode. */
1162
1163 static const char *can_use_displaced_stepping =
1164 can_use_displaced_stepping_auto;
1165
1166 static void
1167 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1168 struct cmd_list_element *c,
1169 const char *value)
1170 {
1171 if (can_use_displaced_stepping == can_use_displaced_stepping_auto)
1172 fprintf_filtered (file,
1173 _("Debugger's willingness to use displaced stepping "
1174 "to step over breakpoints is %s (currently %s).\n"),
1175 value, non_stop ? "on" : "off");
1176 else
1177 fprintf_filtered (file,
1178 _("Debugger's willingness to use displaced stepping "
1179 "to step over breakpoints is %s.\n"), value);
1180 }
1181
1182 /* Return non-zero if displaced stepping can/should be used to step
1183 over breakpoints. */
1184
1185 static int
1186 use_displaced_stepping (struct gdbarch *gdbarch)
1187 {
1188 return (((can_use_displaced_stepping == can_use_displaced_stepping_auto
1189 && non_stop)
1190 || can_use_displaced_stepping == can_use_displaced_stepping_on)
1191 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1192 && !RECORD_IS_USED);
1193 }
1194
1195 /* Clean out any stray displaced stepping state. */
1196 static void
1197 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1198 {
1199 /* Indicate that there is no cleanup pending. */
1200 displaced->step_ptid = null_ptid;
1201
1202 if (displaced->step_closure)
1203 {
1204 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1205 displaced->step_closure);
1206 displaced->step_closure = NULL;
1207 }
1208 }
1209
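/* Cleanup callback wrapping displaced_step_clear; ARG is the
   displaced_step_inferior_state to clear.  */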
1210 static void
1211 displaced_step_clear_cleanup (void *arg)
1212 {
1213 struct displaced_step_inferior_state *state = arg;
1214
1215 displaced_step_clear (state);
1216 }
1217
1218 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1219 void
1220 displaced_step_dump_bytes (struct ui_file *file,
1221 const gdb_byte *buf,
1222 size_t len)
1223 {
1224 int i;
1225
1226 for (i = 0; i < len; i++)
1227 fprintf_unfiltered (file, "%02x ", buf[i]);
1228 fputs_unfiltered ("\n", file);
1229 }
1230
1231 /* Prepare to single-step, using displaced stepping.
1232
1233 Note that we cannot use displaced stepping when we have a signal to
1234 deliver. If we have a signal to deliver and an instruction to step
1235 over, then after the step, there will be no indication from the
1236 target whether the thread entered a signal handler or ignored the
1237 signal and stepped over the instruction successfully --- both cases
1238 result in a simple SIGTRAP. In the first case we mustn't do a
1239 fixup, and in the second case we must --- but we can't tell which.
1240 Comments in the code for 'random signals' in handle_inferior_event
1241 explain how we handle this case instead.
1242
1243 Returns 1 if preparing was successful -- this thread is going to be
1244 stepped now; or 0 if displaced stepping this thread got queued. */
1245 static int
1246 displaced_step_prepare (ptid_t ptid)
1247 {
1248 struct cleanup *old_cleanups, *ignore_cleanups;
1249 struct regcache *regcache = get_thread_regcache (ptid);
1250 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1251 CORE_ADDR original, copy;
1252 ULONGEST len;
1253 struct displaced_step_closure *closure;
1254 struct displaced_step_inferior_state *displaced;
1255
1256 /* We should never reach this function if the architecture does not
1257 support displaced stepping. */
1258 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1259
1260 /* We have to displaced step one thread at a time, as we only have
1261 access to a single scratch space per inferior. */
1262
1263 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1264
1265 if (!ptid_equal (displaced->step_ptid, null_ptid))
1266 {
1267 /* Already waiting for a displaced step to finish. Defer this
1268 request and place in queue. */
1269 struct displaced_step_request *req, *new_req;
1270
1271 if (debug_displaced)
1272 fprintf_unfiltered (gdb_stdlog,
1273 "displaced: deferring step of %s\n",
1274 target_pid_to_str (ptid));
1275
1276 new_req = xmalloc (sizeof (*new_req));
1277 new_req->ptid = ptid;
1278 new_req->next = NULL;
1279
1280 if (displaced->step_request_queue)
1281 {
1282 for (req = displaced->step_request_queue;
1283 req && req->next;
1284 req = req->next)
1285 ;
1286 req->next = new_req;
1287 }
1288 else
1289 displaced->step_request_queue = new_req;
1290
1291 return 0;
1292 }
1293 else
1294 {
1295 if (debug_displaced)
1296 fprintf_unfiltered (gdb_stdlog,
1297 "displaced: stepping %s now\n",
1298 target_pid_to_str (ptid));
1299 }
1300
1301 displaced_step_clear (displaced);
1302
1303 old_cleanups = save_inferior_ptid ();
1304 inferior_ptid = ptid;
1305
1306 original = regcache_read_pc (regcache);
1307
1308 copy = gdbarch_displaced_step_location (gdbarch);
1309 len = gdbarch_max_insn_length (gdbarch);
1310
1311 /* Save the original contents of the copy area. */
1312 displaced->step_saved_copy = xmalloc (len);
1313 ignore_cleanups = make_cleanup (free_current_contents,
1314 &displaced->step_saved_copy);
1315 read_memory (copy, displaced->step_saved_copy, len);
1316 if (debug_displaced)
1317 {
1318 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1319 paddress (gdbarch, copy));
1320 displaced_step_dump_bytes (gdb_stdlog,
1321 displaced->step_saved_copy,
1322 len);
1323 };
1324
1325 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1326 original, copy, regcache);
1327
1328 /* We don't support the fully-simulated case at present. */
1329 gdb_assert (closure);
1330
1331 /* Save the information we need to fix things up if the step
1332 succeeds. */
1333 displaced->step_ptid = ptid;
1334 displaced->step_gdbarch = gdbarch;
1335 displaced->step_closure = closure;
1336 displaced->step_original = original;
1337 displaced->step_copy = copy;
1338
1339 make_cleanup (displaced_step_clear_cleanup, displaced);
1340
1341 /* Resume execution at the copy. */
1342 regcache_write_pc (regcache, copy);
1343
1344 discard_cleanups (ignore_cleanups);
1345
1346 do_cleanups (old_cleanups);
1347
1348 if (debug_displaced)
1349 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1350 paddress (gdbarch, copy));
1351
1352 return 1;
1353 }
1354
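/* Write LEN bytes from MYADDR to inferior memory at MEMADDR, on behalf of
   thread PTID: temporarily switch inferior_ptid to PTID around the
   write_memory call.  */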
1355 static void
1356 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1357 const gdb_byte *myaddr, int len)
1358 {
1359 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1360
1361 inferior_ptid = ptid;
1362 write_memory (memaddr, myaddr, len);
1363 do_cleanups (ptid_cleanup);
1364 }
1365
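/* Fix up the state of EVENT_PTID after a displaced step, given that the
   thread stopped with SIGNAL: restore the scratch area's original
   contents, apply the gdbarch fixup if the single-step completed
   (SIGTRAP), or else simply relocate the PC back into the original code,
   and finally start the next queued displaced stepping request for this
   process, if any.  (If EVENT_PTID was not the thread being
   displaced-stepped, this is a no-op.)  */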
1366 static void
1367 displaced_step_fixup (ptid_t event_ptid, enum target_signal signal)
1368 {
1369 struct cleanup *old_cleanups;
1370 struct displaced_step_inferior_state *displaced
1371 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1372
1373 /* Was any thread of this process doing a displaced step? */
1374 if (displaced == NULL)
1375 return;
1376
1377 /* Was this event for the pid we displaced? */
1378 if (ptid_equal (displaced->step_ptid, null_ptid)
1379 || ! ptid_equal (displaced->step_ptid, event_ptid))
1380 return;
1381
1382 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1383
1384 /* Restore the contents of the copy area. */
1385 {
1386 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1387
1388 write_memory_ptid (displaced->step_ptid, displaced->step_copy,
1389 displaced->step_saved_copy, len);
1390 if (debug_displaced)
1391 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s\n",
1392 paddress (displaced->step_gdbarch,
1393 displaced->step_copy));
1394 }
1395
1396 /* Did the instruction complete successfully? */
1397 if (signal == TARGET_SIGNAL_TRAP)
1398 {
1399 /* Fix up the resulting state. */
1400 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1401 displaced->step_closure,
1402 displaced->step_original,
1403 displaced->step_copy,
1404 get_thread_regcache (displaced->step_ptid));
1405 }
1406 else
1407 {
1408 /* Since the instruction didn't complete, all we can do is
1409 relocate the PC. */
1410 struct regcache *regcache = get_thread_regcache (event_ptid);
1411 CORE_ADDR pc = regcache_read_pc (regcache);
1412
1413 pc = displaced->step_original + (pc - displaced->step_copy);
1414 regcache_write_pc (regcache, pc);
1415 }
1416
1417 do_cleanups (old_cleanups);
1418
1419 displaced->step_ptid = null_ptid;
1420
1421 /* Are there any pending displaced stepping requests? If so, run
1422 one now. Leave the state object around, since we're likely to
1423 need it again soon. */
1424 while (displaced->step_request_queue)
1425 {
1426 struct displaced_step_request *head;
1427 ptid_t ptid;
1428 struct regcache *regcache;
1429 struct gdbarch *gdbarch;
1430 CORE_ADDR actual_pc;
1431 struct address_space *aspace;
1432
1433 head = displaced->step_request_queue;
1434 ptid = head->ptid;
1435 displaced->step_request_queue = head->next;
1436 xfree (head);
1437
1438 context_switch (ptid);
1439
1440 regcache = get_thread_regcache (ptid);
1441 actual_pc = regcache_read_pc (regcache);
1442 aspace = get_regcache_aspace (regcache);
1443
1444 if (breakpoint_here_p (aspace, actual_pc))
1445 {
1446 if (debug_displaced)
1447 fprintf_unfiltered (gdb_stdlog,
1448 "displaced: stepping queued %s now\n",
1449 target_pid_to_str (ptid));
1450
1451 displaced_step_prepare (ptid);
1452
1453 gdbarch = get_regcache_arch (regcache);
1454
1455 if (debug_displaced)
1456 {
1457 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1458 gdb_byte buf[4];
1459
1460 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1461 paddress (gdbarch, actual_pc));
1462 read_memory (actual_pc, buf, sizeof (buf));
1463 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1464 }
1465
1466 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1467 displaced->step_closure))
1468 target_resume (ptid, 1, TARGET_SIGNAL_0);
1469 else
1470 target_resume (ptid, 0, TARGET_SIGNAL_0);
1471
1472 /* Done, we're stepping a thread. */
1473 break;
1474 }
1475 else
1476 {
1477 int step;
1478 struct thread_info *tp = inferior_thread ();
1479
1480 /* The breakpoint we were sitting under has since been
1481 removed. */
1482 tp->control.trap_expected = 0;
1483
1484 /* Go back to what we were trying to do. */
1485 step = currently_stepping (tp);
1486
1487 if (debug_displaced)
1488 fprintf_unfiltered (gdb_stdlog,
1489 "breakpoint is gone %s: step(%d)\n",
1490 target_pid_to_str (tp->ptid), step);
1491
1492 target_resume (ptid, step, TARGET_SIGNAL_0);
1493 tp->suspend.stop_signal = TARGET_SIGNAL_0;
1494
1495 /* This request was discarded. See if there's any other
1496 thread waiting for its turn. */
1497 }
1498 }
1499 }
1500
1501 /* Update global variables holding ptids to hold NEW_PTID if they were
1502 holding OLD_PTID. */
1503 static void
1504 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1505 {
1506 struct displaced_step_request *it;
1507 struct displaced_step_inferior_state *displaced;
1508
1509 if (ptid_equal (inferior_ptid, old_ptid))
1510 inferior_ptid = new_ptid;
1511
1512 if (ptid_equal (singlestep_ptid, old_ptid))
1513 singlestep_ptid = new_ptid;
1514
1515 if (ptid_equal (deferred_step_ptid, old_ptid))
1516 deferred_step_ptid = new_ptid;
1517
1518 for (displaced = displaced_step_inferior_states;
1519 displaced;
1520 displaced = displaced->next)
1521 {
1522 if (ptid_equal (displaced->step_ptid, old_ptid))
1523 displaced->step_ptid = new_ptid;
1524
1525 for (it = displaced->step_request_queue; it; it = it->next)
1526 if (ptid_equal (it->ptid, old_ptid))
1527 it->ptid = new_ptid;
1528 }
1529 }
1530
1531 \f
1532 /* Resuming. */
1533
1534 /* Things to clean up if we QUIT out of resume (). */
1535 static void
1536 resume_cleanups (void *ignore)
1537 {
1538 normal_stop ();
1539 }
1540
1541 static const char schedlock_off[] = "off";
1542 static const char schedlock_on[] = "on";
1543 static const char schedlock_step[] = "step";
1544 static const char *scheduler_enums[] = {
1545 schedlock_off,
1546 schedlock_on,
1547 schedlock_step,
1548 NULL
1549 };
1550 static const char *scheduler_mode = schedlock_off;
1551 static void
1552 show_scheduler_mode (struct ui_file *file, int from_tty,
1553 struct cmd_list_element *c, const char *value)
1554 {
1555 fprintf_filtered (file,
1556 _("Mode for locking scheduler "
1557 "during execution is \"%s\".\n"),
1558 value);
1559 }
1560
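/* The "set scheduler-locking" hook: if the target cannot lock the
   scheduler, force the mode back to "off" and report an error.  */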
1561 static void
1562 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1563 {
1564 if (!target_can_lock_scheduler)
1565 {
1566 scheduler_mode = schedlock_off;
1567 error (_("Target '%s' cannot support this command."), target_shortname);
1568 }
1569 }
1570
1571 /* True if execution commands resume all threads of all processes by
1572 default; otherwise, resume only threads of the current inferior
1573 process. */
1574 int sched_multi = 0;
1575
1576 /* Try to set up for software single stepping over the specified location.
1577 Return 1 if target_resume() should use hardware single step.
1578
1579 GDBARCH the current gdbarch.
1580 PC the location to step over. */
1581
1582 static int
1583 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1584 {
1585 int hw_step = 1;
1586
1587 if (execution_direction == EXEC_FORWARD
1588 && gdbarch_software_single_step_p (gdbarch)
1589 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1590 {
1591 hw_step = 0;
1592 /* Do not pull these breakpoints until after a `wait' in
1593 `wait_for_inferior'. */
1594 singlestep_breakpoints_inserted_p = 1;
1595 singlestep_ptid = inferior_ptid;
1596 singlestep_pc = pc;
1597 }
1598 return hw_step;
1599 }
1600
1601 /* Return a ptid representing the set of threads that we will proceed,
1602 from the perspective of the user/frontend. We may actually resume
1603 fewer threads at first, e.g., if a thread is stopped at a
1604 breakpoint that needs stepping-off, but that should not be visible
1605 to the user/frontend, and neither should the frontend/user be
1606 allowed to proceed any of the threads that happen to be stopped for
1607 internal run control handling, if a previous command wanted them
1608 resumed. */
1609
1610 ptid_t
1611 user_visible_resume_ptid (int step)
1612 {
1613 /* By default, resume all threads of all processes. */
1614 ptid_t resume_ptid = RESUME_ALL;
1615
1616 /* Maybe resume only all threads of the current process. */
1617 if (!sched_multi && target_supports_multi_process ())
1618 {
1619 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1620 }
1621
1622 /* Maybe resume a single thread after all. */
1623 if (non_stop)
1624 {
1625 /* With non-stop mode on, threads are always handled
1626 individually. */
1627 resume_ptid = inferior_ptid;
1628 }
1629 else if ((scheduler_mode == schedlock_on)
1630 || (scheduler_mode == schedlock_step
1631 && (step || singlestep_breakpoints_inserted_p)))
1632 {
1633 /* User-settable 'scheduler' mode requires solo thread resume. */
1634 resume_ptid = inferior_ptid;
1635 }
1636
1637 return resume_ptid;
1638 }
1639
1640 /* Resume the inferior, but allow a QUIT. This is useful if the user
1641 wants to interrupt some lengthy single-stepping operation
1642 (for child processes, the SIGINT goes to the inferior, and so
1643 we get a SIGINT random_signal, but for remote debugging and perhaps
1644 other targets, that's not true).
1645
1646 STEP nonzero if we should step (zero to continue instead).
1647 SIG is the signal to give the inferior (zero for none). */
1648 void
1649 resume (int step, enum target_signal sig)
1650 {
1651 int should_resume = 1;
1652 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1653 struct regcache *regcache = get_current_regcache ();
1654 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1655 struct thread_info *tp = inferior_thread ();
1656 CORE_ADDR pc = regcache_read_pc (regcache);
1657 struct address_space *aspace = get_regcache_aspace (regcache);
1658
1659 QUIT;
1660
1661 if (current_inferior ()->waiting_for_vfork_done)
1662 {
1663 /* Don't try to single-step a vfork parent that is waiting for
1664 the child to get out of the shared memory region (by exec'ing
1665 or exiting). This is particularly important on software
1666 single-step archs, as the child process would trip on the
1667 software single step breakpoint inserted for the parent
1668 process. Since the parent will not actually execute any
1669 instruction until the child is out of the shared region (such
1670 are vfork's semantics), it is safe to simply continue it.
1671 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
1672 the parent, and tell it to `keep_going', which automatically
1673 re-sets it stepping. */
1674 if (debug_infrun)
1675 fprintf_unfiltered (gdb_stdlog,
1676 "infrun: resume : clear step\n");
1677 step = 0;
1678 }
1679
1680 if (debug_infrun)
1681 fprintf_unfiltered (gdb_stdlog,
1682 "infrun: resume (step=%d, signal=%d), "
1683 "trap_expected=%d, current thread [%s] at %s\n",
1684 step, sig, tp->control.trap_expected,
1685 target_pid_to_str (inferior_ptid),
1686 paddress (gdbarch, pc));
1687
1688 /* Normally, by the time we reach `resume', the breakpoints are either
1689 removed or inserted, as appropriate. The exception is if we're sitting
1690 at a permanent breakpoint; we need to step over it, but permanent
1691 breakpoints can't be removed. So we have to test for it here. */
1692 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
1693 {
1694 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1695 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1696 else
1697 error (_("\
1698 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1699 how to step past a permanent breakpoint on this architecture. Try using\n\
1700 a command like `return' or `jump' to continue execution."));
1701 }
1702
1703 /* If enabled, step over breakpoints by executing a copy of the
1704 instruction at a different address.
1705
1706 We can't use displaced stepping when we have a signal to deliver;
1707 the comments for displaced_step_prepare explain why. The
1708 comments in the handle_inferior event for dealing with 'random
1709 signals' explain what we do instead.
1710
1711 We can't use displaced stepping when we are waiting for a vfork_done
1712 event; displaced stepping would break the vfork child in the same
1713 way that a software single-step breakpoint would. */
1714 if (use_displaced_stepping (gdbarch)
1715 && (tp->control.trap_expected
1716 || (step && gdbarch_software_single_step_p (gdbarch)))
1717 && sig == TARGET_SIGNAL_0
1718 && !current_inferior ()->waiting_for_vfork_done)
1719 {
1720 struct displaced_step_inferior_state *displaced;
1721
1722 if (!displaced_step_prepare (inferior_ptid))
1723 {
1724 /* Got placed in the displaced stepping queue. Will be resumed
1725 later when all the currently queued displaced stepping
1726 requests finish. The thread is not executing at this point,
1727 and the call to set_executing will be made later. But we
1728 need to call set_running here, since from the frontend's point
1729 of view, the thread is running. */
1730 set_running (inferior_ptid, 1);
1731 discard_cleanups (old_cleanups);
1732 return;
1733 }
1734
1735 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1736 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
1737 displaced->step_closure);
1738 }
1739
1740 /* Do we need to do it the hard way, w/temp breakpoints? */
1741 else if (step)
1742 step = maybe_software_singlestep (gdbarch, pc);
1743
1744 /* Currently, our software single-step implementation leads to different
1745 results than hardware single-stepping in one situation: when stepping
1746 into delivering a signal which has an associated signal handler,
1747 hardware single-step will stop at the first instruction of the handler,
1748 while software single-step will simply skip execution of the handler.
1749
1750 For now, this difference in behavior is accepted since there is no
1751 easy way to actually implement single-stepping into a signal handler
1752 without kernel support.
1753
1754 However, there is one scenario where this difference leads to follow-on
1755 problems: if we're stepping off a breakpoint by removing all breakpoints
1756 and then single-stepping. In this case, the software single-step
1757 behavior means that even if there is a *breakpoint* in the signal
1758 handler, GDB still would not stop.
1759
1760 Fortunately, we can at least fix this particular issue. We detect
1761 here the case where we are about to deliver a signal while software
1762 single-stepping with breakpoints removed. In this situation, we
1763 revert the decisions to remove all breakpoints and insert single-
1764 step breakpoints, and instead we install a step-resume breakpoint
1765 at the current address, deliver the signal without stepping, and
1766 once we arrive back at the step-resume breakpoint, actually step
1767 over the breakpoint we originally wanted to step over. */
1768 if (singlestep_breakpoints_inserted_p
1769 && tp->control.trap_expected && sig != TARGET_SIGNAL_0)
1770 {
1771 /* If we have nested signals or a pending signal is delivered
1772 immediately after a handler returns, we might already have
1773 a step-resume breakpoint set on the earlier handler. We cannot
1774 set another step-resume breakpoint; just continue on until the
1775 original breakpoint is hit. */
1776 if (tp->control.step_resume_breakpoint == NULL)
1777 {
1778 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
1779 tp->step_after_step_resume_breakpoint = 1;
1780 }
1781
1782 remove_single_step_breakpoints ();
1783 singlestep_breakpoints_inserted_p = 0;
1784
1785 insert_breakpoints ();
1786 tp->control.trap_expected = 0;
1787 }
1788
1789 if (should_resume)
1790 {
1791 ptid_t resume_ptid;
1792
1793 /* If STEP is set, it's a request to use hardware stepping
1794 facilities. But in that case, we should never
1795 use singlestep breakpoint. */
1796 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1797
1798 /* Decide the set of threads to ask the target to resume. Start
1799 by assuming everything will be resumed, then narrow the set
1800 by applying increasingly restrictive conditions. */
1801 resume_ptid = user_visible_resume_ptid (step);
1802
1803 /* Maybe resume a single thread after all. */
1804 if (singlestep_breakpoints_inserted_p
1805 && stepping_past_singlestep_breakpoint)
1806 {
1807 /* The situation here is as follows. In thread T1 we wanted to
1808 single-step. Lacking hardware single-stepping, we've
1809 set a breakpoint at the PC of the next instruction -- call it
1810 P. After resuming, we've hit that breakpoint in thread T2.
1811 Now we've removed the original breakpoint, inserted a breakpoint
1812 at P+1, and are trying to step to advance T2 past the breakpoint.
1813 We need to step only T2, because if T1 is allowed to run freely,
1814 it can run past P, and if other threads are allowed to run,
1815 they can hit the breakpoint at P+1, and nested hits of single-step
1816 breakpoints are not something we'd want -- that's complicated
1817 to support, and has no value. */
1818 resume_ptid = inferior_ptid;
1819 }
1820 else if ((step || singlestep_breakpoints_inserted_p)
1821 && tp->control.trap_expected)
1822 {
1823 /* We're allowing a thread to run past a breakpoint it has
1824 hit, by single-stepping the thread with the breakpoint
1825 removed. In that case, we need to single-step only this
1826 thread, and keep others stopped, as they can miss this
1827 breakpoint if allowed to run.
1828
1829 The current code actually removes all breakpoints when
1830 doing this, not just the one being stepped over, so if we
1831 let other threads run, we can actually miss any
1832 breakpoint, not just the one at PC. */
1833 resume_ptid = inferior_ptid;
1834 }
1835
1836 if (gdbarch_cannot_step_breakpoint (gdbarch))
1837 {
1838 /* Most targets can step a breakpoint instruction, thus
1839 executing it normally. But if this one cannot, just
1840 continue and we will hit it anyway. */
1841 if (step && breakpoint_inserted_here_p (aspace, pc))
1842 step = 0;
1843 }
1844
1845 if (debug_displaced
1846 && use_displaced_stepping (gdbarch)
1847 && tp->control.trap_expected)
1848 {
1849 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1850 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1851 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1852 gdb_byte buf[4];
1853
1854 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1855 paddress (resume_gdbarch, actual_pc));
1856 read_memory (actual_pc, buf, sizeof (buf));
1857 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1858 }
1859
1860 /* Install inferior's terminal modes. */
1861 target_terminal_inferior ();
1862
1863 /* Avoid confusing the next resume, if the next stop/resume
1864 happens to apply to another thread. */
1865 tp->suspend.stop_signal = TARGET_SIGNAL_0;
1866
1867 /* Advise target which signals may be handled silently. If we have
1868 removed breakpoints because we are stepping over one (which can
1869 happen only if we are not using displaced stepping), we need to
1870 receive all signals to avoid accidentally skipping a breakpoint
1871 during execution of a signal handler. */
1872 if ((step || singlestep_breakpoints_inserted_p)
1873 && tp->control.trap_expected
1874 && !use_displaced_stepping (gdbarch))
1875 target_pass_signals (0, NULL);
1876 else
1877 target_pass_signals ((int) TARGET_SIGNAL_LAST, signal_pass);
1878
1879 target_resume (resume_ptid, step, sig);
1880 }
1881
1882 discard_cleanups (old_cleanups);
1883 }
1884 \f
1885 /* Proceeding. */
1886
1887 /* Clear out all variables saying what to do when inferior is continued.
1888 First do this, then set the ones you want, then call `proceed'. */
1889
1890 static void
1891 clear_proceed_status_thread (struct thread_info *tp)
1892 {
1893 if (debug_infrun)
1894 fprintf_unfiltered (gdb_stdlog,
1895 "infrun: clear_proceed_status_thread (%s)\n",
1896 target_pid_to_str (tp->ptid));
1897
1898 tp->control.trap_expected = 0;
1899 tp->control.step_range_start = 0;
1900 tp->control.step_range_end = 0;
1901 tp->control.step_frame_id = null_frame_id;
1902 tp->control.step_stack_frame_id = null_frame_id;
1903 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
1904 tp->stop_requested = 0;
1905
1906 tp->control.stop_step = 0;
1907
1908 tp->control.proceed_to_finish = 0;
1909
1910 /* Discard any remaining commands or status from previous stop. */
1911 bpstat_clear (&tp->control.stop_bpstat);
1912 }
1913
1914 static int
1915 clear_proceed_status_callback (struct thread_info *tp, void *data)
1916 {
1917 if (is_exited (tp->ptid))
1918 return 0;
1919
1920 clear_proceed_status_thread (tp);
1921 return 0;
1922 }
1923
1924 void
1925 clear_proceed_status (void)
1926 {
1927 if (!non_stop)
1928 {
1929 /* In all-stop mode, delete the per-thread status of all
1930 threads; even if inferior_ptid is null_ptid, there may be
1931 threads on the list. E.g., we may be launching a new
1932 process, while selecting the executable. */
1933 iterate_over_threads (clear_proceed_status_callback, NULL);
1934 }
1935
1936 if (!ptid_equal (inferior_ptid, null_ptid))
1937 {
1938 struct inferior *inferior;
1939
1940 if (non_stop)
1941 {
1942 /* If in non-stop mode, only delete the per-thread status of
1943 the current thread. */
1944 clear_proceed_status_thread (inferior_thread ());
1945 }
1946
1947 inferior = current_inferior ();
1948 inferior->control.stop_soon = NO_STOP_QUIETLY;
1949 }
1950
1951 stop_after_trap = 0;
1952
1953 observer_notify_about_to_proceed ();
1954
1955 if (stop_registers)
1956 {
1957 regcache_xfree (stop_registers);
1958 stop_registers = NULL;
1959 }
1960 }
1961
1962 /* Check the current thread against the thread that reported the most recent
1963 event. If a step-over is required, return TRUE and set the current thread
1964 to the old thread. Otherwise return FALSE.
1965
1966 This should be suitable for any targets that support threads. */
1967
1968 static int
1969 prepare_to_proceed (int step)
1970 {
1971 ptid_t wait_ptid;
1972 struct target_waitstatus wait_status;
1973 int schedlock_enabled;
1974
1975 /* With non-stop mode on, threads are always handled individually. */
1976 gdb_assert (! non_stop);
1977
1978 /* Get the last target status returned by target_wait(). */
1979 get_last_target_status (&wait_ptid, &wait_status);
1980
1981 /* Make sure we were stopped at a breakpoint. */
1982 if (wait_status.kind != TARGET_WAITKIND_STOPPED
1983 || (wait_status.value.sig != TARGET_SIGNAL_TRAP
1984 && wait_status.value.sig != TARGET_SIGNAL_ILL
1985 && wait_status.value.sig != TARGET_SIGNAL_SEGV
1986 && wait_status.value.sig != TARGET_SIGNAL_EMT))
1987 {
1988 return 0;
1989 }
1990
1991 schedlock_enabled = (scheduler_mode == schedlock_on
1992 || (scheduler_mode == schedlock_step
1993 && step));
1994
1995 /* Don't switch over to WAIT_PTID if scheduler locking is on. */
1996 if (schedlock_enabled)
1997 return 0;
1998
1999 /* Don't switch over if we're about to resume a process other
2000 than WAIT_PTID's, and schedule-multiple is off. */
2001 if (!sched_multi
2002 && ptid_get_pid (wait_ptid) != ptid_get_pid (inferior_ptid))
2003 return 0;
2004
2005 /* Switched over from WAIT_PTID's thread; does it need a step-over? */
2006 if (!ptid_equal (wait_ptid, minus_one_ptid)
2007 && !ptid_equal (inferior_ptid, wait_ptid))
2008 {
2009 struct regcache *regcache = get_thread_regcache (wait_ptid);
2010
2011 if (breakpoint_here_p (get_regcache_aspace (regcache),
2012 regcache_read_pc (regcache)))
2013 {
2014 /* If stepping, remember the current thread to switch back to. */
2015 if (step)
2016 deferred_step_ptid = inferior_ptid;
2017
2018 /* Switch back to the WAIT_PTID thread. */
2019 switch_to_thread (wait_ptid);
2020
2021 if (debug_infrun)
2022 fprintf_unfiltered (gdb_stdlog,
2023 "infrun: prepare_to_proceed (step=%d), "
2024 "switched to [%s]\n",
2025 step, target_pid_to_str (inferior_ptid));
2026
2027 /* We return 1 to indicate that there is a breakpoint here,
2028 so we need to step over it before continuing to avoid
2029 hitting it straight away. */
2030 return 1;
2031 }
2032 }
2033
2034 return 0;
2035 }
2036
2037 /* Basic routine for continuing the program in various fashions.
2038
2039 ADDR is the address to resume at, or -1 for resume where stopped.
2040 SIGGNAL is the signal to give it, or 0 for none,
2041 or -1 to act according to how it stopped.
2042 STEP is nonzero if we should trap after one instruction;
2043 -1 means return after that and print nothing.
2044 You should probably set various step_... variables
2045 before calling here, if you are stepping.
2046
2047 You should call clear_proceed_status before calling proceed. */
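
/* For example, the pattern used by callers such as the "continue"
   command (a sketch for illustration; see infcmd.c for the real
   callers) is:

     clear_proceed_status ();
     proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);

   which resumes at the address the inferior last stopped at,
   re-delivering (or suppressing, per "handle") whatever signal it
   stopped with, without single-stepping.  */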
2048
2049 void
2050 proceed (CORE_ADDR addr, enum target_signal siggnal, int step)
2051 {
2052 struct regcache *regcache;
2053 struct gdbarch *gdbarch;
2054 struct thread_info *tp;
2055 CORE_ADDR pc;
2056 struct address_space *aspace;
2057 int oneproc = 0;
2058
2059 /* If we're stopped at a fork/vfork, follow the branch set by the
2060 "set follow-fork-mode" command; otherwise, we'll just proceed
2061 resuming the current thread. */
2062 if (!follow_fork ())
2063 {
2064 /* The target for some reason decided not to resume. */
2065 normal_stop ();
2066 if (target_can_async_p ())
2067 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2068 return;
2069 }
2070
2071 /* We'll update this if & when we switch to a new thread. */
2072 previous_inferior_ptid = inferior_ptid;
2073
2074 regcache = get_current_regcache ();
2075 gdbarch = get_regcache_arch (regcache);
2076 aspace = get_regcache_aspace (regcache);
2077 pc = regcache_read_pc (regcache);
2078
2079 if (step > 0)
2080 step_start_function = find_pc_function (pc);
2081 if (step < 0)
2082 stop_after_trap = 1;
2083
2084 if (addr == (CORE_ADDR) -1)
2085 {
2086 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
2087 && execution_direction != EXEC_REVERSE)
2088 /* There is a breakpoint at the address we will resume at,
2089 step one instruction before inserting breakpoints so that
2090 we do not stop right away (and report a second hit at this
2091 breakpoint).
2092
2093 Note, we don't do this in reverse, because we won't
2094 actually be executing the breakpoint insn anyway.
2095 We'll be (un-)executing the previous instruction. */
2096
2097 oneproc = 1;
2098 else if (gdbarch_single_step_through_delay_p (gdbarch)
2099 && gdbarch_single_step_through_delay (gdbarch,
2100 get_current_frame ()))
2101 /* We stepped onto an instruction that needs to be stepped
2102 again before re-inserting the breakpoint, do so. */
2103 oneproc = 1;
2104 }
2105 else
2106 {
2107 regcache_write_pc (regcache, addr);
2108 }
2109
2110 if (debug_infrun)
2111 fprintf_unfiltered (gdb_stdlog,
2112 "infrun: proceed (addr=%s, signal=%d, step=%d)\n",
2113 paddress (gdbarch, addr), siggnal, step);
2114
2115 if (non_stop)
2116 /* In non-stop, each thread is handled individually. The context
2117 must already be set to the right thread here. */
2118 ;
2119 else
2120 {
2121 /* In a multi-threaded task we may select another thread and
2122 then continue or step.
2123
2124 But if the old thread was stopped at a breakpoint, it will
2125 immediately cause another breakpoint stop without any
2126 execution (i.e. it will report a breakpoint hit incorrectly).
2127 So we must step over it first.
2128
2129 prepare_to_proceed checks the current thread against the
2130 thread that reported the most recent event. If a step-over
2131 is required it returns TRUE and sets the current thread to
2132 the old thread. */
2133 if (prepare_to_proceed (step))
2134 oneproc = 1;
2135 }
2136
2137 /* prepare_to_proceed may change the current thread. */
2138 tp = inferior_thread ();
2139
2140 if (oneproc)
2141 {
2142 tp->control.trap_expected = 1;
2143 /* If displaced stepping is enabled, we can step over the
2144 breakpoint without hitting it, so leave all breakpoints
2145 inserted. Otherwise we need to disable all breakpoints, step
2146 one instruction, and then re-add them when that step is
2147 finished. */
2148 if (!use_displaced_stepping (gdbarch))
2149 remove_breakpoints ();
2150 }
2151
2152 /* We can insert breakpoints if we're not trying to step over one,
2153 or if we are stepping over one but we're using displaced stepping
2154 to do so. */
2155 if (! tp->control.trap_expected || use_displaced_stepping (gdbarch))
2156 insert_breakpoints ();
2157
2158 if (!non_stop)
2159 {
2160 /* Pass the last stop signal to the thread we're resuming,
2161 irrespective of whether the current thread is the thread that
2162 got the last event or not. This was historically GDB's
2163 behaviour before keeping a stop_signal per thread. */
2164
2165 struct thread_info *last_thread;
2166 ptid_t last_ptid;
2167 struct target_waitstatus last_status;
2168
2169 get_last_target_status (&last_ptid, &last_status);
2170 if (!ptid_equal (inferior_ptid, last_ptid)
2171 && !ptid_equal (last_ptid, null_ptid)
2172 && !ptid_equal (last_ptid, minus_one_ptid))
2173 {
2174 last_thread = find_thread_ptid (last_ptid);
2175 if (last_thread)
2176 {
2177 tp->suspend.stop_signal = last_thread->suspend.stop_signal;
2178 last_thread->suspend.stop_signal = TARGET_SIGNAL_0;
2179 }
2180 }
2181 }
2182
2183 if (siggnal != TARGET_SIGNAL_DEFAULT)
2184 tp->suspend.stop_signal = siggnal;
2185 /* If this signal should not be seen by the program,
2186 give it zero. Used for debugging signals. */
2187 else if (!signal_program[tp->suspend.stop_signal])
2188 tp->suspend.stop_signal = TARGET_SIGNAL_0;
2189
2190 annotate_starting ();
2191
2192 /* Make sure that output from GDB appears before output from the
2193 inferior. */
2194 gdb_flush (gdb_stdout);
2195
2196 /* Refresh prev_pc value just prior to resuming. This used to be
2197 done in stop_stepping; however, setting prev_pc there did not handle
2198 scenarios such as inferior function calls or returning from
2199 a function via the return command. In those cases, the prev_pc
2200 value was not set properly for subsequent commands. The prev_pc value
2201 is used to initialize the starting line number in the ecs. With an
2202 invalid value, the gdb next command ends up stopping at the position
2203 represented by the next line table entry past our start position.
2204 On platforms that generate one line table entry per line, this
2205 is not a problem. However, on the ia64, the compiler generates
2206 extraneous line table entries that do not increase the line number.
2207 When we issue the gdb next command on the ia64 after an inferior call
2208 or a return command, we often end up a few instructions forward, still
2209 within the original line we started in.
2210
2211 An attempt was made to refresh the prev_pc at the same time the
2212 execution_control_state is initialized (for instance, just before
2213 waiting for an inferior event). But this approach did not work
2214 because of platforms that use ptrace, where the pc register cannot
2215 be read unless the inferior is stopped. At that point, we are not
2216 guaranteed the inferior is stopped and so the regcache_read_pc() call
2217 can fail. Setting the prev_pc value here ensures the value is updated
2218 correctly when the inferior is stopped. */
2219 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2220
2221 /* Fill in with reasonable starting values. */
2222 init_thread_stepping_state (tp);
2223
2224 /* Reset to normal state. */
2225 init_infwait_state ();
2226
2227 /* Resume inferior. */
2228 resume (oneproc || step || bpstat_should_step (), tp->suspend.stop_signal);
2229
2230 /* Wait for it to stop (if not standalone)
2231 and in any case decode why it stopped, and act accordingly. */
2232 /* Do this only if we are not using the event loop, or if the target
2233 does not support asynchronous execution. */
2234 if (!target_can_async_p ())
2235 {
2236 wait_for_inferior ();
2237 normal_stop ();
2238 }
2239 }
2240 \f
2241
2242 /* Start remote-debugging of a machine over a serial link. */
2243
2244 void
2245 start_remote (int from_tty)
2246 {
2247 struct inferior *inferior;
2248
2249 inferior = current_inferior ();
2250 inferior->control.stop_soon = STOP_QUIETLY_REMOTE;
2251
2252 /* Always go on waiting for the target, regardless of the mode. */
2253 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2254 indicate to wait_for_inferior that a target should timeout if
2255 nothing is returned (instead of just blocking). Because of this,
2256 targets expecting an immediate response need to, internally, set
2257 things up so that the target_wait() is forced to eventually
2258 timeout. */
2259 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2260 differentiate to its caller what the state of the target is after
2261 the initial open has been performed. Here we're assuming that
2262 the target has stopped. It should be possible to eventually have
2263 target_open() return to the caller an indication that the target
2264 is currently running and GDB state should be set to the same as
2265 for an async run. */
2266 wait_for_inferior ();
2267
2268 /* Now that the inferior has stopped, do any bookkeeping like
2269 loading shared libraries. We want to do this before normal_stop,
2270 so that the displayed frame is up to date. */
2271 post_create_inferior (&current_target, from_tty);
2272
2273 normal_stop ();
2274 }
2275
2276 /* Initialize static vars when a new inferior begins. */
2277
2278 void
2279 init_wait_for_inferior (void)
2280 {
2281 /* These are meaningless until the first time through wait_for_inferior. */
2282
2283 breakpoint_init_inferior (inf_starting);
2284
2285 clear_proceed_status ();
2286
2287 stepping_past_singlestep_breakpoint = 0;
2288 deferred_step_ptid = null_ptid;
2289
2290 target_last_wait_ptid = minus_one_ptid;
2291
2292 previous_inferior_ptid = inferior_ptid;
2293 init_infwait_state ();
2294
2295 /* Discard any skipped inlined frames. */
2296 clear_inline_frame_state (minus_one_ptid);
2297 }
2298
2299 \f
2300 /* This enum encodes possible reasons for doing a target_wait, so that
2301 wfi can call target_wait in one place. (Ultimately the call will be
2302 moved out of the infinite loop entirely.) */
2303
2304 enum infwait_states
2305 {
2306 infwait_normal_state,
2307 infwait_thread_hop_state,
2308 infwait_step_watch_state,
2309 infwait_nonstep_watch_state
2310 };
2311
2312 /* The PTID we'll do a target_wait on. */
2313 ptid_t waiton_ptid;
2314
2315 /* Current inferior wait state. */
2316 enum infwait_states infwait_state;
2317
2318 /* Data to be passed around while handling an event. This data is
2319 discarded between events. */
2320 struct execution_control_state
2321 {
2322 ptid_t ptid;
2323 /* The thread that got the event, if this was a thread event; NULL
2324 otherwise. */
2325 struct thread_info *event_thread;
2326
2327 struct target_waitstatus ws;
2328 int random_signal;
2329 int stop_func_filled_in;
2330 CORE_ADDR stop_func_start;
2331 CORE_ADDR stop_func_end;
2332 char *stop_func_name;
2333 int new_thread_event;
2334 int wait_some_more;
2335 };
2336
2337 static void handle_inferior_event (struct execution_control_state *ecs);
2338
2339 static void handle_step_into_function (struct gdbarch *gdbarch,
2340 struct execution_control_state *ecs);
2341 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2342 struct execution_control_state *ecs);
2343 static void check_exception_resume (struct execution_control_state *,
2344 struct frame_info *, struct symbol *);
2345
2346 static void stop_stepping (struct execution_control_state *ecs);
2347 static void prepare_to_wait (struct execution_control_state *ecs);
2348 static void keep_going (struct execution_control_state *ecs);
2349
2350 /* Callback for iterate_over_threads. If the thread is stopped, but
2351 the user/frontend doesn't know about that yet, go through
2352 normal_stop, as if the thread had just stopped now. ARG points at
2353 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2354 ptid_is_pid(PTID) is true, applies to all threads of the process
2355 pointed at by PTID. Otherwise, apply only to the thread pointed
2356 at by PTID. */
2357
2358 static int
2359 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2360 {
2361 ptid_t ptid = * (ptid_t *) arg;
2362
2363 if ((ptid_equal (info->ptid, ptid)
2364 || ptid_equal (minus_one_ptid, ptid)
2365 || (ptid_is_pid (ptid)
2366 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2367 && is_running (info->ptid)
2368 && !is_executing (info->ptid))
2369 {
2370 struct cleanup *old_chain;
2371 struct execution_control_state ecss;
2372 struct execution_control_state *ecs = &ecss;
2373
2374 memset (ecs, 0, sizeof (*ecs));
2375
2376 old_chain = make_cleanup_restore_current_thread ();
2377
2378 switch_to_thread (info->ptid);
2379
2380 /* Go through handle_inferior_event/normal_stop, so we always
2381 have consistent output as if the stop event had been
2382 reported. */
2383 ecs->ptid = info->ptid;
2384 ecs->event_thread = find_thread_ptid (info->ptid);
2385 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2386 ecs->ws.value.sig = TARGET_SIGNAL_0;
2387
2388 handle_inferior_event (ecs);
2389
2390 if (!ecs->wait_some_more)
2391 {
2392 struct thread_info *tp;
2393
2394 normal_stop ();
2395
2396 /* Finish off the continuations. */
2397 tp = inferior_thread ();
2398 do_all_intermediate_continuations_thread (tp, 1);
2399 do_all_continuations_thread (tp, 1);
2400 }
2401
2402 do_cleanups (old_chain);
2403 }
2404
2405 return 0;
2406 }
2407
2408 /* This function is attached as a "thread_stop_requested" observer.
2409 Clean up local state that assumed the PTID was to be resumed, and
2410 report the stop to the frontend. */
2411
2412 static void
2413 infrun_thread_stop_requested (ptid_t ptid)
2414 {
2415 struct displaced_step_inferior_state *displaced;
2416
2417 /* PTID was requested to stop. Remove it from the displaced
2418 stepping queue, so we don't try to resume it automatically. */
2419
2420 for (displaced = displaced_step_inferior_states;
2421 displaced;
2422 displaced = displaced->next)
2423 {
2424 struct displaced_step_request *it, **prev_next_p;
2425
2426 it = displaced->step_request_queue;
2427 prev_next_p = &displaced->step_request_queue;
2428 while (it)
2429 {
2430 if (ptid_match (it->ptid, ptid))
2431 {
2432 *prev_next_p = it->next;
2433 it->next = NULL;
2434 xfree (it);
2435 }
2436 else
2437 {
2438 prev_next_p = &it->next;
2439 }
2440
2441 it = *prev_next_p;
2442 }
2443 }
2444
2445 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2446 }
2447
2448 static void
2449 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2450 {
2451 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2452 nullify_last_target_wait_ptid ();
2453 }
2454
2455 /* Callback for iterate_over_threads. */
2456
2457 static int
2458 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2459 {
2460 if (is_exited (info->ptid))
2461 return 0;
2462
2463 delete_step_resume_breakpoint (info);
2464 delete_exception_resume_breakpoint (info);
2465 return 0;
2466 }
2467
2468 /* In all-stop, delete the step resume breakpoint of any thread that
2469 had one. In non-stop, delete the step resume breakpoint of the
2470 thread that just stopped. */
2471
2472 static void
2473 delete_step_thread_step_resume_breakpoint (void)
2474 {
2475 if (!target_has_execution
2476 || ptid_equal (inferior_ptid, null_ptid))
2477 /* If the inferior has exited, we have already deleted the step
2478 resume breakpoints out of GDB's lists. */
2479 return;
2480
2481 if (non_stop)
2482 {
2483 /* If in non-stop mode, only delete the step-resume or
2484 longjmp-resume breakpoint of the thread that just stopped
2485 stepping. */
2486 struct thread_info *tp = inferior_thread ();
2487
2488 delete_step_resume_breakpoint (tp);
2489 delete_exception_resume_breakpoint (tp);
2490 }
2491 else
2492 /* In all-stop mode, delete all step-resume and longjmp-resume
2493 breakpoints of any thread that had them. */
2494 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2495 }
2496
2497 /* A cleanup wrapper. */
2498
2499 static void
2500 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2501 {
2502 delete_step_thread_step_resume_breakpoint ();
2503 }
2504
2505 /* Pretty print the results of target_wait, for debugging purposes. */
2506
2507 static void
2508 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2509 const struct target_waitstatus *ws)
2510 {
2511 char *status_string = target_waitstatus_to_string (ws);
2512 struct ui_file *tmp_stream = mem_fileopen ();
2513 char *text;
2514
2515 /* The text is split over several lines because it was getting too long.
2516 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2517 output as a unit; we want only one timestamp printed if debug_timestamp
2518 is set. */
2519
2520 fprintf_unfiltered (tmp_stream,
2521 "infrun: target_wait (%d", PIDGET (waiton_ptid));
2522 if (PIDGET (waiton_ptid) != -1)
2523 fprintf_unfiltered (tmp_stream,
2524 " [%s]", target_pid_to_str (waiton_ptid));
2525 fprintf_unfiltered (tmp_stream, ", status) =\n");
2526 fprintf_unfiltered (tmp_stream,
2527 "infrun: %d [%s],\n",
2528 PIDGET (result_ptid), target_pid_to_str (result_ptid));
2529 fprintf_unfiltered (tmp_stream,
2530 "infrun: %s\n",
2531 status_string);
2532
2533 text = ui_file_xstrdup (tmp_stream, NULL);
2534
2535 /* This uses %s in part to handle %'s in the text, but also to avoid
2536 a gcc error: the format attribute requires a string literal. */
2537 fprintf_unfiltered (gdb_stdlog, "%s", text);
2538
2539 xfree (status_string);
2540 xfree (text);
2541 ui_file_delete (tmp_stream);
2542 }
2543
2544 /* Prepare and stabilize the inferior for detaching it. E.g.,
2545 detaching while a thread is displaced stepping is a recipe for
2546 crashing it, as nothing would readjust the PC out of the scratch
2547 pad. */
2548
2549 void
2550 prepare_for_detach (void)
2551 {
2552 struct inferior *inf = current_inferior ();
2553 ptid_t pid_ptid = pid_to_ptid (inf->pid);
2554 struct cleanup *old_chain_1;
2555 struct displaced_step_inferior_state *displaced;
2556
2557 displaced = get_displaced_stepping_state (inf->pid);
2558
2559 /* Is any thread of this process displaced stepping? If not,
2560 there's nothing else to do. */
2561 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
2562 return;
2563
2564 if (debug_infrun)
2565 fprintf_unfiltered (gdb_stdlog,
2566 "displaced-stepping in-process while detaching");
2567
2568 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
2569 inf->detaching = 1;
2570
2571 while (!ptid_equal (displaced->step_ptid, null_ptid))
2572 {
2573 struct cleanup *old_chain_2;
2574 struct execution_control_state ecss;
2575 struct execution_control_state *ecs;
2576
2577 ecs = &ecss;
2578 memset (ecs, 0, sizeof (*ecs));
2579
2580 overlay_cache_invalid = 1;
2581
2582 /* We have to invalidate the registers BEFORE calling
2583 target_wait because they can be loaded from the target while
2584 in target_wait. This makes remote debugging a bit more
2585 efficient for those targets that provide critical registers
2586 as part of their normal status mechanism. */
2587
2588 registers_changed ();
2589
2590 if (deprecated_target_wait_hook)
2591 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
2592 else
2593 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
2594
2595 if (debug_infrun)
2596 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
2597
2598 /* If an error happens while handling the event, propagate GDB's
2599 knowledge of the executing state to the frontend/user running
2600 state. */
2601 old_chain_2 = make_cleanup (finish_thread_state_cleanup,
2602 &minus_one_ptid);
2603
2604 /* In non-stop mode, each thread is handled individually.
2605 Switch early, so the global state is set correctly for this
2606 thread. */
2607 if (non_stop
2608 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2609 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2610 context_switch (ecs->ptid);
2611
2612 /* Now figure out what to do with the result of the wait. */
2613 handle_inferior_event (ecs);
2614
2615 /* No error, don't finish the state yet. */
2616 discard_cleanups (old_chain_2);
2617
2618 /* Breakpoints and watchpoints are not installed on the target
2619 at this point, and signals are passed directly to the
2620 inferior, so this must mean the process is gone. */
2621 if (!ecs->wait_some_more)
2622 {
2623 discard_cleanups (old_chain_1);
2624 error (_("Program exited while detaching"));
2625 }
2626 }
2627
2628 discard_cleanups (old_chain_1);
2629 }
2630
2631 /* Wait for control to return from inferior to debugger.
2632
2633 If inferior gets a signal, we may decide to start it up again
2634 instead of returning. That is why there is a loop in this function.
2635 When this function actually returns it means the inferior
2636 should be left stopped and GDB should read more commands. */
2637
2638 void
2639 wait_for_inferior (void)
2640 {
2641 struct cleanup *old_cleanups;
2642 struct execution_control_state ecss;
2643 struct execution_control_state *ecs;
2644
2645 if (debug_infrun)
2646 fprintf_unfiltered
2647 (gdb_stdlog, "infrun: wait_for_inferior ()\n");
2648
2649 old_cleanups =
2650 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2651
2652 ecs = &ecss;
2653 memset (ecs, 0, sizeof (*ecs));
2654
2655 while (1)
2656 {
2657 struct cleanup *old_chain;
2658
2659 /* We have to invalidate the registers BEFORE calling target_wait
2660 because they can be loaded from the target while in target_wait.
2661 This makes remote debugging a bit more efficient for those
2662 targets that provide critical registers as part of their normal
2663 status mechanism. */
2664
2665 overlay_cache_invalid = 1;
2666 registers_changed ();
2667
2668 if (deprecated_target_wait_hook)
2669 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2670 else
2671 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2672
2673 if (debug_infrun)
2674 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2675
2676 /* If an error happens while handling the event, propagate GDB's
2677 knowledge of the executing state to the frontend/user running
2678 state. */
2679 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2680
2681 if (ecs->ws.kind == TARGET_WAITKIND_SYSCALL_ENTRY
2682 || ecs->ws.kind == TARGET_WAITKIND_SYSCALL_RETURN)
2683 ecs->ws.value.syscall_number = UNKNOWN_SYSCALL;
2684
2685 /* Now figure out what to do with the result of the wait. */
2686 handle_inferior_event (ecs);
2687
2688 /* No error, don't finish the state yet. */
2689 discard_cleanups (old_chain);
2690
2691 if (!ecs->wait_some_more)
2692 break;
2693 }
2694
2695 do_cleanups (old_cleanups);
2696 }
2697
2698 /* Asynchronous version of wait_for_inferior. It is called by the
2699 event loop whenever a change of state is detected on the file
2700 descriptor corresponding to the target. It can be called more than
2701 once to complete a single execution command; any state that must
2702 survive between calls is kept outside the local ECSS. If it is the last time
2703 that this function is called for a single execution command, then
2704 report to the user that the inferior has stopped, and do the
2705 necessary cleanups. */
2706
2707 void
2708 fetch_inferior_event (void *client_data)
2709 {
2710 struct execution_control_state ecss;
2711 struct execution_control_state *ecs = &ecss;
2712 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2713 struct cleanup *ts_old_chain;
2714 int was_sync = sync_execution;
2715
2716 memset (ecs, 0, sizeof (*ecs));
2717
2718 /* We're handling a live event, so make sure we're doing live
2719 debugging. If we're looking at traceframes while the target is
2720 running, we're going to need to get back to that mode after
2721 handling the event. */
2722 if (non_stop)
2723 {
2724 make_cleanup_restore_current_traceframe ();
2725 set_current_traceframe (-1);
2726 }
2727
2728 if (non_stop)
2729 /* In non-stop mode, the user/frontend should not notice a thread
2730 switch due to internal events. Make sure we revert to the
2731 user-selected thread and frame after handling the event and
2732 running any breakpoint commands. */
2733 make_cleanup_restore_current_thread ();
2734
2735 /* We have to invalidate the registers BEFORE calling target_wait
2736 because they can be loaded from the target while in target_wait.
2737 This makes remote debugging a bit more efficient for those
2738 targets that provide critical registers as part of their normal
2739 status mechanism. */
2740
2741 overlay_cache_invalid = 1;
2742
2743 /* But don't do it if the current thread is already stopped (hence
2744 this is either a delayed event that will result in
2745 TARGET_WAITKIND_IGNORE, or it's an event for another thread (and
2746 we always clear the register and frame caches when the user
2747 switches threads anyway)). If we didn't do this, a spurious
2748 delayed event in all-stop mode would make the user lose the
2749 selected frame. */
2750 if (non_stop || is_executing (inferior_ptid))
2751 registers_changed ();
2752
2753 make_cleanup_restore_integer (&execution_direction);
2754 execution_direction = target_execution_direction ();
2755
2756 if (deprecated_target_wait_hook)
2757 ecs->ptid =
2758 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2759 else
2760 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2761
2762 if (debug_infrun)
2763 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2764
2765 if (non_stop
2766 && ecs->ws.kind != TARGET_WAITKIND_IGNORE
2767 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2768 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2769 /* In non-stop mode, each thread is handled individually. Switch
2770 early, so the global state is set correctly for this
2771 thread. */
2772 context_switch (ecs->ptid);
2773
2774 /* If an error happens while handling the event, propagate GDB's
2775 knowledge of the executing state to the frontend/user running
2776 state. */
2777 if (!non_stop)
2778 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2779 else
2780 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2781
2782 /* This runs before the make_cleanup_restore_current_thread cleanup
2783 above, so it still applies to the thread that threw the exception. */
2784 make_bpstat_clear_actions_cleanup ();
2785
2786 /* Now figure out what to do with the result of the wait. */
2787 handle_inferior_event (ecs);
2788
2789 if (!ecs->wait_some_more)
2790 {
2791 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2792
2793 delete_step_thread_step_resume_breakpoint ();
2794
2795 /* We may not find an inferior if this was a process exit. */
2796 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
2797 normal_stop ();
2798
2799 if (target_has_execution
2800 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2801 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2802 && ecs->event_thread->step_multi
2803 && ecs->event_thread->control.stop_step)
2804 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2805 else
2806 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2807 }
2808
2809 /* No error, don't finish the thread states yet. */
2810 discard_cleanups (ts_old_chain);
2811
2812 /* Revert thread and frame. */
2813 do_cleanups (old_chain);
2814
2815 /* If the inferior was in sync execution mode, and now isn't,
2816 restore the prompt. */
2817 if (was_sync && !sync_execution)
2818 display_gdb_prompt (0);
2819 }
2820
2821 /* Record the frame and location we're currently stepping through. */
2822 void
2823 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2824 {
2825 struct thread_info *tp = inferior_thread ();
2826
2827 tp->control.step_frame_id = get_frame_id (frame);
2828 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
2829
2830 tp->current_symtab = sal.symtab;
2831 tp->current_line = sal.line;
2832 }
2833
2834 /* Clear context switchable stepping state. */
2835
2836 void
2837 init_thread_stepping_state (struct thread_info *tss)
2838 {
2839 tss->stepping_over_breakpoint = 0;
2840 tss->step_after_step_resume_breakpoint = 0;
2841 tss->stepping_through_solib_after_catch = 0;
2842 tss->stepping_through_solib_catchpoints = NULL;
2843 }
2844
2845 /* Return the cached copy of the last pid/waitstatus returned by
2846 target_wait()/deprecated_target_wait_hook(). The data is actually
2847 cached by handle_inferior_event(), which gets called immediately
2848 after target_wait()/deprecated_target_wait_hook(). */
2849
2850 void
2851 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
2852 {
2853 *ptidp = target_last_wait_ptid;
2854 *status = target_last_waitstatus;
2855 }
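
/* For example (illustration only), prepare_to_proceed above consumes
   the status cached by get_last_target_status like this:

     ptid_t wait_ptid;
     struct target_waitstatus wait_status;

     get_last_target_status (&wait_ptid, &wait_status);

   and then examines WAIT_STATUS.kind and WAIT_STATUS.value.sig.  */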
2856
2857 void
2858 nullify_last_target_wait_ptid (void)
2859 {
2860 target_last_wait_ptid = minus_one_ptid;
2861 }
2862
2863 /* Switch thread contexts. */
2864
2865 static void
2866 context_switch (ptid_t ptid)
2867 {
2868 if (debug_infrun)
2869 {
2870 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
2871 target_pid_to_str (inferior_ptid));
2872 fprintf_unfiltered (gdb_stdlog, "to %s\n",
2873 target_pid_to_str (ptid));
2874 }
2875
2876 switch_to_thread (ptid);
2877 }
2878
2879 static void
2880 adjust_pc_after_break (struct execution_control_state *ecs)
2881 {
2882 struct regcache *regcache;
2883 struct gdbarch *gdbarch;
2884 struct address_space *aspace;
2885 CORE_ADDR breakpoint_pc;
2886
2887 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
2888 we aren't, just return.
2889
2890 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
2891 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
2892 implemented by software breakpoints should be handled through the normal
2893 breakpoint layer.
2894
2895 NOTE drow/2004-01-31: On some targets, breakpoints may generate
2896 different signals (SIGILL or SIGEMT for instance), but it is less
2897 clear where the PC is pointing afterwards. It may not match
2898 gdbarch_decr_pc_after_break. I don't know any specific target that
2899 generates these signals at breakpoints (the code has been in GDB since at
2900 least 1992) so I can not guess how to handle them here.
2901
2902 In earlier versions of GDB, a target with
2903 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
2904 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
2905 target with both of these set in GDB history, and it seems unlikely to be
2906 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
2907
2908 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
2909 return;
2910
2911 if (ecs->ws.value.sig != TARGET_SIGNAL_TRAP)
2912 return;
2913
2914 /* In reverse execution, when a breakpoint is hit, the instruction
2915 under it has already been de-executed. The reported PC always
2916 points at the breakpoint address, so adjusting it further would
2917 be wrong. E.g., consider this case on a decr_pc_after_break == 1
2918 architecture:
2919
2920 B1 0x08000000 : INSN1
2921 B2 0x08000001 : INSN2
2922 0x08000002 : INSN3
2923 PC -> 0x08000003 : INSN4
2924
2925 Say you're stopped at 0x08000003 as above. Reverse continuing
2926 from that point should hit B2 as below. Reading the PC when the
2927 SIGTRAP is reported should read 0x08000001 and INSN2 should have
2928 been de-executed already.
2929
2930 B1 0x08000000 : INSN1
2931 B2 PC -> 0x08000001 : INSN2
2932 0x08000002 : INSN3
2933 0x08000003 : INSN4
2934
2935 We can't apply the same logic as for forward execution, because
2936 we would wrongly adjust the PC to 0x08000000, since there's a
2937 breakpoint at PC - 1. We'd then report a hit on B1, although
2938 INSN1 hadn't been de-executed yet. Doing nothing is the correct
2939 behaviour. */
2940 if (execution_direction == EXEC_REVERSE)
2941 return;
2942
2943 /* If this target does not decrement the PC after breakpoints, then
2944 we have nothing to do. */
2945 regcache = get_thread_regcache (ecs->ptid);
2946 gdbarch = get_regcache_arch (regcache);
2947 if (gdbarch_decr_pc_after_break (gdbarch) == 0)
2948 return;
2949
2950 aspace = get_regcache_aspace (regcache);
2951
2952 /* Find the location where (if we've hit a breakpoint) the
2953 breakpoint would be. */
2954 breakpoint_pc = regcache_read_pc (regcache)
2955 - gdbarch_decr_pc_after_break (gdbarch);
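
/* For example (illustration only): on a target whose breakpoint
   instruction is one byte long and which reports the PC just past it
   (gdbarch_decr_pc_after_break == 1), a breakpoint planted at
   0x08000000 stops with a reported PC of 0x08000001, and
   BREAKPOINT_PC above works back to 0x08000000, the address the
   breakpoint code actually knows about.  */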
2956
2957 /* Check whether there actually is a software breakpoint inserted at
2958 that location.
2959
2960 If in non-stop mode, a race condition is possible where we've
2961 removed a breakpoint, but stop events for that breakpoint were
2962 already queued and arrive later. To suppress those spurious
2963 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
2964 and retire them after a number of stop events are reported. */
2965 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
2966 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
2967 {
2968 struct cleanup *old_cleanups = NULL;
2969
2970 if (RECORD_IS_USED)
2971 old_cleanups = record_gdb_operation_disable_set ();
2972
2973 /* When using hardware single-step, a SIGTRAP is reported for both
2974 a completed single-step and a software breakpoint. Need to
2975 differentiate between the two, as the latter needs adjusting
2976 but the former does not.
2977
2978 The SIGTRAP can be due to a completed hardware single-step only if
2979 - we didn't insert software single-step breakpoints
2980 - the thread to be examined is still the current thread
2981 - this thread is currently being stepped
2982
2983 If any of these events did not occur, we must have stopped due
2984 to hitting a software breakpoint, and have to back up to the
2985 breakpoint address.
2986
2987 As a special case, we could have hardware single-stepped a
2988 software breakpoint. In this case (prev_pc == breakpoint_pc),
2989 we also need to back up to the breakpoint address. */
2990
2991 if (singlestep_breakpoints_inserted_p
2992 || !ptid_equal (ecs->ptid, inferior_ptid)
2993 || !currently_stepping (ecs->event_thread)
2994 || ecs->event_thread->prev_pc == breakpoint_pc)
2995 regcache_write_pc (regcache, breakpoint_pc);
2996
2997 if (RECORD_IS_USED)
2998 do_cleanups (old_cleanups);
2999 }
3000 }
3001
3002 void
3003 init_infwait_state (void)
3004 {
3005 waiton_ptid = pid_to_ptid (-1);
3006 infwait_state = infwait_normal_state;
3007 }
3008
3009 void
3010 error_is_running (void)
3011 {
3012 error (_("Cannot execute this command while "
3013 "the selected thread is running."));
3014 }
3015
3016 void
3017 ensure_not_running (void)
3018 {
3019 if (is_running (inferior_ptid))
3020 error_is_running ();
3021 }
3022
3023 static int
3024 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
3025 {
3026 for (frame = get_prev_frame (frame);
3027 frame != NULL;
3028 frame = get_prev_frame (frame))
3029 {
3030 if (frame_id_eq (get_frame_id (frame), step_frame_id))
3031 return 1;
3032 if (get_frame_type (frame) != INLINE_FRAME)
3033 break;
3034 }
3035
3036 return 0;
3037 }
3038
3039 /* Auxiliary function that handles syscall entry/return events.
3040 It returns 1 if the inferior should keep going (and GDB
3041 should ignore the event), or 0 if the event deserves to be
3042 processed. */
3043
3044 static int
3045 handle_syscall_event (struct execution_control_state *ecs)
3046 {
3047 struct regcache *regcache;
3048 struct gdbarch *gdbarch;
3049 int syscall_number;
3050
3051 if (!ptid_equal (ecs->ptid, inferior_ptid))
3052 context_switch (ecs->ptid);
3053
3054 regcache = get_thread_regcache (ecs->ptid);
3055 gdbarch = get_regcache_arch (regcache);
3056 syscall_number = gdbarch_get_syscall_number (gdbarch, ecs->ptid);
3057 stop_pc = regcache_read_pc (regcache);
3058
3059 target_last_waitstatus.value.syscall_number = syscall_number;
3060
3061 if (catch_syscall_enabled () > 0
3062 && catching_syscall_number (syscall_number) > 0)
3063 {
3064 if (debug_infrun)
3065 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
3066 syscall_number);
3067
3068 ecs->event_thread->control.stop_bpstat
3069 = bpstat_stop_status (get_regcache_aspace (regcache),
3070 stop_pc, ecs->ptid);
3071 ecs->random_signal
3072 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat);
3073
3074 if (!ecs->random_signal)
3075 {
3076 /* Catchpoint hit. */
3077 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
3078 return 0;
3079 }
3080 }
3081
3082 /* If no catchpoint triggered for this, then keep going. */
3083 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3084 keep_going (ecs);
3085 return 1;
3086 }
3087
3088 /* Clear the supplied execution_control_state's stop_func_* fields. */
3089
3090 static void
3091 clear_stop_func (struct execution_control_state *ecs)
3092 {
3093 ecs->stop_func_filled_in = 0;
3094 ecs->stop_func_start = 0;
3095 ecs->stop_func_end = 0;
3096 ecs->stop_func_name = NULL;
3097 }
3098
3099 /* Lazily fill in the execution_control_state's stop_func_* fields. */
3100
3101 static void
3102 fill_in_stop_func (struct gdbarch *gdbarch,
3103 struct execution_control_state *ecs)
3104 {
3105 if (!ecs->stop_func_filled_in)
3106 {
3107 /* Don't care about return value; stop_func_start and stop_func_name
3108 will both be 0 if it doesn't work. */
3109 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3110 &ecs->stop_func_start, &ecs->stop_func_end);
3111 ecs->stop_func_start
3112 += gdbarch_deprecated_function_start_offset (gdbarch);
3113
3114 ecs->stop_func_filled_in = 1;
3115 }
3116 }
3117
3118 /* Given an execution control state that has been freshly filled in
3119 by an event from the inferior, figure out what it means and take
3120 appropriate action. */
3121
3122 static void
3123 handle_inferior_event (struct execution_control_state *ecs)
3124 {
3125 struct frame_info *frame;
3126 struct gdbarch *gdbarch;
3127 int stopped_by_watchpoint;
3128 int stepped_after_stopped_by_watchpoint = 0;
3129 struct symtab_and_line stop_pc_sal;
3130 enum stop_kind stop_soon;
3131
3132 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
3133 {
3134 /* We had an event in the inferior, but we are not interested in
3135 handling it at this level. The lower layers have already
3136 done what needs to be done, if anything.
3137
3138 One of the possible circumstances for this is when the
3139 inferior produces output for the console. The inferior has
3140 not stopped, and we are ignoring the event. Another possible
3141 circumstance is any event which the lower level knows will be
3142 reported multiple times without an intervening resume. */
3143 if (debug_infrun)
3144 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
3145 prepare_to_wait (ecs);
3146 return;
3147 }
3148
3149 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3150 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
3151 {
3152 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
3153
3154 gdb_assert (inf);
3155 stop_soon = inf->control.stop_soon;
3156 }
3157 else
3158 stop_soon = NO_STOP_QUIETLY;
3159
3160 /* Cache the last pid/waitstatus. */
3161 target_last_wait_ptid = ecs->ptid;
3162 target_last_waitstatus = ecs->ws;
3163
3164 /* Always clear state belonging to the previous time we stopped. */
3165 stop_stack_dummy = STOP_NONE;
3166
3167 /* If it's a new process, add it to the thread database. */
3168
3169 ecs->new_thread_event = (!ptid_equal (ecs->ptid, inferior_ptid)
3170 && !ptid_equal (ecs->ptid, minus_one_ptid)
3171 && !in_thread_list (ecs->ptid));
3172
3173 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3174 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED && ecs->new_thread_event)
3175 add_thread (ecs->ptid);
3176
3177 ecs->event_thread = find_thread_ptid (ecs->ptid);
3178
3179 /* Dependent on valid ECS->EVENT_THREAD. */
3180 adjust_pc_after_break (ecs);
3181
3182 /* Dependent on the current PC value modified by adjust_pc_after_break. */
3183 reinit_frame_cache ();
3184
3185 breakpoint_retire_moribund ();
3186
3187 /* First, distinguish signals caused by the debugger from signals
3188 that have to do with the program's own actions. Note that
3189 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
3190 on the operating system version. Here we detect when a SIGILL or
3191 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
3192 something similar for SIGSEGV, since a SIGSEGV will be generated
3193 when we're trying to execute a breakpoint instruction on a
3194 non-executable stack. This happens for call dummy breakpoints
3195 for architectures like SPARC that place call dummies on the
3196 stack. */
3197 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
3198 && (ecs->ws.value.sig == TARGET_SIGNAL_ILL
3199 || ecs->ws.value.sig == TARGET_SIGNAL_SEGV
3200 || ecs->ws.value.sig == TARGET_SIGNAL_EMT))
3201 {
3202 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3203
3204 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
3205 regcache_read_pc (regcache)))
3206 {
3207 if (debug_infrun)
3208 fprintf_unfiltered (gdb_stdlog,
3209 "infrun: Treating signal as SIGTRAP\n");
3210 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
3211 }
3212 }
3213
3214 /* Mark the non-executing threads accordingly. In all-stop, all
3215 threads of all processes are stopped when we get any event
3216 reported. In non-stop mode, only the event thread stops. If
3217 we're handling a process exit in non-stop mode, there's nothing
3218 to do, as threads of the dead process are gone, and threads of
3219 any other process were left running. */
3220 if (!non_stop)
3221 set_executing (minus_one_ptid, 0);
3222 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3223 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
3224 set_executing (inferior_ptid, 0);
3225
3226 switch (infwait_state)
3227 {
3228 case infwait_thread_hop_state:
3229 if (debug_infrun)
3230 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_thread_hop_state\n");
3231 break;
3232
3233 case infwait_normal_state:
3234 if (debug_infrun)
3235 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
3236 break;
3237
3238 case infwait_step_watch_state:
3239 if (debug_infrun)
3240 fprintf_unfiltered (gdb_stdlog,
3241 "infrun: infwait_step_watch_state\n");
3242
3243 stepped_after_stopped_by_watchpoint = 1;
3244 break;
3245
3246 case infwait_nonstep_watch_state:
3247 if (debug_infrun)
3248 fprintf_unfiltered (gdb_stdlog,
3249 "infrun: infwait_nonstep_watch_state\n");
3250 insert_breakpoints ();
3251
3252 /* FIXME-maybe: is this cleaner than setting a flag? Does it
3253 handle things like signals arriving and other things happening
3254 in combination correctly? */
3255 stepped_after_stopped_by_watchpoint = 1;
3256 break;
3257
3258 default:
3259 internal_error (__FILE__, __LINE__, _("bad switch"));
3260 }
3261
3262 infwait_state = infwait_normal_state;
3263 waiton_ptid = pid_to_ptid (-1);
3264
3265 switch (ecs->ws.kind)
3266 {
3267 case TARGET_WAITKIND_LOADED:
3268 if (debug_infrun)
3269 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
3270 /* Ignore gracefully during startup of the inferior, as it might
3271 be the shell which has just loaded some objects; otherwise,
3272 add the symbols for the newly loaded objects. Also ignore at
3273 the beginning of an attach or remote session; we will query
3274 the full list of libraries once the connection is
3275 established. */
3276 if (stop_soon == NO_STOP_QUIETLY)
3277 {
3278 /* Check for any newly added shared libraries if we're
3279 supposed to be adding them automatically. Switch
3280 terminal for any messages produced by
3281 breakpoint_re_set. */
3282 target_terminal_ours_for_output ();
3283 /* NOTE: cagney/2003-11-25: Make certain that the target
3284 stack's section table is kept up-to-date. Architectures,
3285 (e.g., PPC64), use the section table to perform
3286 operations such as address => section name and hence
3287 require the table to contain all sections (including
3288 those found in shared libraries). */
3289 #ifdef SOLIB_ADD
3290 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
3291 #else
3292 solib_add (NULL, 0, &current_target, auto_solib_add);
3293 #endif
3294 target_terminal_inferior ();
3295
3296 /* If requested, stop when the dynamic linker notifies
3297 gdb of events. This allows the user to get control
3298 and place breakpoints in initializer routines for
3299 dynamically loaded objects (among other things). */
3300 if (stop_on_solib_events)
3301 {
3302 /* Make sure we print "Stopped due to solib-event" in
3303 normal_stop. */
3304 stop_print_frame = 1;
3305
3306 stop_stepping (ecs);
3307 return;
3308 }
3309
3310 /* NOTE drow/2007-05-11: This might be a good place to check
3311 for "catch load". */
3312 }
3313
3314 /* If we are skipping through a shell, or through shared library
3315 loading that we aren't interested in, resume the program. If
3316 we're running the program normally, also resume. But stop if
3317 we're attaching or setting up a remote connection. */
3318 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3319 {
3320 /* Loading of shared libraries might have changed breakpoint
3321 addresses. Make sure new breakpoints are inserted. */
3322 if (stop_soon == NO_STOP_QUIETLY
3323 && !breakpoints_always_inserted_mode ())
3324 insert_breakpoints ();
3325 resume (0, TARGET_SIGNAL_0);
3326 prepare_to_wait (ecs);
3327 return;
3328 }
3329
3330 break;
3331
3332 case TARGET_WAITKIND_SPURIOUS:
3333 if (debug_infrun)
3334 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3335 resume (0, TARGET_SIGNAL_0);
3336 prepare_to_wait (ecs);
3337 return;
3338
3339 case TARGET_WAITKIND_EXITED:
3340 if (debug_infrun)
3341 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXITED\n");
3342 inferior_ptid = ecs->ptid;
3343 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3344 set_current_program_space (current_inferior ()->pspace);
3345 handle_vfork_child_exec_or_exit (0);
3346 target_terminal_ours (); /* Must do this before mourn anyway. */
3347 print_exited_reason (ecs->ws.value.integer);
3348
3349 /* Record the exit code in the convenience variable $_exitcode, so
3350 that the user can inspect this again later. */
3351 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3352 (LONGEST) ecs->ws.value.integer);
3353
3354 /* Also record this in the inferior itself. */
3355 current_inferior ()->has_exit_code = 1;
3356 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
3357
3358 gdb_flush (gdb_stdout);
3359 target_mourn_inferior ();
3360 singlestep_breakpoints_inserted_p = 0;
3361 cancel_single_step_breakpoints ();
3362 stop_print_frame = 0;
3363 stop_stepping (ecs);
3364 return;
3365
3366 case TARGET_WAITKIND_SIGNALLED:
3367 if (debug_infrun)
3368 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SIGNALLED\n");
3369 inferior_ptid = ecs->ptid;
3370 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3371 set_current_program_space (current_inferior ()->pspace);
3372 handle_vfork_child_exec_or_exit (0);
3373 stop_print_frame = 0;
3374 target_terminal_ours (); /* Must do this before mourn anyway. */
3375
3376 /* Note: By definition of TARGET_WAITKIND_SIGNALLED, we shouldn't
3377 reach here unless the inferior is dead. However, for years
3378 target_kill() was called here, which hints that fatal signals aren't
3379 really fatal on some systems. If that's true, then some changes
3380 may be needed. */
3381 target_mourn_inferior ();
3382
3383 print_signal_exited_reason (ecs->ws.value.sig);
3384 singlestep_breakpoints_inserted_p = 0;
3385 cancel_single_step_breakpoints ();
3386 stop_stepping (ecs);
3387 return;
3388
3389 /* The following are the only cases in which we keep going;
3390 the above cases end in a continue or goto. */
3391 case TARGET_WAITKIND_FORKED:
3392 case TARGET_WAITKIND_VFORKED:
3393 if (debug_infrun)
3394 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3395
3396 if (!ptid_equal (ecs->ptid, inferior_ptid))
3397 {
3398 context_switch (ecs->ptid);
3399 reinit_frame_cache ();
3400 }
3401
3402 /* Immediately detach breakpoints from the child before there's
3403 any chance of letting the user delete breakpoints from the
3404 breakpoint lists. If we don't do this early, it's easy to
3405 leave leftover traps in the child, viz.: "break foo; catch
3406 fork; c; <fork>; del; c; <child calls foo>". We only follow
3407 the fork on the last `continue', and by that time the
3408 breakpoint at "foo" is long gone from the breakpoint table.
3409 If we vforked, then we don't need to unpatch here, since both
3410 parent and child are sharing the same memory pages; we'll
3411 need to unpatch at follow/detach time instead to be certain
3412 that new breakpoints added between catchpoint hit time and
3413 vfork follow are detached. */
3414 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
3415 {
3416 int child_pid = ptid_get_pid (ecs->ws.value.related_pid);
3417
3418 /* This won't actually modify the breakpoint list, but will
3419 physically remove the breakpoints from the child. */
3420 detach_breakpoints (child_pid);
3421 }
3422
3423 if (singlestep_breakpoints_inserted_p)
3424 {
3425 /* Pull the single step breakpoints out of the target. */
3426 remove_single_step_breakpoints ();
3427 singlestep_breakpoints_inserted_p = 0;
3428 }
3429
3430 /* In case the event is caught by a catchpoint, remember that
3431 the event is to be followed at the next resume of the thread,
3432 and not immediately. */
3433 ecs->event_thread->pending_follow = ecs->ws;
3434
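/* Check whether a fork/vfork catchpoint wants to stop at this event
   by computing the bpstat for the current PC. */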
3435 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3436
3437 ecs->event_thread->control.stop_bpstat
3438 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3439 stop_pc, ecs->ptid);
3440
3441 /* Note that we're interested in knowing whether the bpstat actually
3442 causes a stop, not just whether it may explain the signal.
3443 Software watchpoints, for example, always appear in the
3444 bpstat. */
3445 ecs->random_signal
3446 = !bpstat_causes_stop (ecs->event_thread->control.stop_bpstat);
3447
3448 /* If no catchpoint triggered for this, then keep going. */
3449 if (ecs->random_signal)
3450 {
3451 ptid_t parent;
3452 ptid_t child;
3453 int should_resume;
3454 int follow_child
3455 = (follow_fork_mode_string == follow_fork_mode_child);
3456
3457 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3458
3459 should_resume = follow_fork ();
3460
3461 parent = ecs->ptid;
3462 child = ecs->ws.value.related_pid;
3463
3464 /* In non-stop mode, also resume the other branch. */
3465 if (non_stop && !detach_fork)
3466 {
3467 if (follow_child)
3468 switch_to_thread (parent);
3469 else
3470 switch_to_thread (child);
3471
3472 ecs->event_thread = inferior_thread ();
3473 ecs->ptid = inferior_ptid;
3474 keep_going (ecs);
3475 }
3476
3477 if (follow_child)
3478 switch_to_thread (child);
3479 else
3480 switch_to_thread (parent);
3481
3482 ecs->event_thread = inferior_thread ();
3483 ecs->ptid = inferior_ptid;
3484
3485 if (should_resume)
3486 keep_going (ecs);
3487 else
3488 stop_stepping (ecs);
3489 return;
3490 }
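/* A catchpoint triggered for this fork/vfork; report it like any
   other breakpoint stop by falling through to the common stop
   tests. */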
3491 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
3492 goto process_event_stop_test;
3493
3494 case TARGET_WAITKIND_VFORK_DONE:
3495 /* Done with the shared memory region. Re-insert breakpoints in
3496 the parent, and keep going. */
3497
3498 if (debug_infrun)
3499 fprintf_unfiltered (gdb_stdlog,
3500 "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3501
3502 if (!ptid_equal (ecs->ptid, inferior_ptid))
3503 context_switch (ecs->ptid);
3504
3505 current_inferior ()->waiting_for_vfork_done = 0;
3506 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3507 /* This also takes care of reinserting breakpoints in the
3508 previously locked inferior. */
3509 keep_going (ecs);
3510 return;
3511
3512 case TARGET_WAITKIND_EXECD:
3513 if (debug_infrun)
3514 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3515
3516 if (!ptid_equal (ecs->ptid, inferior_ptid))
3517 {
3518 context_switch (ecs->ptid);
3519 reinit_frame_cache ();
3520 }
3521
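/* The exec has replaced the program's address space, so any software
   single-step breakpoints we had inserted no longer exist there; drop
   our bookkeeping for them instead of trying to remove them. */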
3522 singlestep_breakpoints_inserted_p = 0;
3523 cancel_single_step_breakpoints ();
3524
3525 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3526
3527 /* Do whatever is necessary to the parent branch of the vfork. */
3528 handle_vfork_child_exec_or_exit (1);
3529
3530 /* This causes the eventpoints and symbol table to be reset.
3531 Must do this now, before trying to determine whether to
3532 stop. */
3533 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3534
3535 ecs->event_thread->control.stop_bpstat
3536 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3537 stop_pc, ecs->ptid);
3538 ecs->random_signal
3539 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat);
3540
3541 /* Note that this may be referenced from inside
3542 bpstat_stop_status above, through inferior_has_execd. */
3543 xfree (ecs->ws.value.execd_pathname);
3544 ecs->ws.value.execd_pathname = NULL;
3545
3546 /* If no catchpoint triggered for this, then keep going. */
3547 if (ecs->random_signal)
3548 {
3549 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3550 keep_going (ecs);
3551 return;
3552 }
3553 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
3554 goto process_event_stop_test;
3555
3556 /* Be careful not to try to gather much state about a thread
3557 that's in a syscall. It's frequently a losing proposition. */
3558 case TARGET_WAITKIND_SYSCALL_ENTRY:
3559 if (debug_infrun)
3560 fprintf_unfiltered (gdb_stdlog,
3561 "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3562 /* Getting the current syscall number. */
3563 if (handle_syscall_event (ecs) != 0)
3564 return;
3565 goto process_event_stop_test;
3566
3567 /* Before examining the threads further, step this thread to
3568 get it entirely out of the syscall. (We get notice of the
3569 event when the thread is just on the verge of exiting a
3570 syscall. Stepping one instruction seems to get it back
3571 into user code.) */
3572 case TARGET_WAITKIND_SYSCALL_RETURN:
3573 if (debug_infrun)
3574 fprintf_unfiltered (gdb_stdlog,
3575 "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
3576 if (handle_syscall_event (ecs) != 0)
3577 return;
3578 goto process_event_stop_test;
3579
3580 case TARGET_WAITKIND_STOPPED:
3581 if (debug_infrun)
3582 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
3583 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
3584 break;
3585
3586 case TARGET_WAITKIND_NO_HISTORY:
3587 /* Reverse execution: target ran out of history info. */
3588 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3589 print_no_history_reason ();
3590 stop_stepping (ecs);
3591 return;
3592 }
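/* The remaining handling is for events that may or may not cause a
   user-visible stop; the rest of this function decides whether to
   stop or to keep going. */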
3593
3594 if (ecs->new_thread_event)
3595 {
3596 if (non_stop)
3597 /* Non-stop assumes that the target handles adding new threads
3598 to the thread list. */
3599 internal_error (__FILE__, __LINE__,
3600 _("targets should add new threads to the thread "
3601 "list themselves in non-stop mode."));
3602
3603 /* We may want to consider not doing a resume here in order to
3604 give the user a chance to play with the new thread. It might
3605 be good to make that a user-settable option. */
3606
3607 /* At this point, all threads are stopped (happens automatically
3608 in either the OS or the native code). Therefore we need to
3609 continue all threads in order to make progress. */
3610
3611 if (!ptid_equal (ecs->ptid, inferior_ptid))
3612 context_switch (ecs->ptid);
3613 target_resume (RESUME_ALL, 0, TARGET_SIGNAL_0);
3614 prepare_to_wait (ecs);
3615 return;
3616 }
3617
3618 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED)
3619 {
3620 /* Do we need to clean up the state of a thread that has
3621 completed a displaced single-step? (Doing so usually affects
3622 the PC, so do it here, before we set stop_pc.) */
3623 displaced_step_fixup (ecs->ptid,
3624 ecs->event_thread->suspend.stop_signal);
3625
3626 /* If we either finished a single-step or hit a breakpoint, but
3627 the user wanted this thread to be stopped, pretend we got a
3628 SIG0 (generic unsignaled stop). */
3629
3630 if (ecs->event_thread->stop_requested
3631 && ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3632 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3633 }
3634
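/* Cache the PC of the event thread; most of the analysis below is
   driven by STOP_PC. */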
3635 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3636
3637 if (debug_infrun)
3638 {
3639 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3640 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3641 struct cleanup *old_chain = save_inferior_ptid ();
3642
3643 inferior_ptid = ecs->ptid;
3644
3645 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
3646 paddress (gdbarch, stop_pc));
3647 if (target_stopped_by_watchpoint ())
3648 {
3649 CORE_ADDR addr;
3650
3651 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
3652
3653 if (target_stopped_data_address (&current_target, &addr))
3654 fprintf_unfiltered (gdb_stdlog,
3655 "infrun: stopped data address = %s\n",
3656 paddress (gdbarch, addr));
3657 else
3658 fprintf_unfiltered (gdb_stdlog,
3659 "infrun: (no data address available)\n");
3660 }
3661
3662 do_cleanups (old_chain);
3663 }
3664
3665 if (stepping_past_singlestep_breakpoint)
3666 {
3667 gdb_assert (singlestep_breakpoints_inserted_p);
3668 gdb_assert (ptid_equal (singlestep_ptid, ecs->ptid));
3669 gdb_assert (!ptid_equal (singlestep_ptid, saved_singlestep_ptid));
3670
3671 stepping_past_singlestep_breakpoint = 0;
3672
3673 /* We've either finished single-stepping past the single-step
3674 breakpoint, or stopped for some other reason. It would be nice if
3675 we could tell, but we can't reliably. */
3676 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3677 {
3678 if (debug_infrun)
3679 fprintf_unfiltered (gdb_stdlog,
3680 "infrun: stepping_past_"
3681 "singlestep_breakpoint\n");
3682 /* Pull the single step breakpoints out of the target. */
3683 remove_single_step_breakpoints ();
3684 singlestep_breakpoints_inserted_p = 0;
3685
3686 ecs->random_signal = 0;
3687 ecs->event_thread->control.trap_expected = 0;
3688
3689 context_switch (saved_singlestep_ptid);
3690 if (deprecated_context_hook)
3691 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3692
3693 resume (1, TARGET_SIGNAL_0);
3694 prepare_to_wait (ecs);
3695 return;
3696 }
3697 }
3698
3699 if (!ptid_equal (deferred_step_ptid, null_ptid))
3700 {
3701 /* In non-stop mode, there's never a deferred_step_ptid set. */
3702 gdb_assert (!non_stop);
3703
3704 /* If we stopped for some other reason than single-stepping, ignore
3705 the fact that we were supposed to switch back. */
3706 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3707 {
3708 if (debug_infrun)
3709 fprintf_unfiltered (gdb_stdlog,
3710 "infrun: handling deferred step\n");
3711
3712 /* Pull the single step breakpoints out of the target. */
3713 if (singlestep_breakpoints_inserted_p)
3714 {
3715 remove_single_step_breakpoints ();
3716 singlestep_breakpoints_inserted_p = 0;
3717 }
3718
3719 ecs->event_thread->control.trap_expected = 0;
3720
3721 /* Note: We do not call context_switch at this point, as the
3722 context is already set up for stepping the original thread. */
3723 switch_to_thread (deferred_step_ptid);
3724 deferred_step_ptid = null_ptid;
3725 /* Suppress spurious "Switching to ..." message. */
3726 previous_inferior_ptid = inferior_ptid;
3727
3728 resume (1, TARGET_SIGNAL_0);
3729 prepare_to_wait (ecs);
3730 return;
3731 }
3732
3733 deferred_step_ptid = null_ptid;
3734 }
3735
3736 /* See if a thread hit a thread-specific breakpoint that was meant for
3737 another thread. If so, then step that thread past the breakpoint,
3738 and continue it. */
3739
3740 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3741 {
3742 int thread_hop_needed = 0;
3743 struct address_space *aspace =
3744 get_regcache_aspace (get_thread_regcache (ecs->ptid));
3745
3746 /* Check if a regular breakpoint has been hit before checking
3747 for a potential single step breakpoint. Otherwise, GDB will
3748 not see this breakpoint hit when stepping onto breakpoints. */
3749 if (regular_breakpoint_inserted_here_p (aspace, stop_pc))
3750 {
3751 ecs->random_signal = 0;
3752 if (!breakpoint_thread_match (aspace, stop_pc, ecs->ptid))
3753 thread_hop_needed = 1;
3754 }
3755 else if (singlestep_breakpoints_inserted_p)
3756 {
3757 /* We have not context switched yet, so this should be true
3758 no matter which thread hit the singlestep breakpoint. */
3759 gdb_assert (ptid_equal (inferior_ptid, singlestep_ptid));
3760 if (debug_infrun)
3761 fprintf_unfiltered (gdb_stdlog, "infrun: software single step "
3762 "trap for %s\n",
3763 target_pid_to_str (ecs->ptid));
3764
3765 ecs->random_signal = 0;
3766 /* The call to in_thread_list is necessary because PTIDs sometimes
3767 change when we go from single-threaded to multi-threaded. If
3768 the singlestep_ptid is still in the list, assume that it is
3769 really different from ecs->ptid. */
3770 if (!ptid_equal (singlestep_ptid, ecs->ptid)
3771 && in_thread_list (singlestep_ptid))
3772 {
3773 /* If the PC of the thread we were trying to single-step
3774 has changed, discard this event (which we were going
3775 to ignore anyway), and pretend we saw that thread
3776 trap. This prevents us continuously moving the
3777 single-step breakpoint forward, one instruction at a
3778 time. If the PC has changed, then the thread we were
3779 trying to single-step has trapped or been signalled,
3780 but the event has not been reported to GDB yet.
3781
3782 There might be some cases where this loses signal
3783 information, if a signal has arrived at exactly the
3784 same time that the PC changed, but this is the best
3785 we can do with the information available. Perhaps we
3786 should arrange to report all events for all threads
3787 when they stop, or to re-poll the remote looking for
3788 this particular thread (i.e. temporarily enable
3789 schedlock). */
3790
3791 CORE_ADDR new_singlestep_pc
3792 = regcache_read_pc (get_thread_regcache (singlestep_ptid));
3793
3794 if (new_singlestep_pc != singlestep_pc)
3795 {
3796 enum target_signal stop_signal;
3797
3798 if (debug_infrun)
3799 fprintf_unfiltered (gdb_stdlog, "infrun: unexpected thread,"
3800 " but expected thread advanced also\n");
3801
3802 /* The current context still belongs to
3803 singlestep_ptid. Don't swap here, since that's
3804 the context we want to use. Just fudge our
3805 state and continue. */
3806 stop_signal = ecs->event_thread->suspend.stop_signal;
3807 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3808 ecs->ptid = singlestep_ptid;
3809 ecs->event_thread = find_thread_ptid (ecs->ptid);
3810 ecs->event_thread->suspend.stop_signal = stop_signal;
3811 stop_pc = new_singlestep_pc;
3812 }
3813 else
3814 {
3815 if (debug_infrun)
3816 fprintf_unfiltered (gdb_stdlog,
3817 "infrun: unexpected thread\n");
3818
3819 thread_hop_needed = 1;
3820 stepping_past_singlestep_breakpoint = 1;
3821 saved_singlestep_ptid = singlestep_ptid;
3822 }
3823 }
3824 }
3825
3826 if (thread_hop_needed)
3827 {
3828 struct regcache *thread_regcache;
3829 int remove_status = 0;
3830
3831 if (debug_infrun)
3832 fprintf_unfiltered (gdb_stdlog, "infrun: thread_hop_needed\n");
3833
3834 /* Switch context before touching inferior memory, the
3835 previous thread may have exited. */
3836 if (!ptid_equal (inferior_ptid, ecs->ptid))
3837 context_switch (ecs->ptid);
3838
3839 /* Saw a breakpoint, but it was hit by the wrong thread.
3840 Just continue. */
3841
3842 if (singlestep_breakpoints_inserted_p)
3843 {
3844 /* Pull the single step breakpoints out of the target. */
3845 remove_single_step_breakpoints ();
3846 singlestep_breakpoints_inserted_p = 0;
3847 }
3848
3849 /* If the arch can displace step, don't remove the
3850 breakpoints. */
3851 thread_regcache = get_thread_regcache (ecs->ptid);
3852 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
3853 remove_status = remove_breakpoints ();
3854
3855 /* Did we fail to remove breakpoints? If so, try
3856 to set the PC past the bp. (There's at least
3857 one situation in which we can fail to remove
3858 the breakpoints: on HP-UX systems that use ttrace, we can't
3859 change the address space of a vforking child
3860 process until the child exits (well, okay, not
3861 then either :-) or execs.) */
3862 if (remove_status != 0)
3863 error (_("Cannot step over breakpoint hit in wrong thread"));
3864 else
3865 { /* Single step */
3866 if (!non_stop)
3867 {
3868 /* We only need to wait specifically for the next event
3869 from this thread in all-stop mode. */
3870 waiton_ptid = ecs->ptid;
3871 infwait_state = infwait_thread_hop_state;
3872 }
3873
3874 ecs->event_thread->stepping_over_breakpoint = 1;
3875 keep_going (ecs);
3876 return;
3877 }
3878 }
3879 else if (singlestep_breakpoints_inserted_p)
3880 {
3881 ecs->random_signal = 0;
3882 }
3883 }
3884 else
3885 ecs->random_signal = 1;
3886
3887 /* See if something interesting happened to the non-current thread. If
3888 so, then switch to that thread. */
3889 if (!ptid_equal (ecs->ptid, inferior_ptid))
3890 {
3891 if (debug_infrun)
3892 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
3893
3894 context_switch (ecs->ptid);
3895
3896 if (deprecated_context_hook)
3897 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3898 }
3899
3900 /* At this point, get hold of the now-current thread's frame. */
3901 frame = get_current_frame ();
3902 gdbarch = get_frame_arch (frame);
3903
3904 if (singlestep_breakpoints_inserted_p)
3905 {
3906 /* Pull the single step breakpoints out of the target. */
3907 remove_single_step_breakpoints ();
3908 singlestep_breakpoints_inserted_p = 0;
3909 }
3910
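/* Figure out whether a watchpoint triggered at this stop. If this
   event is the single-step we issued to let a watchpoint-triggering
   instruction complete (see below), don't treat it as a fresh
   trigger, or we would try to step over it again. */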
3911 if (stepped_after_stopped_by_watchpoint)
3912 stopped_by_watchpoint = 0;
3913 else
3914 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
3915
3916 /* If necessary, step over this watchpoint. We'll be back to display
3917 it in a moment. */
3918 if (stopped_by_watchpoint
3919 && (target_have_steppable_watchpoint
3920 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
3921 {
3922 /* At this point, we are stopped at an instruction which has
3923 attempted to write to a piece of memory under control of
3924 a watchpoint. The instruction hasn't actually executed
3925 yet. If we were to evaluate the watchpoint expression
3926 now, we would get the old value, and therefore no change
3927 would seem to have occurred.
3928
3929 In order to make watchpoints work `right', we really need
3930 to complete the memory write, and then evaluate the
3931 watchpoint expression. We do this by single-stepping the
3932 target.
3933
3934 It may not be necessary to disable the watchpoint to step over
3935 it. For example, the PA can (with some kernel cooperation)
3936 single step over a watchpoint without disabling the watchpoint.
3937
3938 It is far more common to need to disable a watchpoint to step
3939 the inferior over it. If we have non-steppable watchpoints,
3940 we must disable the current watchpoint; it's simplest to
3941 disable all watchpoints and breakpoints. */
3942 int hw_step = 1;
3943
3944 if (!target_have_steppable_watchpoint)
3945 {
3946 remove_breakpoints ();
3947 /* See comment in resume why we need to stop bypassing signals
3948 while breakpoints have been removed. */
3949 target_pass_signals (0, NULL);
3950 }
3951 /* Single step */
3952 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
3953 target_resume (ecs->ptid, hw_step, TARGET_SIGNAL_0);
3954 waiton_ptid = ecs->ptid;
3955 if (target_have_steppable_watchpoint)
3956 infwait_state = infwait_step_watch_state;
3957 else
3958 infwait_state = infwait_nonstep_watch_state;
3959 prepare_to_wait (ecs);
3960 return;
3961 }
3962
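/* Reset the per-event stop state before working out why we
   stopped. */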
3963 clear_stop_func (ecs);
3964 ecs->event_thread->stepping_over_breakpoint = 0;
3965 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
3966 ecs->event_thread->control.stop_step = 0;
3967 stop_print_frame = 1;
3968 ecs->random_signal = 0;
3969 stopped_by_random_signal = 0;
3970
3971 /* Hide inlined functions starting here, unless we just performed stepi or
3972 nexti. After stepi and nexti, always show the innermost frame (not any
3973 inline function call sites). */
3974 if (ecs->event_thread->control.step_range_end != 1)
3975 skip_inline_frames (ecs->ptid);
3976
3977 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
3978 && ecs->event_thread->control.trap_expected
3979 && gdbarch_single_step_through_delay_p (gdbarch)
3980 && currently_stepping (ecs->event_thread))
3981 {
3982 /* We're trying to step off a breakpoint. Turns out that we're
3983 also on an instruction that needs to be stepped multiple
3984 times before it has been fully executed. E.g., architectures
3985 with a delay slot. It needs to be stepped twice, once for
3986 the instruction and once for the delay slot. */
3987 int step_through_delay
3988 = gdbarch_single_step_through_delay (gdbarch, frame);
3989
3990 if (debug_infrun && step_through_delay)
3991 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
3992 if (ecs->event_thread->control.step_range_end == 0
3993 && step_through_delay)
3994 {
3995 /* The user issued a continue when stopped at a breakpoint.
3996 Set up for another trap and get out of here. */
3997 ecs->event_thread->stepping_over_breakpoint = 1;
3998 keep_going (ecs);
3999 return;
4000 }
4001 else if (step_through_delay)
4002 {
4003 /* The user issued a step when stopped at a breakpoint.
4004 Maybe we should stop, maybe we should not - the delay
4005 slot *might* correspond to a line of source. In any
4006 case, don't decide that here, just set
4007 ecs->event_thread->stepping_over_breakpoint, making sure we
4008 single-step again before breakpoints are re-inserted. */
4009 ecs->event_thread->stepping_over_breakpoint = 1;
4010 }
4011 }
4012
4013 /* Look at the cause of the stop, and decide what to do.
4014 The alternatives are:
4015 1) stop_stepping and return; to really stop and return to the debugger,
4016 2) keep_going and return to start up again
4017 (set ecs->event_thread->stepping_over_breakpoint to 1 to single step once)
4018 3) set ecs->random_signal to 1, and the decision between 1 and 2
4019 will be made according to the signal handling tables. */
4020
4021 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4022 || stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_NO_SIGSTOP
4023 || stop_soon == STOP_QUIETLY_REMOTE)
4024 {
4025 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4026 && stop_after_trap)
4027 {
4028 if (debug_infrun)
4029 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
4030 stop_print_frame = 0;
4031 stop_stepping (ecs);
4032 return;
4033 }
4034
4035 /* This is originated from start_remote(), start_inferior() and
4036 shared libraries hook functions. */
4037 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
4038 {
4039 if (debug_infrun)
4040 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
4041 stop_stepping (ecs);
4042 return;
4043 }
4044
4045 /* This originates from attach_command(). We need to overwrite
4046 the stop_signal here, because some kernels don't ignore a
4047 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
4048 See more comments in inferior.h. On the other hand, if we
4049 get a non-SIGSTOP, report it to the user - assume the backend
4050 will handle the SIGSTOP if it should show up later.
4051
4052 Also consider that the attach is complete when we see a
4053 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
4054 target extended-remote report it instead of a SIGSTOP
4055 (e.g. gdbserver). We already rely on SIGTRAP being our
4056 signal, so this is no exception.
4057
4058 Also consider that the attach is complete when we see a
4059 TARGET_SIGNAL_0. In non-stop mode, GDB will explicitly tell
4060 the target to stop all threads of the inferior, in case the
4061 low level attach operation doesn't stop them implicitly. If
4062 they weren't stopped implicitly, then the stub will report a
4063 TARGET_SIGNAL_0, meaning: stopped for no particular reason
4064 other than GDB's request. */
4065 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
4066 && (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_STOP
4067 || ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4068 || ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_0))
4069 {
4070 stop_stepping (ecs);
4071 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
4072 return;
4073 }
4074
4075 /* See if there is a breakpoint at the current PC. */
4076 ecs->event_thread->control.stop_bpstat
4077 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
4078 stop_pc, ecs->ptid);
4079
4080 /* The following is needed in case a breakpoint condition
4081 called a function. */
4082 stop_print_frame = 1;
4083
4084 /* This is where we handle "moribund" watchpoints. Unlike
4085 software breakpoints traps, hardware watchpoint traps are
4086 always distinguishable from random traps. If no high-level
4087 watchpoint is associated with the reported stop data address
4088 anymore, then the bpstat does not explain the signal ---
4089 simply make sure to ignore it if `stopped_by_watchpoint' is
4090 set. */
4091
4092 if (debug_infrun
4093 && ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4094 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat)
4095 && stopped_by_watchpoint)
4096 fprintf_unfiltered (gdb_stdlog,
4097 "infrun: no user watchpoint explains "
4098 "watchpoint SIGTRAP, ignoring\n");
4099
4100 /* NOTE: cagney/2003-03-29: These two checks for a random signal
4101 at one stage in the past included checks for an inferior
4102 function call's call dummy's return breakpoint. The original
4103 comment, that went with the test, read:
4104
4105 ``End of a stack dummy. Some systems (e.g. Sony news) give
4106 another signal besides SIGTRAP, so check here as well as
4107 above.''
4108
4109 If someone ever tries to get call dummys on a
4110 non-executable stack to work (where the target would stop
4111 with something like a SIGSEGV), then those tests might need
4112 to be re-instated. Given, however, that the tests were only
4113 enabled when momentary breakpoints were not being used, I
4114 suspect that it won't be the case.
4115
4116 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
4117 be necessary for call dummies on a non-executable stack on
4118 SPARC. */
4119
4120 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
4121 ecs->random_signal
4122 = !(bpstat_explains_signal (ecs->event_thread->control.stop_bpstat)
4123 || stopped_by_watchpoint
4124 || ecs->event_thread->control.trap_expected
4125 || (ecs->event_thread->control.step_range_end
4126 && (ecs->event_thread->control.step_resume_breakpoint
4127 == NULL)));
4128 else
4129 {
4130 ecs->random_signal = !bpstat_explains_signal
4131 (ecs->event_thread->control.stop_bpstat);
4132 if (!ecs->random_signal)
4133 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
4134 }
4135 }
4136
4137 /* When we reach this point, we've pretty much decided
4138 that the reason for stopping must've been a random
4139 (unexpected) signal. */
4140
4141 else
4142 ecs->random_signal = 1;
4143
4144 process_event_stop_test:
4145
4146 /* Re-fetch current thread's frame in case we did a
4147 "goto process_event_stop_test" above. */
4148 frame = get_current_frame ();
4149 gdbarch = get_frame_arch (frame);
4150
4151 /* For the program's own signals, act according to
4152 the signal handling tables. */
4153
4154 if (ecs->random_signal)
4155 {
4156 /* Signal not for debugging purposes. */
4157 int printed = 0;
4158 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
4159
4160 if (debug_infrun)
4161 fprintf_unfiltered (gdb_stdlog, "infrun: random signal %d\n",
4162 ecs->event_thread->suspend.stop_signal);
4163
4164 stopped_by_random_signal = 1;
4165
4166 if (signal_print[ecs->event_thread->suspend.stop_signal])
4167 {
4168 printed = 1;
4169 target_terminal_ours_for_output ();
4170 print_signal_received_reason
4171 (ecs->event_thread->suspend.stop_signal);
4172 }
4173 /* Always stop on signals if we're either just gaining control
4174 of the program, or the user explicitly requested this thread
4175 to remain stopped. */
4176 if (stop_soon != NO_STOP_QUIETLY
4177 || ecs->event_thread->stop_requested
4178 || (!inf->detaching
4179 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
4180 {
4181 stop_stepping (ecs);
4182 return;
4183 }
4184 /* If not going to stop, give terminal back
4185 if we took it away. */
4186 else if (printed)
4187 target_terminal_inferior ();
4188
4189 /* Clear the signal if it should not be passed. */
4190 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
4191 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
4192
4193 if (ecs->event_thread->prev_pc == stop_pc
4194 && ecs->event_thread->control.trap_expected
4195 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4196 {
4197 /* We were just starting a new sequence, attempting to
4198 single-step off of a breakpoint and expecting a SIGTRAP.
4199 Instead this signal arrives. This signal will take us out
4200 of the stepping range, so GDB needs to remember to resume
4201 stepping off that breakpoint once the signal handler
4202 returns. */
4203 /* To simplify things, "continue" is forced to use the same
4204 code paths as single-step - set a breakpoint at the
4205 signal return address and then, once hit, step off that
4206 breakpoint. */
4207 if (debug_infrun)
4208 fprintf_unfiltered (gdb_stdlog,
4209 "infrun: signal arrived while stepping over "
4210 "breakpoint\n");
4211
4212 insert_hp_step_resume_breakpoint_at_frame (frame);
4213 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4214 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4215 ecs->event_thread->control.trap_expected = 0;
4216 keep_going (ecs);
4217 return;
4218 }
4219
4220 if (ecs->event_thread->control.step_range_end != 0
4221 && ecs->event_thread->suspend.stop_signal != TARGET_SIGNAL_0
4222 && (ecs->event_thread->control.step_range_start <= stop_pc
4223 && stop_pc < ecs->event_thread->control.step_range_end)
4224 && frame_id_eq (get_stack_frame_id (frame),
4225 ecs->event_thread->control.step_stack_frame_id)
4226 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4227 {
4228 /* The inferior is about to take a signal that will take it
4229 out of the single step range. Set a breakpoint at the
4230 current PC (which is presumably where the signal handler
4231 will eventually return) and then allow the inferior to
4232 run free.
4233
4234 Note that this is only needed for a signal delivered
4235 while in the single-step range. Nested signals aren't a
4236 problem as they eventually all return. */
4237 if (debug_infrun)
4238 fprintf_unfiltered (gdb_stdlog,
4239 "infrun: signal may take us out of "
4240 "single-step range\n");
4241
4242 insert_hp_step_resume_breakpoint_at_frame (frame);
4243 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4244 ecs->event_thread->control.trap_expected = 0;
4245 keep_going (ecs);
4246 return;
4247 }
4248
4249 /* Note: step_resume_breakpoint may be non-NULL. This occurs
4250 when either there's a nested signal, or when there's a
4251 pending signal enabled just as the signal handler returns
4252 (leaving the inferior at the step-resume-breakpoint without
4253 actually executing it). Either way continue until the
4254 breakpoint is really hit. */
4255 keep_going (ecs);
4256 return;
4257 }
4258
4259 /* Handle cases caused by hitting a breakpoint. */
4260 {
4261 CORE_ADDR jmp_buf_pc;
4262 struct bpstat_what what;
4263
4264 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
4265
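/* If we stopped at a call-dummy breakpoint (an inferior function
   call set up by GDB), remember how that dummy frame should be
   handled when we finally stop. */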
4266 if (what.call_dummy)
4267 {
4268 stop_stack_dummy = what.call_dummy;
4269 }
4270
4271 /* If we hit an internal event that triggers symbol changes, the
4272 current frame will be invalidated within bpstat_what (e.g., if
4273 we hit an internal solib event). Re-fetch it. */
4274 frame = get_current_frame ();
4275 gdbarch = get_frame_arch (frame);
4276
4277 switch (what.main_action)
4278 {
4279 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
4280 /* If we hit the breakpoint at longjmp while stepping, we
4281 install a momentary breakpoint at the target of the
4282 jmp_buf. */
4283
4284 if (debug_infrun)
4285 fprintf_unfiltered (gdb_stdlog,
4286 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
4287
4288 ecs->event_thread->stepping_over_breakpoint = 1;
4289
4290 if (what.is_longjmp)
4291 {
4292 if (!gdbarch_get_longjmp_target_p (gdbarch)
4293 || !gdbarch_get_longjmp_target (gdbarch,
4294 frame, &jmp_buf_pc))
4295 {
4296 if (debug_infrun)
4297 fprintf_unfiltered (gdb_stdlog,
4298 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
4299 "(!gdbarch_get_longjmp_target)\n");
4300 keep_going (ecs);
4301 return;
4302 }
4303
4304 /* We're going to replace the current step-resume breakpoint
4305 with a longjmp-resume breakpoint. */
4306 delete_step_resume_breakpoint (ecs->event_thread);
4307
4308 /* Insert a breakpoint at resume address. */
4309 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4310 }
4311 else
4312 {
4313 struct symbol *func = get_frame_function (frame);
4314
4315 if (func)
4316 check_exception_resume (ecs, frame, func);
4317 }
4318 keep_going (ecs);
4319 return;
4320
4321 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4322 if (debug_infrun)
4323 fprintf_unfiltered (gdb_stdlog,
4324 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4325
4326 if (what.is_longjmp)
4327 {
4328 gdb_assert (ecs->event_thread->control.step_resume_breakpoint
4329 != NULL);
4330 delete_step_resume_breakpoint (ecs->event_thread);
4331 }
4332 else
4333 {
4334 /* There are several cases to consider.
4335
4336 1. The initiating frame no longer exists. In this case
4337 we must stop, because the exception has gone too far.
4338
4339 2. The initiating frame exists, and is the same as the
4340 current frame. We stop, because the exception has been
4341 caught.
4342
4343 3. The initiating frame exists and is different from
4344 the current frame. This means the exception has been
4345 caught beneath the initiating frame, so keep going. */
4346 struct frame_info *init_frame
4347 = frame_find_by_id (ecs->event_thread->initiating_frame);
4348
4349 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
4350 != NULL);
4351 delete_exception_resume_breakpoint (ecs->event_thread);
4352
4353 if (init_frame)
4354 {
4355 struct frame_id current_id
4356 = get_frame_id (get_current_frame ());
4357 if (frame_id_eq (current_id,
4358 ecs->event_thread->initiating_frame))
4359 {
4360 /* Case 2. Fall through. */
4361 }
4362 else
4363 {
4364 /* Case 3. */
4365 keep_going (ecs);
4366 return;
4367 }
4368 }
4369
4370 /* For Cases 1 and 2, remove the step-resume breakpoint,
4371 if it exists. */
4372 delete_step_resume_breakpoint (ecs->event_thread);
4373 }
4374
4375 ecs->event_thread->control.stop_step = 1;
4376 print_end_stepping_range_reason ();
4377 stop_stepping (ecs);
4378 return;
4379
4380 case BPSTAT_WHAT_SINGLE:
4381 if (debug_infrun)
4382 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4383 ecs->event_thread->stepping_over_breakpoint = 1;
4384 /* Still need to check other stuff, at least the case
4385 where we are stepping and step out of the right range. */
4386 break;
4387
4388 case BPSTAT_WHAT_STEP_RESUME:
4389 if (debug_infrun)
4390 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4391
4392 delete_step_resume_breakpoint (ecs->event_thread);
4393 if (ecs->event_thread->control.proceed_to_finish
4394 && execution_direction == EXEC_REVERSE)
4395 {
4396 struct thread_info *tp = ecs->event_thread;
4397
4398 /* We are finishing a function in reverse, and just hit
4399 the step-resume breakpoint at the start address of the
4400 function, and we're almost there -- just need to back
4401 up by one more single-step, which should take us back
4402 to the function call. */
4403 tp->control.step_range_start = tp->control.step_range_end = 1;
4404 keep_going (ecs);
4405 return;
4406 }
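/* Fill in ecs->stop_func_start (and friends) from the function
   containing STOP_PC; the entry-point comparison below relies on
   it. */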
4407 fill_in_stop_func (gdbarch, ecs);
4408 if (stop_pc == ecs->stop_func_start
4409 && execution_direction == EXEC_REVERSE)
4410 {
4411 /* We are stepping over a function call in reverse, and
4412 just hit the step-resume breakpoint at the start
4413 address of the function. Go back to single-stepping,
4414 which should take us back to the function call. */
4415 ecs->event_thread->stepping_over_breakpoint = 1;
4416 keep_going (ecs);
4417 return;
4418 }
4419 break;
4420
4421 case BPSTAT_WHAT_STOP_NOISY:
4422 if (debug_infrun)
4423 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4424 stop_print_frame = 1;
4425
4426 /* We are about to nuke the step_resume_breakpoint via the
4427 cleanup chain, so no need to worry about it here. */
4428
4429 stop_stepping (ecs);
4430 return;
4431
4432 case BPSTAT_WHAT_STOP_SILENT:
4433 if (debug_infrun)
4434 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4435 stop_print_frame = 0;
4436
4437 /* We are about to nuke the step_resume_breakpoint via the
4438 cleanup chain, so no need to worry about it here. */
4439
4440 stop_stepping (ecs);
4441 return;
4442
4443 case BPSTAT_WHAT_HP_STEP_RESUME:
4444 if (debug_infrun)
4445 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
4446
4447 delete_step_resume_breakpoint (ecs->event_thread);
4448 if (ecs->event_thread->step_after_step_resume_breakpoint)
4449 {
4450 /* Back when the step-resume breakpoint was inserted, we
4451 were trying to single-step off a breakpoint. Go back
4452 to doing that. */
4453 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4454 ecs->event_thread->stepping_over_breakpoint = 1;
4455 keep_going (ecs);
4456 return;
4457 }
4458 break;
4459
4460 case BPSTAT_WHAT_KEEP_CHECKING:
4461 break;
4462 }
4463 }
4464
4465 /* We come here if we hit a breakpoint but should not
4466 stop for it. Possibly we also were stepping
4467 and should stop for that. So fall through and
4468 test for stepping. But, if not stepping,
4469 do not stop. */
4470
4471 /* In all-stop mode, if we're currently stepping but have stopped in
4472 some other thread, we need to switch back to the stepped thread. */
4473 if (!non_stop)
4474 {
4475 struct thread_info *tp;
4476
4477 tp = iterate_over_threads (currently_stepping_or_nexting_callback,
4478 ecs->event_thread);
4479 if (tp)
4480 {
4481 /* However, if the current thread is blocked on some internal
4482 breakpoint, and we simply need to step over that breakpoint
4483 to get it going again, do that first. */
4484 if ((ecs->event_thread->control.trap_expected
4485 && ecs->event_thread->suspend.stop_signal != TARGET_SIGNAL_TRAP)
4486 || ecs->event_thread->stepping_over_breakpoint)
4487 {
4488 keep_going (ecs);
4489 return;
4490 }
4491
4492 /* If the stepping thread exited, then don't try to switch
4493 back and resume it, which could fail in several different
4494 ways depending on the target. Instead, just keep going.
4495
4496 We can find a stepping dead thread in the thread list in
4497 two cases:
4498
4499 - The target supports thread exit events, and when the
4500 target tries to delete the thread from the thread list,
4501 inferior_ptid pointed at the exiting thread. In such
4502 case, calling delete_thread does not really remove the
4503 thread from the list; instead, the thread is left listed,
4504 with 'exited' state.
4505
4506 - The target's debug interface does not support thread
4507 exit events, and so we have no idea whatsoever if the
4508 previously stepping thread is still alive. For that
4509 reason, we need to synchronously query the target
4510 now. */
4511 if (is_exited (tp->ptid)
4512 || !target_thread_alive (tp->ptid))
4513 {
4514 if (debug_infrun)
4515 fprintf_unfiltered (gdb_stdlog,
4516 "infrun: not switching back to "
4517 "stepped thread, it has vanished\n");
4518
4519 delete_thread (tp->ptid);
4520 keep_going (ecs);
4521 return;
4522 }
4523
4524 /* Otherwise, we no longer expect a trap in the current thread.
4525 Clear the trap_expected flag before switching back -- this is
4526 what keep_going would do as well, if we called it. */
4527 ecs->event_thread->control.trap_expected = 0;
4528
4529 if (debug_infrun)
4530 fprintf_unfiltered (gdb_stdlog,
4531 "infrun: switching back to stepped thread\n");
4532
4533 ecs->event_thread = tp;
4534 ecs->ptid = tp->ptid;
4535 context_switch (ecs->ptid);
4536 keep_going (ecs);
4537 return;
4538 }
4539 }
4540
4541 /* Are we stepping to get the inferior out of the dynamic linker's
4542 hook (and possibly the dld itself) after catching a shlib
4543 event? */
4544 if (ecs->event_thread->stepping_through_solib_after_catch)
4545 {
4546 #if defined(SOLIB_ADD)
4547 /* Have we reached our destination? If not, keep going. */
4548 if (SOLIB_IN_DYNAMIC_LINKER (PIDGET (ecs->ptid), stop_pc))
4549 {
4550 if (debug_infrun)
4551 fprintf_unfiltered (gdb_stdlog,
4552 "infrun: stepping in dynamic linker\n");
4553 ecs->event_thread->stepping_over_breakpoint = 1;
4554 keep_going (ecs);
4555 return;
4556 }
4557 #endif
4558 if (debug_infrun)
4559 fprintf_unfiltered (gdb_stdlog, "infrun: step past dynamic linker\n");
4560 /* Else, stop and report the catchpoint(s) whose triggering
4561 caused us to begin stepping. */
4562 ecs->event_thread->stepping_through_solib_after_catch = 0;
4563 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
4564 ecs->event_thread->control.stop_bpstat
4565 = bpstat_copy (ecs->event_thread->stepping_through_solib_catchpoints);
4566 bpstat_clear (&ecs->event_thread->stepping_through_solib_catchpoints);
4567 stop_print_frame = 1;
4568 stop_stepping (ecs);
4569 return;
4570 }
4571
4572 if (ecs->event_thread->control.step_resume_breakpoint)
4573 {
4574 if (debug_infrun)
4575 fprintf_unfiltered (gdb_stdlog,
4576 "infrun: step-resume breakpoint is inserted\n");
4577
4578 /* Having a step-resume breakpoint overrides anything
4579 else having to do with stepping commands until
4580 that breakpoint is reached. */
4581 keep_going (ecs);
4582 return;
4583 }
4584
4585 if (ecs->event_thread->control.step_range_end == 0)
4586 {
4587 if (debug_infrun)
4588 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4589 /* Likewise if we aren't even stepping. */
4590 keep_going (ecs);
4591 return;
4592 }
4593
4594 /* Re-fetch current thread's frame in case the code above caused
4595 the frame cache to be re-initialized, making our FRAME variable
4596 a dangling pointer. */
4597 frame = get_current_frame ();
4598 gdbarch = get_frame_arch (frame);
4599 fill_in_stop_func (gdbarch, ecs);
4600
4601 /* If stepping through a line, keep going if still within it.
4602
4603 Note that step_range_end is the address of the first instruction
4604 beyond the step range, and NOT the address of the last instruction
4605 within it!
4606
4607 Note also that during reverse execution, we may be stepping
4608 through a function epilogue and therefore must detect when
4609 the current-frame changes in the middle of a line. */
4610
4611 if (stop_pc >= ecs->event_thread->control.step_range_start
4612 && stop_pc < ecs->event_thread->control.step_range_end
4613 && (execution_direction != EXEC_REVERSE
4614 || frame_id_eq (get_frame_id (frame),
4615 ecs->event_thread->control.step_frame_id)))
4616 {
4617 if (debug_infrun)
4618 fprintf_unfiltered
4619 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4620 paddress (gdbarch, ecs->event_thread->control.step_range_start),
4621 paddress (gdbarch, ecs->event_thread->control.step_range_end));
4622
4623 /* When stepping backward, stop at beginning of line range
4624 (unless it's the function entry point, in which case
4625 keep going back to the call point). */
4626 if (stop_pc == ecs->event_thread->control.step_range_start
4627 && stop_pc != ecs->stop_func_start
4628 && execution_direction == EXEC_REVERSE)
4629 {
4630 ecs->event_thread->control.stop_step = 1;
4631 print_end_stepping_range_reason ();
4632 stop_stepping (ecs);
4633 }
4634 else
4635 keep_going (ecs);
4636
4637 return;
4638 }
4639
4640 /* We stepped out of the stepping range. */
4641
4642 /* If we are stepping at the source level and entered the runtime
4643 loader dynamic symbol resolution code...
4644
4645 EXEC_FORWARD: we keep on single stepping until we exit the run
4646 time loader code and reach the callee's address.
4647
4648 EXEC_REVERSE: we've already executed the callee (backward), and
4649 the runtime loader code is handled just like any other
4650 undebuggable function call. Now we need only keep stepping
4651 backward through the trampoline code, and that's handled further
4652 down, so there is nothing for us to do here. */
4653
4654 if (execution_direction != EXEC_REVERSE
4655 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4656 && in_solib_dynsym_resolve_code (stop_pc))
4657 {
4658 CORE_ADDR pc_after_resolver =
4659 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4660
4661 if (debug_infrun)
4662 fprintf_unfiltered (gdb_stdlog,
4663 "infrun: stepped into dynsym resolve code\n");
4664
4665 if (pc_after_resolver)
4666 {
4667 /* Set up a step-resume breakpoint at the address
4668 indicated by SKIP_SOLIB_RESOLVER. */
4669 struct symtab_and_line sr_sal;
4670
4671 init_sal (&sr_sal);
4672 sr_sal.pc = pc_after_resolver;
4673 sr_sal.pspace = get_frame_program_space (frame);
4674
4675 insert_step_resume_breakpoint_at_sal (gdbarch,
4676 sr_sal, null_frame_id);
4677 }
4678
4679 keep_going (ecs);
4680 return;
4681 }
4682
4683 if (ecs->event_thread->control.step_range_end != 1
4684 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4685 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4686 && get_frame_type (frame) == SIGTRAMP_FRAME)
4687 {
4688 if (debug_infrun)
4689 fprintf_unfiltered (gdb_stdlog,
4690 "infrun: stepped into signal trampoline\n");
4691 /* The inferior, while doing a "step" or "next", has ended up in
4692 a signal trampoline (either by a signal being delivered or by
4693 the signal handler returning). Just single-step until the
4694 inferior leaves the trampoline (either by calling the handler
4695 or returning). */
4696 keep_going (ecs);
4697 return;
4698 }
4699
4700 /* Check for subroutine calls. The check for the current frame
4701 equalling the step ID is not necessary - the check of the
4702 previous frame's ID is sufficient - but it is a common case and
4703 cheaper than checking the previous frame's ID.
4704
4705 NOTE: frame_id_eq will never report two invalid frame IDs as
4706 being equal, so to get into this block, both the current and
4707 previous frame must have valid frame IDs. */
4708 /* The outer_frame_id check is a heuristic to detect stepping
4709 through startup code. If we step over an instruction which
4710 sets the stack pointer from an invalid value to a valid value,
4711 we may detect that as a subroutine call from the mythical
4712 "outermost" function. This could be fixed by marking
4713 outermost frames as !stack_p,code_p,special_p. Then the
4714 initial outermost frame, before sp was valid, would
4715 have code_addr == &_start. See the comment in frame_id_eq
4716 for more. */
4717 if (!frame_id_eq (get_stack_frame_id (frame),
4718 ecs->event_thread->control.step_stack_frame_id)
4719 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4720 ecs->event_thread->control.step_stack_frame_id)
4721 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
4722 outer_frame_id)
4723 || step_start_function != find_pc_function (stop_pc))))
4724 {
4725 CORE_ADDR real_stop_pc;
4726
4727 if (debug_infrun)
4728 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4729
4730 if ((ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
4731 || ((ecs->event_thread->control.step_range_end == 1)
4732 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4733 ecs->stop_func_start)))
4734 {
4735 /* I presume that step_over_calls is only 0 when we're
4736 supposed to be stepping at the assembly language level
4737 ("stepi"). Just stop. */
4738 /* Also, maybe we just did a "nexti" inside a prologue, so we
4739 thought it was a subroutine call but it was not. Stop as
4740 well. FENN */
4741 /* And this works the same backward as frontward. MVS */
4742 ecs->event_thread->control.stop_step = 1;
4743 print_end_stepping_range_reason ();
4744 stop_stepping (ecs);
4745 return;
4746 }
4747
4748 /* Reverse stepping through solib trampolines. */
4749
4750 if (execution_direction == EXEC_REVERSE
4751 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
4752 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4753 || (ecs->stop_func_start == 0
4754 && in_solib_dynsym_resolve_code (stop_pc))))
4755 {
4756 /* Any solib trampoline code can be handled in reverse
4757 by simply continuing to single-step. We have already
4758 executed the solib function (backwards), and a few
4759 steps will take us back through the trampoline to the
4760 caller. */
4761 keep_going (ecs);
4762 return;
4763 }
4764
4765 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4766 {
4767 /* We're doing a "next".
4768
4769 Normal (forward) execution: set a breakpoint at the
4770 callee's return address (the address at which the caller
4771 will resume).
4772
4773 Reverse (backward) execution. set the step-resume
4774 breakpoint at the start of the function that we just
4775 stepped into (backwards), and continue to there. When we
4776 get there, we'll need to single-step back to the caller. */
4777
4778 if (execution_direction == EXEC_REVERSE)
4779 {
4780 struct symtab_and_line sr_sal;
4781
4782 /* Normal function call return (static or dynamic). */
4783 init_sal (&sr_sal);
4784 sr_sal.pc = ecs->stop_func_start;
4785 sr_sal.pspace = get_frame_program_space (frame);
4786 insert_step_resume_breakpoint_at_sal (gdbarch,
4787 sr_sal, null_frame_id);
4788 }
4789 else
4790 insert_step_resume_breakpoint_at_caller (frame);
4791
4792 keep_going (ecs);
4793 return;
4794 }
4795
4796 /* If we are in a function call trampoline (a stub between the
4797 calling routine and the real function), locate the real
4798 function. That's what tells us (a) whether we want to step
4799 into it at all, and (b) what prologue we want to run to the
4800 end of, if we do step into it. */
4801 real_stop_pc = skip_language_trampoline (frame, stop_pc);
4802 if (real_stop_pc == 0)
4803 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4804 if (real_stop_pc != 0)
4805 ecs->stop_func_start = real_stop_pc;
4806
4807 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
4808 {
4809 struct symtab_and_line sr_sal;
4810
4811 init_sal (&sr_sal);
4812 sr_sal.pc = ecs->stop_func_start;
4813 sr_sal.pspace = get_frame_program_space (frame);
4814
4815 insert_step_resume_breakpoint_at_sal (gdbarch,
4816 sr_sal, null_frame_id);
4817 keep_going (ecs);
4818 return;
4819 }
4820
4821 /* If we have line number information for the function we are
4822 thinking of stepping into, step into it.
4823
4824 If there are several symtabs at that PC (e.g. with include
4825 files), just want to know whether *any* of them have line
4826 numbers. find_pc_line handles this. */
4827 {
4828 struct symtab_and_line tmp_sal;
4829
4830 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
4831 if (tmp_sal.line != 0)
4832 {
4833 if (execution_direction == EXEC_REVERSE)
4834 handle_step_into_function_backward (gdbarch, ecs);
4835 else
4836 handle_step_into_function (gdbarch, ecs);
4837 return;
4838 }
4839 }
4840
4841 /* If we have no line number and the step-stop-if-no-debug is
4842 set, we stop the step so that the user has a chance to switch
4843 to assembly mode. */
4844 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4845 && step_stop_if_no_debug)
4846 {
4847 ecs->event_thread->control.stop_step = 1;
4848 print_end_stepping_range_reason ();
4849 stop_stepping (ecs);
4850 return;
4851 }
4852
4853 if (execution_direction == EXEC_REVERSE)
4854 {
4855 /* Set a breakpoint at callee's start address.
4856 From there we can step once and be back in the caller. */
4857 struct symtab_and_line sr_sal;
4858
4859 init_sal (&sr_sal);
4860 sr_sal.pc = ecs->stop_func_start;
4861 sr_sal.pspace = get_frame_program_space (frame);
4862 insert_step_resume_breakpoint_at_sal (gdbarch,
4863 sr_sal, null_frame_id);
4864 }
4865 else
4866 /* Set a breakpoint at callee's return address (the address
4867 at which the caller will resume). */
4868 insert_step_resume_breakpoint_at_caller (frame);
4869
4870 keep_going (ecs);
4871 return;
4872 }
4873
4874 /* Reverse stepping through solib trampolines. */
4875
4876 if (execution_direction == EXEC_REVERSE
4877 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
4878 {
4879 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4880 || (ecs->stop_func_start == 0
4881 && in_solib_dynsym_resolve_code (stop_pc)))
4882 {
4883 /* Any solib trampoline code can be handled in reverse
4884 by simply continuing to single-step. We have already
4885 executed the solib function (backwards), and a few
4886 steps will take us back through the trampoline to the
4887 caller. */
4888 keep_going (ecs);
4889 return;
4890 }
4891 else if (in_solib_dynsym_resolve_code (stop_pc))
4892 {
4893 /* Stepped backward into the solib dynsym resolver.
4894 Set a breakpoint at its start and continue, then
4895 one more step will take us out. */
4896 struct symtab_and_line sr_sal;
4897
4898 init_sal (&sr_sal);
4899 sr_sal.pc = ecs->stop_func_start;
4900 sr_sal.pspace = get_frame_program_space (frame);
4901 insert_step_resume_breakpoint_at_sal (gdbarch,
4902 sr_sal, null_frame_id);
4903 keep_going (ecs);
4904 return;
4905 }
4906 }
4907
4908 /* If we're in the return path from a shared library trampoline,
4909 we want to proceed through the trampoline when stepping. */
4910 if (gdbarch_in_solib_return_trampoline (gdbarch,
4911 stop_pc, ecs->stop_func_name))
4912 {
4913 /* Determine where this trampoline returns. */
4914 CORE_ADDR real_stop_pc;
4915
4916 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4917
4918 if (debug_infrun)
4919 fprintf_unfiltered (gdb_stdlog,
4920 "infrun: stepped into solib return tramp\n");
4921
4922 /* Only proceed through if we know where it's going. */
4923 if (real_stop_pc)
4924 {
4925 /* And put the step-breakpoint there and go until there. */
4926 struct symtab_and_line sr_sal;
4927
4928 init_sal (&sr_sal); /* initialize to zeroes */
4929 sr_sal.pc = real_stop_pc;
4930 sr_sal.section = find_pc_overlay (sr_sal.pc);
4931 sr_sal.pspace = get_frame_program_space (frame);
4932
4933 /* Do not specify what the fp should be when we stop since
4934 on some machines the prologue is where the new fp value
4935 is established. */
4936 insert_step_resume_breakpoint_at_sal (gdbarch,
4937 sr_sal, null_frame_id);
4938
4939 /* Restart without fiddling with the step ranges or
4940 other state. */
4941 keep_going (ecs);
4942 return;
4943 }
4944 }
4945
4946 stop_pc_sal = find_pc_line (stop_pc, 0);
4947
4948 /* NOTE: tausq/2004-05-24: This if block used to be done before all
4949      the trampoline processing logic; however, there are some trampolines
4950 that have no names, so we should do trampoline handling first. */
4951 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4952 && ecs->stop_func_name == NULL
4953 && stop_pc_sal.line == 0)
4954 {
4955 if (debug_infrun)
4956 fprintf_unfiltered (gdb_stdlog,
4957 "infrun: stepped into undebuggable function\n");
4958
4959 /* The inferior just stepped into, or returned to, an
4960 undebuggable function (where there is no debugging information
4961 and no line number corresponding to the address where the
4962 inferior stopped). Since we want to skip this kind of code,
4963 we keep going until the inferior returns from this
4964 function - unless the user has asked us not to (via
4965 set step-mode) or we no longer know how to get back
4966 to the call site. */
4967 if (step_stop_if_no_debug
4968 || !frame_id_p (frame_unwind_caller_id (frame)))
4969 {
4970 /* If we have no line number and the step-stop-if-no-debug
4971 is set, we stop the step so that the user has a chance to
4972 	     switch to assembly mode.  */
4973 ecs->event_thread->control.stop_step = 1;
4974 print_end_stepping_range_reason ();
4975 stop_stepping (ecs);
4976 return;
4977 }
4978 else
4979 {
4980 /* Set a breakpoint at callee's return address (the address
4981 at which the caller will resume). */
4982 insert_step_resume_breakpoint_at_caller (frame);
4983 keep_going (ecs);
4984 return;
4985 }
4986 }
4987
4988 if (ecs->event_thread->control.step_range_end == 1)
4989 {
4990 /* It is stepi or nexti. We always want to stop stepping after
4991 one instruction. */
4992 if (debug_infrun)
4993 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
4994 ecs->event_thread->control.stop_step = 1;
4995 print_end_stepping_range_reason ();
4996 stop_stepping (ecs);
4997 return;
4998 }
4999
5000 if (stop_pc_sal.line == 0)
5001 {
5002 /* We have no line number information. That means to stop
5003 stepping (does this always happen right after one instruction,
5004 when we do "s" in a function with no line numbers,
5005 or can this happen as a result of a return or longjmp?). */
5006 if (debug_infrun)
5007 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
5008 ecs->event_thread->control.stop_step = 1;
5009 print_end_stepping_range_reason ();
5010 stop_stepping (ecs);
5011 return;
5012 }
5013
5014 /* Look for "calls" to inlined functions, part one. If the inline
5015 frame machinery detected some skipped call sites, we have entered
5016 a new inline function. */
5017
5018 if (frame_id_eq (get_frame_id (get_current_frame ()),
5019 ecs->event_thread->control.step_frame_id)
5020 && inline_skipped_frames (ecs->ptid))
5021 {
5022 struct symtab_and_line call_sal;
5023
5024 if (debug_infrun)
5025 fprintf_unfiltered (gdb_stdlog,
5026 "infrun: stepped into inlined function\n");
5027
5028 find_frame_sal (get_current_frame (), &call_sal);
5029
5030 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
5031 {
5032 /* For "step", we're going to stop. But if the call site
5033 for this inlined function is on the same source line as
5034 we were previously stepping, go down into the function
5035 first. Otherwise stop at the call site. */
5036
5037 if (call_sal.line == ecs->event_thread->current_line
5038 && call_sal.symtab == ecs->event_thread->current_symtab)
5039 step_into_inline_frame (ecs->ptid);
5040
5041 ecs->event_thread->control.stop_step = 1;
5042 print_end_stepping_range_reason ();
5043 stop_stepping (ecs);
5044 return;
5045 }
5046 else
5047 {
5048 /* For "next", we should stop at the call site if it is on a
5049 different source line. Otherwise continue through the
5050 inlined function. */
5051 if (call_sal.line == ecs->event_thread->current_line
5052 && call_sal.symtab == ecs->event_thread->current_symtab)
5053 keep_going (ecs);
5054 else
5055 {
5056 ecs->event_thread->control.stop_step = 1;
5057 print_end_stepping_range_reason ();
5058 stop_stepping (ecs);
5059 }
5060 return;
5061 }
5062 }
5063
5064 /* Look for "calls" to inlined functions, part two. If we are still
5065 in the same real function we were stepping through, but we have
5066 to go further up to find the exact frame ID, we are stepping
5067 through a more inlined call beyond its call site. */
5068
5069 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
5070 && !frame_id_eq (get_frame_id (get_current_frame ()),
5071 ecs->event_thread->control.step_frame_id)
5072 && stepped_in_from (get_current_frame (),
5073 ecs->event_thread->control.step_frame_id))
5074 {
5075 if (debug_infrun)
5076 fprintf_unfiltered (gdb_stdlog,
5077 "infrun: stepping through inlined function\n");
5078
5079 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5080 keep_going (ecs);
5081 else
5082 {
5083 ecs->event_thread->control.stop_step = 1;
5084 print_end_stepping_range_reason ();
5085 stop_stepping (ecs);
5086 }
5087 return;
5088 }
5089
5090 if ((stop_pc == stop_pc_sal.pc)
5091 && (ecs->event_thread->current_line != stop_pc_sal.line
5092 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
5093 {
5094 /* We are at the start of a different line. So stop. Note that
5095 we don't stop if we step into the middle of a different line.
5096 That is said to make things like for (;;) statements work
5097 better. */
5098 if (debug_infrun)
5099 fprintf_unfiltered (gdb_stdlog,
5100 "infrun: stepped to a different line\n");
5101 ecs->event_thread->control.stop_step = 1;
5102 print_end_stepping_range_reason ();
5103 stop_stepping (ecs);
5104 return;
5105 }
5106
5107 /* We aren't done stepping.
5108
5109 Optimize by setting the stepping range to the line.
5110 (We might not be in the original line, but if we entered a
5111 new line in mid-statement, we continue stepping. This makes
5112 things like for(;;) statements work better.) */
5113
5114 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
5115 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
5116 set_step_info (frame, stop_pc_sal);
5117
5118 if (debug_infrun)
5119 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
5120 keep_going (ecs);
5121 }
5122
5123 /* Is thread TP in the middle of single-stepping? */
5124
5125 static int
5126 currently_stepping (struct thread_info *tp)
5127 {
5128 return ((tp->control.step_range_end
5129 && tp->control.step_resume_breakpoint == NULL)
5130 || tp->control.trap_expected
5131 || tp->stepping_through_solib_after_catch
5132 || bpstat_should_step ());
5133 }
5134
5135 /* Returns true if any thread *but* the one passed in "data" is in the
5136 middle of stepping or of handling a "next". */
5137
5138 static int
5139 currently_stepping_or_nexting_callback (struct thread_info *tp, void *data)
5140 {
5141 if (tp == data)
5142 return 0;
5143
5144 return (tp->control.step_range_end
5145 || tp->control.trap_expected
5146 || tp->stepping_through_solib_after_catch);
5147 }
5148
5149 /* Inferior has stepped into a subroutine call with source code that
5150 we should not step over. Do step to the first line of code in
5151 it. */
5152
5153 static void
5154 handle_step_into_function (struct gdbarch *gdbarch,
5155 struct execution_control_state *ecs)
5156 {
5157 struct symtab *s;
5158 struct symtab_and_line stop_func_sal, sr_sal;
5159
5160 fill_in_stop_func (gdbarch, ecs);
5161
5162 s = find_pc_symtab (stop_pc);
5163 if (s && s->language != language_asm)
5164 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5165 ecs->stop_func_start);
5166
5167 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
5168 /* Use the step_resume_break to step until the end of the prologue,
5169 even if that involves jumps (as it seems to on the vax under
5170 4.2). */
5171 /* If the prologue ends in the middle of a source line, continue to
5172 the end of that source line (if it is still within the function).
5173 Otherwise, just go to end of prologue. */
5174 if (stop_func_sal.end
5175 && stop_func_sal.pc != ecs->stop_func_start
5176 && stop_func_sal.end < ecs->stop_func_end)
5177 ecs->stop_func_start = stop_func_sal.end;
5178
5179 /* Architectures which require breakpoint adjustment might not be able
5180 to place a breakpoint at the computed address. If so, the test
5181 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
5182 ecs->stop_func_start to an address at which a breakpoint may be
5183 legitimately placed.
5184
5185 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
5186 made, GDB will enter an infinite loop when stepping through
5187 optimized code consisting of VLIW instructions which contain
5188 subinstructions corresponding to different source lines. On
5189 FR-V, it's not permitted to place a breakpoint on any but the
5190 first subinstruction of a VLIW instruction. When a breakpoint is
5191 set, GDB will adjust the breakpoint address to the beginning of
5192 the VLIW instruction. Thus, we need to make the corresponding
5193 adjustment here when computing the stop address. */
5194
5195 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
5196 {
5197 ecs->stop_func_start
5198 = gdbarch_adjust_breakpoint_address (gdbarch,
5199 ecs->stop_func_start);
5200 }
5201
5202 if (ecs->stop_func_start == stop_pc)
5203 {
5204 /* We are already there: stop now. */
5205 ecs->event_thread->control.stop_step = 1;
5206 print_end_stepping_range_reason ();
5207 stop_stepping (ecs);
5208 return;
5209 }
5210 else
5211 {
5212 /* Put the step-breakpoint there and go until there. */
5213 init_sal (&sr_sal); /* initialize to zeroes */
5214 sr_sal.pc = ecs->stop_func_start;
5215 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
5216 sr_sal.pspace = get_frame_program_space (get_current_frame ());
5217
5218 /* Do not specify what the fp should be when we stop since on
5219 some machines the prologue is where the new fp value is
5220 established. */
5221 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
5222
5223 /* And make sure stepping stops right away then. */
5224 ecs->event_thread->control.step_range_end
5225 = ecs->event_thread->control.step_range_start;
5226 }
5227 keep_going (ecs);
5228 }
5229
5230 /* Inferior has stepped backward into a subroutine call with source
5231 code that we should not step over. Do step to the beginning of the
5232 last line of code in it. */
5233
5234 static void
5235 handle_step_into_function_backward (struct gdbarch *gdbarch,
5236 struct execution_control_state *ecs)
5237 {
5238 struct symtab *s;
5239 struct symtab_and_line stop_func_sal;
5240
5241 fill_in_stop_func (gdbarch, ecs);
5242
5243 s = find_pc_symtab (stop_pc);
5244 if (s && s->language != language_asm)
5245 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5246 ecs->stop_func_start);
5247
5248 stop_func_sal = find_pc_line (stop_pc, 0);
5249
5250 /* OK, we're just going to keep stepping here. */
5251 if (stop_func_sal.pc == stop_pc)
5252 {
5253 /* We're there already. Just stop stepping now. */
5254 ecs->event_thread->control.stop_step = 1;
5255 print_end_stepping_range_reason ();
5256 stop_stepping (ecs);
5257 }
5258 else
5259 {
5260 /* Else just reset the step range and keep going.
5261 	 No step-resume breakpoint; they don't work for
5262 epilogues, which can have multiple entry paths. */
5263 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
5264 ecs->event_thread->control.step_range_end = stop_func_sal.end;
5265 keep_going (ecs);
5266 }
5267 return;
5268 }
5269
5270 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
5271    This is used both to skip over functions and to skip over other code, such as prologues and trampolines.  */
5272
5273 static void
5274 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
5275 struct symtab_and_line sr_sal,
5276 struct frame_id sr_id,
5277 enum bptype sr_type)
5278 {
5279 /* There should never be more than one step-resume or longjmp-resume
5280 breakpoint per thread, so we should never be setting a new
5281 step_resume_breakpoint when one is already active. */
5282 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5283 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
5284
5285 if (debug_infrun)
5286 fprintf_unfiltered (gdb_stdlog,
5287 "infrun: inserting step-resume breakpoint at %s\n",
5288 paddress (gdbarch, sr_sal.pc));
5289
5290 inferior_thread ()->control.step_resume_breakpoint
5291 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type);
5292 }
5293
5294 void
5295 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
5296 struct symtab_and_line sr_sal,
5297 struct frame_id sr_id)
5298 {
5299 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
5300 sr_sal, sr_id,
5301 bp_step_resume);
5302 }
5303
5304 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
5305 This is used to skip a potential signal handler.
5306
5307 This is called with the interrupted function's frame. The signal
5308 handler, when it returns, will resume the interrupted function at
5309 RETURN_FRAME.pc. */
5310
5311 static void
5312 insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
5313 {
5314 struct symtab_and_line sr_sal;
5315 struct gdbarch *gdbarch;
5316
5317 gdb_assert (return_frame != NULL);
5318 init_sal (&sr_sal); /* initialize to zeros */
5319
5320 gdbarch = get_frame_arch (return_frame);
5321 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
5322 sr_sal.section = find_pc_overlay (sr_sal.pc);
5323 sr_sal.pspace = get_frame_program_space (return_frame);
5324
5325 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
5326 get_stack_frame_id (return_frame),
5327 bp_hp_step_resume);
5328 }
5329
5330 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
5331 is used to skip a function after stepping into it (for "next" or if
5332 the called function has no debugging information).
5333
5334 The current function has almost always been reached by single
5335 stepping a call or return instruction. NEXT_FRAME belongs to the
5336 current function, and the breakpoint will be set at the caller's
5337 resume address.
5338
5339 This is a separate function rather than reusing
5340 insert_hp_step_resume_breakpoint_at_frame in order to avoid
5341 get_prev_frame, which may stop prematurely (see the implementation
5342 of frame_unwind_caller_id for an example). */
5343
5344 static void
5345 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
5346 {
5347 struct symtab_and_line sr_sal;
5348 struct gdbarch *gdbarch;
5349
5350 /* We shouldn't have gotten here if we don't know where the call site
5351 is. */
5352 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
5353
5354 init_sal (&sr_sal); /* initialize to zeros */
5355
5356 gdbarch = frame_unwind_caller_arch (next_frame);
5357 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
5358 frame_unwind_caller_pc (next_frame));
5359 sr_sal.section = find_pc_overlay (sr_sal.pc);
5360 sr_sal.pspace = frame_unwind_program_space (next_frame);
5361
5362 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5363 frame_unwind_caller_id (next_frame));
5364 }
5365
5366 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
5367 new breakpoint at the target of a jmp_buf. The handling of
5368 longjmp-resume uses the same mechanisms used for handling
5369 "step-resume" breakpoints. */
5370
5371 static void
5372 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
5373 {
5374 /* There should never be more than one step-resume or longjmp-resume
5375 breakpoint per thread, so we should never be setting a new
5376 longjmp_resume_breakpoint when one is already active. */
5377 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5378
5379 if (debug_infrun)
5380 fprintf_unfiltered (gdb_stdlog,
5381 "infrun: inserting longjmp-resume breakpoint at %s\n",
5382 paddress (gdbarch, pc));
5383
5384 inferior_thread ()->control.step_resume_breakpoint =
5385 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
5386 }
5387
5388 /* Insert an exception resume breakpoint. TP is the thread throwing
5389 the exception. The block B is the block of the unwinder debug hook
5390 function. FRAME is the frame corresponding to the call to this
5391 function. SYM is the symbol of the function argument holding the
5392 target PC of the exception. */
5393
5394 static void
5395 insert_exception_resume_breakpoint (struct thread_info *tp,
5396 struct block *b,
5397 struct frame_info *frame,
5398 struct symbol *sym)
5399 {
5400 struct gdb_exception e;
5401
5402 /* We want to ignore errors here. */
5403 TRY_CATCH (e, RETURN_MASK_ERROR)
5404 {
5405 struct symbol *vsym;
5406 struct value *value;
5407 CORE_ADDR handler;
5408 struct breakpoint *bp;
5409
5410 vsym = lookup_symbol (SYMBOL_LINKAGE_NAME (sym), b, VAR_DOMAIN, NULL);
5411 value = read_var_value (vsym, frame);
5412 /* If the value was optimized out, revert to the old behavior. */
5413 if (! value_optimized_out (value))
5414 {
5415 handler = value_as_address (value);
5416
5417 if (debug_infrun)
5418 fprintf_unfiltered (gdb_stdlog,
5419 "infrun: exception resume at %lx\n",
5420 (unsigned long) handler);
5421
5422 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5423 handler, bp_exception_resume);
5424 bp->thread = tp->num;
5425 inferior_thread ()->control.exception_resume_breakpoint = bp;
5426 }
5427 }
5428 }
5429
5430 /* This is called when an exception has been intercepted. Check to
5431 see whether the exception's destination is of interest, and if so,
5432 set an exception resume breakpoint there. */
5433
5434 static void
5435 check_exception_resume (struct execution_control_state *ecs,
5436 struct frame_info *frame, struct symbol *func)
5437 {
5438 struct gdb_exception e;
5439
5440 TRY_CATCH (e, RETURN_MASK_ERROR)
5441 {
5442 struct block *b;
5443 struct dict_iterator iter;
5444 struct symbol *sym;
5445 int argno = 0;
5446
5447 /* The exception breakpoint is a thread-specific breakpoint on
5448 the unwinder's debug hook, declared as:
5449
5450 void _Unwind_DebugHook (void *cfa, void *handler);
5451
5452 The CFA argument indicates the frame to which control is
5453 about to be transferred. HANDLER is the destination PC.
5454
5455 We ignore the CFA and set a temporary breakpoint at HANDLER.
5456 This is not extremely efficient but it avoids issues in gdb
5457 with computing the DWARF CFA, and it also works even in weird
5458 cases such as throwing an exception from inside a signal
5459 handler. */
5460
5461 b = SYMBOL_BLOCK_VALUE (func);
5462 ALL_BLOCK_SYMBOLS (b, iter, sym)
5463 {
5464 if (!SYMBOL_IS_ARGUMENT (sym))
5465 continue;
5466
5467 if (argno == 0)
5468 ++argno;
5469 else
5470 {
5471 insert_exception_resume_breakpoint (ecs->event_thread,
5472 b, frame, sym);
5473 break;
5474 }
5475 }
5476 }
5477 }
5478
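/* Clear the wait_some_more flag in ECS so that the callers of
   handle_inferior_event stop waiting for events from the inferior and
   return control to the user.  */
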
5479 static void
5480 stop_stepping (struct execution_control_state *ecs)
5481 {
5482 if (debug_infrun)
5483 fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n");
5484
5485 /* Let callers know we don't want to wait for the inferior anymore. */
5486 ecs->wait_some_more = 0;
5487 }
5488
5489 /* This function handles various cases where we need to continue
5490 waiting for the inferior. */
5491 /* (Used to be the keep_going: label in the old wait_for_inferior). */
5492
5493 static void
5494 keep_going (struct execution_control_state *ecs)
5495 {
5496 /* Make sure normal_stop is called if we get a QUIT handled before
5497 reaching resume. */
5498 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
5499
5500 /* Save the pc before execution, to compare with pc after stop. */
5501 ecs->event_thread->prev_pc
5502 = regcache_read_pc (get_thread_regcache (ecs->ptid));
5503
5504   /* If we get here, we should keep running the inferior and not
5505      return to the debugger.  */
5506
5507 if (ecs->event_thread->control.trap_expected
5508 && ecs->event_thread->suspend.stop_signal != TARGET_SIGNAL_TRAP)
5509 {
5510 /* We took a signal (which we are supposed to pass through to
5511 the inferior, else we'd not get here) and we haven't yet
5512 gotten our trap. Simply continue. */
5513
5514 discard_cleanups (old_cleanups);
5515 resume (currently_stepping (ecs->event_thread),
5516 ecs->event_thread->suspend.stop_signal);
5517 }
5518 else
5519 {
5520 /* Either the trap was not expected, but we are continuing
5521 anyway (the user asked that this signal be passed to the
5522 child)
5523 -- or --
5524 	 The signal was SIGTRAP, i.e. it was our signal, but we
5525 decided we should resume from it.
5526
5527 We're going to run this baby now!
5528
5529 Note that insert_breakpoints won't try to re-insert
5530 already inserted breakpoints. Therefore, we don't
5531 care if breakpoints were already inserted, or not. */
5532
5533 if (ecs->event_thread->stepping_over_breakpoint)
5534 {
5535 struct regcache *thread_regcache = get_thread_regcache (ecs->ptid);
5536
5537 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
5538 /* Since we can't do a displaced step, we have to remove
5539 the breakpoint while we step it. To keep things
5540 simple, we remove them all. */
5541 remove_breakpoints ();
5542 }
5543 else
5544 {
5545 struct gdb_exception e;
5546
5547 /* Stop stepping when inserting breakpoints
5548 has failed. */
5549 TRY_CATCH (e, RETURN_MASK_ERROR)
5550 {
5551 insert_breakpoints ();
5552 }
5553 if (e.reason < 0)
5554 {
5555 exception_print (gdb_stderr, e);
5556 stop_stepping (ecs);
5557 return;
5558 }
5559 }
5560
5561 ecs->event_thread->control.trap_expected
5562 = ecs->event_thread->stepping_over_breakpoint;
5563
5564 /* Do not deliver SIGNAL_TRAP (except when the user explicitly
5565 specifies that such a signal should be delivered to the
5566 target program).
5567
5568 	 Typically, this would occur when a user is debugging a
5569 	 target monitor on a simulator: the target monitor sets a
5570 	 breakpoint; the simulator encounters this breakpoint and
5571 	 halts the simulation, handing control to GDB; GDB, noting
5572 	 that the breakpoint isn't valid, returns control back to the
5573 simulator; the simulator then delivers the hardware
5574 equivalent of a SIGNAL_TRAP to the program being debugged. */
5575
5576 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
5577 && !signal_program[ecs->event_thread->suspend.stop_signal])
5578 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
5579
5580 discard_cleanups (old_cleanups);
5581 resume (currently_stepping (ecs->event_thread),
5582 ecs->event_thread->suspend.stop_signal);
5583 }
5584
5585 prepare_to_wait (ecs);
5586 }
5587
5588 /* This function normally comes after a resume, before
5589 handle_inferior_event exits. It takes care of any last bits of
5590 housekeeping, and sets the all-important wait_some_more flag. */
5591
5592 static void
5593 prepare_to_wait (struct execution_control_state *ecs)
5594 {
5595 if (debug_infrun)
5596 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
5597
5598 /* This is the old end of the while loop. Let everybody know we
5599 want to wait for the inferior some more and get called again
5600 soon. */
5601 ecs->wait_some_more = 1;
5602 }
5603
5604 /* Several print_*_reason functions to print why the inferior has stopped.
5605 We always print something when the inferior exits, or receives a signal.
5606 The rest of the cases are dealt with later on in normal_stop and
5607 print_it_typical. Ideally there should be a call to one of these
5608    print_*_reason functions from handle_inferior_event each time
5609 stop_stepping is called. */
5610
5611 /* We are done with a step/next/si/ni command; print why the inferior
5612    has stopped.  The CLI prints nothing here; for MI, emit the
5613    end-stepping-range reason, but only if we are not in the middle of
5614    doing a "step n" operation for n > 1.  */
5615
5616 static void
5617 print_end_stepping_range_reason (void)
5618 {
5619 if ((!inferior_thread ()->step_multi
5620 || !inferior_thread ()->control.stop_step)
5621 && ui_out_is_mi_like_p (current_uiout))
5622 ui_out_field_string (current_uiout, "reason",
5623 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
5624 }
5625
5626 /* The inferior was terminated by a signal, print why it stopped. */
5627
5628 static void
5629 print_signal_exited_reason (enum target_signal siggnal)
5630 {
5631 struct ui_out *uiout = current_uiout;
5632
5633 annotate_signalled ();
5634 if (ui_out_is_mi_like_p (uiout))
5635 ui_out_field_string
5636 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
5637 ui_out_text (uiout, "\nProgram terminated with signal ");
5638 annotate_signal_name ();
5639 ui_out_field_string (uiout, "signal-name",
5640 target_signal_to_name (siggnal));
5641 annotate_signal_name_end ();
5642 ui_out_text (uiout, ", ");
5643 annotate_signal_string ();
5644 ui_out_field_string (uiout, "signal-meaning",
5645 target_signal_to_string (siggnal));
5646 annotate_signal_string_end ();
5647 ui_out_text (uiout, ".\n");
5648 ui_out_text (uiout, "The program no longer exists.\n");
5649 }
5650
5651 /* The inferior program is finished, print why it stopped. */
5652
5653 static void
5654 print_exited_reason (int exitstatus)
5655 {
5656 struct inferior *inf = current_inferior ();
5657 const char *pidstr = target_pid_to_str (pid_to_ptid (inf->pid));
5658 struct ui_out *uiout = current_uiout;
5659
5660 annotate_exited (exitstatus);
5661 if (exitstatus)
5662 {
5663 if (ui_out_is_mi_like_p (uiout))
5664 ui_out_field_string (uiout, "reason",
5665 async_reason_lookup (EXEC_ASYNC_EXITED));
5666 ui_out_text (uiout, "[Inferior ");
5667 ui_out_text (uiout, plongest (inf->num));
5668 ui_out_text (uiout, " (");
5669 ui_out_text (uiout, pidstr);
5670 ui_out_text (uiout, ") exited with code ");
5671 ui_out_field_fmt (uiout, "exit-code", "0%o", (unsigned int) exitstatus);
5672 ui_out_text (uiout, "]\n");
5673 }
5674 else
5675 {
5676 if (ui_out_is_mi_like_p (uiout))
5677 ui_out_field_string
5678 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
5679 ui_out_text (uiout, "[Inferior ");
5680 ui_out_text (uiout, plongest (inf->num));
5681 ui_out_text (uiout, " (");
5682 ui_out_text (uiout, pidstr);
5683 ui_out_text (uiout, ") exited normally]\n");
5684 }
5685 /* Support the --return-child-result option. */
5686 return_child_result_value = exitstatus;
5687 }
5688
5689 /* The inferior received a signal; print why it has stopped.  The
5690    signal table tells us to print information about this signal.  */
5691
5692 static void
5693 print_signal_received_reason (enum target_signal siggnal)
5694 {
5695 struct ui_out *uiout = current_uiout;
5696
5697 annotate_signal ();
5698
5699 if (siggnal == TARGET_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
5700 {
5701 struct thread_info *t = inferior_thread ();
5702
5703 ui_out_text (uiout, "\n[");
5704 ui_out_field_string (uiout, "thread-name",
5705 target_pid_to_str (t->ptid));
5706 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
5707 ui_out_text (uiout, " stopped");
5708 }
5709 else
5710 {
5711 ui_out_text (uiout, "\nProgram received signal ");
5712 annotate_signal_name ();
5713 if (ui_out_is_mi_like_p (uiout))
5714 ui_out_field_string
5715 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
5716 ui_out_field_string (uiout, "signal-name",
5717 target_signal_to_name (siggnal));
5718 annotate_signal_name_end ();
5719 ui_out_text (uiout, ", ");
5720 annotate_signal_string ();
5721 ui_out_field_string (uiout, "signal-meaning",
5722 target_signal_to_string (siggnal));
5723 annotate_signal_string_end ();
5724 }
5725 ui_out_text (uiout, ".\n");
5726 }
5727
5728 /* Reverse execution: target ran out of history info, print why the inferior
5729 has stopped. */
5730
5731 static void
5732 print_no_history_reason (void)
5733 {
5734 ui_out_text (current_uiout, "\nNo more reverse-execution history.\n");
5735 }
5736
5737 /* Here to return control to GDB when the inferior stops for real.
5738 Print appropriate messages, remove breakpoints, give terminal our modes.
5739
5740 STOP_PRINT_FRAME nonzero means print the executing frame
5741 (pc, function, args, file, line number and line text).
5742 BREAKPOINTS_FAILED nonzero means stop was due to error
5743 attempting to insert breakpoints. */
5744
5745 void
5746 normal_stop (void)
5747 {
5748 struct target_waitstatus last;
5749 ptid_t last_ptid;
5750 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
5751
5752 get_last_target_status (&last_ptid, &last);
5753
5754 /* If an exception is thrown from this point on, make sure to
5755 propagate GDB's knowledge of the executing state to the
5756 frontend/user running state. A QUIT is an easy exception to see
5757 here, so do this before any filtered output. */
5758 if (!non_stop)
5759 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
5760 else if (last.kind != TARGET_WAITKIND_SIGNALLED
5761 && last.kind != TARGET_WAITKIND_EXITED)
5762 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
5763
5764 /* In non-stop mode, we don't want GDB to switch threads behind the
5765 user's back, to avoid races where the user is typing a command to
5766 apply to thread x, but GDB switches to thread y before the user
5767 finishes entering the command. */
5768
5769 /* As with the notification of thread events, we want to delay
5770 notifying the user that we've switched thread context until
5771 the inferior actually stops.
5772
5773 There's no point in saying anything if the inferior has exited.
5774 Note that SIGNALLED here means "exited with a signal", not
5775 "received a signal". */
5776 if (!non_stop
5777 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
5778 && target_has_execution
5779 && last.kind != TARGET_WAITKIND_SIGNALLED
5780 && last.kind != TARGET_WAITKIND_EXITED)
5781 {
5782 target_terminal_ours_for_output ();
5783 printf_filtered (_("[Switching to %s]\n"),
5784 target_pid_to_str (inferior_ptid));
5785 annotate_thread_changed ();
5786 previous_inferior_ptid = inferior_ptid;
5787 }
5788
5789 if (!breakpoints_always_inserted_mode () && target_has_execution)
5790 {
5791 if (remove_breakpoints ())
5792 {
5793 target_terminal_ours_for_output ();
5794 printf_filtered (_("Cannot remove breakpoints because "
5795 "program is no longer writable.\nFurther "
5796 "execution is probably impossible.\n"));
5797 }
5798 }
5799
5800 /* If an auto-display called a function and that got a signal,
5801 delete that auto-display to avoid an infinite recursion. */
5802
5803 if (stopped_by_random_signal)
5804 disable_current_display ();
5805
5806 /* Don't print a message if in the middle of doing a "step n"
5807 operation for n > 1 */
5808 if (target_has_execution
5809 && last.kind != TARGET_WAITKIND_SIGNALLED
5810 && last.kind != TARGET_WAITKIND_EXITED
5811 && inferior_thread ()->step_multi
5812 && inferior_thread ()->control.stop_step)
5813 goto done;
5814
5815 target_terminal_ours ();
5816
5817 /* Set the current source location. This will also happen if we
5818 display the frame below, but the current SAL will be incorrect
5819 during a user hook-stop function. */
5820 if (has_stack_frames () && !stop_stack_dummy)
5821 set_current_sal_from_frame (get_current_frame (), 1);
5822
5823 /* Let the user/frontend see the threads as stopped. */
5824 do_cleanups (old_chain);
5825
5826 /* Look up the hook_stop and run it (CLI internally handles problem
5827 of stop_command's pre-hook not existing). */
5828 if (stop_command)
5829 catch_errors (hook_stop_stub, stop_command,
5830 "Error while running hook_stop:\n", RETURN_MASK_ALL);
5831
5832 if (!has_stack_frames ())
5833 goto done;
5834
5835 if (last.kind == TARGET_WAITKIND_SIGNALLED
5836 || last.kind == TARGET_WAITKIND_EXITED)
5837 goto done;
5838
5839 /* Select innermost stack frame - i.e., current frame is frame 0,
5840 and current location is based on that.
5841 Don't do this on return from a stack dummy routine,
5842 or if the program has exited. */
5843
5844 if (!stop_stack_dummy)
5845 {
5846 select_frame (get_current_frame ());
5847
5848 /* Print current location without a level number, if
5849 we have changed functions or hit a breakpoint.
5850 Print source line if we have one.
5851 bpstat_print() contains the logic deciding in detail
5852 what to print, based on the event(s) that just occurred. */
5853
5854 /* If --batch-silent is enabled then there's no need to print the current
5855 	     source location, and trying to do so risks causing an error message about
5856 missing source files. */
5857 if (stop_print_frame && !batch_silent)
5858 {
5859 int bpstat_ret;
5860 int source_flag;
5861 int do_frame_printing = 1;
5862 struct thread_info *tp = inferior_thread ();
5863
5864 bpstat_ret = bpstat_print (tp->control.stop_bpstat);
5865 switch (bpstat_ret)
5866 {
5867 case PRINT_UNKNOWN:
5868 /* If we had hit a shared library event breakpoint,
5869 bpstat_print would print out this message. If we hit
5870 an OS-level shared library event, do the same
5871 thing. */
5872 if (last.kind == TARGET_WAITKIND_LOADED)
5873 {
5874 printf_filtered (_("Stopped due to shared library event\n"));
5875 source_flag = SRC_LINE; /* something bogus */
5876 do_frame_printing = 0;
5877 break;
5878 }
5879
5880 /* FIXME: cagney/2002-12-01: Given that a frame ID does
5881 (or should) carry around the function and does (or
5882 should) use that when doing a frame comparison. */
5883 if (tp->control.stop_step
5884 && frame_id_eq (tp->control.step_frame_id,
5885 get_frame_id (get_current_frame ()))
5886 && step_start_function == find_pc_function (stop_pc))
5887 source_flag = SRC_LINE; /* Finished step, just
5888 print source line. */
5889 else
5890 source_flag = SRC_AND_LOC; /* Print location and
5891 source line. */
5892 break;
5893 case PRINT_SRC_AND_LOC:
5894 source_flag = SRC_AND_LOC; /* Print location and
5895 source line. */
5896 break;
5897 case PRINT_SRC_ONLY:
5898 source_flag = SRC_LINE;
5899 break;
5900 case PRINT_NOTHING:
5901 source_flag = SRC_LINE; /* something bogus */
5902 do_frame_printing = 0;
5903 break;
5904 default:
5905 internal_error (__FILE__, __LINE__, _("Unknown value."));
5906 }
5907
5908 /* The behavior of this routine with respect to the source
5909 flag is:
5910 SRC_LINE: Print only source line
5911 LOCATION: Print only location
5912 SRC_AND_LOC: Print location and source line. */
5913 if (do_frame_printing)
5914 print_stack_frame (get_selected_frame (NULL), 0, source_flag);
5915
5916 /* Display the auto-display expressions. */
5917 do_displays ();
5918 }
5919 }
5920
5921 /* Save the function value return registers, if we care.
5922 We might be about to restore their previous contents. */
5923 if (inferior_thread ()->control.proceed_to_finish
5924 && execution_direction != EXEC_REVERSE)
5925 {
5926 /* This should not be necessary. */
5927 if (stop_registers)
5928 regcache_xfree (stop_registers);
5929
5930 /* NB: The copy goes through to the target picking up the value of
5931 all the registers. */
5932 stop_registers = regcache_dup (get_current_regcache ());
5933 }
5934
5935 if (stop_stack_dummy == STOP_STACK_DUMMY)
5936 {
5937 /* Pop the empty frame that contains the stack dummy.
5938 This also restores inferior state prior to the call
5939 (struct infcall_suspend_state). */
5940 struct frame_info *frame = get_current_frame ();
5941
5942 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
5943 frame_pop (frame);
5944 /* frame_pop() calls reinit_frame_cache as the last thing it
5945 does which means there's currently no selected frame. We
5946 don't need to re-establish a selected frame if the dummy call
5947 returns normally, that will be done by
5948 restore_infcall_control_state. However, we do have to handle
5949 the case where the dummy call is returning after being
5950 stopped (e.g. the dummy call previously hit a breakpoint).
5951 We can't know which case we have so just always re-establish
5952 a selected frame here. */
5953 select_frame (get_current_frame ());
5954 }
5955
5956 done:
5957 annotate_stopped ();
5958
5959 /* Suppress the stop observer if we're in the middle of:
5960
5961      - a step n (n > 1), as there are still more steps to be done.
5962
5963 - a "finish" command, as the observer will be called in
5964 finish_command_continuation, so it can include the inferior
5965 function's return value.
5966
5967      - calling an inferior function, as we pretend the inferior didn't
5968 run at all. The return value of the call is handled by the
5969 expression evaluator, through call_function_by_hand. */
5970
5971 if (!target_has_execution
5972 || last.kind == TARGET_WAITKIND_SIGNALLED
5973 || last.kind == TARGET_WAITKIND_EXITED
5974 || (!inferior_thread ()->step_multi
5975 && !(inferior_thread ()->control.stop_bpstat
5976 && inferior_thread ()->control.proceed_to_finish)
5977 && !inferior_thread ()->control.in_infcall))
5978 {
5979 if (!ptid_equal (inferior_ptid, null_ptid))
5980 observer_notify_normal_stop (inferior_thread ()->control.stop_bpstat,
5981 stop_print_frame);
5982 else
5983 observer_notify_normal_stop (NULL, stop_print_frame);
5984 }
5985
5986 if (target_has_execution)
5987 {
5988 if (last.kind != TARGET_WAITKIND_SIGNALLED
5989 && last.kind != TARGET_WAITKIND_EXITED)
5990 /* Delete the breakpoint we stopped at, if it wants to be deleted.
5991 Delete any breakpoint that is to be deleted at the next stop. */
5992 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
5993 }
5994
5995 /* Try to get rid of automatically added inferiors that are no
5996 longer needed. Keeping those around slows down things linearly.
5997 Note that this never removes the current inferior. */
5998 prune_inferiors ();
5999 }
6000
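/* catch_errors callback used by normal_stop: run the pre-hook of the
   "stop" pseudo-command.  CMD is the stop_command command list
   element.  */
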
6001 static int
6002 hook_stop_stub (void *cmd)
6003 {
6004 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
6005 return (0);
6006 }
6007 \f
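/* Accessors for the "handle" tables: report whether GDB is set to
   stop on, print, or pass to the program the signal SIGNO.  */
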
6008 int
6009 signal_stop_state (int signo)
6010 {
6011 return signal_stop[signo];
6012 }
6013
6014 int
6015 signal_print_state (int signo)
6016 {
6017 return signal_print[signo];
6018 }
6019
6020 int
6021 signal_pass_state (int signo)
6022 {
6023 return signal_program[signo];
6024 }
6025
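/* Recompute the cached signal_pass entry for SIGNO, or for every
   signal if SIGNO is -1.  A signal is in the "pass" set only when GDB
   neither stops for it nor prints it, and the signal is to be
   delivered to the program.  */
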
6026 static void
6027 signal_cache_update (int signo)
6028 {
6029 if (signo == -1)
6030 {
6031 for (signo = 0; signo < (int) TARGET_SIGNAL_LAST; signo++)
6032 signal_cache_update (signo);
6033
6034 return;
6035 }
6036
6037 signal_pass[signo] = (signal_stop[signo] == 0
6038 && signal_print[signo] == 0
6039 && signal_program[signo] == 1);
6040 }
6041
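/* Setters for the "handle" tables.  Each records STATE for SIGNO in
   the corresponding table, refreshes the signal_pass cache, and
   returns the previous setting.  */
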
6042 int
6043 signal_stop_update (int signo, int state)
6044 {
6045 int ret = signal_stop[signo];
6046
6047 signal_stop[signo] = state;
6048 signal_cache_update (signo);
6049 return ret;
6050 }
6051
6052 int
6053 signal_print_update (int signo, int state)
6054 {
6055 int ret = signal_print[signo];
6056
6057 signal_print[signo] = state;
6058 signal_cache_update (signo);
6059 return ret;
6060 }
6061
6062 int
6063 signal_pass_update (int signo, int state)
6064 {
6065 int ret = signal_program[signo];
6066
6067 signal_program[signo] = state;
6068 signal_cache_update (signo);
6069 return ret;
6070 }
6071
6072 static void
6073 sig_print_header (void)
6074 {
6075 printf_filtered (_("Signal Stop\tPrint\tPass "
6076 "to program\tDescription\n"));
6077 }
6078
6079 static void
6080 sig_print_info (enum target_signal oursig)
6081 {
6082 const char *name = target_signal_to_name (oursig);
6083 int name_padding = 13 - strlen (name);
6084
6085 if (name_padding <= 0)
6086 name_padding = 0;
6087
6088 printf_filtered ("%s", name);
6089 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
6090 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
6091 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
6092 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
6093 printf_filtered ("%s\n", target_signal_to_string (oursig));
6094 }
6095
6096 /* Specify how various signals in the inferior should be handled. */
6097
6098 static void
6099 handle_command (char *args, int from_tty)
6100 {
6101 char **argv;
6102 int digits, wordlen;
6103 int sigfirst, signum, siglast;
6104 enum target_signal oursig;
6105 int allsigs;
6106 int nsigs;
6107 unsigned char *sigs;
6108 struct cleanup *old_chain;
6109
6110 if (args == NULL)
6111 {
6112 error_no_arg (_("signal to handle"));
6113 }
6114
6115 /* Allocate and zero an array of flags for which signals to handle. */
6116
6117 nsigs = (int) TARGET_SIGNAL_LAST;
6118 sigs = (unsigned char *) alloca (nsigs);
6119 memset (sigs, 0, nsigs);
6120
6121 /* Break the command line up into args. */
6122
6123 argv = gdb_buildargv (args);
6124 old_chain = make_cleanup_freeargv (argv);
6125
6126 /* Walk through the args, looking for signal oursigs, signal names, and
6127 actions. Signal numbers and signal names may be interspersed with
6128 actions, with the actions being performed for all signals cumulatively
6129 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
6130
6131 while (*argv != NULL)
6132 {
6133 wordlen = strlen (*argv);
6134 for (digits = 0; isdigit ((*argv)[digits]); digits++)
6135 {;
6136 }
6137 allsigs = 0;
6138 sigfirst = siglast = -1;
6139
6140 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
6141 {
6142 /* Apply action to all signals except those used by the
6143 debugger. Silently skip those. */
6144 allsigs = 1;
6145 sigfirst = 0;
6146 siglast = nsigs - 1;
6147 }
6148 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
6149 {
6150 SET_SIGS (nsigs, sigs, signal_stop);
6151 SET_SIGS (nsigs, sigs, signal_print);
6152 }
6153 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
6154 {
6155 UNSET_SIGS (nsigs, sigs, signal_program);
6156 }
6157 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
6158 {
6159 SET_SIGS (nsigs, sigs, signal_print);
6160 }
6161 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
6162 {
6163 SET_SIGS (nsigs, sigs, signal_program);
6164 }
6165 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
6166 {
6167 UNSET_SIGS (nsigs, sigs, signal_stop);
6168 }
6169 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
6170 {
6171 SET_SIGS (nsigs, sigs, signal_program);
6172 }
6173 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
6174 {
6175 UNSET_SIGS (nsigs, sigs, signal_print);
6176 UNSET_SIGS (nsigs, sigs, signal_stop);
6177 }
6178 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
6179 {
6180 UNSET_SIGS (nsigs, sigs, signal_program);
6181 }
6182 else if (digits > 0)
6183 {
6184 /* It is numeric. The numeric signal refers to our own
6185 internal signal numbering from target.h, not to host/target
6186 signal number. This is a feature; users really should be
6187 using symbolic names anyway, and the common ones like
6188 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
6189
6190 sigfirst = siglast = (int)
6191 target_signal_from_command (atoi (*argv));
6192 if ((*argv)[digits] == '-')
6193 {
6194 siglast = (int)
6195 target_signal_from_command (atoi ((*argv) + digits + 1));
6196 }
6197 if (sigfirst > siglast)
6198 {
6199 /* Bet he didn't figure we'd think of this case... */
6200 signum = sigfirst;
6201 sigfirst = siglast;
6202 siglast = signum;
6203 }
6204 }
6205 else
6206 {
6207 oursig = target_signal_from_name (*argv);
6208 if (oursig != TARGET_SIGNAL_UNKNOWN)
6209 {
6210 sigfirst = siglast = (int) oursig;
6211 }
6212 else
6213 {
6214 /* Not a number and not a recognized flag word => complain. */
6215 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
6216 }
6217 }
6218
6219 /* If any signal numbers or symbol names were found, set flags for
6220 which signals to apply actions to. */
6221
6222 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
6223 {
6224 switch ((enum target_signal) signum)
6225 {
6226 case TARGET_SIGNAL_TRAP:
6227 case TARGET_SIGNAL_INT:
6228 if (!allsigs && !sigs[signum])
6229 {
6230 if (query (_("%s is used by the debugger.\n\
6231 Are you sure you want to change it? "),
6232 target_signal_to_name ((enum target_signal) signum)))
6233 {
6234 sigs[signum] = 1;
6235 }
6236 else
6237 {
6238 printf_unfiltered (_("Not confirmed, unchanged.\n"));
6239 gdb_flush (gdb_stdout);
6240 }
6241 }
6242 break;
6243 case TARGET_SIGNAL_0:
6244 case TARGET_SIGNAL_DEFAULT:
6245 case TARGET_SIGNAL_UNKNOWN:
6246 /* Make sure that "all" doesn't print these. */
6247 break;
6248 default:
6249 sigs[signum] = 1;
6250 break;
6251 }
6252 }
6253
6254 argv++;
6255 }
6256
6257 for (signum = 0; signum < nsigs; signum++)
6258 if (sigs[signum])
6259 {
6260 signal_cache_update (-1);
6261 target_pass_signals ((int) TARGET_SIGNAL_LAST, signal_pass);
6262
6263 if (from_tty)
6264 {
6265 /* Show the results. */
6266 sig_print_header ();
6267 for (; signum < nsigs; signum++)
6268 if (sigs[signum])
6269 sig_print_info (signum);
6270 }
6271
6272 break;
6273 }
6274
6275 do_cleanups (old_chain);
6276 }
6277
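/* XDB-compatible wrapper around the "handle" command: translate XDB's
   "SIGNAL Q|s|i|r" syntax into the equivalent "handle" arguments and
   dispatch them to handle_command.  */
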
6278 static void
6279 xdb_handle_command (char *args, int from_tty)
6280 {
6281 char **argv;
6282 struct cleanup *old_chain;
6283
6284 if (args == NULL)
6285 error_no_arg (_("xdb command"));
6286
6287 /* Break the command line up into args. */
6288
6289 argv = gdb_buildargv (args);
6290 old_chain = make_cleanup_freeargv (argv);
6291 if (argv[1] != (char *) NULL)
6292 {
6293 char *argBuf;
6294 int bufLen;
6295
6296 bufLen = strlen (argv[0]) + 20;
6297 argBuf = (char *) xmalloc (bufLen);
6298 if (argBuf)
6299 {
6300 int validFlag = 1;
6301 enum target_signal oursig;
6302
6303 oursig = target_signal_from_name (argv[0]);
6304 memset (argBuf, 0, bufLen);
6305 if (strcmp (argv[1], "Q") == 0)
6306 sprintf (argBuf, "%s %s", argv[0], "noprint");
6307 else
6308 {
6309 if (strcmp (argv[1], "s") == 0)
6310 {
6311 if (!signal_stop[oursig])
6312 sprintf (argBuf, "%s %s", argv[0], "stop");
6313 else
6314 sprintf (argBuf, "%s %s", argv[0], "nostop");
6315 }
6316 else if (strcmp (argv[1], "i") == 0)
6317 {
6318 if (!signal_program[oursig])
6319 sprintf (argBuf, "%s %s", argv[0], "pass");
6320 else
6321 sprintf (argBuf, "%s %s", argv[0], "nopass");
6322 }
6323 else if (strcmp (argv[1], "r") == 0)
6324 {
6325 if (!signal_print[oursig])
6326 sprintf (argBuf, "%s %s", argv[0], "print");
6327 else
6328 sprintf (argBuf, "%s %s", argv[0], "noprint");
6329 }
6330 else
6331 validFlag = 0;
6332 }
6333 if (validFlag)
6334 handle_command (argBuf, from_tty);
6335 else
6336 printf_filtered (_("Invalid signal handling flag.\n"));
6337 if (argBuf)
6338 xfree (argBuf);
6339 }
6340 }
6341 do_cleanups (old_chain);
6342 }
6343
6344 /* Print current contents of the tables set by the handle command.
6345 It is possible we should just be printing signals actually used
6346 by the current target (but for things to work right when switching
6347 targets, all signals should be in the signal tables). */
6348
6349 static void
6350 signals_info (char *signum_exp, int from_tty)
6351 {
6352 enum target_signal oursig;
6353
6354 sig_print_header ();
6355
6356 if (signum_exp)
6357 {
6358 /* First see if this is a symbol name. */
6359 oursig = target_signal_from_name (signum_exp);
6360 if (oursig == TARGET_SIGNAL_UNKNOWN)
6361 {
6362 /* No, try numeric. */
6363 oursig =
6364 target_signal_from_command (parse_and_eval_long (signum_exp));
6365 }
6366 sig_print_info (oursig);
6367 return;
6368 }
6369
6370 printf_filtered ("\n");
6371 /* These ugly casts brought to you by the native VAX compiler. */
6372 for (oursig = TARGET_SIGNAL_FIRST;
6373 (int) oursig < (int) TARGET_SIGNAL_LAST;
6374 oursig = (enum target_signal) ((int) oursig + 1))
6375 {
6376 QUIT;
6377
6378 if (oursig != TARGET_SIGNAL_UNKNOWN
6379 && oursig != TARGET_SIGNAL_DEFAULT && oursig != TARGET_SIGNAL_0)
6380 sig_print_info (oursig);
6381 }
6382
6383 printf_filtered (_("\nUse the \"handle\" command "
6384 "to change these tables.\n"));
6385 }
6386
6387 /* Check if it makes sense to read $_siginfo from the current thread
6388 at this point. If not, throw an error. */
6389
6390 static void
6391 validate_siginfo_access (void)
6392 {
6393 /* No current inferior, no siginfo. */
6394 if (ptid_equal (inferior_ptid, null_ptid))
6395 error (_("No thread selected."));
6396
6397 /* Don't try to read from a dead thread. */
6398 if (is_exited (inferior_ptid))
6399 error (_("The current thread has terminated"));
6400
6401 /* ... or from a spinning thread. */
6402 if (is_running (inferior_ptid))
6403 error (_("Selected thread is running."));
6404 }
6405
6406 /* The $_siginfo convenience variable is a bit special. We don't know
6407 for sure the type of the value until we actually have a chance to
6408 fetch the data. The type can change depending on gdbarch, so it is
6409    also dependent on which thread you have selected.  We solve this by:
6410
6411 1. making $_siginfo be an internalvar that creates a new value on
6412 access.
6413
6414 2. making the value of $_siginfo be an lval_computed value. */
6415
6416 /* This function implements the lval_computed support for reading a
6417 $_siginfo value. */
6418
6419 static void
6420 siginfo_value_read (struct value *v)
6421 {
6422 LONGEST transferred;
6423
6424 validate_siginfo_access ();
6425
6426 transferred =
6427 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
6428 NULL,
6429 value_contents_all_raw (v),
6430 value_offset (v),
6431 TYPE_LENGTH (value_type (v)));
6432
6433 if (transferred != TYPE_LENGTH (value_type (v)))
6434 error (_("Unable to read siginfo"));
6435 }
6436
6437 /* This function implements the lval_computed support for writing a
6438 $_siginfo value. */
6439
6440 static void
6441 siginfo_value_write (struct value *v, struct value *fromval)
6442 {
6443 LONGEST transferred;
6444
6445 validate_siginfo_access ();
6446
6447 transferred = target_write (&current_target,
6448 TARGET_OBJECT_SIGNAL_INFO,
6449 NULL,
6450 value_contents_all_raw (fromval),
6451 value_offset (v),
6452 TYPE_LENGTH (value_type (fromval)));
6453
6454 if (transferred != TYPE_LENGTH (value_type (fromval)))
6455 error (_("Unable to write siginfo"));
6456 }
6457
6458 static const struct lval_funcs siginfo_value_funcs =
6459 {
6460 siginfo_value_read,
6461 siginfo_value_write
6462 };
6463
6464 /* Return a new value with the correct type for the siginfo object of
6465 the current thread using architecture GDBARCH. Return a void value
6466 if there's no object available. */
6467
6468 static struct value *
6469 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var)
6470 {
6471 if (target_has_stack
6472 && !ptid_equal (inferior_ptid, null_ptid)
6473 && gdbarch_get_siginfo_type_p (gdbarch))
6474 {
6475 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6476
6477 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
6478 }
6479
6480 return allocate_value (builtin_type (gdbarch)->builtin_void);
6481 }
6482
6483 \f
6484 /* infcall_suspend_state contains state about the program itself like its
6485 registers and any signal it received when it last stopped.
6486 This state must be restored regardless of how the inferior function call
6487 ends (either successfully, or after it hits a breakpoint or signal)
6488 if the program is to properly continue where it left off. */
6489
6490 struct infcall_suspend_state
6491 {
6492 struct thread_suspend_state thread_suspend;
6493 struct inferior_suspend_state inferior_suspend;
6494
6495 /* Other fields: */
6496 CORE_ADDR stop_pc;
6497 struct regcache *registers;
6498
6499 /* Format of SIGINFO_DATA or NULL if it is not present. */
6500 struct gdbarch *siginfo_gdbarch;
6501
6502 /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
6503      TYPE_LENGTH (gdbarch_get_siginfo_type ()).  For a different gdbarch the
6504 content would be invalid. */
6505 gdb_byte *siginfo_data;
6506 };
6507
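/* Save the suspend state of the current thread and inferior (stop
   signal, stop PC, registers, and, when the architecture provides it,
   the siginfo data) so it can be restored after an inferior function
   call.  */
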
6508 struct infcall_suspend_state *
6509 save_infcall_suspend_state (void)
6510 {
6511 struct infcall_suspend_state *inf_state;
6512 struct thread_info *tp = inferior_thread ();
6513 struct inferior *inf = current_inferior ();
6514 struct regcache *regcache = get_current_regcache ();
6515 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6516 gdb_byte *siginfo_data = NULL;
6517
6518 if (gdbarch_get_siginfo_type_p (gdbarch))
6519 {
6520 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6521 size_t len = TYPE_LENGTH (type);
6522 struct cleanup *back_to;
6523
6524 siginfo_data = xmalloc (len);
6525 back_to = make_cleanup (xfree, siginfo_data);
6526
6527 if (target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6528 siginfo_data, 0, len) == len)
6529 discard_cleanups (back_to);
6530 else
6531 {
6532 /* Errors ignored. */
6533 do_cleanups (back_to);
6534 siginfo_data = NULL;
6535 }
6536 }
6537
6538 inf_state = XZALLOC (struct infcall_suspend_state);
6539
6540 if (siginfo_data)
6541 {
6542 inf_state->siginfo_gdbarch = gdbarch;
6543 inf_state->siginfo_data = siginfo_data;
6544 }
6545
6546 inf_state->thread_suspend = tp->suspend;
6547 inf_state->inferior_suspend = inf->suspend;
6548
6549 /* run_inferior_call will not use the signal due to its `proceed' call with
6550 TARGET_SIGNAL_0 anyway. */
6551 tp->suspend.stop_signal = TARGET_SIGNAL_0;
6552
6553 inf_state->stop_pc = stop_pc;
6554
6555 inf_state->registers = regcache_dup (regcache);
6556
6557 return inf_state;
6558 }
6559
6560 /* Restore inferior session state to INF_STATE. */
6561
6562 void
6563 restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6564 {
6565 struct thread_info *tp = inferior_thread ();
6566 struct inferior *inf = current_inferior ();
6567 struct regcache *regcache = get_current_regcache ();
6568 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6569
6570 tp->suspend = inf_state->thread_suspend;
6571 inf->suspend = inf_state->inferior_suspend;
6572
6573 stop_pc = inf_state->stop_pc;
6574
6575 if (inf_state->siginfo_gdbarch == gdbarch)
6576 {
6577 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6578 size_t len = TYPE_LENGTH (type);
6579
6580 /* Errors ignored. */
6581 target_write (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6582 inf_state->siginfo_data, 0, len);
6583 }
6584
6585 /* The inferior can be gone if the user types "print exit(0)"
6586 (and perhaps other times). */
6587 if (target_has_execution)
6588 /* NB: The register write goes through to the target. */
6589 regcache_cpy (regcache, inf_state->registers);
6590
6591 discard_infcall_suspend_state (inf_state);
6592 }
6593
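/* Cleanup wrapper around restore_infcall_suspend_state.  */
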
6594 static void
6595 do_restore_infcall_suspend_state_cleanup (void *state)
6596 {
6597 restore_infcall_suspend_state (state);
6598 }
6599
6600 struct cleanup *
6601 make_cleanup_restore_infcall_suspend_state
6602 (struct infcall_suspend_state *inf_state)
6603 {
6604 return make_cleanup (do_restore_infcall_suspend_state_cleanup, inf_state);
6605 }
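/* Illustrative sketch (not part of this file): a hypothetical caller in
   the inferior-call code would pair the save with the cleanup-based
   restore, so the suspend state comes back no matter how the call ends:

     struct infcall_suspend_state *state = save_infcall_suspend_state ();
     struct cleanup *old_chain
       = make_cleanup_restore_infcall_suspend_state (state);

     ...set up the dummy frame and run the inferior function call...

     do_cleanups (old_chain);

   Running the cleanup calls restore_infcall_suspend_state, which writes
   the saved registers, stop_pc and siginfo back.  A path that instead
   wants to keep the new state would call discard_cleanups followed by
   discard_infcall_suspend_state.  */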
6606
6607 void
6608 discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6609 {
6610 regcache_xfree (inf_state->registers);
6611 xfree (inf_state->siginfo_data);
6612 xfree (inf_state);
6613 }
6614
6615 struct regcache *
6616 get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
6617 {
6618 return inf_state->registers;
6619 }
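/* Example (illustrative only): the saved registers can be inspected
   without restoring them, e.g. to recover the PC the thread had before
   the inferior call was set up:

     struct regcache *saved = get_infcall_suspend_state_regcache (state);
     CORE_ADDR saved_pc = regcache_read_pc (saved);

   STATE here is assumed to be a value previously returned by
   save_infcall_suspend_state.  */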
6620
6621 /* infcall_control_state contains state regarding gdb's control of the
6622 inferior itself like stepping control. It also contains session state like
6623 the user's currently selected frame. */
6624
6625 struct infcall_control_state
6626 {
6627 struct thread_control_state thread_control;
6628 struct inferior_control_state inferior_control;
6629
6630 /* Other fields: */
6631 enum stop_stack_kind stop_stack_dummy;
6632 int stopped_by_random_signal;
6633 int stop_after_trap;
6634
6635 /* ID of the selected frame when the inferior function call was made. */
6636 struct frame_id selected_frame_id;
6637 };
6638
6639 /* Save all of the information associated with the inferior<==>gdb
6640 connection. */
6641
6642 struct infcall_control_state *
6643 save_infcall_control_state (void)
6644 {
6645 struct infcall_control_state *inf_status = xmalloc (sizeof (*inf_status));
6646 struct thread_info *tp = inferior_thread ();
6647 struct inferior *inf = current_inferior ();
6648
6649 inf_status->thread_control = tp->control;
6650 inf_status->inferior_control = inf->control;
6651
6652 tp->control.step_resume_breakpoint = NULL;
6653 tp->control.exception_resume_breakpoint = NULL;
6654
6655 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
6656 chain. If caller's caller is walking the chain, they'll be happier if we
6657 hand them back the original chain when restore_infcall_control_state is
6658 called. */
6659 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
6660
6661 /* Other fields: */
6662 inf_status->stop_stack_dummy = stop_stack_dummy;
6663 inf_status->stopped_by_random_signal = stopped_by_random_signal;
6664 inf_status->stop_after_trap = stop_after_trap;
6665
6666 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
6667
6668 return inf_status;
6669 }
6670
6671 static int
6672 restore_selected_frame (void *args)
6673 {
6674 struct frame_id *fid = (struct frame_id *) args;
6675 struct frame_info *frame;
6676
6677 frame = frame_find_by_id (*fid);
6678
6679 /* FRAME may be NULL if the previously selected frame can no longer be
6680 found (e.g., because the stack has been clobbered). */
6681 if (frame == NULL)
6682 {
6683 warning (_("Unable to restore previously selected frame."));
6684 return 0;
6685 }
6686
6687 select_frame (frame);
6688
6689 return (1);
6690 }
6691
6692 /* Restore inferior session state to INF_STATUS. */
6693
6694 void
6695 restore_infcall_control_state (struct infcall_control_state *inf_status)
6696 {
6697 struct thread_info *tp = inferior_thread ();
6698 struct inferior *inf = current_inferior ();
6699
6700 if (tp->control.step_resume_breakpoint)
6701 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
6702
6703 if (tp->control.exception_resume_breakpoint)
6704 tp->control.exception_resume_breakpoint->disposition
6705 = disp_del_at_next_stop;
6706
6707 /* Handle the bpstat_copy of the chain. */
6708 bpstat_clear (&tp->control.stop_bpstat);
6709
6710 tp->control = inf_status->thread_control;
6711 inf->control = inf_status->inferior_control;
6712
6713 /* Other fields: */
6714 stop_stack_dummy = inf_status->stop_stack_dummy;
6715 stopped_by_random_signal = inf_status->stopped_by_random_signal;
6716 stop_after_trap = inf_status->stop_after_trap;
6717
6718 if (target_has_stack)
6719 {
6720 /* The point of catch_errors is that if the stack is clobbered,
6721 walking the stack might encounter a garbage pointer and
6722 error() trying to dereference it. */
6723 if (catch_errors
6724 (restore_selected_frame, &inf_status->selected_frame_id,
6725 "Unable to restore previously selected frame:\n",
6726 RETURN_MASK_ERROR) == 0)
6727 /* Error in restoring the selected frame. Select the innermost
6728 frame. */
6729 select_frame (get_current_frame ());
6730 }
6731
6732 xfree (inf_status);
6733 }
6734
6735 static void
6736 do_restore_infcall_control_state_cleanup (void *sts)
6737 {
6738 restore_infcall_control_state (sts);
6739 }
6740
6741 struct cleanup *
6742 make_cleanup_restore_infcall_control_state
6743 (struct infcall_control_state *inf_status)
6744 {
6745 return make_cleanup (do_restore_infcall_control_state_cleanup, inf_status);
6746 }
6747
6748 void
6749 discard_infcall_control_state (struct infcall_control_state *inf_status)
6750 {
6751 if (inf_status->thread_control.step_resume_breakpoint)
6752 inf_status->thread_control.step_resume_breakpoint->disposition
6753 = disp_del_at_next_stop;
6754
6755 if (inf_status->thread_control.exception_resume_breakpoint)
6756 inf_status->thread_control.exception_resume_breakpoint->disposition
6757 = disp_del_at_next_stop;
6758
6759 /* See save_infcall_control_state for info on stop_bpstat. */
6760 bpstat_clear (&inf_status->thread_control.stop_bpstat);
6761
6762 xfree (inf_status);
6763 }
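/* Illustrative sketch (not part of this file): the control state follows
   the same save/restore-or-discard pattern as the suspend state above.
   A hypothetical caller might write:

     struct infcall_control_state *status = save_infcall_control_state ();
     struct cleanup *old_chain
       = make_cleanup_restore_infcall_control_state (status);

     ...perform the operation that disturbs stepping and breakpoint state...

     if (keep_new_state)
       {
         discard_cleanups (old_chain);
         discard_infcall_control_state (status);
       }
     else
       do_cleanups (old_chain);

   KEEP_NEW_STATE is a made-up flag standing in for whatever condition the
   real caller uses to decide between keeping and rolling back.  */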
6764 \f
6765 int
6766 inferior_has_forked (ptid_t pid, ptid_t *child_pid)
6767 {
6768 struct target_waitstatus last;
6769 ptid_t last_ptid;
6770
6771 get_last_target_status (&last_ptid, &last);
6772
6773 if (last.kind != TARGET_WAITKIND_FORKED)
6774 return 0;
6775
6776 if (!ptid_equal (last_ptid, pid))
6777 return 0;
6778
6779 *child_pid = last.value.related_pid;
6780 return 1;
6781 }
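/* Example (illustrative only): a hypothetical caller that wants to know
   whether the last reported event for the current thread was a fork, and
   which child resulted, could write:

     ptid_t child;

     if (inferior_has_forked (inferior_ptid, &child))
       printf_filtered ("forked child is %s\n", target_pid_to_str (child));

   The same pattern applies to inferior_has_vforked, inferior_has_execd
   and inferior_has_called_syscall below.  */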
6782
6783 int
6784 inferior_has_vforked (ptid_t pid, ptid_t *child_pid)
6785 {
6786 struct target_waitstatus last;
6787 ptid_t last_ptid;
6788
6789 get_last_target_status (&last_ptid, &last);
6790
6791 if (last.kind != TARGET_WAITKIND_VFORKED)
6792 return 0;
6793
6794 if (!ptid_equal (last_ptid, pid))
6795 return 0;
6796
6797 *child_pid = last.value.related_pid;
6798 return 1;
6799 }
6800
6801 int
6802 inferior_has_execd (ptid_t pid, char **execd_pathname)
6803 {
6804 struct target_waitstatus last;
6805 ptid_t last_ptid;
6806
6807 get_last_target_status (&last_ptid, &last);
6808
6809 if (last.kind != TARGET_WAITKIND_EXECD)
6810 return 0;
6811
6812 if (!ptid_equal (last_ptid, pid))
6813 return 0;
6814
6815 *execd_pathname = xstrdup (last.value.execd_pathname);
6816 return 1;
6817 }
6818
6819 int
6820 inferior_has_called_syscall (ptid_t pid, int *syscall_number)
6821 {
6822 struct target_waitstatus last;
6823 ptid_t last_ptid;
6824
6825 get_last_target_status (&last_ptid, &last);
6826
6827 if (last.kind != TARGET_WAITKIND_SYSCALL_ENTRY
6828 && last.kind != TARGET_WAITKIND_SYSCALL_RETURN)
6829 return 0;
6830
6831 if (!ptid_equal (last_ptid, pid))
6832 return 0;
6833
6834 *syscall_number = last.value.syscall_number;
6835 return 1;
6836 }
6837
6838 int
6839 ptid_match (ptid_t ptid, ptid_t filter)
6840 {
6841 if (ptid_equal (filter, minus_one_ptid))
6842 return 1;
6843 if (ptid_is_pid (filter)
6844 && ptid_get_pid (ptid) == ptid_get_pid (filter))
6845 return 1;
6846 else if (ptid_equal (ptid, filter))
6847 return 1;
6848
6849 return 0;
6850 }
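/* Example (illustrative only) of the matching rules above, assuming a
   target whose ptids carry a pid and an lwp:

     ptid_t thread = ptid_build (1234, 5678, 0);

     ptid_match (thread, minus_one_ptid);      returns 1 (wildcard filter)
     ptid_match (thread, pid_to_ptid (1234));  returns 1 (same process)
     ptid_match (thread, thread);              returns 1 (exact match)
     ptid_match (thread, pid_to_ptid (4321));  returns 0 (different process)

   The numeric pid/lwp values are arbitrary examples.  */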
6851
6852 /* restore_inferior_ptid() will be used by the cleanup machinery
6853 to restore the inferior_ptid value saved in a call to
6854 save_inferior_ptid(). */
6855
6856 static void
6857 restore_inferior_ptid (void *arg)
6858 {
6859 ptid_t *saved_ptid_ptr = arg;
6860
6861 inferior_ptid = *saved_ptid_ptr;
6862 xfree (arg);
6863 }
6864
6865 /* Save the value of inferior_ptid so that it may be restored by a
6866 later call to do_cleanups(). Returns the struct cleanup pointer
6867 needed for later doing the cleanup. */
6868
6869 struct cleanup *
6870 save_inferior_ptid (void)
6871 {
6872 ptid_t *saved_ptid_ptr;
6873
6874 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
6875 *saved_ptid_ptr = inferior_ptid;
6876 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
6877 }
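/* Example (illustrative only): code that must temporarily operate with a
   different inferior_ptid can rely on the cleanup to put the old value
   back, even if an error is thrown in between:

     struct cleanup *old_chain = save_inferior_ptid ();

     inferior_ptid = some_other_ptid;
     ...do the work that depends on inferior_ptid...

     do_cleanups (old_chain);

   SOME_OTHER_PTID stands in for whatever thread the caller needs to
   address.  */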
6878 \f
6879
6880 /* User interface for reverse debugging:
6881 Set exec-direction / show exec-direction commands
6882 (returns error unless the target supports reverse execution). */
6883
6884 int execution_direction = EXEC_FORWARD;
6885 static const char exec_forward[] = "forward";
6886 static const char exec_reverse[] = "reverse";
6887 static const char *exec_direction = exec_forward;
6888 static const char *exec_direction_names[] = {
6889 exec_forward,
6890 exec_reverse,
6891 NULL
6892 };
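/* Example user-level usage (illustrative): on a target that can execute
   in reverse, such as the process record target, the user can type:

     (gdb) set exec-direction reverse
     (gdb) show exec-direction
     Reverse.

   On targets without reverse-execution support the set command errors
   out and the direction stays "forward".  */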
6893
6894 static void
6895 set_exec_direction_func (char *args, int from_tty,
6896 struct cmd_list_element *cmd)
6897 {
6898 if (target_can_execute_reverse)
6899 {
6900 if (!strcmp (exec_direction, exec_forward))
6901 execution_direction = EXEC_FORWARD;
6902 else if (!strcmp (exec_direction, exec_reverse))
6903 execution_direction = EXEC_REVERSE;
6904 }
6905 else
6906 {
6907 exec_direction = exec_forward;
6908 error (_("Target does not support this operation."));
6909 }
6910 }
6911
6912 static void
6913 show_exec_direction_func (struct ui_file *out, int from_tty,
6914 struct cmd_list_element *cmd, const char *value)
6915 {
6916 switch (execution_direction) {
6917 case EXEC_FORWARD:
6918 fprintf_filtered (out, _("Forward.\n"));
6919 break;
6920 case EXEC_REVERSE:
6921 fprintf_filtered (out, _("Reverse.\n"));
6922 break;
6923 default:
6924 internal_error (__FILE__, __LINE__,
6925 _("bogus execution_direction value: %d"),
6926 (int) execution_direction);
6927 }
6928 }
6929
6930 /* User interface for non-stop mode. */
6931
6932 int non_stop = 0;
6933
6934 static void
6935 set_non_stop (char *args, int from_tty,
6936 struct cmd_list_element *c)
6937 {
6938 if (target_has_execution)
6939 {
6940 non_stop_1 = non_stop;
6941 error (_("Cannot change this setting while the inferior is running."));
6942 }
6943
6944 non_stop = non_stop_1;
6945 }
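/* Example user-level usage (illustrative): non-stop mode has to be
   selected before the program is started, e.g.:

     (gdb) set non-stop on
     (gdb) run

   Changing the setting while the inferior is running triggers the error
   above and the old value is kept.  */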
6946
6947 static void
6948 show_non_stop (struct ui_file *file, int from_tty,
6949 struct cmd_list_element *c, const char *value)
6950 {
6951 fprintf_filtered (file,
6952 _("Controlling the inferior in non-stop mode is %s.\n"),
6953 value);
6954 }
6955
6956 static void
6957 show_schedule_multiple (struct ui_file *file, int from_tty,
6958 struct cmd_list_element *c, const char *value)
6959 {
6960 fprintf_filtered (file, _("Resuming the execution of threads "
6961 "of all processes is %s.\n"), value);
6962 }
6963
6964 void
6965 _initialize_infrun (void)
6966 {
6967 int i;
6968 int numsigs;
6969
6970 add_info ("signals", signals_info, _("\
6971 What debugger does when program gets various signals.\n\
6972 Specify a signal as argument to print info on that signal only."));
6973 add_info_alias ("handle", "signals", 0);
6974
6975 add_com ("handle", class_run, handle_command, _("\
6976 Specify how to handle a signal.\n\
6977 Args are signals and actions to apply to those signals.\n\
6978 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6979 from 1-15 are allowed for compatibility with old versions of GDB.\n\
6980 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
6981 The special arg \"all\" is recognized to mean all signals except those\n\
6982 used by the debugger, typically SIGTRAP and SIGINT.\n\
6983 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
6984 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
6985 Stop means reenter debugger if this signal happens (implies print).\n\
6986 Print means print a message if this signal happens.\n\
6987 Pass means let program see this signal; otherwise program doesn't know.\n\
6988 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
6989 Pass and Stop may be combined."));
6990 if (xdb_commands)
6991 {
6992 add_com ("lz", class_info, signals_info, _("\
6993 What debugger does when program gets various signals.\n\
6994 Specify a signal as argument to print info on that signal only."));
6995 add_com ("z", class_run, xdb_handle_command, _("\
6996 Specify how to handle a signal.\n\
6997 Args are signals and actions to apply to those signals.\n\
6998 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6999 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7000 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7001 The special arg \"all\" is recognized to mean all signals except those\n\
7002 used by the debugger, typically SIGTRAP and SIGINT.\n\
7003 Recognized actions include \"s\" (toggles between stop and nostop),\n\
7004 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
7005 nopass), \"Q\" (noprint)\n\
7006 Stop means reenter debugger if this signal happens (implies print).\n\
7007 Print means print a message if this signal happens.\n\
7008 Pass means let program see this signal; otherwise program doesn't know.\n\
7009 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7010 Pass and Stop may be combined."));
7011 }
7012
7013 if (!dbx_commands)
7014 stop_command = add_cmd ("stop", class_obscure,
7015 not_just_help_class_command, _("\
7016 There is no `stop' command, but you can set a hook on `stop'.\n\
7017 This allows you to set a list of commands to be run each time execution\n\
7018 of the program stops."), &cmdlist);
7019
7020 add_setshow_zinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
7021 Set inferior debugging."), _("\
7022 Show inferior debugging."), _("\
7023 When non-zero, inferior specific debugging is enabled."),
7024 NULL,
7025 show_debug_infrun,
7026 &setdebuglist, &showdebuglist);
7027
7028 add_setshow_boolean_cmd ("displaced", class_maintenance,
7029 &debug_displaced, _("\
7030 Set displaced stepping debugging."), _("\
7031 Show displaced stepping debugging."), _("\
7032 When non-zero, displaced stepping specific debugging is enabled."),
7033 NULL,
7034 show_debug_displaced,
7035 &setdebuglist, &showdebuglist);
7036
7037 add_setshow_boolean_cmd ("non-stop", no_class,
7038 &non_stop_1, _("\
7039 Set whether gdb controls the inferior in non-stop mode."), _("\
7040 Show whether gdb controls the inferior in non-stop mode."), _("\
7041 When debugging a multi-threaded program and this setting is\n\
7042 off (the default, also called all-stop mode), when one thread stops\n\
7043 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
7044 all other threads in the program while you interact with the thread of\n\
7045 interest. When you continue or step a thread, you can allow the other\n\
7046 threads to run, or have them remain stopped, but while you inspect any\n\
7047 thread's state, all threads stop.\n\
7048 \n\
7049 In non-stop mode, when one thread stops, other threads can continue\n\
7050 to run freely. You'll be able to step each thread independently,\n\
7051 leave it stopped or free to run as needed."),
7052 set_non_stop,
7053 show_non_stop,
7054 &setlist,
7055 &showlist);
7056
7057 numsigs = (int) TARGET_SIGNAL_LAST;
7058 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
7059 signal_print = (unsigned char *)
7060 xmalloc (sizeof (signal_print[0]) * numsigs);
7061 signal_program = (unsigned char *)
7062 xmalloc (sizeof (signal_program[0]) * numsigs);
7063 signal_pass = (unsigned char *)
7064 xmalloc (sizeof (signal_pass[0]) * numsigs);
7065 for (i = 0; i < numsigs; i++)
7066 {
7067 signal_stop[i] = 1;
7068 signal_print[i] = 1;
7069 signal_program[i] = 1;
7070 }
7071
7072 /* Signals caused by debugger's own actions
7073 should not be given to the program afterwards. */
7074 signal_program[TARGET_SIGNAL_TRAP] = 0;
7075 signal_program[TARGET_SIGNAL_INT] = 0;
7076
7077 /* Signals that are not errors should not normally enter the debugger. */
7078 signal_stop[TARGET_SIGNAL_ALRM] = 0;
7079 signal_print[TARGET_SIGNAL_ALRM] = 0;
7080 signal_stop[TARGET_SIGNAL_VTALRM] = 0;
7081 signal_print[TARGET_SIGNAL_VTALRM] = 0;
7082 signal_stop[TARGET_SIGNAL_PROF] = 0;
7083 signal_print[TARGET_SIGNAL_PROF] = 0;
7084 signal_stop[TARGET_SIGNAL_CHLD] = 0;
7085 signal_print[TARGET_SIGNAL_CHLD] = 0;
7086 signal_stop[TARGET_SIGNAL_IO] = 0;
7087 signal_print[TARGET_SIGNAL_IO] = 0;
7088 signal_stop[TARGET_SIGNAL_POLL] = 0;
7089 signal_print[TARGET_SIGNAL_POLL] = 0;
7090 signal_stop[TARGET_SIGNAL_URG] = 0;
7091 signal_print[TARGET_SIGNAL_URG] = 0;
7092 signal_stop[TARGET_SIGNAL_WINCH] = 0;
7093 signal_print[TARGET_SIGNAL_WINCH] = 0;
7094 signal_stop[TARGET_SIGNAL_PRIO] = 0;
7095 signal_print[TARGET_SIGNAL_PRIO] = 0;
7096
7097 /* These signals are used internally by user-level thread
7098 implementations. (See signal(5) on Solaris.) Like the above
7099 signals, a healthy program receives and handles them as part of
7100 its normal operation. */
7101 signal_stop[TARGET_SIGNAL_LWP] = 0;
7102 signal_print[TARGET_SIGNAL_LWP] = 0;
7103 signal_stop[TARGET_SIGNAL_WAITING] = 0;
7104 signal_print[TARGET_SIGNAL_WAITING] = 0;
7105 signal_stop[TARGET_SIGNAL_CANCEL] = 0;
7106 signal_print[TARGET_SIGNAL_CANCEL] = 0;
7107
7108 /* Update cached state. */
7109 signal_cache_update (-1);
7110
7111 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
7112 &stop_on_solib_events, _("\
7113 Set stopping for shared library events."), _("\
7114 Show stopping for shared library events."), _("\
7115 If nonzero, gdb will give control to the user when the dynamic linker\n\
7116 notifies gdb of shared library events. The most common event of interest\n\
7117 to the user would be loading/unloading of a new library."),
7118 NULL,
7119 show_stop_on_solib_events,
7120 &setlist, &showlist);
7121
7122 add_setshow_enum_cmd ("follow-fork-mode", class_run,
7123 follow_fork_mode_kind_names,
7124 &follow_fork_mode_string, _("\
7125 Set debugger response to a program call of fork or vfork."), _("\
7126 Show debugger response to a program call of fork or vfork."), _("\
7127 A fork or vfork creates a new process. follow-fork-mode can be:\n\
7128 parent - the original process is debugged after a fork\n\
7129 child - the new process is debugged after a fork\n\
7130 The unfollowed process will continue to run.\n\
7131 By default, the debugger will follow the parent process."),
7132 NULL,
7133 show_follow_fork_mode_string,
7134 &setlist, &showlist);
7135
7136 add_setshow_enum_cmd ("follow-exec-mode", class_run,
7137 follow_exec_mode_names,
7138 &follow_exec_mode_string, _("\
7139 Set debugger response to a program call of exec."), _("\
7140 Show debugger response to a program call of exec."), _("\
7141 An exec call replaces the program image of a process.\n\
7142 \n\
7143 follow-exec-mode can be:\n\
7144 \n\
7145 new - the debugger creates a new inferior and rebinds the process\n\
7146 to this new inferior. The program the process was running before\n\
7147 the exec call can be restarted afterwards by restarting the original\n\
7148 inferior.\n\
7149 \n\
7150 same - the debugger keeps the process bound to the same inferior.\n\
7151 The new executable image replaces the previous executable loaded in\n\
7152 the inferior. Restarting the inferior after the exec call restarts\n\
7153 the executable the process was running after the exec call.\n\
7154 \n\
7155 By default, the debugger will use the same inferior."),
7156 NULL,
7157 show_follow_exec_mode_string,
7158 &setlist, &showlist);
7159
7160 add_setshow_enum_cmd ("scheduler-locking", class_run,
7161 scheduler_enums, &scheduler_mode, _("\
7162 Set mode for locking scheduler during execution."), _("\
7163 Show mode for locking scheduler during execution."), _("\
7164 off == no locking (threads may preempt at any time)\n\
7165 on == full locking (no thread except the current thread may run)\n\
7166 step == scheduler locked during every single-step operation.\n\
7167 In this mode, no other thread may run during a step command.\n\
7168 Other threads may run while stepping over a function call ('next')."),
7169 set_schedlock_func, /* traps on target vector */
7170 show_scheduler_mode,
7171 &setlist, &showlist);
7172
7173 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
7174 Set mode for resuming threads of all processes."), _("\
7175 Show mode for resuming threads of all processes."), _("\
7176 When on, execution commands (such as 'continue' or 'next') resume all\n\
7177 threads of all processes. When off (which is the default), execution\n\
7178 commands only resume the threads of the current process. The set of\n\
7179 threads that are resumed is further refined by the scheduler-locking\n\
7180 mode (see help set scheduler-locking)."),
7181 NULL,
7182 show_schedule_multiple,
7183 &setlist, &showlist);
7184
7185 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
7186 Set mode of the step operation."), _("\
7187 Show mode of the step operation."), _("\
7188 When set, doing a step over a function without debug line information\n\
7189 will stop at the first instruction of that function. Otherwise, the\n\
7190 function is skipped and the step command stops at a different source line."),
7191 NULL,
7192 show_step_stop_if_no_debug,
7193 &setlist, &showlist);
7194
7195 add_setshow_enum_cmd ("displaced-stepping", class_run,
7196 can_use_displaced_stepping_enum,
7197 &can_use_displaced_stepping, _("\
7198 Set debugger's willingness to use displaced stepping."), _("\
7199 Show debugger's willingness to use displaced stepping."), _("\
7200 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
7201 supported by the target architecture. If off, gdb will not use displaced\n\
7202 stepping to step over breakpoints, even if such is supported by the target\n\
7203 architecture. If auto (which is the default), gdb will use displaced stepping\n\
7204 if the target architecture supports it and non-stop mode is active, but will not\n\
7205 use it in all-stop mode (see help set non-stop)."),
7206 NULL,
7207 show_can_use_displaced_stepping,
7208 &setlist, &showlist);
7209
7210 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
7211 &exec_direction, _("Set direction of execution.\n\
7212 Options are 'forward' or 'reverse'."),
7213 _("Show direction of execution (forward/reverse)."),
7214 _("Tells gdb whether to execute forward or backward."),
7215 set_exec_direction_func, show_exec_direction_func,
7216 &setlist, &showlist);
7217
7218 /* Set/show detach-on-fork: user-settable mode. */
7219
7220 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
7221 Set whether gdb will detach the child of a fork."), _("\
7222 Show whether gdb will detach the child of a fork."), _("\
7223 Tells gdb whether to detach the child of a fork."),
7224 NULL, NULL, &setlist, &showlist);
7225
7226 /* ptid initializations */
7227 inferior_ptid = null_ptid;
7228 target_last_wait_ptid = minus_one_ptid;
7229
7230 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
7231 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
7232 observer_attach_thread_exit (infrun_thread_thread_exit);
7233 observer_attach_inferior_exit (infrun_inferior_exit);
7234
7235 /* Explicitly create without lookup, since that tries to create a
7236 value with a void typed value, and when we get here, gdbarch
7237 isn't initialized yet. At this point, we're quite sure there
7238 isn't another convenience variable of the same name. */
7239 create_internalvar_type_lazy ("_siginfo", siginfo_make_value);
7240
7241 add_setshow_boolean_cmd ("observer", no_class,
7242 &observer_mode_1, _("\
7243 Set whether gdb controls the inferior in observer mode."), _("\
7244 Show whether gdb controls the inferior in observer mode."), _("\
7245 In observer mode, GDB can get data from the inferior, but not\n\
7246 affect its execution. Registers and memory may not be changed,\n\
7247 breakpoints may not be set, and the program cannot be interrupted\n\
7248 or signalled."),
7249 set_observer_mode,
7250 show_observer_mode,
7251 &setlist,
7252 &showlist);
7253 }