gdb/infrun.c
1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995,
5 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
6 2008, 2009, 2010 Free Software Foundation, Inc.
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "gdb_string.h"
25 #include <ctype.h>
26 #include "symtab.h"
27 #include "frame.h"
28 #include "inferior.h"
29 #include "exceptions.h"
30 #include "breakpoint.h"
31 #include "gdb_wait.h"
32 #include "gdbcore.h"
33 #include "gdbcmd.h"
34 #include "cli/cli-script.h"
35 #include "target.h"
36 #include "gdbthread.h"
37 #include "annotate.h"
38 #include "symfile.h"
39 #include "top.h"
40 #include <signal.h>
41 #include "inf-loop.h"
42 #include "regcache.h"
43 #include "value.h"
44 #include "observer.h"
45 #include "language.h"
46 #include "solib.h"
47 #include "main.h"
48 #include "gdb_assert.h"
49 #include "mi/mi-common.h"
50 #include "event-top.h"
51 #include "record.h"
52 #include "inline-frame.h"
53 #include "jit.h"
54 #include "tracepoint.h"
55
56 /* Prototypes for local functions */
57
58 static void signals_info (char *, int);
59
60 static void handle_command (char *, int);
61
62 static void sig_print_info (enum target_signal);
63
64 static void sig_print_header (void);
65
66 static void resume_cleanups (void *);
67
68 static int hook_stop_stub (void *);
69
70 static int restore_selected_frame (void *);
71
72 static int follow_fork (void);
73
74 static void set_schedlock_func (char *args, int from_tty,
75 struct cmd_list_element *c);
76
77 static int currently_stepping (struct thread_info *tp);
78
79 static int currently_stepping_or_nexting_callback (struct thread_info *tp,
80 void *data);
81
82 static void xdb_handle_command (char *args, int from_tty);
83
84 static int prepare_to_proceed (int);
85
86 void _initialize_infrun (void);
87
88 void nullify_last_target_wait_ptid (void);
89
90 /* When set, stop the 'step' command if we enter a function which has
91 no line number information. The normal behavior is that we step
92 over such functions. */
93 int step_stop_if_no_debug = 0;
94 static void
95 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
96 struct cmd_list_element *c, const char *value)
97 {
98 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
99 }
100
101 /* In asynchronous mode, but simulating synchronous execution. */
102
103 int sync_execution = 0;
104
105 /* wait_for_inferior and normal_stop use this to notify the user
106 when the inferior stopped in a different thread than it had been
107 running in. */
108
109 static ptid_t previous_inferior_ptid;
110
111 /* Default behavior is to detach newly forked processes (legacy). */
112 int detach_fork = 1;
113
114 int debug_displaced = 0;
115 static void
116 show_debug_displaced (struct ui_file *file, int from_tty,
117 struct cmd_list_element *c, const char *value)
118 {
119 fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
120 }
121
122 static int debug_infrun = 0;
123 static void
124 show_debug_infrun (struct ui_file *file, int from_tty,
125 struct cmd_list_element *c, const char *value)
126 {
127 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
128 }
129
130 /* If the program uses ELF-style shared libraries, then calls to
131 functions in shared libraries go through stubs, which live in a
132 table called the PLT (Procedure Linkage Table). The first time the
133 function is called, the stub sends control to the dynamic linker,
134 which looks up the function's real address, patches the stub so
135 that future calls will go directly to the function, and then passes
136 control to the function.
137
138 If we are stepping at the source level, we don't want to see any of
139 this --- we just want to skip over the stub and the dynamic linker.
140 The simple approach is to single-step until control leaves the
141 dynamic linker.
142
143 However, on some systems (e.g., Red Hat's 5.2 distribution) the
144 dynamic linker calls functions in the shared C library, so you
145 can't tell from the PC alone whether the dynamic linker is still
146 running. In this case, we use a step-resume breakpoint to get us
147 past the dynamic linker, as if we were using "next" to step over a
148 function call.
149
150 in_solib_dynsym_resolve_code() says whether we're in the dynamic
151 linker code or not. Normally, this means we single-step. However,
152 if SKIP_SOLIB_RESOLVER returns non-zero, its value is an
153 address where we can place a step-resume breakpoint to get past the
154 linker's symbol resolution function.
155
156 in_solib_dynsym_resolve_code() can generally be implemented in a
157 pretty portable way, by comparing the PC against the address ranges
158 of the dynamic linker's sections.
159
160 SKIP_SOLIB_RESOLVER is generally going to be system-specific, since
161 it depends on internal details of the dynamic linker. It's usually
162 not too hard to figure out where to put a breakpoint, but it
163 certainly isn't portable. SKIP_SOLIB_RESOLVER should do plenty of
164 sanity checking. If it can't figure things out, returning zero and
165 getting the (possibly confusing) stepping behavior is better than
166 signalling an error, which will obscure the change in the
167 inferior's state. */
168
169 /* This function returns TRUE if pc is the address of an instruction
170 that lies within the dynamic linker (such as the event hook, or the
171 dld itself).
172
173 This function must be used only when a dynamic linker event has
174 been caught, and the inferior is being stepped out of the hook, or
175 undefined results are guaranteed. */
176
177 #ifndef SOLIB_IN_DYNAMIC_LINKER
178 #define SOLIB_IN_DYNAMIC_LINKER(pid,pc) 0
179 #endif
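/* As an illustration of the portable approach described above, here is a
   minimal sketch (not GDB's implementation; the linker text-section bounds
   are hypothetical parameters) of an in_solib_dynsym_resolve_code-style
   check, comparing PC against the dynamic linker's section range.  */
#if 0
static int
example_in_dynsym_resolve_code (CORE_ADDR pc,
                                CORE_ADDR ld_text_start,
                                CORE_ADDR ld_text_end)
{
  /* PC lies in the dynamic linker if it falls within the linker's
     text section address range.  */
  return pc >= ld_text_start && pc < ld_text_end;
}
#endif /* illustrative sketch */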
180
181 /* "Observer mode" is somewhat like a more extreme version of
182 non-stop, in which all GDB operations that might affect the
183 target's execution have been disabled. */
184
185 static int non_stop_1 = 0;
186
187 int observer_mode = 0;
188 static int observer_mode_1 = 0;
189
190 static void
191 set_observer_mode (char *args, int from_tty,
192 struct cmd_list_element *c)
193 {
194 extern int pagination_enabled;
195
196 if (target_has_execution)
197 {
198 observer_mode_1 = observer_mode;
199 error (_("Cannot change this setting while the inferior is running."));
200 }
201
202 observer_mode = observer_mode_1;
203
204 may_write_registers = !observer_mode;
205 may_write_memory = !observer_mode;
206 may_insert_breakpoints = !observer_mode;
207 may_insert_tracepoints = !observer_mode;
208 /* We can insert fast tracepoints in or out of observer mode,
209 but enable them if we're going into this mode. */
210 if (observer_mode)
211 may_insert_fast_tracepoints = 1;
212 may_stop = !observer_mode;
213 update_target_permissions ();
214
215 /* Going *into* observer mode we must force non-stop, then
216 going out we leave it that way. */
217 if (observer_mode)
218 {
219 target_async_permitted = 1;
220 pagination_enabled = 0;
221 non_stop = non_stop_1 = 1;
222 }
223
224 if (from_tty)
225 printf_filtered (_("Observer mode is now %s.\n"),
226 (observer_mode ? "on" : "off"));
227 }
228
229 static void
230 show_observer_mode (struct ui_file *file, int from_tty,
231 struct cmd_list_element *c, const char *value)
232 {
233 fprintf_filtered (file, _("Observer mode is %s.\n"), value);
234 }
235
236 /* This updates the value of observer mode based on changes in
237 permissions. Note that we are deliberately ignoring the values of
238 may-write-registers and may-write-memory, since the user may have
239 reason to enable these during a session, for instance to turn on a
240 debugging-related global. */
241
242 void
243 update_observer_mode (void)
244 {
245 int newval;
246
247 newval = (!may_insert_breakpoints
248 && !may_insert_tracepoints
249 && may_insert_fast_tracepoints
250 && !may_stop
251 && non_stop);
252
253 /* Let the user know if things change. */
254 if (newval != observer_mode)
255 printf_filtered (_("Observer mode is now %s.\n"),
256 (newval ? "on" : "off"));
257
258 observer_mode = observer_mode_1 = newval;
259 }
260
261 /* Tables of how to react to signals; the user sets them. */
262
263 static unsigned char *signal_stop;
264 static unsigned char *signal_print;
265 static unsigned char *signal_program;
266
267 #define SET_SIGS(nsigs,sigs,flags) \
268 do { \
269 int signum = (nsigs); \
270 while (signum-- > 0) \
271 if ((sigs)[signum]) \
272 (flags)[signum] = 1; \
273 } while (0)
274
275 #define UNSET_SIGS(nsigs,sigs,flags) \
276 do { \
277 int signum = (nsigs); \
278 while (signum-- > 0) \
279 if ((sigs)[signum]) \
280 (flags)[signum] = 0; \
281 } while (0)
282
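/* Usage sketch for the macros above (illustrative only, not lifted from
   handle_command): given NSIGS flags in SIGS selecting a set of signals,
   arrange for GDB to stop for and print those signals, and to stop
   passing them to the program.  */
#if 0
  SET_SIGS (nsigs, sigs, signal_stop);
  SET_SIGS (nsigs, sigs, signal_print);
  UNSET_SIGS (nsigs, sigs, signal_program);
#endif /* illustrative sketch */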
283 /* Value to pass to target_resume() to cause all threads to resume */
284
285 #define RESUME_ALL minus_one_ptid
286
287 /* Command list pointer for the "stop" placeholder. */
288
289 static struct cmd_list_element *stop_command;
290
291 /* Function inferior was in as of last step command. */
292
293 static struct symbol *step_start_function;
294
295 /* Nonzero if we want to give control to the user when we're notified
296 of shared library events by the dynamic linker. */
297 static int stop_on_solib_events;
298 static void
299 show_stop_on_solib_events (struct ui_file *file, int from_tty,
300 struct cmd_list_element *c, const char *value)
301 {
302 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
303 value);
304 }
305
306 /* Nonzero means we are expecting a trace trap
307 and should stop the inferior and return silently when it happens. */
308
309 int stop_after_trap;
310
311 /* Save register contents here when executing a "finish" command or when
312 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
313 Thus this contains the return value from the called function (assuming
314 values are returned in a register). */
315
316 struct regcache *stop_registers;
317
318 /* Nonzero after stop if current stack frame should be printed. */
319
320 static int stop_print_frame;
321
322 /* This is a cached copy of the pid/waitstatus of the last event
323 returned by target_wait()/deprecated_target_wait_hook(). This
324 information is returned by get_last_target_status(). */
325 static ptid_t target_last_wait_ptid;
326 static struct target_waitstatus target_last_waitstatus;
327
328 static void context_switch (ptid_t ptid);
329
330 void init_thread_stepping_state (struct thread_info *tss);
331
332 void init_infwait_state (void);
333
334 static const char follow_fork_mode_child[] = "child";
335 static const char follow_fork_mode_parent[] = "parent";
336
337 static const char *follow_fork_mode_kind_names[] = {
338 follow_fork_mode_child,
339 follow_fork_mode_parent,
340 NULL
341 };
342
343 static const char *follow_fork_mode_string = follow_fork_mode_parent;
344 static void
345 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
346 struct cmd_list_element *c, const char *value)
347 {
348 fprintf_filtered (file, _("\
349 Debugger response to a program call of fork or vfork is \"%s\".\n"),
350 value);
351 }
352 \f
353
354 /* Tell the target to follow the fork we're stopped at. Returns true
355 if the inferior should be resumed; false, if the target for some
356 reason decided it's best not to resume. */
357
358 static int
359 follow_fork (void)
360 {
361 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
362 int should_resume = 1;
363 struct thread_info *tp;
364
365 /* Copy user stepping state to the new inferior thread. FIXME: the
366 followed fork child thread should have a copy of most of the
367 parent thread structure's run control related fields, not just these.
368 Initialized to avoid "may be used uninitialized" warnings from gcc. */
369 struct breakpoint *step_resume_breakpoint = NULL;
370 CORE_ADDR step_range_start = 0;
371 CORE_ADDR step_range_end = 0;
372 struct frame_id step_frame_id = { 0 };
373
374 if (!non_stop)
375 {
376 ptid_t wait_ptid;
377 struct target_waitstatus wait_status;
378
379 /* Get the last target status returned by target_wait(). */
380 get_last_target_status (&wait_ptid, &wait_status);
381
382 /* If not stopped at a fork event, then there's nothing else to
383 do. */
384 if (wait_status.kind != TARGET_WAITKIND_FORKED
385 && wait_status.kind != TARGET_WAITKIND_VFORKED)
386 return 1;
387
388 /* Check if we switched over from WAIT_PTID, since the event was
389 reported. */
390 if (!ptid_equal (wait_ptid, minus_one_ptid)
391 && !ptid_equal (inferior_ptid, wait_ptid))
392 {
393 /* We did. Switch back to WAIT_PTID thread, to tell the
394 target to follow it (in either direction). We'll
395 afterwards refuse to resume, and inform the user what
396 happened. */
397 switch_to_thread (wait_ptid);
398 should_resume = 0;
399 }
400 }
401
402 tp = inferior_thread ();
403
404 /* If there were any forks/vforks that were caught and are now to be
405 followed, then do so now. */
406 switch (tp->pending_follow.kind)
407 {
408 case TARGET_WAITKIND_FORKED:
409 case TARGET_WAITKIND_VFORKED:
410 {
411 ptid_t parent, child;
412
413 /* If the user did a next/step, etc, over a fork call,
414 preserve the stepping state in the fork child. */
415 if (follow_child && should_resume)
416 {
417 step_resume_breakpoint
418 = clone_momentary_breakpoint (tp->step_resume_breakpoint);
419 step_range_start = tp->step_range_start;
420 step_range_end = tp->step_range_end;
421 step_frame_id = tp->step_frame_id;
422
423 /* For now, delete the parent's sr breakpoint, otherwise,
424 parent/child sr breakpoints are considered duplicates,
425 and the child version will not be installed. Remove
426 this when the breakpoints module becomes aware of
427 inferiors and address spaces. */
428 delete_step_resume_breakpoint (tp);
429 tp->step_range_start = 0;
430 tp->step_range_end = 0;
431 tp->step_frame_id = null_frame_id;
432 }
433
434 parent = inferior_ptid;
435 child = tp->pending_follow.value.related_pid;
436
437 /* Tell the target to do whatever is necessary to follow
438 either parent or child. */
439 if (target_follow_fork (follow_child))
440 {
441 /* Target refused to follow, or there's some other reason
442 we shouldn't resume. */
443 should_resume = 0;
444 }
445 else
446 {
447 /* This pending follow fork event is now handled, one way
448 or another. The previously selected thread may be gone
449 from the lists by now, but if it is still around, we need
450 to clear the pending follow request. */
451 tp = find_thread_ptid (parent);
452 if (tp)
453 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
454
455 /* This makes sure we don't try to apply the "Switched
456 over from WAIT_PTID" logic above. */
457 nullify_last_target_wait_ptid ();
458
459 /* If we followed the child, switch to it... */
460 if (follow_child)
461 {
462 switch_to_thread (child);
463
464 /* ... and preserve the stepping state, in case the
465 user was stepping over the fork call. */
466 if (should_resume)
467 {
468 tp = inferior_thread ();
469 tp->step_resume_breakpoint = step_resume_breakpoint;
470 tp->step_range_start = step_range_start;
471 tp->step_range_end = step_range_end;
472 tp->step_frame_id = step_frame_id;
473 }
474 else
475 {
476 /* If we get here, it was because we're trying to
477 resume from a fork catchpoint, but, the user
478 has switched threads away from the thread that
479 forked. In that case, the resume command
480 issued is most likely not applicable to the
481 child, so just warn, and refuse to resume. */
482 warning (_("\
483 Not resuming: switched threads before following fork child.\n"));
484 }
485
486 /* Reset breakpoints in the child as appropriate. */
487 follow_inferior_reset_breakpoints ();
488 }
489 else
490 switch_to_thread (parent);
491 }
492 }
493 break;
494 case TARGET_WAITKIND_SPURIOUS:
495 /* Nothing to follow. */
496 break;
497 default:
498 internal_error (__FILE__, __LINE__,
499 "Unexpected pending_follow.kind %d\n",
500 tp->pending_follow.kind);
501 break;
502 }
503
504 return should_resume;
505 }
506
507 void
508 follow_inferior_reset_breakpoints (void)
509 {
510 struct thread_info *tp = inferior_thread ();
511
512 /* Was there a step_resume breakpoint? (There was if the user
513 did a "next" at the fork() call.) If so, explicitly reset its
514 thread number.
515
516 step_resumes are a form of bp that are made to be per-thread.
517 Since we created the step_resume bp when the parent process
518 was being debugged, and now are switching to the child process,
519 from the breakpoint package's viewpoint, that's a switch of
520 "threads". We must update the bp's notion of which thread
521 it is for, or it'll be ignored when it triggers. */
522
523 if (tp->step_resume_breakpoint)
524 breakpoint_re_set_thread (tp->step_resume_breakpoint);
525
526 /* Reinsert all breakpoints in the child. The user may have set
527 breakpoints after catching the fork, in which case those
528 were never set in the child, but only in the parent. This makes
529 sure the inserted breakpoints match the breakpoint list. */
530
531 breakpoint_re_set ();
532 insert_breakpoints ();
533 }
534
535 /* The child has exited or execed: resume threads of the parent the
536 user wanted to be executing. */
537
538 static int
539 proceed_after_vfork_done (struct thread_info *thread,
540 void *arg)
541 {
542 int pid = * (int *) arg;
543
544 if (ptid_get_pid (thread->ptid) == pid
545 && is_running (thread->ptid)
546 && !is_executing (thread->ptid)
547 && !thread->stop_requested
548 && thread->stop_signal == TARGET_SIGNAL_0)
549 {
550 if (debug_infrun)
551 fprintf_unfiltered (gdb_stdlog,
552 "infrun: resuming vfork parent thread %s\n",
553 target_pid_to_str (thread->ptid));
554
555 switch_to_thread (thread->ptid);
556 clear_proceed_status ();
557 proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);
558 }
559
560 return 0;
561 }
562
563 /* Called whenever we notice an exec or exit event, to handle
564 detaching or resuming a vfork parent. */
565
566 static void
567 handle_vfork_child_exec_or_exit (int exec)
568 {
569 struct inferior *inf = current_inferior ();
570
571 if (inf->vfork_parent)
572 {
573 int resume_parent = -1;
574
575 /* This exec or exit marks the end of the shared memory region
576 between the parent and the child. If the user wanted to
577 detach from the parent, now is the time. */
578
579 if (inf->vfork_parent->pending_detach)
580 {
581 struct thread_info *tp;
582 struct cleanup *old_chain;
583 struct program_space *pspace;
584 struct address_space *aspace;
585
586 /* follow-fork child, detach-on-fork on */
587
588 old_chain = make_cleanup_restore_current_thread ();
589
590 /* We're letting go of the parent. */
591 tp = any_live_thread_of_process (inf->vfork_parent->pid);
592 switch_to_thread (tp->ptid);
593
594 /* We're about to detach from the parent, which implicitly
595 removes breakpoints from its address space. There's a
596 catch here: we want to reuse the spaces for the child,
597 but, parent/child are still sharing the pspace at this
598 point, although the exec in reality makes the kernel give
599 the child a fresh set of new pages. The problem here is
600 that the breakpoints module, being unaware of this, would
601 likely choose the child process to write to the parent
602 address space. Swapping the child temporarily away from
603 the spaces has the desired effect. Yes, this is "sort
604 of" a hack. */
605
606 pspace = inf->pspace;
607 aspace = inf->aspace;
608 inf->aspace = NULL;
609 inf->pspace = NULL;
610
611 if (debug_infrun || info_verbose)
612 {
613 target_terminal_ours ();
614
615 if (exec)
616 fprintf_filtered (gdb_stdlog,
617 "Detaching vfork parent process %d after child exec.\n",
618 inf->vfork_parent->pid);
619 else
620 fprintf_filtered (gdb_stdlog,
621 "Detaching vfork parent process %d after child exit.\n",
622 inf->vfork_parent->pid);
623 }
624
625 target_detach (NULL, 0);
626
627 /* Put it back. */
628 inf->pspace = pspace;
629 inf->aspace = aspace;
630
631 do_cleanups (old_chain);
632 }
633 else if (exec)
634 {
635 /* We're staying attached to the parent, so, really give the
636 child a new address space. */
637 inf->pspace = add_program_space (maybe_new_address_space ());
638 inf->aspace = inf->pspace->aspace;
639 inf->removable = 1;
640 set_current_program_space (inf->pspace);
641
642 resume_parent = inf->vfork_parent->pid;
643
644 /* Break the bonds. */
645 inf->vfork_parent->vfork_child = NULL;
646 }
647 else
648 {
649 struct cleanup *old_chain;
650 struct program_space *pspace;
651
652 /* If this is a vfork child exiting, then the pspace and
653 aspaces were shared with the parent. Since we're
654 reporting the process exit, we'll be mourning all that is
655 found in the address space, and switching to null_ptid,
656 preparing to start a new inferior. But, since we don't
657 want to clobber the parent's address/program spaces, we
658 go ahead and create a new one for this exiting
659 inferior. */
660
661 /* Switch to null_ptid, so that clone_program_space doesn't want
662 to read the selected frame of a dead process. */
663 old_chain = save_inferior_ptid ();
664 inferior_ptid = null_ptid;
665
666 /* This inferior is dead, so avoid giving the breakpoints
667 module the option to write through to it (cloning a
668 program space resets breakpoints). */
669 inf->aspace = NULL;
670 inf->pspace = NULL;
671 pspace = add_program_space (maybe_new_address_space ());
672 set_current_program_space (pspace);
673 inf->removable = 1;
674 clone_program_space (pspace, inf->vfork_parent->pspace);
675 inf->pspace = pspace;
676 inf->aspace = pspace->aspace;
677
678 /* Put back inferior_ptid. We'll continue mourning this
679 inferior. */
680 do_cleanups (old_chain);
681
682 resume_parent = inf->vfork_parent->pid;
683 /* Break the bonds. */
684 inf->vfork_parent->vfork_child = NULL;
685 }
686
687 inf->vfork_parent = NULL;
688
689 gdb_assert (current_program_space == inf->pspace);
690
691 if (non_stop && resume_parent != -1)
692 {
693 /* If the user wanted the parent to be running, let it go
694 free now. */
695 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
696
697 if (debug_infrun)
698 fprintf_unfiltered (gdb_stdlog, "infrun: resuming vfork parent process %d\n",
699 resume_parent);
700
701 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
702
703 do_cleanups (old_chain);
704 }
705 }
706 }
707
708 /* Enum strings for "set|show follow-exec-mode". */
709
710 static const char follow_exec_mode_new[] = "new";
711 static const char follow_exec_mode_same[] = "same";
712 static const char *follow_exec_mode_names[] =
713 {
714 follow_exec_mode_new,
715 follow_exec_mode_same,
716 NULL,
717 };
718
719 static const char *follow_exec_mode_string = follow_exec_mode_same;
720 static void
721 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
722 struct cmd_list_element *c, const char *value)
723 {
724 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
725 }
726
727 /* EXECD_PATHNAME is assumed to be non-NULL. */
728
729 static void
730 follow_exec (ptid_t pid, char *execd_pathname)
731 {
732 struct thread_info *th = inferior_thread ();
733 struct inferior *inf = current_inferior ();
734
735 /* This is an exec event that we actually wish to pay attention to.
736 Refresh our symbol table to the newly exec'd program, remove any
737 momentary bp's, etc.
738
739 If there are breakpoints, they aren't really inserted now,
740 since the exec() transformed our inferior into a fresh set
741 of instructions.
742
743 We want to preserve symbolic breakpoints on the list, since
744 we have hopes that they can be reset after the new a.out's
745 symbol table is read.
746
747 However, any "raw" breakpoints must be removed from the list
748 (e.g., the solib bp's), since their address is probably invalid
749 now.
750
751 And, we DON'T want to call delete_breakpoints() here, since
752 that may write the bp's "shadow contents" (the instruction
753 value that was overwritten with a TRAP instruction). Since
754 we now have a new a.out, those shadow contents aren't valid. */
755
756 mark_breakpoints_out ();
757
758 update_breakpoints_after_exec ();
759
760 /* If there was one, it's gone now. We cannot truly step-to-next
761 statement through an exec(). */
762 th->step_resume_breakpoint = NULL;
763 th->step_range_start = 0;
764 th->step_range_end = 0;
765
766 /* The target reports the exec event to the main thread, even if
767 some other thread does the exec, and even if the main thread was
768 already stopped --- if debugging in non-stop mode, it's possible
769 the user had the main thread held stopped in the previous image
770 --- release it now. This is the same behavior as step-over-exec
771 with scheduler-locking on in all-stop mode. */
772 th->stop_requested = 0;
773
774 /* What is this a.out's name? */
775 printf_unfiltered (_("%s is executing new program: %s\n"),
776 target_pid_to_str (inferior_ptid),
777 execd_pathname);
778
779 /* We've followed the inferior through an exec. Therefore, the
780 inferior has essentially been killed & reborn. */
781
782 gdb_flush (gdb_stdout);
783
784 breakpoint_init_inferior (inf_execd);
785
786 if (gdb_sysroot && *gdb_sysroot)
787 {
788 char *name = alloca (strlen (gdb_sysroot)
789 + strlen (execd_pathname)
790 + 1);
791
792 strcpy (name, gdb_sysroot);
793 strcat (name, execd_pathname);
794 execd_pathname = name;
795 }
796
797 /* Reset the shared library package. This ensures that we get a
798 shlib event when the child reaches "_start", at which point the
799 dld will have had a chance to initialize the child. */
800 /* Also, loading a symbol file below may trigger symbol lookups, and
801 we don't want those to be satisfied by the libraries of the
802 previous incarnation of this process. */
803 no_shared_libraries (NULL, 0);
804
805 if (follow_exec_mode_string == follow_exec_mode_new)
806 {
807 struct program_space *pspace;
808
809 /* The user wants to keep the old inferior and program spaces
810 around. Create a new fresh one, and switch to it. */
811
812 inf = add_inferior (current_inferior ()->pid);
813 pspace = add_program_space (maybe_new_address_space ());
814 inf->pspace = pspace;
815 inf->aspace = pspace->aspace;
816
817 exit_inferior_num_silent (current_inferior ()->num);
818
819 set_current_inferior (inf);
820 set_current_program_space (pspace);
821 }
822
823 gdb_assert (current_program_space == inf->pspace);
824
825 /* That a.out is now the one to use. */
826 exec_file_attach (execd_pathname, 0);
827
828 /* Load the main file's symbols. */
829 symbol_file_add_main (execd_pathname, 0);
830
831 #ifdef SOLIB_CREATE_INFERIOR_HOOK
832 SOLIB_CREATE_INFERIOR_HOOK (PIDGET (inferior_ptid));
833 #else
834 solib_create_inferior_hook (0);
835 #endif
836
837 jit_inferior_created_hook ();
838
839 /* Reinsert all breakpoints. (Those which were symbolic have
840 been reset to the proper address in the new a.out, thanks
841 to symbol_file_command...) */
842 insert_breakpoints ();
843
844 /* The next resume of this inferior should bring it to the shlib
845 startup breakpoints. (If the user had also set bp's on
846 "main" from the old (parent) process, then they'll auto-
847 matically get reset there in the new process.) */
848 }
849
850 /* Non-zero if we're just simulating a single-step. This is needed
851 because we cannot remove the breakpoints in the inferior process
852 until after the `wait' in `wait_for_inferior'. */
853 static int singlestep_breakpoints_inserted_p = 0;
854
855 /* The thread we inserted single-step breakpoints for. */
856 static ptid_t singlestep_ptid;
857
858 /* PC when we started this single-step. */
859 static CORE_ADDR singlestep_pc;
860
861 /* If another thread hit the singlestep breakpoint, we save the original
862 thread here so that we can resume single-stepping it later. */
863 static ptid_t saved_singlestep_ptid;
864 static int stepping_past_singlestep_breakpoint;
865
866 /* If not equal to null_ptid, this means that after stepping over breakpoint
867 is finished, we need to switch to deferred_step_ptid, and step it.
868
869 The use case is when one thread has hit a breakpoint, and then the user
870 has switched to another thread and issued 'step'. We need to step over
871 the breakpoint in the thread which hit the breakpoint, but then continue
872 stepping the thread the user has selected. */
873 static ptid_t deferred_step_ptid;
874 \f
875 /* Displaced stepping. */
876
877 /* In non-stop debugging mode, we must take special care to manage
878 breakpoints properly; in particular, the traditional strategy for
879 stepping a thread past a breakpoint it has hit is unsuitable.
880 'Displaced stepping' is a tactic for stepping one thread past a
881 breakpoint it has hit while ensuring that other threads running
882 concurrently will hit the breakpoint as they should.
883
884 The traditional way to step a thread T off a breakpoint in a
885 multi-threaded program in all-stop mode is as follows:
886
887 a0) Initially, all threads are stopped, and breakpoints are not
888 inserted.
889 a1) We single-step T, leaving breakpoints uninserted.
890 a2) We insert breakpoints, and resume all threads.
891
892 In non-stop debugging, however, this strategy is unsuitable: we
893 don't want to have to stop all threads in the system in order to
894 continue or step T past a breakpoint. Instead, we use displaced
895 stepping:
896
897 n0) Initially, T is stopped, other threads are running, and
898 breakpoints are inserted.
899 n1) We copy the instruction "under" the breakpoint to a separate
900 location, outside the main code stream, making any adjustments
901 to the instruction, register, and memory state as directed by
902 T's architecture.
903 n2) We single-step T over the instruction at its new location.
904 n3) We adjust the resulting register and memory state as directed
905 by T's architecture. This includes resetting T's PC to point
906 back into the main instruction stream.
907 n4) We resume T.
908
909 This approach depends on the following gdbarch methods:
910
911 - gdbarch_max_insn_length and gdbarch_displaced_step_location
912 indicate where to copy the instruction, and how much space must
913 be reserved there. We use these in step n1.
914
915 - gdbarch_displaced_step_copy_insn copies an instruction to a new
916 address, and makes any necessary adjustments to the instruction,
917 register contents, and memory. We use this in step n1.
918
919 - gdbarch_displaced_step_fixup adjusts registers and memory after
920 we have successfully single-stepped the instruction, to yield the
921 same effect the instruction would have had if we had executed it
922 at its original address. We use this in step n3.
923
924 - gdbarch_displaced_step_free_closure provides cleanup.
925
926 The gdbarch_displaced_step_copy_insn and
927 gdbarch_displaced_step_fixup functions must be written so that
928 copying an instruction with gdbarch_displaced_step_copy_insn,
929 single-stepping across the copied instruction, and then applying
930 gdbarch_displaced_step_fixup should have the same effects on the
931 thread's memory and registers as stepping the instruction in place
932 would have. Exactly which responsibilities fall to the copy and
933 which fall to the fixup is up to the author of those functions.
934
935 See the comments in gdbarch.sh for details.
936
937 Note that displaced stepping and software single-step cannot
938 currently be used in combination, although with some care I think
939 they could be made to. Software single-step works by placing
940 breakpoints on all possible subsequent instructions; if the
941 displaced instruction is a PC-relative jump, those breakpoints
942 could fall in very strange places --- on pages that aren't
943 executable, or at addresses that are not proper instruction
944 boundaries. (We do generally let other threads run while we wait
945 to hit the software single-step breakpoint, and they might
946 encounter such a corrupted instruction.) One way to work around
947 this would be to have gdbarch_displaced_step_copy_insn fully
948 simulate the effect of PC-relative instructions (and return NULL)
949 on architectures that use software single-stepping.
950
951 In non-stop mode, we can have independent and simultaneous step
952 requests, so more than one thread may need to simultaneously step
953 over a breakpoint. The current implementation assumes there is
954 only one scratch space per process. In this case, we have to
955 serialize access to the scratch space. If thread A wants to step
956 over a breakpoint, but we are currently waiting for some other
957 thread to complete a displaced step, we leave thread A stopped and
958 place it in the displaced_step_request_queue. Whenever a displaced
959 step finishes, we pick the next thread in the queue and start a new
960 displaced step operation on it. See displaced_step_prepare and
961 displaced_step_fixup for details. */
962
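/* Condensed sketch of steps n1-n4 above for one thread, using the gdbarch
   methods just described.  This only illustrates the flow; the real
   implementation is displaced_step_prepare and displaced_step_fixup below
   (queueing, cleanups, debug output and error handling are omitted, and
   locals such as saved_bytes are assumed to be declared).  */
#if 0
  original = regcache_read_pc (regcache);             /* n1: copy out */
  copy = gdbarch_displaced_step_location (gdbarch);
  len = gdbarch_max_insn_length (gdbarch);
  read_memory (copy, saved_bytes, len);               /* save scratch area */
  closure = gdbarch_displaced_step_copy_insn (gdbarch, original,
                                              copy, regcache);
  regcache_write_pc (regcache, copy);
  target_resume (ptid, 1, TARGET_SIGNAL_0);           /* n2: step the copy */
  /* ... wait for the step to finish ...  */
  write_memory (copy, saved_bytes, len);              /* restore scratch area */
  gdbarch_displaced_step_fixup (gdbarch, closure,     /* n3: fix up state */
                                original, copy, regcache);
  target_resume (ptid, 0, TARGET_SIGNAL_0);           /* n4: resume normally */
#endif /* illustrative sketch */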
963 struct displaced_step_request
964 {
965 ptid_t ptid;
966 struct displaced_step_request *next;
967 };
968
969 /* Per-inferior displaced stepping state. */
970 struct displaced_step_inferior_state
971 {
972 /* Pointer to next in linked list. */
973 struct displaced_step_inferior_state *next;
974
975 /* The process this displaced step state refers to. */
976 int pid;
977
978 /* A queue of pending displaced stepping requests. One entry per
979 thread that needs to do a displaced step. */
980 struct displaced_step_request *step_request_queue;
981
982 /* If this is not null_ptid, this is the thread carrying out a
983 displaced single-step in process PID. This thread's state will
984 require fixing up once it has completed its step. */
985 ptid_t step_ptid;
986
987 /* The architecture the thread had when we stepped it. */
988 struct gdbarch *step_gdbarch;
989
990 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
991 for post-step cleanup. */
992 struct displaced_step_closure *step_closure;
993
994 /* The address of the original instruction, and the copy we
995 made. */
996 CORE_ADDR step_original, step_copy;
997
998 /* Saved contents of copy area. */
999 gdb_byte *step_saved_copy;
1000 };
1001
1002 /* The list of states of processes involved in displaced stepping
1003 presently. */
1004 static struct displaced_step_inferior_state *displaced_step_inferior_states;
1005
1006 /* Get the displaced stepping state of process PID. */
1007
1008 static struct displaced_step_inferior_state *
1009 get_displaced_stepping_state (int pid)
1010 {
1011 struct displaced_step_inferior_state *state;
1012
1013 for (state = displaced_step_inferior_states;
1014 state != NULL;
1015 state = state->next)
1016 if (state->pid == pid)
1017 return state;
1018
1019 return NULL;
1020 }
1021
1022 /* Add a new displaced stepping state for process PID to the displaced
1023 stepping state list, or return a pointer to the existing
1024 entry, if one already exists. Never returns NULL. */
1025
1026 static struct displaced_step_inferior_state *
1027 add_displaced_stepping_state (int pid)
1028 {
1029 struct displaced_step_inferior_state *state;
1030
1031 for (state = displaced_step_inferior_states;
1032 state != NULL;
1033 state = state->next)
1034 if (state->pid == pid)
1035 return state;
1036
1037 state = xcalloc (1, sizeof (*state));
1038 state->pid = pid;
1039 state->next = displaced_step_inferior_states;
1040 displaced_step_inferior_states = state;
1041
1042 return state;
1043 }
1044
1045 /* Remove the displaced stepping state of process PID. */
1046
1047 static void
1048 remove_displaced_stepping_state (int pid)
1049 {
1050 struct displaced_step_inferior_state *it, **prev_next_p;
1051
1052 gdb_assert (pid != 0);
1053
1054 it = displaced_step_inferior_states;
1055 prev_next_p = &displaced_step_inferior_states;
1056 while (it)
1057 {
1058 if (it->pid == pid)
1059 {
1060 *prev_next_p = it->next;
1061 xfree (it);
1062 return;
1063 }
1064
1065 prev_next_p = &it->next;
1066 it = *prev_next_p;
1067 }
1068 }
1069
1070 static void
1071 infrun_inferior_exit (struct inferior *inf)
1072 {
1073 remove_displaced_stepping_state (inf->pid);
1074 }
1075
1076 /* Enum strings for "set|show displaced-stepping". */
1077
1078 static const char can_use_displaced_stepping_auto[] = "auto";
1079 static const char can_use_displaced_stepping_on[] = "on";
1080 static const char can_use_displaced_stepping_off[] = "off";
1081 static const char *can_use_displaced_stepping_enum[] =
1082 {
1083 can_use_displaced_stepping_auto,
1084 can_use_displaced_stepping_on,
1085 can_use_displaced_stepping_off,
1086 NULL,
1087 };
1088
1089 /* If ON, and the architecture supports it, GDB will use displaced
1090 stepping to step over breakpoints. If OFF, or if the architecture
1091 doesn't support it, GDB will instead use the traditional
1092 hold-and-step approach. If AUTO (which is the default), GDB will
1093 decide which technique to use to step over breakpoints depending on
1094 which of all-stop or non-stop mode is active --- displaced stepping
1095 in non-stop mode; hold-and-step in all-stop mode. */
1096
1097 static const char *can_use_displaced_stepping =
1098 can_use_displaced_stepping_auto;
1099
1100 static void
1101 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1102 struct cmd_list_element *c,
1103 const char *value)
1104 {
1105 if (can_use_displaced_stepping == can_use_displaced_stepping_auto)
1106 fprintf_filtered (file, _("\
1107 Debugger's willingness to use displaced stepping to step over \
1108 breakpoints is %s (currently %s).\n"),
1109 value, non_stop ? "on" : "off");
1110 else
1111 fprintf_filtered (file, _("\
1112 Debugger's willingness to use displaced stepping to step over \
1113 breakpoints is %s.\n"), value);
1114 }
1115
1116 /* Return non-zero if displaced stepping can/should be used to step
1117 over breakpoints. */
1118
1119 static int
1120 use_displaced_stepping (struct gdbarch *gdbarch)
1121 {
1122 return (((can_use_displaced_stepping == can_use_displaced_stepping_auto
1123 && non_stop)
1124 || can_use_displaced_stepping == can_use_displaced_stepping_on)
1125 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1126 && !RECORD_IS_USED);
1127 }
1128
1129 /* Clean out any stray displaced stepping state. */
1130 static void
1131 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1132 {
1133 /* Indicate that there is no cleanup pending. */
1134 displaced->step_ptid = null_ptid;
1135
1136 if (displaced->step_closure)
1137 {
1138 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1139 displaced->step_closure);
1140 displaced->step_closure = NULL;
1141 }
1142 }
1143
1144 static void
1145 displaced_step_clear_cleanup (void *arg)
1146 {
1147 struct displaced_step_inferior_state *state = arg;
1148
1149 displaced_step_clear (state);
1150 }
1151
1152 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1153 void
1154 displaced_step_dump_bytes (struct ui_file *file,
1155 const gdb_byte *buf,
1156 size_t len)
1157 {
1158 int i;
1159
1160 for (i = 0; i < len; i++)
1161 fprintf_unfiltered (file, "%02x ", buf[i]);
1162 fputs_unfiltered ("\n", file);
1163 }
1164
1165 /* Prepare to single-step, using displaced stepping.
1166
1167 Note that we cannot use displaced stepping when we have a signal to
1168 deliver. If we have a signal to deliver and an instruction to step
1169 over, then after the step, there will be no indication from the
1170 target whether the thread entered a signal handler or ignored the
1171 signal and stepped over the instruction successfully --- both cases
1172 result in a simple SIGTRAP. In the first case we mustn't do a
1173 fixup, and in the second case we must --- but we can't tell which.
1174 Comments in the code for 'random signals' in handle_inferior_event
1175 explain how we handle this case instead.
1176
1177 Returns 1 if preparing was successful -- this thread is going to be
1178 stepped now; or 0 if displaced stepping this thread got queued. */
1179 static int
1180 displaced_step_prepare (ptid_t ptid)
1181 {
1182 struct cleanup *old_cleanups, *ignore_cleanups;
1183 struct regcache *regcache = get_thread_regcache (ptid);
1184 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1185 CORE_ADDR original, copy;
1186 ULONGEST len;
1187 struct displaced_step_closure *closure;
1188 struct displaced_step_inferior_state *displaced;
1189
1190 /* We should never reach this function if the architecture does not
1191 support displaced stepping. */
1192 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1193
1194 /* We have to displaced step one thread at a time, as we only have
1195 access to a single scratch space per inferior. */
1196
1197 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1198
1199 if (!ptid_equal (displaced->step_ptid, null_ptid))
1200 {
1201 /* Already waiting for a displaced step to finish. Defer this
1202 request and place in queue. */
1203 struct displaced_step_request *req, *new_req;
1204
1205 if (debug_displaced)
1206 fprintf_unfiltered (gdb_stdlog,
1207 "displaced: defering step of %s\n",
1208 target_pid_to_str (ptid));
1209
1210 new_req = xmalloc (sizeof (*new_req));
1211 new_req->ptid = ptid;
1212 new_req->next = NULL;
1213
1214 if (displaced->step_request_queue)
1215 {
1216 for (req = displaced->step_request_queue;
1217 req && req->next;
1218 req = req->next)
1219 ;
1220 req->next = new_req;
1221 }
1222 else
1223 displaced->step_request_queue = new_req;
1224
1225 return 0;
1226 }
1227 else
1228 {
1229 if (debug_displaced)
1230 fprintf_unfiltered (gdb_stdlog,
1231 "displaced: stepping %s now\n",
1232 target_pid_to_str (ptid));
1233 }
1234
1235 displaced_step_clear (displaced);
1236
1237 old_cleanups = save_inferior_ptid ();
1238 inferior_ptid = ptid;
1239
1240 original = regcache_read_pc (regcache);
1241
1242 copy = gdbarch_displaced_step_location (gdbarch);
1243 len = gdbarch_max_insn_length (gdbarch);
1244
1245 /* Save the original contents of the copy area. */
1246 displaced->step_saved_copy = xmalloc (len);
1247 ignore_cleanups = make_cleanup (free_current_contents,
1248 &displaced->step_saved_copy);
1249 read_memory (copy, displaced->step_saved_copy, len);
1250 if (debug_displaced)
1251 {
1252 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1253 paddress (gdbarch, copy));
1254 displaced_step_dump_bytes (gdb_stdlog,
1255 displaced->step_saved_copy,
1256 len);
1257 }
1258
1259 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1260 original, copy, regcache);
1261
1262 /* We don't support the fully-simulated case at present. */
1263 gdb_assert (closure);
1264
1265 /* Save the information we need to fix things up if the step
1266 succeeds. */
1267 displaced->step_ptid = ptid;
1268 displaced->step_gdbarch = gdbarch;
1269 displaced->step_closure = closure;
1270 displaced->step_original = original;
1271 displaced->step_copy = copy;
1272
1273 make_cleanup (displaced_step_clear_cleanup, displaced);
1274
1275 /* Resume execution at the copy. */
1276 regcache_write_pc (regcache, copy);
1277
1278 discard_cleanups (ignore_cleanups);
1279
1280 do_cleanups (old_cleanups);
1281
1282 if (debug_displaced)
1283 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1284 paddress (gdbarch, copy));
1285
1286 return 1;
1287 }
1288
1289 static void
1290 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
1291 {
1292 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1293
1294 inferior_ptid = ptid;
1295 write_memory (memaddr, myaddr, len);
1296 do_cleanups (ptid_cleanup);
1297 }
1298
1299 static void
1300 displaced_step_fixup (ptid_t event_ptid, enum target_signal signal)
1301 {
1302 struct cleanup *old_cleanups;
1303 struct displaced_step_inferior_state *displaced
1304 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1305
1306 /* Was any thread of this process doing a displaced step? */
1307 if (displaced == NULL)
1308 return;
1309
1310 /* Was this event for the pid we displaced? */
1311 if (ptid_equal (displaced->step_ptid, null_ptid)
1312 || ! ptid_equal (displaced->step_ptid, event_ptid))
1313 return;
1314
1315 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1316
1317 /* Restore the contents of the copy area. */
1318 {
1319 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1320
1321 write_memory_ptid (displaced->step_ptid, displaced->step_copy,
1322 displaced->step_saved_copy, len);
1323 if (debug_displaced)
1324 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s\n",
1325 paddress (displaced->step_gdbarch,
1326 displaced->step_copy));
1327 }
1328
1329 /* Did the instruction complete successfully? */
1330 if (signal == TARGET_SIGNAL_TRAP)
1331 {
1332 /* Fix up the resulting state. */
1333 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1334 displaced->step_closure,
1335 displaced->step_original,
1336 displaced->step_copy,
1337 get_thread_regcache (displaced->step_ptid));
1338 }
1339 else
1340 {
1341 /* Since the instruction didn't complete, all we can do is
1342 relocate the PC. */
1343 struct regcache *regcache = get_thread_regcache (event_ptid);
1344 CORE_ADDR pc = regcache_read_pc (regcache);
1345
1346 pc = displaced->step_original + (pc - displaced->step_copy);
1347 regcache_write_pc (regcache, pc);
1348 }
1349
1350 do_cleanups (old_cleanups);
1351
1352 displaced->step_ptid = null_ptid;
1353
1354 /* Are there any pending displaced stepping requests? If so, run
1355 one now. Leave the state object around, since we're likely to
1356 need it again soon. */
1357 while (displaced->step_request_queue)
1358 {
1359 struct displaced_step_request *head;
1360 ptid_t ptid;
1361 struct regcache *regcache;
1362 struct gdbarch *gdbarch;
1363 CORE_ADDR actual_pc;
1364 struct address_space *aspace;
1365
1366 head = displaced->step_request_queue;
1367 ptid = head->ptid;
1368 displaced->step_request_queue = head->next;
1369 xfree (head);
1370
1371 context_switch (ptid);
1372
1373 regcache = get_thread_regcache (ptid);
1374 actual_pc = regcache_read_pc (regcache);
1375 aspace = get_regcache_aspace (regcache);
1376
1377 if (breakpoint_here_p (aspace, actual_pc))
1378 {
1379 if (debug_displaced)
1380 fprintf_unfiltered (gdb_stdlog,
1381 "displaced: stepping queued %s now\n",
1382 target_pid_to_str (ptid));
1383
1384 displaced_step_prepare (ptid);
1385
1386 gdbarch = get_regcache_arch (regcache);
1387
1388 if (debug_displaced)
1389 {
1390 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1391 gdb_byte buf[4];
1392
1393 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1394 paddress (gdbarch, actual_pc));
1395 read_memory (actual_pc, buf, sizeof (buf));
1396 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1397 }
1398
1399 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1400 displaced->step_closure))
1401 target_resume (ptid, 1, TARGET_SIGNAL_0);
1402 else
1403 target_resume (ptid, 0, TARGET_SIGNAL_0);
1404
1405 /* Done, we're stepping a thread. */
1406 break;
1407 }
1408 else
1409 {
1410 int step;
1411 struct thread_info *tp = inferior_thread ();
1412
1413 /* The breakpoint we were sitting under has since been
1414 removed. */
1415 tp->trap_expected = 0;
1416
1417 /* Go back to what we were trying to do. */
1418 step = currently_stepping (tp);
1419
1420 if (debug_displaced)
1421 fprintf_unfiltered (gdb_stdlog, "breakpoint is gone %s: step(%d)\n",
1422 target_pid_to_str (tp->ptid), step);
1423
1424 target_resume (ptid, step, TARGET_SIGNAL_0);
1425 tp->stop_signal = TARGET_SIGNAL_0;
1426
1427 /* This request was discarded. See if there's any other
1428 thread waiting for its turn. */
1429 }
1430 }
1431 }
1432
1433 /* Update global variables holding ptids to hold NEW_PTID if they were
1434 holding OLD_PTID. */
1435 static void
1436 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1437 {
1438 struct displaced_step_request *it;
1439 struct displaced_step_inferior_state *displaced;
1440
1441 if (ptid_equal (inferior_ptid, old_ptid))
1442 inferior_ptid = new_ptid;
1443
1444 if (ptid_equal (singlestep_ptid, old_ptid))
1445 singlestep_ptid = new_ptid;
1446
1447 if (ptid_equal (deferred_step_ptid, old_ptid))
1448 deferred_step_ptid = new_ptid;
1449
1450 for (displaced = displaced_step_inferior_states;
1451 displaced;
1452 displaced = displaced->next)
1453 {
1454 if (ptid_equal (displaced->step_ptid, old_ptid))
1455 displaced->step_ptid = new_ptid;
1456
1457 for (it = displaced->step_request_queue; it; it = it->next)
1458 if (ptid_equal (it->ptid, old_ptid))
1459 it->ptid = new_ptid;
1460 }
1461 }
1462
1463 \f
1464 /* Resuming. */
1465
1466 /* Things to clean up if we QUIT out of resume (). */
1467 static void
1468 resume_cleanups (void *ignore)
1469 {
1470 normal_stop ();
1471 }
1472
1473 static const char schedlock_off[] = "off";
1474 static const char schedlock_on[] = "on";
1475 static const char schedlock_step[] = "step";
1476 static const char *scheduler_enums[] = {
1477 schedlock_off,
1478 schedlock_on,
1479 schedlock_step,
1480 NULL
1481 };
1482 static const char *scheduler_mode = schedlock_off;
1483 static void
1484 show_scheduler_mode (struct ui_file *file, int from_tty,
1485 struct cmd_list_element *c, const char *value)
1486 {
1487 fprintf_filtered (file, _("\
1488 Mode for locking scheduler during execution is \"%s\".\n"),
1489 value);
1490 }
1491
1492 static void
1493 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1494 {
1495 if (!target_can_lock_scheduler)
1496 {
1497 scheduler_mode = schedlock_off;
1498 error (_("Target '%s' cannot support this command."), target_shortname);
1499 }
1500 }
1501
1502 /* True if execution commands resume all threads of all processes by
1503 default; otherwise, resume only threads of the current inferior
1504 process. */
1505 int sched_multi = 0;
1506
1507 /* Try to setup for software single stepping over the specified location.
1508 Return 1 if target_resume() should use hardware single step.
1509
1510 GDBARCH the current gdbarch.
1511 PC the location to step over. */
1512
1513 static int
1514 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1515 {
1516 int hw_step = 1;
1517
1518 if (gdbarch_software_single_step_p (gdbarch)
1519 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1520 {
1521 hw_step = 0;
1522 /* Do not pull these breakpoints until after a `wait' in
1523 `wait_for_inferior' */
1524 singlestep_breakpoints_inserted_p = 1;
1525 singlestep_ptid = inferior_ptid;
1526 singlestep_pc = pc;
1527 }
1528 return hw_step;
1529 }
1530
1531 /* Resume the inferior, but allow a QUIT. This is useful if the user
1532 wants to interrupt some lengthy single-stepping operation
1533 (for child processes, the SIGINT goes to the inferior, and so
1534 we get a SIGINT random_signal, but for remote debugging and perhaps
1535 other targets, that's not true).
1536
1537 STEP nonzero if we should step (zero to continue instead).
1538 SIG is the signal to give the inferior (zero for none). */
1539 void
1540 resume (int step, enum target_signal sig)
1541 {
1542 int should_resume = 1;
1543 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1544 struct regcache *regcache = get_current_regcache ();
1545 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1546 struct thread_info *tp = inferior_thread ();
1547 CORE_ADDR pc = regcache_read_pc (regcache);
1548 struct address_space *aspace = get_regcache_aspace (regcache);
1549
1550 QUIT;
1551
1552 if (debug_infrun)
1553 fprintf_unfiltered (gdb_stdlog,
1554 "infrun: resume (step=%d, signal=%d), "
1555 "trap_expected=%d\n",
1556 step, sig, tp->trap_expected);
1557
1558 /* Normally, by the time we reach `resume', the breakpoints are either
1559 removed or inserted, as appropriate. The exception is if we're sitting
1560 at a permanent breakpoint; we need to step over it, but permanent
1561 breakpoints can't be removed. So we have to test for it here. */
1562 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
1563 {
1564 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1565 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1566 else
1567 error (_("\
1568 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1569 how to step past a permanent breakpoint on this architecture. Try using\n\
1570 a command like `return' or `jump' to continue execution."));
1571 }
1572
1573 /* If enabled, step over breakpoints by executing a copy of the
1574 instruction at a different address.
1575
1576 We can't use displaced stepping when we have a signal to deliver;
1577 the comments for displaced_step_prepare explain why. The
1578 comments in the handle_inferior event for dealing with 'random
1579 signals' explain what we do instead. */
1580 if (use_displaced_stepping (gdbarch)
1581 && (tp->trap_expected
1582 || (step && gdbarch_software_single_step_p (gdbarch)))
1583 && sig == TARGET_SIGNAL_0)
1584 {
1585 struct displaced_step_inferior_state *displaced;
1586
1587 if (!displaced_step_prepare (inferior_ptid))
1588 {
1589 /* Got placed in displaced stepping queue. Will be resumed
1590 later when all the currently queued displaced stepping
1591 requests finish. The thread is not executing at this point,
1592 and the call to set_executing will be made later. But we
1593 need to call set_running here, since from frontend point of view,
1594 the thread is running. */
1595 set_running (inferior_ptid, 1);
1596 discard_cleanups (old_cleanups);
1597 return;
1598 }
1599
1600 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1601 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
1602 displaced->step_closure);
1603 }
1604
1605 /* Do we need to do it the hard way, w/temp breakpoints? */
1606 else if (step)
1607 step = maybe_software_singlestep (gdbarch, pc);
1608
1609 if (should_resume)
1610 {
1611 ptid_t resume_ptid;
1612
1613 /* If STEP is set, it's a request to use hardware stepping
1614 facilities. But in that case, we should never
1615 use a singlestep breakpoint. */
1616 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1617
1618 /* Decide the set of threads to ask the target to resume. Start
1619 by assuming everything will be resumed, then narrow the set
1620 by applying increasingly restrictive conditions. */
1621
1622 /* By default, resume all threads of all processes. */
1623 resume_ptid = RESUME_ALL;
1624
1625 /* Maybe resume only all threads of the current process. */
1626 if (!sched_multi && target_supports_multi_process ())
1627 {
1628 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1629 }
1630
1631 /* Maybe resume a single thread after all. */
1632 if (singlestep_breakpoints_inserted_p
1633 && stepping_past_singlestep_breakpoint)
1634 {
1635 /* The situation here is as follows. In thread T1 we wanted to
1636 single-step. Lacking hardware single-stepping we've
1637 set a breakpoint at the PC of the next instruction -- call it
1638 P. After resuming, we've hit that breakpoint in thread T2.
1639 Now we've removed the original breakpoint, inserted a breakpoint
1640 at P+1, and try to step to advance T2 past the breakpoint.
1641 We need to step only T2, as if T1 is allowed to freely run,
1642 it can run past P, and if other threads are allowed to run,
1643 they can hit the breakpoint at P+1, and nested hits of single-step
1644 breakpoints are not something we'd want -- that's complicated
1645 to support, and has no value. */
1646 resume_ptid = inferior_ptid;
1647 }
1648 else if ((step || singlestep_breakpoints_inserted_p)
1649 && tp->trap_expected)
1650 {
1651 /* We're allowing a thread to run past a breakpoint it has
1652 hit, by single-stepping the thread with the breakpoint
1653 removed. In which case, we need to single-step only this
1654 thread, and keep others stopped, as they can miss this
1655 breakpoint if allowed to run.
1656
1657 The current code actually removes all breakpoints when
1658 doing this, not just the one being stepped over, so if we
1659 let other threads run, we can actually miss any
1660 breakpoint, not just the one at PC. */
1661 resume_ptid = inferior_ptid;
1662 }
1663 else if (non_stop)
1664 {
1665 /* With non-stop mode on, threads are always handled
1666 individually. */
1667 resume_ptid = inferior_ptid;
1668 }
1669 else if ((scheduler_mode == schedlock_on)
1670 || (scheduler_mode == schedlock_step
1671 && (step || singlestep_breakpoints_inserted_p)))
1672 {
1673 /* User-settable 'scheduler' mode requires solo thread resume. */
1674 resume_ptid = inferior_ptid;
1675 }
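/* In short, the ladder above narrows RESUME_PTID from "everything"
   down to a single thread:
     RESUME_ALL                    - default: all threads of all processes
     pid_to_ptid (current process) - schedule-multiple off, on a target
                                     that supports multiple processes
     inferior_ptid                 - stepping past an (ordinary or
                                     single-step) breakpoint, non-stop
                                     mode, or scheduler locking.  */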
1676
1677 if (gdbarch_cannot_step_breakpoint (gdbarch))
1678 {
1679 /* Most targets can step a breakpoint instruction, thus
1680 executing it normally. But if this one cannot, just
1681 continue and we will hit it anyway. */
1682 if (step && breakpoint_inserted_here_p (aspace, pc))
1683 step = 0;
1684 }
1685
1686 if (debug_displaced
1687 && use_displaced_stepping (gdbarch)
1688 && tp->trap_expected)
1689 {
1690 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1691 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1692 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1693 gdb_byte buf[4];
1694
1695 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1696 paddress (resume_gdbarch, actual_pc));
1697 read_memory (actual_pc, buf, sizeof (buf));
1698 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1699 }
1700
1701 /* Install inferior's terminal modes. */
1702 target_terminal_inferior ();
1703
1704 /* Avoid confusing the next resume, if the next stop/resume
1705 happens to apply to another thread. */
1706 tp->stop_signal = TARGET_SIGNAL_0;
1707
1708 target_resume (resume_ptid, step, sig);
1709 }
1710
1711 discard_cleanups (old_cleanups);
1712 }
1713 \f
1714 /* Proceeding. */
1715
1716 /* Clear out all variables saying what to do when inferior is continued.
1717 First do this, then set the ones you want, then call `proceed'. */
1718
1719 static void
1720 clear_proceed_status_thread (struct thread_info *tp)
1721 {
1722 if (debug_infrun)
1723 fprintf_unfiltered (gdb_stdlog,
1724 "infrun: clear_proceed_status_thread (%s)\n",
1725 target_pid_to_str (tp->ptid));
1726
1727 tp->trap_expected = 0;
1728 tp->step_range_start = 0;
1729 tp->step_range_end = 0;
1730 tp->step_frame_id = null_frame_id;
1731 tp->step_stack_frame_id = null_frame_id;
1732 tp->step_over_calls = STEP_OVER_UNDEBUGGABLE;
1733 tp->stop_requested = 0;
1734
1735 tp->stop_step = 0;
1736
1737 tp->proceed_to_finish = 0;
1738
1739 /* Discard any remaining commands or status from previous stop. */
1740 bpstat_clear (&tp->stop_bpstat);
1741 }
1742
1743 static int
1744 clear_proceed_status_callback (struct thread_info *tp, void *data)
1745 {
1746 if (is_exited (tp->ptid))
1747 return 0;
1748
1749 clear_proceed_status_thread (tp);
1750 return 0;
1751 }
1752
1753 void
1754 clear_proceed_status (void)
1755 {
1756 if (!non_stop)
1757 {
1758 /* In all-stop mode, delete the per-thread status of all
1759 threads, even if inferior_ptid is null_ptid, there may be
1760 threads on the list. E.g., we may be launching a new
1761 process, while selecting the executable. */
1762 iterate_over_threads (clear_proceed_status_callback, NULL);
1763 }
1764
1765 if (!ptid_equal (inferior_ptid, null_ptid))
1766 {
1767 struct inferior *inferior;
1768
1769 if (non_stop)
1770 {
1771 /* If in non-stop mode, only delete the per-thread status of
1772 the current thread. */
1773 clear_proceed_status_thread (inferior_thread ());
1774 }
1775
1776 inferior = current_inferior ();
1777 inferior->stop_soon = NO_STOP_QUIETLY;
1778 }
1779
1780 stop_after_trap = 0;
1781
1782 observer_notify_about_to_proceed ();
1783
1784 if (stop_registers)
1785 {
1786 regcache_xfree (stop_registers);
1787 stop_registers = NULL;
1788 }
1789 }
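/* A minimal sketch of the calling protocol described above, roughly as
   a "continue"-style command uses it.  The wrapper name below is
   hypothetical; the two calls mirror how GDB's CLI commands drive this
   file.  */

static void
example_continue_where_stopped (void)
{
  /* First reset all per-thread "what to do on resume" state...  */
  clear_proceed_status ();

  /* ...then, if stepping, set the current thread's step_range_start,
     step_range_end, step_frame_id, etc. here...  */

  /* ...and finally resume at the stop PC ((CORE_ADDR) -1), with the
     default signal disposition, without single-stepping.  */
  proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);
}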
1790
1791 /* Check the current thread against the thread that reported the most recent
1792 event. If a step-over is required, return TRUE and set the current thread
1793 to the old thread. Otherwise return FALSE.
1794
1795 This should be suitable for any targets that support threads. */
1796
1797 static int
1798 prepare_to_proceed (int step)
1799 {
1800 ptid_t wait_ptid;
1801 struct target_waitstatus wait_status;
1802 int schedlock_enabled;
1803
1804 /* With non-stop mode on, threads are always handled individually. */
1805 gdb_assert (! non_stop);
1806
1807 /* Get the last target status returned by target_wait(). */
1808 get_last_target_status (&wait_ptid, &wait_status);
1809
1810 /* Make sure we were stopped at a breakpoint. */
1811 if (wait_status.kind != TARGET_WAITKIND_STOPPED
1812 || (wait_status.value.sig != TARGET_SIGNAL_TRAP
1813 && wait_status.value.sig != TARGET_SIGNAL_ILL
1814 && wait_status.value.sig != TARGET_SIGNAL_SEGV
1815 && wait_status.value.sig != TARGET_SIGNAL_EMT))
1816 {
1817 return 0;
1818 }
1819
1820 schedlock_enabled = (scheduler_mode == schedlock_on
1821 || (scheduler_mode == schedlock_step
1822 && step));
1823
1824 /* Don't switch over to WAIT_PTID if scheduler locking is on. */
1825 if (schedlock_enabled)
1826 return 0;
1827
1828 /* Don't switch over if we're about to resume some other process
1829 other than WAIT_PTID's, and schedule-multiple is off. */
1830 if (!sched_multi
1831 && ptid_get_pid (wait_ptid) != ptid_get_pid (inferior_ptid))
1832 return 0;
1833
1834 /* Switched over from WAIT_PTID. */
1835 if (!ptid_equal (wait_ptid, minus_one_ptid)
1836 && !ptid_equal (inferior_ptid, wait_ptid))
1837 {
1838 struct regcache *regcache = get_thread_regcache (wait_ptid);
1839
1840 if (breakpoint_here_p (get_regcache_aspace (regcache),
1841 regcache_read_pc (regcache)))
1842 {
1843 /* If stepping, remember current thread to switch back to. */
1844 if (step)
1845 deferred_step_ptid = inferior_ptid;
1846
1847 /* Switch back to the WAIT_PTID thread. */
1848 switch_to_thread (wait_ptid);
1849
1850 /* We return 1 to indicate that there is a breakpoint here,
1851 so we need to step over it before continuing to avoid
1852 hitting it straight away. */
1853 return 1;
1854 }
1855 }
1856
1857 return 0;
1858 }
1859
1860 /* Basic routine for continuing the program in various fashions.
1861
1862 ADDR is the address to resume at, or -1 for resume where stopped.
1863 SIGGNAL is the signal to give it, or 0 for none,
1864 or -1 to act according to how it stopped.
1865 STEP is nonzero if we should trap after one instruction.
1866 -1 means return after that and print nothing.
1867 You should probably set various step_... variables
1868 before calling here, if you are stepping.
1869
1870 You should call clear_proceed_status before calling proceed. */
1871
1872 void
1873 proceed (CORE_ADDR addr, enum target_signal siggnal, int step)
1874 {
1875 struct regcache *regcache;
1876 struct gdbarch *gdbarch;
1877 struct thread_info *tp;
1878 CORE_ADDR pc;
1879 struct address_space *aspace;
1880 int oneproc = 0;
1881
1882 /* If we're stopped at a fork/vfork, follow the branch set by the
1883 "set follow-fork-mode" command; otherwise, we'll just proceed
1884 resuming the current thread. */
1885 if (!follow_fork ())
1886 {
1887 /* The target for some reason decided not to resume. */
1888 normal_stop ();
1889 return;
1890 }
1891
1892 regcache = get_current_regcache ();
1893 gdbarch = get_regcache_arch (regcache);
1894 aspace = get_regcache_aspace (regcache);
1895 pc = regcache_read_pc (regcache);
1896
1897 if (step > 0)
1898 step_start_function = find_pc_function (pc);
1899 if (step < 0)
1900 stop_after_trap = 1;
1901
1902 if (addr == (CORE_ADDR) -1)
1903 {
1904 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
1905 && execution_direction != EXEC_REVERSE)
1906 /* There is a breakpoint at the address we will resume at,
1907 step one instruction before inserting breakpoints so that
1908 we do not stop right away (and report a second hit at this
1909 breakpoint).
1910
1911 Note, we don't do this in reverse, because we won't
1912 actually be executing the breakpoint insn anyway.
1913 We'll be (un-)executing the previous instruction. */
1914
1915 oneproc = 1;
1916 else if (gdbarch_single_step_through_delay_p (gdbarch)
1917 && gdbarch_single_step_through_delay (gdbarch,
1918 get_current_frame ()))
1919 /* We stepped onto an instruction that needs to be stepped
1920 again before re-inserting the breakpoint, do so. */
1921 oneproc = 1;
1922 }
1923 else
1924 {
1925 regcache_write_pc (regcache, addr);
1926 }
1927
1928 if (debug_infrun)
1929 fprintf_unfiltered (gdb_stdlog,
1930 "infrun: proceed (addr=%s, signal=%d, step=%d)\n",
1931 paddress (gdbarch, addr), siggnal, step);
1932
1933 /* We're handling a live event, so make sure we're doing live
1934 debugging. If we're looking at traceframes while the target is
1935 running, we're going to need to get back to that mode after
1936 handling the event. */
1937 if (non_stop)
1938 {
1939 make_cleanup_restore_current_traceframe ();
1940 set_traceframe_number (-1);
1941 }
1942
1943 if (non_stop)
1944 /* In non-stop, each thread is handled individually. The context
1945 must already be set to the right thread here. */
1946 ;
1947 else
1948 {
1949 /* In a multi-threaded task we may select another thread and
1950 then continue or step.
1951
1952 But if the old thread was stopped at a breakpoint, it will
1953 immediately cause another breakpoint stop without any
1954 execution (i.e. it will report a breakpoint hit incorrectly).
1955 So we must step over it first.
1956
1957 prepare_to_proceed checks the current thread against the
1958 thread that reported the most recent event. If a step-over
1959 is required it returns TRUE and sets the current thread to
1960 the old thread. */
1961 if (prepare_to_proceed (step))
1962 oneproc = 1;
1963 }
1964
1965 /* prepare_to_proceed may change the current thread. */
1966 tp = inferior_thread ();
1967
1968 if (oneproc)
1969 {
1970 tp->trap_expected = 1;
1971 /* If displaced stepping is enabled, we can step over the
1972 breakpoint without hitting it, so leave all breakpoints
1973 inserted. Otherwise we need to disable all breakpoints, step
1974 one instruction, and then re-add them when that step is
1975 finished. */
1976 if (!use_displaced_stepping (gdbarch))
1977 remove_breakpoints ();
1978 }
1979
1980 /* We can insert breakpoints if we're not trying to step over one,
1981 or if we are stepping over one but we're using displaced stepping
1982 to do so. */
1983 if (! tp->trap_expected || use_displaced_stepping (gdbarch))
1984 insert_breakpoints ();
1985
1986 if (!non_stop)
1987 {
1988 /* Pass the last stop signal to the thread we're resuming,
1989 irrespective of whether the current thread is the thread that
1990 got the last event or not. This was historically GDB's
1991 behaviour before keeping a stop_signal per thread. */
1992
1993 struct thread_info *last_thread;
1994 ptid_t last_ptid;
1995 struct target_waitstatus last_status;
1996
1997 get_last_target_status (&last_ptid, &last_status);
1998 if (!ptid_equal (inferior_ptid, last_ptid)
1999 && !ptid_equal (last_ptid, null_ptid)
2000 && !ptid_equal (last_ptid, minus_one_ptid))
2001 {
2002 last_thread = find_thread_ptid (last_ptid);
2003 if (last_thread)
2004 {
2005 tp->stop_signal = last_thread->stop_signal;
2006 last_thread->stop_signal = TARGET_SIGNAL_0;
2007 }
2008 }
2009 }
2010
2011 if (siggnal != TARGET_SIGNAL_DEFAULT)
2012 tp->stop_signal = siggnal;
2013 /* If this signal should not be seen by program,
2014 give it zero. Used for debugging signals. */
2015 else if (!signal_program[tp->stop_signal])
2016 tp->stop_signal = TARGET_SIGNAL_0;
2017
2018 annotate_starting ();
2019
2020 /* Make sure that output from GDB appears before output from the
2021 inferior. */
2022 gdb_flush (gdb_stdout);
2023
2024 /* Refresh prev_pc value just prior to resuming. This used to be
2025 done in stop_stepping, however, setting prev_pc there did not handle
2026 scenarios such as inferior function calls or returning from
2027 a function via the return command. In those cases, the prev_pc
2028 value was not set properly for subsequent commands. The prev_pc value
2029 is used to initialize the starting line number in the ecs. With an
2030 invalid value, the gdb next command ends up stopping at the position
2031 represented by the next line table entry past our start position.
2032 On platforms that generate one line table entry per line, this
2033 is not a problem. However, on the ia64, the compiler generates
2034 extraneous line table entries that do not increase the line number.
2035 When we issue the gdb next command on the ia64 after an inferior call
2036 or a return command, we often end up a few instructions forward, still
2037 within the original line we started in.
2038
2039 An attempt was made to refresh the prev_pc at the same time the
2040 execution_control_state is initialized (for instance, just before
2041 waiting for an inferior event). But this approach did not work
2042 because of platforms that use ptrace, where the pc register cannot
2043 be read unless the inferior is stopped. At that point, we are not
2044 guaranteed the inferior is stopped and so the regcache_read_pc() call
2045 can fail. Setting the prev_pc value here ensures the value is updated
2046 correctly when the inferior is stopped. */
2047 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2048
2049 /* Fill in with reasonable starting values. */
2050 init_thread_stepping_state (tp);
2051
2052 /* Reset to normal state. */
2053 init_infwait_state ();
2054
2055 /* Resume inferior. */
2056 resume (oneproc || step || bpstat_should_step (), tp->stop_signal);
2057
2058 /* Wait for it to stop (if not standalone)
2059 and in any case decode why it stopped, and act accordingly. */
2060 /* Do this only if we are not using the event loop, or if the target
2061 does not support asynchronous execution. */
2062 if (!target_can_async_p ())
2063 {
2064 wait_for_inferior (0);
2065 normal_stop ();
2066 }
2067 }
2068 \f
2069
2070 /* Start remote-debugging of a machine over a serial link. */
2071
2072 void
2073 start_remote (int from_tty)
2074 {
2075 struct inferior *inferior;
2076
2077 init_wait_for_inferior ();
2078 inferior = current_inferior ();
2079 inferior->stop_soon = STOP_QUIETLY_REMOTE;
2080
2081 /* Always go on waiting for the target, regardless of the mode. */
2082 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2083 indicate to wait_for_inferior that a target should timeout if
2084 nothing is returned (instead of just blocking). Because of this,
2085 targets expecting an immediate response need to, internally, set
2086 things up so that the target_wait() is forced to eventually
2087 timeout. */
2088 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2089 differentiate to its caller what the state of the target is after
2090 the initial open has been performed. Here we're assuming that
2091 the target has stopped. It should be possible to eventually have
2092 target_open() return to the caller an indication that the target
2093 is currently running and GDB state should be set to the same as
2094 for an async run. */
2095 wait_for_inferior (0);
2096
2097 /* Now that the inferior has stopped, do any bookkeeping like
2098 loading shared libraries. We want to do this before normal_stop,
2099 so that the displayed frame is up to date. */
2100 post_create_inferior (&current_target, from_tty);
2101
2102 normal_stop ();
2103 }
2104
2105 /* Initialize static vars when a new inferior begins. */
2106
2107 void
2108 init_wait_for_inferior (void)
2109 {
2110 /* These are meaningless until the first time through wait_for_inferior. */
2111
2112 breakpoint_init_inferior (inf_starting);
2113
2114 clear_proceed_status ();
2115
2116 stepping_past_singlestep_breakpoint = 0;
2117 deferred_step_ptid = null_ptid;
2118
2119 target_last_wait_ptid = minus_one_ptid;
2120
2121 previous_inferior_ptid = null_ptid;
2122 init_infwait_state ();
2123
2124 /* Discard any skipped inlined frames. */
2125 clear_inline_frame_state (minus_one_ptid);
2126 }
2127
2128 \f
2129 /* This enum encodes possible reasons for doing a target_wait, so that
2130 wfi can call target_wait in one place. (Ultimately the call will be
2131 moved out of the infinite loop entirely.) */
2132
2133 enum infwait_states
2134 {
2135 infwait_normal_state,
2136 infwait_thread_hop_state,
2137 infwait_step_watch_state,
2138 infwait_nonstep_watch_state
2139 };
2140
2141 /* Why did the inferior stop? Used to print the appropriate messages
2142 to the interface from within handle_inferior_event(). */
2143 enum inferior_stop_reason
2144 {
2145 /* Step, next, nexti, stepi finished. */
2146 END_STEPPING_RANGE,
2147 /* Inferior terminated by signal. */
2148 SIGNAL_EXITED,
2149 /* Inferior exited. */
2150 EXITED,
2151 /* Inferior received signal, and user asked to be notified. */
2152 SIGNAL_RECEIVED,
2153 /* Reverse execution -- target ran out of history info. */
2154 NO_HISTORY
2155 };
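/* Roughly, print_stop_reason (declared a few lines below) maps these
   onto the familiar user-visible messages:
     END_STEPPING_RANGE - no CLI message (MI reports "end-stepping-range")
     SIGNAL_EXITED      - "Program terminated with signal ..."
     EXITED             - "Program exited normally." / "... with code ..."
     SIGNAL_RECEIVED    - "Program received signal ..."
     NO_HISTORY         - "No more reverse-execution history."  */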
2156
2157 /* The PTID we'll do a target_wait on. */
2158 ptid_t waiton_ptid;
2159
2160 /* Current inferior wait state. */
2161 enum infwait_states infwait_state;
2162
2163 /* Data to be passed around while handling an event. This data is
2164 discarded between events. */
2165 struct execution_control_state
2166 {
2167 ptid_t ptid;
2168 /* The thread that got the event, if this was a thread event; NULL
2169 otherwise. */
2170 struct thread_info *event_thread;
2171
2172 struct target_waitstatus ws;
2173 int random_signal;
2174 CORE_ADDR stop_func_start;
2175 CORE_ADDR stop_func_end;
2176 char *stop_func_name;
2177 int new_thread_event;
2178 int wait_some_more;
2179 };
2180
2181 static void handle_inferior_event (struct execution_control_state *ecs);
2182
2183 static void handle_step_into_function (struct gdbarch *gdbarch,
2184 struct execution_control_state *ecs);
2185 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2186 struct execution_control_state *ecs);
2187 static void insert_step_resume_breakpoint_at_frame (struct frame_info *step_frame);
2188 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
2189 static void insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
2190 struct symtab_and_line sr_sal,
2191 struct frame_id sr_id);
2192 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
2193
2194 static void stop_stepping (struct execution_control_state *ecs);
2195 static void prepare_to_wait (struct execution_control_state *ecs);
2196 static void keep_going (struct execution_control_state *ecs);
2197 static void print_stop_reason (enum inferior_stop_reason stop_reason,
2198 int stop_info);
2199
2200 /* Callback for iterate over threads. If the thread is stopped, but
2201 the user/frontend doesn't know about that yet, go through
2202 normal_stop, as if the thread had just stopped now. ARG points at
2203 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2204 ptid_is_pid(PTID) is true, applies to all threads of the process
2205 pointed at by PTID. Otherwise, applies only to the thread pointed at
2206 by PTID. */
2207
2208 static int
2209 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2210 {
2211 ptid_t ptid = * (ptid_t *) arg;
2212
2213 if ((ptid_equal (info->ptid, ptid)
2214 || ptid_equal (minus_one_ptid, ptid)
2215 || (ptid_is_pid (ptid)
2216 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2217 && is_running (info->ptid)
2218 && !is_executing (info->ptid))
2219 {
2220 struct cleanup *old_chain;
2221 struct execution_control_state ecss;
2222 struct execution_control_state *ecs = &ecss;
2223
2224 memset (ecs, 0, sizeof (*ecs));
2225
2226 old_chain = make_cleanup_restore_current_thread ();
2227
2228 switch_to_thread (info->ptid);
2229
2230 /* Go through handle_inferior_event/normal_stop, so we always
2231 have consistent output as if the stop event had been
2232 reported. */
2233 ecs->ptid = info->ptid;
2234 ecs->event_thread = find_thread_ptid (info->ptid);
2235 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2236 ecs->ws.value.sig = TARGET_SIGNAL_0;
2237
2238 handle_inferior_event (ecs);
2239
2240 if (!ecs->wait_some_more)
2241 {
2242 struct thread_info *tp;
2243
2244 normal_stop ();
2245
2246 /* Finish off the continuations. The continuations
2247 themselves are responsible for realising the thread
2248 didn't finish what it was supposed to do. */
2249 tp = inferior_thread ();
2250 do_all_intermediate_continuations_thread (tp);
2251 do_all_continuations_thread (tp);
2252 }
2253
2254 do_cleanups (old_chain);
2255 }
2256
2257 return 0;
2258 }
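/* A sketch (hypothetical function name and pid/tid values) of the three
   shapes of PTID the callback above accepts as its filter.  */

static void
example_stop_request_filters (void)
{
  ptid_t every_thread = minus_one_ptid;            /* all threads */
  ptid_t whole_process = pid_to_ptid (1234);       /* all threads of pid 1234 */
  ptid_t single_thread = ptid_build (1234, 0, 7);  /* exactly one thread */

  iterate_over_threads (infrun_thread_stop_requested_callback, &every_thread);
  iterate_over_threads (infrun_thread_stop_requested_callback, &whole_process);
  iterate_over_threads (infrun_thread_stop_requested_callback, &single_thread);
}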
2259
2260 /* This function is attached as a "thread_stop_requested" observer.
2261 Cleanup local state that assumed the PTID was to be resumed, and
2262 report the stop to the frontend. */
2263
2264 static void
2265 infrun_thread_stop_requested (ptid_t ptid)
2266 {
2267 struct displaced_step_inferior_state *displaced;
2268
2269 /* PTID was requested to stop. Remove it from the displaced
2270 stepping queue, so we don't try to resume it automatically. */
2271
2272 for (displaced = displaced_step_inferior_states;
2273 displaced;
2274 displaced = displaced->next)
2275 {
2276 struct displaced_step_request *it, **prev_next_p;
2277
2278 it = displaced->step_request_queue;
2279 prev_next_p = &displaced->step_request_queue;
2280 while (it)
2281 {
2282 if (ptid_match (it->ptid, ptid))
2283 {
2284 *prev_next_p = it->next;
2285 it->next = NULL;
2286 xfree (it);
2287 }
2288 else
2289 {
2290 prev_next_p = &it->next;
2291 }
2292
2293 it = *prev_next_p;
2294 }
2295 }
2296
2297 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2298 }
2299
2300 static void
2301 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2302 {
2303 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2304 nullify_last_target_wait_ptid ();
2305 }
2306
2307 /* Callback for iterate_over_threads. */
2308
2309 static int
2310 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2311 {
2312 if (is_exited (info->ptid))
2313 return 0;
2314
2315 delete_step_resume_breakpoint (info);
2316 return 0;
2317 }
2318
2319 /* In all-stop, delete the step resume breakpoint of any thread that
2320 had one. In non-stop, delete the step resume breakpoint of the
2321 thread that just stopped. */
2322
2323 static void
2324 delete_step_thread_step_resume_breakpoint (void)
2325 {
2326 if (!target_has_execution
2327 || ptid_equal (inferior_ptid, null_ptid))
2328 /* If the inferior has exited, we have already deleted the step
2329 resume breakpoints out of GDB's lists. */
2330 return;
2331
2332 if (non_stop)
2333 {
2334 /* If in non-stop mode, only delete the step-resume or
2335 longjmp-resume breakpoint of the thread that just stopped
2336 stepping. */
2337 struct thread_info *tp = inferior_thread ();
2338
2339 delete_step_resume_breakpoint (tp);
2340 }
2341 else
2342 /* In all-stop mode, delete all step-resume and longjmp-resume
2343 breakpoints of any thread that had them. */
2344 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2345 }
2346
2347 /* A cleanup wrapper. */
2348
2349 static void
2350 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2351 {
2352 delete_step_thread_step_resume_breakpoint ();
2353 }
2354
2355 /* Pretty print the results of target_wait, for debugging purposes. */
2356
2357 static void
2358 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2359 const struct target_waitstatus *ws)
2360 {
2361 char *status_string = target_waitstatus_to_string (ws);
2362 struct ui_file *tmp_stream = mem_fileopen ();
2363 char *text;
2364
2365 /* The text is split over several lines because it was getting too long.
2366 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2367 output as a unit; we want only one timestamp printed if debug_timestamp
2368 is set. */
2369
2370 fprintf_unfiltered (tmp_stream,
2371 "infrun: target_wait (%d", PIDGET (waiton_ptid));
2372 if (PIDGET (waiton_ptid) != -1)
2373 fprintf_unfiltered (tmp_stream,
2374 " [%s]", target_pid_to_str (waiton_ptid));
2375 fprintf_unfiltered (tmp_stream, ", status) =\n");
2376 fprintf_unfiltered (tmp_stream,
2377 "infrun: %d [%s],\n",
2378 PIDGET (result_ptid), target_pid_to_str (result_ptid));
2379 fprintf_unfiltered (tmp_stream,
2380 "infrun: %s\n",
2381 status_string);
2382
2383 text = ui_file_xstrdup (tmp_stream, NULL);
2384
2385 /* This uses %s in part to handle %'s in the text, but also to avoid
2386 a gcc error: the format attribute requires a string literal. */
2387 fprintf_unfiltered (gdb_stdlog, "%s", text);
2388
2389 xfree (status_string);
2390 xfree (text);
2391 ui_file_delete (tmp_stream);
2392 }
2393
2394 /* Prepare and stabilize the inferior for detaching it. E.g.,
2395 detaching while a thread is displaced stepping is a recipe for
2396 crashing it, as nothing would readjust the PC out of the scratch
2397 pad. */
2398
2399 void
2400 prepare_for_detach (void)
2401 {
2402 struct inferior *inf = current_inferior ();
2403 ptid_t pid_ptid = pid_to_ptid (inf->pid);
2404 struct cleanup *old_chain_1;
2405 struct displaced_step_inferior_state *displaced;
2406
2407 displaced = get_displaced_stepping_state (inf->pid);
2408
2409 /* Is any thread of this process displaced stepping? If not,
2410 there's nothing else to do. */
2411 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
2412 return;
2413
2414 if (debug_infrun)
2415 fprintf_unfiltered (gdb_stdlog,
2416 "displaced-stepping in-process while detaching");
2417
2418 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
2419 inf->detaching = 1;
2420
2421 while (!ptid_equal (displaced->step_ptid, null_ptid))
2422 {
2423 struct cleanup *old_chain_2;
2424 struct execution_control_state ecss;
2425 struct execution_control_state *ecs;
2426
2427 ecs = &ecss;
2428 memset (ecs, 0, sizeof (*ecs));
2429
2430 overlay_cache_invalid = 1;
2431
2432 /* We have to invalidate the registers BEFORE calling
2433 target_wait because they can be loaded from the target while
2434 in target_wait. This makes remote debugging a bit more
2435 efficient for those targets that provide critical registers
2436 as part of their normal status mechanism. */
2437
2438 registers_changed ();
2439
2440 if (deprecated_target_wait_hook)
2441 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
2442 else
2443 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
2444
2445 if (debug_infrun)
2446 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
2447
2448 /* If an error happens while handling the event, propagate GDB's
2449 knowledge of the executing state to the frontend/user running
2450 state. */
2451 old_chain_2 = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2452
2453 /* In non-stop mode, each thread is handled individually.
2454 Switch early, so the global state is set correctly for this
2455 thread. */
2456 if (non_stop
2457 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2458 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2459 context_switch (ecs->ptid);
2460
2461 /* Now figure out what to do with the result. */
2462 handle_inferior_event (ecs);
2463
2464 /* No error, don't finish the state yet. */
2465 discard_cleanups (old_chain_2);
2466
2467 /* Breakpoints and watchpoints are not installed on the target
2468 at this point, and signals are passed directly to the
2469 inferior, so this must mean the process is gone. */
2470 if (!ecs->wait_some_more)
2471 {
2472 discard_cleanups (old_chain_1);
2473 error (_("Program exited while detaching"));
2474 }
2475 }
2476
2477 discard_cleanups (old_chain_1);
2478 }
2479
2480 /* Wait for control to return from inferior to debugger.
2481
2482 If TREAT_EXEC_AS_SIGTRAP is non-zero, then handle EXEC signals
2483 as if they were SIGTRAP signals. This can be useful during
2484 the startup sequence on some targets such as HP/UX, where
2485 we receive an EXEC event instead of the expected SIGTRAP.
2486
2487 If inferior gets a signal, we may decide to start it up again
2488 instead of returning. That is why there is a loop in this function.
2489 When this function actually returns it means the inferior
2490 should be left stopped and GDB should read more commands. */
2491
2492 void
2493 wait_for_inferior (int treat_exec_as_sigtrap)
2494 {
2495 struct cleanup *old_cleanups;
2496 struct execution_control_state ecss;
2497 struct execution_control_state *ecs;
2498
2499 if (debug_infrun)
2500 fprintf_unfiltered
2501 (gdb_stdlog, "infrun: wait_for_inferior (treat_exec_as_sigtrap=%d)\n",
2502 treat_exec_as_sigtrap);
2503
2504 old_cleanups =
2505 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2506
2507 ecs = &ecss;
2508 memset (ecs, 0, sizeof (*ecs));
2509
2510 /* We'll update this if & when we switch to a new thread. */
2511 previous_inferior_ptid = inferior_ptid;
2512
2513 while (1)
2514 {
2515 struct cleanup *old_chain;
2516
2517 /* We have to invalidate the registers BEFORE calling target_wait
2518 because they can be loaded from the target while in target_wait.
2519 This makes remote debugging a bit more efficient for those
2520 targets that provide critical registers as part of their normal
2521 status mechanism. */
2522
2523 overlay_cache_invalid = 1;
2524 registers_changed ();
2525
2526 if (deprecated_target_wait_hook)
2527 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2528 else
2529 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2530
2531 if (debug_infrun)
2532 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2533
2534 if (treat_exec_as_sigtrap && ecs->ws.kind == TARGET_WAITKIND_EXECD)
2535 {
2536 xfree (ecs->ws.value.execd_pathname);
2537 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2538 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
2539 }
2540
2541 /* If an error happens while handling the event, propagate GDB's
2542 knowledge of the executing state to the frontend/user running
2543 state. */
2544 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2545
2546 if (ecs->ws.kind == TARGET_WAITKIND_SYSCALL_ENTRY
2547 || ecs->ws.kind == TARGET_WAITKIND_SYSCALL_RETURN)
2548 ecs->ws.value.syscall_number = UNKNOWN_SYSCALL;
2549
2550 /* Now figure out what to do with the result. */
2551 handle_inferior_event (ecs);
2552
2553 /* No error, don't finish the state yet. */
2554 discard_cleanups (old_chain);
2555
2556 if (!ecs->wait_some_more)
2557 break;
2558 }
2559
2560 do_cleanups (old_cleanups);
2561 }
2562
2563 /* Asynchronous version of wait_for_inferior. It is called by the
2564 event loop whenever a change of state is detected on the file
2565 descriptor corresponding to the target. It can be called more than
2566 once to complete a single execution command. In such cases we need
2567 to keep the state in a global variable ECSS. If it is the last time
2568 that this function is called for a single execution command, then
2569 report to the user that the inferior has stopped, and do the
2570 necessary cleanups. */
2571
2572 void
2573 fetch_inferior_event (void *client_data)
2574 {
2575 struct execution_control_state ecss;
2576 struct execution_control_state *ecs = &ecss;
2577 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2578 struct cleanup *ts_old_chain;
2579 int was_sync = sync_execution;
2580
2581 memset (ecs, 0, sizeof (*ecs));
2582
2583 /* We'll update this if & when we switch to a new thread. */
2584 previous_inferior_ptid = inferior_ptid;
2585
2586 if (non_stop)
2587 /* In non-stop mode, the user/frontend should not notice a thread
2588 switch due to internal events. Make sure we reverse to the
2589 user selected thread and frame after handling the event and
2590 running any breakpoint commands. */
2591 make_cleanup_restore_current_thread ();
2592
2593 /* We have to invalidate the registers BEFORE calling target_wait
2594 because they can be loaded from the target while in target_wait.
2595 This makes remote debugging a bit more efficient for those
2596 targets that provide critical registers as part of their normal
2597 status mechanism. */
2598
2599 overlay_cache_invalid = 1;
2600 registers_changed ();
2601
2602 if (deprecated_target_wait_hook)
2603 ecs->ptid =
2604 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2605 else
2606 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2607
2608 if (debug_infrun)
2609 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2610
2611 if (non_stop
2612 && ecs->ws.kind != TARGET_WAITKIND_IGNORE
2613 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2614 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2615 /* In non-stop mode, each thread is handled individually. Switch
2616 early, so the global state is set correctly for this
2617 thread. */
2618 context_switch (ecs->ptid);
2619
2620 /* If an error happens while handling the event, propagate GDB's
2621 knowledge of the executing state to the frontend/user running
2622 state. */
2623 if (!non_stop)
2624 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2625 else
2626 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2627
2628 /* Now figure out what to do with the result. */
2629 handle_inferior_event (ecs);
2630
2631 if (!ecs->wait_some_more)
2632 {
2633 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2634
2635 delete_step_thread_step_resume_breakpoint ();
2636
2637 /* We may not find an inferior if this was a process exit. */
2638 if (inf == NULL || inf->stop_soon == NO_STOP_QUIETLY)
2639 normal_stop ();
2640
2641 if (target_has_execution
2642 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2643 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2644 && ecs->event_thread->step_multi
2645 && ecs->event_thread->stop_step)
2646 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2647 else
2648 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2649 }
2650
2651 /* No error, don't finish the thread states yet. */
2652 discard_cleanups (ts_old_chain);
2653
2654 /* Revert thread and frame. */
2655 do_cleanups (old_chain);
2656
2657 /* If the inferior was in sync execution mode, and now isn't,
2658 restore the prompt. */
2659 if (was_sync && !sync_execution)
2660 display_gdb_prompt (0);
2661 }
2662
2663 /* Record the frame and location we're currently stepping through. */
2664 void
2665 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2666 {
2667 struct thread_info *tp = inferior_thread ();
2668
2669 tp->step_frame_id = get_frame_id (frame);
2670 tp->step_stack_frame_id = get_stack_frame_id (frame);
2671
2672 tp->current_symtab = sal.symtab;
2673 tp->current_line = sal.line;
2674 }
2675
2676 /* Clear context switchable stepping state. */
2677
2678 void
2679 init_thread_stepping_state (struct thread_info *tss)
2680 {
2681 tss->stepping_over_breakpoint = 0;
2682 tss->step_after_step_resume_breakpoint = 0;
2683 tss->stepping_through_solib_after_catch = 0;
2684 tss->stepping_through_solib_catchpoints = NULL;
2685 }
2686
2687 /* Return the cached copy of the last pid/waitstatus returned by
2688 target_wait()/deprecated_target_wait_hook(). The data is actually
2689 cached by handle_inferior_event(), which gets called immediately
2690 after target_wait()/deprecated_target_wait_hook(). */
2691
2692 void
2693 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
2694 {
2695 *ptidp = target_last_wait_ptid;
2696 *status = target_last_waitstatus;
2697 }
2698
2699 void
2700 nullify_last_target_wait_ptid (void)
2701 {
2702 target_last_wait_ptid = minus_one_ptid;
2703 }
2704
2705 /* Switch thread contexts. */
2706
2707 static void
2708 context_switch (ptid_t ptid)
2709 {
2710 if (debug_infrun)
2711 {
2712 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
2713 target_pid_to_str (inferior_ptid));
2714 fprintf_unfiltered (gdb_stdlog, "to %s\n",
2715 target_pid_to_str (ptid));
2716 }
2717
2718 switch_to_thread (ptid);
2719 }
2720
2721 static void
2722 adjust_pc_after_break (struct execution_control_state *ecs)
2723 {
2724 struct regcache *regcache;
2725 struct gdbarch *gdbarch;
2726 struct address_space *aspace;
2727 CORE_ADDR breakpoint_pc;
2728
2729 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
2730 we aren't, just return.
2731
2732 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
2733 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
2734 implemented by software breakpoints should be handled through the normal
2735 breakpoint layer.
2736
2737 NOTE drow/2004-01-31: On some targets, breakpoints may generate
2738 different signals (SIGILL or SIGEMT for instance), but it is less
2739 clear where the PC is pointing afterwards. It may not match
2740 gdbarch_decr_pc_after_break. I don't know any specific target that
2741 generates these signals at breakpoints (the code has been in GDB since at
2742 least 1992) so I can not guess how to handle them here.
2743
2744 In earlier versions of GDB, a target with
2745 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
2746 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
2747 target with both of these set in GDB history, and it seems unlikely to be
2748 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
2749
2750 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
2751 return;
2752
2753 if (ecs->ws.value.sig != TARGET_SIGNAL_TRAP)
2754 return;
2755
2756 /* In reverse execution, when a breakpoint is hit, the instruction
2757 under it has already been de-executed. The reported PC always
2758 points at the breakpoint address, so adjusting it further would
2759 be wrong. E.g., consider this case on a decr_pc_after_break == 1
2760 architecture:
2761
2762 B1 0x08000000 : INSN1
2763 B2 0x08000001 : INSN2
2764 0x08000002 : INSN3
2765 PC -> 0x08000003 : INSN4
2766
2767 Say you're stopped at 0x08000003 as above. Reverse continuing
2768 from that point should hit B2 as below. Reading the PC when the
2769 SIGTRAP is reported should read 0x08000001 and INSN2 should have
2770 been de-executed already.
2771
2772 B1 0x08000000 : INSN1
2773 B2 PC -> 0x08000001 : INSN2
2774 0x08000002 : INSN3
2775 0x08000003 : INSN4
2776
2777 We can't apply the same logic as for forward execution, because
2778 we would wrongly adjust the PC to 0x08000000, since there's a
2779 breakpoint at PC - 1. We'd then report a hit on B1, although
2780 INSN1 hadn't been de-executed yet. Doing nothing is the correct
2781 behaviour. */
2782 if (execution_direction == EXEC_REVERSE)
2783 return;
2784
2785 /* If this target does not decrement the PC after breakpoints, then
2786 we have nothing to do. */
2787 regcache = get_thread_regcache (ecs->ptid);
2788 gdbarch = get_regcache_arch (regcache);
2789 if (gdbarch_decr_pc_after_break (gdbarch) == 0)
2790 return;
2791
2792 aspace = get_regcache_aspace (regcache);
2793
2794 /* Find the location where (if we've hit a breakpoint) the
2795 breakpoint would be. */
2796 breakpoint_pc = regcache_read_pc (regcache)
2797 - gdbarch_decr_pc_after_break (gdbarch);
2798
2799 /* Check whether there actually is a software breakpoint inserted at
2800 that location.
2801
2802 If in non-stop mode, a race condition is possible where we've
2803 removed a breakpoint, but stop events for that breakpoint were
2804 already queued and arrive later. To suppress those spurious
2805 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
2806 and retire them after a number of stop events are reported. */
2807 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
2808 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
2809 {
2810 struct cleanup *old_cleanups = NULL;
2811
2812 if (RECORD_IS_USED)
2813 old_cleanups = record_gdb_operation_disable_set ();
2814
2815 /* When using hardware single-step, a SIGTRAP is reported for both
2816 a completed single-step and a software breakpoint. Need to
2817 differentiate between the two, as the latter needs adjusting
2818 but the former does not.
2819
2820 The SIGTRAP can be due to a completed hardware single-step only if
2821 - we didn't insert software single-step breakpoints
2822 - the thread to be examined is still the current thread
2823 - this thread is currently being stepped
2824
2825 If any of these events did not occur, we must have stopped due
2826 to hitting a software breakpoint, and have to back up to the
2827 breakpoint address.
2828
2829 As a special case, we could have hardware single-stepped a
2830 software breakpoint. In this case (prev_pc == breakpoint_pc),
2831 we also need to back up to the breakpoint address. */
2832
2833 if (singlestep_breakpoints_inserted_p
2834 || !ptid_equal (ecs->ptid, inferior_ptid)
2835 || !currently_stepping (ecs->event_thread)
2836 || ecs->event_thread->prev_pc == breakpoint_pc)
2837 regcache_write_pc (regcache, breakpoint_pc);
2838
2839 if (RECORD_IS_USED)
2840 do_cleanups (old_cleanups);
2841 }
2842 }
2843
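/* Worked example: on i386/amd64 the breakpoint instruction is the
   one-byte int3 (0xCC) and gdbarch_decr_pc_after_break returns 1.  With
   a breakpoint planted at 0x08048400, the target reports the SIGTRAP
   with PC == 0x08048401, and adjust_pc_after_break computes

       breakpoint_pc = 0x08048401 - 1 == 0x08048400

   finds a software breakpoint inserted there, and rewinds the PC, so
   the stop is attributed to the breakpoint's address.  */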
2844 void
2845 init_infwait_state (void)
2846 {
2847 waiton_ptid = pid_to_ptid (-1);
2848 infwait_state = infwait_normal_state;
2849 }
2850
2851 void
2852 error_is_running (void)
2853 {
2854 error (_("\
2855 Cannot execute this command while the selected thread is running."));
2856 }
2857
2858 void
2859 ensure_not_running (void)
2860 {
2861 if (is_running (inferior_ptid))
2862 error_is_running ();
2863 }
2864
2865 static int
2866 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
2867 {
2868 for (frame = get_prev_frame (frame);
2869 frame != NULL;
2870 frame = get_prev_frame (frame))
2871 {
2872 if (frame_id_eq (get_frame_id (frame), step_frame_id))
2873 return 1;
2874 if (get_frame_type (frame) != INLINE_FRAME)
2875 break;
2876 }
2877
2878 return 0;
2879 }
2880
2881 /* Auxiliary function that handles syscall entry/return events.
2882 It returns 1 if the inferior should keep going (and GDB
2883 should ignore the event), or 0 if the event deserves to be
2884 processed. */
2885
2886 static int
2887 handle_syscall_event (struct execution_control_state *ecs)
2888 {
2889 struct regcache *regcache;
2890 struct gdbarch *gdbarch;
2891 int syscall_number;
2892
2893 if (!ptid_equal (ecs->ptid, inferior_ptid))
2894 context_switch (ecs->ptid);
2895
2896 regcache = get_thread_regcache (ecs->ptid);
2897 gdbarch = get_regcache_arch (regcache);
2898 syscall_number = gdbarch_get_syscall_number (gdbarch, ecs->ptid);
2899 stop_pc = regcache_read_pc (regcache);
2900
2901 target_last_waitstatus.value.syscall_number = syscall_number;
2902
2903 if (catch_syscall_enabled () > 0
2904 && catching_syscall_number (syscall_number) > 0)
2905 {
2906 if (debug_infrun)
2907 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
2908 syscall_number);
2909
2910 ecs->event_thread->stop_bpstat
2911 = bpstat_stop_status (get_regcache_aspace (regcache),
2912 stop_pc, ecs->ptid);
2913 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
2914
2915 if (!ecs->random_signal)
2916 {
2917 /* Catchpoint hit. */
2918 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
2919 return 0;
2920 }
2921 }
2922
2923 /* If no catchpoint triggered for this, then keep going. */
2924 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
2925 keep_going (ecs);
2926 return 1;
2927 }
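/* For reference: catch_syscall_enabled and catching_syscall_number
   above are driven by the user-level "catch syscall [NAME | NUMBER]"
   command.  For example, after

       (gdb) catch syscall close

   catching_syscall_number returns nonzero for close's syscall number,
   so this function reports the stop as a catchpoint hit instead of
   calling keep_going.  */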
2928
2929 /* Given an execution control state that has been freshly filled in
2930 by an event from the inferior, figure out what it means and take
2931 appropriate action. */
2932
2933 static void
2934 handle_inferior_event (struct execution_control_state *ecs)
2935 {
2936 struct frame_info *frame;
2937 struct gdbarch *gdbarch;
2938 int sw_single_step_trap_p = 0;
2939 int stopped_by_watchpoint;
2940 int stepped_after_stopped_by_watchpoint = 0;
2941 struct symtab_and_line stop_pc_sal;
2942 enum stop_kind stop_soon;
2943
2944 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
2945 {
2946 /* We had an event in the inferior, but we are not interested in
2947 handling it at this level. The lower layers have already
2948 done what needs to be done, if anything.
2949
2950 One of the possible circumstances for this is when the
2951 inferior produces output for the console. The inferior has
2952 not stopped, and we are ignoring the event. Another possible
2953 circumstance is any event which the lower level knows will be
2954 reported multiple times without an intervening resume. */
2955 if (debug_infrun)
2956 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
2957 prepare_to_wait (ecs);
2958 return;
2959 }
2960
2961 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
2962 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2963 {
2964 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2965
2966 gdb_assert (inf);
2967 stop_soon = inf->stop_soon;
2968 }
2969 else
2970 stop_soon = NO_STOP_QUIETLY;
2971
2972 /* Cache the last pid/waitstatus. */
2973 target_last_wait_ptid = ecs->ptid;
2974 target_last_waitstatus = ecs->ws;
2975
2976 /* Always clear state belonging to the previous time we stopped. */
2977 stop_stack_dummy = STOP_NONE;
2978
2979 /* If it's a new process, add it to the thread database */
2980
2981 ecs->new_thread_event = (!ptid_equal (ecs->ptid, inferior_ptid)
2982 && !ptid_equal (ecs->ptid, minus_one_ptid)
2983 && !in_thread_list (ecs->ptid));
2984
2985 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
2986 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED && ecs->new_thread_event)
2987 add_thread (ecs->ptid);
2988
2989 ecs->event_thread = find_thread_ptid (ecs->ptid);
2990
2991 /* Dependent on valid ECS->EVENT_THREAD. */
2992 adjust_pc_after_break (ecs);
2993
2994 /* Dependent on the current PC value modified by adjust_pc_after_break. */
2995 reinit_frame_cache ();
2996
2997 breakpoint_retire_moribund ();
2998
2999 /* First, distinguish signals caused by the debugger from signals
3000 that have to do with the program's own actions. Note that
3001 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
3002 on the operating system version. Here we detect when a SIGILL or
3003 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
3004 something similar for SIGSEGV, since a SIGSEGV will be generated
3005 when we're trying to execute a breakpoint instruction on a
3006 non-executable stack. This happens for call dummy breakpoints
3007 for architectures like SPARC that place call dummies on the
3008 stack. */
3009 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
3010 && (ecs->ws.value.sig == TARGET_SIGNAL_ILL
3011 || ecs->ws.value.sig == TARGET_SIGNAL_SEGV
3012 || ecs->ws.value.sig == TARGET_SIGNAL_EMT))
3013 {
3014 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3015
3016 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
3017 regcache_read_pc (regcache)))
3018 {
3019 if (debug_infrun)
3020 fprintf_unfiltered (gdb_stdlog,
3021 "infrun: Treating signal as SIGTRAP\n");
3022 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
3023 }
3024 }
3025
3026 /* Mark the non-executing threads accordingly. In all-stop, all
3027 threads of all processes are stopped when we get any event
3028 reported. In non-stop mode, only the event thread stops. If
3029 we're handling a process exit in non-stop mode, there's nothing
3030 to do, as threads of the dead process are gone, and threads of
3031 any other process were left running. */
3032 if (!non_stop)
3033 set_executing (minus_one_ptid, 0);
3034 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3035 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
3036 set_executing (inferior_ptid, 0);
3037
3038 switch (infwait_state)
3039 {
3040 case infwait_thread_hop_state:
3041 if (debug_infrun)
3042 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_thread_hop_state\n");
3043 break;
3044
3045 case infwait_normal_state:
3046 if (debug_infrun)
3047 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
3048 break;
3049
3050 case infwait_step_watch_state:
3051 if (debug_infrun)
3052 fprintf_unfiltered (gdb_stdlog,
3053 "infrun: infwait_step_watch_state\n");
3054
3055 stepped_after_stopped_by_watchpoint = 1;
3056 break;
3057
3058 case infwait_nonstep_watch_state:
3059 if (debug_infrun)
3060 fprintf_unfiltered (gdb_stdlog,
3061 "infrun: infwait_nonstep_watch_state\n");
3062 insert_breakpoints ();
3063
3064 /* FIXME-maybe: is this cleaner than setting a flag? Does it
3065 handle things like signals arriving and other things happening
3066 in combination correctly? */
3067 stepped_after_stopped_by_watchpoint = 1;
3068 break;
3069
3070 default:
3071 internal_error (__FILE__, __LINE__, _("bad switch"));
3072 }
3073
3074 infwait_state = infwait_normal_state;
3075 waiton_ptid = pid_to_ptid (-1);
3076
3077 switch (ecs->ws.kind)
3078 {
3079 case TARGET_WAITKIND_LOADED:
3080 if (debug_infrun)
3081 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
3082 /* Ignore gracefully during startup of the inferior, as it might
3083 be the shell which has just loaded some objects; otherwise,
3084 add the symbols for the newly loaded objects. Also ignore at
3085 the beginning of an attach or remote session; we will query
3086 the full list of libraries once the connection is
3087 established. */
3088 if (stop_soon == NO_STOP_QUIETLY)
3089 {
3090 /* Check for any newly added shared libraries if we're
3091 supposed to be adding them automatically. Switch
3092 terminal for any messages produced by
3093 breakpoint_re_set. */
3094 target_terminal_ours_for_output ();
3095 /* NOTE: cagney/2003-11-25: Make certain that the target
3096 stack's section table is kept up-to-date. Architectures,
3097 (e.g., PPC64), use the section table to perform
3098 operations such as address => section name and hence
3099 require the table to contain all sections (including
3100 those found in shared libraries). */
3101 #ifdef SOLIB_ADD
3102 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
3103 #else
3104 solib_add (NULL, 0, &current_target, auto_solib_add);
3105 #endif
3106 target_terminal_inferior ();
3107
3108 /* If requested, stop when the dynamic linker notifies
3109 gdb of events. This allows the user to get control
3110 and place breakpoints in initializer routines for
3111 dynamically loaded objects (among other things). */
3112 if (stop_on_solib_events)
3113 {
3114 /* Make sure we print "Stopped due to solib-event" in
3115 normal_stop. */
3116 stop_print_frame = 1;
3117
3118 stop_stepping (ecs);
3119 return;
3120 }
3121
3122 /* NOTE drow/2007-05-11: This might be a good place to check
3123 for "catch load". */
3124 }
3125
3126 /* If we are skipping through a shell, or through shared library
3127 loading that we aren't interested in, resume the program. If
3128 we're running the program normally, also resume. But stop if
3129 we're attaching or setting up a remote connection. */
3130 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3131 {
3132 /* Loading of shared libraries might have changed breakpoint
3133 addresses. Make sure new breakpoints are inserted. */
3134 if (stop_soon == NO_STOP_QUIETLY
3135 && !breakpoints_always_inserted_mode ())
3136 insert_breakpoints ();
3137 resume (0, TARGET_SIGNAL_0);
3138 prepare_to_wait (ecs);
3139 return;
3140 }
3141
3142 break;
3143
3144 case TARGET_WAITKIND_SPURIOUS:
3145 if (debug_infrun)
3146 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3147 resume (0, TARGET_SIGNAL_0);
3148 prepare_to_wait (ecs);
3149 return;
3150
3151 case TARGET_WAITKIND_EXITED:
3152 if (debug_infrun)
3153 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXITED\n");
3154 inferior_ptid = ecs->ptid;
3155 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3156 set_current_program_space (current_inferior ()->pspace);
3157 handle_vfork_child_exec_or_exit (0);
3158 target_terminal_ours (); /* Must do this before mourn anyway */
3159 print_stop_reason (EXITED, ecs->ws.value.integer);
3160
3161 /* Record the exit code in the convenience variable $_exitcode, so
3162 that the user can inspect this again later. */
3163 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3164 (LONGEST) ecs->ws.value.integer);
3165 gdb_flush (gdb_stdout);
3166 target_mourn_inferior ();
3167 singlestep_breakpoints_inserted_p = 0;
3168 cancel_single_step_breakpoints ();
3169 stop_print_frame = 0;
3170 stop_stepping (ecs);
3171 return;
3172
3173 case TARGET_WAITKIND_SIGNALLED:
3174 if (debug_infrun)
3175 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SIGNALLED\n");
3176 inferior_ptid = ecs->ptid;
3177 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3178 set_current_program_space (current_inferior ()->pspace);
3179 handle_vfork_child_exec_or_exit (0);
3180 stop_print_frame = 0;
3181 target_terminal_ours (); /* Must do this before mourn anyway */
3182
3183 /* Note: By definition of TARGET_WAITKIND_SIGNALLED, we shouldn't
3184 reach here unless the inferior is dead. However, for years
3185 target_kill() was called here, which hints that fatal signals aren't
3186 really fatal on some systems. If that's true, then some changes
3187 may be needed. */
3188 target_mourn_inferior ();
3189
3190 print_stop_reason (SIGNAL_EXITED, ecs->ws.value.sig);
3191 singlestep_breakpoints_inserted_p = 0;
3192 cancel_single_step_breakpoints ();
3193 stop_stepping (ecs);
3194 return;
3195
3196 /* The following are the only cases in which we keep going;
3197 the above cases end in a continue or goto. */
3198 case TARGET_WAITKIND_FORKED:
3199 case TARGET_WAITKIND_VFORKED:
3200 if (debug_infrun)
3201 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3202
3203 if (!ptid_equal (ecs->ptid, inferior_ptid))
3204 {
3205 context_switch (ecs->ptid);
3206 reinit_frame_cache ();
3207 }
3208
3209 /* Immediately detach breakpoints from the child before there's
3210 any chance of letting the user delete breakpoints from the
3211 breakpoint lists. If we don't do this early, it's easy to
3212 leave left-over traps in the child, viz: "break foo; catch
3213 fork; c; <fork>; del; c; <child calls foo>". We only follow
3214 the fork on the last `continue', and by that time the
3215 breakpoint at "foo" is long gone from the breakpoint table.
3216 If we vforked, then we don't need to unpatch here, since both
3217 parent and child are sharing the same memory pages; we'll
3218 need to unpatch at follow/detach time instead to be certain
3219 that new breakpoints added between catchpoint hit time and
3220 vfork follow are detached. */
3221 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
3222 {
3223 int child_pid = ptid_get_pid (ecs->ws.value.related_pid);
3224
3225 /* This won't actually modify the breakpoint list, but will
3226 physically remove the breakpoints from the child. */
3227 detach_breakpoints (child_pid);
3228 }
3229
3230 if (singlestep_breakpoints_inserted_p)
3231 {
3232 /* Pull the single step breakpoints out of the target. */
3233 remove_single_step_breakpoints ();
3234 singlestep_breakpoints_inserted_p = 0;
3235 }
3236
3237 /* In case the event is caught by a catchpoint, remember that
3238 the event is to be followed at the next resume of the thread,
3239 and not immediately. */
3240 ecs->event_thread->pending_follow = ecs->ws;
3241
3242 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3243
3244 ecs->event_thread->stop_bpstat
3245 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3246 stop_pc, ecs->ptid);
3247
3248 /* Note that we're interested in knowing whether the bpstat actually
3249 causes a stop, not just if it may explain the signal.
3250 Software watchpoints, for example, always appear in the
3251 bpstat. */
3252 ecs->random_signal = !bpstat_causes_stop (ecs->event_thread->stop_bpstat);
3253
3254 /* If no catchpoint triggered for this, then keep going. */
3255 if (ecs->random_signal)
3256 {
3257 ptid_t parent;
3258 ptid_t child;
3259 int should_resume;
3260 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
3261
3262 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3263
3264 should_resume = follow_fork ();
3265
3266 parent = ecs->ptid;
3267 child = ecs->ws.value.related_pid;
3268
3269 /* In non-stop mode, also resume the other branch. */
3270 if (non_stop && !detach_fork)
3271 {
3272 if (follow_child)
3273 switch_to_thread (parent);
3274 else
3275 switch_to_thread (child);
3276
3277 ecs->event_thread = inferior_thread ();
3278 ecs->ptid = inferior_ptid;
3279 keep_going (ecs);
3280 }
3281
3282 if (follow_child)
3283 switch_to_thread (child);
3284 else
3285 switch_to_thread (parent);
3286
3287 ecs->event_thread = inferior_thread ();
3288 ecs->ptid = inferior_ptid;
3289
3290 if (should_resume)
3291 keep_going (ecs);
3292 else
3293 stop_stepping (ecs);
3294 return;
3295 }
3296 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3297 goto process_event_stop_test;
3298
3299 case TARGET_WAITKIND_VFORK_DONE:
3300 /* Done with the shared memory region. Re-insert breakpoints in
3301 the parent, and keep going. */
3302
3303 if (debug_infrun)
3304 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3305
3306 if (!ptid_equal (ecs->ptid, inferior_ptid))
3307 context_switch (ecs->ptid);
3308
3309 current_inferior ()->waiting_for_vfork_done = 0;
3310 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3311 /* This also takes care of reinserting breakpoints in the
3312 previously locked inferior. */
3313 keep_going (ecs);
3314 return;
3315
3316 case TARGET_WAITKIND_EXECD:
3317 if (debug_infrun)
3318 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3319
3320 if (!ptid_equal (ecs->ptid, inferior_ptid))
3321 {
3322 context_switch (ecs->ptid);
3323 reinit_frame_cache ();
3324 }
3325
3326 singlestep_breakpoints_inserted_p = 0;
3327 cancel_single_step_breakpoints ();
3328
3329 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3330
3331 /* Do whatever is necessary to the parent branch of the vfork. */
3332 handle_vfork_child_exec_or_exit (1);
3333
3334 /* This causes the eventpoints and symbol table to be reset.
3335 Must do this now, before trying to determine whether to
3336 stop. */
3337 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3338
3339 ecs->event_thread->stop_bpstat
3340 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3341 stop_pc, ecs->ptid);
3342 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
3343
3344 /* Note that this may be referenced from inside
3345 bpstat_stop_status above, through inferior_has_execd. */
3346 xfree (ecs->ws.value.execd_pathname);
3347 ecs->ws.value.execd_pathname = NULL;
3348
3349 /* If no catchpoint triggered for this, then keep going. */
3350 if (ecs->random_signal)
3351 {
3352 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3353 keep_going (ecs);
3354 return;
3355 }
3356 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3357 goto process_event_stop_test;
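/* Illustrative use of the exec catchpoint handled above (message
wording is approximate):

(gdb) catch exec
(gdb) run
Catchpoint 1 (exec'd /path/to/program), ...

Without such a catchpoint, ecs->random_signal is set above and
execution simply continues after follow_exec has re-read the symbol
table for the new executable. */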
3358
3359 /* Be careful not to try to gather much state about a thread
3360 that's in a syscall. It's frequently a losing proposition. */
3361 case TARGET_WAITKIND_SYSCALL_ENTRY:
3362 if (debug_infrun)
3363 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3364 /* Get the current syscall number. */
3365 if (handle_syscall_event (ecs) != 0)
3366 return;
3367 goto process_event_stop_test;
3368
3369 /* Before examining the threads further, step this thread to
3370 get it entirely out of the syscall. (We get notice of the
3371 event when the thread is just on the verge of exiting a
3372 syscall. Stepping one instruction seems to get it back
3373 into user code.) */
3374 case TARGET_WAITKIND_SYSCALL_RETURN:
3375 if (debug_infrun)
3376 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
3377 if (handle_syscall_event (ecs) != 0)
3378 return;
3379 goto process_event_stop_test;
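/* These syscall events are normally only delivered when the user has
set a syscall catchpoint, e.g. (illustrative; message wording is
approximate):

(gdb) catch syscall write
(gdb) continue
Catchpoint 1 (call to syscall 'write'), ...

handle_syscall_event above records the syscall number and decides
whether any such catchpoint actually triggered. */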
3380
3381 case TARGET_WAITKIND_STOPPED:
3382 if (debug_infrun)
3383 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
3384 ecs->event_thread->stop_signal = ecs->ws.value.sig;
3385 break;
3386
3387 case TARGET_WAITKIND_NO_HISTORY:
3388 /* Reverse execution: target ran out of history info. */
3389 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3390 print_stop_reason (NO_HISTORY, 0);
3391 stop_stepping (ecs);
3392 return;
3393 }
3394
3395 if (ecs->new_thread_event)
3396 {
3397 if (non_stop)
3398 /* Non-stop assumes that the target handles adding new threads
3399 to the thread list. */
3400 internal_error (__FILE__, __LINE__, "\
3401 targets should add new threads to the thread list themselves in non-stop mode.");
3402
3403 /* We may want to consider not doing a resume here in order to
3404 give the user a chance to play with the new thread. It might
3405 be good to make that a user-settable option. */
3406
3407 /* At this point, all threads are stopped (happens automatically
3408 in either the OS or the native code). Therefore we need to
3409 continue all threads in order to make progress. */
3410
3411 if (!ptid_equal (ecs->ptid, inferior_ptid))
3412 context_switch (ecs->ptid);
3413 target_resume (RESUME_ALL, 0, TARGET_SIGNAL_0);
3414 prepare_to_wait (ecs);
3415 return;
3416 }
3417
3418 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED)
3419 {
3420 /* Do we need to clean up the state of a thread that has
3421 completed a displaced single-step? (Doing so usually affects
3422 the PC, so do it here, before we set stop_pc.) */
3423 displaced_step_fixup (ecs->ptid, ecs->event_thread->stop_signal);
3424
3425 /* If we either finished a single-step or hit a breakpoint, but
3426 the user wanted this thread to be stopped, pretend we got a
3427 SIG0 (generic unsignaled stop). */
3428
3429 if (ecs->event_thread->stop_requested
3430 && ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3431 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3432 }
3433
3434 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3435
3436 if (debug_infrun)
3437 {
3438 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3439 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3440 struct cleanup *old_chain = save_inferior_ptid ();
3441
3442 inferior_ptid = ecs->ptid;
3443
3444 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
3445 paddress (gdbarch, stop_pc));
3446 if (target_stopped_by_watchpoint ())
3447 {
3448 CORE_ADDR addr;
3449
3450 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
3451
3452 if (target_stopped_data_address (&current_target, &addr))
3453 fprintf_unfiltered (gdb_stdlog,
3454 "infrun: stopped data address = %s\n",
3455 paddress (gdbarch, addr));
3456 else
3457 fprintf_unfiltered (gdb_stdlog,
3458 "infrun: (no data address available)\n");
3459 }
3460
3461 do_cleanups (old_chain);
3462 }
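/* The trace output produced above is enabled with "set debug infrun 1"
(and "set debug displaced 1" for the displaced-stepping machinery).
A typical line looks like

infrun: stop_pc = 0x400536

where the address shown is of course target-specific. */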
3463
3464 if (stepping_past_singlestep_breakpoint)
3465 {
3466 gdb_assert (singlestep_breakpoints_inserted_p);
3467 gdb_assert (ptid_equal (singlestep_ptid, ecs->ptid));
3468 gdb_assert (!ptid_equal (singlestep_ptid, saved_singlestep_ptid));
3469
3470 stepping_past_singlestep_breakpoint = 0;
3471
3472 /* We've either finished single-stepping past the single-step
3473 breakpoint, or stopped for some other reason. It would be nice if
3474 we could tell, but we can't reliably. */
3475 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3476 {
3477 if (debug_infrun)
3478 fprintf_unfiltered (gdb_stdlog, "infrun: stepping_past_singlestep_breakpoint\n");
3479 /* Pull the single step breakpoints out of the target. */
3480 remove_single_step_breakpoints ();
3481 singlestep_breakpoints_inserted_p = 0;
3482
3483 ecs->random_signal = 0;
3484 ecs->event_thread->trap_expected = 0;
3485
3486 context_switch (saved_singlestep_ptid);
3487 if (deprecated_context_hook)
3488 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3489
3490 resume (1, TARGET_SIGNAL_0);
3491 prepare_to_wait (ecs);
3492 return;
3493 }
3494 }
3495
3496 if (!ptid_equal (deferred_step_ptid, null_ptid))
3497 {
3498 /* In non-stop mode, there's never a deferred_step_ptid set. */
3499 gdb_assert (!non_stop);
3500
3501 /* If we stopped for some other reason than single-stepping, ignore
3502 the fact that we were supposed to switch back. */
3503 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3504 {
3505 if (debug_infrun)
3506 fprintf_unfiltered (gdb_stdlog,
3507 "infrun: handling deferred step\n");
3508
3509 /* Pull the single step breakpoints out of the target. */
3510 if (singlestep_breakpoints_inserted_p)
3511 {
3512 remove_single_step_breakpoints ();
3513 singlestep_breakpoints_inserted_p = 0;
3514 }
3515
3516 /* Note: We do not call context_switch at this point, as the
3517 context is already set up for stepping the original thread. */
3518 switch_to_thread (deferred_step_ptid);
3519 deferred_step_ptid = null_ptid;
3520 /* Suppress spurious "Switching to ..." message. */
3521 previous_inferior_ptid = inferior_ptid;
3522
3523 resume (1, TARGET_SIGNAL_0);
3524 prepare_to_wait (ecs);
3525 return;
3526 }
3527
3528 deferred_step_ptid = null_ptid;
3529 }
3530
3531 /* See if a thread hit a thread-specific breakpoint that was meant for
3532 another thread. If so, then step that thread past the breakpoint,
3533 and continue it. */
3534
3535 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3536 {
3537 int thread_hop_needed = 0;
3538 struct address_space *aspace =
3539 get_regcache_aspace (get_thread_regcache (ecs->ptid));
3540
3541 /* Check if a regular breakpoint has been hit before checking
3542 for a potential single step breakpoint. Otherwise, GDB will
3543 not see this breakpoint hit when stepping onto breakpoints. */
3544 if (regular_breakpoint_inserted_here_p (aspace, stop_pc))
3545 {
3546 ecs->random_signal = 0;
3547 if (!breakpoint_thread_match (aspace, stop_pc, ecs->ptid))
3548 thread_hop_needed = 1;
3549 }
3550 else if (singlestep_breakpoints_inserted_p)
3551 {
3552 /* We have not context switched yet, so this should be true
3553 no matter which thread hit the singlestep breakpoint. */
3554 gdb_assert (ptid_equal (inferior_ptid, singlestep_ptid));
3555 if (debug_infrun)
3556 fprintf_unfiltered (gdb_stdlog, "infrun: software single step "
3557 "trap for %s\n",
3558 target_pid_to_str (ecs->ptid));
3559
3560 ecs->random_signal = 0;
3561 /* The call to in_thread_list is necessary because PTIDs sometimes
3562 change when we go from single-threaded to multi-threaded. If
3563 the singlestep_ptid is still in the list, assume that it is
3564 really different from ecs->ptid. */
3565 if (!ptid_equal (singlestep_ptid, ecs->ptid)
3566 && in_thread_list (singlestep_ptid))
3567 {
3568 /* If the PC of the thread we were trying to single-step
3569 has changed, discard this event (which we were going
3570 to ignore anyway), and pretend we saw that thread
3571 trap. This prevents us continuously moving the
3572 single-step breakpoint forward, one instruction at a
3573 time. If the PC has changed, then the thread we were
3574 trying to single-step has trapped or been signalled,
3575 but the event has not been reported to GDB yet.
3576
3577 There might be some cases where this loses signal
3578 information, if a signal has arrived at exactly the
3579 same time that the PC changed, but this is the best
3580 we can do with the information available. Perhaps we
3581 should arrange to report all events for all threads
3582 when they stop, or to re-poll the remote looking for
3583 this particular thread (i.e. temporarily enable
3584 schedlock). */
3585
3586 CORE_ADDR new_singlestep_pc
3587 = regcache_read_pc (get_thread_regcache (singlestep_ptid));
3588
3589 if (new_singlestep_pc != singlestep_pc)
3590 {
3591 enum target_signal stop_signal;
3592
3593 if (debug_infrun)
3594 fprintf_unfiltered (gdb_stdlog, "infrun: unexpected thread,"
3595 " but expected thread advanced also\n");
3596
3597 /* The current context still belongs to
3598 singlestep_ptid. Don't swap here, since that's
3599 the context we want to use. Just fudge our
3600 state and continue. */
3601 stop_signal = ecs->event_thread->stop_signal;
3602 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3603 ecs->ptid = singlestep_ptid;
3604 ecs->event_thread = find_thread_ptid (ecs->ptid);
3605 ecs->event_thread->stop_signal = stop_signal;
3606 stop_pc = new_singlestep_pc;
3607 }
3608 else
3609 {
3610 if (debug_infrun)
3611 fprintf_unfiltered (gdb_stdlog,
3612 "infrun: unexpected thread\n");
3613
3614 thread_hop_needed = 1;
3615 stepping_past_singlestep_breakpoint = 1;
3616 saved_singlestep_ptid = singlestep_ptid;
3617 }
3618 }
3619 }
3620
3621 if (thread_hop_needed)
3622 {
3623 struct regcache *thread_regcache;
3624 int remove_status = 0;
3625
3626 if (debug_infrun)
3627 fprintf_unfiltered (gdb_stdlog, "infrun: thread_hop_needed\n");
3628
3629 /* Switch context before touching inferior memory, the
3630 previous thread may have exited. */
3631 if (!ptid_equal (inferior_ptid, ecs->ptid))
3632 context_switch (ecs->ptid);
3633
3634 /* Saw a breakpoint, but it was hit by the wrong thread.
3635 Just continue. */
3636
3637 if (singlestep_breakpoints_inserted_p)
3638 {
3639 /* Pull the single step breakpoints out of the target. */
3640 remove_single_step_breakpoints ();
3641 singlestep_breakpoints_inserted_p = 0;
3642 }
3643
3644 /* If the arch can displace step, don't remove the
3645 breakpoints. */
3646 thread_regcache = get_thread_regcache (ecs->ptid);
3647 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
3648 remove_status = remove_breakpoints ();
3649
3650 /* Did we fail to remove breakpoints? If so, try
3651 to set the PC past the bp. (There's at least
3652 one situation in which we can fail to remove
3653 the bp's: On HP-UX's that use ttrace, we can't
3654 change the address space of a vforking child
3655 process until the child exits (well, okay, not
3656 then either :-) or execs. */
3657 if (remove_status != 0)
3658 error (_("Cannot step over breakpoint hit in wrong thread"));
3659 else
3660 { /* Single step */
3661 if (!non_stop)
3662 {
3663 /* Only need to require the next event from this
3664 thread in all-stop mode. */
3665 waiton_ptid = ecs->ptid;
3666 infwait_state = infwait_thread_hop_state;
3667 }
3668
3669 ecs->event_thread->stepping_over_breakpoint = 1;
3670 keep_going (ecs);
3671 return;
3672 }
3673 }
3674 else if (singlestep_breakpoints_inserted_p)
3675 {
3676 sw_single_step_trap_p = 1;
3677 ecs->random_signal = 0;
3678 }
3679 }
3680 else
3681 ecs->random_signal = 1;
3682
3683 /* See if something interesting happened to the non-current thread. If
3684 so, then switch to that thread. */
3685 if (!ptid_equal (ecs->ptid, inferior_ptid))
3686 {
3687 if (debug_infrun)
3688 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
3689
3690 context_switch (ecs->ptid);
3691
3692 if (deprecated_context_hook)
3693 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3694 }
3695
3696 /* At this point, get hold of the now-current thread's frame. */
3697 frame = get_current_frame ();
3698 gdbarch = get_frame_arch (frame);
3699
3700 if (singlestep_breakpoints_inserted_p)
3701 {
3702 /* Pull the single step breakpoints out of the target. */
3703 remove_single_step_breakpoints ();
3704 singlestep_breakpoints_inserted_p = 0;
3705 }
3706
3707 if (stepped_after_stopped_by_watchpoint)
3708 stopped_by_watchpoint = 0;
3709 else
3710 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
3711
3712 /* If necessary, step over this watchpoint. We'll be back to display
3713 it in a moment. */
3714 if (stopped_by_watchpoint
3715 && (target_have_steppable_watchpoint
3716 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
3717 {
3718 /* At this point, we are stopped at an instruction which has
3719 attempted to write to a piece of memory under control of
3720 a watchpoint. The instruction hasn't actually executed
3721 yet. If we were to evaluate the watchpoint expression
3722 now, we would get the old value, and therefore no change
3723 would seem to have occurred.
3724
3725 In order to make watchpoints work `right', we really need
3726 to complete the memory write, and then evaluate the
3727 watchpoint expression. We do this by single-stepping the
3728 target.
3729
3730 It may not be necessary to disable the watchpoint to stop over
3731 it. For example, the PA can (with some kernel cooperation)
3732 single step over a watchpoint without disabling the watchpoint.
3733
3734 It is far more common to need to disable a watchpoint to step
3735 the inferior over it. If we have non-steppable watchpoints,
3736 we must disable the current watchpoint; it's simplest to
3737 disable all watchpoints and breakpoints. */
3738 int hw_step = 1;
3739
3740 if (!target_have_steppable_watchpoint)
3741 remove_breakpoints ();
3742 /* Single step */
3743 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
3744 target_resume (ecs->ptid, hw_step, TARGET_SIGNAL_0);
3745 waiton_ptid = ecs->ptid;
3746 if (target_have_steppable_watchpoint)
3747 infwait_state = infwait_step_watch_state;
3748 else
3749 infwait_state = infwait_nonstep_watch_state;
3750 prepare_to_wait (ecs);
3751 return;
3752 }
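/* A minimal illustration of why the extra single-step above is needed.
Suppose the user has done

(gdb) watch x
(gdb) continue

and the inferior executes a store to x. On targets with
non-steppable watchpoints the trap is reported *before* the store
completes, so evaluating the watchpoint expression right away would
still yield the old value and show no change. Stepping one
instruction lets the store finish; only then can old and new values
be compared and reported. */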
3753
3754 ecs->stop_func_start = 0;
3755 ecs->stop_func_end = 0;
3756 ecs->stop_func_name = 0;
3757 /* Don't care about return value; stop_func_start and stop_func_name
3758 will both be 0 if it doesn't work. */
3759 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3760 &ecs->stop_func_start, &ecs->stop_func_end);
3761 ecs->stop_func_start
3762 += gdbarch_deprecated_function_start_offset (gdbarch);
3763 ecs->event_thread->stepping_over_breakpoint = 0;
3764 bpstat_clear (&ecs->event_thread->stop_bpstat);
3765 ecs->event_thread->stop_step = 0;
3766 stop_print_frame = 1;
3767 ecs->random_signal = 0;
3768 stopped_by_random_signal = 0;
3769
3770 /* Hide inlined functions starting here, unless we just performed stepi or
3771 nexti. After stepi and nexti, always show the innermost frame (not any
3772 inline function call sites). */
3773 if (ecs->event_thread->step_range_end != 1)
3774 skip_inline_frames (ecs->ptid);
3775
3776 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3777 && ecs->event_thread->trap_expected
3778 && gdbarch_single_step_through_delay_p (gdbarch)
3779 && currently_stepping (ecs->event_thread))
3780 {
3781 /* We're trying to step off a breakpoint. Turns out that we're
3782 also on an instruction that needs to be stepped multiple
3783 times before it has fully executed. E.g., architectures
3784 with a delay slot. It needs to be stepped twice, once for
3785 the instruction and once for the delay slot. */
3786 int step_through_delay
3787 = gdbarch_single_step_through_delay (gdbarch, frame);
3788
3789 if (debug_infrun && step_through_delay)
3790 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
3791 if (ecs->event_thread->step_range_end == 0 && step_through_delay)
3792 {
3793 /* The user issued a continue when stopped at a breakpoint.
3794 Set up for another trap and get out of here. */
3795 ecs->event_thread->stepping_over_breakpoint = 1;
3796 keep_going (ecs);
3797 return;
3798 }
3799 else if (step_through_delay)
3800 {
3801 /* The user issued a step when stopped at a breakpoint.
3802 Maybe we should stop, maybe we should not - the delay
3803 slot *might* correspond to a line of source. In any
3804 case, don't decide that here, just set
3805 ecs->stepping_over_breakpoint, making sure we
3806 single-step again before breakpoints are re-inserted. */
3807 ecs->event_thread->stepping_over_breakpoint = 1;
3808 }
3809 }
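/* An illustrative (MIPS-like, not target-specific) delay-slot sequence
showing why more than one step can be needed here:

jal foo # branch whose delay slot follows
addiu $a0, $a0, 1 # delay slot, executed together with the branch

A single step only moves the PC onto the delay slot;
gdbarch_single_step_through_delay reports that a further step is
required before breakpoints may safely be re-inserted. */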
3810
3811 /* Look at the cause of the stop, and decide what to do.
3812 The alternatives are:
3813 1) stop_stepping and return; to really stop and return to the debugger,
3814 2) keep_going and return to start up again
3815 (set ecs->event_thread->stepping_over_breakpoint to 1 to single step once)
3816 3) set ecs->random_signal to 1, and the decision between 1 and 2
3817 will be made according to the signal handling tables. */
3818
3819 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3820 || stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_NO_SIGSTOP
3821 || stop_soon == STOP_QUIETLY_REMOTE)
3822 {
3823 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP && stop_after_trap)
3824 {
3825 if (debug_infrun)
3826 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
3827 stop_print_frame = 0;
3828 stop_stepping (ecs);
3829 return;
3830 }
3831
3832 /* This originates from start_remote(), start_inferior() and
3833 the shared library hook functions. */
3834 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
3835 {
3836 if (debug_infrun)
3837 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3838 stop_stepping (ecs);
3839 return;
3840 }
3841
3842 /* This originates from attach_command(). We need to overwrite
3843 the stop_signal here, because some kernels don't ignore a
3844 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
3845 See more comments in inferior.h. On the other hand, if we
3846 get a non-SIGSTOP, report it to the user - assume the backend
3847 will handle the SIGSTOP if it should show up later.
3848
3849 Also consider that the attach is complete when we see a
3850 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
3851 target extended-remote report it instead of a SIGSTOP
3852 (e.g. gdbserver). We already rely on SIGTRAP being our
3853 signal, so this is no exception.
3854
3855 Also consider that the attach is complete when we see a
3856 TARGET_SIGNAL_0. In non-stop mode, GDB will explicitly tell
3857 the target to stop all threads of the inferior, in case the
3858 low level attach operation doesn't stop them implicitly. If
3859 they weren't stopped implicitly, then the stub will report a
3860 TARGET_SIGNAL_0, meaning: stopped for no particular reason
3861 other than GDB's request. */
3862 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3863 && (ecs->event_thread->stop_signal == TARGET_SIGNAL_STOP
3864 || ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3865 || ecs->event_thread->stop_signal == TARGET_SIGNAL_0))
3866 {
3867 stop_stepping (ecs);
3868 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3869 return;
3870 }
3871
3872 /* See if there is a breakpoint at the current PC. */
3873 ecs->event_thread->stop_bpstat
3874 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3875 stop_pc, ecs->ptid);
3876
3877 /* The following is needed in case the break condition
3878 called a function. */
3879 stop_print_frame = 1;
3880
3881 /* This is where we handle "moribund" watchpoints. Unlike
3882 software breakpoint traps, hardware watchpoint traps are
3883 always distinguishable from random traps. If no high-level
3884 watchpoint is associated with the reported stop data address
3885 anymore, then the bpstat does not explain the signal ---
3886 simply make sure to ignore it if `stopped_by_watchpoint' is
3887 set. */
3888
3889 if (debug_infrun
3890 && ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3891 && !bpstat_explains_signal (ecs->event_thread->stop_bpstat)
3892 && stopped_by_watchpoint)
3893 fprintf_unfiltered (gdb_stdlog, "\
3894 infrun: no user watchpoint explains watchpoint SIGTRAP, ignoring\n");
3895
3896 /* NOTE: cagney/2003-03-29: These two checks for a random signal
3897 at one stage in the past included checks for an inferior
3898 function call's call dummy's return breakpoint. The original
3899 comment, that went with the test, read:
3900
3901 ``End of a stack dummy. Some systems (e.g. Sony news) give
3902 another signal besides SIGTRAP, so check here as well as
3903 above.''
3904
3905 If someone ever tries to get call dummies on a
3906 non-executable stack to work (where the target would stop
3907 with something like a SIGSEGV), then those tests might need
3908 to be re-instated. Given, however, that the tests were only
3909 enabled when momentary breakpoints were not being used, I
3910 suspect that it won't be the case.
3911
3912 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
3913 be necessary for call dummies on a non-executable stack on
3914 SPARC. */
3915
3916 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3917 ecs->random_signal
3918 = !(bpstat_explains_signal (ecs->event_thread->stop_bpstat)
3919 || stopped_by_watchpoint
3920 || ecs->event_thread->trap_expected
3921 || (ecs->event_thread->step_range_end
3922 && ecs->event_thread->step_resume_breakpoint == NULL));
3923 else
3924 {
3925 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
3926 if (!ecs->random_signal)
3927 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3928 }
3929 }
3930
3931 /* When we reach this point, we've pretty much decided
3932 that the reason for stopping must've been a random
3933 (unexpected) signal. */
3934
3935 else
3936 ecs->random_signal = 1;
3937
3938 process_event_stop_test:
3939
3940 /* Re-fetch current thread's frame in case we did a
3941 "goto process_event_stop_test" above. */
3942 frame = get_current_frame ();
3943 gdbarch = get_frame_arch (frame);
3944
3945 /* For the program's own signals, act according to
3946 the signal handling tables. */
3947
3948 if (ecs->random_signal)
3949 {
3950 /* Signal not for debugging purposes. */
3951 int printed = 0;
3952 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
3953
3954 if (debug_infrun)
3955 fprintf_unfiltered (gdb_stdlog, "infrun: random signal %d\n",
3956 ecs->event_thread->stop_signal);
3957
3958 stopped_by_random_signal = 1;
3959
3960 if (signal_print[ecs->event_thread->stop_signal])
3961 {
3962 printed = 1;
3963 target_terminal_ours_for_output ();
3964 print_stop_reason (SIGNAL_RECEIVED, ecs->event_thread->stop_signal);
3965 }
3966 /* Always stop on signals if we're either just gaining control
3967 of the program, or the user explicitly requested this thread
3968 to remain stopped. */
3969 if (stop_soon != NO_STOP_QUIETLY
3970 || ecs->event_thread->stop_requested
3971 || (!inf->detaching
3972 && signal_stop_state (ecs->event_thread->stop_signal)))
3973 {
3974 stop_stepping (ecs);
3975 return;
3976 }
3977 /* If not going to stop, give terminal back
3978 if we took it away. */
3979 else if (printed)
3980 target_terminal_inferior ();
3981
3982 /* Clear the signal if it should not be passed. */
3983 if (signal_program[ecs->event_thread->stop_signal] == 0)
3984 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3985
3986 if (ecs->event_thread->prev_pc == stop_pc
3987 && ecs->event_thread->trap_expected
3988 && ecs->event_thread->step_resume_breakpoint == NULL)
3989 {
3990 /* We were just starting a new sequence, attempting to
3991 single-step off of a breakpoint and expecting a SIGTRAP.
3992 Instead, this signal arrived. It will take us out of
3993 the stepping range, so GDB needs to remember, once the
3994 signal handler returns, to resume stepping off that
3995 breakpoint. */
3996 /* To simplify things, "continue" is forced to use the same
3997 code paths as single-step - set a breakpoint at the
3998 signal return address and then, once hit, step off that
3999 breakpoint. */
4000 if (debug_infrun)
4001 fprintf_unfiltered (gdb_stdlog,
4002 "infrun: signal arrived while stepping over "
4003 "breakpoint\n");
4004
4005 insert_step_resume_breakpoint_at_frame (frame);
4006 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4007 keep_going (ecs);
4008 return;
4009 }
4010
4011 if (ecs->event_thread->step_range_end != 0
4012 && ecs->event_thread->stop_signal != TARGET_SIGNAL_0
4013 && (ecs->event_thread->step_range_start <= stop_pc
4014 && stop_pc < ecs->event_thread->step_range_end)
4015 && frame_id_eq (get_stack_frame_id (frame),
4016 ecs->event_thread->step_stack_frame_id)
4017 && ecs->event_thread->step_resume_breakpoint == NULL)
4018 {
4019 /* The inferior is about to take a signal that will take it
4020 out of the single step range. Set a breakpoint at the
4021 current PC (which is presumably where the signal handler
4022 will eventually return) and then allow the inferior to
4023 run free.
4024
4025 Note that this is only needed for a signal delivered
4026 while in the single-step range. Nested signals aren't a
4027 problem as they eventually all return. */
4028 if (debug_infrun)
4029 fprintf_unfiltered (gdb_stdlog,
4030 "infrun: signal may take us out of "
4031 "single-step range\n");
4032
4033 insert_step_resume_breakpoint_at_frame (frame);
4034 keep_going (ecs);
4035 return;
4036 }
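/* An illustrative scenario for the two blocks above: the user is in
the middle of a "step" when an asynchronous signal with a handler
arrives (say SIGALRM, configured with "handle SIGALRM nostop pass").
The handler would execute far outside the stepping range, so rather
than single-stepping through it GDB plants a step-resume breakpoint
at the frame's resume address and lets the inferior run freely; when
the handler returns and that breakpoint is hit, stepping resumes. */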
4037
4038 /* Note: step_resume_breakpoint may be non-NULL. This occurs
4039 when either there's a nested signal, or when there's a
4040 pending signal enabled just as the signal handler returns
4041 (leaving the inferior at the step-resume-breakpoint without
4042 actually executing it). Either way continue until the
4043 breakpoint is really hit. */
4044 keep_going (ecs);
4045 return;
4046 }
4047
4048 /* Handle cases caused by hitting a breakpoint. */
4049 {
4050 CORE_ADDR jmp_buf_pc;
4051 struct bpstat_what what;
4052
4053 what = bpstat_what (ecs->event_thread->stop_bpstat);
4054
4055 if (what.call_dummy)
4056 {
4057 stop_stack_dummy = what.call_dummy;
4058 }
4059
4060 switch (what.main_action)
4061 {
4062 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
4063 /* If we hit the breakpoint at longjmp while stepping, we
4064 install a momentary breakpoint at the target of the
4065 jmp_buf. */
4066
4067 if (debug_infrun)
4068 fprintf_unfiltered (gdb_stdlog,
4069 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
4070
4071 ecs->event_thread->stepping_over_breakpoint = 1;
4072
4073 if (!gdbarch_get_longjmp_target_p (gdbarch)
4074 || !gdbarch_get_longjmp_target (gdbarch, frame, &jmp_buf_pc))
4075 {
4076 if (debug_infrun)
4077 fprintf_unfiltered (gdb_stdlog, "\
4078 infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME (!gdbarch_get_longjmp_target)\n");
4079 keep_going (ecs);
4080 return;
4081 }
4082
4083 /* We're going to replace the current step-resume breakpoint
4084 with a longjmp-resume breakpoint. */
4085 delete_step_resume_breakpoint (ecs->event_thread);
4086
4087 /* Insert a breakpoint at resume address. */
4088 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4089
4090 keep_going (ecs);
4091 return;
4092
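/* An illustrative source-level picture of the longjmp handling above
(hypothetical user code):

if (setjmp (env) == 0)
... <- "next" is in progress here
...
longjmp (env, 1); <- stepping over this would otherwise
lose control of the inferior

When the internal breakpoint on longjmp is hit, GDB reads the
destination out of the jmp_buf via gdbarch_get_longjmp_target and
plants a momentary breakpoint there, so control is regained at the
point where setjmp returns for the second time. */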
4093 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4094 if (debug_infrun)
4095 fprintf_unfiltered (gdb_stdlog,
4096 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4097
4098 gdb_assert (ecs->event_thread->step_resume_breakpoint != NULL);
4099 delete_step_resume_breakpoint (ecs->event_thread);
4100
4101 ecs->event_thread->stop_step = 1;
4102 print_stop_reason (END_STEPPING_RANGE, 0);
4103 stop_stepping (ecs);
4104 return;
4105
4106 case BPSTAT_WHAT_SINGLE:
4107 if (debug_infrun)
4108 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4109 ecs->event_thread->stepping_over_breakpoint = 1;
4110 /* Still need to check other stuff, at least the case
4111 where we are stepping and step out of the stepping range. */
4112 break;
4113
4114 case BPSTAT_WHAT_STOP_NOISY:
4115 if (debug_infrun)
4116 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4117 stop_print_frame = 1;
4118
4119 /* We are about to nuke the step_resume_breakpoint via the
4120 cleanup chain, so no need to worry about it here. */
4121
4122 stop_stepping (ecs);
4123 return;
4124
4125 case BPSTAT_WHAT_STOP_SILENT:
4126 if (debug_infrun)
4127 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4128 stop_print_frame = 0;
4129
4130 /* We are about to nuke the step_resume_breakpoint via the
4131 cleanup chain, so no need to worry about it here. */
4132
4133 stop_stepping (ecs);
4134 return;
4135
4136 case BPSTAT_WHAT_STEP_RESUME:
4137 if (debug_infrun)
4138 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4139
4140 delete_step_resume_breakpoint (ecs->event_thread);
4141 if (ecs->event_thread->step_after_step_resume_breakpoint)
4142 {
4143 /* Back when the step-resume breakpoint was inserted, we
4144 were trying to single-step off a breakpoint. Go back
4145 to doing that. */
4146 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4147 ecs->event_thread->stepping_over_breakpoint = 1;
4148 keep_going (ecs);
4149 return;
4150 }
4151 if (stop_pc == ecs->stop_func_start
4152 && execution_direction == EXEC_REVERSE)
4153 {
4154 /* We are stepping over a function call in reverse, and
4155 just hit the step-resume breakpoint at the start
4156 address of the function. Go back to single-stepping,
4157 which should take us back to the function call. */
4158 ecs->event_thread->stepping_over_breakpoint = 1;
4159 keep_going (ecs);
4160 return;
4161 }
4162 break;
4163
4164 case BPSTAT_WHAT_CHECK_SHLIBS:
4165 {
4166 if (debug_infrun)
4167 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_CHECK_SHLIBS\n");
4168
4169 /* Check for any newly added shared libraries if we're
4170 supposed to be adding them automatically. Switch
4171 terminal for any messages produced by
4172 breakpoint_re_set. */
4173 target_terminal_ours_for_output ();
4174 /* NOTE: cagney/2003-11-25: Make certain that the target
4175 stack's section table is kept up-to-date. Architectures,
4176 (e.g., PPC64), use the section table to perform
4177 operations such as address => section name and hence
4178 require the table to contain all sections (including
4179 those found in shared libraries). */
4180 #ifdef SOLIB_ADD
4181 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
4182 #else
4183 solib_add (NULL, 0, &current_target, auto_solib_add);
4184 #endif
4185 target_terminal_inferior ();
4186
4187 /* If requested, stop when the dynamic linker notifies
4188 gdb of events. This allows the user to get control
4189 and place breakpoints in initializer routines for
4190 dynamically loaded objects (among other things). */
4191 if (stop_on_solib_events || stop_stack_dummy)
4192 {
4193 stop_stepping (ecs);
4194 return;
4195 }
4196 else
4197 {
4198 /* We want to step over this breakpoint, then keep going. */
4199 ecs->event_thread->stepping_over_breakpoint = 1;
4200 break;
4201 }
4202 }
4203 break;
4204
4205 case BPSTAT_WHAT_CHECK_JIT:
4206 if (debug_infrun)
4207 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_CHECK_JIT\n");
4208
4209 /* Switch terminal for any messages produced by breakpoint_re_set. */
4210 target_terminal_ours_for_output ();
4211
4212 jit_event_handler (gdbarch);
4213
4214 target_terminal_inferior ();
4215
4216 /* We want to step over this breakpoint, then keep going. */
4217 ecs->event_thread->stepping_over_breakpoint = 1;
4218
4219 break;
4220
4221 case BPSTAT_WHAT_LAST:
4222 /* Not a real code, but listed here to shut up gcc -Wall. */
4223
4224 case BPSTAT_WHAT_KEEP_CHECKING:
4225 break;
4226 }
4227 }
4228
4229 /* We come here if we hit a breakpoint but should not
4230 stop for it. Possibly we also were stepping
4231 and should stop for that. So fall through and
4232 test for stepping. But, if not stepping,
4233 do not stop. */
4234
4235 /* In all-stop mode, if we're currently stepping but have stopped in
4236 some other thread, we need to switch back to the stepped thread. */
4237 if (!non_stop)
4238 {
4239 struct thread_info *tp;
4240
4241 tp = iterate_over_threads (currently_stepping_or_nexting_callback,
4242 ecs->event_thread);
4243 if (tp)
4244 {
4245 /* However, if the current thread is blocked on some internal
4246 breakpoint, and we simply need to step over that breakpoint
4247 to get it going again, do that first. */
4248 if ((ecs->event_thread->trap_expected
4249 && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP)
4250 || ecs->event_thread->stepping_over_breakpoint)
4251 {
4252 keep_going (ecs);
4253 return;
4254 }
4255
4256 /* If the stepping thread exited, then don't try to switch
4257 back and resume it, which could fail in several different
4258 ways depending on the target. Instead, just keep going.
4259
4260 We can find a stepping dead thread in the thread list in
4261 two cases:
4262
4263 - The target supports thread exit events, and when the
4264 target tries to delete the thread from the thread list,
4265 inferior_ptid pointed at the exiting thread. In such
4266 case, calling delete_thread does not really remove the
4267 thread from the list; instead, the thread is left listed,
4268 with 'exited' state.
4269
4270 - The target's debug interface does not support thread
4271 exit events, and so we have no idea whatsoever if the
4272 previously stepping thread is still alive. For that
4273 reason, we need to synchronously query the target
4274 now. */
4275 if (is_exited (tp->ptid)
4276 || !target_thread_alive (tp->ptid))
4277 {
4278 if (debug_infrun)
4279 fprintf_unfiltered (gdb_stdlog, "\
4280 infrun: not switching back to stepped thread, it has vanished\n");
4281
4282 delete_thread (tp->ptid);
4283 keep_going (ecs);
4284 return;
4285 }
4286
4287 /* Otherwise, we no longer expect a trap in the current thread.
4288 Clear the trap_expected flag before switching back -- this is
4289 what keep_going would do as well, if we called it. */
4290 ecs->event_thread->trap_expected = 0;
4291
4292 if (debug_infrun)
4293 fprintf_unfiltered (gdb_stdlog,
4294 "infrun: switching back to stepped thread\n");
4295
4296 ecs->event_thread = tp;
4297 ecs->ptid = tp->ptid;
4298 context_switch (ecs->ptid);
4299 keep_going (ecs);
4300 return;
4301 }
4302 }
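/* The situation above arises in all-stop mode with scheduler locking
off (the default, "set scheduler-locking off"): while one thread is
being stepped all threads are resumed, so a different thread may
report an event that does not warrant a user-visible stop. The code
above hands control back to the original stepping thread instead of
silently abandoning the user's "step" or "next". */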
4303
4304 /* Are we stepping to get the inferior out of the dynamic linker's
4305 hook (and possibly the dld itself) after catching a shlib
4306 event? */
4307 if (ecs->event_thread->stepping_through_solib_after_catch)
4308 {
4309 #if defined(SOLIB_ADD)
4310 /* Have we reached our destination? If not, keep going. */
4311 if (SOLIB_IN_DYNAMIC_LINKER (PIDGET (ecs->ptid), stop_pc))
4312 {
4313 if (debug_infrun)
4314 fprintf_unfiltered (gdb_stdlog, "infrun: stepping in dynamic linker\n");
4315 ecs->event_thread->stepping_over_breakpoint = 1;
4316 keep_going (ecs);
4317 return;
4318 }
4319 #endif
4320 if (debug_infrun)
4321 fprintf_unfiltered (gdb_stdlog, "infrun: step past dynamic linker\n");
4322 /* Else, stop and report the catchpoint(s) whose triggering
4323 caused us to begin stepping. */
4324 ecs->event_thread->stepping_through_solib_after_catch = 0;
4325 bpstat_clear (&ecs->event_thread->stop_bpstat);
4326 ecs->event_thread->stop_bpstat
4327 = bpstat_copy (ecs->event_thread->stepping_through_solib_catchpoints);
4328 bpstat_clear (&ecs->event_thread->stepping_through_solib_catchpoints);
4329 stop_print_frame = 1;
4330 stop_stepping (ecs);
4331 return;
4332 }
4333
4334 if (ecs->event_thread->step_resume_breakpoint)
4335 {
4336 if (debug_infrun)
4337 fprintf_unfiltered (gdb_stdlog,
4338 "infrun: step-resume breakpoint is inserted\n");
4339
4340 /* Having a step-resume breakpoint overrides anything
4341 else having to do with stepping commands until
4342 that breakpoint is reached. */
4343 keep_going (ecs);
4344 return;
4345 }
4346
4347 if (ecs->event_thread->step_range_end == 0)
4348 {
4349 if (debug_infrun)
4350 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4351 /* Likewise if we aren't even stepping. */
4352 keep_going (ecs);
4353 return;
4354 }
4355
4356 /* Re-fetch current thread's frame in case the code above caused
4357 the frame cache to be re-initialized, making our FRAME variable
4358 a dangling pointer. */
4359 frame = get_current_frame ();
4360
4361 /* If stepping through a line, keep going if still within it.
4362
4363 Note that step_range_end is the address of the first instruction
4364 beyond the step range, and NOT the address of the last instruction
4365 within it!
4366
4367 Note also that during reverse execution, we may be stepping
4368 through a function epilogue and therefore must detect when
4369 the current-frame changes in the middle of a line. */
4370
4371 if (stop_pc >= ecs->event_thread->step_range_start
4372 && stop_pc < ecs->event_thread->step_range_end
4373 && (execution_direction != EXEC_REVERSE
4374 || frame_id_eq (get_frame_id (frame),
4375 ecs->event_thread->step_frame_id)))
4376 {
4377 if (debug_infrun)
4378 fprintf_unfiltered
4379 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4380 paddress (gdbarch, ecs->event_thread->step_range_start),
4381 paddress (gdbarch, ecs->event_thread->step_range_end));
4382
4383 /* When stepping backward, stop at beginning of line range
4384 (unless it's the function entry point, in which case
4385 keep going back to the call point). */
4386 if (stop_pc == ecs->event_thread->step_range_start
4387 && stop_pc != ecs->stop_func_start
4388 && execution_direction == EXEC_REVERSE)
4389 {
4390 ecs->event_thread->stop_step = 1;
4391 print_stop_reason (END_STEPPING_RANGE, 0);
4392 stop_stepping (ecs);
4393 }
4394 else
4395 keep_going (ecs);
4396
4397 return;
4398 }
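/* A small worked example of the range test above (addresses are
hypothetical). If the current source line was compiled to the
instructions at [0x400520, 0x400534), then step_range_start is
0x400520 and step_range_end is 0x400534, the first address past the
line. A stop at stop_pc == 0x40052c is still inside the range and
stepping simply continues; a stop at 0x400534 falls through to the
out-of-range handling below. */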
4399
4400 /* We stepped out of the stepping range. */
4401
4402 /* If we are stepping at the source level and entered the runtime
4403 loader dynamic symbol resolution code...
4404
4405 EXEC_FORWARD: we keep on single stepping until we exit the run
4406 time loader code and reach the callee's address.
4407
4408 EXEC_REVERSE: we've already executed the callee (backward), and
4409 the runtime loader code is handled just like any other
4410 undebuggable function call. Now we need only keep stepping
4411 backward through the trampoline code, and that's handled further
4412 down, so there is nothing for us to do here. */
4413
4414 if (execution_direction != EXEC_REVERSE
4415 && ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4416 && in_solib_dynsym_resolve_code (stop_pc))
4417 {
4418 CORE_ADDR pc_after_resolver =
4419 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4420
4421 if (debug_infrun)
4422 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into dynsym resolve code\n");
4423
4424 if (pc_after_resolver)
4425 {
4426 /* Set up a step-resume breakpoint at the address
4427 indicated by SKIP_SOLIB_RESOLVER. */
4428 struct symtab_and_line sr_sal;
4429
4430 init_sal (&sr_sal);
4431 sr_sal.pc = pc_after_resolver;
4432 sr_sal.pspace = get_frame_program_space (frame);
4433
4434 insert_step_resume_breakpoint_at_sal (gdbarch,
4435 sr_sal, null_frame_id);
4436 }
4437
4438 keep_going (ecs);
4439 return;
4440 }
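/* The typical case for the block above is stepping into a shared
library call through a not-yet-resolved PLT entry: the first call
detours through the dynamic linker's resolver (e.g.
_dl_runtime_resolve on GNU/Linux). gdbarch_skip_solib_resolver
reports where execution will land once resolution is finished, and
the step-resume breakpoint set above carries the "step" across the
resolver so the user ends up in the callee rather than inside the
runtime loader. */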
4441
4442 if (ecs->event_thread->step_range_end != 1
4443 && (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4444 || ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4445 && get_frame_type (frame) == SIGTRAMP_FRAME)
4446 {
4447 if (debug_infrun)
4448 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into signal trampoline\n");
4449 /* The inferior, while doing a "step" or "next", has ended up in
4450 a signal trampoline (either by a signal being delivered or by
4451 the signal handler returning). Just single-step until the
4452 inferior leaves the trampoline (either by calling the handler
4453 or returning). */
4454 keep_going (ecs);
4455 return;
4456 }
4457
4458 /* Check for subroutine calls. The check for the current frame
4459 equalling the step ID is not necessary - the check of the
4460 previous frame's ID is sufficient - but it is a common case and
4461 cheaper than checking the previous frame's ID.
4462
4463 NOTE: frame_id_eq will never report two invalid frame IDs as
4464 being equal, so to get into this block, both the current and
4465 previous frame must have valid frame IDs. */
4466 /* The outer_frame_id check is a heuristic to detect stepping
4467 through startup code. If we step over an instruction which
4468 sets the stack pointer from an invalid value to a valid value,
4469 we may detect that as a subroutine call from the mythical
4470 "outermost" function. This could be fixed by marking
4471 outermost frames as !stack_p,code_p,special_p. Then the
4472 initial outermost frame, before sp was valid, would
4473 have code_addr == &_start. See the comment in frame_id_eq
4474 for more. */
4475 if (!frame_id_eq (get_stack_frame_id (frame),
4476 ecs->event_thread->step_stack_frame_id)
4477 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4478 ecs->event_thread->step_stack_frame_id)
4479 && (!frame_id_eq (ecs->event_thread->step_stack_frame_id,
4480 outer_frame_id)
4481 || step_start_function != find_pc_function (stop_pc))))
4482 {
4483 CORE_ADDR real_stop_pc;
4484
4485 if (debug_infrun)
4486 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4487
4488 if ((ecs->event_thread->step_over_calls == STEP_OVER_NONE)
4489 || ((ecs->event_thread->step_range_end == 1)
4490 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4491 ecs->stop_func_start)))
4492 {
4493 /* I presume that step_over_calls is only 0 when we're
4494 supposed to be stepping at the assembly language level
4495 ("stepi"). Just stop. */
4496 /* Also, maybe we just did a "nexti" inside a prologue, so we
4497 thought it was a subroutine call but it was not. Stop as
4498 well. FENN */
4499 /* And this works the same backward as frontward. MVS */
4500 ecs->event_thread->stop_step = 1;
4501 print_stop_reason (END_STEPPING_RANGE, 0);
4502 stop_stepping (ecs);
4503 return;
4504 }
4505
4506 /* Reverse stepping through solib trampolines. */
4507
4508 if (execution_direction == EXEC_REVERSE
4509 && ecs->event_thread->step_over_calls != STEP_OVER_NONE
4510 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4511 || (ecs->stop_func_start == 0
4512 && in_solib_dynsym_resolve_code (stop_pc))))
4513 {
4514 /* Any solib trampoline code can be handled in reverse
4515 by simply continuing to single-step. We have already
4516 executed the solib function (backwards), and a few
4517 steps will take us back through the trampoline to the
4518 caller. */
4519 keep_going (ecs);
4520 return;
4521 }
4522
4523 if (ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4524 {
4525 /* We're doing a "next".
4526
4527 Normal (forward) execution: set a breakpoint at the
4528 callee's return address (the address at which the caller
4529 will resume).
4530
4531 Reverse (backward) execution. set the step-resume
4532 breakpoint at the start of the function that we just
4533 stepped into (backwards), and continue to there. When we
4534 get there, we'll need to single-step back to the caller. */
4535
4536 if (execution_direction == EXEC_REVERSE)
4537 {
4538 struct symtab_and_line sr_sal;
4539
4540 /* Normal function call return (static or dynamic). */
4541 init_sal (&sr_sal);
4542 sr_sal.pc = ecs->stop_func_start;
4543 sr_sal.pspace = get_frame_program_space (frame);
4544 insert_step_resume_breakpoint_at_sal (gdbarch,
4545 sr_sal, null_frame_id);
4546 }
4547 else
4548 insert_step_resume_breakpoint_at_caller (frame);
4549
4550 keep_going (ecs);
4551 return;
4552 }
4553
4554 /* If we are in a function call trampoline (a stub between the
4555 calling routine and the real function), locate the real
4556 function. That's what tells us (a) whether we want to step
4557 into it at all, and (b) what prologue we want to run to the
4558 end of, if we do step into it. */
4559 real_stop_pc = skip_language_trampoline (frame, stop_pc);
4560 if (real_stop_pc == 0)
4561 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4562 if (real_stop_pc != 0)
4563 ecs->stop_func_start = real_stop_pc;
4564
4565 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
4566 {
4567 struct symtab_and_line sr_sal;
4568
4569 init_sal (&sr_sal);
4570 sr_sal.pc = ecs->stop_func_start;
4571 sr_sal.pspace = get_frame_program_space (frame);
4572
4573 insert_step_resume_breakpoint_at_sal (gdbarch,
4574 sr_sal, null_frame_id);
4575 keep_going (ecs);
4576 return;
4577 }
4578
4579 /* If we have line number information for the function we are
4580 thinking of stepping into, step into it.
4581
4582 If there are several symtabs at that PC (e.g. with include
4583 files), just want to know whether *any* of them have line
4584 numbers. find_pc_line handles this. */
4585 {
4586 struct symtab_and_line tmp_sal;
4587
4588 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
4589 tmp_sal.pspace = get_frame_program_space (frame);
4590 if (tmp_sal.line != 0)
4591 {
4592 if (execution_direction == EXEC_REVERSE)
4593 handle_step_into_function_backward (gdbarch, ecs);
4594 else
4595 handle_step_into_function (gdbarch, ecs);
4596 return;
4597 }
4598 }
4599
4600 /* If we have no line number and the step-stop-if-no-debug is
4601 set, we stop the step so that the user has a chance to switch
4602 to stepping at the assembly level. */
4603 if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4604 && step_stop_if_no_debug)
4605 {
4606 ecs->event_thread->stop_step = 1;
4607 print_stop_reason (END_STEPPING_RANGE, 0);
4608 stop_stepping (ecs);
4609 return;
4610 }
4611
4612 if (execution_direction == EXEC_REVERSE)
4613 {
4614 /* Set a breakpoint at callee's start address.
4615 From there we can step once and be back in the caller. */
4616 struct symtab_and_line sr_sal;
4617
4618 init_sal (&sr_sal);
4619 sr_sal.pc = ecs->stop_func_start;
4620 sr_sal.pspace = get_frame_program_space (frame);
4621 insert_step_resume_breakpoint_at_sal (gdbarch,
4622 sr_sal, null_frame_id);
4623 }
4624 else
4625 /* Set a breakpoint at callee's return address (the address
4626 at which the caller will resume). */
4627 insert_step_resume_breakpoint_at_caller (frame);
4628
4629 keep_going (ecs);
4630 return;
4631 }
4632
4633 /* Reverse stepping through solib trampolines. */
4634
4635 if (execution_direction == EXEC_REVERSE
4636 && ecs->event_thread->step_over_calls != STEP_OVER_NONE)
4637 {
4638 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4639 || (ecs->stop_func_start == 0
4640 && in_solib_dynsym_resolve_code (stop_pc)))
4641 {
4642 /* Any solib trampoline code can be handled in reverse
4643 by simply continuing to single-step. We have already
4644 executed the solib function (backwards), and a few
4645 steps will take us back through the trampoline to the
4646 caller. */
4647 keep_going (ecs);
4648 return;
4649 }
4650 else if (in_solib_dynsym_resolve_code (stop_pc))
4651 {
4652 /* Stepped backward into the solib dynsym resolver.
4653 Set a breakpoint at its start and continue, then
4654 one more step will take us out. */
4655 struct symtab_and_line sr_sal;
4656
4657 init_sal (&sr_sal);
4658 sr_sal.pc = ecs->stop_func_start;
4659 sr_sal.pspace = get_frame_program_space (frame);
4660 insert_step_resume_breakpoint_at_sal (gdbarch,
4661 sr_sal, null_frame_id);
4662 keep_going (ecs);
4663 return;
4664 }
4665 }
4666
4667 /* If we're in the return path from a shared library trampoline,
4668 we want to proceed through the trampoline when stepping. */
4669 if (gdbarch_in_solib_return_trampoline (gdbarch,
4670 stop_pc, ecs->stop_func_name))
4671 {
4672 /* Determine where this trampoline returns. */
4673 CORE_ADDR real_stop_pc;
4674
4675 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4676
4677 if (debug_infrun)
4678 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into solib return tramp\n");
4679
4680 /* Only proceed through if we know where it's going. */
4681 if (real_stop_pc)
4682 {
4683 /* And put the step-breakpoint there and go until there. */
4684 struct symtab_and_line sr_sal;
4685
4686 init_sal (&sr_sal); /* initialize to zeroes */
4687 sr_sal.pc = real_stop_pc;
4688 sr_sal.section = find_pc_overlay (sr_sal.pc);
4689 sr_sal.pspace = get_frame_program_space (frame);
4690
4691 /* Do not specify what the fp should be when we stop since
4692 on some machines the prologue is where the new fp value
4693 is established. */
4694 insert_step_resume_breakpoint_at_sal (gdbarch,
4695 sr_sal, null_frame_id);
4696
4697 /* Restart without fiddling with the step ranges or
4698 other state. */
4699 keep_going (ecs);
4700 return;
4701 }
4702 }
4703
4704 stop_pc_sal = find_pc_line (stop_pc, 0);
4705
4706 /* NOTE: tausq/2004-05-24: This if block used to be done before all
4707 the trampoline processing logic, however, there are some trampolines
4708 that have no names, so we should do trampoline handling first. */
4709 if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4710 && ecs->stop_func_name == NULL
4711 && stop_pc_sal.line == 0)
4712 {
4713 if (debug_infrun)
4714 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into undebuggable function\n");
4715
4716 /* The inferior just stepped into, or returned to, an
4717 undebuggable function (where there is no debugging information
4718 and no line number corresponding to the address where the
4719 inferior stopped). Since we want to skip this kind of code,
4720 we keep going until the inferior returns from this
4721 function - unless the user has asked us not to (via
4722 set step-mode) or we no longer know how to get back
4723 to the call site. */
4724 if (step_stop_if_no_debug
4725 || !frame_id_p (frame_unwind_caller_id (frame)))
4726 {
4727 /* If we have no line number and the step-stop-if-no-debug
4728 is set, we stop the step so that the user has a chance to
4729 switch to stepping at the assembly level. */
4730 ecs->event_thread->stop_step = 1;
4731 print_stop_reason (END_STEPPING_RANGE, 0);
4732 stop_stepping (ecs);
4733 return;
4734 }
4735 else
4736 {
4737 /* Set a breakpoint at callee's return address (the address
4738 at which the caller will resume). */
4739 insert_step_resume_breakpoint_at_caller (frame);
4740 keep_going (ecs);
4741 return;
4742 }
4743 }
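/* The user-level switch for the branch above is "set step-mode on",
which sets step_stop_if_no_debug, e.g. (illustrative):

(gdb) set step-mode on
(gdb) step

now stops at the first instruction of the undebugged function. With
step-mode off (the default), and provided the caller can still be
unwound, GDB instead plants a step-resume breakpoint at the caller's
resume address and transparently skips the function. */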
4744
4745 if (ecs->event_thread->step_range_end == 1)
4746 {
4747 /* It is stepi or nexti. We always want to stop stepping after
4748 one instruction. */
4749 if (debug_infrun)
4750 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
4751 ecs->event_thread->stop_step = 1;
4752 print_stop_reason (END_STEPPING_RANGE, 0);
4753 stop_stepping (ecs);
4754 return;
4755 }
4756
4757 if (stop_pc_sal.line == 0)
4758 {
4759 /* We have no line number information. That means to stop
4760 stepping (does this always happen right after one instruction,
4761 when we do "s" in a function with no line numbers,
4762 or can this happen as a result of a return or longjmp?). */
4763 if (debug_infrun)
4764 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
4765 ecs->event_thread->stop_step = 1;
4766 print_stop_reason (END_STEPPING_RANGE, 0);
4767 stop_stepping (ecs);
4768 return;
4769 }
4770
4771 /* Look for "calls" to inlined functions, part one. If the inline
4772 frame machinery detected some skipped call sites, we have entered
4773 a new inline function. */
4774
4775 if (frame_id_eq (get_frame_id (get_current_frame ()),
4776 ecs->event_thread->step_frame_id)
4777 && inline_skipped_frames (ecs->ptid))
4778 {
4779 struct symtab_and_line call_sal;
4780
4781 if (debug_infrun)
4782 fprintf_unfiltered (gdb_stdlog,
4783 "infrun: stepped into inlined function\n");
4784
4785 find_frame_sal (get_current_frame (), &call_sal);
4786
4787 if (ecs->event_thread->step_over_calls != STEP_OVER_ALL)
4788 {
4789 /* For "step", we're going to stop. But if the call site
4790 for this inlined function is on the same source line as
4791 we were previously stepping, go down into the function
4792 first. Otherwise stop at the call site. */
4793
4794 if (call_sal.line == ecs->event_thread->current_line
4795 && call_sal.symtab == ecs->event_thread->current_symtab)
4796 step_into_inline_frame (ecs->ptid);
4797
4798 ecs->event_thread->stop_step = 1;
4799 print_stop_reason (END_STEPPING_RANGE, 0);
4800 stop_stepping (ecs);
4801 return;
4802 }
4803 else
4804 {
4805 /* For "next", we should stop at the call site if it is on a
4806 different source line. Otherwise continue through the
4807 inlined function. */
4808 if (call_sal.line == ecs->event_thread->current_line
4809 && call_sal.symtab == ecs->event_thread->current_symtab)
4810 keep_going (ecs);
4811 else
4812 {
4813 ecs->event_thread->stop_step = 1;
4814 print_stop_reason (END_STEPPING_RANGE, 0);
4815 stop_stepping (ecs);
4816 }
4817 return;
4818 }
4819 }
4820
4821 /* Look for "calls" to inlined functions, part two. If we are still
4822 in the same real function we were stepping through, but we have
4823 to go further up to find the exact frame ID, we are stepping
4824 through a more inlined call beyond its call site. */
4825
4826 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
4827 && !frame_id_eq (get_frame_id (get_current_frame ()),
4828 ecs->event_thread->step_frame_id)
4829 && stepped_in_from (get_current_frame (),
4830 ecs->event_thread->step_frame_id))
4831 {
4832 if (debug_infrun)
4833 fprintf_unfiltered (gdb_stdlog,
4834 "infrun: stepping through inlined function\n");
4835
4836 if (ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4837 keep_going (ecs);
4838 else
4839 {
4840 ecs->event_thread->stop_step = 1;
4841 print_stop_reason (END_STEPPING_RANGE, 0);
4842 stop_stepping (ecs);
4843 }
4844 return;
4845 }
4846
4847 if ((stop_pc == stop_pc_sal.pc)
4848 && (ecs->event_thread->current_line != stop_pc_sal.line
4849 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
4850 {
4851 /* We are at the start of a different line. So stop. Note that
4852 we don't stop if we step into the middle of a different line.
4853 That is said to make things like for (;;) statements work
4854 better. */
4855 if (debug_infrun)
4856 fprintf_unfiltered (gdb_stdlog, "infrun: stepped to a different line\n");
4857 ecs->event_thread->stop_step = 1;
4858 print_stop_reason (END_STEPPING_RANGE, 0);
4859 stop_stepping (ecs);
4860 return;
4861 }
4862
4863 /* We aren't done stepping.
4864
4865 Optimize by setting the stepping range to the line.
4866 (We might not be in the original line, but if we entered a
4867 new line in mid-statement, we continue stepping. This makes
4868 things like for(;;) statements work better.) */
4869
4870 ecs->event_thread->step_range_start = stop_pc_sal.pc;
4871 ecs->event_thread->step_range_end = stop_pc_sal.end;
4872 set_step_info (frame, stop_pc_sal);
4873
4874 if (debug_infrun)
4875 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
4876 keep_going (ecs);
4877 }
4878
4879 /* Is thread TP in the middle of single-stepping? */
4880
4881 static int
4882 currently_stepping (struct thread_info *tp)
4883 {
4884 return ((tp->step_range_end && tp->step_resume_breakpoint == NULL)
4885 || tp->trap_expected
4886 || tp->stepping_through_solib_after_catch
4887 || bpstat_should_step ());
4888 }
4889
4890 /* Returns true if any thread *but* the one passed in "data" is in the
4891 middle of stepping or of handling a "next". */
4892
4893 static int
4894 currently_stepping_or_nexting_callback (struct thread_info *tp, void *data)
4895 {
4896 if (tp == data)
4897 return 0;
4898
4899 return (tp->step_range_end
4900 || tp->trap_expected
4901 || tp->stepping_through_solib_after_catch);
4902 }
4903
4904 /* Inferior has stepped into a subroutine call with source code that
4905 we should not step over. Do step to the first line of code in
4906 it. */
4907
4908 static void
4909 handle_step_into_function (struct gdbarch *gdbarch,
4910 struct execution_control_state *ecs)
4911 {
4912 struct symtab *s;
4913 struct symtab_and_line stop_func_sal, sr_sal;
4914
4915 s = find_pc_symtab (stop_pc);
4916 if (s && s->language != language_asm)
4917 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
4918 ecs->stop_func_start);
4919
4920 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
4921 /* Use the step_resume_break to step until the end of the prologue,
4922 even if that involves jumps (as it seems to on the vax under
4923 4.2). */
4924 /* If the prologue ends in the middle of a source line, continue to
4925 the end of that source line (if it is still within the function).
4926 Otherwise, just go to end of prologue. */
4927 if (stop_func_sal.end
4928 && stop_func_sal.pc != ecs->stop_func_start
4929 && stop_func_sal.end < ecs->stop_func_end)
4930 ecs->stop_func_start = stop_func_sal.end;
4931
4932 /* Architectures which require breakpoint adjustment might not be able
4933 to place a breakpoint at the computed address. If so, the test
4934 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
4935 ecs->stop_func_start to an address at which a breakpoint may be
4936 legitimately placed.
4937
4938 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
4939 made, GDB will enter an infinite loop when stepping through
4940 optimized code consisting of VLIW instructions which contain
4941 subinstructions corresponding to different source lines. On
4942 FR-V, it's not permitted to place a breakpoint on any but the
4943 first subinstruction of a VLIW instruction. When a breakpoint is
4944 set, GDB will adjust the breakpoint address to the beginning of
4945 the VLIW instruction. Thus, we need to make the corresponding
4946 adjustment here when computing the stop address. */
4947
4948 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
4949 {
4950 ecs->stop_func_start
4951 = gdbarch_adjust_breakpoint_address (gdbarch,
4952 ecs->stop_func_start);
4953 }
4954
4955 if (ecs->stop_func_start == stop_pc)
4956 {
4957 /* We are already there: stop now. */
4958 ecs->event_thread->stop_step = 1;
4959 print_stop_reason (END_STEPPING_RANGE, 0);
4960 stop_stepping (ecs);
4961 return;
4962 }
4963 else
4964 {
4965 /* Put the step-breakpoint there and go until there. */
4966 init_sal (&sr_sal); /* initialize to zeroes */
4967 sr_sal.pc = ecs->stop_func_start;
4968 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
4969 sr_sal.pspace = get_frame_program_space (get_current_frame ());
4970
4971 /* Do not specify what the fp should be when we stop since on
4972 some machines the prologue is where the new fp value is
4973 established. */
4974 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
4975
4976 /* And make sure stepping stops right away then. */
4977 ecs->event_thread->step_range_end = ecs->event_thread->step_range_start;
4978 }
4979 keep_going (ecs);
4980 }
4981
4982 /* Inferior has stepped backward into a subroutine call with source
4983 code that we should not step over. Do step to the beginning of the
4984 last line of code in it. */
4985
4986 static void
4987 handle_step_into_function_backward (struct gdbarch *gdbarch,
4988 struct execution_control_state *ecs)
4989 {
4990 struct symtab *s;
4991 struct symtab_and_line stop_func_sal;
4992
4993 s = find_pc_symtab (stop_pc);
4994 if (s && s->language != language_asm)
4995 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
4996 ecs->stop_func_start);
4997
4998 stop_func_sal = find_pc_line (stop_pc, 0);
4999
5000 /* OK, we're just going to keep stepping here. */
5001 if (stop_func_sal.pc == stop_pc)
5002 {
5003 /* We're there already. Just stop stepping now. */
5004 ecs->event_thread->stop_step = 1;
5005 print_stop_reason (END_STEPPING_RANGE, 0);
5006 stop_stepping (ecs);
5007 }
5008 else
5009 {
5010 /* Else just reset the step range and keep going.
5011 No step-resume breakpoint, they don't work for
5012 epilogues, which can have multiple entry paths. */
5013 ecs->event_thread->step_range_start = stop_func_sal.pc;
5014 ecs->event_thread->step_range_end = stop_func_sal.end;
5015 keep_going (ecs);
5016 }
5017 return;
5018 }
5019
5020 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
5021 This is used both to skip over functions and to skip over other code. */
5022
5023 static void
5024 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
5025 struct symtab_and_line sr_sal,
5026 struct frame_id sr_id)
5027 {
5028 /* There should never be more than one step-resume or longjmp-resume
5029 breakpoint per thread, so we should never be setting a new
5030 step_resume_breakpoint when one is already active. */
5031 gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL);
5032
5033 if (debug_infrun)
5034 fprintf_unfiltered (gdb_stdlog,
5035 "infrun: inserting step-resume breakpoint at %s\n",
5036 paddress (gdbarch, sr_sal.pc));
5037
5038 inferior_thread ()->step_resume_breakpoint
5039 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, bp_step_resume);
5040 }
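
/* A minimal usage sketch, following the pattern used by callers in this
   file (e.g. handle_step_into_function): build a symtab_and_line for the
   resume address and let the breakpoint carry the stepping thread past it.
   GDBARCH and SOME_PC stand in for values the caller already has.

     struct symtab_and_line sr_sal;

     init_sal (&sr_sal);
     sr_sal.pc = some_pc;
     sr_sal.section = find_pc_overlay (some_pc);
     sr_sal.pspace = get_frame_program_space (get_current_frame ());
     insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);  */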
5041
5042 /* Insert a "step-resume breakpoint" at RETURN_FRAME.pc. This is used
5043 to skip a potential signal handler.
5044
5045 This is called with the interrupted function's frame. The signal
5046 handler, when it returns, will resume the interrupted function at
5047 RETURN_FRAME.pc. */
5048
5049 static void
5050 insert_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
5051 {
5052 struct symtab_and_line sr_sal;
5053 struct gdbarch *gdbarch;
5054
5055 gdb_assert (return_frame != NULL);
5056 init_sal (&sr_sal); /* initialize to zeros */
5057
5058 gdbarch = get_frame_arch (return_frame);
5059 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
5060 sr_sal.section = find_pc_overlay (sr_sal.pc);
5061 sr_sal.pspace = get_frame_program_space (return_frame);
5062
5063 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5064 get_stack_frame_id (return_frame));
5065 }
5066
5067 /* Similar to insert_step_resume_breakpoint_at_frame, except that it
5068 inserts the breakpoint at the previous frame's PC. This is used to
5069 skip a function after stepping into it (for "next" or if the called
5070 function has no debugging information).
5071
5072 The current function has almost always been reached by single
5073 stepping a call or return instruction. NEXT_FRAME belongs to the
5074 current function, and the breakpoint will be set at the caller's
5075 resume address.
5076
5077 This is a separate function rather than reusing
5078 insert_step_resume_breakpoint_at_frame in order to avoid
5079 get_prev_frame, which may stop prematurely (see the implementation
5080 of frame_unwind_caller_id for an example). */
5081
5082 static void
5083 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
5084 {
5085 struct symtab_and_line sr_sal;
5086 struct gdbarch *gdbarch;
5087
5088 /* We shouldn't have gotten here if we don't know where the call site
5089 is. */
5090 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
5091
5092 init_sal (&sr_sal); /* initialize to zeros */
5093
5094 gdbarch = frame_unwind_caller_arch (next_frame);
5095 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
5096 frame_unwind_caller_pc (next_frame));
5097 sr_sal.section = find_pc_overlay (sr_sal.pc);
5098 sr_sal.pspace = frame_unwind_program_space (next_frame);
5099
5100 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5101 frame_unwind_caller_id (next_frame));
5102 }
5103
5104 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
5105 new breakpoint at the target of a jmp_buf. The handling of
5106 longjmp-resume uses the same mechanisms used for handling
5107 "step-resume" breakpoints. */
5108
5109 static void
5110 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
5111 {
5112 /* There should never be more than one step-resume or longjmp-resume
5113 breakpoint per thread, so we should never be setting a new
5114 longjmp_resume_breakpoint when one is already active. */
5115 gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL);
5116
5117 if (debug_infrun)
5118 fprintf_unfiltered (gdb_stdlog,
5119 "infrun: inserting longjmp-resume breakpoint at %s\n",
5120 paddress (gdbarch, pc));
5121
5122 inferior_thread ()->step_resume_breakpoint =
5123 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
5124 }
5125
5126 static void
5127 stop_stepping (struct execution_control_state *ecs)
5128 {
5129 if (debug_infrun)
5130 fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n");
5131
5132 /* Let callers know we don't want to wait for the inferior anymore. */
5133 ecs->wait_some_more = 0;
5134 }
5135
5136 /* This function handles various cases where we need to continue
5137 waiting for the inferior. */
5138 /* (Used to be the keep_going: label in the old wait_for_inferior) */
5139
5140 static void
5141 keep_going (struct execution_control_state *ecs)
5142 {
5143 /* Make sure normal_stop is called if we get a QUIT handled before
5144 reaching resume. */
5145 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
5146
5147 /* Save the pc before execution, to compare with pc after stop. */
5148 ecs->event_thread->prev_pc
5149 = regcache_read_pc (get_thread_regcache (ecs->ptid));
5150
5151 /* If we did not break out above, we should keep running the
5152 inferior and not return to the debugger. */
5153
5154 if (ecs->event_thread->trap_expected
5155 && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP)
5156 {
5157 /* We took a signal (which we are supposed to pass through to
5158 the inferior, else we'd not get here) and we haven't yet
5159 gotten our trap. Simply continue. */
5160
5161 discard_cleanups (old_cleanups);
5162 resume (currently_stepping (ecs->event_thread),
5163 ecs->event_thread->stop_signal);
5164 }
5165 else
5166 {
5167 /* Either the trap was not expected, but we are continuing
5168 anyway (the user asked that this signal be passed to the
5169 child)
5170 -- or --
5171 The signal was SIGTRAP, e.g. it was our signal, but we
5172 decided we should resume from it.
5173
5174 We're going to run this baby now!
5175
5176 Note that insert_breakpoints won't try to re-insert
5177 already inserted breakpoints. Therefore, we don't
5178 care if breakpoints were already inserted, or not. */
5179
5180 if (ecs->event_thread->stepping_over_breakpoint)
5181 {
5182 struct regcache *thread_regcache = get_thread_regcache (ecs->ptid);
5183
5184 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
5185 /* Since we can't do a displaced step, we have to remove
5186 the breakpoint while we step it. To keep things
5187 simple, we remove them all. */
5188 remove_breakpoints ();
5189 }
5190 else
5191 {
5192 struct gdb_exception e;
5193
5194 /* Stop stepping when inserting breakpoints
5195 has failed. */
5196 TRY_CATCH (e, RETURN_MASK_ERROR)
5197 {
5198 insert_breakpoints ();
5199 }
5200 if (e.reason < 0)
5201 {
5202 exception_print (gdb_stderr, e);
5203 stop_stepping (ecs);
5204 return;
5205 }
5206 }
5207
5208 ecs->event_thread->trap_expected = ecs->event_thread->stepping_over_breakpoint;
5209
5210 /* Do not deliver SIGNAL_TRAP (except when the user explicitly
5211 specifies that such a signal should be delivered to the
5212 target program).
5213
5214 Typically, this would occur when a user is debugging a
5215 target monitor on a simulator: the target monitor sets a
5216 breakpoint; the simulator encounters this breakpoint and
5217 halts the simulation, handing control to GDB; GDB, noting
5218 that the breakpoint isn't valid, returns control back to the
5219 simulator; the simulator then delivers the hardware
5220 equivalent of a SIGNAL_TRAP to the program being debugged. */
5221
5222 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
5223 && !signal_program[ecs->event_thread->stop_signal])
5224 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
5225
5226 discard_cleanups (old_cleanups);
5227 resume (currently_stepping (ecs->event_thread),
5228 ecs->event_thread->stop_signal);
5229 }
5230
5231 prepare_to_wait (ecs);
5232 }
5233
5234 /* This function normally comes after a resume, before
5235 handle_inferior_event exits. It takes care of any last bits of
5236 housekeeping, and sets the all-important wait_some_more flag. */
5237
5238 static void
5239 prepare_to_wait (struct execution_control_state *ecs)
5240 {
5241 if (debug_infrun)
5242 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
5243
5244 /* This is the old end of the while loop. Let everybody know we
5245 want to wait for the inferior some more and get called again
5246 soon. */
5247 ecs->wait_some_more = 1;
5248 }
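
/* A minimal sketch of how the WAIT_SOME_MORE flag is consumed, assuming a
   wait_for_inferior-style driver loop around handle_inferior_event:

     struct execution_control_state ecs;

     do
       {
         ... wait for a target event and fill in ECS ...
         handle_inferior_event (&ecs);
       }
     while (ecs.wait_some_more);

   keep_going and prepare_to_wait set the flag; stop_stepping clears it.  */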
5249
5250 /* Print why the inferior has stopped. We always print something when
5251 the inferior exits or receives a signal. The rest of the cases are
5252 dealt with later on in normal_stop() and print_it_typical(). Ideally
5253 there should be a call to this function from handle_inferior_event()
5254 each time stop_stepping() is called. */
5255 static void
5256 print_stop_reason (enum inferior_stop_reason stop_reason, int stop_info)
5257 {
5258 switch (stop_reason)
5259 {
5260 case END_STEPPING_RANGE:
5261 /* We are done with a step/next/si/ni command. */
5262 /* For now print nothing. */
5263 /* Print a message only if not in the middle of doing a "step n"
5264 operation for n > 1 */
5265 if (!inferior_thread ()->step_multi
5266 || !inferior_thread ()->stop_step)
5267 if (ui_out_is_mi_like_p (uiout))
5268 ui_out_field_string
5269 (uiout, "reason",
5270 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
5271 break;
5272 case SIGNAL_EXITED:
5273 /* The inferior was terminated by a signal. */
5274 annotate_signalled ();
5275 if (ui_out_is_mi_like_p (uiout))
5276 ui_out_field_string
5277 (uiout, "reason",
5278 async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
5279 ui_out_text (uiout, "\nProgram terminated with signal ");
5280 annotate_signal_name ();
5281 ui_out_field_string (uiout, "signal-name",
5282 target_signal_to_name (stop_info));
5283 annotate_signal_name_end ();
5284 ui_out_text (uiout, ", ");
5285 annotate_signal_string ();
5286 ui_out_field_string (uiout, "signal-meaning",
5287 target_signal_to_string (stop_info));
5288 annotate_signal_string_end ();
5289 ui_out_text (uiout, ".\n");
5290 ui_out_text (uiout, "The program no longer exists.\n");
5291 break;
5292 case EXITED:
5293 /* The inferior program is finished. */
5294 annotate_exited (stop_info);
5295 if (stop_info)
5296 {
5297 if (ui_out_is_mi_like_p (uiout))
5298 ui_out_field_string (uiout, "reason",
5299 async_reason_lookup (EXEC_ASYNC_EXITED));
5300 ui_out_text (uiout, "\nProgram exited with code ");
5301 ui_out_field_fmt (uiout, "exit-code", "0%o",
5302 (unsigned int) stop_info);
5303 ui_out_text (uiout, ".\n");
5304 }
5305 else
5306 {
5307 if (ui_out_is_mi_like_p (uiout))
5308 ui_out_field_string
5309 (uiout, "reason",
5310 async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
5311 ui_out_text (uiout, "\nProgram exited normally.\n");
5312 }
5313 /* Support the --return-child-result option. */
5314 return_child_result_value = stop_info;
5315 break;
5316 case SIGNAL_RECEIVED:
5317 /* Signal received. The signal table tells us to print about
5318 it. */
5319 annotate_signal ();
5320
5321 if (stop_info == TARGET_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
5322 {
5323 struct thread_info *t = inferior_thread ();
5324
5325 ui_out_text (uiout, "\n[");
5326 ui_out_field_string (uiout, "thread-name",
5327 target_pid_to_str (t->ptid));
5328 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
5329 ui_out_text (uiout, " stopped");
5330 }
5331 else
5332 {
5333 ui_out_text (uiout, "\nProgram received signal ");
5334 annotate_signal_name ();
5335 if (ui_out_is_mi_like_p (uiout))
5336 ui_out_field_string
5337 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
5338 ui_out_field_string (uiout, "signal-name",
5339 target_signal_to_name (stop_info));
5340 annotate_signal_name_end ();
5341 ui_out_text (uiout, ", ");
5342 annotate_signal_string ();
5343 ui_out_field_string (uiout, "signal-meaning",
5344 target_signal_to_string (stop_info));
5345 annotate_signal_string_end ();
5346 }
5347 ui_out_text (uiout, ".\n");
5348 break;
5349 case NO_HISTORY:
5350 /* Reverse execution: target ran out of history info. */
5351 ui_out_text (uiout, "\nNo more reverse-execution history.\n");
5352 break;
5353 default:
5354 internal_error (__FILE__, __LINE__,
5355 _("print_stop_reason: unrecognized enum value"));
5356 break;
5357 }
5358 }
5359 \f
5360
5361 /* Here to return control to GDB when the inferior stops for real.
5362 Print appropriate messages, remove breakpoints, give terminal our modes.
5363
5364 STOP_PRINT_FRAME nonzero means print the executing frame
5365 (pc, function, args, file, line number and line text).
5366 BREAKPOINTS_FAILED nonzero means stop was due to error
5367 attempting to insert breakpoints. */
5368
5369 void
5370 normal_stop (void)
5371 {
5372 struct target_waitstatus last;
5373 ptid_t last_ptid;
5374 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
5375
5376 get_last_target_status (&last_ptid, &last);
5377
5378 /* If an exception is thrown from this point on, make sure to
5379 propagate GDB's knowledge of the executing state to the
5380 frontend/user running state. A QUIT is an easy exception to see
5381 here, so do this before any filtered output. */
5382 if (!non_stop)
5383 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
5384 else if (last.kind != TARGET_WAITKIND_SIGNALLED
5385 && last.kind != TARGET_WAITKIND_EXITED)
5386 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
5387
5388 /* In non-stop mode, we don't want GDB to switch threads behind the
5389 user's back, to avoid races where the user is typing a command to
5390 apply to thread x, but GDB switches to thread y before the user
5391 finishes entering the command. */
5392
5393 /* As with the notification of thread events, we want to delay
5394 notifying the user that we've switched thread context until
5395 the inferior actually stops.
5396
5397 There's no point in saying anything if the inferior has exited.
5398 Note that SIGNALLED here means "exited with a signal", not
5399 "received a signal". */
5400 if (!non_stop
5401 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
5402 && target_has_execution
5403 && last.kind != TARGET_WAITKIND_SIGNALLED
5404 && last.kind != TARGET_WAITKIND_EXITED)
5405 {
5406 target_terminal_ours_for_output ();
5407 printf_filtered (_("[Switching to %s]\n"),
5408 target_pid_to_str (inferior_ptid));
5409 annotate_thread_changed ();
5410 previous_inferior_ptid = inferior_ptid;
5411 }
5412
5413 if (!breakpoints_always_inserted_mode () && target_has_execution)
5414 {
5415 if (remove_breakpoints ())
5416 {
5417 target_terminal_ours_for_output ();
5418 printf_filtered (_("\
5419 Cannot remove breakpoints because program is no longer writable.\n\
5420 Further execution is probably impossible.\n"));
5421 }
5422 }
5423
5424 /* If an auto-display called a function and that got a signal,
5425 delete that auto-display to avoid an infinite recursion. */
5426
5427 if (stopped_by_random_signal)
5428 disable_current_display ();
5429
5430 /* Don't print a message if in the middle of doing a "step n"
5431 operation for n > 1 */
5432 if (target_has_execution
5433 && last.kind != TARGET_WAITKIND_SIGNALLED
5434 && last.kind != TARGET_WAITKIND_EXITED
5435 && inferior_thread ()->step_multi
5436 && inferior_thread ()->stop_step)
5437 goto done;
5438
5439 target_terminal_ours ();
5440
5441 /* Set the current source location. This will also happen if we
5442 display the frame below, but the current SAL will be incorrect
5443 during a user hook-stop function. */
5444 if (has_stack_frames () && !stop_stack_dummy)
5445 set_current_sal_from_frame (get_current_frame (), 1);
5446
5447 /* Let the user/frontend see the threads as stopped. */
5448 do_cleanups (old_chain);
5449
5450 /* Look up the hook_stop and run it (CLI internally handles problem
5451 of stop_command's pre-hook not existing). */
5452 if (stop_command)
5453 catch_errors (hook_stop_stub, stop_command,
5454 "Error while running hook_stop:\n", RETURN_MASK_ALL);
5455
5456 if (!has_stack_frames ())
5457 goto done;
5458
5459 if (last.kind == TARGET_WAITKIND_SIGNALLED
5460 || last.kind == TARGET_WAITKIND_EXITED)
5461 goto done;
5462
5463 /* Select innermost stack frame - i.e., current frame is frame 0,
5464 and current location is based on that.
5465 Don't do this on return from a stack dummy routine,
5466 or if the program has exited. */
5467
5468 if (!stop_stack_dummy)
5469 {
5470 select_frame (get_current_frame ());
5471
5472 /* Print current location without a level number, if
5473 we have changed functions or hit a breakpoint.
5474 Print source line if we have one.
5475 bpstat_print() contains the logic deciding in detail
5476 what to print, based on the event(s) that just occurred. */
5477
5478 /* If --batch-silent is enabled then there's no need to print the current
5479 source location, and trying to do so risks causing an error message
5480 about missing source files. */
5481 if (stop_print_frame && !batch_silent)
5482 {
5483 int bpstat_ret;
5484 int source_flag;
5485 int do_frame_printing = 1;
5486 struct thread_info *tp = inferior_thread ();
5487
5488 bpstat_ret = bpstat_print (tp->stop_bpstat);
5489 switch (bpstat_ret)
5490 {
5491 case PRINT_UNKNOWN:
5492 /* If we had hit a shared library event breakpoint,
5493 bpstat_print would print out this message. If we hit
5494 an OS-level shared library event, do the same
5495 thing. */
5496 if (last.kind == TARGET_WAITKIND_LOADED)
5497 {
5498 printf_filtered (_("Stopped due to shared library event\n"));
5499 source_flag = SRC_LINE; /* something bogus */
5500 do_frame_printing = 0;
5501 break;
5502 }
5503
5504 /* FIXME: cagney/2002-12-01: Given that a frame ID does
5505 (or should) carry around the function and does (or
5506 should) use that when doing a frame comparison. */
5507 if (tp->stop_step
5508 && frame_id_eq (tp->step_frame_id,
5509 get_frame_id (get_current_frame ()))
5510 && step_start_function == find_pc_function (stop_pc))
5511 source_flag = SRC_LINE; /* finished step, just print source line */
5512 else
5513 source_flag = SRC_AND_LOC; /* print location and source line */
5514 break;
5515 case PRINT_SRC_AND_LOC:
5516 source_flag = SRC_AND_LOC; /* print location and source line */
5517 break;
5518 case PRINT_SRC_ONLY:
5519 source_flag = SRC_LINE;
5520 break;
5521 case PRINT_NOTHING:
5522 source_flag = SRC_LINE; /* something bogus */
5523 do_frame_printing = 0;
5524 break;
5525 default:
5526 internal_error (__FILE__, __LINE__, _("Unknown value."));
5527 }
5528
5529 /* The behavior of this routine with respect to the source
5530 flag is:
5531 SRC_LINE: Print only source line
5532 LOCATION: Print only location
5533 SRC_AND_LOC: Print location and source line */
5534 if (do_frame_printing)
5535 print_stack_frame (get_selected_frame (NULL), 0, source_flag);
5536
5537 /* Display the auto-display expressions. */
5538 do_displays ();
5539 }
5540 }
5541
5542 /* Save the function value return registers, if we care.
5543 We might be about to restore their previous contents. */
5544 if (inferior_thread ()->proceed_to_finish)
5545 {
5546 /* This should not be necessary. */
5547 if (stop_registers)
5548 regcache_xfree (stop_registers);
5549
5550 /* NB: The copy goes through to the target picking up the value of
5551 all the registers. */
5552 stop_registers = regcache_dup (get_current_regcache ());
5553 }
5554
5555 if (stop_stack_dummy == STOP_STACK_DUMMY)
5556 {
5557 /* Pop the empty frame that contains the stack dummy.
5558 This also restores inferior state prior to the call
5559 (struct inferior_thread_state). */
5560 struct frame_info *frame = get_current_frame ();
5561
5562 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
5563 frame_pop (frame);
5564 /* frame_pop() calls reinit_frame_cache as the last thing it does
5565 which means there's currently no selected frame. We don't need
5566 to re-establish a selected frame if the dummy call returns normally,
5567 that will be done by restore_inferior_status. However, we do have
5568 to handle the case where the dummy call is returning after being
5569 stopped (e.g. the dummy call previously hit a breakpoint). We
5570 can't know which case we have so just always re-establish a
5571 selected frame here. */
5572 select_frame (get_current_frame ());
5573 }
5574
5575 done:
5576 annotate_stopped ();
5577
5578 /* Suppress the stop observer if we're in the middle of:
5579
5580 - a step n (n > 1), as there are still more steps to be done.
5581
5582 - a "finish" command, as the observer will be called in
5583 finish_command_continuation, so it can include the inferior
5584 function's return value.
5585
5586 - calling an inferior function, as we pretend the inferior didn't
5587 run at all. The return value of the call is handled by the
5588 expression evaluator, through call_function_by_hand. */
5589
5590 if (!target_has_execution
5591 || last.kind == TARGET_WAITKIND_SIGNALLED
5592 || last.kind == TARGET_WAITKIND_EXITED
5593 || (!inferior_thread ()->step_multi
5594 && !(inferior_thread ()->stop_bpstat
5595 && inferior_thread ()->proceed_to_finish)
5596 && !inferior_thread ()->in_infcall))
5597 {
5598 if (!ptid_equal (inferior_ptid, null_ptid))
5599 observer_notify_normal_stop (inferior_thread ()->stop_bpstat,
5600 stop_print_frame);
5601 else
5602 observer_notify_normal_stop (NULL, stop_print_frame);
5603 }
5604
5605 if (target_has_execution)
5606 {
5607 if (last.kind != TARGET_WAITKIND_SIGNALLED
5608 && last.kind != TARGET_WAITKIND_EXITED)
5609 /* Delete the breakpoint we stopped at, if it wants to be deleted.
5610 Delete any breakpoint that is to be deleted at the next stop. */
5611 breakpoint_auto_delete (inferior_thread ()->stop_bpstat);
5612 }
5613
5614 /* Try to get rid of automatically added inferiors that are no
5615 longer needed. Keeping those around slows down things linearly.
5616 Note that this never removes the current inferior. */
5617 prune_inferiors ();
5618 }
5619
5620 static int
5621 hook_stop_stub (void *cmd)
5622 {
5623 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
5624 return (0);
5625 }
5626 \f
5627 int
5628 signal_stop_state (int signo)
5629 {
5630 return signal_stop[signo];
5631 }
5632
5633 int
5634 signal_print_state (int signo)
5635 {
5636 return signal_print[signo];
5637 }
5638
5639 int
5640 signal_pass_state (int signo)
5641 {
5642 return signal_program[signo];
5643 }
5644
5645 int
5646 signal_stop_update (int signo, int state)
5647 {
5648 int ret = signal_stop[signo];
5649
5650 signal_stop[signo] = state;
5651 return ret;
5652 }
5653
5654 int
5655 signal_print_update (int signo, int state)
5656 {
5657 int ret = signal_print[signo];
5658
5659 signal_print[signo] = state;
5660 return ret;
5661 }
5662
5663 int
5664 signal_pass_update (int signo, int state)
5665 {
5666 int ret = signal_program[signo];
5667
5668 signal_program[signo] = state;
5669 return ret;
5670 }
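
/* A minimal sketch, assuming SIGNO holds a valid target_signal number:
   the *_update routines return the previous setting, so a caller can
   change a disposition temporarily and put it back afterwards.

     int old_pass = signal_pass_update (signo, 0);
     int old_stop = signal_stop_update (signo, 1);
     ... resume the inferior ...
     signal_stop_update (signo, old_stop);
     signal_pass_update (signo, old_pass);  */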
5671
5672 static void
5673 sig_print_header (void)
5674 {
5675 printf_filtered (_("\
5676 Signal Stop\tPrint\tPass to program\tDescription\n"));
5677 }
5678
5679 static void
5680 sig_print_info (enum target_signal oursig)
5681 {
5682 const char *name = target_signal_to_name (oursig);
5683 int name_padding = 13 - strlen (name);
5684
5685 if (name_padding <= 0)
5686 name_padding = 0;
5687
5688 printf_filtered ("%s", name);
5689 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
5690 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
5691 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
5692 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
5693 printf_filtered ("%s\n", target_signal_to_string (oursig));
5694 }
5695
5696 /* Specify how various signals in the inferior should be handled. */
5697
5698 static void
5699 handle_command (char *args, int from_tty)
5700 {
5701 char **argv;
5702 int digits, wordlen;
5703 int sigfirst, signum, siglast;
5704 enum target_signal oursig;
5705 int allsigs;
5706 int nsigs;
5707 unsigned char *sigs;
5708 struct cleanup *old_chain;
5709
5710 if (args == NULL)
5711 {
5712 error_no_arg (_("signal to handle"));
5713 }
5714
5715 /* Allocate and zero an array of flags for which signals to handle. */
5716
5717 nsigs = (int) TARGET_SIGNAL_LAST;
5718 sigs = (unsigned char *) alloca (nsigs);
5719 memset (sigs, 0, nsigs);
5720
5721 /* Break the command line up into args. */
5722
5723 argv = gdb_buildargv (args);
5724 old_chain = make_cleanup_freeargv (argv);
5725
5726 /* Walk through the args, looking for signal oursigs, signal names, and
5727 actions. Signal numbers and signal names may be interspersed with
5728 actions, with the actions being performed for all signals cumulatively
5729 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
5730
5731 while (*argv != NULL)
5732 {
5733 wordlen = strlen (*argv);
5734 for (digits = 0; isdigit ((*argv)[digits]); digits++)
5735 {;
5736 }
5737 allsigs = 0;
5738 sigfirst = siglast = -1;
5739
5740 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
5741 {
5742 /* Apply action to all signals except those used by the
5743 debugger. Silently skip those. */
5744 allsigs = 1;
5745 sigfirst = 0;
5746 siglast = nsigs - 1;
5747 }
5748 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
5749 {
5750 SET_SIGS (nsigs, sigs, signal_stop);
5751 SET_SIGS (nsigs, sigs, signal_print);
5752 }
5753 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
5754 {
5755 UNSET_SIGS (nsigs, sigs, signal_program);
5756 }
5757 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
5758 {
5759 SET_SIGS (nsigs, sigs, signal_print);
5760 }
5761 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
5762 {
5763 SET_SIGS (nsigs, sigs, signal_program);
5764 }
5765 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
5766 {
5767 UNSET_SIGS (nsigs, sigs, signal_stop);
5768 }
5769 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
5770 {
5771 SET_SIGS (nsigs, sigs, signal_program);
5772 }
5773 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
5774 {
5775 UNSET_SIGS (nsigs, sigs, signal_print);
5776 UNSET_SIGS (nsigs, sigs, signal_stop);
5777 }
5778 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
5779 {
5780 UNSET_SIGS (nsigs, sigs, signal_program);
5781 }
5782 else if (digits > 0)
5783 {
5784 /* It is numeric. The numeric signal refers to our own
5785 internal signal numbering from target.h, not to host/target
5786 signal number. This is a feature; users really should be
5787 using symbolic names anyway, and the common ones like
5788 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
5789
5790 sigfirst = siglast = (int)
5791 target_signal_from_command (atoi (*argv));
5792 if ((*argv)[digits] == '-')
5793 {
5794 siglast = (int)
5795 target_signal_from_command (atoi ((*argv) + digits + 1));
5796 }
5797 if (sigfirst > siglast)
5798 {
5799 /* Bet he didn't figure we'd think of this case... */
5800 signum = sigfirst;
5801 sigfirst = siglast;
5802 siglast = signum;
5803 }
5804 }
5805 else
5806 {
5807 oursig = target_signal_from_name (*argv);
5808 if (oursig != TARGET_SIGNAL_UNKNOWN)
5809 {
5810 sigfirst = siglast = (int) oursig;
5811 }
5812 else
5813 {
5814 /* Not a number and not a recognized flag word => complain. */
5815 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
5816 }
5817 }
5818
5819 /* If any signal numbers or symbol names were found, set flags for
5820 which signals to apply actions to. */
5821
5822 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
5823 {
5824 switch ((enum target_signal) signum)
5825 {
5826 case TARGET_SIGNAL_TRAP:
5827 case TARGET_SIGNAL_INT:
5828 if (!allsigs && !sigs[signum])
5829 {
5830 if (query (_("%s is used by the debugger.\n\
5831 Are you sure you want to change it? "), target_signal_to_name ((enum target_signal) signum)))
5832 {
5833 sigs[signum] = 1;
5834 }
5835 else
5836 {
5837 printf_unfiltered (_("Not confirmed, unchanged.\n"));
5838 gdb_flush (gdb_stdout);
5839 }
5840 }
5841 break;
5842 case TARGET_SIGNAL_0:
5843 case TARGET_SIGNAL_DEFAULT:
5844 case TARGET_SIGNAL_UNKNOWN:
5845 /* Make sure that "all" doesn't print these. */
5846 break;
5847 default:
5848 sigs[signum] = 1;
5849 break;
5850 }
5851 }
5852
5853 argv++;
5854 }
5855
5856 for (signum = 0; signum < nsigs; signum++)
5857 if (sigs[signum])
5858 {
5859 target_notice_signals (inferior_ptid);
5860
5861 if (from_tty)
5862 {
5863 /* Show the results. */
5864 sig_print_header ();
5865 for (; signum < nsigs; signum++)
5866 if (sigs[signum])
5867 sig_print_info (signum);
5868 }
5869
5870 break;
5871 }
5872
5873 do_cleanups (old_chain);
5874 }
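
/* Illustrative command lines this parser accepts (actions apply
   cumulatively to the signals named so far; bare numbers use GDB's
   internal signal numbering, and ranges take the form LOW-HIGH):

     (gdb) handle SIGUSR1 nostop noprint pass
     (gdb) handle SIGALRM SIGCHLD stop print
     (gdb) handle 1-5 print
     (gdb) handle all nostop  */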
5875
5876 static void
5877 xdb_handle_command (char *args, int from_tty)
5878 {
5879 char **argv;
5880 struct cleanup *old_chain;
5881
5882 if (args == NULL)
5883 error_no_arg (_("xdb command"));
5884
5885 /* Break the command line up into args. */
5886
5887 argv = gdb_buildargv (args);
5888 old_chain = make_cleanup_freeargv (argv);
5889 if (argv[1] != (char *) NULL)
5890 {
5891 char *argBuf;
5892 int bufLen;
5893
5894 bufLen = strlen (argv[0]) + 20;
5895 argBuf = (char *) xmalloc (bufLen);
5896 if (argBuf)
5897 {
5898 int validFlag = 1;
5899 enum target_signal oursig;
5900
5901 oursig = target_signal_from_name (argv[0]);
5902 memset (argBuf, 0, bufLen);
5903 if (strcmp (argv[1], "Q") == 0)
5904 sprintf (argBuf, "%s %s", argv[0], "noprint");
5905 else
5906 {
5907 if (strcmp (argv[1], "s") == 0)
5908 {
5909 if (!signal_stop[oursig])
5910 sprintf (argBuf, "%s %s", argv[0], "stop");
5911 else
5912 sprintf (argBuf, "%s %s", argv[0], "nostop");
5913 }
5914 else if (strcmp (argv[1], "i") == 0)
5915 {
5916 if (!signal_program[oursig])
5917 sprintf (argBuf, "%s %s", argv[0], "pass");
5918 else
5919 sprintf (argBuf, "%s %s", argv[0], "nopass");
5920 }
5921 else if (strcmp (argv[1], "r") == 0)
5922 {
5923 if (!signal_print[oursig])
5924 sprintf (argBuf, "%s %s", argv[0], "print");
5925 else
5926 sprintf (argBuf, "%s %s", argv[0], "noprint");
5927 }
5928 else
5929 validFlag = 0;
5930 }
5931 if (validFlag)
5932 handle_command (argBuf, from_tty);
5933 else
5934 printf_filtered (_("Invalid signal handling flag.\n"));
5935 if (argBuf)
5936 xfree (argBuf);
5937 }
5938 }
5939 do_cleanups (old_chain);
5940 }
5941
5942 /* Print current contents of the tables set by the handle command.
5943 It is possible we should just be printing signals actually used
5944 by the current target (but for things to work right when switching
5945 targets, all signals should be in the signal tables). */
5946
5947 static void
5948 signals_info (char *signum_exp, int from_tty)
5949 {
5950 enum target_signal oursig;
5951
5952 sig_print_header ();
5953
5954 if (signum_exp)
5955 {
5956 /* First see if this is a symbol name. */
5957 oursig = target_signal_from_name (signum_exp);
5958 if (oursig == TARGET_SIGNAL_UNKNOWN)
5959 {
5960 /* No, try numeric. */
5961 oursig =
5962 target_signal_from_command (parse_and_eval_long (signum_exp));
5963 }
5964 sig_print_info (oursig);
5965 return;
5966 }
5967
5968 printf_filtered ("\n");
5969 /* These ugly casts brought to you by the native VAX compiler. */
5970 for (oursig = TARGET_SIGNAL_FIRST;
5971 (int) oursig < (int) TARGET_SIGNAL_LAST;
5972 oursig = (enum target_signal) ((int) oursig + 1))
5973 {
5974 QUIT;
5975
5976 if (oursig != TARGET_SIGNAL_UNKNOWN
5977 && oursig != TARGET_SIGNAL_DEFAULT && oursig != TARGET_SIGNAL_0)
5978 sig_print_info (oursig);
5979 }
5980
5981 printf_filtered (_("\nUse the \"handle\" command to change these tables.\n"));
5982 }
5983
5984 /* The $_siginfo convenience variable is a bit special. We don't know
5985 for sure the type of the value until we actually have a chance to
5986 fetch the data. The type can change depending on gdbarch, so it is
5987 also dependent on which thread you have selected. We handle this by:
5988
5989 1. making $_siginfo be an internalvar that creates a new value on
5990 access, and
5991
5992 2. making the value of $_siginfo be an lval_computed value. */
5993
5994 /* This function implements the lval_computed support for reading a
5995 $_siginfo value. */
5996
5997 static void
5998 siginfo_value_read (struct value *v)
5999 {
6000 LONGEST transferred;
6001
6002 transferred =
6003 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
6004 NULL,
6005 value_contents_all_raw (v),
6006 value_offset (v),
6007 TYPE_LENGTH (value_type (v)));
6008
6009 if (transferred != TYPE_LENGTH (value_type (v)))
6010 error (_("Unable to read siginfo"));
6011 }
6012
6013 /* This function implements the lval_computed support for writing a
6014 $_siginfo value. */
6015
6016 static void
6017 siginfo_value_write (struct value *v, struct value *fromval)
6018 {
6019 LONGEST transferred;
6020
6021 transferred = target_write (&current_target,
6022 TARGET_OBJECT_SIGNAL_INFO,
6023 NULL,
6024 value_contents_all_raw (fromval),
6025 value_offset (v),
6026 TYPE_LENGTH (value_type (fromval)));
6027
6028 if (transferred != TYPE_LENGTH (value_type (fromval)))
6029 error (_("Unable to write siginfo"));
6030 }
6031
6032 static struct lval_funcs siginfo_value_funcs =
6033 {
6034 siginfo_value_read,
6035 siginfo_value_write
6036 };
6037
6038 /* Return a new value with the correct type for the siginfo object of
6039 the current thread using architecture GDBARCH. Return a void value
6040 if there's no object available. */
6041
6042 static struct value *
6043 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var)
6044 {
6045 if (target_has_stack
6046 && !ptid_equal (inferior_ptid, null_ptid)
6047 && gdbarch_get_siginfo_type_p (gdbarch))
6048 {
6049 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6050
6051 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
6052 }
6053
6054 return allocate_value (builtin_type (gdbarch)->builtin_void);
6055 }
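
/* A minimal sketch of what this enables at the CLI, assuming the current
   architecture provides a siginfo type (gdbarch_get_siginfo_type_p) and
   that the type exposes the usual si_signo/si_errno members:

     (gdb) print $_siginfo.si_signo
     (gdb) print $_siginfo.si_errno

   Reading the value goes through siginfo_value_read, i.e. a target_read
   of TARGET_OBJECT_SIGNAL_INFO for the currently selected thread.  */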
6056
6057 \f
6058 /* Inferior thread state.
6059 These are details related to the inferior itself, and don't include
6060 things like what frame the user had selected or what gdb was doing
6061 with the target at the time.
6062 For inferior function calls these are things we want to restore
6063 regardless of whether the function call successfully completes
6064 or the dummy frame has to be manually popped. */
6065
6066 struct inferior_thread_state
6067 {
6068 enum target_signal stop_signal;
6069 CORE_ADDR stop_pc;
6070 struct regcache *registers;
6071 };
6072
6073 struct inferior_thread_state *
6074 save_inferior_thread_state (void)
6075 {
6076 struct inferior_thread_state *inf_state = XMALLOC (struct inferior_thread_state);
6077 struct thread_info *tp = inferior_thread ();
6078
6079 inf_state->stop_signal = tp->stop_signal;
6080 inf_state->stop_pc = stop_pc;
6081
6082 inf_state->registers = regcache_dup (get_current_regcache ());
6083
6084 return inf_state;
6085 }
6086
6087 /* Restore inferior session state to INF_STATE. */
6088
6089 void
6090 restore_inferior_thread_state (struct inferior_thread_state *inf_state)
6091 {
6092 struct thread_info *tp = inferior_thread ();
6093
6094 tp->stop_signal = inf_state->stop_signal;
6095 stop_pc = inf_state->stop_pc;
6096
6097 /* The inferior can be gone if the user types "print exit(0)"
6098 (and perhaps other times). */
6099 if (target_has_execution)
6100 /* NB: The register write goes through to the target. */
6101 regcache_cpy (get_current_regcache (), inf_state->registers);
6102 regcache_xfree (inf_state->registers);
6103 xfree (inf_state);
6104 }
6105
6106 static void
6107 do_restore_inferior_thread_state_cleanup (void *state)
6108 {
6109 restore_inferior_thread_state (state);
6110 }
6111
6112 struct cleanup *
6113 make_cleanup_restore_inferior_thread_state (struct inferior_thread_state *inf_state)
6114 {
6115 return make_cleanup (do_restore_inferior_thread_state_cleanup, inf_state);
6116 }
6117
6118 void
6119 discard_inferior_thread_state (struct inferior_thread_state *inf_state)
6120 {
6121 regcache_xfree (inf_state->registers);
6122 xfree (inf_state);
6123 }
6124
6125 struct regcache *
6126 get_inferior_thread_state_regcache (struct inferior_thread_state *inf_state)
6127 {
6128 return inf_state->registers;
6129 }
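
/* A minimal sketch of the save/restore pattern these routines support,
   e.g. around an inferior function call that may clobber registers:

     struct inferior_thread_state *saved = save_inferior_thread_state ();
     struct cleanup *old_chain
       = make_cleanup_restore_inferior_thread_state (saved);

     ... run the call; an error unwinds through the cleanup,
         restoring and freeing SAVED ...

     do_cleanups (old_chain);

   A caller that decides not to restore discards the cleanup and calls
   discard_inferior_thread_state (saved) to free the registers instead.  */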
6130
6131 /* Session related state for inferior function calls.
6132 These are the additional bits of state that need to be restored
6133 when an inferior function call successfully completes. */
6134
6135 struct inferior_status
6136 {
6137 bpstat stop_bpstat;
6138 int stop_step;
6139 enum stop_stack_kind stop_stack_dummy;
6140 int stopped_by_random_signal;
6141 int stepping_over_breakpoint;
6142 CORE_ADDR step_range_start;
6143 CORE_ADDR step_range_end;
6144 struct frame_id step_frame_id;
6145 struct frame_id step_stack_frame_id;
6146 enum step_over_calls_kind step_over_calls;
6147 CORE_ADDR step_resume_break_address;
6148 int stop_after_trap;
6149 int stop_soon;
6150
6151 /* ID of the selected frame when the inferior function call was made. */
6152 struct frame_id selected_frame_id;
6153
6154 int proceed_to_finish;
6155 int in_infcall;
6156 };
6157
6158 /* Save all of the information associated with the inferior<==>gdb
6159 connection. */
6160
6161 struct inferior_status *
6162 save_inferior_status (void)
6163 {
6164 struct inferior_status *inf_status = XMALLOC (struct inferior_status);
6165 struct thread_info *tp = inferior_thread ();
6166 struct inferior *inf = current_inferior ();
6167
6168 inf_status->stop_step = tp->stop_step;
6169 inf_status->stop_stack_dummy = stop_stack_dummy;
6170 inf_status->stopped_by_random_signal = stopped_by_random_signal;
6171 inf_status->stepping_over_breakpoint = tp->trap_expected;
6172 inf_status->step_range_start = tp->step_range_start;
6173 inf_status->step_range_end = tp->step_range_end;
6174 inf_status->step_frame_id = tp->step_frame_id;
6175 inf_status->step_stack_frame_id = tp->step_stack_frame_id;
6176 inf_status->step_over_calls = tp->step_over_calls;
6177 inf_status->stop_after_trap = stop_after_trap;
6178 inf_status->stop_soon = inf->stop_soon;
6179 /* Save original bpstat chain here; replace it with copy of chain.
6180 If caller's caller is walking the chain, they'll be happier if we
6181 hand them back the original chain when restore_inferior_status is
6182 called. */
6183 inf_status->stop_bpstat = tp->stop_bpstat;
6184 tp->stop_bpstat = bpstat_copy (tp->stop_bpstat);
6185 inf_status->proceed_to_finish = tp->proceed_to_finish;
6186 inf_status->in_infcall = tp->in_infcall;
6187
6188 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
6189
6190 return inf_status;
6191 }
6192
6193 static int
6194 restore_selected_frame (void *args)
6195 {
6196 struct frame_id *fid = (struct frame_id *) args;
6197 struct frame_info *frame;
6198
6199 frame = frame_find_by_id (*fid);
6200
6201 /* If frame_find_by_id () returns NULL, the previously selected
6202 frame is no longer available. */
6203 if (frame == NULL)
6204 {
6205 warning (_("Unable to restore previously selected frame."));
6206 return 0;
6207 }
6208
6209 select_frame (frame);
6210
6211 return (1);
6212 }
6213
6214 /* Restore inferior session state to INF_STATUS. */
6215
6216 void
6217 restore_inferior_status (struct inferior_status *inf_status)
6218 {
6219 struct thread_info *tp = inferior_thread ();
6220 struct inferior *inf = current_inferior ();
6221
6222 tp->stop_step = inf_status->stop_step;
6223 stop_stack_dummy = inf_status->stop_stack_dummy;
6224 stopped_by_random_signal = inf_status->stopped_by_random_signal;
6225 tp->trap_expected = inf_status->stepping_over_breakpoint;
6226 tp->step_range_start = inf_status->step_range_start;
6227 tp->step_range_end = inf_status->step_range_end;
6228 tp->step_frame_id = inf_status->step_frame_id;
6229 tp->step_stack_frame_id = inf_status->step_stack_frame_id;
6230 tp->step_over_calls = inf_status->step_over_calls;
6231 stop_after_trap = inf_status->stop_after_trap;
6232 inf->stop_soon = inf_status->stop_soon;
6233 bpstat_clear (&tp->stop_bpstat);
6234 tp->stop_bpstat = inf_status->stop_bpstat;
6235 inf_status->stop_bpstat = NULL;
6236 tp->proceed_to_finish = inf_status->proceed_to_finish;
6237 tp->in_infcall = inf_status->in_infcall;
6238
6239 if (target_has_stack)
6240 {
6241 /* The point of catch_errors is that if the stack is clobbered,
6242 walking the stack might encounter a garbage pointer and
6243 error() trying to dereference it. */
6244 if (catch_errors
6245 (restore_selected_frame, &inf_status->selected_frame_id,
6246 "Unable to restore previously selected frame:\n",
6247 RETURN_MASK_ERROR) == 0)
6248 /* Error in restoring the selected frame. Select the innermost
6249 frame. */
6250 select_frame (get_current_frame ());
6251 }
6252
6253 xfree (inf_status);
6254 }
6255
6256 static void
6257 do_restore_inferior_status_cleanup (void *sts)
6258 {
6259 restore_inferior_status (sts);
6260 }
6261
6262 struct cleanup *
6263 make_cleanup_restore_inferior_status (struct inferior_status *inf_status)
6264 {
6265 return make_cleanup (do_restore_inferior_status_cleanup, inf_status);
6266 }
6267
6268 void
6269 discard_inferior_status (struct inferior_status *inf_status)
6270 {
6271 /* See save_inferior_status for info on stop_bpstat. */
6272 bpstat_clear (&inf_status->stop_bpstat);
6273 xfree (inf_status);
6274 }
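
/* The inferior_status routines above follow the same save/cleanup shape
   as the thread-state routines; a minimal sketch:

     struct inferior_status *inf_status = save_inferior_status ();
     struct cleanup *old_chain
       = make_cleanup_restore_inferior_status (inf_status);

     ... make the inferior call ...

     do_cleanups (old_chain);

   On a path that keeps the current state instead of restoring, discard
   the cleanup and call discard_inferior_status (inf_status), which frees
   the bpstat chain saved by save_inferior_status.  */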
6275 \f
6276 int
6277 inferior_has_forked (ptid_t pid, ptid_t *child_pid)
6278 {
6279 struct target_waitstatus last;
6280 ptid_t last_ptid;
6281
6282 get_last_target_status (&last_ptid, &last);
6283
6284 if (last.kind != TARGET_WAITKIND_FORKED)
6285 return 0;
6286
6287 if (!ptid_equal (last_ptid, pid))
6288 return 0;
6289
6290 *child_pid = last.value.related_pid;
6291 return 1;
6292 }
6293
6294 int
6295 inferior_has_vforked (ptid_t pid, ptid_t *child_pid)
6296 {
6297 struct target_waitstatus last;
6298 ptid_t last_ptid;
6299
6300 get_last_target_status (&last_ptid, &last);
6301
6302 if (last.kind != TARGET_WAITKIND_VFORKED)
6303 return 0;
6304
6305 if (!ptid_equal (last_ptid, pid))
6306 return 0;
6307
6308 *child_pid = last.value.related_pid;
6309 return 1;
6310 }
6311
6312 int
6313 inferior_has_execd (ptid_t pid, char **execd_pathname)
6314 {
6315 struct target_waitstatus last;
6316 ptid_t last_ptid;
6317
6318 get_last_target_status (&last_ptid, &last);
6319
6320 if (last.kind != TARGET_WAITKIND_EXECD)
6321 return 0;
6322
6323 if (!ptid_equal (last_ptid, pid))
6324 return 0;
6325
6326 *execd_pathname = xstrdup (last.value.execd_pathname);
6327 return 1;
6328 }
6329
6330 int
6331 inferior_has_called_syscall (ptid_t pid, int *syscall_number)
6332 {
6333 struct target_waitstatus last;
6334 ptid_t last_ptid;
6335
6336 get_last_target_status (&last_ptid, &last);
6337
6338 if (last.kind != TARGET_WAITKIND_SYSCALL_ENTRY &&
6339 last.kind != TARGET_WAITKIND_SYSCALL_RETURN)
6340 return 0;
6341
6342 if (!ptid_equal (last_ptid, pid))
6343 return 0;
6344
6345 *syscall_number = last.value.syscall_number;
6346 return 1;
6347 }
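
/* A minimal sketch of how these queries are meant to be used, assuming
   the caller has just been told that PTID stopped:

     ptid_t child;

     if (inferior_has_forked (ptid, &child))
       {
         ... the last reported event for PTID was a fork;
             CHILD identifies the new process ...
       }  */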
6348
6349 /* Oft used ptids */
6350 ptid_t null_ptid;
6351 ptid_t minus_one_ptid;
6352
6353 /* Create a ptid given the necessary PID, LWP, and TID components. */
6354
6355 ptid_t
6356 ptid_build (int pid, long lwp, long tid)
6357 {
6358 ptid_t ptid;
6359
6360 ptid.pid = pid;
6361 ptid.lwp = lwp;
6362 ptid.tid = tid;
6363 return ptid;
6364 }
6365
6366 /* Create a ptid from just a pid. */
6367
6368 ptid_t
6369 pid_to_ptid (int pid)
6370 {
6371 return ptid_build (pid, 0, 0);
6372 }
6373
6374 /* Fetch the pid (process id) component from a ptid. */
6375
6376 int
6377 ptid_get_pid (ptid_t ptid)
6378 {
6379 return ptid.pid;
6380 }
6381
6382 /* Fetch the lwp (lightweight process) component from a ptid. */
6383
6384 long
6385 ptid_get_lwp (ptid_t ptid)
6386 {
6387 return ptid.lwp;
6388 }
6389
6390 /* Fetch the tid (thread id) component from a ptid. */
6391
6392 long
6393 ptid_get_tid (ptid_t ptid)
6394 {
6395 return ptid.tid;
6396 }
6397
6398 /* ptid_equal() is used to test equality of two ptids. */
6399
6400 int
6401 ptid_equal (ptid_t ptid1, ptid_t ptid2)
6402 {
6403 return (ptid1.pid == ptid2.pid && ptid1.lwp == ptid2.lwp
6404 && ptid1.tid == ptid2.tid);
6405 }
6406
6407 /* Returns true if PTID represents a process. */
6408
6409 int
6410 ptid_is_pid (ptid_t ptid)
6411 {
6412 if (ptid_equal (minus_one_ptid, ptid))
6413 return 0;
6414 if (ptid_equal (null_ptid, ptid))
6415 return 0;
6416
6417 return (ptid_get_lwp (ptid) == 0 && ptid_get_tid (ptid) == 0);
6418 }
6419
6420 int
6421 ptid_match (ptid_t ptid, ptid_t filter)
6422 {
6423 /* Since both parameters have the same type, prevent easy mistakes
6424 from happening. */
6425 gdb_assert (!ptid_equal (ptid, minus_one_ptid)
6426 && !ptid_equal (ptid, null_ptid));
6427
6428 if (ptid_equal (filter, minus_one_ptid))
6429 return 1;
6430 if (ptid_is_pid (filter)
6431 && ptid_get_pid (ptid) == ptid_get_pid (filter))
6432 return 1;
6433 else if (ptid_equal (ptid, filter))
6434 return 1;
6435
6436 return 0;
6437 }
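
/* A minimal sketch of how the ptid accessors and filters compose, assuming
   a target whose threads are (pid, lwp, 0) triples:

     ptid_t thread = ptid_build (1234, 5678, 0);
     ptid_t process = pid_to_ptid (1234);

     ptid_get_pid (thread) == 1234
     ptid_get_lwp (thread) == 5678
     ptid_is_pid (process)                 true
     ptid_match (thread, process)          true: a bare pid matches all its threads
     ptid_match (thread, minus_one_ptid)   true: minus_one_ptid is a wildcard  */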
6438
6439 /* restore_inferior_ptid() will be used by the cleanup machinery
6440 to restore the inferior_ptid value saved in a call to
6441 save_inferior_ptid(). */
6442
6443 static void
6444 restore_inferior_ptid (void *arg)
6445 {
6446 ptid_t *saved_ptid_ptr = arg;
6447
6448 inferior_ptid = *saved_ptid_ptr;
6449 xfree (arg);
6450 }
6451
6452 /* Save the value of inferior_ptid so that it may be restored by a
6453 later call to do_cleanups(). Returns the struct cleanup pointer
6454 needed for later doing the cleanup. */
6455
6456 struct cleanup *
6457 save_inferior_ptid (void)
6458 {
6459 ptid_t *saved_ptid_ptr;
6460
6461 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
6462 *saved_ptid_ptr = inferior_ptid;
6463 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
6464 }
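
/* A minimal sketch of the intended pattern: temporarily switch
   inferior_ptid and let the cleanup put it back.  OTHER_PTID is a
   stand-in for whichever thread the caller wants to operate on.

     struct cleanup *old_chain = save_inferior_ptid ();

     inferior_ptid = other_ptid;
     ... perform target operations on that thread ...
     do_cleanups (old_chain);  */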
6465 \f
6466
6467 /* User interface for reverse debugging:
6468 Set exec-direction / show exec-direction commands
6469 (the setting has no effect unless the target supports reverse execution). */
6470
6471 enum exec_direction_kind execution_direction = EXEC_FORWARD;
6472 static const char exec_forward[] = "forward";
6473 static const char exec_reverse[] = "reverse";
6474 static const char *exec_direction = exec_forward;
6475 static const char *exec_direction_names[] = {
6476 exec_forward,
6477 exec_reverse,
6478 NULL
6479 };
6480
6481 static void
6482 set_exec_direction_func (char *args, int from_tty,
6483 struct cmd_list_element *cmd)
6484 {
6485 if (target_can_execute_reverse)
6486 {
6487 if (!strcmp (exec_direction, exec_forward))
6488 execution_direction = EXEC_FORWARD;
6489 else if (!strcmp (exec_direction, exec_reverse))
6490 execution_direction = EXEC_REVERSE;
6491 }
6492 }
6493
6494 static void
6495 show_exec_direction_func (struct ui_file *out, int from_tty,
6496 struct cmd_list_element *cmd, const char *value)
6497 {
6498 switch (execution_direction) {
6499 case EXEC_FORWARD:
6500 fprintf_filtered (out, _("Forward.\n"));
6501 break;
6502 case EXEC_REVERSE:
6503 fprintf_filtered (out, _("Reverse.\n"));
6504 break;
6505 case EXEC_ERROR:
6506 default:
6507 fprintf_filtered (out,
6508 _("Forward (target `%s' does not support exec-direction).\n"),
6509 target_shortname);
6510 break;
6511 }
6512 }
6513
6514 /* User interface for non-stop mode. */
6515
6516 int non_stop = 0;
6517
6518 static void
6519 set_non_stop (char *args, int from_tty,
6520 struct cmd_list_element *c)
6521 {
6522 if (target_has_execution)
6523 {
6524 non_stop_1 = non_stop;
6525 error (_("Cannot change this setting while the inferior is running."));
6526 }
6527
6528 non_stop = non_stop_1;
6529 }
6530
6531 static void
6532 show_non_stop (struct ui_file *file, int from_tty,
6533 struct cmd_list_element *c, const char *value)
6534 {
6535 fprintf_filtered (file,
6536 _("Controlling the inferior in non-stop mode is %s.\n"),
6537 value);
6538 }
6539
6540 static void
6541 show_schedule_multiple (struct ui_file *file, int from_tty,
6542 struct cmd_list_element *c, const char *value)
6543 {
6544 fprintf_filtered (file, _("\
6545 Resuming the execution of threads of all processes is %s.\n"), value);
6546 }
6547
6548 void
6549 _initialize_infrun (void)
6550 {
6551 int i;
6552 int numsigs;
6553
6554 add_info ("signals", signals_info, _("\
6555 What debugger does when program gets various signals.\n\
6556 Specify a signal as argument to print info on that signal only."));
6557 add_info_alias ("handle", "signals", 0);
6558
6559 add_com ("handle", class_run, handle_command, _("\
6560 Specify how to handle a signal.\n\
6561 Args are signals and actions to apply to those signals.\n\
6562 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6563 from 1-15 are allowed for compatibility with old versions of GDB.\n\
6564 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
6565 The special arg \"all\" is recognized to mean all signals except those\n\
6566 used by the debugger, typically SIGTRAP and SIGINT.\n\
6567 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
6568 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
6569 Stop means reenter debugger if this signal happens (implies print).\n\
6570 Print means print a message if this signal happens.\n\
6571 Pass means let program see this signal; otherwise program doesn't know.\n\
6572 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
6573 Pass and Stop may be combined."));
6574 if (xdb_commands)
6575 {
6576 add_com ("lz", class_info, signals_info, _("\
6577 What debugger does when program gets various signals.\n\
6578 Specify a signal as argument to print info on that signal only."));
6579 add_com ("z", class_run, xdb_handle_command, _("\
6580 Specify how to handle a signal.\n\
6581 Args are signals and actions to apply to those signals.\n\
6582 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6583 from 1-15 are allowed for compatibility with old versions of GDB.\n\
6584 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
6585 The special arg \"all\" is recognized to mean all signals except those\n\
6586 used by the debugger, typically SIGTRAP and SIGINT.\n\
6587 Recognized actions include \"s\" (toggles between stop and nostop), \n\
6588 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
6589 nopass), \"Q\" (noprint)\n\
6590 Stop means reenter debugger if this signal happens (implies print).\n\
6591 Print means print a message if this signal happens.\n\
6592 Pass means let program see this signal; otherwise program doesn't know.\n\
6593 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
6594 Pass and Stop may be combined."));
6595 }
6596
6597 if (!dbx_commands)
6598 stop_command = add_cmd ("stop", class_obscure,
6599 not_just_help_class_command, _("\
6600 There is no `stop' command, but you can set a hook on `stop'.\n\
6601 This allows you to set a list of commands to be run each time execution\n\
6602 of the program stops."), &cmdlist);
6603
6604 add_setshow_zinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
6605 Set inferior debugging."), _("\
6606 Show inferior debugging."), _("\
6607 When non-zero, inferior specific debugging is enabled."),
6608 NULL,
6609 show_debug_infrun,
6610 &setdebuglist, &showdebuglist);
6611
6612 add_setshow_boolean_cmd ("displaced", class_maintenance, &debug_displaced, _("\
6613 Set displaced stepping debugging."), _("\
6614 Show displaced stepping debugging."), _("\
6615 When non-zero, displaced stepping specific debugging is enabled."),
6616 NULL,
6617 show_debug_displaced,
6618 &setdebuglist, &showdebuglist);
6619
6620 add_setshow_boolean_cmd ("non-stop", no_class,
6621 &non_stop_1, _("\
6622 Set whether gdb controls the inferior in non-stop mode."), _("\
6623 Show whether gdb controls the inferior in non-stop mode."), _("\
6624 When debugging a multi-threaded program and this setting is\n\
6625 off (the default, also called all-stop mode), when one thread stops\n\
6626 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
6627 all other threads in the program while you interact with the thread of\n\
6628 interest. When you continue or step a thread, you can allow the other\n\
6629 threads to run, or have them remain stopped, but while you inspect any\n\
6630 thread's state, all threads stop.\n\
6631 \n\
6632 In non-stop mode, when one thread stops, other threads can continue\n\
6633 to run freely. You'll be able to step each thread independently,\n\
6634 leave it stopped or free to run as needed."),
6635 set_non_stop,
6636 show_non_stop,
6637 &setlist,
6638 &showlist);
6639
6640 numsigs = (int) TARGET_SIGNAL_LAST;
6641 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
6642 signal_print = (unsigned char *)
6643 xmalloc (sizeof (signal_print[0]) * numsigs);
6644 signal_program = (unsigned char *)
6645 xmalloc (sizeof (signal_program[0]) * numsigs);
6646 for (i = 0; i < numsigs; i++)
6647 {
6648 signal_stop[i] = 1;
6649 signal_print[i] = 1;
6650 signal_program[i] = 1;
6651 }
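/* [Editor's note, not in the upstream source]  The loop above makes the
   default disposition of every signal equivalent to
       handle SIGNAL stop print pass
   The assignments below then relax that default for signals that are
   normally harmless or used internally by thread libraries.  */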
6652
6653 /* Signals caused by the debugger's own actions
6654 should not be given to the program afterwards. */
6655 signal_program[TARGET_SIGNAL_TRAP] = 0;
6656 signal_program[TARGET_SIGNAL_INT] = 0;
6657
6658 /* Signals that are not errors should not normally enter the debugger. */
6659 signal_stop[TARGET_SIGNAL_ALRM] = 0;
6660 signal_print[TARGET_SIGNAL_ALRM] = 0;
6661 signal_stop[TARGET_SIGNAL_VTALRM] = 0;
6662 signal_print[TARGET_SIGNAL_VTALRM] = 0;
6663 signal_stop[TARGET_SIGNAL_PROF] = 0;
6664 signal_print[TARGET_SIGNAL_PROF] = 0;
6665 signal_stop[TARGET_SIGNAL_CHLD] = 0;
6666 signal_print[TARGET_SIGNAL_CHLD] = 0;
6667 signal_stop[TARGET_SIGNAL_IO] = 0;
6668 signal_print[TARGET_SIGNAL_IO] = 0;
6669 signal_stop[TARGET_SIGNAL_POLL] = 0;
6670 signal_print[TARGET_SIGNAL_POLL] = 0;
6671 signal_stop[TARGET_SIGNAL_URG] = 0;
6672 signal_print[TARGET_SIGNAL_URG] = 0;
6673 signal_stop[TARGET_SIGNAL_WINCH] = 0;
6674 signal_print[TARGET_SIGNAL_WINCH] = 0;
6675
6676 /* These signals are used internally by user-level thread
6677 implementations. (See signal(5) on Solaris.) Like the above
6678 signals, a healthy program receives and handles them as part of
6679 its normal operation. */
6680 signal_stop[TARGET_SIGNAL_LWP] = 0;
6681 signal_print[TARGET_SIGNAL_LWP] = 0;
6682 signal_stop[TARGET_SIGNAL_WAITING] = 0;
6683 signal_print[TARGET_SIGNAL_WAITING] = 0;
6684 signal_stop[TARGET_SIGNAL_CANCEL] = 0;
6685 signal_print[TARGET_SIGNAL_CANCEL] = 0;
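/* [Editor's note, not in the upstream source]  Any of these defaults can
   be overridden from the CLI, e.g.:
       (gdb) handle SIGALRM stop print
   makes GDB stop and announce alarm signals again.  */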
6686
6687 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
6688 &stop_on_solib_events, _("\
6689 Set stopping for shared library events."), _("\
6690 Show stopping for shared library events."), _("\
6691 If nonzero, gdb will give control to the user when the dynamic linker\n\
6692 notifies gdb of shared library events. The most common event of interest\n\
6693 to the user would be loading/unloading of a new library."),
6694 NULL,
6695 show_stop_on_solib_events,
6696 &setlist, &showlist);
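/* [Editor's note, not in the upstream source]  Example:
       (gdb) set stop-on-solib-events 1
       (gdb) run
   makes GDB return to the prompt each time the dynamic linker reports a
   shared-library load or unload.  */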
6697
6698 add_setshow_enum_cmd ("follow-fork-mode", class_run,
6699 follow_fork_mode_kind_names,
6700 &follow_fork_mode_string, _("\
6701 Set debugger response to a program call of fork or vfork."), _("\
6702 Show debugger response to a program call of fork or vfork."), _("\
6703 A fork or vfork creates a new process. follow-fork-mode can be:\n\
6704 parent - the original process is debugged after a fork\n\
6705 child - the new process is debugged after a fork\n\
6706 The unfollowed process will continue to run.\n\
6707 By default, the debugger will follow the parent process."),
6708 NULL,
6709 show_follow_fork_mode_string,
6710 &setlist, &showlist);
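/* [Editor's note, not in the upstream source]  Example:
       (gdb) set follow-fork-mode child
   debugs the child after the next fork; combining it with
   "set detach-on-fork off" (registered further below) keeps both sides of
   the fork under GDB's control.  */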
6711
6712 add_setshow_enum_cmd ("follow-exec-mode", class_run,
6713 follow_exec_mode_names,
6714 &follow_exec_mode_string, _("\
6715 Set debugger response to a program call of exec."), _("\
6716 Show debugger response to a program call of exec."), _("\
6717 An exec call replaces the program image of a process.\n\
6718 \n\
6719 follow-exec-mode can be:\n\
6720 \n\
6721 new - the debugger creates a new inferior and rebinds the process\n\
6722 to this new inferior. The program the process was running before\n\
6723 the exec call can be restarted afterwards by restarting the original\n\
6724 inferior.\n\
6725 \n\
6726 same - the debugger keeps the process bound to the same inferior.\n\
6727 The new executable image replaces the previous executable loaded in\n\
6728 the inferior. Restarting the inferior after the exec call restarts\n\
6729 the executable the process was running after the exec call.\n\
6730 \n\
6731 By default, the debugger will use the same inferior."),
6732 NULL,
6733 show_follow_exec_mode_string,
6734 &setlist, &showlist);
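/* [Editor's note, not in the upstream source]  Example:
       (gdb) set follow-exec-mode new
   creates a fresh inferior for the post-exec image, so restarting the
   original inferior re-runs the pre-exec program, as described in the
   help text above.  */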
6735
6736 add_setshow_enum_cmd ("scheduler-locking", class_run,
6737 scheduler_enums, &scheduler_mode, _("\
6738 Set mode for locking scheduler during execution."), _("\
6739 Show mode for locking scheduler during execution."), _("\
6740 off == no locking (threads may preempt at any time)\n\
6741 on == full locking (no thread except the current thread may run)\n\
6742 step == scheduler locked during every single-step operation.\n\
6743 In this mode, no other thread may run during a step command.\n\
6744 Other threads may run while stepping over a function call ('next')."),
6745 set_schedlock_func, /* traps on target vector */
6746 show_scheduler_mode,
6747 &setlist, &showlist);
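/* [Editor's note, not in the upstream source]  Example:
       (gdb) set scheduler-locking step
   keeps all other threads stopped while single-stepping, so the thread
   being stepped cannot be preempted during a "step" command.  */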
6748
6749 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
6750 Set mode for resuming threads of all processes."), _("\
6751 Show mode for resuming threads of all processes."), _("\
6752 When on, execution commands (such as 'continue' or 'next') resume all\n\
6753 threads of all processes. When off (which is the default), execution\n\
6754 commands only resume the threads of the current process. The set of\n\
6755 threads that are resumed is further refined by the scheduler-locking\n\
6756 mode (see help set scheduler-locking)."),
6757 NULL,
6758 show_schedule_multiple,
6759 &setlist, &showlist);
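/* [Editor's note, not in the upstream source]  Example: when debugging
   several processes at once,
       (gdb) set schedule-multiple on
   makes "continue" resume the threads of every process rather than only
   those of the current process.  */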
6760
6761 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
6762 Set mode of the step operation."), _("\
6763 Show mode of the step operation."), _("\
6764 When set, doing a step over a function without debug line information\n\
6765 will stop at the first instruction of that function. Otherwise, the\n\
6766 function is skipped and the step command stops at a different source line."),
6767 NULL,
6768 show_step_stop_if_no_debug,
6769 &setlist, &showlist);
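/* [Editor's note, not in the upstream source]  Example:
       (gdb) set step-mode on
   makes "step" stop at the first instruction of a function compiled
   without line information instead of stepping over it.  */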
6770
6771 add_setshow_enum_cmd ("displaced-stepping", class_run,
6772 can_use_displaced_stepping_enum,
6773 &can_use_displaced_stepping, _("\
6774 Set debugger's willingness to use displaced stepping."), _("\
6775 Show debugger's willingness to use displaced stepping."), _("\
6776 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
6777 supported by the target architecture. If off, gdb will not use displaced\n\
6778 stepping to step over breakpoints, even if it is supported by the target\n\
6779 architecture. If auto (which is the default), gdb will use displaced stepping\n\
6780 if the target architecture supports it and non-stop mode is active, but will not\n\
6781 use it in all-stop mode (see help set non-stop)."),
6782 NULL,
6783 show_can_use_displaced_stepping,
6784 &setlist, &showlist);
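/* [Editor's note, not in the upstream source]  Example:
       (gdb) set displaced-stepping on
   allows out-of-line (displaced) stepping over breakpoints whenever the
   target architecture supports it, even in all-stop mode; "auto" restores
   the default behavior described above.  */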
6785
6786 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
6787 &exec_direction, _("Set direction of execution.\n\
6788 Options are 'forward' or 'reverse'."),
6789 _("Show direction of execution (forward/reverse)."),
6790 _("Tells gdb whether to execute forward or backward."),
6791 set_exec_direction_func, show_exec_direction_func,
6792 &setlist, &showlist);
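/* [Editor's note, not in the upstream source]  Reverse execution needs a
   target that can run backwards, e.g. process record:
       (gdb) record
       (gdb) set exec-direction reverse
       (gdb) continue
   "set exec-direction forward" switches back to normal execution.  */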
6793
6794 /* Set/show detach-on-fork: user-settable mode. */
6795
6796 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
6797 Set whether gdb will detach the child of a fork."), _("\
6798 Show whether gdb will detach the child of a fork."), _("\
6799 Tells gdb whether to detach the child of a fork."),
6800 NULL, NULL, &setlist, &showlist);
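/* [Editor's note, not in the upstream source]  Example:
       (gdb) set detach-on-fork off
   keeps the un-followed side of a fork attached (suspended) instead of
   detaching it, so it can be examined later by switching inferiors
   (e.g. with "inferior N").  */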
6801
6802 /* ptid initializations */
6803 null_ptid = ptid_build (0, 0, 0);
6804 minus_one_ptid = ptid_build (-1, 0, 0);
6805 inferior_ptid = null_ptid;
6806 target_last_wait_ptid = minus_one_ptid;
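/* [Editor's note, not in the upstream source]  ptid_build takes
   (pid, lwp, tid), so null_ptid matches no thread at all, while
   minus_one_ptid is the "any/all threads" wildcard understood by
   target_wait and target_resume.  */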
6807
6808 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
6809 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
6810 observer_attach_thread_exit (infrun_thread_thread_exit);
6811 observer_attach_inferior_exit (infrun_inferior_exit);
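/* [Editor's note, not in the upstream source]  These observer callbacks
   keep infrun's per-thread bookkeeping in sync with thread and inferior
   lifecycle events reported elsewhere in GDB (ptid changes, stop
   requests, thread exits, inferior exits).  */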
6812
6813 /* Explicitly create without lookup, since lookup would try to create
6814 the variable with a void-typed value, and when we get here, gdbarch
6815 isn't initialized yet. At this point, we're quite sure there
6816 isn't another convenience variable of the same name. */
6817 create_internalvar_type_lazy ("_siginfo", siginfo_make_value);
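/* [Editor's note, not in the upstream source]  On targets that expose
   signal information, the convenience variable created above can be
   inspected after a signal stop, e.g.:
       (gdb) print $_siginfo
       (gdb) print $_siginfo.si_signo  */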
6818
6819 add_setshow_boolean_cmd ("observer", no_class,
6820 &observer_mode_1, _("\
6821 Set whether gdb controls the inferior in observer mode."), _("\
6822 Show whether gdb controls the inferior in observer mode."), _("\
6823 In observer mode, GDB can get data from the inferior, but not\n\
6824 affect its execution. Registers and memory may not be changed,\n\
6825 breakpoints may not be set, and the program cannot be interrupted\n\
6826 or signalled."),
6827 set_observer_mode,
6828 show_observer_mode,
6829 &setlist,
6830 &showlist);
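/* [Editor's note, not in the upstream source]  Example:
       (gdb) set observer on
   puts GDB into the read-only observer mode described in the help text
   above; "show observer" reports the current state.  */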
6831 }