1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995,
5 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
6 2008, 2009, 2010 Free Software Foundation, Inc.
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "gdb_string.h"
25 #include <ctype.h>
26 #include "symtab.h"
27 #include "frame.h"
28 #include "inferior.h"
29 #include "exceptions.h"
30 #include "breakpoint.h"
31 #include "gdb_wait.h"
32 #include "gdbcore.h"
33 #include "gdbcmd.h"
34 #include "cli/cli-script.h"
35 #include "target.h"
36 #include "gdbthread.h"
37 #include "annotate.h"
38 #include "symfile.h"
39 #include "top.h"
40 #include <signal.h>
41 #include "inf-loop.h"
42 #include "regcache.h"
43 #include "value.h"
44 #include "observer.h"
45 #include "language.h"
46 #include "solib.h"
47 #include "main.h"
48 #include "gdb_assert.h"
49 #include "mi/mi-common.h"
50 #include "event-top.h"
51 #include "record.h"
52 #include "inline-frame.h"
53 #include "jit.h"
54 #include "tracepoint.h"
55
56 /* Prototypes for local functions */
57
58 static void signals_info (char *, int);
59
60 static void handle_command (char *, int);
61
62 static void sig_print_info (enum target_signal);
63
64 static void sig_print_header (void);
65
66 static void resume_cleanups (void *);
67
68 static int hook_stop_stub (void *);
69
70 static int restore_selected_frame (void *);
71
72 static int follow_fork (void);
73
74 static void set_schedlock_func (char *args, int from_tty,
75 struct cmd_list_element *c);
76
77 static int currently_stepping (struct thread_info *tp);
78
79 static int currently_stepping_or_nexting_callback (struct thread_info *tp,
80 void *data);
81
82 static void xdb_handle_command (char *args, int from_tty);
83
84 static int prepare_to_proceed (int);
85
86 void _initialize_infrun (void);
87
88 void nullify_last_target_wait_ptid (void);
89
90 /* When set, stop the 'step' command if we enter a function which has
91 no line number information. The normal behavior is that we step
92 over such a function. */
93 int step_stop_if_no_debug = 0;
94 static void
95 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
96 struct cmd_list_element *c, const char *value)
97 {
98 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
99 }
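/* User-facing side (illustrative): this variable corresponds to GDB's
   "step-mode" setting, e.g.:

     (gdb) set step-mode on
     (gdb) show step-mode
     Mode of the step operation is on.
*/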
100
101 /* In asynchronous mode, but simulating synchronous execution. */
102
103 int sync_execution = 0;
104
105 /* wait_for_inferior and normal_stop use this to notify the user
106 when the inferior stopped in a different thread than it had been
107 running in. */
108
109 static ptid_t previous_inferior_ptid;
110
111 /* Default behavior is to detach newly forked processes (legacy). */
112 int detach_fork = 1;
113
114 int debug_displaced = 0;
115 static void
116 show_debug_displaced (struct ui_file *file, int from_tty,
117 struct cmd_list_element *c, const char *value)
118 {
119 fprintf_filtered (file, _("Displace stepping debugging is %s.\n"), value);
120 }
121
122 static int debug_infrun = 0;
123 static void
124 show_debug_infrun (struct ui_file *file, int from_tty,
125 struct cmd_list_element *c, const char *value)
126 {
127 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
128 }
129
130 /* If the program uses ELF-style shared libraries, then calls to
131 functions in shared libraries go through stubs, which live in a
132 table called the PLT (Procedure Linkage Table). The first time the
133 function is called, the stub sends control to the dynamic linker,
134 which looks up the function's real address, patches the stub so
135 that future calls will go directly to the function, and then passes
136 control to the function.
137
138 If we are stepping at the source level, we don't want to see any of
139 this --- we just want to skip over the stub and the dynamic linker.
140 The simple approach is to single-step until control leaves the
141 dynamic linker.
142
143 However, on some systems (e.g., Red Hat's 5.2 distribution) the
144 dynamic linker calls functions in the shared C library, so you
145 can't tell from the PC alone whether the dynamic linker is still
146 running. In this case, we use a step-resume breakpoint to get us
147 past the dynamic linker, as if we were using "next" to step over a
148 function call.
149
150 in_solib_dynsym_resolve_code() says whether we're in the dynamic
151 linker code or not. Normally, this means we single-step. However,
152 if SKIP_SOLIB_RESOLVER returns non-zero, its value is an
153 address where we can place a step-resume breakpoint to get past the
154 linker's symbol resolution function.
155
156 in_solib_dynsym_resolve_code() can generally be implemented in a
157 pretty portable way, by comparing the PC against the address ranges
158 of the dynamic linker's sections.
159
160 SKIP_SOLIB_RESOLVER is generally going to be system-specific, since
161 it depends on internal details of the dynamic linker. It's usually
162 not too hard to figure out where to put a breakpoint, but it
163 certainly isn't portable. SKIP_SOLIB_RESOLVER should do plenty of
164 sanity checking. If it can't figure things out, returning zero and
165 getting the (possibly confusing) stepping behavior is better than
166 signalling an error, which will obscure the change in the
167 inferior's state. */
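/* Illustrative sketch only (not GDB code) of how the two hooks described
   above combine in the stepping logic; the call form of
   SKIP_SOLIB_RESOLVER here is indicative, not its real signature:

     if (in_solib_dynsym_resolve_code (pc))
       {
         CORE_ADDR resolver = SKIP_SOLIB_RESOLVER (pc);

         if (resolver != 0)
           ... place a step-resume breakpoint at RESOLVER and continue ...
         else
           ... keep single-stepping until we leave the dynamic linker ...
       }
*/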
168
169 /* This function returns TRUE if pc is the address of an instruction
170 that lies within the dynamic linker (such as the event hook, or the
171 dld itself).
172
173 This function must be used only when a dynamic linker event has
174 been caught, and the inferior is being stepped out of the hook, or
175 undefined results are guaranteed. */
176
177 #ifndef SOLIB_IN_DYNAMIC_LINKER
178 #define SOLIB_IN_DYNAMIC_LINKER(pid,pc) 0
179 #endif
180
181
182 /* Convert the #defines into values. This is temporary until wfi control
183 flow is completely sorted out. */
184
185 #ifndef CANNOT_STEP_HW_WATCHPOINTS
186 #define CANNOT_STEP_HW_WATCHPOINTS 0
187 #else
188 #undef CANNOT_STEP_HW_WATCHPOINTS
189 #define CANNOT_STEP_HW_WATCHPOINTS 1
190 #endif
191
192 /* Tables of how to react to signals; the user sets them. */
193
194 static unsigned char *signal_stop;
195 static unsigned char *signal_print;
196 static unsigned char *signal_program;
197
198 #define SET_SIGS(nsigs,sigs,flags) \
199 do { \
200 int signum = (nsigs); \
201 while (signum-- > 0) \
202 if ((sigs)[signum]) \
203 (flags)[signum] = 1; \
204 } while (0)
205
206 #define UNSET_SIGS(nsigs,sigs,flags) \
207 do { \
208 int signum = (nsigs); \
209 while (signum-- > 0) \
210 if ((sigs)[signum]) \
211 (flags)[signum] = 0; \
212 } while (0)
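/* Usage example (illustrative): given NSIGS and an array SIGS of signals
   selected by a "handle" command, mark each selected signal so that it
   stops the inferior and is printed:

     SET_SIGS (nsigs, sigs, signal_stop);
     SET_SIGS (nsigs, sigs, signal_print);

   UNSET_SIGS works the same way but clears the chosen flags. */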
213
214 /* Value to pass to target_resume() to cause all threads to resume. */
215
216 #define RESUME_ALL minus_one_ptid
217
218 /* Command list pointer for the "stop" placeholder. */
219
220 static struct cmd_list_element *stop_command;
221
222 /* Function the inferior was in as of the last step command. */
223
224 static struct symbol *step_start_function;
225
226 /* Nonzero if we want to give control to the user when we're notified
227 of shared library events by the dynamic linker. */
228 static int stop_on_solib_events;
229 static void
230 show_stop_on_solib_events (struct ui_file *file, int from_tty,
231 struct cmd_list_element *c, const char *value)
232 {
233 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
234 value);
235 }
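/* User-facing side (illustrative): this flag is toggled with
   "set stop-on-solib-events 1" and queried with
   "show stop-on-solib-events", which prints the message above. */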
236
237 /* Nonzero means we are expecting a trace trap and should stop the
238 inferior and return silently when it happens. */
239
240 int stop_after_trap;
241
242 /* Save register contents here when executing a "finish" command or when
243 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
244 Thus this contains the return value from the called function (assuming
245 values are returned in a register). */
246
247 struct regcache *stop_registers;
248
249 /* Nonzero after stop if current stack frame should be printed. */
250
251 static int stop_print_frame;
252
253 /* This is a cached copy of the pid/waitstatus of the last event
254 returned by target_wait()/deprecated_target_wait_hook(). This
255 information is returned by get_last_target_status(). */
256 static ptid_t target_last_wait_ptid;
257 static struct target_waitstatus target_last_waitstatus;
258
259 static void context_switch (ptid_t ptid);
260
261 void init_thread_stepping_state (struct thread_info *tss);
262
263 void init_infwait_state (void);
264
265 static const char follow_fork_mode_child[] = "child";
266 static const char follow_fork_mode_parent[] = "parent";
267
268 static const char *follow_fork_mode_kind_names[] = {
269 follow_fork_mode_child,
270 follow_fork_mode_parent,
271 NULL
272 };
273
274 static const char *follow_fork_mode_string = follow_fork_mode_parent;
275 static void
276 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
277 struct cmd_list_element *c, const char *value)
278 {
279 fprintf_filtered (file, _("\
280 Debugger response to a program call of fork or vfork is \"%s\".\n"),
281 value);
282 }
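/* User-facing side of this mode (illustrative; the CLI name is
   "follow-fork-mode"):

     (gdb) set follow-fork-mode child
     (gdb) show follow-fork-mode
     Debugger response to a program call of fork or vfork is "child".
*/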
283 \f
284
285 /* Tell the target to follow the fork we're stopped at. Returns true
286 if the inferior should be resumed; false, if the target for some
287 reason decided it's best not to resume. */
288
289 static int
290 follow_fork (void)
291 {
292 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
293 int should_resume = 1;
294 struct thread_info *tp;
295
296 /* Copy user stepping state to the new inferior thread. FIXME: the
297 followed fork child thread should have a copy of most of the
298 parent thread structure's run control related fields, not just these.
299 Initialized to avoid "may be used uninitialized" warnings from gcc. */
300 struct breakpoint *step_resume_breakpoint = NULL;
301 CORE_ADDR step_range_start = 0;
302 CORE_ADDR step_range_end = 0;
303 struct frame_id step_frame_id = { 0 };
304
305 if (!non_stop)
306 {
307 ptid_t wait_ptid;
308 struct target_waitstatus wait_status;
309
310 /* Get the last target status returned by target_wait(). */
311 get_last_target_status (&wait_ptid, &wait_status);
312
313 /* If not stopped at a fork event, then there's nothing else to
314 do. */
315 if (wait_status.kind != TARGET_WAITKIND_FORKED
316 && wait_status.kind != TARGET_WAITKIND_VFORKED)
317 return 1;
318
319 /* Check if we switched over from WAIT_PTID, since the event was
320 reported. */
321 if (!ptid_equal (wait_ptid, minus_one_ptid)
322 && !ptid_equal (inferior_ptid, wait_ptid))
323 {
324 /* We did. Switch back to WAIT_PTID thread, to tell the
325 target to follow it (in either direction). We'll
326 afterwards refuse to resume, and inform the user what
327 happened. */
328 switch_to_thread (wait_ptid);
329 should_resume = 0;
330 }
331 }
332
333 tp = inferior_thread ();
334
335 /* If there were any forks/vforks that were caught and are now to be
336 followed, then do so now. */
337 switch (tp->pending_follow.kind)
338 {
339 case TARGET_WAITKIND_FORKED:
340 case TARGET_WAITKIND_VFORKED:
341 {
342 ptid_t parent, child;
343
344 /* If the user did a next/step, etc, over a fork call,
345 preserve the stepping state in the fork child. */
346 if (follow_child && should_resume)
347 {
348 step_resume_breakpoint
349 = clone_momentary_breakpoint (tp->step_resume_breakpoint);
350 step_range_start = tp->step_range_start;
351 step_range_end = tp->step_range_end;
352 step_frame_id = tp->step_frame_id;
353
354 /* For now, delete the parent's sr breakpoint, otherwise,
355 parent/child sr breakpoints are considered duplicates,
356 and the child version will not be installed. Remove
357 this when the breakpoints module becomes aware of
358 inferiors and address spaces. */
359 delete_step_resume_breakpoint (tp);
360 tp->step_range_start = 0;
361 tp->step_range_end = 0;
362 tp->step_frame_id = null_frame_id;
363 }
364
365 parent = inferior_ptid;
366 child = tp->pending_follow.value.related_pid;
367
368 /* Tell the target to do whatever is necessary to follow
369 either parent or child. */
370 if (target_follow_fork (follow_child))
371 {
372 /* Target refused to follow, or there's some other reason
373 we shouldn't resume. */
374 should_resume = 0;
375 }
376 else
377 {
378 /* This pending follow fork event is now handled, one way
379 or another. The previously selected thread may be gone
380 from the lists by now, but if it is still around, we need
381 to clear the pending follow request. */
382 tp = find_thread_ptid (parent);
383 if (tp)
384 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
385
386 /* This makes sure we don't try to apply the "Switched
387 over from WAIT_PID" logic above. */
388 nullify_last_target_wait_ptid ();
389
390 /* If we followed the child, switch to it... */
391 if (follow_child)
392 {
393 switch_to_thread (child);
394
395 /* ... and preserve the stepping state, in case the
396 user was stepping over the fork call. */
397 if (should_resume)
398 {
399 tp = inferior_thread ();
400 tp->step_resume_breakpoint = step_resume_breakpoint;
401 tp->step_range_start = step_range_start;
402 tp->step_range_end = step_range_end;
403 tp->step_frame_id = step_frame_id;
404 }
405 else
406 {
407 /* If we get here, it was because we're trying to
408 resume from a fork catchpoint, but, the user
409 has switched threads away from the thread that
410 forked. In that case, the resume command
411 issued is most likely not applicable to the
412 child, so just warn, and refuse to resume. */
413 warning (_("\
414 Not resuming: switched threads before following fork child.\n"));
415 }
416
417 /* Reset breakpoints in the child as appropriate. */
418 follow_inferior_reset_breakpoints ();
419 }
420 else
421 switch_to_thread (parent);
422 }
423 }
424 break;
425 case TARGET_WAITKIND_SPURIOUS:
426 /* Nothing to follow. */
427 break;
428 default:
429 internal_error (__FILE__, __LINE__,
430 "Unexpected pending_follow.kind %d\n",
431 tp->pending_follow.kind);
432 break;
433 }
434
435 return should_resume;
436 }
437
438 void
439 follow_inferior_reset_breakpoints (void)
440 {
441 struct thread_info *tp = inferior_thread ();
442
443 /* Was there a step_resume breakpoint? (There was if the user
444 did a "next" at the fork() call.) If so, explicitly reset its
445 thread number.
446
447 step_resumes are a form of bp that are made to be per-thread.
448 Since we created the step_resume bp when the parent process
449 was being debugged, and now are switching to the child process,
450 from the breakpoint package's viewpoint, that's a switch of
451 "threads". We must update the bp's notion of which thread
452 it is for, or it'll be ignored when it triggers. */
453
454 if (tp->step_resume_breakpoint)
455 breakpoint_re_set_thread (tp->step_resume_breakpoint);
456
457 /* Reinsert all breakpoints in the child. The user may have set
458 breakpoints after catching the fork, in which case those
459 were never set in the child, but only in the parent. This makes
460 sure the inserted breakpoints match the breakpoint list. */
461
462 breakpoint_re_set ();
463 insert_breakpoints ();
464 }
465
466 /* The child has exited or execed: resume threads of the parent the
467 user wanted to be executing. */
468
469 static int
470 proceed_after_vfork_done (struct thread_info *thread,
471 void *arg)
472 {
473 int pid = * (int *) arg;
474
475 if (ptid_get_pid (thread->ptid) == pid
476 && is_running (thread->ptid)
477 && !is_executing (thread->ptid)
478 && !thread->stop_requested
479 && thread->stop_signal == TARGET_SIGNAL_0)
480 {
481 if (debug_infrun)
482 fprintf_unfiltered (gdb_stdlog,
483 "infrun: resuming vfork parent thread %s\n",
484 target_pid_to_str (thread->ptid));
485
486 switch_to_thread (thread->ptid);
487 clear_proceed_status ();
488 proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);
489 }
490
491 return 0;
492 }
493
494 /* Called whenever we notice an exec or exit event, to handle
495 detaching or resuming a vfork parent. */
496
497 static void
498 handle_vfork_child_exec_or_exit (int exec)
499 {
500 struct inferior *inf = current_inferior ();
501
502 if (inf->vfork_parent)
503 {
504 int resume_parent = -1;
505
506 /* This exec or exit marks the end of the shared memory region
507 between the parent and the child. If the user wanted to
508 detach from the parent, now is the time. */
509
510 if (inf->vfork_parent->pending_detach)
511 {
512 struct thread_info *tp;
513 struct cleanup *old_chain;
514 struct program_space *pspace;
515 struct address_space *aspace;
516
517 /* follow-fork child, detach-on-fork on */
518
519 old_chain = make_cleanup_restore_current_thread ();
520
521 /* We're letting go of the parent. */
522 tp = any_live_thread_of_process (inf->vfork_parent->pid);
523 switch_to_thread (tp->ptid);
524
525 /* We're about to detach from the parent, which implicitly
526 removes breakpoints from its address space. There's a
527 catch here: we want to reuse the spaces for the child,
528 but, parent/child are still sharing the pspace at this
529 point, although the exec in reality makes the kernel give
530 the child a fresh set of new pages. The problem here is
531 that the breakpoints module, being unaware of this, would
532 likely choose the child process to write to the parent's
533 address space. Swapping the child temporarily away from
534 the spaces has the desired effect. Yes, this is "sort
535 of" a hack. */
536
537 pspace = inf->pspace;
538 aspace = inf->aspace;
539 inf->aspace = NULL;
540 inf->pspace = NULL;
541
542 if (debug_infrun || info_verbose)
543 {
544 target_terminal_ours ();
545
546 if (exec)
547 fprintf_filtered (gdb_stdlog,
548 "Detaching vfork parent process %d after child exec.\n",
549 inf->vfork_parent->pid);
550 else
551 fprintf_filtered (gdb_stdlog,
552 "Detaching vfork parent process %d after child exit.\n",
553 inf->vfork_parent->pid);
554 }
555
556 target_detach (NULL, 0);
557
558 /* Put it back. */
559 inf->pspace = pspace;
560 inf->aspace = aspace;
561
562 do_cleanups (old_chain);
563 }
564 else if (exec)
565 {
566 /* We're staying attached to the parent, so, really give the
567 child a new address space. */
568 inf->pspace = add_program_space (maybe_new_address_space ());
569 inf->aspace = inf->pspace->aspace;
570 inf->removable = 1;
571 set_current_program_space (inf->pspace);
572
573 resume_parent = inf->vfork_parent->pid;
574
575 /* Break the bonds. */
576 inf->vfork_parent->vfork_child = NULL;
577 }
578 else
579 {
580 struct cleanup *old_chain;
581 struct program_space *pspace;
582
583 /* If this is a vfork child exiting, then the pspace and
584 aspaces were shared with the parent. Since we're
585 reporting the process exit, we'll be mourning all that is
586 found in the address space, and switching to null_ptid,
587 preparing to start a new inferior. But, since we don't
588 want to clobber the parent's address/program spaces, we
589 go ahead and create a new one for this exiting
590 inferior. */
591
592 /* Switch to null_ptid, so that clone_program_space doesn't want
593 to read the selected frame of a dead process. */
594 old_chain = save_inferior_ptid ();
595 inferior_ptid = null_ptid;
596
597 /* This inferior is dead, so avoid giving the breakpoints
598 module the option to write through to it (cloning a
599 program space resets breakpoints). */
600 inf->aspace = NULL;
601 inf->pspace = NULL;
602 pspace = add_program_space (maybe_new_address_space ());
603 set_current_program_space (pspace);
604 inf->removable = 1;
605 clone_program_space (pspace, inf->vfork_parent->pspace);
606 inf->pspace = pspace;
607 inf->aspace = pspace->aspace;
608
609 /* Put back inferior_ptid. We'll continue mourning this
610 inferior. */
611 do_cleanups (old_chain);
612
613 resume_parent = inf->vfork_parent->pid;
614 /* Break the bonds. */
615 inf->vfork_parent->vfork_child = NULL;
616 }
617
618 inf->vfork_parent = NULL;
619
620 gdb_assert (current_program_space == inf->pspace);
621
622 if (non_stop && resume_parent != -1)
623 {
624 /* If the user wanted the parent to be running, let it go
625 free now. */
626 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
627
628 if (debug_infrun)
629 fprintf_unfiltered (gdb_stdlog, "infrun: resuming vfork parent process %d\n",
630 resume_parent);
631
632 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
633
634 do_cleanups (old_chain);
635 }
636 }
637 }
638
639 /* Enum strings for "set|show follow-exec-mode". */
640
641 static const char follow_exec_mode_new[] = "new";
642 static const char follow_exec_mode_same[] = "same";
643 static const char *follow_exec_mode_names[] =
644 {
645 follow_exec_mode_new,
646 follow_exec_mode_same,
647 NULL,
648 };
649
650 static const char *follow_exec_mode_string = follow_exec_mode_same;
651 static void
652 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
653 struct cmd_list_element *c, const char *value)
654 {
655 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
656 }
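/* User-facing side of this mode (illustrative; the CLI name is
   "follow-exec-mode"):

     (gdb) set follow-exec-mode new
     (gdb) show follow-exec-mode
     Follow exec mode is "new".
*/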
657
658 /* EXECD_PATHNAME is assumed to be non-NULL. */
659
660 static void
661 follow_exec (ptid_t pid, char *execd_pathname)
662 {
663 struct target_ops *tgt;
664 struct thread_info *th = inferior_thread ();
665 struct inferior *inf = current_inferior ();
666
667 /* This is an exec event that we actually wish to pay attention to.
668 Refresh our symbol table to the newly exec'd program, remove any
669 momentary bp's, etc.
670
671 If there are breakpoints, they aren't really inserted now,
672 since the exec() transformed our inferior into a fresh set
673 of instructions.
674
675 We want to preserve symbolic breakpoints on the list, since
676 we have hopes that they can be reset after the new a.out's
677 symbol table is read.
678
679 However, any "raw" breakpoints must be removed from the list
680 (e.g., the solib bp's), since their address is probably invalid
681 now.
682
683 And, we DON'T want to call delete_breakpoints() here, since
684 that may write the bp's "shadow contents" (the instruction
685 value that was overwritten with a TRAP instruction). Since
686 we now have a new a.out, those shadow contents aren't valid. */
687
688 mark_breakpoints_out ();
689
690 update_breakpoints_after_exec ();
691
692 /* If there was one, it's gone now. We cannot truly step-to-next
693 statement through an exec(). */
694 th->step_resume_breakpoint = NULL;
695 th->step_range_start = 0;
696 th->step_range_end = 0;
697
698 /* The target reports the exec event to the main thread, even if
699 some other thread does the exec, and even if the main thread was
700 already stopped --- if debugging in non-stop mode, it's possible
701 the user had the main thread held stopped in the previous image
702 --- release it now. This is the same behavior as step-over-exec
703 with scheduler-locking on in all-stop mode. */
704 th->stop_requested = 0;
705
706 /* What is this a.out's name? */
707 printf_unfiltered (_("%s is executing new program: %s\n"),
708 target_pid_to_str (inferior_ptid),
709 execd_pathname);
710
711 /* We've followed the inferior through an exec. Therefore, the
712 inferior has essentially been killed & reborn. */
713
714 gdb_flush (gdb_stdout);
715
716 breakpoint_init_inferior (inf_execd);
717
718 if (gdb_sysroot && *gdb_sysroot)
719 {
720 char *name = alloca (strlen (gdb_sysroot)
721 + strlen (execd_pathname)
722 + 1);
723 strcpy (name, gdb_sysroot);
724 strcat (name, execd_pathname);
725 execd_pathname = name;
726 }
727
728 /* Reset the shared library package. This ensures that we get a
729 shlib event when the child reaches "_start", at which point the
730 dld will have had a chance to initialize the child. */
731 /* Also, loading a symbol file below may trigger symbol lookups, and
732 we don't want those to be satisfied by the libraries of the
733 previous incarnation of this process. */
734 no_shared_libraries (NULL, 0);
735
736 if (follow_exec_mode_string == follow_exec_mode_new)
737 {
738 struct program_space *pspace;
739 struct inferior *new_inf;
740
741 /* The user wants to keep the old inferior and program spaces
742 around. Create a new fresh one, and switch to it. */
743
744 inf = add_inferior (current_inferior ()->pid);
745 pspace = add_program_space (maybe_new_address_space ());
746 inf->pspace = pspace;
747 inf->aspace = pspace->aspace;
748
749 exit_inferior_num_silent (current_inferior ()->num);
750
751 set_current_inferior (inf);
752 set_current_program_space (pspace);
753 }
754
755 gdb_assert (current_program_space == inf->pspace);
756
757 /* That a.out is now the one to use. */
758 exec_file_attach (execd_pathname, 0);
759
760 /* Load the main file's symbols. */
761 symbol_file_add_main (execd_pathname, 0);
762
763 #ifdef SOLIB_CREATE_INFERIOR_HOOK
764 SOLIB_CREATE_INFERIOR_HOOK (PIDGET (inferior_ptid));
765 #else
766 solib_create_inferior_hook (0);
767 #endif
768
769 jit_inferior_created_hook ();
770
771 /* Reinsert all breakpoints. (Those which were symbolic have
772 been reset to the proper address in the new a.out, thanks
773 to symbol_file_command...) */
774 insert_breakpoints ();
775
776 /* The next resume of this inferior should bring it to the shlib
777 startup breakpoints. (If the user had also set bp's on
778 "main" from the old (parent) process, then they'll auto-
779 matically get reset there in the new process.) */
780 }
781
782 /* Non-zero if we are just simulating a single-step. This is needed
783 because we cannot remove the breakpoints in the inferior process
784 until after the `wait' in `wait_for_inferior'. */
785 static int singlestep_breakpoints_inserted_p = 0;
786
787 /* The thread we inserted single-step breakpoints for. */
788 static ptid_t singlestep_ptid;
789
790 /* PC when we started this single-step. */
791 static CORE_ADDR singlestep_pc;
792
793 /* If another thread hit the singlestep breakpoint, we save the original
794 thread here so that we can resume single-stepping it later. */
795 static ptid_t saved_singlestep_ptid;
796 static int stepping_past_singlestep_breakpoint;
797
798 /* If not equal to null_ptid, this means that after the step over a
799 breakpoint is finished, we need to switch to deferred_step_ptid, and step it.
800
801 The use case is when one thread has hit a breakpoint, and then the user
802 has switched to another thread and issued 'step'. We need to step over
803 the breakpoint in the thread which hit the breakpoint, but then continue
804 stepping the thread the user has selected. */
805 static ptid_t deferred_step_ptid;
806 \f
807 /* Displaced stepping. */
808
809 /* In non-stop debugging mode, we must take special care to manage
810 breakpoints properly; in particular, the traditional strategy for
811 stepping a thread past a breakpoint it has hit is unsuitable.
812 'Displaced stepping' is a tactic for stepping one thread past a
813 breakpoint it has hit while ensuring that other threads running
814 concurrently will hit the breakpoint as they should.
815
816 The traditional way to step a thread T off a breakpoint in a
817 multi-threaded program in all-stop mode is as follows:
818
819 a0) Initially, all threads are stopped, and breakpoints are not
820 inserted.
821 a1) We single-step T, leaving breakpoints uninserted.
822 a2) We insert breakpoints, and resume all threads.
823
824 In non-stop debugging, however, this strategy is unsuitable: we
825 don't want to have to stop all threads in the system in order to
826 continue or step T past a breakpoint. Instead, we use displaced
827 stepping:
828
829 n0) Initially, T is stopped, other threads are running, and
830 breakpoints are inserted.
831 n1) We copy the instruction "under" the breakpoint to a separate
832 location, outside the main code stream, making any adjustments
833 to the instruction, register, and memory state as directed by
834 T's architecture.
835 n2) We single-step T over the instruction at its new location.
836 n3) We adjust the resulting register and memory state as directed
837 by T's architecture. This includes resetting T's PC to point
838 back into the main instruction stream.
839 n4) We resume T.
840
841 This approach depends on the following gdbarch methods:
842
843 - gdbarch_max_insn_length and gdbarch_displaced_step_location
844 indicate where to copy the instruction, and how much space must
845 be reserved there. We use these in step n1.
846
847 - gdbarch_displaced_step_copy_insn copies an instruction to a new
848 address, and makes any necessary adjustments to the instruction,
849 register contents, and memory. We use this in step n1.
850
851 - gdbarch_displaced_step_fixup adjusts registers and memory after
852 we have successfully single-stepped the instruction, to yield the
853 same effect the instruction would have had if we had executed it
854 at its original address. We use this in step n3.
855
856 - gdbarch_displaced_step_free_closure provides cleanup.
857
858 The gdbarch_displaced_step_copy_insn and
859 gdbarch_displaced_step_fixup functions must be written so that
860 copying an instruction with gdbarch_displaced_step_copy_insn,
861 single-stepping across the copied instruction, and then applying
862 gdbarch_displaced_step_fixup has the same effects on the
863 thread's memory and registers as stepping the instruction in place
864 would have. Exactly which responsibilities fall to the copy and
865 which fall to the fixup is up to the author of those functions.
866
867 See the comments in gdbarch.sh for details.
868
869 Note that displaced stepping and software single-step cannot
870 currently be used in combination, although with some care I think
871 they could be made to. Software single-step works by placing
872 breakpoints on all possible subsequent instructions; if the
873 displaced instruction is a PC-relative jump, those breakpoints
874 could fall in very strange places --- on pages that aren't
875 executable, or at addresses that are not proper instruction
876 boundaries. (We do generally let other threads run while we wait
877 to hit the software single-step breakpoint, and they might
878 encounter such a corrupted instruction.) One way to work around
879 this would be to have gdbarch_displaced_step_copy_insn fully
880 simulate the effect of PC-relative instructions (and return NULL)
881 on architectures that use software single-stepping.
882
883 In non-stop mode, we can have independent and simultaneous step
884 requests, so more than one thread may need to simultaneously step
885 over a breakpoint. The current implementation assumes there is
886 only one scratch space per process. In this case, we have to
887 serialize access to the scratch space. If thread A wants to step
888 over a breakpoint, but we are currently waiting for some other
889 thread to complete a displaced step, we leave thread A stopped and
890 place it in the displaced_step_request_queue. Whenever a displaced
891 step finishes, we pick the next thread in the queue and start a new
892 displaced step operation on it. See displaced_step_prepare and
893 displaced_step_fixup for details. */
894
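/* Illustrative sketch only (not GDB code): the n1-n4 sequence above,
   expressed in terms of the gdbarch hooks it relies on.  Queueing,
   cleanups and error handling are omitted; the real logic is in
   displaced_step_prepare and displaced_step_fixup below.

     copy = gdbarch_displaced_step_location (gdbarch);
     closure = gdbarch_displaced_step_copy_insn (gdbarch, original,
                                                 copy, regcache);      -- n1
     regcache_write_pc (regcache, copy);
     target_resume (ptid, 1, TARGET_SIGNAL_0);                         -- n2
     ... the target reports the single-step as finished ...
     gdbarch_displaced_step_fixup (gdbarch, closure, original,
                                   copy, regcache);                    -- n3
     target_resume (ptid, 0, TARGET_SIGNAL_0);                         -- n4
*/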
895 struct displaced_step_request
896 {
897 ptid_t ptid;
898 struct displaced_step_request *next;
899 };
900
901 /* Per-inferior displaced stepping state. */
902 struct displaced_step_inferior_state
903 {
904 /* Pointer to next in linked list. */
905 struct displaced_step_inferior_state *next;
906
907 /* The process this displaced step state refers to. */
908 int pid;
909
910 /* A queue of pending displaced stepping requests. One entry per
911 thread that needs to do a displaced step. */
912 struct displaced_step_request *step_request_queue;
913
914 /* If this is not null_ptid, this is the thread carrying out a
915 displaced single-step in process PID. This thread's state will
916 require fixing up once it has completed its step. */
917 ptid_t step_ptid;
918
919 /* The architecture the thread had when we stepped it. */
920 struct gdbarch *step_gdbarch;
921
922 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
923 for post-step cleanup. */
924 struct displaced_step_closure *step_closure;
925
926 /* The address of the original instruction, and the copy we
927 made. */
928 CORE_ADDR step_original, step_copy;
929
930 /* Saved contents of copy area. */
931 gdb_byte *step_saved_copy;
932 };
933
934 /* The list of states of processes involved in displaced stepping
935 presently. */
936 static struct displaced_step_inferior_state *displaced_step_inferior_states;
937
938 /* Get the displaced stepping state of process PID. */
939
940 static struct displaced_step_inferior_state *
941 get_displaced_stepping_state (int pid)
942 {
943 struct displaced_step_inferior_state *state;
944
945 for (state = displaced_step_inferior_states;
946 state != NULL;
947 state = state->next)
948 if (state->pid == pid)
949 return state;
950
951 return NULL;
952 }
953
954 /* Add a new displaced stepping state for process PID to the displaced
955 stepping state list, or return a pointer to an existing entry
956 if one already exists. Never returns NULL. */
957
958 static struct displaced_step_inferior_state *
959 add_displaced_stepping_state (int pid)
960 {
961 struct displaced_step_inferior_state *state;
962
963 for (state = displaced_step_inferior_states;
964 state != NULL;
965 state = state->next)
966 if (state->pid == pid)
967 return state;
968
969 state = xcalloc (1, sizeof (*state));
970 state->pid = pid;
971 state->next = displaced_step_inferior_states;
972 displaced_step_inferior_states = state;
973
974 return state;
975 }
976
977 /* Remove the displaced stepping state of process PID. */
978
979 static void
980 remove_displaced_stepping_state (int pid)
981 {
982 struct displaced_step_inferior_state *it, **prev_next_p;
983
984 gdb_assert (pid != 0);
985
986 it = displaced_step_inferior_states;
987 prev_next_p = &displaced_step_inferior_states;
988 while (it)
989 {
990 if (it->pid == pid)
991 {
992 *prev_next_p = it->next;
993 xfree (it);
994 return;
995 }
996
997 prev_next_p = &it->next;
998 it = *prev_next_p;
999 }
1000 }
1001
1002 static void
1003 infrun_inferior_exit (struct inferior *inf)
1004 {
1005 remove_displaced_stepping_state (inf->pid);
1006 }
1007
1008 /* Enum strings for "set|show displaced-stepping". */
1009
1010 static const char can_use_displaced_stepping_auto[] = "auto";
1011 static const char can_use_displaced_stepping_on[] = "on";
1012 static const char can_use_displaced_stepping_off[] = "off";
1013 static const char *can_use_displaced_stepping_enum[] =
1014 {
1015 can_use_displaced_stepping_auto,
1016 can_use_displaced_stepping_on,
1017 can_use_displaced_stepping_off,
1018 NULL,
1019 };
1020
1021 /* If ON, and the architecture supports it, GDB will use displaced
1022 stepping to step over breakpoints. If OFF, or if the architecture
1023 doesn't support it, GDB will instead use the traditional
1024 hold-and-step approach. If AUTO (which is the default), GDB will
1025 decide which technique to use to step over breakpoints depending on
1026 which of all-stop or non-stop mode is active --- displaced stepping
1027 in non-stop mode; hold-and-step in all-stop mode. */
1028
1029 static const char *can_use_displaced_stepping =
1030 can_use_displaced_stepping_auto;
1031
1032 static void
1033 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1034 struct cmd_list_element *c,
1035 const char *value)
1036 {
1037 if (can_use_displaced_stepping == can_use_displaced_stepping_auto)
1038 fprintf_filtered (file, _("\
1039 Debugger's willingness to use displaced stepping to step over \
1040 breakpoints is %s (currently %s).\n"),
1041 value, non_stop ? "on" : "off");
1042 else
1043 fprintf_filtered (file, _("\
1044 Debugger's willingness to use displaced stepping to step over \
1045 breakpoints is %s.\n"), value);
1046 }
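/* Illustrative session (the CLI name is "displaced-stepping"):

     (gdb) set displaced-stepping auto
     (gdb) show displaced-stepping
     Debugger's willingness to use displaced stepping to step over
     breakpoints is auto (currently off).

   "(currently off)" assumes an all-stop session; with non-stop enabled,
   the auto setting resolves to "on", as use_displaced_stepping below
   implements. */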
1047
1048 /* Return non-zero if displaced stepping can/should be used to step
1049 over breakpoints. */
1050
1051 static int
1052 use_displaced_stepping (struct gdbarch *gdbarch)
1053 {
1054 return (((can_use_displaced_stepping == can_use_displaced_stepping_auto
1055 && non_stop)
1056 || can_use_displaced_stepping == can_use_displaced_stepping_on)
1057 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1058 && !RECORD_IS_USED);
1059 }
1060
1061 /* Clean out any stray displaced stepping state. */
1062 static void
1063 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1064 {
1065 /* Indicate that there is no cleanup pending. */
1066 displaced->step_ptid = null_ptid;
1067
1068 if (displaced->step_closure)
1069 {
1070 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1071 displaced->step_closure);
1072 displaced->step_closure = NULL;
1073 }
1074 }
1075
1076 static void
1077 displaced_step_clear_cleanup (void *arg)
1078 {
1079 struct displaced_step_inferior_state *state = arg;
1080
1081 displaced_step_clear (state);
1082 }
1083
1084 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1085 void
1086 displaced_step_dump_bytes (struct ui_file *file,
1087 const gdb_byte *buf,
1088 size_t len)
1089 {
1090 int i;
1091
1092 for (i = 0; i < len; i++)
1093 fprintf_unfiltered (file, "%02x ", buf[i]);
1094 fputs_unfiltered ("\n", file);
1095 }
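/* Example (illustrative): displaced_step_dump_bytes (gdb_stdlog, buf, 4)
   on an x86 "int3; nop; nop; nop" buffer prints "cc 90 90 90 " followed
   by a newline. */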
1096
1097 /* Prepare to single-step, using displaced stepping.
1098
1099 Note that we cannot use displaced stepping when we have a signal to
1100 deliver. If we have a signal to deliver and an instruction to step
1101 over, then after the step, there will be no indication from the
1102 target whether the thread entered a signal handler or ignored the
1103 signal and stepped over the instruction successfully --- both cases
1104 result in a simple SIGTRAP. In the first case we mustn't do a
1105 fixup, and in the second case we must --- but we can't tell which.
1106 Comments in the code for 'random signals' in handle_inferior_event
1107 explain how we handle this case instead.
1108
1109 Returns 1 if preparing was successful -- this thread is going to be
1110 stepped now; or 0 if displaced stepping this thread got queued. */
1111 static int
1112 displaced_step_prepare (ptid_t ptid)
1113 {
1114 struct cleanup *old_cleanups, *ignore_cleanups;
1115 struct regcache *regcache = get_thread_regcache (ptid);
1116 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1117 CORE_ADDR original, copy;
1118 ULONGEST len;
1119 struct displaced_step_closure *closure;
1120 struct displaced_step_inferior_state *displaced;
1121
1122 /* We should never reach this function if the architecture does not
1123 support displaced stepping. */
1124 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1125
1126 /* We have to displaced step one thread at a time, as we only have
1127 access to a single scratch space per inferior. */
1128
1129 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1130
1131 if (!ptid_equal (displaced->step_ptid, null_ptid))
1132 {
1133 /* Already waiting for a displaced step to finish. Defer this
1134 request and place it in the queue. */
1135 struct displaced_step_request *req, *new_req;
1136
1137 if (debug_displaced)
1138 fprintf_unfiltered (gdb_stdlog,
1139 "displaced: defering step of %s\n",
1140 target_pid_to_str (ptid));
1141
1142 new_req = xmalloc (sizeof (*new_req));
1143 new_req->ptid = ptid;
1144 new_req->next = NULL;
1145
1146 if (displaced->step_request_queue)
1147 {
1148 for (req = displaced->step_request_queue;
1149 req && req->next;
1150 req = req->next)
1151 ;
1152 req->next = new_req;
1153 }
1154 else
1155 displaced->step_request_queue = new_req;
1156
1157 return 0;
1158 }
1159 else
1160 {
1161 if (debug_displaced)
1162 fprintf_unfiltered (gdb_stdlog,
1163 "displaced: stepping %s now\n",
1164 target_pid_to_str (ptid));
1165 }
1166
1167 displaced_step_clear (displaced);
1168
1169 old_cleanups = save_inferior_ptid ();
1170 inferior_ptid = ptid;
1171
1172 original = regcache_read_pc (regcache);
1173
1174 copy = gdbarch_displaced_step_location (gdbarch);
1175 len = gdbarch_max_insn_length (gdbarch);
1176
1177 /* Save the original contents of the copy area. */
1178 displaced->step_saved_copy = xmalloc (len);
1179 ignore_cleanups = make_cleanup (free_current_contents,
1180 &displaced->step_saved_copy);
1181 read_memory (copy, displaced->step_saved_copy, len);
1182 if (debug_displaced)
1183 {
1184 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1185 paddress (gdbarch, copy));
1186 displaced_step_dump_bytes (gdb_stdlog,
1187 displaced->step_saved_copy,
1188 len);
1189 };
1190
1191 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1192 original, copy, regcache);
1193
1194 /* We don't support the fully-simulated case at present. */
1195 gdb_assert (closure);
1196
1197 /* Save the information we need to fix things up if the step
1198 succeeds. */
1199 displaced->step_ptid = ptid;
1200 displaced->step_gdbarch = gdbarch;
1201 displaced->step_closure = closure;
1202 displaced->step_original = original;
1203 displaced->step_copy = copy;
1204
1205 make_cleanup (displaced_step_clear_cleanup, displaced);
1206
1207 /* Resume execution at the copy. */
1208 regcache_write_pc (regcache, copy);
1209
1210 discard_cleanups (ignore_cleanups);
1211
1212 do_cleanups (old_cleanups);
1213
1214 if (debug_displaced)
1215 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1216 paddress (gdbarch, copy));
1217
1218 return 1;
1219 }
1220
1221 static void
1222 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
1223 {
1224 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1225 inferior_ptid = ptid;
1226 write_memory (memaddr, myaddr, len);
1227 do_cleanups (ptid_cleanup);
1228 }
1229
1230 static void
1231 displaced_step_fixup (ptid_t event_ptid, enum target_signal signal)
1232 {
1233 struct cleanup *old_cleanups;
1234 struct displaced_step_inferior_state *displaced
1235 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1236
1237 /* Was any thread of this process doing a displaced step? */
1238 if (displaced == NULL)
1239 return;
1240
1241 /* Was this event for the pid we displaced? */
1242 if (ptid_equal (displaced->step_ptid, null_ptid)
1243 || ! ptid_equal (displaced->step_ptid, event_ptid))
1244 return;
1245
1246 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1247
1248 /* Restore the contents of the copy area. */
1249 {
1250 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1251 write_memory_ptid (displaced->step_ptid, displaced->step_copy,
1252 displaced->step_saved_copy, len);
1253 if (debug_displaced)
1254 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s\n",
1255 paddress (displaced->step_gdbarch,
1256 displaced->step_copy));
1257 }
1258
1259 /* Did the instruction complete successfully? */
1260 if (signal == TARGET_SIGNAL_TRAP)
1261 {
1262 /* Fix up the resulting state. */
1263 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1264 displaced->step_closure,
1265 displaced->step_original,
1266 displaced->step_copy,
1267 get_thread_regcache (displaced->step_ptid));
1268 }
1269 else
1270 {
1271 /* Since the instruction didn't complete, all we can do is
1272 relocate the PC. */
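/* Worked example (illustrative addresses only): with
   step_original == 0x400500 and step_copy == 0x600000, a PC read
   back as 0x600000 is rewritten to 0x400500, and a PC of 0x600004
   would likewise map to 0x400504. */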
1273 struct regcache *regcache = get_thread_regcache (event_ptid);
1274 CORE_ADDR pc = regcache_read_pc (regcache);
1275 pc = displaced->step_original + (pc - displaced->step_copy);
1276 regcache_write_pc (regcache, pc);
1277 }
1278
1279 do_cleanups (old_cleanups);
1280
1281 displaced->step_ptid = null_ptid;
1282
1283 /* Are there any pending displaced stepping requests? If so, run
1284 one now. Leave the state object around, since we're likely to
1285 need it again soon. */
1286 while (displaced->step_request_queue)
1287 {
1288 struct displaced_step_request *head;
1289 ptid_t ptid;
1290 struct regcache *regcache;
1291 struct gdbarch *gdbarch;
1292 CORE_ADDR actual_pc;
1293 struct address_space *aspace;
1294
1295 head = displaced->step_request_queue;
1296 ptid = head->ptid;
1297 displaced->step_request_queue = head->next;
1298 xfree (head);
1299
1300 context_switch (ptid);
1301
1302 regcache = get_thread_regcache (ptid);
1303 actual_pc = regcache_read_pc (regcache);
1304 aspace = get_regcache_aspace (regcache);
1305
1306 if (breakpoint_here_p (aspace, actual_pc))
1307 {
1308 if (debug_displaced)
1309 fprintf_unfiltered (gdb_stdlog,
1310 "displaced: stepping queued %s now\n",
1311 target_pid_to_str (ptid));
1312
1313 displaced_step_prepare (ptid);
1314
1315 gdbarch = get_regcache_arch (regcache);
1316
1317 if (debug_displaced)
1318 {
1319 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1320 gdb_byte buf[4];
1321
1322 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1323 paddress (gdbarch, actual_pc));
1324 read_memory (actual_pc, buf, sizeof (buf));
1325 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1326 }
1327
1328 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1329 displaced->step_closure))
1330 target_resume (ptid, 1, TARGET_SIGNAL_0);
1331 else
1332 target_resume (ptid, 0, TARGET_SIGNAL_0);
1333
1334 /* Done, we're stepping a thread. */
1335 break;
1336 }
1337 else
1338 {
1339 int step;
1340 struct thread_info *tp = inferior_thread ();
1341
1342 /* The breakpoint we were sitting under has since been
1343 removed. */
1344 tp->trap_expected = 0;
1345
1346 /* Go back to what we were trying to do. */
1347 step = currently_stepping (tp);
1348
1349 if (debug_displaced)
1350 fprintf_unfiltered (gdb_stdlog, "breakpoint is gone %s: step(%d)\n",
1351 target_pid_to_str (tp->ptid), step);
1352
1353 target_resume (ptid, step, TARGET_SIGNAL_0);
1354 tp->stop_signal = TARGET_SIGNAL_0;
1355
1356 /* This request was discarded. See if there's any other
1357 thread waiting for its turn. */
1358 }
1359 }
1360 }
1361
1362 /* Update global variables holding ptids to hold NEW_PTID if they were
1363 holding OLD_PTID. */
1364 static void
1365 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1366 {
1367 struct displaced_step_request *it;
1368 struct displaced_step_inferior_state *displaced;
1369
1370 if (ptid_equal (inferior_ptid, old_ptid))
1371 inferior_ptid = new_ptid;
1372
1373 if (ptid_equal (singlestep_ptid, old_ptid))
1374 singlestep_ptid = new_ptid;
1375
1376 if (ptid_equal (deferred_step_ptid, old_ptid))
1377 deferred_step_ptid = new_ptid;
1378
1379 for (displaced = displaced_step_inferior_states;
1380 displaced;
1381 displaced = displaced->next)
1382 {
1383 if (ptid_equal (displaced->step_ptid, old_ptid))
1384 displaced->step_ptid = new_ptid;
1385
1386 for (it = displaced->step_request_queue; it; it = it->next)
1387 if (ptid_equal (it->ptid, old_ptid))
1388 it->ptid = new_ptid;
1389 }
1390 }
1391
1392 \f
1393 /* Resuming. */
1394
1395 /* Things to clean up if we QUIT out of resume (). */
1396 static void
1397 resume_cleanups (void *ignore)
1398 {
1399 normal_stop ();
1400 }
1401
1402 static const char schedlock_off[] = "off";
1403 static const char schedlock_on[] = "on";
1404 static const char schedlock_step[] = "step";
1405 static const char *scheduler_enums[] = {
1406 schedlock_off,
1407 schedlock_on,
1408 schedlock_step,
1409 NULL
1410 };
1411 static const char *scheduler_mode = schedlock_off;
1412 static void
1413 show_scheduler_mode (struct ui_file *file, int from_tty,
1414 struct cmd_list_element *c, const char *value)
1415 {
1416 fprintf_filtered (file, _("\
1417 Mode for locking scheduler during execution is \"%s\".\n"),
1418 value);
1419 }
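/* Illustrative session (the CLI name is "scheduler-locking"):

     (gdb) set scheduler-locking step
     (gdb) show scheduler-locking
     Mode for locking scheduler during execution is "step".
*/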
1420
1421 static void
1422 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1423 {
1424 if (!target_can_lock_scheduler)
1425 {
1426 scheduler_mode = schedlock_off;
1427 error (_("Target '%s' cannot support this command."), target_shortname);
1428 }
1429 }
1430
1431 /* True if execution commands resume all threads of all processes by
1432 default; otherwise, resume only threads of the current inferior
1433 process. */
1434 int sched_multi = 0;
1435
1436 /* Try to set up software single-stepping over the specified location.
1437 Return 1 if target_resume() should use hardware single step.
1438
1439 GDBARCH the current gdbarch.
1440 PC the location to step over. */
1441
1442 static int
1443 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1444 {
1445 int hw_step = 1;
1446
1447 if (gdbarch_software_single_step_p (gdbarch)
1448 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1449 {
1450 hw_step = 0;
1451 /* Do not pull these breakpoints until after a `wait' in
1452 `wait_for_inferior' */
1453 singlestep_breakpoints_inserted_p = 1;
1454 singlestep_ptid = inferior_ptid;
1455 singlestep_pc = pc;
1456 }
1457 return hw_step;
1458 }
1459
1460 /* Resume the inferior, but allow a QUIT. This is useful if the user
1461 wants to interrupt some lengthy single-stepping operation
1462 (for child processes, the SIGINT goes to the inferior, and so
1463 we get a SIGINT random_signal, but for remote debugging and perhaps
1464 other targets, that's not true).
1465
1466 STEP nonzero if we should step (zero to continue instead).
1467 SIG is the signal to give the inferior (zero for none). */
1468 void
1469 resume (int step, enum target_signal sig)
1470 {
1471 int should_resume = 1;
1472 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1473 struct regcache *regcache = get_current_regcache ();
1474 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1475 struct thread_info *tp = inferior_thread ();
1476 CORE_ADDR pc = regcache_read_pc (regcache);
1477 struct address_space *aspace = get_regcache_aspace (regcache);
1478
1479 QUIT;
1480
1481 if (debug_infrun)
1482 fprintf_unfiltered (gdb_stdlog,
1483 "infrun: resume (step=%d, signal=%d), "
1484 "trap_expected=%d\n",
1485 step, sig, tp->trap_expected);
1486
1487 /* Some targets (e.g. Solaris x86) have a kernel bug when stepping
1488 over an instruction that causes a page fault without triggering
1489 a hardware watchpoint. The kernel properly notices that it shouldn't
1490 stop, because the hardware watchpoint is not triggered, but it forgets
1491 the step request and continues the program normally.
1492 Work around the problem by removing hardware watchpoints if a step is
1493 requested, GDB will check for a hardware watchpoint trigger after the
1494 step anyway. */
1495 if (CANNOT_STEP_HW_WATCHPOINTS && step)
1496 remove_hw_watchpoints ();
1497
1498
1499 /* Normally, by the time we reach `resume', the breakpoints are either
1500 removed or inserted, as appropriate. The exception is if we're sitting
1501 at a permanent breakpoint; we need to step over it, but permanent
1502 breakpoints can't be removed. So we have to test for it here. */
1503 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
1504 {
1505 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1506 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1507 else
1508 error (_("\
1509 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1510 how to step past a permanent breakpoint on this architecture. Try using\n\
1511 a command like `return' or `jump' to continue execution."));
1512 }
1513
1514 /* If enabled, step over breakpoints by executing a copy of the
1515 instruction at a different address.
1516
1517 We can't use displaced stepping when we have a signal to deliver;
1518 the comments for displaced_step_prepare explain why. The
1519 comments in the handle_inferior event for dealing with 'random
1520 signals' explain what we do instead. */
1521 if (use_displaced_stepping (gdbarch)
1522 && (tp->trap_expected
1523 || (step && gdbarch_software_single_step_p (gdbarch)))
1524 && sig == TARGET_SIGNAL_0)
1525 {
1526 struct displaced_step_inferior_state *displaced;
1527
1528 if (!displaced_step_prepare (inferior_ptid))
1529 {
1530 /* Got placed in displaced stepping queue. Will be resumed
1531 later when all the currently queued displaced stepping
1532 requests finish. The thread is not executing at this point,
1533 and the call to set_executing will be made later. But we
1534 need to call set_running here, since from the frontend's point of view,
1535 the thread is running. */
1536 set_running (inferior_ptid, 1);
1537 discard_cleanups (old_cleanups);
1538 return;
1539 }
1540
1541 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1542 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
1543 displaced->step_closure);
1544 }
1545
1546 /* Do we need to do it the hard way, w/temp breakpoints? */
1547 else if (step)
1548 step = maybe_software_singlestep (gdbarch, pc);
1549
1550 if (should_resume)
1551 {
1552 ptid_t resume_ptid;
1553
1554 /* If STEP is set, it's a request to use hardware stepping
1555 facilities. But in that case, we should never
1556 use a singlestep breakpoint. */
1557 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1558
1559 /* Decide the set of threads to ask the target to resume. Start
1560 by assuming everything will be resumed, then narrow the set
1561 by applying increasingly restrictive conditions. */
1562
1563 /* By default, resume all threads of all processes. */
1564 resume_ptid = RESUME_ALL;
1565
1566 /* Maybe resume only all threads of the current process. */
1567 if (!sched_multi && target_supports_multi_process ())
1568 {
1569 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1570 }
1571
1572 /* Maybe resume a single thread after all. */
1573 if (singlestep_breakpoints_inserted_p
1574 && stepping_past_singlestep_breakpoint)
1575 {
1576 /* The situation here is as follows. In thread T1 we wanted to
1577 single-step. Lacking hardware single-stepping we've
1578 set a breakpoint at the PC of the next instruction -- call it
1579 P. After resuming, we've hit that breakpoint in thread T2.
1580 Now we've removed the original breakpoint, inserted a breakpoint
1581 at P+1, and are trying to step T2 past the breakpoint.
1582 We need to step only T2, as if T1 is allowed to freely run,
1583 it can run past P, and if other threads are allowed to run,
1584 they can hit the breakpoint at P+1, and nested hits of single-step
1585 breakpoints are not something we'd want -- that's complicated
1586 to support, and has no value. */
1587 resume_ptid = inferior_ptid;
1588 }
1589 else if ((step || singlestep_breakpoints_inserted_p)
1590 && tp->trap_expected)
1591 {
1592 /* We're allowing a thread to run past a breakpoint it has
1593 hit, by single-stepping the thread with the breakpoint
1594 removed. In which case, we need to single-step only this
1595 thread, and keep others stopped, as they can miss this
1596 breakpoint if allowed to run.
1597
1598 The current code actually removes all breakpoints when
1599 doing this, not just the one being stepped over, so if we
1600 let other threads run, we can actually miss any
1601 breakpoint, not just the one at PC. */
1602 resume_ptid = inferior_ptid;
1603 }
1604 else if (non_stop)
1605 {
1606 /* With non-stop mode on, threads are always handled
1607 individually. */
1608 resume_ptid = inferior_ptid;
1609 }
1610 else if ((scheduler_mode == schedlock_on)
1611 || (scheduler_mode == schedlock_step
1612 && (step || singlestep_breakpoints_inserted_p)))
1613 {
1614 /* User-settable 'scheduler' mode requires solo thread resume. */
1615 resume_ptid = inferior_ptid;
1616 }
1617
1618 if (gdbarch_cannot_step_breakpoint (gdbarch))
1619 {
1620 /* Most targets can step a breakpoint instruction, thus
1621 executing it normally. But if this one cannot, just
1622 continue and we will hit it anyway. */
1623 if (step && breakpoint_inserted_here_p (aspace, pc))
1624 step = 0;
1625 }
1626
1627 if (debug_displaced
1628 && use_displaced_stepping (gdbarch)
1629 && tp->trap_expected)
1630 {
1631 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1632 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1633 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1634 gdb_byte buf[4];
1635
1636 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1637 paddress (resume_gdbarch, actual_pc));
1638 read_memory (actual_pc, buf, sizeof (buf));
1639 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1640 }
1641
1642 /* Install inferior's terminal modes. */
1643 target_terminal_inferior ();
1644
1645 /* Avoid confusing the next resume, if the next stop/resume
1646 happens to apply to another thread. */
1647 tp->stop_signal = TARGET_SIGNAL_0;
1648
1649 target_resume (resume_ptid, step, sig);
1650 }
1651
1652 discard_cleanups (old_cleanups);
1653 }
1654 \f
1655 /* Proceeding. */
1656
1657 /* Clear out all variables saying what to do when the inferior is continued.
1658 First do this, then set the ones you want, then call `proceed'. */
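/* For illustration only -- a minimal sketch of that calling sequence,
   for a caller that wants to single-step through the address range
   [START_PC, END_PC) of the current thread (START_PC and END_PC are
   hypothetical values, not variables defined in this file):

     clear_proceed_status ();
     inferior_thread ()->step_range_start = start_pc;
     inferior_thread ()->step_range_end = end_pc;
     proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 1);  */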
1659
1660 static void
1661 clear_proceed_status_thread (struct thread_info *tp)
1662 {
1663 if (debug_infrun)
1664 fprintf_unfiltered (gdb_stdlog,
1665 "infrun: clear_proceed_status_thread (%s)\n",
1666 target_pid_to_str (tp->ptid));
1667
1668 tp->trap_expected = 0;
1669 tp->step_range_start = 0;
1670 tp->step_range_end = 0;
1671 tp->step_frame_id = null_frame_id;
1672 tp->step_stack_frame_id = null_frame_id;
1673 tp->step_over_calls = STEP_OVER_UNDEBUGGABLE;
1674 tp->stop_requested = 0;
1675
1676 tp->stop_step = 0;
1677
1678 tp->proceed_to_finish = 0;
1679
1680 /* Discard any remaining commands or status from previous stop. */
1681 bpstat_clear (&tp->stop_bpstat);
1682 }
1683
1684 static int
1685 clear_proceed_status_callback (struct thread_info *tp, void *data)
1686 {
1687 if (is_exited (tp->ptid))
1688 return 0;
1689
1690 clear_proceed_status_thread (tp);
1691 return 0;
1692 }
1693
1694 void
1695 clear_proceed_status (void)
1696 {
1697 if (!non_stop)
1698 {
1699 /* In all-stop mode, delete the per-thread status of all
1700 threads; even if inferior_ptid is null_ptid, there may still be
1701 threads on the list. E.g., we may be launching a new
1702 process while selecting the executable. */
1703 iterate_over_threads (clear_proceed_status_callback, NULL);
1704 }
1705
1706 if (!ptid_equal (inferior_ptid, null_ptid))
1707 {
1708 struct inferior *inferior;
1709
1710 if (non_stop)
1711 {
1712 /* If in non-stop mode, only delete the per-thread status of
1713 the current thread. */
1714 clear_proceed_status_thread (inferior_thread ());
1715 }
1716
1717 inferior = current_inferior ();
1718 inferior->stop_soon = NO_STOP_QUIETLY;
1719 }
1720
1721 stop_after_trap = 0;
1722
1723 observer_notify_about_to_proceed ();
1724
1725 if (stop_registers)
1726 {
1727 regcache_xfree (stop_registers);
1728 stop_registers = NULL;
1729 }
1730 }
1731
1732 /* Check the current thread against the thread that reported the most recent
1733 event. If a step-over is required, return TRUE and set the current thread
1734 to the old thread. Otherwise return FALSE.
1735
1736 This should be suitable for any targets that support threads. */
1737
1738 static int
1739 prepare_to_proceed (int step)
1740 {
1741 ptid_t wait_ptid;
1742 struct target_waitstatus wait_status;
1743 int schedlock_enabled;
1744
1745 /* With non-stop mode on, threads are always handled individually. */
1746 gdb_assert (! non_stop);
1747
1748 /* Get the last target status returned by target_wait(). */
1749 get_last_target_status (&wait_ptid, &wait_status);
1750
1751 /* Make sure we were stopped at a breakpoint. */
1752 if (wait_status.kind != TARGET_WAITKIND_STOPPED
1753 || (wait_status.value.sig != TARGET_SIGNAL_TRAP
1754 && wait_status.value.sig != TARGET_SIGNAL_ILL
1755 && wait_status.value.sig != TARGET_SIGNAL_SEGV
1756 && wait_status.value.sig != TARGET_SIGNAL_EMT))
1757 {
1758 return 0;
1759 }
1760
1761 schedlock_enabled = (scheduler_mode == schedlock_on
1762 || (scheduler_mode == schedlock_step
1763 && step));
1764
1765 /* Don't switch over to WAIT_PTID if scheduler locking is on. */
1766 if (schedlock_enabled)
1767 return 0;
1768
1769 /* Don't switch over if we're about to resume some other process
1770 other than WAIT_PTID's, and schedule-multiple is off. */
1771 if (!sched_multi
1772 && ptid_get_pid (wait_ptid) != ptid_get_pid (inferior_ptid))
1773 return 0;
1774
1775 /* We have switched threads since WAIT_PTID reported its event. */
1776 if (!ptid_equal (wait_ptid, minus_one_ptid)
1777 && !ptid_equal (inferior_ptid, wait_ptid))
1778 {
1779 struct regcache *regcache = get_thread_regcache (wait_ptid);
1780
1781 if (breakpoint_here_p (get_regcache_aspace (regcache),
1782 regcache_read_pc (regcache)))
1783 {
1784 /* If stepping, remember current thread to switch back to. */
1785 if (step)
1786 deferred_step_ptid = inferior_ptid;
1787
1788 /* Switch back to the WAIT_PTID thread. */
1789 switch_to_thread (wait_ptid);
1790
1791 /* We return 1 to indicate that there is a breakpoint here,
1792 so we need to step over it before continuing to avoid
1793 hitting it straight away. */
1794 return 1;
1795 }
1796 }
1797
1798 return 0;
1799 }
1800
1801 /* Basic routine for continuing the program in various fashions.
1802
1803 ADDR is the address to resume at, or -1 for resume where stopped.
1804 SIGGNAL is the signal to give it, or 0 for none,
1805 or -1 to act according to how it stopped.
1806 STEP is nonzero if we should trap after one instruction.
1807 A STEP of -1 means return after that and print nothing.
1808 You should probably set various step_... variables
1809 before calling here, if you are stepping.
1810
1811 You should call clear_proceed_status before calling proceed. */
1812
1813 void
1814 proceed (CORE_ADDR addr, enum target_signal siggnal, int step)
1815 {
1816 struct regcache *regcache;
1817 struct gdbarch *gdbarch;
1818 struct thread_info *tp;
1819 CORE_ADDR pc;
1820 struct address_space *aspace;
1821 int oneproc = 0;
1822
1823 /* If we're stopped at a fork/vfork, follow the branch set by the
1824 "set follow-fork-mode" command; otherwise, we'll just proceed
1825 resuming the current thread. */
1826 if (!follow_fork ())
1827 {
1828 /* The target for some reason decided not to resume. */
1829 normal_stop ();
1830 return;
1831 }
1832
1833 regcache = get_current_regcache ();
1834 gdbarch = get_regcache_arch (regcache);
1835 aspace = get_regcache_aspace (regcache);
1836 pc = regcache_read_pc (regcache);
1837
1838 if (step > 0)
1839 step_start_function = find_pc_function (pc);
1840 if (step < 0)
1841 stop_after_trap = 1;
1842
1843 if (addr == (CORE_ADDR) -1)
1844 {
1845 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
1846 && execution_direction != EXEC_REVERSE)
1847 /* There is a breakpoint at the address we will resume at;
1848 step one instruction before inserting breakpoints so that
1849 we do not stop right away (and report a second hit at this
1850 breakpoint).
1851
1852 Note, we don't do this in reverse, because we won't
1853 actually be executing the breakpoint insn anyway.
1854 We'll be (un-)executing the previous instruction. */
1855
1856 oneproc = 1;
1857 else if (gdbarch_single_step_through_delay_p (gdbarch)
1858 && gdbarch_single_step_through_delay (gdbarch,
1859 get_current_frame ()))
1860 /* We stepped onto an instruction that needs to be stepped
1861 again before re-inserting the breakpoint; do so. */
1862 oneproc = 1;
1863 }
1864 else
1865 {
1866 regcache_write_pc (regcache, addr);
1867 }
1868
1869 if (debug_infrun)
1870 fprintf_unfiltered (gdb_stdlog,
1871 "infrun: proceed (addr=%s, signal=%d, step=%d)\n",
1872 paddress (gdbarch, addr), siggnal, step);
1873
1874 /* We're handling a live event, so make sure we're doing live
1875 debugging. If we're looking at traceframes while the target is
1876 running, we're going to need to get back to that mode after
1877 handling the event. */
1878 if (non_stop)
1879 {
1880 make_cleanup_restore_current_traceframe ();
1881 set_traceframe_number (-1);
1882 }
1883
1884 if (non_stop)
1885 /* In non-stop, each thread is handled individually. The context
1886 must already be set to the right thread here. */
1887 ;
1888 else
1889 {
1890 /* In a multi-threaded task we may select another thread and
1891 then continue or step.
1892
1893 But if the old thread was stopped at a breakpoint, it will
1894 immediately cause another breakpoint stop without any
1895 execution (i.e. it will report a breakpoint hit incorrectly).
1896 So we must step over it first.
1897
1898 prepare_to_proceed checks the current thread against the
1899 thread that reported the most recent event. If a step-over
1900 is required it returns TRUE and sets the current thread to
1901 the old thread. */
1902 if (prepare_to_proceed (step))
1903 oneproc = 1;
1904 }
1905
1906 /* prepare_to_proceed may change the current thread. */
1907 tp = inferior_thread ();
1908
1909 if (oneproc)
1910 {
1911 tp->trap_expected = 1;
1912 /* If displaced stepping is enabled, we can step over the
1913 breakpoint without hitting it, so leave all breakpoints
1914 inserted. Otherwise we need to disable all breakpoints, step
1915 one instruction, and then re-add them when that step is
1916 finished. */
1917 if (!use_displaced_stepping (gdbarch))
1918 remove_breakpoints ();
1919 }
1920
1921 /* We can insert breakpoints if we're not trying to step over one,
1922 or if we are stepping over one but we're using displaced stepping
1923 to do so. */
1924 if (! tp->trap_expected || use_displaced_stepping (gdbarch))
1925 insert_breakpoints ();
1926
1927 if (!non_stop)
1928 {
1929 /* Pass the last stop signal to the thread we're resuming,
1930 irrespective of whether the current thread is the thread that
1931 got the last event or not. This was historically GDB's
1932 behaviour before keeping a stop_signal per thread. */
1933
1934 struct thread_info *last_thread;
1935 ptid_t last_ptid;
1936 struct target_waitstatus last_status;
1937
1938 get_last_target_status (&last_ptid, &last_status);
1939 if (!ptid_equal (inferior_ptid, last_ptid)
1940 && !ptid_equal (last_ptid, null_ptid)
1941 && !ptid_equal (last_ptid, minus_one_ptid))
1942 {
1943 last_thread = find_thread_ptid (last_ptid);
1944 if (last_thread)
1945 {
1946 tp->stop_signal = last_thread->stop_signal;
1947 last_thread->stop_signal = TARGET_SIGNAL_0;
1948 }
1949 }
1950 }
1951
1952 if (siggnal != TARGET_SIGNAL_DEFAULT)
1953 tp->stop_signal = siggnal;
1954 /* If this signal should not be seen by the program,
1955 give it zero. Used for debugging signals. */
1956 else if (!signal_program[tp->stop_signal])
1957 tp->stop_signal = TARGET_SIGNAL_0;
1958
1959 annotate_starting ();
1960
1961 /* Make sure that output from GDB appears before output from the
1962 inferior. */
1963 gdb_flush (gdb_stdout);
1964
1965 /* Refresh prev_pc value just prior to resuming. This used to be
1966 done in stop_stepping, however, setting prev_pc there did not handle
1967 scenarios such as inferior function calls or returning from
1968 a function via the return command. In those cases, the prev_pc
1969 value was not set properly for subsequent commands. The prev_pc value
1970 is used to initialize the starting line number in the ecs. With an
1971 invalid value, the gdb next command ends up stopping at the position
1972 represented by the next line table entry past our start position.
1973 On platforms that generate one line table entry per line, this
1974 is not a problem. However, on the ia64, the compiler generates
1975 extraneous line table entries that do not increase the line number.
1976 When we issue the gdb next command on the ia64 after an inferior call
1977 or a return command, we often end up a few instructions forward, still
1978 within the original line we started in.
1979
1980 An attempt was made to refresh the prev_pc at the same time the
1981 execution_control_state is initialized (for instance, just before
1982 waiting for an inferior event). But this approach did not work
1983 because of platforms that use ptrace, where the pc register cannot
1984 be read unless the inferior is stopped. At that point, we are not
1985 guaranteed the inferior is stopped and so the regcache_read_pc() call
1986 can fail. Setting the prev_pc value here ensures the value is updated
1987 correctly when the inferior is stopped. */
1988 tp->prev_pc = regcache_read_pc (get_current_regcache ());
1989
1990 /* Fill in with reasonable starting values. */
1991 init_thread_stepping_state (tp);
1992
1993 /* Reset to normal state. */
1994 init_infwait_state ();
1995
1996 /* Resume inferior. */
1997 resume (oneproc || step || bpstat_should_step (), tp->stop_signal);
1998
1999 /* Wait for it to stop (if not standalone)
2000 and in any case decode why it stopped, and act accordingly. */
2001 /* Do this only if we are not using the event loop, or if the target
2002 does not support asynchronous execution. */
2003 if (!target_can_async_p ())
2004 {
2005 wait_for_inferior (0);
2006 normal_stop ();
2007 }
2008 }
2009 \f
2010
2011 /* Start remote-debugging of a machine over a serial link. */
2012
2013 void
2014 start_remote (int from_tty)
2015 {
2016 struct inferior *inferior;
2017 init_wait_for_inferior ();
2018
2019 inferior = current_inferior ();
2020 inferior->stop_soon = STOP_QUIETLY_REMOTE;
2021
2022 /* Always go on waiting for the target, regardless of the mode. */
2023 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2024 indicate to wait_for_inferior that a target should timeout if
2025 nothing is returned (instead of just blocking). Because of this,
2026 targets expecting an immediate response need to, internally, set
2027 things up so that the target_wait() is forced to eventually
2028 time out. */
2029 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2030 differentiate to its caller what the state of the target is after
2031 the initial open has been performed. Here we're assuming that
2032 the target has stopped. It should be possible to eventually have
2033 target_open() return to the caller an indication that the target
2034 is currently running and GDB state should be set to the same as
2035 for an async run. */
2036 wait_for_inferior (0);
2037
2038 /* Now that the inferior has stopped, do any bookkeeping like
2039 loading shared libraries. We want to do this before normal_stop,
2040 so that the displayed frame is up to date. */
2041 post_create_inferior (&current_target, from_tty);
2042
2043 normal_stop ();
2044 }
2045
2046 /* Initialize static vars when a new inferior begins. */
2047
2048 void
2049 init_wait_for_inferior (void)
2050 {
2051 /* These are meaningless until the first time through wait_for_inferior. */
2052
2053 breakpoint_init_inferior (inf_starting);
2054
2055 clear_proceed_status ();
2056
2057 stepping_past_singlestep_breakpoint = 0;
2058 deferred_step_ptid = null_ptid;
2059
2060 target_last_wait_ptid = minus_one_ptid;
2061
2062 previous_inferior_ptid = null_ptid;
2063 init_infwait_state ();
2064
2065 /* Discard any skipped inlined frames. */
2066 clear_inline_frame_state (minus_one_ptid);
2067 }
2068
2069 \f
2070 /* This enum encodes possible reasons for doing a target_wait, so that
2071 wfi can call target_wait in one place. (Ultimately the call will be
2072 moved out of the infinite loop entirely.) */
2073
2074 enum infwait_states
2075 {
2076 infwait_normal_state,
2077 infwait_thread_hop_state,
2078 infwait_step_watch_state,
2079 infwait_nonstep_watch_state
2080 };
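/* Roughly, judging from how these states are used in
   handle_inferior_event below: infwait_normal_state is the default;
   infwait_thread_hop_state means we are single-stepping a thread past
   a breakpoint that belongs to another thread; the two *_watch_state
   values mean we just single-stepped past an instruction that
   triggered a watchpoint on a target with non-steppable watchpoints,
   with or without the user having asked for a step.  */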
2081
2082 /* Why did the inferior stop? Used to print the appropriate messages
2083 to the interface from within handle_inferior_event(). */
2084 enum inferior_stop_reason
2085 {
2086 /* Step, next, nexti, stepi finished. */
2087 END_STEPPING_RANGE,
2088 /* Inferior terminated by signal. */
2089 SIGNAL_EXITED,
2090 /* Inferior exited. */
2091 EXITED,
2092 /* Inferior received signal, and user asked to be notified. */
2093 SIGNAL_RECEIVED,
2094 /* Reverse execution -- target ran out of history info. */
2095 NO_HISTORY
2096 };
2097
2098 /* The PTID we'll do a target_wait on. */
2099 ptid_t waiton_ptid;
2100
2101 /* Current inferior wait state. */
2102 enum infwait_states infwait_state;
2103
2104 /* Data to be passed around while handling an event. This data is
2105 discarded between events. */
2106 struct execution_control_state
2107 {
2108 ptid_t ptid;
2109 /* The thread that got the event, if this was a thread event; NULL
2110 otherwise. */
2111 struct thread_info *event_thread;
2112
2113 struct target_waitstatus ws;
2114 int random_signal;
2115 CORE_ADDR stop_func_start;
2116 CORE_ADDR stop_func_end;
2117 char *stop_func_name;
2118 int new_thread_event;
2119 int wait_some_more;
2120 };
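/* For illustration, the usual pattern for driving one event through
   this machinery (see wait_for_inferior and fetch_inferior_event
   below) is, roughly:

     struct execution_control_state ecss;
     struct execution_control_state *ecs = &ecss;

     memset (ecs, 0, sizeof (*ecs));
     ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
     handle_inferior_event (ecs);
     if (!ecs->wait_some_more)
       normal_stop ();  */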
2121
2122 static void handle_inferior_event (struct execution_control_state *ecs);
2123
2124 static void handle_step_into_function (struct gdbarch *gdbarch,
2125 struct execution_control_state *ecs);
2126 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2127 struct execution_control_state *ecs);
2128 static void insert_step_resume_breakpoint_at_frame (struct frame_info *step_frame);
2129 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
2130 static void insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
2131 struct symtab_and_line sr_sal,
2132 struct frame_id sr_id);
2133 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
2134
2135 static void stop_stepping (struct execution_control_state *ecs);
2136 static void prepare_to_wait (struct execution_control_state *ecs);
2137 static void keep_going (struct execution_control_state *ecs);
2138 static void print_stop_reason (enum inferior_stop_reason stop_reason,
2139 int stop_info);
2140
2141 /* Callback for iterate_over_threads. If the thread is stopped, but
2142 the user/frontend doesn't know about that yet, go through
2143 normal_stop, as if the thread had just stopped now. ARG points at
2144 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2145 ptid_is_pid(PTID) is true, applies to all threads of the process
2146 pointed at by PTID. Otherwise, applies only to the thread pointed
2147 at by PTID. */
2148
2149 static int
2150 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2151 {
2152 ptid_t ptid = * (ptid_t *) arg;
2153
2154 if ((ptid_equal (info->ptid, ptid)
2155 || ptid_equal (minus_one_ptid, ptid)
2156 || (ptid_is_pid (ptid)
2157 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2158 && is_running (info->ptid)
2159 && !is_executing (info->ptid))
2160 {
2161 struct cleanup *old_chain;
2162 struct execution_control_state ecss;
2163 struct execution_control_state *ecs = &ecss;
2164
2165 memset (ecs, 0, sizeof (*ecs));
2166
2167 old_chain = make_cleanup_restore_current_thread ();
2168
2169 switch_to_thread (info->ptid);
2170
2171 /* Go through handle_inferior_event/normal_stop, so we always
2172 have consistent output as if the stop event had been
2173 reported. */
2174 ecs->ptid = info->ptid;
2175 ecs->event_thread = find_thread_ptid (info->ptid);
2176 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2177 ecs->ws.value.sig = TARGET_SIGNAL_0;
2178
2179 handle_inferior_event (ecs);
2180
2181 if (!ecs->wait_some_more)
2182 {
2183 struct thread_info *tp;
2184
2185 normal_stop ();
2186
2187 /* Finish off the continuations. The continuations
2188 themselves are responsible for realizing that the thread
2189 didn't finish what it was supposed to do. */
2190 tp = inferior_thread ();
2191 do_all_intermediate_continuations_thread (tp);
2192 do_all_continuations_thread (tp);
2193 }
2194
2195 do_cleanups (old_chain);
2196 }
2197
2198 return 0;
2199 }
2200
2201 /* This function is attached as a "thread_stop_requested" observer.
2202 Clean up local state that assumed the PTID was to be resumed, and
2203 report the stop to the frontend. */
2204
2205 static void
2206 infrun_thread_stop_requested (ptid_t ptid)
2207 {
2208 struct displaced_step_inferior_state *displaced;
2209
2210 /* PTID was requested to stop. Remove it from the displaced
2211 stepping queue, so we don't try to resume it automatically. */
2212
2213 for (displaced = displaced_step_inferior_states;
2214 displaced;
2215 displaced = displaced->next)
2216 {
2217 struct displaced_step_request *it, **prev_next_p;
2218
2219 it = displaced->step_request_queue;
2220 prev_next_p = &displaced->step_request_queue;
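/* PREV_NEXT_P always points at the link (the queue head, or some
   element's `next' field) that currently points at IT, so unlinking
   IT below is a single store through PREV_NEXT_P.  */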
2221 while (it)
2222 {
2223 if (ptid_match (it->ptid, ptid))
2224 {
2225 *prev_next_p = it->next;
2226 it->next = NULL;
2227 xfree (it);
2228 }
2229 else
2230 {
2231 prev_next_p = &it->next;
2232 }
2233
2234 it = *prev_next_p;
2235 }
2236 }
2237
2238 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2239 }
2240
2241 static void
2242 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2243 {
2244 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2245 nullify_last_target_wait_ptid ();
2246 }
2247
2248 /* Callback for iterate_over_threads. */
2249
2250 static int
2251 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2252 {
2253 if (is_exited (info->ptid))
2254 return 0;
2255
2256 delete_step_resume_breakpoint (info);
2257 return 0;
2258 }
2259
2260 /* In all-stop, delete the step resume breakpoint of any thread that
2261 had one. In non-stop, delete the step resume breakpoint of the
2262 thread that just stopped. */
2263
2264 static void
2265 delete_step_thread_step_resume_breakpoint (void)
2266 {
2267 if (!target_has_execution
2268 || ptid_equal (inferior_ptid, null_ptid))
2269 /* If the inferior has exited, we have already deleted the step
2270 resume breakpoints out of GDB's lists. */
2271 return;
2272
2273 if (non_stop)
2274 {
2275 /* If in non-stop mode, only delete the step-resume or
2276 longjmp-resume breakpoint of the thread that just stopped
2277 stepping. */
2278 struct thread_info *tp = inferior_thread ();
2279 delete_step_resume_breakpoint (tp);
2280 }
2281 else
2282 /* In all-stop mode, delete all step-resume and longjmp-resume
2283 breakpoints of any thread that had them. */
2284 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2285 }
2286
2287 /* A cleanup wrapper. */
2288
2289 static void
2290 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2291 {
2292 delete_step_thread_step_resume_breakpoint ();
2293 }
2294
2295 /* Pretty print the results of target_wait, for debugging purposes. */
2296
2297 static void
2298 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2299 const struct target_waitstatus *ws)
2300 {
2301 char *status_string = target_waitstatus_to_string (ws);
2302 struct ui_file *tmp_stream = mem_fileopen ();
2303 char *text;
2304
2305 /* The text is split over several lines because it was getting too long.
2306 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2307 output as a unit; we want only one timestamp printed if debug_timestamp
2308 is set. */
2309
2310 fprintf_unfiltered (tmp_stream,
2311 "infrun: target_wait (%d", PIDGET (waiton_ptid));
2312 if (PIDGET (waiton_ptid) != -1)
2313 fprintf_unfiltered (tmp_stream,
2314 " [%s]", target_pid_to_str (waiton_ptid));
2315 fprintf_unfiltered (tmp_stream, ", status) =\n");
2316 fprintf_unfiltered (tmp_stream,
2317 "infrun: %d [%s],\n",
2318 PIDGET (result_ptid), target_pid_to_str (result_ptid));
2319 fprintf_unfiltered (tmp_stream,
2320 "infrun: %s\n",
2321 status_string);
2322
2323 text = ui_file_xstrdup (tmp_stream, NULL);
2324
2325 /* This uses %s in part to handle %'s in the text, but also to avoid
2326 a gcc error: the format attribute requires a string literal. */
2327 fprintf_unfiltered (gdb_stdlog, "%s", text);
2328
2329 xfree (status_string);
2330 xfree (text);
2331 ui_file_delete (tmp_stream);
2332 }
2333
2334 /* Wait for control to return from inferior to debugger.
2335
2336 If TREAT_EXEC_AS_SIGTRAP is non-zero, then handle EXEC signals
2337 as if they were SIGTRAP signals. This can be useful during
2338 the startup sequence on some targets such as HP/UX, where
2339 we receive an EXEC event instead of the expected SIGTRAP.
2340
2341 If inferior gets a signal, we may decide to start it up again
2342 instead of returning. That is why there is a loop in this function.
2343 When this function actually returns it means the inferior
2344 should be left stopped and GDB should read more commands. */
2345
2346 void
2347 wait_for_inferior (int treat_exec_as_sigtrap)
2348 {
2349 struct cleanup *old_cleanups;
2350 struct execution_control_state ecss;
2351 struct execution_control_state *ecs;
2352
2353 if (debug_infrun)
2354 fprintf_unfiltered
2355 (gdb_stdlog, "infrun: wait_for_inferior (treat_exec_as_sigtrap=%d)\n",
2356 treat_exec_as_sigtrap);
2357
2358 old_cleanups =
2359 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2360
2361 ecs = &ecss;
2362 memset (ecs, 0, sizeof (*ecs));
2363
2364 /* We'll update this if & when we switch to a new thread. */
2365 previous_inferior_ptid = inferior_ptid;
2366
2367 while (1)
2368 {
2369 struct cleanup *old_chain;
2370
2371 /* We have to invalidate the registers BEFORE calling target_wait
2372 because they can be loaded from the target while in target_wait.
2373 This makes remote debugging a bit more efficient for those
2374 targets that provide critical registers as part of their normal
2375 status mechanism. */
2376
2377 overlay_cache_invalid = 1;
2378 registers_changed ();
2379
2380 if (deprecated_target_wait_hook)
2381 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2382 else
2383 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2384
2385 if (debug_infrun)
2386 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2387
2388 if (treat_exec_as_sigtrap && ecs->ws.kind == TARGET_WAITKIND_EXECD)
2389 {
2390 xfree (ecs->ws.value.execd_pathname);
2391 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2392 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
2393 }
2394
2395 /* If an error happens while handling the event, propagate GDB's
2396 knowledge of the executing state to the frontend/user running
2397 state. */
2398 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2399
2400 if (ecs->ws.kind == TARGET_WAITKIND_SYSCALL_ENTRY
2401 || ecs->ws.kind == TARGET_WAITKIND_SYSCALL_RETURN)
2402 ecs->ws.value.syscall_number = UNKNOWN_SYSCALL;
2403
2404 /* Now figure out what to do with the result of the wait. */
2405 handle_inferior_event (ecs);
2406
2407 /* No error, don't finish the state yet. */
2408 discard_cleanups (old_chain);
2409
2410 if (!ecs->wait_some_more)
2411 break;
2412 }
2413
2414 do_cleanups (old_cleanups);
2415 }
2416
2417 /* Asynchronous version of wait_for_inferior. It is called by the
2418 event loop whenever a change of state is detected on the file
2419 descriptor corresponding to the target. It can be called more than
2420 once to complete a single execution command. In such cases we need
2421 to keep the state in a global variable ECSS. If it is the last time
2422 that this function is called for a single execution command, then
2423 report to the user that the inferior has stopped, and do the
2424 necessary cleanups. */
2425
2426 void
2427 fetch_inferior_event (void *client_data)
2428 {
2429 struct execution_control_state ecss;
2430 struct execution_control_state *ecs = &ecss;
2431 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2432 struct cleanup *ts_old_chain;
2433 int was_sync = sync_execution;
2434
2435 memset (ecs, 0, sizeof (*ecs));
2436
2437 /* We'll update this if & when we switch to a new thread. */
2438 previous_inferior_ptid = inferior_ptid;
2439
2440 if (non_stop)
2441 /* In non-stop mode, the user/frontend should not notice a thread
2442 switch due to internal events. Make sure we revert to the
2443 user-selected thread and frame after handling the event and
2444 running any breakpoint commands. */
2445 make_cleanup_restore_current_thread ();
2446
2447 /* We have to invalidate the registers BEFORE calling target_wait
2448 because they can be loaded from the target while in target_wait.
2449 This makes remote debugging a bit more efficient for those
2450 targets that provide critical registers as part of their normal
2451 status mechanism. */
2452
2453 overlay_cache_invalid = 1;
2454 registers_changed ();
2455
2456 if (deprecated_target_wait_hook)
2457 ecs->ptid =
2458 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2459 else
2460 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2461
2462 if (debug_infrun)
2463 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2464
2465 if (non_stop
2466 && ecs->ws.kind != TARGET_WAITKIND_IGNORE
2467 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2468 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2469 /* In non-stop mode, each thread is handled individually. Switch
2470 early, so the global state is set correctly for this
2471 thread. */
2472 context_switch (ecs->ptid);
2473
2474 /* If an error happens while handling the event, propagate GDB's
2475 knowledge of the executing state to the frontend/user running
2476 state. */
2477 if (!non_stop)
2478 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2479 else
2480 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2481
2482 /* Now figure out what to do with the result of the wait. */
2483 handle_inferior_event (ecs);
2484
2485 if (!ecs->wait_some_more)
2486 {
2487 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2488
2489 delete_step_thread_step_resume_breakpoint ();
2490
2491 /* We may not find an inferior if this was a process exit. */
2492 if (inf == NULL || inf->stop_soon == NO_STOP_QUIETLY)
2493 normal_stop ();
2494
2495 if (target_has_execution
2496 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2497 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2498 && ecs->event_thread->step_multi
2499 && ecs->event_thread->stop_step)
2500 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2501 else
2502 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2503 }
2504
2505 /* No error, don't finish the thread states yet. */
2506 discard_cleanups (ts_old_chain);
2507
2508 /* Revert thread and frame. */
2509 do_cleanups (old_chain);
2510
2511 /* If the inferior was in sync execution mode, and now isn't,
2512 restore the prompt. */
2513 if (was_sync && !sync_execution)
2514 display_gdb_prompt (0);
2515 }
2516
2517 /* Record the frame and location we're currently stepping through. */
2518 void
2519 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2520 {
2521 struct thread_info *tp = inferior_thread ();
2522
2523 tp->step_frame_id = get_frame_id (frame);
2524 tp->step_stack_frame_id = get_stack_frame_id (frame);
2525
2526 tp->current_symtab = sal.symtab;
2527 tp->current_line = sal.line;
2528 }
2529
2530 /* Clear context switchable stepping state. */
2531
2532 void
2533 init_thread_stepping_state (struct thread_info *tss)
2534 {
2535 tss->stepping_over_breakpoint = 0;
2536 tss->step_after_step_resume_breakpoint = 0;
2537 tss->stepping_through_solib_after_catch = 0;
2538 tss->stepping_through_solib_catchpoints = NULL;
2539 }
2540
2541 /* Return the cached copy of the last pid/waitstatus returned by
2542 target_wait()/deprecated_target_wait_hook(). The data is actually
2543 cached by handle_inferior_event(), which gets called immediately
2544 after target_wait()/deprecated_target_wait_hook(). */
2545
2546 void
2547 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
2548 {
2549 *ptidp = target_last_wait_ptid;
2550 *status = target_last_waitstatus;
2551 }
2552
2553 void
2554 nullify_last_target_wait_ptid (void)
2555 {
2556 target_last_wait_ptid = minus_one_ptid;
2557 }
2558
2559 /* Switch thread contexts. */
2560
2561 static void
2562 context_switch (ptid_t ptid)
2563 {
2564 if (debug_infrun)
2565 {
2566 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
2567 target_pid_to_str (inferior_ptid));
2568 fprintf_unfiltered (gdb_stdlog, "to %s\n",
2569 target_pid_to_str (ptid));
2570 }
2571
2572 switch_to_thread (ptid);
2573 }
2574
2575 static void
2576 adjust_pc_after_break (struct execution_control_state *ecs)
2577 {
2578 struct regcache *regcache;
2579 struct gdbarch *gdbarch;
2580 struct address_space *aspace;
2581 CORE_ADDR breakpoint_pc;
2582
2583 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
2584 we aren't, just return.
2585
2586 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
2587 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
2588 implemented by software breakpoints should be handled through the normal
2589 breakpoint layer.
2590
2591 NOTE drow/2004-01-31: On some targets, breakpoints may generate
2592 different signals (SIGILL or SIGEMT for instance), but it is less
2593 clear where the PC is pointing afterwards. It may not match
2594 gdbarch_decr_pc_after_break. I don't know any specific target that
2595 generates these signals at breakpoints (the code has been in GDB since at
2596 least 1992) so I can not guess how to handle them here.
2597
2598 In earlier versions of GDB, a target with
2599 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
2600 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
2601 target with both of these set in GDB history, and it seems unlikely to be
2602 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
2603
2604 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
2605 return;
2606
2607 if (ecs->ws.value.sig != TARGET_SIGNAL_TRAP)
2608 return;
2609
2610 /* In reverse execution, when a breakpoint is hit, the instruction
2611 under it has already been de-executed. The reported PC always
2612 points at the breakpoint address, so adjusting it further would
2613 be wrong. E.g., consider this case on a decr_pc_after_break == 1
2614 architecture:
2615
2616 B1 0x08000000 : INSN1
2617 B2 0x08000001 : INSN2
2618 0x08000002 : INSN3
2619 PC -> 0x08000003 : INSN4
2620
2621 Say you're stopped at 0x08000003 as above. Reverse continuing
2622 from that point should hit B2 as below. Reading the PC when the
2623 SIGTRAP is reported should read 0x08000001 and INSN2 should have
2624 been de-executed already.
2625
2626 B1 0x08000000 : INSN1
2627 B2 PC -> 0x08000001 : INSN2
2628 0x08000002 : INSN3
2629 0x08000003 : INSN4
2630
2631 We can't apply the same logic as for forward execution, because
2632 we would wrongly adjust the PC to 0x08000000, since there's a
2633 breakpoint at PC - 1. We'd then report a hit on B1, although
2634 INSN1 hadn't been de-executed yet. Doing nothing is the correct
2635 behaviour. */
2636 if (execution_direction == EXEC_REVERSE)
2637 return;
2638
2639 /* If this target does not decrement the PC after breakpoints, then
2640 we have nothing to do. */
2641 regcache = get_thread_regcache (ecs->ptid);
2642 gdbarch = get_regcache_arch (regcache);
2643 if (gdbarch_decr_pc_after_break (gdbarch) == 0)
2644 return;
2645
2646 aspace = get_regcache_aspace (regcache);
2647
2648 /* Find the location where (if we've hit a breakpoint) the
2649 breakpoint would be. */
2650 breakpoint_pc = regcache_read_pc (regcache)
2651 - gdbarch_decr_pc_after_break (gdbarch);
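/* For example, on x86, where a software breakpoint is the one-byte
   `int3' instruction and gdbarch_decr_pc_after_break is 1, hitting a
   breakpoint planted at 0x1000 reports a PC of 0x1001, so
   BREAKPOINT_PC is 0x1000 -- the address the user expects to be
   stopped at.  */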
2652
2653 /* Check whether there actually is a software breakpoint inserted at
2654 that location.
2655
2656 If in non-stop mode, a race condition is possible where we've
2657 removed a breakpoint, but stop events for that breakpoint were
2658 already queued and arrive later. To suppress those spurious
2659 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
2660 and retire them after a number of stop events are reported. */
2661 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
2662 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
2663 {
2664 struct cleanup *old_cleanups = NULL;
2665 if (RECORD_IS_USED)
2666 old_cleanups = record_gdb_operation_disable_set ();
2667
2668 /* When using hardware single-step, a SIGTRAP is reported for both
2669 a completed single-step and a software breakpoint. Need to
2670 differentiate between the two, as the latter needs adjusting
2671 but the former does not.
2672
2673 The SIGTRAP can be due to a completed hardware single-step only if
2674 - we didn't insert software single-step breakpoints
2675 - the thread to be examined is still the current thread
2676 - this thread is currently being stepped
2677
2678 If any of these events did not occur, we must have stopped due
2679 to hitting a software breakpoint, and have to back up to the
2680 breakpoint address.
2681
2682 As a special case, we could have hardware single-stepped a
2683 software breakpoint. In this case (prev_pc == breakpoint_pc),
2684 we also need to back up to the breakpoint address. */
2685
2686 if (singlestep_breakpoints_inserted_p
2687 || !ptid_equal (ecs->ptid, inferior_ptid)
2688 || !currently_stepping (ecs->event_thread)
2689 || ecs->event_thread->prev_pc == breakpoint_pc)
2690 regcache_write_pc (regcache, breakpoint_pc);
2691
2692 if (RECORD_IS_USED)
2693 do_cleanups (old_cleanups);
2694 }
2695 }
2696
2697 void
2698 init_infwait_state (void)
2699 {
2700 waiton_ptid = pid_to_ptid (-1);
2701 infwait_state = infwait_normal_state;
2702 }
2703
2704 void
2705 error_is_running (void)
2706 {
2707 error (_("\
2708 Cannot execute this command while the selected thread is running."));
2709 }
2710
2711 void
2712 ensure_not_running (void)
2713 {
2714 if (is_running (inferior_ptid))
2715 error_is_running ();
2716 }
2717
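/* Return non-zero if the frame identified by STEP_FRAME_ID is an
   outer frame of FRAME, with only inline frames in between -- i.e.,
   FRAME appears to have been stepped into from that frame through
   inlined calls only.  */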
2718 static int
2719 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
2720 {
2721 for (frame = get_prev_frame (frame);
2722 frame != NULL;
2723 frame = get_prev_frame (frame))
2724 {
2725 if (frame_id_eq (get_frame_id (frame), step_frame_id))
2726 return 1;
2727 if (get_frame_type (frame) != INLINE_FRAME)
2728 break;
2729 }
2730
2731 return 0;
2732 }
2733
2734 /* Auxiliary function that handles syscall entry/return events.
2735 It returns 1 if the inferior should keep going (and GDB
2736 should ignore the event), or 0 if the event deserves to be
2737 processed. */
2738
2739 static int
2740 handle_syscall_event (struct execution_control_state *ecs)
2741 {
2742 struct regcache *regcache;
2743 struct gdbarch *gdbarch;
2744 int syscall_number;
2745
2746 if (!ptid_equal (ecs->ptid, inferior_ptid))
2747 context_switch (ecs->ptid);
2748
2749 regcache = get_thread_regcache (ecs->ptid);
2750 gdbarch = get_regcache_arch (regcache);
2751 syscall_number = gdbarch_get_syscall_number (gdbarch, ecs->ptid);
2752 stop_pc = regcache_read_pc (regcache);
2753
2754 target_last_waitstatus.value.syscall_number = syscall_number;
2755
2756 if (catch_syscall_enabled () > 0
2757 && catching_syscall_number (syscall_number) > 0)
2758 {
2759 if (debug_infrun)
2760 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
2761 syscall_number);
2762
2763 ecs->event_thread->stop_bpstat
2764 = bpstat_stop_status (get_regcache_aspace (regcache),
2765 stop_pc, ecs->ptid);
2766 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
2767
2768 if (!ecs->random_signal)
2769 {
2770 /* Catchpoint hit. */
2771 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
2772 return 0;
2773 }
2774 }
2775
2776 /* If no catchpoint triggered for this, then keep going. */
2777 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
2778 keep_going (ecs);
2779 return 1;
2780 }
2781
2782 /* Given an execution control state that has been freshly filled in
2783 by an event from the inferior, figure out what it means and take
2784 appropriate action. */
2785
2786 static void
2787 handle_inferior_event (struct execution_control_state *ecs)
2788 {
2789 struct frame_info *frame;
2790 struct gdbarch *gdbarch;
2791 int sw_single_step_trap_p = 0;
2792 int stopped_by_watchpoint;
2793 int stepped_after_stopped_by_watchpoint = 0;
2794 struct symtab_and_line stop_pc_sal;
2795 enum stop_kind stop_soon;
2796
2797 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
2798 {
2799 /* We had an event in the inferior, but we are not interested in
2800 handling it at this level. The lower layers have already
2801 done what needs to be done, if anything.
2802
2803 One of the possible circumstances for this is when the
2804 inferior produces output for the console. The inferior has
2805 not stopped, and we are ignoring the event. Another possible
2806 circumstance is any event which the lower level knows will be
2807 reported multiple times without an intervening resume. */
2808 if (debug_infrun)
2809 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
2810 prepare_to_wait (ecs);
2811 return;
2812 }
2813
2814 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
2815 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2816 {
2817 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2818 gdb_assert (inf);
2819 stop_soon = inf->stop_soon;
2820 }
2821 else
2822 stop_soon = NO_STOP_QUIETLY;
2823
2824 /* Cache the last pid/waitstatus. */
2825 target_last_wait_ptid = ecs->ptid;
2826 target_last_waitstatus = ecs->ws;
2827
2828 /* Always clear state belonging to the previous time we stopped. */
2829 stop_stack_dummy = 0;
2830
2831 /* If it's a new process, add it to the thread database. */
2832
2833 ecs->new_thread_event = (!ptid_equal (ecs->ptid, inferior_ptid)
2834 && !ptid_equal (ecs->ptid, minus_one_ptid)
2835 && !in_thread_list (ecs->ptid));
2836
2837 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
2838 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED && ecs->new_thread_event)
2839 add_thread (ecs->ptid);
2840
2841 ecs->event_thread = find_thread_ptid (ecs->ptid);
2842
2843 /* Dependent on valid ECS->EVENT_THREAD. */
2844 adjust_pc_after_break (ecs);
2845
2846 /* Dependent on the current PC value modified by adjust_pc_after_break. */
2847 reinit_frame_cache ();
2848
2849 breakpoint_retire_moribund ();
2850
2851 /* First, distinguish signals caused by the debugger from signals
2852 that have to do with the program's own actions. Note that
2853 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
2854 on the operating system version. Here we detect when a SIGILL or
2855 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
2856 something similar for SIGSEGV, since a SIGSEGV will be generated
2857 when we're trying to execute a breakpoint instruction on a
2858 non-executable stack. This happens for call dummy breakpoints
2859 for architectures like SPARC that place call dummies on the
2860 stack. */
2861 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
2862 && (ecs->ws.value.sig == TARGET_SIGNAL_ILL
2863 || ecs->ws.value.sig == TARGET_SIGNAL_SEGV
2864 || ecs->ws.value.sig == TARGET_SIGNAL_EMT))
2865 {
2866 struct regcache *regcache = get_thread_regcache (ecs->ptid);
2867
2868 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
2869 regcache_read_pc (regcache)))
2870 {
2871 if (debug_infrun)
2872 fprintf_unfiltered (gdb_stdlog,
2873 "infrun: Treating signal as SIGTRAP\n");
2874 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
2875 }
2876 }
2877
2878 /* Mark the non-executing threads accordingly. In all-stop, all
2879 threads of all processes are stopped when we get any event
2880 reported. In non-stop mode, only the event thread stops. If
2881 we're handling a process exit in non-stop mode, there's nothing
2882 to do, as threads of the dead process are gone, and threads of
2883 any other process were left running. */
2884 if (!non_stop)
2885 set_executing (minus_one_ptid, 0);
2886 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2887 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
2888 set_executing (inferior_ptid, 0);
2889
2890 switch (infwait_state)
2891 {
2892 case infwait_thread_hop_state:
2893 if (debug_infrun)
2894 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_thread_hop_state\n");
2895 break;
2896
2897 case infwait_normal_state:
2898 if (debug_infrun)
2899 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
2900 break;
2901
2902 case infwait_step_watch_state:
2903 if (debug_infrun)
2904 fprintf_unfiltered (gdb_stdlog,
2905 "infrun: infwait_step_watch_state\n");
2906
2907 stepped_after_stopped_by_watchpoint = 1;
2908 break;
2909
2910 case infwait_nonstep_watch_state:
2911 if (debug_infrun)
2912 fprintf_unfiltered (gdb_stdlog,
2913 "infrun: infwait_nonstep_watch_state\n");
2914 insert_breakpoints ();
2915
2916 /* FIXME-maybe: is this cleaner than setting a flag? Does it
2917 handle things like signals arriving and other things happening
2918 in combination correctly? */
2919 stepped_after_stopped_by_watchpoint = 1;
2920 break;
2921
2922 default:
2923 internal_error (__FILE__, __LINE__, _("bad switch"));
2924 }
2925
2926 infwait_state = infwait_normal_state;
2927 waiton_ptid = pid_to_ptid (-1);
2928
2929 switch (ecs->ws.kind)
2930 {
2931 case TARGET_WAITKIND_LOADED:
2932 if (debug_infrun)
2933 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
2934 /* Ignore gracefully during startup of the inferior, as it might
2935 be the shell which has just loaded some objects; otherwise
2936 add the symbols for the newly loaded objects. Also ignore at
2937 the beginning of an attach or remote session; we will query
2938 the full list of libraries once the connection is
2939 established. */
2940 if (stop_soon == NO_STOP_QUIETLY)
2941 {
2942 /* Check for any newly added shared libraries if we're
2943 supposed to be adding them automatically. Switch
2944 terminal for any messages produced by
2945 breakpoint_re_set. */
2946 target_terminal_ours_for_output ();
2947 /* NOTE: cagney/2003-11-25: Make certain that the target
2948 stack's section table is kept up-to-date. Architectures,
2949 (e.g., PPC64), use the section table to perform
2950 operations such as address => section name and hence
2951 require the table to contain all sections (including
2952 those found in shared libraries). */
2953 #ifdef SOLIB_ADD
2954 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
2955 #else
2956 solib_add (NULL, 0, &current_target, auto_solib_add);
2957 #endif
2958 target_terminal_inferior ();
2959
2960 /* If requested, stop when the dynamic linker notifies
2961 gdb of events. This allows the user to get control
2962 and place breakpoints in initializer routines for
2963 dynamically loaded objects (among other things). */
2964 if (stop_on_solib_events)
2965 {
2966 /* Make sure we print "Stopped due to solib-event" in
2967 normal_stop. */
2968 stop_print_frame = 1;
2969
2970 stop_stepping (ecs);
2971 return;
2972 }
2973
2974 /* NOTE drow/2007-05-11: This might be a good place to check
2975 for "catch load". */
2976 }
2977
2978 /* If we are skipping through a shell, or through shared library
2979 loading that we aren't interested in, resume the program. If
2980 we're running the program normally, also resume. But stop if
2981 we're attaching or setting up a remote connection. */
2982 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
2983 {
2984 /* Loading of shared libraries might have changed breakpoint
2985 addresses. Make sure new breakpoints are inserted. */
2986 if (stop_soon == NO_STOP_QUIETLY
2987 && !breakpoints_always_inserted_mode ())
2988 insert_breakpoints ();
2989 resume (0, TARGET_SIGNAL_0);
2990 prepare_to_wait (ecs);
2991 return;
2992 }
2993
2994 break;
2995
2996 case TARGET_WAITKIND_SPURIOUS:
2997 if (debug_infrun)
2998 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
2999 resume (0, TARGET_SIGNAL_0);
3000 prepare_to_wait (ecs);
3001 return;
3002
3003 case TARGET_WAITKIND_EXITED:
3004 if (debug_infrun)
3005 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXITED\n");
3006 inferior_ptid = ecs->ptid;
3007 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3008 set_current_program_space (current_inferior ()->pspace);
3009 handle_vfork_child_exec_or_exit (0);
3010 target_terminal_ours (); /* Must do this before mourn anyway */
3011 print_stop_reason (EXITED, ecs->ws.value.integer);
3012
3013 /* Record the exit code in the convenience variable $_exitcode, so
3014 that the user can inspect this again later. */
3015 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3016 (LONGEST) ecs->ws.value.integer);
3017 gdb_flush (gdb_stdout);
3018 target_mourn_inferior ();
3019 singlestep_breakpoints_inserted_p = 0;
3020 stop_print_frame = 0;
3021 stop_stepping (ecs);
3022 return;
3023
3024 case TARGET_WAITKIND_SIGNALLED:
3025 if (debug_infrun)
3026 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SIGNALLED\n");
3027 inferior_ptid = ecs->ptid;
3028 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3029 set_current_program_space (current_inferior ()->pspace);
3030 handle_vfork_child_exec_or_exit (0);
3031 stop_print_frame = 0;
3032 target_terminal_ours (); /* Must do this before mourn anyway */
3033
3034 /* Note: By definition of TARGET_WAITKIND_SIGNALLED, we shouldn't
3035 reach here unless the inferior is dead. However, for years
3036 target_kill() was called here, which hints that fatal signals aren't
3037 really fatal on some systems. If that's true, then some changes
3038 may be needed. */
3039 target_mourn_inferior ();
3040
3041 print_stop_reason (SIGNAL_EXITED, ecs->ws.value.sig);
3042 singlestep_breakpoints_inserted_p = 0;
3043 stop_stepping (ecs);
3044 return;
3045
3046 /* The following are the only cases in which we keep going;
3047 the above cases end in a continue or goto. */
3048 case TARGET_WAITKIND_FORKED:
3049 case TARGET_WAITKIND_VFORKED:
3050 if (debug_infrun)
3051 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3052
3053 if (!ptid_equal (ecs->ptid, inferior_ptid))
3054 {
3055 context_switch (ecs->ptid);
3056 reinit_frame_cache ();
3057 }
3058
3059 /* Immediately detach breakpoints from the child before there's
3060 any chance of letting the user delete breakpoints from the
3061 breakpoint lists. If we don't do this early, it's easy to
3062 leave left-over traps in the child, viz: "break foo; catch
3063 fork; c; <fork>; del; c; <child calls foo>". We only follow
3064 the fork on the last `continue', and by that time the
3065 breakpoint at "foo" is long gone from the breakpoint table.
3066 If we vforked, then we don't need to unpatch here, since both
3067 parent and child are sharing the same memory pages; we'll
3068 need to unpatch at follow/detach time instead to be certain
3069 that new breakpoints added between catchpoint hit time and
3070 vfork follow are detached. */
3071 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
3072 {
3073 int child_pid = ptid_get_pid (ecs->ws.value.related_pid);
3074
3075 /* This won't actually modify the breakpoint list, but will
3076 physically remove the breakpoints from the child. */
3077 detach_breakpoints (child_pid);
3078 }
3079
3080 /* In case the event is caught by a catchpoint, remember that
3081 the event is to be followed at the next resume of the thread,
3082 and not immediately. */
3083 ecs->event_thread->pending_follow = ecs->ws;
3084
3085 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3086
3087 ecs->event_thread->stop_bpstat
3088 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3089 stop_pc, ecs->ptid);
3090
3091 /* Note that we're interested in knowing whether the bpstat actually
3092 causes a stop, not just whether it may explain the signal.
3093 Software watchpoints, for example, always appear in the
3094 bpstat. */
3095 ecs->random_signal = !bpstat_causes_stop (ecs->event_thread->stop_bpstat);
3096
3097 /* If no catchpoint triggered for this, then keep going. */
3098 if (ecs->random_signal)
3099 {
3100 ptid_t parent;
3101 ptid_t child;
3102 int should_resume;
3103 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
3104
3105 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3106
3107 should_resume = follow_fork ();
3108
3109 parent = ecs->ptid;
3110 child = ecs->ws.value.related_pid;
3111
3112 /* In non-stop mode, also resume the other branch. */
3113 if (non_stop && !detach_fork)
3114 {
3115 if (follow_child)
3116 switch_to_thread (parent);
3117 else
3118 switch_to_thread (child);
3119
3120 ecs->event_thread = inferior_thread ();
3121 ecs->ptid = inferior_ptid;
3122 keep_going (ecs);
3123 }
3124
3125 if (follow_child)
3126 switch_to_thread (child);
3127 else
3128 switch_to_thread (parent);
3129
3130 ecs->event_thread = inferior_thread ();
3131 ecs->ptid = inferior_ptid;
3132
3133 if (should_resume)
3134 keep_going (ecs);
3135 else
3136 stop_stepping (ecs);
3137 return;
3138 }
3139 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3140 goto process_event_stop_test;
3141
3142 case TARGET_WAITKIND_VFORK_DONE:
3143 /* Done with the shared memory region. Re-insert breakpoints in
3144 the parent, and keep going. */
3145
3146 if (debug_infrun)
3147 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3148
3149 if (!ptid_equal (ecs->ptid, inferior_ptid))
3150 context_switch (ecs->ptid);
3151
3152 current_inferior ()->waiting_for_vfork_done = 0;
3153 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3154 /* This also takes care of reinserting breakpoints in the
3155 previously locked inferior. */
3156 keep_going (ecs);
3157 return;
3158
3159 case TARGET_WAITKIND_EXECD:
3160 if (debug_infrun)
3161 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3162
3163 if (!ptid_equal (ecs->ptid, inferior_ptid))
3164 {
3165 context_switch (ecs->ptid);
3166 reinit_frame_cache ();
3167 }
3168
3169 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3170
3171 /* Do whatever is necessary to the parent branch of the vfork. */
3172 handle_vfork_child_exec_or_exit (1);
3173
3174 /* This causes the eventpoints and symbol table to be reset.
3175 Must do this now, before trying to determine whether to
3176 stop. */
3177 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3178
3179 ecs->event_thread->stop_bpstat
3180 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3181 stop_pc, ecs->ptid);
3182 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
3183
3184 /* Note that this may be referenced from inside
3185 bpstat_stop_status above, through inferior_has_execd. */
3186 xfree (ecs->ws.value.execd_pathname);
3187 ecs->ws.value.execd_pathname = NULL;
3188
3189 /* If no catchpoint triggered for this, then keep going. */
3190 if (ecs->random_signal)
3191 {
3192 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3193 keep_going (ecs);
3194 return;
3195 }
3196 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3197 goto process_event_stop_test;
3198
3199 /* Be careful not to try to gather much state about a thread
3200 that's in a syscall. It's frequently a losing proposition. */
3201 case TARGET_WAITKIND_SYSCALL_ENTRY:
3202 if (debug_infrun)
3203 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3204 /* Get the current syscall number. */
3205 if (handle_syscall_event (ecs) != 0)
3206 return;
3207 goto process_event_stop_test;
3208
3209 /* Before examining the threads further, step this thread to
3210 get it entirely out of the syscall. (We get notice of the
3211 event when the thread is just on the verge of exiting a
3212 syscall. Stepping one instruction seems to get it back
3213 into user code.) */
3214 case TARGET_WAITKIND_SYSCALL_RETURN:
3215 if (debug_infrun)
3216 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
3217 if (handle_syscall_event (ecs) != 0)
3218 return;
3219 goto process_event_stop_test;
3220
3221 case TARGET_WAITKIND_STOPPED:
3222 if (debug_infrun)
3223 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
3224 ecs->event_thread->stop_signal = ecs->ws.value.sig;
3225 break;
3226
3227 case TARGET_WAITKIND_NO_HISTORY:
3228 /* Reverse execution: target ran out of history info. */
3229 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3230 print_stop_reason (NO_HISTORY, 0);
3231 stop_stepping (ecs);
3232 return;
3233 }
3234
3235 if (ecs->new_thread_event)
3236 {
3237 if (non_stop)
3238 /* Non-stop assumes that the target handles adding new threads
3239 to the thread list. */
3240 internal_error (__FILE__, __LINE__, "\
3241 targets should add new threads to the thread list themselves in non-stop mode.");
3242
3243 /* We may want to consider not doing a resume here in order to
3244 give the user a chance to play with the new thread. It might
3245 be good to make that a user-settable option. */
3246
3247 /* At this point, all threads are stopped (happens automatically
3248 in either the OS or the native code). Therefore we need to
3249 continue all threads in order to make progress. */
3250
3251 if (!ptid_equal (ecs->ptid, inferior_ptid))
3252 context_switch (ecs->ptid);
3253 target_resume (RESUME_ALL, 0, TARGET_SIGNAL_0);
3254 prepare_to_wait (ecs);
3255 return;
3256 }
3257
3258 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED)
3259 {
3260 /* Do we need to clean up the state of a thread that has
3261 completed a displaced single-step? (Doing so usually affects
3262 the PC, so do it here, before we set stop_pc.) */
3263 displaced_step_fixup (ecs->ptid, ecs->event_thread->stop_signal);
3264
3265 /* If we either finished a single-step or hit a breakpoint, but
3266 the user wanted this thread to be stopped, pretend we got a
3267 SIG0 (generic unsignaled stop). */
3268
3269 if (ecs->event_thread->stop_requested
3270 && ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3271 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3272 }
3273
3274 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3275
3276 if (debug_infrun)
3277 {
3278 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3279 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3280 struct cleanup *old_chain = save_inferior_ptid ();
3281
3282 inferior_ptid = ecs->ptid;
3283
3284 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
3285 paddress (gdbarch, stop_pc));
3286 if (target_stopped_by_watchpoint ())
3287 {
3288 CORE_ADDR addr;
3289 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
3290
3291 if (target_stopped_data_address (&current_target, &addr))
3292 fprintf_unfiltered (gdb_stdlog,
3293 "infrun: stopped data address = %s\n",
3294 paddress (gdbarch, addr));
3295 else
3296 fprintf_unfiltered (gdb_stdlog,
3297 "infrun: (no data address available)\n");
3298 }
3299
3300 do_cleanups (old_chain);
3301 }
3302
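/* stepping_past_singlestep_breakpoint is set further below when some
   thread other than the one we were software single-stepping hits the
   single-step breakpoint.  That thread is first stepped past the
   breakpoint; once its trap arrives here, we switch back to the saved
   single-step thread (saved_singlestep_ptid) and resume. */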
3303 if (stepping_past_singlestep_breakpoint)
3304 {
3305 gdb_assert (singlestep_breakpoints_inserted_p);
3306 gdb_assert (ptid_equal (singlestep_ptid, ecs->ptid));
3307 gdb_assert (!ptid_equal (singlestep_ptid, saved_singlestep_ptid));
3308
3309 stepping_past_singlestep_breakpoint = 0;
3310
3311 /* We've either finished single-stepping past the single-step
3312 breakpoint, or stopped for some other reason. It would be nice if
3313 we could tell, but we can't reliably. */
3314 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3315 {
3316 if (debug_infrun)
3317 fprintf_unfiltered (gdb_stdlog, "infrun: stepping_past_singlestep_breakpoint\n");
3318 /* Pull the single step breakpoints out of the target. */
3319 remove_single_step_breakpoints ();
3320 singlestep_breakpoints_inserted_p = 0;
3321
3322 ecs->random_signal = 0;
3323 ecs->event_thread->trap_expected = 0;
3324
3325 context_switch (saved_singlestep_ptid);
3326 if (deprecated_context_hook)
3327 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3328
3329 resume (1, TARGET_SIGNAL_0);
3330 prepare_to_wait (ecs);
3331 return;
3332 }
3333 }
3334
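/* A non-null deferred_step_ptid means a user-requested step was
   postponed (all-stop only): another thread was sitting on a
   breakpoint and had to be stepped over it first.  Once that
   step-over finishes, switch back and perform the deferred step. */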
3335 if (!ptid_equal (deferred_step_ptid, null_ptid))
3336 {
3337 /* In non-stop mode, there's never a deferred_step_ptid set. */
3338 gdb_assert (!non_stop);
3339
3340 /* If we stopped for some other reason than single-stepping, ignore
3341 the fact that we were supposed to switch back. */
3342 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3343 {
3344 if (debug_infrun)
3345 fprintf_unfiltered (gdb_stdlog,
3346 "infrun: handling deferred step\n");
3347
3348 /* Pull the single step breakpoints out of the target. */
3349 if (singlestep_breakpoints_inserted_p)
3350 {
3351 remove_single_step_breakpoints ();
3352 singlestep_breakpoints_inserted_p = 0;
3353 }
3354
3355 /* Note: We do not call context_switch at this point, as the
3356 context is already set up for stepping the original thread. */
3357 switch_to_thread (deferred_step_ptid);
3358 deferred_step_ptid = null_ptid;
3359 /* Suppress spurious "Switching to ..." message. */
3360 previous_inferior_ptid = inferior_ptid;
3361
3362 resume (1, TARGET_SIGNAL_0);
3363 prepare_to_wait (ecs);
3364 return;
3365 }
3366
3367 deferred_step_ptid = null_ptid;
3368 }
3369
3370 /* See if a thread hit a thread-specific breakpoint that was meant for
3371 another thread. If so, then step that thread past the breakpoint,
3372 and continue it. */
3373
3374 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3375 {
3376 int thread_hop_needed = 0;
3377 struct address_space *aspace =
3378 get_regcache_aspace (get_thread_regcache (ecs->ptid));
3379
3380 /* Check if a regular breakpoint has been hit before checking
3381 for a potential single step breakpoint. Otherwise, GDB will
3382 not see this breakpoint hit when stepping onto breakpoints. */
3383 if (regular_breakpoint_inserted_here_p (aspace, stop_pc))
3384 {
3385 ecs->random_signal = 0;
3386 if (!breakpoint_thread_match (aspace, stop_pc, ecs->ptid))
3387 thread_hop_needed = 1;
3388 }
3389 else if (singlestep_breakpoints_inserted_p)
3390 {
3391 /* We have not context switched yet, so this should be true
3392 no matter which thread hit the singlestep breakpoint. */
3393 gdb_assert (ptid_equal (inferior_ptid, singlestep_ptid));
3394 if (debug_infrun)
3395 fprintf_unfiltered (gdb_stdlog, "infrun: software single step "
3396 "trap for %s\n",
3397 target_pid_to_str (ecs->ptid));
3398
3399 ecs->random_signal = 0;
3400 /* The call to in_thread_list is necessary because PTIDs sometimes
3401 change when we go from single-threaded to multi-threaded. If
3402 the singlestep_ptid is still in the list, assume that it is
3403 really different from ecs->ptid. */
3404 if (!ptid_equal (singlestep_ptid, ecs->ptid)
3405 && in_thread_list (singlestep_ptid))
3406 {
3407 /* If the PC of the thread we were trying to single-step
3408 has changed, discard this event (which we were going
3409 to ignore anyway), and pretend we saw that thread
3410 trap. This prevents us continuously moving the
3411 single-step breakpoint forward, one instruction at a
3412 time. If the PC has changed, then the thread we were
3413 trying to single-step has trapped or been signalled,
3414 but the event has not been reported to GDB yet.
3415
3416 There might be some cases where this loses signal
3417 information, if a signal has arrived at exactly the
3418 same time that the PC changed, but this is the best
3419 we can do with the information available. Perhaps we
3420 should arrange to report all events for all threads
3421 when they stop, or to re-poll the remote looking for
3422 this particular thread (i.e. temporarily enable
3423 schedlock). */
3424
3425 CORE_ADDR new_singlestep_pc
3426 = regcache_read_pc (get_thread_regcache (singlestep_ptid));
3427
3428 if (new_singlestep_pc != singlestep_pc)
3429 {
3430 enum target_signal stop_signal;
3431
3432 if (debug_infrun)
3433 fprintf_unfiltered (gdb_stdlog, "infrun: unexpected thread,"
3434 " but expected thread advanced also\n");
3435
3436 /* The current context still belongs to
3437 singlestep_ptid. Don't swap here, since that's
3438 the context we want to use. Just fudge our
3439 state and continue. */
3440 stop_signal = ecs->event_thread->stop_signal;
3441 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3442 ecs->ptid = singlestep_ptid;
3443 ecs->event_thread = find_thread_ptid (ecs->ptid);
3444 ecs->event_thread->stop_signal = stop_signal;
3445 stop_pc = new_singlestep_pc;
3446 }
3447 else
3448 {
3449 if (debug_infrun)
3450 fprintf_unfiltered (gdb_stdlog,
3451 "infrun: unexpected thread\n");
3452
3453 thread_hop_needed = 1;
3454 stepping_past_singlestep_breakpoint = 1;
3455 saved_singlestep_ptid = singlestep_ptid;
3456 }
3457 }
3458 }
3459
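/* A thread hop means the trap belongs to some other thread: either a
   breakpoint with a thread condition that this thread does not match,
   or a software single-step breakpoint intended for another thread.
   Step the reporting thread past the breakpoint and keep going
   without reporting a stop to the user. */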
3460 if (thread_hop_needed)
3461 {
3462 struct regcache *thread_regcache;
3463 int remove_status = 0;
3464
3465 if (debug_infrun)
3466 fprintf_unfiltered (gdb_stdlog, "infrun: thread_hop_needed\n");
3467
3468 /* Switch context before touching inferior memory, the
3469 previous thread may have exited. */
3470 if (!ptid_equal (inferior_ptid, ecs->ptid))
3471 context_switch (ecs->ptid);
3472
3473 /* Saw a breakpoint, but it was hit by the wrong thread.
3474 Just continue. */
3475
3476 if (singlestep_breakpoints_inserted_p)
3477 {
3478 /* Pull the single step breakpoints out of the target. */
3479 remove_single_step_breakpoints ();
3480 singlestep_breakpoints_inserted_p = 0;
3481 }
3482
3483 /* If the arch can displace step, don't remove the
3484 breakpoints. */
3485 thread_regcache = get_thread_regcache (ecs->ptid);
3486 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
3487 remove_status = remove_breakpoints ();
3488
3489 /* Did we fail to remove breakpoints? If so, try
3490 to set the PC past the bp. (There's at least
3491 one situation in which we can fail to remove
3492 the bp's: On HP-UX's that use ttrace, we can't
3493 change the address space of a vforking child
3494 process until the child exits (well, okay, not
3495 then either :-) or execs.) */
3496 if (remove_status != 0)
3497 error (_("Cannot step over breakpoint hit in wrong thread"));
3498 else
3499 { /* Single step */
3500 if (!non_stop)
3501 {
3502 /* Only need to require the next event from this
3503 thread in all-stop mode. */
3504 waiton_ptid = ecs->ptid;
3505 infwait_state = infwait_thread_hop_state;
3506 }
3507
3508 ecs->event_thread->stepping_over_breakpoint = 1;
3509 keep_going (ecs);
3510 return;
3511 }
3512 }
3513 else if (singlestep_breakpoints_inserted_p)
3514 {
3515 sw_single_step_trap_p = 1;
3516 ecs->random_signal = 0;
3517 }
3518 }
3519 else
3520 ecs->random_signal = 1;
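/* At this point ecs->random_signal is only a first guess: nonzero
   means the trap does not look like one of ours and should be treated
   as a genuine signal for the inferior.  The value is refined below
   once the bpstat for this stop has been computed. */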
3521
3522 /* See if something interesting happened to the non-current thread. If
3523 so, then switch to that thread. */
3524 if (!ptid_equal (ecs->ptid, inferior_ptid))
3525 {
3526 if (debug_infrun)
3527 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
3528
3529 context_switch (ecs->ptid);
3530
3531 if (deprecated_context_hook)
3532 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3533 }
3534
3535 /* At this point, get hold of the now-current thread's frame. */
3536 frame = get_current_frame ();
3537 gdbarch = get_frame_arch (frame);
3538
3539 if (singlestep_breakpoints_inserted_p)
3540 {
3541 /* Pull the single step breakpoints out of the target. */
3542 remove_single_step_breakpoints ();
3543 singlestep_breakpoints_inserted_p = 0;
3544 }
3545
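/* Decide whether a watchpoint triggered at this stop.  If this event
   is just the single step that completes the memory write after a
   watchpoint trap (see the step-over code below), the watchpoint was
   already accounted for at the original stop, so do not query the
   target again. */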
3546 if (stepped_after_stopped_by_watchpoint)
3547 stopped_by_watchpoint = 0;
3548 else
3549 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
3550
3551 /* If necessary, step over this watchpoint. We'll be back to display
3552 it in a moment. */
3553 if (stopped_by_watchpoint
3554 && (target_have_steppable_watchpoint
3555 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
3556 {
3557 /* At this point, we are stopped at an instruction which has
3558 attempted to write to a piece of memory under control of
3559 a watchpoint. The instruction hasn't actually executed
3560 yet. If we were to evaluate the watchpoint expression
3561 now, we would get the old value, and therefore no change
3562 would seem to have occurred.
3563
3564 In order to make watchpoints work `right', we really need
3565 to complete the memory write, and then evaluate the
3566 watchpoint expression. We do this by single-stepping the
3567 target.
3568
3569 It may not be necessary to disable the watchpoint to step over
3570 it. For example, the PA can (with some kernel cooperation)
3571 single step over a watchpoint without disabling the watchpoint.
3572
3573 It is far more common to need to disable a watchpoint to step
3574 the inferior over it. If we have non-steppable watchpoints,
3575 we must disable the current watchpoint; it's simplest to
3576 disable all watchpoints and breakpoints. */
3577 int hw_step = 1;
3578
3579 if (!target_have_steppable_watchpoint)
3580 remove_breakpoints ();
3581 /* Single step */
3582 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
3583 target_resume (ecs->ptid, hw_step, TARGET_SIGNAL_0);
3584 waiton_ptid = ecs->ptid;
3585 if (target_have_steppable_watchpoint)
3586 infwait_state = infwait_step_watch_state;
3587 else
3588 infwait_state = infwait_nonstep_watch_state;
3589 prepare_to_wait (ecs);
3590 return;
3591 }
3592
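/* Reset per-event state: look up the function containing stop_pc and
   clear the flags and bpstat information left over from the previous
   event. */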
3593 ecs->stop_func_start = 0;
3594 ecs->stop_func_end = 0;
3595 ecs->stop_func_name = 0;
3596 /* Don't care about return value; stop_func_start and stop_func_name
3597 will both be 0 if it doesn't work. */
3598 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3599 &ecs->stop_func_start, &ecs->stop_func_end);
3600 ecs->stop_func_start
3601 += gdbarch_deprecated_function_start_offset (gdbarch);
3602 ecs->event_thread->stepping_over_breakpoint = 0;
3603 bpstat_clear (&ecs->event_thread->stop_bpstat);
3604 ecs->event_thread->stop_step = 0;
3605 stop_print_frame = 1;
3606 ecs->random_signal = 0;
3607 stopped_by_random_signal = 0;
3608
3609 /* Hide inlined functions starting here, unless we just performed stepi or
3610 nexti. After stepi and nexti, always show the innermost frame (not any
3611 inline function call sites). */
3612 if (ecs->event_thread->step_range_end != 1)
3613 skip_inline_frames (ecs->ptid);
3614
3615 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3616 && ecs->event_thread->trap_expected
3617 && gdbarch_single_step_through_delay_p (gdbarch)
3618 && currently_stepping (ecs->event_thread))
3619 {
3620 /* We're trying to step off a breakpoint. Turns out that we're
3621 also on an instruction that needs to be stepped multiple
3622 times before it has been fully executed. E.g., architectures
3623 with a delay slot. It needs to be stepped twice, once for
3624 the instruction and once for the delay slot. */
3625 int step_through_delay
3626 = gdbarch_single_step_through_delay (gdbarch, frame);
3627 if (debug_infrun && step_through_delay)
3628 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
3629 if (ecs->event_thread->step_range_end == 0 && step_through_delay)
3630 {
3631 /* The user issued a continue when stopped at a breakpoint.
3632 Set up for another trap and get out of here. */
3633 ecs->event_thread->stepping_over_breakpoint = 1;
3634 keep_going (ecs);
3635 return;
3636 }
3637 else if (step_through_delay)
3638 {
3639 /* The user issued a step when stopped at a breakpoint.
3640 Maybe we should stop, maybe we should not - the delay
3641 slot *might* correspond to a line of source. In any
3642 case, don't decide that here, just set
3643 ecs->stepping_over_breakpoint, making sure we
3644 single-step again before breakpoints are re-inserted. */
3645 ecs->event_thread->stepping_over_breakpoint = 1;
3646 }
3647 }
3648
3649 /* Look at the cause of the stop, and decide what to do.
3650 The alternatives are:
3651 1) stop_stepping and return; to really stop and return to the debugger,
3652 2) keep_going and return to start up again
3653 (set ecs->event_thread->stepping_over_breakpoint to 1 to single step once)
3654 3) set ecs->random_signal to 1, and the decision between 1 and 2
3655 will be made according to the signal handling tables. */
3656
3657 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3658 || stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_NO_SIGSTOP
3659 || stop_soon == STOP_QUIETLY_REMOTE)
3660 {
3661 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP && stop_after_trap)
3662 {
3663 if (debug_infrun)
3664 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
3665 stop_print_frame = 0;
3666 stop_stepping (ecs);
3667 return;
3668 }
3669
3670 /* This originates from start_remote(), start_inferior() and
3671 the shared library hook functions. */
3672 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
3673 {
3674 if (debug_infrun)
3675 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3676 stop_stepping (ecs);
3677 return;
3678 }
3679
3680 /* This originates from attach_command(). We need to overwrite
3681 the stop_signal here, because some kernels don't ignore a
3682 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
3683 See more comments in inferior.h. On the other hand, if we
3684 get a non-SIGSTOP, report it to the user - assume the backend
3685 will handle the SIGSTOP if it should show up later.
3686
3687 Also consider that the attach is complete when we see a
3688 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
3689 target extended-remote report it instead of a SIGSTOP
3690 (e.g. gdbserver). We already rely on SIGTRAP being our
3691 signal, so this is no exception.
3692
3693 Also consider that the attach is complete when we see a
3694 TARGET_SIGNAL_0. In non-stop mode, GDB will explicitly tell
3695 the target to stop all threads of the inferior, in case the
3696 low level attach operation doesn't stop them implicitly. If
3697 they weren't stopped implicitly, then the stub will report a
3698 TARGET_SIGNAL_0, meaning: stopped for no particular reason
3699 other than GDB's request. */
3700 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3701 && (ecs->event_thread->stop_signal == TARGET_SIGNAL_STOP
3702 || ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3703 || ecs->event_thread->stop_signal == TARGET_SIGNAL_0))
3704 {
3705 stop_stepping (ecs);
3706 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3707 return;
3708 }
3709
3710 /* See if there is a breakpoint at the current PC. */
3711 ecs->event_thread->stop_bpstat
3712 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3713 stop_pc, ecs->ptid);
3714
3715 /* The following is needed in case a breakpoint condition called a
3716 function. */
3717 stop_print_frame = 1;
3718
3719 /* This is where we handle "moribund" watchpoints. Unlike
3720 software breakpoints traps, hardware watchpoint traps are
3721 always distinguishable from random traps. If no high-level
3722 watchpoint is associated with the reported stop data address
3723 anymore, then the bpstat does not explain the signal ---
3724 simply make sure to ignore it if `stopped_by_watchpoint' is
3725 set. */
3726
3727 if (debug_infrun
3728 && ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3729 && !bpstat_explains_signal (ecs->event_thread->stop_bpstat)
3730 && stopped_by_watchpoint)
3731 fprintf_unfiltered (gdb_stdlog, "\
3732 infrun: no user watchpoint explains watchpoint SIGTRAP, ignoring\n");
3733
3734 /* NOTE: cagney/2003-03-29: These two checks for a random signal
3735 at one stage in the past included checks for an inferior
3736 function call's call dummy's return breakpoint. The original
3737 comment, that went with the test, read:
3738
3739 ``End of a stack dummy. Some systems (e.g. Sony news) give
3740 another signal besides SIGTRAP, so check here as well as
3741 above.''
3742
3743 If someone ever tries to get call dummies on a
3744 non-executable stack to work (where the target would stop
3745 with something like a SIGSEGV), then those tests might need
3746 to be re-instated. Given, however, that the tests were only
3747 enabled when momentary breakpoints were not being used, I
3748 suspect that it won't be the case.
3749
3750 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
3751 be necessary for call dummies on a non-executable stack on
3752 SPARC. */
3753
3754 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3755 ecs->random_signal
3756 = !(bpstat_explains_signal (ecs->event_thread->stop_bpstat)
3757 || stopped_by_watchpoint
3758 || ecs->event_thread->trap_expected
3759 || (ecs->event_thread->step_range_end
3760 && ecs->event_thread->step_resume_breakpoint == NULL));
3761 else
3762 {
3763 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
3764 if (!ecs->random_signal)
3765 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3766 }
3767 }
3768
3769 /* When we reach this point, we've pretty much decided
3770 that the reason for stopping must've been a random
3771 (unexpected) signal. */
3772
3773 else
3774 ecs->random_signal = 1;
3775
3776 process_event_stop_test:
3777
3778 /* Re-fetch current thread's frame in case we did a
3779 "goto process_event_stop_test" above. */
3780 frame = get_current_frame ();
3781 gdbarch = get_frame_arch (frame);
3782
3783 /* For the program's own signals, act according to
3784 the signal handling tables. */
3785
3786 if (ecs->random_signal)
3787 {
3788 /* Signal not for debugging purposes. */
3789 int printed = 0;
3790
3791 if (debug_infrun)
3792 fprintf_unfiltered (gdb_stdlog, "infrun: random signal %d\n",
3793 ecs->event_thread->stop_signal);
3794
3795 stopped_by_random_signal = 1;
3796
3797 if (signal_print[ecs->event_thread->stop_signal])
3798 {
3799 printed = 1;
3800 target_terminal_ours_for_output ();
3801 print_stop_reason (SIGNAL_RECEIVED, ecs->event_thread->stop_signal);
3802 }
3803 /* Always stop on signals if we're either just gaining control
3804 of the program, or the user explicitly requested this thread
3805 to remain stopped. */
3806 if (stop_soon != NO_STOP_QUIETLY
3807 || ecs->event_thread->stop_requested
3808 || signal_stop_state (ecs->event_thread->stop_signal))
3809 {
3810 stop_stepping (ecs);
3811 return;
3812 }
3813 /* If not going to stop, give terminal back
3814 if we took it away. */
3815 else if (printed)
3816 target_terminal_inferior ();
3817
3818 /* Clear the signal if it should not be passed. */
3819 if (signal_program[ecs->event_thread->stop_signal] == 0)
3820 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3821
3822 if (ecs->event_thread->prev_pc == stop_pc
3823 && ecs->event_thread->trap_expected
3824 && ecs->event_thread->step_resume_breakpoint == NULL)
3825 {
3826 /* We were just starting a new sequence, attempting to
3827 single-step off of a breakpoint and expecting a SIGTRAP.
3828 Instead this signal arrives. This signal will take us out
3829 of the stepping range so GDB needs to remember to, when
3830 the signal handler returns, resume stepping off that
3831 breakpoint. */
3832 /* To simplify things, "continue" is forced to use the same
3833 code paths as single-step - set a breakpoint at the
3834 signal return address and then, once hit, step off that
3835 breakpoint. */
3836 if (debug_infrun)
3837 fprintf_unfiltered (gdb_stdlog,
3838 "infrun: signal arrived while stepping over "
3839 "breakpoint\n");
3840
3841 insert_step_resume_breakpoint_at_frame (frame);
3842 ecs->event_thread->step_after_step_resume_breakpoint = 1;
3843 keep_going (ecs);
3844 return;
3845 }
3846
3847 if (ecs->event_thread->step_range_end != 0
3848 && ecs->event_thread->stop_signal != TARGET_SIGNAL_0
3849 && (ecs->event_thread->step_range_start <= stop_pc
3850 && stop_pc < ecs->event_thread->step_range_end)
3851 && frame_id_eq (get_stack_frame_id (frame),
3852 ecs->event_thread->step_stack_frame_id)
3853 && ecs->event_thread->step_resume_breakpoint == NULL)
3854 {
3855 /* The inferior is about to take a signal that will take it
3856 out of the single step range. Set a breakpoint at the
3857 current PC (which is presumably where the signal handler
3858 will eventually return) and then allow the inferior to
3859 run free.
3860
3861 Note that this is only needed for a signal delivered
3862 while in the single-step range. Nested signals aren't a
3863 problem as they eventually all return. */
3864 if (debug_infrun)
3865 fprintf_unfiltered (gdb_stdlog,
3866 "infrun: signal may take us out of "
3867 "single-step range\n");
3868
3869 insert_step_resume_breakpoint_at_frame (frame);
3870 keep_going (ecs);
3871 return;
3872 }
3873
3874 /* Note: step_resume_breakpoint may be non-NULL. This occurs
3875 when either there's a nested signal, or when there's a
3876 pending signal enabled just as the signal handler returns
3877 (leaving the inferior at the step-resume-breakpoint without
3878 actually executing it). Either way continue until the
3879 breakpoint is really hit. */
3880 keep_going (ecs);
3881 return;
3882 }
3883
3884 /* Handle cases caused by hitting a breakpoint. */
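/* bpstat_what collapses the list of breakpoints that explain this
   stop into a single recommended main action (plus a call-dummy
   flag); the switch below dispatches on that action. */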
3885 {
3886 CORE_ADDR jmp_buf_pc;
3887 struct bpstat_what what;
3888
3889 what = bpstat_what (ecs->event_thread->stop_bpstat);
3890
3891 if (what.call_dummy)
3892 {
3893 stop_stack_dummy = 1;
3894 }
3895
3896 switch (what.main_action)
3897 {
3898 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
3899 /* If we hit the breakpoint at longjmp while stepping, we
3900 install a momentary breakpoint at the target of the
3901 jmp_buf. */
3902
3903 if (debug_infrun)
3904 fprintf_unfiltered (gdb_stdlog,
3905 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
3906
3907 ecs->event_thread->stepping_over_breakpoint = 1;
3908
3909 if (!gdbarch_get_longjmp_target_p (gdbarch)
3910 || !gdbarch_get_longjmp_target (gdbarch, frame, &jmp_buf_pc))
3911 {
3912 if (debug_infrun)
3913 fprintf_unfiltered (gdb_stdlog, "\
3914 infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME (!gdbarch_get_longjmp_target)\n");
3915 keep_going (ecs);
3916 return;
3917 }
3918
3919 /* We're going to replace the current step-resume breakpoint
3920 with a longjmp-resume breakpoint. */
3921 delete_step_resume_breakpoint (ecs->event_thread);
3922
3923 /* Insert a breakpoint at resume address. */
3924 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
3925
3926 keep_going (ecs);
3927 return;
3928
3929 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
3930 if (debug_infrun)
3931 fprintf_unfiltered (gdb_stdlog,
3932 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
3933
3934 gdb_assert (ecs->event_thread->step_resume_breakpoint != NULL);
3935 delete_step_resume_breakpoint (ecs->event_thread);
3936
3937 ecs->event_thread->stop_step = 1;
3938 print_stop_reason (END_STEPPING_RANGE, 0);
3939 stop_stepping (ecs);
3940 return;
3941
3942 case BPSTAT_WHAT_SINGLE:
3943 if (debug_infrun)
3944 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
3945 ecs->event_thread->stepping_over_breakpoint = 1;
3946 /* Still need to check other stuff, at least the case
3947 where we are stepping and step out of the right range. */
3948 break;
3949
3950 case BPSTAT_WHAT_STOP_NOISY:
3951 if (debug_infrun)
3952 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
3953 stop_print_frame = 1;
3954
3955 /* We are about to nuke the step_resume_breakpoint via the
3956 cleanup chain, so no need to worry about it here. */
3957
3958 stop_stepping (ecs);
3959 return;
3960
3961 case BPSTAT_WHAT_STOP_SILENT:
3962 if (debug_infrun)
3963 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
3964 stop_print_frame = 0;
3965
3966 /* We are about to nuke the step_resume_breakpoint via the
3967 cleanup chain, so no need to worry about it here. */
3968
3969 stop_stepping (ecs);
3970 return;
3971
3972 case BPSTAT_WHAT_STEP_RESUME:
3973 if (debug_infrun)
3974 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
3975
3976 delete_step_resume_breakpoint (ecs->event_thread);
3977 if (ecs->event_thread->step_after_step_resume_breakpoint)
3978 {
3979 /* Back when the step-resume breakpoint was inserted, we
3980 were trying to single-step off a breakpoint. Go back
3981 to doing that. */
3982 ecs->event_thread->step_after_step_resume_breakpoint = 0;
3983 ecs->event_thread->stepping_over_breakpoint = 1;
3984 keep_going (ecs);
3985 return;
3986 }
3987 if (stop_pc == ecs->stop_func_start
3988 && execution_direction == EXEC_REVERSE)
3989 {
3990 /* We are stepping over a function call in reverse, and
3991 just hit the step-resume breakpoint at the start
3992 address of the function. Go back to single-stepping,
3993 which should take us back to the function call. */
3994 ecs->event_thread->stepping_over_breakpoint = 1;
3995 keep_going (ecs);
3996 return;
3997 }
3998 break;
3999
4000 case BPSTAT_WHAT_CHECK_SHLIBS:
4001 {
4002 if (debug_infrun)
4003 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_CHECK_SHLIBS\n");
4004
4005 /* Check for any newly added shared libraries if we're
4006 supposed to be adding them automatically. Switch
4007 terminal for any messages produced by
4008 breakpoint_re_set. */
4009 target_terminal_ours_for_output ();
4010 /* NOTE: cagney/2003-11-25: Make certain that the target
4011 stack's section table is kept up-to-date. Architectures
4012 (e.g., PPC64) use the section table to perform
4013 operations such as address => section name and hence
4014 require the table to contain all sections (including
4015 those found in shared libraries). */
4016 #ifdef SOLIB_ADD
4017 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
4018 #else
4019 solib_add (NULL, 0, &current_target, auto_solib_add);
4020 #endif
4021 target_terminal_inferior ();
4022
4023 /* If requested, stop when the dynamic linker notifies
4024 gdb of events. This allows the user to get control
4025 and place breakpoints in initializer routines for
4026 dynamically loaded objects (among other things). */
4027 if (stop_on_solib_events || stop_stack_dummy)
4028 {
4029 stop_stepping (ecs);
4030 return;
4031 }
4032 else
4033 {
4034 /* We want to step over this breakpoint, then keep going. */
4035 ecs->event_thread->stepping_over_breakpoint = 1;
4036 break;
4037 }
4038 }
4039 break;
4040
4041 case BPSTAT_WHAT_CHECK_JIT:
4042 if (debug_infrun)
4043 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_CHECK_JIT\n");
4044
4045 /* Switch terminal for any messages produced by breakpoint_re_set. */
4046 target_terminal_ours_for_output ();
4047
4048 jit_event_handler (gdbarch);
4049
4050 target_terminal_inferior ();
4051
4052 /* We want to step over this breakpoint, then keep going. */
4053 ecs->event_thread->stepping_over_breakpoint = 1;
4054
4055 break;
4056
4057 case BPSTAT_WHAT_LAST:
4058 /* Not a real action code, but listed here to shut up gcc -Wall. */
4059
4060 case BPSTAT_WHAT_KEEP_CHECKING:
4061 break;
4062 }
4063 }
4064
4065 /* We come here if we hit a breakpoint but should not
4066 stop for it. Possibly we also were stepping
4067 and should stop for that. So fall through and
4068 test for stepping. But, if not stepping,
4069 do not stop. */
4070
4071 /* In all-stop mode, if we're currently stepping but have stopped in
4072 some other thread, we need to switch back to the stepped thread. */
4073 if (!non_stop)
4074 {
4075 struct thread_info *tp;
4076 tp = iterate_over_threads (currently_stepping_or_nexting_callback,
4077 ecs->event_thread);
4078 if (tp)
4079 {
4080 /* However, if the current thread is blocked on some internal
4081 breakpoint, and we simply need to step over that breakpoint
4082 to get it going again, do that first. */
4083 if ((ecs->event_thread->trap_expected
4084 && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP)
4085 || ecs->event_thread->stepping_over_breakpoint)
4086 {
4087 keep_going (ecs);
4088 return;
4089 }
4090
4091 /* If the stepping thread exited, then don't try to switch
4092 back and resume it, which could fail in several different
4093 ways depending on the target. Instead, just keep going.
4094
4095 We can find a stepping dead thread in the thread list in
4096 two cases:
4097
4098 - The target supports thread exit events, and when the
4099 target tries to delete the thread from the thread list,
4100 inferior_ptid pointed at the exiting thread. In such
4101 case, calling delete_thread does not really remove the
4102 thread from the list; instead, the thread is left listed,
4103 with 'exited' state.
4104
4105 - The target's debug interface does not support thread
4106 exit events, and so we have no idea whatsoever if the
4107 previously stepping thread is still alive. For that
4108 reason, we need to synchronously query the target
4109 now. */
4110 if (is_exited (tp->ptid)
4111 || !target_thread_alive (tp->ptid))
4112 {
4113 if (debug_infrun)
4114 fprintf_unfiltered (gdb_stdlog, "\
4115 infrun: not switching back to stepped thread, it has vanished\n");
4116
4117 delete_thread (tp->ptid);
4118 keep_going (ecs);
4119 return;
4120 }
4121
4122 /* Otherwise, we no longer expect a trap in the current thread.
4123 Clear the trap_expected flag before switching back -- this is
4124 what keep_going would do as well, if we called it. */
4125 ecs->event_thread->trap_expected = 0;
4126
4127 if (debug_infrun)
4128 fprintf_unfiltered (gdb_stdlog,
4129 "infrun: switching back to stepped thread\n");
4130
4131 ecs->event_thread = tp;
4132 ecs->ptid = tp->ptid;
4133 context_switch (ecs->ptid);
4134 keep_going (ecs);
4135 return;
4136 }
4137 }
4138
4139 /* Are we stepping to get the inferior out of the dynamic linker's
4140 hook (and possibly the dld itself) after catching a shlib
4141 event? */
4142 if (ecs->event_thread->stepping_through_solib_after_catch)
4143 {
4144 #if defined(SOLIB_ADD)
4145 /* Have we reached our destination? If not, keep going. */
4146 if (SOLIB_IN_DYNAMIC_LINKER (PIDGET (ecs->ptid), stop_pc))
4147 {
4148 if (debug_infrun)
4149 fprintf_unfiltered (gdb_stdlog, "infrun: stepping in dynamic linker\n");
4150 ecs->event_thread->stepping_over_breakpoint = 1;
4151 keep_going (ecs);
4152 return;
4153 }
4154 #endif
4155 if (debug_infrun)
4156 fprintf_unfiltered (gdb_stdlog, "infrun: step past dynamic linker\n");
4157 /* Else, stop and report the catchpoint(s) whose triggering
4158 caused us to begin stepping. */
4159 ecs->event_thread->stepping_through_solib_after_catch = 0;
4160 bpstat_clear (&ecs->event_thread->stop_bpstat);
4161 ecs->event_thread->stop_bpstat
4162 = bpstat_copy (ecs->event_thread->stepping_through_solib_catchpoints);
4163 bpstat_clear (&ecs->event_thread->stepping_through_solib_catchpoints);
4164 stop_print_frame = 1;
4165 stop_stepping (ecs);
4166 return;
4167 }
4168
4169 if (ecs->event_thread->step_resume_breakpoint)
4170 {
4171 if (debug_infrun)
4172 fprintf_unfiltered (gdb_stdlog,
4173 "infrun: step-resume breakpoint is inserted\n");
4174
4175 /* Having a step-resume breakpoint overrides anything
4176 else having to do with stepping commands until
4177 that breakpoint is reached. */
4178 keep_going (ecs);
4179 return;
4180 }
4181
4182 if (ecs->event_thread->step_range_end == 0)
4183 {
4184 if (debug_infrun)
4185 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4186 /* Likewise if we aren't even stepping. */
4187 keep_going (ecs);
4188 return;
4189 }
4190
4191 /* Re-fetch current thread's frame in case the code above caused
4192 the frame cache to be re-initialized, making our FRAME variable
4193 a dangling pointer. */
4194 frame = get_current_frame ();
4195
4196 /* If stepping through a line, keep going if still within it.
4197
4198 Note that step_range_end is the address of the first instruction
4199 beyond the step range, and NOT the address of the last instruction
4200 within it!
4201
4202 Note also that during reverse execution, we may be stepping
4203 through a function epilogue and therefore must detect when
4204 the current frame changes in the middle of a line. */
4205
4206 if (stop_pc >= ecs->event_thread->step_range_start
4207 && stop_pc < ecs->event_thread->step_range_end
4208 && (execution_direction != EXEC_REVERSE
4209 || frame_id_eq (get_frame_id (frame),
4210 ecs->event_thread->step_frame_id)))
4211 {
4212 if (debug_infrun)
4213 fprintf_unfiltered
4214 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4215 paddress (gdbarch, ecs->event_thread->step_range_start),
4216 paddress (gdbarch, ecs->event_thread->step_range_end));
4217
4218 /* When stepping backward, stop at beginning of line range
4219 (unless it's the function entry point, in which case
4220 keep going back to the call point). */
4221 if (stop_pc == ecs->event_thread->step_range_start
4222 && stop_pc != ecs->stop_func_start
4223 && execution_direction == EXEC_REVERSE)
4224 {
4225 ecs->event_thread->stop_step = 1;
4226 print_stop_reason (END_STEPPING_RANGE, 0);
4227 stop_stepping (ecs);
4228 }
4229 else
4230 keep_going (ecs);
4231
4232 return;
4233 }
4234
4235 /* We stepped out of the stepping range. */
4236
4237 /* If we are stepping at the source level and entered the runtime
4238 loader dynamic symbol resolution code...
4239
4240 EXEC_FORWARD: we keep on single stepping until we exit the run
4241 time loader code and reach the callee's address.
4242
4243 EXEC_REVERSE: we've already executed the callee (backward), and
4244 the runtime loader code is handled just like any other
4245 undebuggable function call. Now we need only keep stepping
4246 backward through the trampoline code, and that's handled further
4247 down, so there is nothing for us to do here. */
4248
4249 if (execution_direction != EXEC_REVERSE
4250 && ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4251 && in_solib_dynsym_resolve_code (stop_pc))
4252 {
4253 CORE_ADDR pc_after_resolver =
4254 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4255
4256 if (debug_infrun)
4257 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into dynsym resolve code\n");
4258
4259 if (pc_after_resolver)
4260 {
4261 /* Set up a step-resume breakpoint at the address
4262 indicated by SKIP_SOLIB_RESOLVER. */
4263 struct symtab_and_line sr_sal;
4264 init_sal (&sr_sal);
4265 sr_sal.pc = pc_after_resolver;
4266 sr_sal.pspace = get_frame_program_space (frame);
4267
4268 insert_step_resume_breakpoint_at_sal (gdbarch,
4269 sr_sal, null_frame_id);
4270 }
4271
4272 keep_going (ecs);
4273 return;
4274 }
4275
4276 if (ecs->event_thread->step_range_end != 1
4277 && (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4278 || ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4279 && get_frame_type (frame) == SIGTRAMP_FRAME)
4280 {
4281 if (debug_infrun)
4282 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into signal trampoline\n");
4283 /* The inferior, while doing a "step" or "next", has ended up in
4284 a signal trampoline (either by a signal being delivered or by
4285 the signal handler returning). Just single-step until the
4286 inferior leaves the trampoline (either by calling the handler
4287 or returning). */
4288 keep_going (ecs);
4289 return;
4290 }
4291
4292 /* Check for subroutine calls. The check for the current frame
4293 equalling the step ID is not necessary - the check of the
4294 previous frame's ID is sufficient - but it is a common case and
4295 cheaper than checking the previous frame's ID.
4296
4297 NOTE: frame_id_eq will never report two invalid frame IDs as
4298 being equal, so to get into this block, both the current and
4299 previous frame must have valid frame IDs. */
4300 /* The outer_frame_id check is a heuristic to detect stepping
4301 through startup code. If we step over an instruction which
4302 sets the stack pointer from an invalid value to a valid value,
4303 we may detect that as a subroutine call from the mythical
4304 "outermost" function. This could be fixed by marking
4305 outermost frames as !stack_p,code_p,special_p. Then the
4306 initial outermost frame, before sp was valid, would
4307 have code_addr == &_start. See the comment in frame_id_eq
4308 for more. */
4309 if (!frame_id_eq (get_stack_frame_id (frame),
4310 ecs->event_thread->step_stack_frame_id)
4311 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4312 ecs->event_thread->step_stack_frame_id)
4313 && (!frame_id_eq (ecs->event_thread->step_stack_frame_id,
4314 outer_frame_id)
4315 || step_start_function != find_pc_function (stop_pc))))
4316 {
4317 CORE_ADDR real_stop_pc;
4318
4319 if (debug_infrun)
4320 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4321
4322 if ((ecs->event_thread->step_over_calls == STEP_OVER_NONE)
4323 || ((ecs->event_thread->step_range_end == 1)
4324 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4325 ecs->stop_func_start)))
4326 {
4327 /* I presume that step_over_calls is only 0 when we're
4328 supposed to be stepping at the assembly language level
4329 ("stepi"). Just stop. */
4330 /* Also, maybe we just did a "nexti" inside a prolog, so we
4331 thought it was a subroutine call but it was not. Stop as
4332 well. FENN */
4333 /* And this works the same backward as frontward. MVS */
4334 ecs->event_thread->stop_step = 1;
4335 print_stop_reason (END_STEPPING_RANGE, 0);
4336 stop_stepping (ecs);
4337 return;
4338 }
4339
4340 /* Reverse stepping through solib trampolines. */
4341
4342 if (execution_direction == EXEC_REVERSE
4343 && ecs->event_thread->step_over_calls != STEP_OVER_NONE
4344 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4345 || (ecs->stop_func_start == 0
4346 && in_solib_dynsym_resolve_code (stop_pc))))
4347 {
4348 /* Any solib trampoline code can be handled in reverse
4349 by simply continuing to single-step. We have already
4350 executed the solib function (backwards), and a few
4351 steps will take us back through the trampoline to the
4352 caller. */
4353 keep_going (ecs);
4354 return;
4355 }
4356
4357 if (ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4358 {
4359 /* We're doing a "next".
4360
4361 Normal (forward) execution: set a breakpoint at the
4362 callee's return address (the address at which the caller
4363 will resume).
4364
4365 Reverse (backward) execution. set the step-resume
4366 breakpoint at the start of the function that we just
4367 stepped into (backwards), and continue to there. When we
4368 get there, we'll need to single-step back to the caller. */
4369
4370 if (execution_direction == EXEC_REVERSE)
4371 {
4372 struct symtab_and_line sr_sal;
4373
4374 /* Normal function call return (static or dynamic). */
4375 init_sal (&sr_sal);
4376 sr_sal.pc = ecs->stop_func_start;
4377 sr_sal.pspace = get_frame_program_space (frame);
4378 insert_step_resume_breakpoint_at_sal (gdbarch,
4379 sr_sal, null_frame_id);
4380 }
4381 else
4382 insert_step_resume_breakpoint_at_caller (frame);
4383
4384 keep_going (ecs);
4385 return;
4386 }
4387
4388 /* If we are in a function call trampoline (a stub between the
4389 calling routine and the real function), locate the real
4390 function. That's what tells us (a) whether we want to step
4391 into it at all, and (b) what prologue we want to run to the
4392 end of, if we do step into it. */
4393 real_stop_pc = skip_language_trampoline (frame, stop_pc);
4394 if (real_stop_pc == 0)
4395 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4396 if (real_stop_pc != 0)
4397 ecs->stop_func_start = real_stop_pc;
4398
4399 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
4400 {
4401 struct symtab_and_line sr_sal;
4402 init_sal (&sr_sal);
4403 sr_sal.pc = ecs->stop_func_start;
4404 sr_sal.pspace = get_frame_program_space (frame);
4405
4406 insert_step_resume_breakpoint_at_sal (gdbarch,
4407 sr_sal, null_frame_id);
4408 keep_going (ecs);
4409 return;
4410 }
4411
4412 /* If we have line number information for the function we are
4413 thinking of stepping into, step into it.
4414
4415 If there are several symtabs at that PC (e.g. with include
4416 files), just want to know whether *any* of them have line
4417 numbers. find_pc_line handles this. */
4418 {
4419 struct symtab_and_line tmp_sal;
4420
4421 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
4422 tmp_sal.pspace = get_frame_program_space (frame);
4423 if (tmp_sal.line != 0)
4424 {
4425 if (execution_direction == EXEC_REVERSE)
4426 handle_step_into_function_backward (gdbarch, ecs);
4427 else
4428 handle_step_into_function (gdbarch, ecs);
4429 return;
4430 }
4431 }
4432
4433 /* If we have no line number and the step-stop-if-no-debug is
4434 set, we stop the step so that the user has a chance to switch
4435 in assembly mode. */
4436 if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4437 && step_stop_if_no_debug)
4438 {
4439 ecs->event_thread->stop_step = 1;
4440 print_stop_reason (END_STEPPING_RANGE, 0);
4441 stop_stepping (ecs);
4442 return;
4443 }
4444
4445 if (execution_direction == EXEC_REVERSE)
4446 {
4447 /* Set a breakpoint at callee's start address.
4448 From there we can step once and be back in the caller. */
4449 struct symtab_and_line sr_sal;
4450 init_sal (&sr_sal);
4451 sr_sal.pc = ecs->stop_func_start;
4452 sr_sal.pspace = get_frame_program_space (frame);
4453 insert_step_resume_breakpoint_at_sal (gdbarch,
4454 sr_sal, null_frame_id);
4455 }
4456 else
4457 /* Set a breakpoint at callee's return address (the address
4458 at which the caller will resume). */
4459 insert_step_resume_breakpoint_at_caller (frame);
4460
4461 keep_going (ecs);
4462 return;
4463 }
4464
4465 /* Reverse stepping through solib trampolines. */
4466
4467 if (execution_direction == EXEC_REVERSE
4468 && ecs->event_thread->step_over_calls != STEP_OVER_NONE)
4469 {
4470 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4471 || (ecs->stop_func_start == 0
4472 && in_solib_dynsym_resolve_code (stop_pc)))
4473 {
4474 /* Any solib trampoline code can be handled in reverse
4475 by simply continuing to single-step. We have already
4476 executed the solib function (backwards), and a few
4477 steps will take us back through the trampoline to the
4478 caller. */
4479 keep_going (ecs);
4480 return;
4481 }
4482 else if (in_solib_dynsym_resolve_code (stop_pc))
4483 {
4484 /* Stepped backward into the solib dynsym resolver.
4485 Set a breakpoint at its start and continue, then
4486 one more step will take us out. */
4487 struct symtab_and_line sr_sal;
4488 init_sal (&sr_sal);
4489 sr_sal.pc = ecs->stop_func_start;
4490 sr_sal.pspace = get_frame_program_space (frame);
4491 insert_step_resume_breakpoint_at_sal (gdbarch,
4492 sr_sal, null_frame_id);
4493 keep_going (ecs);
4494 return;
4495 }
4496 }
4497
4498 /* If we're in the return path from a shared library trampoline,
4499 we want to proceed through the trampoline when stepping. */
4500 if (gdbarch_in_solib_return_trampoline (gdbarch,
4501 stop_pc, ecs->stop_func_name))
4502 {
4503 /* Determine where this trampoline returns. */
4504 CORE_ADDR real_stop_pc;
4505 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4506
4507 if (debug_infrun)
4508 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into solib return tramp\n");
4509
4510 /* Only proceed through if we know where it's going. */
4511 if (real_stop_pc)
4512 {
4513 /* And put the step-breakpoint there and go until there. */
4514 struct symtab_and_line sr_sal;
4515
4516 init_sal (&sr_sal); /* initialize to zeroes */
4517 sr_sal.pc = real_stop_pc;
4518 sr_sal.section = find_pc_overlay (sr_sal.pc);
4519 sr_sal.pspace = get_frame_program_space (frame);
4520
4521 /* Do not specify what the fp should be when we stop since
4522 on some machines the prologue is where the new fp value
4523 is established. */
4524 insert_step_resume_breakpoint_at_sal (gdbarch,
4525 sr_sal, null_frame_id);
4526
4527 /* Restart without fiddling with the step ranges or
4528 other state. */
4529 keep_going (ecs);
4530 return;
4531 }
4532 }
4533
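/* Look up the source line for the new stop address; the checks that
   follow compare it against the line we were stepping through. */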
4534 stop_pc_sal = find_pc_line (stop_pc, 0);
4535
4536 /* NOTE: tausq/2004-05-24: This if block used to be done before all
4537 the trampoline processing logic, however, there are some trampolines
4538 that have no names, so we should do trampoline handling first. */
4539 if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4540 && ecs->stop_func_name == NULL
4541 && stop_pc_sal.line == 0)
4542 {
4543 if (debug_infrun)
4544 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into undebuggable function\n");
4545
4546 /* The inferior just stepped into, or returned to, an
4547 undebuggable function (where there is no debugging information
4548 and no line number corresponding to the address where the
4549 inferior stopped). Since we want to skip this kind of code,
4550 we keep going until the inferior returns from this
4551 function - unless the user has asked us not to (via
4552 set step-mode) or we no longer know how to get back
4553 to the call site. */
4554 if (step_stop_if_no_debug
4555 || !frame_id_p (frame_unwind_caller_id (frame)))
4556 {
4557 /* If we have no line number and the step-stop-if-no-debug
4558 is set, we stop the step so that the user has a chance to
4559 switch in assembly mode. */
4560 ecs->event_thread->stop_step = 1;
4561 print_stop_reason (END_STEPPING_RANGE, 0);
4562 stop_stepping (ecs);
4563 return;
4564 }
4565 else
4566 {
4567 /* Set a breakpoint at callee's return address (the address
4568 at which the caller will resume). */
4569 insert_step_resume_breakpoint_at_caller (frame);
4570 keep_going (ecs);
4571 return;
4572 }
4573 }
4574
4575 if (ecs->event_thread->step_range_end == 1)
4576 {
4577 /* It is stepi or nexti. We always want to stop stepping after
4578 one instruction. */
4579 if (debug_infrun)
4580 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
4581 ecs->event_thread->stop_step = 1;
4582 print_stop_reason (END_STEPPING_RANGE, 0);
4583 stop_stepping (ecs);
4584 return;
4585 }
4586
4587 if (stop_pc_sal.line == 0)
4588 {
4589 /* We have no line number information. That means to stop
4590 stepping (does this always happen right after one instruction,
4591 when we do "s" in a function with no line numbers,
4592 or can this happen as a result of a return or longjmp?). */
4593 if (debug_infrun)
4594 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
4595 ecs->event_thread->stop_step = 1;
4596 print_stop_reason (END_STEPPING_RANGE, 0);
4597 stop_stepping (ecs);
4598 return;
4599 }
4600
4601 /* Look for "calls" to inlined functions, part one. If the inline
4602 frame machinery detected some skipped call sites, we have entered
4603 a new inline function. */
4604
4605 if (frame_id_eq (get_frame_id (get_current_frame ()),
4606 ecs->event_thread->step_frame_id)
4607 && inline_skipped_frames (ecs->ptid))
4608 {
4609 struct symtab_and_line call_sal;
4610
4611 if (debug_infrun)
4612 fprintf_unfiltered (gdb_stdlog,
4613 "infrun: stepped into inlined function\n");
4614
4615 find_frame_sal (get_current_frame (), &call_sal);
4616
4617 if (ecs->event_thread->step_over_calls != STEP_OVER_ALL)
4618 {
4619 /* For "step", we're going to stop. But if the call site
4620 for this inlined function is on the same source line as
4621 we were previously stepping, go down into the function
4622 first. Otherwise stop at the call site. */
4623
4624 if (call_sal.line == ecs->event_thread->current_line
4625 && call_sal.symtab == ecs->event_thread->current_symtab)
4626 step_into_inline_frame (ecs->ptid);
4627
4628 ecs->event_thread->stop_step = 1;
4629 print_stop_reason (END_STEPPING_RANGE, 0);
4630 stop_stepping (ecs);
4631 return;
4632 }
4633 else
4634 {
4635 /* For "next", we should stop at the call site if it is on a
4636 different source line. Otherwise continue through the
4637 inlined function. */
4638 if (call_sal.line == ecs->event_thread->current_line
4639 && call_sal.symtab == ecs->event_thread->current_symtab)
4640 keep_going (ecs);
4641 else
4642 {
4643 ecs->event_thread->stop_step = 1;
4644 print_stop_reason (END_STEPPING_RANGE, 0);
4645 stop_stepping (ecs);
4646 }
4647 return;
4648 }
4649 }
4650
4651 /* Look for "calls" to inlined functions, part two. If we are still
4652 in the same real function we were stepping through, but we have
4653 to go further up to find the exact frame ID, we are stepping
4654 through a more inlined call beyond its call site. */
4655
4656 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
4657 && !frame_id_eq (get_frame_id (get_current_frame ()),
4658 ecs->event_thread->step_frame_id)
4659 && stepped_in_from (get_current_frame (),
4660 ecs->event_thread->step_frame_id))
4661 {
4662 if (debug_infrun)
4663 fprintf_unfiltered (gdb_stdlog,
4664 "infrun: stepping through inlined function\n");
4665
4666 if (ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4667 keep_going (ecs);
4668 else
4669 {
4670 ecs->event_thread->stop_step = 1;
4671 print_stop_reason (END_STEPPING_RANGE, 0);
4672 stop_stepping (ecs);
4673 }
4674 return;
4675 }
4676
4677 if ((stop_pc == stop_pc_sal.pc)
4678 && (ecs->event_thread->current_line != stop_pc_sal.line
4679 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
4680 {
4681 /* We are at the start of a different line. So stop. Note that
4682 we don't stop if we step into the middle of a different line.
4683 That is said to make things like for (;;) statements work
4684 better. */
4685 if (debug_infrun)
4686 fprintf_unfiltered (gdb_stdlog, "infrun: stepped to a different line\n");
4687 ecs->event_thread->stop_step = 1;
4688 print_stop_reason (END_STEPPING_RANGE, 0);
4689 stop_stepping (ecs);
4690 return;
4691 }
4692
4693 /* We aren't done stepping.
4694
4695 Optimize by setting the stepping range to the line.
4696 (We might not be in the original line, but if we entered a
4697 new line in mid-statement, we continue stepping. This makes
4698 things like for(;;) statements work better.) */
4699
4700 ecs->event_thread->step_range_start = stop_pc_sal.pc;
4701 ecs->event_thread->step_range_end = stop_pc_sal.end;
4702 set_step_info (frame, stop_pc_sal);
4703
4704 if (debug_infrun)
4705 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
4706 keep_going (ecs);
4707 }
4708
4709 /* Is thread TP in the middle of single-stepping? */
4710
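/* A thread counts as stepping if it has an active step range with no
   step-resume breakpoint pending, if it is expected to trap while
   stepping over a breakpoint, if it is finishing a step through the
   dynamic linker after a catchpoint, or if the bpstat machinery asks
   for a single step (e.g. for software watchpoints). */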
4711 static int
4712 currently_stepping (struct thread_info *tp)
4713 {
4714 return ((tp->step_range_end && tp->step_resume_breakpoint == NULL)
4715 || tp->trap_expected
4716 || tp->stepping_through_solib_after_catch
4717 || bpstat_should_step ());
4718 }
4719
4720 /* Returns true if any thread *but* the one passed in "data" is in the
4721 middle of stepping or of handling a "next". */
4722
4723 static int
4724 currently_stepping_or_nexting_callback (struct thread_info *tp, void *data)
4725 {
4726 if (tp == data)
4727 return 0;
4728
4729 return (tp->step_range_end
4730 || tp->trap_expected
4731 || tp->stepping_through_solib_after_catch);
4732 }
4733
4734 /* Inferior has stepped into a subroutine call with source code that
4735 we should not step over. Step to the first line of code in
4736 it. */
4737
4738 static void
4739 handle_step_into_function (struct gdbarch *gdbarch,
4740 struct execution_control_state *ecs)
4741 {
4742 struct symtab *s;
4743 struct symtab_and_line stop_func_sal, sr_sal;
4744
4745 s = find_pc_symtab (stop_pc);
4746 if (s && s->language != language_asm)
4747 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
4748 ecs->stop_func_start);
4749
4750 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
4751 /* Use the step_resume_break to step until the end of the prologue,
4752 even if that involves jumps (as it seems to on the vax under
4753 4.2). */
4754 /* If the prologue ends in the middle of a source line, continue to
4755 the end of that source line (if it is still within the function).
4756 Otherwise, just go to end of prologue. */
4757 if (stop_func_sal.end
4758 && stop_func_sal.pc != ecs->stop_func_start
4759 && stop_func_sal.end < ecs->stop_func_end)
4760 ecs->stop_func_start = stop_func_sal.end;
4761
4762 /* Architectures which require breakpoint adjustment might not be able
4763 to place a breakpoint at the computed address. If so, the test
4764 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
4765 ecs->stop_func_start to an address at which a breakpoint may be
4766 legitimately placed.
4767
4768 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
4769 made, GDB will enter an infinite loop when stepping through
4770 optimized code consisting of VLIW instructions which contain
4771 subinstructions corresponding to different source lines. On
4772 FR-V, it's not permitted to place a breakpoint on any but the
4773 first subinstruction of a VLIW instruction. When a breakpoint is
4774 set, GDB will adjust the breakpoint address to the beginning of
4775 the VLIW instruction. Thus, we need to make the corresponding
4776 adjustment here when computing the stop address. */
4777
4778 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
4779 {
4780 ecs->stop_func_start
4781 = gdbarch_adjust_breakpoint_address (gdbarch,
4782 ecs->stop_func_start);
4783 }
4784
4785 if (ecs->stop_func_start == stop_pc)
4786 {
4787 /* We are already there: stop now. */
4788 ecs->event_thread->stop_step = 1;
4789 print_stop_reason (END_STEPPING_RANGE, 0);
4790 stop_stepping (ecs);
4791 return;
4792 }
4793 else
4794 {
4795 /* Put the step-breakpoint there and go until there. */
4796 init_sal (&sr_sal); /* initialize to zeroes */
4797 sr_sal.pc = ecs->stop_func_start;
4798 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
4799 sr_sal.pspace = get_frame_program_space (get_current_frame ());
4800
4801 /* Do not specify what the fp should be when we stop since on
4802 some machines the prologue is where the new fp value is
4803 established. */
4804 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
4805
4806 /* And make sure stepping stops right away then. */
4807 ecs->event_thread->step_range_end = ecs->event_thread->step_range_start;
4808 }
4809 keep_going (ecs);
4810 }
4811
4812 /* Inferior has stepped backward into a subroutine call with source
4813 code that we should not step over. Step to the beginning of the
4814 last line of code in it. */
4815
4816 static void
4817 handle_step_into_function_backward (struct gdbarch *gdbarch,
4818 struct execution_control_state *ecs)
4819 {
4820 struct symtab *s;
4821 struct symtab_and_line stop_func_sal, sr_sal;
4822
4823 s = find_pc_symtab (stop_pc);
4824 if (s && s->language != language_asm)
4825 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
4826 ecs->stop_func_start);
4827
4828 stop_func_sal = find_pc_line (stop_pc, 0);
4829
4830 /* OK, we're just going to keep stepping here. */
4831 if (stop_func_sal.pc == stop_pc)
4832 {
4833 /* We're there already. Just stop stepping now. */
4834 ecs->event_thread->stop_step = 1;
4835 print_stop_reason (END_STEPPING_RANGE, 0);
4836 stop_stepping (ecs);
4837 }
4838 else
4839 {
4840 /* Else just reset the step range and keep going.
4841 No step-resume breakpoint, they don't work for
4842 epilogues, which can have multiple entry paths. */
4843 ecs->event_thread->step_range_start = stop_func_sal.pc;
4844 ecs->event_thread->step_range_end = stop_func_sal.end;
4845 keep_going (ecs);
4846 }
4847 return;
4848 }
4849
4850 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
4851 This is used both when stepping into functions and to skip over code. */
4852
4853 static void
4854 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
4855 struct symtab_and_line sr_sal,
4856 struct frame_id sr_id)
4857 {
4858 /* There should never be more than one step-resume or longjmp-resume
4859 breakpoint per thread, so we should never be setting a new
4860 step_resume_breakpoint when one is already active. */
4861 gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL);
4862
4863 if (debug_infrun)
4864 fprintf_unfiltered (gdb_stdlog,
4865 "infrun: inserting step-resume breakpoint at %s\n",
4866 paddress (gdbarch, sr_sal.pc));
4867
4868 inferior_thread ()->step_resume_breakpoint
4869 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, bp_step_resume);
4870 }
4871
4872 /* Insert a "step-resume breakpoint" at RETURN_FRAME.pc. This is used
4873 to skip a potential signal handler.
4874
4875 This is called with the interrupted function's frame. The signal
4876 handler, when it returns, will resume the interrupted function at
4877 RETURN_FRAME.pc. */
4878
4879 static void
4880 insert_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
4881 {
4882 struct symtab_and_line sr_sal;
4883 struct gdbarch *gdbarch;
4884
4885 gdb_assert (return_frame != NULL);
4886 init_sal (&sr_sal); /* initialize to zeros */
4887
4888 gdbarch = get_frame_arch (return_frame);
4889 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
4890 sr_sal.section = find_pc_overlay (sr_sal.pc);
4891 sr_sal.pspace = get_frame_program_space (return_frame);
4892
4893 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
4894 get_stack_frame_id (return_frame));
4895 }
4896
4897 /* Similar to insert_step_resume_breakpoint_at_frame, except
4898 it inserts a breakpoint at the previous frame's PC. This is used to
4899 skip a function after stepping into it (for "next" or if the called
4900 function has no debugging information).
4901
4902 The current function has almost always been reached by single
4903 stepping a call or return instruction. NEXT_FRAME belongs to the
4904 current function, and the breakpoint will be set at the caller's
4905 resume address.
4906
4907 This is a separate function rather than reusing
4908 insert_step_resume_breakpoint_at_frame in order to avoid
4909 get_prev_frame, which may stop prematurely (see the implementation
4910 of frame_unwind_caller_id for an example). */
4911
4912 static void
4913 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
4914 {
4915 struct symtab_and_line sr_sal;
4916 struct gdbarch *gdbarch;
4917
4918 /* We shouldn't have gotten here if we don't know where the call site
4919 is. */
4920 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
4921
4922 init_sal (&sr_sal); /* initialize to zeros */
4923
4924 gdbarch = frame_unwind_caller_arch (next_frame);
4925 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
4926 frame_unwind_caller_pc (next_frame));
4927 sr_sal.section = find_pc_overlay (sr_sal.pc);
4928 sr_sal.pspace = frame_unwind_program_space (next_frame);
4929
4930 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
4931 frame_unwind_caller_id (next_frame));
4932 }
4933
4934 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
4935 new breakpoint at the target of a jmp_buf. The handling of
4936 longjmp-resume uses the same mechanisms used for handling
4937 "step-resume" breakpoints. */
4938
4939 static void
4940 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
4941 {
4942 /* There should never be more than one step-resume or longjmp-resume
4943 breakpoint per thread, so we should never be setting a new
4944 longjmp_resume_breakpoint when one is already active. */
4945 gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL);
4946
4947 if (debug_infrun)
4948 fprintf_unfiltered (gdb_stdlog,
4949 "infrun: inserting longjmp-resume breakpoint at %s\n",
4950 paddress (gdbarch, pc));
4951
4952 inferior_thread ()->step_resume_breakpoint =
4953 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
4954 }
4955
4956 static void
4957 stop_stepping (struct execution_control_state *ecs)
4958 {
4959 if (debug_infrun)
4960 fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n");
4961
4962 /* Let callers know we don't want to wait for the inferior anymore. */
4963 ecs->wait_some_more = 0;
4964 }
4965
4966 /* This function handles various cases where we need to continue
4967 waiting for the inferior. */
4968 /* (Used to be the keep_going: label in the old wait_for_inferior) */
4969
4970 static void
4971 keep_going (struct execution_control_state *ecs)
4972 {
4973 /* Make sure normal_stop is called if we get a QUIT handled before
4974 reaching resume. */
4975 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
4976
4977 /* Save the pc before execution, to compare with pc after stop. */
4978 ecs->event_thread->prev_pc
4979 = regcache_read_pc (get_thread_regcache (ecs->ptid));
4980
4981 /* If we got here, it means we should keep running the
4982 inferior and not return to the debugger. */
4983
4984 if (ecs->event_thread->trap_expected
4985 && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP)
4986 {
4987 /* We took a signal (which we are supposed to pass through to
4988 the inferior, else we'd not get here) and we haven't yet
4989 gotten our trap. Simply continue. */
4990
4991 discard_cleanups (old_cleanups);
4992 resume (currently_stepping (ecs->event_thread),
4993 ecs->event_thread->stop_signal);
4994 }
4995 else
4996 {
4997 /* Either the trap was not expected, but we are continuing
4998 anyway (the user asked that this signal be passed to the
4999 child)
5000 -- or --
5001 The signal was SIGTRAP, e.g. it was our signal, but we
5002 decided we should resume from it.
5003
5004 We're going to run this baby now!
5005
5006 Note that insert_breakpoints won't try to re-insert
5007 already inserted breakpoints. Therefore, we don't
5008 care if breakpoints were already inserted, or not. */
5009
5010 if (ecs->event_thread->stepping_over_breakpoint)
5011 {
5012 struct regcache *thread_regcache = get_thread_regcache (ecs->ptid);
5013 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
5014 /* Since we can't do a displaced step, we have to remove
5015 the breakpoint while we step it. To keep things
5016 simple, we remove them all. */
5017 remove_breakpoints ();
5018 }
5019 else
5020 {
5021 struct gdb_exception e;
5022 /* Stop stepping when inserting breakpoints
5023 has failed. */
5024 TRY_CATCH (e, RETURN_MASK_ERROR)
5025 {
5026 insert_breakpoints ();
5027 }
5028 if (e.reason < 0)
5029 {
5030 exception_print (gdb_stderr, e);
5031 stop_stepping (ecs);
5032 return;
5033 }
5034 }
5035
5036 ecs->event_thread->trap_expected = ecs->event_thread->stepping_over_breakpoint;
5037
5038 /* Do not deliver SIGNAL_TRAP (except when the user explicitly
5039 specifies that such a signal should be delivered to the
5040 target program).
5041
5042 Typically, this would occur when a user is debugging a
5043 target monitor on a simulator: the target monitor sets a
5044 breakpoint; the simulator encounters this breakpoint and
5045 halts the simulation, handing control to GDB; GDB, noting
5046 that the breakpoint isn't valid, returns control back to the
5047 simulator; the simulator then delivers the hardware
5048 equivalent of a SIGNAL_TRAP to the program being debugged. */
5049
5050 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
5051 && !signal_program[ecs->event_thread->stop_signal])
5052 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
5053
5054 discard_cleanups (old_cleanups);
5055 resume (currently_stepping (ecs->event_thread),
5056 ecs->event_thread->stop_signal);
5057 }
5058
5059 prepare_to_wait (ecs);
5060 }
5061
5062 /* This function normally comes after a resume, before
5063 handle_inferior_event exits. It takes care of any last bits of
5064 housekeeping, and sets the all-important wait_some_more flag. */
5065
5066 static void
5067 prepare_to_wait (struct execution_control_state *ecs)
5068 {
5069 if (debug_infrun)
5070 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
5071
5072 /* This is the old end of the while loop. Let everybody know we
5073 want to wait for the inferior some more and get called again
5074 soon. */
5075 ecs->wait_some_more = 1;
5076 }
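
/* A rough sketch of how the callers drive the WAIT_SOME_MORE flag
   (simplified from wait_for_inferior / fetch_inferior_event; the real
   loops add hooks, cleanups and thread bookkeeping):

       struct execution_control_state ecss, *ecs = &ecss;

       do
         {
           ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
           handle_inferior_event (ecs);
         }
       while (ecs->wait_some_more);

   prepare_to_wait keeps this loop running; stop_stepping (above)
   clears the flag so the loop exits and the stop is reported.  */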
5077
5078 /* Print why the inferior has stopped. We always print something when
5079 the inferior exits, or receives a signal. The rest of the cases are
5080 dealt with later on in normal_stop() and print_it_typical(). Ideally
5081 there should be a call to this function from handle_inferior_event()
5082 each time stop_stepping() is called. */
5083 static void
5084 print_stop_reason (enum inferior_stop_reason stop_reason, int stop_info)
5085 {
5086 switch (stop_reason)
5087 {
5088 case END_STEPPING_RANGE:
5089 /* We are done with a step/next/si/ni command. */
5090 /* For now print nothing. */
5091 /* Print a message only if not in the middle of doing a "step n"
5092 operation for n > 1 */
5093 if (!inferior_thread ()->step_multi
5094 || !inferior_thread ()->stop_step)
5095 if (ui_out_is_mi_like_p (uiout))
5096 ui_out_field_string
5097 (uiout, "reason",
5098 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
5099 break;
5100 case SIGNAL_EXITED:
5101 /* The inferior was terminated by a signal. */
5102 annotate_signalled ();
5103 if (ui_out_is_mi_like_p (uiout))
5104 ui_out_field_string
5105 (uiout, "reason",
5106 async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
5107 ui_out_text (uiout, "\nProgram terminated with signal ");
5108 annotate_signal_name ();
5109 ui_out_field_string (uiout, "signal-name",
5110 target_signal_to_name (stop_info));
5111 annotate_signal_name_end ();
5112 ui_out_text (uiout, ", ");
5113 annotate_signal_string ();
5114 ui_out_field_string (uiout, "signal-meaning",
5115 target_signal_to_string (stop_info));
5116 annotate_signal_string_end ();
5117 ui_out_text (uiout, ".\n");
5118 ui_out_text (uiout, "The program no longer exists.\n");
5119 break;
5120 case EXITED:
5121 /* The inferior program is finished. */
5122 annotate_exited (stop_info);
5123 if (stop_info)
5124 {
5125 if (ui_out_is_mi_like_p (uiout))
5126 ui_out_field_string (uiout, "reason",
5127 async_reason_lookup (EXEC_ASYNC_EXITED));
5128 ui_out_text (uiout, "\nProgram exited with code ");
5129 ui_out_field_fmt (uiout, "exit-code", "0%o",
5130 (unsigned int) stop_info);
5131 ui_out_text (uiout, ".\n");
5132 }
5133 else
5134 {
5135 if (ui_out_is_mi_like_p (uiout))
5136 ui_out_field_string
5137 (uiout, "reason",
5138 async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
5139 ui_out_text (uiout, "\nProgram exited normally.\n");
5140 }
5141 /* Support the --return-child-result option. */
5142 return_child_result_value = stop_info;
5143 break;
5144 case SIGNAL_RECEIVED:
5145 /* Signal received. The signal table tells us to print about
5146 it. */
5147 annotate_signal ();
5148
5149 if (stop_info == TARGET_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
5150 {
5151 struct thread_info *t = inferior_thread ();
5152
5153 ui_out_text (uiout, "\n[");
5154 ui_out_field_string (uiout, "thread-name",
5155 target_pid_to_str (t->ptid));
5156 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
5157 ui_out_text (uiout, " stopped");
5158 }
5159 else
5160 {
5161 ui_out_text (uiout, "\nProgram received signal ");
5162 annotate_signal_name ();
5163 if (ui_out_is_mi_like_p (uiout))
5164 ui_out_field_string
5165 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
5166 ui_out_field_string (uiout, "signal-name",
5167 target_signal_to_name (stop_info));
5168 annotate_signal_name_end ();
5169 ui_out_text (uiout, ", ");
5170 annotate_signal_string ();
5171 ui_out_field_string (uiout, "signal-meaning",
5172 target_signal_to_string (stop_info));
5173 annotate_signal_string_end ();
5174 }
5175 ui_out_text (uiout, ".\n");
5176 break;
5177 case NO_HISTORY:
5178 /* Reverse execution: target ran out of history info. */
5179 ui_out_text (uiout, "\nNo more reverse-execution history.\n");
5180 break;
5181 default:
5182 internal_error (__FILE__, __LINE__,
5183 _("print_stop_reason: unrecognized enum value"));
5184 break;
5185 }
5186 }
5187 \f
5188
5189 /* Here to return control to GDB when the inferior stops for real.
5190 Print appropriate messages, remove breakpoints, give terminal our modes.
5191
5192 STOP_PRINT_FRAME nonzero means print the executing frame
5193 (pc, function, args, file, line number and line text).
5194 BREAKPOINTS_FAILED nonzero means stop was due to error
5195 attempting to insert breakpoints. */
5196
5197 void
5198 normal_stop (void)
5199 {
5200 struct target_waitstatus last;
5201 ptid_t last_ptid;
5202 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
5203
5204 get_last_target_status (&last_ptid, &last);
5205
5206 /* If an exception is thrown from this point on, make sure to
5207 propagate GDB's knowledge of the executing state to the
5208 frontend/user running state. A QUIT is an easy exception to see
5209 here, so do this before any filtered output. */
5210 if (!non_stop)
5211 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
5212 else if (last.kind != TARGET_WAITKIND_SIGNALLED
5213 && last.kind != TARGET_WAITKIND_EXITED)
5214 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
5215
5216 /* In non-stop mode, we don't want GDB to switch threads behind the
5217 user's back, to avoid races where the user is typing a command to
5218 apply to thread x, but GDB switches to thread y before the user
5219 finishes entering the command. */
5220
5221 /* As with the notification of thread events, we want to delay
5222 notifying the user that we've switched thread context until
5223 the inferior actually stops.
5224
5225 There's no point in saying anything if the inferior has exited.
5226 Note that SIGNALLED here means "exited with a signal", not
5227 "received a signal". */
5228 if (!non_stop
5229 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
5230 && target_has_execution
5231 && last.kind != TARGET_WAITKIND_SIGNALLED
5232 && last.kind != TARGET_WAITKIND_EXITED)
5233 {
5234 target_terminal_ours_for_output ();
5235 printf_filtered (_("[Switching to %s]\n"),
5236 target_pid_to_str (inferior_ptid));
5237 annotate_thread_changed ();
5238 previous_inferior_ptid = inferior_ptid;
5239 }
5240
5241 if (!breakpoints_always_inserted_mode () && target_has_execution)
5242 {
5243 if (remove_breakpoints ())
5244 {
5245 target_terminal_ours_for_output ();
5246 printf_filtered (_("\
5247 Cannot remove breakpoints because program is no longer writable.\n\
5248 Further execution is probably impossible.\n"));
5249 }
5250 }
5251
5252 /* If an auto-display called a function and that got a signal,
5253 delete that auto-display to avoid an infinite recursion. */
5254
5255 if (stopped_by_random_signal)
5256 disable_current_display ();
5257
5258 /* Don't print a message if in the middle of doing a "step n"
5259 operation for n > 1 */
5260 if (target_has_execution
5261 && last.kind != TARGET_WAITKIND_SIGNALLED
5262 && last.kind != TARGET_WAITKIND_EXITED
5263 && inferior_thread ()->step_multi
5264 && inferior_thread ()->stop_step)
5265 goto done;
5266
5267 target_terminal_ours ();
5268
5269 /* Set the current source location. This will also happen if we
5270 display the frame below, but the current SAL will be incorrect
5271 during a user hook-stop function. */
5272 if (has_stack_frames () && !stop_stack_dummy)
5273 set_current_sal_from_frame (get_current_frame (), 1);
5274
5275 /* Let the user/frontend see the threads as stopped. */
5276 do_cleanups (old_chain);
5277
5278 /* Look up the hook_stop and run it (CLI internally handles problem
5279 of stop_command's pre-hook not existing). */
5280 if (stop_command)
5281 catch_errors (hook_stop_stub, stop_command,
5282 "Error while running hook_stop:\n", RETURN_MASK_ALL);
5283
5284 if (!has_stack_frames ())
5285 goto done;
5286
5287 if (last.kind == TARGET_WAITKIND_SIGNALLED
5288 || last.kind == TARGET_WAITKIND_EXITED)
5289 goto done;
5290
5291 /* Select innermost stack frame - i.e., current frame is frame 0,
5292 and current location is based on that.
5293 Don't do this on return from a stack dummy routine,
5294 or if the program has exited. */
5295
5296 if (!stop_stack_dummy)
5297 {
5298 select_frame (get_current_frame ());
5299
5300 /* Print current location without a level number, if
5301 we have changed functions or hit a breakpoint.
5302 Print source line if we have one.
5303 bpstat_print() contains the logic deciding in detail
5304 what to print, based on the event(s) that just occurred. */
5305
5306 /* If --batch-silent is enabled then there's no need to print the current
5307 source location, and trying to do so risks causing an error message about
5308 missing source files. */
5309 if (stop_print_frame && !batch_silent)
5310 {
5311 int bpstat_ret;
5312 int source_flag;
5313 int do_frame_printing = 1;
5314 struct thread_info *tp = inferior_thread ();
5315
5316 bpstat_ret = bpstat_print (tp->stop_bpstat);
5317 switch (bpstat_ret)
5318 {
5319 case PRINT_UNKNOWN:
5320 /* If we had hit a shared library event breakpoint,
5321 bpstat_print would print out this message. If we hit
5322 an OS-level shared library event, do the same
5323 thing. */
5324 if (last.kind == TARGET_WAITKIND_LOADED)
5325 {
5326 printf_filtered (_("Stopped due to shared library event\n"));
5327 source_flag = SRC_LINE; /* something bogus */
5328 do_frame_printing = 0;
5329 break;
5330 }
5331
5332 /* FIXME: cagney/2002-12-01: Given that a frame ID does
5333 (or should) carry around the function and does (or
5334 should) use that when doing a frame comparison. */
5335 if (tp->stop_step
5336 && frame_id_eq (tp->step_frame_id,
5337 get_frame_id (get_current_frame ()))
5338 && step_start_function == find_pc_function (stop_pc))
5339 source_flag = SRC_LINE; /* finished step, just print source line */
5340 else
5341 source_flag = SRC_AND_LOC; /* print location and source line */
5342 break;
5343 case PRINT_SRC_AND_LOC:
5344 source_flag = SRC_AND_LOC; /* print location and source line */
5345 break;
5346 case PRINT_SRC_ONLY:
5347 source_flag = SRC_LINE;
5348 break;
5349 case PRINT_NOTHING:
5350 source_flag = SRC_LINE; /* something bogus */
5351 do_frame_printing = 0;
5352 break;
5353 default:
5354 internal_error (__FILE__, __LINE__, _("Unknown value."));
5355 }
5356
5357 /* The behavior of this routine with respect to the source
5358 flag is:
5359 SRC_LINE: Print only source line
5360 LOCATION: Print only location
5361 SRC_AND_LOC: Print location and source line */
5362 if (do_frame_printing)
5363 print_stack_frame (get_selected_frame (NULL), 0, source_flag);
5364
5365 /* Display the auto-display expressions. */
5366 do_displays ();
5367 }
5368 }
5369
5370 /* Save the function value return registers, if we care.
5371 We might be about to restore their previous contents. */
5372 if (inferior_thread ()->proceed_to_finish)
5373 {
5374 /* This should not be necessary. */
5375 if (stop_registers)
5376 regcache_xfree (stop_registers);
5377
5378 /* NB: The copy goes through to the target picking up the value of
5379 all the registers. */
5380 stop_registers = regcache_dup (get_current_regcache ());
5381 }
5382
5383 if (stop_stack_dummy)
5384 {
5385 /* Pop the empty frame that contains the stack dummy.
5386 This also restores inferior state prior to the call
5387 (struct inferior_thread_state). */
5388 struct frame_info *frame = get_current_frame ();
5389 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
5390 frame_pop (frame);
5391 /* frame_pop() calls reinit_frame_cache as the last thing it does
5392 which means there's currently no selected frame. We don't need
5393 to re-establish a selected frame if the dummy call returns normally,
5394 that will be done by restore_inferior_status. However, we do have
5395 to handle the case where the dummy call is returning after being
5396 stopped (e.g. the dummy call previously hit a breakpoint). We
5397 can't know which case we have so just always re-establish a
5398 selected frame here. */
5399 select_frame (get_current_frame ());
5400 }
5401
5402 done:
5403 annotate_stopped ();
5404
5405 /* Suppress the stop observer if we're in the middle of:
5406
5407 - a step n (n > 1), as there are still more steps to be done.
5408
5409 - a "finish" command, as the observer will be called in
5410 finish_command_continuation, so it can include the inferior
5411 function's return value.
5412
5413 - calling an inferior function, as we pretend the inferior didn't
5414 run at all. The return value of the call is handled by the
5415 expression evaluator, through call_function_by_hand. */
5416
5417 if (!target_has_execution
5418 || last.kind == TARGET_WAITKIND_SIGNALLED
5419 || last.kind == TARGET_WAITKIND_EXITED
5420 || (!inferior_thread ()->step_multi
5421 && !(inferior_thread ()->stop_bpstat
5422 && inferior_thread ()->proceed_to_finish)
5423 && !inferior_thread ()->in_infcall))
5424 {
5425 if (!ptid_equal (inferior_ptid, null_ptid))
5426 observer_notify_normal_stop (inferior_thread ()->stop_bpstat,
5427 stop_print_frame);
5428 else
5429 observer_notify_normal_stop (NULL, stop_print_frame);
5430 }
5431
5432 if (target_has_execution)
5433 {
5434 if (last.kind != TARGET_WAITKIND_SIGNALLED
5435 && last.kind != TARGET_WAITKIND_EXITED)
5436 /* Delete the breakpoint we stopped at, if it wants to be deleted.
5437 Delete any breakpoint that is to be deleted at the next stop. */
5438 breakpoint_auto_delete (inferior_thread ()->stop_bpstat);
5439 }
5440
5441 /* Try to get rid of automatically added inferiors that are no
5442 longer needed. Keeping those around slows down things linearly.
5443 Note that this never removes the current inferior. */
5444 prune_inferiors ();
5445 }
5446
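/* Helper for catch_errors: run the pre-hook of the "stop" pseudo
   command CMD (used by normal_stop above). */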
5447 static int
5448 hook_stop_stub (void *cmd)
5449 {
5450 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
5451 return (0);
5452 }
5453 \f
5454 int
5455 signal_stop_state (int signo)
5456 {
5457 return signal_stop[signo];
5458 }
5459
5460 int
5461 signal_print_state (int signo)
5462 {
5463 return signal_print[signo];
5464 }
5465
5466 int
5467 signal_pass_state (int signo)
5468 {
5469 return signal_program[signo];
5470 }
5471
5472 int
5473 signal_stop_update (int signo, int state)
5474 {
5475 int ret = signal_stop[signo];
5476 signal_stop[signo] = state;
5477 return ret;
5478 }
5479
5480 int
5481 signal_print_update (int signo, int state)
5482 {
5483 int ret = signal_print[signo];
5484 signal_print[signo] = state;
5485 return ret;
5486 }
5487
5488 int
5489 signal_pass_update (int signo, int state)
5490 {
5491 int ret = signal_program[signo];
5492 signal_program[signo] = state;
5493 return ret;
5494 }
5495
5496 static void
5497 sig_print_header (void)
5498 {
5499 printf_filtered (_("\
5500 Signal Stop\tPrint\tPass to program\tDescription\n"));
5501 }
5502
5503 static void
5504 sig_print_info (enum target_signal oursig)
5505 {
5506 const char *name = target_signal_to_name (oursig);
5507 int name_padding = 13 - strlen (name);
5508
5509 if (name_padding <= 0)
5510 name_padding = 0;
5511
5512 printf_filtered ("%s", name);
5513 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
5514 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
5515 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
5516 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
5517 printf_filtered ("%s\n", target_signal_to_string (oursig));
5518 }
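
/* For example, "info signals SIGINT" prints something along these
   lines (illustrative, using the default settings established in
   _initialize_infrun below):

       Signal        Stop      Print   Pass to program  Description
       SIGINT        Yes       Yes     No               Interrupt
*/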
5519
5520 /* Specify how various signals in the inferior should be handled. */
5521
5522 static void
5523 handle_command (char *args, int from_tty)
5524 {
5525 char **argv;
5526 int digits, wordlen;
5527 int sigfirst, signum, siglast;
5528 enum target_signal oursig;
5529 int allsigs;
5530 int nsigs;
5531 unsigned char *sigs;
5532 struct cleanup *old_chain;
5533
5534 if (args == NULL)
5535 {
5536 error_no_arg (_("signal to handle"));
5537 }
5538
5539 /* Allocate and zero an array of flags for which signals to handle. */
5540
5541 nsigs = (int) TARGET_SIGNAL_LAST;
5542 sigs = (unsigned char *) alloca (nsigs);
5543 memset (sigs, 0, nsigs);
5544
5545 /* Break the command line up into args. */
5546
5547 argv = gdb_buildargv (args);
5548 old_chain = make_cleanup_freeargv (argv);
5549
5550 /* Walk through the args, looking for signal numbers, signal names, and
5551 actions. Signal numbers and signal names may be interspersed with
5552 actions, with the actions being performed for all signals cumulatively
5553 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
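
/* For example (illustrative command lines, not taken from a real
   session):

       handle SIGUSR1 nostop noprint pass
       handle 14-15 stop print
       handle all nopass  */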
5554
5555 while (*argv != NULL)
5556 {
5557 wordlen = strlen (*argv);
5558 for (digits = 0; isdigit ((*argv)[digits]); digits++)
5559 {;
5560 }
5561 allsigs = 0;
5562 sigfirst = siglast = -1;
5563
5564 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
5565 {
5566 /* Apply action to all signals except those used by the
5567 debugger. Silently skip those. */
5568 allsigs = 1;
5569 sigfirst = 0;
5570 siglast = nsigs - 1;
5571 }
5572 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
5573 {
5574 SET_SIGS (nsigs, sigs, signal_stop);
5575 SET_SIGS (nsigs, sigs, signal_print);
5576 }
5577 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
5578 {
5579 UNSET_SIGS (nsigs, sigs, signal_program);
5580 }
5581 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
5582 {
5583 SET_SIGS (nsigs, sigs, signal_print);
5584 }
5585 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
5586 {
5587 SET_SIGS (nsigs, sigs, signal_program);
5588 }
5589 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
5590 {
5591 UNSET_SIGS (nsigs, sigs, signal_stop);
5592 }
5593 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
5594 {
5595 SET_SIGS (nsigs, sigs, signal_program);
5596 }
5597 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
5598 {
5599 UNSET_SIGS (nsigs, sigs, signal_print);
5600 UNSET_SIGS (nsigs, sigs, signal_stop);
5601 }
5602 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
5603 {
5604 UNSET_SIGS (nsigs, sigs, signal_program);
5605 }
5606 else if (digits > 0)
5607 {
5608 /* It is numeric. The numeric signal refers to our own
5609 internal signal numbering from target.h, not to host/target
5610 signal number. This is a feature; users really should be
5611 using symbolic names anyway, and the common ones like
5612 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
5613
5614 sigfirst = siglast = (int)
5615 target_signal_from_command (atoi (*argv));
5616 if ((*argv)[digits] == '-')
5617 {
5618 siglast = (int)
5619 target_signal_from_command (atoi ((*argv) + digits + 1));
5620 }
5621 if (sigfirst > siglast)
5622 {
5623 /* Bet he didn't figure we'd think of this case... */
5624 signum = sigfirst;
5625 sigfirst = siglast;
5626 siglast = signum;
5627 }
5628 }
5629 else
5630 {
5631 oursig = target_signal_from_name (*argv);
5632 if (oursig != TARGET_SIGNAL_UNKNOWN)
5633 {
5634 sigfirst = siglast = (int) oursig;
5635 }
5636 else
5637 {
5638 /* Not a number and not a recognized flag word => complain. */
5639 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
5640 }
5641 }
5642
5643 /* If any signal numbers or symbol names were found, set flags for
5644 which signals to apply actions to. */
5645
5646 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
5647 {
5648 switch ((enum target_signal) signum)
5649 {
5650 case TARGET_SIGNAL_TRAP:
5651 case TARGET_SIGNAL_INT:
5652 if (!allsigs && !sigs[signum])
5653 {
5654 if (query (_("%s is used by the debugger.\n\
5655 Are you sure you want to change it? "), target_signal_to_name ((enum target_signal) signum)))
5656 {
5657 sigs[signum] = 1;
5658 }
5659 else
5660 {
5661 printf_unfiltered (_("Not confirmed, unchanged.\n"));
5662 gdb_flush (gdb_stdout);
5663 }
5664 }
5665 break;
5666 case TARGET_SIGNAL_0:
5667 case TARGET_SIGNAL_DEFAULT:
5668 case TARGET_SIGNAL_UNKNOWN:
5669 /* Make sure that "all" doesn't print these. */
5670 break;
5671 default:
5672 sigs[signum] = 1;
5673 break;
5674 }
5675 }
5676
5677 argv++;
5678 }
5679
5680 for (signum = 0; signum < nsigs; signum++)
5681 if (sigs[signum])
5682 {
5683 target_notice_signals (inferior_ptid);
5684
5685 if (from_tty)
5686 {
5687 /* Show the results. */
5688 sig_print_header ();
5689 for (; signum < nsigs; signum++)
5690 if (sigs[signum])
5691 sig_print_info (signum);
5692 }
5693
5694 break;
5695 }
5696
5697 do_cleanups (old_chain);
5698 }
5699
5700 static void
5701 xdb_handle_command (char *args, int from_tty)
5702 {
5703 char **argv;
5704 struct cleanup *old_chain;
5705
5706 if (args == NULL)
5707 error_no_arg (_("xdb command"));
5708
5709 /* Break the command line up into args. */
5710
5711 argv = gdb_buildargv (args);
5712 old_chain = make_cleanup_freeargv (argv);
5713 if (argv[1] != (char *) NULL)
5714 {
5715 char *argBuf;
5716 int bufLen;
5717
5718 bufLen = strlen (argv[0]) + 20;
5719 argBuf = (char *) xmalloc (bufLen);
5720 if (argBuf)
5721 {
5722 int validFlag = 1;
5723 enum target_signal oursig;
5724
5725 oursig = target_signal_from_name (argv[0]);
5726 memset (argBuf, 0, bufLen);
5727 if (strcmp (argv[1], "Q") == 0)
5728 sprintf (argBuf, "%s %s", argv[0], "noprint");
5729 else
5730 {
5731 if (strcmp (argv[1], "s") == 0)
5732 {
5733 if (!signal_stop[oursig])
5734 sprintf (argBuf, "%s %s", argv[0], "stop");
5735 else
5736 sprintf (argBuf, "%s %s", argv[0], "nostop");
5737 }
5738 else if (strcmp (argv[1], "i") == 0)
5739 {
5740 if (!signal_program[oursig])
5741 sprintf (argBuf, "%s %s", argv[0], "pass");
5742 else
5743 sprintf (argBuf, "%s %s", argv[0], "nopass");
5744 }
5745 else if (strcmp (argv[1], "r") == 0)
5746 {
5747 if (!signal_print[oursig])
5748 sprintf (argBuf, "%s %s", argv[0], "print");
5749 else
5750 sprintf (argBuf, "%s %s", argv[0], "noprint");
5751 }
5752 else
5753 validFlag = 0;
5754 }
5755 if (validFlag)
5756 handle_command (argBuf, from_tty);
5757 else
5758 printf_filtered (_("Invalid signal handling flag.\n"));
5759 if (argBuf)
5760 xfree (argBuf);
5761 }
5762 }
5763 do_cleanups (old_chain);
5764 }
5765
5766 /* Print current contents of the tables set by the handle command.
5767 It is possible we should just be printing signals actually used
5768 by the current target (but for things to work right when switching
5769 targets, all signals should be in the signal tables). */
5770
5771 static void
5772 signals_info (char *signum_exp, int from_tty)
5773 {
5774 enum target_signal oursig;
5775 sig_print_header ();
5776
5777 if (signum_exp)
5778 {
5779 /* First see if this is a symbol name. */
5780 oursig = target_signal_from_name (signum_exp);
5781 if (oursig == TARGET_SIGNAL_UNKNOWN)
5782 {
5783 /* No, try numeric. */
5784 oursig =
5785 target_signal_from_command (parse_and_eval_long (signum_exp));
5786 }
5787 sig_print_info (oursig);
5788 return;
5789 }
5790
5791 printf_filtered ("\n");
5792 /* These ugly casts brought to you by the native VAX compiler. */
5793 for (oursig = TARGET_SIGNAL_FIRST;
5794 (int) oursig < (int) TARGET_SIGNAL_LAST;
5795 oursig = (enum target_signal) ((int) oursig + 1))
5796 {
5797 QUIT;
5798
5799 if (oursig != TARGET_SIGNAL_UNKNOWN
5800 && oursig != TARGET_SIGNAL_DEFAULT && oursig != TARGET_SIGNAL_0)
5801 sig_print_info (oursig);
5802 }
5803
5804 printf_filtered (_("\nUse the \"handle\" command to change these tables.\n"));
5805 }
5806
5807 /* The $_siginfo convenience variable is a bit special. We don't know
5808 for sure the type of the value until we actually have a chance to
5809 fetch the data. The type can change depending on gdbarch, so it is
5810 also dependent on which thread you have selected. We handle this by:
5811
5812 1. making $_siginfo be an internalvar that creates a new value on
5813 access.
5814
5815 2. making the value of $_siginfo be an lval_computed value. */
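
/* For example, assuming a target and gdbarch that provide a siginfo
   type, a user might do (illustrative, not output of a real session):

       (gdb) print $_siginfo.si_signo
       (gdb) print $_siginfo._sifields._sigfault.si_addr

   The value is only materialized when it is read or written, via the
   lval_computed hooks defined below.  */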
5816
5817 /* This function implements the lval_computed support for reading a
5818 $_siginfo value. */
5819
5820 static void
5821 siginfo_value_read (struct value *v)
5822 {
5823 LONGEST transferred;
5824
5825 transferred =
5826 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
5827 NULL,
5828 value_contents_all_raw (v),
5829 value_offset (v),
5830 TYPE_LENGTH (value_type (v)));
5831
5832 if (transferred != TYPE_LENGTH (value_type (v)))
5833 error (_("Unable to read siginfo"));
5834 }
5835
5836 /* This function implements the lval_computed support for writing a
5837 $_siginfo value. */
5838
5839 static void
5840 siginfo_value_write (struct value *v, struct value *fromval)
5841 {
5842 LONGEST transferred;
5843
5844 transferred = target_write (&current_target,
5845 TARGET_OBJECT_SIGNAL_INFO,
5846 NULL,
5847 value_contents_all_raw (fromval),
5848 value_offset (v),
5849 TYPE_LENGTH (value_type (fromval)));
5850
5851 if (transferred != TYPE_LENGTH (value_type (fromval)))
5852 error (_("Unable to write siginfo"));
5853 }
5854
5855 static struct lval_funcs siginfo_value_funcs =
5856 {
5857 siginfo_value_read,
5858 siginfo_value_write
5859 };
5860
5861 /* Return a new value with the correct type for the siginfo object of
5862 the current thread using architecture GDBARCH. Return a void value
5863 if there's no object available. */
5864
5865 static struct value *
5866 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var)
5867 {
5868 if (target_has_stack
5869 && !ptid_equal (inferior_ptid, null_ptid)
5870 && gdbarch_get_siginfo_type_p (gdbarch))
5871 {
5872 struct type *type = gdbarch_get_siginfo_type (gdbarch);
5873 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
5874 }
5875
5876 return allocate_value (builtin_type (gdbarch)->builtin_void);
5877 }
5878
5879 \f
5880 /* Inferior thread state.
5881 These are details related to the inferior itself, and don't include
5882 things like what frame the user had selected or what gdb was doing
5883 with the target at the time.
5884 For inferior function calls these are things we want to restore
5885 regardless of whether the function call successfully completes
5886 or the dummy frame has to be manually popped. */
5887
5888 struct inferior_thread_state
5889 {
5890 enum target_signal stop_signal;
5891 CORE_ADDR stop_pc;
5892 struct regcache *registers;
5893 };
5894
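/* Save the thread-related parts of the inferior state (stop signal,
   stop PC, and a copy of the registers) so they can later be restored
   with restore_inferior_thread_state. The caller must eventually
   either restore or discard the returned object. */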
5895 struct inferior_thread_state *
5896 save_inferior_thread_state (void)
5897 {
5898 struct inferior_thread_state *inf_state = XMALLOC (struct inferior_thread_state);
5899 struct thread_info *tp = inferior_thread ();
5900
5901 inf_state->stop_signal = tp->stop_signal;
5902 inf_state->stop_pc = stop_pc;
5903
5904 inf_state->registers = regcache_dup (get_current_regcache ());
5905
5906 return inf_state;
5907 }
5908
5909 /* Restore inferior thread state to INF_STATE. */
5910
5911 void
5912 restore_inferior_thread_state (struct inferior_thread_state *inf_state)
5913 {
5914 struct thread_info *tp = inferior_thread ();
5915
5916 tp->stop_signal = inf_state->stop_signal;
5917 stop_pc = inf_state->stop_pc;
5918
5919 /* The inferior can be gone if the user types "print exit(0)"
5920 (and perhaps other times). */
5921 if (target_has_execution)
5922 /* NB: The register write goes through to the target. */
5923 regcache_cpy (get_current_regcache (), inf_state->registers);
5924 regcache_xfree (inf_state->registers);
5925 xfree (inf_state);
5926 }
5927
5928 static void
5929 do_restore_inferior_thread_state_cleanup (void *state)
5930 {
5931 restore_inferior_thread_state (state);
5932 }
5933
5934 struct cleanup *
5935 make_cleanup_restore_inferior_thread_state (struct inferior_thread_state *inf_state)
5936 {
5937 return make_cleanup (do_restore_inferior_thread_state_cleanup, inf_state);
5938 }
5939
5940 void
5941 discard_inferior_thread_state (struct inferior_thread_state *inf_state)
5942 {
5943 regcache_xfree (inf_state->registers);
5944 xfree (inf_state);
5945 }
5946
5947 struct regcache *
5948 get_inferior_thread_state_regcache (struct inferior_thread_state *inf_state)
5949 {
5950 return inf_state->registers;
5951 }
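
/* Illustrative sketch of the save/restore pattern these functions
   support (simplified; the real inferior-call code attaches the saved
   state to the dummy frame, which frame_pop later restores, as noted
   in normal_stop above):

       struct inferior_thread_state *state = save_inferior_thread_state ();
       struct cleanup *chain = make_cleanup_restore_inferior_thread_state (state);

       ... let the inferior run ...

       if (the new state should be kept)
         {
           discard_cleanups (chain);
           discard_inferior_thread_state (state);
         }
       else
         do_cleanups (chain);   (restores registers, stop_pc and stop_signal)
*/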
5952
5953 /* Session related state for inferior function calls.
5954 These are the additional bits of state that need to be restored
5955 when an inferior function call successfully completes. */
5956
5957 struct inferior_status
5958 {
5959 bpstat stop_bpstat;
5960 int stop_step;
5961 int stop_stack_dummy;
5962 int stopped_by_random_signal;
5963 int stepping_over_breakpoint;
5964 CORE_ADDR step_range_start;
5965 CORE_ADDR step_range_end;
5966 struct frame_id step_frame_id;
5967 struct frame_id step_stack_frame_id;
5968 enum step_over_calls_kind step_over_calls;
5969 CORE_ADDR step_resume_break_address;
5970 int stop_after_trap;
5971 int stop_soon;
5972
5973 /* ID of the selected frame when the inferior function call was made. */
5974 struct frame_id selected_frame_id;
5975
5976 int proceed_to_finish;
5977 int in_infcall;
5978 };
5979
5980 /* Save all of the information associated with the inferior<==>gdb
5981 connection. */
5982
5983 struct inferior_status *
5984 save_inferior_status (void)
5985 {
5986 struct inferior_status *inf_status = XMALLOC (struct inferior_status);
5987 struct thread_info *tp = inferior_thread ();
5988 struct inferior *inf = current_inferior ();
5989
5990 inf_status->stop_step = tp->stop_step;
5991 inf_status->stop_stack_dummy = stop_stack_dummy;
5992 inf_status->stopped_by_random_signal = stopped_by_random_signal;
5993 inf_status->stepping_over_breakpoint = tp->trap_expected;
5994 inf_status->step_range_start = tp->step_range_start;
5995 inf_status->step_range_end = tp->step_range_end;
5996 inf_status->step_frame_id = tp->step_frame_id;
5997 inf_status->step_stack_frame_id = tp->step_stack_frame_id;
5998 inf_status->step_over_calls = tp->step_over_calls;
5999 inf_status->stop_after_trap = stop_after_trap;
6000 inf_status->stop_soon = inf->stop_soon;
6001 /* Save original bpstat chain here; replace it with copy of chain.
6002 If caller's caller is walking the chain, they'll be happier if we
6003 hand them back the original chain when restore_inferior_status is
6004 called. */
6005 inf_status->stop_bpstat = tp->stop_bpstat;
6006 tp->stop_bpstat = bpstat_copy (tp->stop_bpstat);
6007 inf_status->proceed_to_finish = tp->proceed_to_finish;
6008 inf_status->in_infcall = tp->in_infcall;
6009
6010 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
6011
6012 return inf_status;
6013 }
6014
6015 static int
6016 restore_selected_frame (void *args)
6017 {
6018 struct frame_id *fid = (struct frame_id *) args;
6019 struct frame_info *frame;
6020
6021 frame = frame_find_by_id (*fid);
6022
6023 /* If frame_find_by_id () fails, the previously selected frame is
6024 no longer available; warn and return failure. */
6025 if (frame == NULL)
6026 {
6027 warning (_("Unable to restore previously selected frame."));
6028 return 0;
6029 }
6030
6031 select_frame (frame);
6032
6033 return (1);
6034 }
6035
6036 /* Restore inferior session state to INF_STATUS. */
6037
6038 void
6039 restore_inferior_status (struct inferior_status *inf_status)
6040 {
6041 struct thread_info *tp = inferior_thread ();
6042 struct inferior *inf = current_inferior ();
6043
6044 tp->stop_step = inf_status->stop_step;
6045 stop_stack_dummy = inf_status->stop_stack_dummy;
6046 stopped_by_random_signal = inf_status->stopped_by_random_signal;
6047 tp->trap_expected = inf_status->stepping_over_breakpoint;
6048 tp->step_range_start = inf_status->step_range_start;
6049 tp->step_range_end = inf_status->step_range_end;
6050 tp->step_frame_id = inf_status->step_frame_id;
6051 tp->step_stack_frame_id = inf_status->step_stack_frame_id;
6052 tp->step_over_calls = inf_status->step_over_calls;
6053 stop_after_trap = inf_status->stop_after_trap;
6054 inf->stop_soon = inf_status->stop_soon;
6055 bpstat_clear (&tp->stop_bpstat);
6056 tp->stop_bpstat = inf_status->stop_bpstat;
6057 inf_status->stop_bpstat = NULL;
6058 tp->proceed_to_finish = inf_status->proceed_to_finish;
6059 tp->in_infcall = inf_status->in_infcall;
6060
6061 if (target_has_stack)
6062 {
6063 /* The point of catch_errors is that if the stack is clobbered,
6064 walking the stack might encounter a garbage pointer and
6065 error() trying to dereference it. */
6066 if (catch_errors
6067 (restore_selected_frame, &inf_status->selected_frame_id,
6068 "Unable to restore previously selected frame:\n",
6069 RETURN_MASK_ERROR) == 0)
6070 /* Error in restoring the selected frame. Select the innermost
6071 frame. */
6072 select_frame (get_current_frame ());
6073 }
6074
6075 xfree (inf_status);
6076 }
6077
6078 static void
6079 do_restore_inferior_status_cleanup (void *sts)
6080 {
6081 restore_inferior_status (sts);
6082 }
6083
6084 struct cleanup *
6085 make_cleanup_restore_inferior_status (struct inferior_status *inf_status)
6086 {
6087 return make_cleanup (do_restore_inferior_status_cleanup, inf_status);
6088 }
6089
6090 void
6091 discard_inferior_status (struct inferior_status *inf_status)
6092 {
6093 /* See save_inferior_status for info on stop_bpstat. */
6094 bpstat_clear (&inf_status->stop_bpstat);
6095 xfree (inf_status);
6096 }
6097 \f
6098 int
6099 inferior_has_forked (ptid_t pid, ptid_t *child_pid)
6100 {
6101 struct target_waitstatus last;
6102 ptid_t last_ptid;
6103
6104 get_last_target_status (&last_ptid, &last);
6105
6106 if (last.kind != TARGET_WAITKIND_FORKED)
6107 return 0;
6108
6109 if (!ptid_equal (last_ptid, pid))
6110 return 0;
6111
6112 *child_pid = last.value.related_pid;
6113 return 1;
6114 }
6115
6116 int
6117 inferior_has_vforked (ptid_t pid, ptid_t *child_pid)
6118 {
6119 struct target_waitstatus last;
6120 ptid_t last_ptid;
6121
6122 get_last_target_status (&last_ptid, &last);
6123
6124 if (last.kind != TARGET_WAITKIND_VFORKED)
6125 return 0;
6126
6127 if (!ptid_equal (last_ptid, pid))
6128 return 0;
6129
6130 *child_pid = last.value.related_pid;
6131 return 1;
6132 }
6133
6134 int
6135 inferior_has_execd (ptid_t pid, char **execd_pathname)
6136 {
6137 struct target_waitstatus last;
6138 ptid_t last_ptid;
6139
6140 get_last_target_status (&last_ptid, &last);
6141
6142 if (last.kind != TARGET_WAITKIND_EXECD)
6143 return 0;
6144
6145 if (!ptid_equal (last_ptid, pid))
6146 return 0;
6147
6148 *execd_pathname = xstrdup (last.value.execd_pathname);
6149 return 1;
6150 }
6151
6152 int
6153 inferior_has_called_syscall (ptid_t pid, int *syscall_number)
6154 {
6155 struct target_waitstatus last;
6156 ptid_t last_ptid;
6157
6158 get_last_target_status (&last_ptid, &last);
6159
6160 if (last.kind != TARGET_WAITKIND_SYSCALL_ENTRY
6161 && last.kind != TARGET_WAITKIND_SYSCALL_RETURN)
6162 return 0;
6163
6164 if (!ptid_equal (last_ptid, pid))
6165 return 0;
6166
6167 *syscall_number = last.value.syscall_number;
6168 return 1;
6169 }
6170
6171 /* Oft used ptids */
6172 ptid_t null_ptid;
6173 ptid_t minus_one_ptid;
6174
6175 /* Create a ptid given the necessary PID, LWP, and TID components. */
6176
6177 ptid_t
6178 ptid_build (int pid, long lwp, long tid)
6179 {
6180 ptid_t ptid;
6181
6182 ptid.pid = pid;
6183 ptid.lwp = lwp;
6184 ptid.tid = tid;
6185 return ptid;
6186 }
6187
6188 /* Create a ptid from just a pid. */
6189
6190 ptid_t
6191 pid_to_ptid (int pid)
6192 {
6193 return ptid_build (pid, 0, 0);
6194 }
6195
6196 /* Fetch the pid (process id) component from a ptid. */
6197
6198 int
6199 ptid_get_pid (ptid_t ptid)
6200 {
6201 return ptid.pid;
6202 }
6203
6204 /* Fetch the lwp (lightweight process) component from a ptid. */
6205
6206 long
6207 ptid_get_lwp (ptid_t ptid)
6208 {
6209 return ptid.lwp;
6210 }
6211
6212 /* Fetch the tid (thread id) component from a ptid. */
6213
6214 long
6215 ptid_get_tid (ptid_t ptid)
6216 {
6217 return ptid.tid;
6218 }
6219
6220 /* ptid_equal() is used to test equality of two ptids. */
6221
6222 int
6223 ptid_equal (ptid_t ptid1, ptid_t ptid2)
6224 {
6225 return (ptid1.pid == ptid2.pid && ptid1.lwp == ptid2.lwp
6226 && ptid1.tid == ptid2.tid);
6227 }
6228
6229 /* Returns true if PTID represents a process. */
6230
6231 int
6232 ptid_is_pid (ptid_t ptid)
6233 {
6234 if (ptid_equal (minus_one_ptid, ptid))
6235 return 0;
6236 if (ptid_equal (null_ptid, ptid))
6237 return 0;
6238
6239 return (ptid_get_lwp (ptid) == 0 && ptid_get_tid (ptid) == 0);
6240 }
6241
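/* Return non-zero if PTID matches FILTER. FILTER can be the wild
   card MINUS_ONE_PTID (all ptids match it), a ptid representing a
   process (ptid matches if its pid equals the filter's pid), or a
   specific ptid (ptid matches only if it equals the filter). */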
6242 int
6243 ptid_match (ptid_t ptid, ptid_t filter)
6244 {
6245 /* Since both parameters have the same type, prevent easy mistakes
6246 from happening. */
6247 gdb_assert (!ptid_equal (ptid, minus_one_ptid)
6248 && !ptid_equal (ptid, null_ptid)
6249 && !ptid_is_pid (ptid));
6250
6251 if (ptid_equal (filter, minus_one_ptid))
6252 return 1;
6253 if (ptid_is_pid (filter)
6254 && ptid_get_pid (ptid) == ptid_get_pid (filter))
6255 return 1;
6256 else if (ptid_equal (ptid, filter))
6257 return 1;
6258
6259 return 0;
6260 }
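
/* For example, following the rules above (illustrative):

       ptid_match (ptid_build (5, 7, 0), minus_one_ptid)         => 1
       ptid_match (ptid_build (5, 7, 0), pid_to_ptid (5))        => 1
       ptid_match (ptid_build (5, 7, 0), ptid_build (5, 8, 0))   => 0  */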
6261
6262 /* restore_inferior_ptid() will be used by the cleanup machinery
6263 to restore the inferior_ptid value saved in a call to
6264 save_inferior_ptid(). */
6265
6266 static void
6267 restore_inferior_ptid (void *arg)
6268 {
6269 ptid_t *saved_ptid_ptr = arg;
6270 inferior_ptid = *saved_ptid_ptr;
6271 xfree (arg);
6272 }
6273
6274 /* Save the value of inferior_ptid so that it may be restored by a
6275 later call to do_cleanups(). Returns the struct cleanup pointer
6276 needed for later doing the cleanup. */
6277
6278 struct cleanup *
6279 save_inferior_ptid (void)
6280 {
6281 ptid_t *saved_ptid_ptr;
6282
6283 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
6284 *saved_ptid_ptr = inferior_ptid;
6285 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
6286 }
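
/* For example (illustrative; OTHER_PTID stands for whatever ptid the
   caller temporarily wants to operate on):

       struct cleanup *old_chain = save_inferior_ptid ();

       inferior_ptid = other_ptid;
       ... do the work that depends on inferior_ptid ...

       do_cleanups (old_chain);   (inferior_ptid is restored and the
                                   saved copy freed)  */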
6287 \f
6288
6289 /* User interface for reverse debugging:
6290 Set exec-direction / show exec-direction commands
6291 (the setting only takes effect if the target supports reverse execution). */
6292
6293 enum exec_direction_kind execution_direction = EXEC_FORWARD;
6294 static const char exec_forward[] = "forward";
6295 static const char exec_reverse[] = "reverse";
6296 static const char *exec_direction = exec_forward;
6297 static const char *exec_direction_names[] = {
6298 exec_forward,
6299 exec_reverse,
6300 NULL
6301 };
6302
6303 static void
6304 set_exec_direction_func (char *args, int from_tty,
6305 struct cmd_list_element *cmd)
6306 {
6307 if (target_can_execute_reverse)
6308 {
6309 if (!strcmp (exec_direction, exec_forward))
6310 execution_direction = EXEC_FORWARD;
6311 else if (!strcmp (exec_direction, exec_reverse))
6312 execution_direction = EXEC_REVERSE;
6313 }
6314 }
6315
6316 static void
6317 show_exec_direction_func (struct ui_file *out, int from_tty,
6318 struct cmd_list_element *cmd, const char *value)
6319 {
6320 switch (execution_direction) {
6321 case EXEC_FORWARD:
6322 fprintf_filtered (out, _("Forward.\n"));
6323 break;
6324 case EXEC_REVERSE:
6325 fprintf_filtered (out, _("Reverse.\n"));
6326 break;
6327 case EXEC_ERROR:
6328 default:
6329 fprintf_filtered (out,
6330 _("Forward (target `%s' does not support exec-direction).\n"),
6331 target_shortname);
6332 break;
6333 }
6334 }
6335
6336 /* User interface for non-stop mode. */
6337
6338 int non_stop = 0;
6339 static int non_stop_1 = 0;
6340
6341 static void
6342 set_non_stop (char *args, int from_tty,
6343 struct cmd_list_element *c)
6344 {
6345 if (target_has_execution)
6346 {
6347 non_stop_1 = non_stop;
6348 error (_("Cannot change this setting while the inferior is running."));
6349 }
6350
6351 non_stop = non_stop_1;
6352 }
6353
6354 static void
6355 show_non_stop (struct ui_file *file, int from_tty,
6356 struct cmd_list_element *c, const char *value)
6357 {
6358 fprintf_filtered (file,
6359 _("Controlling the inferior in non-stop mode is %s.\n"),
6360 value);
6361 }
6362
6363 static void
6364 show_schedule_multiple (struct ui_file *file, int from_tty,
6365 struct cmd_list_element *c, const char *value)
6366 {
6367 fprintf_filtered (file, _("\
6368 Resuming the execution of threads of all processes is %s.\n"), value);
6369 }
6370
6371 void
6372 _initialize_infrun (void)
6373 {
6374 int i;
6375 int numsigs;
6376 struct cmd_list_element *c;
6377
6378 add_info ("signals", signals_info, _("\
6379 What debugger does when program gets various signals.\n\
6380 Specify a signal as argument to print info on that signal only."));
6381 add_info_alias ("handle", "signals", 0);
6382
6383 add_com ("handle", class_run, handle_command, _("\
6384 Specify how to handle a signal.\n\
6385 Args are signals and actions to apply to those signals.\n\
6386 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6387 from 1-15 are allowed for compatibility with old versions of GDB.\n\
6388 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
6389 The special arg \"all\" is recognized to mean all signals except those\n\
6390 used by the debugger, typically SIGTRAP and SIGINT.\n\
6391 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
6392 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
6393 Stop means reenter debugger if this signal happens (implies print).\n\
6394 Print means print a message if this signal happens.\n\
6395 Pass means let program see this signal; otherwise program doesn't know.\n\
6396 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
6397 Pass and Stop may be combined."));
6398 if (xdb_commands)
6399 {
6400 add_com ("lz", class_info, signals_info, _("\
6401 What debugger does when program gets various signals.\n\
6402 Specify a signal as argument to print info on that signal only."));
6403 add_com ("z", class_run, xdb_handle_command, _("\
6404 Specify how to handle a signal.\n\
6405 Args are signals and actions to apply to those signals.\n\
6406 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6407 from 1-15 are allowed for compatibility with old versions of GDB.\n\
6408 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
6409 The special arg \"all\" is recognized to mean all signals except those\n\
6410 used by the debugger, typically SIGTRAP and SIGINT.\n\
6411 Recognized actions include \"s\" (toggles between stop and nostop), \n\
6412 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
6413 nopass), \"Q\" (noprint)\n\
6414 Stop means reenter debugger if this signal happens (implies print).\n\
6415 Print means print a message if this signal happens.\n\
6416 Pass means let program see this signal; otherwise program doesn't know.\n\
6417 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
6418 Pass and Stop may be combined."));
6419 }
6420
6421 if (!dbx_commands)
6422 stop_command = add_cmd ("stop", class_obscure,
6423 not_just_help_class_command, _("\
6424 There is no `stop' command, but you can set a hook on `stop'.\n\
6425 This allows you to set a list of commands to be run each time execution\n\
6426 of the program stops."), &cmdlist);
6427
6428 add_setshow_zinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
6429 Set inferior debugging."), _("\
6430 Show inferior debugging."), _("\
6431 When non-zero, inferior specific debugging is enabled."),
6432 NULL,
6433 show_debug_infrun,
6434 &setdebuglist, &showdebuglist);
6435
6436 add_setshow_boolean_cmd ("displaced", class_maintenance, &debug_displaced, _("\
6437 Set displaced stepping debugging."), _("\
6438 Show displaced stepping debugging."), _("\
6439 When non-zero, displaced stepping specific debugging is enabled."),
6440 NULL,
6441 show_debug_displaced,
6442 &setdebuglist, &showdebuglist);
6443
6444 add_setshow_boolean_cmd ("non-stop", no_class,
6445 &non_stop_1, _("\
6446 Set whether gdb controls the inferior in non-stop mode."), _("\
6447 Show whether gdb controls the inferior in non-stop mode."), _("\
6448 When debugging a multi-threaded program and this setting is\n\
6449 off (the default, also called all-stop mode), when one thread stops\n\
6450 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
6451 all other threads in the program while you interact with the thread of\n\
6452 interest. When you continue or step a thread, you can allow the other\n\
6453 threads to run, or have them remain stopped, but while you inspect any\n\
6454 thread's state, all threads stop.\n\
6455 \n\
6456 In non-stop mode, when one thread stops, other threads can continue\n\
6457 to run freely. You'll be able to step each thread independently,\n\
6458 leave it stopped or free to run as needed."),
6459 set_non_stop,
6460 show_non_stop,
6461 &setlist,
6462 &showlist);
6463
6464 numsigs = (int) TARGET_SIGNAL_LAST;
6465 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
6466 signal_print = (unsigned char *)
6467 xmalloc (sizeof (signal_print[0]) * numsigs);
6468 signal_program = (unsigned char *)
6469 xmalloc (sizeof (signal_program[0]) * numsigs);
6470 for (i = 0; i < numsigs; i++)
6471 {
6472 signal_stop[i] = 1;
6473 signal_print[i] = 1;
6474 signal_program[i] = 1;
6475 }
6476
6477 /* Signals caused by debugger's own actions
6478 should not be given to the program afterwards. */
6479 signal_program[TARGET_SIGNAL_TRAP] = 0;
6480 signal_program[TARGET_SIGNAL_INT] = 0;
6481
6482 /* Signals that are not errors should not normally enter the debugger. */
6483 signal_stop[TARGET_SIGNAL_ALRM] = 0;
6484 signal_print[TARGET_SIGNAL_ALRM] = 0;
6485 signal_stop[TARGET_SIGNAL_VTALRM] = 0;
6486 signal_print[TARGET_SIGNAL_VTALRM] = 0;
6487 signal_stop[TARGET_SIGNAL_PROF] = 0;
6488 signal_print[TARGET_SIGNAL_PROF] = 0;
6489 signal_stop[TARGET_SIGNAL_CHLD] = 0;
6490 signal_print[TARGET_SIGNAL_CHLD] = 0;
6491 signal_stop[TARGET_SIGNAL_IO] = 0;
6492 signal_print[TARGET_SIGNAL_IO] = 0;
6493 signal_stop[TARGET_SIGNAL_POLL] = 0;
6494 signal_print[TARGET_SIGNAL_POLL] = 0;
6495 signal_stop[TARGET_SIGNAL_URG] = 0;
6496 signal_print[TARGET_SIGNAL_URG] = 0;
6497 signal_stop[TARGET_SIGNAL_WINCH] = 0;
6498 signal_print[TARGET_SIGNAL_WINCH] = 0;
6499
6500 /* These signals are used internally by user-level thread
6501 implementations. (See signal(5) on Solaris.) Like the above
6502 signals, a healthy program receives and handles them as part of
6503 its normal operation. */
6504 signal_stop[TARGET_SIGNAL_LWP] = 0;
6505 signal_print[TARGET_SIGNAL_LWP] = 0;
6506 signal_stop[TARGET_SIGNAL_WAITING] = 0;
6507 signal_print[TARGET_SIGNAL_WAITING] = 0;
6508 signal_stop[TARGET_SIGNAL_CANCEL] = 0;
6509 signal_print[TARGET_SIGNAL_CANCEL] = 0;
6510
6511 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
6512 &stop_on_solib_events, _("\
6513 Set stopping for shared library events."), _("\
6514 Show stopping for shared library events."), _("\
6515 If nonzero, gdb will give control to the user when the dynamic linker\n\
6516 notifies gdb of shared library events. The most common event of interest\n\
6517 to the user would be loading/unloading of a new library."),
6518 NULL,
6519 show_stop_on_solib_events,
6520 &setlist, &showlist);
6521
6522 add_setshow_enum_cmd ("follow-fork-mode", class_run,
6523 follow_fork_mode_kind_names,
6524 &follow_fork_mode_string, _("\
6525 Set debugger response to a program call of fork or vfork."), _("\
6526 Show debugger response to a program call of fork or vfork."), _("\
6527 A fork or vfork creates a new process. follow-fork-mode can be:\n\
6528 parent - the original process is debugged after a fork\n\
6529 child - the new process is debugged after a fork\n\
6530 The unfollowed process will continue to run.\n\
6531 By default, the debugger will follow the parent process."),
6532 NULL,
6533 show_follow_fork_mode_string,
6534 &setlist, &showlist);
6535
6536 add_setshow_enum_cmd ("follow-exec-mode", class_run,
6537 follow_exec_mode_names,
6538 &follow_exec_mode_string, _("\
6539 Set debugger response to a program call of exec."), _("\
6540 Show debugger response to a program call of exec."), _("\
6541 An exec call replaces the program image of a process.\n\
6542 \n\
6543 follow-exec-mode can be:\n\
6544 \n\
6545 new - the debugger creates a new inferior and rebinds the process \n\
6546 to this new inferior. The program the process was running before\n\
6547 the exec call can be restarted afterwards by restarting the original\n\
6548 inferior.\n\
6549 \n\
6550 same - the debugger keeps the process bound to the same inferior.\n\
6551 The new executable image replaces the previous executable loaded in\n\
6552 the inferior. Restarting the inferior after the exec call restarts\n\
6553 the executable the process was running after the exec call.\n\
6554 \n\
6555 By default, the debugger will use the same inferior."),
6556 NULL,
6557 show_follow_exec_mode_string,
6558 &setlist, &showlist);
6559
6560 add_setshow_enum_cmd ("scheduler-locking", class_run,
6561 scheduler_enums, &scheduler_mode, _("\
6562 Set mode for locking scheduler during execution."), _("\
6563 Show mode for locking scheduler during execution."), _("\
6564 off == no locking (threads may preempt at any time)\n\
6565 on == full locking (no thread except the current thread may run)\n\
6566 step == scheduler locked during every single-step operation.\n\
6567 In this mode, no other thread may run during a step command.\n\
6568 Other threads may run while stepping over a function call ('next')."),
6569 set_schedlock_func, /* traps on target vector */
6570 show_scheduler_mode,
6571 &setlist, &showlist);
6572
6573 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
6574 Set mode for resuming threads of all processes."), _("\
6575 Show mode for resuming threads of all processes."), _("\
6576 When on, execution commands (such as 'continue' or 'next') resume all\n\
6577 threads of all processes. When off (which is the default), execution\n\
6578 commands only resume the threads of the current process. The set of\n\
6579 threads that are resumed is further refined by the scheduler-locking\n\
6580 mode (see help set scheduler-locking)."),
6581 NULL,
6582 show_schedule_multiple,
6583 &setlist, &showlist);
6584
6585 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
6586 Set mode of the step operation."), _("\
6587 Show mode of the step operation."), _("\
6588 When set, doing a step over a function without debug line information\n\
6589 will stop at the first instruction of that function. Otherwise, the\n\
6590 function is skipped and the step command stops at a different source line."),
6591 NULL,
6592 show_step_stop_if_no_debug,
6593 &setlist, &showlist);
6594
6595 add_setshow_enum_cmd ("displaced-stepping", class_run,
6596 can_use_displaced_stepping_enum,
6597 &can_use_displaced_stepping, _("\
6598 Set debugger's willingness to use displaced stepping."), _("\
6599 Show debugger's willingness to use displaced stepping."), _("\
6600 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
6601 supported by the target architecture. If off, gdb will not use displaced\n\
6602 stepping to step over breakpoints, even if such is supported by the target\n\
6603 architecture. If auto (which is the default), gdb will use displaced stepping\n\
6604 if the target architecture supports it and non-stop mode is active, but will not\n\
6605 use it in all-stop mode (see help set non-stop)."),
6606 NULL,
6607 show_can_use_displaced_stepping,
6608 &setlist, &showlist);
6609
6610 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
6611 &exec_direction, _("Set direction of execution.\n\
6612 Options are 'forward' or 'reverse'."),
6613 _("Show direction of execution (forward/reverse)."),
6614 _("Tells gdb whether to execute forward or backward."),
6615 set_exec_direction_func, show_exec_direction_func,
6616 &setlist, &showlist);
6617
6618 /* Set/show detach-on-fork: user-settable mode. */
6619
6620 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
6621 Set whether gdb will detach the child of a fork."), _("\
6622 Show whether gdb will detach the child of a fork."), _("\
6623 Tells gdb whether to detach the child of a fork."),
6624 NULL, NULL, &setlist, &showlist);
6625
6626 /* ptid initializations */
6627 null_ptid = ptid_build (0, 0, 0);
6628 minus_one_ptid = ptid_build (-1, 0, 0);
6629 inferior_ptid = null_ptid;
6630 target_last_wait_ptid = minus_one_ptid;
6631
6632 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
6633 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
6634 observer_attach_thread_exit (infrun_thread_thread_exit);
6635 observer_attach_inferior_exit (infrun_inferior_exit);
6636
6637 /* Explicitly create without lookup, since that tries to create a
6638 value with a void typed value, and when we get here, gdbarch
6639 isn't initialized yet. At this point, we're quite sure there
6640 isn't another convenience variable of the same name. */
6641 create_internalvar_type_lazy ("_siginfo", siginfo_make_value);
6642 }