1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995,
5 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
6 2008, 2009, 2010 Free Software Foundation, Inc.
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "gdb_string.h"
25 #include <ctype.h>
26 #include "symtab.h"
27 #include "frame.h"
28 #include "inferior.h"
29 #include "exceptions.h"
30 #include "breakpoint.h"
31 #include "gdb_wait.h"
32 #include "gdbcore.h"
33 #include "gdbcmd.h"
34 #include "cli/cli-script.h"
35 #include "target.h"
36 #include "gdbthread.h"
37 #include "annotate.h"
38 #include "symfile.h"
39 #include "top.h"
40 #include <signal.h>
41 #include "inf-loop.h"
42 #include "regcache.h"
43 #include "value.h"
44 #include "observer.h"
45 #include "language.h"
46 #include "solib.h"
47 #include "main.h"
48 #include "gdb_assert.h"
49 #include "mi/mi-common.h"
50 #include "event-top.h"
51 #include "record.h"
52 #include "inline-frame.h"
53 #include "jit.h"
54
55 /* Prototypes for local functions */
56
57 static void signals_info (char *, int);
58
59 static void handle_command (char *, int);
60
61 static void sig_print_info (enum target_signal);
62
63 static void sig_print_header (void);
64
65 static void resume_cleanups (void *);
66
67 static int hook_stop_stub (void *);
68
69 static int restore_selected_frame (void *);
70
71 static int follow_fork (void);
72
73 static void set_schedlock_func (char *args, int from_tty,
74 struct cmd_list_element *c);
75
76 static int currently_stepping (struct thread_info *tp);
77
78 static int currently_stepping_or_nexting_callback (struct thread_info *tp,
79 void *data);
80
81 static void xdb_handle_command (char *args, int from_tty);
82
83 static int prepare_to_proceed (int);
84
85 void _initialize_infrun (void);
86
87 void nullify_last_target_wait_ptid (void);
88
89 /* When set, stop the 'step' command if we enter a function which has
90 no line number information. The normal behavior is that we step
91 over such a function. */
92 int step_stop_if_no_debug = 0;
93 static void
94 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
95 struct cmd_list_element *c, const char *value)
96 {
97 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
98 }
99
100 /* In asynchronous mode, but simulating synchronous execution. */
101
102 int sync_execution = 0;
103
104 /* wait_for_inferior and normal_stop use this to notify the user
105 when the inferior stopped in a different thread than it had been
106 running in. */
107
108 static ptid_t previous_inferior_ptid;
109
110 /* Default behavior is to detach newly forked processes (legacy). */
111 int detach_fork = 1;
112
113 int debug_displaced = 0;
114 static void
115 show_debug_displaced (struct ui_file *file, int from_tty,
116 struct cmd_list_element *c, const char *value)
117 {
118 fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
119 }
120
121 static int debug_infrun = 0;
122 static void
123 show_debug_infrun (struct ui_file *file, int from_tty,
124 struct cmd_list_element *c, const char *value)
125 {
126 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
127 }
128
129 /* If the program uses ELF-style shared libraries, then calls to
130 functions in shared libraries go through stubs, which live in a
131 table called the PLT (Procedure Linkage Table). The first time the
132 function is called, the stub sends control to the dynamic linker,
133 which looks up the function's real address, patches the stub so
134 that future calls will go directly to the function, and then passes
135 control to the function.
136
137 If we are stepping at the source level, we don't want to see any of
138 this --- we just want to skip over the stub and the dynamic linker.
139 The simple approach is to single-step until control leaves the
140 dynamic linker.
141
142 However, on some systems (e.g., Red Hat's 5.2 distribution) the
143 dynamic linker calls functions in the shared C library, so you
144 can't tell from the PC alone whether the dynamic linker is still
145 running. In this case, we use a step-resume breakpoint to get us
146 past the dynamic linker, as if we were using "next" to step over a
147 function call.
148
149 in_solib_dynsym_resolve_code() says whether we're in the dynamic
150 linker code or not. Normally, this means we single-step. However,
151 if SKIP_SOLIB_RESOLVER returns non-zero, then its value is an
152 address where we can place a step-resume breakpoint to get past the
153 linker's symbol resolution function.
154
155 in_solib_dynsym_resolve_code() can generally be implemented in a
156 pretty portable way, by comparing the PC against the address ranges
157 of the dynamic linker's sections.
158
159 SKIP_SOLIB_RESOLVER is generally going to be system-specific, since
160 it depends on internal details of the dynamic linker. It's usually
161 not too hard to figure out where to put a breakpoint, but it
162 certainly isn't portable. SKIP_SOLIB_RESOLVER should do plenty of
163 sanity checking. If it can't figure things out, returning zero and
164 getting the (possibly confusing) stepping behavior is better than
165 signalling an error, which will obscure the change in the
166 inferior's state. */
167
168 /* This function returns TRUE if pc is the address of an instruction
169 that lies within the dynamic linker (such as the event hook, or the
170 dld itself).
171
172 This function must be used only when a dynamic linker event has
173 been caught, and the inferior is being stepped out of the hook, or
174 undefined results are guaranteed. */
175
176 #ifndef SOLIB_IN_DYNAMIC_LINKER
177 #define SOLIB_IN_DYNAMIC_LINKER(pid,pc) 0
178 #endif
179
180
181 /* Convert the #defines into values. This is temporary until wfi control
182 flow is completely sorted out. */
183
184 #ifndef CANNOT_STEP_HW_WATCHPOINTS
185 #define CANNOT_STEP_HW_WATCHPOINTS 0
186 #else
187 #undef CANNOT_STEP_HW_WATCHPOINTS
188 #define CANNOT_STEP_HW_WATCHPOINTS 1
189 #endif
190
191 /* Tables of how to react to signals; the user sets them. */
192
193 static unsigned char *signal_stop;
194 static unsigned char *signal_print;
195 static unsigned char *signal_program;
196
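/* Update the tables above: set (SET_SIGS) or clear (UNSET_SIGS) the
   FLAGS entry for every signal number whose SIGS entry is nonzero. */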
197 #define SET_SIGS(nsigs,sigs,flags) \
198 do { \
199 int signum = (nsigs); \
200 while (signum-- > 0) \
201 if ((sigs)[signum]) \
202 (flags)[signum] = 1; \
203 } while (0)
204
205 #define UNSET_SIGS(nsigs,sigs,flags) \
206 do { \
207 int signum = (nsigs); \
208 while (signum-- > 0) \
209 if ((sigs)[signum]) \
210 (flags)[signum] = 0; \
211 } while (0)
212
213 /* Value to pass to target_resume() to cause all threads to resume */
214
215 #define RESUME_ALL minus_one_ptid
216
217 /* Command list pointer for the "stop" placeholder. */
218
219 static struct cmd_list_element *stop_command;
220
221 /* Function inferior was in as of last step command. */
222
223 static struct symbol *step_start_function;
224
225 /* Nonzero if we want to give control to the user when we're notified
226 of shared library events by the dynamic linker. */
227 static int stop_on_solib_events;
228 static void
229 show_stop_on_solib_events (struct ui_file *file, int from_tty,
230 struct cmd_list_element *c, const char *value)
231 {
232 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
233 value);
234 }
235
236 /* Nonzero means expecting a trace trap
237 and should stop the inferior and return silently when it happens. */
238
239 int stop_after_trap;
240
241 /* Save register contents here when executing a "finish" command or when
242 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
243 Thus this contains the return value from the called function (assuming
244 values are returned in a register). */
245
246 struct regcache *stop_registers;
247
248 /* Nonzero after stop if current stack frame should be printed. */
249
250 static int stop_print_frame;
251
252 /* This is a cached copy of the pid/waitstatus of the last event
253 returned by target_wait()/deprecated_target_wait_hook(). This
254 information is returned by get_last_target_status(). */
255 static ptid_t target_last_wait_ptid;
256 static struct target_waitstatus target_last_waitstatus;
257
258 static void context_switch (ptid_t ptid);
259
260 void init_thread_stepping_state (struct thread_info *tss);
261
262 void init_infwait_state (void);
263
264 static const char follow_fork_mode_child[] = "child";
265 static const char follow_fork_mode_parent[] = "parent";
266
267 static const char *follow_fork_mode_kind_names[] = {
268 follow_fork_mode_child,
269 follow_fork_mode_parent,
270 NULL
271 };
272
273 static const char *follow_fork_mode_string = follow_fork_mode_parent;
274 static void
275 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
276 struct cmd_list_element *c, const char *value)
277 {
278 fprintf_filtered (file, _("\
279 Debugger response to a program call of fork or vfork is \"%s\".\n"),
280 value);
281 }
282 \f
283
284 /* Tell the target to follow the fork we're stopped at. Returns true
285 if the inferior should be resumed; false, if the target for some
286 reason decided it's best not to resume. */
287
288 static int
289 follow_fork (void)
290 {
291 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
292 int should_resume = 1;
293 struct thread_info *tp;
294
295 /* Copy user stepping state to the new inferior thread. FIXME: the
296 followed fork child thread should have a copy of most of the
297 parent thread structure's run control related fields, not just these.
298 Initialized to avoid "may be used uninitialized" warnings from gcc. */
299 struct breakpoint *step_resume_breakpoint = NULL;
300 CORE_ADDR step_range_start = 0;
301 CORE_ADDR step_range_end = 0;
302 struct frame_id step_frame_id = { 0 };
303
304 if (!non_stop)
305 {
306 ptid_t wait_ptid;
307 struct target_waitstatus wait_status;
308
309 /* Get the last target status returned by target_wait(). */
310 get_last_target_status (&wait_ptid, &wait_status);
311
312 /* If not stopped at a fork event, then there's nothing else to
313 do. */
314 if (wait_status.kind != TARGET_WAITKIND_FORKED
315 && wait_status.kind != TARGET_WAITKIND_VFORKED)
316 return 1;
317
318 /* Check if we switched over from WAIT_PTID, since the event was
319 reported. */
320 if (!ptid_equal (wait_ptid, minus_one_ptid)
321 && !ptid_equal (inferior_ptid, wait_ptid))
322 {
323 /* We did. Switch back to WAIT_PTID thread, to tell the
324 target to follow it (in either direction). We'll
325 afterwards refuse to resume, and inform the user what
326 happened. */
327 switch_to_thread (wait_ptid);
328 should_resume = 0;
329 }
330 }
331
332 tp = inferior_thread ();
333
334 /* If there were any forks/vforks that were caught and are now to be
335 followed, then do so now. */
336 switch (tp->pending_follow.kind)
337 {
338 case TARGET_WAITKIND_FORKED:
339 case TARGET_WAITKIND_VFORKED:
340 {
341 ptid_t parent, child;
342
343 /* If the user did a next/step, etc, over a fork call,
344 preserve the stepping state in the fork child. */
345 if (follow_child && should_resume)
346 {
347 step_resume_breakpoint
348 = clone_momentary_breakpoint (tp->step_resume_breakpoint);
349 step_range_start = tp->step_range_start;
350 step_range_end = tp->step_range_end;
351 step_frame_id = tp->step_frame_id;
352
353 /* For now, delete the parent's sr breakpoint, otherwise,
354 parent/child sr breakpoints are considered duplicates,
355 and the child version will not be installed. Remove
356 this when the breakpoints module becomes aware of
357 inferiors and address spaces. */
358 delete_step_resume_breakpoint (tp);
359 tp->step_range_start = 0;
360 tp->step_range_end = 0;
361 tp->step_frame_id = null_frame_id;
362 }
363
364 parent = inferior_ptid;
365 child = tp->pending_follow.value.related_pid;
366
367 /* Tell the target to do whatever is necessary to follow
368 either parent or child. */
369 if (target_follow_fork (follow_child))
370 {
371 /* Target refused to follow, or there's some other reason
372 we shouldn't resume. */
373 should_resume = 0;
374 }
375 else
376 {
377 /* This pending follow fork event is now handled, one way
378 or another. The previously selected thread may be gone
379 from the lists by now, but if it is still around, we need
380 to clear the pending follow request. */
381 tp = find_thread_ptid (parent);
382 if (tp)
383 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
384
385 /* This makes sure we don't try to apply the "Switched
386 over from WAIT_PTID" logic above. */
387 nullify_last_target_wait_ptid ();
388
389 /* If we followed the child, switch to it... */
390 if (follow_child)
391 {
392 switch_to_thread (child);
393
394 /* ... and preserve the stepping state, in case the
395 user was stepping over the fork call. */
396 if (should_resume)
397 {
398 tp = inferior_thread ();
399 tp->step_resume_breakpoint = step_resume_breakpoint;
400 tp->step_range_start = step_range_start;
401 tp->step_range_end = step_range_end;
402 tp->step_frame_id = step_frame_id;
403 }
404 else
405 {
406 /* If we get here, it was because we're trying to
407 resume from a fork catchpoint, but, the user
408 has switched threads away from the thread that
409 forked. In that case, the resume command
410 issued is most likely not applicable to the
411 child, so just warn, and refuse to resume. */
412 warning (_("\
413 Not resuming: switched threads before following fork child.\n"));
414 }
415
416 /* Reset breakpoints in the child as appropriate. */
417 follow_inferior_reset_breakpoints ();
418 }
419 else
420 switch_to_thread (parent);
421 }
422 }
423 break;
424 case TARGET_WAITKIND_SPURIOUS:
425 /* Nothing to follow. */
426 break;
427 default:
428 internal_error (__FILE__, __LINE__,
429 "Unexpected pending_follow.kind %d\n",
430 tp->pending_follow.kind);
431 break;
432 }
433
434 return should_resume;
435 }
436
437 void
438 follow_inferior_reset_breakpoints (void)
439 {
440 struct thread_info *tp = inferior_thread ();
441
442 /* Was there a step_resume breakpoint? (There was if the user
443 did a "next" at the fork() call.) If so, explicitly reset its
444 thread number.
445
446 step_resumes are a form of bp that are made to be per-thread.
447 Since we created the step_resume bp when the parent process
448 was being debugged, and now are switching to the child process,
449 from the breakpoint package's viewpoint, that's a switch of
450 "threads". We must update the bp's notion of which thread
451 it is for, or it'll be ignored when it triggers. */
452
453 if (tp->step_resume_breakpoint)
454 breakpoint_re_set_thread (tp->step_resume_breakpoint);
455
456 /* Reinsert all breakpoints in the child. The user may have set
457 breakpoints after catching the fork, in which case those
458 were never set in the child, but only in the parent. This makes
459 sure the inserted breakpoints match the breakpoint list. */
460
461 breakpoint_re_set ();
462 insert_breakpoints ();
463 }
464
465 /* The child has exited or execed: resume threads of the parent the
466 user wanted to be executing. */
467
468 static int
469 proceed_after_vfork_done (struct thread_info *thread,
470 void *arg)
471 {
472 int pid = * (int *) arg;
473
474 if (ptid_get_pid (thread->ptid) == pid
475 && is_running (thread->ptid)
476 && !is_executing (thread->ptid)
477 && !thread->stop_requested
478 && thread->stop_signal == TARGET_SIGNAL_0)
479 {
480 if (debug_infrun)
481 fprintf_unfiltered (gdb_stdlog,
482 "infrun: resuming vfork parent thread %s\n",
483 target_pid_to_str (thread->ptid));
484
485 switch_to_thread (thread->ptid);
486 clear_proceed_status ();
487 proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);
488 }
489
490 return 0;
491 }
492
493 /* Called whenever we notice an exec or exit event, to handle
494 detaching or resuming a vfork parent. */
495
496 static void
497 handle_vfork_child_exec_or_exit (int exec)
498 {
499 struct inferior *inf = current_inferior ();
500
501 if (inf->vfork_parent)
502 {
503 int resume_parent = -1;
504
505 /* This exec or exit marks the end of the shared memory region
506 between the parent and the child. If the user wanted to
507 detach from the parent, now is the time. */
508
509 if (inf->vfork_parent->pending_detach)
510 {
511 struct thread_info *tp;
512 struct cleanup *old_chain;
513 struct program_space *pspace;
514 struct address_space *aspace;
515
516 /* follow-fork child, detach-on-fork on */
517
518 old_chain = make_cleanup_restore_current_thread ();
519
520 /* We're letting go of the parent. */
521 tp = any_live_thread_of_process (inf->vfork_parent->pid);
522 switch_to_thread (tp->ptid);
523
524 /* We're about to detach from the parent, which implicitly
525 removes breakpoints from its address space. There's a
526 catch here: we want to reuse the spaces for the child,
527 but, parent/child are still sharing the pspace at this
528 point, although the exec in reality makes the kernel give
529 the child a fresh set of new pages. The problem here is
530 that the breakpoints module, being unaware of this, would
531 likely choose the child process to write to the parent
532 address space. Swapping the child temporarily away from
533 the spaces has the desired effect. Yes, this is "sort
534 of" a hack. */
535
536 pspace = inf->pspace;
537 aspace = inf->aspace;
538 inf->aspace = NULL;
539 inf->pspace = NULL;
540
541 if (debug_infrun || info_verbose)
542 {
543 target_terminal_ours ();
544
545 if (exec)
546 fprintf_filtered (gdb_stdlog,
547 "Detaching vfork parent process %d after child exec.\n",
548 inf->vfork_parent->pid);
549 else
550 fprintf_filtered (gdb_stdlog,
551 "Detaching vfork parent process %d after child exit.\n",
552 inf->vfork_parent->pid);
553 }
554
555 target_detach (NULL, 0);
556
557 /* Put it back. */
558 inf->pspace = pspace;
559 inf->aspace = aspace;
560
561 do_cleanups (old_chain);
562 }
563 else if (exec)
564 {
565 /* We're staying attached to the parent, so, really give the
566 child a new address space. */
567 inf->pspace = add_program_space (maybe_new_address_space ());
568 inf->aspace = inf->pspace->aspace;
569 inf->removable = 1;
570 set_current_program_space (inf->pspace);
571
572 resume_parent = inf->vfork_parent->pid;
573
574 /* Break the bonds. */
575 inf->vfork_parent->vfork_child = NULL;
576 }
577 else
578 {
579 struct cleanup *old_chain;
580 struct program_space *pspace;
581
582 /* If this is a vfork child exiting, then the pspace and
583 aspaces were shared with the parent. Since we're
584 reporting the process exit, we'll be mourning all that is
585 found in the address space, and switching to null_ptid,
586 preparing to start a new inferior. But, since we don't
587 want to clobber the parent's address/program spaces, we
588 go ahead and create a new one for this exiting
589 inferior. */
590
591 /* Switch to null_ptid, so that clone_program_space doesn't want
592 to read the selected frame of a dead process. */
593 old_chain = save_inferior_ptid ();
594 inferior_ptid = null_ptid;
595
596 /* This inferior is dead, so avoid giving the breakpoints
597 module the option to write through to it (cloning a
598 program space resets breakpoints). */
599 inf->aspace = NULL;
600 inf->pspace = NULL;
601 pspace = add_program_space (maybe_new_address_space ());
602 set_current_program_space (pspace);
603 inf->removable = 1;
604 clone_program_space (pspace, inf->vfork_parent->pspace);
605 inf->pspace = pspace;
606 inf->aspace = pspace->aspace;
607
608 /* Put back inferior_ptid. We'll continue mourning this
609 inferior. */
610 do_cleanups (old_chain);
611
612 resume_parent = inf->vfork_parent->pid;
613 /* Break the bonds. */
614 inf->vfork_parent->vfork_child = NULL;
615 }
616
617 inf->vfork_parent = NULL;
618
619 gdb_assert (current_program_space == inf->pspace);
620
621 if (non_stop && resume_parent != -1)
622 {
623 /* If the user wanted the parent to be running, let it go
624 free now. */
625 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
626
627 if (debug_infrun)
628 fprintf_unfiltered (gdb_stdlog, "infrun: resuming vfork parent process %d\n",
629 resume_parent);
630
631 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
632
633 do_cleanups (old_chain);
634 }
635 }
636 }
637
638 /* Enum strings for "set|show follow-exec-mode". */
639
640 static const char follow_exec_mode_new[] = "new";
641 static const char follow_exec_mode_same[] = "same";
642 static const char *follow_exec_mode_names[] =
643 {
644 follow_exec_mode_new,
645 follow_exec_mode_same,
646 NULL,
647 };
648
649 static const char *follow_exec_mode_string = follow_exec_mode_same;
650 static void
651 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
652 struct cmd_list_element *c, const char *value)
653 {
654 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
655 }
656
657 /* EXECD_PATHNAME is assumed to be non-NULL. */
658
659 static void
660 follow_exec (ptid_t pid, char *execd_pathname)
661 {
662 struct target_ops *tgt;
663 struct thread_info *th = inferior_thread ();
664 struct inferior *inf = current_inferior ();
665
666 /* This is an exec event that we actually wish to pay attention to.
667 Refresh our symbol table to the newly exec'd program, remove any
668 momentary bp's, etc.
669
670 If there are breakpoints, they aren't really inserted now,
671 since the exec() transformed our inferior into a fresh set
672 of instructions.
673
674 We want to preserve symbolic breakpoints on the list, since
675 we have hopes that they can be reset after the new a.out's
676 symbol table is read.
677
678 However, any "raw" breakpoints must be removed from the list
679 (e.g., the solib bp's), since their address is probably invalid
680 now.
681
682 And, we DON'T want to call delete_breakpoints() here, since
683 that may write the bp's "shadow contents" (the instruction
684 value that was overwritten with a TRAP instruction). Since
685 we now have a new a.out, those shadow contents aren't valid. */
686
687 mark_breakpoints_out ();
688
689 update_breakpoints_after_exec ();
690
691 /* If there was one, it's gone now. We cannot truly step-to-next
692 statement through an exec(). */
693 th->step_resume_breakpoint = NULL;
694 th->step_range_start = 0;
695 th->step_range_end = 0;
696
697 /* The target reports the exec event to the main thread, even if
698 some other thread does the exec, and even if the main thread was
699 already stopped --- if debugging in non-stop mode, it's possible
700 the user had the main thread held stopped in the previous image
701 --- release it now. This is the same behavior as step-over-exec
702 with scheduler-locking on in all-stop mode. */
703 th->stop_requested = 0;
704
705 /* What is this a.out's name? */
706 printf_unfiltered (_("%s is executing new program: %s\n"),
707 target_pid_to_str (inferior_ptid),
708 execd_pathname);
709
710 /* We've followed the inferior through an exec. Therefore, the
711 inferior has essentially been killed & reborn. */
712
713 gdb_flush (gdb_stdout);
714
715 breakpoint_init_inferior (inf_execd);
716
717 if (gdb_sysroot && *gdb_sysroot)
718 {
719 char *name = alloca (strlen (gdb_sysroot)
720 + strlen (execd_pathname)
721 + 1);
722 strcpy (name, gdb_sysroot);
723 strcat (name, execd_pathname);
724 execd_pathname = name;
725 }
726
727 /* Reset the shared library package. This ensures that we get a
728 shlib event when the child reaches "_start", at which point the
729 dld will have had a chance to initialize the child. */
730 /* Also, loading a symbol file below may trigger symbol lookups, and
731 we don't want those to be satisfied by the libraries of the
732 previous incarnation of this process. */
733 no_shared_libraries (NULL, 0);
734
735 if (follow_exec_mode_string == follow_exec_mode_new)
736 {
737 struct program_space *pspace;
738 struct inferior *new_inf;
739
740 /* The user wants to keep the old inferior and program spaces
741 around. Create a new fresh one, and switch to it. */
742
743 inf = add_inferior (current_inferior ()->pid);
744 pspace = add_program_space (maybe_new_address_space ());
745 inf->pspace = pspace;
746 inf->aspace = pspace->aspace;
747
748 exit_inferior_num_silent (current_inferior ()->num);
749
750 set_current_inferior (inf);
751 set_current_program_space (pspace);
752 }
753
754 gdb_assert (current_program_space == inf->pspace);
755
756 /* That a.out is now the one to use. */
757 exec_file_attach (execd_pathname, 0);
758
759 /* Load the main file's symbols. */
760 symbol_file_add_main (execd_pathname, 0);
761
762 #ifdef SOLIB_CREATE_INFERIOR_HOOK
763 SOLIB_CREATE_INFERIOR_HOOK (PIDGET (inferior_ptid));
764 #else
765 solib_create_inferior_hook (0);
766 #endif
767
768 jit_inferior_created_hook ();
769
770 /* Reinsert all breakpoints. (Those which were symbolic have
771 been reset to the proper address in the new a.out, thanks
772 to symbol_file_command...) */
773 insert_breakpoints ();
774
775 /* The next resume of this inferior should bring it to the shlib
776 startup breakpoints. (If the user had also set bp's on
777 "main" from the old (parent) process, then they'll auto-
778 matically get reset there in the new process.) */
779 }
780
781 /* Non-zero if we are just simulating a single-step. This is needed
782 because we cannot remove the breakpoints in the inferior process
783 until after the `wait' in `wait_for_inferior'. */
784 static int singlestep_breakpoints_inserted_p = 0;
785
786 /* The thread we inserted single-step breakpoints for. */
787 static ptid_t singlestep_ptid;
788
789 /* PC when we started this single-step. */
790 static CORE_ADDR singlestep_pc;
791
792 /* If another thread hit the singlestep breakpoint, we save the original
793 thread here so that we can resume single-stepping it later. */
794 static ptid_t saved_singlestep_ptid;
795 static int stepping_past_singlestep_breakpoint;
796
797 /* If not equal to null_ptid, this means that after stepping over a breakpoint
798 is finished, we need to switch to deferred_step_ptid, and step it.
799
800 The use case is when one thread has hit a breakpoint, and then the user
801 has switched to another thread and issued 'step'. We need to step over
802 the breakpoint in the thread which hit the breakpoint, but then continue
803 stepping the thread the user has selected. */
804 static ptid_t deferred_step_ptid;
805 \f
806 /* Displaced stepping. */
807
808 /* In non-stop debugging mode, we must take special care to manage
809 breakpoints properly; in particular, the traditional strategy for
810 stepping a thread past a breakpoint it has hit is unsuitable.
811 'Displaced stepping' is a tactic for stepping one thread past a
812 breakpoint it has hit while ensuring that other threads running
813 concurrently will hit the breakpoint as they should.
814
815 The traditional way to step a thread T off a breakpoint in a
816 multi-threaded program in all-stop mode is as follows:
817
818 a0) Initially, all threads are stopped, and breakpoints are not
819 inserted.
820 a1) We single-step T, leaving breakpoints uninserted.
821 a2) We insert breakpoints, and resume all threads.
822
823 In non-stop debugging, however, this strategy is unsuitable: we
824 don't want to have to stop all threads in the system in order to
825 continue or step T past a breakpoint. Instead, we use displaced
826 stepping:
827
828 n0) Initially, T is stopped, other threads are running, and
829 breakpoints are inserted.
830 n1) We copy the instruction "under" the breakpoint to a separate
831 location, outside the main code stream, making any adjustments
832 to the instruction, register, and memory state as directed by
833 T's architecture.
834 n2) We single-step T over the instruction at its new location.
835 n3) We adjust the resulting register and memory state as directed
836 by T's architecture. This includes resetting T's PC to point
837 back into the main instruction stream.
838 n4) We resume T.
839
840 This approach depends on the following gdbarch methods:
841
842 - gdbarch_max_insn_length and gdbarch_displaced_step_location
843 indicate where to copy the instruction, and how much space must
844 be reserved there. We use these in step n1.
845
846 - gdbarch_displaced_step_copy_insn copies an instruction to a new
847 address, and makes any necessary adjustments to the instruction,
848 register contents, and memory. We use this in step n1.
849
850 - gdbarch_displaced_step_fixup adjusts registers and memory after
851 we have successfully single-stepped the instruction, to yield the
852 same effect the instruction would have had if we had executed it
853 at its original address. We use this in step n3.
854
855 - gdbarch_displaced_step_free_closure provides cleanup.
856
857 The gdbarch_displaced_step_copy_insn and
858 gdbarch_displaced_step_fixup functions must be written so that
859 copying an instruction with gdbarch_displaced_step_copy_insn,
860 single-stepping across the copied instruction, and then applying
861 gdbarch_displaced_step_fixup should have the same effects on the
862 thread's memory and registers as stepping the instruction in place
863 would have. Exactly which responsibilities fall to the copy and
864 which fall to the fixup is up to the author of those functions.
865
866 See the comments in gdbarch.sh for details.
867
868 Note that displaced stepping and software single-step cannot
869 currently be used in combination, although with some care I think
870 they could be made to. Software single-step works by placing
871 breakpoints on all possible subsequent instructions; if the
872 displaced instruction is a PC-relative jump, those breakpoints
873 could fall in very strange places --- on pages that aren't
874 executable, or at addresses that are not proper instruction
875 boundaries. (We do generally let other threads run while we wait
876 to hit the software single-step breakpoint, and they might
877 encounter such a corrupted instruction.) One way to work around
878 this would be to have gdbarch_displaced_step_copy_insn fully
879 simulate the effect of PC-relative instructions (and return NULL)
880 on architectures that use software single-stepping.
881
882 In non-stop mode, we can have independent and simultaneous step
883 requests, so more than one thread may need to simultaneously step
884 over a breakpoint. The current implementation assumes there is
885 only one scratch space per process. In this case, we have to
886 serialize access to the scratch space. If thread A wants to step
887 over a breakpoint, but we are currently waiting for some other
888 thread to complete a displaced step, we leave thread A stopped and
889 place it in the displaced_step_request_queue. Whenever a displaced
890 step finishes, we pick the next thread in the queue and start a new
891 displaced step operation on it. See displaced_step_prepare and
892 displaced_step_fixup for details. */
893
894 /* If this is not null_ptid, this is the thread carrying out a
895 displaced single-step. This thread's state will require fixing up
896 once it has completed its step. */
897 static ptid_t displaced_step_ptid;
898
899 struct displaced_step_request
900 {
901 ptid_t ptid;
902 struct displaced_step_request *next;
903 };
904
905 /* A queue of pending displaced stepping requests. */
906 struct displaced_step_request *displaced_step_request_queue;
907
908 /* The architecture the thread had when we stepped it. */
909 static struct gdbarch *displaced_step_gdbarch;
910
911 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
912 for post-step cleanup. */
913 static struct displaced_step_closure *displaced_step_closure;
914
915 /* The address of the original instruction, and the copy we made. */
916 static CORE_ADDR displaced_step_original, displaced_step_copy;
917
918 /* Saved contents of copy area. */
919 static gdb_byte *displaced_step_saved_copy;
920
921 /* Enum strings for "set|show displaced-stepping". */
922
923 static const char can_use_displaced_stepping_auto[] = "auto";
924 static const char can_use_displaced_stepping_on[] = "on";
925 static const char can_use_displaced_stepping_off[] = "off";
926 static const char *can_use_displaced_stepping_enum[] =
927 {
928 can_use_displaced_stepping_auto,
929 can_use_displaced_stepping_on,
930 can_use_displaced_stepping_off,
931 NULL,
932 };
933
934 /* If ON, and the architecture supports it, GDB will use displaced
935 stepping to step over breakpoints. If OFF, or if the architecture
936 doesn't support it, GDB will instead use the traditional
937 hold-and-step approach. If AUTO (which is the default), GDB will
938 decide which technique to use to step over breakpoints depending on
939 which of all-stop or non-stop mode is active --- displaced stepping
940 in non-stop mode; hold-and-step in all-stop mode. */
941
942 static const char *can_use_displaced_stepping =
943 can_use_displaced_stepping_auto;
944
945 static void
946 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
947 struct cmd_list_element *c,
948 const char *value)
949 {
950 if (can_use_displaced_stepping == can_use_displaced_stepping_auto)
951 fprintf_filtered (file, _("\
952 Debugger's willingness to use displaced stepping to step over \
953 breakpoints is %s (currently %s).\n"),
954 value, non_stop ? "on" : "off");
955 else
956 fprintf_filtered (file, _("\
957 Debugger's willingness to use displaced stepping to step over \
958 breakpoints is %s.\n"), value);
959 }
960
961 /* Return non-zero if displaced stepping can/should be used to step
962 over breakpoints. */
963
964 static int
965 use_displaced_stepping (struct gdbarch *gdbarch)
966 {
967 return (((can_use_displaced_stepping == can_use_displaced_stepping_auto
968 && non_stop)
969 || can_use_displaced_stepping == can_use_displaced_stepping_on)
970 && gdbarch_displaced_step_copy_insn_p (gdbarch)
971 && !RECORD_IS_USED);
972 }
973
974 /* Clean out any stray displaced stepping state. */
975 static void
976 displaced_step_clear (void)
977 {
978 /* Indicate that there is no cleanup pending. */
979 displaced_step_ptid = null_ptid;
980
981 if (displaced_step_closure)
982 {
983 gdbarch_displaced_step_free_closure (displaced_step_gdbarch,
984 displaced_step_closure);
985 displaced_step_closure = NULL;
986 }
987 }
988
989 static void
990 displaced_step_clear_cleanup (void *ignore)
991 {
992 displaced_step_clear ();
993 }
994
995 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
996 void
997 displaced_step_dump_bytes (struct ui_file *file,
998 const gdb_byte *buf,
999 size_t len)
1000 {
1001 int i;
1002
1003 for (i = 0; i < len; i++)
1004 fprintf_unfiltered (file, "%02x ", buf[i]);
1005 fputs_unfiltered ("\n", file);
1006 }
1007
1008 /* Prepare to single-step, using displaced stepping.
1009
1010 Note that we cannot use displaced stepping when we have a signal to
1011 deliver. If we have a signal to deliver and an instruction to step
1012 over, then after the step, there will be no indication from the
1013 target whether the thread entered a signal handler or ignored the
1014 signal and stepped over the instruction successfully --- both cases
1015 result in a simple SIGTRAP. In the first case we mustn't do a
1016 fixup, and in the second case we must --- but we can't tell which.
1017 Comments in the code for 'random signals' in handle_inferior_event
1018 explain how we handle this case instead.
1019
1020 Returns 1 if preparing was successful -- this thread is going to be
1021 stepped now; or 0 if displaced stepping this thread got queued. */
1022 static int
1023 displaced_step_prepare (ptid_t ptid)
1024 {
1025 struct cleanup *old_cleanups, *ignore_cleanups;
1026 struct regcache *regcache = get_thread_regcache (ptid);
1027 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1028 CORE_ADDR original, copy;
1029 ULONGEST len;
1030 struct displaced_step_closure *closure;
1031
1032 /* We should never reach this function if the architecture does not
1033 support displaced stepping. */
1034 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1035
1036 /* For the first cut, we're displaced stepping one thread at a
1037 time. */
1038
1039 if (!ptid_equal (displaced_step_ptid, null_ptid))
1040 {
1041 /* Already waiting for a displaced step to finish. Defer this
1042 request and place in queue. */
1043 struct displaced_step_request *req, *new_req;
1044
1045 if (debug_displaced)
1046 fprintf_unfiltered (gdb_stdlog,
1047 "displaced: defering step of %s\n",
1048 target_pid_to_str (ptid));
1049
1050 new_req = xmalloc (sizeof (*new_req));
1051 new_req->ptid = ptid;
1052 new_req->next = NULL;
1053
1054 if (displaced_step_request_queue)
1055 {
1056 for (req = displaced_step_request_queue;
1057 req && req->next;
1058 req = req->next)
1059 ;
1060 req->next = new_req;
1061 }
1062 else
1063 displaced_step_request_queue = new_req;
1064
1065 return 0;
1066 }
1067 else
1068 {
1069 if (debug_displaced)
1070 fprintf_unfiltered (gdb_stdlog,
1071 "displaced: stepping %s now\n",
1072 target_pid_to_str (ptid));
1073 }
1074
1075 displaced_step_clear ();
1076
1077 old_cleanups = save_inferior_ptid ();
1078 inferior_ptid = ptid;
1079
1080 original = regcache_read_pc (regcache);
1081
1082 copy = gdbarch_displaced_step_location (gdbarch);
1083 len = gdbarch_max_insn_length (gdbarch);
1084
1085 /* Save the original contents of the copy area. */
1086 displaced_step_saved_copy = xmalloc (len);
1087 ignore_cleanups = make_cleanup (free_current_contents,
1088 &displaced_step_saved_copy);
1089 read_memory (copy, displaced_step_saved_copy, len);
1090 if (debug_displaced)
1091 {
1092 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1093 paddress (gdbarch, copy));
1094 displaced_step_dump_bytes (gdb_stdlog, displaced_step_saved_copy, len);
1095 };
1096
1097 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1098 original, copy, regcache);
1099
1100 /* We don't support the fully-simulated case at present. */
1101 gdb_assert (closure);
1102
1103 /* Save the information we need to fix things up if the step
1104 succeeds. */
1105 displaced_step_ptid = ptid;
1106 displaced_step_gdbarch = gdbarch;
1107 displaced_step_closure = closure;
1108 displaced_step_original = original;
1109 displaced_step_copy = copy;
1110
1111 make_cleanup (displaced_step_clear_cleanup, 0);
1112
1113 /* Resume execution at the copy. */
1114 regcache_write_pc (regcache, copy);
1115
1116 discard_cleanups (ignore_cleanups);
1117
1118 do_cleanups (old_cleanups);
1119
1120 if (debug_displaced)
1121 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1122 paddress (gdbarch, copy));
1123
1124 return 1;
1125 }
1126
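/* Write LEN bytes from MYADDR to target memory at MEMADDR, in the
   context of thread PTID. The current inferior_ptid is saved and
   restored around the write. */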
1127 static void
1128 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
1129 {
1130 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1131 inferior_ptid = ptid;
1132 write_memory (memaddr, myaddr, len);
1133 do_cleanups (ptid_cleanup);
1134 }
1135
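/* Fix up the state of EVENT_PTID after it finished a displaced step.
   Restore the original contents of the scratch area; if the copied
   instruction completed (SIGNAL is TARGET_SIGNAL_TRAP), let the
   architecture fix up registers and memory, otherwise just relocate
   the PC back into the original instruction stream. Then start the
   next queued displaced-stepping request, if any. */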
1136 static void
1137 displaced_step_fixup (ptid_t event_ptid, enum target_signal signal)
1138 {
1139 struct cleanup *old_cleanups;
1140
1141 /* Was this event for the pid we displaced? */
1142 if (ptid_equal (displaced_step_ptid, null_ptid)
1143 || ! ptid_equal (displaced_step_ptid, event_ptid))
1144 return;
1145
1146 old_cleanups = make_cleanup (displaced_step_clear_cleanup, 0);
1147
1148 /* Restore the contents of the copy area. */
1149 {
1150 ULONGEST len = gdbarch_max_insn_length (displaced_step_gdbarch);
1151 write_memory_ptid (displaced_step_ptid, displaced_step_copy,
1152 displaced_step_saved_copy, len);
1153 if (debug_displaced)
1154 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s\n",
1155 paddress (displaced_step_gdbarch,
1156 displaced_step_copy));
1157 }
1158
1159 /* Did the instruction complete successfully? */
1160 if (signal == TARGET_SIGNAL_TRAP)
1161 {
1162 /* Fix up the resulting state. */
1163 gdbarch_displaced_step_fixup (displaced_step_gdbarch,
1164 displaced_step_closure,
1165 displaced_step_original,
1166 displaced_step_copy,
1167 get_thread_regcache (displaced_step_ptid));
1168 }
1169 else
1170 {
1171 /* Since the instruction didn't complete, all we can do is
1172 relocate the PC. */
1173 struct regcache *regcache = get_thread_regcache (event_ptid);
1174 CORE_ADDR pc = regcache_read_pc (regcache);
1175 pc = displaced_step_original + (pc - displaced_step_copy);
1176 regcache_write_pc (regcache, pc);
1177 }
1178
1179 do_cleanups (old_cleanups);
1180
1181 displaced_step_ptid = null_ptid;
1182
1183 /* Are there any pending displaced stepping requests? If so, run
1184 one now. */
1185 while (displaced_step_request_queue)
1186 {
1187 struct displaced_step_request *head;
1188 ptid_t ptid;
1189 struct regcache *regcache;
1190 struct gdbarch *gdbarch;
1191 CORE_ADDR actual_pc;
1192 struct address_space *aspace;
1193
1194 head = displaced_step_request_queue;
1195 ptid = head->ptid;
1196 displaced_step_request_queue = head->next;
1197 xfree (head);
1198
1199 context_switch (ptid);
1200
1201 regcache = get_thread_regcache (ptid);
1202 actual_pc = regcache_read_pc (regcache);
1203 aspace = get_regcache_aspace (regcache);
1204
1205 if (breakpoint_here_p (aspace, actual_pc))
1206 {
1207 if (debug_displaced)
1208 fprintf_unfiltered (gdb_stdlog,
1209 "displaced: stepping queued %s now\n",
1210 target_pid_to_str (ptid));
1211
1212 displaced_step_prepare (ptid);
1213
1214 gdbarch = get_regcache_arch (regcache);
1215
1216 if (debug_displaced)
1217 {
1218 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1219 gdb_byte buf[4];
1220
1221 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1222 paddress (gdbarch, actual_pc));
1223 read_memory (actual_pc, buf, sizeof (buf));
1224 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1225 }
1226
1227 if (gdbarch_displaced_step_hw_singlestep
1228 (gdbarch, displaced_step_closure))
1229 target_resume (ptid, 1, TARGET_SIGNAL_0);
1230 else
1231 target_resume (ptid, 0, TARGET_SIGNAL_0);
1232
1233 /* Done, we're stepping a thread. */
1234 break;
1235 }
1236 else
1237 {
1238 int step;
1239 struct thread_info *tp = inferior_thread ();
1240
1241 /* The breakpoint we were sitting under has since been
1242 removed. */
1243 tp->trap_expected = 0;
1244
1245 /* Go back to what we were trying to do. */
1246 step = currently_stepping (tp);
1247
1248 if (debug_displaced)
1249 fprintf_unfiltered (gdb_stdlog, "breakpoint is gone %s: step(%d)\n",
1250 target_pid_to_str (tp->ptid), step);
1251
1252 target_resume (ptid, step, TARGET_SIGNAL_0);
1253 tp->stop_signal = TARGET_SIGNAL_0;
1254
1255 /* This request was discarded. See if there's any other
1256 thread waiting for its turn. */
1257 }
1258 }
1259 }
1260
1261 /* Update global variables holding ptids to hold NEW_PTID if they were
1262 holding OLD_PTID. */
1263 static void
1264 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1265 {
1266 struct displaced_step_request *it;
1267
1268 if (ptid_equal (inferior_ptid, old_ptid))
1269 inferior_ptid = new_ptid;
1270
1271 if (ptid_equal (singlestep_ptid, old_ptid))
1272 singlestep_ptid = new_ptid;
1273
1274 if (ptid_equal (displaced_step_ptid, old_ptid))
1275 displaced_step_ptid = new_ptid;
1276
1277 if (ptid_equal (deferred_step_ptid, old_ptid))
1278 deferred_step_ptid = new_ptid;
1279
1280 for (it = displaced_step_request_queue; it; it = it->next)
1281 if (ptid_equal (it->ptid, old_ptid))
1282 it->ptid = new_ptid;
1283 }
1284
1285 \f
1286 /* Resuming. */
1287
1288 /* Things to clean up if we QUIT out of resume (). */
1289 static void
1290 resume_cleanups (void *ignore)
1291 {
1292 normal_stop ();
1293 }
1294
1295 static const char schedlock_off[] = "off";
1296 static const char schedlock_on[] = "on";
1297 static const char schedlock_step[] = "step";
1298 static const char *scheduler_enums[] = {
1299 schedlock_off,
1300 schedlock_on,
1301 schedlock_step,
1302 NULL
1303 };
1304 static const char *scheduler_mode = schedlock_off;
1305 static void
1306 show_scheduler_mode (struct ui_file *file, int from_tty,
1307 struct cmd_list_element *c, const char *value)
1308 {
1309 fprintf_filtered (file, _("\
1310 Mode for locking scheduler during execution is \"%s\".\n"),
1311 value);
1312 }
1313
1314 static void
1315 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1316 {
1317 if (!target_can_lock_scheduler)
1318 {
1319 scheduler_mode = schedlock_off;
1320 error (_("Target '%s' cannot support this command."), target_shortname);
1321 }
1322 }
1323
1324 /* True if execution commands resume all threads of all processes by
1325 default; otherwise, resume only threads of the current inferior
1326 process. */
1327 int sched_multi = 0;
1328
1329 /* Try to set up for software single stepping over the specified location.
1330 Return 1 if target_resume() should use hardware single step.
1331
1332 GDBARCH the current gdbarch.
1333 PC the location to step over. */
1334
1335 static int
1336 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1337 {
1338 int hw_step = 1;
1339
1340 if (gdbarch_software_single_step_p (gdbarch)
1341 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1342 {
1343 hw_step = 0;
1344 /* Do not pull these breakpoints until after a `wait' in
1345 `wait_for_inferior' */
1346 singlestep_breakpoints_inserted_p = 1;
1347 singlestep_ptid = inferior_ptid;
1348 singlestep_pc = pc;
1349 }
1350 return hw_step;
1351 }
1352
1353 /* Resume the inferior, but allow a QUIT. This is useful if the user
1354 wants to interrupt some lengthy single-stepping operation
1355 (for child processes, the SIGINT goes to the inferior, and so
1356 we get a SIGINT random_signal, but for remote debugging and perhaps
1357 other targets, that's not true).
1358
1359 STEP nonzero if we should step (zero to continue instead).
1360 SIG is the signal to give the inferior (zero for none). */
1361 void
1362 resume (int step, enum target_signal sig)
1363 {
1364 int should_resume = 1;
1365 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1366 struct regcache *regcache = get_current_regcache ();
1367 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1368 struct thread_info *tp = inferior_thread ();
1369 CORE_ADDR pc = regcache_read_pc (regcache);
1370 struct address_space *aspace = get_regcache_aspace (regcache);
1371
1372 QUIT;
1373
1374 if (debug_infrun)
1375 fprintf_unfiltered (gdb_stdlog,
1376 "infrun: resume (step=%d, signal=%d), "
1377 "trap_expected=%d\n",
1378 step, sig, tp->trap_expected);
1379
1380 /* Some targets (e.g. Solaris x86) have a kernel bug when stepping
1381 over an instruction that causes a page fault without triggering
1382 a hardware watchpoint. The kernel properly notices that it shouldn't
1383 stop, because the hardware watchpoint is not triggered, but it forgets
1384 the step request and continues the program normally.
1385 Work around the problem by removing hardware watchpoints if a step is
1386 requested; GDB will check for a hardware watchpoint trigger after the
1387 step anyway. */
1388 if (CANNOT_STEP_HW_WATCHPOINTS && step)
1389 remove_hw_watchpoints ();
1390
1391
1392 /* Normally, by the time we reach `resume', the breakpoints are either
1393 removed or inserted, as appropriate. The exception is if we're sitting
1394 at a permanent breakpoint; we need to step over it, but permanent
1395 breakpoints can't be removed. So we have to test for it here. */
1396 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
1397 {
1398 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1399 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1400 else
1401 error (_("\
1402 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1403 how to step past a permanent breakpoint on this architecture. Try using\n\
1404 a command like `return' or `jump' to continue execution."));
1405 }
1406
1407 /* If enabled, step over breakpoints by executing a copy of the
1408 instruction at a different address.
1409
1410 We can't use displaced stepping when we have a signal to deliver;
1411 the comments for displaced_step_prepare explain why. The
1412 comments in handle_inferior_event for dealing with 'random
1413 signals' explain what we do instead. */
1414 if (use_displaced_stepping (gdbarch)
1415 && (tp->trap_expected
1416 || (step && gdbarch_software_single_step_p (gdbarch)))
1417 && sig == TARGET_SIGNAL_0)
1418 {
1419 if (!displaced_step_prepare (inferior_ptid))
1420 {
1421 /* Got placed in displaced stepping queue. Will be resumed
1422 later when all the currently queued displaced stepping
1423 requests finish. The thread is not executing at this point,
1424 and the call to set_executing will be made later. But we
1425 need to call set_running here, since from the frontend's point of view,
1426 the thread is running. */
1427 set_running (inferior_ptid, 1);
1428 discard_cleanups (old_cleanups);
1429 return;
1430 }
1431
1432 step = gdbarch_displaced_step_hw_singlestep
1433 (gdbarch, displaced_step_closure);
1434 }
1435
1436 /* Do we need to do it the hard way, w/temp breakpoints? */
1437 else if (step)
1438 step = maybe_software_singlestep (gdbarch, pc);
1439
1440 if (should_resume)
1441 {
1442 ptid_t resume_ptid;
1443
1444 /* If STEP is set, it's a request to use hardware stepping
1445 facilities. But in that case, we should never
1446 use a singlestep breakpoint. */
1447 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1448
1449 /* Decide the set of threads to ask the target to resume. Start
1450 by assuming everything will be resumed, then narrow the set
1451 by applying increasingly restricting conditions. */
1452
1453 /* By default, resume all threads of all processes. */
1454 resume_ptid = RESUME_ALL;
1455
1456 /* Maybe resume only all threads of the current process. */
1457 if (!sched_multi && target_supports_multi_process ())
1458 {
1459 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1460 }
1461
1462 /* Maybe resume a single thread after all. */
1463 if (singlestep_breakpoints_inserted_p
1464 && stepping_past_singlestep_breakpoint)
1465 {
1466 /* The situation here is as follows. In thread T1 we wanted to
1467 single-step. Lacking hardware single-stepping we've
1468 set a breakpoint at the PC of the next instruction -- call it
1469 P. After resuming, we've hit that breakpoint in thread T2.
1470 Now we've removed the original breakpoint, inserted a breakpoint
1471 at P+1, and are stepping to advance T2 past the breakpoint.
1472 We need to step only T2, as if T1 is allowed to freely run,
1473 it can run past P, and if other threads are allowed to run,
1474 they can hit the breakpoint at P+1, and nested hits of single-step
1475 breakpoints are not something we'd want -- that's complicated
1476 to support, and has no value. */
1477 resume_ptid = inferior_ptid;
1478 }
1479 else if ((step || singlestep_breakpoints_inserted_p)
1480 && tp->trap_expected)
1481 {
1482 /* We're allowing a thread to run past a breakpoint it has
1483 hit, by single-stepping the thread with the breakpoint
1484 removed. In that case, we need to single-step only this
1485 thread, and keep others stopped, as they can miss this
1486 breakpoint if allowed to run.
1487
1488 The current code actually removes all breakpoints when
1489 doing this, not just the one being stepped over, so if we
1490 let other threads run, we can actually miss any
1491 breakpoint, not just the one at PC. */
1492 resume_ptid = inferior_ptid;
1493 }
1494 else if (non_stop)
1495 {
1496 /* With non-stop mode on, threads are always handled
1497 individually. */
1498 resume_ptid = inferior_ptid;
1499 }
1500 else if ((scheduler_mode == schedlock_on)
1501 || (scheduler_mode == schedlock_step
1502 && (step || singlestep_breakpoints_inserted_p)))
1503 {
1504 /* User-settable 'scheduler' mode requires solo thread resume. */
1505 resume_ptid = inferior_ptid;
1506 }
1507
1508 if (gdbarch_cannot_step_breakpoint (gdbarch))
1509 {
1510 /* Most targets can step a breakpoint instruction, thus
1511 executing it normally. But if this one cannot, just
1512 continue and we will hit it anyway. */
1513 if (step && breakpoint_inserted_here_p (aspace, pc))
1514 step = 0;
1515 }
1516
1517 if (debug_displaced
1518 && use_displaced_stepping (gdbarch)
1519 && tp->trap_expected)
1520 {
1521 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1522 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1523 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1524 gdb_byte buf[4];
1525
1526 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1527 paddress (resume_gdbarch, actual_pc));
1528 read_memory (actual_pc, buf, sizeof (buf));
1529 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1530 }
1531
1532 /* Install inferior's terminal modes. */
1533 target_terminal_inferior ();
1534
1535 /* Avoid confusing the next resume, if the next stop/resume
1536 happens to apply to another thread. */
1537 tp->stop_signal = TARGET_SIGNAL_0;
1538
1539 target_resume (resume_ptid, step, sig);
1540 }
1541
1542 discard_cleanups (old_cleanups);
1543 }
1544 \f
1545 /* Proceeding. */
1546
1547 /* Clear out all variables saying what to do when inferior is continued.
1548 First do this, then set the ones you want, then call `proceed'. */
1549
1550 static void
1551 clear_proceed_status_thread (struct thread_info *tp)
1552 {
1553 if (debug_infrun)
1554 fprintf_unfiltered (gdb_stdlog,
1555 "infrun: clear_proceed_status_thread (%s)\n",
1556 target_pid_to_str (tp->ptid));
1557
1558 tp->trap_expected = 0;
1559 tp->step_range_start = 0;
1560 tp->step_range_end = 0;
1561 tp->step_frame_id = null_frame_id;
1562 tp->step_stack_frame_id = null_frame_id;
1563 tp->step_over_calls = STEP_OVER_UNDEBUGGABLE;
1564 tp->stop_requested = 0;
1565
1566 tp->stop_step = 0;
1567
1568 tp->proceed_to_finish = 0;
1569
1570 /* Discard any remaining commands or status from previous stop. */
1571 bpstat_clear (&tp->stop_bpstat);
1572 }
1573
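/* Callback for iterate_over_threads: clear the proceed status of
   every thread that has not exited. */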
1574 static int
1575 clear_proceed_status_callback (struct thread_info *tp, void *data)
1576 {
1577 if (is_exited (tp->ptid))
1578 return 0;
1579
1580 clear_proceed_status_thread (tp);
1581 return 0;
1582 }
1583
1584 void
1585 clear_proceed_status (void)
1586 {
1587 if (!non_stop)
1588 {
1589 /* In all-stop mode, delete the per-thread status of all
1590 threads, even if inferior_ptid is null_ptid, there may be
1591 threads on the list. E.g., we may be launching a new
1592 process, while selecting the executable. */
1593 iterate_over_threads (clear_proceed_status_callback, NULL);
1594 }
1595
1596 if (!ptid_equal (inferior_ptid, null_ptid))
1597 {
1598 struct inferior *inferior;
1599
1600 if (non_stop)
1601 {
1602 /* If in non-stop mode, only delete the per-thread status of
1603 the current thread. */
1604 clear_proceed_status_thread (inferior_thread ());
1605 }
1606
1607 inferior = current_inferior ();
1608 inferior->stop_soon = NO_STOP_QUIETLY;
1609 }
1610
1611 stop_after_trap = 0;
1612
1613 observer_notify_about_to_proceed ();
1614
1615 if (stop_registers)
1616 {
1617 regcache_xfree (stop_registers);
1618 stop_registers = NULL;
1619 }
1620 }
1621
1622 /* Check the current thread against the thread that reported the most recent
1623 event. If a step-over is required return TRUE and set the current thread
1624 to the old thread. Otherwise return FALSE.
1625
1626 This should be suitable for any targets that support threads. */
1627
1628 static int
1629 prepare_to_proceed (int step)
1630 {
1631 ptid_t wait_ptid;
1632 struct target_waitstatus wait_status;
1633 int schedlock_enabled;
1634
1635 /* With non-stop mode on, threads are always handled individually. */
1636 gdb_assert (! non_stop);
1637
1638 /* Get the last target status returned by target_wait(). */
1639 get_last_target_status (&wait_ptid, &wait_status);
1640
1641 /* Make sure we were stopped at a breakpoint. */
1642 if (wait_status.kind != TARGET_WAITKIND_STOPPED
1643 || wait_status.value.sig != TARGET_SIGNAL_TRAP)
1644 {
1645 return 0;
1646 }
1647
1648 schedlock_enabled = (scheduler_mode == schedlock_on
1649 || (scheduler_mode == schedlock_step
1650 && step));
1651
1652 /* Don't switch over to WAIT_PTID if scheduler locking is on. */
1653 if (schedlock_enabled)
1654 return 0;
1655
1656 /* Don't switch over if we're about to resume some process
1657 other than WAIT_PTID's, and schedule-multiple is off. */
1658 if (!sched_multi
1659 && ptid_get_pid (wait_ptid) != ptid_get_pid (inferior_ptid))
1660 return 0;
1661
1662 /* Switched over from WAIT_PTID. */
1663 if (!ptid_equal (wait_ptid, minus_one_ptid)
1664 && !ptid_equal (inferior_ptid, wait_ptid))
1665 {
1666 struct regcache *regcache = get_thread_regcache (wait_ptid);
1667
1668 if (breakpoint_here_p (get_regcache_aspace (regcache),
1669 regcache_read_pc (regcache)))
1670 {
1671 /* If stepping, remember current thread to switch back to. */
1672 if (step)
1673 deferred_step_ptid = inferior_ptid;
1674
1675 /* Switch back to the WAIT_PTID thread. */
1676 switch_to_thread (wait_ptid);
1677
1678 /* We return 1 to indicate that there is a breakpoint here,
1679 so we need to step over it before continuing to avoid
1680 hitting it straight away. */
1681 return 1;
1682 }
1683 }
1684
1685 return 0;
1686 }
1687
1688 /* Basic routine for continuing the program in various fashions.
1689
1690 ADDR is the address to resume at, or -1 for resume where stopped.
1691 SIGGNAL is the signal to give it, or 0 for none,
1692 or -1 to act according to how it stopped.
1693 STEP is nonzero if we should trap after one instruction.
1694 A STEP value of -1 means return after that and print nothing.
1695 You should probably set various step_... variables
1696 before calling here, if you are stepping.
1697
1698 You should call clear_proceed_status before calling proceed. */
1699
1700 void
1701 proceed (CORE_ADDR addr, enum target_signal siggnal, int step)
1702 {
1703 struct regcache *regcache;
1704 struct gdbarch *gdbarch;
1705 struct thread_info *tp;
1706 CORE_ADDR pc;
1707 struct address_space *aspace;
1708 int oneproc = 0;
1709
1710 /* If we're stopped at a fork/vfork, follow the branch set by the
1711 "set follow-fork-mode" command; otherwise, we'll just proceed
1712 resuming the current thread. */
1713 if (!follow_fork ())
1714 {
1715 /* The target for some reason decided not to resume. */
1716 normal_stop ();
1717 return;
1718 }
1719
1720 regcache = get_current_regcache ();
1721 gdbarch = get_regcache_arch (regcache);
1722 aspace = get_regcache_aspace (regcache);
1723 pc = regcache_read_pc (regcache);
1724
1725 if (step > 0)
1726 step_start_function = find_pc_function (pc);
1727 if (step < 0)
1728 stop_after_trap = 1;
1729
1730 if (addr == (CORE_ADDR) -1)
1731 {
1732 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
1733 && execution_direction != EXEC_REVERSE)
1734 /* There is a breakpoint at the address we will resume at,
1735 step one instruction before inserting breakpoints so that
1736 we do not stop right away (and report a second hit at this
1737 breakpoint).
1738
1739 Note, we don't do this in reverse, because we won't
1740 actually be executing the breakpoint insn anyway.
1741 We'll be (un-)executing the previous instruction. */
1742
1743 oneproc = 1;
1744 else if (gdbarch_single_step_through_delay_p (gdbarch)
1745 && gdbarch_single_step_through_delay (gdbarch,
1746 get_current_frame ()))
1747 /* We stepped onto an instruction that needs to be stepped
1748 again before re-inserting the breakpoint; do so. */
1749 oneproc = 1;
1750 }
1751 else
1752 {
1753 regcache_write_pc (regcache, addr);
1754 }
1755
1756 if (debug_infrun)
1757 fprintf_unfiltered (gdb_stdlog,
1758 "infrun: proceed (addr=%s, signal=%d, step=%d)\n",
1759 paddress (gdbarch, addr), siggnal, step);
1760
1761 if (non_stop)
1762 /* In non-stop, each thread is handled individually. The context
1763 must already be set to the right thread here. */
1764 ;
1765 else
1766 {
1767 /* In a multi-threaded task we may select another thread and
1768 then continue or step.
1769
1770 But if the old thread was stopped at a breakpoint, it will
1771 immediately cause another breakpoint stop without any
1772 execution (i.e. it will report a breakpoint hit incorrectly).
1773 So we must step over it first.
1774
1775 prepare_to_proceed checks the current thread against the
1776 thread that reported the most recent event. If a step-over
1777 is required it returns TRUE and sets the current thread to
1778 the old thread. */
1779 if (prepare_to_proceed (step))
1780 oneproc = 1;
1781 }
1782
1783 /* prepare_to_proceed may change the current thread. */
1784 tp = inferior_thread ();
1785
1786 if (oneproc)
1787 {
1788 tp->trap_expected = 1;
1789 /* If displaced stepping is enabled, we can step over the
1790 breakpoint without hitting it, so leave all breakpoints
1791 inserted. Otherwise we need to disable all breakpoints, step
1792 one instruction, and then re-add them when that step is
1793 finished. */
1794 if (!use_displaced_stepping (gdbarch))
1795 remove_breakpoints ();
1796 }
1797
1798 /* We can insert breakpoints if we're not trying to step over one,
1799 or if we are stepping over one but we're using displaced stepping
1800 to do so. */
1801 if (! tp->trap_expected || use_displaced_stepping (gdbarch))
1802 insert_breakpoints ();
1803
1804 if (!non_stop)
1805 {
1806 /* Pass the last stop signal to the thread we're resuming,
1807 irrespective of whether the current thread is the thread that
1808 got the last event or not. This was historically GDB's
1809 behaviour before keeping a stop_signal per thread. */
1810
1811 struct thread_info *last_thread;
1812 ptid_t last_ptid;
1813 struct target_waitstatus last_status;
1814
1815 get_last_target_status (&last_ptid, &last_status);
1816 if (!ptid_equal (inferior_ptid, last_ptid)
1817 && !ptid_equal (last_ptid, null_ptid)
1818 && !ptid_equal (last_ptid, minus_one_ptid))
1819 {
1820 last_thread = find_thread_ptid (last_ptid);
1821 if (last_thread)
1822 {
1823 tp->stop_signal = last_thread->stop_signal;
1824 last_thread->stop_signal = TARGET_SIGNAL_0;
1825 }
1826 }
1827 }
1828
1829 if (siggnal != TARGET_SIGNAL_DEFAULT)
1830 tp->stop_signal = siggnal;
1831 /* If this signal should not be seen by the program,
1832 give it zero. Used for debugging signals. */
1833 else if (!signal_program[tp->stop_signal])
1834 tp->stop_signal = TARGET_SIGNAL_0;
1835
1836 annotate_starting ();
1837
1838 /* Make sure that output from GDB appears before output from the
1839 inferior. */
1840 gdb_flush (gdb_stdout);
1841
1842 /* Refresh prev_pc value just prior to resuming. This used to be
1843 done in stop_stepping; however, setting prev_pc there did not handle
1844 scenarios such as inferior function calls or returning from
1845 a function via the return command. In those cases, the prev_pc
1846 value was not set properly for subsequent commands. The prev_pc value
1847 is used to initialize the starting line number in the ecs. With an
1848 invalid value, the gdb next command ends up stopping at the position
1849 represented by the next line table entry past our start position.
1850 On platforms that generate one line table entry per line, this
1851 is not a problem. However, on the ia64, the compiler generates
1852 extraneous line table entries that do not increase the line number.
1853 When we issue the gdb next command on the ia64 after an inferior call
1854 or a return command, we often end up a few instructions forward, still
1855 within the original line we started in.
1856
1857 An attempt was made to refresh the prev_pc at the same time the
1858 execution_control_state is initialized (for instance, just before
1859 waiting for an inferior event). But this approach did not work
1860 because of platforms that use ptrace, where the pc register cannot
1861 be read unless the inferior is stopped. At that point, we are not
1862 guaranteed the inferior is stopped and so the regcache_read_pc() call
1863 can fail. Setting the prev_pc value here ensures the value is updated
1864 correctly when the inferior is stopped. */
1865 tp->prev_pc = regcache_read_pc (get_current_regcache ());
1866
1867 /* Fill in with reasonable starting values. */
1868 init_thread_stepping_state (tp);
1869
1870 /* Reset to normal state. */
1871 init_infwait_state ();
1872
1873 /* Resume inferior. */
1874 resume (oneproc || step || bpstat_should_step (), tp->stop_signal);
1875
1876 /* Wait for it to stop (if not standalone)
1877 and in any case decode why it stopped, and act accordingly. */
1878 /* Do this only if we are not using the event loop, or if the target
1879 does not support asynchronous execution. */
1880 if (!target_can_async_p ())
1881 {
1882 wait_for_inferior (0);
1883 normal_stop ();
1884 }
1885 }
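
/* For illustration only -- a made-up helper showing how the ADDR,
   SIGGNAL and STEP arguments documented above map onto a few familiar
   user actions.  Each branch is shown in isolation; the real commands
   (in infcmd.c) set up more state first, as clear_proceed_status's
   comment explains.  */

enum example_action { EXAMPLE_CONTINUE, EXAMPLE_STEPI, EXAMPLE_JUMP };

static void
example_proceed_call (enum example_action action, CORE_ADDR jump_target)
{
  switch (action)
    {
    case EXAMPLE_CONTINUE:
      /* Resume where we stopped and act according to how the
         inferior last stopped.  */
      proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);
      break;
    case EXAMPLE_STEPI:
      /* Trap again after a single instruction.  */
      proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 1);
      break;
    case EXAMPLE_JUMP:
      /* Resume at JUMP_TARGET, giving the inferior no signal.  */
      proceed (jump_target, TARGET_SIGNAL_0, 0);
      break;
    }
}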
1886 \f
1887
1888 /* Start remote-debugging of a machine over a serial link. */
1889
1890 void
1891 start_remote (int from_tty)
1892 {
1893 struct inferior *inferior;
1894 init_wait_for_inferior ();
1895
1896 inferior = current_inferior ();
1897 inferior->stop_soon = STOP_QUIETLY_REMOTE;
1898
1899 /* Always go on waiting for the target, regardless of the mode. */
1900 /* FIXME: cagney/1999-09-23: At present it isn't possible to
1901 indicate to wait_for_inferior that a target should timeout if
1902 nothing is returned (instead of just blocking). Because of this,
1903 targets expecting an immediate response need to, internally, set
1904 things up so that the target_wait() is forced to eventually
1905 timeout. */
1906 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
1907 differentiate to its caller what the state of the target is after
1908 the initial open has been performed. Here we're assuming that
1909 the target has stopped. It should be possible to eventually have
1910 target_open() return to the caller an indication that the target
1911 is currently running and GDB state should be set to the same as
1912 for an async run. */
1913 wait_for_inferior (0);
1914
1915 /* Now that the inferior has stopped, do any bookkeeping like
1916 loading shared libraries. We want to do this before normal_stop,
1917 so that the displayed frame is up to date. */
1918 post_create_inferior (&current_target, from_tty);
1919
1920 normal_stop ();
1921 }
1922
1923 /* Initialize static vars when a new inferior begins. */
1924
1925 void
1926 init_wait_for_inferior (void)
1927 {
1928 /* These are meaningless until the first time through wait_for_inferior. */
1929
1930 breakpoint_init_inferior (inf_starting);
1931
1932 clear_proceed_status ();
1933
1934 stepping_past_singlestep_breakpoint = 0;
1935 deferred_step_ptid = null_ptid;
1936
1937 target_last_wait_ptid = minus_one_ptid;
1938
1939 previous_inferior_ptid = null_ptid;
1940 init_infwait_state ();
1941
1942 displaced_step_clear ();
1943
1944 /* Discard any skipped inlined frames. */
1945 clear_inline_frame_state (minus_one_ptid);
1946 }
1947
1948 \f
1949 /* This enum encodes possible reasons for doing a target_wait, so that
1950 wfi can call target_wait in one place. (Ultimately the call will be
1951 moved out of the infinite loop entirely.) */
1952
1953 enum infwait_states
1954 {
1955 infwait_normal_state,
1956 infwait_thread_hop_state,
1957 infwait_step_watch_state,
1958 infwait_nonstep_watch_state
1959 };
1960
1961 /* Why did the inferior stop? Used to print the appropriate messages
1962 to the interface from within handle_inferior_event(). */
1963 enum inferior_stop_reason
1964 {
1965 /* Step, next, nexti, stepi finished. */
1966 END_STEPPING_RANGE,
1967 /* Inferior terminated by signal. */
1968 SIGNAL_EXITED,
1969 /* Inferior exited. */
1970 EXITED,
1971 /* Inferior received signal, and user asked to be notified. */
1972 SIGNAL_RECEIVED,
1973 /* Reverse execution -- target ran out of history info. */
1974 NO_HISTORY
1975 };
1976
1977 /* The PTID we'll do a target_wait on. */
1978 ptid_t waiton_ptid;
1979
1980 /* Current inferior wait state. */
1981 enum infwait_states infwait_state;
1982
1983 /* Data to be passed around while handling an event. This data is
1984 discarded between events. */
1985 struct execution_control_state
1986 {
1987 ptid_t ptid;
1988 /* The thread that got the event, if this was a thread event; NULL
1989 otherwise. */
1990 struct thread_info *event_thread;
1991
1992 struct target_waitstatus ws;
1993 int random_signal;
1994 CORE_ADDR stop_func_start;
1995 CORE_ADDR stop_func_end;
1996 char *stop_func_name;
1997 int new_thread_event;
1998 int wait_some_more;
1999 };
2000
2001 static void handle_inferior_event (struct execution_control_state *ecs);
2002
2003 static void handle_step_into_function (struct gdbarch *gdbarch,
2004 struct execution_control_state *ecs);
2005 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2006 struct execution_control_state *ecs);
2007 static void insert_step_resume_breakpoint_at_frame (struct frame_info *step_frame);
2008 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
2009 static void insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
2010 struct symtab_and_line sr_sal,
2011 struct frame_id sr_id);
2012 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
2013
2014 static void stop_stepping (struct execution_control_state *ecs);
2015 static void prepare_to_wait (struct execution_control_state *ecs);
2016 static void keep_going (struct execution_control_state *ecs);
2017 static void print_stop_reason (enum inferior_stop_reason stop_reason,
2018 int stop_info);
2019
2020 /* Callback for iterate_over_threads. If the thread is stopped, but
2021 the user/frontend doesn't know about that yet, go through
2022 normal_stop, as if the thread had just stopped now. ARG points at
2023 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2024 ptid_is_pid(PTID) is true, applies to all threads of the process
2025 pointed at by PTID. Otherwise, applies only to the thread pointed
2026 at by PTID. */
2027
2028 static int
2029 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2030 {
2031 ptid_t ptid = * (ptid_t *) arg;
2032
2033 if ((ptid_equal (info->ptid, ptid)
2034 || ptid_equal (minus_one_ptid, ptid)
2035 || (ptid_is_pid (ptid)
2036 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2037 && is_running (info->ptid)
2038 && !is_executing (info->ptid))
2039 {
2040 struct cleanup *old_chain;
2041 struct execution_control_state ecss;
2042 struct execution_control_state *ecs = &ecss;
2043
2044 memset (ecs, 0, sizeof (*ecs));
2045
2046 old_chain = make_cleanup_restore_current_thread ();
2047
2048 switch_to_thread (info->ptid);
2049
2050 /* Go through handle_inferior_event/normal_stop, so we always
2051 have consistent output as if the stop event had been
2052 reported. */
2053 ecs->ptid = info->ptid;
2054 ecs->event_thread = find_thread_ptid (info->ptid);
2055 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2056 ecs->ws.value.sig = TARGET_SIGNAL_0;
2057
2058 handle_inferior_event (ecs);
2059
2060 if (!ecs->wait_some_more)
2061 {
2062 struct thread_info *tp;
2063
2064 normal_stop ();
2065
2066 /* Finish off the continuations. The continuations
2067 themselves are responsible for realising the thread
2068 didn't finish what it was supposed to do. */
2069 tp = inferior_thread ();
2070 do_all_intermediate_continuations_thread (tp);
2071 do_all_continuations_thread (tp);
2072 }
2073
2074 do_cleanups (old_chain);
2075 }
2076
2077 return 0;
2078 }
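
/* For illustration only -- a made-up caller showing the PTID forms the
   callback above accepts: minus_one_ptid selects every thread, a
   fully specified ptid (e.g. built with ptid_build) selects a single
   thread, and a process-wide ptid, as used here, selects all threads
   of PID.  */

static void
example_report_stops_for_process (int pid)
{
  ptid_t ptid = pid_to_ptid (pid);

  iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
}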
2079
2080 /* This function is attached as a "thread_stop_requested" observer.
2081 Cleanup local state that assumed the PTID was to be resumed, and
2082 report the stop to the frontend. */
2083
2084 static void
2085 infrun_thread_stop_requested (ptid_t ptid)
2086 {
2087 struct displaced_step_request *it, *next, *prev = NULL;
2088
2089 /* PTID was requested to stop. Remove it from the displaced
2090 stepping queue, so we don't try to resume it automatically. */
2091 for (it = displaced_step_request_queue; it; it = next)
2092 {
2093 next = it->next;
2094
2095 if (ptid_equal (it->ptid, ptid)
2096 || ptid_equal (minus_one_ptid, ptid)
2097 || (ptid_is_pid (ptid)
2098 && ptid_get_pid (ptid) == ptid_get_pid (it->ptid)))
2099 {
2100 if (displaced_step_request_queue == it)
2101 displaced_step_request_queue = it->next;
2102 else
2103 prev->next = it->next;
2104
2105 xfree (it);
2106 }
2107 else
2108 prev = it;
2109 }
2110
2111 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2112 }
2113
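/* If the thread that just exited is the one we last waited on, forget
   the cached wait PTID so later code does not act on a stale
   thread.  */
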
2114 static void
2115 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2116 {
2117 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2118 nullify_last_target_wait_ptid ();
2119 }
2120
2121 /* Callback for iterate_over_threads. */
2122
2123 static int
2124 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2125 {
2126 if (is_exited (info->ptid))
2127 return 0;
2128
2129 delete_step_resume_breakpoint (info);
2130 return 0;
2131 }
2132
2133 /* In all-stop, delete the step resume breakpoint of any thread that
2134 had one. In non-stop, delete the step resume breakpoint of the
2135 thread that just stopped. */
2136
2137 static void
2138 delete_step_thread_step_resume_breakpoint (void)
2139 {
2140 if (!target_has_execution
2141 || ptid_equal (inferior_ptid, null_ptid))
2142 /* If the inferior has exited, we have already deleted the step
2143 resume breakpoints out of GDB's lists. */
2144 return;
2145
2146 if (non_stop)
2147 {
2148 /* If in non-stop mode, only delete the step-resume or
2149 longjmp-resume breakpoint of the thread that just stopped
2150 stepping. */
2151 struct thread_info *tp = inferior_thread ();
2152 delete_step_resume_breakpoint (tp);
2153 }
2154 else
2155 /* In all-stop mode, delete all step-resume and longjmp-resume
2156 breakpoints of any thread that had them. */
2157 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2158 }
2159
2160 /* A cleanup wrapper. */
2161
2162 static void
2163 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2164 {
2165 delete_step_thread_step_resume_breakpoint ();
2166 }
2167
2168 /* Pretty print the results of target_wait, for debugging purposes. */
2169
2170 static void
2171 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2172 const struct target_waitstatus *ws)
2173 {
2174 char *status_string = target_waitstatus_to_string (ws);
2175 struct ui_file *tmp_stream = mem_fileopen ();
2176 char *text;
2177
2178 /* The text is split over several lines because it was getting too long.
2179 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2180 output as a unit; we want only one timestamp printed if debug_timestamp
2181 is set. */
2182
2183 fprintf_unfiltered (tmp_stream,
2184 "infrun: target_wait (%d", PIDGET (waiton_ptid));
2185 if (PIDGET (waiton_ptid) != -1)
2186 fprintf_unfiltered (tmp_stream,
2187 " [%s]", target_pid_to_str (waiton_ptid));
2188 fprintf_unfiltered (tmp_stream, ", status) =\n");
2189 fprintf_unfiltered (tmp_stream,
2190 "infrun: %d [%s],\n",
2191 PIDGET (result_ptid), target_pid_to_str (result_ptid));
2192 fprintf_unfiltered (tmp_stream,
2193 "infrun: %s\n",
2194 status_string);
2195
2196 text = ui_file_xstrdup (tmp_stream, NULL);
2197
2198 /* This uses %s in part to handle %'s in the text, but also to avoid
2199 a gcc error: the format attribute requires a string literal. */
2200 fprintf_unfiltered (gdb_stdlog, "%s", text);
2201
2202 xfree (status_string);
2203 xfree (text);
2204 ui_file_delete (tmp_stream);
2205 }
2206
2207 /* Wait for control to return from inferior to debugger.
2208
2209 If TREAT_EXEC_AS_SIGTRAP is non-zero, then handle EXEC signals
2210 as if they were SIGTRAP signals. This can be useful during
2211 the startup sequence on some targets such as HP/UX, where
2212 we receive an EXEC event instead of the expected SIGTRAP.
2213
2214 If the inferior gets a signal, we may decide to start it up again
2215 instead of returning. That is why there is a loop in this function.
2216 When this function actually returns, it means the inferior
2217 should be left stopped and GDB should read more commands. */
2218
2219 void
2220 wait_for_inferior (int treat_exec_as_sigtrap)
2221 {
2222 struct cleanup *old_cleanups;
2223 struct execution_control_state ecss;
2224 struct execution_control_state *ecs;
2225
2226 if (debug_infrun)
2227 fprintf_unfiltered
2228 (gdb_stdlog, "infrun: wait_for_inferior (treat_exec_as_sigtrap=%d)\n",
2229 treat_exec_as_sigtrap);
2230
2231 old_cleanups =
2232 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2233
2234 ecs = &ecss;
2235 memset (ecs, 0, sizeof (*ecs));
2236
2237 /* We'll update this if & when we switch to a new thread. */
2238 previous_inferior_ptid = inferior_ptid;
2239
2240 while (1)
2241 {
2242 struct cleanup *old_chain;
2243
2244 /* We have to invalidate the registers BEFORE calling target_wait
2245 because they can be loaded from the target while in target_wait.
2246 This makes remote debugging a bit more efficient for those
2247 targets that provide critical registers as part of their normal
2248 status mechanism. */
2249
2250 overlay_cache_invalid = 1;
2251 registers_changed ();
2252
2253 if (deprecated_target_wait_hook)
2254 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2255 else
2256 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2257
2258 if (debug_infrun)
2259 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2260
2261 if (treat_exec_as_sigtrap && ecs->ws.kind == TARGET_WAITKIND_EXECD)
2262 {
2263 xfree (ecs->ws.value.execd_pathname);
2264 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2265 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
2266 }
2267
2268 /* If an error happens while handling the event, propagate GDB's
2269 knowledge of the executing state to the frontend/user running
2270 state. */
2271 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2272
2273 if (ecs->ws.kind == TARGET_WAITKIND_SYSCALL_ENTRY
2274 || ecs->ws.kind == TARGET_WAITKIND_SYSCALL_RETURN)
2275 ecs->ws.value.syscall_number = UNKNOWN_SYSCALL;
2276
2277 /* Now figure out what to do with the result of the wait. */
2278 handle_inferior_event (ecs);
2279
2280 /* No error, don't finish the state yet. */
2281 discard_cleanups (old_chain);
2282
2283 if (!ecs->wait_some_more)
2284 break;
2285 }
2286
2287 do_cleanups (old_cleanups);
2288 }
2289
2290 /* Asynchronous version of wait_for_inferior. It is called by the
2291 event loop whenever a change of state is detected on the file
2292 descriptor corresponding to the target. It can be called more than
2293 once to complete a single execution command. In such cases we need
2294 to keep the state in a global variable ECSS. If it is the last time
2295 that this function is called for a single execution command, then
2296 report to the user that the inferior has stopped, and do the
2297 necessary cleanups. */
2298
2299 void
2300 fetch_inferior_event (void *client_data)
2301 {
2302 struct execution_control_state ecss;
2303 struct execution_control_state *ecs = &ecss;
2304 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2305 struct cleanup *ts_old_chain;
2306 int was_sync = sync_execution;
2307
2308 memset (ecs, 0, sizeof (*ecs));
2309
2310 /* We'll update this if & when we switch to a new thread. */
2311 previous_inferior_ptid = inferior_ptid;
2312
2313 if (non_stop)
2314 /* In non-stop mode, the user/frontend should not notice a thread
2315 switch due to internal events. Make sure we revert to the
2316 user-selected thread and frame after handling the event and
2317 running any breakpoint commands. */
2318 make_cleanup_restore_current_thread ();
2319
2320 /* We have to invalidate the registers BEFORE calling target_wait
2321 because they can be loaded from the target while in target_wait.
2322 This makes remote debugging a bit more efficient for those
2323 targets that provide critical registers as part of their normal
2324 status mechanism. */
2325
2326 overlay_cache_invalid = 1;
2327 registers_changed ();
2328
2329 if (deprecated_target_wait_hook)
2330 ecs->ptid =
2331 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2332 else
2333 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2334
2335 if (debug_infrun)
2336 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2337
2338 if (non_stop
2339 && ecs->ws.kind != TARGET_WAITKIND_IGNORE
2340 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2341 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2342 /* In non-stop mode, each thread is handled individually. Switch
2343 early, so the global state is set correctly for this
2344 thread. */
2345 context_switch (ecs->ptid);
2346
2347 /* If an error happens while handling the event, propagate GDB's
2348 knowledge of the executing state to the frontend/user running
2349 state. */
2350 if (!non_stop)
2351 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2352 else
2353 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2354
2355 /* Now figure out what to do with the result of the wait. */
2356 handle_inferior_event (ecs);
2357
2358 if (!ecs->wait_some_more)
2359 {
2360 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2361
2362 delete_step_thread_step_resume_breakpoint ();
2363
2364 /* We may not find an inferior if this was a process exit. */
2365 if (inf == NULL || inf->stop_soon == NO_STOP_QUIETLY)
2366 normal_stop ();
2367
2368 if (target_has_execution
2369 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2370 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2371 && ecs->event_thread->step_multi
2372 && ecs->event_thread->stop_step)
2373 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2374 else
2375 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2376 }
2377
2378 /* No error, don't finish the thread states yet. */
2379 discard_cleanups (ts_old_chain);
2380
2381 /* Revert thread and frame. */
2382 do_cleanups (old_chain);
2383
2384 /* If the inferior was in sync execution mode, and now isn't,
2385 restore the prompt. */
2386 if (was_sync && !sync_execution)
2387 display_gdb_prompt (0);
2388 }
2389
2390 /* Record the frame and location we're currently stepping through. */
2391 void
2392 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2393 {
2394 struct thread_info *tp = inferior_thread ();
2395
2396 tp->step_frame_id = get_frame_id (frame);
2397 tp->step_stack_frame_id = get_stack_frame_id (frame);
2398
2399 tp->current_symtab = sal.symtab;
2400 tp->current_line = sal.line;
2401 }
2402
2403 /* Clear context switchable stepping state. */
2404
2405 void
2406 init_thread_stepping_state (struct thread_info *tss)
2407 {
2408 tss->stepping_over_breakpoint = 0;
2409 tss->step_after_step_resume_breakpoint = 0;
2410 tss->stepping_through_solib_after_catch = 0;
2411 tss->stepping_through_solib_catchpoints = NULL;
2412 }
2413
2414 /* Return the cached copy of the last pid/waitstatus returned by
2415 target_wait()/deprecated_target_wait_hook(). The data is actually
2416 cached by handle_inferior_event(), which gets called immediately
2417 after target_wait()/deprecated_target_wait_hook(). */
2418
2419 void
2420 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
2421 {
2422 *ptidp = target_last_wait_ptid;
2423 *status = target_last_waitstatus;
2424 }
2425
2426 void
2427 nullify_last_target_wait_ptid (void)
2428 {
2429 target_last_wait_ptid = minus_one_ptid;
2430 }
2431
2432 /* Switch thread contexts. */
2433
2434 static void
2435 context_switch (ptid_t ptid)
2436 {
2437 if (debug_infrun)
2438 {
2439 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
2440 target_pid_to_str (inferior_ptid));
2441 fprintf_unfiltered (gdb_stdlog, "to %s\n",
2442 target_pid_to_str (ptid));
2443 }
2444
2445 switch_to_thread (ptid);
2446 }
2447
2448 static void
2449 adjust_pc_after_break (struct execution_control_state *ecs)
2450 {
2451 struct regcache *regcache;
2452 struct gdbarch *gdbarch;
2453 struct address_space *aspace;
2454 CORE_ADDR breakpoint_pc;
2455
2456 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
2457 we aren't, just return.
2458
2459 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
2460 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
2461 implemented by software breakpoints should be handled through the normal
2462 breakpoint layer.
2463
2464 NOTE drow/2004-01-31: On some targets, breakpoints may generate
2465 different signals (SIGILL or SIGEMT for instance), but it is less
2466 clear where the PC is pointing afterwards. It may not match
2467 gdbarch_decr_pc_after_break. I don't know any specific target that
2468 generates these signals at breakpoints (the code has been in GDB since at
2469 least 1992) so I can not guess how to handle them here.
2470
2471 In earlier versions of GDB, a target with
2472 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
2473 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
2474 target with both of these set in GDB history, and it seems unlikely to be
2475 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
2476
2477 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
2478 return;
2479
2480 if (ecs->ws.value.sig != TARGET_SIGNAL_TRAP)
2481 return;
2482
2483 /* In reverse execution, when a breakpoint is hit, the instruction
2484 under it has already been de-executed. The reported PC always
2485 points at the breakpoint address, so adjusting it further would
2486 be wrong. E.g., consider this case on a decr_pc_after_break == 1
2487 architecture:
2488
2489 B1 0x08000000 : INSN1
2490 B2 0x08000001 : INSN2
2491 0x08000002 : INSN3
2492 PC -> 0x08000003 : INSN4
2493
2494 Say you're stopped at 0x08000003 as above. Reverse continuing
2495 from that point should hit B2 as below. Reading the PC when the
2496 SIGTRAP is reported should read 0x08000001 and INSN2 should have
2497 been de-executed already.
2498
2499 B1 0x08000000 : INSN1
2500 B2 PC -> 0x08000001 : INSN2
2501 0x08000002 : INSN3
2502 0x08000003 : INSN4
2503
2504 We can't apply the same logic as for forward execution, because
2505 we would wrongly adjust the PC to 0x08000000, since there's a
2506 breakpoint at PC - 1. We'd then report a hit on B1, although
2507 INSN1 hadn't been de-executed yet. Doing nothing is the correct
2508 behaviour. */
2509 if (execution_direction == EXEC_REVERSE)
2510 return;
2511
2512 /* If this target does not decrement the PC after breakpoints, then
2513 we have nothing to do. */
2514 regcache = get_thread_regcache (ecs->ptid);
2515 gdbarch = get_regcache_arch (regcache);
2516 if (gdbarch_decr_pc_after_break (gdbarch) == 0)
2517 return;
2518
2519 aspace = get_regcache_aspace (regcache);
2520
2521 /* Find the location where (if we've hit a breakpoint) the
2522 breakpoint would be. */
2523 breakpoint_pc = regcache_read_pc (regcache)
2524 - gdbarch_decr_pc_after_break (gdbarch);
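
/* For concreteness (addresses made up; the decrement of 1 matches
   x86, whose breakpoint instruction is a one-byte int3): if a
   breakpoint was planted at 0x08048000, the trap is reported with the
   PC already past the instruction, at 0x08048001, and the subtraction
   above yields 0x08048001 - 1 = 0x08048000, the address the
   breakpoint was actually set at.  */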
2525
2526 /* Check whether there actually is a software breakpoint inserted at
2527 that location.
2528
2529 If in non-stop mode, a race condition is possible where we've
2530 removed a breakpoint, but stop events for that breakpoint were
2531 already queued and arrive later. To suppress those spurious
2532 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
2533 and retire them after a number of stop events are reported. */
2534 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
2535 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
2536 {
2537 struct cleanup *old_cleanups = NULL;
2538 if (RECORD_IS_USED)
2539 old_cleanups = record_gdb_operation_disable_set ();
2540
2541 /* When using hardware single-step, a SIGTRAP is reported for both
2542 a completed single-step and a software breakpoint. Need to
2543 differentiate between the two, as the latter needs adjusting
2544 but the former does not.
2545
2546 The SIGTRAP can be due to a completed hardware single-step only if
2547 - we didn't insert software single-step breakpoints
2548 - the thread to be examined is still the current thread
2549 - this thread is currently being stepped
2550
2551 If any of these events did not occur, we must have stopped due
2552 to hitting a software breakpoint, and have to back up to the
2553 breakpoint address.
2554
2555 As a special case, we could have hardware single-stepped a
2556 software breakpoint. In this case (prev_pc == breakpoint_pc),
2557 we also need to back up to the breakpoint address. */
2558
2559 if (singlestep_breakpoints_inserted_p
2560 || !ptid_equal (ecs->ptid, inferior_ptid)
2561 || !currently_stepping (ecs->event_thread)
2562 || ecs->event_thread->prev_pc == breakpoint_pc)
2563 regcache_write_pc (regcache, breakpoint_pc);
2564
2565 if (RECORD_IS_USED)
2566 do_cleanups (old_cleanups);
2567 }
2568 }
2569
2570 void
2571 init_infwait_state (void)
2572 {
2573 waiton_ptid = pid_to_ptid (-1);
2574 infwait_state = infwait_normal_state;
2575 }
2576
2577 void
2578 error_is_running (void)
2579 {
2580 error (_("\
2581 Cannot execute this command while the selected thread is running."));
2582 }
2583
2584 void
2585 ensure_not_running (void)
2586 {
2587 if (is_running (inferior_ptid))
2588 error_is_running ();
2589 }
2590
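/* Return non-zero if FRAME is called, either directly or through a
   chain of inlined frames, by the frame whose ID is STEP_FRAME_ID --
   that is, if we appear to have stepped into FRAME from that
   frame.  */
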
2591 static int
2592 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
2593 {
2594 for (frame = get_prev_frame (frame);
2595 frame != NULL;
2596 frame = get_prev_frame (frame))
2597 {
2598 if (frame_id_eq (get_frame_id (frame), step_frame_id))
2599 return 1;
2600 if (get_frame_type (frame) != INLINE_FRAME)
2601 break;
2602 }
2603
2604 return 0;
2605 }
2606
2607 /* Auxiliary function that handles syscall entry/return events.
2608 It returns 1 if the inferior should keep going (and GDB
2609 should ignore the event), or 0 if the event deserves to be
2610 processed. */
2611
2612 static int
2613 handle_syscall_event (struct execution_control_state *ecs)
2614 {
2615 struct regcache *regcache;
2616 struct gdbarch *gdbarch;
2617 int syscall_number;
2618
2619 if (!ptid_equal (ecs->ptid, inferior_ptid))
2620 context_switch (ecs->ptid);
2621
2622 regcache = get_thread_regcache (ecs->ptid);
2623 gdbarch = get_regcache_arch (regcache);
2624 syscall_number = gdbarch_get_syscall_number (gdbarch, ecs->ptid);
2625 stop_pc = regcache_read_pc (regcache);
2626
2627 target_last_waitstatus.value.syscall_number = syscall_number;
2628
2629 if (catch_syscall_enabled () > 0
2630 && catching_syscall_number (syscall_number) > 0)
2631 {
2632 if (debug_infrun)
2633 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
2634 syscall_number);
2635
2636 ecs->event_thread->stop_bpstat
2637 = bpstat_stop_status (get_regcache_aspace (regcache),
2638 stop_pc, ecs->ptid);
2639 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
2640
2641 if (!ecs->random_signal)
2642 {
2643 /* Catchpoint hit. */
2644 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
2645 return 0;
2646 }
2647 }
2648
2649 /* If no catchpoint triggered for this, then keep going. */
2650 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
2651 keep_going (ecs);
2652 return 1;
2653 }
2654
2655 /* Given an execution control state that has been freshly filled in
2656 by an event from the inferior, figure out what it means and take
2657 appropriate action. */
2658
2659 static void
2660 handle_inferior_event (struct execution_control_state *ecs)
2661 {
2662 struct frame_info *frame;
2663 struct gdbarch *gdbarch;
2664 int sw_single_step_trap_p = 0;
2665 int stopped_by_watchpoint;
2666 int stepped_after_stopped_by_watchpoint = 0;
2667 struct symtab_and_line stop_pc_sal;
2668 enum stop_kind stop_soon;
2669
2670 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
2671 {
2672 /* We had an event in the inferior, but we are not interested in
2673 handling it at this level. The lower layers have already
2674 done what needs to be done, if anything.
2675
2676 One of the possible circumstances for this is when the
2677 inferior produces output for the console. The inferior has
2678 not stopped, and we are ignoring the event. Another possible
2679 circumstance is any event which the lower level knows will be
2680 reported multiple times without an intervening resume. */
2681 if (debug_infrun)
2682 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
2683 prepare_to_wait (ecs);
2684 return;
2685 }
2686
2687 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
2688 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2689 {
2690 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2691 gdb_assert (inf);
2692 stop_soon = inf->stop_soon;
2693 }
2694 else
2695 stop_soon = NO_STOP_QUIETLY;
2696
2697 /* Cache the last pid/waitstatus. */
2698 target_last_wait_ptid = ecs->ptid;
2699 target_last_waitstatus = ecs->ws;
2700
2701 /* Always clear state belonging to the previous time we stopped. */
2702 stop_stack_dummy = 0;
2703
2704 /* If it's a new process, add it to the thread database. */
2705
2706 ecs->new_thread_event = (!ptid_equal (ecs->ptid, inferior_ptid)
2707 && !ptid_equal (ecs->ptid, minus_one_ptid)
2708 && !in_thread_list (ecs->ptid));
2709
2710 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
2711 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED && ecs->new_thread_event)
2712 add_thread (ecs->ptid);
2713
2714 ecs->event_thread = find_thread_ptid (ecs->ptid);
2715
2716 /* Dependent on valid ECS->EVENT_THREAD. */
2717 adjust_pc_after_break (ecs);
2718
2719 /* Dependent on the current PC value modified by adjust_pc_after_break. */
2720 reinit_frame_cache ();
2721
2722 breakpoint_retire_moribund ();
2723
2724 /* Mark the non-executing threads accordingly. In all-stop, all
2725 threads of all processes are stopped when we get any event
2726 reported. In non-stop mode, only the event thread stops. If
2727 we're handling a process exit in non-stop mode, there's nothing
2728 to do, as threads of the dead process are gone, and threads of
2729 any other process were left running. */
2730 if (!non_stop)
2731 set_executing (minus_one_ptid, 0);
2732 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2733 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
2734 set_executing (inferior_ptid, 0);
2735
2736 switch (infwait_state)
2737 {
2738 case infwait_thread_hop_state:
2739 if (debug_infrun)
2740 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_thread_hop_state\n");
2741 break;
2742
2743 case infwait_normal_state:
2744 if (debug_infrun)
2745 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
2746 break;
2747
2748 case infwait_step_watch_state:
2749 if (debug_infrun)
2750 fprintf_unfiltered (gdb_stdlog,
2751 "infrun: infwait_step_watch_state\n");
2752
2753 stepped_after_stopped_by_watchpoint = 1;
2754 break;
2755
2756 case infwait_nonstep_watch_state:
2757 if (debug_infrun)
2758 fprintf_unfiltered (gdb_stdlog,
2759 "infrun: infwait_nonstep_watch_state\n");
2760 insert_breakpoints ();
2761
2762 /* FIXME-maybe: is this cleaner than setting a flag? Does it
2763 handle things like signals arriving and other things happening
2764 in combination correctly? */
2765 stepped_after_stopped_by_watchpoint = 1;
2766 break;
2767
2768 default:
2769 internal_error (__FILE__, __LINE__, _("bad switch"));
2770 }
2771
2772 infwait_state = infwait_normal_state;
2773 waiton_ptid = pid_to_ptid (-1);
2774
2775 switch (ecs->ws.kind)
2776 {
2777 case TARGET_WAITKIND_LOADED:
2778 if (debug_infrun)
2779 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
2780 /* Ignore gracefully during startup of the inferior, as it might
2781 be the shell which has just loaded some objects, otherwise
2782 add the symbols for the newly loaded objects. Also ignore at
2783 the beginning of an attach or remote session; we will query
2784 the full list of libraries once the connection is
2785 established. */
2786 if (stop_soon == NO_STOP_QUIETLY)
2787 {
2788 /* Check for any newly added shared libraries if we're
2789 supposed to be adding them automatically. Switch
2790 terminal for any messages produced by
2791 breakpoint_re_set. */
2792 target_terminal_ours_for_output ();
2793 /* NOTE: cagney/2003-11-25: Make certain that the target
2794 stack's section table is kept up-to-date. Architectures,
2795 (e.g., PPC64), use the section table to perform
2796 operations such as address => section name and hence
2797 require the table to contain all sections (including
2798 those found in shared libraries). */
2799 #ifdef SOLIB_ADD
2800 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
2801 #else
2802 solib_add (NULL, 0, &current_target, auto_solib_add);
2803 #endif
2804 target_terminal_inferior ();
2805
2806 /* If requested, stop when the dynamic linker notifies
2807 gdb of events. This allows the user to get control
2808 and place breakpoints in initializer routines for
2809 dynamically loaded objects (among other things). */
2810 if (stop_on_solib_events)
2811 {
2812 /* Make sure we print "Stopped due to solib-event" in
2813 normal_stop. */
2814 stop_print_frame = 1;
2815
2816 stop_stepping (ecs);
2817 return;
2818 }
2819
2820 /* NOTE drow/2007-05-11: This might be a good place to check
2821 for "catch load". */
2822 }
2823
2824 /* If we are skipping through a shell, or through shared library
2825 loading that we aren't interested in, resume the program. If
2826 we're running the program normally, also resume. But stop if
2827 we're attaching or setting up a remote connection. */
2828 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
2829 {
2830 /* Loading of shared libraries might have changed breakpoint
2831 addresses. Make sure new breakpoints are inserted. */
2832 if (stop_soon == NO_STOP_QUIETLY
2833 && !breakpoints_always_inserted_mode ())
2834 insert_breakpoints ();
2835 resume (0, TARGET_SIGNAL_0);
2836 prepare_to_wait (ecs);
2837 return;
2838 }
2839
2840 break;
2841
2842 case TARGET_WAITKIND_SPURIOUS:
2843 if (debug_infrun)
2844 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
2845 resume (0, TARGET_SIGNAL_0);
2846 prepare_to_wait (ecs);
2847 return;
2848
2849 case TARGET_WAITKIND_EXITED:
2850 if (debug_infrun)
2851 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXITED\n");
2852 inferior_ptid = ecs->ptid;
2853 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
2854 set_current_program_space (current_inferior ()->pspace);
2855 handle_vfork_child_exec_or_exit (0);
2856 target_terminal_ours (); /* Must do this before mourn anyway. */
2857 print_stop_reason (EXITED, ecs->ws.value.integer);
2858
2859 /* Record the exit code in the convenience variable $_exitcode, so
2860 that the user can inspect this again later. */
2861 set_internalvar_integer (lookup_internalvar ("_exitcode"),
2862 (LONGEST) ecs->ws.value.integer);
2863 gdb_flush (gdb_stdout);
2864 target_mourn_inferior ();
2865 singlestep_breakpoints_inserted_p = 0;
2866 stop_print_frame = 0;
2867 stop_stepping (ecs);
2868 return;
2869
2870 case TARGET_WAITKIND_SIGNALLED:
2871 if (debug_infrun)
2872 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SIGNALLED\n");
2873 inferior_ptid = ecs->ptid;
2874 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
2875 set_current_program_space (current_inferior ()->pspace);
2876 handle_vfork_child_exec_or_exit (0);
2877 stop_print_frame = 0;
2878 target_terminal_ours (); /* Must do this before mourn anyway. */
2879
2880 /* Note: By definition of TARGET_WAITKIND_SIGNALLED, we shouldn't
2881 reach here unless the inferior is dead. However, for years
2882 target_kill() was called here, which hints that fatal signals aren't
2883 really fatal on some systems. If that's true, then some changes
2884 may be needed. */
2885 target_mourn_inferior ();
2886
2887 print_stop_reason (SIGNAL_EXITED, ecs->ws.value.sig);
2888 singlestep_breakpoints_inserted_p = 0;
2889 stop_stepping (ecs);
2890 return;
2891
2892 /* The following are the only cases in which we keep going;
2893 the above cases end in a continue or goto. */
2894 case TARGET_WAITKIND_FORKED:
2895 case TARGET_WAITKIND_VFORKED:
2896 if (debug_infrun)
2897 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
2898
2899 if (!ptid_equal (ecs->ptid, inferior_ptid))
2900 {
2901 context_switch (ecs->ptid);
2902 reinit_frame_cache ();
2903 }
2904
2905 /* Immediately detach breakpoints from the child before there's
2906 any chance of letting the user delete breakpoints from the
2907 breakpoint lists. If we don't do this early, it's easy to
2908 leave left-over traps in the child, viz: "break foo; catch
2909 fork; c; <fork>; del; c; <child calls foo>". We only follow
2910 the fork on the last `continue', and by that time the
2911 breakpoint at "foo" is long gone from the breakpoint table.
2912 If we vforked, then we don't need to unpatch here, since both
2913 parent and child are sharing the same memory pages; we'll
2914 need to unpatch at follow/detach time instead to be certain
2915 that new breakpoints added between catchpoint hit time and
2916 vfork follow are detached. */
2917 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
2918 {
2919 int child_pid = ptid_get_pid (ecs->ws.value.related_pid);
2920
2921 /* This won't actually modify the breakpoint list, but will
2922 physically remove the breakpoints from the child. */
2923 detach_breakpoints (child_pid);
2924 }
2925
2926 /* In case the event is caught by a catchpoint, remember that
2927 the event is to be followed at the next resume of the thread,
2928 and not immediately. */
2929 ecs->event_thread->pending_follow = ecs->ws;
2930
2931 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
2932
2933 ecs->event_thread->stop_bpstat
2934 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
2935 stop_pc, ecs->ptid);
2936
2937 /* Note that we're interested in knowing whether the bpstat
2938 actually causes a stop, not just whether it may explain the signal.
2939 Software watchpoints, for example, always appear in the
2940 bpstat. */
2941 ecs->random_signal = !bpstat_causes_stop (ecs->event_thread->stop_bpstat);
2942
2943 /* If no catchpoint triggered for this, then keep going. */
2944 if (ecs->random_signal)
2945 {
2946 ptid_t parent;
2947 ptid_t child;
2948 int should_resume;
2949 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
2950
2951 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
2952
2953 should_resume = follow_fork ();
2954
2955 parent = ecs->ptid;
2956 child = ecs->ws.value.related_pid;
2957
2958 /* In non-stop mode, also resume the other branch. */
2959 if (non_stop && !detach_fork)
2960 {
2961 if (follow_child)
2962 switch_to_thread (parent);
2963 else
2964 switch_to_thread (child);
2965
2966 ecs->event_thread = inferior_thread ();
2967 ecs->ptid = inferior_ptid;
2968 keep_going (ecs);
2969 }
2970
2971 if (follow_child)
2972 switch_to_thread (child);
2973 else
2974 switch_to_thread (parent);
2975
2976 ecs->event_thread = inferior_thread ();
2977 ecs->ptid = inferior_ptid;
2978
2979 if (should_resume)
2980 keep_going (ecs);
2981 else
2982 stop_stepping (ecs);
2983 return;
2984 }
2985 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
2986 goto process_event_stop_test;
2987
2988 case TARGET_WAITKIND_VFORK_DONE:
2989 /* Done with the shared memory region. Re-insert breakpoints in
2990 the parent, and keep going. */
2991
2992 if (debug_infrun)
2993 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORK_DONE\n");
2994
2995 if (!ptid_equal (ecs->ptid, inferior_ptid))
2996 context_switch (ecs->ptid);
2997
2998 current_inferior ()->waiting_for_vfork_done = 0;
2999 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3000 /* This also takes care of reinserting breakpoints in the
3001 previously locked inferior. */
3002 keep_going (ecs);
3003 return;
3004
3005 case TARGET_WAITKIND_EXECD:
3006 if (debug_infrun)
3007 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3008
3009 if (!ptid_equal (ecs->ptid, inferior_ptid))
3010 {
3011 context_switch (ecs->ptid);
3012 reinit_frame_cache ();
3013 }
3014
3015 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3016
3017 /* Do whatever is necessary to the parent branch of the vfork. */
3018 handle_vfork_child_exec_or_exit (1);
3019
3020 /* This causes the eventpoints and symbol table to be reset.
3021 Must do this now, before trying to determine whether to
3022 stop. */
3023 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3024
3025 ecs->event_thread->stop_bpstat
3026 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3027 stop_pc, ecs->ptid);
3028 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
3029
3030 /* Note that this may be referenced from inside
3031 bpstat_stop_status above, through inferior_has_execd. */
3032 xfree (ecs->ws.value.execd_pathname);
3033 ecs->ws.value.execd_pathname = NULL;
3034
3035 /* If no catchpoint triggered for this, then keep going. */
3036 if (ecs->random_signal)
3037 {
3038 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3039 keep_going (ecs);
3040 return;
3041 }
3042 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3043 goto process_event_stop_test;
3044
3045 /* Be careful not to try to gather much state about a thread
3046 that's in a syscall. It's frequently a losing proposition. */
3047 case TARGET_WAITKIND_SYSCALL_ENTRY:
3048 if (debug_infrun)
3049 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3050 /* Get the current syscall number. */
3051 if (handle_syscall_event (ecs) != 0)
3052 return;
3053 goto process_event_stop_test;
3054
3055 /* Before examining the threads further, step this thread to
3056 get it entirely out of the syscall. (We get notice of the
3057 event when the thread is just on the verge of exiting a
3058 syscall. Stepping one instruction seems to get it back
3059 into user code.) */
3060 case TARGET_WAITKIND_SYSCALL_RETURN:
3061 if (debug_infrun)
3062 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
3063 if (handle_syscall_event (ecs) != 0)
3064 return;
3065 goto process_event_stop_test;
3066
3067 case TARGET_WAITKIND_STOPPED:
3068 if (debug_infrun)
3069 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
3070 ecs->event_thread->stop_signal = ecs->ws.value.sig;
3071 break;
3072
3073 case TARGET_WAITKIND_NO_HISTORY:
3074 /* Reverse execution: target ran out of history info. */
3075 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3076 print_stop_reason (NO_HISTORY, 0);
3077 stop_stepping (ecs);
3078 return;
3079 }
3080
3081 if (ecs->new_thread_event)
3082 {
3083 if (non_stop)
3084 /* Non-stop assumes that the target handles adding new threads
3085 to the thread list. */
3086 internal_error (__FILE__, __LINE__, "\
3087 targets should add new threads to the thread list themselves in non-stop mode.");
3088
3089 /* We may want to consider not doing a resume here in order to
3090 give the user a chance to play with the new thread. It might
3091 be good to make that a user-settable option. */
3092
3093 /* At this point, all threads are stopped (happens automatically
3094 in either the OS or the native code). Therefore we need to
3095 continue all threads in order to make progress. */
3096
3097 if (!ptid_equal (ecs->ptid, inferior_ptid))
3098 context_switch (ecs->ptid);
3099 target_resume (RESUME_ALL, 0, TARGET_SIGNAL_0);
3100 prepare_to_wait (ecs);
3101 return;
3102 }
3103
3104 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED)
3105 {
3106 /* Do we need to clean up the state of a thread that has
3107 completed a displaced single-step? (Doing so usually affects
3108 the PC, so do it here, before we set stop_pc.) */
3109 displaced_step_fixup (ecs->ptid, ecs->event_thread->stop_signal);
3110
3111 /* If we either finished a single-step or hit a breakpoint, but
3112 the user wanted this thread to be stopped, pretend we got a
3113 SIG0 (generic unsignaled stop). */
3114
3115 if (ecs->event_thread->stop_requested
3116 && ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3117 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3118 }
3119
3120 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3121
3122 if (debug_infrun)
3123 {
3124 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3125 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3126 struct cleanup *old_chain = save_inferior_ptid ();
3127
3128 inferior_ptid = ecs->ptid;
3129
3130 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
3131 paddress (gdbarch, stop_pc));
3132 if (target_stopped_by_watchpoint ())
3133 {
3134 CORE_ADDR addr;
3135 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
3136
3137 if (target_stopped_data_address (&current_target, &addr))
3138 fprintf_unfiltered (gdb_stdlog,
3139 "infrun: stopped data address = %s\n",
3140 paddress (gdbarch, addr));
3141 else
3142 fprintf_unfiltered (gdb_stdlog,
3143 "infrun: (no data address available)\n");
3144 }
3145
3146 do_cleanups (old_chain);
3147 }
3148
3149 if (stepping_past_singlestep_breakpoint)
3150 {
3151 gdb_assert (singlestep_breakpoints_inserted_p);
3152 gdb_assert (ptid_equal (singlestep_ptid, ecs->ptid));
3153 gdb_assert (!ptid_equal (singlestep_ptid, saved_singlestep_ptid));
3154
3155 stepping_past_singlestep_breakpoint = 0;
3156
3157 /* We've either finished single-stepping past the single-step
3158 breakpoint, or stopped for some other reason. It would be nice if
3159 we could tell, but we can't reliably. */
3160 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3161 {
3162 if (debug_infrun)
3163 fprintf_unfiltered (gdb_stdlog, "infrun: stepping_past_singlestep_breakpoint\n");
3164 /* Pull the single step breakpoints out of the target. */
3165 remove_single_step_breakpoints ();
3166 singlestep_breakpoints_inserted_p = 0;
3167
3168 ecs->random_signal = 0;
3169 ecs->event_thread->trap_expected = 0;
3170
3171 context_switch (saved_singlestep_ptid);
3172 if (deprecated_context_hook)
3173 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3174
3175 resume (1, TARGET_SIGNAL_0);
3176 prepare_to_wait (ecs);
3177 return;
3178 }
3179 }
3180
3181 if (!ptid_equal (deferred_step_ptid, null_ptid))
3182 {
3183 /* In non-stop mode, there's never a deferred_step_ptid set. */
3184 gdb_assert (!non_stop);
3185
3186 /* If we stopped for some other reason than single-stepping, ignore
3187 the fact that we were supposed to switch back. */
3188 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3189 {
3190 if (debug_infrun)
3191 fprintf_unfiltered (gdb_stdlog,
3192 "infrun: handling deferred step\n");
3193
3194 /* Pull the single step breakpoints out of the target. */
3195 if (singlestep_breakpoints_inserted_p)
3196 {
3197 remove_single_step_breakpoints ();
3198 singlestep_breakpoints_inserted_p = 0;
3199 }
3200
3201 /* Note: We do not call context_switch at this point, as the
3202 context is already set up for stepping the original thread. */
3203 switch_to_thread (deferred_step_ptid);
3204 deferred_step_ptid = null_ptid;
3205 /* Suppress spurious "Switching to ..." message. */
3206 previous_inferior_ptid = inferior_ptid;
3207
3208 resume (1, TARGET_SIGNAL_0);
3209 prepare_to_wait (ecs);
3210 return;
3211 }
3212
3213 deferred_step_ptid = null_ptid;
3214 }
3215
3216 /* See if a thread hit a thread-specific breakpoint that was meant for
3217 another thread. If so, then step that thread past the breakpoint,
3218 and continue it. */
3219
3220 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3221 {
3222 int thread_hop_needed = 0;
3223 struct address_space *aspace =
3224 get_regcache_aspace (get_thread_regcache (ecs->ptid));
3225
3226 /* Check if a regular breakpoint has been hit before checking
3227 for a potential single step breakpoint. Otherwise, GDB will
3228 not see this breakpoint hit when stepping onto breakpoints. */
3229 if (regular_breakpoint_inserted_here_p (aspace, stop_pc))
3230 {
3231 ecs->random_signal = 0;
3232 if (!breakpoint_thread_match (aspace, stop_pc, ecs->ptid))
3233 thread_hop_needed = 1;
3234 }
3235 else if (singlestep_breakpoints_inserted_p)
3236 {
3237 /* We have not context switched yet, so this should be true
3238 no matter which thread hit the singlestep breakpoint. */
3239 gdb_assert (ptid_equal (inferior_ptid, singlestep_ptid));
3240 if (debug_infrun)
3241 fprintf_unfiltered (gdb_stdlog, "infrun: software single step "
3242 "trap for %s\n",
3243 target_pid_to_str (ecs->ptid));
3244
3245 ecs->random_signal = 0;
3246 /* The call to in_thread_list is necessary because PTIDs sometimes
3247 change when we go from single-threaded to multi-threaded. If
3248 the singlestep_ptid is still in the list, assume that it is
3249 really different from ecs->ptid. */
3250 if (!ptid_equal (singlestep_ptid, ecs->ptid)
3251 && in_thread_list (singlestep_ptid))
3252 {
3253 /* If the PC of the thread we were trying to single-step
3254 has changed, discard this event (which we were going
3255 to ignore anyway), and pretend we saw that thread
3256 trap. This prevents us continuously moving the
3257 single-step breakpoint forward, one instruction at a
3258 time. If the PC has changed, then the thread we were
3259 trying to single-step has trapped or been signalled,
3260 but the event has not been reported to GDB yet.
3261
3262 There might be some cases where this loses signal
3263 information, if a signal has arrived at exactly the
3264 same time that the PC changed, but this is the best
3265 we can do with the information available. Perhaps we
3266 should arrange to report all events for all threads
3267 when they stop, or to re-poll the remote looking for
3268 this particular thread (i.e. temporarily enable
3269 schedlock). */
3270
3271 CORE_ADDR new_singlestep_pc
3272 = regcache_read_pc (get_thread_regcache (singlestep_ptid));
3273
3274 if (new_singlestep_pc != singlestep_pc)
3275 {
3276 enum target_signal stop_signal;
3277
3278 if (debug_infrun)
3279 fprintf_unfiltered (gdb_stdlog, "infrun: unexpected thread,"
3280 " but expected thread advanced also\n");
3281
3282 /* The current context still belongs to
3283 singlestep_ptid. Don't swap here, since that's
3284 the context we want to use. Just fudge our
3285 state and continue. */
3286 stop_signal = ecs->event_thread->stop_signal;
3287 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3288 ecs->ptid = singlestep_ptid;
3289 ecs->event_thread = find_thread_ptid (ecs->ptid);
3290 ecs->event_thread->stop_signal = stop_signal;
3291 stop_pc = new_singlestep_pc;
3292 }
3293 else
3294 {
3295 if (debug_infrun)
3296 fprintf_unfiltered (gdb_stdlog,
3297 "infrun: unexpected thread\n");
3298
3299 thread_hop_needed = 1;
3300 stepping_past_singlestep_breakpoint = 1;
3301 saved_singlestep_ptid = singlestep_ptid;
3302 }
3303 }
3304 }
3305
3306 if (thread_hop_needed)
3307 {
3308 struct regcache *thread_regcache;
3309 int remove_status = 0;
3310
3311 if (debug_infrun)
3312 fprintf_unfiltered (gdb_stdlog, "infrun: thread_hop_needed\n");
3313
3314 /* Switch context before touching inferior memory, the
3315 previous thread may have exited. */
3316 if (!ptid_equal (inferior_ptid, ecs->ptid))
3317 context_switch (ecs->ptid);
3318
3319 /* Saw a breakpoint, but it was hit by the wrong thread.
3320 Just continue. */
3321
3322 if (singlestep_breakpoints_inserted_p)
3323 {
3324 /* Pull the single step breakpoints out of the target. */
3325 remove_single_step_breakpoints ();
3326 singlestep_breakpoints_inserted_p = 0;
3327 }
3328
3329 /* If the arch can displace step, don't remove the
3330 breakpoints. */
3331 thread_regcache = get_thread_regcache (ecs->ptid);
3332 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
3333 remove_status = remove_breakpoints ();
3334
3335 /* Did we fail to remove breakpoints? If so, try
3336 to set the PC past the bp. (There's at least
3337 one situation in which we can fail to remove
3338 the bp's: On HP-UX's that use ttrace, we can't
3339 change the address space of a vforking child
3340 process until the child exits (well, okay, not
3341 then either :-) or execs. */
3342 if (remove_status != 0)
3343 error (_("Cannot step over breakpoint hit in wrong thread"));
3344 else
3345 { /* Single step */
3346 if (!non_stop)
3347 {
3348 /* Only need to require the next event from this
3349 thread in all-stop mode. */
3350 waiton_ptid = ecs->ptid;
3351 infwait_state = infwait_thread_hop_state;
3352 }
3353
3354 ecs->event_thread->stepping_over_breakpoint = 1;
3355 keep_going (ecs);
3356 return;
3357 }
3358 }
3359 else if (singlestep_breakpoints_inserted_p)
3360 {
3361 sw_single_step_trap_p = 1;
3362 ecs->random_signal = 0;
3363 }
3364 }
3365 else
3366 ecs->random_signal = 1;
3367
3368 /* See if something interesting happened to the non-current thread. If
3369 so, then switch to that thread. */
3370 if (!ptid_equal (ecs->ptid, inferior_ptid))
3371 {
3372 if (debug_infrun)
3373 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
3374
3375 context_switch (ecs->ptid);
3376
3377 if (deprecated_context_hook)
3378 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3379 }
3380
3381 /* At this point, get hold of the now-current thread's frame. */
3382 frame = get_current_frame ();
3383 gdbarch = get_frame_arch (frame);
3384
3385 if (singlestep_breakpoints_inserted_p)
3386 {
3387 /* Pull the single step breakpoints out of the target. */
3388 remove_single_step_breakpoints ();
3389 singlestep_breakpoints_inserted_p = 0;
3390 }
3391
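/* If we already stepped the inferior past the instruction that
triggered the watchpoint, that step accounts for this trap; otherwise
ask the target whether a watchpoint triggered at this stop. */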
3392 if (stepped_after_stopped_by_watchpoint)
3393 stopped_by_watchpoint = 0;
3394 else
3395 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
3396
3397 /* If necessary, step over this watchpoint. We'll be back to display
3398 it in a moment. */
3399 if (stopped_by_watchpoint
3400 && (target_have_steppable_watchpoint
3401 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
3402 {
3403 /* At this point, we are stopped at an instruction which has
3404 attempted to write to a piece of memory under control of
3405 a watchpoint. The instruction hasn't actually executed
3406 yet. If we were to evaluate the watchpoint expression
3407 now, we would get the old value, and therefore no change
3408 would seem to have occurred.
3409
3410 In order to make watchpoints work `right', we really need
3411 to complete the memory write, and then evaluate the
3412 watchpoint expression. We do this by single-stepping the
3413 target.
3414
3415 It may not be necessary to disable the watchpoint to step over
3416 it. For example, the PA can (with some kernel cooperation)
3417 single step over a watchpoint without disabling the watchpoint.
3418
3419 It is far more common to need to disable a watchpoint to step
3420 the inferior over it. If we have non-steppable watchpoints,
3421 we must disable the current watchpoint; it's simplest to
3422 disable all watchpoints and breakpoints. */
3423 int hw_step = 1;
3424
3425 if (!target_have_steppable_watchpoint)
3426 remove_breakpoints ();
3427 /* Single step */
3428 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
3429 target_resume (ecs->ptid, hw_step, TARGET_SIGNAL_0);
3430 waiton_ptid = ecs->ptid;
3431 if (target_have_steppable_watchpoint)
3432 infwait_state = infwait_step_watch_state;
3433 else
3434 infwait_state = infwait_nonstep_watch_state;
3435 prepare_to_wait (ecs);
3436 return;
3437 }
3438
3439 ecs->stop_func_start = 0;
3440 ecs->stop_func_end = 0;
3441 ecs->stop_func_name = 0;
3442 /* Don't care about return value; stop_func_start and stop_func_name
3443 will both be 0 if it doesn't work. */
3444 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3445 &ecs->stop_func_start, &ecs->stop_func_end);
3446 ecs->stop_func_start
3447 += gdbarch_deprecated_function_start_offset (gdbarch);
3448 ecs->event_thread->stepping_over_breakpoint = 0;
3449 bpstat_clear (&ecs->event_thread->stop_bpstat);
3450 ecs->event_thread->stop_step = 0;
3451 stop_print_frame = 1;
3452 ecs->random_signal = 0;
3453 stopped_by_random_signal = 0;
3454
3455 /* Hide inlined functions starting here, unless we just performed stepi or
3456 nexti. After stepi and nexti, always show the innermost frame (not any
3457 inline function call sites). */
3458 if (ecs->event_thread->step_range_end != 1)
3459 skip_inline_frames (ecs->ptid);
3460
3461 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3462 && ecs->event_thread->trap_expected
3463 && gdbarch_single_step_through_delay_p (gdbarch)
3464 && currently_stepping (ecs->event_thread))
3465 {
3466 /* We're trying to step off a breakpoint. Turns out that we're
3467 also on an instruction that needs to be stepped multiple
3468 times before it has been fully executed. E.g., architectures
3469 with a delay slot. It needs to be stepped twice, once for
3470 the instruction and once for the delay slot. */
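/* For instance, on a MIPS-like target (illustrative only):
jal foo <-- breakpoint being stepped over
addiu a0,a0,1 <-- branch delay slot, needs a second step */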
3471 int step_through_delay
3472 = gdbarch_single_step_through_delay (gdbarch, frame);
3473 if (debug_infrun && step_through_delay)
3474 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
3475 if (ecs->event_thread->step_range_end == 0 && step_through_delay)
3476 {
3477 /* The user issued a continue when stopped at a breakpoint.
3478 Set up for another trap and get out of here. */
3479 ecs->event_thread->stepping_over_breakpoint = 1;
3480 keep_going (ecs);
3481 return;
3482 }
3483 else if (step_through_delay)
3484 {
3485 /* The user issued a step when stopped at a breakpoint.
3486 Maybe we should stop, maybe we should not - the delay
3487 slot *might* correspond to a line of source. In any
3488 case, don't decide that here, just set
3489 ecs->stepping_over_breakpoint, making sure we
3490 single-step again before breakpoints are re-inserted. */
3491 ecs->event_thread->stepping_over_breakpoint = 1;
3492 }
3493 }
3494
3495 /* Look at the cause of the stop, and decide what to do.
3496 The alternatives are:
3497 1) stop_stepping and return; to really stop and return to the debugger,
3498 2) keep_going and return to start up again
3499 (set ecs->event_thread->stepping_over_breakpoint to 1 to single step once)
3500 3) set ecs->random_signal to 1, and the decision between 1 and 2
3501 will be made according to the signal handling tables. */
3502
3503 /* First, distinguish signals caused by the debugger from signals
3504 that have to do with the program's own actions. Note that
3505 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
3506 on the operating system version. Here we detect when a SIGILL or
3507 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
3508 something similar for SIGSEGV, since a SIGSEGV will be generated
3509 when we're trying to execute a breakpoint instruction on a
3510 non-executable stack. This happens for call dummy breakpoints
3511 for architectures like SPARC that place call dummies on the
3512 stack.
3513
3514 If we're doing a displaced step past a breakpoint, then the
3515 breakpoint is always inserted at the original instruction;
3516 non-standard signals can't be explained by the breakpoint. */
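/* That is, treat the stop as breakpoint-related if the signal is a
SIGTRAP; or if it is a SIGILL/SIGSEGV/SIGEMT at an address where a
breakpoint is inserted and we were not displaced-stepping over it; or
if we have been told to stop quietly (startup, attach, or connecting
to a remote target). */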
3517 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3518 || (! ecs->event_thread->trap_expected
3519 && breakpoint_inserted_here_p (get_regcache_aspace (get_current_regcache ()),
3520 stop_pc)
3521 && (ecs->event_thread->stop_signal == TARGET_SIGNAL_ILL
3522 || ecs->event_thread->stop_signal == TARGET_SIGNAL_SEGV
3523 || ecs->event_thread->stop_signal == TARGET_SIGNAL_EMT))
3524 || stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_NO_SIGSTOP
3525 || stop_soon == STOP_QUIETLY_REMOTE)
3526 {
3527 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP && stop_after_trap)
3528 {
3529 if (debug_infrun)
3530 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
3531 stop_print_frame = 0;
3532 stop_stepping (ecs);
3533 return;
3534 }
3535
3536 /* This originates from start_remote(), start_inferior() and the
3537 shared library hook functions. */
3538 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
3539 {
3540 if (debug_infrun)
3541 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3542 stop_stepping (ecs);
3543 return;
3544 }
3545
3546 /* This originates from attach_command(). We need to overwrite
3547 the stop_signal here, because some kernels don't ignore a
3548 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
3549 See more comments in inferior.h. On the other hand, if we
3550 get a non-SIGSTOP, report it to the user - assume the backend
3551 will handle the SIGSTOP if it should show up later.
3552
3553 Also consider that the attach is complete when we see a
3554 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
3555 target extended-remote report it instead of a SIGSTOP
3556 (e.g. gdbserver). We already rely on SIGTRAP being our
3557 signal, so this is no exception.
3558
3559 Also consider that the attach is complete when we see a
3560 TARGET_SIGNAL_0. In non-stop mode, GDB will explicitly tell
3561 the target to stop all threads of the inferior, in case the
3562 low level attach operation doesn't stop them implicitly. If
3563 they weren't stopped implicitly, then the stub will report a
3564 TARGET_SIGNAL_0, meaning: stopped for no particular reason
3565 other than GDB's request. */
3566 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3567 && (ecs->event_thread->stop_signal == TARGET_SIGNAL_STOP
3568 || ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3569 || ecs->event_thread->stop_signal == TARGET_SIGNAL_0))
3570 {
3571 stop_stepping (ecs);
3572 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3573 return;
3574 }
3575
3576 /* See if there is a breakpoint at the current PC. */
3577 ecs->event_thread->stop_bpstat
3578 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3579 stop_pc, ecs->ptid);
3580
3581 /* The following is needed in case a breakpoint condition
3582 called a function. */
3583 stop_print_frame = 1;
3584
3585 /* This is where we handle "moribund" watchpoints. Unlike
3586 software breakpoints traps, hardware watchpoint traps are
3587 always distinguishable from random traps. If no high-level
3588 watchpoint is associated with the reported stop data address
3589 anymore, then the bpstat does not explain the signal ---
3590 simply make sure to ignore it if `stopped_by_watchpoint' is
3591 set. */
3592
3593 if (debug_infrun
3594 && ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3595 && !bpstat_explains_signal (ecs->event_thread->stop_bpstat)
3596 && stopped_by_watchpoint)
3597 fprintf_unfiltered (gdb_stdlog, "\
3598 infrun: no user watchpoint explains watchpoint SIGTRAP, ignoring\n");
3599
3600 /* NOTE: cagney/2003-03-29: These two checks for a random signal
3601 at one stage in the past included checks for an inferior
3602 function call's call dummy's return breakpoint. The original
3603 comment, that went with the test, read:
3604
3605 ``End of a stack dummy. Some systems (e.g. Sony news) give
3606 another signal besides SIGTRAP, so check here as well as
3607 above.''
3608
3609 If someone ever tries to get call dummies on a
3610 non-executable stack to work (where the target would stop
3611 with something like a SIGSEGV), then those tests might need
3612 to be re-instated. Given, however, that the tests were only
3613 enabled when momentary breakpoints were not being used, I
3614 suspect that it won't be the case.
3615
3616 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
3617 be necessary for call dummies on a non-executable stack on
3618 SPARC. */
3619
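/* A SIGTRAP is not random if the bpstat explains it, a watchpoint
triggered, we were stepping over a breakpoint, or we are inside a
stepping range with no step-resume breakpoint pending (i.e., a
single-step just completed). */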
3620 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3621 ecs->random_signal
3622 = !(bpstat_explains_signal (ecs->event_thread->stop_bpstat)
3623 || stopped_by_watchpoint
3624 || ecs->event_thread->trap_expected
3625 || (ecs->event_thread->step_range_end
3626 && ecs->event_thread->step_resume_breakpoint == NULL));
3627 else
3628 {
3629 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
3630 if (!ecs->random_signal)
3631 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3632 }
3633 }
3634
3635 /* When we reach this point, we've pretty much decided
3636 that the reason for stopping must've been a random
3637 (unexpected) signal. */
3638
3639 else
3640 ecs->random_signal = 1;
3641
3642 process_event_stop_test:
3643
3644 /* Re-fetch current thread's frame in case we did a
3645 "goto process_event_stop_test" above. */
3646 frame = get_current_frame ();
3647 gdbarch = get_frame_arch (frame);
3648
3649 /* For the program's own signals, act according to
3650 the signal handling tables. */
3651
3652 if (ecs->random_signal)
3653 {
3654 /* Signal not for debugging purposes. */
3655 int printed = 0;
3656
3657 if (debug_infrun)
3658 fprintf_unfiltered (gdb_stdlog, "infrun: random signal %d\n",
3659 ecs->event_thread->stop_signal);
3660
3661 stopped_by_random_signal = 1;
3662
3663 if (signal_print[ecs->event_thread->stop_signal])
3664 {
3665 printed = 1;
3666 target_terminal_ours_for_output ();
3667 print_stop_reason (SIGNAL_RECEIVED, ecs->event_thread->stop_signal);
3668 }
3669 /* Always stop on signals if we're either just gaining control
3670 of the program, or the user explicitly requested this thread
3671 to remain stopped. */
3672 if (stop_soon != NO_STOP_QUIETLY
3673 || ecs->event_thread->stop_requested
3674 || signal_stop_state (ecs->event_thread->stop_signal))
3675 {
3676 stop_stepping (ecs);
3677 return;
3678 }
3679 /* If not going to stop, give terminal back
3680 if we took it away. */
3681 else if (printed)
3682 target_terminal_inferior ();
3683
3684 /* Clear the signal if it should not be passed. */
3685 if (signal_program[ecs->event_thread->stop_signal] == 0)
3686 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3687
3688 if (ecs->event_thread->prev_pc == stop_pc
3689 && ecs->event_thread->trap_expected
3690 && ecs->event_thread->step_resume_breakpoint == NULL)
3691 {
3692 /* We were just starting a new sequence, attempting to
3693 single-step off of a breakpoint and expecting a SIGTRAP.
3694 Instead this signal arrives. This signal will take us out
3695 of the stepping range so GDB needs to remember to, when
3696 the signal handler returns, resume stepping off that
3697 breakpoint. */
3698 /* To simplify things, "continue" is forced to use the same
3699 code paths as single-step - set a breakpoint at the
3700 signal return address and then, once hit, step off that
3701 breakpoint. */
3702 if (debug_infrun)
3703 fprintf_unfiltered (gdb_stdlog,
3704 "infrun: signal arrived while stepping over "
3705 "breakpoint\n");
3706
3707 insert_step_resume_breakpoint_at_frame (frame);
3708 ecs->event_thread->step_after_step_resume_breakpoint = 1;
3709 keep_going (ecs);
3710 return;
3711 }
3712
3713 if (ecs->event_thread->step_range_end != 0
3714 && ecs->event_thread->stop_signal != TARGET_SIGNAL_0
3715 && (ecs->event_thread->step_range_start <= stop_pc
3716 && stop_pc < ecs->event_thread->step_range_end)
3717 && frame_id_eq (get_stack_frame_id (frame),
3718 ecs->event_thread->step_stack_frame_id)
3719 && ecs->event_thread->step_resume_breakpoint == NULL)
3720 {
3721 /* The inferior is about to take a signal that will take it
3722 out of the single step range. Set a breakpoint at the
3723 current PC (which is presumably where the signal handler
3724 will eventually return) and then allow the inferior to
3725 run free.
3726
3727 Note that this is only needed for a signal delivered
3728 while in the single-step range. Nested signals aren't a
3729 problem as they eventually all return. */
3730 if (debug_infrun)
3731 fprintf_unfiltered (gdb_stdlog,
3732 "infrun: signal may take us out of "
3733 "single-step range\n");
3734
3735 insert_step_resume_breakpoint_at_frame (frame);
3736 keep_going (ecs);
3737 return;
3738 }
3739
3740 /* Note: step_resume_breakpoint may be non-NULL. This occurs
3741 when either there's a nested signal, or when there's a
3742 pending signal enabled just as the signal handler returns
3743 (leaving the inferior at the step-resume-breakpoint without
3744 actually executing it). Either way continue until the
3745 breakpoint is really hit. */
3746 keep_going (ecs);
3747 return;
3748 }
3749
3750 /* Handle cases caused by hitting a breakpoint. */
3751 {
3752 CORE_ADDR jmp_buf_pc;
3753 struct bpstat_what what;
3754
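/* bpstat_what condenses the bpstat chain into the single most
important action to take next (what.main_action), plus a flag noting
whether a call-dummy breakpoint was among the hits. */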
3755 what = bpstat_what (ecs->event_thread->stop_bpstat);
3756
3757 if (what.call_dummy)
3758 {
3759 stop_stack_dummy = 1;
3760 }
3761
3762 switch (what.main_action)
3763 {
3764 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
3765 /* If we hit the breakpoint at longjmp while stepping, we
3766 install a momentary breakpoint at the target of the
3767 jmp_buf. */
3768
3769 if (debug_infrun)
3770 fprintf_unfiltered (gdb_stdlog,
3771 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
3772
3773 ecs->event_thread->stepping_over_breakpoint = 1;
3774
3775 if (!gdbarch_get_longjmp_target_p (gdbarch)
3776 || !gdbarch_get_longjmp_target (gdbarch, frame, &jmp_buf_pc))
3777 {
3778 if (debug_infrun)
3779 fprintf_unfiltered (gdb_stdlog, "\
3780 infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME (!gdbarch_get_longjmp_target)\n");
3781 keep_going (ecs);
3782 return;
3783 }
3784
3785 /* We're going to replace the current step-resume breakpoint
3786 with a longjmp-resume breakpoint. */
3787 delete_step_resume_breakpoint (ecs->event_thread);
3788
3789 /* Insert a breakpoint at resume address. */
3790 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
3791
3792 keep_going (ecs);
3793 return;
3794
3795 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
3796 if (debug_infrun)
3797 fprintf_unfiltered (gdb_stdlog,
3798 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
3799
3800 gdb_assert (ecs->event_thread->step_resume_breakpoint != NULL);
3801 delete_step_resume_breakpoint (ecs->event_thread);
3802
3803 ecs->event_thread->stop_step = 1;
3804 print_stop_reason (END_STEPPING_RANGE, 0);
3805 stop_stepping (ecs);
3806 return;
3807
3808 case BPSTAT_WHAT_SINGLE:
3809 if (debug_infrun)
3810 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
3811 ecs->event_thread->stepping_over_breakpoint = 1;
3812 /* Still need to check other stuff, at least the case
3813 where we are stepping and step out of the right range. */
3814 break;
3815
3816 case BPSTAT_WHAT_STOP_NOISY:
3817 if (debug_infrun)
3818 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
3819 stop_print_frame = 1;
3820
3821 /* We are about to nuke the step_resume_breakpoint via the
3822 cleanup chain, so no need to worry about it here. */
3823
3824 stop_stepping (ecs);
3825 return;
3826
3827 case BPSTAT_WHAT_STOP_SILENT:
3828 if (debug_infrun)
3829 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
3830 stop_print_frame = 0;
3831
3832 /* We are about to nuke the step_resume_breakpoint via the
3833 cleanup chain, so no need to worry about it here. */
3834
3835 stop_stepping (ecs);
3836 return;
3837
3838 case BPSTAT_WHAT_STEP_RESUME:
3839 if (debug_infrun)
3840 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
3841
3842 delete_step_resume_breakpoint (ecs->event_thread);
3843 if (ecs->event_thread->step_after_step_resume_breakpoint)
3844 {
3845 /* Back when the step-resume breakpoint was inserted, we
3846 were trying to single-step off a breakpoint. Go back
3847 to doing that. */
3848 ecs->event_thread->step_after_step_resume_breakpoint = 0;
3849 ecs->event_thread->stepping_over_breakpoint = 1;
3850 keep_going (ecs);
3851 return;
3852 }
3853 if (stop_pc == ecs->stop_func_start
3854 && execution_direction == EXEC_REVERSE)
3855 {
3856 /* We are stepping over a function call in reverse, and
3857 just hit the step-resume breakpoint at the start
3858 address of the function. Go back to single-stepping,
3859 which should take us back to the function call. */
3860 ecs->event_thread->stepping_over_breakpoint = 1;
3861 keep_going (ecs);
3862 return;
3863 }
3864 break;
3865
3866 case BPSTAT_WHAT_CHECK_SHLIBS:
3867 {
3868 if (debug_infrun)
3869 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_CHECK_SHLIBS\n");
3870
3871 /* Check for any newly added shared libraries if we're
3872 supposed to be adding them automatically. Switch
3873 terminal for any messages produced by
3874 breakpoint_re_set. */
3875 target_terminal_ours_for_output ();
3876 /* NOTE: cagney/2003-11-25: Make certain that the target
3877 stack's section table is kept up-to-date. Architectures,
3878 (e.g., PPC64), use the section table to perform
3879 operations such as address => section name and hence
3880 require the table to contain all sections (including
3881 those found in shared libraries). */
3882 #ifdef SOLIB_ADD
3883 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
3884 #else
3885 solib_add (NULL, 0, &current_target, auto_solib_add);
3886 #endif
3887 target_terminal_inferior ();
3888
3889 /* If requested, stop when the dynamic linker notifies
3890 gdb of events. This allows the user to get control
3891 and place breakpoints in initializer routines for
3892 dynamically loaded objects (among other things). */
3893 if (stop_on_solib_events || stop_stack_dummy)
3894 {
3895 stop_stepping (ecs);
3896 return;
3897 }
3898 else
3899 {
3900 /* We want to step over this breakpoint, then keep going. */
3901 ecs->event_thread->stepping_over_breakpoint = 1;
3902 break;
3903 }
3904 }
3905 break;
3906
3907 case BPSTAT_WHAT_CHECK_JIT:
3908 if (debug_infrun)
3909 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_CHECK_JIT\n");
3910
3911 /* Switch terminal for any messages produced by breakpoint_re_set. */
3912 target_terminal_ours_for_output ();
3913
3914 jit_event_handler (gdbarch);
3915
3916 target_terminal_inferior ();
3917
3918 /* We want to step over this breakpoint, then keep going. */
3919 ecs->event_thread->stepping_over_breakpoint = 1;
3920
3921 break;
3922
3923 case BPSTAT_WHAT_LAST:
3924 /* Not a real code, but listed here to shut up gcc -Wall. */
3925
3926 case BPSTAT_WHAT_KEEP_CHECKING:
3927 break;
3928 }
3929 }
3930
3931 /* We come here if we hit a breakpoint but should not
3932 stop for it. Possibly we also were stepping
3933 and should stop for that. So fall through and
3934 test for stepping. But, if not stepping,
3935 do not stop. */
3936
3937 /* In all-stop mode, if we're currently stepping but have stopped in
3938 some other thread, we need to switch back to the stepped thread. */
3939 if (!non_stop)
3940 {
3941 struct thread_info *tp;
3942 tp = iterate_over_threads (currently_stepping_or_nexting_callback,
3943 ecs->event_thread);
3944 if (tp)
3945 {
3946 /* However, if the current thread is blocked on some internal
3947 breakpoint, and we simply need to step over that breakpoint
3948 to get it going again, do that first. */
3949 if ((ecs->event_thread->trap_expected
3950 && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP)
3951 || ecs->event_thread->stepping_over_breakpoint)
3952 {
3953 keep_going (ecs);
3954 return;
3955 }
3956
3957 /* If the stepping thread exited, then don't try to switch
3958 back and resume it, which could fail in several different
3959 ways depending on the target. Instead, just keep going.
3960
3961 We can find a stepping dead thread in the thread list in
3962 two cases:
3963
3964 - The target supports thread exit events, and when the
3965 target tries to delete the thread from the thread list,
3966 inferior_ptid pointed at the exiting thread. In such
3967 case, calling delete_thread does not really remove the
3968 thread from the list; instead, the thread is left listed,
3969 with 'exited' state.
3970
3971 - The target's debug interface does not support thread
3972 exit events, and so we have no idea whatsoever if the
3973 previously stepping thread is still alive. For that
3974 reason, we need to synchronously query the target
3975 now. */
3976 if (is_exited (tp->ptid)
3977 || !target_thread_alive (tp->ptid))
3978 {
3979 if (debug_infrun)
3980 fprintf_unfiltered (gdb_stdlog, "\
3981 infrun: not switching back to stepped thread, it has vanished\n");
3982
3983 delete_thread (tp->ptid);
3984 keep_going (ecs);
3985 return;
3986 }
3987
3988 /* Otherwise, we no longer expect a trap in the current thread.
3989 Clear the trap_expected flag before switching back -- this is
3990 what keep_going would do as well, if we called it. */
3991 ecs->event_thread->trap_expected = 0;
3992
3993 if (debug_infrun)
3994 fprintf_unfiltered (gdb_stdlog,
3995 "infrun: switching back to stepped thread\n");
3996
3997 ecs->event_thread = tp;
3998 ecs->ptid = tp->ptid;
3999 context_switch (ecs->ptid);
4000 keep_going (ecs);
4001 return;
4002 }
4003 }
4004
4005 /* Are we stepping to get the inferior out of the dynamic linker's
4006 hook (and possibly the dld itself) after catching a shlib
4007 event? */
4008 if (ecs->event_thread->stepping_through_solib_after_catch)
4009 {
4010 #if defined(SOLIB_ADD)
4011 /* Have we reached our destination? If not, keep going. */
4012 if (SOLIB_IN_DYNAMIC_LINKER (PIDGET (ecs->ptid), stop_pc))
4013 {
4014 if (debug_infrun)
4015 fprintf_unfiltered (gdb_stdlog, "infrun: stepping in dynamic linker\n");
4016 ecs->event_thread->stepping_over_breakpoint = 1;
4017 keep_going (ecs);
4018 return;
4019 }
4020 #endif
4021 if (debug_infrun)
4022 fprintf_unfiltered (gdb_stdlog, "infrun: step past dynamic linker\n");
4023 /* Else, stop and report the catchpoint(s) whose triggering
4024 caused us to begin stepping. */
4025 ecs->event_thread->stepping_through_solib_after_catch = 0;
4026 bpstat_clear (&ecs->event_thread->stop_bpstat);
4027 ecs->event_thread->stop_bpstat
4028 = bpstat_copy (ecs->event_thread->stepping_through_solib_catchpoints);
4029 bpstat_clear (&ecs->event_thread->stepping_through_solib_catchpoints);
4030 stop_print_frame = 1;
4031 stop_stepping (ecs);
4032 return;
4033 }
4034
4035 if (ecs->event_thread->step_resume_breakpoint)
4036 {
4037 if (debug_infrun)
4038 fprintf_unfiltered (gdb_stdlog,
4039 "infrun: step-resume breakpoint is inserted\n");
4040
4041 /* Having a step-resume breakpoint overrides anything
4042 else having to do with stepping commands until
4043 that breakpoint is reached. */
4044 keep_going (ecs);
4045 return;
4046 }
4047
4048 if (ecs->event_thread->step_range_end == 0)
4049 {
4050 if (debug_infrun)
4051 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4052 /* Likewise if we aren't even stepping. */
4053 keep_going (ecs);
4054 return;
4055 }
4056
4057 /* Re-fetch current thread's frame in case the code above caused
4058 the frame cache to be re-initialized, making our FRAME variable
4059 a dangling pointer. */
4060 frame = get_current_frame ();
4061
4062 /* If stepping through a line, keep going if still within it.
4063
4064 Note that step_range_end is the address of the first instruction
4065 beyond the step range, and NOT the address of the last instruction
4066 within it!
4067
4068 Note also that during reverse execution, we may be stepping
4069 through a function epilogue and therefore must detect when
4070 the current-frame changes in the middle of a line. */
4071
4072 if (stop_pc >= ecs->event_thread->step_range_start
4073 && stop_pc < ecs->event_thread->step_range_end
4074 && (execution_direction != EXEC_REVERSE
4075 || frame_id_eq (get_frame_id (frame),
4076 ecs->event_thread->step_frame_id)))
4077 {
4078 if (debug_infrun)
4079 fprintf_unfiltered
4080 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4081 paddress (gdbarch, ecs->event_thread->step_range_start),
4082 paddress (gdbarch, ecs->event_thread->step_range_end));
4083
4084 /* When stepping backward, stop at beginning of line range
4085 (unless it's the function entry point, in which case
4086 keep going back to the call point). */
4087 if (stop_pc == ecs->event_thread->step_range_start
4088 && stop_pc != ecs->stop_func_start
4089 && execution_direction == EXEC_REVERSE)
4090 {
4091 ecs->event_thread->stop_step = 1;
4092 print_stop_reason (END_STEPPING_RANGE, 0);
4093 stop_stepping (ecs);
4094 }
4095 else
4096 keep_going (ecs);
4097
4098 return;
4099 }
4100
4101 /* We stepped out of the stepping range. */
4102
4103 /* If we are stepping at the source level and entered the runtime
4104 loader dynamic symbol resolution code...
4105
4106 EXEC_FORWARD: we keep on single stepping until we exit the run
4107 time loader code and reach the callee's address.
4108
4109 EXEC_REVERSE: we've already executed the callee (backward), and
4110 the runtime loader code is handled just like any other
4111 undebuggable function call. Now we need only keep stepping
4112 backward through the trampoline code, and that's handled further
4113 down, so there is nothing for us to do here. */
4114
4115 if (execution_direction != EXEC_REVERSE
4116 && ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4117 && in_solib_dynsym_resolve_code (stop_pc))
4118 {
4119 CORE_ADDR pc_after_resolver =
4120 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4121
4122 if (debug_infrun)
4123 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into dynsym resolve code\n");
4124
4125 if (pc_after_resolver)
4126 {
4127 /* Set up a step-resume breakpoint at the address
4128 indicated by SKIP_SOLIB_RESOLVER. */
4129 struct symtab_and_line sr_sal;
4130 init_sal (&sr_sal);
4131 sr_sal.pc = pc_after_resolver;
4132 sr_sal.pspace = get_frame_program_space (frame);
4133
4134 insert_step_resume_breakpoint_at_sal (gdbarch,
4135 sr_sal, null_frame_id);
4136 }
4137
4138 keep_going (ecs);
4139 return;
4140 }
4141
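/* A step_range_end of 1 means stepi/nexti; in that case a signal
trampoline gets no special treatment. */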
4142 if (ecs->event_thread->step_range_end != 1
4143 && (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4144 || ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4145 && get_frame_type (frame) == SIGTRAMP_FRAME)
4146 {
4147 if (debug_infrun)
4148 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into signal trampoline\n");
4149 /* The inferior, while doing a "step" or "next", has ended up in
4150 a signal trampoline (either by a signal being delivered or by
4151 the signal handler returning). Just single-step until the
4152 inferior leaves the trampoline (either by calling the handler
4153 or returning). */
4154 keep_going (ecs);
4155 return;
4156 }
4157
4158 /* Check for subroutine calls. The check for the current frame
4159 equalling the step ID is not necessary - the check of the
4160 previous frame's ID is sufficient - but it is a common case and
4161 cheaper than checking the previous frame's ID.
4162
4163 NOTE: frame_id_eq will never report two invalid frame IDs as
4164 being equal, so to get into this block, both the current and
4165 previous frame must have valid frame IDs. */
4166 /* The outer_frame_id check is a heuristic to detect stepping
4167 through startup code. If we step over an instruction which
4168 sets the stack pointer from an invalid value to a valid value,
4169 we may detect that as a subroutine call from the mythical
4170 "outermost" function. This could be fixed by marking
4171 outermost frames as !stack_p,code_p,special_p. Then the
4172 initial outermost frame, before sp was valid, would
4173 have code_addr == &_start. See the comment in frame_id_eq
4174 for more. */
4175 if (!frame_id_eq (get_stack_frame_id (frame),
4176 ecs->event_thread->step_stack_frame_id)
4177 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4178 ecs->event_thread->step_stack_frame_id)
4179 && (!frame_id_eq (ecs->event_thread->step_stack_frame_id,
4180 outer_frame_id)
4181 || step_start_function != find_pc_function (stop_pc))))
4182 {
4183 CORE_ADDR real_stop_pc;
4184
4185 if (debug_infrun)
4186 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4187
4188 if ((ecs->event_thread->step_over_calls == STEP_OVER_NONE)
4189 || ((ecs->event_thread->step_range_end == 1)
4190 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4191 ecs->stop_func_start)))
4192 {
4193 /* I presume that step_over_calls is only 0 when we're
4194 supposed to be stepping at the assembly language level
4195 ("stepi"). Just stop. */
4196 /* Also, maybe we just did a "nexti" inside a prolog, so we
4197 thought it was a subroutine call but it was not. Stop as
4198 well. FENN */
4199 /* And this works the same backward as frontward. MVS */
4200 ecs->event_thread->stop_step = 1;
4201 print_stop_reason (END_STEPPING_RANGE, 0);
4202 stop_stepping (ecs);
4203 return;
4204 }
4205
4206 /* Reverse stepping through solib trampolines. */
4207
4208 if (execution_direction == EXEC_REVERSE
4209 && ecs->event_thread->step_over_calls != STEP_OVER_NONE
4210 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4211 || (ecs->stop_func_start == 0
4212 && in_solib_dynsym_resolve_code (stop_pc))))
4213 {
4214 /* Any solib trampoline code can be handled in reverse
4215 by simply continuing to single-step. We have already
4216 executed the solib function (backwards), and a few
4217 steps will take us back through the trampoline to the
4218 caller. */
4219 keep_going (ecs);
4220 return;
4221 }
4222
4223 if (ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4224 {
4225 /* We're doing a "next".
4226
4227 Normal (forward) execution: set a breakpoint at the
4228 callee's return address (the address at which the caller
4229 will resume).
4230
4231 Reverse (backward) execution. set the step-resume
4232 breakpoint at the start of the function that we just
4233 stepped into (backwards), and continue to there. When we
4234 get there, we'll need to single-step back to the caller. */
4235
4236 if (execution_direction == EXEC_REVERSE)
4237 {
4238 struct symtab_and_line sr_sal;
4239
4240 /* Normal function call return (static or dynamic). */
4241 init_sal (&sr_sal);
4242 sr_sal.pc = ecs->stop_func_start;
4243 sr_sal.pspace = get_frame_program_space (frame);
4244 insert_step_resume_breakpoint_at_sal (gdbarch,
4245 sr_sal, null_frame_id);
4246 }
4247 else
4248 insert_step_resume_breakpoint_at_caller (frame);
4249
4250 keep_going (ecs);
4251 return;
4252 }
4253
4254 /* If we are in a function call trampoline (a stub between the
4255 calling routine and the real function), locate the real
4256 function. That's what tells us (a) whether we want to step
4257 into it at all, and (b) what prologue we want to run to the
4258 end of, if we do step into it. */
4259 real_stop_pc = skip_language_trampoline (frame, stop_pc);
4260 if (real_stop_pc == 0)
4261 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4262 if (real_stop_pc != 0)
4263 ecs->stop_func_start = real_stop_pc;
4264
4265 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
4266 {
4267 struct symtab_and_line sr_sal;
4268 init_sal (&sr_sal);
4269 sr_sal.pc = ecs->stop_func_start;
4270 sr_sal.pspace = get_frame_program_space (frame);
4271
4272 insert_step_resume_breakpoint_at_sal (gdbarch,
4273 sr_sal, null_frame_id);
4274 keep_going (ecs);
4275 return;
4276 }
4277
4278 /* If we have line number information for the function we are
4279 thinking of stepping into, step into it.
4280
4281 If there are several symtabs at that PC (e.g. with include
4282 files), just want to know whether *any* of them have line
4283 numbers. find_pc_line handles this. */
4284 {
4285 struct symtab_and_line tmp_sal;
4286
4287 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
4288 tmp_sal.pspace = get_frame_program_space (frame);
4289 if (tmp_sal.line != 0)
4290 {
4291 if (execution_direction == EXEC_REVERSE)
4292 handle_step_into_function_backward (gdbarch, ecs);
4293 else
4294 handle_step_into_function (gdbarch, ecs);
4295 return;
4296 }
4297 }
4298
4299 /* If we have no line number and the step-stop-if-no-debug is
4300 set, we stop the step so that the user has a chance to switch
4301 to assembly mode. */
4302 if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4303 && step_stop_if_no_debug)
4304 {
4305 ecs->event_thread->stop_step = 1;
4306 print_stop_reason (END_STEPPING_RANGE, 0);
4307 stop_stepping (ecs);
4308 return;
4309 }
4310
4311 if (execution_direction == EXEC_REVERSE)
4312 {
4313 /* Set a breakpoint at callee's start address.
4314 From there we can step once and be back in the caller. */
4315 struct symtab_and_line sr_sal;
4316 init_sal (&sr_sal);
4317 sr_sal.pc = ecs->stop_func_start;
4318 sr_sal.pspace = get_frame_program_space (frame);
4319 insert_step_resume_breakpoint_at_sal (gdbarch,
4320 sr_sal, null_frame_id);
4321 }
4322 else
4323 /* Set a breakpoint at callee's return address (the address
4324 at which the caller will resume). */
4325 insert_step_resume_breakpoint_at_caller (frame);
4326
4327 keep_going (ecs);
4328 return;
4329 }
4330
4331 /* Reverse stepping through solib trampolines. */
4332
4333 if (execution_direction == EXEC_REVERSE
4334 && ecs->event_thread->step_over_calls != STEP_OVER_NONE)
4335 {
4336 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4337 || (ecs->stop_func_start == 0
4338 && in_solib_dynsym_resolve_code (stop_pc)))
4339 {
4340 /* Any solib trampoline code can be handled in reverse
4341 by simply continuing to single-step. We have already
4342 executed the solib function (backwards), and a few
4343 steps will take us back through the trampoline to the
4344 caller. */
4345 keep_going (ecs);
4346 return;
4347 }
4348 else if (in_solib_dynsym_resolve_code (stop_pc))
4349 {
4350 /* Stepped backward into the solib dynsym resolver.
4351 Set a breakpoint at its start and continue, then
4352 one more step will take us out. */
4353 struct symtab_and_line sr_sal;
4354 init_sal (&sr_sal);
4355 sr_sal.pc = ecs->stop_func_start;
4356 sr_sal.pspace = get_frame_program_space (frame);
4357 insert_step_resume_breakpoint_at_sal (gdbarch,
4358 sr_sal, null_frame_id);
4359 keep_going (ecs);
4360 return;
4361 }
4362 }
4363
4364 /* If we're in the return path from a shared library trampoline,
4365 we want to proceed through the trampoline when stepping. */
4366 if (gdbarch_in_solib_return_trampoline (gdbarch,
4367 stop_pc, ecs->stop_func_name))
4368 {
4369 /* Determine where this trampoline returns. */
4370 CORE_ADDR real_stop_pc;
4371 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4372
4373 if (debug_infrun)
4374 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into solib return tramp\n");
4375
4376 /* Only proceed through if we know where it's going. */
4377 if (real_stop_pc)
4378 {
4379 /* And put the step-breakpoint there and go until there. */
4380 struct symtab_and_line sr_sal;
4381
4382 init_sal (&sr_sal); /* initialize to zeroes */
4383 sr_sal.pc = real_stop_pc;
4384 sr_sal.section = find_pc_overlay (sr_sal.pc);
4385 sr_sal.pspace = get_frame_program_space (frame);
4386
4387 /* Do not specify what the fp should be when we stop since
4388 on some machines the prologue is where the new fp value
4389 is established. */
4390 insert_step_resume_breakpoint_at_sal (gdbarch,
4391 sr_sal, null_frame_id);
4392
4393 /* Restart without fiddling with the step ranges or
4394 other state. */
4395 keep_going (ecs);
4396 return;
4397 }
4398 }
4399
4400 stop_pc_sal = find_pc_line (stop_pc, 0);
4401
4402 /* NOTE: tausq/2004-05-24: This if block used to be done before all
4403 the trampoline processing logic, however, there are some trampolines
4404 that have no names, so we should do trampoline handling first. */
4405 if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4406 && ecs->stop_func_name == NULL
4407 && stop_pc_sal.line == 0)
4408 {
4409 if (debug_infrun)
4410 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into undebuggable function\n");
4411
4412 /* The inferior just stepped into, or returned to, an
4413 undebuggable function (where there is no debugging information
4414 and no line number corresponding to the address where the
4415 inferior stopped). Since we want to skip this kind of code,
4416 we keep going until the inferior returns from this
4417 function - unless the user has asked us not to (via
4418 set step-mode) or we no longer know how to get back
4419 to the call site. */
4420 if (step_stop_if_no_debug
4421 || !frame_id_p (frame_unwind_caller_id (frame)))
4422 {
4423 /* If we have no line number and the step-stop-if-no-debug
4424 is set, we stop the step so that the user has a chance to
4425 switch to assembly mode. */
4426 ecs->event_thread->stop_step = 1;
4427 print_stop_reason (END_STEPPING_RANGE, 0);
4428 stop_stepping (ecs);
4429 return;
4430 }
4431 else
4432 {
4433 /* Set a breakpoint at callee's return address (the address
4434 at which the caller will resume). */
4435 insert_step_resume_breakpoint_at_caller (frame);
4436 keep_going (ecs);
4437 return;
4438 }
4439 }
4440
4441 if (ecs->event_thread->step_range_end == 1)
4442 {
4443 /* It is stepi or nexti. We always want to stop stepping after
4444 one instruction. */
4445 if (debug_infrun)
4446 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
4447 ecs->event_thread->stop_step = 1;
4448 print_stop_reason (END_STEPPING_RANGE, 0);
4449 stop_stepping (ecs);
4450 return;
4451 }
4452
4453 if (stop_pc_sal.line == 0)
4454 {
4455 /* We have no line number information. That means to stop
4456 stepping (does this always happen right after one instruction,
4457 when we do "s" in a function with no line numbers,
4458 or can this happen as a result of a return or longjmp?). */
4459 if (debug_infrun)
4460 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
4461 ecs->event_thread->stop_step = 1;
4462 print_stop_reason (END_STEPPING_RANGE, 0);
4463 stop_stepping (ecs);
4464 return;
4465 }
4466
4467 /* Look for "calls" to inlined functions, part one. If the inline
4468 frame machinery detected some skipped call sites, we have entered
4469 a new inline function. */
4470
4471 if (frame_id_eq (get_frame_id (get_current_frame ()),
4472 ecs->event_thread->step_frame_id)
4473 && inline_skipped_frames (ecs->ptid))
4474 {
4475 struct symtab_and_line call_sal;
4476
4477 if (debug_infrun)
4478 fprintf_unfiltered (gdb_stdlog,
4479 "infrun: stepped into inlined function\n");
4480
4481 find_frame_sal (get_current_frame (), &call_sal);
4482
4483 if (ecs->event_thread->step_over_calls != STEP_OVER_ALL)
4484 {
4485 /* For "step", we're going to stop. But if the call site
4486 for this inlined function is on the same source line as
4487 we were previously stepping, go down into the function
4488 first. Otherwise stop at the call site. */
4489
4490 if (call_sal.line == ecs->event_thread->current_line
4491 && call_sal.symtab == ecs->event_thread->current_symtab)
4492 step_into_inline_frame (ecs->ptid);
4493
4494 ecs->event_thread->stop_step = 1;
4495 print_stop_reason (END_STEPPING_RANGE, 0);
4496 stop_stepping (ecs);
4497 return;
4498 }
4499 else
4500 {
4501 /* For "next", we should stop at the call site if it is on a
4502 different source line. Otherwise continue through the
4503 inlined function. */
4504 if (call_sal.line == ecs->event_thread->current_line
4505 && call_sal.symtab == ecs->event_thread->current_symtab)
4506 keep_going (ecs);
4507 else
4508 {
4509 ecs->event_thread->stop_step = 1;
4510 print_stop_reason (END_STEPPING_RANGE, 0);
4511 stop_stepping (ecs);
4512 }
4513 return;
4514 }
4515 }
4516
4517 /* Look for "calls" to inlined functions, part two. If we are still
4518 in the same real function we were stepping through, but we have
4519 to go further up to find the exact frame ID, we are stepping
4520 through a more inlined call beyond its call site. */
4521
4522 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
4523 && !frame_id_eq (get_frame_id (get_current_frame ()),
4524 ecs->event_thread->step_frame_id)
4525 && stepped_in_from (get_current_frame (),
4526 ecs->event_thread->step_frame_id))
4527 {
4528 if (debug_infrun)
4529 fprintf_unfiltered (gdb_stdlog,
4530 "infrun: stepping through inlined function\n");
4531
4532 if (ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4533 keep_going (ecs);
4534 else
4535 {
4536 ecs->event_thread->stop_step = 1;
4537 print_stop_reason (END_STEPPING_RANGE, 0);
4538 stop_stepping (ecs);
4539 }
4540 return;
4541 }
4542
4543 if ((stop_pc == stop_pc_sal.pc)
4544 && (ecs->event_thread->current_line != stop_pc_sal.line
4545 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
4546 {
4547 /* We are at the start of a different line. So stop. Note that
4548 we don't stop if we step into the middle of a different line.
4549 That is said to make things like for (;;) statements work
4550 better. */
4551 if (debug_infrun)
4552 fprintf_unfiltered (gdb_stdlog, "infrun: stepped to a different line\n");
4553 ecs->event_thread->stop_step = 1;
4554 print_stop_reason (END_STEPPING_RANGE, 0);
4555 stop_stepping (ecs);
4556 return;
4557 }
4558
4559 /* We aren't done stepping.
4560
4561 Optimize by setting the stepping range to the line.
4562 (We might not be in the original line, but if we entered a
4563 new line in mid-statement, we continue stepping. This makes
4564 things like for(;;) statements work better.) */
4565
4566 ecs->event_thread->step_range_start = stop_pc_sal.pc;
4567 ecs->event_thread->step_range_end = stop_pc_sal.end;
4568 set_step_info (frame, stop_pc_sal);
4569
4570 if (debug_infrun)
4571 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
4572 keep_going (ecs);
4573 }
4574
4575 /* Is thread TP in the middle of single-stepping? */
4576
4577 static int
4578 currently_stepping (struct thread_info *tp)
4579 {
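/* True while inside a stepping range with no step-resume breakpoint
pending, while stepping over a breakpoint, while stepping through the
dynamic linker after a shlib-event catchpoint, or when the breakpoint
module itself asks for a single-step (bpstat_should_step). */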
4580 return ((tp->step_range_end && tp->step_resume_breakpoint == NULL)
4581 || tp->trap_expected
4582 || tp->stepping_through_solib_after_catch
4583 || bpstat_should_step ());
4584 }
4585
4586 /* Returns true if any thread *but* the one passed in "data" is in the
4587 middle of stepping or of handling a "next". */
4588
4589 static int
4590 currently_stepping_or_nexting_callback (struct thread_info *tp, void *data)
4591 {
4592 if (tp == data)
4593 return 0;
4594
4595 return (tp->step_range_end
4596 || tp->trap_expected
4597 || tp->stepping_through_solib_after_catch);
4598 }
4599
4600 /* Inferior has stepped into a subroutine call with source code that
4601 we should not step over. Do step to the first line of code in
4602 it. */
4603
4604 static void
4605 handle_step_into_function (struct gdbarch *gdbarch,
4606 struct execution_control_state *ecs)
4607 {
4608 struct symtab *s;
4609 struct symtab_and_line stop_func_sal, sr_sal;
4610
4611 s = find_pc_symtab (stop_pc);
4612 if (s && s->language != language_asm)
4613 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
4614 ecs->stop_func_start);
4615
4616 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
4617 /* Use the step_resume_break to step until the end of the prologue,
4618 even if that involves jumps (as it seems to on the vax under
4619 4.2). */
4620 /* If the prologue ends in the middle of a source line, continue to
4621 the end of that source line (if it is still within the function).
4622 Otherwise, just go to end of prologue. */
4623 if (stop_func_sal.end
4624 && stop_func_sal.pc != ecs->stop_func_start
4625 && stop_func_sal.end < ecs->stop_func_end)
4626 ecs->stop_func_start = stop_func_sal.end;
4627
4628 /* Architectures which require breakpoint adjustment might not be able
4629 to place a breakpoint at the computed address. If so, the test
4630 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
4631 ecs->stop_func_start to an address at which a breakpoint may be
4632 legitimately placed.
4633
4634 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
4635 made, GDB will enter an infinite loop when stepping through
4636 optimized code consisting of VLIW instructions which contain
4637 subinstructions corresponding to different source lines. On
4638 FR-V, it's not permitted to place a breakpoint on any but the
4639 first subinstruction of a VLIW instruction. When a breakpoint is
4640 set, GDB will adjust the breakpoint address to the beginning of
4641 the VLIW instruction. Thus, we need to make the corresponding
4642 adjustment here when computing the stop address. */
4643
4644 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
4645 {
4646 ecs->stop_func_start
4647 = gdbarch_adjust_breakpoint_address (gdbarch,
4648 ecs->stop_func_start);
4649 }
4650
4651 if (ecs->stop_func_start == stop_pc)
4652 {
4653 /* We are already there: stop now. */
4654 ecs->event_thread->stop_step = 1;
4655 print_stop_reason (END_STEPPING_RANGE, 0);
4656 stop_stepping (ecs);
4657 return;
4658 }
4659 else
4660 {
4661 /* Put the step-breakpoint there and go until there. */
4662 init_sal (&sr_sal); /* initialize to zeroes */
4663 sr_sal.pc = ecs->stop_func_start;
4664 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
4665 sr_sal.pspace = get_frame_program_space (get_current_frame ());
4666
4667 /* Do not specify what the fp should be when we stop since on
4668 some machines the prologue is where the new fp value is
4669 established. */
4670 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
4671
4672 /* And make sure stepping stops right away then. */
4673 ecs->event_thread->step_range_end = ecs->event_thread->step_range_start;
4674 }
4675 keep_going (ecs);
4676 }
4677
4678 /* Inferior has stepped backward into a subroutine call with source
4679 code that we should not step over. Do step to the beginning of the
4680 last line of code in it. */
4681
4682 static void
4683 handle_step_into_function_backward (struct gdbarch *gdbarch,
4684 struct execution_control_state *ecs)
4685 {
4686 struct symtab *s;
4687 struct symtab_and_line stop_func_sal, sr_sal;
4688
4689 s = find_pc_symtab (stop_pc);
4690 if (s && s->language != language_asm)
4691 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
4692 ecs->stop_func_start);
4693
4694 stop_func_sal = find_pc_line (stop_pc, 0);
4695
4696 /* OK, we're just going to keep stepping here. */
4697 if (stop_func_sal.pc == stop_pc)
4698 {
4699 /* We're there already. Just stop stepping now. */
4700 ecs->event_thread->stop_step = 1;
4701 print_stop_reason (END_STEPPING_RANGE, 0);
4702 stop_stepping (ecs);
4703 }
4704 else
4705 {
4706 /* Else just reset the step range and keep going.
4707 No step-resume breakpoint, they don't work for
4708 epilogues, which can have multiple entry paths. */
4709 ecs->event_thread->step_range_start = stop_func_sal.pc;
4710 ecs->event_thread->step_range_end = stop_func_sal.end;
4711 keep_going (ecs);
4712 }
4713 return;
4714 }
4715
4716 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
4717 This is used both to skip over functions and to skip over other code. */
4718
4719 static void
4720 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
4721 struct symtab_and_line sr_sal,
4722 struct frame_id sr_id)
4723 {
4724 /* There should never be more than one step-resume or longjmp-resume
4725 breakpoint per thread, so we should never be setting a new
4726 step_resume_breakpoint when one is already active. */
4727 gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL);
4728
4729 if (debug_infrun)
4730 fprintf_unfiltered (gdb_stdlog,
4731 "infrun: inserting step-resume breakpoint at %s\n",
4732 paddress (gdbarch, sr_sal.pc));
4733
4734 inferior_thread ()->step_resume_breakpoint
4735 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, bp_step_resume);
4736 }
4737
4738 /* Insert a "step-resume breakpoint" at RETURN_FRAME.pc. This is used
4739 to skip a potential signal handler.
4740
4741 This is called with the interrupted function's frame. The signal
4742 handler, when it returns, will resume the interrupted function at
4743 RETURN_FRAME.pc. */
4744
4745 static void
4746 insert_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
4747 {
4748 struct symtab_and_line sr_sal;
4749 struct gdbarch *gdbarch;
4750
4751 gdb_assert (return_frame != NULL);
4752 init_sal (&sr_sal); /* initialize to zeros */
4753
4754 gdbarch = get_frame_arch (return_frame);
4755 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
4756 sr_sal.section = find_pc_overlay (sr_sal.pc);
4757 sr_sal.pspace = get_frame_program_space (return_frame);
4758
4759 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
4760 get_stack_frame_id (return_frame));
4761 }
4762
4763 /* Similar to insert_step_resume_breakpoint_at_frame, except that it
4764 sets the breakpoint at the previous frame's PC. This is used to
4765 skip a function after stepping into it (for "next" or if the called
4766 function has no debugging information).
4767
4768 The current function has almost always been reached by single
4769 stepping a call or return instruction. NEXT_FRAME belongs to the
4770 current function, and the breakpoint will be set at the caller's
4771 resume address.
4772
4773 This is a separate function rather than reusing
4774 insert_step_resume_breakpoint_at_frame in order to avoid
4775 get_prev_frame, which may stop prematurely (see the implementation
4776 of frame_unwind_caller_id for an example). */
4777
4778 static void
4779 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
4780 {
4781 struct symtab_and_line sr_sal;
4782 struct gdbarch *gdbarch;
4783
4784 /* We shouldn't have gotten here if we don't know where the call site
4785 is. */
4786 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
4787
4788 init_sal (&sr_sal); /* initialize to zeros */
4789
4790 gdbarch = frame_unwind_caller_arch (next_frame);
4791 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
4792 frame_unwind_caller_pc (next_frame));
4793 sr_sal.section = find_pc_overlay (sr_sal.pc);
4794 sr_sal.pspace = frame_unwind_program_space (next_frame);
4795
4796 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
4797 frame_unwind_caller_id (next_frame));
4798 }
4799
4800 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
4801 new breakpoint at the target of a jmp_buf. The handling of
4802 longjmp-resume uses the same mechanisms used for handling
4803 "step-resume" breakpoints. */
4804
4805 static void
4806 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
4807 {
4808 /* There should never be more than one step-resume or longjmp-resume
4809 breakpoint per thread, so we should never be setting a new
4810 longjmp_resume_breakpoint when one is already active. */
4811 gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL);
4812
4813 if (debug_infrun)
4814 fprintf_unfiltered (gdb_stdlog,
4815 "infrun: inserting longjmp-resume breakpoint at %s\n",
4816 paddress (gdbarch, pc));
4817
4818 inferior_thread ()->step_resume_breakpoint =
4819 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
4820 }
4821
4822 static void
4823 stop_stepping (struct execution_control_state *ecs)
4824 {
4825 if (debug_infrun)
4826 fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n");
4827
4828 /* Let callers know we don't want to wait for the inferior anymore. */
4829 ecs->wait_some_more = 0;
4830 }
4831
4832 /* This function handles various cases where we need to continue
4833 waiting for the inferior. */
4834 /* (Used to be the keep_going: label in the old wait_for_inferior) */
4835
4836 static void
4837 keep_going (struct execution_control_state *ecs)
4838 {
4839 /* Make sure normal_stop is called if we get a QUIT handled before
4840 reaching resume. */
4841 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
4842
4843 /* Save the pc before execution, to compare with pc after stop. */
4844 ecs->event_thread->prev_pc
4845 = regcache_read_pc (get_thread_regcache (ecs->ptid));
4846
4847 /* If we have not broken out by now, it means we should keep
4848 running the inferior and not return to the debugger. */
4849
4850 if (ecs->event_thread->trap_expected
4851 && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP)
4852 {
4853 /* We took a signal (which we are supposed to pass through to
4854 the inferior, else we'd not get here) and we haven't yet
4855 gotten our trap. Simply continue. */
4856
4857 discard_cleanups (old_cleanups);
4858 resume (currently_stepping (ecs->event_thread),
4859 ecs->event_thread->stop_signal);
4860 }
4861 else
4862 {
4863 /* Either the trap was not expected, but we are continuing
4864 anyway (the user asked that this signal be passed to the
4865 child)
4866 -- or --
4867 The signal was SIGTRAP, e.g. it was our signal, but we
4868 decided we should resume from it.
4869
4870 We're going to run this baby now!
4871
4872 Note that insert_breakpoints won't try to re-insert
4873 already inserted breakpoints. Therefore, we don't
4874 care if breakpoints were already inserted, or not. */
4875
4876 if (ecs->event_thread->stepping_over_breakpoint)
4877 {
4878 struct regcache *thread_regcache = get_thread_regcache (ecs->ptid);
4879 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
4880 /* Since we can't do a displaced step, we have to remove
4881 the breakpoint while we step it. To keep things
4882 simple, we remove them all. */
4883 remove_breakpoints ();
4884 }
4885 else
4886 {
4887 struct gdb_exception e;
4888 /* Stop stepping when inserting breakpoints
4889 has failed. */
4890 TRY_CATCH (e, RETURN_MASK_ERROR)
4891 {
4892 insert_breakpoints ();
4893 }
4894 if (e.reason < 0)
4895 {
4896 exception_print (gdb_stderr, e);
4897 stop_stepping (ecs);
4898 return;
4899 }
4900 }
4901
4902 ecs->event_thread->trap_expected = ecs->event_thread->stepping_over_breakpoint;
4903
4904 /* Do not deliver SIGNAL_TRAP (except when the user explicitly
4905 specifies that such a signal should be delivered to the
4906 target program).
4907
4908 Typically, this would occur when a user is debugging a
4909 target monitor on a simulator: the target monitor sets a
4910 breakpoint; the simulator encounters this break-point and
4911 halts the simulation, handing control to GDB; GDB, noting
4912 that the break-point isn't valid, returns control back to the
4913 simulator; the simulator then delivers the hardware
4914 equivalent of a SIGNAL_TRAP to the program being debugged. */
4915
4916 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
4917 && !signal_program[ecs->event_thread->stop_signal])
4918 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
4919
4920 discard_cleanups (old_cleanups);
4921 resume (currently_stepping (ecs->event_thread),
4922 ecs->event_thread->stop_signal);
4923 }
4924
4925 prepare_to_wait (ecs);
4926 }
4927
4928 /* This function normally comes after a resume, before
4929 handle_inferior_event exits. It takes care of any last bits of
4930 housekeeping, and sets the all-important wait_some_more flag. */
4931
4932 static void
4933 prepare_to_wait (struct execution_control_state *ecs)
4934 {
4935 if (debug_infrun)
4936 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
4937
4938 /* This is the old end of the while loop. Let everybody know we
4939 want to wait for the inferior some more and get called again
4940 soon. */
4941 ecs->wait_some_more = 1;
4942 }
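
/* Illustrative sketch only (not compiled): keep_going, prepare_to_wait
   and stop_stepping communicate with their caller purely through
   ECS->wait_some_more.  A simplified consumer of that flag would look
   like this (the real loops are wait_for_inferior and
   fetch_inferior_event):

       struct execution_control_state ecss, *ecs = &ecss;

       do
         {
           ... fetch one target event into ECS ...
           handle_inferior_event (ecs);
         }
       while (ecs->wait_some_more);
 */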
4943
4944 /* Print why the inferior has stopped. We always print something when
4945 the inferior exits, or receives a signal. The rest of the cases are
4946 dealt with later on in normal_stop() and print_it_typical(). Ideally
4947 there should be a call to this function from handle_inferior_event()
4948 each time stop_stepping () is called.  */
4949 static void
4950 print_stop_reason (enum inferior_stop_reason stop_reason, int stop_info)
4951 {
4952 switch (stop_reason)
4953 {
4954 case END_STEPPING_RANGE:
4955 /* We are done with a step/next/si/ni command. */
4956 /* For now print nothing. */
4957 /* Print a message only if not in the middle of doing a "step n"
4958 operation for n > 1 */
4959 if (!inferior_thread ()->step_multi
4960 || !inferior_thread ()->stop_step)
4961 if (ui_out_is_mi_like_p (uiout))
4962 ui_out_field_string
4963 (uiout, "reason",
4964 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
4965 break;
4966 case SIGNAL_EXITED:
4967 /* The inferior was terminated by a signal. */
4968 annotate_signalled ();
4969 if (ui_out_is_mi_like_p (uiout))
4970 ui_out_field_string
4971 (uiout, "reason",
4972 async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
4973 ui_out_text (uiout, "\nProgram terminated with signal ");
4974 annotate_signal_name ();
4975 ui_out_field_string (uiout, "signal-name",
4976 target_signal_to_name (stop_info));
4977 annotate_signal_name_end ();
4978 ui_out_text (uiout, ", ");
4979 annotate_signal_string ();
4980 ui_out_field_string (uiout, "signal-meaning",
4981 target_signal_to_string (stop_info));
4982 annotate_signal_string_end ();
4983 ui_out_text (uiout, ".\n");
4984 ui_out_text (uiout, "The program no longer exists.\n");
4985 break;
4986 case EXITED:
4987 /* The inferior program is finished. */
4988 annotate_exited (stop_info);
4989 if (stop_info)
4990 {
4991 if (ui_out_is_mi_like_p (uiout))
4992 ui_out_field_string (uiout, "reason",
4993 async_reason_lookup (EXEC_ASYNC_EXITED));
4994 ui_out_text (uiout, "\nProgram exited with code ");
4995 ui_out_field_fmt (uiout, "exit-code", "0%o",
4996 (unsigned int) stop_info);
4997 ui_out_text (uiout, ".\n");
4998 }
4999 else
5000 {
5001 if (ui_out_is_mi_like_p (uiout))
5002 ui_out_field_string
5003 (uiout, "reason",
5004 async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
5005 ui_out_text (uiout, "\nProgram exited normally.\n");
5006 }
5007 /* Support the --return-child-result option. */
5008 return_child_result_value = stop_info;
5009 break;
5010 case SIGNAL_RECEIVED:
5011 /* Signal received. The signal table tells us to print about
5012 it. */
5013 annotate_signal ();
5014
5015 if (stop_info == TARGET_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
5016 {
5017 struct thread_info *t = inferior_thread ();
5018
5019 ui_out_text (uiout, "\n[");
5020 ui_out_field_string (uiout, "thread-name",
5021 target_pid_to_str (t->ptid));
5022 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
5023 ui_out_text (uiout, " stopped");
5024 }
5025 else
5026 {
5027 ui_out_text (uiout, "\nProgram received signal ");
5028 annotate_signal_name ();
5029 if (ui_out_is_mi_like_p (uiout))
5030 ui_out_field_string
5031 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
5032 ui_out_field_string (uiout, "signal-name",
5033 target_signal_to_name (stop_info));
5034 annotate_signal_name_end ();
5035 ui_out_text (uiout, ", ");
5036 annotate_signal_string ();
5037 ui_out_field_string (uiout, "signal-meaning",
5038 target_signal_to_string (stop_info));
5039 annotate_signal_string_end ();
5040 }
5041 ui_out_text (uiout, ".\n");
5042 break;
5043 case NO_HISTORY:
5044 /* Reverse execution: target ran out of history info. */
5045 ui_out_text (uiout, "\nNo more reverse-execution history.\n");
5046 break;
5047 default:
5048 internal_error (__FILE__, __LINE__,
5049 _("print_stop_reason: unrecognized enum value"));
5050 break;
5051 }
5052 }
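
/* For example, when an MI front end is connected, END_STEPPING_RANGE
   results in a "*stopped" record whose reason comes from
   async_reason_lookup, along the lines of (illustrative output only):

       *stopped,reason="end-stepping-range",thread-id="1",...

   whereas on the plain CLI nothing is printed here and the new source
   line is shown later from normal_stop.  */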
5053 \f
5054
5055 /* Here to return control to GDB when the inferior stops for real.
5056 Print appropriate messages, remove breakpoints, give terminal our modes.
5057
5058 STOP_PRINT_FRAME nonzero means print the executing frame
5059 (pc, function, args, file, line number and line text). */
5062
5063 void
5064 normal_stop (void)
5065 {
5066 struct target_waitstatus last;
5067 ptid_t last_ptid;
5068 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
5069
5070 get_last_target_status (&last_ptid, &last);
5071
5072 /* If an exception is thrown from this point on, make sure to
5073 propagate GDB's knowledge of the executing state to the
5074 frontend/user running state. A QUIT is an easy exception to see
5075 here, so do this before any filtered output. */
5076 if (!non_stop)
5077 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
5078 else if (last.kind != TARGET_WAITKIND_SIGNALLED
5079 && last.kind != TARGET_WAITKIND_EXITED)
5080 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
5081
5082 /* In non-stop mode, we don't want GDB to switch threads behind the
5083 user's back, to avoid races where the user is typing a command to
5084 apply to thread x, but GDB switches to thread y before the user
5085 finishes entering the command. */
5086
5087 /* As with the notification of thread events, we want to delay
5088 notifying the user that we've switched thread context until
5089 the inferior actually stops.
5090
5091 There's no point in saying anything if the inferior has exited.
5092 Note that SIGNALLED here means "exited with a signal", not
5093 "received a signal". */
5094 if (!non_stop
5095 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
5096 && target_has_execution
5097 && last.kind != TARGET_WAITKIND_SIGNALLED
5098 && last.kind != TARGET_WAITKIND_EXITED)
5099 {
5100 target_terminal_ours_for_output ();
5101 printf_filtered (_("[Switching to %s]\n"),
5102 target_pid_to_str (inferior_ptid));
5103 annotate_thread_changed ();
5104 previous_inferior_ptid = inferior_ptid;
5105 }
5106
5107 if (!breakpoints_always_inserted_mode () && target_has_execution)
5108 {
5109 if (remove_breakpoints ())
5110 {
5111 target_terminal_ours_for_output ();
5112 printf_filtered (_("\
5113 Cannot remove breakpoints because program is no longer writable.\n\
5114 Further execution is probably impossible.\n"));
5115 }
5116 }
5117
5118 /* If an auto-display called a function and that got a signal,
5119 delete that auto-display to avoid an infinite recursion. */
5120
5121 if (stopped_by_random_signal)
5122 disable_current_display ();
5123
5124 /* Don't print a message if in the middle of doing a "step n"
5125 operation for n > 1 */
5126 if (target_has_execution
5127 && last.kind != TARGET_WAITKIND_SIGNALLED
5128 && last.kind != TARGET_WAITKIND_EXITED
5129 && inferior_thread ()->step_multi
5130 && inferior_thread ()->stop_step)
5131 goto done;
5132
5133 target_terminal_ours ();
5134
5135 /* Set the current source location. This will also happen if we
5136 display the frame below, but the current SAL will be incorrect
5137 during a user hook-stop function. */
5138 if (has_stack_frames () && !stop_stack_dummy)
5139 set_current_sal_from_frame (get_current_frame (), 1);
5140
5141 /* Let the user/frontend see the threads as stopped. */
5142 do_cleanups (old_chain);
5143
5144 /* Look up the hook_stop and run it (CLI internally handles problem
5145 of stop_command's pre-hook not existing). */
5146 if (stop_command)
5147 catch_errors (hook_stop_stub, stop_command,
5148 "Error while running hook_stop:\n", RETURN_MASK_ALL);
5149
5150 if (!has_stack_frames ())
5151 goto done;
5152
5153 if (last.kind == TARGET_WAITKIND_SIGNALLED
5154 || last.kind == TARGET_WAITKIND_EXITED)
5155 goto done;
5156
5157 /* Select innermost stack frame - i.e., current frame is frame 0,
5158 and current location is based on that.
5159 Don't do this on return from a stack dummy routine,
5160 or if the program has exited. */
5161
5162 if (!stop_stack_dummy)
5163 {
5164 select_frame (get_current_frame ());
5165
5166 /* Print current location without a level number, if
5167 we have changed functions or hit a breakpoint.
5168 Print source line if we have one.
5169 bpstat_print() contains the logic deciding in detail
5170 what to print, based on the event(s) that just occurred. */
5171
5172 /* If --batch-silent is enabled then there's no need to print the current
5173 source location, and trying to do so risks an error message about
5174 missing source files. */
5175 if (stop_print_frame && !batch_silent)
5176 {
5177 int bpstat_ret;
5178 int source_flag;
5179 int do_frame_printing = 1;
5180 struct thread_info *tp = inferior_thread ();
5181
5182 bpstat_ret = bpstat_print (tp->stop_bpstat);
5183 switch (bpstat_ret)
5184 {
5185 case PRINT_UNKNOWN:
5186 /* If we had hit a shared library event breakpoint,
5187 bpstat_print would print out this message. If we hit
5188 an OS-level shared library event, do the same
5189 thing. */
5190 if (last.kind == TARGET_WAITKIND_LOADED)
5191 {
5192 printf_filtered (_("Stopped due to shared library event\n"));
5193 source_flag = SRC_LINE; /* something bogus */
5194 do_frame_printing = 0;
5195 break;
5196 }
5197
5198 /* FIXME: cagney/2002-12-01: Given that a frame ID does
5199 (or should) carry around the function and does (or
5200 should) use that when doing a frame comparison. */
5201 if (tp->stop_step
5202 && frame_id_eq (tp->step_frame_id,
5203 get_frame_id (get_current_frame ()))
5204 && step_start_function == find_pc_function (stop_pc))
5205 source_flag = SRC_LINE; /* finished step, just print source line */
5206 else
5207 source_flag = SRC_AND_LOC; /* print location and source line */
5208 break;
5209 case PRINT_SRC_AND_LOC:
5210 source_flag = SRC_AND_LOC; /* print location and source line */
5211 break;
5212 case PRINT_SRC_ONLY:
5213 source_flag = SRC_LINE;
5214 break;
5215 case PRINT_NOTHING:
5216 source_flag = SRC_LINE; /* something bogus */
5217 do_frame_printing = 0;
5218 break;
5219 default:
5220 internal_error (__FILE__, __LINE__, _("Unknown value."));
5221 }
5222
5223 /* The behavior of this routine with respect to the source
5224 flag is:
5225 SRC_LINE: Print only source line
5226 LOCATION: Print only location
5227 SRC_AND_LOC: Print location and source line */
5228 if (do_frame_printing)
5229 print_stack_frame (get_selected_frame (NULL), 0, source_flag);
5230
5231 /* Display the auto-display expressions. */
5232 do_displays ();
5233 }
5234 }
5235
5236 /* Save the function value return registers, if we care.
5237 We might be about to restore their previous contents. */
5238 if (inferior_thread ()->proceed_to_finish)
5239 {
5240 /* This should not be necessary. */
5241 if (stop_registers)
5242 regcache_xfree (stop_registers);
5243
5244 /* NB: The copy goes through to the target picking up the value of
5245 all the registers. */
5246 stop_registers = regcache_dup (get_current_regcache ());
5247 }
5248
5249 if (stop_stack_dummy)
5250 {
5251 /* Pop the empty frame that contains the stack dummy.
5252 This also restores inferior state prior to the call
5253 (struct inferior_thread_state). */
5254 struct frame_info *frame = get_current_frame ();
5255 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
5256 frame_pop (frame);
5257 /* frame_pop() calls reinit_frame_cache as the last thing it does
5258 which means there's currently no selected frame. We don't need
5259 to re-establish a selected frame if the dummy call returns normally,
5260 that will be done by restore_inferior_status. However, we do have
5261 to handle the case where the dummy call is returning after being
5262 stopped (e.g. the dummy call previously hit a breakpoint). We
5263 can't know which case we have so just always re-establish a
5264 selected frame here. */
5265 select_frame (get_current_frame ());
5266 }
5267
5268 done:
5269 annotate_stopped ();
5270
5271 /* Suppress the stop observer if we're in the middle of:
5272
5273 - a step n (n > 1), as there are still more steps to be done.
5274
5275 - a "finish" command, as the observer will be called in
5276 finish_command_continuation, so it can include the inferior
5277 function's return value.
5278
5279 - calling an inferior function, as we pretend the inferior didn't
5280 run at all. The return value of the call is handled by the
5281 expression evaluator, through call_function_by_hand. */
5282
5283 if (!target_has_execution
5284 || last.kind == TARGET_WAITKIND_SIGNALLED
5285 || last.kind == TARGET_WAITKIND_EXITED
5286 || (!inferior_thread ()->step_multi
5287 && !(inferior_thread ()->stop_bpstat
5288 && inferior_thread ()->proceed_to_finish)
5289 && !inferior_thread ()->in_infcall))
5290 {
5291 if (!ptid_equal (inferior_ptid, null_ptid))
5292 observer_notify_normal_stop (inferior_thread ()->stop_bpstat,
5293 stop_print_frame);
5294 else
5295 observer_notify_normal_stop (NULL, stop_print_frame);
5296 }
5297
5298 if (target_has_execution)
5299 {
5300 if (last.kind != TARGET_WAITKIND_SIGNALLED
5301 && last.kind != TARGET_WAITKIND_EXITED)
5302 /* Delete the breakpoint we stopped at, if it wants to be deleted.
5303 Delete any breakpoint that is to be deleted at the next stop. */
5304 breakpoint_auto_delete (inferior_thread ()->stop_bpstat);
5305 }
5306
5307 /* Try to get rid of automatically added inferiors that are no
5308 longer needed. Keeping those around slows down things linearly.
5309 Note that this never removes the current inferior. */
5310 prune_inferiors ();
5311 }
5312
5313 static int
5314 hook_stop_stub (void *cmd)
5315 {
5316 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
5317 return (0);
5318 }
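
/* The pre-hook executed via hook_stop_stub is the user-visible
   "hook-stop".  For example (illustrative session), a user can run
   commands at every stop with:

       (gdb) define hook-stop
       > info registers
       > end
 */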
5319 \f
5320 int
5321 signal_stop_state (int signo)
5322 {
5323 return signal_stop[signo];
5324 }
5325
5326 int
5327 signal_print_state (int signo)
5328 {
5329 return signal_print[signo];
5330 }
5331
5332 int
5333 signal_pass_state (int signo)
5334 {
5335 return signal_program[signo];
5336 }
5337
5338 int
5339 signal_stop_update (int signo, int state)
5340 {
5341 int ret = signal_stop[signo];
5342 signal_stop[signo] = state;
5343 return ret;
5344 }
5345
5346 int
5347 signal_print_update (int signo, int state)
5348 {
5349 int ret = signal_print[signo];
5350 signal_print[signo] = state;
5351 return ret;
5352 }
5353
5354 int
5355 signal_pass_update (int signo, int state)
5356 {
5357 int ret = signal_program[signo];
5358 signal_program[signo] = state;
5359 return ret;
5360 }
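
/* Illustrative sketch only (not compiled): the *_update routines
   install a new setting and return the previous one, which makes a
   temporary override easy to undo:

       int old = signal_pass_update (TARGET_SIGNAL_ALRM, 0);
       ... do something with SIGALRM not passed to the inferior ...
       signal_pass_update (TARGET_SIGNAL_ALRM, old);
 */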
5361
5362 static void
5363 sig_print_header (void)
5364 {
5365 printf_filtered (_("\
5366 Signal Stop\tPrint\tPass to program\tDescription\n"));
5367 }
5368
5369 static void
5370 sig_print_info (enum target_signal oursig)
5371 {
5372 const char *name = target_signal_to_name (oursig);
5373 int name_padding = 13 - strlen (name);
5374
5375 if (name_padding <= 0)
5376 name_padding = 0;
5377
5378 printf_filtered ("%s", name);
5379 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
5380 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
5381 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
5382 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
5383 printf_filtered ("%s\n", target_signal_to_string (oursig));
5384 }
5385
5386 /* Specify how various signals in the inferior should be handled. */
5387
5388 static void
5389 handle_command (char *args, int from_tty)
5390 {
5391 char **argv;
5392 int digits, wordlen;
5393 int sigfirst, signum, siglast;
5394 enum target_signal oursig;
5395 int allsigs;
5396 int nsigs;
5397 unsigned char *sigs;
5398 struct cleanup *old_chain;
5399
5400 if (args == NULL)
5401 {
5402 error_no_arg (_("signal to handle"));
5403 }
5404
5405 /* Allocate and zero an array of flags for which signals to handle. */
5406
5407 nsigs = (int) TARGET_SIGNAL_LAST;
5408 sigs = (unsigned char *) alloca (nsigs);
5409 memset (sigs, 0, nsigs);
5410
5411 /* Break the command line up into args. */
5412
5413 argv = gdb_buildargv (args);
5414 old_chain = make_cleanup_freeargv (argv);
5415
5416 /* Walk through the args, looking for signal oursigs, signal names, and
5417 actions. Signal numbers and signal names may be interspersed with
5418 actions, with the actions being performed for all signals cumulatively
5419 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
5420
5421 while (*argv != NULL)
5422 {
5423 wordlen = strlen (*argv);
5424 for (digits = 0; isdigit ((*argv)[digits]); digits++)
5425 {;
5426 }
5427 allsigs = 0;
5428 sigfirst = siglast = -1;
5429
5430 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
5431 {
5432 /* Apply action to all signals except those used by the
5433 debugger. Silently skip those. */
5434 allsigs = 1;
5435 sigfirst = 0;
5436 siglast = nsigs - 1;
5437 }
5438 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
5439 {
5440 SET_SIGS (nsigs, sigs, signal_stop);
5441 SET_SIGS (nsigs, sigs, signal_print);
5442 }
5443 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
5444 {
5445 UNSET_SIGS (nsigs, sigs, signal_program);
5446 }
5447 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
5448 {
5449 SET_SIGS (nsigs, sigs, signal_print);
5450 }
5451 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
5452 {
5453 SET_SIGS (nsigs, sigs, signal_program);
5454 }
5455 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
5456 {
5457 UNSET_SIGS (nsigs, sigs, signal_stop);
5458 }
5459 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
5460 {
5461 SET_SIGS (nsigs, sigs, signal_program);
5462 }
5463 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
5464 {
5465 UNSET_SIGS (nsigs, sigs, signal_print);
5466 UNSET_SIGS (nsigs, sigs, signal_stop);
5467 }
5468 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
5469 {
5470 UNSET_SIGS (nsigs, sigs, signal_program);
5471 }
5472 else if (digits > 0)
5473 {
5474 /* It is numeric. The numeric signal refers to our own
5475 internal signal numbering from target.h, not to host/target
5476 signal number. This is a feature; users really should be
5477 using symbolic names anyway, and the common ones like
5478 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
5479
5480 sigfirst = siglast = (int)
5481 target_signal_from_command (atoi (*argv));
5482 if ((*argv)[digits] == '-')
5483 {
5484 siglast = (int)
5485 target_signal_from_command (atoi ((*argv) + digits + 1));
5486 }
5487 if (sigfirst > siglast)
5488 {
5489 /* Bet he didn't figure we'd think of this case... */
5490 signum = sigfirst;
5491 sigfirst = siglast;
5492 siglast = signum;
5493 }
5494 }
5495 else
5496 {
5497 oursig = target_signal_from_name (*argv);
5498 if (oursig != TARGET_SIGNAL_UNKNOWN)
5499 {
5500 sigfirst = siglast = (int) oursig;
5501 }
5502 else
5503 {
5504 /* Not a number and not a recognized flag word => complain. */
5505 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
5506 }
5507 }
5508
5509 /* If any signal numbers or symbol names were found, set flags for
5510 which signals to apply actions to. */
5511
5512 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
5513 {
5514 switch ((enum target_signal) signum)
5515 {
5516 case TARGET_SIGNAL_TRAP:
5517 case TARGET_SIGNAL_INT:
5518 if (!allsigs && !sigs[signum])
5519 {
5520 if (query (_("%s is used by the debugger.\n\
5521 Are you sure you want to change it? "), target_signal_to_name ((enum target_signal) signum)))
5522 {
5523 sigs[signum] = 1;
5524 }
5525 else
5526 {
5527 printf_unfiltered (_("Not confirmed, unchanged.\n"));
5528 gdb_flush (gdb_stdout);
5529 }
5530 }
5531 break;
5532 case TARGET_SIGNAL_0:
5533 case TARGET_SIGNAL_DEFAULT:
5534 case TARGET_SIGNAL_UNKNOWN:
5535 /* Make sure that "all" doesn't print these. */
5536 break;
5537 default:
5538 sigs[signum] = 1;
5539 break;
5540 }
5541 }
5542
5543 argv++;
5544 }
5545
5546 for (signum = 0; signum < nsigs; signum++)
5547 if (sigs[signum])
5548 {
5549 target_notice_signals (inferior_ptid);
5550
5551 if (from_tty)
5552 {
5553 /* Show the results. */
5554 sig_print_header ();
5555 for (; signum < nsigs; signum++)
5556 if (sigs[signum])
5557 sig_print_info (signum);
5558 }
5559
5560 break;
5561 }
5562
5563 do_cleanups (old_chain);
5564 }
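
/* For example (illustrative CLI usage):

       (gdb) handle SIGUSR1 nostop noprint pass
       (gdb) handle 14-17 stop print
       (gdb) handle all nopass

   Actions accumulate and apply to every signal or numeric range named
   on the same command line, as described above.  */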
5565
5566 static void
5567 xdb_handle_command (char *args, int from_tty)
5568 {
5569 char **argv;
5570 struct cleanup *old_chain;
5571
5572 if (args == NULL)
5573 error_no_arg (_("xdb command"));
5574
5575 /* Break the command line up into args. */
5576
5577 argv = gdb_buildargv (args);
5578 old_chain = make_cleanup_freeargv (argv);
5579 if (argv[1] != (char *) NULL)
5580 {
5581 char *argBuf;
5582 int bufLen;
5583
5584 bufLen = strlen (argv[0]) + 20;
5585 argBuf = (char *) xmalloc (bufLen);
5586 if (argBuf)
5587 {
5588 int validFlag = 1;
5589 enum target_signal oursig;
5590
5591 oursig = target_signal_from_name (argv[0]);
5592 memset (argBuf, 0, bufLen);
5593 if (strcmp (argv[1], "Q") == 0)
5594 sprintf (argBuf, "%s %s", argv[0], "noprint");
5595 else
5596 {
5597 if (strcmp (argv[1], "s") == 0)
5598 {
5599 if (!signal_stop[oursig])
5600 sprintf (argBuf, "%s %s", argv[0], "stop");
5601 else
5602 sprintf (argBuf, "%s %s", argv[0], "nostop");
5603 }
5604 else if (strcmp (argv[1], "i") == 0)
5605 {
5606 if (!signal_program[oursig])
5607 sprintf (argBuf, "%s %s", argv[0], "pass");
5608 else
5609 sprintf (argBuf, "%s %s", argv[0], "nopass");
5610 }
5611 else if (strcmp (argv[1], "r") == 0)
5612 {
5613 if (!signal_print[oursig])
5614 sprintf (argBuf, "%s %s", argv[0], "print");
5615 else
5616 sprintf (argBuf, "%s %s", argv[0], "noprint");
5617 }
5618 else
5619 validFlag = 0;
5620 }
5621 if (validFlag)
5622 handle_command (argBuf, from_tty);
5623 else
5624 printf_filtered (_("Invalid signal handling flag.\n"));
5625 if (argBuf)
5626 xfree (argBuf);
5627 }
5628 }
5629 do_cleanups (old_chain);
5630 }
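
/* For example (illustrative XDB-mode usage): "z SIGINT s" toggles
   SIGINT between stop and nostop, "z SIGINT r" toggles print/noprint,
   "z SIGINT i" toggles pass/nopass, and "z SIGINT Q" selects noprint;
   each form is rewritten into the equivalent "handle" command.  */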
5631
5632 /* Print current contents of the tables set by the handle command.
5633 It is possible we should just be printing signals actually used
5634 by the current target (but for things to work right when switching
5635 targets, all signals should be in the signal tables). */
5636
5637 static void
5638 signals_info (char *signum_exp, int from_tty)
5639 {
5640 enum target_signal oursig;
5641 sig_print_header ();
5642
5643 if (signum_exp)
5644 {
5645 /* First see if this is a symbol name. */
5646 oursig = target_signal_from_name (signum_exp);
5647 if (oursig == TARGET_SIGNAL_UNKNOWN)
5648 {
5649 /* No, try numeric. */
5650 oursig =
5651 target_signal_from_command (parse_and_eval_long (signum_exp));
5652 }
5653 sig_print_info (oursig);
5654 return;
5655 }
5656
5657 printf_filtered ("\n");
5658 /* These ugly casts brought to you by the native VAX compiler. */
5659 for (oursig = TARGET_SIGNAL_FIRST;
5660 (int) oursig < (int) TARGET_SIGNAL_LAST;
5661 oursig = (enum target_signal) ((int) oursig + 1))
5662 {
5663 QUIT;
5664
5665 if (oursig != TARGET_SIGNAL_UNKNOWN
5666 && oursig != TARGET_SIGNAL_DEFAULT && oursig != TARGET_SIGNAL_0)
5667 sig_print_info (oursig);
5668 }
5669
5670 printf_filtered (_("\nUse the \"handle\" command to change these tables.\n"));
5671 }
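
/* For example, "info signals SIGSEGV" prints a single row using
   sig_print_header and sig_print_info, roughly (illustrative output):

       Signal        Stop	Print	Pass to program	Description
       SIGSEGV       Yes	Yes	Yes		Segmentation fault
 */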
5672
5673 /* The $_siginfo convenience variable is a bit special. We don't know
5674 for sure the type of the value until we actually have a chance to
5675 fetch the data. The type can change depending on gdbarch, so it is
5676 also dependent on which thread you have selected. This is handled by:
5677
5678 1. making $_siginfo be an internalvar that creates a new value on
5679 access.
5680
5681 2. making the value of $_siginfo be an lval_computed value. */
5682
5683 /* This function implements the lval_computed support for reading a
5684 $_siginfo value. */
5685
5686 static void
5687 siginfo_value_read (struct value *v)
5688 {
5689 LONGEST transferred;
5690
5691 transferred =
5692 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
5693 NULL,
5694 value_contents_all_raw (v),
5695 value_offset (v),
5696 TYPE_LENGTH (value_type (v)));
5697
5698 if (transferred != TYPE_LENGTH (value_type (v)))
5699 error (_("Unable to read siginfo"));
5700 }
5701
5702 /* This function implements the lval_computed support for writing a
5703 $_siginfo value. */
5704
5705 static void
5706 siginfo_value_write (struct value *v, struct value *fromval)
5707 {
5708 LONGEST transferred;
5709
5710 transferred = target_write (&current_target,
5711 TARGET_OBJECT_SIGNAL_INFO,
5712 NULL,
5713 value_contents_all_raw (fromval),
5714 value_offset (v),
5715 TYPE_LENGTH (value_type (fromval)));
5716
5717 if (transferred != TYPE_LENGTH (value_type (fromval)))
5718 error (_("Unable to write siginfo"));
5719 }
5720
5721 static struct lval_funcs siginfo_value_funcs =
5722 {
5723 siginfo_value_read,
5724 siginfo_value_write
5725 };
5726
5727 /* Return a new value with the correct type for the siginfo object of
5728 the current thread using architecture GDBARCH. Return a void value
5729 if there's no object available. */
5730
5731 static struct value *
5732 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var)
5733 {
5734 if (target_has_stack
5735 && !ptid_equal (inferior_ptid, null_ptid)
5736 && gdbarch_get_siginfo_type_p (gdbarch))
5737 {
5738 struct type *type = gdbarch_get_siginfo_type (gdbarch);
5739 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
5740 }
5741
5742 return allocate_value (builtin_type (gdbarch)->builtin_void);
5743 }
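
/* Illustrative CLI usage: on a target whose gdbarch supplies a siginfo
   type (GNU/Linux, for instance), the lazy internalvar registered in
   _initialize_infrun lets the user inspect the last signal's data:

       (gdb) print $_siginfo.si_signo
       (gdb) print $_siginfo._sifields._sigfault.si_addr

   On other targets the variable evaluates to void.  (The member names
   above are those of the Linux siginfo type, shown for illustration
   only.)  */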
5744
5745 \f
5746 /* Inferior thread state.
5747 These are details related to the inferior itself, and don't include
5748 things like what frame the user had selected or what gdb was doing
5749 with the target at the time.
5750 For inferior function calls these are things we want to restore
5751 regardless of whether the function call successfully completes
5752 or the dummy frame has to be manually popped. */
5753
5754 struct inferior_thread_state
5755 {
5756 enum target_signal stop_signal;
5757 CORE_ADDR stop_pc;
5758 struct regcache *registers;
5759 };
5760
5761 struct inferior_thread_state *
5762 save_inferior_thread_state (void)
5763 {
5764 struct inferior_thread_state *inf_state = XMALLOC (struct inferior_thread_state);
5765 struct thread_info *tp = inferior_thread ();
5766
5767 inf_state->stop_signal = tp->stop_signal;
5768 inf_state->stop_pc = stop_pc;
5769
5770 inf_state->registers = regcache_dup (get_current_regcache ());
5771
5772 return inf_state;
5773 }
5774
5775 /* Restore inferior session state to INF_STATE. */
5776
5777 void
5778 restore_inferior_thread_state (struct inferior_thread_state *inf_state)
5779 {
5780 struct thread_info *tp = inferior_thread ();
5781
5782 tp->stop_signal = inf_state->stop_signal;
5783 stop_pc = inf_state->stop_pc;
5784
5785 /* The inferior can be gone if the user types "print exit(0)"
5786 (and perhaps other times). */
5787 if (target_has_execution)
5788 /* NB: The register write goes through to the target. */
5789 regcache_cpy (get_current_regcache (), inf_state->registers);
5790 regcache_xfree (inf_state->registers);
5791 xfree (inf_state);
5792 }
5793
5794 static void
5795 do_restore_inferior_thread_state_cleanup (void *state)
5796 {
5797 restore_inferior_thread_state (state);
5798 }
5799
5800 struct cleanup *
5801 make_cleanup_restore_inferior_thread_state (struct inferior_thread_state *inf_state)
5802 {
5803 return make_cleanup (do_restore_inferior_thread_state_cleanup, inf_state);
5804 }
5805
5806 void
5807 discard_inferior_thread_state (struct inferior_thread_state *inf_state)
5808 {
5809 regcache_xfree (inf_state->registers);
5810 xfree (inf_state);
5811 }
5812
5813 struct regcache *
5814 get_inferior_thread_state_regcache (struct inferior_thread_state *inf_state)
5815 {
5816 return inf_state->registers;
5817 }
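
/* Illustrative sketch only (not compiled): a typical use is around an
   inferior function call -- save the thread state, register a cleanup
   that restores it if something goes wrong, and explicitly discard the
   saved copy on the path where it is no longer needed:

       struct inferior_thread_state *state = save_inferior_thread_state ();
       struct cleanup *chain
         = make_cleanup_restore_inferior_thread_state (state);

       ... run the dummy-frame call ...

       discard_cleanups (chain);
       discard_inferior_thread_state (state);
 */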
5818
5819 /* Session related state for inferior function calls.
5820 These are the additional bits of state that need to be restored
5821 when an inferior function call successfully completes. */
5822
5823 struct inferior_status
5824 {
5825 bpstat stop_bpstat;
5826 int stop_step;
5827 int stop_stack_dummy;
5828 int stopped_by_random_signal;
5829 int stepping_over_breakpoint;
5830 CORE_ADDR step_range_start;
5831 CORE_ADDR step_range_end;
5832 struct frame_id step_frame_id;
5833 struct frame_id step_stack_frame_id;
5834 enum step_over_calls_kind step_over_calls;
5835 CORE_ADDR step_resume_break_address;
5836 int stop_after_trap;
5837 int stop_soon;
5838
5839 /* ID of the selected frame when the inferior function call was made. */
5840 struct frame_id selected_frame_id;
5841
5842 int proceed_to_finish;
5843 int in_infcall;
5844 };
5845
5846 /* Save all of the information associated with the inferior<==>gdb
5847 connection. */
5848
5849 struct inferior_status *
5850 save_inferior_status (void)
5851 {
5852 struct inferior_status *inf_status = XMALLOC (struct inferior_status);
5853 struct thread_info *tp = inferior_thread ();
5854 struct inferior *inf = current_inferior ();
5855
5856 inf_status->stop_step = tp->stop_step;
5857 inf_status->stop_stack_dummy = stop_stack_dummy;
5858 inf_status->stopped_by_random_signal = stopped_by_random_signal;
5859 inf_status->stepping_over_breakpoint = tp->trap_expected;
5860 inf_status->step_range_start = tp->step_range_start;
5861 inf_status->step_range_end = tp->step_range_end;
5862 inf_status->step_frame_id = tp->step_frame_id;
5863 inf_status->step_stack_frame_id = tp->step_stack_frame_id;
5864 inf_status->step_over_calls = tp->step_over_calls;
5865 inf_status->stop_after_trap = stop_after_trap;
5866 inf_status->stop_soon = inf->stop_soon;
5867 /* Save original bpstat chain here; replace it with copy of chain.
5868 If caller's caller is walking the chain, they'll be happier if we
5869 hand them back the original chain when restore_inferior_status is
5870 called. */
5871 inf_status->stop_bpstat = tp->stop_bpstat;
5872 tp->stop_bpstat = bpstat_copy (tp->stop_bpstat);
5873 inf_status->proceed_to_finish = tp->proceed_to_finish;
5874 inf_status->in_infcall = tp->in_infcall;
5875
5876 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
5877
5878 return inf_status;
5879 }
5880
5881 static int
5882 restore_selected_frame (void *args)
5883 {
5884 struct frame_id *fid = (struct frame_id *) args;
5885 struct frame_info *frame;
5886
5887 frame = frame_find_by_id (*fid);
5888
5889 /* If frame_find_by_id () returns NULL, then the previously
5890 selected frame has gone away and cannot be restored. */
5891 if (frame == NULL)
5892 {
5893 warning (_("Unable to restore previously selected frame."));
5894 return 0;
5895 }
5896
5897 select_frame (frame);
5898
5899 return (1);
5900 }
5901
5902 /* Restore inferior session state to INF_STATUS. */
5903
5904 void
5905 restore_inferior_status (struct inferior_status *inf_status)
5906 {
5907 struct thread_info *tp = inferior_thread ();
5908 struct inferior *inf = current_inferior ();
5909
5910 tp->stop_step = inf_status->stop_step;
5911 stop_stack_dummy = inf_status->stop_stack_dummy;
5912 stopped_by_random_signal = inf_status->stopped_by_random_signal;
5913 tp->trap_expected = inf_status->stepping_over_breakpoint;
5914 tp->step_range_start = inf_status->step_range_start;
5915 tp->step_range_end = inf_status->step_range_end;
5916 tp->step_frame_id = inf_status->step_frame_id;
5917 tp->step_stack_frame_id = inf_status->step_stack_frame_id;
5918 tp->step_over_calls = inf_status->step_over_calls;
5919 stop_after_trap = inf_status->stop_after_trap;
5920 inf->stop_soon = inf_status->stop_soon;
5921 bpstat_clear (&tp->stop_bpstat);
5922 tp->stop_bpstat = inf_status->stop_bpstat;
5923 inf_status->stop_bpstat = NULL;
5924 tp->proceed_to_finish = inf_status->proceed_to_finish;
5925 tp->in_infcall = inf_status->in_infcall;
5926
5927 if (target_has_stack)
5928 {
5929 /* The point of catch_errors is that if the stack is clobbered,
5930 walking the stack might encounter a garbage pointer and
5931 error() trying to dereference it. */
5932 if (catch_errors
5933 (restore_selected_frame, &inf_status->selected_frame_id,
5934 "Unable to restore previously selected frame:\n",
5935 RETURN_MASK_ERROR) == 0)
5936 /* Error in restoring the selected frame. Select the innermost
5937 frame. */
5938 select_frame (get_current_frame ());
5939 }
5940
5941 xfree (inf_status);
5942 }
5943
5944 static void
5945 do_restore_inferior_status_cleanup (void *sts)
5946 {
5947 restore_inferior_status (sts);
5948 }
5949
5950 struct cleanup *
5951 make_cleanup_restore_inferior_status (struct inferior_status *inf_status)
5952 {
5953 return make_cleanup (do_restore_inferior_status_cleanup, inf_status);
5954 }
5955
5956 void
5957 discard_inferior_status (struct inferior_status *inf_status)
5958 {
5959 /* See save_inferior_status for info on stop_bpstat. */
5960 bpstat_clear (&inf_status->stop_bpstat);
5961 xfree (inf_status);
5962 }
5963 \f
5964 int
5965 inferior_has_forked (ptid_t pid, ptid_t *child_pid)
5966 {
5967 struct target_waitstatus last;
5968 ptid_t last_ptid;
5969
5970 get_last_target_status (&last_ptid, &last);
5971
5972 if (last.kind != TARGET_WAITKIND_FORKED)
5973 return 0;
5974
5975 if (!ptid_equal (last_ptid, pid))
5976 return 0;
5977
5978 *child_pid = last.value.related_pid;
5979 return 1;
5980 }
5981
5982 int
5983 inferior_has_vforked (ptid_t pid, ptid_t *child_pid)
5984 {
5985 struct target_waitstatus last;
5986 ptid_t last_ptid;
5987
5988 get_last_target_status (&last_ptid, &last);
5989
5990 if (last.kind != TARGET_WAITKIND_VFORKED)
5991 return 0;
5992
5993 if (!ptid_equal (last_ptid, pid))
5994 return 0;
5995
5996 *child_pid = last.value.related_pid;
5997 return 1;
5998 }
5999
6000 int
6001 inferior_has_execd (ptid_t pid, char **execd_pathname)
6002 {
6003 struct target_waitstatus last;
6004 ptid_t last_ptid;
6005
6006 get_last_target_status (&last_ptid, &last);
6007
6008 if (last.kind != TARGET_WAITKIND_EXECD)
6009 return 0;
6010
6011 if (!ptid_equal (last_ptid, pid))
6012 return 0;
6013
6014 *execd_pathname = xstrdup (last.value.execd_pathname);
6015 return 1;
6016 }
6017
6018 int
6019 inferior_has_called_syscall (ptid_t pid, int *syscall_number)
6020 {
6021 struct target_waitstatus last;
6022 ptid_t last_ptid;
6023
6024 get_last_target_status (&last_ptid, &last);
6025
6026 if (last.kind != TARGET_WAITKIND_SYSCALL_ENTRY
6027 && last.kind != TARGET_WAITKIND_SYSCALL_RETURN)
6028 return 0;
6029
6030 if (!ptid_equal (last_ptid, pid))
6031 return 0;
6032
6033 *syscall_number = last.value.syscall_number;
6034 return 1;
6035 }
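
/* Illustrative sketch only (not compiled): these predicates let a
   caller ask what the last reported target event was.  For example,
   to react to a fork of the current inferior:

       ptid_t child;

       if (inferior_has_forked (inferior_ptid, &child))
         printf_filtered (_("forked child is %s\n"),
                          target_pid_to_str (child));
 */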
6036
6037 /* Oft used ptids */
6038 ptid_t null_ptid;
6039 ptid_t minus_one_ptid;
6040
6041 /* Create a ptid given the necessary PID, LWP, and TID components. */
6042
6043 ptid_t
6044 ptid_build (int pid, long lwp, long tid)
6045 {
6046 ptid_t ptid;
6047
6048 ptid.pid = pid;
6049 ptid.lwp = lwp;
6050 ptid.tid = tid;
6051 return ptid;
6052 }
6053
6054 /* Create a ptid from just a pid. */
6055
6056 ptid_t
6057 pid_to_ptid (int pid)
6058 {
6059 return ptid_build (pid, 0, 0);
6060 }
6061
6062 /* Fetch the pid (process id) component from a ptid. */
6063
6064 int
6065 ptid_get_pid (ptid_t ptid)
6066 {
6067 return ptid.pid;
6068 }
6069
6070 /* Fetch the lwp (lightweight process) component from a ptid. */
6071
6072 long
6073 ptid_get_lwp (ptid_t ptid)
6074 {
6075 return ptid.lwp;
6076 }
6077
6078 /* Fetch the tid (thread id) component from a ptid. */
6079
6080 long
6081 ptid_get_tid (ptid_t ptid)
6082 {
6083 return ptid.tid;
6084 }
6085
6086 /* ptid_equal() is used to test equality of two ptids. */
6087
6088 int
6089 ptid_equal (ptid_t ptid1, ptid_t ptid2)
6090 {
6091 return (ptid1.pid == ptid2.pid && ptid1.lwp == ptid2.lwp
6092 && ptid1.tid == ptid2.tid);
6093 }
6094
6095 /* Returns true if PTID represents a process. */
6096
6097 int
6098 ptid_is_pid (ptid_t ptid)
6099 {
6100 if (ptid_equal (minus_one_ptid, ptid))
6101 return 0;
6102 if (ptid_equal (null_ptid, ptid))
6103 return 0;
6104
6105 return (ptid_get_lwp (ptid) == 0 && ptid_get_tid (ptid) == 0);
6106 }
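
/* Illustrative sketch only (not compiled): building and decomposing
   ptids with the accessors above:

       ptid_t thread = ptid_build (1234, 1235, 0);
       ptid_t process = pid_to_ptid (1234);

       ptid_get_pid (thread)     => 1234
       ptid_get_lwp (thread)     => 1235
       ptid_is_pid (process)     => 1
       ptid_is_pid (thread)      => 0
 */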
6107
6108 /* restore_inferior_ptid() will be used by the cleanup machinery
6109 to restore the inferior_ptid value saved in a call to
6110 save_inferior_ptid(). */
6111
6112 static void
6113 restore_inferior_ptid (void *arg)
6114 {
6115 ptid_t *saved_ptid_ptr = arg;
6116 inferior_ptid = *saved_ptid_ptr;
6117 xfree (arg);
6118 }
6119
6120 /* Save the value of inferior_ptid so that it may be restored by a
6121 later call to do_cleanups(). Returns the struct cleanup pointer
6122 needed for later doing the cleanup. */
6123
6124 struct cleanup *
6125 save_inferior_ptid (void)
6126 {
6127 ptid_t *saved_ptid_ptr;
6128
6129 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
6130 *saved_ptid_ptr = inferior_ptid;
6131 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
6132 }
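
/* Illustrative sketch only (not compiled): temporarily switching
   inferior_ptid and letting the cleanup machinery switch it back:

       struct cleanup *old_chain = save_inferior_ptid ();

       inferior_ptid = other_ptid;    (OTHER_PTID is hypothetical)
       ... operate on that thread ...

       do_cleanups (old_chain);       (restores inferior_ptid)
 */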
6133 \f
6134
6135 /* User interface for reverse debugging:
6136 Set exec-direction / show exec-direction commands
6137 (the setting takes effect only if the target can execute in reverse). */
6138
6139 enum exec_direction_kind execution_direction = EXEC_FORWARD;
6140 static const char exec_forward[] = "forward";
6141 static const char exec_reverse[] = "reverse";
6142 static const char *exec_direction = exec_forward;
6143 static const char *exec_direction_names[] = {
6144 exec_forward,
6145 exec_reverse,
6146 NULL
6147 };
6148
6149 static void
6150 set_exec_direction_func (char *args, int from_tty,
6151 struct cmd_list_element *cmd)
6152 {
6153 if (target_can_execute_reverse)
6154 {
6155 if (!strcmp (exec_direction, exec_forward))
6156 execution_direction = EXEC_FORWARD;
6157 else if (!strcmp (exec_direction, exec_reverse))
6158 execution_direction = EXEC_REVERSE;
6159 }
6160 }
6161
6162 static void
6163 show_exec_direction_func (struct ui_file *out, int from_tty,
6164 struct cmd_list_element *cmd, const char *value)
6165 {
6166 switch (execution_direction) {
6167 case EXEC_FORWARD:
6168 fprintf_filtered (out, _("Forward.\n"));
6169 break;
6170 case EXEC_REVERSE:
6171 fprintf_filtered (out, _("Reverse.\n"));
6172 break;
6173 case EXEC_ERROR:
6174 default:
6175 fprintf_filtered (out,
6176 _("Forward (target `%s' does not support exec-direction).\n"),
6177 target_shortname);
6178 break;
6179 }
6180 }
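
/* For example (illustrative CLI usage, on a target that supports
   reverse execution, such as process record):

       (gdb) set exec-direction reverse
       (gdb) step
       (gdb) set exec-direction forward

   When the target cannot execute in reverse, set_exec_direction_func
   above leaves execution_direction untouched.  */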
6181
6182 /* User interface for non-stop mode. */
6183
6184 int non_stop = 0;
6185 static int non_stop_1 = 0;
6186
6187 static void
6188 set_non_stop (char *args, int from_tty,
6189 struct cmd_list_element *c)
6190 {
6191 if (target_has_execution)
6192 {
6193 non_stop_1 = non_stop;
6194 error (_("Cannot change this setting while the inferior is running."));
6195 }
6196
6197 non_stop = non_stop_1;
6198 }
6199
6200 static void
6201 show_non_stop (struct ui_file *file, int from_tty,
6202 struct cmd_list_element *c, const char *value)
6203 {
6204 fprintf_filtered (file,
6205 _("Controlling the inferior in non-stop mode is %s.\n"),
6206 value);
6207 }
6208
6209 static void
6210 show_schedule_multiple (struct ui_file *file, int from_tty,
6211 struct cmd_list_element *c, const char *value)
6212 {
6213 fprintf_filtered (file, _("\
6214 Resuming the execution of threads of all processes is %s.\n"), value);
6215 }
6216
6217 void
6218 _initialize_infrun (void)
6219 {
6220 int i;
6221 int numsigs;
6222 struct cmd_list_element *c;
6223
6224 add_info ("signals", signals_info, _("\
6225 What debugger does when program gets various signals.\n\
6226 Specify a signal as argument to print info on that signal only."));
6227 add_info_alias ("handle", "signals", 0);
6228
6229 add_com ("handle", class_run, handle_command, _("\
6230 Specify how to handle a signal.\n\
6231 Args are signals and actions to apply to those signals.\n\
6232 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6233 from 1-15 are allowed for compatibility with old versions of GDB.\n\
6234 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
6235 The special arg \"all\" is recognized to mean all signals except those\n\
6236 used by the debugger, typically SIGTRAP and SIGINT.\n\
6237 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
6238 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
6239 Stop means reenter debugger if this signal happens (implies print).\n\
6240 Print means print a message if this signal happens.\n\
6241 Pass means let program see this signal; otherwise program doesn't know.\n\
6242 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
6243 Pass and Stop may be combined."));
6244 if (xdb_commands)
6245 {
6246 add_com ("lz", class_info, signals_info, _("\
6247 What debugger does when program gets various signals.\n\
6248 Specify a signal as argument to print info on that signal only."));
6249 add_com ("z", class_run, xdb_handle_command, _("\
6250 Specify how to handle a signal.\n\
6251 Args are signals and actions to apply to those signals.\n\
6252 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6253 from 1-15 are allowed for compatibility with old versions of GDB.\n\
6254 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
6255 The special arg \"all\" is recognized to mean all signals except those\n\
6256 used by the debugger, typically SIGTRAP and SIGINT.\n\
6257 Recognized actions include \"s\" (toggles between stop and nostop), \n\
6258 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
6259 nopass), \"Q\" (noprint)\n\
6260 Stop means reenter debugger if this signal happens (implies print).\n\
6261 Print means print a message if this signal happens.\n\
6262 Pass means let program see this signal; otherwise program doesn't know.\n\
6263 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
6264 Pass and Stop may be combined."));
6265 }
6266
6267 if (!dbx_commands)
6268 stop_command = add_cmd ("stop", class_obscure,
6269 not_just_help_class_command, _("\
6270 There is no `stop' command, but you can set a hook on `stop'.\n\
6271 This allows you to set a list of commands to be run each time execution\n\
6272 of the program stops."), &cmdlist);
6273
6274 add_setshow_zinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
6275 Set inferior debugging."), _("\
6276 Show inferior debugging."), _("\
6277 When non-zero, inferior specific debugging is enabled."),
6278 NULL,
6279 show_debug_infrun,
6280 &setdebuglist, &showdebuglist);
6281
6282 add_setshow_boolean_cmd ("displaced", class_maintenance, &debug_displaced, _("\
6283 Set displaced stepping debugging."), _("\
6284 Show displaced stepping debugging."), _("\
6285 When non-zero, displaced stepping specific debugging is enabled."),
6286 NULL,
6287 show_debug_displaced,
6288 &setdebuglist, &showdebuglist);
6289
6290 add_setshow_boolean_cmd ("non-stop", no_class,
6291 &non_stop_1, _("\
6292 Set whether gdb controls the inferior in non-stop mode."), _("\
6293 Show whether gdb controls the inferior in non-stop mode."), _("\
6294 When debugging a multi-threaded program and this setting is\n\
6295 off (the default, also called all-stop mode), when one thread stops\n\
6296 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
6297 all other threads in the program while you interact with the thread of\n\
6298 interest. When you continue or step a thread, you can allow the other\n\
6299 threads to run, or have them remain stopped, but while you inspect any\n\
6300 thread's state, all threads stop.\n\
6301 \n\
6302 In non-stop mode, when one thread stops, other threads can continue\n\
6303 to run freely. You'll be able to step each thread independently,\n\
6304 leave it stopped or free to run as needed."),
6305 set_non_stop,
6306 show_non_stop,
6307 &setlist,
6308 &showlist);
6309
6310 numsigs = (int) TARGET_SIGNAL_LAST;
6311 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
6312 signal_print = (unsigned char *)
6313 xmalloc (sizeof (signal_print[0]) * numsigs);
6314 signal_program = (unsigned char *)
6315 xmalloc (sizeof (signal_program[0]) * numsigs);
6316 for (i = 0; i < numsigs; i++)
6317 {
6318 signal_stop[i] = 1;
6319 signal_print[i] = 1;
6320 signal_program[i] = 1;
6321 }
6322
6323 /* Signals caused by debugger's own actions
6324 should not be given to the program afterwards. */
6325 signal_program[TARGET_SIGNAL_TRAP] = 0;
6326 signal_program[TARGET_SIGNAL_INT] = 0;
6327
6328 /* Signals that are not errors should not normally enter the debugger. */
6329 signal_stop[TARGET_SIGNAL_ALRM] = 0;
6330 signal_print[TARGET_SIGNAL_ALRM] = 0;
6331 signal_stop[TARGET_SIGNAL_VTALRM] = 0;
6332 signal_print[TARGET_SIGNAL_VTALRM] = 0;
6333 signal_stop[TARGET_SIGNAL_PROF] = 0;
6334 signal_print[TARGET_SIGNAL_PROF] = 0;
6335 signal_stop[TARGET_SIGNAL_CHLD] = 0;
6336 signal_print[TARGET_SIGNAL_CHLD] = 0;
6337 signal_stop[TARGET_SIGNAL_IO] = 0;
6338 signal_print[TARGET_SIGNAL_IO] = 0;
6339 signal_stop[TARGET_SIGNAL_POLL] = 0;
6340 signal_print[TARGET_SIGNAL_POLL] = 0;
6341 signal_stop[TARGET_SIGNAL_URG] = 0;
6342 signal_print[TARGET_SIGNAL_URG] = 0;
6343 signal_stop[TARGET_SIGNAL_WINCH] = 0;
6344 signal_print[TARGET_SIGNAL_WINCH] = 0;
6345
6346 /* These signals are used internally by user-level thread
6347 implementations. (See signal(5) on Solaris.) Like the above
6348 signals, a healthy program receives and handles them as part of
6349 its normal operation. */
6350 signal_stop[TARGET_SIGNAL_LWP] = 0;
6351 signal_print[TARGET_SIGNAL_LWP] = 0;
6352 signal_stop[TARGET_SIGNAL_WAITING] = 0;
6353 signal_print[TARGET_SIGNAL_WAITING] = 0;
6354 signal_stop[TARGET_SIGNAL_CANCEL] = 0;
6355 signal_print[TARGET_SIGNAL_CANCEL] = 0;
6356
6357 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
6358 &stop_on_solib_events, _("\
6359 Set stopping for shared library events."), _("\
6360 Show stopping for shared library events."), _("\
6361 If nonzero, gdb will give control to the user when the dynamic linker\n\
6362 notifies gdb of shared library events. The most common event of interest\n\
6363 to the user would be loading/unloading of a new library."),
6364 NULL,
6365 show_stop_on_solib_events,
6366 &setlist, &showlist);
6367
6368 add_setshow_enum_cmd ("follow-fork-mode", class_run,
6369 follow_fork_mode_kind_names,
6370 &follow_fork_mode_string, _("\
6371 Set debugger response to a program call of fork or vfork."), _("\
6372 Show debugger response to a program call of fork or vfork."), _("\
6373 A fork or vfork creates a new process. follow-fork-mode can be:\n\
6374 parent - the original process is debugged after a fork\n\
6375 child - the new process is debugged after a fork\n\
6376 The unfollowed process will continue to run.\n\
6377 By default, the debugger will follow the parent process."),
6378 NULL,
6379 show_follow_fork_mode_string,
6380 &setlist, &showlist);
6381
6382 add_setshow_enum_cmd ("follow-exec-mode", class_run,
6383 follow_exec_mode_names,
6384 &follow_exec_mode_string, _("\
6385 Set debugger response to a program call of exec."), _("\
6386 Show debugger response to a program call of exec."), _("\
6387 An exec call replaces the program image of a process.\n\
6388 \n\
6389 follow-exec-mode can be:\n\
6390 \n\
6391 new - the debugger creates a new inferior and rebinds the process \n\
6392 to this new inferior. The program the process was running before\n\
6393 the exec call can be restarted afterwards by restarting the original\n\
6394 inferior.\n\
6395 \n\
6396 same - the debugger keeps the process bound to the same inferior.\n\
6397 The new executable image replaces the previous executable loaded in\n\
6398 the inferior. Restarting the inferior after the exec call restarts\n\
6399 the executable the process was running after the exec call.\n\
6400 \n\
6401 By default, the debugger will use the same inferior."),
6402 NULL,
6403 show_follow_exec_mode_string,
6404 &setlist, &showlist);
6405
6406 add_setshow_enum_cmd ("scheduler-locking", class_run,
6407 scheduler_enums, &scheduler_mode, _("\
6408 Set mode for locking scheduler during execution."), _("\
6409 Show mode for locking scheduler during execution."), _("\
6410 off == no locking (threads may preempt at any time)\n\
6411 on == full locking (no thread except the current thread may run)\n\
6412 step == scheduler locked during every single-step operation.\n\
6413 In this mode, no other thread may run during a step command.\n\
6414 Other threads may run while stepping over a function call ('next')."),
6415 set_schedlock_func, /* traps on target vector */
6416 show_scheduler_mode,
6417 &setlist, &showlist);
6418
6419 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
6420 Set mode for resuming threads of all processes."), _("\
6421 Show mode for resuming threads of all processes."), _("\
6422 When on, execution commands (such as 'continue' or 'next') resume all\n\
6423 threads of all processes. When off (which is the default), execution\n\
6424 commands only resume the threads of the current process. The set of\n\
6425 threads that are resumed is further refined by the scheduler-locking\n\
6426 mode (see help set scheduler-locking)."),
6427 NULL,
6428 show_schedule_multiple,
6429 &setlist, &showlist);
6430
6431 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
6432 Set mode of the step operation."), _("\
6433 Show mode of the step operation."), _("\
6434 When set, doing a step over a function without debug line information\n\
6435 will stop at the first instruction of that function. Otherwise, the\n\
6436 function is skipped and the step command stops at a different source line."),
6437 NULL,
6438 show_step_stop_if_no_debug,
6439 &setlist, &showlist);
6440
6441 add_setshow_enum_cmd ("displaced-stepping", class_run,
6442 can_use_displaced_stepping_enum,
6443 &can_use_displaced_stepping, _("\
6444 Set debugger's willingness to use displaced stepping."), _("\
6445 Show debugger's willingness to use displaced stepping."), _("\
6446 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
6447 supported by the target architecture. If off, gdb will not use displaced\n\
6448 stepping to step over breakpoints, even if such is supported by the target\n\
6449 architecture. If auto (which is the default), gdb will use displaced stepping\n\
6450 if the target architecture supports it and non-stop mode is active, but will not\n\
6451 use it in all-stop mode (see help set non-stop)."),
6452 NULL,
6453 show_can_use_displaced_stepping,
6454 &setlist, &showlist);
6455
6456 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
6457 &exec_direction, _("Set direction of execution.\n\
6458 Options are 'forward' or 'reverse'."),
6459 _("Show direction of execution (forward/reverse)."),
6460 _("Tells gdb whether to execute forward or backward."),
6461 set_exec_direction_func, show_exec_direction_func,
6462 &setlist, &showlist);
6463
6464 /* Set/show detach-on-fork: user-settable mode. */
6465
6466 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
6467 Set whether gdb will detach the child of a fork."), _("\
6468 Show whether gdb will detach the child of a fork."), _("\
6469 Tells gdb whether to detach the child of a fork."),
6470 NULL, NULL, &setlist, &showlist);
6471
6472 /* ptid initializations */
6473 null_ptid = ptid_build (0, 0, 0);
6474 minus_one_ptid = ptid_build (-1, 0, 0);
6475 inferior_ptid = null_ptid;
6476 target_last_wait_ptid = minus_one_ptid;
6477 displaced_step_ptid = null_ptid;
6478
6479 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
6480 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
6481 observer_attach_thread_exit (infrun_thread_thread_exit);
6482
6483 /* Explicitly create without lookup, since that tries to create a
6484 value with a void typed value, and when we get here, gdbarch
6485 isn't initialized yet. At this point, we're quite sure there
6486 isn't another convenience variable of the same name. */
6487 create_internalvar_type_lazy ("_siginfo", siginfo_make_value);
6488 }