1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995,
5 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
6 2008, 2009, 2010 Free Software Foundation, Inc.
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "gdb_string.h"
25 #include <ctype.h>
26 #include "symtab.h"
27 #include "frame.h"
28 #include "inferior.h"
29 #include "exceptions.h"
30 #include "breakpoint.h"
31 #include "gdb_wait.h"
32 #include "gdbcore.h"
33 #include "gdbcmd.h"
34 #include "cli/cli-script.h"
35 #include "target.h"
36 #include "gdbthread.h"
37 #include "annotate.h"
38 #include "symfile.h"
39 #include "top.h"
40 #include <signal.h>
41 #include "inf-loop.h"
42 #include "regcache.h"
43 #include "value.h"
44 #include "observer.h"
45 #include "language.h"
46 #include "solib.h"
47 #include "main.h"
48 #include "gdb_assert.h"
49 #include "mi/mi-common.h"
50 #include "event-top.h"
51 #include "record.h"
52 #include "inline-frame.h"
53 #include "jit.h"
54
55 /* Prototypes for local functions */
56
57 static void signals_info (char *, int);
58
59 static void handle_command (char *, int);
60
61 static void sig_print_info (enum target_signal);
62
63 static void sig_print_header (void);
64
65 static void resume_cleanups (void *);
66
67 static int hook_stop_stub (void *);
68
69 static int restore_selected_frame (void *);
70
71 static int follow_fork (void);
72
73 static void set_schedlock_func (char *args, int from_tty,
74 struct cmd_list_element *c);
75
76 static int currently_stepping (struct thread_info *tp);
77
78 static int currently_stepping_or_nexting_callback (struct thread_info *tp,
79 void *data);
80
81 static void xdb_handle_command (char *args, int from_tty);
82
83 static int prepare_to_proceed (int);
84
85 void _initialize_infrun (void);
86
87 void nullify_last_target_wait_ptid (void);
88
89 /* When set, stop the 'step' command if we enter a function which has
90 no line number information. The normal behavior is that we step
91 over such functions. */
92 int step_stop_if_no_debug = 0;
93 static void
94 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
95 struct cmd_list_element *c, const char *value)
96 {
97 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
98 }
99
100 /* In asynchronous mode, but simulating synchronous execution. */
101
102 int sync_execution = 0;
103
104 /* wait_for_inferior and normal_stop use this to notify the user
105 when the inferior stopped in a different thread than it had been
106 running in. */
107
108 static ptid_t previous_inferior_ptid;
109
110 /* Default behavior is to detach newly forked processes (legacy). */
111 int detach_fork = 1;
112
113 int debug_displaced = 0;
114 static void
115 show_debug_displaced (struct ui_file *file, int from_tty,
116 struct cmd_list_element *c, const char *value)
117 {
118 fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
119 }
120
121 static int debug_infrun = 0;
122 static void
123 show_debug_infrun (struct ui_file *file, int from_tty,
124 struct cmd_list_element *c, const char *value)
125 {
126 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
127 }
128
129 /* If the program uses ELF-style shared libraries, then calls to
130 functions in shared libraries go through stubs, which live in a
131 table called the PLT (Procedure Linkage Table). The first time the
132 function is called, the stub sends control to the dynamic linker,
133 which looks up the function's real address, patches the stub so
134 that future calls will go directly to the function, and then passes
135 control to the function.
136
137 If we are stepping at the source level, we don't want to see any of
138 this --- we just want to skip over the stub and the dynamic linker.
139 The simple approach is to single-step until control leaves the
140 dynamic linker.
141
142 However, on some systems (e.g., Red Hat's 5.2 distribution) the
143 dynamic linker calls functions in the shared C library, so you
144 can't tell from the PC alone whether the dynamic linker is still
145 running. In this case, we use a step-resume breakpoint to get us
146 past the dynamic linker, as if we were using "next" to step over a
147 function call.
148
149 in_solib_dynsym_resolve_code() says whether we're in the dynamic
150 linker code or not. Normally, this means we single-step. However,
151 if SKIP_SOLIB_RESOLVER returns non-zero, then its value is an
152 address where we can place a step-resume breakpoint to get past the
153 linker's symbol resolution function.
154
155 in_solib_dynsym_resolve_code() can generally be implemented in a
156 pretty portable way, by comparing the PC against the address ranges
157 of the dynamic linker's sections.
158
159 SKIP_SOLIB_RESOLVER is generally going to be system-specific, since
160 it depends on internal details of the dynamic linker. It's usually
161 not too hard to figure out where to put a breakpoint, but it
162 certainly isn't portable. SKIP_SOLIB_RESOLVER should do plenty of
163 sanity checking. If it can't figure things out, returning zero and
164 getting the (possibly confusing) stepping behavior is better than
165 signalling an error, which will obscure the change in the
166 inferior's state. */
167
168 /* This function returns TRUE if pc is the address of an instruction
169 that lies within the dynamic linker (such as the event hook, or the
170 dld itself).
171
172 This function must be used only when a dynamic linker event has
173 been caught, and the inferior is being stepped out of the hook, or
174 undefined results are guaranteed. */
175
176 #ifndef SOLIB_IN_DYNAMIC_LINKER
177 #define SOLIB_IN_DYNAMIC_LINKER(pid,pc) 0
178 #endif
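
/* For illustration only (not used by GDB): a minimal sketch of the
   decision described above.  It assumes the in_solib_dynsym_resolve_code
   and gdbarch_skip_solib_resolver hooks named in the comment; the
   function below is hypothetical, and the real logic lives in the
   stepping code in handle_inferior_event.  */
#if 0
/* Return the address at which a step-resume breakpoint could be placed
   to get past the dynamic linker's resolver, or 0 if we should just
   keep single-stepping until control leaves the dynamic linker.  */
static CORE_ADDR
example_solib_step_policy (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  if (!in_solib_dynsym_resolve_code (pc))
    return 0;		/* Not in the dynamic linker; nothing special.  */

  /* Ask the architecture whether it knows where the resolver will hand
     control back to user code.  A zero result means "don't know", in
     which case the caller falls back to single-stepping.  */
  return gdbarch_skip_solib_resolver (gdbarch, pc);
}
#endif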
179
180
181 /* Convert the #defines into values. This is temporary until wfi control
182 flow is completely sorted out. */
183
184 #ifndef CANNOT_STEP_HW_WATCHPOINTS
185 #define CANNOT_STEP_HW_WATCHPOINTS 0
186 #else
187 #undef CANNOT_STEP_HW_WATCHPOINTS
188 #define CANNOT_STEP_HW_WATCHPOINTS 1
189 #endif
190
191 /* Tables of how to react to signals; the user sets them. */
192
193 static unsigned char *signal_stop;
194 static unsigned char *signal_print;
195 static unsigned char *signal_program;
196
197 #define SET_SIGS(nsigs,sigs,flags) \
198 do { \
199 int signum = (nsigs); \
200 while (signum-- > 0) \
201 if ((sigs)[signum]) \
202 (flags)[signum] = 1; \
203 } while (0)
204
205 #define UNSET_SIGS(nsigs,sigs,flags) \
206 do { \
207 int signum = (nsigs); \
208 while (signum-- > 0) \
209 if ((sigs)[signum]) \
210 (flags)[signum] = 0; \
211 } while (0)
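
/* For illustration only: roughly how the two macros above are used by
   the "handle" command.  SIGS is a scratch array with a 1 for every
   signal number the user named.  This is a hypothetical sketch, not the
   actual handle_command code.  */
#if 0
static void
example_handle_stop_print (unsigned char *sigs, int nsigs)
{
  /* "handle <sig> stop print" marks the named signals as stopping
     and printing...  */
  SET_SIGS (nsigs, sigs, signal_stop);
  SET_SIGS (nsigs, sigs, signal_print);

  /* ...and adding "nopass" would additionally do:  */
  UNSET_SIGS (nsigs, sigs, signal_program);
}
#endif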
212
213 /* Value to pass to target_resume() to cause all threads to resume */
214
215 #define RESUME_ALL minus_one_ptid
216
217 /* Command list pointer for the "stop" placeholder. */
218
219 static struct cmd_list_element *stop_command;
220
221 /* Function inferior was in as of last step command. */
222
223 static struct symbol *step_start_function;
224
225 /* Nonzero if we want to give control to the user when we're notified
226 of shared library events by the dynamic linker. */
227 static int stop_on_solib_events;
228 static void
229 show_stop_on_solib_events (struct ui_file *file, int from_tty,
230 struct cmd_list_element *c, const char *value)
231 {
232 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
233 value);
234 }
235
236 /* Nonzero means we are expecting a trace trap
237 and should stop the inferior and return silently when it happens. */
238
239 int stop_after_trap;
240
241 /* Save register contents here when executing a "finish" command or when
242 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
243 Thus this contains the return value from the called function (assuming
244 values are returned in a register). */
245
246 struct regcache *stop_registers;
247
248 /* Nonzero after stop if current stack frame should be printed. */
249
250 static int stop_print_frame;
251
252 /* This is a cached copy of the pid/waitstatus of the last event
253 returned by target_wait()/deprecated_target_wait_hook(). This
254 information is returned by get_last_target_status(). */
255 static ptid_t target_last_wait_ptid;
256 static struct target_waitstatus target_last_waitstatus;
257
258 static void context_switch (ptid_t ptid);
259
260 void init_thread_stepping_state (struct thread_info *tss);
261
262 void init_infwait_state (void);
263
264 static const char follow_fork_mode_child[] = "child";
265 static const char follow_fork_mode_parent[] = "parent";
266
267 static const char *follow_fork_mode_kind_names[] = {
268 follow_fork_mode_child,
269 follow_fork_mode_parent,
270 NULL
271 };
272
273 static const char *follow_fork_mode_string = follow_fork_mode_parent;
274 static void
275 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
276 struct cmd_list_element *c, const char *value)
277 {
278 fprintf_filtered (file, _("\
279 Debugger response to a program call of fork or vfork is \"%s\".\n"),
280 value);
281 }
282 \f
283
284 /* Tell the target to follow the fork we're stopped at. Returns true
285 if the inferior should be resumed; false, if the target for some
286 reason decided it's best not to resume. */
287
288 static int
289 follow_fork (void)
290 {
291 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
292 int should_resume = 1;
293 struct thread_info *tp;
294
295 /* Copy user stepping state to the new inferior thread. FIXME: the
296 followed fork child thread should have a copy of most of the
297 parent thread structure's run control related fields, not just these.
298 Initialized to avoid "may be used uninitialized" warnings from gcc. */
299 struct breakpoint *step_resume_breakpoint = NULL;
300 CORE_ADDR step_range_start = 0;
301 CORE_ADDR step_range_end = 0;
302 struct frame_id step_frame_id = { 0 };
303
304 if (!non_stop)
305 {
306 ptid_t wait_ptid;
307 struct target_waitstatus wait_status;
308
309 /* Get the last target status returned by target_wait(). */
310 get_last_target_status (&wait_ptid, &wait_status);
311
312 /* If not stopped at a fork event, then there's nothing else to
313 do. */
314 if (wait_status.kind != TARGET_WAITKIND_FORKED
315 && wait_status.kind != TARGET_WAITKIND_VFORKED)
316 return 1;
317
318 /* Check if we switched over from WAIT_PTID, since the event was
319 reported. */
320 if (!ptid_equal (wait_ptid, minus_one_ptid)
321 && !ptid_equal (inferior_ptid, wait_ptid))
322 {
323 /* We did. Switch back to WAIT_PTID thread, to tell the
324 target to follow it (in either direction). We'll
325 afterwards refuse to resume, and inform the user what
326 happened. */
327 switch_to_thread (wait_ptid);
328 should_resume = 0;
329 }
330 }
331
332 tp = inferior_thread ();
333
334 /* If there were any forks/vforks that were caught and are now to be
335 followed, then do so now. */
336 switch (tp->pending_follow.kind)
337 {
338 case TARGET_WAITKIND_FORKED:
339 case TARGET_WAITKIND_VFORKED:
340 {
341 ptid_t parent, child;
342
343 /* If the user did a next/step, etc, over a fork call,
344 preserve the stepping state in the fork child. */
345 if (follow_child && should_resume)
346 {
347 step_resume_breakpoint
348 = clone_momentary_breakpoint (tp->step_resume_breakpoint);
349 step_range_start = tp->step_range_start;
350 step_range_end = tp->step_range_end;
351 step_frame_id = tp->step_frame_id;
352
353 /* For now, delete the parent's sr breakpoint; otherwise,
354 parent/child sr breakpoints are considered duplicates,
355 and the child version will not be installed. Remove
356 this when the breakpoints module becomes aware of
357 inferiors and address spaces. */
358 delete_step_resume_breakpoint (tp);
359 tp->step_range_start = 0;
360 tp->step_range_end = 0;
361 tp->step_frame_id = null_frame_id;
362 }
363
364 parent = inferior_ptid;
365 child = tp->pending_follow.value.related_pid;
366
367 /* Tell the target to do whatever is necessary to follow
368 either parent or child. */
369 if (target_follow_fork (follow_child))
370 {
371 /* Target refused to follow, or there's some other reason
372 we shouldn't resume. */
373 should_resume = 0;
374 }
375 else
376 {
377 /* This pending follow fork event is now handled, one way
378 or another. The previously selected thread may be gone
379 from the lists by now, but if it is still around, we need
380 to clear the pending follow request. */
381 tp = find_thread_ptid (parent);
382 if (tp)
383 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
384
385 /* This makes sure we don't try to apply the "Switched
386 over from WAIT_PTID" logic above. */
387 nullify_last_target_wait_ptid ();
388
389 /* If we followed the child, switch to it... */
390 if (follow_child)
391 {
392 switch_to_thread (child);
393
394 /* ... and preserve the stepping state, in case the
395 user was stepping over the fork call. */
396 if (should_resume)
397 {
398 tp = inferior_thread ();
399 tp->step_resume_breakpoint = step_resume_breakpoint;
400 tp->step_range_start = step_range_start;
401 tp->step_range_end = step_range_end;
402 tp->step_frame_id = step_frame_id;
403 }
404 else
405 {
406 /* If we get here, it was because we're trying to
407 resume from a fork catchpoint, but the user
408 has switched threads away from the thread that
409 forked. In that case, the resume command
410 issued is most likely not applicable to the
411 child, so just warn, and refuse to resume. */
412 warning (_("\
413 Not resuming: switched threads before following fork child.\n"));
414 }
415
416 /* Reset breakpoints in the child as appropriate. */
417 follow_inferior_reset_breakpoints ();
418 }
419 else
420 switch_to_thread (parent);
421 }
422 }
423 break;
424 case TARGET_WAITKIND_SPURIOUS:
425 /* Nothing to follow. */
426 break;
427 default:
428 internal_error (__FILE__, __LINE__,
429 "Unexpected pending_follow.kind %d\n",
430 tp->pending_follow.kind);
431 break;
432 }
433
434 return should_resume;
435 }
436
437 void
438 follow_inferior_reset_breakpoints (void)
439 {
440 struct thread_info *tp = inferior_thread ();
441
442 /* Was there a step_resume breakpoint? (There was if the user
443 did a "next" at the fork() call.) If so, explicitly reset its
444 thread number.
445
446 step_resumes are a form of bp that are made to be per-thread.
447 Since we created the step_resume bp when the parent process
448 was being debugged, and now are switching to the child process,
449 from the breakpoint package's viewpoint, that's a switch of
450 "threads". We must update the bp's notion of which thread
451 it is for, or it'll be ignored when it triggers. */
452
453 if (tp->step_resume_breakpoint)
454 breakpoint_re_set_thread (tp->step_resume_breakpoint);
455
456 /* Reinsert all breakpoints in the child. The user may have set
457 breakpoints after catching the fork, in which case those
458 were never set in the child, but only in the parent. This makes
459 sure the inserted breakpoints match the breakpoint list. */
460
461 breakpoint_re_set ();
462 insert_breakpoints ();
463 }
464
465 /* The child has exited or execed: resume threads of the parent the
466 user wanted to be executing. */
467
468 static int
469 proceed_after_vfork_done (struct thread_info *thread,
470 void *arg)
471 {
472 int pid = * (int *) arg;
473
474 if (ptid_get_pid (thread->ptid) == pid
475 && is_running (thread->ptid)
476 && !is_executing (thread->ptid)
477 && !thread->stop_requested
478 && thread->stop_signal == TARGET_SIGNAL_0)
479 {
480 if (debug_infrun)
481 fprintf_unfiltered (gdb_stdlog,
482 "infrun: resuming vfork parent thread %s\n",
483 target_pid_to_str (thread->ptid));
484
485 switch_to_thread (thread->ptid);
486 clear_proceed_status ();
487 proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);
488 }
489
490 return 0;
491 }
492
493 /* Called whenever we notice an exec or exit event, to handle
494 detaching or resuming a vfork parent. */
495
496 static void
497 handle_vfork_child_exec_or_exit (int exec)
498 {
499 struct inferior *inf = current_inferior ();
500
501 if (inf->vfork_parent)
502 {
503 int resume_parent = -1;
504
505 /* This exec or exit marks the end of the shared memory region
506 between the parent and the child. If the user wanted to
507 detach from the parent, now is the time. */
508
509 if (inf->vfork_parent->pending_detach)
510 {
511 struct thread_info *tp;
512 struct cleanup *old_chain;
513 struct program_space *pspace;
514 struct address_space *aspace;
515
516 /* follow-fork child, detach-on-fork on */
517
518 old_chain = make_cleanup_restore_current_thread ();
519
520 /* We're letting go of the parent. */
521 tp = any_live_thread_of_process (inf->vfork_parent->pid);
522 switch_to_thread (tp->ptid);
523
524 /* We're about to detach from the parent, which implicitly
525 removes breakpoints from its address space. There's a
526 catch here: we want to reuse the spaces for the child,
527 but parent and child are still sharing the pspace at this
528 point, although the exec in reality makes the kernel give
529 the child a fresh set of new pages. The problem here is
530 that the breakpoints module, being unaware of this, would
531 likely choose the child process to write to the parent
532 address space. Swapping the child temporarily away from
533 the spaces has the desired effect. Yes, this is "sort
534 of" a hack. */
535
536 pspace = inf->pspace;
537 aspace = inf->aspace;
538 inf->aspace = NULL;
539 inf->pspace = NULL;
540
541 if (debug_infrun || info_verbose)
542 {
543 target_terminal_ours ();
544
545 if (exec)
546 fprintf_filtered (gdb_stdlog,
547 "Detaching vfork parent process %d after child exec.\n",
548 inf->vfork_parent->pid);
549 else
550 fprintf_filtered (gdb_stdlog,
551 "Detaching vfork parent process %d after child exit.\n",
552 inf->vfork_parent->pid);
553 }
554
555 target_detach (NULL, 0);
556
557 /* Put it back. */
558 inf->pspace = pspace;
559 inf->aspace = aspace;
560
561 do_cleanups (old_chain);
562 }
563 else if (exec)
564 {
565 /* We're staying attached to the parent, so, really give the
566 child a new address space. */
567 inf->pspace = add_program_space (maybe_new_address_space ());
568 inf->aspace = inf->pspace->aspace;
569 inf->removable = 1;
570 set_current_program_space (inf->pspace);
571
572 resume_parent = inf->vfork_parent->pid;
573
574 /* Break the bonds. */
575 inf->vfork_parent->vfork_child = NULL;
576 }
577 else
578 {
579 struct cleanup *old_chain;
580 struct program_space *pspace;
581
582 /* If this is a vfork child exiting, then the pspace and
583 aspaces were shared with the parent. Since we're
584 reporting the process exit, we'll be mourning all that is
585 found in the address space, and switching to null_ptid,
586 preparing to start a new inferior. But, since we don't
587 want to clobber the parent's address/program spaces, we
588 go ahead and create a new one for this exiting
589 inferior. */
590
591 /* Switch to null_ptid, so that clone_program_space doesn't want
592 to read the selected frame of a dead process. */
593 old_chain = save_inferior_ptid ();
594 inferior_ptid = null_ptid;
595
596 /* This inferior is dead, so avoid giving the breakpoints
597 module the option to write through to it (cloning a
598 program space resets breakpoints). */
599 inf->aspace = NULL;
600 inf->pspace = NULL;
601 pspace = add_program_space (maybe_new_address_space ());
602 set_current_program_space (pspace);
603 inf->removable = 1;
604 clone_program_space (pspace, inf->vfork_parent->pspace);
605 inf->pspace = pspace;
606 inf->aspace = pspace->aspace;
607
608 /* Put back inferior_ptid. We'll continue mourning this
609 inferior. */
610 do_cleanups (old_chain);
611
612 resume_parent = inf->vfork_parent->pid;
613 /* Break the bonds. */
614 inf->vfork_parent->vfork_child = NULL;
615 }
616
617 inf->vfork_parent = NULL;
618
619 gdb_assert (current_program_space == inf->pspace);
620
621 if (non_stop && resume_parent != -1)
622 {
623 /* If the user wanted the parent to be running, let it go
624 free now. */
625 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
626
627 if (debug_infrun)
628 fprintf_unfiltered (gdb_stdlog, "infrun: resuming vfork parent process %d\n",
629 resume_parent);
630
631 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
632
633 do_cleanups (old_chain);
634 }
635 }
636 }
637
638 /* Enum strings for "set|show follow-exec-mode". */
639
640 static const char follow_exec_mode_new[] = "new";
641 static const char follow_exec_mode_same[] = "same";
642 static const char *follow_exec_mode_names[] =
643 {
644 follow_exec_mode_new,
645 follow_exec_mode_same,
646 NULL,
647 };
648
649 static const char *follow_exec_mode_string = follow_exec_mode_same;
650 static void
651 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
652 struct cmd_list_element *c, const char *value)
653 {
654 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
655 }
656
657 /* EXECD_PATHNAME is assumed to be non-NULL. */
658
659 static void
660 follow_exec (ptid_t pid, char *execd_pathname)
661 {
662 struct target_ops *tgt;
663 struct thread_info *th = inferior_thread ();
664 struct inferior *inf = current_inferior ();
665
666 /* This is an exec event that we actually wish to pay attention to.
667 Refresh our symbol table to the newly exec'd program, remove any
668 momentary bp's, etc.
669
670 If there are breakpoints, they aren't really inserted now,
671 since the exec() transformed our inferior into a fresh set
672 of instructions.
673
674 We want to preserve symbolic breakpoints on the list, since
675 we have hopes that they can be reset after the new a.out's
676 symbol table is read.
677
678 However, any "raw" breakpoints must be removed from the list
679 (e.g., the solib bp's), since their address is probably invalid
680 now.
681
682 And, we DON'T want to call delete_breakpoints() here, since
683 that may write the bp's "shadow contents" (the instruction
684 value that was overwritten with a TRAP instruction). Since
685 we now have a new a.out, those shadow contents aren't valid. */
686
687 mark_breakpoints_out ();
688
689 update_breakpoints_after_exec ();
690
691 /* If there was one, it's gone now. We cannot truly step-to-next
692 statement through an exec(). */
693 th->step_resume_breakpoint = NULL;
694 th->step_range_start = 0;
695 th->step_range_end = 0;
696
697 /* The target reports the exec event to the main thread, even if
698 some other thread does the exec, and even if the main thread was
699 already stopped --- if debugging in non-stop mode, it's possible
700 the user had the main thread held stopped in the previous image
701 --- release it now. This is the same behavior as step-over-exec
702 with scheduler-locking on in all-stop mode. */
703 th->stop_requested = 0;
704
705 /* What is this a.out's name? */
706 printf_unfiltered (_("%s is executing new program: %s\n"),
707 target_pid_to_str (inferior_ptid),
708 execd_pathname);
709
710 /* We've followed the inferior through an exec. Therefore, the
711 inferior has essentially been killed & reborn. */
712
713 gdb_flush (gdb_stdout);
714
715 breakpoint_init_inferior (inf_execd);
716
717 if (gdb_sysroot && *gdb_sysroot)
718 {
719 char *name = alloca (strlen (gdb_sysroot)
720 + strlen (execd_pathname)
721 + 1);
722 strcpy (name, gdb_sysroot);
723 strcat (name, execd_pathname);
724 execd_pathname = name;
725 }
726
727 /* Reset the shared library package. This ensures that we get a
728 shlib event when the child reaches "_start", at which point the
729 dld will have had a chance to initialize the child. */
730 /* Also, loading a symbol file below may trigger symbol lookups, and
731 we don't want those to be satisfied by the libraries of the
732 previous incarnation of this process. */
733 no_shared_libraries (NULL, 0);
734
735 if (follow_exec_mode_string == follow_exec_mode_new)
736 {
737 struct program_space *pspace;
738 struct inferior *new_inf;
739
740 /* The user wants to keep the old inferior and program spaces
741 around. Create a new fresh one, and switch to it. */
742
743 inf = add_inferior (current_inferior ()->pid);
744 pspace = add_program_space (maybe_new_address_space ());
745 inf->pspace = pspace;
746 inf->aspace = pspace->aspace;
747
748 exit_inferior_num_silent (current_inferior ()->num);
749
750 set_current_inferior (inf);
751 set_current_program_space (pspace);
752 }
753
754 gdb_assert (current_program_space == inf->pspace);
755
756 /* That a.out is now the one to use. */
757 exec_file_attach (execd_pathname, 0);
758
759 /* Load the main file's symbols. */
760 symbol_file_add_main (execd_pathname, 0);
761
762 #ifdef SOLIB_CREATE_INFERIOR_HOOK
763 SOLIB_CREATE_INFERIOR_HOOK (PIDGET (inferior_ptid));
764 #else
765 solib_create_inferior_hook (0);
766 #endif
767
768 jit_inferior_created_hook ();
769
770 /* Reinsert all breakpoints. (Those which were symbolic have
771 been reset to the proper address in the new a.out, thanks
772 to symbol_file_command...) */
773 insert_breakpoints ();
774
775 /* The next resume of this inferior should bring it to the shlib
776 startup breakpoints. (If the user had also set bp's on
777 "main" from the old (parent) process, then they'll auto-
778 matically get reset there in the new process.) */
779 }
780
781 /* Non-zero if we are just simulating a single-step. This is needed
782 because we cannot remove the breakpoints in the inferior process
783 until after the `wait' in `wait_for_inferior'. */
784 static int singlestep_breakpoints_inserted_p = 0;
785
786 /* The thread we inserted single-step breakpoints for. */
787 static ptid_t singlestep_ptid;
788
789 /* PC when we started this single-step. */
790 static CORE_ADDR singlestep_pc;
791
792 /* If another thread hit the singlestep breakpoint, we save the original
793 thread here so that we can resume single-stepping it later. */
794 static ptid_t saved_singlestep_ptid;
795 static int stepping_past_singlestep_breakpoint;
796
797 /* If not equal to null_ptid, this means that after stepping over a breakpoint
798 is finished, we need to switch to deferred_step_ptid, and step it.
799
800 The use case is when one thread has hit a breakpoint, and then the user
801 has switched to another thread and issued 'step'. We need to step over
802 the breakpoint in the thread which hit it, but then continue
803 stepping the thread the user has selected. */
804 static ptid_t deferred_step_ptid;
805 \f
806 /* Displaced stepping. */
807
808 /* In non-stop debugging mode, we must take special care to manage
809 breakpoints properly; in particular, the traditional strategy for
810 stepping a thread past a breakpoint it has hit is unsuitable.
811 'Displaced stepping' is a tactic for stepping one thread past a
812 breakpoint it has hit while ensuring that other threads running
813 concurrently will hit the breakpoint as they should.
814
815 The traditional way to step a thread T off a breakpoint in a
816 multi-threaded program in all-stop mode is as follows:
817
818 a0) Initially, all threads are stopped, and breakpoints are not
819 inserted.
820 a1) We single-step T, leaving breakpoints uninserted.
821 a2) We insert breakpoints, and resume all threads.
822
823 In non-stop debugging, however, this strategy is unsuitable: we
824 don't want to have to stop all threads in the system in order to
825 continue or step T past a breakpoint. Instead, we use displaced
826 stepping:
827
828 n0) Initially, T is stopped, other threads are running, and
829 breakpoints are inserted.
830 n1) We copy the instruction "under" the breakpoint to a separate
831 location, outside the main code stream, making any adjustments
832 to the instruction, register, and memory state as directed by
833 T's architecture.
834 n2) We single-step T over the instruction at its new location.
835 n3) We adjust the resulting register and memory state as directed
836 by T's architecture. This includes resetting T's PC to point
837 back into the main instruction stream.
838 n4) We resume T.
839
840 This approach depends on the following gdbarch methods:
841
842 - gdbarch_max_insn_length and gdbarch_displaced_step_location
843 indicate where to copy the instruction, and how much space must
844 be reserved there. We use these in step n1.
845
846 - gdbarch_displaced_step_copy_insn copies an instruction to a new
847 address, and makes any necessary adjustments to the instruction,
848 register contents, and memory. We use this in step n1.
849
850 - gdbarch_displaced_step_fixup adjusts registers and memory after
851 we have successfully single-stepped the instruction, to yield the
852 same effect the instruction would have had if we had executed it
853 at its original address. We use this in step n3.
854
855 - gdbarch_displaced_step_free_closure provides cleanup.
856
857 The gdbarch_displaced_step_copy_insn and
858 gdbarch_displaced_step_fixup functions must be written so that
859 copying an instruction with gdbarch_displaced_step_copy_insn,
860 single-stepping across the copied instruction, and then applying
861 gdbarch_displaced_step_fixup should have the same effects on the
862 thread's memory and registers as stepping the instruction in place
863 would have. Exactly which responsibilities fall to the copy and
864 which fall to the fixup is up to the author of those functions.
865
866 See the comments in gdbarch.sh for details.
867
868 Note that displaced stepping and software single-step cannot
869 currently be used in combination, although with some care I think
870 they could be made to. Software single-step works by placing
871 breakpoints on all possible subsequent instructions; if the
872 displaced instruction is a PC-relative jump, those breakpoints
873 could fall in very strange places --- on pages that aren't
874 executable, or at addresses that are not proper instruction
875 boundaries. (We do generally let other threads run while we wait
876 to hit the software single-step breakpoint, and they might
877 encounter such a corrupted instruction.) One way to work around
878 this would be to have gdbarch_displaced_step_copy_insn fully
879 simulate the effect of PC-relative instructions (and return NULL)
880 on architectures that use software single-stepping.
881
882 In non-stop mode, we can have independent and simultaneous step
883 requests, so more than one thread may need to simultaneously step
884 over a breakpoint. The current implementation assumes there is
885 only one scratch space per process. In this case, we have to
886 serialize access to the scratch space. If thread A wants to step
887 over a breakpoint, but we are currently waiting for some other
888 thread to complete a displaced step, we leave thread A stopped and
889 place it in the displaced_step_request_queue. Whenever a displaced
890 step finishes, we pick the next thread in the queue and start a new
891 displaced step operation on it. See displaced_step_prepare and
892 displaced_step_fixup for details. */
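
/* For illustration only: the n1-n4 sequence above, condensed into a
   single hypothetical function.  The real implementation is split
   between displaced_step_prepare and displaced_step_fixup below, which
   additionally save and restore the scratch area contents, queue
   competing requests, and cope with errors; none of that is shown
   here.  */
#if 0
static void
example_displaced_step_once (ptid_t ptid)
{
  struct regcache *regcache = get_thread_regcache (ptid);
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  CORE_ADDR original = regcache_read_pc (regcache);
  CORE_ADDR copy = gdbarch_displaced_step_location (gdbarch);
  struct displaced_step_closure *closure;

  /* n1: copy the instruction out of line, letting the architecture
     adjust it (and any register/memory state) as needed.  */
  closure = gdbarch_displaced_step_copy_insn (gdbarch, original, copy,
					      regcache);

  /* n2: single-step the copy.  In the real code the wait for the step
     to finish happens back in the event loop, not inline like this.  */
  regcache_write_pc (regcache, copy);
  target_resume (ptid, 1, TARGET_SIGNAL_0);
  /* ... wait for the single-step to report back ...  */

  /* n3: fix up registers and memory, including pointing the PC back
     into the main instruction stream.  */
  gdbarch_displaced_step_fixup (gdbarch, closure, original, copy, regcache);
  gdbarch_displaced_step_free_closure (gdbarch, closure);

  /* n4: the thread can now be resumed normally.  */
}
#endif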
893
894 /* If this is not null_ptid, this is the thread carrying out a
895 displaced single-step. This thread's state will require fixing up
896 once it has completed its step. */
897 static ptid_t displaced_step_ptid;
898
899 struct displaced_step_request
900 {
901 ptid_t ptid;
902 struct displaced_step_request *next;
903 };
904
905 /* A queue of pending displaced stepping requests. */
906 struct displaced_step_request *displaced_step_request_queue;
907
908 /* The architecture the thread had when we stepped it. */
909 static struct gdbarch *displaced_step_gdbarch;
910
911 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
912 for post-step cleanup. */
913 static struct displaced_step_closure *displaced_step_closure;
914
915 /* The address of the original instruction, and the copy we made. */
916 static CORE_ADDR displaced_step_original, displaced_step_copy;
917
918 /* Saved contents of copy area. */
919 static gdb_byte *displaced_step_saved_copy;
920
921 /* Enum strings for "set|show displaced-stepping". */
922
923 static const char can_use_displaced_stepping_auto[] = "auto";
924 static const char can_use_displaced_stepping_on[] = "on";
925 static const char can_use_displaced_stepping_off[] = "off";
926 static const char *can_use_displaced_stepping_enum[] =
927 {
928 can_use_displaced_stepping_auto,
929 can_use_displaced_stepping_on,
930 can_use_displaced_stepping_off,
931 NULL,
932 };
933
934 /* If ON, and the architecture supports it, GDB will use displaced
935 stepping to step over breakpoints. If OFF, or if the architecture
936 doesn't support it, GDB will instead use the traditional
937 hold-and-step approach. If AUTO (which is the default), GDB will
938 decide which technique to use to step over breakpoints depending on
939 which of all-stop or non-stop mode is active --- displaced stepping
940 in non-stop mode; hold-and-step in all-stop mode. */
941
942 static const char *can_use_displaced_stepping =
943 can_use_displaced_stepping_auto;
944
945 static void
946 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
947 struct cmd_list_element *c,
948 const char *value)
949 {
950 if (can_use_displaced_stepping == can_use_displaced_stepping_auto)
951 fprintf_filtered (file, _("\
952 Debugger's willingness to use displaced stepping to step over \
953 breakpoints is %s (currently %s).\n"),
954 value, non_stop ? "on" : "off");
955 else
956 fprintf_filtered (file, _("\
957 Debugger's willingness to use displaced stepping to step over \
958 breakpoints is %s.\n"), value);
959 }
960
961 /* Return non-zero if displaced stepping can/should be used to step
962 over breakpoints. */
963
964 static int
965 use_displaced_stepping (struct gdbarch *gdbarch)
966 {
967 return (((can_use_displaced_stepping == can_use_displaced_stepping_auto
968 && non_stop)
969 || can_use_displaced_stepping == can_use_displaced_stepping_on)
970 && gdbarch_displaced_step_copy_insn_p (gdbarch)
971 && !RECORD_IS_USED);
972 }
973
974 /* Clean out any stray displaced stepping state. */
975 static void
976 displaced_step_clear (void)
977 {
978 /* Indicate that there is no cleanup pending. */
979 displaced_step_ptid = null_ptid;
980
981 if (displaced_step_closure)
982 {
983 gdbarch_displaced_step_free_closure (displaced_step_gdbarch,
984 displaced_step_closure);
985 displaced_step_closure = NULL;
986 }
987 }
988
989 static void
990 displaced_step_clear_cleanup (void *ignore)
991 {
992 displaced_step_clear ();
993 }
994
995 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
996 void
997 displaced_step_dump_bytes (struct ui_file *file,
998 const gdb_byte *buf,
999 size_t len)
1000 {
1001 int i;
1002
1003 for (i = 0; i < len; i++)
1004 fprintf_unfiltered (file, "%02x ", buf[i]);
1005 fputs_unfiltered ("\n", file);
1006 }
1007
1008 /* Prepare to single-step, using displaced stepping.
1009
1010 Note that we cannot use displaced stepping when we have a signal to
1011 deliver. If we have a signal to deliver and an instruction to step
1012 over, then after the step, there will be no indication from the
1013 target whether the thread entered a signal handler or ignored the
1014 signal and stepped over the instruction successfully --- both cases
1015 result in a simple SIGTRAP. In the first case we mustn't do a
1016 fixup, and in the second case we must --- but we can't tell which.
1017 Comments in the code for 'random signals' in handle_inferior_event
1018 explain how we handle this case instead.
1019
1020 Returns 1 if preparing was successful -- this thread is going to be
1021 stepped now; or 0 if displaced stepping this thread got queued. */
1022 static int
1023 displaced_step_prepare (ptid_t ptid)
1024 {
1025 struct cleanup *old_cleanups, *ignore_cleanups;
1026 struct regcache *regcache = get_thread_regcache (ptid);
1027 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1028 CORE_ADDR original, copy;
1029 ULONGEST len;
1030 struct displaced_step_closure *closure;
1031
1032 /* We should never reach this function if the architecture does not
1033 support displaced stepping. */
1034 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1035
1036 /* For the first cut, we're displaced stepping one thread at a
1037 time. */
1038
1039 if (!ptid_equal (displaced_step_ptid, null_ptid))
1040 {
1041 /* Already waiting for a displaced step to finish. Defer this
1042 request and place in queue. */
1043 struct displaced_step_request *req, *new_req;
1044
1045 if (debug_displaced)
1046 fprintf_unfiltered (gdb_stdlog,
1047 "displaced: defering step of %s\n",
1048 target_pid_to_str (ptid));
1049
1050 new_req = xmalloc (sizeof (*new_req));
1051 new_req->ptid = ptid;
1052 new_req->next = NULL;
1053
1054 if (displaced_step_request_queue)
1055 {
1056 for (req = displaced_step_request_queue;
1057 req && req->next;
1058 req = req->next)
1059 ;
1060 req->next = new_req;
1061 }
1062 else
1063 displaced_step_request_queue = new_req;
1064
1065 return 0;
1066 }
1067 else
1068 {
1069 if (debug_displaced)
1070 fprintf_unfiltered (gdb_stdlog,
1071 "displaced: stepping %s now\n",
1072 target_pid_to_str (ptid));
1073 }
1074
1075 displaced_step_clear ();
1076
1077 old_cleanups = save_inferior_ptid ();
1078 inferior_ptid = ptid;
1079
1080 original = regcache_read_pc (regcache);
1081
1082 copy = gdbarch_displaced_step_location (gdbarch);
1083 len = gdbarch_max_insn_length (gdbarch);
1084
1085 /* Save the original contents of the copy area. */
1086 displaced_step_saved_copy = xmalloc (len);
1087 ignore_cleanups = make_cleanup (free_current_contents,
1088 &displaced_step_saved_copy);
1089 read_memory (copy, displaced_step_saved_copy, len);
1090 if (debug_displaced)
1091 {
1092 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1093 paddress (gdbarch, copy));
1094 displaced_step_dump_bytes (gdb_stdlog, displaced_step_saved_copy, len);
1095 }
1096
1097 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1098 original, copy, regcache);
1099
1100 /* We don't support the fully-simulated case at present. */
1101 gdb_assert (closure);
1102
1103 /* Save the information we need to fix things up if the step
1104 succeeds. */
1105 displaced_step_ptid = ptid;
1106 displaced_step_gdbarch = gdbarch;
1107 displaced_step_closure = closure;
1108 displaced_step_original = original;
1109 displaced_step_copy = copy;
1110
1111 make_cleanup (displaced_step_clear_cleanup, 0);
1112
1113 /* Resume execution at the copy. */
1114 regcache_write_pc (regcache, copy);
1115
1116 discard_cleanups (ignore_cleanups);
1117
1118 do_cleanups (old_cleanups);
1119
1120 if (debug_displaced)
1121 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1122 paddress (gdbarch, copy));
1123
1124 return 1;
1125 }
1126
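/* Write LEN bytes from MYADDR to target memory at MEMADDR, on behalf of
   thread PTID: inferior_ptid is temporarily switched to PTID around the
   write and restored afterwards.  */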
1127 static void
1128 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
1129 {
1130 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1131 inferior_ptid = ptid;
1132 write_memory (memaddr, myaddr, len);
1133 do_cleanups (ptid_cleanup);
1134 }
1135
1136 static void
1137 displaced_step_fixup (ptid_t event_ptid, enum target_signal signal)
1138 {
1139 struct cleanup *old_cleanups;
1140
1141 /* Was this event for the pid we displaced? */
1142 if (ptid_equal (displaced_step_ptid, null_ptid)
1143 || ! ptid_equal (displaced_step_ptid, event_ptid))
1144 return;
1145
1146 old_cleanups = make_cleanup (displaced_step_clear_cleanup, 0);
1147
1148 /* Restore the contents of the copy area. */
1149 {
1150 ULONGEST len = gdbarch_max_insn_length (displaced_step_gdbarch);
1151 write_memory_ptid (displaced_step_ptid, displaced_step_copy,
1152 displaced_step_saved_copy, len);
1153 if (debug_displaced)
1154 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s\n",
1155 paddress (displaced_step_gdbarch,
1156 displaced_step_copy));
1157 }
1158
1159 /* Did the instruction complete successfully? */
1160 if (signal == TARGET_SIGNAL_TRAP)
1161 {
1162 /* Fix up the resulting state. */
1163 gdbarch_displaced_step_fixup (displaced_step_gdbarch,
1164 displaced_step_closure,
1165 displaced_step_original,
1166 displaced_step_copy,
1167 get_thread_regcache (displaced_step_ptid));
1168 }
1169 else
1170 {
1171 /* Since the instruction didn't complete, all we can do is
1172 relocate the PC. */
1173 struct regcache *regcache = get_thread_regcache (event_ptid);
1174 CORE_ADDR pc = regcache_read_pc (regcache);
1175 pc = displaced_step_original + (pc - displaced_step_copy);
1176 regcache_write_pc (regcache, pc);
1177 }
1178
1179 do_cleanups (old_cleanups);
1180
1181 displaced_step_ptid = null_ptid;
1182
1183 /* Are there any pending displaced stepping requests? If so, run
1184 one now. */
1185 while (displaced_step_request_queue)
1186 {
1187 struct displaced_step_request *head;
1188 ptid_t ptid;
1189 struct regcache *regcache;
1190 struct gdbarch *gdbarch;
1191 CORE_ADDR actual_pc;
1192 struct address_space *aspace;
1193
1194 head = displaced_step_request_queue;
1195 ptid = head->ptid;
1196 displaced_step_request_queue = head->next;
1197 xfree (head);
1198
1199 context_switch (ptid);
1200
1201 regcache = get_thread_regcache (ptid);
1202 actual_pc = regcache_read_pc (regcache);
1203 aspace = get_regcache_aspace (regcache);
1204
1205 if (breakpoint_here_p (aspace, actual_pc))
1206 {
1207 if (debug_displaced)
1208 fprintf_unfiltered (gdb_stdlog,
1209 "displaced: stepping queued %s now\n",
1210 target_pid_to_str (ptid));
1211
1212 displaced_step_prepare (ptid);
1213
1214 gdbarch = get_regcache_arch (regcache);
1215
1216 if (debug_displaced)
1217 {
1218 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1219 gdb_byte buf[4];
1220
1221 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1222 paddress (gdbarch, actual_pc));
1223 read_memory (actual_pc, buf, sizeof (buf));
1224 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1225 }
1226
1227 if (gdbarch_displaced_step_hw_singlestep
1228 (gdbarch, displaced_step_closure))
1229 target_resume (ptid, 1, TARGET_SIGNAL_0);
1230 else
1231 target_resume (ptid, 0, TARGET_SIGNAL_0);
1232
1233 /* Done, we're stepping a thread. */
1234 break;
1235 }
1236 else
1237 {
1238 int step;
1239 struct thread_info *tp = inferior_thread ();
1240
1241 /* The breakpoint we were sitting under has since been
1242 removed. */
1243 tp->trap_expected = 0;
1244
1245 /* Go back to what we were trying to do. */
1246 step = currently_stepping (tp);
1247
1248 if (debug_displaced)
1249 fprintf_unfiltered (gdb_stdlog, "breakpoint is gone %s: step(%d)\n",
1250 target_pid_to_str (tp->ptid), step);
1251
1252 target_resume (ptid, step, TARGET_SIGNAL_0);
1253 tp->stop_signal = TARGET_SIGNAL_0;
1254
1255 /* This request was discarded. See if there's any other
1256 thread waiting for its turn. */
1257 }
1258 }
1259 }
1260
1261 /* Update global variables holding ptids to hold NEW_PTID if they were
1262 holding OLD_PTID. */
1263 static void
1264 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1265 {
1266 struct displaced_step_request *it;
1267
1268 if (ptid_equal (inferior_ptid, old_ptid))
1269 inferior_ptid = new_ptid;
1270
1271 if (ptid_equal (singlestep_ptid, old_ptid))
1272 singlestep_ptid = new_ptid;
1273
1274 if (ptid_equal (displaced_step_ptid, old_ptid))
1275 displaced_step_ptid = new_ptid;
1276
1277 if (ptid_equal (deferred_step_ptid, old_ptid))
1278 deferred_step_ptid = new_ptid;
1279
1280 for (it = displaced_step_request_queue; it; it = it->next)
1281 if (ptid_equal (it->ptid, old_ptid))
1282 it->ptid = new_ptid;
1283 }
1284
1285 \f
1286 /* Resuming. */
1287
1288 /* Things to clean up if we QUIT out of resume (). */
1289 static void
1290 resume_cleanups (void *ignore)
1291 {
1292 normal_stop ();
1293 }
1294
1295 static const char schedlock_off[] = "off";
1296 static const char schedlock_on[] = "on";
1297 static const char schedlock_step[] = "step";
1298 static const char *scheduler_enums[] = {
1299 schedlock_off,
1300 schedlock_on,
1301 schedlock_step,
1302 NULL
1303 };
1304 static const char *scheduler_mode = schedlock_off;
1305 static void
1306 show_scheduler_mode (struct ui_file *file, int from_tty,
1307 struct cmd_list_element *c, const char *value)
1308 {
1309 fprintf_filtered (file, _("\
1310 Mode for locking scheduler during execution is \"%s\".\n"),
1311 value);
1312 }
1313
1314 static void
1315 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1316 {
1317 if (!target_can_lock_scheduler)
1318 {
1319 scheduler_mode = schedlock_off;
1320 error (_("Target '%s' cannot support this command."), target_shortname);
1321 }
1322 }
1323
1324 /* True if execution commands resume all threads of all processes by
1325 default; otherwise, resume only threads of the current inferior
1326 process. */
1327 int sched_multi = 0;
1328
1329 /* Try to set up for software single stepping over the specified location.
1330 Return 1 if target_resume() should use hardware single step.
1331
1332 GDBARCH the current gdbarch.
1333 PC the location to step over. */
1334
1335 static int
1336 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1337 {
1338 int hw_step = 1;
1339
1340 if (gdbarch_software_single_step_p (gdbarch)
1341 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1342 {
1343 hw_step = 0;
1344 /* Do not pull these breakpoints until after a `wait' in
1345 `wait_for_inferior' */
1346 singlestep_breakpoints_inserted_p = 1;
1347 singlestep_ptid = inferior_ptid;
1348 singlestep_pc = pc;
1349 }
1350 return hw_step;
1351 }
1352
1353 /* Resume the inferior, but allow a QUIT. This is useful if the user
1354 wants to interrupt some lengthy single-stepping operation
1355 (for child processes, the SIGINT goes to the inferior, and so
1356 we get a SIGINT random_signal, but for remote debugging and perhaps
1357 other targets, that's not true).
1358
1359 STEP nonzero if we should step (zero to continue instead).
1360 SIG is the signal to give the inferior (zero for none). */
1361 void
1362 resume (int step, enum target_signal sig)
1363 {
1364 int should_resume = 1;
1365 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1366 struct regcache *regcache = get_current_regcache ();
1367 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1368 struct thread_info *tp = inferior_thread ();
1369 CORE_ADDR pc = regcache_read_pc (regcache);
1370 struct address_space *aspace = get_regcache_aspace (regcache);
1371
1372 QUIT;
1373
1374 if (debug_infrun)
1375 fprintf_unfiltered (gdb_stdlog,
1376 "infrun: resume (step=%d, signal=%d), "
1377 "trap_expected=%d\n",
1378 step, sig, tp->trap_expected);
1379
1380 /* Some targets (e.g. Solaris x86) have a kernel bug when stepping
1381 over an instruction that causes a page fault without triggering
1382 a hardware watchpoint. The kernel properly notices that it shouldn't
1383 stop, because the hardware watchpoint is not triggered, but it forgets
1384 the step request and continues the program normally.
1385 Work around the problem by removing hardware watchpoints if a step is
1386 requested; GDB will check for a hardware watchpoint trigger after the
1387 step anyway. */
1388 if (CANNOT_STEP_HW_WATCHPOINTS && step)
1389 remove_hw_watchpoints ();
1390
1391
1392 /* Normally, by the time we reach `resume', the breakpoints are either
1393 removed or inserted, as appropriate. The exception is if we're sitting
1394 at a permanent breakpoint; we need to step over it, but permanent
1395 breakpoints can't be removed. So we have to test for it here. */
1396 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
1397 {
1398 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1399 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1400 else
1401 error (_("\
1402 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1403 how to step past a permanent breakpoint on this architecture. Try using\n\
1404 a command like `return' or `jump' to continue execution."));
1405 }
1406
1407 /* If enabled, step over breakpoints by executing a copy of the
1408 instruction at a different address.
1409
1410 We can't use displaced stepping when we have a signal to deliver;
1411 the comments for displaced_step_prepare explain why. The
1412 comments in handle_inferior_event for dealing with 'random
1413 signals' explain what we do instead. */
1414 if (use_displaced_stepping (gdbarch)
1415 && (tp->trap_expected
1416 || (step && gdbarch_software_single_step_p (gdbarch)))
1417 && sig == TARGET_SIGNAL_0)
1418 {
1419 if (!displaced_step_prepare (inferior_ptid))
1420 {
1421 /* Got placed in displaced stepping queue. Will be resumed
1422 later when all the currently queued displaced stepping
1423 requests finish. The thread is not executing at this point,
1424 and the call to set_executing will be made later. But we
1425 need to call set_running here, since from frontend point of view,
1426 the thread is running. */
1427 set_running (inferior_ptid, 1);
1428 discard_cleanups (old_cleanups);
1429 return;
1430 }
1431
1432 step = gdbarch_displaced_step_hw_singlestep
1433 (gdbarch, displaced_step_closure);
1434 }
1435
1436 /* Do we need to do it the hard way, with temporary breakpoints? */
1437 else if (step)
1438 step = maybe_software_singlestep (gdbarch, pc);
1439
1440 if (should_resume)
1441 {
1442 ptid_t resume_ptid;
1443
1444 /* If STEP is set, it's a request to use hardware stepping
1445 facilities. But in that case, we should never
1446 use singlestep breakpoint. */
1447 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1448
1449 /* Decide the set of threads to ask the target to resume. Start
1450 by assuming everything will be resumed, then narrow the set
1451 by applying increasingly restrictive conditions.
1452
1453 /* By default, resume all threads of all processes. */
1454 resume_ptid = RESUME_ALL;
1455
1456 /* Maybe resume only all threads of the current process. */
1457 if (!sched_multi && target_supports_multi_process ())
1458 {
1459 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1460 }
1461
1462 /* Maybe resume a single thread after all. */
1463 if (singlestep_breakpoints_inserted_p
1464 && stepping_past_singlestep_breakpoint)
1465 {
1466 /* The situation here is as follows. In thread T1 we wanted to
1467 single-step. Lacking hardware single-stepping, we've
1468 set a breakpoint at the PC of the next instruction -- call it
1469 P. After resuming, we've hit that breakpoint in thread T2.
1470 Now we've removed the original breakpoint, inserted a breakpoint
1471 at P+1, and are trying to step T2 past that breakpoint.
1472 We need to step only T2, as if T1 is allowed to run freely,
1473 it can run past P, and if other threads are allowed to run,
1474 they can hit the breakpoint at P+1, and nested hits of single-step
1475 breakpoints are not something we'd want -- that's complicated
1476 to support, and has no value. */
1477 resume_ptid = inferior_ptid;
1478 }
1479 else if ((step || singlestep_breakpoints_inserted_p)
1480 && tp->trap_expected)
1481 {
1482 /* We're allowing a thread to run past a breakpoint it has
1483 hit, by single-stepping the thread with the breakpoint
1484 removed. In that case, we need to single-step only this
1485 thread, and keep others stopped, as they can miss this
1486 breakpoint if allowed to run.
1487
1488 The current code actually removes all breakpoints when
1489 doing this, not just the one being stepped over, so if we
1490 let other threads run, we can actually miss any
1491 breakpoint, not just the one at PC. */
1492 resume_ptid = inferior_ptid;
1493 }
1494 else if (non_stop)
1495 {
1496 /* With non-stop mode on, threads are always handled
1497 individually. */
1498 resume_ptid = inferior_ptid;
1499 }
1500 else if ((scheduler_mode == schedlock_on)
1501 || (scheduler_mode == schedlock_step
1502 && (step || singlestep_breakpoints_inserted_p)))
1503 {
1504 /* User-settable 'scheduler' mode requires solo thread resume. */
1505 resume_ptid = inferior_ptid;
1506 }
1507
1508 if (gdbarch_cannot_step_breakpoint (gdbarch))
1509 {
1510 /* Most targets can step a breakpoint instruction, thus
1511 executing it normally. But if this one cannot, just
1512 continue and we will hit it anyway. */
1513 if (step && breakpoint_inserted_here_p (aspace, pc))
1514 step = 0;
1515 }
1516
1517 if (debug_displaced
1518 && use_displaced_stepping (gdbarch)
1519 && tp->trap_expected)
1520 {
1521 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1522 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1523 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1524 gdb_byte buf[4];
1525
1526 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1527 paddress (resume_gdbarch, actual_pc));
1528 read_memory (actual_pc, buf, sizeof (buf));
1529 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1530 }
1531
1532 /* Install inferior's terminal modes. */
1533 target_terminal_inferior ();
1534
1535 /* Avoid confusing the next resume, if the next stop/resume
1536 happens to apply to another thread. */
1537 tp->stop_signal = TARGET_SIGNAL_0;
1538
1539 target_resume (resume_ptid, step, sig);
1540 }
1541
1542 discard_cleanups (old_cleanups);
1543 }
1544 \f
1545 /* Proceeding. */
1546
1547 /* Clear out all variables saying what to do when inferior is continued.
1548 First do this, then set the ones you want, then call `proceed'. */
1549
1550 static void
1551 clear_proceed_status_thread (struct thread_info *tp)
1552 {
1553 if (debug_infrun)
1554 fprintf_unfiltered (gdb_stdlog,
1555 "infrun: clear_proceed_status_thread (%s)\n",
1556 target_pid_to_str (tp->ptid));
1557
1558 tp->trap_expected = 0;
1559 tp->step_range_start = 0;
1560 tp->step_range_end = 0;
1561 tp->step_frame_id = null_frame_id;
1562 tp->step_stack_frame_id = null_frame_id;
1563 tp->step_over_calls = STEP_OVER_UNDEBUGGABLE;
1564 tp->stop_requested = 0;
1565
1566 tp->stop_step = 0;
1567
1568 tp->proceed_to_finish = 0;
1569
1570 /* Discard any remaining commands or status from previous stop. */
1571 bpstat_clear (&tp->stop_bpstat);
1572 }
1573
1574 static int
1575 clear_proceed_status_callback (struct thread_info *tp, void *data)
1576 {
1577 if (is_exited (tp->ptid))
1578 return 0;
1579
1580 clear_proceed_status_thread (tp);
1581 return 0;
1582 }
1583
1584 void
1585 clear_proceed_status (void)
1586 {
1587 if (!non_stop)
1588 {
1589 /* In all-stop mode, delete the per-thread status of all
1590 threads; even if inferior_ptid is null_ptid, there may be
1591 threads on the list. E.g., we may be launching a new
1592 process, while selecting the executable. */
1593 iterate_over_threads (clear_proceed_status_callback, NULL);
1594 }
1595
1596 if (!ptid_equal (inferior_ptid, null_ptid))
1597 {
1598 struct inferior *inferior;
1599
1600 if (non_stop)
1601 {
1602 /* If in non-stop mode, only delete the per-thread status of
1603 the current thread. */
1604 clear_proceed_status_thread (inferior_thread ());
1605 }
1606
1607 inferior = current_inferior ();
1608 inferior->stop_soon = NO_STOP_QUIETLY;
1609 }
1610
1611 stop_after_trap = 0;
1612
1613 observer_notify_about_to_proceed ();
1614
1615 if (stop_registers)
1616 {
1617 regcache_xfree (stop_registers);
1618 stop_registers = NULL;
1619 }
1620 }
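
/* For illustration only: the calling convention described above for
   clear_proceed_status ("first clear, then set what you want, then call
   proceed"), roughly the way a stepping command drives it.  The function
   name and the particular fields set are just an example, not the actual
   step command implementation.  */
#if 0
static void
example_step_in_range (CORE_ADDR range_start, CORE_ADDR range_end)
{
  struct thread_info *tp = inferior_thread ();

  /* Reset all per-thread run control state first...  */
  clear_proceed_status ();

  /* ...then request stepping while the PC stays inside
     [RANGE_START, RANGE_END) of the current frame...  */
  tp->step_range_start = range_start;
  tp->step_range_end = range_end;
  tp->step_frame_id = get_frame_id (get_current_frame ());

  /* ...and finally resume from where we stopped.  */
  proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 1);
}
#endif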
1621
1622 /* Check the current thread against the thread that reported the most recent
1623 event. If a step-over is required, return TRUE and set the current thread
1624 to the old thread. Otherwise return FALSE.
1625
1626 This should be suitable for any targets that support threads. */
1627
1628 static int
1629 prepare_to_proceed (int step)
1630 {
1631 ptid_t wait_ptid;
1632 struct target_waitstatus wait_status;
1633 int schedlock_enabled;
1634
1635 /* With non-stop mode on, threads are always handled individually. */
1636 gdb_assert (! non_stop);
1637
1638 /* Get the last target status returned by target_wait(). */
1639 get_last_target_status (&wait_ptid, &wait_status);
1640
1641 /* Make sure we were stopped at a breakpoint. */
1642 if (wait_status.kind != TARGET_WAITKIND_STOPPED
1643 || (wait_status.value.sig != TARGET_SIGNAL_TRAP
1644 && wait_status.value.sig != TARGET_SIGNAL_ILL
1645 && wait_status.value.sig != TARGET_SIGNAL_SEGV
1646 && wait_status.value.sig != TARGET_SIGNAL_EMT))
1647 {
1648 return 0;
1649 }
1650
1651 schedlock_enabled = (scheduler_mode == schedlock_on
1652 || (scheduler_mode == schedlock_step
1653 && step));
1654
1655 /* Don't switch over to WAIT_PTID if scheduler locking is on. */
1656 if (schedlock_enabled)
1657 return 0;
1658
1659 /* Don't switch over if we're about to resume a process other
1660 than WAIT_PTID's, and schedule-multiple is off. */
1661 if (!sched_multi
1662 && ptid_get_pid (wait_ptid) != ptid_get_pid (inferior_ptid))
1663 return 0;
1664
1665 /* Switched over from WAIT_PTID. */
1666 if (!ptid_equal (wait_ptid, minus_one_ptid)
1667 && !ptid_equal (inferior_ptid, wait_ptid))
1668 {
1669 struct regcache *regcache = get_thread_regcache (wait_ptid);
1670
1671 if (breakpoint_here_p (get_regcache_aspace (regcache),
1672 regcache_read_pc (regcache)))
1673 {
1674 /* If stepping, remember current thread to switch back to. */
1675 if (step)
1676 deferred_step_ptid = inferior_ptid;
1677
1678 /* Switch back to the WAIT_PTID thread. */
1679 switch_to_thread (wait_ptid);
1680
1681 /* We return 1 to indicate that there is a breakpoint here,
1682 so we need to step over it before continuing to avoid
1683 hitting it straight away. */
1684 return 1;
1685 }
1686 }
1687
1688 return 0;
1689 }
1690
1691 /* Basic routine for continuing the program in various fashions.
1692
1693 ADDR is the address to resume at, or -1 for resume where stopped.
1694 SIGGNAL is the signal to give it, or 0 for none,
1695 or -1 to act according to how it stopped.
1696 STEP is nonzero if we should trap after one instruction.
1697 A STEP of -1 means return after that and print nothing.
1698 You should probably set various step_... variables
1699 before calling here, if you are stepping.
1700
1701 You should call clear_proceed_status before calling proceed. */
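/* For example (illustrative calls only): resuming where the inferior
   stopped while letting it see whatever signal it stopped with, versus
   single-stepping one instruction from the same spot:

       proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);
       proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 1);

   TARGET_SIGNAL_DEFAULT plays the role of the "-1" signal value
   described above. */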
1702
1703 void
1704 proceed (CORE_ADDR addr, enum target_signal siggnal, int step)
1705 {
1706 struct regcache *regcache;
1707 struct gdbarch *gdbarch;
1708 struct thread_info *tp;
1709 CORE_ADDR pc;
1710 struct address_space *aspace;
1711 int oneproc = 0;
1712
1713 /* If we're stopped at a fork/vfork, follow the branch set by the
1714 "set follow-fork-mode" command; otherwise, we'll just proceed
1715 resuming the current thread. */
1716 if (!follow_fork ())
1717 {
1718 /* The target for some reason decided not to resume. */
1719 normal_stop ();
1720 return;
1721 }
1722
1723 regcache = get_current_regcache ();
1724 gdbarch = get_regcache_arch (regcache);
1725 aspace = get_regcache_aspace (regcache);
1726 pc = regcache_read_pc (regcache);
1727
1728 if (step > 0)
1729 step_start_function = find_pc_function (pc);
1730 if (step < 0)
1731 stop_after_trap = 1;
1732
1733 if (addr == (CORE_ADDR) -1)
1734 {
1735 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
1736 && execution_direction != EXEC_REVERSE)
1737 /* There is a breakpoint at the address we will resume at,
1738 step one instruction before inserting breakpoints so that
1739 we do not stop right away (and report a second hit at this
1740 breakpoint).
1741
1742 Note, we don't do this in reverse, because we won't
1743 actually be executing the breakpoint insn anyway.
1744 We'll be (un-)executing the previous instruction. */
1745
1746 oneproc = 1;
1747 else if (gdbarch_single_step_through_delay_p (gdbarch)
1748 && gdbarch_single_step_through_delay (gdbarch,
1749 get_current_frame ()))
1750 /* We stepped onto an instruction that needs to be stepped
1751 again before re-inserting the breakpoint; do so. */
1752 oneproc = 1;
1753 }
1754 else
1755 {
1756 regcache_write_pc (regcache, addr);
1757 }
1758
1759 if (debug_infrun)
1760 fprintf_unfiltered (gdb_stdlog,
1761 "infrun: proceed (addr=%s, signal=%d, step=%d)\n",
1762 paddress (gdbarch, addr), siggnal, step);
1763
1764 if (non_stop)
1765 /* In non-stop, each thread is handled individually. The context
1766 must already be set to the right thread here. */
1767 ;
1768 else
1769 {
1770 /* In a multi-threaded task we may select another thread and
1771 then continue or step.
1772
1773 But if the old thread was stopped at a breakpoint, it will
1774 immediately cause another breakpoint stop without any
1775 execution (i.e. it will report a breakpoint hit incorrectly).
1776 So we must step over it first.
1777
1778 prepare_to_proceed checks the current thread against the
1779 thread that reported the most recent event. If a step-over
1780 is required it returns TRUE and sets the current thread to
1781 the old thread. */
1782 if (prepare_to_proceed (step))
1783 oneproc = 1;
1784 }
1785
1786 /* prepare_to_proceed may change the current thread. */
1787 tp = inferior_thread ();
1788
1789 if (oneproc)
1790 {
1791 tp->trap_expected = 1;
1792 /* If displaced stepping is enabled, we can step over the
1793 breakpoint without hitting it, so leave all breakpoints
1794 inserted. Otherwise we need to disable all breakpoints, step
1795 one instruction, and then re-add them when that step is
1796 finished. */
1797 if (!use_displaced_stepping (gdbarch))
1798 remove_breakpoints ();
1799 }
1800
1801 /* We can insert breakpoints if we're not trying to step over one,
1802 or if we are stepping over one but we're using displaced stepping
1803 to do so. */
1804 if (! tp->trap_expected || use_displaced_stepping (gdbarch))
1805 insert_breakpoints ();
1806
1807 if (!non_stop)
1808 {
1809 /* Pass the last stop signal to the thread we're resuming,
1810 irrespective of whether the current thread is the thread that
1811 got the last event or not. This was historically GDB's
1812 behaviour before keeping a stop_signal per thread. */
1813
1814 struct thread_info *last_thread;
1815 ptid_t last_ptid;
1816 struct target_waitstatus last_status;
1817
1818 get_last_target_status (&last_ptid, &last_status);
1819 if (!ptid_equal (inferior_ptid, last_ptid)
1820 && !ptid_equal (last_ptid, null_ptid)
1821 && !ptid_equal (last_ptid, minus_one_ptid))
1822 {
1823 last_thread = find_thread_ptid (last_ptid);
1824 if (last_thread)
1825 {
1826 tp->stop_signal = last_thread->stop_signal;
1827 last_thread->stop_signal = TARGET_SIGNAL_0;
1828 }
1829 }
1830 }
1831
1832 if (siggnal != TARGET_SIGNAL_DEFAULT)
1833 tp->stop_signal = siggnal;
1834 /* If this signal should not be seen by the program,
1835 give it zero. Used for debugging signals. */
1836 else if (!signal_program[tp->stop_signal])
1837 tp->stop_signal = TARGET_SIGNAL_0;
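/* E.g., after the user runs "handle SIGUSR1 nopass",
   signal_program[TARGET_SIGNAL_USR1] is zero, so a thread that stopped
   with SIGUSR1 is resumed here with TARGET_SIGNAL_0 instead, and the
   program never sees the signal. */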
1838
1839 annotate_starting ();
1840
1841 /* Make sure that output from GDB appears before output from the
1842 inferior. */
1843 gdb_flush (gdb_stdout);
1844
1845 /* Refresh prev_pc value just prior to resuming. This used to be
1846 done in stop_stepping, however, setting prev_pc there did not handle
1847 scenarios such as inferior function calls or returning from
1848 a function via the return command. In those cases, the prev_pc
1849 value was not set properly for subsequent commands. The prev_pc value
1850 is used to initialize the starting line number in the ecs. With an
1851 invalid value, the gdb next command ends up stopping at the position
1852 represented by the next line table entry past our start position.
1853 On platforms that generate one line table entry per line, this
1854 is not a problem. However, on the ia64, the compiler generates
1855 extraneous line table entries that do not increase the line number.
1856 When we issue the gdb next command on the ia64 after an inferior call
1857 or a return command, we often end up a few instructions forward, still
1858 within the original line we started in.
1859
1860 An attempt was made to refresh the prev_pc at the same time the
1861 execution_control_state is initialized (for instance, just before
1862 waiting for an inferior event). But this approach did not work
1863 because of platforms that use ptrace, where the pc register cannot
1864 be read unless the inferior is stopped. At that point, we are not
1865 guaranteed the inferior is stopped and so the regcache_read_pc() call
1866 can fail. Setting the prev_pc value here ensures the value is updated
1867 correctly when the inferior is stopped. */
1868 tp->prev_pc = regcache_read_pc (get_current_regcache ());
1869
1870 /* Fill in with reasonable starting values. */
1871 init_thread_stepping_state (tp);
1872
1873 /* Reset to normal state. */
1874 init_infwait_state ();
1875
1876 /* Resume inferior. */
1877 resume (oneproc || step || bpstat_should_step (), tp->stop_signal);
1878
1879 /* Wait for it to stop (if not standalone)
1880 and in any case decode why it stopped, and act accordingly. */
1881 /* Do this only if we are not using the event loop, or if the target
1882 does not support asynchronous execution. */
1883 if (!target_can_async_p ())
1884 {
1885 wait_for_inferior (0);
1886 normal_stop ();
1887 }
1888 }
1889 \f
1890
1891 /* Start remote-debugging of a machine over a serial link. */
1892
1893 void
1894 start_remote (int from_tty)
1895 {
1896 struct inferior *inferior;
1897 init_wait_for_inferior ();
1898
1899 inferior = current_inferior ();
1900 inferior->stop_soon = STOP_QUIETLY_REMOTE;
1901
1902 /* Always go on waiting for the target, regardless of the mode. */
1903 /* FIXME: cagney/1999-09-23: At present it isn't possible to
1904 indicate to wait_for_inferior that a target should time out if
1905 nothing is returned (instead of just blocking). Because of this,
1906 targets expecting an immediate response need to, internally, set
1907 things up so that the target_wait() is forced to eventually
1908 timeout. */
1909 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
1910 differentiate to its caller what the state of the target is after
1911 the initial open has been performed. Here we're assuming that
1912 the target has stopped. It should be possible to eventually have
1913 target_open() return to the caller an indication that the target
1914 is currently running and GDB state should be set to the same as
1915 for an async run. */
1916 wait_for_inferior (0);
1917
1918 /* Now that the inferior has stopped, do any bookkeeping like
1919 loading shared libraries. We want to do this before normal_stop,
1920 so that the displayed frame is up to date. */
1921 post_create_inferior (&current_target, from_tty);
1922
1923 normal_stop ();
1924 }
1925
1926 /* Initialize static vars when a new inferior begins. */
1927
1928 void
1929 init_wait_for_inferior (void)
1930 {
1931 /* These are meaningless until the first time through wait_for_inferior. */
1932
1933 breakpoint_init_inferior (inf_starting);
1934
1935 clear_proceed_status ();
1936
1937 stepping_past_singlestep_breakpoint = 0;
1938 deferred_step_ptid = null_ptid;
1939
1940 target_last_wait_ptid = minus_one_ptid;
1941
1942 previous_inferior_ptid = null_ptid;
1943 init_infwait_state ();
1944
1945 displaced_step_clear ();
1946
1947 /* Discard any skipped inlined frames. */
1948 clear_inline_frame_state (minus_one_ptid);
1949 }
1950
1951 \f
1952 /* This enum encodes possible reasons for doing a target_wait, so that
1953 wfi can call target_wait in one place. (Ultimately the call will be
1954 moved out of the infinite loop entirely.) */
1955
1956 enum infwait_states
1957 {
1958 infwait_normal_state,
1959 infwait_thread_hop_state,
1960 infwait_step_watch_state,
1961 infwait_nonstep_watch_state
1962 };
1963
1964 /* Why did the inferior stop? Used to print the appropriate messages
1965 to the interface from within handle_inferior_event(). */
1966 enum inferior_stop_reason
1967 {
1968 /* Step, next, nexti, stepi finished. */
1969 END_STEPPING_RANGE,
1970 /* Inferior terminated by signal. */
1971 SIGNAL_EXITED,
1972 /* Inferior exited. */
1973 EXITED,
1974 /* Inferior received signal, and user asked to be notified. */
1975 SIGNAL_RECEIVED,
1976 /* Reverse execution -- target ran out of history info. */
1977 NO_HISTORY
1978 };
1979
1980 /* The PTID we'll do a target_wait on. */
1981 ptid_t waiton_ptid;
1982
1983 /* Current inferior wait state. */
1984 enum infwait_states infwait_state;
1985
1986 /* Data to be passed around while handling an event. This data is
1987 discarded between events. */
1988 struct execution_control_state
1989 {
1990 ptid_t ptid;
1991 /* The thread that got the event, if this was a thread event; NULL
1992 otherwise. */
1993 struct thread_info *event_thread;
1994
1995 struct target_waitstatus ws;
1996 int random_signal;
1997 CORE_ADDR stop_func_start;
1998 CORE_ADDR stop_func_end;
1999 char *stop_func_name;
2000 int new_thread_event;
2001 int wait_some_more;
2002 };
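/* A minimal sketch (mirroring what wait_for_inferior and
   fetch_inferior_event do below) of how one of these is prepared:

       struct execution_control_state ecss;
       struct execution_control_state *ecs = &ecss;

       memset (ecs, 0, sizeof (*ecs));
       ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
       handle_inferior_event (ecs);

   Everything in it is scratch state for a single event. */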
2003
2004 static void handle_inferior_event (struct execution_control_state *ecs);
2005
2006 static void handle_step_into_function (struct gdbarch *gdbarch,
2007 struct execution_control_state *ecs);
2008 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2009 struct execution_control_state *ecs);
2010 static void insert_step_resume_breakpoint_at_frame (struct frame_info *step_frame);
2011 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
2012 static void insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
2013 struct symtab_and_line sr_sal,
2014 struct frame_id sr_id);
2015 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
2016
2017 static void stop_stepping (struct execution_control_state *ecs);
2018 static void prepare_to_wait (struct execution_control_state *ecs);
2019 static void keep_going (struct execution_control_state *ecs);
2020 static void print_stop_reason (enum inferior_stop_reason stop_reason,
2021 int stop_info);
2022
2023 /* Callback for iterate over threads. If the thread is stopped, but
2024 the user/frontend doesn't know about that yet, go through
2025 normal_stop, as if the thread had just stopped now. ARG points at
2026 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2027 ptid_is_pid(PTID) is true, applies to all threads of the process
2028 pointed at by PTID. Otherwise, apply only to the thread pointed by
2029 PTID. */
2030
2031 static int
2032 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2033 {
2034 ptid_t ptid = * (ptid_t *) arg;
2035
2036 if ((ptid_equal (info->ptid, ptid)
2037 || ptid_equal (minus_one_ptid, ptid)
2038 || (ptid_is_pid (ptid)
2039 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2040 && is_running (info->ptid)
2041 && !is_executing (info->ptid))
2042 {
2043 struct cleanup *old_chain;
2044 struct execution_control_state ecss;
2045 struct execution_control_state *ecs = &ecss;
2046
2047 memset (ecs, 0, sizeof (*ecs));
2048
2049 old_chain = make_cleanup_restore_current_thread ();
2050
2051 switch_to_thread (info->ptid);
2052
2053 /* Go through handle_inferior_event/normal_stop, so we always
2054 have consistent output as if the stop event had been
2055 reported. */
2056 ecs->ptid = info->ptid;
2057 ecs->event_thread = find_thread_ptid (info->ptid);
2058 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2059 ecs->ws.value.sig = TARGET_SIGNAL_0;
2060
2061 handle_inferior_event (ecs);
2062
2063 if (!ecs->wait_some_more)
2064 {
2065 struct thread_info *tp;
2066
2067 normal_stop ();
2068
2069 /* Finish off the continuations. The continuations
2070 themselves are responsible for realising the thread
2071 didn't finish what it was supposed to do. */
2072 tp = inferior_thread ();
2073 do_all_intermediate_continuations_thread (tp);
2074 do_all_continuations_thread (tp);
2075 }
2076
2077 do_cleanups (old_chain);
2078 }
2079
2080 return 0;
2081 }
2082
2083 /* This function is attached as a "thread_stop_requested" observer.
2084 Cleanup local state that assumed the PTID was to be resumed, and
2085 report the stop to the frontend. */
2086
2087 static void
2088 infrun_thread_stop_requested (ptid_t ptid)
2089 {
2090 struct displaced_step_request *it, *next, *prev = NULL;
2091
2092 /* PTID was requested to stop. Remove it from the displaced
2093 stepping queue, so we don't try to resume it automatically. */
2094 for (it = displaced_step_request_queue; it; it = next)
2095 {
2096 next = it->next;
2097
2098 if (ptid_equal (it->ptid, ptid)
2099 || ptid_equal (minus_one_ptid, ptid)
2100 || (ptid_is_pid (ptid)
2101 && ptid_get_pid (ptid) == ptid_get_pid (it->ptid)))
2102 {
2103 if (displaced_step_request_queue == it)
2104 displaced_step_request_queue = it->next;
2105 else
2106 prev->next = it->next;
2107
2108 xfree (it);
2109 }
2110 else
2111 prev = it;
2112 }
2113
2114 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2115 }
2116
2117 static void
2118 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2119 {
2120 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2121 nullify_last_target_wait_ptid ();
2122 }
2123
2124 /* Callback for iterate_over_threads. */
2125
2126 static int
2127 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2128 {
2129 if (is_exited (info->ptid))
2130 return 0;
2131
2132 delete_step_resume_breakpoint (info);
2133 return 0;
2134 }
2135
2136 /* In all-stop, delete the step resume breakpoint of any thread that
2137 had one. In non-stop, delete the step resume breakpoint of the
2138 thread that just stopped. */
2139
2140 static void
2141 delete_step_thread_step_resume_breakpoint (void)
2142 {
2143 if (!target_has_execution
2144 || ptid_equal (inferior_ptid, null_ptid))
2145 /* If the inferior has exited, we have already deleted the step
2146 resume breakpoints out of GDB's lists. */
2147 return;
2148
2149 if (non_stop)
2150 {
2151 /* If in non-stop mode, only delete the step-resume or
2152 longjmp-resume breakpoint of the thread that just stopped
2153 stepping. */
2154 struct thread_info *tp = inferior_thread ();
2155 delete_step_resume_breakpoint (tp);
2156 }
2157 else
2158 /* In all-stop mode, delete all step-resume and longjmp-resume
2159 breakpoints of any thread that had them. */
2160 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2161 }
2162
2163 /* A cleanup wrapper. */
2164
2165 static void
2166 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2167 {
2168 delete_step_thread_step_resume_breakpoint ();
2169 }
2170
2171 /* Pretty print the results of target_wait, for debugging purposes. */
2172
2173 static void
2174 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2175 const struct target_waitstatus *ws)
2176 {
2177 char *status_string = target_waitstatus_to_string (ws);
2178 struct ui_file *tmp_stream = mem_fileopen ();
2179 char *text;
2180
2181 /* The text is split over several lines because it was getting too long.
2182 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2183 output as a unit; we want only one timestamp printed if debug_timestamp
2184 is set. */
2185
2186 fprintf_unfiltered (tmp_stream,
2187 "infrun: target_wait (%d", PIDGET (waiton_ptid));
2188 if (PIDGET (waiton_ptid) != -1)
2189 fprintf_unfiltered (tmp_stream,
2190 " [%s]", target_pid_to_str (waiton_ptid));
2191 fprintf_unfiltered (tmp_stream, ", status) =\n");
2192 fprintf_unfiltered (tmp_stream,
2193 "infrun: %d [%s],\n",
2194 PIDGET (result_ptid), target_pid_to_str (result_ptid));
2195 fprintf_unfiltered (tmp_stream,
2196 "infrun: %s\n",
2197 status_string);
2198
2199 text = ui_file_xstrdup (tmp_stream, NULL);
2200
2201 /* This uses %s in part to handle %'s in the text, but also to avoid
2202 a gcc error: the format attribute requires a string literal. */
2203 fprintf_unfiltered (gdb_stdlog, "%s", text);
2204
2205 xfree (status_string);
2206 xfree (text);
2207 ui_file_delete (tmp_stream);
2208 }
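/* With "set debug infrun 1", the lines built here come out roughly as
   follows; the pid value and the exact waitstatus text (which comes
   from target_waitstatus_to_string) are only illustrative:

       infrun: target_wait (-1, status) =
       infrun: 4711 [Thread 4711],
       infrun: status->kind = stopped, signal = SIGTRAP
*/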
2209
2210 /* Wait for control to return from inferior to debugger.
2211
2212 If TREAT_EXEC_AS_SIGTRAP is non-zero, then handle EXEC signals
2213 as if they were SIGTRAP signals. This can be useful during
2214 the startup sequence on some targets such as HP/UX, where
2215 we receive an EXEC event instead of the expected SIGTRAP.
2216
2217 If the inferior gets a signal, we may decide to start it up again
2218 instead of returning. That is why there is a loop in this function.
2219 When this function actually returns it means the inferior
2220 should be left stopped and GDB should read more commands. */
2221
2222 void
2223 wait_for_inferior (int treat_exec_as_sigtrap)
2224 {
2225 struct cleanup *old_cleanups;
2226 struct execution_control_state ecss;
2227 struct execution_control_state *ecs;
2228
2229 if (debug_infrun)
2230 fprintf_unfiltered
2231 (gdb_stdlog, "infrun: wait_for_inferior (treat_exec_as_sigtrap=%d)\n",
2232 treat_exec_as_sigtrap);
2233
2234 old_cleanups =
2235 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2236
2237 ecs = &ecss;
2238 memset (ecs, 0, sizeof (*ecs));
2239
2240 /* We'll update this if & when we switch to a new thread. */
2241 previous_inferior_ptid = inferior_ptid;
2242
2243 while (1)
2244 {
2245 struct cleanup *old_chain;
2246
2247 /* We have to invalidate the registers BEFORE calling target_wait
2248 because they can be loaded from the target while in target_wait.
2249 This makes remote debugging a bit more efficient for those
2250 targets that provide critical registers as part of their normal
2251 status mechanism. */
2252
2253 overlay_cache_invalid = 1;
2254 registers_changed ();
2255
2256 if (deprecated_target_wait_hook)
2257 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2258 else
2259 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2260
2261 if (debug_infrun)
2262 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2263
2264 if (treat_exec_as_sigtrap && ecs->ws.kind == TARGET_WAITKIND_EXECD)
2265 {
2266 xfree (ecs->ws.value.execd_pathname);
2267 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2268 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
2269 }
2270
2271 /* If an error happens while handling the event, propagate GDB's
2272 knowledge of the executing state to the frontend/user running
2273 state. */
2274 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2275
2276 if (ecs->ws.kind == TARGET_WAITKIND_SYSCALL_ENTRY
2277 || ecs->ws.kind == TARGET_WAITKIND_SYSCALL_RETURN)
2278 ecs->ws.value.syscall_number = UNKNOWN_SYSCALL;
2279
2280 /* Now figure out what to do with the result. */
2281 handle_inferior_event (ecs);
2282
2283 /* No error, don't finish the state yet. */
2284 discard_cleanups (old_chain);
2285
2286 if (!ecs->wait_some_more)
2287 break;
2288 }
2289
2290 do_cleanups (old_cleanups);
2291 }
2292
2293 /* Asynchronous version of wait_for_inferior. It is called by the
2294 event loop whenever a change of state is detected on the file
2295 descriptor corresponding to the target. It can be called more than
2296 once to complete a single execution command. In such cases we need
2297 to keep the state in a global variable ECSS. If it is the last time
2298 that this function is called for a single execution command, then
2299 report to the user that the inferior has stopped, and do the
2300 necessary cleanups. */
2301
2302 void
2303 fetch_inferior_event (void *client_data)
2304 {
2305 struct execution_control_state ecss;
2306 struct execution_control_state *ecs = &ecss;
2307 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2308 struct cleanup *ts_old_chain;
2309 int was_sync = sync_execution;
2310
2311 memset (ecs, 0, sizeof (*ecs));
2312
2313 /* We'll update this if & when we switch to a new thread. */
2314 previous_inferior_ptid = inferior_ptid;
2315
2316 if (non_stop)
2317 /* In non-stop mode, the user/frontend should not notice a thread
2318 switch due to internal events. Make sure we revert to the
2319 user-selected thread and frame after handling the event and
2320 running any breakpoint commands. */
2321 make_cleanup_restore_current_thread ();
2322
2323 /* We have to invalidate the registers BEFORE calling target_wait
2324 because they can be loaded from the target while in target_wait.
2325 This makes remote debugging a bit more efficient for those
2326 targets that provide critical registers as part of their normal
2327 status mechanism. */
2328
2329 overlay_cache_invalid = 1;
2330 registers_changed ();
2331
2332 if (deprecated_target_wait_hook)
2333 ecs->ptid =
2334 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2335 else
2336 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2337
2338 if (debug_infrun)
2339 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2340
2341 if (non_stop
2342 && ecs->ws.kind != TARGET_WAITKIND_IGNORE
2343 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2344 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2345 /* In non-stop mode, each thread is handled individually. Switch
2346 early, so the global state is set correctly for this
2347 thread. */
2348 context_switch (ecs->ptid);
2349
2350 /* If an error happens while handling the event, propagate GDB's
2351 knowledge of the executing state to the frontend/user running
2352 state. */
2353 if (!non_stop)
2354 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2355 else
2356 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2357
2358 /* Now figure out what to do with the result. */
2359 handle_inferior_event (ecs);
2360
2361 if (!ecs->wait_some_more)
2362 {
2363 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2364
2365 delete_step_thread_step_resume_breakpoint ();
2366
2367 /* We may not find an inferior if this was a process exit. */
2368 if (inf == NULL || inf->stop_soon == NO_STOP_QUIETLY)
2369 normal_stop ();
2370
2371 if (target_has_execution
2372 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2373 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2374 && ecs->event_thread->step_multi
2375 && ecs->event_thread->stop_step)
2376 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2377 else
2378 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2379 }
2380
2381 /* No error, don't finish the thread states yet. */
2382 discard_cleanups (ts_old_chain);
2383
2384 /* Revert thread and frame. */
2385 do_cleanups (old_chain);
2386
2387 /* If the inferior was in sync execution mode, and now isn't,
2388 restore the prompt. */
2389 if (was_sync && !sync_execution)
2390 display_gdb_prompt (0);
2391 }
2392
2393 /* Record the frame and location we're currently stepping through. */
2394 void
2395 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2396 {
2397 struct thread_info *tp = inferior_thread ();
2398
2399 tp->step_frame_id = get_frame_id (frame);
2400 tp->step_stack_frame_id = get_stack_frame_id (frame);
2401
2402 tp->current_symtab = sal.symtab;
2403 tp->current_line = sal.line;
2404 }
2405
2406 /* Clear context switchable stepping state. */
2407
2408 void
2409 init_thread_stepping_state (struct thread_info *tss)
2410 {
2411 tss->stepping_over_breakpoint = 0;
2412 tss->step_after_step_resume_breakpoint = 0;
2413 tss->stepping_through_solib_after_catch = 0;
2414 tss->stepping_through_solib_catchpoints = NULL;
2415 }
2416
2417 /* Return the cached copy of the last pid/waitstatus returned by
2418 target_wait()/deprecated_target_wait_hook(). The data is actually
2419 cached by handle_inferior_event(), which gets called immediately
2420 after target_wait()/deprecated_target_wait_hook(). */
2421
2422 void
2423 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
2424 {
2425 *ptidp = target_last_wait_ptid;
2426 *status = target_last_waitstatus;
2427 }
2428
2429 void
2430 nullify_last_target_wait_ptid (void)
2431 {
2432 target_last_wait_ptid = minus_one_ptid;
2433 }
2434
2435 /* Switch thread contexts. */
2436
2437 static void
2438 context_switch (ptid_t ptid)
2439 {
2440 if (debug_infrun)
2441 {
2442 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
2443 target_pid_to_str (inferior_ptid));
2444 fprintf_unfiltered (gdb_stdlog, "to %s\n",
2445 target_pid_to_str (ptid));
2446 }
2447
2448 switch_to_thread (ptid);
2449 }
2450
2451 static void
2452 adjust_pc_after_break (struct execution_control_state *ecs)
2453 {
2454 struct regcache *regcache;
2455 struct gdbarch *gdbarch;
2456 struct address_space *aspace;
2457 CORE_ADDR breakpoint_pc;
2458
2459 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
2460 we aren't, just return.
2461
2462 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
2463 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
2464 implemented by software breakpoints should be handled through the normal
2465 breakpoint layer.
2466
2467 NOTE drow/2004-01-31: On some targets, breakpoints may generate
2468 different signals (SIGILL or SIGEMT for instance), but it is less
2469 clear where the PC is pointing afterwards. It may not match
2470 gdbarch_decr_pc_after_break. I don't know any specific target that
2471 generates these signals at breakpoints (the code has been in GDB since at
2472 least 1992) so I can not guess how to handle them here.
2473
2474 In earlier versions of GDB, a target with
2475 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
2476 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
2477 target with both of these set in GDB history, and it seems unlikely to be
2478 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
2479
2480 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
2481 return;
2482
2483 if (ecs->ws.value.sig != TARGET_SIGNAL_TRAP)
2484 return;
2485
2486 /* In reverse execution, when a breakpoint is hit, the instruction
2487 under it has already been de-executed. The reported PC always
2488 points at the breakpoint address, so adjusting it further would
2489 be wrong. E.g., consider this case on a decr_pc_after_break == 1
2490 architecture:
2491
2492 B1 0x08000000 : INSN1
2493 B2 0x08000001 : INSN2
2494 0x08000002 : INSN3
2495 PC -> 0x08000003 : INSN4
2496
2497 Say you're stopped at 0x08000003 as above. Reverse continuing
2498 from that point should hit B2 as below. Reading the PC when the
2499 SIGTRAP is reported should read 0x08000001 and INSN2 should have
2500 been de-executed already.
2501
2502 B1 0x08000000 : INSN1
2503 B2 PC -> 0x08000001 : INSN2
2504 0x08000002 : INSN3
2505 0x08000003 : INSN4
2506
2507 We can't apply the same logic as for forward execution, because
2508 we would wrongly adjust the PC to 0x08000000, since there's a
2509 breakpoint at PC - 1. We'd then report a hit on B1, although
2510 INSN1 hadn't been de-executed yet. Doing nothing is the correct
2511 behaviour. */
2512 if (execution_direction == EXEC_REVERSE)
2513 return;
2514
2515 /* If this target does not decrement the PC after breakpoints, then
2516 we have nothing to do. */
2517 regcache = get_thread_regcache (ecs->ptid);
2518 gdbarch = get_regcache_arch (regcache);
2519 if (gdbarch_decr_pc_after_break (gdbarch) == 0)
2520 return;
2521
2522 aspace = get_regcache_aspace (regcache);
2523
2524 /* Find the location where (if we've hit a breakpoint) the
2525 breakpoint would be. */
2526 breakpoint_pc = regcache_read_pc (regcache)
2527 - gdbarch_decr_pc_after_break (gdbarch);
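/* For example, on a gdbarch_decr_pc_after_break == 1 architecture, a
   breakpoint planted at 0x08000000 leaves the reported PC at
   0x08000001, so BREAKPOINT_PC computes back to 0x08000000, the
   address the breakpoint machinery actually knows about. */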
2528
2529 /* Check whether there actually is a software breakpoint inserted at
2530 that location.
2531
2532 If in non-stop mode, a race condition is possible where we've
2533 removed a breakpoint, but stop events for that breakpoint were
2534 already queued and arrive later. To suppress those spurious
2535 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
2536 and retire them after a number of stop events are reported. */
2537 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
2538 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
2539 {
2540 struct cleanup *old_cleanups = NULL;
2541 if (RECORD_IS_USED)
2542 old_cleanups = record_gdb_operation_disable_set ();
2543
2544 /* When using hardware single-step, a SIGTRAP is reported for both
2545 a completed single-step and a software breakpoint. Need to
2546 differentiate between the two, as the latter needs adjusting
2547 but the former does not.
2548
2549 The SIGTRAP can be due to a completed hardware single-step only if
2550 - we didn't insert software single-step breakpoints
2551 - the thread to be examined is still the current thread
2552 - this thread is currently being stepped
2553
2554 If any of these events did not occur, we must have stopped due
2555 to hitting a software breakpoint, and have to back up to the
2556 breakpoint address.
2557
2558 As a special case, we could have hardware single-stepped a
2559 software breakpoint. In this case (prev_pc == breakpoint_pc),
2560 we also need to back up to the breakpoint address. */
2561
2562 if (singlestep_breakpoints_inserted_p
2563 || !ptid_equal (ecs->ptid, inferior_ptid)
2564 || !currently_stepping (ecs->event_thread)
2565 || ecs->event_thread->prev_pc == breakpoint_pc)
2566 regcache_write_pc (regcache, breakpoint_pc);
2567
2568 if (RECORD_IS_USED)
2569 do_cleanups (old_cleanups);
2570 }
2571 }
2572
2573 void
2574 init_infwait_state (void)
2575 {
2576 waiton_ptid = pid_to_ptid (-1);
2577 infwait_state = infwait_normal_state;
2578 }
2579
2580 void
2581 error_is_running (void)
2582 {
2583 error (_("\
2584 Cannot execute this command while the selected thread is running."));
2585 }
2586
2587 void
2588 ensure_not_running (void)
2589 {
2590 if (is_running (inferior_ptid))
2591 error_is_running ();
2592 }
2593
2594 static int
2595 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
2596 {
2597 for (frame = get_prev_frame (frame);
2598 frame != NULL;
2599 frame = get_prev_frame (frame))
2600 {
2601 if (frame_id_eq (get_frame_id (frame), step_frame_id))
2602 return 1;
2603 if (get_frame_type (frame) != INLINE_FRAME)
2604 break;
2605 }
2606
2607 return 0;
2608 }
2609
2610 /* Auxiliary function that handles syscall entry/return events.
2611 It returns 1 if the inferior should keep going (and GDB
2612 should ignore the event), or 0 if the event deserves to be
2613 processed. */
2614
2615 static int
2616 handle_syscall_event (struct execution_control_state *ecs)
2617 {
2618 struct regcache *regcache;
2619 struct gdbarch *gdbarch;
2620 int syscall_number;
2621
2622 if (!ptid_equal (ecs->ptid, inferior_ptid))
2623 context_switch (ecs->ptid);
2624
2625 regcache = get_thread_regcache (ecs->ptid);
2626 gdbarch = get_regcache_arch (regcache);
2627 syscall_number = gdbarch_get_syscall_number (gdbarch, ecs->ptid);
2628 stop_pc = regcache_read_pc (regcache);
2629
2630 target_last_waitstatus.value.syscall_number = syscall_number;
2631
2632 if (catch_syscall_enabled () > 0
2633 && catching_syscall_number (syscall_number) > 0)
2634 {
2635 if (debug_infrun)
2636 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
2637 syscall_number);
2638
2639 ecs->event_thread->stop_bpstat
2640 = bpstat_stop_status (get_regcache_aspace (regcache),
2641 stop_pc, ecs->ptid);
2642 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
2643
2644 if (!ecs->random_signal)
2645 {
2646 /* Catchpoint hit. */
2647 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
2648 return 0;
2649 }
2650 }
2651
2652 /* If no catchpoint triggered for this, then keep going. */
2653 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
2654 keep_going (ecs);
2655 return 1;
2656 }
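/* Illustrative usage (on a target that reports syscall events): after

       (gdb) catch syscall close
       (gdb) continue

   entering or returning from the named syscall stops with a
   catchpoint hit via the path above, while syscall events with no
   matching catchpoint are quietly resumed by keep_going. */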
2657
2658 /* Given an execution control state that has been freshly filled in
2659 by an event from the inferior, figure out what it means and take
2660 appropriate action. */
2661
2662 static void
2663 handle_inferior_event (struct execution_control_state *ecs)
2664 {
2665 struct frame_info *frame;
2666 struct gdbarch *gdbarch;
2667 struct regcache *regcache;
2668 int sw_single_step_trap_p = 0;
2669 int stopped_by_watchpoint;
2670 int stepped_after_stopped_by_watchpoint = 0;
2671 struct symtab_and_line stop_pc_sal;
2672 enum stop_kind stop_soon;
2673
2674 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
2675 {
2676 /* We had an event in the inferior, but we are not interested in
2677 handling it at this level. The lower layers have already
2678 done what needs to be done, if anything.
2679
2680 One of the possible circumstances for this is when the
2681 inferior produces output for the console. The inferior has
2682 not stopped, and we are ignoring the event. Another possible
2683 circumstance is any event which the lower level knows will be
2684 reported multiple times without an intervening resume. */
2685 if (debug_infrun)
2686 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
2687 prepare_to_wait (ecs);
2688 return;
2689 }
2690
2691 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
2692 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2693 {
2694 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2695 gdb_assert (inf);
2696 stop_soon = inf->stop_soon;
2697 }
2698 else
2699 stop_soon = NO_STOP_QUIETLY;
2700
2701 /* Cache the last pid/waitstatus. */
2702 target_last_wait_ptid = ecs->ptid;
2703 target_last_waitstatus = ecs->ws;
2704
2705 /* Always clear state belonging to the previous time we stopped. */
2706 stop_stack_dummy = 0;
2707
2708 /* If it's a new thread, add it to the thread database. */
2709
2710 ecs->new_thread_event = (!ptid_equal (ecs->ptid, inferior_ptid)
2711 && !ptid_equal (ecs->ptid, minus_one_ptid)
2712 && !in_thread_list (ecs->ptid));
2713
2714 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
2715 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED && ecs->new_thread_event)
2716 add_thread (ecs->ptid);
2717
2718 ecs->event_thread = find_thread_ptid (ecs->ptid);
2719
2720 /* Dependent on valid ECS->EVENT_THREAD. */
2721 adjust_pc_after_break (ecs);
2722
2723 /* Dependent on the current PC value modified by adjust_pc_after_break. */
2724 reinit_frame_cache ();
2725
2726 breakpoint_retire_moribund ();
2727
2728 /* First, distinguish signals caused by the debugger from signals
2729 that have to do with the program's own actions. Note that
2730 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
2731 on the operating system version. Here we detect when a SIGILL or
2732 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
2733 something similar for SIGSEGV, since a SIGSEGV will be generated
2734 when we're trying to execute a breakpoint instruction on a
2735 non-executable stack. This happens for call dummy breakpoints
2736 for architectures like SPARC that place call dummies on the
2737 stack. */
2738 regcache = get_thread_regcache (ecs->ptid);
2739 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
2740 && (ecs->ws.value.sig == TARGET_SIGNAL_ILL
2741 || ecs->ws.value.sig == TARGET_SIGNAL_SEGV
2742 || ecs->ws.value.sig == TARGET_SIGNAL_EMT)
2743 && breakpoint_inserted_here_p (get_regcache_aspace (regcache),
2744 regcache_read_pc (regcache)))
2745 {
2746 if (debug_infrun)
2747 fprintf_unfiltered (gdb_stdlog,
2748 "infrun: Treating signal as SIGTRAP\n");
2749 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
2750 }
2751
2752 /* Mark the non-executing threads accordingly. In all-stop, all
2753 threads of all processes are stopped when we get any event
2754 reported. In non-stop mode, only the event thread stops. If
2755 we're handling a process exit in non-stop mode, there's nothing
2756 to do, as threads of the dead process are gone, and threads of
2757 any other process were left running. */
2758 if (!non_stop)
2759 set_executing (minus_one_ptid, 0);
2760 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2761 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
2762 set_executing (inferior_ptid, 0);
2763
2764 switch (infwait_state)
2765 {
2766 case infwait_thread_hop_state:
2767 if (debug_infrun)
2768 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_thread_hop_state\n");
2769 break;
2770
2771 case infwait_normal_state:
2772 if (debug_infrun)
2773 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
2774 break;
2775
2776 case infwait_step_watch_state:
2777 if (debug_infrun)
2778 fprintf_unfiltered (gdb_stdlog,
2779 "infrun: infwait_step_watch_state\n");
2780
2781 stepped_after_stopped_by_watchpoint = 1;
2782 break;
2783
2784 case infwait_nonstep_watch_state:
2785 if (debug_infrun)
2786 fprintf_unfiltered (gdb_stdlog,
2787 "infrun: infwait_nonstep_watch_state\n");
2788 insert_breakpoints ();
2789
2790 /* FIXME-maybe: is this cleaner than setting a flag? Does it
2791 handle things like signals arriving and other things happening
2792 in combination correctly? */
2793 stepped_after_stopped_by_watchpoint = 1;
2794 break;
2795
2796 default:
2797 internal_error (__FILE__, __LINE__, _("bad switch"));
2798 }
2799
2800 infwait_state = infwait_normal_state;
2801 waiton_ptid = pid_to_ptid (-1);
2802
2803 switch (ecs->ws.kind)
2804 {
2805 case TARGET_WAITKIND_LOADED:
2806 if (debug_infrun)
2807 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
2808 /* Ignore gracefully during startup of the inferior, as it might
2809 be the shell which has just loaded some objects, otherwise
2810 add the symbols for the newly loaded objects. Also ignore at
2811 the beginning of an attach or remote session; we will query
2812 the full list of libraries once the connection is
2813 established. */
2814 if (stop_soon == NO_STOP_QUIETLY)
2815 {
2816 /* Check for any newly added shared libraries if we're
2817 supposed to be adding them automatically. Switch
2818 terminal for any messages produced by
2819 breakpoint_re_set. */
2820 target_terminal_ours_for_output ();
2821 /* NOTE: cagney/2003-11-25: Make certain that the target
2822 stack's section table is kept up-to-date. Architectures,
2823 (e.g., PPC64), use the section table to perform
2824 operations such as address => section name and hence
2825 require the table to contain all sections (including
2826 those found in shared libraries). */
2827 #ifdef SOLIB_ADD
2828 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
2829 #else
2830 solib_add (NULL, 0, &current_target, auto_solib_add);
2831 #endif
2832 target_terminal_inferior ();
2833
2834 /* If requested, stop when the dynamic linker notifies
2835 gdb of events. This allows the user to get control
2836 and place breakpoints in initializer routines for
2837 dynamically loaded objects (among other things). */
2838 if (stop_on_solib_events)
2839 {
2840 /* Make sure we print "Stopped due to solib-event" in
2841 normal_stop. */
2842 stop_print_frame = 1;
2843
2844 stop_stepping (ecs);
2845 return;
2846 }
2847
2848 /* NOTE drow/2007-05-11: This might be a good place to check
2849 for "catch load". */
2850 }
2851
2852 /* If we are skipping through a shell, or through shared library
2853 loading that we aren't interested in, resume the program. If
2854 we're running the program normally, also resume. But stop if
2855 we're attaching or setting up a remote connection. */
2856 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
2857 {
2858 /* Loading of shared libraries might have changed breakpoint
2859 addresses. Make sure new breakpoints are inserted. */
2860 if (stop_soon == NO_STOP_QUIETLY
2861 && !breakpoints_always_inserted_mode ())
2862 insert_breakpoints ();
2863 resume (0, TARGET_SIGNAL_0);
2864 prepare_to_wait (ecs);
2865 return;
2866 }
2867
2868 break;
2869
2870 case TARGET_WAITKIND_SPURIOUS:
2871 if (debug_infrun)
2872 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
2873 resume (0, TARGET_SIGNAL_0);
2874 prepare_to_wait (ecs);
2875 return;
2876
2877 case TARGET_WAITKIND_EXITED:
2878 if (debug_infrun)
2879 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXITED\n");
2880 inferior_ptid = ecs->ptid;
2881 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
2882 set_current_program_space (current_inferior ()->pspace);
2883 handle_vfork_child_exec_or_exit (0);
2884 target_terminal_ours (); /* Must do this before mourn anyway */
2885 print_stop_reason (EXITED, ecs->ws.value.integer);
2886
2887 /* Record the exit code in the convenience variable $_exitcode, so
2888 that the user can inspect this again later. */
2889 set_internalvar_integer (lookup_internalvar ("_exitcode"),
2890 (LONGEST) ecs->ws.value.integer);
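/* The user can inspect that value later, e.g. (illustrative session):

       (gdb) print $_exitcode
       $1 = 0
*/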
2891 gdb_flush (gdb_stdout);
2892 target_mourn_inferior ();
2893 singlestep_breakpoints_inserted_p = 0;
2894 stop_print_frame = 0;
2895 stop_stepping (ecs);
2896 return;
2897
2898 case TARGET_WAITKIND_SIGNALLED:
2899 if (debug_infrun)
2900 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SIGNALLED\n");
2901 inferior_ptid = ecs->ptid;
2902 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
2903 set_current_program_space (current_inferior ()->pspace);
2904 handle_vfork_child_exec_or_exit (0);
2905 stop_print_frame = 0;
2906 target_terminal_ours (); /* Must do this before mourn anyway */
2907
2908 /* Note: By definition of TARGET_WAITKIND_SIGNALLED, we shouldn't
2909 reach here unless the inferior is dead. However, for years
2910 target_kill() was called here, which hints that fatal signals aren't
2911 really fatal on some systems. If that's true, then some changes
2912 may be needed. */
2913 target_mourn_inferior ();
2914
2915 print_stop_reason (SIGNAL_EXITED, ecs->ws.value.sig);
2916 singlestep_breakpoints_inserted_p = 0;
2917 stop_stepping (ecs);
2918 return;
2919
2920 /* The following are the only cases in which we keep going;
2921 the above cases end in a continue or goto. */
2922 case TARGET_WAITKIND_FORKED:
2923 case TARGET_WAITKIND_VFORKED:
2924 if (debug_infrun)
2925 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
2926
2927 if (!ptid_equal (ecs->ptid, inferior_ptid))
2928 {
2929 context_switch (ecs->ptid);
2930 reinit_frame_cache ();
2931 }
2932
2933 /* Immediately detach breakpoints from the child before there's
2934 any chance of letting the user delete breakpoints from the
2935 breakpoint lists. If we don't do this early, it's easy to
2936 leave leftover traps in the child, viz: "break foo; catch
2937 fork; c; <fork>; del; c; <child calls foo>". We only follow
2938 the fork on the last `continue', and by that time the
2939 breakpoint at "foo" is long gone from the breakpoint table.
2940 If we vforked, then we don't need to unpatch here, since both
2941 parent and child are sharing the same memory pages; we'll
2942 need to unpatch at follow/detach time instead to be certain
2943 that new breakpoints added between catchpoint hit time and
2944 vfork follow are detached. */
2945 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
2946 {
2947 int child_pid = ptid_get_pid (ecs->ws.value.related_pid);
2948
2949 /* This won't actually modify the breakpoint list, but will
2950 physically remove the breakpoints from the child. */
2951 detach_breakpoints (child_pid);
2952 }
2953
2954 /* In case the event is caught by a catchpoint, remember that
2955 the event is to be followed at the next resume of the thread,
2956 and not immediately. */
2957 ecs->event_thread->pending_follow = ecs->ws;
2958
2959 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
2960
2961 ecs->event_thread->stop_bpstat
2962 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
2963 stop_pc, ecs->ptid);
2964
2965 /* Note that we're interested in knowing whether the bpstat actually
2966 causes a stop, not just whether it may explain the signal.
2967 Software watchpoints, for example, always appear in the
2968 bpstat. */
2969 ecs->random_signal = !bpstat_causes_stop (ecs->event_thread->stop_bpstat);
2970
2971 /* If no catchpoint triggered for this, then keep going. */
2972 if (ecs->random_signal)
2973 {
2974 ptid_t parent;
2975 ptid_t child;
2976 int should_resume;
2977 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
2978
2979 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
2980
2981 should_resume = follow_fork ();
2982
2983 parent = ecs->ptid;
2984 child = ecs->ws.value.related_pid;
2985
2986 /* In non-stop mode, also resume the other branch. */
2987 if (non_stop && !detach_fork)
2988 {
2989 if (follow_child)
2990 switch_to_thread (parent);
2991 else
2992 switch_to_thread (child);
2993
2994 ecs->event_thread = inferior_thread ();
2995 ecs->ptid = inferior_ptid;
2996 keep_going (ecs);
2997 }
2998
2999 if (follow_child)
3000 switch_to_thread (child);
3001 else
3002 switch_to_thread (parent);
3003
3004 ecs->event_thread = inferior_thread ();
3005 ecs->ptid = inferior_ptid;
3006
3007 if (should_resume)
3008 keep_going (ecs);
3009 else
3010 stop_stepping (ecs);
3011 return;
3012 }
3013 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3014 goto process_event_stop_test;
3015
3016 case TARGET_WAITKIND_VFORK_DONE:
3017 /* Done with the shared memory region. Re-insert breakpoints in
3018 the parent, and keep going. */
3019
3020 if (debug_infrun)
3021 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3022
3023 if (!ptid_equal (ecs->ptid, inferior_ptid))
3024 context_switch (ecs->ptid);
3025
3026 current_inferior ()->waiting_for_vfork_done = 0;
3027 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3028 /* This also takes care of reinserting breakpoints in the
3029 previously locked inferior. */
3030 keep_going (ecs);
3031 return;
3032
3033 case TARGET_WAITKIND_EXECD:
3034 if (debug_infrun)
3035 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3036
3037 if (!ptid_equal (ecs->ptid, inferior_ptid))
3038 {
3039 context_switch (ecs->ptid);
3040 reinit_frame_cache ();
3041 }
3042
3043 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3044
3045 /* Do whatever is necessary to the parent branch of the vfork. */
3046 handle_vfork_child_exec_or_exit (1);
3047
3048 /* This causes the eventpoints and symbol table to be reset.
3049 Must do this now, before trying to determine whether to
3050 stop. */
3051 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3052
3053 ecs->event_thread->stop_bpstat
3054 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3055 stop_pc, ecs->ptid);
3056 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
3057
3058 /* Note that this may be referenced from inside
3059 bpstat_stop_status above, through inferior_has_execd. */
3060 xfree (ecs->ws.value.execd_pathname);
3061 ecs->ws.value.execd_pathname = NULL;
3062
3063 /* If no catchpoint triggered for this, then keep going. */
3064 if (ecs->random_signal)
3065 {
3066 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3067 keep_going (ecs);
3068 return;
3069 }
3070 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3071 goto process_event_stop_test;
3072
3073 /* Be careful not to try to gather much state about a thread
3074 that's in a syscall. It's frequently a losing proposition. */
3075 case TARGET_WAITKIND_SYSCALL_ENTRY:
3076 if (debug_infrun)
3077 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3078 /* Get the current syscall number. */
3079 if (handle_syscall_event (ecs) != 0)
3080 return;
3081 goto process_event_stop_test;
3082
3083 /* Before examining the threads further, step this thread to
3084 get it entirely out of the syscall. (We get notice of the
3085 event when the thread is just on the verge of exiting a
3086 syscall. Stepping one instruction seems to get it back
3087 into user code.) */
3088 case TARGET_WAITKIND_SYSCALL_RETURN:
3089 if (debug_infrun)
3090 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
3091 if (handle_syscall_event (ecs) != 0)
3092 return;
3093 goto process_event_stop_test;
3094
3095 case TARGET_WAITKIND_STOPPED:
3096 if (debug_infrun)
3097 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
3098 ecs->event_thread->stop_signal = ecs->ws.value.sig;
3099 break;
3100
3101 case TARGET_WAITKIND_NO_HISTORY:
3102 /* Reverse execution: target ran out of history info. */
3103 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3104 print_stop_reason (NO_HISTORY, 0);
3105 stop_stepping (ecs);
3106 return;
3107 }
3108
3109 if (ecs->new_thread_event)
3110 {
3111 if (non_stop)
3112 /* Non-stop assumes that the target handles adding new threads
3113 to the thread list. */
3114 internal_error (__FILE__, __LINE__, "\
3115 targets should add new threads to the thread list themselves in non-stop mode.");
3116
3117 /* We may want to consider not doing a resume here in order to
3118 give the user a chance to play with the new thread. It might
3119 be good to make that a user-settable option. */
3120
3121 /* At this point, all threads are stopped (happens automatically
3122 in either the OS or the native code). Therefore we need to
3123 continue all threads in order to make progress. */
3124
3125 if (!ptid_equal (ecs->ptid, inferior_ptid))
3126 context_switch (ecs->ptid);
3127 target_resume (RESUME_ALL, 0, TARGET_SIGNAL_0);
3128 prepare_to_wait (ecs);
3129 return;
3130 }
3131
3132 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED)
3133 {
3134 /* Do we need to clean up the state of a thread that has
3135 completed a displaced single-step? (Doing so usually affects
3136 the PC, so do it here, before we set stop_pc.) */
3137 displaced_step_fixup (ecs->ptid, ecs->event_thread->stop_signal);
3138
3139 /* If we either finished a single-step or hit a breakpoint, but
3140 the user wanted this thread to be stopped, pretend we got a
3141 SIG0 (generic unsignaled stop). */
3142
3143 if (ecs->event_thread->stop_requested
3144 && ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3145 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3146 }
3147
3148 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3149
3150 if (debug_infrun)
3151 {
3152 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3153 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3154 struct cleanup *old_chain = save_inferior_ptid ();
3155
3156 inferior_ptid = ecs->ptid;
3157
3158 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
3159 paddress (gdbarch, stop_pc));
3160 if (target_stopped_by_watchpoint ())
3161 {
3162 CORE_ADDR addr;
3163 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
3164
3165 if (target_stopped_data_address (&current_target, &addr))
3166 fprintf_unfiltered (gdb_stdlog,
3167 "infrun: stopped data address = %s\n",
3168 paddress (gdbarch, addr));
3169 else
3170 fprintf_unfiltered (gdb_stdlog,
3171 "infrun: (no data address available)\n");
3172 }
3173
3174 do_cleanups (old_chain);
3175 }
3176
3177 if (stepping_past_singlestep_breakpoint)
3178 {
3179 gdb_assert (singlestep_breakpoints_inserted_p);
3180 gdb_assert (ptid_equal (singlestep_ptid, ecs->ptid));
3181 gdb_assert (!ptid_equal (singlestep_ptid, saved_singlestep_ptid));
3182
3183 stepping_past_singlestep_breakpoint = 0;
3184
3185 /* We've either finished single-stepping past the single-step
3186 breakpoint, or stopped for some other reason. It would be nice if
3187 we could tell, but we can't reliably. */
3188 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3189 {
3190 if (debug_infrun)
3191 fprintf_unfiltered (gdb_stdlog, "infrun: stepping_past_singlestep_breakpoint\n");
3192 /* Pull the single step breakpoints out of the target. */
3193 remove_single_step_breakpoints ();
3194 singlestep_breakpoints_inserted_p = 0;
3195
3196 ecs->random_signal = 0;
3197 ecs->event_thread->trap_expected = 0;
3198
3199 context_switch (saved_singlestep_ptid);
3200 if (deprecated_context_hook)
3201 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3202
3203 resume (1, TARGET_SIGNAL_0);
3204 prepare_to_wait (ecs);
3205 return;
3206 }
3207 }
3208
3209 if (!ptid_equal (deferred_step_ptid, null_ptid))
3210 {
3211 /* In non-stop mode, there's never a deferred_step_ptid set. */
3212 gdb_assert (!non_stop);
3213
3214 /* If we stopped for some other reason than single-stepping, ignore
3215 the fact that we were supposed to switch back. */
3216 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3217 {
3218 if (debug_infrun)
3219 fprintf_unfiltered (gdb_stdlog,
3220 "infrun: handling deferred step\n");
3221
3222 /* Pull the single step breakpoints out of the target. */
3223 if (singlestep_breakpoints_inserted_p)
3224 {
3225 remove_single_step_breakpoints ();
3226 singlestep_breakpoints_inserted_p = 0;
3227 }
3228
3229 /* Note: We do not call context_switch at this point, as the
3230 context is already set up for stepping the original thread. */
3231 switch_to_thread (deferred_step_ptid);
3232 deferred_step_ptid = null_ptid;
3233 /* Suppress spurious "Switching to ..." message. */
3234 previous_inferior_ptid = inferior_ptid;
3235
3236 resume (1, TARGET_SIGNAL_0);
3237 prepare_to_wait (ecs);
3238 return;
3239 }
3240
3241 deferred_step_ptid = null_ptid;
3242 }
3243
3244 /* See if a thread hit a thread-specific breakpoint that was meant for
3245 another thread. If so, then step that thread past the breakpoint,
3246 and continue it. */
3247
3248 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3249 {
3250 int thread_hop_needed = 0;
3251 struct address_space *aspace =
3252 get_regcache_aspace (get_thread_regcache (ecs->ptid));
3253
3254 /* Check if a regular breakpoint has been hit before checking
3255 for a potential single step breakpoint. Otherwise, GDB will
3256 not see this breakpoint hit when stepping onto breakpoints. */
3257 if (regular_breakpoint_inserted_here_p (aspace, stop_pc))
3258 {
3259 ecs->random_signal = 0;
3260 if (!breakpoint_thread_match (aspace, stop_pc, ecs->ptid))
3261 thread_hop_needed = 1;
3262 }
3263 else if (singlestep_breakpoints_inserted_p)
3264 {
3265 /* We have not context switched yet, so this should be true
3266 no matter which thread hit the singlestep breakpoint. */
3267 gdb_assert (ptid_equal (inferior_ptid, singlestep_ptid));
3268 if (debug_infrun)
3269 fprintf_unfiltered (gdb_stdlog, "infrun: software single step "
3270 "trap for %s\n",
3271 target_pid_to_str (ecs->ptid));
3272
3273 ecs->random_signal = 0;
3274 /* The call to in_thread_list is necessary because PTIDs sometimes
3275 change when we go from single-threaded to multi-threaded. If
3276 the singlestep_ptid is still in the list, assume that it is
3277 really different from ecs->ptid. */
3278 if (!ptid_equal (singlestep_ptid, ecs->ptid)
3279 && in_thread_list (singlestep_ptid))
3280 {
3281 /* If the PC of the thread we were trying to single-step
3282 has changed, discard this event (which we were going
3283 to ignore anyway), and pretend we saw that thread
3284 trap. This prevents us from continuously moving the
3285 single-step breakpoint forward, one instruction at a
3286 time. If the PC has changed, then the thread we were
3287 trying to single-step has trapped or been signalled,
3288 but the event has not been reported to GDB yet.
3289
3290 There might be some cases where this loses signal
3291 information, if a signal has arrived at exactly the
3292 same time that the PC changed, but this is the best
3293 we can do with the information available. Perhaps we
3294 should arrange to report all events for all threads
3295 when they stop, or to re-poll the remote looking for
3296 this particular thread (i.e. temporarily enable
3297 schedlock). */
3298
3299 CORE_ADDR new_singlestep_pc
3300 = regcache_read_pc (get_thread_regcache (singlestep_ptid));
3301
3302 if (new_singlestep_pc != singlestep_pc)
3303 {
3304 enum target_signal stop_signal;
3305
3306 if (debug_infrun)
3307 fprintf_unfiltered (gdb_stdlog, "infrun: unexpected thread,"
3308 " but expected thread advanced also\n");
3309
3310 /* The current context still belongs to
3311 singlestep_ptid. Don't swap here, since that's
3312 the context we want to use. Just fudge our
3313 state and continue. */
3314 stop_signal = ecs->event_thread->stop_signal;
3315 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3316 ecs->ptid = singlestep_ptid;
3317 ecs->event_thread = find_thread_ptid (ecs->ptid);
3318 ecs->event_thread->stop_signal = stop_signal;
3319 stop_pc = new_singlestep_pc;
3320 }
3321 else
3322 {
3323 if (debug_infrun)
3324 fprintf_unfiltered (gdb_stdlog,
3325 "infrun: unexpected thread\n");
3326
3327 thread_hop_needed = 1;
3328 stepping_past_singlestep_breakpoint = 1;
3329 saved_singlestep_ptid = singlestep_ptid;
3330 }
3331 }
3332 }
3333
3334 if (thread_hop_needed)
3335 {
3336 struct regcache *thread_regcache;
3337 int remove_status = 0;
3338
3339 if (debug_infrun)
3340 fprintf_unfiltered (gdb_stdlog, "infrun: thread_hop_needed\n");
3341
3342 /* Switch context before touching inferior memory, the
3343 previous thread may have exited. */
3344 if (!ptid_equal (inferior_ptid, ecs->ptid))
3345 context_switch (ecs->ptid);
3346
3347 /* Saw a breakpoint, but it was hit by the wrong thread.
3348 Just continue. */
3349
3350 if (singlestep_breakpoints_inserted_p)
3351 {
3352 /* Pull the single step breakpoints out of the target. */
3353 remove_single_step_breakpoints ();
3354 singlestep_breakpoints_inserted_p = 0;
3355 }
3356
3357 /* If the arch can displace step, don't remove the
3358 breakpoints. */
3359 thread_regcache = get_thread_regcache (ecs->ptid);
3360 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
3361 remove_status = remove_breakpoints ();
3362
3363 /* Did we fail to remove breakpoints? If so, try
3364 to set the PC past the bp. (There's at least
3365 one situation in which we can fail to remove
3366 the bp's: On HP-UX's that use ttrace, we can't
3367 change the address space of a vforking child
3368 process until the child exits (well, okay, not
3369 then either :-) or execs. */
3370 if (remove_status != 0)
3371 error (_("Cannot step over breakpoint hit in wrong thread"));
3372 else
3373 { /* Single step */
3374 if (!non_stop)
3375 {
3376 /* Only need to require the next event from this
3377 thread in all-stop mode. */
3378 waiton_ptid = ecs->ptid;
3379 infwait_state = infwait_thread_hop_state;
3380 }
3381
3382 ecs->event_thread->stepping_over_breakpoint = 1;
3383 keep_going (ecs);
3384 return;
3385 }
3386 }
3387 else if (singlestep_breakpoints_inserted_p)
3388 {
3389 sw_single_step_trap_p = 1;
3390 ecs->random_signal = 0;
3391 }
3392 }
3393 else
3394 ecs->random_signal = 1;
3395
3396 /* See if something interesting happened to the non-current thread. If
3397 so, then switch to that thread. */
3398 if (!ptid_equal (ecs->ptid, inferior_ptid))
3399 {
3400 if (debug_infrun)
3401 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
3402
3403 context_switch (ecs->ptid);
3404
3405 if (deprecated_context_hook)
3406 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3407 }
3408
3409 /* At this point, get hold of the now-current thread's frame. */
3410 frame = get_current_frame ();
3411 gdbarch = get_frame_arch (frame);
3412
3413 if (singlestep_breakpoints_inserted_p)
3414 {
3415 /* Pull the single step breakpoints out of the target. */
3416 remove_single_step_breakpoints ();
3417 singlestep_breakpoints_inserted_p = 0;
3418 }
3419
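/* Decide whether this stop was caused by a watchpoint: if we just
   finished stepping past the instruction that triggered one, it was
   not; otherwise ask the target which watchpoints, if any, fired.  */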
3420 if (stepped_after_stopped_by_watchpoint)
3421 stopped_by_watchpoint = 0;
3422 else
3423 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
3424
3425 /* If necessary, step over this watchpoint. We'll be back to display
3426 it in a moment. */
3427 if (stopped_by_watchpoint
3428 && (target_have_steppable_watchpoint
3429 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
3430 {
3431 /* At this point, we are stopped at an instruction which has
3432 attempted to write to a piece of memory under control of
3433 a watchpoint. The instruction hasn't actually executed
3434 yet. If we were to evaluate the watchpoint expression
3435 now, we would get the old value, and therefore no change
3436 would seem to have occurred.
3437
3438 In order to make watchpoints work `right', we really need
3439 to complete the memory write, and then evaluate the
3440 watchpoint expression. We do this by single-stepping the
3441 target.
3442
3443 It may not be necessary to disable the watchpoint to step over
3444 it. For example, the PA can (with some kernel cooperation)
3445 single step over a watchpoint without disabling the watchpoint.
3446
3447 It is far more common to need to disable a watchpoint to step
3448 the inferior over it. If we have non-steppable watchpoints,
3449 we must disable the current watchpoint; it's simplest to
3450 disable all watchpoints and breakpoints. */
3451 int hw_step = 1;
3452
3453 if (!target_have_steppable_watchpoint)
3454 remove_breakpoints ();
3455 /* Single step */
3456 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
3457 target_resume (ecs->ptid, hw_step, TARGET_SIGNAL_0);
3458 waiton_ptid = ecs->ptid;
3459 if (target_have_steppable_watchpoint)
3460 infwait_state = infwait_step_watch_state;
3461 else
3462 infwait_state = infwait_nonstep_watch_state;
3463 prepare_to_wait (ecs);
3464 return;
3465 }
3466
3467 ecs->stop_func_start = 0;
3468 ecs->stop_func_end = 0;
3469 ecs->stop_func_name = 0;
3470 /* Don't care about return value; stop_func_start and stop_func_name
3471 will both be 0 if it doesn't work. */
3472 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3473 &ecs->stop_func_start, &ecs->stop_func_end);
3474 ecs->stop_func_start
3475 += gdbarch_deprecated_function_start_offset (gdbarch);
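/* Reset the per-stop state; the checks below fill it in as they
   decide how this event should be handled.  */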
3476 ecs->event_thread->stepping_over_breakpoint = 0;
3477 bpstat_clear (&ecs->event_thread->stop_bpstat);
3478 ecs->event_thread->stop_step = 0;
3479 stop_print_frame = 1;
3480 ecs->random_signal = 0;
3481 stopped_by_random_signal = 0;
3482
3483 /* Hide inlined functions starting here, unless we just performed stepi or
3484 nexti. After stepi and nexti, always show the innermost frame (not any
3485 inline function call sites). */
3486 if (ecs->event_thread->step_range_end != 1)
3487 skip_inline_frames (ecs->ptid);
3488
3489 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3490 && ecs->event_thread->trap_expected
3491 && gdbarch_single_step_through_delay_p (gdbarch)
3492 && currently_stepping (ecs->event_thread))
3493 {
3494 /* We're trying to step off a breakpoint. Turns out that we're
3495 also on an instruction that needs to be stepped multiple
3496 times before it has fully executed. E.g., architectures
3497 with a delay slot. It needs to be stepped twice, once for
3498 the instruction and once for the delay slot. */
3499 int step_through_delay
3500 = gdbarch_single_step_through_delay (gdbarch, frame);
3501 if (debug_infrun && step_through_delay)
3502 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
3503 if (ecs->event_thread->step_range_end == 0 && step_through_delay)
3504 {
3505 /* The user issued a continue when stopped at a breakpoint.
3506 Set up for another trap and get out of here. */
3507 ecs->event_thread->stepping_over_breakpoint = 1;
3508 keep_going (ecs);
3509 return;
3510 }
3511 else if (step_through_delay)
3512 {
3513 /* The user issued a step when stopped at a breakpoint.
3514 Maybe we should stop, maybe we should not - the delay
3515 slot *might* correspond to a line of source. In any
3516 case, don't decide that here, just set
3517 ecs->stepping_over_breakpoint, making sure we
3518 single-step again before breakpoints are re-inserted. */
3519 ecs->event_thread->stepping_over_breakpoint = 1;
3520 }
3521 }
3522
3523 /* Look at the cause of the stop, and decide what to do.
3524 The alternatives are:
3525 1) stop_stepping and return; to really stop and return to the debugger,
3526 2) keep_going and return to start up again
3527 (set ecs->event_thread->stepping_over_breakpoint to 1 to single step once)
3528 3) set ecs->random_signal to 1, and the decision between 1 and 2
3529 will be made according to the signal handling tables. */
3530
3531 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3532 || stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_NO_SIGSTOP
3533 || stop_soon == STOP_QUIETLY_REMOTE)
3534 {
3535 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP && stop_after_trap)
3536 {
3537 if (debug_infrun)
3538 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
3539 stop_print_frame = 0;
3540 stop_stepping (ecs);
3541 return;
3542 }
3543
3544 /* This originates from start_remote(), start_inferior(), and
3545 shared library hook functions. */
3546 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
3547 {
3548 if (debug_infrun)
3549 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3550 stop_stepping (ecs);
3551 return;
3552 }
3553
3554 /* This originates from attach_command(). We need to overwrite
3555 the stop_signal here, because some kernels don't ignore a
3556 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
3557 See more comments in inferior.h. On the other hand, if we
3558 get a non-SIGSTOP, report it to the user - assume the backend
3559 will handle the SIGSTOP if it should show up later.
3560
3561 Also consider that the attach is complete when we see a
3562 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
3563 target extended-remote report it instead of a SIGSTOP
3564 (e.g. gdbserver). We already rely on SIGTRAP being our
3565 signal, so this is no exception.
3566
3567 Also consider that the attach is complete when we see a
3568 TARGET_SIGNAL_0. In non-stop mode, GDB will explicitly tell
3569 the target to stop all threads of the inferior, in case the
3570 low level attach operation doesn't stop them implicitly. If
3571 they weren't stopped implicitly, then the stub will report a
3572 TARGET_SIGNAL_0, meaning: stopped for no particular reason
3573 other than GDB's request. */
3574 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3575 && (ecs->event_thread->stop_signal == TARGET_SIGNAL_STOP
3576 || ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3577 || ecs->event_thread->stop_signal == TARGET_SIGNAL_0))
3578 {
3579 stop_stepping (ecs);
3580 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3581 return;
3582 }
3583
3584 /* See if there is a breakpoint at the current PC. */
3585 ecs->event_thread->stop_bpstat
3586 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3587 stop_pc, ecs->ptid);
3588
3589 /* Set this following the bpstat check, in case a breakpoint
3590 condition called a function. */
3591 stop_print_frame = 1;
3592
3593 /* This is where we handle "moribund" watchpoints. Unlike
3594 software breakpoint traps, hardware watchpoint traps are
3595 always distinguishable from random traps. If no high-level
3596 watchpoint is associated with the reported stop data address
3597 anymore, then the bpstat does not explain the signal ---
3598 simply make sure to ignore it if `stopped_by_watchpoint' is
3599 set. */
3600
3601 if (debug_infrun
3602 && ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3603 && !bpstat_explains_signal (ecs->event_thread->stop_bpstat)
3604 && stopped_by_watchpoint)
3605 fprintf_unfiltered (gdb_stdlog, "\
3606 infrun: no user watchpoint explains watchpoint SIGTRAP, ignoring\n");
3607
3608 /* NOTE: cagney/2003-03-29: These two checks for a random signal
3609 at one stage in the past included checks for an inferior
3610 function call's call dummy's return breakpoint. The original
3611 comment, that went with the test, read:
3612
3613 ``End of a stack dummy. Some systems (e.g. Sony news) give
3614 another signal besides SIGTRAP, so check here as well as
3615 above.''
3616
3617 If someone ever tries to get call dummies on a
3618 non-executable stack to work (where the target would stop
3619 with something like a SIGSEGV), then those tests might need
3620 to be re-instated. Given, however, that the tests were only
3621 enabled when momentary breakpoints were not being used, I
3622 suspect that it won't be the case.
3623
3624 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
3625 be necessary for call dummies on a non-executable stack on
3626 SPARC. */
3627
3628 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3629 ecs->random_signal
3630 = !(bpstat_explains_signal (ecs->event_thread->stop_bpstat)
3631 || stopped_by_watchpoint
3632 || ecs->event_thread->trap_expected
3633 || (ecs->event_thread->step_range_end
3634 && ecs->event_thread->step_resume_breakpoint == NULL));
3635 else
3636 {
3637 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
3638 if (!ecs->random_signal)
3639 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3640 }
3641 }
3642
3643 /* When we reach this point, we've pretty much decided
3644 that the reason for stopping must've been a random
3645 (unexpected) signal. */
3646
3647 else
3648 ecs->random_signal = 1;
3649
3650 process_event_stop_test:
3651
3652 /* Re-fetch current thread's frame in case we did a
3653 "goto process_event_stop_test" above. */
3654 frame = get_current_frame ();
3655 gdbarch = get_frame_arch (frame);
3656
3657 /* For the program's own signals, act according to
3658 the signal handling tables. */
3659
3660 if (ecs->random_signal)
3661 {
3662 /* Signal not for debugging purposes. */
3663 int printed = 0;
3664
3665 if (debug_infrun)
3666 fprintf_unfiltered (gdb_stdlog, "infrun: random signal %d\n",
3667 ecs->event_thread->stop_signal);
3668
3669 stopped_by_random_signal = 1;
3670
3671 if (signal_print[ecs->event_thread->stop_signal])
3672 {
3673 printed = 1;
3674 target_terminal_ours_for_output ();
3675 print_stop_reason (SIGNAL_RECEIVED, ecs->event_thread->stop_signal);
3676 }
3677 /* Always stop on signals if we're either just gaining control
3678 of the program, or the user explicitly requested this thread
3679 to remain stopped. */
3680 if (stop_soon != NO_STOP_QUIETLY
3681 || ecs->event_thread->stop_requested
3682 || signal_stop_state (ecs->event_thread->stop_signal))
3683 {
3684 stop_stepping (ecs);
3685 return;
3686 }
3687 /* If not going to stop, give terminal back
3688 if we took it away. */
3689 else if (printed)
3690 target_terminal_inferior ();
3691
3692 /* Clear the signal if it should not be passed. */
3693 if (signal_program[ecs->event_thread->stop_signal] == 0)
3694 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3695
3696 if (ecs->event_thread->prev_pc == stop_pc
3697 && ecs->event_thread->trap_expected
3698 && ecs->event_thread->step_resume_breakpoint == NULL)
3699 {
3700 /* We were just starting a new sequence, attempting to
3701 single-step off of a breakpoint and expecting a SIGTRAP.
3702 Instead this signal arrives. This signal will take us out
3703 of the stepping range so GDB needs to remember to, when
3704 the signal handler returns, resume stepping off that
3705 breakpoint. */
3706 /* To simplify things, "continue" is forced to use the same
3707 code paths as single-step - set a breakpoint at the
3708 signal return address and then, once hit, step off that
3709 breakpoint. */
3710 if (debug_infrun)
3711 fprintf_unfiltered (gdb_stdlog,
3712 "infrun: signal arrived while stepping over "
3713 "breakpoint\n");
3714
3715 insert_step_resume_breakpoint_at_frame (frame);
3716 ecs->event_thread->step_after_step_resume_breakpoint = 1;
3717 keep_going (ecs);
3718 return;
3719 }
3720
3721 if (ecs->event_thread->step_range_end != 0
3722 && ecs->event_thread->stop_signal != TARGET_SIGNAL_0
3723 && (ecs->event_thread->step_range_start <= stop_pc
3724 && stop_pc < ecs->event_thread->step_range_end)
3725 && frame_id_eq (get_stack_frame_id (frame),
3726 ecs->event_thread->step_stack_frame_id)
3727 && ecs->event_thread->step_resume_breakpoint == NULL)
3728 {
3729 /* The inferior is about to take a signal that will take it
3730 out of the single step range. Set a breakpoint at the
3731 current PC (which is presumably where the signal handler
3732 will eventually return) and then allow the inferior to
3733 run free.
3734
3735 Note that this is only needed for a signal delivered
3736 while in the single-step range. Nested signals aren't a
3737 problem as they eventually all return. */
3738 if (debug_infrun)
3739 fprintf_unfiltered (gdb_stdlog,
3740 "infrun: signal may take us out of "
3741 "single-step range\n");
3742
3743 insert_step_resume_breakpoint_at_frame (frame);
3744 keep_going (ecs);
3745 return;
3746 }
3747
3748 /* Note: step_resume_breakpoint may be non-NULL. This occurs
3749 when either there's a nested signal, or when there's a
3750 pending signal enabled just as the signal handler returns
3751 (leaving the inferior at the step-resume-breakpoint without
3752 actually executing it). Either way continue until the
3753 breakpoint is really hit. */
3754 keep_going (ecs);
3755 return;
3756 }
3757
3758 /* Handle cases caused by hitting a breakpoint. */
3759 {
3760 CORE_ADDR jmp_buf_pc;
3761 struct bpstat_what what;
3762
3763 what = bpstat_what (ecs->event_thread->stop_bpstat);
3764
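/* A call-dummy breakpoint means an inferior function call has
   finished; record that so the stop is treated as the end of the
   call.  */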
3765 if (what.call_dummy)
3766 {
3767 stop_stack_dummy = 1;
3768 }
3769
3770 switch (what.main_action)
3771 {
3772 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
3773 /* If we hit the breakpoint at longjmp while stepping, we
3774 install a momentary breakpoint at the target of the
3775 jmp_buf. */
3776
3777 if (debug_infrun)
3778 fprintf_unfiltered (gdb_stdlog,
3779 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
3780
3781 ecs->event_thread->stepping_over_breakpoint = 1;
3782
3783 if (!gdbarch_get_longjmp_target_p (gdbarch)
3784 || !gdbarch_get_longjmp_target (gdbarch, frame, &jmp_buf_pc))
3785 {
3786 if (debug_infrun)
3787 fprintf_unfiltered (gdb_stdlog, "\
3788 infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME (!gdbarch_get_longjmp_target)\n");
3789 keep_going (ecs);
3790 return;
3791 }
3792
3793 /* We're going to replace the current step-resume breakpoint
3794 with a longjmp-resume breakpoint. */
3795 delete_step_resume_breakpoint (ecs->event_thread);
3796
3797 /* Insert a breakpoint at resume address. */
3798 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
3799
3800 keep_going (ecs);
3801 return;
3802
3803 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
3804 if (debug_infrun)
3805 fprintf_unfiltered (gdb_stdlog,
3806 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
3807
3808 gdb_assert (ecs->event_thread->step_resume_breakpoint != NULL);
3809 delete_step_resume_breakpoint (ecs->event_thread);
3810
3811 ecs->event_thread->stop_step = 1;
3812 print_stop_reason (END_STEPPING_RANGE, 0);
3813 stop_stepping (ecs);
3814 return;
3815
3816 case BPSTAT_WHAT_SINGLE:
3817 if (debug_infrun)
3818 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
3819 ecs->event_thread->stepping_over_breakpoint = 1;
3820 /* Still need to check other stuff, at least the case
3821 where we are stepping and step out of the right range. */
3822 break;
3823
3824 case BPSTAT_WHAT_STOP_NOISY:
3825 if (debug_infrun)
3826 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
3827 stop_print_frame = 1;
3828
3829 /* We are about to nuke the step_resume_breakpoint via the
3830 cleanup chain, so no need to worry about it here. */
3831
3832 stop_stepping (ecs);
3833 return;
3834
3835 case BPSTAT_WHAT_STOP_SILENT:
3836 if (debug_infrun)
3837 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
3838 stop_print_frame = 0;
3839
3840 /* We are about to nuke the step_resume_breakpoint via the
3841 cleanup chain, so no need to worry about it here. */
3842
3843 stop_stepping (ecs);
3844 return;
3845
3846 case BPSTAT_WHAT_STEP_RESUME:
3847 if (debug_infrun)
3848 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
3849
3850 delete_step_resume_breakpoint (ecs->event_thread);
3851 if (ecs->event_thread->step_after_step_resume_breakpoint)
3852 {
3853 /* Back when the step-resume breakpoint was inserted, we
3854 were trying to single-step off a breakpoint. Go back
3855 to doing that. */
3856 ecs->event_thread->step_after_step_resume_breakpoint = 0;
3857 ecs->event_thread->stepping_over_breakpoint = 1;
3858 keep_going (ecs);
3859 return;
3860 }
3861 if (stop_pc == ecs->stop_func_start
3862 && execution_direction == EXEC_REVERSE)
3863 {
3864 /* We are stepping over a function call in reverse, and
3865 just hit the step-resume breakpoint at the start
3866 address of the function. Go back to single-stepping,
3867 which should take us back to the function call. */
3868 ecs->event_thread->stepping_over_breakpoint = 1;
3869 keep_going (ecs);
3870 return;
3871 }
3872 break;
3873
3874 case BPSTAT_WHAT_CHECK_SHLIBS:
3875 {
3876 if (debug_infrun)
3877 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_CHECK_SHLIBS\n");
3878
3879 /* Check for any newly added shared libraries if we're
3880 supposed to be adding them automatically. Switch
3881 terminal for any messages produced by
3882 breakpoint_re_set. */
3883 target_terminal_ours_for_output ();
3884 /* NOTE: cagney/2003-11-25: Make certain that the target
3885 stack's section table is kept up-to-date. Architectures,
3886 (e.g., PPC64), use the section table to perform
3887 operations such as address => section name and hence
3888 require the table to contain all sections (including
3889 those found in shared libraries). */
3890 #ifdef SOLIB_ADD
3891 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
3892 #else
3893 solib_add (NULL, 0, &current_target, auto_solib_add);
3894 #endif
3895 target_terminal_inferior ();
3896
3897 /* If requested, stop when the dynamic linker notifies
3898 gdb of events. This allows the user to get control
3899 and place breakpoints in initializer routines for
3900 dynamically loaded objects (among other things). */
3901 if (stop_on_solib_events || stop_stack_dummy)
3902 {
3903 stop_stepping (ecs);
3904 return;
3905 }
3906 else
3907 {
3908 /* We want to step over this breakpoint, then keep going. */
3909 ecs->event_thread->stepping_over_breakpoint = 1;
3910 break;
3911 }
3912 }
3913 break;
3914
3915 case BPSTAT_WHAT_CHECK_JIT:
3916 if (debug_infrun)
3917 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_CHECK_JIT\n");
3918
3919 /* Switch terminal for any messages produced by breakpoint_re_set. */
3920 target_terminal_ours_for_output ();
3921
3922 jit_event_handler (gdbarch);
3923
3924 target_terminal_inferior ();
3925
3926 /* We want to step over this breakpoint, then keep going. */
3927 ecs->event_thread->stepping_over_breakpoint = 1;
3928
3929 break;
3930
3931 case BPSTAT_WHAT_LAST:
3932 /* Not a real code, but listed here to shut up gcc -Wall. */
3933
3934 case BPSTAT_WHAT_KEEP_CHECKING:
3935 break;
3936 }
3937 }
3938
3939 /* We come here if we hit a breakpoint but should not
3940 stop for it. Possibly we also were stepping
3941 and should stop for that. So fall through and
3942 test for stepping. But, if not stepping,
3943 do not stop. */
3944
3945 /* In all-stop mode, if we're currently stepping but have stopped in
3946 some other thread, we need to switch back to the stepped thread. */
3947 if (!non_stop)
3948 {
3949 struct thread_info *tp;
3950 tp = iterate_over_threads (currently_stepping_or_nexting_callback,
3951 ecs->event_thread);
3952 if (tp)
3953 {
3954 /* However, if the current thread is blocked on some internal
3955 breakpoint, and we simply need to step over that breakpoint
3956 to get it going again, do that first. */
3957 if ((ecs->event_thread->trap_expected
3958 && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP)
3959 || ecs->event_thread->stepping_over_breakpoint)
3960 {
3961 keep_going (ecs);
3962 return;
3963 }
3964
3965 /* If the stepping thread exited, then don't try to switch
3966 back and resume it, which could fail in several different
3967 ways depending on the target. Instead, just keep going.
3968
3969 We can find a stepping dead thread in the thread list in
3970 two cases:
3971
3972 - The target supports thread exit events, and when the
3973 target tries to delete the thread from the thread list,
3974 inferior_ptid pointed at the exiting thread. In such a
3975 case, calling delete_thread does not really remove the
3976 thread from the list; instead, the thread is left listed,
3977 with 'exited' state.
3978
3979 - The target's debug interface does not support thread
3980 exit events, and so we have no idea whatsoever if the
3981 previously stepping thread is still alive. For that
3982 reason, we need to synchronously query the target
3983 now. */
3984 if (is_exited (tp->ptid)
3985 || !target_thread_alive (tp->ptid))
3986 {
3987 if (debug_infrun)
3988 fprintf_unfiltered (gdb_stdlog, "\
3989 infrun: not switching back to stepped thread, it has vanished\n");
3990
3991 delete_thread (tp->ptid);
3992 keep_going (ecs);
3993 return;
3994 }
3995
3996 /* Otherwise, we no longer expect a trap in the current thread.
3997 Clear the trap_expected flag before switching back -- this is
3998 what keep_going would do as well, if we called it. */
3999 ecs->event_thread->trap_expected = 0;
4000
4001 if (debug_infrun)
4002 fprintf_unfiltered (gdb_stdlog,
4003 "infrun: switching back to stepped thread\n");
4004
4005 ecs->event_thread = tp;
4006 ecs->ptid = tp->ptid;
4007 context_switch (ecs->ptid);
4008 keep_going (ecs);
4009 return;
4010 }
4011 }
4012
4013 /* Are we stepping to get the inferior out of the dynamic linker's
4014 hook (and possibly the dld itself) after catching a shlib
4015 event? */
4016 if (ecs->event_thread->stepping_through_solib_after_catch)
4017 {
4018 #if defined(SOLIB_ADD)
4019 /* Have we reached our destination? If not, keep going. */
4020 if (SOLIB_IN_DYNAMIC_LINKER (PIDGET (ecs->ptid), stop_pc))
4021 {
4022 if (debug_infrun)
4023 fprintf_unfiltered (gdb_stdlog, "infrun: stepping in dynamic linker\n");
4024 ecs->event_thread->stepping_over_breakpoint = 1;
4025 keep_going (ecs);
4026 return;
4027 }
4028 #endif
4029 if (debug_infrun)
4030 fprintf_unfiltered (gdb_stdlog, "infrun: step past dynamic linker\n");
4031 /* Else, stop and report the catchpoint(s) whose triggering
4032 caused us to begin stepping. */
4033 ecs->event_thread->stepping_through_solib_after_catch = 0;
4034 bpstat_clear (&ecs->event_thread->stop_bpstat);
4035 ecs->event_thread->stop_bpstat
4036 = bpstat_copy (ecs->event_thread->stepping_through_solib_catchpoints);
4037 bpstat_clear (&ecs->event_thread->stepping_through_solib_catchpoints);
4038 stop_print_frame = 1;
4039 stop_stepping (ecs);
4040 return;
4041 }
4042
4043 if (ecs->event_thread->step_resume_breakpoint)
4044 {
4045 if (debug_infrun)
4046 fprintf_unfiltered (gdb_stdlog,
4047 "infrun: step-resume breakpoint is inserted\n");
4048
4049 /* Having a step-resume breakpoint overrides anything
4050 else having to do with stepping commands until
4051 that breakpoint is reached. */
4052 keep_going (ecs);
4053 return;
4054 }
4055
4056 if (ecs->event_thread->step_range_end == 0)
4057 {
4058 if (debug_infrun)
4059 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4060 /* Likewise if we aren't even stepping. */
4061 keep_going (ecs);
4062 return;
4063 }
4064
4065 /* Re-fetch current thread's frame in case the code above caused
4066 the frame cache to be re-initialized, making our FRAME variable
4067 a dangling pointer. */
4068 frame = get_current_frame ();
4069
4070 /* If stepping through a line, keep going if still within it.
4071
4072 Note that step_range_end is the address of the first instruction
4073 beyond the step range, and NOT the address of the last instruction
4074 within it!
4075
4076 Note also that during reverse execution, we may be stepping
4077 through a function epilogue and therefore must detect when
4078 the current-frame changes in the middle of a line. */
4079
4080 if (stop_pc >= ecs->event_thread->step_range_start
4081 && stop_pc < ecs->event_thread->step_range_end
4082 && (execution_direction != EXEC_REVERSE
4083 || frame_id_eq (get_frame_id (frame),
4084 ecs->event_thread->step_frame_id)))
4085 {
4086 if (debug_infrun)
4087 fprintf_unfiltered
4088 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4089 paddress (gdbarch, ecs->event_thread->step_range_start),
4090 paddress (gdbarch, ecs->event_thread->step_range_end));
4091
4092 /* When stepping backward, stop at beginning of line range
4093 (unless it's the function entry point, in which case
4094 keep going back to the call point). */
4095 if (stop_pc == ecs->event_thread->step_range_start
4096 && stop_pc != ecs->stop_func_start
4097 && execution_direction == EXEC_REVERSE)
4098 {
4099 ecs->event_thread->stop_step = 1;
4100 print_stop_reason (END_STEPPING_RANGE, 0);
4101 stop_stepping (ecs);
4102 }
4103 else
4104 keep_going (ecs);
4105
4106 return;
4107 }
4108
4109 /* We stepped out of the stepping range. */
4110
4111 /* If we are stepping at the source level and entered the runtime
4112 loader dynamic symbol resolution code...
4113
4114 EXEC_FORWARD: we keep on single stepping until we exit the run
4115 time loader code and reach the callee's address.
4116
4117 EXEC_REVERSE: we've already executed the callee (backward), and
4118 the runtime loader code is handled just like any other
4119 undebuggable function call. Now we need only keep stepping
4120 backward through the trampoline code, and that's handled further
4121 down, so there is nothing for us to do here. */
4122
4123 if (execution_direction != EXEC_REVERSE
4124 && ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4125 && in_solib_dynsym_resolve_code (stop_pc))
4126 {
4127 CORE_ADDR pc_after_resolver =
4128 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4129
4130 if (debug_infrun)
4131 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into dynsym resolve code\n");
4132
4133 if (pc_after_resolver)
4134 {
4135 /* Set up a step-resume breakpoint at the address
4136 indicated by SKIP_SOLIB_RESOLVER. */
4137 struct symtab_and_line sr_sal;
4138 init_sal (&sr_sal);
4139 sr_sal.pc = pc_after_resolver;
4140 sr_sal.pspace = get_frame_program_space (frame);
4141
4142 insert_step_resume_breakpoint_at_sal (gdbarch,
4143 sr_sal, null_frame_id);
4144 }
4145
4146 keep_going (ecs);
4147 return;
4148 }
4149
4150 if (ecs->event_thread->step_range_end != 1
4151 && (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4152 || ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4153 && get_frame_type (frame) == SIGTRAMP_FRAME)
4154 {
4155 if (debug_infrun)
4156 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into signal trampoline\n");
4157 /* The inferior, while doing a "step" or "next", has ended up in
4158 a signal trampoline (either by a signal being delivered or by
4159 the signal handler returning). Just single-step until the
4160 inferior leaves the trampoline (either by calling the handler
4161 or returning). */
4162 keep_going (ecs);
4163 return;
4164 }
4165
4166 /* Check for subroutine calls. The check for the current frame
4167 equalling the step ID is not necessary - the check of the
4168 previous frame's ID is sufficient - but it is a common case and
4169 cheaper than checking the previous frame's ID.
4170
4171 NOTE: frame_id_eq will never report two invalid frame IDs as
4172 being equal, so to get into this block, both the current and
4173 previous frame must have valid frame IDs. */
4174 /* The outer_frame_id check is a heuristic to detect stepping
4175 through startup code. If we step over an instruction which
4176 sets the stack pointer from an invalid value to a valid value,
4177 we may detect that as a subroutine call from the mythical
4178 "outermost" function. This could be fixed by marking
4179 outermost frames as !stack_p,code_p,special_p. Then the
4180 initial outermost frame, before sp was valid, would
4181 have code_addr == &_start. See the comment in frame_id_eq
4182 for more. */
4183 if (!frame_id_eq (get_stack_frame_id (frame),
4184 ecs->event_thread->step_stack_frame_id)
4185 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4186 ecs->event_thread->step_stack_frame_id)
4187 && (!frame_id_eq (ecs->event_thread->step_stack_frame_id,
4188 outer_frame_id)
4189 || step_start_function != find_pc_function (stop_pc))))
4190 {
4191 CORE_ADDR real_stop_pc;
4192
4193 if (debug_infrun)
4194 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4195
4196 if ((ecs->event_thread->step_over_calls == STEP_OVER_NONE)
4197 || ((ecs->event_thread->step_range_end == 1)
4198 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4199 ecs->stop_func_start)))
4200 {
4201 /* I presume that step_over_calls is only 0 when we're
4202 supposed to be stepping at the assembly language level
4203 ("stepi"). Just stop. */
4204 /* Also, maybe we just did a "nexti" inside a prolog, so we
4205 thought it was a subroutine call but it was not. Stop as
4206 well. FENN */
4207 /* And this works the same backward as frontward. MVS */
4208 ecs->event_thread->stop_step = 1;
4209 print_stop_reason (END_STEPPING_RANGE, 0);
4210 stop_stepping (ecs);
4211 return;
4212 }
4213
4214 /* Reverse stepping through solib trampolines. */
4215
4216 if (execution_direction == EXEC_REVERSE
4217 && ecs->event_thread->step_over_calls != STEP_OVER_NONE
4218 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4219 || (ecs->stop_func_start == 0
4220 && in_solib_dynsym_resolve_code (stop_pc))))
4221 {
4222 /* Any solib trampoline code can be handled in reverse
4223 by simply continuing to single-step. We have already
4224 executed the solib function (backwards), and a few
4225 steps will take us back through the trampoline to the
4226 caller. */
4227 keep_going (ecs);
4228 return;
4229 }
4230
4231 if (ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4232 {
4233 /* We're doing a "next".
4234
4235 Normal (forward) execution: set a breakpoint at the
4236 callee's return address (the address at which the caller
4237 will resume).
4238
4239 Reverse (backward) execution. set the step-resume
4240 breakpoint at the start of the function that we just
4241 stepped into (backwards), and continue to there. When we
4242 get there, we'll need to single-step back to the caller. */
4243
4244 if (execution_direction == EXEC_REVERSE)
4245 {
4246 struct symtab_and_line sr_sal;
4247
4248 /* Normal function call return (static or dynamic). */
4249 init_sal (&sr_sal);
4250 sr_sal.pc = ecs->stop_func_start;
4251 sr_sal.pspace = get_frame_program_space (frame);
4252 insert_step_resume_breakpoint_at_sal (gdbarch,
4253 sr_sal, null_frame_id);
4254 }
4255 else
4256 insert_step_resume_breakpoint_at_caller (frame);
4257
4258 keep_going (ecs);
4259 return;
4260 }
4261
4262 /* If we are in a function call trampoline (a stub between the
4263 calling routine and the real function), locate the real
4264 function. That's what tells us (a) whether we want to step
4265 into it at all, and (b) what prologue we want to run to the
4266 end of, if we do step into it. */
4267 real_stop_pc = skip_language_trampoline (frame, stop_pc);
4268 if (real_stop_pc == 0)
4269 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4270 if (real_stop_pc != 0)
4271 ecs->stop_func_start = real_stop_pc;
4272
4273 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
4274 {
4275 struct symtab_and_line sr_sal;
4276 init_sal (&sr_sal);
4277 sr_sal.pc = ecs->stop_func_start;
4278 sr_sal.pspace = get_frame_program_space (frame);
4279
4280 insert_step_resume_breakpoint_at_sal (gdbarch,
4281 sr_sal, null_frame_id);
4282 keep_going (ecs);
4283 return;
4284 }
4285
4286 /* If we have line number information for the function we are
4287 thinking of stepping into, step into it.
4288
4289 If there are several symtabs at that PC (e.g. with include
4290 files), just want to know whether *any* of them have line
4291 numbers. find_pc_line handles this. */
4292 {
4293 struct symtab_and_line tmp_sal;
4294
4295 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
4296 tmp_sal.pspace = get_frame_program_space (frame);
4297 if (tmp_sal.line != 0)
4298 {
4299 if (execution_direction == EXEC_REVERSE)
4300 handle_step_into_function_backward (gdbarch, ecs);
4301 else
4302 handle_step_into_function (gdbarch, ecs);
4303 return;
4304 }
4305 }
4306
4307 /* If we have no line number and the step-stop-if-no-debug is
4308 set, we stop the step so that the user has a chance to switch
4309 to assembly mode. */
4310 if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4311 && step_stop_if_no_debug)
4312 {
4313 ecs->event_thread->stop_step = 1;
4314 print_stop_reason (END_STEPPING_RANGE, 0);
4315 stop_stepping (ecs);
4316 return;
4317 }
4318
4319 if (execution_direction == EXEC_REVERSE)
4320 {
4321 /* Set a breakpoint at callee's start address.
4322 From there we can step once and be back in the caller. */
4323 struct symtab_and_line sr_sal;
4324 init_sal (&sr_sal);
4325 sr_sal.pc = ecs->stop_func_start;
4326 sr_sal.pspace = get_frame_program_space (frame);
4327 insert_step_resume_breakpoint_at_sal (gdbarch,
4328 sr_sal, null_frame_id);
4329 }
4330 else
4331 /* Set a breakpoint at callee's return address (the address
4332 at which the caller will resume). */
4333 insert_step_resume_breakpoint_at_caller (frame);
4334
4335 keep_going (ecs);
4336 return;
4337 }
4338
4339 /* Reverse stepping through solib trampolines. */
4340
4341 if (execution_direction == EXEC_REVERSE
4342 && ecs->event_thread->step_over_calls != STEP_OVER_NONE)
4343 {
4344 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4345 || (ecs->stop_func_start == 0
4346 && in_solib_dynsym_resolve_code (stop_pc)))
4347 {
4348 /* Any solib trampoline code can be handled in reverse
4349 by simply continuing to single-step. We have already
4350 executed the solib function (backwards), and a few
4351 steps will take us back through the trampoline to the
4352 caller. */
4353 keep_going (ecs);
4354 return;
4355 }
4356 else if (in_solib_dynsym_resolve_code (stop_pc))
4357 {
4358 /* Stepped backward into the solib dynsym resolver.
4359 Set a breakpoint at its start and continue, then
4360 one more step will take us out. */
4361 struct symtab_and_line sr_sal;
4362 init_sal (&sr_sal);
4363 sr_sal.pc = ecs->stop_func_start;
4364 sr_sal.pspace = get_frame_program_space (frame);
4365 insert_step_resume_breakpoint_at_sal (gdbarch,
4366 sr_sal, null_frame_id);
4367 keep_going (ecs);
4368 return;
4369 }
4370 }
4371
4372 /* If we're in the return path from a shared library trampoline,
4373 we want to proceed through the trampoline when stepping. */
4374 if (gdbarch_in_solib_return_trampoline (gdbarch,
4375 stop_pc, ecs->stop_func_name))
4376 {
4377 /* Determine where this trampoline returns. */
4378 CORE_ADDR real_stop_pc;
4379 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4380
4381 if (debug_infrun)
4382 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into solib return tramp\n");
4383
4384 /* Only proceed through if we know where it's going. */
4385 if (real_stop_pc)
4386 {
4387 /* And put the step-breakpoint there and go until there. */
4388 struct symtab_and_line sr_sal;
4389
4390 init_sal (&sr_sal); /* initialize to zeroes */
4391 sr_sal.pc = real_stop_pc;
4392 sr_sal.section = find_pc_overlay (sr_sal.pc);
4393 sr_sal.pspace = get_frame_program_space (frame);
4394
4395 /* Do not specify what the fp should be when we stop since
4396 on some machines the prologue is where the new fp value
4397 is established. */
4398 insert_step_resume_breakpoint_at_sal (gdbarch,
4399 sr_sal, null_frame_id);
4400
4401 /* Restart without fiddling with the step ranges or
4402 other state. */
4403 keep_going (ecs);
4404 return;
4405 }
4406 }
4407
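/* Look up the source line containing the new PC, for the
   line-stepping checks below.  */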
4408 stop_pc_sal = find_pc_line (stop_pc, 0);
4409
4410 /* NOTE: tausq/2004-05-24: This if block used to be done before all
4411 the trampoline processing logic, however, there are some trampolines
4412 that have no names, so we should do trampoline handling first. */
4413 if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4414 && ecs->stop_func_name == NULL
4415 && stop_pc_sal.line == 0)
4416 {
4417 if (debug_infrun)
4418 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into undebuggable function\n");
4419
4420 /* The inferior just stepped into, or returned to, an
4421 undebuggable function (where there is no debugging information
4422 and no line number corresponding to the address where the
4423 inferior stopped). Since we want to skip this kind of code,
4424 we keep going until the inferior returns from this
4425 function - unless the user has asked us not to (via
4426 set step-mode) or we no longer know how to get back
4427 to the call site. */
4428 if (step_stop_if_no_debug
4429 || !frame_id_p (frame_unwind_caller_id (frame)))
4430 {
4431 /* If we have no line number and the step-stop-if-no-debug
4432 is set, we stop the step so that the user has a chance to
4433 switch to assembly mode. */
4434 ecs->event_thread->stop_step = 1;
4435 print_stop_reason (END_STEPPING_RANGE, 0);
4436 stop_stepping (ecs);
4437 return;
4438 }
4439 else
4440 {
4441 /* Set a breakpoint at callee's return address (the address
4442 at which the caller will resume). */
4443 insert_step_resume_breakpoint_at_caller (frame);
4444 keep_going (ecs);
4445 return;
4446 }
4447 }
4448
4449 if (ecs->event_thread->step_range_end == 1)
4450 {
4451 /* It is stepi or nexti. We always want to stop stepping after
4452 one instruction. */
4453 if (debug_infrun)
4454 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
4455 ecs->event_thread->stop_step = 1;
4456 print_stop_reason (END_STEPPING_RANGE, 0);
4457 stop_stepping (ecs);
4458 return;
4459 }
4460
4461 if (stop_pc_sal.line == 0)
4462 {
4463 /* We have no line number information. That means to stop
4464 stepping (does this always happen right after one instruction,
4465 when we do "s" in a function with no line numbers,
4466 or can this happen as a result of a return or longjmp?). */
4467 if (debug_infrun)
4468 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
4469 ecs->event_thread->stop_step = 1;
4470 print_stop_reason (END_STEPPING_RANGE, 0);
4471 stop_stepping (ecs);
4472 return;
4473 }
4474
4475 /* Look for "calls" to inlined functions, part one. If the inline
4476 frame machinery detected some skipped call sites, we have entered
4477 a new inline function. */
4478
4479 if (frame_id_eq (get_frame_id (get_current_frame ()),
4480 ecs->event_thread->step_frame_id)
4481 && inline_skipped_frames (ecs->ptid))
4482 {
4483 struct symtab_and_line call_sal;
4484
4485 if (debug_infrun)
4486 fprintf_unfiltered (gdb_stdlog,
4487 "infrun: stepped into inlined function\n");
4488
4489 find_frame_sal (get_current_frame (), &call_sal);
4490
4491 if (ecs->event_thread->step_over_calls != STEP_OVER_ALL)
4492 {
4493 /* For "step", we're going to stop. But if the call site
4494 for this inlined function is on the same source line as
4495 we were previously stepping, go down into the function
4496 first. Otherwise stop at the call site. */
4497
4498 if (call_sal.line == ecs->event_thread->current_line
4499 && call_sal.symtab == ecs->event_thread->current_symtab)
4500 step_into_inline_frame (ecs->ptid);
4501
4502 ecs->event_thread->stop_step = 1;
4503 print_stop_reason (END_STEPPING_RANGE, 0);
4504 stop_stepping (ecs);
4505 return;
4506 }
4507 else
4508 {
4509 /* For "next", we should stop at the call site if it is on a
4510 different source line. Otherwise continue through the
4511 inlined function. */
4512 if (call_sal.line == ecs->event_thread->current_line
4513 && call_sal.symtab == ecs->event_thread->current_symtab)
4514 keep_going (ecs);
4515 else
4516 {
4517 ecs->event_thread->stop_step = 1;
4518 print_stop_reason (END_STEPPING_RANGE, 0);
4519 stop_stepping (ecs);
4520 }
4521 return;
4522 }
4523 }
4524
4525 /* Look for "calls" to inlined functions, part two. If we are still
4526 in the same real function we were stepping through, but we have
4527 to go further up to find the exact frame ID, we are stepping
4528 through a more inlined call beyond its call site. */
4529
4530 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
4531 && !frame_id_eq (get_frame_id (get_current_frame ()),
4532 ecs->event_thread->step_frame_id)
4533 && stepped_in_from (get_current_frame (),
4534 ecs->event_thread->step_frame_id))
4535 {
4536 if (debug_infrun)
4537 fprintf_unfiltered (gdb_stdlog,
4538 "infrun: stepping through inlined function\n");
4539
4540 if (ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4541 keep_going (ecs);
4542 else
4543 {
4544 ecs->event_thread->stop_step = 1;
4545 print_stop_reason (END_STEPPING_RANGE, 0);
4546 stop_stepping (ecs);
4547 }
4548 return;
4549 }
4550
4551 if ((stop_pc == stop_pc_sal.pc)
4552 && (ecs->event_thread->current_line != stop_pc_sal.line
4553 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
4554 {
4555 /* We are at the start of a different line. So stop. Note that
4556 we don't stop if we step into the middle of a different line.
4557 That is said to make things like for (;;) statements work
4558 better. */
4559 if (debug_infrun)
4560 fprintf_unfiltered (gdb_stdlog, "infrun: stepped to a different line\n");
4561 ecs->event_thread->stop_step = 1;
4562 print_stop_reason (END_STEPPING_RANGE, 0);
4563 stop_stepping (ecs);
4564 return;
4565 }
4566
4567 /* We aren't done stepping.
4568
4569 Optimize by setting the stepping range to the line.
4570 (We might not be in the original line, but if we entered a
4571 new line in mid-statement, we continue stepping. This makes
4572 things like for(;;) statements work better.) */
4573
4574 ecs->event_thread->step_range_start = stop_pc_sal.pc;
4575 ecs->event_thread->step_range_end = stop_pc_sal.end;
4576 set_step_info (frame, stop_pc_sal);
4577
4578 if (debug_infrun)
4579 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
4580 keep_going (ecs);
4581 }
4582
4583 /* Is thread TP in the middle of single-stepping? */
4584
4585 static int
4586 currently_stepping (struct thread_info *tp)
4587 {
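/* A thread is stepping if it is inside a step range with no
   step-resume breakpoint pending, is stepping over a breakpoint, is
   stepping through solib code after a catchpoint, or the breakpoint
   machinery has requested another single step.  */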
4588 return ((tp->step_range_end && tp->step_resume_breakpoint == NULL)
4589 || tp->trap_expected
4590 || tp->stepping_through_solib_after_catch
4591 || bpstat_should_step ());
4592 }
4593
4594 /* Returns true if any thread *but* the one passed in "data" is in the
4595 middle of stepping or of handling a "next". */
4596
4597 static int
4598 currently_stepping_or_nexting_callback (struct thread_info *tp, void *data)
4599 {
4600 if (tp == data)
4601 return 0;
4602
4603 return (tp->step_range_end
4604 || tp->trap_expected
4605 || tp->stepping_through_solib_after_catch);
4606 }
4607
4608 /* Inferior has stepped into a subroutine call with source code that
4609 we should not step over. Do step to the first line of code in
4610 it. */
4611
4612 static void
4613 handle_step_into_function (struct gdbarch *gdbarch,
4614 struct execution_control_state *ecs)
4615 {
4616 struct symtab *s;
4617 struct symtab_and_line stop_func_sal, sr_sal;
4618
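/* Skip the callee's prologue so we stop at its first line of source,
   except for assembly sources, where the prologue itself is of
   interest.  */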
4619 s = find_pc_symtab (stop_pc);
4620 if (s && s->language != language_asm)
4621 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
4622 ecs->stop_func_start);
4623
4624 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
4625 /* Use the step_resume_break to step until the end of the prologue,
4626 even if that involves jumps (as it seems to on the vax under
4627 4.2). */
4628 /* If the prologue ends in the middle of a source line, continue to
4629 the end of that source line (if it is still within the function).
4630 Otherwise, just go to end of prologue. */
4631 if (stop_func_sal.end
4632 && stop_func_sal.pc != ecs->stop_func_start
4633 && stop_func_sal.end < ecs->stop_func_end)
4634 ecs->stop_func_start = stop_func_sal.end;
4635
4636 /* Architectures which require breakpoint adjustment might not be able
4637 to place a breakpoint at the computed address. If so, the test
4638 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
4639 ecs->stop_func_start to an address at which a breakpoint may be
4640 legitimately placed.
4641
4642 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
4643 made, GDB will enter an infinite loop when stepping through
4644 optimized code consisting of VLIW instructions which contain
4645 subinstructions corresponding to different source lines. On
4646 FR-V, it's not permitted to place a breakpoint on any but the
4647 first subinstruction of a VLIW instruction. When a breakpoint is
4648 set, GDB will adjust the breakpoint address to the beginning of
4649 the VLIW instruction. Thus, we need to make the corresponding
4650 adjustment here when computing the stop address. */
4651
4652 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
4653 {
4654 ecs->stop_func_start
4655 = gdbarch_adjust_breakpoint_address (gdbarch,
4656 ecs->stop_func_start);
4657 }
4658
4659 if (ecs->stop_func_start == stop_pc)
4660 {
4661 /* We are already there: stop now. */
4662 ecs->event_thread->stop_step = 1;
4663 print_stop_reason (END_STEPPING_RANGE, 0);
4664 stop_stepping (ecs);
4665 return;
4666 }
4667 else
4668 {
4669 /* Put the step-breakpoint there and go until there. */
4670 init_sal (&sr_sal); /* initialize to zeroes */
4671 sr_sal.pc = ecs->stop_func_start;
4672 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
4673 sr_sal.pspace = get_frame_program_space (get_current_frame ());
4674
4675 /* Do not specify what the fp should be when we stop since on
4676 some machines the prologue is where the new fp value is
4677 established. */
4678 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
4679
4680 /* And make sure stepping stops right away then. */
4681 ecs->event_thread->step_range_end = ecs->event_thread->step_range_start;
4682 }
4683 keep_going (ecs);
4684 }
4685
4686 /* Inferior has stepped backward into a subroutine call with source
4687 code that we should not step over. Do step to the beginning of the
4688 last line of code in it. */
4689
4690 static void
4691 handle_step_into_function_backward (struct gdbarch *gdbarch,
4692 struct execution_control_state *ecs)
4693 {
4694 struct symtab *s;
4695 struct symtab_and_line stop_func_sal, sr_sal;
4696
4697 s = find_pc_symtab (stop_pc);
4698 if (s && s->language != language_asm)
4699 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
4700 ecs->stop_func_start);
4701
4702 stop_func_sal = find_pc_line (stop_pc, 0);
4703
4704 /* OK, we're just going to keep stepping here. */
4705 if (stop_func_sal.pc == stop_pc)
4706 {
4707 /* We're there already. Just stop stepping now. */
4708 ecs->event_thread->stop_step = 1;
4709 print_stop_reason (END_STEPPING_RANGE, 0);
4710 stop_stepping (ecs);
4711 }
4712 else
4713 {
4714 /* Else just reset the step range and keep going.
4715 No step-resume breakpoint, they don't work for
4716 epilogues, which can have multiple entry paths. */
4717 ecs->event_thread->step_range_start = stop_func_sal.pc;
4718 ecs->event_thread->step_range_end = stop_func_sal.end;
4719 keep_going (ecs);
4720 }
4721 return;
4722 }
4723
4724 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
4725 This is used both to skip over functions and to skip over code. */
4726
4727 static void
4728 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
4729 struct symtab_and_line sr_sal,
4730 struct frame_id sr_id)
4731 {
4732 /* There should never be more than one step-resume or longjmp-resume
4733 breakpoint per thread, so we should never be setting a new
4734 step_resume_breakpoint when one is already active. */
4735 gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL);
4736
4737 if (debug_infrun)
4738 fprintf_unfiltered (gdb_stdlog,
4739 "infrun: inserting step-resume breakpoint at %s\n",
4740 paddress (gdbarch, sr_sal.pc));
4741
4742 inferior_thread ()->step_resume_breakpoint
4743 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, bp_step_resume);
4744 }
4745
4746 /* Insert a "step-resume breakpoint" at RETURN_FRAME.pc. This is used
4747 to skip a potential signal handler.
4748
4749 This is called with the interrupted function's frame. The signal
4750 handler, when it returns, will resume the interrupted function at
4751 RETURN_FRAME.pc. */
4752
4753 static void
4754 insert_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
4755 {
4756 struct symtab_and_line sr_sal;
4757 struct gdbarch *gdbarch;
4758
4759 gdb_assert (return_frame != NULL);
4760 init_sal (&sr_sal); /* initialize to zeros */
4761
4762 gdbarch = get_frame_arch (return_frame);
4763 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
4764 sr_sal.section = find_pc_overlay (sr_sal.pc);
4765 sr_sal.pspace = get_frame_program_space (return_frame);
4766
4767 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
4768 get_stack_frame_id (return_frame));
4769 }
4770
4771 /* Similar to insert_step_resume_breakpoint_at_frame, except it
4772 inserts a breakpoint at the previous frame's PC. This is used to
4773 skip a function after stepping into it (for "next" or if the called
4774 function has no debugging information).
4775
4776 The current function has almost always been reached by single
4777 stepping a call or return instruction. NEXT_FRAME belongs to the
4778 current function, and the breakpoint will be set at the caller's
4779 resume address.
4780
4781 This is a separate function rather than reusing
4782 insert_step_resume_breakpoint_at_frame in order to avoid
4783 get_prev_frame, which may stop prematurely (see the implementation
4784 of frame_unwind_caller_id for an example). */
4785
4786 static void
4787 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
4788 {
4789 struct symtab_and_line sr_sal;
4790 struct gdbarch *gdbarch;
4791
4792 /* We shouldn't have gotten here if we don't know where the call site
4793 is. */
4794 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
4795
4796 init_sal (&sr_sal); /* initialize to zeros */
4797
4798 gdbarch = frame_unwind_caller_arch (next_frame);
4799 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
4800 frame_unwind_caller_pc (next_frame));
4801 sr_sal.section = find_pc_overlay (sr_sal.pc);
4802 sr_sal.pspace = frame_unwind_program_space (next_frame);
4803
4804 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
4805 frame_unwind_caller_id (next_frame));
4806 }
4807
4808 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
4809 new breakpoint at the target of a jmp_buf. The handling of
4810 longjmp-resume uses the same mechanisms used for handling
4811 "step-resume" breakpoints. */
4812
4813 static void
4814 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
4815 {
4816 /* There should never be more than one step-resume or longjmp-resume
4817 breakpoint per thread, so we should never be setting a new
4818 longjmp_resume_breakpoint when one is already active. */
4819 gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL);
4820
4821 if (debug_infrun)
4822 fprintf_unfiltered (gdb_stdlog,
4823 "infrun: inserting longjmp-resume breakpoint at %s\n",
4824 paddress (gdbarch, pc));
4825
4826 inferior_thread ()->step_resume_breakpoint =
4827 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
4828 }
4829
4830 static void
4831 stop_stepping (struct execution_control_state *ecs)
4832 {
4833 if (debug_infrun)
4834 fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n");
4835
4836 /* Let callers know we don't want to wait for the inferior anymore. */
4837 ecs->wait_some_more = 0;
4838 }
4839
4840 /* This function handles various cases where we need to continue
4841 waiting for the inferior. */
4842 /* (Used to be the keep_going: label in the old wait_for_inferior) */
4843
4844 static void
4845 keep_going (struct execution_control_state *ecs)
4846 {
4847 /* Make sure normal_stop is called if we get a QUIT handled before
4848 reaching resume. */
4849 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
4850
4851 /* Save the pc before execution, to compare with pc after stop. */
4852 ecs->event_thread->prev_pc
4853 = regcache_read_pc (get_thread_regcache (ecs->ptid));
4854
4855 /* If we did not 'break' out of the event handling, it means we
4856 should keep running the inferior and not return to the debugger. */
4857
4858 if (ecs->event_thread->trap_expected
4859 && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP)
4860 {
4861 /* We took a signal (which we are supposed to pass through to
4862 the inferior, else we'd not get here) and we haven't yet
4863 gotten our trap. Simply continue. */
4864
4865 discard_cleanups (old_cleanups);
4866 resume (currently_stepping (ecs->event_thread),
4867 ecs->event_thread->stop_signal);
4868 }
4869 else
4870 {
4871 /* Either the trap was not expected, but we are continuing
4872 anyway (the user asked that this signal be passed to the
4873 child)
4874 -- or --
4875 The signal was SIGTRAP, e.g. it was our signal, but we
4876 decided we should resume from it.
4877
4878 We're going to run this baby now!
4879
4880 Note that insert_breakpoints won't try to re-insert
4881 already inserted breakpoints. Therefore, we don't
4882 care if breakpoints were already inserted, or not. */
4883
4884 if (ecs->event_thread->stepping_over_breakpoint)
4885 {
4886 struct regcache *thread_regcache = get_thread_regcache (ecs->ptid);
4887 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
4888 /* Since we can't do a displaced step, we have to remove
4889 the breakpoint while we step it. To keep things
4890 simple, we remove them all. */
4891 remove_breakpoints ();
4892 }
4893 else
4894 {
4895 struct gdb_exception e;
4896 /* Stop stepping when inserting breakpoints
4897 has failed. */
4898 TRY_CATCH (e, RETURN_MASK_ERROR)
4899 {
4900 insert_breakpoints ();
4901 }
4902 if (e.reason < 0)
4903 {
4904 exception_print (gdb_stderr, e);
4905 stop_stepping (ecs);
4906 return;
4907 }
4908 }
4909
4910 ecs->event_thread->trap_expected = ecs->event_thread->stepping_over_breakpoint;
4911
4912 /* Do not deliver SIGNAL_TRAP (except when the user explicitly
4913 specifies that such a signal should be delivered to the
4914 target program).
4915
4916 Typically, this would occur when a user is debugging a
4917 target monitor on a simulator: the target monitor sets a
4918 breakpoint; the simulator encounters this breakpoint and
4919 halts the simulation handing control to GDB; GDB, noting
4920 that the breakpoint isn't valid, returns control back to the
4921 simulator; the simulator then delivers the hardware
4922 equivalent of a SIGNAL_TRAP to the program being debugged. */
4923
4924 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
4925 && !signal_program[ecs->event_thread->stop_signal])
4926 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
4927
4928 discard_cleanups (old_cleanups);
4929 resume (currently_stepping (ecs->event_thread),
4930 ecs->event_thread->stop_signal);
4931 }
4932
4933 prepare_to_wait (ecs);
4934 }
4935
4936 /* This function normally comes after a resume, before
4937 handle_inferior_event exits. It takes care of any last bits of
4938 housekeeping, and sets the all-important wait_some_more flag. */
4939
4940 static void
4941 prepare_to_wait (struct execution_control_state *ecs)
4942 {
4943 if (debug_infrun)
4944 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
4945
4946 /* This is the old end of the while loop. Let everybody know we
4947 want to wait for the inferior some more and get called again
4948 soon. */
4949 ecs->wait_some_more = 1;
4950 }
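/* For orientation, a simplified sketch (details and exact variable
   names omitted) of how the callers drive keep_going, stop_stepping
   and prepare_to_wait through the WAIT_SOME_MORE flag:

       while (1)
         {
           ecs->ptid = target_wait (..., &ecs->ws, 0);
           handle_inferior_event (ecs);   /+ may call keep_going
                                             or stop_stepping +/
           if (!ecs->wait_some_more)
             break;
         }

   (The "/+ +/" marks stand in for nested comments.)  */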
4951
4952 /* Print why the inferior has stopped. We always print something when
4953 the inferior exits or receives a signal. The rest of the cases are
4954 dealt with later on in normal_stop() and print_it_typical(). Ideally
4955 there should be a call to this function from handle_inferior_event()
4956 each time stop_stepping() is called. */
4957 static void
4958 print_stop_reason (enum inferior_stop_reason stop_reason, int stop_info)
4959 {
4960 switch (stop_reason)
4961 {
4962 case END_STEPPING_RANGE:
4963 /* We are done with a step/next/si/ni command. */
4964 /* For now print nothing. */
4965 /* Print a message only if not in the middle of doing a "step n"
4966 operation for n > 1 */
4967 if (!inferior_thread ()->step_multi
4968 || !inferior_thread ()->stop_step)
4969 if (ui_out_is_mi_like_p (uiout))
4970 ui_out_field_string
4971 (uiout, "reason",
4972 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
4973 break;
4974 case SIGNAL_EXITED:
4975 /* The inferior was terminated by a signal. */
4976 annotate_signalled ();
4977 if (ui_out_is_mi_like_p (uiout))
4978 ui_out_field_string
4979 (uiout, "reason",
4980 async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
4981 ui_out_text (uiout, "\nProgram terminated with signal ");
4982 annotate_signal_name ();
4983 ui_out_field_string (uiout, "signal-name",
4984 target_signal_to_name (stop_info));
4985 annotate_signal_name_end ();
4986 ui_out_text (uiout, ", ");
4987 annotate_signal_string ();
4988 ui_out_field_string (uiout, "signal-meaning",
4989 target_signal_to_string (stop_info));
4990 annotate_signal_string_end ();
4991 ui_out_text (uiout, ".\n");
4992 ui_out_text (uiout, "The program no longer exists.\n");
4993 break;
4994 case EXITED:
4995 /* The inferior program is finished. */
4996 annotate_exited (stop_info);
4997 if (stop_info)
4998 {
4999 if (ui_out_is_mi_like_p (uiout))
5000 ui_out_field_string (uiout, "reason",
5001 async_reason_lookup (EXEC_ASYNC_EXITED));
5002 ui_out_text (uiout, "\nProgram exited with code ");
5003 ui_out_field_fmt (uiout, "exit-code", "0%o",
5004 (unsigned int) stop_info);
5005 ui_out_text (uiout, ".\n");
5006 }
5007 else
5008 {
5009 if (ui_out_is_mi_like_p (uiout))
5010 ui_out_field_string
5011 (uiout, "reason",
5012 async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
5013 ui_out_text (uiout, "\nProgram exited normally.\n");
5014 }
5015 /* Support the --return-child-result option. */
5016 return_child_result_value = stop_info;
5017 break;
5018 case SIGNAL_RECEIVED:
5019 /* Signal received. The signal table tells us to print about
5020 it. */
5021 annotate_signal ();
5022
5023 if (stop_info == TARGET_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
5024 {
5025 struct thread_info *t = inferior_thread ();
5026
5027 ui_out_text (uiout, "\n[");
5028 ui_out_field_string (uiout, "thread-name",
5029 target_pid_to_str (t->ptid));
5030 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
5031 ui_out_text (uiout, " stopped");
5032 }
5033 else
5034 {
5035 ui_out_text (uiout, "\nProgram received signal ");
5036 annotate_signal_name ();
5037 if (ui_out_is_mi_like_p (uiout))
5038 ui_out_field_string
5039 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
5040 ui_out_field_string (uiout, "signal-name",
5041 target_signal_to_name (stop_info));
5042 annotate_signal_name_end ();
5043 ui_out_text (uiout, ", ");
5044 annotate_signal_string ();
5045 ui_out_field_string (uiout, "signal-meaning",
5046 target_signal_to_string (stop_info));
5047 annotate_signal_string_end ();
5048 }
5049 ui_out_text (uiout, ".\n");
5050 break;
5051 case NO_HISTORY:
5052 /* Reverse execution: target ran out of history info. */
5053 ui_out_text (uiout, "\nNo more reverse-execution history.\n");
5054 break;
5055 default:
5056 internal_error (__FILE__, __LINE__,
5057 _("print_stop_reason: unrecognized enum value"));
5058 break;
5059 }
5060 }
5061 \f
5062
5063 /* Here to return control to GDB when the inferior stops for real.
5064 Print appropriate messages, remove breakpoints, give terminal our modes.
5065
5066 The global stop_print_frame, when nonzero, means print the
5067 executing frame (pc, function, args, file, line number and
5068 line text). */
5070
5071 void
5072 normal_stop (void)
5073 {
5074 struct target_waitstatus last;
5075 ptid_t last_ptid;
5076 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
5077
5078 get_last_target_status (&last_ptid, &last);
5079
5080 /* If an exception is thrown from this point on, make sure to
5081 propagate GDB's knowledge of the executing state to the
5082 frontend/user running state. A QUIT is an easy exception to see
5083 here, so do this before any filtered output. */
5084 if (!non_stop)
5085 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
5086 else if (last.kind != TARGET_WAITKIND_SIGNALLED
5087 && last.kind != TARGET_WAITKIND_EXITED)
5088 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
5089
5090 /* In non-stop mode, we don't want GDB to switch threads behind the
5091 user's back, to avoid races where the user is typing a command to
5092 apply to thread x, but GDB switches to thread y before the user
5093 finishes entering the command. */
5094
5095 /* As with the notification of thread events, we want to delay
5096 notifying the user that we've switched thread context until
5097 the inferior actually stops.
5098
5099 There's no point in saying anything if the inferior has exited.
5100 Note that SIGNALLED here means "exited with a signal", not
5101 "received a signal". */
5102 if (!non_stop
5103 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
5104 && target_has_execution
5105 && last.kind != TARGET_WAITKIND_SIGNALLED
5106 && last.kind != TARGET_WAITKIND_EXITED)
5107 {
5108 target_terminal_ours_for_output ();
5109 printf_filtered (_("[Switching to %s]\n"),
5110 target_pid_to_str (inferior_ptid));
5111 annotate_thread_changed ();
5112 previous_inferior_ptid = inferior_ptid;
5113 }
5114
5115 if (!breakpoints_always_inserted_mode () && target_has_execution)
5116 {
5117 if (remove_breakpoints ())
5118 {
5119 target_terminal_ours_for_output ();
5120 printf_filtered (_("\
5121 Cannot remove breakpoints because program is no longer writable.\n\
5122 Further execution is probably impossible.\n"));
5123 }
5124 }
5125
5126 /* If an auto-display called a function and that got a signal,
5127 delete that auto-display to avoid an infinite recursion. */
5128
5129 if (stopped_by_random_signal)
5130 disable_current_display ();
5131
5132 /* Don't print a message if in the middle of doing a "step n"
5133 operation for n > 1 */
5134 if (target_has_execution
5135 && last.kind != TARGET_WAITKIND_SIGNALLED
5136 && last.kind != TARGET_WAITKIND_EXITED
5137 && inferior_thread ()->step_multi
5138 && inferior_thread ()->stop_step)
5139 goto done;
5140
5141 target_terminal_ours ();
5142
5143 /* Set the current source location. This will also happen if we
5144 display the frame below, but the current SAL will be incorrect
5145 during a user hook-stop function. */
5146 if (has_stack_frames () && !stop_stack_dummy)
5147 set_current_sal_from_frame (get_current_frame (), 1);
5148
5149 /* Let the user/frontend see the threads as stopped. */
5150 do_cleanups (old_chain);
5151
5152 /* Look up the hook_stop and run it (CLI internally handles problem
5153 of stop_command's pre-hook not existing). */
5154 if (stop_command)
5155 catch_errors (hook_stop_stub, stop_command,
5156 "Error while running hook_stop:\n", RETURN_MASK_ALL);
5157
5158 if (!has_stack_frames ())
5159 goto done;
5160
5161 if (last.kind == TARGET_WAITKIND_SIGNALLED
5162 || last.kind == TARGET_WAITKIND_EXITED)
5163 goto done;
5164
5165 /* Select innermost stack frame - i.e., current frame is frame 0,
5166 and current location is based on that.
5167 Don't do this on return from a stack dummy routine,
5168 or if the program has exited. */
5169
5170 if (!stop_stack_dummy)
5171 {
5172 select_frame (get_current_frame ());
5173
5174 /* Print current location without a level number, if
5175 we have changed functions or hit a breakpoint.
5176 Print source line if we have one.
5177 bpstat_print() contains the logic deciding in detail
5178 what to print, based on the event(s) that just occurred. */
5179
5180 /* If --batch-silent is enabled then there's no need to print the current
5181 source location, and trying to do so risks an error message about
5182 missing source files. */
5183 if (stop_print_frame && !batch_silent)
5184 {
5185 int bpstat_ret;
5186 int source_flag;
5187 int do_frame_printing = 1;
5188 struct thread_info *tp = inferior_thread ();
5189
5190 bpstat_ret = bpstat_print (tp->stop_bpstat);
5191 switch (bpstat_ret)
5192 {
5193 case PRINT_UNKNOWN:
5194 /* If we had hit a shared library event breakpoint,
5195 bpstat_print would print out this message. If we hit
5196 an OS-level shared library event, do the same
5197 thing. */
5198 if (last.kind == TARGET_WAITKIND_LOADED)
5199 {
5200 printf_filtered (_("Stopped due to shared library event\n"));
5201 source_flag = SRC_LINE; /* something bogus */
5202 do_frame_printing = 0;
5203 break;
5204 }
5205
5206 /* FIXME: cagney/2002-12-01: a frame ID does (or should) carry the
5207 function and does (or should) use it when comparing frames, so
5208 the extra function check below ought to be redundant. */
5209 if (tp->stop_step
5210 && frame_id_eq (tp->step_frame_id,
5211 get_frame_id (get_current_frame ()))
5212 && step_start_function == find_pc_function (stop_pc))
5213 source_flag = SRC_LINE; /* finished step, just print source line */
5214 else
5215 source_flag = SRC_AND_LOC; /* print location and source line */
5216 break;
5217 case PRINT_SRC_AND_LOC:
5218 source_flag = SRC_AND_LOC; /* print location and source line */
5219 break;
5220 case PRINT_SRC_ONLY:
5221 source_flag = SRC_LINE;
5222 break;
5223 case PRINT_NOTHING:
5224 source_flag = SRC_LINE; /* something bogus */
5225 do_frame_printing = 0;
5226 break;
5227 default:
5228 internal_error (__FILE__, __LINE__, _("Unknown value."));
5229 }
5230
5231 /* The behavior of this routine with respect to the source
5232 flag is:
5233 SRC_LINE: Print only source line
5234 LOCATION: Print only location
5235 SRC_AND_LOC: Print location and source line */
5236 if (do_frame_printing)
5237 print_stack_frame (get_selected_frame (NULL), 0, source_flag);
5238
5239 /* Display the auto-display expressions. */
5240 do_displays ();
5241 }
5242 }
5243
5244 /* Save the function value return registers, if we care.
5245 We might be about to restore their previous contents. */
5246 if (inferior_thread ()->proceed_to_finish)
5247 {
5248 /* This should not be necessary. */
5249 if (stop_registers)
5250 regcache_xfree (stop_registers);
5251
5252 /* NB: The copy goes through to the target picking up the value of
5253 all the registers. */
5254 stop_registers = regcache_dup (get_current_regcache ());
5255 }
5256
5257 if (stop_stack_dummy)
5258 {
5259 /* Pop the empty frame that contains the stack dummy.
5260 This also restores inferior state prior to the call
5261 (struct inferior_thread_state). */
5262 struct frame_info *frame = get_current_frame ();
5263 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
5264 frame_pop (frame);
5265 /* frame_pop() calls reinit_frame_cache as the last thing it does
5266 which means there's currently no selected frame. We don't need
5267 to re-establish a selected frame if the dummy call returns normally,
5268 that will be done by restore_inferior_status. However, we do have
5269 to handle the case where the dummy call is returning after being
5270 stopped (e.g. the dummy call previously hit a breakpoint). We
5271 can't know which case we have so just always re-establish a
5272 selected frame here. */
5273 select_frame (get_current_frame ());
5274 }
5275
5276 done:
5277 annotate_stopped ();
5278
5279 /* Suppress the stop observer if we're in the middle of:
5280
5281 - a step n (n > 1), as there are still more steps to be done.
5282
5283 - a "finish" command, as the observer will be called in
5284 finish_command_continuation, so it can include the inferior
5285 function's return value.
5286
5287 - calling an inferior function, as we pretend the inferior didn't
5288 run at all. The return value of the call is handled by the
5289 expression evaluator, through call_function_by_hand. */
5290
5291 if (!target_has_execution
5292 || last.kind == TARGET_WAITKIND_SIGNALLED
5293 || last.kind == TARGET_WAITKIND_EXITED
5294 || (!inferior_thread ()->step_multi
5295 && !(inferior_thread ()->stop_bpstat
5296 && inferior_thread ()->proceed_to_finish)
5297 && !inferior_thread ()->in_infcall))
5298 {
5299 if (!ptid_equal (inferior_ptid, null_ptid))
5300 observer_notify_normal_stop (inferior_thread ()->stop_bpstat,
5301 stop_print_frame);
5302 else
5303 observer_notify_normal_stop (NULL, stop_print_frame);
5304 }
5305
5306 if (target_has_execution)
5307 {
5308 if (last.kind != TARGET_WAITKIND_SIGNALLED
5309 && last.kind != TARGET_WAITKIND_EXITED)
5310 /* Delete the breakpoint we stopped at, if it wants to be deleted.
5311 Delete any breakpoint that is to be deleted at the next stop. */
5312 breakpoint_auto_delete (inferior_thread ()->stop_bpstat);
5313 }
5314
5315 /* Try to get rid of automatically added inferiors that are no
5316 longer needed. Keeping those around slows down things linearly.
5317 Note that this never removes the current inferior. */
5318 prune_inferiors ();
5319 }
5320
5321 static int
5322 hook_stop_stub (void *cmd)
5323 {
5324 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
5325 return (0);
5326 }
5327 \f
5328 int
5329 signal_stop_state (int signo)
5330 {
5331 return signal_stop[signo];
5332 }
5333
5334 int
5335 signal_print_state (int signo)
5336 {
5337 return signal_print[signo];
5338 }
5339
5340 int
5341 signal_pass_state (int signo)
5342 {
5343 return signal_program[signo];
5344 }
5345
5346 int
5347 signal_stop_update (int signo, int state)
5348 {
5349 int ret = signal_stop[signo];
5350 signal_stop[signo] = state;
5351 return ret;
5352 }
5353
5354 int
5355 signal_print_update (int signo, int state)
5356 {
5357 int ret = signal_print[signo];
5358 signal_print[signo] = state;
5359 return ret;
5360 }
5361
5362 int
5363 signal_pass_update (int signo, int state)
5364 {
5365 int ret = signal_program[signo];
5366 signal_program[signo] = state;
5367 return ret;
5368 }
5369
5370 static void
5371 sig_print_header (void)
5372 {
5373 printf_filtered (_("\
5374 Signal Stop\tPrint\tPass to program\tDescription\n"));
5375 }
5376
5377 static void
5378 sig_print_info (enum target_signal oursig)
5379 {
5380 const char *name = target_signal_to_name (oursig);
5381 int name_padding = 13 - strlen (name);
5382
5383 if (name_padding <= 0)
5384 name_padding = 0;
5385
5386 printf_filtered ("%s", name);
5387 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
5388 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
5389 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
5390 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
5391 printf_filtered ("%s\n", target_signal_to_string (oursig));
5392 }
5393
5394 /* Specify how various signals in the inferior should be handled. */
5395
5396 static void
5397 handle_command (char *args, int from_tty)
5398 {
5399 char **argv;
5400 int digits, wordlen;
5401 int sigfirst, signum, siglast;
5402 enum target_signal oursig;
5403 int allsigs;
5404 int nsigs;
5405 unsigned char *sigs;
5406 struct cleanup *old_chain;
5407
5408 if (args == NULL)
5409 {
5410 error_no_arg (_("signal to handle"));
5411 }
5412
5413 /* Allocate and zero an array of flags for which signals to handle. */
5414
5415 nsigs = (int) TARGET_SIGNAL_LAST;
5416 sigs = (unsigned char *) alloca (nsigs);
5417 memset (sigs, 0, nsigs);
5418
5419 /* Break the command line up into args. */
5420
5421 argv = gdb_buildargv (args);
5422 old_chain = make_cleanup_freeargv (argv);
5423
5424 /* Walk through the args, looking for signal oursigs, signal names, and
5425 actions. Signal numbers and signal names may be interspersed with
5426 actions, with the actions being performed for all signals cumulatively
5427 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
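  /* For illustration only, typical command lines this parser accepts
     include (names, numbers, ranges and actions may be interspersed):

         handle SIGUSR1 SIGUSR2 nostop noprint pass
         handle 14-15 stop print
         handle all nopass                                       */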
5428
5429 while (*argv != NULL)
5430 {
5431 wordlen = strlen (*argv);
5432 for (digits = 0; isdigit ((*argv)[digits]); digits++)
5433 {;
5434 }
5435 allsigs = 0;
5436 sigfirst = siglast = -1;
5437
5438 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
5439 {
5440 /* Apply action to all signals except those used by the
5441 debugger. Silently skip those. */
5442 allsigs = 1;
5443 sigfirst = 0;
5444 siglast = nsigs - 1;
5445 }
5446 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
5447 {
5448 SET_SIGS (nsigs, sigs, signal_stop);
5449 SET_SIGS (nsigs, sigs, signal_print);
5450 }
5451 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
5452 {
5453 UNSET_SIGS (nsigs, sigs, signal_program);
5454 }
5455 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
5456 {
5457 SET_SIGS (nsigs, sigs, signal_print);
5458 }
5459 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
5460 {
5461 SET_SIGS (nsigs, sigs, signal_program);
5462 }
5463 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
5464 {
5465 UNSET_SIGS (nsigs, sigs, signal_stop);
5466 }
5467 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
5468 {
5469 SET_SIGS (nsigs, sigs, signal_program);
5470 }
5471 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
5472 {
5473 UNSET_SIGS (nsigs, sigs, signal_print);
5474 UNSET_SIGS (nsigs, sigs, signal_stop);
5475 }
5476 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
5477 {
5478 UNSET_SIGS (nsigs, sigs, signal_program);
5479 }
5480 else if (digits > 0)
5481 {
5482 /* It is numeric. The numeric signal refers to our own
5483 internal signal numbering from target.h, not to host/target
5484 signal number. This is a feature; users really should be
5485 using symbolic names anyway, and the common ones like
5486 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
5487
5488 sigfirst = siglast = (int)
5489 target_signal_from_command (atoi (*argv));
5490 if ((*argv)[digits] == '-')
5491 {
5492 siglast = (int)
5493 target_signal_from_command (atoi ((*argv) + digits + 1));
5494 }
5495 if (sigfirst > siglast)
5496 {
5497 /* Bet he didn't figure we'd think of this case... */
5498 signum = sigfirst;
5499 sigfirst = siglast;
5500 siglast = signum;
5501 }
5502 }
5503 else
5504 {
5505 oursig = target_signal_from_name (*argv);
5506 if (oursig != TARGET_SIGNAL_UNKNOWN)
5507 {
5508 sigfirst = siglast = (int) oursig;
5509 }
5510 else
5511 {
5512 /* Not a number and not a recognized flag word => complain. */
5513 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
5514 }
5515 }
5516
5517 /* If any signal numbers or symbol names were found, set flags for
5518 which signals to apply actions to. */
5519
5520 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
5521 {
5522 switch ((enum target_signal) signum)
5523 {
5524 case TARGET_SIGNAL_TRAP:
5525 case TARGET_SIGNAL_INT:
5526 if (!allsigs && !sigs[signum])
5527 {
5528 if (query (_("%s is used by the debugger.\n\
5529 Are you sure you want to change it? "), target_signal_to_name ((enum target_signal) signum)))
5530 {
5531 sigs[signum] = 1;
5532 }
5533 else
5534 {
5535 printf_unfiltered (_("Not confirmed, unchanged.\n"));
5536 gdb_flush (gdb_stdout);
5537 }
5538 }
5539 break;
5540 case TARGET_SIGNAL_0:
5541 case TARGET_SIGNAL_DEFAULT:
5542 case TARGET_SIGNAL_UNKNOWN:
5543 /* Make sure that "all" doesn't print these. */
5544 break;
5545 default:
5546 sigs[signum] = 1;
5547 break;
5548 }
5549 }
5550
5551 argv++;
5552 }
5553
5554 for (signum = 0; signum < nsigs; signum++)
5555 if (sigs[signum])
5556 {
5557 target_notice_signals (inferior_ptid);
5558
5559 if (from_tty)
5560 {
5561 /* Show the results. */
5562 sig_print_header ();
5563 for (; signum < nsigs; signum++)
5564 if (sigs[signum])
5565 sig_print_info (signum);
5566 }
5567
5568 break;
5569 }
5570
5571 do_cleanups (old_chain);
5572 }
5573
5574 static void
5575 xdb_handle_command (char *args, int from_tty)
5576 {
5577 char **argv;
5578 struct cleanup *old_chain;
5579
5580 if (args == NULL)
5581 error_no_arg (_("xdb command"));
5582
5583 /* Break the command line up into args. */
5584
5585 argv = gdb_buildargv (args);
5586 old_chain = make_cleanup_freeargv (argv);
5587 if (argv[1] != (char *) NULL)
5588 {
5589 char *argBuf;
5590 int bufLen;
5591
5592 bufLen = strlen (argv[0]) + 20;
5593 argBuf = (char *) xmalloc (bufLen);
5594 if (argBuf)
5595 {
5596 int validFlag = 1;
5597 enum target_signal oursig;
5598
5599 oursig = target_signal_from_name (argv[0]);
5600 memset (argBuf, 0, bufLen);
5601 if (strcmp (argv[1], "Q") == 0)
5602 sprintf (argBuf, "%s %s", argv[0], "noprint");
5603 else
5604 {
5605 if (strcmp (argv[1], "s") == 0)
5606 {
5607 if (!signal_stop[oursig])
5608 sprintf (argBuf, "%s %s", argv[0], "stop");
5609 else
5610 sprintf (argBuf, "%s %s", argv[0], "nostop");
5611 }
5612 else if (strcmp (argv[1], "i") == 0)
5613 {
5614 if (!signal_program[oursig])
5615 sprintf (argBuf, "%s %s", argv[0], "pass");
5616 else
5617 sprintf (argBuf, "%s %s", argv[0], "nopass");
5618 }
5619 else if (strcmp (argv[1], "r") == 0)
5620 {
5621 if (!signal_print[oursig])
5622 sprintf (argBuf, "%s %s", argv[0], "print");
5623 else
5624 sprintf (argBuf, "%s %s", argv[0], "noprint");
5625 }
5626 else
5627 validFlag = 0;
5628 }
5629 if (validFlag)
5630 handle_command (argBuf, from_tty);
5631 else
5632 printf_filtered (_("Invalid signal handling flag.\n"));
5633 if (argBuf)
5634 xfree (argBuf);
5635 }
5636 }
5637 do_cleanups (old_chain);
5638 }
5639
5640 /* Print current contents of the tables set by the handle command.
5641 It is possible we should just be printing signals actually used
5642 by the current target (but for things to work right when switching
5643 targets, all signals should be in the signal tables). */
5644
5645 static void
5646 signals_info (char *signum_exp, int from_tty)
5647 {
5648 enum target_signal oursig;
5649 sig_print_header ();
5650
5651 if (signum_exp)
5652 {
5653 /* First see if this is a symbol name. */
5654 oursig = target_signal_from_name (signum_exp);
5655 if (oursig == TARGET_SIGNAL_UNKNOWN)
5656 {
5657 /* No, try numeric. */
5658 oursig =
5659 target_signal_from_command (parse_and_eval_long (signum_exp));
5660 }
5661 sig_print_info (oursig);
5662 return;
5663 }
5664
5665 printf_filtered ("\n");
5666 /* These ugly casts brought to you by the native VAX compiler. */
5667 for (oursig = TARGET_SIGNAL_FIRST;
5668 (int) oursig < (int) TARGET_SIGNAL_LAST;
5669 oursig = (enum target_signal) ((int) oursig + 1))
5670 {
5671 QUIT;
5672
5673 if (oursig != TARGET_SIGNAL_UNKNOWN
5674 && oursig != TARGET_SIGNAL_DEFAULT && oursig != TARGET_SIGNAL_0)
5675 sig_print_info (oursig);
5676 }
5677
5678 printf_filtered (_("\nUse the \"handle\" command to change these tables.\n"));
5679 }
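/* For example (illustrative output; the description text comes from
   target_signal_to_string and the columns from sig_print_info, so
   exact spacing may differ):

       (gdb) info signals SIGINT
       Signal        Stop	Print	Pass to program	Description
       SIGINT        Yes	Yes	No		Interrupt         */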
5680
5681 /* The $_siginfo convenience variable is a bit special. We don't know
5682 for sure the type of the value until we actually have a chance to
5683 fetch the data. The type can change depending on gdbarch, so it is
5684 also dependent on which thread you have selected. We handle this by:
5685
5686 1. making $_siginfo be an internalvar that creates a new value on
5687 access.
5688
5689 2. making the value of $_siginfo be an lval_computed value. */
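/* As an illustrative use, on a target whose gdbarch supplies a
   siginfo type (e.g. GNU/Linux), the variable can be inspected like
   any other structured value:

       (gdb) print $_siginfo.si_signo
       (gdb) print $_siginfo.si_errno

   On targets without such support the value reads as void.  */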
5690
5691 /* This function implements the lval_computed support for reading a
5692 $_siginfo value. */
5693
5694 static void
5695 siginfo_value_read (struct value *v)
5696 {
5697 LONGEST transferred;
5698
5699 transferred =
5700 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
5701 NULL,
5702 value_contents_all_raw (v),
5703 value_offset (v),
5704 TYPE_LENGTH (value_type (v)));
5705
5706 if (transferred != TYPE_LENGTH (value_type (v)))
5707 error (_("Unable to read siginfo"));
5708 }
5709
5710 /* This function implements the lval_computed support for writing a
5711 $_siginfo value. */
5712
5713 static void
5714 siginfo_value_write (struct value *v, struct value *fromval)
5715 {
5716 LONGEST transferred;
5717
5718 transferred = target_write (&current_target,
5719 TARGET_OBJECT_SIGNAL_INFO,
5720 NULL,
5721 value_contents_all_raw (fromval),
5722 value_offset (v),
5723 TYPE_LENGTH (value_type (fromval)));
5724
5725 if (transferred != TYPE_LENGTH (value_type (fromval)))
5726 error (_("Unable to write siginfo"));
5727 }
5728
5729 static struct lval_funcs siginfo_value_funcs =
5730 {
5731 siginfo_value_read,
5732 siginfo_value_write
5733 };
5734
5735 /* Return a new value with the correct type for the siginfo object of
5736 the current thread using architecture GDBARCH. Return a void value
5737 if there's no object available. */
5738
5739 static struct value *
5740 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var)
5741 {
5742 if (target_has_stack
5743 && !ptid_equal (inferior_ptid, null_ptid)
5744 && gdbarch_get_siginfo_type_p (gdbarch))
5745 {
5746 struct type *type = gdbarch_get_siginfo_type (gdbarch);
5747 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
5748 }
5749
5750 return allocate_value (builtin_type (gdbarch)->builtin_void);
5751 }
5752
5753 \f
5754 /* Inferior thread state.
5755 These are details related to the inferior itself, and don't include
5756 things like what frame the user had selected or what gdb was doing
5757 with the target at the time.
5758 For inferior function calls these are things we want to restore
5759 regardless of whether the function call successfully completes
5760 or the dummy frame has to be manually popped. */
5761
5762 struct inferior_thread_state
5763 {
5764 enum target_signal stop_signal;
5765 CORE_ADDR stop_pc;
5766 struct regcache *registers;
5767 };
5768
5769 struct inferior_thread_state *
5770 save_inferior_thread_state (void)
5771 {
5772 struct inferior_thread_state *inf_state = XMALLOC (struct inferior_thread_state);
5773 struct thread_info *tp = inferior_thread ();
5774
5775 inf_state->stop_signal = tp->stop_signal;
5776 inf_state->stop_pc = stop_pc;
5777
5778 inf_state->registers = regcache_dup (get_current_regcache ());
5779
5780 return inf_state;
5781 }
5782
5783 /* Restore inferior session state to INF_STATE. */
5784
5785 void
5786 restore_inferior_thread_state (struct inferior_thread_state *inf_state)
5787 {
5788 struct thread_info *tp = inferior_thread ();
5789
5790 tp->stop_signal = inf_state->stop_signal;
5791 stop_pc = inf_state->stop_pc;
5792
5793 /* The inferior can be gone if the user types "print exit(0)"
5794 (and perhaps other times). */
5795 if (target_has_execution)
5796 /* NB: The register write goes through to the target. */
5797 regcache_cpy (get_current_regcache (), inf_state->registers);
5798 regcache_xfree (inf_state->registers);
5799 xfree (inf_state);
5800 }
5801
5802 static void
5803 do_restore_inferior_thread_state_cleanup (void *state)
5804 {
5805 restore_inferior_thread_state (state);
5806 }
5807
5808 struct cleanup *
5809 make_cleanup_restore_inferior_thread_state (struct inferior_thread_state *inf_state)
5810 {
5811 return make_cleanup (do_restore_inferior_thread_state_cleanup, inf_state);
5812 }
5813
5814 void
5815 discard_inferior_thread_state (struct inferior_thread_state *inf_state)
5816 {
5817 regcache_xfree (inf_state->registers);
5818 xfree (inf_state);
5819 }
5820
5821 struct regcache *
5822 get_inferior_thread_state_regcache (struct inferior_thread_state *inf_state)
5823 {
5824 return inf_state->registers;
5825 }
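/* The functions above are meant to bracket operations that let the
   inferior run temporarily (such as inferior function calls).  A
   sketch of the intended pattern, with error handling omitted:

       struct inferior_thread_state *st = save_inferior_thread_state ();
       struct cleanup *c = make_cleanup_restore_inferior_thread_state (st);
       ... let the inferior execute ...
       do_cleanups (c);   /+ restores registers, stop_pc, stop_signal +/

   A caller that wants to keep the new state instead calls
   discard_cleanups and then discard_inferior_thread_state.  */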
5826
5827 /* Session related state for inferior function calls.
5828 These are the additional bits of state that need to be restored
5829 when an inferior function call successfully completes. */
5830
5831 struct inferior_status
5832 {
5833 bpstat stop_bpstat;
5834 int stop_step;
5835 int stop_stack_dummy;
5836 int stopped_by_random_signal;
5837 int stepping_over_breakpoint;
5838 CORE_ADDR step_range_start;
5839 CORE_ADDR step_range_end;
5840 struct frame_id step_frame_id;
5841 struct frame_id step_stack_frame_id;
5842 enum step_over_calls_kind step_over_calls;
5843 CORE_ADDR step_resume_break_address;
5844 int stop_after_trap;
5845 int stop_soon;
5846
5847 /* ID of the selected frame when the inferior function call was made. */
5848 struct frame_id selected_frame_id;
5849
5850 int proceed_to_finish;
5851 int in_infcall;
5852 };
5853
5854 /* Save all of the information associated with the inferior<==>gdb
5855 connection. */
5856
5857 struct inferior_status *
5858 save_inferior_status (void)
5859 {
5860 struct inferior_status *inf_status = XMALLOC (struct inferior_status);
5861 struct thread_info *tp = inferior_thread ();
5862 struct inferior *inf = current_inferior ();
5863
5864 inf_status->stop_step = tp->stop_step;
5865 inf_status->stop_stack_dummy = stop_stack_dummy;
5866 inf_status->stopped_by_random_signal = stopped_by_random_signal;
5867 inf_status->stepping_over_breakpoint = tp->trap_expected;
5868 inf_status->step_range_start = tp->step_range_start;
5869 inf_status->step_range_end = tp->step_range_end;
5870 inf_status->step_frame_id = tp->step_frame_id;
5871 inf_status->step_stack_frame_id = tp->step_stack_frame_id;
5872 inf_status->step_over_calls = tp->step_over_calls;
5873 inf_status->stop_after_trap = stop_after_trap;
5874 inf_status->stop_soon = inf->stop_soon;
5875 /* Save original bpstat chain here; replace it with copy of chain.
5876 If caller's caller is walking the chain, they'll be happier if we
5877 hand them back the original chain when restore_inferior_status is
5878 called. */
5879 inf_status->stop_bpstat = tp->stop_bpstat;
5880 tp->stop_bpstat = bpstat_copy (tp->stop_bpstat);
5881 inf_status->proceed_to_finish = tp->proceed_to_finish;
5882 inf_status->in_infcall = tp->in_infcall;
5883
5884 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
5885
5886 return inf_status;
5887 }
5888
5889 static int
5890 restore_selected_frame (void *args)
5891 {
5892 struct frame_id *fid = (struct frame_id *) args;
5893 struct frame_info *frame;
5894
5895 frame = frame_find_by_id (*fid);
5896
5897 /* frame_find_by_id returns NULL if no frame in the current stack
5898 matches the previously selected frame's ID. */
5899 if (frame == NULL)
5900 {
5901 warning (_("Unable to restore previously selected frame."));
5902 return 0;
5903 }
5904
5905 select_frame (frame);
5906
5907 return (1);
5908 }
5909
5910 /* Restore inferior session state to INF_STATUS. */
5911
5912 void
5913 restore_inferior_status (struct inferior_status *inf_status)
5914 {
5915 struct thread_info *tp = inferior_thread ();
5916 struct inferior *inf = current_inferior ();
5917
5918 tp->stop_step = inf_status->stop_step;
5919 stop_stack_dummy = inf_status->stop_stack_dummy;
5920 stopped_by_random_signal = inf_status->stopped_by_random_signal;
5921 tp->trap_expected = inf_status->stepping_over_breakpoint;
5922 tp->step_range_start = inf_status->step_range_start;
5923 tp->step_range_end = inf_status->step_range_end;
5924 tp->step_frame_id = inf_status->step_frame_id;
5925 tp->step_stack_frame_id = inf_status->step_stack_frame_id;
5926 tp->step_over_calls = inf_status->step_over_calls;
5927 stop_after_trap = inf_status->stop_after_trap;
5928 inf->stop_soon = inf_status->stop_soon;
5929 bpstat_clear (&tp->stop_bpstat);
5930 tp->stop_bpstat = inf_status->stop_bpstat;
5931 inf_status->stop_bpstat = NULL;
5932 tp->proceed_to_finish = inf_status->proceed_to_finish;
5933 tp->in_infcall = inf_status->in_infcall;
5934
5935 if (target_has_stack)
5936 {
5937 /* The point of catch_errors is that if the stack is clobbered,
5938 walking the stack might encounter a garbage pointer and
5939 error() trying to dereference it. */
5940 if (catch_errors
5941 (restore_selected_frame, &inf_status->selected_frame_id,
5942 "Unable to restore previously selected frame:\n",
5943 RETURN_MASK_ERROR) == 0)
5944 /* Error in restoring the selected frame. Select the innermost
5945 frame. */
5946 select_frame (get_current_frame ());
5947 }
5948
5949 xfree (inf_status);
5950 }
5951
5952 static void
5953 do_restore_inferior_status_cleanup (void *sts)
5954 {
5955 restore_inferior_status (sts);
5956 }
5957
5958 struct cleanup *
5959 make_cleanup_restore_inferior_status (struct inferior_status *inf_status)
5960 {
5961 return make_cleanup (do_restore_inferior_status_cleanup, inf_status);
5962 }
5963
5964 void
5965 discard_inferior_status (struct inferior_status *inf_status)
5966 {
5967 /* See save_inferior_status for info on stop_bpstat. */
5968 bpstat_clear (&inf_status->stop_bpstat);
5969 xfree (inf_status);
5970 }
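/* A sketch of the intended use around an inferior function call
   (simplified; the real callers also handle errors and the case
   where the user keeps control after an interrupted call):

       struct inferior_status *status = save_inferior_status ();
       struct cleanup *c = make_cleanup_restore_inferior_status (status);
       ... run the call through a dummy frame ...
       do_cleanups (c);   /+ call completed: restore the session state +/

   When the saved state is not wanted after all, the pair
   discard_cleanups / discard_inferior_status releases it instead.  */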
5971 \f
5972 int
5973 inferior_has_forked (ptid_t pid, ptid_t *child_pid)
5974 {
5975 struct target_waitstatus last;
5976 ptid_t last_ptid;
5977
5978 get_last_target_status (&last_ptid, &last);
5979
5980 if (last.kind != TARGET_WAITKIND_FORKED)
5981 return 0;
5982
5983 if (!ptid_equal (last_ptid, pid))
5984 return 0;
5985
5986 *child_pid = last.value.related_pid;
5987 return 1;
5988 }
5989
5990 int
5991 inferior_has_vforked (ptid_t pid, ptid_t *child_pid)
5992 {
5993 struct target_waitstatus last;
5994 ptid_t last_ptid;
5995
5996 get_last_target_status (&last_ptid, &last);
5997
5998 if (last.kind != TARGET_WAITKIND_VFORKED)
5999 return 0;
6000
6001 if (!ptid_equal (last_ptid, pid))
6002 return 0;
6003
6004 *child_pid = last.value.related_pid;
6005 return 1;
6006 }
6007
6008 int
6009 inferior_has_execd (ptid_t pid, char **execd_pathname)
6010 {
6011 struct target_waitstatus last;
6012 ptid_t last_ptid;
6013
6014 get_last_target_status (&last_ptid, &last);
6015
6016 if (last.kind != TARGET_WAITKIND_EXECD)
6017 return 0;
6018
6019 if (!ptid_equal (last_ptid, pid))
6020 return 0;
6021
6022 *execd_pathname = xstrdup (last.value.execd_pathname);
6023 return 1;
6024 }
6025
6026 int
6027 inferior_has_called_syscall (ptid_t pid, int *syscall_number)
6028 {
6029 struct target_waitstatus last;
6030 ptid_t last_ptid;
6031
6032 get_last_target_status (&last_ptid, &last);
6033
6034 if (last.kind != TARGET_WAITKIND_SYSCALL_ENTRY
6035 && last.kind != TARGET_WAITKIND_SYSCALL_RETURN)
6036 return 0;
6037
6038 if (!ptid_equal (last_ptid, pid))
6039 return 0;
6040
6041 *syscall_number = last.value.syscall_number;
6042 return 1;
6043 }
6044
6045 /* Oft used ptids */
6046 ptid_t null_ptid;
6047 ptid_t minus_one_ptid;
6048
6049 /* Create a ptid given the necessary PID, LWP, and TID components. */
6050
6051 ptid_t
6052 ptid_build (int pid, long lwp, long tid)
6053 {
6054 ptid_t ptid;
6055
6056 ptid.pid = pid;
6057 ptid.lwp = lwp;
6058 ptid.tid = tid;
6059 return ptid;
6060 }
6061
6062 /* Create a ptid from just a pid. */
6063
6064 ptid_t
6065 pid_to_ptid (int pid)
6066 {
6067 return ptid_build (pid, 0, 0);
6068 }
6069
6070 /* Fetch the pid (process id) component from a ptid. */
6071
6072 int
6073 ptid_get_pid (ptid_t ptid)
6074 {
6075 return ptid.pid;
6076 }
6077
6078 /* Fetch the lwp (lightweight process) component from a ptid. */
6079
6080 long
6081 ptid_get_lwp (ptid_t ptid)
6082 {
6083 return ptid.lwp;
6084 }
6085
6086 /* Fetch the tid (thread id) component from a ptid. */
6087
6088 long
6089 ptid_get_tid (ptid_t ptid)
6090 {
6091 return ptid.tid;
6092 }
6093
6094 /* ptid_equal() is used to test equality of two ptids. */
6095
6096 int
6097 ptid_equal (ptid_t ptid1, ptid_t ptid2)
6098 {
6099 return (ptid1.pid == ptid2.pid && ptid1.lwp == ptid2.lwp
6100 && ptid1.tid == ptid2.tid);
6101 }
6102
6103 /* Returns true if PTID represents a process. */
6104
6105 int
6106 ptid_is_pid (ptid_t ptid)
6107 {
6108 if (ptid_equal (minus_one_ptid, ptid))
6109 return 0;
6110 if (ptid_equal (null_ptid, ptid))
6111 return 0;
6112
6113 return (ptid_get_lwp (ptid) == 0 && ptid_get_tid (ptid) == 0);
6114 }
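/* A small usage sketch of the accessors above (values illustrative):

       ptid_t ptid = ptid_build (1234, 1235, 0);

       ptid_get_pid (ptid);               /+ 1234 +/
       ptid_get_lwp (ptid);               /+ 1235 +/
       ptid_is_pid (ptid);                /+ 0, lwp field is non-zero +/
       ptid_is_pid (pid_to_ptid (1234));  /+ 1 +/                       */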
6115
6116 /* restore_inferior_ptid() will be used by the cleanup machinery
6117 to restore the inferior_ptid value saved in a call to
6118 save_inferior_ptid(). */
6119
6120 static void
6121 restore_inferior_ptid (void *arg)
6122 {
6123 ptid_t *saved_ptid_ptr = arg;
6124 inferior_ptid = *saved_ptid_ptr;
6125 xfree (arg);
6126 }
6127
6128 /* Save the value of inferior_ptid so that it may be restored by a
6129 later call to do_cleanups(). Returns the struct cleanup pointer
6130 needed for later doing the cleanup. */
6131
6132 struct cleanup *
6133 save_inferior_ptid (void)
6134 {
6135 ptid_t *saved_ptid_ptr;
6136
6137 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
6138 *saved_ptid_ptr = inferior_ptid;
6139 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
6140 }
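/* Typical use, as a sketch: temporarily switch inferior_ptid and let
   the cleanup put it back (SOME_OTHER_PTID is a placeholder):

       struct cleanup *old = save_inferior_ptid ();
       inferior_ptid = some_other_ptid;
       ... do per-thread work ...
       do_cleanups (old);   /+ inferior_ptid restored +/               */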
6141 \f
6142
6143 /* User interface for reverse debugging:
6144 Set exec-direction / show exec-direction commands
6145 (only takes effect if the target can execute in reverse). */
6146
6147 enum exec_direction_kind execution_direction = EXEC_FORWARD;
6148 static const char exec_forward[] = "forward";
6149 static const char exec_reverse[] = "reverse";
6150 static const char *exec_direction = exec_forward;
6151 static const char *exec_direction_names[] = {
6152 exec_forward,
6153 exec_reverse,
6154 NULL
6155 };
6156
6157 static void
6158 set_exec_direction_func (char *args, int from_tty,
6159 struct cmd_list_element *cmd)
6160 {
6161 if (target_can_execute_reverse)
6162 {
6163 if (!strcmp (exec_direction, exec_forward))
6164 execution_direction = EXEC_FORWARD;
6165 else if (!strcmp (exec_direction, exec_reverse))
6166 execution_direction = EXEC_REVERSE;
6167 }
6168 }
6169
6170 static void
6171 show_exec_direction_func (struct ui_file *out, int from_tty,
6172 struct cmd_list_element *cmd, const char *value)
6173 {
6174 switch (execution_direction) {
6175 case EXEC_FORWARD:
6176 fprintf_filtered (out, _("Forward.\n"));
6177 break;
6178 case EXEC_REVERSE:
6179 fprintf_filtered (out, _("Reverse.\n"));
6180 break;
6181 case EXEC_ERROR:
6182 default:
6183 fprintf_filtered (out,
6184 _("Forward (target `%s' does not support exec-direction).\n"),
6185 target_shortname);
6186 break;
6187 }
6188 }
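/* Illustrative use from the command line, assuming a target that can
   execute in reverse (e.g. after enabling process record):

       (gdb) record
       (gdb) set exec-direction reverse
       (gdb) step
       (gdb) set exec-direction forward                                */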
6189
6190 /* User interface for non-stop mode. */
6191
6192 int non_stop = 0;
6193 static int non_stop_1 = 0;
6194
6195 static void
6196 set_non_stop (char *args, int from_tty,
6197 struct cmd_list_element *c)
6198 {
6199 if (target_has_execution)
6200 {
6201 non_stop_1 = non_stop;
6202 error (_("Cannot change this setting while the inferior is running."));
6203 }
6204
6205 non_stop = non_stop_1;
6206 }
6207
6208 static void
6209 show_non_stop (struct ui_file *file, int from_tty,
6210 struct cmd_list_element *c, const char *value)
6211 {
6212 fprintf_filtered (file,
6213 _("Controlling the inferior in non-stop mode is %s.\n"),
6214 value);
6215 }
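/* Illustrative use: as set_non_stop above enforces, the mode must be
   selected before the program is started, and it is typically
   combined with asynchronous target operation:

       (gdb) set target-async on
       (gdb) set pagination off
       (gdb) set non-stop on
       (gdb) run                                                       */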
6216
6217 static void
6218 show_schedule_multiple (struct ui_file *file, int from_tty,
6219 struct cmd_list_element *c, const char *value)
6220 {
6221 fprintf_filtered (file, _("\
6222 Resuming the execution of threads of all processes is %s.\n"), value);
6223 }
6224
6225 void
6226 _initialize_infrun (void)
6227 {
6228 int i;
6229 int numsigs;
6230 struct cmd_list_element *c;
6231
6232 add_info ("signals", signals_info, _("\
6233 What debugger does when program gets various signals.\n\
6234 Specify a signal as argument to print info on that signal only."));
6235 add_info_alias ("handle", "signals", 0);
6236
6237 add_com ("handle", class_run, handle_command, _("\
6238 Specify how to handle a signal.\n\
6239 Args are signals and actions to apply to those signals.\n\
6240 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6241 from 1-15 are allowed for compatibility with old versions of GDB.\n\
6242 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
6243 The special arg \"all\" is recognized to mean all signals except those\n\
6244 used by the debugger, typically SIGTRAP and SIGINT.\n\
6245 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
6246 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
6247 Stop means reenter debugger if this signal happens (implies print).\n\
6248 Print means print a message if this signal happens.\n\
6249 Pass means let program see this signal; otherwise program doesn't know.\n\
6250 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
6251 Pass and Stop may be combined."));
6252 if (xdb_commands)
6253 {
6254 add_com ("lz", class_info, signals_info, _("\
6255 What debugger does when program gets various signals.\n\
6256 Specify a signal as argument to print info on that signal only."));
6257 add_com ("z", class_run, xdb_handle_command, _("\
6258 Specify how to handle a signal.\n\
6259 Args are signals and actions to apply to those signals.\n\
6260 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6261 from 1-15 are allowed for compatibility with old versions of GDB.\n\
6262 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
6263 The special arg \"all\" is recognized to mean all signals except those\n\
6264 used by the debugger, typically SIGTRAP and SIGINT.\n\
6265 Recognized actions include \"s\" (toggles between stop and nostop), \n\
6266 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
6267 nopass), \"Q\" (noprint)\n\
6268 Stop means reenter debugger if this signal happens (implies print).\n\
6269 Print means print a message if this signal happens.\n\
6270 Pass means let program see this signal; otherwise program doesn't know.\n\
6271 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
6272 Pass and Stop may be combined."));
6273 }
6274
6275 if (!dbx_commands)
6276 stop_command = add_cmd ("stop", class_obscure,
6277 not_just_help_class_command, _("\
6278 There is no `stop' command, but you can set a hook on `stop'.\n\
6279 This allows you to set a list of commands to be run each time execution\n\
6280 of the program stops."), &cmdlist);
6281
6282 add_setshow_zinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
6283 Set inferior debugging."), _("\
6284 Show inferior debugging."), _("\
6285 When non-zero, inferior specific debugging is enabled."),
6286 NULL,
6287 show_debug_infrun,
6288 &setdebuglist, &showdebuglist);
6289
6290 add_setshow_boolean_cmd ("displaced", class_maintenance, &debug_displaced, _("\
6291 Set displaced stepping debugging."), _("\
6292 Show displaced stepping debugging."), _("\
6293 When non-zero, displaced stepping specific debugging is enabled."),
6294 NULL,
6295 show_debug_displaced,
6296 &setdebuglist, &showdebuglist);
6297
6298 add_setshow_boolean_cmd ("non-stop", no_class,
6299 &non_stop_1, _("\
6300 Set whether gdb controls the inferior in non-stop mode."), _("\
6301 Show whether gdb controls the inferior in non-stop mode."), _("\
6302 When debugging a multi-threaded program and this setting is\n\
6303 off (the default, also called all-stop mode), when one thread stops\n\
6304 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
6305 all other threads in the program while you interact with the thread of\n\
6306 interest. When you continue or step a thread, you can allow the other\n\
6307 threads to run, or have them remain stopped, but while you inspect any\n\
6308 thread's state, all threads stop.\n\
6309 \n\
6310 In non-stop mode, when one thread stops, other threads can continue\n\
6311 to run freely. You'll be able to step each thread independently,\n\
6312 leave it stopped or free to run as needed."),
6313 set_non_stop,
6314 show_non_stop,
6315 &setlist,
6316 &showlist);
6317
6318 numsigs = (int) TARGET_SIGNAL_LAST;
6319 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
6320 signal_print = (unsigned char *)
6321 xmalloc (sizeof (signal_print[0]) * numsigs);
6322 signal_program = (unsigned char *)
6323 xmalloc (sizeof (signal_program[0]) * numsigs);
6324 for (i = 0; i < numsigs; i++)
6325 {
6326 signal_stop[i] = 1;
6327 signal_print[i] = 1;
6328 signal_program[i] = 1;
6329 }
6330
6331 /* Signals caused by debugger's own actions
6332 should not be given to the program afterwards. */
6333 signal_program[TARGET_SIGNAL_TRAP] = 0;
6334 signal_program[TARGET_SIGNAL_INT] = 0;
6335
6336 /* Signals that are not errors should not normally enter the debugger. */
6337 signal_stop[TARGET_SIGNAL_ALRM] = 0;
6338 signal_print[TARGET_SIGNAL_ALRM] = 0;
6339 signal_stop[TARGET_SIGNAL_VTALRM] = 0;
6340 signal_print[TARGET_SIGNAL_VTALRM] = 0;
6341 signal_stop[TARGET_SIGNAL_PROF] = 0;
6342 signal_print[TARGET_SIGNAL_PROF] = 0;
6343 signal_stop[TARGET_SIGNAL_CHLD] = 0;
6344 signal_print[TARGET_SIGNAL_CHLD] = 0;
6345 signal_stop[TARGET_SIGNAL_IO] = 0;
6346 signal_print[TARGET_SIGNAL_IO] = 0;
6347 signal_stop[TARGET_SIGNAL_POLL] = 0;
6348 signal_print[TARGET_SIGNAL_POLL] = 0;
6349 signal_stop[TARGET_SIGNAL_URG] = 0;
6350 signal_print[TARGET_SIGNAL_URG] = 0;
6351 signal_stop[TARGET_SIGNAL_WINCH] = 0;
6352 signal_print[TARGET_SIGNAL_WINCH] = 0;
6353
6354 /* These signals are used internally by user-level thread
6355 implementations. (See signal(5) on Solaris.) Like the above
6356 signals, a healthy program receives and handles them as part of
6357 its normal operation. */
6358 signal_stop[TARGET_SIGNAL_LWP] = 0;
6359 signal_print[TARGET_SIGNAL_LWP] = 0;
6360 signal_stop[TARGET_SIGNAL_WAITING] = 0;
6361 signal_print[TARGET_SIGNAL_WAITING] = 0;
6362 signal_stop[TARGET_SIGNAL_CANCEL] = 0;
6363 signal_print[TARGET_SIGNAL_CANCEL] = 0;
6364
6365 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
6366 &stop_on_solib_events, _("\
6367 Set stopping for shared library events."), _("\
6368 Show stopping for shared library events."), _("\
6369 If nonzero, gdb will give control to the user when the dynamic linker\n\
6370 notifies gdb of shared library events. The most common event of interest\n\
6371 to the user would be loading/unloading of a new library."),
6372 NULL,
6373 show_stop_on_solib_events,
6374 &setlist, &showlist);
6375
6376 add_setshow_enum_cmd ("follow-fork-mode", class_run,
6377 follow_fork_mode_kind_names,
6378 &follow_fork_mode_string, _("\
6379 Set debugger response to a program call of fork or vfork."), _("\
6380 Show debugger response to a program call of fork or vfork."), _("\
6381 A fork or vfork creates a new process. follow-fork-mode can be:\n\
6382 parent - the original process is debugged after a fork\n\
6383 child - the new process is debugged after a fork\n\
6384 The unfollowed process will continue to run.\n\
6385 By default, the debugger will follow the parent process."),
6386 NULL,
6387 show_follow_fork_mode_string,
6388 &setlist, &showlist);
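  /* Illustrative use of this setting together with "detach-on-fork"
     (registered further below), e.g. to follow the child while still
     keeping the parent under debugger control:

         (gdb) set follow-fork-mode child
         (gdb) set detach-on-fork off                                   */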
6389
6390 add_setshow_enum_cmd ("follow-exec-mode", class_run,
6391 follow_exec_mode_names,
6392 &follow_exec_mode_string, _("\
6393 Set debugger response to a program call of exec."), _("\
6394 Show debugger response to a program call of exec."), _("\
6395 An exec call replaces the program image of a process.\n\
6396 \n\
6397 follow-exec-mode can be:\n\
6398 \n\
6399 new - the debugger creates a new inferior and rebinds the process \n\
6400 to this new inferior. The program the process was running before\n\
6401 the exec call can be restarted afterwards by restarting the original\n\
6402 inferior.\n\
6403 \n\
6404 same - the debugger keeps the process bound to the same inferior.\n\
6405 The new executable image replaces the previous executable loaded in\n\
6406 the inferior. Restarting the inferior after the exec call restarts\n\
6407 the executable the process was running after the exec call.\n\
6408 \n\
6409 By default, the debugger will use the same inferior."),
6410 NULL,
6411 show_follow_exec_mode_string,
6412 &setlist, &showlist);
6413
6414 add_setshow_enum_cmd ("scheduler-locking", class_run,
6415 scheduler_enums, &scheduler_mode, _("\
6416 Set mode for locking scheduler during execution."), _("\
6417 Show mode for locking scheduler during execution."), _("\
6418 off == no locking (threads may preempt at any time)\n\
6419 on == full locking (no thread except the current thread may run)\n\
6420 step == scheduler locked during every single-step operation.\n\
6421 In this mode, no other thread may run during a step command.\n\
6422 Other threads may run while stepping over a function call ('next')."),
6423 set_schedlock_func, /* traps on target vector */
6424 show_scheduler_mode,
6425 &setlist, &showlist);
6426
6427 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
6428 Set mode for resuming threads of all processes."), _("\
6429 Show mode for resuming threads of all processes."), _("\
6430 When on, execution commands (such as 'continue' or 'next') resume all\n\
6431 threads of all processes. When off (which is the default), execution\n\
6432 commands only resume the threads of the current process. The set of\n\
6433 threads that are resumed is further refined by the scheduler-locking\n\
6434 mode (see help set scheduler-locking)."),
6435 NULL,
6436 show_schedule_multiple,
6437 &setlist, &showlist);
6438
6439 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
6440 Set mode of the step operation."), _("\
6441 Show mode of the step operation."), _("\
6442 When set, doing a step over a function without debug line information\n\
6443 will stop at the first instruction of that function. Otherwise, the\n\
6444 function is skipped and the step command stops at a different source line."),
6445 NULL,
6446 show_step_stop_if_no_debug,
6447 &setlist, &showlist);
6448
6449 add_setshow_enum_cmd ("displaced-stepping", class_run,
6450 can_use_displaced_stepping_enum,
6451 &can_use_displaced_stepping, _("\
6452 Set debugger's willingness to use displaced stepping."), _("\
6453 Show debugger's willingness to use displaced stepping."), _("\
6454 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
6455 supported by the target architecture. If off, gdb will not use displaced\n\
6456 stepping to step over breakpoints, even if such is supported by the target\n\
6457 architecture. If auto (which is the default), gdb will use displaced stepping\n\
6458 if the target architecture supports it and non-stop mode is active, but will not\n\
6459 use it in all-stop mode (see help set non-stop)."),
6460 NULL,
6461 show_can_use_displaced_stepping,
6462 &setlist, &showlist);
6463
6464 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
6465 &exec_direction, _("Set direction of execution.\n\
6466 Options are 'forward' or 'reverse'."),
6467 _("Show direction of execution (forward/reverse)."),
6468 _("Tells gdb whether to execute forward or backward."),
6469 set_exec_direction_func, show_exec_direction_func,
6470 &setlist, &showlist);
6471
6472 /* Set/show detach-on-fork: user-settable mode. */
6473
6474 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
6475 Set whether gdb will detach the child of a fork."), _("\
6476 Show whether gdb will detach the child of a fork."), _("\
6477 Tells gdb whether to detach the child of a fork."),
6478 NULL, NULL, &setlist, &showlist);
6479
6480 /* ptid initializations */
6481 null_ptid = ptid_build (0, 0, 0);
6482 minus_one_ptid = ptid_build (-1, 0, 0);
6483 inferior_ptid = null_ptid;
6484 target_last_wait_ptid = minus_one_ptid;
6485 displaced_step_ptid = null_ptid;
6486
6487 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
6488 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
6489 observer_attach_thread_exit (infrun_thread_thread_exit);
6490
6491 /* Explicitly create without lookup, since that tries to create a
6492 value with a void typed value, and when we get here, gdbarch
6493 isn't initialized yet. At this point, we're quite sure there
6494 isn't another convenience variable of the same name. */
6495 create_internalvar_type_lazy ("_siginfo", siginfo_make_value);
6496 }