gdb/infrun.c, from binutils-gdb.git, as of 2010-05-14 (Michael Snyder <msnyder@vmware.com>)

1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995,
5 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
6 2008, 2009, 2010 Free Software Foundation, Inc.
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "gdb_string.h"
25 #include <ctype.h>
26 #include "symtab.h"
27 #include "frame.h"
28 #include "inferior.h"
29 #include "exceptions.h"
30 #include "breakpoint.h"
31 #include "gdb_wait.h"
32 #include "gdbcore.h"
33 #include "gdbcmd.h"
34 #include "cli/cli-script.h"
35 #include "target.h"
36 #include "gdbthread.h"
37 #include "annotate.h"
38 #include "symfile.h"
39 #include "top.h"
40 #include <signal.h>
41 #include "inf-loop.h"
42 #include "regcache.h"
43 #include "value.h"
44 #include "observer.h"
45 #include "language.h"
46 #include "solib.h"
47 #include "main.h"
48 #include "gdb_assert.h"
49 #include "mi/mi-common.h"
50 #include "event-top.h"
51 #include "record.h"
52 #include "inline-frame.h"
53 #include "jit.h"
54 #include "tracepoint.h"
55
56 /* Prototypes for local functions */
57
58 static void signals_info (char *, int);
59
60 static void handle_command (char *, int);
61
62 static void sig_print_info (enum target_signal);
63
64 static void sig_print_header (void);
65
66 static void resume_cleanups (void *);
67
68 static int hook_stop_stub (void *);
69
70 static int restore_selected_frame (void *);
71
72 static int follow_fork (void);
73
74 static void set_schedlock_func (char *args, int from_tty,
75 struct cmd_list_element *c);
76
77 static int currently_stepping (struct thread_info *tp);
78
79 static int currently_stepping_or_nexting_callback (struct thread_info *tp,
80 void *data);
81
82 static void xdb_handle_command (char *args, int from_tty);
83
84 static int prepare_to_proceed (int);
85
86 void _initialize_infrun (void);
87
88 void nullify_last_target_wait_ptid (void);
89
90 /* When set, stop the 'step' command if we enter a function which has
91 no line number information. The normal behavior is that we step
92 over such functions. */
93 int step_stop_if_no_debug = 0;
94 static void
95 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
96 struct cmd_list_element *c, const char *value)
97 {
98 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
99 }
100
101 /* In asynchronous mode, but simulating synchronous execution. */
102
103 int sync_execution = 0;
104
105 /* wait_for_inferior and normal_stop use this to notify the user
106 when the inferior stopped in a different thread than it had been
107 running in. */
108
109 static ptid_t previous_inferior_ptid;
110
111 /* Default behavior is to detach newly forked processes (legacy). */
112 int detach_fork = 1;
113
114 int debug_displaced = 0;
115 static void
116 show_debug_displaced (struct ui_file *file, int from_tty,
117 struct cmd_list_element *c, const char *value)
118 {
119 fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
120 }
121
122 static int debug_infrun = 0;
123 static void
124 show_debug_infrun (struct ui_file *file, int from_tty,
125 struct cmd_list_element *c, const char *value)
126 {
127 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
128 }
129
130 /* If the program uses ELF-style shared libraries, then calls to
131 functions in shared libraries go through stubs, which live in a
132 table called the PLT (Procedure Linkage Table). The first time the
133 function is called, the stub sends control to the dynamic linker,
134 which looks up the function's real address, patches the stub so
135 that future calls will go directly to the function, and then passes
136 control to the function.
137
138 If we are stepping at the source level, we don't want to see any of
139 this --- we just want to skip over the stub and the dynamic linker.
140 The simple approach is to single-step until control leaves the
141 dynamic linker.
142
143 However, on some systems (e.g., Red Hat's 5.2 distribution) the
144 dynamic linker calls functions in the shared C library, so you
145 can't tell from the PC alone whether the dynamic linker is still
146 running. In this case, we use a step-resume breakpoint to get us
147 past the dynamic linker, as if we were using "next" to step over a
148 function call.
149
150 in_solib_dynsym_resolve_code() says whether we're in the dynamic
151 linker code or not. Normally, this means we single-step. However,
152 if SKIP_SOLIB_RESOLVER returns non-zero, its value is an
153 address where we can place a step-resume breakpoint to get past the
154 linker's symbol resolution function.
155
156 in_solib_dynsym_resolve_code() can generally be implemented in a
157 pretty portable way, by comparing the PC against the address ranges
158 of the dynamic linker's sections.
159
160 SKIP_SOLIB_RESOLVER is generally going to be system-specific, since
161 it depends on internal details of the dynamic linker. It's usually
162 not too hard to figure out where to put a breakpoint, but it
163 certainly isn't portable. SKIP_SOLIB_RESOLVER should do plenty of
164 sanity checking. If it can't figure things out, returning zero and
165 getting the (possibly confusing) stepping behavior is better than
166 signalling an error, which will obscure the change in the
167 inferior's state. */
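
/* Illustrative sketch, not part of infrun.c: the decision described
   above typically has this shape (in_solib_dynsym_resolve_code and
   gdbarch_skip_solib_resolver are the real interfaces; the control
   flow here is simplified and hypothetical):

       if (in_solib_dynsym_resolve_code (stop_pc))
         {
           CORE_ADDR resolver_end
             = gdbarch_skip_solib_resolver (gdbarch, stop_pc);

           if (resolver_end != 0)
             ... place a step-resume breakpoint at RESOLVER_END and
                 continue, as "next" would over a function call ...
           else
             ... keep single-stepping through the resolver ...
         }
*/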
168
169 /* This function returns TRUE if pc is the address of an instruction
170 that lies within the dynamic linker (such as the event hook, or the
171 dld itself).
172
173 This function must be used only when a dynamic linker event has
174 been caught, and the inferior is being stepped out of the hook, or
175 undefined results are guaranteed. */
176
177 #ifndef SOLIB_IN_DYNAMIC_LINKER
178 #define SOLIB_IN_DYNAMIC_LINKER(pid,pc) 0
179 #endif
180
181
182 /* Tables of how to react to signals; the user sets them. */
183
184 static unsigned char *signal_stop;
185 static unsigned char *signal_print;
186 static unsigned char *signal_program;
187
188 #define SET_SIGS(nsigs,sigs,flags) \
189 do { \
190 int signum = (nsigs); \
191 while (signum-- > 0) \
192 if ((sigs)[signum]) \
193 (flags)[signum] = 1; \
194 } while (0)
195
196 #define UNSET_SIGS(nsigs,sigs,flags) \
197 do { \
198 int signum = (nsigs); \
199 while (signum-- > 0) \
200 if ((sigs)[signum]) \
201 (flags)[signum] = 0; \
202 } while (0)
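
/* Illustrative usage sketch, not part of this excerpt: handle_command
   (declared above, defined later in this file) builds a SIGS array
   from the signal names the user gave and then applies the macros
   above to the flag tables, roughly:

       SET_SIGS (nsigs, sigs, signal_stop);        "handle <sig> stop"
       SET_SIGS (nsigs, sigs, signal_print);       "handle <sig> print"
       UNSET_SIGS (nsigs, sigs, signal_program);   "handle <sig> nopass"

   Exactly which tables are touched depends on the keywords given.  */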
203
204 /* Value to pass to target_resume() to cause all threads to resume */
205
206 #define RESUME_ALL minus_one_ptid
207
208 /* Command list pointer for the "stop" placeholder. */
209
210 static struct cmd_list_element *stop_command;
211
212 /* Function inferior was in as of last step command. */
213
214 static struct symbol *step_start_function;
215
216 /* Nonzero if we want to give control to the user when we're notified
217 of shared library events by the dynamic linker. */
218 static int stop_on_solib_events;
219 static void
220 show_stop_on_solib_events (struct ui_file *file, int from_tty,
221 struct cmd_list_element *c, const char *value)
222 {
223 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
224 value);
225 }
226
227 /* Nonzero means we are expecting a trace trap
228 and should stop the inferior and return silently when it happens. */
229
230 int stop_after_trap;
231
232 /* Save register contents here when executing a "finish" command or when
233 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
234 Thus this contains the return value from the called function (assuming
235 values are returned in a register). */
236
237 struct regcache *stop_registers;
238
239 /* Nonzero after stop if current stack frame should be printed. */
240
241 static int stop_print_frame;
242
243 /* This is a cached copy of the pid/waitstatus of the last event
244 returned by target_wait()/deprecated_target_wait_hook(). This
245 information is returned by get_last_target_status(). */
246 static ptid_t target_last_wait_ptid;
247 static struct target_waitstatus target_last_waitstatus;
248
249 static void context_switch (ptid_t ptid);
250
251 void init_thread_stepping_state (struct thread_info *tss);
252
253 void init_infwait_state (void);
254
255 static const char follow_fork_mode_child[] = "child";
256 static const char follow_fork_mode_parent[] = "parent";
257
258 static const char *follow_fork_mode_kind_names[] = {
259 follow_fork_mode_child,
260 follow_fork_mode_parent,
261 NULL
262 };
263
264 static const char *follow_fork_mode_string = follow_fork_mode_parent;
265 static void
266 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
267 struct cmd_list_element *c, const char *value)
268 {
269 fprintf_filtered (file, _("\
270 Debugger response to a program call of fork or vfork is \"%s\".\n"),
271 value);
272 }
273 \f
274
275 /* Tell the target to follow the fork we're stopped at. Returns true
276 if the inferior should be resumed; false, if the target for some
277 reason decided it's best not to resume. */
278
279 static int
280 follow_fork (void)
281 {
282 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
283 int should_resume = 1;
284 struct thread_info *tp;
285
286 /* Copy user stepping state to the new inferior thread. FIXME: the
287 followed fork child thread should have a copy of most of the
288 parent thread structure's run control related fields, not just these.
289 Initialized to avoid "may be used uninitialized" warnings from gcc. */
290 struct breakpoint *step_resume_breakpoint = NULL;
291 CORE_ADDR step_range_start = 0;
292 CORE_ADDR step_range_end = 0;
293 struct frame_id step_frame_id = { 0 };
294
295 if (!non_stop)
296 {
297 ptid_t wait_ptid;
298 struct target_waitstatus wait_status;
299
300 /* Get the last target status returned by target_wait(). */
301 get_last_target_status (&wait_ptid, &wait_status);
302
303 /* If not stopped at a fork event, then there's nothing else to
304 do. */
305 if (wait_status.kind != TARGET_WAITKIND_FORKED
306 && wait_status.kind != TARGET_WAITKIND_VFORKED)
307 return 1;
308
309 /* Check if we switched over from WAIT_PTID, since the event was
310 reported. */
311 if (!ptid_equal (wait_ptid, minus_one_ptid)
312 && !ptid_equal (inferior_ptid, wait_ptid))
313 {
314 /* We did. Switch back to WAIT_PTID thread, to tell the
315 target to follow it (in either direction). We'll
316 afterwards refuse to resume, and inform the user what
317 happened. */
318 switch_to_thread (wait_ptid);
319 should_resume = 0;
320 }
321 }
322
323 tp = inferior_thread ();
324
325 /* If there were any forks/vforks that were caught and are now to be
326 followed, then do so now. */
327 switch (tp->pending_follow.kind)
328 {
329 case TARGET_WAITKIND_FORKED:
330 case TARGET_WAITKIND_VFORKED:
331 {
332 ptid_t parent, child;
333
334 /* If the user did a next/step, etc, over a fork call,
335 preserve the stepping state in the fork child. */
336 if (follow_child && should_resume)
337 {
338 step_resume_breakpoint
339 = clone_momentary_breakpoint (tp->step_resume_breakpoint);
340 step_range_start = tp->step_range_start;
341 step_range_end = tp->step_range_end;
342 step_frame_id = tp->step_frame_id;
343
344 /* For now, delete the parent's sr breakpoint; otherwise,
345 parent/child sr breakpoints are considered duplicates,
346 and the child version will not be installed. Remove
347 this when the breakpoints module becomes aware of
348 inferiors and address spaces. */
349 delete_step_resume_breakpoint (tp);
350 tp->step_range_start = 0;
351 tp->step_range_end = 0;
352 tp->step_frame_id = null_frame_id;
353 }
354
355 parent = inferior_ptid;
356 child = tp->pending_follow.value.related_pid;
357
358 /* Tell the target to do whatever is necessary to follow
359 either parent or child. */
360 if (target_follow_fork (follow_child))
361 {
362 /* Target refused to follow, or there's some other reason
363 we shouldn't resume. */
364 should_resume = 0;
365 }
366 else
367 {
368 /* This pending follow fork event is now handled, one way
369 or another. The previously selected thread may be gone
370 from the lists by now, but if it is still around, we need
371 to clear the pending follow request. */
372 tp = find_thread_ptid (parent);
373 if (tp)
374 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
375
376 /* This makes sure we don't try to apply the "Switched
377 over from WAIT_PTID" logic above. */
378 nullify_last_target_wait_ptid ();
379
380 /* If we followed the child, switch to it... */
381 if (follow_child)
382 {
383 switch_to_thread (child);
384
385 /* ... and preserve the stepping state, in case the
386 user was stepping over the fork call. */
387 if (should_resume)
388 {
389 tp = inferior_thread ();
390 tp->step_resume_breakpoint = step_resume_breakpoint;
391 tp->step_range_start = step_range_start;
392 tp->step_range_end = step_range_end;
393 tp->step_frame_id = step_frame_id;
394 }
395 else
396 {
397 /* If we get here, it was because we're trying to
398 resume from a fork catchpoint, but, the user
399 has switched threads away from the thread that
400 forked. In that case, the resume command
401 issued is most likely not applicable to the
402 child, so just warn, and refuse to resume. */
403 warning (_("\
404 Not resuming: switched threads before following fork child."));
405 }
406
407 /* Reset breakpoints in the child as appropriate. */
408 follow_inferior_reset_breakpoints ();
409 }
410 else
411 switch_to_thread (parent);
412 }
413 }
414 break;
415 case TARGET_WAITKIND_SPURIOUS:
416 /* Nothing to follow. */
417 break;
418 default:
419 internal_error (__FILE__, __LINE__,
420 "Unexpected pending_follow.kind %d\n",
421 tp->pending_follow.kind);
422 break;
423 }
424
425 return should_resume;
426 }
427
428 void
429 follow_inferior_reset_breakpoints (void)
430 {
431 struct thread_info *tp = inferior_thread ();
432
433 /* Was there a step_resume breakpoint? (There was if the user
434 did a "next" at the fork() call.) If so, explicitly reset its
435 thread number.
436
437 step_resumes are a form of bp that are made to be per-thread.
438 Since we created the step_resume bp when the parent process
439 was being debugged, and now are switching to the child process,
440 from the breakpoint package's viewpoint, that's a switch of
441 "threads". We must update the bp's notion of which thread
442 it is for, or it'll be ignored when it triggers. */
443
444 if (tp->step_resume_breakpoint)
445 breakpoint_re_set_thread (tp->step_resume_breakpoint);
446
447 /* Reinsert all breakpoints in the child. The user may have set
448 breakpoints after catching the fork, in which case those
449 were never set in the child, but only in the parent. This makes
450 sure the inserted breakpoints match the breakpoint list. */
451
452 breakpoint_re_set ();
453 insert_breakpoints ();
454 }
455
456 /* The child has exited or execed: resume threads of the parent the
457 user wanted to be executing. */
458
459 static int
460 proceed_after_vfork_done (struct thread_info *thread,
461 void *arg)
462 {
463 int pid = * (int *) arg;
464
465 if (ptid_get_pid (thread->ptid) == pid
466 && is_running (thread->ptid)
467 && !is_executing (thread->ptid)
468 && !thread->stop_requested
469 && thread->stop_signal == TARGET_SIGNAL_0)
470 {
471 if (debug_infrun)
472 fprintf_unfiltered (gdb_stdlog,
473 "infrun: resuming vfork parent thread %s\n",
474 target_pid_to_str (thread->ptid));
475
476 switch_to_thread (thread->ptid);
477 clear_proceed_status ();
478 proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);
479 }
480
481 return 0;
482 }
483
484 /* Called whenever we notice an exec or exit event, to handle
485 detaching or resuming a vfork parent. */
486
487 static void
488 handle_vfork_child_exec_or_exit (int exec)
489 {
490 struct inferior *inf = current_inferior ();
491
492 if (inf->vfork_parent)
493 {
494 int resume_parent = -1;
495
496 /* This exec or exit marks the end of the shared memory region
497 between the parent and the child. If the user wanted to
498 detach from the parent, now is the time. */
499
500 if (inf->vfork_parent->pending_detach)
501 {
502 struct thread_info *tp;
503 struct cleanup *old_chain;
504 struct program_space *pspace;
505 struct address_space *aspace;
506
507 /* follow-fork child, detach-on-fork on */
508
509 old_chain = make_cleanup_restore_current_thread ();
510
511 /* We're letting go of the parent. */
512 tp = any_live_thread_of_process (inf->vfork_parent->pid);
513 switch_to_thread (tp->ptid);
514
515 /* We're about to detach from the parent, which implicitly
516 removes breakpoints from its address space. There's a
517 catch here: we want to reuse the spaces for the child,
518 but, parent/child are still sharing the pspace at this
519 point, although the exec in reality makes the kernel give
520 the child a fresh set of new pages. The problem here is
521 that the breakpoints module, being unaware of this, would
522 likely choose the child process to write to the parent
523 address space. Swapping the child temporarily away from
524 the spaces has the desired effect. Yes, this is "sort
525 of" a hack. */
526
527 pspace = inf->pspace;
528 aspace = inf->aspace;
529 inf->aspace = NULL;
530 inf->pspace = NULL;
531
532 if (debug_infrun || info_verbose)
533 {
534 target_terminal_ours ();
535
536 if (exec)
537 fprintf_filtered (gdb_stdlog,
538 "Detaching vfork parent process %d after child exec.\n",
539 inf->vfork_parent->pid);
540 else
541 fprintf_filtered (gdb_stdlog,
542 "Detaching vfork parent process %d after child exit.\n",
543 inf->vfork_parent->pid);
544 }
545
546 target_detach (NULL, 0);
547
548 /* Put it back. */
549 inf->pspace = pspace;
550 inf->aspace = aspace;
551
552 do_cleanups (old_chain);
553 }
554 else if (exec)
555 {
556 /* We're staying attached to the parent, so, really give the
557 child a new address space. */
558 inf->pspace = add_program_space (maybe_new_address_space ());
559 inf->aspace = inf->pspace->aspace;
560 inf->removable = 1;
561 set_current_program_space (inf->pspace);
562
563 resume_parent = inf->vfork_parent->pid;
564
565 /* Break the bonds. */
566 inf->vfork_parent->vfork_child = NULL;
567 }
568 else
569 {
570 struct cleanup *old_chain;
571 struct program_space *pspace;
572
573 /* If this is a vfork child exiting, then the pspace and
574 aspaces were shared with the parent. Since we're
575 reporting the process exit, we'll be mourning all that is
576 found in the address space, and switching to null_ptid,
577 preparing to start a new inferior. But, since we don't
578 want to clobber the parent's address/program spaces, we
579 go ahead and create a new one for this exiting
580 inferior. */
581
582 /* Switch to null_ptid, so that clone_program_space doesn't want
583 to read the selected frame of a dead process. */
584 old_chain = save_inferior_ptid ();
585 inferior_ptid = null_ptid;
586
587 /* This inferior is dead, so avoid giving the breakpoints
588 module the option to write through to it (cloning a
589 program space resets breakpoints). */
590 inf->aspace = NULL;
591 inf->pspace = NULL;
592 pspace = add_program_space (maybe_new_address_space ());
593 set_current_program_space (pspace);
594 inf->removable = 1;
595 clone_program_space (pspace, inf->vfork_parent->pspace);
596 inf->pspace = pspace;
597 inf->aspace = pspace->aspace;
598
599 /* Put back inferior_ptid. We'll continue mourning this
600 inferior. */
601 do_cleanups (old_chain);
602
603 resume_parent = inf->vfork_parent->pid;
604 /* Break the bonds. */
605 inf->vfork_parent->vfork_child = NULL;
606 }
607
608 inf->vfork_parent = NULL;
609
610 gdb_assert (current_program_space == inf->pspace);
611
612 if (non_stop && resume_parent != -1)
613 {
614 /* If the user wanted the parent to be running, let it go
615 free now. */
616 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
617
618 if (debug_infrun)
619 fprintf_unfiltered (gdb_stdlog, "infrun: resuming vfork parent process %d\n",
620 resume_parent);
621
622 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
623
624 do_cleanups (old_chain);
625 }
626 }
627 }
628
629 /* Enum strings for "set|show follow-exec-mode". */
630
631 static const char follow_exec_mode_new[] = "new";
632 static const char follow_exec_mode_same[] = "same";
633 static const char *follow_exec_mode_names[] =
634 {
635 follow_exec_mode_new,
636 follow_exec_mode_same,
637 NULL,
638 };
639
640 static const char *follow_exec_mode_string = follow_exec_mode_same;
641 static void
642 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
643 struct cmd_list_element *c, const char *value)
644 {
645 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
646 }
647
648 /* EXECD_PATHNAME is assumed to be non-NULL. */
649
650 static void
651 follow_exec (ptid_t pid, char *execd_pathname)
652 {
653 struct thread_info *th = inferior_thread ();
654 struct inferior *inf = current_inferior ();
655
656 /* This is an exec event that we actually wish to pay attention to.
657 Refresh our symbol table to the newly exec'd program, remove any
658 momentary bp's, etc.
659
660 If there are breakpoints, they aren't really inserted now,
661 since the exec() transformed our inferior into a fresh set
662 of instructions.
663
664 We want to preserve symbolic breakpoints on the list, since
665 we have hopes that they can be reset after the new a.out's
666 symbol table is read.
667
668 However, any "raw" breakpoints must be removed from the list
669 (e.g., the solib bp's), since their address is probably invalid
670 now.
671
672 And, we DON'T want to call delete_breakpoints() here, since
673 that may write the bp's "shadow contents" (the instruction
674 value that was overwritten with a TRAP instruction). Since
675 we now have a new a.out, those shadow contents aren't valid. */
676
677 mark_breakpoints_out ();
678
679 update_breakpoints_after_exec ();
680
681 /* If there was one, it's gone now. We cannot truly step-to-next
682 statement through an exec(). */
683 th->step_resume_breakpoint = NULL;
684 th->step_range_start = 0;
685 th->step_range_end = 0;
686
687 /* The target reports the exec event to the main thread, even if
688 some other thread does the exec, and even if the main thread was
689 already stopped --- if debugging in non-stop mode, it's possible
690 the user had the main thread held stopped in the previous image
691 --- release it now. This is the same behavior as step-over-exec
692 with scheduler-locking on in all-stop mode. */
693 th->stop_requested = 0;
694
695 /* What is this a.out's name? */
696 printf_unfiltered (_("%s is executing new program: %s\n"),
697 target_pid_to_str (inferior_ptid),
698 execd_pathname);
699
700 /* We've followed the inferior through an exec. Therefore, the
701 inferior has essentially been killed & reborn. */
702
703 gdb_flush (gdb_stdout);
704
705 breakpoint_init_inferior (inf_execd);
706
707 if (gdb_sysroot && *gdb_sysroot)
708 {
709 char *name = alloca (strlen (gdb_sysroot)
710 + strlen (execd_pathname)
711 + 1);
712
713 strcpy (name, gdb_sysroot);
714 strcat (name, execd_pathname);
715 execd_pathname = name;
716 }
717
718 /* Reset the shared library package. This ensures that we get a
719 shlib event when the child reaches "_start", at which point the
720 dld will have had a chance to initialize the child. */
721 /* Also, loading a symbol file below may trigger symbol lookups, and
722 we don't want those to be satisfied by the libraries of the
723 previous incarnation of this process. */
724 no_shared_libraries (NULL, 0);
725
726 if (follow_exec_mode_string == follow_exec_mode_new)
727 {
728 struct program_space *pspace;
729
730 /* The user wants to keep the old inferior and program spaces
731 around. Create a new fresh one, and switch to it. */
732
733 inf = add_inferior (current_inferior ()->pid);
734 pspace = add_program_space (maybe_new_address_space ());
735 inf->pspace = pspace;
736 inf->aspace = pspace->aspace;
737
738 exit_inferior_num_silent (current_inferior ()->num);
739
740 set_current_inferior (inf);
741 set_current_program_space (pspace);
742 }
743
744 gdb_assert (current_program_space == inf->pspace);
745
746 /* That a.out is now the one to use. */
747 exec_file_attach (execd_pathname, 0);
748
749 /* Load the main file's symbols. */
750 symbol_file_add_main (execd_pathname, 0);
751
752 #ifdef SOLIB_CREATE_INFERIOR_HOOK
753 SOLIB_CREATE_INFERIOR_HOOK (PIDGET (inferior_ptid));
754 #else
755 solib_create_inferior_hook (0);
756 #endif
757
758 jit_inferior_created_hook ();
759
760 /* Reinsert all breakpoints. (Those which were symbolic have
761 been reset to the proper address in the new a.out, thanks
762 to symbol_file_command...) */
763 insert_breakpoints ();
764
765 /* The next resume of this inferior should bring it to the shlib
766 startup breakpoints. (If the user had also set bp's on
767 "main" from the old (parent) process, then they'll auto-
768 matically get reset there in the new process.) */
769 }
770
771 /* Non-zero if we just simulating a single-step. This is needed
772 because we cannot remove the breakpoints in the inferior process
773 until after the `wait' in `wait_for_inferior'. */
774 static int singlestep_breakpoints_inserted_p = 0;
775
776 /* The thread we inserted single-step breakpoints for. */
777 static ptid_t singlestep_ptid;
778
779 /* PC when we started this single-step. */
780 static CORE_ADDR singlestep_pc;
781
782 /* If another thread hit the singlestep breakpoint, we save the original
783 thread here so that we can resume single-stepping it later. */
784 static ptid_t saved_singlestep_ptid;
785 static int stepping_past_singlestep_breakpoint;
786
787 /* If not equal to null_ptid, this means that after stepping over a breakpoint
788 is finished, we need to switch to deferred_step_ptid, and step it.
789
790 The use case is when one thread has hit a breakpoint, and then the user
791 has switched to another thread and issued 'step'. We need to step over
792 the breakpoint in the thread which hit the breakpoint, but then continue
793 stepping the thread the user has selected. */
794 static ptid_t deferred_step_ptid;
795 \f
796 /* Displaced stepping. */
797
798 /* In non-stop debugging mode, we must take special care to manage
799 breakpoints properly; in particular, the traditional strategy for
800 stepping a thread past a breakpoint it has hit is unsuitable.
801 'Displaced stepping' is a tactic for stepping one thread past a
802 breakpoint it has hit while ensuring that other threads running
803 concurrently will hit the breakpoint as they should.
804
805 The traditional way to step a thread T off a breakpoint in a
806 multi-threaded program in all-stop mode is as follows:
807
808 a0) Initially, all threads are stopped, and breakpoints are not
809 inserted.
810 a1) We single-step T, leaving breakpoints uninserted.
811 a2) We insert breakpoints, and resume all threads.
812
813 In non-stop debugging, however, this strategy is unsuitable: we
814 don't want to have to stop all threads in the system in order to
815 continue or step T past a breakpoint. Instead, we use displaced
816 stepping:
817
818 n0) Initially, T is stopped, other threads are running, and
819 breakpoints are inserted.
820 n1) We copy the instruction "under" the breakpoint to a separate
821 location, outside the main code stream, making any adjustments
822 to the instruction, register, and memory state as directed by
823 T's architecture.
824 n2) We single-step T over the instruction at its new location.
825 n3) We adjust the resulting register and memory state as directed
826 by T's architecture. This includes resetting T's PC to point
827 back into the main instruction stream.
828 n4) We resume T.
829
830 This approach depends on the following gdbarch methods:
831
832 - gdbarch_max_insn_length and gdbarch_displaced_step_location
833 indicate where to copy the instruction, and how much space must
834 be reserved there. We use these in step n1.
835
836 - gdbarch_displaced_step_copy_insn copies an instruction to a new
837 address, and makes any necessary adjustments to the instruction,
838 register contents, and memory. We use this in step n1.
839
840 - gdbarch_displaced_step_fixup adjusts registers and memory after
841 we have successfully single-stepped the instruction, to yield the
842 same effect the instruction would have had if we had executed it
843 at its original address. We use this in step n3.
844
845 - gdbarch_displaced_step_free_closure provides cleanup.
846
847 The gdbarch_displaced_step_copy_insn and
848 gdbarch_displaced_step_fixup functions must be written so that
849 copying an instruction with gdbarch_displaced_step_copy_insn,
850 single-stepping across the copied instruction, and then applying
851 gdbarch_displaced_step_fixup should have the same effects on the
852 thread's memory and registers as stepping the instruction in place
853 would have. Exactly which responsibilities fall to the copy and
854 which fall to the fixup is up to the author of those functions.
855
856 See the comments in gdbarch.sh for details.
857
858 Note that displaced stepping and software single-step cannot
859 currently be used in combination, although with some care I think
860 they could be made to. Software single-step works by placing
861 breakpoints on all possible subsequent instructions; if the
862 displaced instruction is a PC-relative jump, those breakpoints
863 could fall in very strange places --- on pages that aren't
864 executable, or at addresses that are not proper instruction
865 boundaries. (We do generally let other threads run while we wait
866 to hit the software single-step breakpoint, and they might
867 encounter such a corrupted instruction.) One way to work around
868 this would be to have gdbarch_displaced_step_copy_insn fully
869 simulate the effect of PC-relative instructions (and return NULL)
870 on architectures that use software single-stepping.
871
872 In non-stop mode, we can have independent and simultaneous step
873 requests, so more than one thread may need to simultaneously step
874 over a breakpoint. The current implementation assumes there is
875 only one scratch space per process. In this case, we have to
876 serialize access to the scratch space. If thread A wants to step
877 over a breakpoint, but we are currently waiting for some other
878 thread to complete a displaced step, we leave thread A stopped and
879 place it in the displaced_step_request_queue. Whenever a displaced
880 step finishes, we pick the next thread in the queue and start a new
881 displaced step operation on it. See displaced_step_prepare and
882 displaced_step_fixup for details. */
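
/* Illustrative sketch, not part of infrun.c: in terms of the helpers
   defined below, stepping thread PTID over a breakpoint roughly
   follows the n1-n4 sequence above (resume () and
   handle_inferior_event contain the real logic, including signal
   handling and request queueing):

       if (displaced_step_prepare (ptid))             n1: copy the insn,
                                                          PC -> scratch area
         target_resume (ptid, 1, TARGET_SIGNAL_0);    n2: step the copy

       ... later, when target_wait reports the completed step ...

       displaced_step_fixup (ptid, TARGET_SIGNAL_TRAP);   n3: fix up state
       ... the thread is then resumed in the original stream (n4) ...
*/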
883
884 struct displaced_step_request
885 {
886 ptid_t ptid;
887 struct displaced_step_request *next;
888 };
889
890 /* Per-inferior displaced stepping state. */
891 struct displaced_step_inferior_state
892 {
893 /* Pointer to next in linked list. */
894 struct displaced_step_inferior_state *next;
895
896 /* The process this displaced step state refers to. */
897 int pid;
898
899 /* A queue of pending displaced stepping requests. One entry per
900 thread that needs to do a displaced step. */
901 struct displaced_step_request *step_request_queue;
902
903 /* If this is not null_ptid, this is the thread carrying out a
904 displaced single-step in process PID. This thread's state will
905 require fixing up once it has completed its step. */
906 ptid_t step_ptid;
907
908 /* The architecture the thread had when we stepped it. */
909 struct gdbarch *step_gdbarch;
910
911 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
912 for post-step cleanup. */
913 struct displaced_step_closure *step_closure;
914
915 /* The address of the original instruction, and the copy we
916 made. */
917 CORE_ADDR step_original, step_copy;
918
919 /* Saved contents of copy area. */
920 gdb_byte *step_saved_copy;
921 };
922
923 /* The list of states of processes involved in displaced stepping
924 presently. */
925 static struct displaced_step_inferior_state *displaced_step_inferior_states;
926
927 /* Get the displaced stepping state of process PID. */
928
929 static struct displaced_step_inferior_state *
930 get_displaced_stepping_state (int pid)
931 {
932 struct displaced_step_inferior_state *state;
933
934 for (state = displaced_step_inferior_states;
935 state != NULL;
936 state = state->next)
937 if (state->pid == pid)
938 return state;
939
940 return NULL;
941 }
942
943 /* Add a new displaced stepping state for process PID to the displaced
944 stepping state list, or return a pointer to the existing
945 entry if one already exists. Never returns NULL. */
946
947 static struct displaced_step_inferior_state *
948 add_displaced_stepping_state (int pid)
949 {
950 struct displaced_step_inferior_state *state;
951
952 for (state = displaced_step_inferior_states;
953 state != NULL;
954 state = state->next)
955 if (state->pid == pid)
956 return state;
957
958 state = xcalloc (1, sizeof (*state));
959 state->pid = pid;
960 state->next = displaced_step_inferior_states;
961 displaced_step_inferior_states = state;
962
963 return state;
964 }
965
966 /* Remove the displaced stepping state of process PID. */
967
968 static void
969 remove_displaced_stepping_state (int pid)
970 {
971 struct displaced_step_inferior_state *it, **prev_next_p;
972
973 gdb_assert (pid != 0);
974
975 it = displaced_step_inferior_states;
976 prev_next_p = &displaced_step_inferior_states;
977 while (it)
978 {
979 if (it->pid == pid)
980 {
981 *prev_next_p = it->next;
982 xfree (it);
983 return;
984 }
985
986 prev_next_p = &it->next;
987 it = *prev_next_p;
988 }
989 }
990
991 static void
992 infrun_inferior_exit (struct inferior *inf)
993 {
994 remove_displaced_stepping_state (inf->pid);
995 }
996
997 /* Enum strings for "set|show displaced-stepping". */
998
999 static const char can_use_displaced_stepping_auto[] = "auto";
1000 static const char can_use_displaced_stepping_on[] = "on";
1001 static const char can_use_displaced_stepping_off[] = "off";
1002 static const char *can_use_displaced_stepping_enum[] =
1003 {
1004 can_use_displaced_stepping_auto,
1005 can_use_displaced_stepping_on,
1006 can_use_displaced_stepping_off,
1007 NULL,
1008 };
1009
1010 /* If ON, and the architecture supports it, GDB will use displaced
1011 stepping to step over breakpoints. If OFF, or if the architecture
1012 doesn't support it, GDB will instead use the traditional
1013 hold-and-step approach. If AUTO (which is the default), GDB will
1014 decide which technique to use to step over breakpoints depending on
1015 which of all-stop or non-stop mode is active --- displaced stepping
1016 in non-stop mode; hold-and-step in all-stop mode. */
1017
1018 static const char *can_use_displaced_stepping =
1019 can_use_displaced_stepping_auto;
1020
1021 static void
1022 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1023 struct cmd_list_element *c,
1024 const char *value)
1025 {
1026 if (can_use_displaced_stepping == can_use_displaced_stepping_auto)
1027 fprintf_filtered (file, _("\
1028 Debugger's willingness to use displaced stepping to step over \
1029 breakpoints is %s (currently %s).\n"),
1030 value, non_stop ? "on" : "off");
1031 else
1032 fprintf_filtered (file, _("\
1033 Debugger's willingness to use displaced stepping to step over \
1034 breakpoints is %s.\n"), value);
1035 }
1036
1037 /* Return non-zero if displaced stepping can/should be used to step
1038 over breakpoints. */
1039
1040 static int
1041 use_displaced_stepping (struct gdbarch *gdbarch)
1042 {
1043 return (((can_use_displaced_stepping == can_use_displaced_stepping_auto
1044 && non_stop)
1045 || can_use_displaced_stepping == can_use_displaced_stepping_on)
1046 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1047 && !RECORD_IS_USED);
1048 }
1049
1050 /* Clean out any stray displaced stepping state. */
1051 static void
1052 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1053 {
1054 /* Indicate that there is no cleanup pending. */
1055 displaced->step_ptid = null_ptid;
1056
1057 if (displaced->step_closure)
1058 {
1059 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1060 displaced->step_closure);
1061 displaced->step_closure = NULL;
1062 }
1063 }
1064
1065 static void
1066 displaced_step_clear_cleanup (void *arg)
1067 {
1068 struct displaced_step_inferior_state *state = arg;
1069
1070 displaced_step_clear (state);
1071 }
1072
1073 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1074 void
1075 displaced_step_dump_bytes (struct ui_file *file,
1076 const gdb_byte *buf,
1077 size_t len)
1078 {
1079 int i;
1080
1081 for (i = 0; i < len; i++)
1082 fprintf_unfiltered (file, "%02x ", buf[i]);
1083 fputs_unfiltered ("\n", file);
1084 }
1085
1086 /* Prepare to single-step, using displaced stepping.
1087
1088 Note that we cannot use displaced stepping when we have a signal to
1089 deliver. If we have a signal to deliver and an instruction to step
1090 over, then after the step, there will be no indication from the
1091 target whether the thread entered a signal handler or ignored the
1092 signal and stepped over the instruction successfully --- both cases
1093 result in a simple SIGTRAP. In the first case we mustn't do a
1094 fixup, and in the second case we must --- but we can't tell which.
1095 Comments in the code for 'random signals' in handle_inferior_event
1096 explain how we handle this case instead.
1097
1098 Returns 1 if preparing was successful -- this thread is going to be
1099 stepped now; or 0 if displaced stepping this thread got queued. */
1100 static int
1101 displaced_step_prepare (ptid_t ptid)
1102 {
1103 struct cleanup *old_cleanups, *ignore_cleanups;
1104 struct regcache *regcache = get_thread_regcache (ptid);
1105 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1106 CORE_ADDR original, copy;
1107 ULONGEST len;
1108 struct displaced_step_closure *closure;
1109 struct displaced_step_inferior_state *displaced;
1110
1111 /* We should never reach this function if the architecture does not
1112 support displaced stepping. */
1113 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1114
1115 /* We have to displaced step one thread at a time, as we only have
1116 access to a single scratch space per inferior. */
1117
1118 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1119
1120 if (!ptid_equal (displaced->step_ptid, null_ptid))
1121 {
1122 /* Already waiting for a displaced step to finish. Defer this
1123 request and place it in the queue. */
1124 struct displaced_step_request *req, *new_req;
1125
1126 if (debug_displaced)
1127 fprintf_unfiltered (gdb_stdlog,
1128 "displaced: deferring step of %s\n",
1129 target_pid_to_str (ptid));
1130
1131 new_req = xmalloc (sizeof (*new_req));
1132 new_req->ptid = ptid;
1133 new_req->next = NULL;
1134
1135 if (displaced->step_request_queue)
1136 {
1137 for (req = displaced->step_request_queue;
1138 req && req->next;
1139 req = req->next)
1140 ;
1141 req->next = new_req;
1142 }
1143 else
1144 displaced->step_request_queue = new_req;
1145
1146 return 0;
1147 }
1148 else
1149 {
1150 if (debug_displaced)
1151 fprintf_unfiltered (gdb_stdlog,
1152 "displaced: stepping %s now\n",
1153 target_pid_to_str (ptid));
1154 }
1155
1156 displaced_step_clear (displaced);
1157
1158 old_cleanups = save_inferior_ptid ();
1159 inferior_ptid = ptid;
1160
1161 original = regcache_read_pc (regcache);
1162
1163 copy = gdbarch_displaced_step_location (gdbarch);
1164 len = gdbarch_max_insn_length (gdbarch);
1165
1166 /* Save the original contents of the copy area. */
1167 displaced->step_saved_copy = xmalloc (len);
1168 ignore_cleanups = make_cleanup (free_current_contents,
1169 &displaced->step_saved_copy);
1170 read_memory (copy, displaced->step_saved_copy, len);
1171 if (debug_displaced)
1172 {
1173 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1174 paddress (gdbarch, copy));
1175 displaced_step_dump_bytes (gdb_stdlog,
1176 displaced->step_saved_copy,
1177 len);
1178 }
1179
1180 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1181 original, copy, regcache);
1182
1183 /* We don't support the fully-simulated case at present. */
1184 gdb_assert (closure);
1185
1186 /* Save the information we need to fix things up if the step
1187 succeeds. */
1188 displaced->step_ptid = ptid;
1189 displaced->step_gdbarch = gdbarch;
1190 displaced->step_closure = closure;
1191 displaced->step_original = original;
1192 displaced->step_copy = copy;
1193
1194 make_cleanup (displaced_step_clear_cleanup, displaced);
1195
1196 /* Resume execution at the copy. */
1197 regcache_write_pc (regcache, copy);
1198
1199 discard_cleanups (ignore_cleanups);
1200
1201 do_cleanups (old_cleanups);
1202
1203 if (debug_displaced)
1204 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1205 paddress (gdbarch, copy));
1206
1207 return 1;
1208 }
1209
1210 static void
1211 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
1212 {
1213 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1214
1215 inferior_ptid = ptid;
1216 write_memory (memaddr, myaddr, len);
1217 do_cleanups (ptid_cleanup);
1218 }
1219
1220 static void
1221 displaced_step_fixup (ptid_t event_ptid, enum target_signal signal)
1222 {
1223 struct cleanup *old_cleanups;
1224 struct displaced_step_inferior_state *displaced
1225 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1226
1227 /* Was any thread of this process doing a displaced step? */
1228 if (displaced == NULL)
1229 return;
1230
1231 /* Was this event for the thread we displaced? */
1232 if (ptid_equal (displaced->step_ptid, null_ptid)
1233 || ! ptid_equal (displaced->step_ptid, event_ptid))
1234 return;
1235
1236 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1237
1238 /* Restore the contents of the copy area. */
1239 {
1240 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1241
1242 write_memory_ptid (displaced->step_ptid, displaced->step_copy,
1243 displaced->step_saved_copy, len);
1244 if (debug_displaced)
1245 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s\n",
1246 paddress (displaced->step_gdbarch,
1247 displaced->step_copy));
1248 }
1249
1250 /* Did the instruction complete successfully? */
1251 if (signal == TARGET_SIGNAL_TRAP)
1252 {
1253 /* Fix up the resulting state. */
1254 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1255 displaced->step_closure,
1256 displaced->step_original,
1257 displaced->step_copy,
1258 get_thread_regcache (displaced->step_ptid));
1259 }
1260 else
1261 {
1262 /* Since the instruction didn't complete, all we can do is
1263 relocate the PC. */
1264 struct regcache *regcache = get_thread_regcache (event_ptid);
1265 CORE_ADDR pc = regcache_read_pc (regcache);
1266
1267 pc = displaced->step_original + (pc - displaced->step_copy);
1268 regcache_write_pc (regcache, pc);
1269 }
1270
1271 do_cleanups (old_cleanups);
1272
1273 displaced->step_ptid = null_ptid;
1274
1275 /* Are there any pending displaced stepping requests? If so, run
1276 one now. Leave the state object around, since we're likely to
1277 need it again soon. */
1278 while (displaced->step_request_queue)
1279 {
1280 struct displaced_step_request *head;
1281 ptid_t ptid;
1282 struct regcache *regcache;
1283 struct gdbarch *gdbarch;
1284 CORE_ADDR actual_pc;
1285 struct address_space *aspace;
1286
1287 head = displaced->step_request_queue;
1288 ptid = head->ptid;
1289 displaced->step_request_queue = head->next;
1290 xfree (head);
1291
1292 context_switch (ptid);
1293
1294 regcache = get_thread_regcache (ptid);
1295 actual_pc = regcache_read_pc (regcache);
1296 aspace = get_regcache_aspace (regcache);
1297
1298 if (breakpoint_here_p (aspace, actual_pc))
1299 {
1300 if (debug_displaced)
1301 fprintf_unfiltered (gdb_stdlog,
1302 "displaced: stepping queued %s now\n",
1303 target_pid_to_str (ptid));
1304
1305 displaced_step_prepare (ptid);
1306
1307 gdbarch = get_regcache_arch (regcache);
1308
1309 if (debug_displaced)
1310 {
1311 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1312 gdb_byte buf[4];
1313
1314 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1315 paddress (gdbarch, actual_pc));
1316 read_memory (actual_pc, buf, sizeof (buf));
1317 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1318 }
1319
1320 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1321 displaced->step_closure))
1322 target_resume (ptid, 1, TARGET_SIGNAL_0);
1323 else
1324 target_resume (ptid, 0, TARGET_SIGNAL_0);
1325
1326 /* Done, we're stepping a thread. */
1327 break;
1328 }
1329 else
1330 {
1331 int step;
1332 struct thread_info *tp = inferior_thread ();
1333
1334 /* The breakpoint we were sitting under has since been
1335 removed. */
1336 tp->trap_expected = 0;
1337
1338 /* Go back to what we were trying to do. */
1339 step = currently_stepping (tp);
1340
1341 if (debug_displaced)
1342 fprintf_unfiltered (gdb_stdlog, "breakpoint is gone %s: step(%d)\n",
1343 target_pid_to_str (tp->ptid), step);
1344
1345 target_resume (ptid, step, TARGET_SIGNAL_0);
1346 tp->stop_signal = TARGET_SIGNAL_0;
1347
1348 /* This request was discarded. See if there's any other
1349 thread waiting for its turn. */
1350 }
1351 }
1352 }
1353
1354 /* Update global variables holding ptids to hold NEW_PTID if they were
1355 holding OLD_PTID. */
1356 static void
1357 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1358 {
1359 struct displaced_step_request *it;
1360 struct displaced_step_inferior_state *displaced;
1361
1362 if (ptid_equal (inferior_ptid, old_ptid))
1363 inferior_ptid = new_ptid;
1364
1365 if (ptid_equal (singlestep_ptid, old_ptid))
1366 singlestep_ptid = new_ptid;
1367
1368 if (ptid_equal (deferred_step_ptid, old_ptid))
1369 deferred_step_ptid = new_ptid;
1370
1371 for (displaced = displaced_step_inferior_states;
1372 displaced;
1373 displaced = displaced->next)
1374 {
1375 if (ptid_equal (displaced->step_ptid, old_ptid))
1376 displaced->step_ptid = new_ptid;
1377
1378 for (it = displaced->step_request_queue; it; it = it->next)
1379 if (ptid_equal (it->ptid, old_ptid))
1380 it->ptid = new_ptid;
1381 }
1382 }
1383
1384 \f
1385 /* Resuming. */
1386
1387 /* Things to clean up if we QUIT out of resume (). */
1388 static void
1389 resume_cleanups (void *ignore)
1390 {
1391 normal_stop ();
1392 }
1393
1394 static const char schedlock_off[] = "off";
1395 static const char schedlock_on[] = "on";
1396 static const char schedlock_step[] = "step";
1397 static const char *scheduler_enums[] = {
1398 schedlock_off,
1399 schedlock_on,
1400 schedlock_step,
1401 NULL
1402 };
1403 static const char *scheduler_mode = schedlock_off;
1404 static void
1405 show_scheduler_mode (struct ui_file *file, int from_tty,
1406 struct cmd_list_element *c, const char *value)
1407 {
1408 fprintf_filtered (file, _("\
1409 Mode for locking scheduler during execution is \"%s\".\n"),
1410 value);
1411 }
1412
1413 static void
1414 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1415 {
1416 if (!target_can_lock_scheduler)
1417 {
1418 scheduler_mode = schedlock_off;
1419 error (_("Target '%s' cannot support this command."), target_shortname);
1420 }
1421 }
1422
1423 /* If scheduler locking is on, reset it to 'off' and warn the user. */
1424
1425 void
1426 reset_schedlock (void)
1427 {
1428 if (scheduler_mode == schedlock_on)
1429 {
1430 warning (_("Resetting scheduler-lock mode to 'off'"));
1431 scheduler_mode = schedlock_off;
1432 }
1433 }
1434
1435 /* True if execution commands resume all threads of all processes by
1436 default; otherwise, resume only threads of the current inferior
1437 process. */
1438 int sched_multi = 0;
1439
1440 /* Try to set up for software single stepping over the specified location.
1441 Return 1 if target_resume() should use hardware single step.
1442
1443 GDBARCH the current gdbarch.
1444 PC the location to step over. */
1445
1446 static int
1447 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1448 {
1449 int hw_step = 1;
1450
1451 if (gdbarch_software_single_step_p (gdbarch)
1452 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1453 {
1454 hw_step = 0;
1455 /* Do not pull these breakpoints until after a `wait' in
1456 `wait_for_inferior' */
1457 singlestep_breakpoints_inserted_p = 1;
1458 singlestep_ptid = inferior_ptid;
1459 singlestep_pc = pc;
1460 }
1461 return hw_step;
1462 }
1463
1464 /* Resume the inferior, but allow a QUIT. This is useful if the user
1465 wants to interrupt some lengthy single-stepping operation
1466 (for child processes, the SIGINT goes to the inferior, and so
1467 we get a SIGINT random_signal, but for remote debugging and perhaps
1468 other targets, that's not true).
1469
1470 STEP nonzero if we should step (zero to continue instead).
1471 SIG is the signal to give the inferior (zero for none). */
1472 void
1473 resume (int step, enum target_signal sig)
1474 {
1475 int should_resume = 1;
1476 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1477 struct regcache *regcache = get_current_regcache ();
1478 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1479 struct thread_info *tp = inferior_thread ();
1480 CORE_ADDR pc = regcache_read_pc (regcache);
1481 struct address_space *aspace = get_regcache_aspace (regcache);
1482
1483 QUIT;
1484
1485 if (debug_infrun)
1486 fprintf_unfiltered (gdb_stdlog,
1487 "infrun: resume (step=%d, signal=%d), "
1488 "trap_expected=%d\n",
1489 step, sig, tp->trap_expected);
1490
1491 /* Normally, by the time we reach `resume', the breakpoints are either
1492 removed or inserted, as appropriate. The exception is if we're sitting
1493 at a permanent breakpoint; we need to step over it, but permanent
1494 breakpoints can't be removed. So we have to test for it here. */
1495 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
1496 {
1497 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1498 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1499 else
1500 error (_("\
1501 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1502 how to step past a permanent breakpoint on this architecture. Try using\n\
1503 a command like `return' or `jump' to continue execution."));
1504 }
1505
1506 /* If enabled, step over breakpoints by executing a copy of the
1507 instruction at a different address.
1508
1509 We can't use displaced stepping when we have a signal to deliver;
1510 the comments for displaced_step_prepare explain why. The
1511 comments in the handle_inferior event for dealing with 'random
1512 signals' explain what we do instead. */
1513 if (use_displaced_stepping (gdbarch)
1514 && (tp->trap_expected
1515 || (step && gdbarch_software_single_step_p (gdbarch)))
1516 && sig == TARGET_SIGNAL_0)
1517 {
1518 struct displaced_step_inferior_state *displaced;
1519
1520 if (!displaced_step_prepare (inferior_ptid))
1521 {
1522 /* Got placed in displaced stepping queue. Will be resumed
1523 later when all the currently queued displaced stepping
1524 requests finish. The thread is not executing at this point,
1525 and the call to set_executing will be made later. But we
1526 need to call set_running here, since from the frontend's point of view,
1527 the thread is running. */
1528 set_running (inferior_ptid, 1);
1529 discard_cleanups (old_cleanups);
1530 return;
1531 }
1532
1533 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1534 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
1535 displaced->step_closure);
1536 }
1537
1538 /* Do we need to do it the hard way, w/temp breakpoints? */
1539 else if (step)
1540 step = maybe_software_singlestep (gdbarch, pc);
1541
1542 if (should_resume)
1543 {
1544 ptid_t resume_ptid;
1545
1546 /* If STEP is set, it's a request to use hardware stepping
1547 facilities. But in that case, we should never
1548 use singlestep breakpoints. */
1549 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1550
1551 /* Decide the set of threads to ask the target to resume. Start
1552 by assuming everything will be resumed, then narrow the set
1553 by applying increasingly restrictive conditions. */
1554
1555 /* By default, resume all threads of all processes. */
1556 resume_ptid = RESUME_ALL;
1557
1558 /* Maybe resume only all threads of the current process. */
1559 if (!sched_multi && target_supports_multi_process ())
1560 {
1561 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1562 }
1563
1564 /* Maybe resume a single thread after all. */
1565 if (singlestep_breakpoints_inserted_p
1566 && stepping_past_singlestep_breakpoint)
1567 {
1568 /* The situation here is as follows. In thread T1 we wanted to
1569 single-step. Lacking hardware single-stepping we've
1570 set a breakpoint at the PC of the next instruction -- call it
1571 P. After resuming, we've hit that breakpoint in thread T2.
1572 Now we've removed the original breakpoint, inserted a breakpoint
1573 at P+1, and are trying to step to advance T2 past the breakpoint.
1574 We need to step only T2, as if T1 is allowed to run freely,
1575 it can run past P, and if other threads are allowed to run,
1576 they can hit the breakpoint at P+1, and nested hits of single-step
1577 breakpoints are not something we'd want -- that's complicated
1578 to support, and has no value. */
1579 resume_ptid = inferior_ptid;
1580 }
1581 else if ((step || singlestep_breakpoints_inserted_p)
1582 && tp->trap_expected)
1583 {
1584 /* We're allowing a thread to run past a breakpoint it has
1585 hit, by single-stepping the thread with the breakpoint
1586 removed. In that case, we need to single-step only this
1587 thread, and keep others stopped, as they can miss this
1588 breakpoint if allowed to run.
1589
1590 The current code actually removes all breakpoints when
1591 doing this, not just the one being stepped over, so if we
1592 let other threads run, we can actually miss any
1593 breakpoint, not just the one at PC. */
1594 resume_ptid = inferior_ptid;
1595 }
1596 else if (non_stop)
1597 {
1598 /* With non-stop mode on, threads are always handled
1599 individually. */
1600 resume_ptid = inferior_ptid;
1601 }
1602 else if ((scheduler_mode == schedlock_on)
1603 || (scheduler_mode == schedlock_step
1604 && (step || singlestep_breakpoints_inserted_p)))
1605 {
1606 /* User-settable 'scheduler' mode requires solo thread resume. */
1607 resume_ptid = inferior_ptid;
1608 }
1609
1610 if (gdbarch_cannot_step_breakpoint (gdbarch))
1611 {
1612 /* Most targets can step a breakpoint instruction, thus
1613 executing it normally. But if this one cannot, just
1614 continue and we will hit it anyway. */
1615 if (step && breakpoint_inserted_here_p (aspace, pc))
1616 step = 0;
1617 }
1618
1619 if (debug_displaced
1620 && use_displaced_stepping (gdbarch)
1621 && tp->trap_expected)
1622 {
1623 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1624 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1625 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1626 gdb_byte buf[4];
1627
1628 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1629 paddress (resume_gdbarch, actual_pc));
1630 read_memory (actual_pc, buf, sizeof (buf));
1631 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1632 }
1633
1634 /* Install inferior's terminal modes. */
1635 target_terminal_inferior ();
1636
1637 /* Avoid confusing the next resume, if the next stop/resume
1638 happens to apply to another thread. */
1639 tp->stop_signal = TARGET_SIGNAL_0;
1640
1641 target_resume (resume_ptid, step, sig);
1642 }
1643
1644 discard_cleanups (old_cleanups);
1645 }
1646 \f
1647 /* Proceeding. */
1648
1649 /* Clear out all variables saying what to do when the inferior is
1650 First do this, then set the ones you want, then call `proceed'. */
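
/* Illustrative sketch, not part of infrun.c: the calling pattern the
   comment above describes is the one proceed_after_vfork_done uses
   earlier in this file:

       clear_proceed_status ();
       ... set any stepping-related fields you want ...
       proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);
*/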
1651
1652 static void
1653 clear_proceed_status_thread (struct thread_info *tp)
1654 {
1655 if (debug_infrun)
1656 fprintf_unfiltered (gdb_stdlog,
1657 "infrun: clear_proceed_status_thread (%s)\n",
1658 target_pid_to_str (tp->ptid));
1659
1660 tp->trap_expected = 0;
1661 tp->step_range_start = 0;
1662 tp->step_range_end = 0;
1663 tp->step_frame_id = null_frame_id;
1664 tp->step_stack_frame_id = null_frame_id;
1665 tp->step_over_calls = STEP_OVER_UNDEBUGGABLE;
1666 tp->stop_requested = 0;
1667
1668 tp->stop_step = 0;
1669
1670 tp->proceed_to_finish = 0;
1671
1672 /* Discard any remaining commands or status from previous stop. */
1673 bpstat_clear (&tp->stop_bpstat);
1674 }
1675
1676 static int
1677 clear_proceed_status_callback (struct thread_info *tp, void *data)
1678 {
1679 if (is_exited (tp->ptid))
1680 return 0;
1681
1682 clear_proceed_status_thread (tp);
1683 return 0;
1684 }
1685
1686 void
1687 clear_proceed_status (void)
1688 {
1689 if (!non_stop)
1690 {
1691 /* In all-stop mode, delete the per-thread status of all
1692 threads, even if inferior_ptid is null_ptid, there may be
1693 threads on the list. E.g., we may be launching a new
1694 process, while selecting the executable. */
1695 iterate_over_threads (clear_proceed_status_callback, NULL);
1696 }
1697
1698 if (!ptid_equal (inferior_ptid, null_ptid))
1699 {
1700 struct inferior *inferior;
1701
1702 if (non_stop)
1703 {
1704 /* If in non-stop mode, only delete the per-thread status of
1705 the current thread. */
1706 clear_proceed_status_thread (inferior_thread ());
1707 }
1708
1709 inferior = current_inferior ();
1710 inferior->stop_soon = NO_STOP_QUIETLY;
1711 }
1712
1713 stop_after_trap = 0;
1714
1715 observer_notify_about_to_proceed ();
1716
1717 if (stop_registers)
1718 {
1719 regcache_xfree (stop_registers);
1720 stop_registers = NULL;
1721 }
1722 }
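
/* Illustrative sketch only, kept out of the build: a hypothetical
   stepping-style command following the protocol described above --
   clear the per-thread state, set just the fields it needs, then call
   proceed.  The function name and its START/END parameters are made
   up; the thread_info fields are the ones reset by
   clear_proceed_status_thread, and a real stepping command would also
   record the step frame via set_step_info.  */
#if 0
static void
example_step_in_range (CORE_ADDR start, CORE_ADDR end)
{
  struct thread_info *tp;

  clear_proceed_status ();

  tp = inferior_thread ();
  tp->step_range_start = start;	/* Keep stepping while the PC stays */
  tp->step_range_end = end;		/* within this address range.  */
  tp->step_over_calls = STEP_OVER_ALL;	/* Step over subroutine calls.  */

  proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 1);
}
#endif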
1723
1724 /* Check the current thread against the thread that reported the most recent
1725 event. If a step-over is required, return TRUE and set the current thread
1726 to the old thread. Otherwise, return FALSE.
1727
1728 This should be suitable for any targets that support threads. */
1729
1730 static int
1731 prepare_to_proceed (int step)
1732 {
1733 ptid_t wait_ptid;
1734 struct target_waitstatus wait_status;
1735 int schedlock_enabled;
1736
1737 /* With non-stop mode on, threads are always handled individually. */
1738 gdb_assert (! non_stop);
1739
1740 /* Get the last target status returned by target_wait(). */
1741 get_last_target_status (&wait_ptid, &wait_status);
1742
1743 /* Make sure we were stopped at a breakpoint. */
1744 if (wait_status.kind != TARGET_WAITKIND_STOPPED
1745 || (wait_status.value.sig != TARGET_SIGNAL_TRAP
1746 && wait_status.value.sig != TARGET_SIGNAL_ILL
1747 && wait_status.value.sig != TARGET_SIGNAL_SEGV
1748 && wait_status.value.sig != TARGET_SIGNAL_EMT))
1749 {
1750 return 0;
1751 }
1752
1753 schedlock_enabled = (scheduler_mode == schedlock_on
1754 || (scheduler_mode == schedlock_step
1755 && step));
1756
1757 /* Don't switch over to WAIT_PTID if scheduler locking is on. */
1758 if (schedlock_enabled)
1759 return 0;
1760
1761 /* Don't switch over if we're about to resume some other process
1762 other than WAIT_PTID's, and schedule-multiple is off. */
1763 if (!sched_multi
1764 && ptid_get_pid (wait_ptid) != ptid_get_pid (inferior_ptid))
1765 return 0;
1766
1767 /* We have switched away from WAIT_PTID's thread since the last event. */
1768 if (!ptid_equal (wait_ptid, minus_one_ptid)
1769 && !ptid_equal (inferior_ptid, wait_ptid))
1770 {
1771 struct regcache *regcache = get_thread_regcache (wait_ptid);
1772
1773 if (breakpoint_here_p (get_regcache_aspace (regcache),
1774 regcache_read_pc (regcache)))
1775 {
1776 /* If stepping, remember current thread to switch back to. */
1777 if (step)
1778 deferred_step_ptid = inferior_ptid;
1779
1780 /* Switch back to WAIT_PTID's thread. */
1781 switch_to_thread (wait_ptid);
1782
1783 /* We return 1 to indicate that there is a breakpoint here,
1784 so we need to step over it before continuing to avoid
1785 hitting it straight away. */
1786 return 1;
1787 }
1788 }
1789
1790 return 0;
1791 }
1792
1793 /* Basic routine for continuing the program in various fashions.
1794
1795 ADDR is the address to resume at, or -1 for resume where stopped.
1796 SIGGNAL is the signal to give it, or 0 for none,
1797 or -1 to act according to how it stopped.
1798 STEP is nonzero if we should trap after one instruction;
1799 -1 means return after that and print nothing.
1800 You should probably set various step_... variables
1801 before calling here, if you are stepping.
1802
1803 You should call clear_proceed_status before calling proceed. */
1804
1805 void
1806 proceed (CORE_ADDR addr, enum target_signal siggnal, int step)
1807 {
1808 struct regcache *regcache;
1809 struct gdbarch *gdbarch;
1810 struct thread_info *tp;
1811 CORE_ADDR pc;
1812 struct address_space *aspace;
1813 int oneproc = 0;
1814
1815 /* If we're stopped at a fork/vfork, follow the branch set by the
1816 "set follow-fork-mode" command; otherwise, we'll just proceed
1817 resuming the current thread. */
1818 if (!follow_fork ())
1819 {
1820 /* The target for some reason decided not to resume. */
1821 normal_stop ();
1822 return;
1823 }
1824
1825 regcache = get_current_regcache ();
1826 gdbarch = get_regcache_arch (regcache);
1827 aspace = get_regcache_aspace (regcache);
1828 pc = regcache_read_pc (regcache);
1829
1830 if (step > 0)
1831 step_start_function = find_pc_function (pc);
1832 if (step < 0)
1833 stop_after_trap = 1;
1834
1835 if (addr == (CORE_ADDR) -1)
1836 {
1837 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
1838 && execution_direction != EXEC_REVERSE)
1839 /* There is a breakpoint at the address we will resume at;
1840 step one instruction before inserting breakpoints so that
1841 we do not stop right away (and report a second hit at this
1842 breakpoint).
1843
1844 Note, we don't do this in reverse, because we won't
1845 actually be executing the breakpoint insn anyway.
1846 We'll be (un-)executing the previous instruction. */
1847
1848 oneproc = 1;
1849 else if (gdbarch_single_step_through_delay_p (gdbarch)
1850 && gdbarch_single_step_through_delay (gdbarch,
1851 get_current_frame ()))
1852 /* We stepped onto an instruction that needs to be stepped
1853 again before re-inserting the breakpoint, do so. */
1854 oneproc = 1;
1855 }
1856 else
1857 {
1858 regcache_write_pc (regcache, addr);
1859 }
1860
1861 if (debug_infrun)
1862 fprintf_unfiltered (gdb_stdlog,
1863 "infrun: proceed (addr=%s, signal=%d, step=%d)\n",
1864 paddress (gdbarch, addr), siggnal, step);
1865
1866 /* We're handling a live event, so make sure we're doing live
1867 debugging. If we're looking at traceframes while the target is
1868 running, we're going to need to get back to that mode after
1869 handling the event. */
1870 if (non_stop)
1871 {
1872 make_cleanup_restore_current_traceframe ();
1873 set_traceframe_number (-1);
1874 }
1875
1876 if (non_stop)
1877 /* In non-stop, each thread is handled individually. The context
1878 must already be set to the right thread here. */
1879 ;
1880 else
1881 {
1882 /* In a multi-threaded task we may select another thread and
1883 then continue or step.
1884
1885 But if the old thread was stopped at a breakpoint, it will
1886 immediately cause another breakpoint stop without any
1887 execution (i.e. it will report a breakpoint hit incorrectly).
1888 So we must step over it first.
1889
1890 prepare_to_proceed checks the current thread against the
1891 thread that reported the most recent event. If a step-over
1892 is required it returns TRUE and sets the current thread to
1893 the old thread. */
1894 if (prepare_to_proceed (step))
1895 oneproc = 1;
1896 }
1897
1898 /* prepare_to_proceed may change the current thread. */
1899 tp = inferior_thread ();
1900
1901 if (oneproc)
1902 {
1903 tp->trap_expected = 1;
1904 /* If displaced stepping is enabled, we can step over the
1905 breakpoint without hitting it, so leave all breakpoints
1906 inserted. Otherwise we need to disable all breakpoints, step
1907 one instruction, and then re-add them when that step is
1908 finished. */
1909 if (!use_displaced_stepping (gdbarch))
1910 remove_breakpoints ();
1911 }
1912
1913 /* We can insert breakpoints if we're not trying to step over one,
1914 or if we are stepping over one but we're using displaced stepping
1915 to do so. */
1916 if (! tp->trap_expected || use_displaced_stepping (gdbarch))
1917 insert_breakpoints ();
1918
1919 if (!non_stop)
1920 {
1921 /* Pass the last stop signal to the thread we're resuming,
1922 irrespective of whether the current thread is the thread that
1923 got the last event or not. This was historically GDB's
1924 behaviour before keeping a stop_signal per thread. */
1925
1926 struct thread_info *last_thread;
1927 ptid_t last_ptid;
1928 struct target_waitstatus last_status;
1929
1930 get_last_target_status (&last_ptid, &last_status);
1931 if (!ptid_equal (inferior_ptid, last_ptid)
1932 && !ptid_equal (last_ptid, null_ptid)
1933 && !ptid_equal (last_ptid, minus_one_ptid))
1934 {
1935 last_thread = find_thread_ptid (last_ptid);
1936 if (last_thread)
1937 {
1938 tp->stop_signal = last_thread->stop_signal;
1939 last_thread->stop_signal = TARGET_SIGNAL_0;
1940 }
1941 }
1942 }
1943
1944 if (siggnal != TARGET_SIGNAL_DEFAULT)
1945 tp->stop_signal = siggnal;
1946 /* If this signal should not be seen by the program,
1947 give it zero. Used for debugging signals. */
1948 else if (!signal_program[tp->stop_signal])
1949 tp->stop_signal = TARGET_SIGNAL_0;
1950
1951 annotate_starting ();
1952
1953 /* Make sure that output from GDB appears before output from the
1954 inferior. */
1955 gdb_flush (gdb_stdout);
1956
1957 /* Refresh prev_pc value just prior to resuming. This used to be
1958 done in stop_stepping, however, setting prev_pc there did not handle
1959 scenarios such as inferior function calls or returning from
1960 a function via the return command. In those cases, the prev_pc
1961 value was not set properly for subsequent commands. The prev_pc value
1962 is used to initialize the starting line number in the ecs. With an
1963 invalid value, the gdb next command ends up stopping at the position
1964 represented by the next line table entry past our start position.
1965 On platforms that generate one line table entry per line, this
1966 is not a problem. However, on the ia64, the compiler generates
1967 extraneous line table entries that do not increase the line number.
1968 When we issue the gdb next command on the ia64 after an inferior call
1969 or a return command, we often end up a few instructions forward, still
1970 within the original line we started from.
1971
1972 An attempt was made to refresh the prev_pc at the same time the
1973 execution_control_state is initialized (for instance, just before
1974 waiting for an inferior event). But this approach did not work
1975 because of platforms that use ptrace, where the pc register cannot
1976 be read unless the inferior is stopped. At that point, we are not
1977 guaranteed the inferior is stopped and so the regcache_read_pc() call
1978 can fail. Setting the prev_pc value here ensures the value is updated
1979 correctly when the inferior is stopped. */
1980 tp->prev_pc = regcache_read_pc (get_current_regcache ());
1981
1982 /* Fill in with reasonable starting values. */
1983 init_thread_stepping_state (tp);
1984
1985 /* Reset to normal state. */
1986 init_infwait_state ();
1987
1988 /* Resume inferior. */
1989 resume (oneproc || step || bpstat_should_step (), tp->stop_signal);
1990
1991 /* Wait for it to stop (if not standalone)
1992 and in any case decode why it stopped, and act accordingly. */
1993 /* Do this only if we are not using the event loop, or if the target
1994 does not support asynchronous execution. */
1995 if (!target_can_async_p ())
1996 {
1997 wait_for_inferior (0);
1998 normal_stop ();
1999 }
2000 }
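
/* Illustrative sketch only, kept out of the build: the simplest
   callers of the routine above.  The function names are made up; the
   argument conventions are the ones documented in proceed's comment.  */
#if 0
static void
example_continue_command (void)
{
  clear_proceed_status ();
  /* Resume where we stopped (-1), with the signal chosen according
     to how the inferior stopped, without trapping after one insn.  */
  proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);
}

static void
example_jump_to (CORE_ADDR addr)
{
  clear_proceed_status ();
  /* Resume at an explicit address, delivering no signal.  */
  proceed (addr, TARGET_SIGNAL_0, 0);
}
#endif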
2001 \f
2002
2003 /* Start remote-debugging of a machine over a serial link. */
2004
2005 void
2006 start_remote (int from_tty)
2007 {
2008 struct inferior *inferior;
2009
2010 init_wait_for_inferior ();
2011 inferior = current_inferior ();
2012 inferior->stop_soon = STOP_QUIETLY_REMOTE;
2013
2014 /* Always go on waiting for the target, regardless of the mode. */
2015 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2016 indicate to wait_for_inferior that a target should timeout if
2017 nothing is returned (instead of just blocking). Because of this,
2018 targets expecting an immediate response need to, internally, set
2019 things up so that the target_wait() is forced to eventually
2020 timeout. */
2021 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2022 differentiate to its caller what the state of the target is after
2023 the initial open has been performed. Here we're assuming that
2024 the target has stopped. It should be possible to eventually have
2025 target_open() return to the caller an indication that the target
2026 is currently running and GDB state should be set to the same as
2027 for an async run. */
2028 wait_for_inferior (0);
2029
2030 /* Now that the inferior has stopped, do any bookkeeping like
2031 loading shared libraries. We want to do this before normal_stop,
2032 so that the displayed frame is up to date. */
2033 post_create_inferior (&current_target, from_tty);
2034
2035 normal_stop ();
2036 }
2037
2038 /* Initialize static vars when a new inferior begins. */
2039
2040 void
2041 init_wait_for_inferior (void)
2042 {
2043 /* These are meaningless until the first time through wait_for_inferior. */
2044
2045 breakpoint_init_inferior (inf_starting);
2046
2047 clear_proceed_status ();
2048
2049 stepping_past_singlestep_breakpoint = 0;
2050 deferred_step_ptid = null_ptid;
2051
2052 target_last_wait_ptid = minus_one_ptid;
2053
2054 previous_inferior_ptid = null_ptid;
2055 init_infwait_state ();
2056
2057 /* Discard any skipped inlined frames. */
2058 clear_inline_frame_state (minus_one_ptid);
2059 }
2060
2061 \f
2062 /* This enum encodes possible reasons for doing a target_wait, so that
2063 wfi can call target_wait in one place. (Ultimately the call will be
2064 moved out of the infinite loop entirely.) */
2065
2066 enum infwait_states
2067 {
2068 infwait_normal_state,
2069 infwait_thread_hop_state,
2070 infwait_step_watch_state,
2071 infwait_nonstep_watch_state
2072 };
2073
2074 /* Why did the inferior stop? Used to print the appropriate messages
2075 to the interface from within handle_inferior_event(). */
2076 enum inferior_stop_reason
2077 {
2078 /* Step, next, nexti, stepi finished. */
2079 END_STEPPING_RANGE,
2080 /* Inferior terminated by signal. */
2081 SIGNAL_EXITED,
2082 /* Inferior exited. */
2083 EXITED,
2084 /* Inferior received signal, and user asked to be notified. */
2085 SIGNAL_RECEIVED,
2086 /* Reverse execution -- target ran out of history info. */
2087 NO_HISTORY
2088 };
2089
2090 /* The PTID we'll do a target_wait on. */
2091 ptid_t waiton_ptid;
2092
2093 /* Current inferior wait state. */
2094 enum infwait_states infwait_state;
2095
2096 /* Data to be passed around while handling an event. This data is
2097 discarded between events. */
2098 struct execution_control_state
2099 {
2100 ptid_t ptid;
2101 /* The thread that got the event, if this was a thread event; NULL
2102 otherwise. */
2103 struct thread_info *event_thread;
2104
2105 struct target_waitstatus ws;
2106 int random_signal;
2107 CORE_ADDR stop_func_start;
2108 CORE_ADDR stop_func_end;
2109 char *stop_func_name;
2110 int new_thread_event;
2111 int wait_some_more;
2112 };
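
/* Illustrative sketch only, kept out of the build; condensed from the
   real event loops below (wait_for_inferior, fetch_inferior_event) to
   show how one of these structures is filled in and consumed for a
   single event before being discarded.  */
#if 0
  {
    struct execution_control_state ecss;
    struct execution_control_state *ecs = &ecss;

    memset (ecs, 0, sizeof (*ecs));

    /* target_wait fills in ECS->ptid and ECS->ws ...  */
    ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);

    /* ... and handle_inferior_event decides what the event means,
       setting ECS->wait_some_more if the inferior should keep going.  */
    handle_inferior_event (ecs);

    if (!ecs->wait_some_more)
      normal_stop ();
  }
#endif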
2113
2114 static void handle_inferior_event (struct execution_control_state *ecs);
2115
2116 static void handle_step_into_function (struct gdbarch *gdbarch,
2117 struct execution_control_state *ecs);
2118 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2119 struct execution_control_state *ecs);
2120 static void insert_step_resume_breakpoint_at_frame (struct frame_info *step_frame);
2121 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
2122 static void insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
2123 struct symtab_and_line sr_sal,
2124 struct frame_id sr_id);
2125 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
2126
2127 static void stop_stepping (struct execution_control_state *ecs);
2128 static void prepare_to_wait (struct execution_control_state *ecs);
2129 static void keep_going (struct execution_control_state *ecs);
2130 static void print_stop_reason (enum inferior_stop_reason stop_reason,
2131 int stop_info);
2132
2133 /* Callback for iterate over threads. If the thread is stopped, but
2134 the user/frontend doesn't know about that yet, go through
2135 normal_stop, as if the thread had just stopped now. ARG points at
2136 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2137 ptid_is_pid(PTID) is true, applies to all threads of the process
2138 pointed at by PTID. Otherwise, applies only to the thread pointed
2139 to by PTID. */
2140
2141 static int
2142 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2143 {
2144 ptid_t ptid = * (ptid_t *) arg;
2145
2146 if ((ptid_equal (info->ptid, ptid)
2147 || ptid_equal (minus_one_ptid, ptid)
2148 || (ptid_is_pid (ptid)
2149 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2150 && is_running (info->ptid)
2151 && !is_executing (info->ptid))
2152 {
2153 struct cleanup *old_chain;
2154 struct execution_control_state ecss;
2155 struct execution_control_state *ecs = &ecss;
2156
2157 memset (ecs, 0, sizeof (*ecs));
2158
2159 old_chain = make_cleanup_restore_current_thread ();
2160
2161 switch_to_thread (info->ptid);
2162
2163 /* Go through handle_inferior_event/normal_stop, so we always
2164 have consistent output as if the stop event had been
2165 reported. */
2166 ecs->ptid = info->ptid;
2167 ecs->event_thread = find_thread_ptid (info->ptid);
2168 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2169 ecs->ws.value.sig = TARGET_SIGNAL_0;
2170
2171 handle_inferior_event (ecs);
2172
2173 if (!ecs->wait_some_more)
2174 {
2175 struct thread_info *tp;
2176
2177 normal_stop ();
2178
2179 /* Finish off the continuations. The continuations
2180 themselves are responsible for realizing that the thread
2181 didn't finish what it was supposed to do. */
2182 tp = inferior_thread ();
2183 do_all_intermediate_continuations_thread (tp);
2184 do_all_continuations_thread (tp);
2185 }
2186
2187 do_cleanups (old_chain);
2188 }
2189
2190 return 0;
2191 }
2192
2193 /* This function is attached as a "thread_stop_requested" observer.
2194 Cleanup local state that assumed the PTID was to be resumed, and
2195 report the stop to the frontend. */
2196
2197 static void
2198 infrun_thread_stop_requested (ptid_t ptid)
2199 {
2200 struct displaced_step_inferior_state *displaced;
2201
2202 /* PTID was requested to stop. Remove it from the displaced
2203 stepping queue, so we don't try to resume it automatically. */
2204
2205 for (displaced = displaced_step_inferior_states;
2206 displaced;
2207 displaced = displaced->next)
2208 {
2209 struct displaced_step_request *it, **prev_next_p;
2210
2211 it = displaced->step_request_queue;
2212 prev_next_p = &displaced->step_request_queue;
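      /* Walk the queue keeping PREV_NEXT_P pointing at the previous
         element's `next' link, so a matching request can be unlinked
         in place without tracking a separate `prev' pointer.  */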
2213 while (it)
2214 {
2215 if (ptid_match (it->ptid, ptid))
2216 {
2217 *prev_next_p = it->next;
2218 it->next = NULL;
2219 xfree (it);
2220 }
2221 else
2222 {
2223 prev_next_p = &it->next;
2224 }
2225
2226 it = *prev_next_p;
2227 }
2228 }
2229
2230 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2231 }
2232
2233 static void
2234 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2235 {
2236 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2237 nullify_last_target_wait_ptid ();
2238 }
2239
2240 /* Callback for iterate_over_threads. */
2241
2242 static int
2243 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2244 {
2245 if (is_exited (info->ptid))
2246 return 0;
2247
2248 delete_step_resume_breakpoint (info);
2249 return 0;
2250 }
2251
2252 /* In all-stop, delete the step resume breakpoint of any thread that
2253 had one. In non-stop, delete the step resume breakpoint of the
2254 thread that just stopped. */
2255
2256 static void
2257 delete_step_thread_step_resume_breakpoint (void)
2258 {
2259 if (!target_has_execution
2260 || ptid_equal (inferior_ptid, null_ptid))
2261 /* If the inferior has exited, we have already deleted the step
2262 resume breakpoints out of GDB's lists. */
2263 return;
2264
2265 if (non_stop)
2266 {
2267 /* If in non-stop mode, only delete the step-resume or
2268 longjmp-resume breakpoint of the thread that just stopped
2269 stepping. */
2270 struct thread_info *tp = inferior_thread ();
2271
2272 delete_step_resume_breakpoint (tp);
2273 }
2274 else
2275 /* In all-stop mode, delete all step-resume and longjmp-resume
2276 breakpoints of any thread that had them. */
2277 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2278 }
2279
2280 /* A cleanup wrapper. */
2281
2282 static void
2283 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2284 {
2285 delete_step_thread_step_resume_breakpoint ();
2286 }
2287
2288 /* Pretty print the results of target_wait, for debugging purposes. */
2289
2290 static void
2291 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2292 const struct target_waitstatus *ws)
2293 {
2294 char *status_string = target_waitstatus_to_string (ws);
2295 struct ui_file *tmp_stream = mem_fileopen ();
2296 char *text;
2297
2298 /* The text is split over several lines because it was getting too long.
2299 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2300 output as a unit; we want only one timestamp printed if debug_timestamp
2301 is set. */
2302
2303 fprintf_unfiltered (tmp_stream,
2304 "infrun: target_wait (%d", PIDGET (waiton_ptid));
2305 if (PIDGET (waiton_ptid) != -1)
2306 fprintf_unfiltered (tmp_stream,
2307 " [%s]", target_pid_to_str (waiton_ptid));
2308 fprintf_unfiltered (tmp_stream, ", status) =\n");
2309 fprintf_unfiltered (tmp_stream,
2310 "infrun: %d [%s],\n",
2311 PIDGET (result_ptid), target_pid_to_str (result_ptid));
2312 fprintf_unfiltered (tmp_stream,
2313 "infrun: %s\n",
2314 status_string);
2315
2316 text = ui_file_xstrdup (tmp_stream, NULL);
2317
2318 /* This uses %s in part to handle %'s in the text, but also to avoid
2319 a gcc error: the format attribute requires a string literal. */
2320 fprintf_unfiltered (gdb_stdlog, "%s", text);
2321
2322 xfree (status_string);
2323 xfree (text);
2324 ui_file_delete (tmp_stream);
2325 }
2326
2327 /* Prepare and stabilize the inferior for detaching it. E.g.,
2328 detaching while a thread is displaced stepping is a recipe for
2329 crashing it, as nothing would readjust the PC out of the scratch
2330 pad. */
2331
2332 void
2333 prepare_for_detach (void)
2334 {
2335 struct inferior *inf = current_inferior ();
2336 ptid_t pid_ptid = pid_to_ptid (inf->pid);
2337 struct cleanup *old_chain_1;
2338 struct displaced_step_inferior_state *displaced;
2339
2340 displaced = get_displaced_stepping_state (inf->pid);
2341
2342 /* Is any thread of this process displaced stepping? If not,
2343 there's nothing else to do. */
2344 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
2345 return;
2346
2347 if (debug_infrun)
2348 fprintf_unfiltered (gdb_stdlog,
2349 "displaced-stepping in-process while detaching");
2350
2351 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
2352 inf->detaching = 1;
2353
2354 while (!ptid_equal (displaced->step_ptid, null_ptid))
2355 {
2356 struct cleanup *old_chain_2;
2357 struct execution_control_state ecss;
2358 struct execution_control_state *ecs;
2359
2360 ecs = &ecss;
2361 memset (ecs, 0, sizeof (*ecs));
2362
2363 overlay_cache_invalid = 1;
2364
2365 /* We have to invalidate the registers BEFORE calling
2366 target_wait because they can be loaded from the target while
2367 in target_wait. This makes remote debugging a bit more
2368 efficient for those targets that provide critical registers
2369 as part of their normal status mechanism. */
2370
2371 registers_changed ();
2372
2373 if (deprecated_target_wait_hook)
2374 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
2375 else
2376 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
2377
2378 if (debug_infrun)
2379 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
2380
2381 /* If an error happens while handling the event, propagate GDB's
2382 knowledge of the executing state to the frontend/user running
2383 state. */
2384 old_chain_2 = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2385
2386 /* In non-stop mode, each thread is handled individually.
2387 Switch early, so the global state is set correctly for this
2388 thread. */
2389 if (non_stop
2390 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2391 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2392 context_switch (ecs->ptid);
2393
2394 /* Now figure out what to do with the result. */
2395 handle_inferior_event (ecs);
2396
2397 /* No error, don't finish the state yet. */
2398 discard_cleanups (old_chain_2);
2399
2400 /* Breakpoints and watchpoints are not installed on the target
2401 at this point, and signals are passed directly to the
2402 inferior, so this must mean the process is gone. */
2403 if (!ecs->wait_some_more)
2404 {
2405 discard_cleanups (old_chain_1);
2406 error (_("Program exited while detaching"));
2407 }
2408 }
2409
2410 discard_cleanups (old_chain_1);
2411 }
2412
2413 /* Wait for control to return from inferior to debugger.
2414
2415 If TREAT_EXEC_AS_SIGTRAP is non-zero, then handle EXEC events
2416 as if they were SIGTRAP signals. This can be useful during
2417 the startup sequence on some targets such as HP/UX, where
2418 we receive an EXEC event instead of the expected SIGTRAP.
2419
2420 If inferior gets a signal, we may decide to start it up again
2421 instead of returning. That is why there is a loop in this function.
2422 When this function actually returns it means the inferior
2423 should be left stopped and GDB should read more commands. */
2424
2425 void
2426 wait_for_inferior (int treat_exec_as_sigtrap)
2427 {
2428 struct cleanup *old_cleanups;
2429 struct execution_control_state ecss;
2430 struct execution_control_state *ecs;
2431
2432 if (debug_infrun)
2433 fprintf_unfiltered
2434 (gdb_stdlog, "infrun: wait_for_inferior (treat_exec_as_sigtrap=%d)\n",
2435 treat_exec_as_sigtrap);
2436
2437 old_cleanups =
2438 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2439
2440 ecs = &ecss;
2441 memset (ecs, 0, sizeof (*ecs));
2442
2443 /* We'll update this if & when we switch to a new thread. */
2444 previous_inferior_ptid = inferior_ptid;
2445
2446 while (1)
2447 {
2448 struct cleanup *old_chain;
2449
2450 /* We have to invalidate the registers BEFORE calling target_wait
2451 because they can be loaded from the target while in target_wait.
2452 This makes remote debugging a bit more efficient for those
2453 targets that provide critical registers as part of their normal
2454 status mechanism. */
2455
2456 overlay_cache_invalid = 1;
2457 registers_changed ();
2458
2459 if (deprecated_target_wait_hook)
2460 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2461 else
2462 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2463
2464 if (debug_infrun)
2465 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2466
2467 if (treat_exec_as_sigtrap && ecs->ws.kind == TARGET_WAITKIND_EXECD)
2468 {
2469 xfree (ecs->ws.value.execd_pathname);
2470 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2471 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
2472 }
2473
2474 /* If an error happens while handling the event, propagate GDB's
2475 knowledge of the executing state to the frontend/user running
2476 state. */
2477 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2478
2479 if (ecs->ws.kind == TARGET_WAITKIND_SYSCALL_ENTRY
2480 || ecs->ws.kind == TARGET_WAITKIND_SYSCALL_RETURN)
2481 ecs->ws.value.syscall_number = UNKNOWN_SYSCALL;
2482
2483 /* Now figure out what to do with the result. */
2484 handle_inferior_event (ecs);
2485
2486 /* No error, don't finish the state yet. */
2487 discard_cleanups (old_chain);
2488
2489 if (!ecs->wait_some_more)
2490 break;
2491 }
2492
2493 do_cleanups (old_cleanups);
2494 }
2495
2496 /* Asynchronous version of wait_for_inferior. It is called by the
2497 event loop whenever a change of state is detected on the file
2498 descriptor corresponding to the target. It can be called more than
2499 once to complete a single execution command. In such cases we need
2500 to keep the state in a global variable ECSS. If it is the last time
2501 that this function is called for a single execution command, then
2502 report to the user that the inferior has stopped, and do the
2503 necessary cleanups. */
2504
2505 void
2506 fetch_inferior_event (void *client_data)
2507 {
2508 struct execution_control_state ecss;
2509 struct execution_control_state *ecs = &ecss;
2510 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2511 struct cleanup *ts_old_chain;
2512 int was_sync = sync_execution;
2513
2514 memset (ecs, 0, sizeof (*ecs));
2515
2516 /* We'll update this if & when we switch to a new thread. */
2517 previous_inferior_ptid = inferior_ptid;
2518
2519 if (non_stop)
2520 /* In non-stop mode, the user/frontend should not notice a thread
2521 switch due to internal events. Make sure we reverse to the
2522 user selected thread and frame after handling the event and
2523 running any breakpoint commands. */
2524 make_cleanup_restore_current_thread ();
2525
2526 /* We have to invalidate the registers BEFORE calling target_wait
2527 because they can be loaded from the target while in target_wait.
2528 This makes remote debugging a bit more efficient for those
2529 targets that provide critical registers as part of their normal
2530 status mechanism. */
2531
2532 overlay_cache_invalid = 1;
2533 registers_changed ();
2534
2535 if (deprecated_target_wait_hook)
2536 ecs->ptid =
2537 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2538 else
2539 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2540
2541 if (debug_infrun)
2542 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2543
2544 if (non_stop
2545 && ecs->ws.kind != TARGET_WAITKIND_IGNORE
2546 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2547 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2548 /* In non-stop mode, each thread is handled individually. Switch
2549 early, so the global state is set correctly for this
2550 thread. */
2551 context_switch (ecs->ptid);
2552
2553 /* If an error happens while handling the event, propagate GDB's
2554 knowledge of the executing state to the frontend/user running
2555 state. */
2556 if (!non_stop)
2557 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2558 else
2559 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2560
2561 /* Now figure out what to do with the result. */
2562 handle_inferior_event (ecs);
2563
2564 if (!ecs->wait_some_more)
2565 {
2566 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2567
2568 delete_step_thread_step_resume_breakpoint ();
2569
2570 /* We may not find an inferior if this was a process exit. */
2571 if (inf == NULL || inf->stop_soon == NO_STOP_QUIETLY)
2572 normal_stop ();
2573
2574 if (target_has_execution
2575 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2576 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2577 && ecs->event_thread->step_multi
2578 && ecs->event_thread->stop_step)
2579 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2580 else
2581 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2582 }
2583
2584 /* No error, don't finish the thread states yet. */
2585 discard_cleanups (ts_old_chain);
2586
2587 /* Revert thread and frame. */
2588 do_cleanups (old_chain);
2589
2590 /* If the inferior was in sync execution mode, and now isn't,
2591 restore the prompt. */
2592 if (was_sync && !sync_execution)
2593 display_gdb_prompt (0);
2594 }
2595
2596 /* Record the frame and location we're currently stepping through. */
2597 void
2598 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2599 {
2600 struct thread_info *tp = inferior_thread ();
2601
2602 tp->step_frame_id = get_frame_id (frame);
2603 tp->step_stack_frame_id = get_stack_frame_id (frame);
2604
2605 tp->current_symtab = sal.symtab;
2606 tp->current_line = sal.line;
2607 }
2608
2609 /* Clear context switchable stepping state. */
2610
2611 void
2612 init_thread_stepping_state (struct thread_info *tss)
2613 {
2614 tss->stepping_over_breakpoint = 0;
2615 tss->step_after_step_resume_breakpoint = 0;
2616 tss->stepping_through_solib_after_catch = 0;
2617 tss->stepping_through_solib_catchpoints = NULL;
2618 }
2619
2620 /* Return the cached copy of the last pid/waitstatus returned by
2621 target_wait()/deprecated_target_wait_hook(). The data is actually
2622 cached by handle_inferior_event(), which gets called immediately
2623 after target_wait()/deprecated_target_wait_hook(). */
2624
2625 void
2626 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
2627 {
2628 *ptidp = target_last_wait_ptid;
2629 *status = target_last_waitstatus;
2630 }
2631
2632 void
2633 nullify_last_target_wait_ptid (void)
2634 {
2635 target_last_wait_ptid = minus_one_ptid;
2636 }
2637
2638 /* Switch thread contexts. */
2639
2640 static void
2641 context_switch (ptid_t ptid)
2642 {
2643 if (debug_infrun)
2644 {
2645 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
2646 target_pid_to_str (inferior_ptid));
2647 fprintf_unfiltered (gdb_stdlog, "to %s\n",
2648 target_pid_to_str (ptid));
2649 }
2650
2651 switch_to_thread (ptid);
2652 }
2653
2654 static void
2655 adjust_pc_after_break (struct execution_control_state *ecs)
2656 {
2657 struct regcache *regcache;
2658 struct gdbarch *gdbarch;
2659 struct address_space *aspace;
2660 CORE_ADDR breakpoint_pc;
2661
2662 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
2663 we aren't, just return.
2664
2665 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
2666 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
2667 implemented by software breakpoints should be handled through the normal
2668 breakpoint layer.
2669
2670 NOTE drow/2004-01-31: On some targets, breakpoints may generate
2671 different signals (SIGILL or SIGEMT for instance), but it is less
2672 clear where the PC is pointing afterwards. It may not match
2673 gdbarch_decr_pc_after_break. I don't know any specific target that
2674 generates these signals at breakpoints (the code has been in GDB since at
2675 least 1992) so I can not guess how to handle them here.
2676
2677 In earlier versions of GDB, a target with
2678 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
2679 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
2680 target with both of these set in GDB history, and it seems unlikely to be
2681 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
2682
2683 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
2684 return;
2685
2686 if (ecs->ws.value.sig != TARGET_SIGNAL_TRAP)
2687 return;
2688
2689 /* In reverse execution, when a breakpoint is hit, the instruction
2690 under it has already been de-executed. The reported PC always
2691 points at the breakpoint address, so adjusting it further would
2692 be wrong. E.g., consider this case on a decr_pc_after_break == 1
2693 architecture:
2694
2695 B1 0x08000000 : INSN1
2696 B2 0x08000001 : INSN2
2697 0x08000002 : INSN3
2698 PC -> 0x08000003 : INSN4
2699
2700 Say you're stopped at 0x08000003 as above. Reverse continuing
2701 from that point should hit B2 as below. Reading the PC when the
2702 SIGTRAP is reported should read 0x08000001 and INSN2 should have
2703 been de-executed already.
2704
2705 B1 0x08000000 : INSN1
2706 B2 PC -> 0x08000001 : INSN2
2707 0x08000002 : INSN3
2708 0x08000003 : INSN4
2709
2710 We can't apply the same logic as for forward execution, because
2711 we would wrongly adjust the PC to 0x08000000, since there's a
2712 breakpoint at PC - 1. We'd then report a hit on B1, although
2713 INSN1 hadn't been de-executed yet. Doing nothing is the correct
2714 behaviour. */
2715 if (execution_direction == EXEC_REVERSE)
2716 return;
2717
2718 /* If this target does not decrement the PC after breakpoints, then
2719 we have nothing to do. */
2720 regcache = get_thread_regcache (ecs->ptid);
2721 gdbarch = get_regcache_arch (regcache);
2722 if (gdbarch_decr_pc_after_break (gdbarch) == 0)
2723 return;
2724
2725 aspace = get_regcache_aspace (regcache);
2726
2727 /* Find the location where (if we've hit a breakpoint) the
2728 breakpoint would be. */
2729 breakpoint_pc = regcache_read_pc (regcache)
2730 - gdbarch_decr_pc_after_break (gdbarch);
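  /* For example, on a decr_pc_after_break == 1 architecture such as
     x86, hitting a breakpoint planted at 0x08000000 reports SIGTRAP
     with PC == 0x08000001, so BREAKPOINT_PC here is 0x08000000, the
     address the breakpoint was actually planted at.  */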
2731
2732 /* Check whether there actually is a software breakpoint inserted at
2733 that location.
2734
2735 If in non-stop mode, a race condition is possible where we've
2736 removed a breakpoint, but stop events for that breakpoint were
2737 already queued and arrive later. To suppress those spurious
2738 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
2739 and retire them after a number of stop events are reported. */
2740 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
2741 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
2742 {
2743 struct cleanup *old_cleanups = NULL;
2744
2745 if (RECORD_IS_USED)
2746 old_cleanups = record_gdb_operation_disable_set ();
2747
2748 /* When using hardware single-step, a SIGTRAP is reported for both
2749 a completed single-step and a software breakpoint. Need to
2750 differentiate between the two, as the latter needs adjusting
2751 but the former does not.
2752
2753 The SIGTRAP can be due to a completed hardware single-step only if
2754 - we didn't insert software single-step breakpoints
2755 - the thread to be examined is still the current thread
2756 - this thread is currently being stepped
2757
2758 If any of these events did not occur, we must have stopped due
2759 to hitting a software breakpoint, and have to back up to the
2760 breakpoint address.
2761
2762 As a special case, we could have hardware single-stepped a
2763 software breakpoint. In this case (prev_pc == breakpoint_pc),
2764 we also need to back up to the breakpoint address. */
2765
2766 if (singlestep_breakpoints_inserted_p
2767 || !ptid_equal (ecs->ptid, inferior_ptid)
2768 || !currently_stepping (ecs->event_thread)
2769 || ecs->event_thread->prev_pc == breakpoint_pc)
2770 regcache_write_pc (regcache, breakpoint_pc);
2771
2772 if (RECORD_IS_USED)
2773 do_cleanups (old_cleanups);
2774 }
2775 }
2776
2777 void
2778 init_infwait_state (void)
2779 {
2780 waiton_ptid = pid_to_ptid (-1);
2781 infwait_state = infwait_normal_state;
2782 }
2783
2784 void
2785 error_is_running (void)
2786 {
2787 error (_("\
2788 Cannot execute this command while the selected thread is running."));
2789 }
2790
2791 void
2792 ensure_not_running (void)
2793 {
2794 if (is_running (inferior_ptid))
2795 error_is_running ();
2796 }
2797
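/* Return non-zero if the frame whose ID is STEP_FRAME_ID is an older
   (calling) frame of FRAME and every frame between the two, if any,
   is an inlined frame; in other words, FRAME was reached from the
   step frame purely through inlined calls.  */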
2798 static int
2799 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
2800 {
2801 for (frame = get_prev_frame (frame);
2802 frame != NULL;
2803 frame = get_prev_frame (frame))
2804 {
2805 if (frame_id_eq (get_frame_id (frame), step_frame_id))
2806 return 1;
2807 if (get_frame_type (frame) != INLINE_FRAME)
2808 break;
2809 }
2810
2811 return 0;
2812 }
2813
2814 /* Auxiliary function that handles syscall entry/return events.
2815 It returns 1 if the inferior should keep going (and GDB
2816 should ignore the event), or 0 if the event deserves to be
2817 processed. */
2818
2819 static int
2820 handle_syscall_event (struct execution_control_state *ecs)
2821 {
2822 struct regcache *regcache;
2823 struct gdbarch *gdbarch;
2824 int syscall_number;
2825
2826 if (!ptid_equal (ecs->ptid, inferior_ptid))
2827 context_switch (ecs->ptid);
2828
2829 regcache = get_thread_regcache (ecs->ptid);
2830 gdbarch = get_regcache_arch (regcache);
2831 syscall_number = gdbarch_get_syscall_number (gdbarch, ecs->ptid);
2832 stop_pc = regcache_read_pc (regcache);
2833
2834 target_last_waitstatus.value.syscall_number = syscall_number;
2835
2836 if (catch_syscall_enabled () > 0
2837 && catching_syscall_number (syscall_number) > 0)
2838 {
2839 if (debug_infrun)
2840 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
2841 syscall_number);
2842
2843 ecs->event_thread->stop_bpstat
2844 = bpstat_stop_status (get_regcache_aspace (regcache),
2845 stop_pc, ecs->ptid);
2846 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
2847
2848 if (!ecs->random_signal)
2849 {
2850 /* Catchpoint hit. */
2851 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
2852 return 0;
2853 }
2854 }
2855
2856 /* If no catchpoint triggered for this, then keep going. */
2857 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
2858 keep_going (ecs);
2859 return 1;
2860 }
2861
2862 /* Given an execution control state that has been freshly filled in
2863 by an event from the inferior, figure out what it means and take
2864 appropriate action. */
2865
2866 static void
2867 handle_inferior_event (struct execution_control_state *ecs)
2868 {
2869 struct frame_info *frame;
2870 struct gdbarch *gdbarch;
2871 int sw_single_step_trap_p = 0;
2872 int stopped_by_watchpoint;
2873 int stepped_after_stopped_by_watchpoint = 0;
2874 struct symtab_and_line stop_pc_sal;
2875 enum stop_kind stop_soon;
2876
2877 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
2878 {
2879 /* We had an event in the inferior, but we are not interested in
2880 handling it at this level. The lower layers have already
2881 done what needs to be done, if anything.
2882
2883 One of the possible circumstances for this is when the
2884 inferior produces output for the console. The inferior has
2885 not stopped, and we are ignoring the event. Another possible
2886 circumstance is any event which the lower level knows will be
2887 reported multiple times without an intervening resume. */
2888 if (debug_infrun)
2889 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
2890 prepare_to_wait (ecs);
2891 return;
2892 }
2893
2894 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
2895 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2896 {
2897 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2898
2899 gdb_assert (inf);
2900 stop_soon = inf->stop_soon;
2901 }
2902 else
2903 stop_soon = NO_STOP_QUIETLY;
2904
2905 /* Cache the last pid/waitstatus. */
2906 target_last_wait_ptid = ecs->ptid;
2907 target_last_waitstatus = ecs->ws;
2908
2909 /* Always clear state belonging to the previous time we stopped. */
2910 stop_stack_dummy = STOP_NONE;
2911
2912 /* If it's a new thread, add it to the thread database. */
2913
2914 ecs->new_thread_event = (!ptid_equal (ecs->ptid, inferior_ptid)
2915 && !ptid_equal (ecs->ptid, minus_one_ptid)
2916 && !in_thread_list (ecs->ptid));
2917
2918 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
2919 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED && ecs->new_thread_event)
2920 add_thread (ecs->ptid);
2921
2922 ecs->event_thread = find_thread_ptid (ecs->ptid);
2923
2924 /* Dependent on valid ECS->EVENT_THREAD. */
2925 adjust_pc_after_break (ecs);
2926
2927 /* Dependent on the current PC value modified by adjust_pc_after_break. */
2928 reinit_frame_cache ();
2929
2930 breakpoint_retire_moribund ();
2931
2932 /* First, distinguish signals caused by the debugger from signals
2933 that have to do with the program's own actions. Note that
2934 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
2935 on the operating system version. Here we detect when a SIGILL or
2936 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
2937 something similar for SIGSEGV, since a SIGSEGV will be generated
2938 when we're trying to execute a breakpoint instruction on a
2939 non-executable stack. This happens for call dummy breakpoints
2940 for architectures like SPARC that place call dummies on the
2941 stack. */
2942 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
2943 && (ecs->ws.value.sig == TARGET_SIGNAL_ILL
2944 || ecs->ws.value.sig == TARGET_SIGNAL_SEGV
2945 || ecs->ws.value.sig == TARGET_SIGNAL_EMT))
2946 {
2947 struct regcache *regcache = get_thread_regcache (ecs->ptid);
2948
2949 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
2950 regcache_read_pc (regcache)))
2951 {
2952 if (debug_infrun)
2953 fprintf_unfiltered (gdb_stdlog,
2954 "infrun: Treating signal as SIGTRAP\n");
2955 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
2956 }
2957 }
2958
2959 /* Mark the non-executing threads accordingly. In all-stop, all
2960 threads of all processes are stopped when we get any event
2961 reported. In non-stop mode, only the event thread stops. If
2962 we're handling a process exit in non-stop mode, there's nothing
2963 to do, as threads of the dead process are gone, and threads of
2964 any other process were left running. */
2965 if (!non_stop)
2966 set_executing (minus_one_ptid, 0);
2967 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2968 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
2969 set_executing (inferior_ptid, 0);
2970
2971 switch (infwait_state)
2972 {
2973 case infwait_thread_hop_state:
2974 if (debug_infrun)
2975 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_thread_hop_state\n");
2976 break;
2977
2978 case infwait_normal_state:
2979 if (debug_infrun)
2980 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
2981 break;
2982
2983 case infwait_step_watch_state:
2984 if (debug_infrun)
2985 fprintf_unfiltered (gdb_stdlog,
2986 "infrun: infwait_step_watch_state\n");
2987
2988 stepped_after_stopped_by_watchpoint = 1;
2989 break;
2990
2991 case infwait_nonstep_watch_state:
2992 if (debug_infrun)
2993 fprintf_unfiltered (gdb_stdlog,
2994 "infrun: infwait_nonstep_watch_state\n");
2995 insert_breakpoints ();
2996
2997 /* FIXME-maybe: is this cleaner than setting a flag? Does it
2998 handle things like signals arriving and other things happening
2999 in combination correctly? */
3000 stepped_after_stopped_by_watchpoint = 1;
3001 break;
3002
3003 default:
3004 internal_error (__FILE__, __LINE__, _("bad switch"));
3005 }
3006
3007 infwait_state = infwait_normal_state;
3008 waiton_ptid = pid_to_ptid (-1);
3009
3010 switch (ecs->ws.kind)
3011 {
3012 case TARGET_WAITKIND_LOADED:
3013 if (debug_infrun)
3014 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
3015 /* Ignore gracefully during startup of the inferior, as it might
3016 be the shell which has just loaded some objects; otherwise,
3017 add the symbols for the newly loaded objects. Also ignore at
3018 the beginning of an attach or remote session; we will query
3019 the full list of libraries once the connection is
3020 established. */
3021 if (stop_soon == NO_STOP_QUIETLY)
3022 {
3023 /* Check for any newly added shared libraries if we're
3024 supposed to be adding them automatically. Switch
3025 terminal for any messages produced by
3026 breakpoint_re_set. */
3027 target_terminal_ours_for_output ();
3028 /* NOTE: cagney/2003-11-25: Make certain that the target
3029 stack's section table is kept up-to-date. Architectures,
3030 (e.g., PPC64), use the section table to perform
3031 operations such as address => section name and hence
3032 require the table to contain all sections (including
3033 those found in shared libraries). */
3034 #ifdef SOLIB_ADD
3035 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
3036 #else
3037 solib_add (NULL, 0, &current_target, auto_solib_add);
3038 #endif
3039 target_terminal_inferior ();
3040
3041 /* If requested, stop when the dynamic linker notifies
3042 gdb of events. This allows the user to get control
3043 and place breakpoints in initializer routines for
3044 dynamically loaded objects (among other things). */
3045 if (stop_on_solib_events)
3046 {
3047 /* Make sure we print "Stopped due to solib-event" in
3048 normal_stop. */
3049 stop_print_frame = 1;
3050
3051 stop_stepping (ecs);
3052 return;
3053 }
3054
3055 /* NOTE drow/2007-05-11: This might be a good place to check
3056 for "catch load". */
3057 }
3058
3059 /* If we are skipping through a shell, or through shared library
3060 loading that we aren't interested in, resume the program. If
3061 we're running the program normally, also resume. But stop if
3062 we're attaching or setting up a remote connection. */
3063 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3064 {
3065 /* Loading of shared libraries might have changed breakpoint
3066 addresses. Make sure new breakpoints are inserted. */
3067 if (stop_soon == NO_STOP_QUIETLY
3068 && !breakpoints_always_inserted_mode ())
3069 insert_breakpoints ();
3070 resume (0, TARGET_SIGNAL_0);
3071 prepare_to_wait (ecs);
3072 return;
3073 }
3074
3075 break;
3076
3077 case TARGET_WAITKIND_SPURIOUS:
3078 if (debug_infrun)
3079 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3080 resume (0, TARGET_SIGNAL_0);
3081 prepare_to_wait (ecs);
3082 return;
3083
3084 case TARGET_WAITKIND_EXITED:
3085 if (debug_infrun)
3086 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXITED\n");
3087 inferior_ptid = ecs->ptid;
3088 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3089 set_current_program_space (current_inferior ()->pspace);
3090 handle_vfork_child_exec_or_exit (0);
3091 target_terminal_ours (); /* Must do this before mourn anyway. */
3092 print_stop_reason (EXITED, ecs->ws.value.integer);
3093
3094 /* Record the exit code in the convenience variable $_exitcode, so
3095 that the user can inspect this again later. */
3096 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3097 (LONGEST) ecs->ws.value.integer);
3098 gdb_flush (gdb_stdout);
3099 target_mourn_inferior ();
3100 singlestep_breakpoints_inserted_p = 0;
3101 stop_print_frame = 0;
3102 stop_stepping (ecs);
3103 return;
3104
3105 case TARGET_WAITKIND_SIGNALLED:
3106 if (debug_infrun)
3107 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SIGNALLED\n");
3108 inferior_ptid = ecs->ptid;
3109 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3110 set_current_program_space (current_inferior ()->pspace);
3111 handle_vfork_child_exec_or_exit (0);
3112 stop_print_frame = 0;
3113 target_terminal_ours (); /* Must do this before mourn anyway. */
3114
3115 /* Note: By definition of TARGET_WAITKIND_SIGNALLED, we shouldn't
3116 reach here unless the inferior is dead. However, for years
3117 target_kill() was called here, which hints that fatal signals aren't
3118 really fatal on some systems. If that's true, then some changes
3119 may be needed. */
3120 target_mourn_inferior ();
3121
3122 print_stop_reason (SIGNAL_EXITED, ecs->ws.value.sig);
3123 singlestep_breakpoints_inserted_p = 0;
3124 stop_stepping (ecs);
3125 return;
3126
3127 /* The following are the only cases in which we keep going;
3128 the above cases end in a continue or goto. */
3129 case TARGET_WAITKIND_FORKED:
3130 case TARGET_WAITKIND_VFORKED:
3131 if (debug_infrun)
3132 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3133
3134 if (!ptid_equal (ecs->ptid, inferior_ptid))
3135 {
3136 context_switch (ecs->ptid);
3137 reinit_frame_cache ();
3138 }
3139
3140 /* Immediately detach breakpoints from the child before there's
3141 any chance of letting the user delete breakpoints from the
3142 breakpoint lists. If we don't do this early, it's easy to
3143 leave leftover traps in the child, viz: "break foo; catch
3144 fork; c; <fork>; del; c; <child calls foo>". We only follow
3145 the fork on the last `continue', and by that time the
3146 breakpoint at "foo" is long gone from the breakpoint table.
3147 If we vforked, then we don't need to unpatch here, since both
3148 parent and child are sharing the same memory pages; we'll
3149 need to unpatch at follow/detach time instead to be certain
3150 that new breakpoints added between catchpoint hit time and
3151 vfork follow are detached. */
3152 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
3153 {
3154 int child_pid = ptid_get_pid (ecs->ws.value.related_pid);
3155
3156 /* This won't actually modify the breakpoint list, but will
3157 physically remove the breakpoints from the child. */
3158 detach_breakpoints (child_pid);
3159 }
3160
3161 /* In case the event is caught by a catchpoint, remember that
3162 the event is to be followed at the next resume of the thread,
3163 and not immediately. */
3164 ecs->event_thread->pending_follow = ecs->ws;
3165
3166 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3167
3168 ecs->event_thread->stop_bpstat
3169 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3170 stop_pc, ecs->ptid);
3171
3172 /* Note that we're interested in knowing the bpstat actually
3173 causes a stop, not just if it may explain the signal.
3174 Software watchpoints, for example, always appear in the
3175 bpstat. */
3176 ecs->random_signal = !bpstat_causes_stop (ecs->event_thread->stop_bpstat);
3177
3178 /* If no catchpoint triggered for this, then keep going. */
3179 if (ecs->random_signal)
3180 {
3181 ptid_t parent;
3182 ptid_t child;
3183 int should_resume;
3184 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
3185
3186 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3187
3188 should_resume = follow_fork ();
3189
3190 parent = ecs->ptid;
3191 child = ecs->ws.value.related_pid;
3192
3193 /* In non-stop mode, also resume the other branch. */
3194 if (non_stop && !detach_fork)
3195 {
3196 if (follow_child)
3197 switch_to_thread (parent);
3198 else
3199 switch_to_thread (child);
3200
3201 ecs->event_thread = inferior_thread ();
3202 ecs->ptid = inferior_ptid;
3203 keep_going (ecs);
3204 }
3205
3206 if (follow_child)
3207 switch_to_thread (child);
3208 else
3209 switch_to_thread (parent);
3210
3211 ecs->event_thread = inferior_thread ();
3212 ecs->ptid = inferior_ptid;
3213
3214 if (should_resume)
3215 keep_going (ecs);
3216 else
3217 stop_stepping (ecs);
3218 return;
3219 }
3220 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3221 goto process_event_stop_test;
3222
3223 case TARGET_WAITKIND_VFORK_DONE:
3224 /* Done with the shared memory region. Re-insert breakpoints in
3225 the parent, and keep going. */
3226
3227 if (debug_infrun)
3228 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3229
3230 if (!ptid_equal (ecs->ptid, inferior_ptid))
3231 context_switch (ecs->ptid);
3232
3233 current_inferior ()->waiting_for_vfork_done = 0;
3234 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3235 /* This also takes care of reinserting breakpoints in the
3236 previously locked inferior. */
3237 keep_going (ecs);
3238 return;
3239
3240 case TARGET_WAITKIND_EXECD:
3241 if (debug_infrun)
3242 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3243
3244 if (!ptid_equal (ecs->ptid, inferior_ptid))
3245 {
3246 context_switch (ecs->ptid);
3247 reinit_frame_cache ();
3248 }
3249
3250 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3251
3252 /* Do whatever is necessary to the parent branch of the vfork. */
3253 handle_vfork_child_exec_or_exit (1);
3254
3255 /* This causes the eventpoints and symbol table to be reset.
3256 Must do this now, before trying to determine whether to
3257 stop. */
3258 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3259
3260 ecs->event_thread->stop_bpstat
3261 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3262 stop_pc, ecs->ptid);
3263 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
3264
3265 /* Note that this may be referenced from inside
3266 bpstat_stop_status above, through inferior_has_execd. */
3267 xfree (ecs->ws.value.execd_pathname);
3268 ecs->ws.value.execd_pathname = NULL;
3269
3270 /* If no catchpoint triggered for this, then keep going. */
3271 if (ecs->random_signal)
3272 {
3273 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3274 keep_going (ecs);
3275 return;
3276 }
3277 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3278 goto process_event_stop_test;
3279
3280 /* Be careful not to try to gather much state about a thread
3281 that's in a syscall. It's frequently a losing proposition. */
3282 case TARGET_WAITKIND_SYSCALL_ENTRY:
3283 if (debug_infrun)
3284 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3285 /* Get the current syscall number. */
3286 if (handle_syscall_event (ecs) != 0)
3287 return;
3288 goto process_event_stop_test;
3289
3290 /* Before examining the threads further, step this thread to
3291 get it entirely out of the syscall. (We get notice of the
3292 event when the thread is just on the verge of exiting a
3293 syscall. Stepping one instruction seems to get it back
3294 into user code.) */
3295 case TARGET_WAITKIND_SYSCALL_RETURN:
3296 if (debug_infrun)
3297 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
3298 if (handle_syscall_event (ecs) != 0)
3299 return;
3300 goto process_event_stop_test;
3301
3302 case TARGET_WAITKIND_STOPPED:
3303 if (debug_infrun)
3304 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
3305 ecs->event_thread->stop_signal = ecs->ws.value.sig;
3306 break;
3307
3308 case TARGET_WAITKIND_NO_HISTORY:
3309 /* Reverse execution: target ran out of history info. */
3310 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3311 print_stop_reason (NO_HISTORY, 0);
3312 stop_stepping (ecs);
3313 return;
3314 }
3315
3316 if (ecs->new_thread_event)
3317 {
3318 if (non_stop)
3319 /* Non-stop assumes that the target handles adding new threads
3320 to the thread list. */
3321 internal_error (__FILE__, __LINE__, "\
3322 targets should add new threads to the thread list themselves in non-stop mode.");
3323
3324 /* We may want to consider not doing a resume here in order to
3325 give the user a chance to play with the new thread. It might
3326 be good to make that a user-settable option. */
3327
3328 /* At this point, all threads are stopped (happens automatically
3329 in either the OS or the native code). Therefore we need to
3330 continue all threads in order to make progress. */
3331
3332 if (!ptid_equal (ecs->ptid, inferior_ptid))
3333 context_switch (ecs->ptid);
3334 target_resume (RESUME_ALL, 0, TARGET_SIGNAL_0);
3335 prepare_to_wait (ecs);
3336 return;
3337 }
3338
3339 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED)
3340 {
3341 /* Do we need to clean up the state of a thread that has
3342 completed a displaced single-step? (Doing so usually affects
3343 the PC, so do it here, before we set stop_pc.) */
3344 displaced_step_fixup (ecs->ptid, ecs->event_thread->stop_signal);
3345
3346 /* If we either finished a single-step or hit a breakpoint, but
3347 the user wanted this thread to be stopped, pretend we got a
3348 SIG0 (generic unsignaled stop). */
3349
3350 if (ecs->event_thread->stop_requested
3351 && ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3352 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3353 }
3354
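/* Cache the event thread's PC; most of the checks below key off of
   stop_pc.  */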
3355 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3356
3357 if (debug_infrun)
3358 {
3359 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3360 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3361 struct cleanup *old_chain = save_inferior_ptid ();
3362
3363 inferior_ptid = ecs->ptid;
3364
3365 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
3366 paddress (gdbarch, stop_pc));
3367 if (target_stopped_by_watchpoint ())
3368 {
3369 CORE_ADDR addr;
3370
3371 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
3372
3373 if (target_stopped_data_address (&current_target, &addr))
3374 fprintf_unfiltered (gdb_stdlog,
3375 "infrun: stopped data address = %s\n",
3376 paddress (gdbarch, addr));
3377 else
3378 fprintf_unfiltered (gdb_stdlog,
3379 "infrun: (no data address available)\n");
3380 }
3381
3382 do_cleanups (old_chain);
3383 }
3384
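/* Handle completion of a "thread hop": we were stepping some thread
   past a software single-step breakpoint (set up further down), and
   this event may be that step finishing; if so, switch back to the
   thread we were originally single-stepping.  */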
3385 if (stepping_past_singlestep_breakpoint)
3386 {
3387 gdb_assert (singlestep_breakpoints_inserted_p);
3388 gdb_assert (ptid_equal (singlestep_ptid, ecs->ptid));
3389 gdb_assert (!ptid_equal (singlestep_ptid, saved_singlestep_ptid));
3390
3391 stepping_past_singlestep_breakpoint = 0;
3392
3393 /* We've either finished single-stepping past the single-step
3394 breakpoint, or stopped for some other reason. It would be nice if
3395 we could tell, but we can't reliably. */
3396 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3397 {
3398 if (debug_infrun)
3399 fprintf_unfiltered (gdb_stdlog, "infrun: stepping_past_singlestep_breakpoint\n");
3400 /* Pull the single step breakpoints out of the target. */
3401 remove_single_step_breakpoints ();
3402 singlestep_breakpoints_inserted_p = 0;
3403
3404 ecs->random_signal = 0;
3405 ecs->event_thread->trap_expected = 0;
3406
3407 context_switch (saved_singlestep_ptid);
3408 if (deprecated_context_hook)
3409 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3410
3411 resume (1, TARGET_SIGNAL_0);
3412 prepare_to_wait (ecs);
3413 return;
3414 }
3415 }
3416
3417 if (!ptid_equal (deferred_step_ptid, null_ptid))
3418 {
3419 /* In non-stop mode, there's never a deferred_step_ptid set. */
3420 gdb_assert (!non_stop);
3421
3422 /* If we stopped for some other reason than single-stepping, ignore
3423 the fact that we were supposed to switch back. */
3424 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3425 {
3426 if (debug_infrun)
3427 fprintf_unfiltered (gdb_stdlog,
3428 "infrun: handling deferred step\n");
3429
3430 /* Pull the single step breakpoints out of the target. */
3431 if (singlestep_breakpoints_inserted_p)
3432 {
3433 remove_single_step_breakpoints ();
3434 singlestep_breakpoints_inserted_p = 0;
3435 }
3436
3437 /* Note: We do not call context_switch at this point, as the
3438 context is already set up for stepping the original thread. */
3439 switch_to_thread (deferred_step_ptid);
3440 deferred_step_ptid = null_ptid;
3441 /* Suppress spurious "Switching to ..." message. */
3442 previous_inferior_ptid = inferior_ptid;
3443
3444 resume (1, TARGET_SIGNAL_0);
3445 prepare_to_wait (ecs);
3446 return;
3447 }
3448
3449 deferred_step_ptid = null_ptid;
3450 }
3451
3452 /* See if a thread hit a thread-specific breakpoint that was meant for
3453 another thread. If so, then step that thread past the breakpoint,
3454 and continue it. */
3455
3456 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3457 {
3458 int thread_hop_needed = 0;
3459 struct address_space *aspace =
3460 get_regcache_aspace (get_thread_regcache (ecs->ptid));
3461
3462 /* Check if a regular breakpoint has been hit before checking
3463 for a potential single step breakpoint. Otherwise, GDB will
3464 not see this breakpoint hit when stepping onto breakpoints. */
3465 if (regular_breakpoint_inserted_here_p (aspace, stop_pc))
3466 {
3467 ecs->random_signal = 0;
3468 if (!breakpoint_thread_match (aspace, stop_pc, ecs->ptid))
3469 thread_hop_needed = 1;
3470 }
3471 else if (singlestep_breakpoints_inserted_p)
3472 {
3473 /* We have not context switched yet, so this should be true
3474 no matter which thread hit the singlestep breakpoint. */
3475 gdb_assert (ptid_equal (inferior_ptid, singlestep_ptid));
3476 if (debug_infrun)
3477 fprintf_unfiltered (gdb_stdlog, "infrun: software single step "
3478 "trap for %s\n",
3479 target_pid_to_str (ecs->ptid));
3480
3481 ecs->random_signal = 0;
3482 /* The call to in_thread_list is necessary because PTIDs sometimes
3483 change when we go from single-threaded to multi-threaded. If
3484 the singlestep_ptid is still in the list, assume that it is
3485 really different from ecs->ptid. */
3486 if (!ptid_equal (singlestep_ptid, ecs->ptid)
3487 && in_thread_list (singlestep_ptid))
3488 {
3489 /* If the PC of the thread we were trying to single-step
3490 has changed, discard this event (which we were going
3491 to ignore anyway), and pretend we saw that thread
3492 trap. This prevents us continuously moving the
3493 single-step breakpoint forward, one instruction at a
3494 time. If the PC has changed, then the thread we were
3495 trying to single-step has trapped or been signalled,
3496 but the event has not been reported to GDB yet.
3497
3498 There might be some cases where this loses signal
3499 information, if a signal has arrived at exactly the
3500 same time that the PC changed, but this is the best
3501 we can do with the information available. Perhaps we
3502 should arrange to report all events for all threads
3503 when they stop, or to re-poll the remote looking for
3504 this particular thread (i.e. temporarily enable
3505 schedlock). */
3506
3507 CORE_ADDR new_singlestep_pc
3508 = regcache_read_pc (get_thread_regcache (singlestep_ptid));
3509
3510 if (new_singlestep_pc != singlestep_pc)
3511 {
3512 enum target_signal stop_signal;
3513
3514 if (debug_infrun)
3515 fprintf_unfiltered (gdb_stdlog, "infrun: unexpected thread,"
3516 " but expected thread advanced also\n");
3517
3518 /* The current context still belongs to
3519 singlestep_ptid. Don't swap here, since that's
3520 the context we want to use. Just fudge our
3521 state and continue. */
3522 stop_signal = ecs->event_thread->stop_signal;
3523 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3524 ecs->ptid = singlestep_ptid;
3525 ecs->event_thread = find_thread_ptid (ecs->ptid);
3526 ecs->event_thread->stop_signal = stop_signal;
3527 stop_pc = new_singlestep_pc;
3528 }
3529 else
3530 {
3531 if (debug_infrun)
3532 fprintf_unfiltered (gdb_stdlog,
3533 "infrun: unexpected thread\n");
3534
3535 thread_hop_needed = 1;
3536 stepping_past_singlestep_breakpoint = 1;
3537 saved_singlestep_ptid = singlestep_ptid;
3538 }
3539 }
3540 }
3541
3542 if (thread_hop_needed)
3543 {
3544 struct regcache *thread_regcache;
3545 int remove_status = 0;
3546
3547 if (debug_infrun)
3548 fprintf_unfiltered (gdb_stdlog, "infrun: thread_hop_needed\n");
3549
3550 /* Switch context before touching inferior memory, the
3551 previous thread may have exited. */
3552 if (!ptid_equal (inferior_ptid, ecs->ptid))
3553 context_switch (ecs->ptid);
3554
3555 /* Saw a breakpoint, but it was hit by the wrong thread.
3556 Just continue. */
3557
3558 if (singlestep_breakpoints_inserted_p)
3559 {
3560 /* Pull the single step breakpoints out of the target. */
3561 remove_single_step_breakpoints ();
3562 singlestep_breakpoints_inserted_p = 0;
3563 }
3564
3565 /* If the arch can displace step, don't remove the
3566 breakpoints. */
3567 thread_regcache = get_thread_regcache (ecs->ptid);
3568 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
3569 remove_status = remove_breakpoints ();
3570
3571 /* Did we fail to remove breakpoints? If so, try
3572 to set the PC past the bp. (There's at least
3573 one situation in which we can fail to remove
3574 the bp's: On HP-UX's that use ttrace, we can't
3575 change the address space of a vforking child
3576 process until the child exits (well, okay, not
3577 then either :-) or execs.)  */
3578 if (remove_status != 0)
3579 error (_("Cannot step over breakpoint hit in wrong thread"));
3580 else
3581 { /* Single step */
3582 if (!non_stop)
3583 {
3584 /* Only need to require the next event from this
3585 thread in all-stop mode. */
3586 waiton_ptid = ecs->ptid;
3587 infwait_state = infwait_thread_hop_state;
3588 }
3589
3590 ecs->event_thread->stepping_over_breakpoint = 1;
3591 keep_going (ecs);
3592 return;
3593 }
3594 }
3595 else if (singlestep_breakpoints_inserted_p)
3596 {
3597 sw_single_step_trap_p = 1;
3598 ecs->random_signal = 0;
3599 }
3600 }
3601 else
3602 ecs->random_signal = 1;
3603
3604 /* See if something interesting happened to the non-current thread. If
3605 so, then switch to that thread. */
3606 if (!ptid_equal (ecs->ptid, inferior_ptid))
3607 {
3608 if (debug_infrun)
3609 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
3610
3611 context_switch (ecs->ptid);
3612
3613 if (deprecated_context_hook)
3614 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3615 }
3616
3617 /* At this point, get hold of the now-current thread's frame. */
3618 frame = get_current_frame ();
3619 gdbarch = get_frame_arch (frame);
3620
3621 if (singlestep_breakpoints_inserted_p)
3622 {
3623 /* Pull the single step breakpoints out of the target. */
3624 remove_single_step_breakpoints ();
3625 singlestep_breakpoints_inserted_p = 0;
3626 }
3627
3628 if (stepped_after_stopped_by_watchpoint)
3629 stopped_by_watchpoint = 0;
3630 else
3631 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
3632
3633 /* If necessary, step over this watchpoint. We'll be back to display
3634 it in a moment. */
3635 if (stopped_by_watchpoint
3636 && (target_have_steppable_watchpoint
3637 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
3638 {
3639 /* At this point, we are stopped at an instruction which has
3640 attempted to write to a piece of memory under control of
3641 a watchpoint. The instruction hasn't actually executed
3642 yet. If we were to evaluate the watchpoint expression
3643 now, we would get the old value, and therefore no change
3644 would seem to have occurred.
3645
3646 In order to make watchpoints work `right', we really need
3647 to complete the memory write, and then evaluate the
3648 watchpoint expression. We do this by single-stepping the
3649 target.
3650
3651 It may not be necessary to disable the watchpoint to step over
3652 it. For example, the PA can (with some kernel cooperation)
3653 single step over a watchpoint without disabling the watchpoint.
3654
3655 It is far more common to need to disable a watchpoint to step
3656 the inferior over it. If we have non-steppable watchpoints,
3657 we must disable the current watchpoint; it's simplest to
3658 disable all watchpoints and breakpoints. */
3659 int hw_step = 1;
3660
3661 if (!target_have_steppable_watchpoint)
3662 remove_breakpoints ();
3663 /* Single step */
3664 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
3665 target_resume (ecs->ptid, hw_step, TARGET_SIGNAL_0);
3666 waiton_ptid = ecs->ptid;
3667 if (target_have_steppable_watchpoint)
3668 infwait_state = infwait_step_watch_state;
3669 else
3670 infwait_state = infwait_nonstep_watch_state;
3671 prepare_to_wait (ecs);
3672 return;
3673 }
3674
3675 ecs->stop_func_start = 0;
3676 ecs->stop_func_end = 0;
3677 ecs->stop_func_name = 0;
3678 /* Don't care about return value; stop_func_start and stop_func_name
3679 will both be 0 if it doesn't work. */
3680 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3681 &ecs->stop_func_start, &ecs->stop_func_end);
3682 ecs->stop_func_start
3683 += gdbarch_deprecated_function_start_offset (gdbarch);
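/* Reset per-event state before working out what to do about this
   stop.  */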
3684 ecs->event_thread->stepping_over_breakpoint = 0;
3685 bpstat_clear (&ecs->event_thread->stop_bpstat);
3686 ecs->event_thread->stop_step = 0;
3687 stop_print_frame = 1;
3688 ecs->random_signal = 0;
3689 stopped_by_random_signal = 0;
3690
3691 /* Hide inlined functions starting here, unless we just performed stepi or
3692 nexti. After stepi and nexti, always show the innermost frame (not any
3693 inline function call sites). */
3694 if (ecs->event_thread->step_range_end != 1)
3695 skip_inline_frames (ecs->ptid);
3696
3697 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3698 && ecs->event_thread->trap_expected
3699 && gdbarch_single_step_through_delay_p (gdbarch)
3700 && currently_stepping (ecs->event_thread))
3701 {
3702 /* We're trying to step off a breakpoint. Turns out that we're
3703 also on an instruction that needs to be stepped multiple
3704 times before it has fully executed.  E.g., architectures
3705 with a delay slot. It needs to be stepped twice, once for
3706 the instruction and once for the delay slot. */
3707 int step_through_delay
3708 = gdbarch_single_step_through_delay (gdbarch, frame);
3709
3710 if (debug_infrun && step_through_delay)
3711 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
3712 if (ecs->event_thread->step_range_end == 0 && step_through_delay)
3713 {
3714 /* The user issued a continue when stopped at a breakpoint.
3715 Set up for another trap and get out of here. */
3716 ecs->event_thread->stepping_over_breakpoint = 1;
3717 keep_going (ecs);
3718 return;
3719 }
3720 else if (step_through_delay)
3721 {
3722 /* The user issued a step when stopped at a breakpoint.
3723 Maybe we should stop, maybe we should not - the delay
3724 slot *might* correspond to a line of source. In any
3725 case, don't decide that here, just set
3726 ecs->stepping_over_breakpoint, making sure we
3727 single-step again before breakpoints are re-inserted. */
3728 ecs->event_thread->stepping_over_breakpoint = 1;
3729 }
3730 }
3731
3732 /* Look at the cause of the stop, and decide what to do.
3733 The alternatives are:
3734 1) stop_stepping and return; to really stop and return to the debugger,
3735 2) keep_going and return to start up again
3736 (set ecs->event_thread->stepping_over_breakpoint to 1 to single step once)
3737 3) set ecs->random_signal to 1, and the decision between 1 and 2
3738 will be made according to the signal handling tables. */
3739
3740 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3741 || stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_NO_SIGSTOP
3742 || stop_soon == STOP_QUIETLY_REMOTE)
3743 {
3744 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP && stop_after_trap)
3745 {
3746 if (debug_infrun)
3747 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
3748 stop_print_frame = 0;
3749 stop_stepping (ecs);
3750 return;
3751 }
3752
3753 /* This originates from start_remote(), start_inferior() and
3754 the shared library hook functions.  */
3755 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
3756 {
3757 if (debug_infrun)
3758 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3759 stop_stepping (ecs);
3760 return;
3761 }
3762
3763 /* This originates from attach_command(). We need to overwrite
3764 the stop_signal here, because some kernels don't ignore a
3765 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
3766 See more comments in inferior.h. On the other hand, if we
3767 get a non-SIGSTOP, report it to the user - assume the backend
3768 will handle the SIGSTOP if it should show up later.
3769
3770 Also consider that the attach is complete when we see a
3771 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
3772 target extended-remote report it instead of a SIGSTOP
3773 (e.g. gdbserver). We already rely on SIGTRAP being our
3774 signal, so this is no exception.
3775
3776 Also consider that the attach is complete when we see a
3777 TARGET_SIGNAL_0. In non-stop mode, GDB will explicitly tell
3778 the target to stop all threads of the inferior, in case the
3779 low level attach operation doesn't stop them implicitly. If
3780 they weren't stopped implicitly, then the stub will report a
3781 TARGET_SIGNAL_0, meaning: stopped for no particular reason
3782 other than GDB's request. */
3783 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3784 && (ecs->event_thread->stop_signal == TARGET_SIGNAL_STOP
3785 || ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3786 || ecs->event_thread->stop_signal == TARGET_SIGNAL_0))
3787 {
3788 stop_stepping (ecs);
3789 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3790 return;
3791 }
3792
3793 /* See if there is a breakpoint at the current PC. */
3794 ecs->event_thread->stop_bpstat
3795 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3796 stop_pc, ecs->ptid);
3797
3798 /* The following is needed in case a breakpoint condition called a
3799 function.  */
3800 stop_print_frame = 1;
3801
3802 /* This is where we handle "moribund" watchpoints. Unlike
3803 software breakpoint traps, hardware watchpoint traps are
3804 always distinguishable from random traps. If no high-level
3805 watchpoint is associated with the reported stop data address
3806 anymore, then the bpstat does not explain the signal ---
3807 simply make sure to ignore it if `stopped_by_watchpoint' is
3808 set. */
3809
3810 if (debug_infrun
3811 && ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3812 && !bpstat_explains_signal (ecs->event_thread->stop_bpstat)
3813 && stopped_by_watchpoint)
3814 fprintf_unfiltered (gdb_stdlog, "\
3815 infrun: no user watchpoint explains watchpoint SIGTRAP, ignoring\n");
3816
3817 /* NOTE: cagney/2003-03-29: These two checks for a random signal
3818 at one stage in the past included checks for an inferior
3819 function call's call dummy's return breakpoint. The original
3820 comment, that went with the test, read:
3821
3822 ``End of a stack dummy. Some systems (e.g. Sony news) give
3823 another signal besides SIGTRAP, so check here as well as
3824 above.''
3825
3826 If someone ever tries to get call dummies on a
3827 non-executable stack to work (where the target would stop
3828 with something like a SIGSEGV), then those tests might need
3829 to be re-instated. Given, however, that the tests were only
3830 enabled when momentary breakpoints were not being used, I
3831 suspect that it won't be the case.
3832
3833 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
3834 be necessary for call dummies on a non-executable stack on
3835 SPARC. */
3836
3837 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3838 ecs->random_signal
3839 = !(bpstat_explains_signal (ecs->event_thread->stop_bpstat)
3840 || stopped_by_watchpoint
3841 || ecs->event_thread->trap_expected
3842 || (ecs->event_thread->step_range_end
3843 && ecs->event_thread->step_resume_breakpoint == NULL));
3844 else
3845 {
3846 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
3847 if (!ecs->random_signal)
3848 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3849 }
3850 }
3851
3852 /* When we reach this point, we've pretty much decided
3853 that the reason for stopping must've been a random
3854 (unexpected) signal. */
3855
3856 else
3857 ecs->random_signal = 1;
3858
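/* The gotos from the fork, exec and syscall cases above land here, as
   does the straight-line path; from here on we decide whether to stop
   the user, keep going, or treat the signal as random.  */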
3859 process_event_stop_test:
3860
3861 /* Re-fetch current thread's frame in case we did a
3862 "goto process_event_stop_test" above. */
3863 frame = get_current_frame ();
3864 gdbarch = get_frame_arch (frame);
3865
3866 /* For the program's own signals, act according to
3867 the signal handling tables. */
3868
3869 if (ecs->random_signal)
3870 {
3871 /* Signal not for debugging purposes. */
3872 int printed = 0;
3873 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
3874
3875 if (debug_infrun)
3876 fprintf_unfiltered (gdb_stdlog, "infrun: random signal %d\n",
3877 ecs->event_thread->stop_signal);
3878
3879 stopped_by_random_signal = 1;
3880
3881 if (signal_print[ecs->event_thread->stop_signal])
3882 {
3883 printed = 1;
3884 target_terminal_ours_for_output ();
3885 print_stop_reason (SIGNAL_RECEIVED, ecs->event_thread->stop_signal);
3886 }
3887 /* Always stop on signals if we're either just gaining control
3888 of the program, or the user explicitly requested this thread
3889 to remain stopped. */
3890 if (stop_soon != NO_STOP_QUIETLY
3891 || ecs->event_thread->stop_requested
3892 || (!inf->detaching
3893 && signal_stop_state (ecs->event_thread->stop_signal)))
3894 {
3895 stop_stepping (ecs);
3896 return;
3897 }
3898 /* If not going to stop, give terminal back
3899 if we took it away. */
3900 else if (printed)
3901 target_terminal_inferior ();
3902
3903 /* Clear the signal if it should not be passed. */
3904 if (signal_program[ecs->event_thread->stop_signal] == 0)
3905 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3906
3907 if (ecs->event_thread->prev_pc == stop_pc
3908 && ecs->event_thread->trap_expected
3909 && ecs->event_thread->step_resume_breakpoint == NULL)
3910 {
3911 /* We were just starting a new sequence, attempting to
3912 single-step off of a breakpoint and expecting a SIGTRAP.
3913 Instead this signal arrives. This signal will take us out
3914 of the stepping range so GDB needs to remember to, when
3915 the signal handler returns, resume stepping off that
3916 breakpoint. */
3917 /* To simplify things, "continue" is forced to use the same
3918 code paths as single-step - set a breakpoint at the
3919 signal return address and then, once hit, step off that
3920 breakpoint. */
3921 if (debug_infrun)
3922 fprintf_unfiltered (gdb_stdlog,
3923 "infrun: signal arrived while stepping over "
3924 "breakpoint\n");
3925
3926 insert_step_resume_breakpoint_at_frame (frame);
3927 ecs->event_thread->step_after_step_resume_breakpoint = 1;
3928 keep_going (ecs);
3929 return;
3930 }
3931
3932 if (ecs->event_thread->step_range_end != 0
3933 && ecs->event_thread->stop_signal != TARGET_SIGNAL_0
3934 && (ecs->event_thread->step_range_start <= stop_pc
3935 && stop_pc < ecs->event_thread->step_range_end)
3936 && frame_id_eq (get_stack_frame_id (frame),
3937 ecs->event_thread->step_stack_frame_id)
3938 && ecs->event_thread->step_resume_breakpoint == NULL)
3939 {
3940 /* The inferior is about to take a signal that will take it
3941 out of the single step range. Set a breakpoint at the
3942 current PC (which is presumably where the signal handler
3943 will eventually return) and then allow the inferior to
3944 run free.
3945
3946 Note that this is only needed for a signal delivered
3947 while in the single-step range. Nested signals aren't a
3948 problem as they eventually all return. */
3949 if (debug_infrun)
3950 fprintf_unfiltered (gdb_stdlog,
3951 "infrun: signal may take us out of "
3952 "single-step range\n");
3953
3954 insert_step_resume_breakpoint_at_frame (frame);
3955 keep_going (ecs);
3956 return;
3957 }
3958
3959 /* Note: step_resume_breakpoint may be non-NULL. This occurs
3960 when either there's a nested signal, or when there's a
3961 pending signal enabled just as the signal handler returns
3962 (leaving the inferior at the step-resume-breakpoint without
3963 actually executing it). Either way continue until the
3964 breakpoint is really hit. */
3965 keep_going (ecs);
3966 return;
3967 }
3968
3969 /* Handle cases caused by hitting a breakpoint. */
3970 {
3971 CORE_ADDR jmp_buf_pc;
3972 struct bpstat_what what;
3973
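/* Ask the bpstat what to do about the breakpoints that triggered
   here; it reduces them to a single main action plus a few flags.  */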
3974 what = bpstat_what (ecs->event_thread->stop_bpstat);
3975
3976 if (what.call_dummy)
3977 {
3978 stop_stack_dummy = what.call_dummy;
3979 }
3980
3981 switch (what.main_action)
3982 {
3983 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
3984 /* If we hit the breakpoint at longjmp while stepping, we
3985 install a momentary breakpoint at the target of the
3986 jmp_buf. */
3987
3988 if (debug_infrun)
3989 fprintf_unfiltered (gdb_stdlog,
3990 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
3991
3992 ecs->event_thread->stepping_over_breakpoint = 1;
3993
3994 if (!gdbarch_get_longjmp_target_p (gdbarch)
3995 || !gdbarch_get_longjmp_target (gdbarch, frame, &jmp_buf_pc))
3996 {
3997 if (debug_infrun)
3998 fprintf_unfiltered (gdb_stdlog, "\
3999 infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME (!gdbarch_get_longjmp_target)\n");
4000 keep_going (ecs);
4001 return;
4002 }
4003
4004 /* We're going to replace the current step-resume breakpoint
4005 with a longjmp-resume breakpoint. */
4006 delete_step_resume_breakpoint (ecs->event_thread);
4007
4008 /* Insert a breakpoint at resume address. */
4009 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4010
4011 keep_going (ecs);
4012 return;
4013
4014 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4015 if (debug_infrun)
4016 fprintf_unfiltered (gdb_stdlog,
4017 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4018
4019 gdb_assert (ecs->event_thread->step_resume_breakpoint != NULL);
4020 delete_step_resume_breakpoint (ecs->event_thread);
4021
4022 ecs->event_thread->stop_step = 1;
4023 print_stop_reason (END_STEPPING_RANGE, 0);
4024 stop_stepping (ecs);
4025 return;
4026
4027 case BPSTAT_WHAT_SINGLE:
4028 if (debug_infrun)
4029 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4030 ecs->event_thread->stepping_over_breakpoint = 1;
4031 /* Still need to check other stuff, at least the case
4032 where we are stepping and step out of the right range. */
4033 break;
4034
4035 case BPSTAT_WHAT_STOP_NOISY:
4036 if (debug_infrun)
4037 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4038 stop_print_frame = 1;
4039
4040 /* We are about to nuke the step_resume_breakpoint via the
4041 cleanup chain, so no need to worry about it here. */
4042
4043 stop_stepping (ecs);
4044 return;
4045
4046 case BPSTAT_WHAT_STOP_SILENT:
4047 if (debug_infrun)
4048 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4049 stop_print_frame = 0;
4050
4051 /* We are about to nuke the step_resume_breakpoint via the
4052 cleanup chain, so no need to worry about it here. */
4053
4054 stop_stepping (ecs);
4055 return;
4056
4057 case BPSTAT_WHAT_STEP_RESUME:
4058 if (debug_infrun)
4059 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4060
4061 delete_step_resume_breakpoint (ecs->event_thread);
4062 if (ecs->event_thread->step_after_step_resume_breakpoint)
4063 {
4064 /* Back when the step-resume breakpoint was inserted, we
4065 were trying to single-step off a breakpoint. Go back
4066 to doing that. */
4067 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4068 ecs->event_thread->stepping_over_breakpoint = 1;
4069 keep_going (ecs);
4070 return;
4071 }
4072 if (stop_pc == ecs->stop_func_start
4073 && execution_direction == EXEC_REVERSE)
4074 {
4075 /* We are stepping over a function call in reverse, and
4076 just hit the step-resume breakpoint at the start
4077 address of the function. Go back to single-stepping,
4078 which should take us back to the function call. */
4079 ecs->event_thread->stepping_over_breakpoint = 1;
4080 keep_going (ecs);
4081 return;
4082 }
4083 break;
4084
4085 case BPSTAT_WHAT_CHECK_SHLIBS:
4086 {
4087 if (debug_infrun)
4088 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_CHECK_SHLIBS\n");
4089
4090 /* Check for any newly added shared libraries if we're
4091 supposed to be adding them automatically. Switch
4092 terminal for any messages produced by
4093 breakpoint_re_set. */
4094 target_terminal_ours_for_output ();
4095 /* NOTE: cagney/2003-11-25: Make certain that the target
4096 stack's section table is kept up-to-date.  Architectures
4097 (e.g., PPC64) use the section table to perform
4098 operations such as address => section name and hence
4099 require the table to contain all sections (including
4100 those found in shared libraries). */
4101 #ifdef SOLIB_ADD
4102 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
4103 #else
4104 solib_add (NULL, 0, &current_target, auto_solib_add);
4105 #endif
4106 target_terminal_inferior ();
4107
4108 /* If requested, stop when the dynamic linker notifies
4109 gdb of events. This allows the user to get control
4110 and place breakpoints in initializer routines for
4111 dynamically loaded objects (among other things). */
4112 if (stop_on_solib_events || stop_stack_dummy)
4113 {
4114 stop_stepping (ecs);
4115 return;
4116 }
4117 else
4118 {
4119 /* We want to step over this breakpoint, then keep going. */
4120 ecs->event_thread->stepping_over_breakpoint = 1;
4121 break;
4122 }
4123 }
4124 break;
4125
4126 case BPSTAT_WHAT_CHECK_JIT:
4127 if (debug_infrun)
4128 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_CHECK_JIT\n");
4129
4130 /* Switch terminal for any messages produced by breakpoint_re_set. */
4131 target_terminal_ours_for_output ();
4132
4133 jit_event_handler (gdbarch);
4134
4135 target_terminal_inferior ();
4136
4137 /* We want to step over this breakpoint, then keep going. */
4138 ecs->event_thread->stepping_over_breakpoint = 1;
4139
4140 break;
4141
4142 case BPSTAT_WHAT_LAST:
4143 /* Not a real code, but listed here to shut up gcc -Wall. */
4144
4145 case BPSTAT_WHAT_KEEP_CHECKING:
4146 break;
4147 }
4148 }
4149
4150 /* We come here if we hit a breakpoint but should not
4151 stop for it. Possibly we also were stepping
4152 and should stop for that. So fall through and
4153 test for stepping. But, if not stepping,
4154 do not stop. */
4155
4156 /* In all-stop mode, if we're currently stepping but have stopped in
4157 some other thread, we need to switch back to the stepped thread. */
4158 if (!non_stop)
4159 {
4160 struct thread_info *tp;
4161
4162 tp = iterate_over_threads (currently_stepping_or_nexting_callback,
4163 ecs->event_thread);
4164 if (tp)
4165 {
4166 /* However, if the current thread is blocked on some internal
4167 breakpoint, and we simply need to step over that breakpoint
4168 to get it going again, do that first. */
4169 if ((ecs->event_thread->trap_expected
4170 && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP)
4171 || ecs->event_thread->stepping_over_breakpoint)
4172 {
4173 keep_going (ecs);
4174 return;
4175 }
4176
4177 /* If the stepping thread exited, then don't try to switch
4178 back and resume it, which could fail in several different
4179 ways depending on the target. Instead, just keep going.
4180
4181 We can find a stepping dead thread in the thread list in
4182 two cases:
4183
4184 - The target supports thread exit events, and when the
4185 target tries to delete the thread from the thread list,
4186 inferior_ptid pointed at the exiting thread. In such
4187 case, calling delete_thread does not really remove the
4188 thread from the list; instead, the thread is left listed,
4189 with 'exited' state.
4190
4191 - The target's debug interface does not support thread
4192 exit events, and so we have no idea whatsoever if the
4193 previously stepping thread is still alive. For that
4194 reason, we need to synchronously query the target
4195 now. */
4196 if (is_exited (tp->ptid)
4197 || !target_thread_alive (tp->ptid))
4198 {
4199 if (debug_infrun)
4200 fprintf_unfiltered (gdb_stdlog, "\
4201 infrun: not switching back to stepped thread, it has vanished\n");
4202
4203 delete_thread (tp->ptid);
4204 keep_going (ecs);
4205 return;
4206 }
4207
4208 /* Otherwise, we no longer expect a trap in the current thread.
4209 Clear the trap_expected flag before switching back -- this is
4210 what keep_going would do as well, if we called it. */
4211 ecs->event_thread->trap_expected = 0;
4212
4213 if (debug_infrun)
4214 fprintf_unfiltered (gdb_stdlog,
4215 "infrun: switching back to stepped thread\n");
4216
4217 ecs->event_thread = tp;
4218 ecs->ptid = tp->ptid;
4219 context_switch (ecs->ptid);
4220 keep_going (ecs);
4221 return;
4222 }
4223 }
4224
4225 /* Are we stepping to get the inferior out of the dynamic linker's
4226 hook (and possibly the dld itself) after catching a shlib
4227 event? */
4228 if (ecs->event_thread->stepping_through_solib_after_catch)
4229 {
4230 #if defined(SOLIB_ADD)
4231 /* Have we reached our destination? If not, keep going. */
4232 if (SOLIB_IN_DYNAMIC_LINKER (PIDGET (ecs->ptid), stop_pc))
4233 {
4234 if (debug_infrun)
4235 fprintf_unfiltered (gdb_stdlog, "infrun: stepping in dynamic linker\n");
4236 ecs->event_thread->stepping_over_breakpoint = 1;
4237 keep_going (ecs);
4238 return;
4239 }
4240 #endif
4241 if (debug_infrun)
4242 fprintf_unfiltered (gdb_stdlog, "infrun: step past dynamic linker\n");
4243 /* Else, stop and report the catchpoint(s) whose triggering
4244 caused us to begin stepping. */
4245 ecs->event_thread->stepping_through_solib_after_catch = 0;
4246 bpstat_clear (&ecs->event_thread->stop_bpstat);
4247 ecs->event_thread->stop_bpstat
4248 = bpstat_copy (ecs->event_thread->stepping_through_solib_catchpoints);
4249 bpstat_clear (&ecs->event_thread->stepping_through_solib_catchpoints);
4250 stop_print_frame = 1;
4251 stop_stepping (ecs);
4252 return;
4253 }
4254
4255 if (ecs->event_thread->step_resume_breakpoint)
4256 {
4257 if (debug_infrun)
4258 fprintf_unfiltered (gdb_stdlog,
4259 "infrun: step-resume breakpoint is inserted\n");
4260
4261 /* Having a step-resume breakpoint overrides anything
4262 else having to do with stepping commands until
4263 that breakpoint is reached. */
4264 keep_going (ecs);
4265 return;
4266 }
4267
4268 if (ecs->event_thread->step_range_end == 0)
4269 {
4270 if (debug_infrun)
4271 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4272 /* Likewise if we aren't even stepping. */
4273 keep_going (ecs);
4274 return;
4275 }
4276
4277 /* Re-fetch current thread's frame in case the code above caused
4278 the frame cache to be re-initialized, making our FRAME variable
4279 a dangling pointer. */
4280 frame = get_current_frame ();
4281
4282 /* If stepping through a line, keep going if still within it.
4283
4284 Note that step_range_end is the address of the first instruction
4285 beyond the step range, and NOT the address of the last instruction
4286 within it!
4287
4288 Note also that during reverse execution, we may be stepping
4289 through a function epilogue and therefore must detect when
4290 the current-frame changes in the middle of a line. */
4291
4292 if (stop_pc >= ecs->event_thread->step_range_start
4293 && stop_pc < ecs->event_thread->step_range_end
4294 && (execution_direction != EXEC_REVERSE
4295 || frame_id_eq (get_frame_id (frame),
4296 ecs->event_thread->step_frame_id)))
4297 {
4298 if (debug_infrun)
4299 fprintf_unfiltered
4300 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4301 paddress (gdbarch, ecs->event_thread->step_range_start),
4302 paddress (gdbarch, ecs->event_thread->step_range_end));
4303
4304 /* When stepping backward, stop at beginning of line range
4305 (unless it's the function entry point, in which case
4306 keep going back to the call point). */
4307 if (stop_pc == ecs->event_thread->step_range_start
4308 && stop_pc != ecs->stop_func_start
4309 && execution_direction == EXEC_REVERSE)
4310 {
4311 ecs->event_thread->stop_step = 1;
4312 print_stop_reason (END_STEPPING_RANGE, 0);
4313 stop_stepping (ecs);
4314 }
4315 else
4316 keep_going (ecs);
4317
4318 return;
4319 }
4320
4321 /* We stepped out of the stepping range. */
4322
4323 /* If we are stepping at the source level and entered the runtime
4324 loader dynamic symbol resolution code...
4325
4326 EXEC_FORWARD: we keep on single stepping until we exit the run
4327 time loader code and reach the callee's address.
4328
4329 EXEC_REVERSE: we've already executed the callee (backward), and
4330 the runtime loader code is handled just like any other
4331 undebuggable function call. Now we need only keep stepping
4332 backward through the trampoline code, and that's handled further
4333 down, so there is nothing for us to do here. */
4334
4335 if (execution_direction != EXEC_REVERSE
4336 && ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4337 && in_solib_dynsym_resolve_code (stop_pc))
4338 {
4339 CORE_ADDR pc_after_resolver =
4340 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4341
4342 if (debug_infrun)
4343 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into dynsym resolve code\n");
4344
4345 if (pc_after_resolver)
4346 {
4347 /* Set up a step-resume breakpoint at the address
4348 indicated by SKIP_SOLIB_RESOLVER. */
4349 struct symtab_and_line sr_sal;
4350
4351 init_sal (&sr_sal);
4352 sr_sal.pc = pc_after_resolver;
4353 sr_sal.pspace = get_frame_program_space (frame);
4354
4355 insert_step_resume_breakpoint_at_sal (gdbarch,
4356 sr_sal, null_frame_id);
4357 }
4358
4359 keep_going (ecs);
4360 return;
4361 }
4362
4363 if (ecs->event_thread->step_range_end != 1
4364 && (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4365 || ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4366 && get_frame_type (frame) == SIGTRAMP_FRAME)
4367 {
4368 if (debug_infrun)
4369 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into signal trampoline\n");
4370 /* The inferior, while doing a "step" or "next", has ended up in
4371 a signal trampoline (either by a signal being delivered or by
4372 the signal handler returning). Just single-step until the
4373 inferior leaves the trampoline (either by calling the handler
4374 or returning). */
4375 keep_going (ecs);
4376 return;
4377 }
4378
4379 /* Check for subroutine calls. The check for the current frame
4380 equalling the step ID is not necessary - the check of the
4381 previous frame's ID is sufficient - but it is a common case and
4382 cheaper than checking the previous frame's ID.
4383
4384 NOTE: frame_id_eq will never report two invalid frame IDs as
4385 being equal, so to get into this block, both the current and
4386 previous frame must have valid frame IDs. */
4387 /* The outer_frame_id check is a heuristic to detect stepping
4388 through startup code. If we step over an instruction which
4389 sets the stack pointer from an invalid value to a valid value,
4390 we may detect that as a subroutine call from the mythical
4391 "outermost" function. This could be fixed by marking
4392 outermost frames as !stack_p,code_p,special_p. Then the
4393 initial outermost frame, before sp was valid, would
4394 have code_addr == &_start. See the comment in frame_id_eq
4395 for more. */
4396 if (!frame_id_eq (get_stack_frame_id (frame),
4397 ecs->event_thread->step_stack_frame_id)
4398 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4399 ecs->event_thread->step_stack_frame_id)
4400 && (!frame_id_eq (ecs->event_thread->step_stack_frame_id,
4401 outer_frame_id)
4402 || step_start_function != find_pc_function (stop_pc))))
4403 {
4404 CORE_ADDR real_stop_pc;
4405
4406 if (debug_infrun)
4407 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4408
4409 if ((ecs->event_thread->step_over_calls == STEP_OVER_NONE)
4410 || ((ecs->event_thread->step_range_end == 1)
4411 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4412 ecs->stop_func_start)))
4413 {
4414 /* I presume that step_over_calls is only 0 when we're
4415 supposed to be stepping at the assembly language level
4416 ("stepi"). Just stop. */
4417 /* Also, maybe we just did a "nexti" inside a prologue, so we
4418 thought it was a subroutine call but it was not. Stop as
4419 well. FENN */
4420 /* And this works the same backward as frontward. MVS */
4421 ecs->event_thread->stop_step = 1;
4422 print_stop_reason (END_STEPPING_RANGE, 0);
4423 stop_stepping (ecs);
4424 return;
4425 }
4426
4427 /* Reverse stepping through solib trampolines. */
4428
4429 if (execution_direction == EXEC_REVERSE
4430 && ecs->event_thread->step_over_calls != STEP_OVER_NONE
4431 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4432 || (ecs->stop_func_start == 0
4433 && in_solib_dynsym_resolve_code (stop_pc))))
4434 {
4435 /* Any solib trampoline code can be handled in reverse
4436 by simply continuing to single-step. We have already
4437 executed the solib function (backwards), and a few
4438 steps will take us back through the trampoline to the
4439 caller. */
4440 keep_going (ecs);
4441 return;
4442 }
4443
4444 if (ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4445 {
4446 /* We're doing a "next".
4447
4448 Normal (forward) execution: set a breakpoint at the
4449 callee's return address (the address at which the caller
4450 will resume).
4451
4452 Reverse (backward) execution: set the step-resume
4453 breakpoint at the start of the function that we just
4454 stepped into (backwards), and continue to there. When we
4455 get there, we'll need to single-step back to the caller. */
4456
4457 if (execution_direction == EXEC_REVERSE)
4458 {
4459 struct symtab_and_line sr_sal;
4460
4461 /* Normal function call return (static or dynamic). */
4462 init_sal (&sr_sal);
4463 sr_sal.pc = ecs->stop_func_start;
4464 sr_sal.pspace = get_frame_program_space (frame);
4465 insert_step_resume_breakpoint_at_sal (gdbarch,
4466 sr_sal, null_frame_id);
4467 }
4468 else
4469 insert_step_resume_breakpoint_at_caller (frame);
4470
4471 keep_going (ecs);
4472 return;
4473 }
4474
4475 /* If we are in a function call trampoline (a stub between the
4476 calling routine and the real function), locate the real
4477 function. That's what tells us (a) whether we want to step
4478 into it at all, and (b) what prologue we want to run to the
4479 end of, if we do step into it. */
4480 real_stop_pc = skip_language_trampoline (frame, stop_pc);
4481 if (real_stop_pc == 0)
4482 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4483 if (real_stop_pc != 0)
4484 ecs->stop_func_start = real_stop_pc;
4485
4486 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
4487 {
4488 struct symtab_and_line sr_sal;
4489
4490 init_sal (&sr_sal);
4491 sr_sal.pc = ecs->stop_func_start;
4492 sr_sal.pspace = get_frame_program_space (frame);
4493
4494 insert_step_resume_breakpoint_at_sal (gdbarch,
4495 sr_sal, null_frame_id);
4496 keep_going (ecs);
4497 return;
4498 }
4499
4500 /* If we have line number information for the function we are
4501 thinking of stepping into, step into it.
4502
4503 If there are several symtabs at that PC (e.g. with include
4504 files), we just want to know whether *any* of them have line
4505 numbers. find_pc_line handles this. */
4506 {
4507 struct symtab_and_line tmp_sal;
4508
4509 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
4510 tmp_sal.pspace = get_frame_program_space (frame);
4511 if (tmp_sal.line != 0)
4512 {
4513 if (execution_direction == EXEC_REVERSE)
4514 handle_step_into_function_backward (gdbarch, ecs);
4515 else
4516 handle_step_into_function (gdbarch, ecs);
4517 return;
4518 }
4519 }
4520
4521 /* If we have no line number and the step-stop-if-no-debug is
4522 set, we stop the step so that the user has a chance to switch
4523 to assembly mode.  */
4524 if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4525 && step_stop_if_no_debug)
4526 {
4527 ecs->event_thread->stop_step = 1;
4528 print_stop_reason (END_STEPPING_RANGE, 0);
4529 stop_stepping (ecs);
4530 return;
4531 }
4532
4533 if (execution_direction == EXEC_REVERSE)
4534 {
4535 /* Set a breakpoint at callee's start address.
4536 From there we can step once and be back in the caller. */
4537 struct symtab_and_line sr_sal;
4538
4539 init_sal (&sr_sal);
4540 sr_sal.pc = ecs->stop_func_start;
4541 sr_sal.pspace = get_frame_program_space (frame);
4542 insert_step_resume_breakpoint_at_sal (gdbarch,
4543 sr_sal, null_frame_id);
4544 }
4545 else
4546 /* Set a breakpoint at callee's return address (the address
4547 at which the caller will resume). */
4548 insert_step_resume_breakpoint_at_caller (frame);
4549
4550 keep_going (ecs);
4551 return;
4552 }
4553
4554 /* Reverse stepping through solib trampolines. */
4555
4556 if (execution_direction == EXEC_REVERSE
4557 && ecs->event_thread->step_over_calls != STEP_OVER_NONE)
4558 {
4559 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4560 || (ecs->stop_func_start == 0
4561 && in_solib_dynsym_resolve_code (stop_pc)))
4562 {
4563 /* Any solib trampoline code can be handled in reverse
4564 by simply continuing to single-step. We have already
4565 executed the solib function (backwards), and a few
4566 steps will take us back through the trampoline to the
4567 caller. */
4568 keep_going (ecs);
4569 return;
4570 }
4571 else if (in_solib_dynsym_resolve_code (stop_pc))
4572 {
4573 /* Stepped backward into the solib dynsym resolver.
4574 Set a breakpoint at its start and continue, then
4575 one more step will take us out. */
4576 struct symtab_and_line sr_sal;
4577
4578 init_sal (&sr_sal);
4579 sr_sal.pc = ecs->stop_func_start;
4580 sr_sal.pspace = get_frame_program_space (frame);
4581 insert_step_resume_breakpoint_at_sal (gdbarch,
4582 sr_sal, null_frame_id);
4583 keep_going (ecs);
4584 return;
4585 }
4586 }
4587
4588 /* If we're in the return path from a shared library trampoline,
4589 we want to proceed through the trampoline when stepping. */
4590 if (gdbarch_in_solib_return_trampoline (gdbarch,
4591 stop_pc, ecs->stop_func_name))
4592 {
4593 /* Determine where this trampoline returns. */
4594 CORE_ADDR real_stop_pc;
4595
4596 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4597
4598 if (debug_infrun)
4599 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into solib return tramp\n");
4600
4601 /* Only proceed through if we know where it's going. */
4602 if (real_stop_pc)
4603 {
4604 /* And put the step-breakpoint there and go until there. */
4605 struct symtab_and_line sr_sal;
4606
4607 init_sal (&sr_sal); /* initialize to zeroes */
4608 sr_sal.pc = real_stop_pc;
4609 sr_sal.section = find_pc_overlay (sr_sal.pc);
4610 sr_sal.pspace = get_frame_program_space (frame);
4611
4612 /* Do not specify what the fp should be when we stop since
4613 on some machines the prologue is where the new fp value
4614 is established. */
4615 insert_step_resume_breakpoint_at_sal (gdbarch,
4616 sr_sal, null_frame_id);
4617
4618 /* Restart without fiddling with the step ranges or
4619 other state. */
4620 keep_going (ecs);
4621 return;
4622 }
4623 }
4624
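/* Find the source line containing stop_pc; the line-stepping checks
   below compare it with the line we were stepping through.  */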
4625 stop_pc_sal = find_pc_line (stop_pc, 0);
4626
4627 /* NOTE: tausq/2004-05-24: This if block used to be done before all
4628 the trampoline processing logic; however, there are some trampolines
4629 that have no names, so we should do trampoline handling first. */
4630 if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4631 && ecs->stop_func_name == NULL
4632 && stop_pc_sal.line == 0)
4633 {
4634 if (debug_infrun)
4635 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into undebuggable function\n");
4636
4637 /* The inferior just stepped into, or returned to, an
4638 undebuggable function (where there is no debugging information
4639 and no line number corresponding to the address where the
4640 inferior stopped). Since we want to skip this kind of code,
4641 we keep going until the inferior returns from this
4642 function - unless the user has asked us not to (via
4643 set step-mode) or we no longer know how to get back
4644 to the call site. */
4645 if (step_stop_if_no_debug
4646 || !frame_id_p (frame_unwind_caller_id (frame)))
4647 {
4648 /* If we have no line number and the step-stop-if-no-debug
4649 is set, we stop the step so that the user has a chance to
4650 switch to assembly mode.  */
4651 ecs->event_thread->stop_step = 1;
4652 print_stop_reason (END_STEPPING_RANGE, 0);
4653 stop_stepping (ecs);
4654 return;
4655 }
4656 else
4657 {
4658 /* Set a breakpoint at callee's return address (the address
4659 at which the caller will resume). */
4660 insert_step_resume_breakpoint_at_caller (frame);
4661 keep_going (ecs);
4662 return;
4663 }
4664 }
4665
4666 if (ecs->event_thread->step_range_end == 1)
4667 {
4668 /* It is stepi or nexti. We always want to stop stepping after
4669 one instruction. */
4670 if (debug_infrun)
4671 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
4672 ecs->event_thread->stop_step = 1;
4673 print_stop_reason (END_STEPPING_RANGE, 0);
4674 stop_stepping (ecs);
4675 return;
4676 }
4677
4678 if (stop_pc_sal.line == 0)
4679 {
4680 /* We have no line number information. That means to stop
4681 stepping (does this always happen right after one instruction,
4682 when we do "s" in a function with no line numbers,
4683 or can this happen as a result of a return or longjmp?). */
4684 if (debug_infrun)
4685 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
4686 ecs->event_thread->stop_step = 1;
4687 print_stop_reason (END_STEPPING_RANGE, 0);
4688 stop_stepping (ecs);
4689 return;
4690 }
4691
4692 /* Look for "calls" to inlined functions, part one. If the inline
4693 frame machinery detected some skipped call sites, we have entered
4694 a new inline function. */
4695
4696 if (frame_id_eq (get_frame_id (get_current_frame ()),
4697 ecs->event_thread->step_frame_id)
4698 && inline_skipped_frames (ecs->ptid))
4699 {
4700 struct symtab_and_line call_sal;
4701
4702 if (debug_infrun)
4703 fprintf_unfiltered (gdb_stdlog,
4704 "infrun: stepped into inlined function\n");
4705
4706 find_frame_sal (get_current_frame (), &call_sal);
4707
4708 if (ecs->event_thread->step_over_calls != STEP_OVER_ALL)
4709 {
4710 /* For "step", we're going to stop. But if the call site
4711 for this inlined function is on the same source line as
4712 we were previously stepping, go down into the function
4713 first. Otherwise stop at the call site. */
4714
4715 if (call_sal.line == ecs->event_thread->current_line
4716 && call_sal.symtab == ecs->event_thread->current_symtab)
4717 step_into_inline_frame (ecs->ptid);
4718
4719 ecs->event_thread->stop_step = 1;
4720 print_stop_reason (END_STEPPING_RANGE, 0);
4721 stop_stepping (ecs);
4722 return;
4723 }
4724 else
4725 {
4726 /* For "next", we should stop at the call site if it is on a
4727 different source line. Otherwise continue through the
4728 inlined function. */
4729 if (call_sal.line == ecs->event_thread->current_line
4730 && call_sal.symtab == ecs->event_thread->current_symtab)
4731 keep_going (ecs);
4732 else
4733 {
4734 ecs->event_thread->stop_step = 1;
4735 print_stop_reason (END_STEPPING_RANGE, 0);
4736 stop_stepping (ecs);
4737 }
4738 return;
4739 }
4740 }
4741
4742 /* Look for "calls" to inlined functions, part two. If we are still
4743 in the same real function we were stepping through, but we have
4744 to go further up to find the exact frame ID, we are stepping
4745 through a more inlined call beyond its call site. */
4746
4747 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
4748 && !frame_id_eq (get_frame_id (get_current_frame ()),
4749 ecs->event_thread->step_frame_id)
4750 && stepped_in_from (get_current_frame (),
4751 ecs->event_thread->step_frame_id))
4752 {
4753 if (debug_infrun)
4754 fprintf_unfiltered (gdb_stdlog,
4755 "infrun: stepping through inlined function\n");
4756
4757 if (ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4758 keep_going (ecs);
4759 else
4760 {
4761 ecs->event_thread->stop_step = 1;
4762 print_stop_reason (END_STEPPING_RANGE, 0);
4763 stop_stepping (ecs);
4764 }
4765 return;
4766 }
4767
4768 if ((stop_pc == stop_pc_sal.pc)
4769 && (ecs->event_thread->current_line != stop_pc_sal.line
4770 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
4771 {
4772 /* We are at the start of a different line. So stop. Note that
4773 we don't stop if we step into the middle of a different line.
4774 That is said to make things like for (;;) statements work
4775 better. */
4776 if (debug_infrun)
4777 fprintf_unfiltered (gdb_stdlog, "infrun: stepped to a different line\n");
4778 ecs->event_thread->stop_step = 1;
4779 print_stop_reason (END_STEPPING_RANGE, 0);
4780 stop_stepping (ecs);
4781 return;
4782 }
4783
4784 /* We aren't done stepping.
4785
4786 Optimize by setting the stepping range to the line.
4787 (We might not be in the original line, but if we entered a
4788 new line in mid-statement, we continue stepping. This makes
4789 things like for(;;) statements work better.) */
4790
4791 ecs->event_thread->step_range_start = stop_pc_sal.pc;
4792 ecs->event_thread->step_range_end = stop_pc_sal.end;
4793 set_step_info (frame, stop_pc_sal);
4794
4795 if (debug_infrun)
4796 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
4797 keep_going (ecs);
4798 }
4799
4800 /* Is thread TP in the middle of single-stepping? */
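/* That is: it has a step range and no step-resume breakpoint pending,
   it is stepping over a breakpoint (trap_expected), it is stepping out
   of the dynamic linker after a shlib catchpoint, or bpstat_should_step
   requests it (typically when software watchpoints are in use).  */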
4801
4802 static int
4803 currently_stepping (struct thread_info *tp)
4804 {
4805 return ((tp->step_range_end && tp->step_resume_breakpoint == NULL)
4806 || tp->trap_expected
4807 || tp->stepping_through_solib_after_catch
4808 || bpstat_should_step ());
4809 }
4810
4811 /* Returns true if any thread *but* the one passed in "data" is in the
4812 middle of stepping or of handling a "next". */
4813
4814 static int
4815 currently_stepping_or_nexting_callback (struct thread_info *tp, void *data)
4816 {
4817 if (tp == data)
4818 return 0;
4819
4820 return (tp->step_range_end
4821 || tp->trap_expected
4822 || tp->stepping_through_solib_after_catch);
4823 }
4824
4825 /* Inferior has stepped into a subroutine call with source code that
4826 we should not step over.  Step to the first line of code in
4827 it. */
4828
4829 static void
4830 handle_step_into_function (struct gdbarch *gdbarch,
4831 struct execution_control_state *ecs)
4832 {
4833 struct symtab *s;
4834 struct symtab_and_line stop_func_sal, sr_sal;
4835
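/* Unless the function is written in assembly, skip its prologue so we
   land on the first line of user code.  */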
4836 s = find_pc_symtab (stop_pc);
4837 if (s && s->language != language_asm)
4838 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
4839 ecs->stop_func_start);
4840
4841 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
4842 /* Use the step_resume_break to step until the end of the prologue,
4843 even if that involves jumps (as it seems to on the vax under
4844 4.2). */
4845 /* If the prologue ends in the middle of a source line, continue to
4846 the end of that source line (if it is still within the function).
4847 Otherwise, just go to end of prologue. */
4848 if (stop_func_sal.end
4849 && stop_func_sal.pc != ecs->stop_func_start
4850 && stop_func_sal.end < ecs->stop_func_end)
4851 ecs->stop_func_start = stop_func_sal.end;
4852
4853 /* Architectures which require breakpoint adjustment might not be able
4854 to place a breakpoint at the computed address. If so, the test
4855 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
4856 ecs->stop_func_start to an address at which a breakpoint may be
4857 legitimately placed.
4858
4859 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
4860 made, GDB will enter an infinite loop when stepping through
4861 optimized code consisting of VLIW instructions which contain
4862 subinstructions corresponding to different source lines. On
4863 FR-V, it's not permitted to place a breakpoint on any but the
4864 first subinstruction of a VLIW instruction. When a breakpoint is
4865 set, GDB will adjust the breakpoint address to the beginning of
4866 the VLIW instruction. Thus, we need to make the corresponding
4867 adjustment here when computing the stop address. */
4868
4869 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
4870 {
4871 ecs->stop_func_start
4872 = gdbarch_adjust_breakpoint_address (gdbarch,
4873 ecs->stop_func_start);
4874 }
4875
4876 if (ecs->stop_func_start == stop_pc)
4877 {
4878 /* We are already there: stop now. */
4879 ecs->event_thread->stop_step = 1;
4880 print_stop_reason (END_STEPPING_RANGE, 0);
4881 stop_stepping (ecs);
4882 return;
4883 }
4884 else
4885 {
4886 /* Put the step-breakpoint there and go until there. */
4887 init_sal (&sr_sal); /* initialize to zeroes */
4888 sr_sal.pc = ecs->stop_func_start;
4889 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
4890 sr_sal.pspace = get_frame_program_space (get_current_frame ());
4891
4892 /* Do not specify what the fp should be when we stop since on
4893 some machines the prologue is where the new fp value is
4894 established. */
4895 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
4896
4897 /* And make sure stepping stops right away then. */
4898 ecs->event_thread->step_range_end = ecs->event_thread->step_range_start;
4899 }
4900 keep_going (ecs);
4901 }
4902
4903 /* Inferior has stepped backward into a subroutine call with source
4904 code that we should not step over. Do step to the beginning of the
4905 last line of code in it. */
4906
4907 static void
4908 handle_step_into_function_backward (struct gdbarch *gdbarch,
4909 struct execution_control_state *ecs)
4910 {
4911 struct symtab *s;
4912 struct symtab_and_line stop_func_sal;
4913
4914 s = find_pc_symtab (stop_pc);
4915 if (s && s->language != language_asm)
4916 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
4917 ecs->stop_func_start);
4918
4919 stop_func_sal = find_pc_line (stop_pc, 0);
4920
4921 /* OK, we're just going to keep stepping here. */
4922 if (stop_func_sal.pc == stop_pc)
4923 {
4924 /* We're there already. Just stop stepping now. */
4925 ecs->event_thread->stop_step = 1;
4926 print_stop_reason (END_STEPPING_RANGE, 0);
4927 stop_stepping (ecs);
4928 }
4929 else
4930 {
4931 /* Else just reset the step range and keep going.
4932 No step-resume breakpoint; they don't work for
4933 epilogues, which can have multiple entry paths. */
4934 ecs->event_thread->step_range_start = stop_func_sal.pc;
4935 ecs->event_thread->step_range_end = stop_func_sal.end;
4936 keep_going (ecs);
4937 }
4938 return;
4939 }
4940
4941 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
4942 This is used both to skip over functions and to skip over other code, such as prologues and signal handlers. */
4943
4944 static void
4945 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
4946 struct symtab_and_line sr_sal,
4947 struct frame_id sr_id)
4948 {
4949 /* There should never be more than one step-resume or longjmp-resume
4950 breakpoint per thread, so we should never be setting a new
4951 step_resume_breakpoint when one is already active. */
4952 gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL);
4953
4954 if (debug_infrun)
4955 fprintf_unfiltered (gdb_stdlog,
4956 "infrun: inserting step-resume breakpoint at %s\n",
4957 paddress (gdbarch, sr_sal.pc));
4958
4959 inferior_thread ()->step_resume_breakpoint
4960 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, bp_step_resume);
4961 }
4962
4963 /* Insert a "step-resume breakpoint" at RETURN_FRAME.pc. This is used
4964 to skip a potential signal handler.
4965
4966 This is called with the interrupted function's frame. The signal
4967 handler, when it returns, will resume the interrupted function at
4968 RETURN_FRAME.pc. */
4969
4970 static void
4971 insert_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
4972 {
4973 struct symtab_and_line sr_sal;
4974 struct gdbarch *gdbarch;
4975
4976 gdb_assert (return_frame != NULL);
4977 init_sal (&sr_sal); /* initialize to zeros */
4978
4979 gdbarch = get_frame_arch (return_frame);
4980 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
4981 sr_sal.section = find_pc_overlay (sr_sal.pc);
4982 sr_sal.pspace = get_frame_program_space (return_frame);
4983
4984 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
4985 get_stack_frame_id (return_frame));
4986 }
4987
4988 /* Similar to insert_step_resume_breakpoint_at_frame, except that it
4989 places the breakpoint at the previous frame's PC. This is used to
4990 skip a function after stepping into it (for "next" or if the called
4991 function has no debugging information).
4992
4993 The current function has almost always been reached by single
4994 stepping a call or return instruction. NEXT_FRAME belongs to the
4995 current function, and the breakpoint will be set at the caller's
4996 resume address.
4997
4998 This is a separate function rather than reusing
4999 insert_step_resume_breakpoint_at_frame in order to avoid
5000 get_prev_frame, which may stop prematurely (see the implementation
5001 of frame_unwind_caller_id for an example). */
5002
5003 static void
5004 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
5005 {
5006 struct symtab_and_line sr_sal;
5007 struct gdbarch *gdbarch;
5008
5009 /* We shouldn't have gotten here if we don't know where the call site
5010 is. */
5011 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
5012
5013 init_sal (&sr_sal); /* initialize to zeros */
5014
5015 gdbarch = frame_unwind_caller_arch (next_frame);
5016 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
5017 frame_unwind_caller_pc (next_frame));
5018 sr_sal.section = find_pc_overlay (sr_sal.pc);
5019 sr_sal.pspace = frame_unwind_program_space (next_frame);
5020
5021 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5022 frame_unwind_caller_id (next_frame));
5023 }
5024
5025 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
5026 new breakpoint at the target of a jmp_buf. The handling of
5027 longjmp-resume uses the same mechanisms used for handling
5028 "step-resume" breakpoints. */
5029
5030 static void
5031 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
5032 {
5033 /* There should never be more than one step-resume or longjmp-resume
5034 breakpoint per thread, so we should never be setting a new
5035 longjmp_resume_breakpoint when one is already active. */
5036 gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL);
5037
5038 if (debug_infrun)
5039 fprintf_unfiltered (gdb_stdlog,
5040 "infrun: inserting longjmp-resume breakpoint at %s\n",
5041 paddress (gdbarch, pc));
5042
5043 inferior_thread ()->step_resume_breakpoint =
5044 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
5045 }
5046
5047 static void
5048 stop_stepping (struct execution_control_state *ecs)
5049 {
5050 if (debug_infrun)
5051 fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n");
5052
5053 /* Let callers know we don't want to wait for the inferior anymore. */
5054 ecs->wait_some_more = 0;
5055 }
5056
5057 /* This function handles various cases where we need to continue
5058 waiting for the inferior. */
5059 /* (Used to be the keep_going: label in the old wait_for_inferior) */
5060
5061 static void
5062 keep_going (struct execution_control_state *ecs)
5063 {
5064 /* Make sure normal_stop is called if we get a QUIT handled before
5065 reaching resume. */
5066 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
5067
5068 /* Save the pc before execution, to compare with pc after stop. */
5069 ecs->event_thread->prev_pc
5070 = regcache_read_pc (get_thread_regcache (ecs->ptid));
5071
5072 /* If we get here, we should keep running the inferior and not
5073 return to the debugger just yet. */
5074
5075 if (ecs->event_thread->trap_expected
5076 && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP)
5077 {
5078 /* We took a signal (which we are supposed to pass through to
5079 the inferior, else we'd not get here) and we haven't yet
5080 gotten our trap. Simply continue. */
5081
5082 discard_cleanups (old_cleanups);
5083 resume (currently_stepping (ecs->event_thread),
5084 ecs->event_thread->stop_signal);
5085 }
5086 else
5087 {
5088 /* Either the trap was not expected, but we are continuing
5089 anyway (the user asked that this signal be passed to the
5090 child)
5091 -- or --
5092 The signal was SIGTRAP, e.g. it was our signal, but we
5093 decided we should resume from it.
5094
5095 We're going to run this baby now!
5096
5097 Note that insert_breakpoints won't try to re-insert
5098 already inserted breakpoints. Therefore, we don't
5099 care if breakpoints were already inserted, or not. */
5100
5101 if (ecs->event_thread->stepping_over_breakpoint)
5102 {
5103 struct regcache *thread_regcache = get_thread_regcache (ecs->ptid);
5104
5105 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
5106 /* Since we can't do a displaced step, we have to remove
5107 the breakpoint while we step it. To keep things
5108 simple, we remove them all. */
5109 remove_breakpoints ();
5110 }
5111 else
5112 {
5113 struct gdb_exception e;
5114
5115 /* Stop stepping when inserting breakpoints
5116 has failed. */
5117 TRY_CATCH (e, RETURN_MASK_ERROR)
5118 {
5119 insert_breakpoints ();
5120 }
5121 if (e.reason < 0)
5122 {
5123 exception_print (gdb_stderr, e);
5124 stop_stepping (ecs);
5125 return;
5126 }
5127 }
5128
5129 ecs->event_thread->trap_expected = ecs->event_thread->stepping_over_breakpoint;
5130
5131 /* Do not deliver SIGNAL_TRAP (except when the user explicitly
5132 specifies that such a signal should be delivered to the
5133 target program).
5134
5135 Typically, this would occur when a user is debugging a target
5136 monitor on a simulator: the target monitor sets a breakpoint; the
5137 simulator encounters this breakpoint and halts the simulation,
5138 handing control to GDB; GDB, noting that the breakpoint isn't
5139 valid, returns control back to the simulator; the simulator then
5140 delivers the hardware equivalent of a SIGNAL_TRAP to the program
5141 being debugged. */
5142
5143 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
5144 && !signal_program[ecs->event_thread->stop_signal])
5145 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
5146
5147 discard_cleanups (old_cleanups);
5148 resume (currently_stepping (ecs->event_thread),
5149 ecs->event_thread->stop_signal);
5150 }
5151
5152 prepare_to_wait (ecs);
5153 }
5154
5155 /* This function normally comes after a resume, before
5156 handle_inferior_event exits. It takes care of any last bits of
5157 housekeeping, and sets the all-important wait_some_more flag. */
5158
5159 static void
5160 prepare_to_wait (struct execution_control_state *ecs)
5161 {
5162 if (debug_infrun)
5163 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
5164
5165 /* This is the old end of the while loop. Let everybody know we
5166 want to wait for the inferior some more and get called again
5167 soon. */
5168 ecs->wait_some_more = 1;
5169 }
5170
5171 /* Print why the inferior has stopped. We always print something when
5172 the inferior exits or receives a signal. The rest of the cases are
5173 dealt with later on in normal_stop() and print_it_typical(). Ideally
5174 there should be a call to this function from handle_inferior_event()
5175 each time stop_stepping() is called. */
5176 static void
5177 print_stop_reason (enum inferior_stop_reason stop_reason, int stop_info)
5178 {
5179 switch (stop_reason)
5180 {
5181 case END_STEPPING_RANGE:
5182 /* We are done with a step/next/si/ni command. */
5183 /* For now print nothing. */
5184 /* Print a message only if not in the middle of doing a "step n"
5185 operation for n > 1. */
5186 if (!inferior_thread ()->step_multi
5187 || !inferior_thread ()->stop_step)
5188 if (ui_out_is_mi_like_p (uiout))
5189 ui_out_field_string
5190 (uiout, "reason",
5191 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
5192 break;
5193 case SIGNAL_EXITED:
5194 /* The inferior was terminated by a signal. */
5195 annotate_signalled ();
5196 if (ui_out_is_mi_like_p (uiout))
5197 ui_out_field_string
5198 (uiout, "reason",
5199 async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
5200 ui_out_text (uiout, "\nProgram terminated with signal ");
5201 annotate_signal_name ();
5202 ui_out_field_string (uiout, "signal-name",
5203 target_signal_to_name (stop_info));
5204 annotate_signal_name_end ();
5205 ui_out_text (uiout, ", ");
5206 annotate_signal_string ();
5207 ui_out_field_string (uiout, "signal-meaning",
5208 target_signal_to_string (stop_info));
5209 annotate_signal_string_end ();
5210 ui_out_text (uiout, ".\n");
5211 ui_out_text (uiout, "The program no longer exists.\n");
5212 break;
5213 case EXITED:
5214 /* The inferior program is finished. */
5215 annotate_exited (stop_info);
5216 if (stop_info)
5217 {
5218 if (ui_out_is_mi_like_p (uiout))
5219 ui_out_field_string (uiout, "reason",
5220 async_reason_lookup (EXEC_ASYNC_EXITED));
5221 ui_out_text (uiout, "\nProgram exited with code ");
5222 ui_out_field_fmt (uiout, "exit-code", "0%o",
5223 (unsigned int) stop_info);
5224 ui_out_text (uiout, ".\n");
5225 }
5226 else
5227 {
5228 if (ui_out_is_mi_like_p (uiout))
5229 ui_out_field_string
5230 (uiout, "reason",
5231 async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
5232 ui_out_text (uiout, "\nProgram exited normally.\n");
5233 }
5234 /* Support the --return-child-result option. */
5235 return_child_result_value = stop_info;
5236 break;
5237 case SIGNAL_RECEIVED:
5238 /* Signal received. The signal table tells us to print about
5239 it. */
5240 annotate_signal ();
5241
5242 if (stop_info == TARGET_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
5243 {
5244 struct thread_info *t = inferior_thread ();
5245
5246 ui_out_text (uiout, "\n[");
5247 ui_out_field_string (uiout, "thread-name",
5248 target_pid_to_str (t->ptid));
5249 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
5250 ui_out_text (uiout, " stopped");
5251 }
5252 else
5253 {
5254 ui_out_text (uiout, "\nProgram received signal ");
5255 annotate_signal_name ();
5256 if (ui_out_is_mi_like_p (uiout))
5257 ui_out_field_string
5258 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
5259 ui_out_field_string (uiout, "signal-name",
5260 target_signal_to_name (stop_info));
5261 annotate_signal_name_end ();
5262 ui_out_text (uiout, ", ");
5263 annotate_signal_string ();
5264 ui_out_field_string (uiout, "signal-meaning",
5265 target_signal_to_string (stop_info));
5266 annotate_signal_string_end ();
5267 }
5268 ui_out_text (uiout, ".\n");
5269 break;
5270 case NO_HISTORY:
5271 /* Reverse execution: target ran out of history info. */
5272 ui_out_text (uiout, "\nNo more reverse-execution history.\n");
5273 break;
5274 default:
5275 internal_error (__FILE__, __LINE__,
5276 _("print_stop_reason: unrecognized enum value"));
5277 break;
5278 }
5279 }
5280 \f
5281
5282 /* Here to return control to GDB when the inferior stops for real.
5283 Print appropriate messages, remove breakpoints, give terminal our modes.
5284
5285 STOP_PRINT_FRAME nonzero means print the executing frame
5286 (pc, function, args, file, line number and line text).
5287 BREAKPOINTS_FAILED nonzero means stop was due to error
5288 attempting to insert breakpoints. */
5289
5290 void
5291 normal_stop (void)
5292 {
5293 struct target_waitstatus last;
5294 ptid_t last_ptid;
5295 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
5296
5297 get_last_target_status (&last_ptid, &last);
5298
5299 /* If an exception is thrown from this point on, make sure to
5300 propagate GDB's knowledge of the executing state to the
5301 frontend/user running state. A QUIT is an easy exception to see
5302 here, so do this before any filtered output. */
5303 if (!non_stop)
5304 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
5305 else if (last.kind != TARGET_WAITKIND_SIGNALLED
5306 && last.kind != TARGET_WAITKIND_EXITED)
5307 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
5308
5309 /* In non-stop mode, we don't want GDB to switch threads behind the
5310 user's back, to avoid races where the user is typing a command to
5311 apply to thread x, but GDB switches to thread y before the user
5312 finishes entering the command. */
5313
5314 /* As with the notification of thread events, we want to delay
5315 notifying the user that we've switched thread context until
5316 the inferior actually stops.
5317
5318 There's no point in saying anything if the inferior has exited.
5319 Note that SIGNALLED here means "exited with a signal", not
5320 "received a signal". */
5321 if (!non_stop
5322 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
5323 && target_has_execution
5324 && last.kind != TARGET_WAITKIND_SIGNALLED
5325 && last.kind != TARGET_WAITKIND_EXITED)
5326 {
5327 target_terminal_ours_for_output ();
5328 printf_filtered (_("[Switching to %s]\n"),
5329 target_pid_to_str (inferior_ptid));
5330 annotate_thread_changed ();
5331 previous_inferior_ptid = inferior_ptid;
5332 }
5333
5334 if (!breakpoints_always_inserted_mode () && target_has_execution)
5335 {
5336 if (remove_breakpoints ())
5337 {
5338 target_terminal_ours_for_output ();
5339 printf_filtered (_("\
5340 Cannot remove breakpoints because program is no longer writable.\n\
5341 Further execution is probably impossible.\n"));
5342 }
5343 }
5344
5345 /* If an auto-display called a function and that got a signal,
5346 delete that auto-display to avoid an infinite recursion. */
5347
5348 if (stopped_by_random_signal)
5349 disable_current_display ();
5350
5351 /* Don't print a message if in the middle of doing a "step n"
5352 operation for n > 1 */
5353 if (target_has_execution
5354 && last.kind != TARGET_WAITKIND_SIGNALLED
5355 && last.kind != TARGET_WAITKIND_EXITED
5356 && inferior_thread ()->step_multi
5357 && inferior_thread ()->stop_step)
5358 goto done;
5359
5360 target_terminal_ours ();
5361
5362 /* Set the current source location. This will also happen if we
5363 display the frame below, but the current SAL will be incorrect
5364 during a user hook-stop function. */
5365 if (has_stack_frames () && !stop_stack_dummy)
5366 set_current_sal_from_frame (get_current_frame (), 1);
5367
5368 /* Let the user/frontend see the threads as stopped. */
5369 do_cleanups (old_chain);
5370
5371 /* Look up the hook_stop and run it (CLI internally handles problem
5372 of stop_command's pre-hook not existing). */
5373 if (stop_command)
5374 catch_errors (hook_stop_stub, stop_command,
5375 "Error while running hook_stop:\n", RETURN_MASK_ALL);
5376
5377 if (!has_stack_frames ())
5378 goto done;
5379
5380 if (last.kind == TARGET_WAITKIND_SIGNALLED
5381 || last.kind == TARGET_WAITKIND_EXITED)
5382 goto done;
5383
5384 /* Select innermost stack frame - i.e., current frame is frame 0,
5385 and current location is based on that.
5386 Don't do this on return from a stack dummy routine,
5387 or if the program has exited. */
5388
5389 if (!stop_stack_dummy)
5390 {
5391 select_frame (get_current_frame ());
5392
5393 /* Print current location without a level number, if
5394 we have changed functions or hit a breakpoint.
5395 Print source line if we have one.
5396 bpstat_print() contains the logic deciding in detail
5397 what to print, based on the event(s) that just occurred. */
5398
5399 /* If --batch-silent is enabled then there's no need to print the current
5400 source location, and trying to do so risks causing an error message
5401 about missing source files. */
5402 if (stop_print_frame && !batch_silent)
5403 {
5404 int bpstat_ret;
5405 int source_flag;
5406 int do_frame_printing = 1;
5407 struct thread_info *tp = inferior_thread ();
5408
5409 bpstat_ret = bpstat_print (tp->stop_bpstat);
5410 switch (bpstat_ret)
5411 {
5412 case PRINT_UNKNOWN:
5413 /* If we had hit a shared library event breakpoint,
5414 bpstat_print would print out this message. If we hit
5415 an OS-level shared library event, do the same
5416 thing. */
5417 if (last.kind == TARGET_WAITKIND_LOADED)
5418 {
5419 printf_filtered (_("Stopped due to shared library event\n"));
5420 source_flag = SRC_LINE; /* something bogus */
5421 do_frame_printing = 0;
5422 break;
5423 }
5424
5425 /* FIXME: cagney/2002-12-01: Given that a frame ID does
5426 (or should) carry around the function and does (or
5427 should) use that when doing a frame comparison. */
5428 if (tp->stop_step
5429 && frame_id_eq (tp->step_frame_id,
5430 get_frame_id (get_current_frame ()))
5431 && step_start_function == find_pc_function (stop_pc))
5432 source_flag = SRC_LINE; /* finished step, just print source line */
5433 else
5434 source_flag = SRC_AND_LOC; /* print location and source line */
5435 break;
5436 case PRINT_SRC_AND_LOC:
5437 source_flag = SRC_AND_LOC; /* print location and source line */
5438 break;
5439 case PRINT_SRC_ONLY:
5440 source_flag = SRC_LINE;
5441 break;
5442 case PRINT_NOTHING:
5443 source_flag = SRC_LINE; /* something bogus */
5444 do_frame_printing = 0;
5445 break;
5446 default:
5447 internal_error (__FILE__, __LINE__, _("Unknown value."));
5448 }
5449
5450 /* The behavior of this routine with respect to the source
5451 flag is:
5452 SRC_LINE: Print only source line
5453 LOCATION: Print only location
5454 SRC_AND_LOC: Print location and source line */
5455 if (do_frame_printing)
5456 print_stack_frame (get_selected_frame (NULL), 0, source_flag);
5457
5458 /* Display the auto-display expressions. */
5459 do_displays ();
5460 }
5461 }
5462
5463 /* Save the function value return registers, if we care.
5464 We might be about to restore their previous contents. */
5465 if (inferior_thread ()->proceed_to_finish)
5466 {
5467 /* This should not be necessary. */
5468 if (stop_registers)
5469 regcache_xfree (stop_registers);
5470
5471 /* NB: The copy goes through to the target picking up the value of
5472 all the registers. */
5473 stop_registers = regcache_dup (get_current_regcache ());
5474 }
5475
5476 if (stop_stack_dummy == STOP_STACK_DUMMY)
5477 {
5478 /* Pop the empty frame that contains the stack dummy.
5479 This also restores inferior state prior to the call
5480 (struct inferior_thread_state). */
5481 struct frame_info *frame = get_current_frame ();
5482
5483 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
5484 frame_pop (frame);
5485 /* frame_pop() calls reinit_frame_cache as the last thing it does
5486 which means there's currently no selected frame. We don't need
5487 to re-establish a selected frame if the dummy call returns normally,
5488 that will be done by restore_inferior_status. However, we do have
5489 to handle the case where the dummy call is returning after being
5490 stopped (e.g. the dummy call previously hit a breakpoint). We
5491 can't know which case we have so just always re-establish a
5492 selected frame here. */
5493 select_frame (get_current_frame ());
5494 }
5495
5496 done:
5497 annotate_stopped ();
5498
5499 /* Suppress the stop observer if we're in the middle of:
5500
5501 - a step n (n > 1), as there are still more steps to be done.
5502
5503 - a "finish" command, as the observer will be called in
5504 finish_command_continuation, so it can include the inferior
5505 function's return value.
5506
5507 - calling an inferior function, as we pretend the inferior didn't
5508 run at all. The return value of the call is handled by the
5509 expression evaluator, through call_function_by_hand. */
5510
5511 if (!target_has_execution
5512 || last.kind == TARGET_WAITKIND_SIGNALLED
5513 || last.kind == TARGET_WAITKIND_EXITED
5514 || (!inferior_thread ()->step_multi
5515 && !(inferior_thread ()->stop_bpstat
5516 && inferior_thread ()->proceed_to_finish)
5517 && !inferior_thread ()->in_infcall))
5518 {
5519 if (!ptid_equal (inferior_ptid, null_ptid))
5520 observer_notify_normal_stop (inferior_thread ()->stop_bpstat,
5521 stop_print_frame);
5522 else
5523 observer_notify_normal_stop (NULL, stop_print_frame);
5524 }
5525
5526 if (target_has_execution)
5527 {
5528 if (last.kind != TARGET_WAITKIND_SIGNALLED
5529 && last.kind != TARGET_WAITKIND_EXITED)
5530 /* Delete the breakpoint we stopped at, if it wants to be deleted.
5531 Delete any breakpoint that is to be deleted at the next stop. */
5532 breakpoint_auto_delete (inferior_thread ()->stop_bpstat);
5533 }
5534
5535 /* Try to get rid of automatically added inferiors that are no
5536 longer needed. Keeping those around slows down things linearly.
5537 Note that this never removes the current inferior. */
5538 prune_inferiors ();
5539 }
5540
5541 static int
5542 hook_stop_stub (void *cmd)
5543 {
5544 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
5545 return (0);
5546 }
5547 \f
5548 int
5549 signal_stop_state (int signo)
5550 {
5551 return signal_stop[signo];
5552 }
5553
5554 int
5555 signal_print_state (int signo)
5556 {
5557 return signal_print[signo];
5558 }
5559
5560 int
5561 signal_pass_state (int signo)
5562 {
5563 return signal_program[signo];
5564 }
5565
5566 int
5567 signal_stop_update (int signo, int state)
5568 {
5569 int ret = signal_stop[signo];
5570
5571 signal_stop[signo] = state;
5572 return ret;
5573 }
5574
5575 int
5576 signal_print_update (int signo, int state)
5577 {
5578 int ret = signal_print[signo];
5579
5580 signal_print[signo] = state;
5581 return ret;
5582 }
5583
5584 int
5585 signal_pass_update (int signo, int state)
5586 {
5587 int ret = signal_program[signo];
5588
5589 signal_program[signo] = state;
5590 return ret;
5591 }
5592
5593 static void
5594 sig_print_header (void)
5595 {
5596 printf_filtered (_("\
5597 Signal Stop\tPrint\tPass to program\tDescription\n"));
5598 }
5599
5600 static void
5601 sig_print_info (enum target_signal oursig)
5602 {
5603 const char *name = target_signal_to_name (oursig);
5604 int name_padding = 13 - (int) strlen (name);
5605
5606 if (name_padding <= 0)
5607 name_padding = 0;
5608
5609 printf_filtered ("%s", name);
5610 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
5611 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
5612 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
5613 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
5614 printf_filtered ("%s\n", target_signal_to_string (oursig));
5615 }
5616
5617 /* Specify how various signals in the inferior should be handled. */
5618
5619 static void
5620 handle_command (char *args, int from_tty)
5621 {
5622 char **argv;
5623 int digits, wordlen;
5624 int sigfirst, signum, siglast;
5625 enum target_signal oursig;
5626 int allsigs;
5627 int nsigs;
5628 unsigned char *sigs;
5629 struct cleanup *old_chain;
5630
5631 if (args == NULL)
5632 {
5633 error_no_arg (_("signal to handle"));
5634 }
5635
5636 /* Allocate and zero an array of flags for which signals to handle. */
5637
5638 nsigs = (int) TARGET_SIGNAL_LAST;
5639 sigs = (unsigned char *) alloca (nsigs);
5640 memset (sigs, 0, nsigs);
5641
5642 /* Break the command line up into args. */
5643
5644 argv = gdb_buildargv (args);
5645 old_chain = make_cleanup_freeargv (argv);
5646
5647 /* Walk through the args, looking for signal oursigs, signal names, and
5648 actions. Signal numbers and signal names may be interspersed with
5649 actions, with the actions being performed for all signals cumulatively
5650 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
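
/* For illustration only (a sketch of the syntax accepted below):
"handle SIGALRM stop print" marks SIGALRM in both the stop and print
tables, while "handle 1-5 nostop noprint pass" applies all three
actions to the numeric range 1 through 5, interpreted via
target_signal_from_command (GDB's own internal signal numbering). */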
5651
5652 while (*argv != NULL)
5653 {
5654 wordlen = strlen (*argv);
5655 for (digits = 0; isdigit ((*argv)[digits]); digits++)
5656 {;
5657 }
5658 allsigs = 0;
5659 sigfirst = siglast = -1;
5660
5661 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
5662 {
5663 /* Apply action to all signals except those used by the
5664 debugger. Silently skip those. */
5665 allsigs = 1;
5666 sigfirst = 0;
5667 siglast = nsigs - 1;
5668 }
5669 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
5670 {
5671 SET_SIGS (nsigs, sigs, signal_stop);
5672 SET_SIGS (nsigs, sigs, signal_print);
5673 }
5674 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
5675 {
5676 UNSET_SIGS (nsigs, sigs, signal_program);
5677 }
5678 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
5679 {
5680 SET_SIGS (nsigs, sigs, signal_print);
5681 }
5682 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
5683 {
5684 SET_SIGS (nsigs, sigs, signal_program);
5685 }
5686 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
5687 {
5688 UNSET_SIGS (nsigs, sigs, signal_stop);
5689 }
5690 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
5691 {
5692 SET_SIGS (nsigs, sigs, signal_program);
5693 }
5694 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
5695 {
5696 UNSET_SIGS (nsigs, sigs, signal_print);
5697 UNSET_SIGS (nsigs, sigs, signal_stop);
5698 }
5699 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
5700 {
5701 UNSET_SIGS (nsigs, sigs, signal_program);
5702 }
5703 else if (digits > 0)
5704 {
5705 /* It is numeric. The numeric signal refers to our own
5706 internal signal numbering from target.h, not to host/target
5707 signal number. This is a feature; users really should be
5708 using symbolic names anyway, and the common ones like
5709 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
5710
5711 sigfirst = siglast = (int)
5712 target_signal_from_command (atoi (*argv));
5713 if ((*argv)[digits] == '-')
5714 {
5715 siglast = (int)
5716 target_signal_from_command (atoi ((*argv) + digits + 1));
5717 }
5718 if (sigfirst > siglast)
5719 {
5720 /* Bet he didn't figure we'd think of this case... */
5721 signum = sigfirst;
5722 sigfirst = siglast;
5723 siglast = signum;
5724 }
5725 }
5726 else
5727 {
5728 oursig = target_signal_from_name (*argv);
5729 if (oursig != TARGET_SIGNAL_UNKNOWN)
5730 {
5731 sigfirst = siglast = (int) oursig;
5732 }
5733 else
5734 {
5735 /* Not a number and not a recognized flag word => complain. */
5736 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
5737 }
5738 }
5739
5740 /* If any signal numbers or symbol names were found, set flags for
5741 which signals to apply actions to. */
5742
5743 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
5744 {
5745 switch ((enum target_signal) signum)
5746 {
5747 case TARGET_SIGNAL_TRAP:
5748 case TARGET_SIGNAL_INT:
5749 if (!allsigs && !sigs[signum])
5750 {
5751 if (query (_("%s is used by the debugger.\n\
5752 Are you sure you want to change it? "), target_signal_to_name ((enum target_signal) signum)))
5753 {
5754 sigs[signum] = 1;
5755 }
5756 else
5757 {
5758 printf_unfiltered (_("Not confirmed, unchanged.\n"));
5759 gdb_flush (gdb_stdout);
5760 }
5761 }
5762 break;
5763 case TARGET_SIGNAL_0:
5764 case TARGET_SIGNAL_DEFAULT:
5765 case TARGET_SIGNAL_UNKNOWN:
5766 /* Make sure that "all" doesn't print these. */
5767 break;
5768 default:
5769 sigs[signum] = 1;
5770 break;
5771 }
5772 }
5773
5774 argv++;
5775 }
5776
5777 for (signum = 0; signum < nsigs; signum++)
5778 if (sigs[signum])
5779 {
5780 target_notice_signals (inferior_ptid);
5781
5782 if (from_tty)
5783 {
5784 /* Show the results. */
5785 sig_print_header ();
5786 for (; signum < nsigs; signum++)
5787 if (sigs[signum])
5788 sig_print_info (signum);
5789 }
5790
5791 break;
5792 }
5793
5794 do_cleanups (old_chain);
5795 }
5796
5797 static void
5798 xdb_handle_command (char *args, int from_tty)
5799 {
5800 char **argv;
5801 struct cleanup *old_chain;
5802
5803 if (args == NULL)
5804 error_no_arg (_("xdb command"));
5805
5806 /* Break the command line up into args. */
5807
5808 argv = gdb_buildargv (args);
5809 old_chain = make_cleanup_freeargv (argv);
5810 if (argv[1] != (char *) NULL)
5811 {
5812 char *argBuf;
5813 int bufLen;
5814
5815 bufLen = strlen (argv[0]) + 20;
5816 argBuf = (char *) xmalloc (bufLen);
5817 if (argBuf)
5818 {
5819 int validFlag = 1;
5820 enum target_signal oursig;
5821
5822 oursig = target_signal_from_name (argv[0]);
5823 memset (argBuf, 0, bufLen);
5824 if (strcmp (argv[1], "Q") == 0)
5825 sprintf (argBuf, "%s %s", argv[0], "noprint");
5826 else
5827 {
5828 if (strcmp (argv[1], "s") == 0)
5829 {
5830 if (!signal_stop[oursig])
5831 sprintf (argBuf, "%s %s", argv[0], "stop");
5832 else
5833 sprintf (argBuf, "%s %s", argv[0], "nostop");
5834 }
5835 else if (strcmp (argv[1], "i") == 0)
5836 {
5837 if (!signal_program[oursig])
5838 sprintf (argBuf, "%s %s", argv[0], "pass");
5839 else
5840 sprintf (argBuf, "%s %s", argv[0], "nopass");
5841 }
5842 else if (strcmp (argv[1], "r") == 0)
5843 {
5844 if (!signal_print[oursig])
5845 sprintf (argBuf, "%s %s", argv[0], "print");
5846 else
5847 sprintf (argBuf, "%s %s", argv[0], "noprint");
5848 }
5849 else
5850 validFlag = 0;
5851 }
5852 if (validFlag)
5853 handle_command (argBuf, from_tty);
5854 else
5855 printf_filtered (_("Invalid signal handling flag.\n"));
5856 if (argBuf)
5857 xfree (argBuf);
5858 }
5859 }
5860 do_cleanups (old_chain);
5861 }
5862
5863 /* Print current contents of the tables set by the handle command.
5864 It is possible we should just be printing signals actually used
5865 by the current target (but for things to work right when switching
5866 targets, all signals should be in the signal tables). */
5867
5868 static void
5869 signals_info (char *signum_exp, int from_tty)
5870 {
5871 enum target_signal oursig;
5872
5873 sig_print_header ();
5874
5875 if (signum_exp)
5876 {
5877 /* First see if this is a symbol name. */
5878 oursig = target_signal_from_name (signum_exp);
5879 if (oursig == TARGET_SIGNAL_UNKNOWN)
5880 {
5881 /* No, try numeric. */
5882 oursig =
5883 target_signal_from_command (parse_and_eval_long (signum_exp));
5884 }
5885 sig_print_info (oursig);
5886 return;
5887 }
5888
5889 printf_filtered ("\n");
5890 /* These ugly casts brought to you by the native VAX compiler. */
5891 for (oursig = TARGET_SIGNAL_FIRST;
5892 (int) oursig < (int) TARGET_SIGNAL_LAST;
5893 oursig = (enum target_signal) ((int) oursig + 1))
5894 {
5895 QUIT;
5896
5897 if (oursig != TARGET_SIGNAL_UNKNOWN
5898 && oursig != TARGET_SIGNAL_DEFAULT && oursig != TARGET_SIGNAL_0)
5899 sig_print_info (oursig);
5900 }
5901
5902 printf_filtered (_("\nUse the \"handle\" command to change these tables.\n"));
5903 }
5904
5905 /* The $_siginfo convenience variable is a bit special. We don't know
5906 for sure the type of the value until we actually have a chance to
5907 fetch the data. The type can change depending on gdbarch, so it is
5908 also dependent on which thread you have selected. We deal with this by:
5909
5910 1. making $_siginfo be an internalvar that creates a new value on
5911 access.
5912
5913 2. making the value of $_siginfo be an lval_computed value. */
5914
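/* Usage sketch (illustrative; the member names below assume a target
whose siginfo type has si_signo and si_errno fields, as GNU/Linux
does -- the actual members come from gdbarch_get_siginfo_type and
vary by target):

(gdb) print $_siginfo.si_signo
(gdb) print $_siginfo.si_errno */
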
5915 /* This function implements the lval_computed support for reading a
5916 $_siginfo value. */
5917
5918 static void
5919 siginfo_value_read (struct value *v)
5920 {
5921 LONGEST transferred;
5922
5923 transferred =
5924 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
5925 NULL,
5926 value_contents_all_raw (v),
5927 value_offset (v),
5928 TYPE_LENGTH (value_type (v)));
5929
5930 if (transferred != TYPE_LENGTH (value_type (v)))
5931 error (_("Unable to read siginfo"));
5932 }
5933
5934 /* This function implements the lval_computed support for writing a
5935 $_siginfo value. */
5936
5937 static void
5938 siginfo_value_write (struct value *v, struct value *fromval)
5939 {
5940 LONGEST transferred;
5941
5942 transferred = target_write (&current_target,
5943 TARGET_OBJECT_SIGNAL_INFO,
5944 NULL,
5945 value_contents_all_raw (fromval),
5946 value_offset (v),
5947 TYPE_LENGTH (value_type (fromval)));
5948
5949 if (transferred != TYPE_LENGTH (value_type (fromval)))
5950 error (_("Unable to write siginfo"));
5951 }
5952
5953 static struct lval_funcs siginfo_value_funcs =
5954 {
5955 siginfo_value_read,
5956 siginfo_value_write
5957 };
5958
5959 /* Return a new value with the correct type for the siginfo object of
5960 the current thread using architecture GDBARCH. Return a void value
5961 if there's no object available. */
5962
5963 static struct value *
5964 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var)
5965 {
5966 if (target_has_stack
5967 && !ptid_equal (inferior_ptid, null_ptid)
5968 && gdbarch_get_siginfo_type_p (gdbarch))
5969 {
5970 struct type *type = gdbarch_get_siginfo_type (gdbarch);
5971
5972 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
5973 }
5974
5975 return allocate_value (builtin_type (gdbarch)->builtin_void);
5976 }
5977
5978 \f
5979 /* Inferior thread state.
5980 These are details related to the inferior itself, and don't include
5981 things like what frame the user had selected or what gdb was doing
5982 with the target at the time.
5983 For inferior function calls these are things we want to restore
5984 regardless of whether the function call successfully completes
5985 or the dummy frame has to be manually popped. */
5986
5987 struct inferior_thread_state
5988 {
5989 enum target_signal stop_signal;
5990 CORE_ADDR stop_pc;
5991 struct regcache *registers;
5992 };
5993
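/* Save the current thread's stop signal, stop PC and register contents
so that they can be restored later by restore_inferior_thread_state.
The caller owns the returned object and must eventually either restore
or discard it (see below). */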
5994 struct inferior_thread_state *
5995 save_inferior_thread_state (void)
5996 {
5997 struct inferior_thread_state *inf_state = XMALLOC (struct inferior_thread_state);
5998 struct thread_info *tp = inferior_thread ();
5999
6000 inf_state->stop_signal = tp->stop_signal;
6001 inf_state->stop_pc = stop_pc;
6002
6003 inf_state->registers = regcache_dup (get_current_regcache ());
6004
6005 return inf_state;
6006 }
6007
6008 /* Restore the inferior thread state recorded in INF_STATE. */
6009
6010 void
6011 restore_inferior_thread_state (struct inferior_thread_state *inf_state)
6012 {
6013 struct thread_info *tp = inferior_thread ();
6014
6015 tp->stop_signal = inf_state->stop_signal;
6016 stop_pc = inf_state->stop_pc;
6017
6018 /* The inferior can be gone if the user types "print exit(0)"
6019 (and perhaps other times). */
6020 if (target_has_execution)
6021 /* NB: The register write goes through to the target. */
6022 regcache_cpy (get_current_regcache (), inf_state->registers);
6023 regcache_xfree (inf_state->registers);
6024 xfree (inf_state);
6025 }
6026
6027 static void
6028 do_restore_inferior_thread_state_cleanup (void *state)
6029 {
6030 restore_inferior_thread_state (state);
6031 }
6032
6033 struct cleanup *
6034 make_cleanup_restore_inferior_thread_state (struct inferior_thread_state *inf_state)
6035 {
6036 return make_cleanup (do_restore_inferior_thread_state_cleanup, inf_state);
6037 }
6038
6039 void
6040 discard_inferior_thread_state (struct inferior_thread_state *inf_state)
6041 {
6042 regcache_xfree (inf_state->registers);
6043 xfree (inf_state);
6044 }
6045
6046 struct regcache *
6047 get_inferior_thread_state_regcache (struct inferior_thread_state *inf_state)
6048 {
6049 return inf_state->registers;
6050 }
6051
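/* Illustrative usage sketch for the helpers above (not lifted from an
actual caller; call_succeeded is a placeholder condition):

struct inferior_thread_state *st = save_inferior_thread_state ();
struct cleanup *old = make_cleanup_restore_inferior_thread_state (st);
... let the inferior run ...
if (call_succeeded)
{
discard_cleanups (old);                (keep the new state)
discard_inferior_thread_state (st);    (free the saved copy)
}
else
do_cleanups (old);                     (restore and free ST) */
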
6052 /* Session related state for inferior function calls.
6053 These are the additional bits of state that need to be restored
6054 when an inferior function call successfully completes. */
6055
6056 struct inferior_status
6057 {
6058 bpstat stop_bpstat;
6059 int stop_step;
6060 enum stop_stack_kind stop_stack_dummy;
6061 int stopped_by_random_signal;
6062 int stepping_over_breakpoint;
6063 CORE_ADDR step_range_start;
6064 CORE_ADDR step_range_end;
6065 struct frame_id step_frame_id;
6066 struct frame_id step_stack_frame_id;
6067 enum step_over_calls_kind step_over_calls;
6068 CORE_ADDR step_resume_break_address;
6069 int stop_after_trap;
6070 int stop_soon;
6071
6072 /* ID of the selected frame when the inferior function call was made. */
6073 struct frame_id selected_frame_id;
6074
6075 int proceed_to_finish;
6076 int in_infcall;
6077 };
6078
6079 /* Save all of the information associated with the inferior<==>gdb
6080 connection. */
6081
6082 struct inferior_status *
6083 save_inferior_status (void)
6084 {
6085 struct inferior_status *inf_status = XMALLOC (struct inferior_status);
6086 struct thread_info *tp = inferior_thread ();
6087 struct inferior *inf = current_inferior ();
6088
6089 inf_status->stop_step = tp->stop_step;
6090 inf_status->stop_stack_dummy = stop_stack_dummy;
6091 inf_status->stopped_by_random_signal = stopped_by_random_signal;
6092 inf_status->stepping_over_breakpoint = tp->trap_expected;
6093 inf_status->step_range_start = tp->step_range_start;
6094 inf_status->step_range_end = tp->step_range_end;
6095 inf_status->step_frame_id = tp->step_frame_id;
6096 inf_status->step_stack_frame_id = tp->step_stack_frame_id;
6097 inf_status->step_over_calls = tp->step_over_calls;
6098 inf_status->stop_after_trap = stop_after_trap;
6099 inf_status->stop_soon = inf->stop_soon;
6100 /* Save original bpstat chain here; replace it with copy of chain.
6101 If caller's caller is walking the chain, they'll be happier if we
6102 hand them back the original chain when restore_inferior_status is
6103 called. */
6104 inf_status->stop_bpstat = tp->stop_bpstat;
6105 tp->stop_bpstat = bpstat_copy (tp->stop_bpstat);
6106 inf_status->proceed_to_finish = tp->proceed_to_finish;
6107 inf_status->in_infcall = tp->in_infcall;
6108
6109 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
6110
6111 return inf_status;
6112 }
6113
6114 static int
6115 restore_selected_frame (void *args)
6116 {
6117 struct frame_id *fid = (struct frame_id *) args;
6118 struct frame_info *frame;
6119
6120 frame = frame_find_by_id (*fid);
6121
6122 /* If the previously selected frame cannot be found, warn and let the
6123 caller fall back to selecting the innermost frame. */
6124 if (frame == NULL)
6125 {
6126 warning (_("Unable to restore previously selected frame."));
6127 return 0;
6128 }
6129
6130 select_frame (frame);
6131
6132 return (1);
6133 }
6134
6135 /* Restore inferior session state to INF_STATUS. */
6136
6137 void
6138 restore_inferior_status (struct inferior_status *inf_status)
6139 {
6140 struct thread_info *tp = inferior_thread ();
6141 struct inferior *inf = current_inferior ();
6142
6143 tp->stop_step = inf_status->stop_step;
6144 stop_stack_dummy = inf_status->stop_stack_dummy;
6145 stopped_by_random_signal = inf_status->stopped_by_random_signal;
6146 tp->trap_expected = inf_status->stepping_over_breakpoint;
6147 tp->step_range_start = inf_status->step_range_start;
6148 tp->step_range_end = inf_status->step_range_end;
6149 tp->step_frame_id = inf_status->step_frame_id;
6150 tp->step_stack_frame_id = inf_status->step_stack_frame_id;
6151 tp->step_over_calls = inf_status->step_over_calls;
6152 stop_after_trap = inf_status->stop_after_trap;
6153 inf->stop_soon = inf_status->stop_soon;
6154 bpstat_clear (&tp->stop_bpstat);
6155 tp->stop_bpstat = inf_status->stop_bpstat;
6156 inf_status->stop_bpstat = NULL;
6157 tp->proceed_to_finish = inf_status->proceed_to_finish;
6158 tp->in_infcall = inf_status->in_infcall;
6159
6160 if (target_has_stack)
6161 {
6162 /* The point of catch_errors is that if the stack is clobbered,
6163 walking the stack might encounter a garbage pointer and
6164 error() trying to dereference it. */
6165 if (catch_errors
6166 (restore_selected_frame, &inf_status->selected_frame_id,
6167 "Unable to restore previously selected frame:\n",
6168 RETURN_MASK_ERROR) == 0)
6169 /* Error in restoring the selected frame. Select the innermost
6170 frame. */
6171 select_frame (get_current_frame ());
6172 }
6173
6174 xfree (inf_status);
6175 }
6176
6177 static void
6178 do_restore_inferior_status_cleanup (void *sts)
6179 {
6180 restore_inferior_status (sts);
6181 }
6182
6183 struct cleanup *
6184 make_cleanup_restore_inferior_status (struct inferior_status *inf_status)
6185 {
6186 return make_cleanup (do_restore_inferior_status_cleanup, inf_status);
6187 }
6188
6189 void
6190 discard_inferior_status (struct inferior_status *inf_status)
6191 {
6192 /* See save_inferior_status for info on stop_bpstat. */
6193 bpstat_clear (&inf_status->stop_bpstat);
6194 xfree (inf_status);
6195 }
6196 \f
6197 int
6198 inferior_has_forked (ptid_t pid, ptid_t *child_pid)
6199 {
6200 struct target_waitstatus last;
6201 ptid_t last_ptid;
6202
6203 get_last_target_status (&last_ptid, &last);
6204
6205 if (last.kind != TARGET_WAITKIND_FORKED)
6206 return 0;
6207
6208 if (!ptid_equal (last_ptid, pid))
6209 return 0;
6210
6211 *child_pid = last.value.related_pid;
6212 return 1;
6213 }
6214
6215 int
6216 inferior_has_vforked (ptid_t pid, ptid_t *child_pid)
6217 {
6218 struct target_waitstatus last;
6219 ptid_t last_ptid;
6220
6221 get_last_target_status (&last_ptid, &last);
6222
6223 if (last.kind != TARGET_WAITKIND_VFORKED)
6224 return 0;
6225
6226 if (!ptid_equal (last_ptid, pid))
6227 return 0;
6228
6229 *child_pid = last.value.related_pid;
6230 return 1;
6231 }
6232
6233 int
6234 inferior_has_execd (ptid_t pid, char **execd_pathname)
6235 {
6236 struct target_waitstatus last;
6237 ptid_t last_ptid;
6238
6239 get_last_target_status (&last_ptid, &last);
6240
6241 if (last.kind != TARGET_WAITKIND_EXECD)
6242 return 0;
6243
6244 if (!ptid_equal (last_ptid, pid))
6245 return 0;
6246
6247 *execd_pathname = xstrdup (last.value.execd_pathname);
6248 return 1;
6249 }
6250
6251 int
6252 inferior_has_called_syscall (ptid_t pid, int *syscall_number)
6253 {
6254 struct target_waitstatus last;
6255 ptid_t last_ptid;
6256
6257 get_last_target_status (&last_ptid, &last);
6258
6259 if (last.kind != TARGET_WAITKIND_SYSCALL_ENTRY &&
6260 last.kind != TARGET_WAITKIND_SYSCALL_RETURN)
6261 return 0;
6262
6263 if (!ptid_equal (last_ptid, pid))
6264 return 0;
6265
6266 *syscall_number = last.value.syscall_number;
6267 return 1;
6268 }
6269
6270 /* Oft used ptids */
6271 ptid_t null_ptid;
6272 ptid_t minus_one_ptid;
6273
6274 /* Create a ptid given the necessary PID, LWP, and TID components. */
6275
6276 ptid_t
6277 ptid_build (int pid, long lwp, long tid)
6278 {
6279 ptid_t ptid;
6280
6281 ptid.pid = pid;
6282 ptid.lwp = lwp;
6283 ptid.tid = tid;
6284 return ptid;
6285 }
6286
6287 /* Create a ptid from just a pid. */
6288
6289 ptid_t
6290 pid_to_ptid (int pid)
6291 {
6292 return ptid_build (pid, 0, 0);
6293 }
6294
6295 /* Fetch the pid (process id) component from a ptid. */
6296
6297 int
6298 ptid_get_pid (ptid_t ptid)
6299 {
6300 return ptid.pid;
6301 }
6302
6303 /* Fetch the lwp (lightweight process) component from a ptid. */
6304
6305 long
6306 ptid_get_lwp (ptid_t ptid)
6307 {
6308 return ptid.lwp;
6309 }
6310
6311 /* Fetch the tid (thread id) component from a ptid. */
6312
6313 long
6314 ptid_get_tid (ptid_t ptid)
6315 {
6316 return ptid.tid;
6317 }
6318
6319 /* ptid_equal() is used to test equality of two ptids. */
6320
6321 int
6322 ptid_equal (ptid_t ptid1, ptid_t ptid2)
6323 {
6324 return (ptid1.pid == ptid2.pid && ptid1.lwp == ptid2.lwp
6325 && ptid1.tid == ptid2.tid);
6326 }
6327
6328 /* Returns true if PTID represents a process. */
6329
6330 int
6331 ptid_is_pid (ptid_t ptid)
6332 {
6333 if (ptid_equal (minus_one_ptid, ptid))
6334 return 0;
6335 if (ptid_equal (null_ptid, ptid))
6336 return 0;
6337
6338 return (ptid_get_lwp (ptid) == 0 && ptid_get_tid (ptid) == 0);
6339 }
6340
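/* Return non-zero if PTID matches FILTER.  FILTER may be the wildcard
minus_one_ptid (every ptid matches it), a ptid naming a whole process
(PTID matches if it belongs to that process), or a fully specified
ptid (PTID matches only if equal to it).  For example,
ptid_build (100, 7, 0) matches the filter pid_to_ptid (100). */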
6341 int
6342 ptid_match (ptid_t ptid, ptid_t filter)
6343 {
6344 /* Since both parameters have the same type, prevent easy mistakes
6345 from happening. */
6346 gdb_assert (!ptid_equal (ptid, minus_one_ptid)
6347 && !ptid_equal (ptid, null_ptid));
6348
6349 if (ptid_equal (filter, minus_one_ptid))
6350 return 1;
6351 if (ptid_is_pid (filter)
6352 && ptid_get_pid (ptid) == ptid_get_pid (filter))
6353 return 1;
6354 else if (ptid_equal (ptid, filter))
6355 return 1;
6356
6357 return 0;
6358 }
6359
6360 /* restore_inferior_ptid() will be used by the cleanup machinery
6361 to restore the inferior_ptid value saved in a call to
6362 save_inferior_ptid(). */
6363
6364 static void
6365 restore_inferior_ptid (void *arg)
6366 {
6367 ptid_t *saved_ptid_ptr = arg;
6368
6369 inferior_ptid = *saved_ptid_ptr;
6370 xfree (arg);
6371 }
6372
6373 /* Save the value of inferior_ptid so that it may be restored by a
6374 later call to do_cleanups(). Returns the struct cleanup pointer
6375 needed for later doing the cleanup. */
6376
6377 struct cleanup *
6378 save_inferior_ptid (void)
6379 {
6380 ptid_t *saved_ptid_ptr;
6381
6382 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
6383 *saved_ptid_ptr = inferior_ptid;
6384 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
6385 }
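
/* Usage sketch (illustrative only; some_other_ptid is a placeholder):

struct cleanup *old = save_inferior_ptid ();
inferior_ptid = some_other_ptid;
... operate on the temporarily switched thread ...
do_cleanups (old);   (inferior_ptid is restored and the copy freed) */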
6386 \f
6387
6388 /* User interface for reverse debugging:
6389 Set exec-direction / show exec-direction commands
6390 (only takes effect if the target can execute in reverse). */
6391
6392 enum exec_direction_kind execution_direction = EXEC_FORWARD;
6393 static const char exec_forward[] = "forward";
6394 static const char exec_reverse[] = "reverse";
6395 static const char *exec_direction = exec_forward;
6396 static const char *exec_direction_names[] = {
6397 exec_forward,
6398 exec_reverse,
6399 NULL
6400 };
6401
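/* At the user level this is exercised as, e.g. (illustrative):

(gdb) set exec-direction reverse
(gdb) show exec-direction

and it only takes effect when the target reports that it can execute
in reverse (target_can_execute_reverse). */
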
6402 static void
6403 set_exec_direction_func (char *args, int from_tty,
6404 struct cmd_list_element *cmd)
6405 {
6406 if (target_can_execute_reverse)
6407 {
6408 if (!strcmp (exec_direction, exec_forward))
6409 execution_direction = EXEC_FORWARD;
6410 else if (!strcmp (exec_direction, exec_reverse))
6411 execution_direction = EXEC_REVERSE;
6412 }
6413 }
6414
6415 static void
6416 show_exec_direction_func (struct ui_file *out, int from_tty,
6417 struct cmd_list_element *cmd, const char *value)
6418 {
6419 switch (execution_direction) {
6420 case EXEC_FORWARD:
6421 fprintf_filtered (out, _("Forward.\n"));
6422 break;
6423 case EXEC_REVERSE:
6424 fprintf_filtered (out, _("Reverse.\n"));
6425 break;
6426 case EXEC_ERROR:
6427 default:
6428 fprintf_filtered (out,
6429 _("Forward (target `%s' does not support exec-direction).\n"),
6430 target_shortname);
6431 break;
6432 }
6433 }
6434
6435 /* User interface for non-stop mode. */
6436
6437 int non_stop = 0;
6438 static int non_stop_1 = 0;
6439
6440 static void
6441 set_non_stop (char *args, int from_tty,
6442 struct cmd_list_element *c)
6443 {
6444 if (target_has_execution)
6445 {
6446 non_stop_1 = non_stop;
6447 error (_("Cannot change this setting while the inferior is running."));
6448 }
6449
6450 non_stop = non_stop_1;
6451 }
6452
6453 static void
6454 show_non_stop (struct ui_file *file, int from_tty,
6455 struct cmd_list_element *c, const char *value)
6456 {
6457 fprintf_filtered (file,
6458 _("Controlling the inferior in non-stop mode is %s.\n"),
6459 value);
6460 }
6461
6462 static void
6463 show_schedule_multiple (struct ui_file *file, int from_tty,
6464 struct cmd_list_element *c, const char *value)
6465 {
6466 fprintf_filtered (file, _("\
6467 Resuming the execution of threads of all processes is %s.\n"), value);
6468 }
6469
6470 void
6471 _initialize_infrun (void)
6472 {
6473 int i;
6474 int numsigs;
6475
6476 add_info ("signals", signals_info, _("\
6477 What debugger does when program gets various signals.\n\
6478 Specify a signal as argument to print info on that signal only."));
6479 add_info_alias ("handle", "signals", 0);
6480
6481 add_com ("handle", class_run, handle_command, _("\
6482 Specify how to handle a signal.\n\
6483 Args are signals and actions to apply to those signals.\n\
6484 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6485 from 1-15 are allowed for compatibility with old versions of GDB.\n\
6486 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
6487 The special arg \"all\" is recognized to mean all signals except those\n\
6488 used by the debugger, typically SIGTRAP and SIGINT.\n\
6489 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
6490 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
6491 Stop means reenter debugger if this signal happens (implies print).\n\
6492 Print means print a message if this signal happens.\n\
6493 Pass means let program see this signal; otherwise program doesn't know.\n\
6494 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
6495 Pass and Stop may be combined."));
6496 if (xdb_commands)
6497 {
6498 add_com ("lz", class_info, signals_info, _("\
6499 What debugger does when program gets various signals.\n\
6500 Specify a signal as argument to print info on that signal only."));
6501 add_com ("z", class_run, xdb_handle_command, _("\
6502 Specify how to handle a signal.\n\
6503 Args are signals and actions to apply to those signals.\n\
6504 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6505 from 1-15 are allowed for compatibility with old versions of GDB.\n\
6506 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
6507 The special arg \"all\" is recognized to mean all signals except those\n\
6508 used by the debugger, typically SIGTRAP and SIGINT.\n\
6509 Recognized actions include \"s\" (toggles between stop and nostop), \n\
6510 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
6511 nopass), \"Q\" (noprint)\n\
6512 Stop means reenter debugger if this signal happens (implies print).\n\
6513 Print means print a message if this signal happens.\n\
6514 Pass means let program see this signal; otherwise program doesn't know.\n\
6515 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
6516 Pass and Stop may be combined."));
6517 }
6518
6519 if (!dbx_commands)
6520 stop_command = add_cmd ("stop", class_obscure,
6521 not_just_help_class_command, _("\
6522 There is no `stop' command, but you can set a hook on `stop'.\n\
6523 This allows you to set a list of commands to be run each time execution\n\
6524 of the program stops."), &cmdlist);
6525
6526 add_setshow_zinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
6527 Set inferior debugging."), _("\
6528 Show inferior debugging."), _("\
6529 When non-zero, inferior specific debugging is enabled."),
6530 NULL,
6531 show_debug_infrun,
6532 &setdebuglist, &showdebuglist);
6533
6534 add_setshow_boolean_cmd ("displaced", class_maintenance, &debug_displaced, _("\
6535 Set displaced stepping debugging."), _("\
6536 Show displaced stepping debugging."), _("\
6537 When non-zero, displaced stepping specific debugging is enabled."),
6538 NULL,
6539 show_debug_displaced,
6540 &setdebuglist, &showdebuglist);
6541
6542 add_setshow_boolean_cmd ("non-stop", no_class,
6543 &non_stop_1, _("\
6544 Set whether gdb controls the inferior in non-stop mode."), _("\
6545 Show whether gdb controls the inferior in non-stop mode."), _("\
6546 When debugging a multi-threaded program and this setting is\n\
6547 off (the default, also called all-stop mode), when one thread stops\n\
6548 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
6549 all other threads in the program while you interact with the thread of\n\
6550 interest. When you continue or step a thread, you can allow the other\n\
6551 threads to run, or have them remain stopped, but while you inspect any\n\
6552 thread's state, all threads stop.\n\
6553 \n\
6554 In non-stop mode, when one thread stops, other threads can continue\n\
6555 to run freely. You'll be able to step each thread independently,\n\
6556 leave it stopped or free to run as needed."),
6557 set_non_stop,
6558 show_non_stop,
6559 &setlist,
6560 &showlist);
6561
6562 numsigs = (int) TARGET_SIGNAL_LAST;
6563 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
6564 signal_print = (unsigned char *)
6565 xmalloc (sizeof (signal_print[0]) * numsigs);
6566 signal_program = (unsigned char *)
6567 xmalloc (sizeof (signal_program[0]) * numsigs);
6568 for (i = 0; i < numsigs; i++)
6569 {
6570 signal_stop[i] = 1;
6571 signal_print[i] = 1;
6572 signal_program[i] = 1;
6573 }
6574
6575 /* Signals caused by debugger's own actions
6576 should not be given to the program afterwards. */
6577 signal_program[TARGET_SIGNAL_TRAP] = 0;
6578 signal_program[TARGET_SIGNAL_INT] = 0;
6579
6580 /* Signals that are not errors should not normally enter the debugger. */
6581 signal_stop[TARGET_SIGNAL_ALRM] = 0;
6582 signal_print[TARGET_SIGNAL_ALRM] = 0;
6583 signal_stop[TARGET_SIGNAL_VTALRM] = 0;
6584 signal_print[TARGET_SIGNAL_VTALRM] = 0;
6585 signal_stop[TARGET_SIGNAL_PROF] = 0;
6586 signal_print[TARGET_SIGNAL_PROF] = 0;
6587 signal_stop[TARGET_SIGNAL_CHLD] = 0;
6588 signal_print[TARGET_SIGNAL_CHLD] = 0;
6589 signal_stop[TARGET_SIGNAL_IO] = 0;
6590 signal_print[TARGET_SIGNAL_IO] = 0;
6591 signal_stop[TARGET_SIGNAL_POLL] = 0;
6592 signal_print[TARGET_SIGNAL_POLL] = 0;
6593 signal_stop[TARGET_SIGNAL_URG] = 0;
6594 signal_print[TARGET_SIGNAL_URG] = 0;
6595 signal_stop[TARGET_SIGNAL_WINCH] = 0;
6596 signal_print[TARGET_SIGNAL_WINCH] = 0;
6597
6598 /* These signals are used internally by user-level thread
6599 implementations. (See signal(5) on Solaris.) Like the above
6600 signals, a healthy program receives and handles them as part of
6601 its normal operation. */
6602 signal_stop[TARGET_SIGNAL_LWP] = 0;
6603 signal_print[TARGET_SIGNAL_LWP] = 0;
6604 signal_stop[TARGET_SIGNAL_WAITING] = 0;
6605 signal_print[TARGET_SIGNAL_WAITING] = 0;
6606 signal_stop[TARGET_SIGNAL_CANCEL] = 0;
6607 signal_print[TARGET_SIGNAL_CANCEL] = 0;
6608
6609 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
6610 &stop_on_solib_events, _("\
6611 Set stopping for shared library events."), _("\
6612 Show stopping for shared library events."), _("\
6613 If nonzero, gdb will give control to the user when the dynamic linker\n\
6614 notifies gdb of shared library events. The most common event of interest\n\
6615 to the user would be loading/unloading of a new library."),
6616 NULL,
6617 show_stop_on_solib_events,
6618 &setlist, &showlist);

  add_setshow_enum_cmd ("follow-fork-mode", class_run,
                        follow_fork_mode_kind_names,
                        &follow_fork_mode_string, _("\
Set debugger response to a program call of fork or vfork."), _("\
Show debugger response to a program call of fork or vfork."), _("\
A fork or vfork creates a new process. follow-fork-mode can be:\n\
  parent  - the original process is debugged after a fork\n\
  child   - the new process is debugged after a fork\n\
The unfollowed process will continue to run.\n\
By default, the debugger will follow the parent process."),
                        NULL,
                        show_follow_fork_mode_string,
                        &setlist, &showlist);
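
  /* E.g. "set follow-fork-mode child" switches debugging to the child
     at the fork; combined with "set detach-on-fork off" (registered
     further down) both sides of the fork stay under gdb's control as
     separate inferiors.  */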

  add_setshow_enum_cmd ("follow-exec-mode", class_run,
                        follow_exec_mode_names,
                        &follow_exec_mode_string, _("\
Set debugger response to a program call of exec."), _("\
Show debugger response to a program call of exec."), _("\
An exec call replaces the program image of a process.\n\
\n\
follow-exec-mode can be:\n\
\n\
  new - the debugger creates a new inferior and rebinds the process\n\
to this new inferior. The program the process was running before\n\
the exec call can be restarted afterwards by restarting the original\n\
inferior.\n\
\n\
  same - the debugger keeps the process bound to the same inferior.\n\
The new executable image replaces the previous executable loaded in\n\
the inferior. Restarting the inferior after the exec call restarts\n\
the executable the process was running after the exec call.\n\
\n\
By default, the debugger will use the same inferior."),
                        NULL,
                        show_follow_exec_mode_string,
                        &setlist, &showlist);
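
  /* E.g. with "set follow-exec-mode new", after the program calls
     exec the pre-exec program remains available as a separate entry
     in "info inferiors" and can be restarted later.  */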

  add_setshow_enum_cmd ("scheduler-locking", class_run,
                        scheduler_enums, &scheduler_mode, _("\
Set mode for locking scheduler during execution."), _("\
Show mode for locking scheduler during execution."), _("\
off == no locking (threads may preempt at any time)\n\
on == full locking (no thread except the current thread may run)\n\
step == scheduler locked during every single-step operation.\n\
        In this mode, no other thread may run during a step command.\n\
        Other threads may run while stepping over a function call ('next')."),
                        set_schedlock_func,  /* traps on target vector */
                        show_scheduler_mode,
                        &setlist, &showlist);
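
  /* E.g. "set scheduler-locking step" keeps other threads from running
     while single-stepping, so a step is not preempted by another
     thread hitting a breakpoint mid-step.  */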

  add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
Set mode for resuming threads of all processes."), _("\
Show mode for resuming threads of all processes."), _("\
When on, execution commands (such as 'continue' or 'next') resume all\n\
threads of all processes. When off (which is the default), execution\n\
commands only resume the threads of the current process. The set of\n\
threads that are resumed is further refined by the scheduler-locking\n\
mode (see help set scheduler-locking)."),
                           NULL,
                           show_schedule_multiple,
                           &setlist, &showlist);
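
  /* E.g. when two inferiors are loaded, "set schedule-multiple on"
     makes a single "continue" resume the threads of both processes
     rather than only those of the current inferior.  */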

  add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
Set mode of the step operation."), _("\
Show mode of the step operation."), _("\
When set, doing a step over a function without debug line information\n\
will stop at the first instruction of that function. Otherwise, the\n\
function is skipped and the step command stops at a different source line."),
                           NULL,
                           show_step_stop_if_no_debug,
                           &setlist, &showlist);
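
  /* E.g. "set step-mode on" makes "step" halt at the first instruction
     of a function that was compiled without line information, instead
     of silently stepping over it.  */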

  add_setshow_enum_cmd ("displaced-stepping", class_run,
                        can_use_displaced_stepping_enum,
                        &can_use_displaced_stepping, _("\
Set debugger's willingness to use displaced stepping."), _("\
Show debugger's willingness to use displaced stepping."), _("\
If on, gdb will use displaced stepping to step over breakpoints if it is\n\
supported by the target architecture. If off, gdb will not use displaced\n\
stepping to step over breakpoints, even if such is supported by the target\n\
architecture. If auto (which is the default), gdb will use displaced stepping\n\
if the target architecture supports it and non-stop mode is active, but will not\n\
use it in all-stop mode (see help set non-stop)."),
                        NULL,
                        show_can_use_displaced_stepping,
                        &setlist, &showlist);
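
  /* E.g. "set displaced-stepping on" asks gdb to step over breakpoints
     out of line even in all-stop mode; whether that actually happens
     still depends on the architecture providing displaced-stepping
     support in its gdbarch.  */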

  add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
                        &exec_direction, _("Set direction of execution.\n\
Options are 'forward' or 'reverse'."),
                        _("Show direction of execution (forward/reverse)."),
                        _("Tells gdb whether to execute forward or backward."),
                        set_exec_direction_func, show_exec_direction_func,
                        &setlist, &showlist);
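
  /* E.g. on a target that supports reverse execution (such as process
     record, enabled with the "record" command), "set exec-direction
     reverse" makes plain "step" and "continue" run the program
     backwards until the direction is set back to forward.  */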

  /* Set/show detach-on-fork: user-settable mode. */

  add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
Set whether gdb will detach the child of a fork."), _("\
Show whether gdb will detach the child of a fork."), _("\
Tells gdb whether to detach the child of a fork."),
                           NULL, NULL, &setlist, &showlist);
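
  /* E.g. "set detach-on-fork off" keeps the unfollowed side of a fork
     attached and suspended as an extra inferior, instead of detaching
     it and letting it run on its own.  */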

  /* ptid initializations */
  null_ptid = ptid_build (0, 0, 0);
  minus_one_ptid = ptid_build (-1, 0, 0);
  inferior_ptid = null_ptid;
  target_last_wait_ptid = minus_one_ptid;

  observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
  observer_attach_thread_stop_requested (infrun_thread_stop_requested);
  observer_attach_thread_exit (infrun_thread_thread_exit);
  observer_attach_inferior_exit (infrun_inferior_exit);

  /* Explicitly create without lookup, since a lookup would try to
     create a value with a void type, and at this point gdbarch isn't
     initialized yet. We're also quite sure there isn't another
     convenience variable of the same name. */
  create_internalvar_type_lazy ("_siginfo", siginfo_make_value);
}