1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995,
5 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
6 2008, 2009, 2010 Free Software Foundation, Inc.
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "gdb_string.h"
25 #include <ctype.h>
26 #include "symtab.h"
27 #include "frame.h"
28 #include "inferior.h"
29 #include "exceptions.h"
30 #include "breakpoint.h"
31 #include "gdb_wait.h"
32 #include "gdbcore.h"
33 #include "gdbcmd.h"
34 #include "cli/cli-script.h"
35 #include "target.h"
36 #include "gdbthread.h"
37 #include "annotate.h"
38 #include "symfile.h"
39 #include "top.h"
40 #include <signal.h>
41 #include "inf-loop.h"
42 #include "regcache.h"
43 #include "value.h"
44 #include "observer.h"
45 #include "language.h"
46 #include "solib.h"
47 #include "main.h"
48 #include "gdb_assert.h"
49 #include "mi/mi-common.h"
50 #include "event-top.h"
51 #include "record.h"
52 #include "inline-frame.h"
53 #include "jit.h"
54 #include "tracepoint.h"
55
56 /* Prototypes for local functions */
57
58 static void signals_info (char *, int);
59
60 static void handle_command (char *, int);
61
62 static void sig_print_info (enum target_signal);
63
64 static void sig_print_header (void);
65
66 static void resume_cleanups (void *);
67
68 static int hook_stop_stub (void *);
69
70 static int restore_selected_frame (void *);
71
72 static int follow_fork (void);
73
74 static void set_schedlock_func (char *args, int from_tty,
75 struct cmd_list_element *c);
76
77 static int currently_stepping (struct thread_info *tp);
78
79 static int currently_stepping_or_nexting_callback (struct thread_info *tp,
80 void *data);
81
82 static void xdb_handle_command (char *args, int from_tty);
83
84 static int prepare_to_proceed (int);
85
86 void _initialize_infrun (void);
87
88 void nullify_last_target_wait_ptid (void);
89
90 /* When set, stop the 'step' command if we enter a function which has
91 no line number information. The normal behavior is that we step
92 over such functions. */
93 int step_stop_if_no_debug = 0;
94 static void
95 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
96 struct cmd_list_element *c, const char *value)
97 {
98 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
99 }
100
101 /* In asynchronous mode, but simulating synchronous execution. */
102
103 int sync_execution = 0;
104
105 /* wait_for_inferior and normal_stop use this to notify the user
106 when the inferior stopped in a different thread than it had been
107 running in. */
108
109 static ptid_t previous_inferior_ptid;
110
111 /* Default behavior is to detach newly forked processes (legacy). */
112 int detach_fork = 1;
113
114 int debug_displaced = 0;
115 static void
116 show_debug_displaced (struct ui_file *file, int from_tty,
117 struct cmd_list_element *c, const char *value)
118 {
119 fprintf_filtered (file, _("Displace stepping debugging is %s.\n"), value);
120 }
121
122 static int debug_infrun = 0;
123 static void
124 show_debug_infrun (struct ui_file *file, int from_tty,
125 struct cmd_list_element *c, const char *value)
126 {
127 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
128 }
129
130 /* If the program uses ELF-style shared libraries, then calls to
131 functions in shared libraries go through stubs, which live in a
132 table called the PLT (Procedure Linkage Table). The first time the
133 function is called, the stub sends control to the dynamic linker,
134 which looks up the function's real address, patches the stub so
135 that future calls will go directly to the function, and then passes
136 control to the function.
137
138 If we are stepping at the source level, we don't want to see any of
139 this --- we just want to skip over the stub and the dynamic linker.
140 The simple approach is to single-step until control leaves the
141 dynamic linker.
142
143 However, on some systems (e.g., Red Hat's 5.2 distribution) the
144 dynamic linker calls functions in the shared C library, so you
145 can't tell from the PC alone whether the dynamic linker is still
146 running. In this case, we use a step-resume breakpoint to get us
147 past the dynamic linker, as if we were using "next" to step over a
148 function call.
149
150 in_solib_dynsym_resolve_code() says whether we're in the dynamic
151 linker code or not. Normally, this means we single-step. However,
152 if SKIP_SOLIB_RESOLVER returns non-zero, then its value is an
153 address where we can place a step-resume breakpoint to get past the
154 linker's symbol resolution function.
155
156 in_solib_dynsym_resolve_code() can generally be implemented in a
157 pretty portable way, by comparing the PC against the address ranges
158 of the dynamic linker's sections.
159
160 SKIP_SOLIB_RESOLVER is generally going to be system-specific, since
161 it depends on internal details of the dynamic linker. It's usually
162 not too hard to figure out where to put a breakpoint, but it
163 certainly isn't portable. SKIP_SOLIB_RESOLVER should do plenty of
164 sanity checking. If it can't figure things out, returning zero and
165 getting the (possibly confusing) stepping behavior is better than
166 signalling an error, which will obscure the change in the
167 inferior's state. */
168
169 /* This function returns TRUE if pc is the address of an instruction
170 that lies within the dynamic linker (such as the event hook, or the
171 dld itself).
172
173 This function must be used only when a dynamic linker event has
174 been caught, and the inferior is being stepped out of the hook, or
175 undefined results are guaranteed. */
176
177 #ifndef SOLIB_IN_DYNAMIC_LINKER
178 #define SOLIB_IN_DYNAMIC_LINKER(pid,pc) 0
179 #endif
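/* A minimal sketch of the policy described above (illustrative only;
   kept out of the build with #if 0 -- the real handling is done
   elsewhere in this file when a stop event is processed), assuming
   the stop PC and the current frame's gdbarch are at hand:  */
#if 0
  if (in_solib_dynsym_resolve_code (stop_pc))
    {
      CORE_ADDR pc_after_resolver
        = gdbarch_skip_solib_resolver (gdbarch, stop_pc);

      if (pc_after_resolver != 0)
        {
          /* Run past the resolver with a step-resume breakpoint,
             as if "next"-ing over a function call.  */
          struct symtab_and_line sr_sal;

          init_sal (&sr_sal);
          sr_sal.pc = pc_after_resolver;
          insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
                                                null_frame_id);
        }
      /* Otherwise, keep single-stepping until control leaves the
         dynamic linker.  */
    }
#endif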
180
181
182 /* Tables of how to react to signals; the user sets them. */
183
184 static unsigned char *signal_stop;
185 static unsigned char *signal_print;
186 static unsigned char *signal_program;
187
188 #define SET_SIGS(nsigs,sigs,flags) \
189 do { \
190 int signum = (nsigs); \
191 while (signum-- > 0) \
192 if ((sigs)[signum]) \
193 (flags)[signum] = 1; \
194 } while (0)
195
196 #define UNSET_SIGS(nsigs,sigs,flags) \
197 do { \
198 int signum = (nsigs); \
199 while (signum-- > 0) \
200 if ((sigs)[signum]) \
201 (flags)[signum] = 0; \
202 } while (0)
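/* A minimal usage sketch (illustrative only, kept out of the build):
   given a "sigs" array with one slot per signal, filled in by a
   command parser, mark those signals to stop and print, and stop
   passing them to the program:  */
#if 0
  {
    int nsigs = (int) TARGET_SIGNAL_LAST;
    unsigned char *sigs = alloca (nsigs);

    memset (sigs, 0, nsigs);
    sigs[TARGET_SIGNAL_INT] = 1;

    SET_SIGS (nsigs, sigs, signal_stop);
    SET_SIGS (nsigs, sigs, signal_print);
    UNSET_SIGS (nsigs, sigs, signal_program);
  }
#endif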
203
204 /* Value to pass to target_resume() to cause all threads to resume */
205
206 #define RESUME_ALL minus_one_ptid
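/* For instance (illustrative only), resuming every thread of every
   process, without stepping and with no signal, looks like:

     target_resume (RESUME_ALL, 0, TARGET_SIGNAL_0);  */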
207
208 /* Command list pointer for the "stop" placeholder. */
209
210 static struct cmd_list_element *stop_command;
211
212 /* Function inferior was in as of last step command. */
213
214 static struct symbol *step_start_function;
215
216 /* Nonzero if we want to give control to the user when we're notified
217 of shared library events by the dynamic linker. */
218 static int stop_on_solib_events;
219 static void
220 show_stop_on_solib_events (struct ui_file *file, int from_tty,
221 struct cmd_list_element *c, const char *value)
222 {
223 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
224 value);
225 }
226
227 /* Nonzero means expecting a trace trap
228 and should stop the inferior and return silently when it happens. */
229
230 int stop_after_trap;
231
232 /* Save register contents here when executing a "finish" command or when
233 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
234 Thus this contains the return value from the called function (assuming
235 values are returned in a register). */
236
237 struct regcache *stop_registers;
238
239 /* Nonzero after stop if current stack frame should be printed. */
240
241 static int stop_print_frame;
242
243 /* This is a cached copy of the pid/waitstatus of the last event
244 returned by target_wait()/deprecated_target_wait_hook(). This
245 information is returned by get_last_target_status(). */
246 static ptid_t target_last_wait_ptid;
247 static struct target_waitstatus target_last_waitstatus;
248
249 static void context_switch (ptid_t ptid);
250
251 void init_thread_stepping_state (struct thread_info *tss);
252
253 void init_infwait_state (void);
254
255 static const char follow_fork_mode_child[] = "child";
256 static const char follow_fork_mode_parent[] = "parent";
257
258 static const char *follow_fork_mode_kind_names[] = {
259 follow_fork_mode_child,
260 follow_fork_mode_parent,
261 NULL
262 };
263
264 static const char *follow_fork_mode_string = follow_fork_mode_parent;
265 static void
266 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
267 struct cmd_list_element *c, const char *value)
268 {
269 fprintf_filtered (file, _("\
270 Debugger response to a program call of fork or vfork is \"%s\".\n"),
271 value);
272 }
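/* For example, from the command line (illustrative):

     (gdb) set follow-fork-mode child
     (gdb) show follow-fork-mode

   selects follow_fork_mode_child above, which follow_fork below
   consults when a fork or vfork event is reported.  */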
273 \f
274
275 /* Tell the target to follow the fork we're stopped at. Returns true
276 if the inferior should be resumed; false, if the target for some
277 reason decided it's best not to resume. */
278
279 static int
280 follow_fork (void)
281 {
282 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
283 int should_resume = 1;
284 struct thread_info *tp;
285
286 /* Copy user stepping state to the new inferior thread. FIXME: the
287 followed fork child thread should have a copy of most of the
288 parent thread structure's run control related fields, not just these.
289 Initialized to avoid "may be used uninitialized" warnings from gcc. */
290 struct breakpoint *step_resume_breakpoint = NULL;
291 CORE_ADDR step_range_start = 0;
292 CORE_ADDR step_range_end = 0;
293 struct frame_id step_frame_id = { 0 };
294
295 if (!non_stop)
296 {
297 ptid_t wait_ptid;
298 struct target_waitstatus wait_status;
299
300 /* Get the last target status returned by target_wait(). */
301 get_last_target_status (&wait_ptid, &wait_status);
302
303 /* If not stopped at a fork event, then there's nothing else to
304 do. */
305 if (wait_status.kind != TARGET_WAITKIND_FORKED
306 && wait_status.kind != TARGET_WAITKIND_VFORKED)
307 return 1;
308
309 /* Check if we switched over from WAIT_PTID, since the event was
310 reported. */
311 if (!ptid_equal (wait_ptid, minus_one_ptid)
312 && !ptid_equal (inferior_ptid, wait_ptid))
313 {
314 /* We did. Switch back to WAIT_PTID thread, to tell the
315 target to follow it (in either direction). We'll
316 afterwards refuse to resume, and inform the user what
317 happened. */
318 switch_to_thread (wait_ptid);
319 should_resume = 0;
320 }
321 }
322
323 tp = inferior_thread ();
324
325 /* If there were any forks/vforks that were caught and are now to be
326 followed, then do so now. */
327 switch (tp->pending_follow.kind)
328 {
329 case TARGET_WAITKIND_FORKED:
330 case TARGET_WAITKIND_VFORKED:
331 {
332 ptid_t parent, child;
333
334 /* If the user did a next/step, etc, over a fork call,
335 preserve the stepping state in the fork child. */
336 if (follow_child && should_resume)
337 {
338 step_resume_breakpoint
339 = clone_momentary_breakpoint (tp->step_resume_breakpoint);
340 step_range_start = tp->step_range_start;
341 step_range_end = tp->step_range_end;
342 step_frame_id = tp->step_frame_id;
343
344 /* For now, delete the parent's sr breakpoint; otherwise,
345 parent/child sr breakpoints are considered duplicates,
346 and the child version will not be installed. Remove
347 this when the breakpoints module becomes aware of
348 inferiors and address spaces. */
349 delete_step_resume_breakpoint (tp);
350 tp->step_range_start = 0;
351 tp->step_range_end = 0;
352 tp->step_frame_id = null_frame_id;
353 }
354
355 parent = inferior_ptid;
356 child = tp->pending_follow.value.related_pid;
357
358 /* Tell the target to do whatever is necessary to follow
359 either parent or child. */
360 if (target_follow_fork (follow_child))
361 {
362 /* Target refused to follow, or there's some other reason
363 we shouldn't resume. */
364 should_resume = 0;
365 }
366 else
367 {
368 /* This pending follow fork event is now handled, one way
369 or another. The previously selected thread may be gone
370 from the lists by now, but if it is still around, we need
371 to clear the pending follow request. */
372 tp = find_thread_ptid (parent);
373 if (tp)
374 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
375
376 /* This makes sure we don't try to apply the "Switched
377 over from WAIT_PTID" logic above. */
378 nullify_last_target_wait_ptid ();
379
380 /* If we followed the child, switch to it... */
381 if (follow_child)
382 {
383 switch_to_thread (child);
384
385 /* ... and preserve the stepping state, in case the
386 user was stepping over the fork call. */
387 if (should_resume)
388 {
389 tp = inferior_thread ();
390 tp->step_resume_breakpoint = step_resume_breakpoint;
391 tp->step_range_start = step_range_start;
392 tp->step_range_end = step_range_end;
393 tp->step_frame_id = step_frame_id;
394 }
395 else
396 {
397 /* If we get here, it was because we're trying to
398 resume from a fork catchpoint, but the user
399 has switched threads away from the thread that
400 forked. In that case, the resume command
401 issued is most likely not applicable to the
402 child, so just warn, and refuse to resume. */
403 warning (_("\
404 Not resuming: switched threads before following fork child.\n"));
405 }
406
407 /* Reset breakpoints in the child as appropriate. */
408 follow_inferior_reset_breakpoints ();
409 }
410 else
411 switch_to_thread (parent);
412 }
413 }
414 break;
415 case TARGET_WAITKIND_SPURIOUS:
416 /* Nothing to follow. */
417 break;
418 default:
419 internal_error (__FILE__, __LINE__,
420 "Unexpected pending_follow.kind %d\n",
421 tp->pending_follow.kind);
422 break;
423 }
424
425 return should_resume;
426 }
427
428 void
429 follow_inferior_reset_breakpoints (void)
430 {
431 struct thread_info *tp = inferior_thread ();
432
433 /* Was there a step_resume breakpoint? (There was if the user
434 did a "next" at the fork() call.) If so, explicitly reset its
435 thread number.
436
437 step_resumes are a form of bp that are made to be per-thread.
438 Since we created the step_resume bp when the parent process
439 was being debugged, and now are switching to the child process,
440 from the breakpoint package's viewpoint, that's a switch of
441 "threads". We must update the bp's notion of which thread
442 it is for, or it'll be ignored when it triggers. */
443
444 if (tp->step_resume_breakpoint)
445 breakpoint_re_set_thread (tp->step_resume_breakpoint);
446
447 /* Reinsert all breakpoints in the child. The user may have set
448 breakpoints after catching the fork, in which case those
449 were never set in the child, but only in the parent. This makes
450 sure the inserted breakpoints match the breakpoint list. */
451
452 breakpoint_re_set ();
453 insert_breakpoints ();
454 }
455
456 /* The child has exited or execed: resume threads of the parent the
457 user wanted to be executing. */
458
459 static int
460 proceed_after_vfork_done (struct thread_info *thread,
461 void *arg)
462 {
463 int pid = * (int *) arg;
464
465 if (ptid_get_pid (thread->ptid) == pid
466 && is_running (thread->ptid)
467 && !is_executing (thread->ptid)
468 && !thread->stop_requested
469 && thread->stop_signal == TARGET_SIGNAL_0)
470 {
471 if (debug_infrun)
472 fprintf_unfiltered (gdb_stdlog,
473 "infrun: resuming vfork parent thread %s\n",
474 target_pid_to_str (thread->ptid));
475
476 switch_to_thread (thread->ptid);
477 clear_proceed_status ();
478 proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);
479 }
480
481 return 0;
482 }
483
484 /* Called whenever we notice an exec or exit event, to handle
485 detaching or resuming a vfork parent. */
486
487 static void
488 handle_vfork_child_exec_or_exit (int exec)
489 {
490 struct inferior *inf = current_inferior ();
491
492 if (inf->vfork_parent)
493 {
494 int resume_parent = -1;
495
496 /* This exec or exit marks the end of the shared memory region
497 between the parent and the child. If the user wanted to
498 detach from the parent, now is the time. */
499
500 if (inf->vfork_parent->pending_detach)
501 {
502 struct thread_info *tp;
503 struct cleanup *old_chain;
504 struct program_space *pspace;
505 struct address_space *aspace;
506
507 /* follow-fork child, detach-on-fork on */
508
509 old_chain = make_cleanup_restore_current_thread ();
510
511 /* We're letting go of the parent. */
512 tp = any_live_thread_of_process (inf->vfork_parent->pid);
513 switch_to_thread (tp->ptid);
514
515 /* We're about to detach from the parent, which implicitly
516 removes breakpoints from its address space. There's a
517 catch here: we want to reuse the spaces for the child,
518 but parent/child are still sharing the pspace at this
519 point, although the exec in reality makes the kernel give
520 the child a fresh set of new pages. The problem here is
521 that the breakpoints module, being unaware of this, would
522 likely choose the child process to write to the parent
523 address space. Swapping the child temporarily away from
524 the spaces has the desired effect. Yes, this is "sort
525 of" a hack. */
526
527 pspace = inf->pspace;
528 aspace = inf->aspace;
529 inf->aspace = NULL;
530 inf->pspace = NULL;
531
532 if (debug_infrun || info_verbose)
533 {
534 target_terminal_ours ();
535
536 if (exec)
537 fprintf_filtered (gdb_stdlog,
538 "Detaching vfork parent process %d after child exec.\n",
539 inf->vfork_parent->pid);
540 else
541 fprintf_filtered (gdb_stdlog,
542 "Detaching vfork parent process %d after child exit.\n",
543 inf->vfork_parent->pid);
544 }
545
546 target_detach (NULL, 0);
547
548 /* Put it back. */
549 inf->pspace = pspace;
550 inf->aspace = aspace;
551
552 do_cleanups (old_chain);
553 }
554 else if (exec)
555 {
556 /* We're staying attached to the parent, so, really give the
557 child a new address space. */
558 inf->pspace = add_program_space (maybe_new_address_space ());
559 inf->aspace = inf->pspace->aspace;
560 inf->removable = 1;
561 set_current_program_space (inf->pspace);
562
563 resume_parent = inf->vfork_parent->pid;
564
565 /* Break the bonds. */
566 inf->vfork_parent->vfork_child = NULL;
567 }
568 else
569 {
570 struct cleanup *old_chain;
571 struct program_space *pspace;
572
573 /* If this is a vfork child exiting, then the pspace and
574 aspaces were shared with the parent. Since we're
575 reporting the process exit, we'll be mourning all that is
576 found in the address space, and switching to null_ptid,
577 preparing to start a new inferior. But, since we don't
578 want to clobber the parent's address/program spaces, we
579 go ahead and create a new one for this exiting
580 inferior. */
581
582 /* Switch to null_ptid, so that clone_program_space doesn't want
583 to read the selected frame of a dead process. */
584 old_chain = save_inferior_ptid ();
585 inferior_ptid = null_ptid;
586
587 /* This inferior is dead, so avoid giving the breakpoints
588 module the option to write through to it (cloning a
589 program space resets breakpoints). */
590 inf->aspace = NULL;
591 inf->pspace = NULL;
592 pspace = add_program_space (maybe_new_address_space ());
593 set_current_program_space (pspace);
594 inf->removable = 1;
595 clone_program_space (pspace, inf->vfork_parent->pspace);
596 inf->pspace = pspace;
597 inf->aspace = pspace->aspace;
598
599 /* Put back inferior_ptid. We'll continue mourning this
600 inferior. */
601 do_cleanups (old_chain);
602
603 resume_parent = inf->vfork_parent->pid;
604 /* Break the bonds. */
605 inf->vfork_parent->vfork_child = NULL;
606 }
607
608 inf->vfork_parent = NULL;
609
610 gdb_assert (current_program_space == inf->pspace);
611
612 if (non_stop && resume_parent != -1)
613 {
614 /* If the user wanted the parent to be running, let it go
615 free now. */
616 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
617
618 if (debug_infrun)
619 fprintf_unfiltered (gdb_stdlog, "infrun: resuming vfork parent process %d\n",
620 resume_parent);
621
622 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
623
624 do_cleanups (old_chain);
625 }
626 }
627 }
628
629 /* Enum strings for "set|show follow-exec-mode". */
630
631 static const char follow_exec_mode_new[] = "new";
632 static const char follow_exec_mode_same[] = "same";
633 static const char *follow_exec_mode_names[] =
634 {
635 follow_exec_mode_new,
636 follow_exec_mode_same,
637 NULL,
638 };
639
640 static const char *follow_exec_mode_string = follow_exec_mode_same;
641 static void
642 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
643 struct cmd_list_element *c, const char *value)
644 {
645 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
646 }
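/* Similarly (illustrative):

     (gdb) set follow-exec-mode new

   makes follow_exec below create a fresh inferior and program space
   for the post-exec image instead of reusing the current ones.  */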
647
648 /* EXECD_PATHNAME is assumed to be non-NULL. */
649
650 static void
651 follow_exec (ptid_t pid, char *execd_pathname)
652 {
653 struct target_ops *tgt;
654 struct thread_info *th = inferior_thread ();
655 struct inferior *inf = current_inferior ();
656
657 /* This is an exec event that we actually wish to pay attention to.
658 Refresh our symbol table to the newly exec'd program, remove any
659 momentary bp's, etc.
660
661 If there are breakpoints, they aren't really inserted now,
662 since the exec() transformed our inferior into a fresh set
663 of instructions.
664
665 We want to preserve symbolic breakpoints on the list, since
666 we have hopes that they can be reset after the new a.out's
667 symbol table is read.
668
669 However, any "raw" breakpoints must be removed from the list
670 (e.g., the solib bp's), since their address is probably invalid
671 now.
672
673 And, we DON'T want to call delete_breakpoints() here, since
674 that may write the bp's "shadow contents" (the instruction
675 value that was overwritten with a TRAP instruction). Since
676 we now have a new a.out, those shadow contents aren't valid. */
677
678 mark_breakpoints_out ();
679
680 update_breakpoints_after_exec ();
681
682 /* If there was one, it's gone now. We cannot truly step-to-next
683 statement through an exec(). */
684 th->step_resume_breakpoint = NULL;
685 th->step_range_start = 0;
686 th->step_range_end = 0;
687
688 /* The target reports the exec event to the main thread, even if
689 some other thread does the exec, and even if the main thread was
690 already stopped --- if debugging in non-stop mode, it's possible
691 the user had the main thread held stopped in the previous image
692 --- release it now. This is the same behavior as step-over-exec
693 with scheduler-locking on in all-stop mode. */
694 th->stop_requested = 0;
695
696 /* What is this a.out's name? */
697 printf_unfiltered (_("%s is executing new program: %s\n"),
698 target_pid_to_str (inferior_ptid),
699 execd_pathname);
700
701 /* We've followed the inferior through an exec. Therefore, the
702 inferior has essentially been killed & reborn. */
703
704 gdb_flush (gdb_stdout);
705
706 breakpoint_init_inferior (inf_execd);
707
708 if (gdb_sysroot && *gdb_sysroot)
709 {
710 char *name = alloca (strlen (gdb_sysroot)
711 + strlen (execd_pathname)
712 + 1);
713 strcpy (name, gdb_sysroot);
714 strcat (name, execd_pathname);
715 execd_pathname = name;
716 }
717
718 /* Reset the shared library package. This ensures that we get a
719 shlib event when the child reaches "_start", at which point the
720 dld will have had a chance to initialize the child. */
721 /* Also, loading a symbol file below may trigger symbol lookups, and
722 we don't want those to be satisfied by the libraries of the
723 previous incarnation of this process. */
724 no_shared_libraries (NULL, 0);
725
726 if (follow_exec_mode_string == follow_exec_mode_new)
727 {
728 struct program_space *pspace;
729 struct inferior *new_inf;
730
731 /* The user wants to keep the old inferior and program spaces
732 around. Create a new fresh one, and switch to it. */
733
734 inf = add_inferior (current_inferior ()->pid);
735 pspace = add_program_space (maybe_new_address_space ());
736 inf->pspace = pspace;
737 inf->aspace = pspace->aspace;
738
739 exit_inferior_num_silent (current_inferior ()->num);
740
741 set_current_inferior (inf);
742 set_current_program_space (pspace);
743 }
744
745 gdb_assert (current_program_space == inf->pspace);
746
747 /* That a.out is now the one to use. */
748 exec_file_attach (execd_pathname, 0);
749
750 /* Load the main file's symbols. */
751 symbol_file_add_main (execd_pathname, 0);
752
753 #ifdef SOLIB_CREATE_INFERIOR_HOOK
754 SOLIB_CREATE_INFERIOR_HOOK (PIDGET (inferior_ptid));
755 #else
756 solib_create_inferior_hook (0);
757 #endif
758
759 jit_inferior_created_hook ();
760
761 /* Reinsert all breakpoints. (Those which were symbolic have
762 been reset to the proper address in the new a.out, thanks
763 to symbol_file_command...) */
764 insert_breakpoints ();
765
766 /* The next resume of this inferior should bring it to the shlib
767 startup breakpoints. (If the user had also set bp's on
768 "main" from the old (parent) process, then they'll auto-
769 matically get reset there in the new process.) */
770 }
771
772 /* Non-zero if we are just simulating a single-step. This is needed
773 because we cannot remove the breakpoints in the inferior process
774 until after the `wait' in `wait_for_inferior'. */
775 static int singlestep_breakpoints_inserted_p = 0;
776
777 /* The thread we inserted single-step breakpoints for. */
778 static ptid_t singlestep_ptid;
779
780 /* PC when we started this single-step. */
781 static CORE_ADDR singlestep_pc;
782
783 /* If another thread hit the singlestep breakpoint, we save the original
784 thread here so that we can resume single-stepping it later. */
785 static ptid_t saved_singlestep_ptid;
786 static int stepping_past_singlestep_breakpoint;
787
788 /* If not equal to null_ptid, this means that after stepping over breakpoint
789 is finished, we need to switch to deferred_step_ptid, and step it.
790
791 The use case is when one thread has hit a breakpoint, and then the user
792 has switched to another thread and issued 'step'. We need to step over
793 breakpoint in the thread which hit the breakpoint, but then continue
794 stepping the thread user has selected. */
795 static ptid_t deferred_step_ptid;
796 \f
797 /* Displaced stepping. */
798
799 /* In non-stop debugging mode, we must take special care to manage
800 breakpoints properly; in particular, the traditional strategy for
801 stepping a thread past a breakpoint it has hit is unsuitable.
802 'Displaced stepping' is a tactic for stepping one thread past a
803 breakpoint it has hit while ensuring that other threads running
804 concurrently will hit the breakpoint as they should.
805
806 The traditional way to step a thread T off a breakpoint in a
807 multi-threaded program in all-stop mode is as follows:
808
809 a0) Initially, all threads are stopped, and breakpoints are not
810 inserted.
811 a1) We single-step T, leaving breakpoints uninserted.
812 a2) We insert breakpoints, and resume all threads.
813
814 In non-stop debugging, however, this strategy is unsuitable: we
815 don't want to have to stop all threads in the system in order to
816 continue or step T past a breakpoint. Instead, we use displaced
817 stepping:
818
819 n0) Initially, T is stopped, other threads are running, and
820 breakpoints are inserted.
821 n1) We copy the instruction "under" the breakpoint to a separate
822 location, outside the main code stream, making any adjustments
823 to the instruction, register, and memory state as directed by
824 T's architecture.
825 n2) We single-step T over the instruction at its new location.
826 n3) We adjust the resulting register and memory state as directed
827 by T's architecture. This includes resetting T's PC to point
828 back into the main instruction stream.
829 n4) We resume T.
830
831 This approach depends on the following gdbarch methods:
832
833 - gdbarch_max_insn_length and gdbarch_displaced_step_location
834 indicate where to copy the instruction, and how much space must
835 be reserved there. We use these in step n1.
836
837 - gdbarch_displaced_step_copy_insn copies an instruction to a new
838 address, and makes any necessary adjustments to the instruction,
839 register contents, and memory. We use this in step n1.
840
841 - gdbarch_displaced_step_fixup adjusts registers and memory after
842 we have successfully single-stepped the instruction, to yield the
843 same effect the instruction would have had if we had executed it
844 at its original address. We use this in step n3.
845
846 - gdbarch_displaced_step_free_closure provides cleanup.
847
848 The gdbarch_displaced_step_copy_insn and
849 gdbarch_displaced_step_fixup functions must be written so that
850 copying an instruction with gdbarch_displaced_step_copy_insn,
851 single-stepping across the copied instruction, and then applying
852 gdbarch_displaced_step_fixup should have the same effects on the
853 thread's memory and registers as stepping the instruction in place
854 would have. Exactly which responsibilities fall to the copy and
855 which fall to the fixup is up to the author of those functions.
856
857 See the comments in gdbarch.sh for details.
858
859 Note that displaced stepping and software single-step cannot
860 currently be used in combination, although with some care I think
861 they could be made to. Software single-step works by placing
862 breakpoints on all possible subsequent instructions; if the
863 displaced instruction is a PC-relative jump, those breakpoints
864 could fall in very strange places --- on pages that aren't
865 executable, or at addresses that are not proper instruction
866 boundaries. (We do generally let other threads run while we wait
867 to hit the software single-step breakpoint, and they might
868 encounter such a corrupted instruction.) One way to work around
869 this would be to have gdbarch_displaced_step_copy_insn fully
870 simulate the effect of PC-relative instructions (and return NULL)
871 on architectures that use software single-stepping.
872
873 In non-stop mode, we can have independent and simultaneous step
874 requests, so more than one thread may need to simultaneously step
875 over a breakpoint. The current implementation assumes there is
876 only one scratch space per process. In this case, we have to
877 serialize access to the scratch space. If thread A wants to step
878 over a breakpoint, but we are currently waiting for some other
879 thread to complete a displaced step, we leave thread A stopped and
880 place it in the displaced_step_request_queue. Whenever a displaced
881 step finishes, we pick the next thread in the queue and start a new
882 displaced step operation on it. See displaced_step_prepare and
883 displaced_step_fixup for details. */
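/* A rough sketch of one displaced-step cycle, in terms of the
   helpers defined below (illustrative only, kept out of the build
   with #if 0):  */
#if 0
  /* Steps n1/n2: copy the instruction to the scratch area and
     single-step the copy; if another thread currently owns the
     scratch area, the request is queued instead and 0 is
     returned.  */
  if (displaced_step_prepare (inferior_ptid))
    target_resume (inferior_ptid, 1, TARGET_SIGNAL_0);

  /* Later, once target_wait reports that the step finished (in
     EVENT_PTID): step n3, fix up registers and memory and point the
     PC back at the original instruction stream; any queued request
     is then started.  */
  displaced_step_fixup (event_ptid, TARGET_SIGNAL_TRAP);

  /* Step n4: the thread is resumed normally afterwards.  */
#endif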
884
885 struct displaced_step_request
886 {
887 ptid_t ptid;
888 struct displaced_step_request *next;
889 };
890
891 /* Per-inferior displaced stepping state. */
892 struct displaced_step_inferior_state
893 {
894 /* Pointer to next in linked list. */
895 struct displaced_step_inferior_state *next;
896
897 /* The process this displaced step state refers to. */
898 int pid;
899
900 /* A queue of pending displaced stepping requests. One entry per
901 thread that needs to do a displaced step. */
902 struct displaced_step_request *step_request_queue;
903
904 /* If this is not null_ptid, this is the thread carrying out a
905 displaced single-step in process PID. This thread's state will
906 require fixing up once it has completed its step. */
907 ptid_t step_ptid;
908
909 /* The architecture the thread had when we stepped it. */
910 struct gdbarch *step_gdbarch;
911
912 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
913 for post-step cleanup. */
914 struct displaced_step_closure *step_closure;
915
916 /* The address of the original instruction, and the copy we
917 made. */
918 CORE_ADDR step_original, step_copy;
919
920 /* Saved contents of copy area. */
921 gdb_byte *step_saved_copy;
922 };
923
924 /* The list of states of processes involved in displaced stepping
925 presently. */
926 static struct displaced_step_inferior_state *displaced_step_inferior_states;
927
928 /* Get the displaced stepping state of process PID. */
929
930 static struct displaced_step_inferior_state *
931 get_displaced_stepping_state (int pid)
932 {
933 struct displaced_step_inferior_state *state;
934
935 for (state = displaced_step_inferior_states;
936 state != NULL;
937 state = state->next)
938 if (state->pid == pid)
939 return state;
940
941 return NULL;
942 }
943
944 /* Add a new displaced stepping state for process PID to the displaced
945 stepping state list, or return a pointer to an already existing
946 entry, if it already exists. Never returns NULL. */
947
948 static struct displaced_step_inferior_state *
949 add_displaced_stepping_state (int pid)
950 {
951 struct displaced_step_inferior_state *state;
952
953 for (state = displaced_step_inferior_states;
954 state != NULL;
955 state = state->next)
956 if (state->pid == pid)
957 return state;
958
959 state = xcalloc (1, sizeof (*state));
960 state->pid = pid;
961 state->next = displaced_step_inferior_states;
962 displaced_step_inferior_states = state;
963
964 return state;
965 }
966
967 /* Remove the displaced stepping state of process PID. */
968
969 static void
970 remove_displaced_stepping_state (int pid)
971 {
972 struct displaced_step_inferior_state *it, **prev_next_p;
973
974 gdb_assert (pid != 0);
975
976 it = displaced_step_inferior_states;
977 prev_next_p = &displaced_step_inferior_states;
978 while (it)
979 {
980 if (it->pid == pid)
981 {
982 *prev_next_p = it->next;
983 xfree (it);
984 return;
985 }
986
987 prev_next_p = &it->next;
988 it = *prev_next_p;
989 }
990 }
991
992 static void
993 infrun_inferior_exit (struct inferior *inf)
994 {
995 remove_displaced_stepping_state (inf->pid);
996 }
997
998 /* Enum strings for "set|show displaced-stepping". */
999
1000 static const char can_use_displaced_stepping_auto[] = "auto";
1001 static const char can_use_displaced_stepping_on[] = "on";
1002 static const char can_use_displaced_stepping_off[] = "off";
1003 static const char *can_use_displaced_stepping_enum[] =
1004 {
1005 can_use_displaced_stepping_auto,
1006 can_use_displaced_stepping_on,
1007 can_use_displaced_stepping_off,
1008 NULL,
1009 };
1010
1011 /* If ON, and the architecture supports it, GDB will use displaced
1012 stepping to step over breakpoints. If OFF, or if the architecture
1013 doesn't support it, GDB will instead use the traditional
1014 hold-and-step approach. If AUTO (which is the default), GDB will
1015 decide which technique to use to step over breakpoints depending on
1016 which of all-stop or non-stop mode is active --- displaced stepping
1017 in non-stop mode; hold-and-step in all-stop mode. */
1018
1019 static const char *can_use_displaced_stepping =
1020 can_use_displaced_stepping_auto;
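/* For example, from the command line (illustrative):

     (gdb) set displaced-stepping on
     (gdb) show displaced-stepping

   forces displaced stepping whenever the architecture supports it,
   even in all-stop mode; "auto", the default, enables it only in
   non-stop mode, as decided by use_displaced_stepping below.  */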
1021
1022 static void
1023 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1024 struct cmd_list_element *c,
1025 const char *value)
1026 {
1027 if (can_use_displaced_stepping == can_use_displaced_stepping_auto)
1028 fprintf_filtered (file, _("\
1029 Debugger's willingness to use displaced stepping to step over \
1030 breakpoints is %s (currently %s).\n"),
1031 value, non_stop ? "on" : "off");
1032 else
1033 fprintf_filtered (file, _("\
1034 Debugger's willingness to use displaced stepping to step over \
1035 breakpoints is %s.\n"), value);
1036 }
1037
1038 /* Return non-zero if displaced stepping can/should be used to step
1039 over breakpoints. */
1040
1041 static int
1042 use_displaced_stepping (struct gdbarch *gdbarch)
1043 {
1044 return (((can_use_displaced_stepping == can_use_displaced_stepping_auto
1045 && non_stop)
1046 || can_use_displaced_stepping == can_use_displaced_stepping_on)
1047 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1048 && !RECORD_IS_USED);
1049 }
1050
1051 /* Clean out any stray displaced stepping state. */
1052 static void
1053 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1054 {
1055 /* Indicate that there is no cleanup pending. */
1056 displaced->step_ptid = null_ptid;
1057
1058 if (displaced->step_closure)
1059 {
1060 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1061 displaced->step_closure);
1062 displaced->step_closure = NULL;
1063 }
1064 }
1065
1066 static void
1067 displaced_step_clear_cleanup (void *arg)
1068 {
1069 struct displaced_step_inferior_state *state = arg;
1070
1071 displaced_step_clear (state);
1072 }
1073
1074 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1075 void
1076 displaced_step_dump_bytes (struct ui_file *file,
1077 const gdb_byte *buf,
1078 size_t len)
1079 {
1080 int i;
1081
1082 for (i = 0; i < len; i++)
1083 fprintf_unfiltered (file, "%02x ", buf[i]);
1084 fputs_unfiltered ("\n", file);
1085 }
1086
1087 /* Prepare to single-step, using displaced stepping.
1088
1089 Note that we cannot use displaced stepping when we have a signal to
1090 deliver. If we have a signal to deliver and an instruction to step
1091 over, then after the step, there will be no indication from the
1092 target whether the thread entered a signal handler or ignored the
1093 signal and stepped over the instruction successfully --- both cases
1094 result in a simple SIGTRAP. In the first case we mustn't do a
1095 fixup, and in the second case we must --- but we can't tell which.
1096 Comments in the code for 'random signals' in handle_inferior_event
1097 explain how we handle this case instead.
1098
1099 Returns 1 if preparing was successful -- this thread is going to be
1100 stepped now; or 0 if displaced stepping this thread got queued. */
1101 static int
1102 displaced_step_prepare (ptid_t ptid)
1103 {
1104 struct cleanup *old_cleanups, *ignore_cleanups;
1105 struct regcache *regcache = get_thread_regcache (ptid);
1106 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1107 CORE_ADDR original, copy;
1108 ULONGEST len;
1109 struct displaced_step_closure *closure;
1110 struct displaced_step_inferior_state *displaced;
1111
1112 /* We should never reach this function if the architecture does not
1113 support displaced stepping. */
1114 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1115
1116 /* We have to displaced step one thread at a time, as we only have
1117 access to a single scratch space per inferior. */
1118
1119 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1120
1121 if (!ptid_equal (displaced->step_ptid, null_ptid))
1122 {
1123 /* Already waiting for a displaced step to finish. Defer this
1124 request and place in queue. */
1125 struct displaced_step_request *req, *new_req;
1126
1127 if (debug_displaced)
1128 fprintf_unfiltered (gdb_stdlog,
1129 "displaced: defering step of %s\n",
1130 target_pid_to_str (ptid));
1131
1132 new_req = xmalloc (sizeof (*new_req));
1133 new_req->ptid = ptid;
1134 new_req->next = NULL;
1135
1136 if (displaced->step_request_queue)
1137 {
1138 for (req = displaced->step_request_queue;
1139 req && req->next;
1140 req = req->next)
1141 ;
1142 req->next = new_req;
1143 }
1144 else
1145 displaced->step_request_queue = new_req;
1146
1147 return 0;
1148 }
1149 else
1150 {
1151 if (debug_displaced)
1152 fprintf_unfiltered (gdb_stdlog,
1153 "displaced: stepping %s now\n",
1154 target_pid_to_str (ptid));
1155 }
1156
1157 displaced_step_clear (displaced);
1158
1159 old_cleanups = save_inferior_ptid ();
1160 inferior_ptid = ptid;
1161
1162 original = regcache_read_pc (regcache);
1163
1164 copy = gdbarch_displaced_step_location (gdbarch);
1165 len = gdbarch_max_insn_length (gdbarch);
1166
1167 /* Save the original contents of the copy area. */
1168 displaced->step_saved_copy = xmalloc (len);
1169 ignore_cleanups = make_cleanup (free_current_contents,
1170 &displaced->step_saved_copy);
1171 read_memory (copy, displaced->step_saved_copy, len);
1172 if (debug_displaced)
1173 {
1174 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1175 paddress (gdbarch, copy));
1176 displaced_step_dump_bytes (gdb_stdlog,
1177 displaced->step_saved_copy,
1178 len);
1179 };
1180
1181 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1182 original, copy, regcache);
1183
1184 /* We don't support the fully-simulated case at present. */
1185 gdb_assert (closure);
1186
1187 /* Save the information we need to fix things up if the step
1188 succeeds. */
1189 displaced->step_ptid = ptid;
1190 displaced->step_gdbarch = gdbarch;
1191 displaced->step_closure = closure;
1192 displaced->step_original = original;
1193 displaced->step_copy = copy;
1194
1195 make_cleanup (displaced_step_clear_cleanup, displaced);
1196
1197 /* Resume execution at the copy. */
1198 regcache_write_pc (regcache, copy);
1199
1200 discard_cleanups (ignore_cleanups);
1201
1202 do_cleanups (old_cleanups);
1203
1204 if (debug_displaced)
1205 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1206 paddress (gdbarch, copy));
1207
1208 return 1;
1209 }
1210
1211 static void
1212 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
1213 {
1214 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1215 inferior_ptid = ptid;
1216 write_memory (memaddr, myaddr, len);
1217 do_cleanups (ptid_cleanup);
1218 }
1219
1220 static void
1221 displaced_step_fixup (ptid_t event_ptid, enum target_signal signal)
1222 {
1223 struct cleanup *old_cleanups;
1224 struct displaced_step_inferior_state *displaced
1225 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1226
1227 /* Was any thread of this process doing a displaced step? */
1228 if (displaced == NULL)
1229 return;
1230
1231 /* Was this event for the pid we displaced? */
1232 if (ptid_equal (displaced->step_ptid, null_ptid)
1233 || ! ptid_equal (displaced->step_ptid, event_ptid))
1234 return;
1235
1236 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1237
1238 /* Restore the contents of the copy area. */
1239 {
1240 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1241 write_memory_ptid (displaced->step_ptid, displaced->step_copy,
1242 displaced->step_saved_copy, len);
1243 if (debug_displaced)
1244 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s\n",
1245 paddress (displaced->step_gdbarch,
1246 displaced->step_copy));
1247 }
1248
1249 /* Did the instruction complete successfully? */
1250 if (signal == TARGET_SIGNAL_TRAP)
1251 {
1252 /* Fix up the resulting state. */
1253 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1254 displaced->step_closure,
1255 displaced->step_original,
1256 displaced->step_copy,
1257 get_thread_regcache (displaced->step_ptid));
1258 }
1259 else
1260 {
1261 /* Since the instruction didn't complete, all we can do is
1262 relocate the PC. */
1263 struct regcache *regcache = get_thread_regcache (event_ptid);
1264 CORE_ADDR pc = regcache_read_pc (regcache);
1265 pc = displaced->step_original + (pc - displaced->step_copy);
1266 regcache_write_pc (regcache, pc);
1267 }
1268
1269 do_cleanups (old_cleanups);
1270
1271 displaced->step_ptid = null_ptid;
1272
1273 /* Are there any pending displaced stepping requests? If so, run
1274 one now. Leave the state object around, since we're likely to
1275 need it again soon. */
1276 while (displaced->step_request_queue)
1277 {
1278 struct displaced_step_request *head;
1279 ptid_t ptid;
1280 struct regcache *regcache;
1281 struct gdbarch *gdbarch;
1282 CORE_ADDR actual_pc;
1283 struct address_space *aspace;
1284
1285 head = displaced->step_request_queue;
1286 ptid = head->ptid;
1287 displaced->step_request_queue = head->next;
1288 xfree (head);
1289
1290 context_switch (ptid);
1291
1292 regcache = get_thread_regcache (ptid);
1293 actual_pc = regcache_read_pc (regcache);
1294 aspace = get_regcache_aspace (regcache);
1295
1296 if (breakpoint_here_p (aspace, actual_pc))
1297 {
1298 if (debug_displaced)
1299 fprintf_unfiltered (gdb_stdlog,
1300 "displaced: stepping queued %s now\n",
1301 target_pid_to_str (ptid));
1302
1303 displaced_step_prepare (ptid);
1304
1305 gdbarch = get_regcache_arch (regcache);
1306
1307 if (debug_displaced)
1308 {
1309 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1310 gdb_byte buf[4];
1311
1312 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1313 paddress (gdbarch, actual_pc));
1314 read_memory (actual_pc, buf, sizeof (buf));
1315 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1316 }
1317
1318 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1319 displaced->step_closure))
1320 target_resume (ptid, 1, TARGET_SIGNAL_0);
1321 else
1322 target_resume (ptid, 0, TARGET_SIGNAL_0);
1323
1324 /* Done, we're stepping a thread. */
1325 break;
1326 }
1327 else
1328 {
1329 int step;
1330 struct thread_info *tp = inferior_thread ();
1331
1332 /* The breakpoint we were sitting under has since been
1333 removed. */
1334 tp->trap_expected = 0;
1335
1336 /* Go back to what we were trying to do. */
1337 step = currently_stepping (tp);
1338
1339 if (debug_displaced)
1340 fprintf_unfiltered (gdb_stdlog, "breakpoint is gone %s: step(%d)\n",
1341 target_pid_to_str (tp->ptid), step);
1342
1343 target_resume (ptid, step, TARGET_SIGNAL_0);
1344 tp->stop_signal = TARGET_SIGNAL_0;
1345
1346 /* This request was discarded. See if there's any other
1347 thread waiting for its turn. */
1348 }
1349 }
1350 }
1351
1352 /* Update global variables holding ptids to hold NEW_PTID if they were
1353 holding OLD_PTID. */
1354 static void
1355 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1356 {
1357 struct displaced_step_request *it;
1358 struct displaced_step_inferior_state *displaced;
1359
1360 if (ptid_equal (inferior_ptid, old_ptid))
1361 inferior_ptid = new_ptid;
1362
1363 if (ptid_equal (singlestep_ptid, old_ptid))
1364 singlestep_ptid = new_ptid;
1365
1366 if (ptid_equal (deferred_step_ptid, old_ptid))
1367 deferred_step_ptid = new_ptid;
1368
1369 for (displaced = displaced_step_inferior_states;
1370 displaced;
1371 displaced = displaced->next)
1372 {
1373 if (ptid_equal (displaced->step_ptid, old_ptid))
1374 displaced->step_ptid = new_ptid;
1375
1376 for (it = displaced->step_request_queue; it; it = it->next)
1377 if (ptid_equal (it->ptid, old_ptid))
1378 it->ptid = new_ptid;
1379 }
1380 }
1381
1382 \f
1383 /* Resuming. */
1384
1385 /* Things to clean up if we QUIT out of resume (). */
1386 static void
1387 resume_cleanups (void *ignore)
1388 {
1389 normal_stop ();
1390 }
1391
1392 static const char schedlock_off[] = "off";
1393 static const char schedlock_on[] = "on";
1394 static const char schedlock_step[] = "step";
1395 static const char *scheduler_enums[] = {
1396 schedlock_off,
1397 schedlock_on,
1398 schedlock_step,
1399 NULL
1400 };
1401 static const char *scheduler_mode = schedlock_off;
1402 static void
1403 show_scheduler_mode (struct ui_file *file, int from_tty,
1404 struct cmd_list_element *c, const char *value)
1405 {
1406 fprintf_filtered (file, _("\
1407 Mode for locking scheduler during execution is \"%s\".\n"),
1408 value);
1409 }
1410
1411 static void
1412 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1413 {
1414 if (!target_can_lock_scheduler)
1415 {
1416 scheduler_mode = schedlock_off;
1417 error (_("Target '%s' cannot support this command."), target_shortname);
1418 }
1419 }
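/* For example (illustrative):

     (gdb) set scheduler-locking step

   keeps other threads stopped while stepping; the resume logic
   below honors this when scheduler_mode is schedlock_step.  */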
1420
1421 /* True if execution commands resume all threads of all processes by
1422 default; otherwise, resume only threads of the current inferior
1423 process. */
1424 int sched_multi = 0;
1425
1426 /* Try to set up for software single stepping over the specified location.
1427 Return 1 if target_resume() should use hardware single step.
1428
1429 GDBARCH the current gdbarch.
1430 PC the location to step over. */
1431
1432 static int
1433 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1434 {
1435 int hw_step = 1;
1436
1437 if (gdbarch_software_single_step_p (gdbarch)
1438 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1439 {
1440 hw_step = 0;
1441 /* Do not pull these breakpoints until after a `wait' in
1442 `wait_for_inferior' */
1443 singlestep_breakpoints_inserted_p = 1;
1444 singlestep_ptid = inferior_ptid;
1445 singlestep_pc = pc;
1446 }
1447 return hw_step;
1448 }
1449
1450 /* Resume the inferior, but allow a QUIT. This is useful if the user
1451 wants to interrupt some lengthy single-stepping operation
1452 (for child processes, the SIGINT goes to the inferior, and so
1453 we get a SIGINT random_signal, but for remote debugging and perhaps
1454 other targets, that's not true).
1455
1456 STEP nonzero if we should step (zero to continue instead).
1457 SIG is the signal to give the inferior (zero for none). */
1458 void
1459 resume (int step, enum target_signal sig)
1460 {
1461 int should_resume = 1;
1462 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1463 struct regcache *regcache = get_current_regcache ();
1464 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1465 struct thread_info *tp = inferior_thread ();
1466 CORE_ADDR pc = regcache_read_pc (regcache);
1467 struct address_space *aspace = get_regcache_aspace (regcache);
1468
1469 QUIT;
1470
1471 if (debug_infrun)
1472 fprintf_unfiltered (gdb_stdlog,
1473 "infrun: resume (step=%d, signal=%d), "
1474 "trap_expected=%d\n",
1475 step, sig, tp->trap_expected);
1476
1477 /* Normally, by the time we reach `resume', the breakpoints are either
1478 removed or inserted, as appropriate. The exception is if we're sitting
1479 at a permanent breakpoint; we need to step over it, but permanent
1480 breakpoints can't be removed. So we have to test for it here. */
1481 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
1482 {
1483 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1484 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1485 else
1486 error (_("\
1487 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1488 how to step past a permanent breakpoint on this architecture. Try using\n\
1489 a command like `return' or `jump' to continue execution."));
1490 }
1491
1492 /* If enabled, step over breakpoints by executing a copy of the
1493 instruction at a different address.
1494
1495 We can't use displaced stepping when we have a signal to deliver;
1496 the comments for displaced_step_prepare explain why. The
1497 comments in handle_inferior_event for dealing with 'random
1498 signals' explain what we do instead. */
1499 if (use_displaced_stepping (gdbarch)
1500 && (tp->trap_expected
1501 || (step && gdbarch_software_single_step_p (gdbarch)))
1502 && sig == TARGET_SIGNAL_0)
1503 {
1504 struct displaced_step_inferior_state *displaced;
1505
1506 if (!displaced_step_prepare (inferior_ptid))
1507 {
1508 /* Got placed in displaced stepping queue. Will be resumed
1509 later when all the currently queued displaced stepping
1510 requests finish. The thread is not executing at this point,
1511 and the call to set_executing will be made later. But we
1512 need to call set_running here, since from frontend point of view,
1513 the thread is running. */
1514 set_running (inferior_ptid, 1);
1515 discard_cleanups (old_cleanups);
1516 return;
1517 }
1518
1519 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1520 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
1521 displaced->step_closure);
1522 }
1523
1524 /* Do we need to do it the hard way, w/temp breakpoints? */
1525 else if (step)
1526 step = maybe_software_singlestep (gdbarch, pc);
1527
1528 if (should_resume)
1529 {
1530 ptid_t resume_ptid;
1531
1532 /* If STEP is set, it's a request to use hardware stepping
1533 facilities. But in that case, we should never
1534 use a singlestep breakpoint.
1535 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1536
1537 /* Decide the set of threads to ask the target to resume. Start
1538 by assuming everything will be resumed, then narrow the set
1539 by applying increasingly restricting conditions. */
1540
1541 /* By default, resume all threads of all processes. */
1542 resume_ptid = RESUME_ALL;
1543
1544 /* Maybe resume only all threads of the current process. */
1545 if (!sched_multi && target_supports_multi_process ())
1546 {
1547 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1548 }
1549
1550 /* Maybe resume a single thread after all. */
1551 if (singlestep_breakpoints_inserted_p
1552 && stepping_past_singlestep_breakpoint)
1553 {
1554 /* The situation here is as follows. In thread T1 we wanted to
1555 single-step. Lacking hardware single-stepping, we've set a
1556 breakpoint at the PC of the next instruction -- call it
1557 P. After resuming, we've hit that breakpoint in thread T2.
1558 Now we've removed the original breakpoint, inserted a breakpoint
1559 at P+1, and are trying to step to advance T2 past the breakpoint.
1560 We need to step only T2; if T1 is allowed to run freely,
1561 it can run past P, and if other threads are allowed to run,
1562 they can hit the breakpoint at P+1, and nested hits of single-step
1563 breakpoints are not something we'd want -- that's complicated
1564 to support, and has no value. */
1565 resume_ptid = inferior_ptid;
1566 }
1567 else if ((step || singlestep_breakpoints_inserted_p)
1568 && tp->trap_expected)
1569 {
1570 /* We're allowing a thread to run past a breakpoint it has
1571 hit, by single-stepping the thread with the breakpoint
1572 removed. In that case, we need to single-step only this
1573 thread, and keep others stopped, as they can miss this
1574 breakpoint if allowed to run.
1575
1576 The current code actually removes all breakpoints when
1577 doing this, not just the one being stepped over, so if we
1578 let other threads run, we can actually miss any
1579 breakpoint, not just the one at PC. */
1580 resume_ptid = inferior_ptid;
1581 }
1582 else if (non_stop)
1583 {
1584 /* With non-stop mode on, threads are always handled
1585 individually. */
1586 resume_ptid = inferior_ptid;
1587 }
1588 else if ((scheduler_mode == schedlock_on)
1589 || (scheduler_mode == schedlock_step
1590 && (step || singlestep_breakpoints_inserted_p)))
1591 {
1592 /* User-settable 'scheduler' mode requires solo thread resume. */
1593 resume_ptid = inferior_ptid;
1594 }
1595
1596 if (gdbarch_cannot_step_breakpoint (gdbarch))
1597 {
1598 /* Most targets can step a breakpoint instruction, thus
1599 executing it normally. But if this one cannot, just
1600 continue and we will hit it anyway. */
1601 if (step && breakpoint_inserted_here_p (aspace, pc))
1602 step = 0;
1603 }
1604
1605 if (debug_displaced
1606 && use_displaced_stepping (gdbarch)
1607 && tp->trap_expected)
1608 {
1609 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1610 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1611 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1612 gdb_byte buf[4];
1613
1614 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1615 paddress (resume_gdbarch, actual_pc));
1616 read_memory (actual_pc, buf, sizeof (buf));
1617 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1618 }
1619
1620 /* Install inferior's terminal modes. */
1621 target_terminal_inferior ();
1622
1623 /* Avoid confusing the next resume, if the next stop/resume
1624 happens to apply to another thread. */
1625 tp->stop_signal = TARGET_SIGNAL_0;
1626
1627 target_resume (resume_ptid, step, sig);
1628 }
1629
1630 discard_cleanups (old_cleanups);
1631 }
1632 \f
1633 /* Proceeding. */
1634
1635 /* Clear out all variables saying what to do when inferior is continued.
1636 First do this, then set the ones you want, then call `proceed'. */
1637
1638 static void
1639 clear_proceed_status_thread (struct thread_info *tp)
1640 {
1641 if (debug_infrun)
1642 fprintf_unfiltered (gdb_stdlog,
1643 "infrun: clear_proceed_status_thread (%s)\n",
1644 target_pid_to_str (tp->ptid));
1645
1646 tp->trap_expected = 0;
1647 tp->step_range_start = 0;
1648 tp->step_range_end = 0;
1649 tp->step_frame_id = null_frame_id;
1650 tp->step_stack_frame_id = null_frame_id;
1651 tp->step_over_calls = STEP_OVER_UNDEBUGGABLE;
1652 tp->stop_requested = 0;
1653
1654 tp->stop_step = 0;
1655
1656 tp->proceed_to_finish = 0;
1657
1658 /* Discard any remaining commands or status from previous stop. */
1659 bpstat_clear (&tp->stop_bpstat);
1660 }
1661
1662 static int
1663 clear_proceed_status_callback (struct thread_info *tp, void *data)
1664 {
1665 if (is_exited (tp->ptid))
1666 return 0;
1667
1668 clear_proceed_status_thread (tp);
1669 return 0;
1670 }
1671
1672 void
1673 clear_proceed_status (void)
1674 {
1675 if (!non_stop)
1676 {
1677 /* In all-stop mode, delete the per-thread status of all
1678 threads; even if inferior_ptid is null_ptid, there may be
1679 threads on the list. E.g., we may be launching a new
1680 process while selecting the executable. */
1681 iterate_over_threads (clear_proceed_status_callback, NULL);
1682 }
1683
1684 if (!ptid_equal (inferior_ptid, null_ptid))
1685 {
1686 struct inferior *inferior;
1687
1688 if (non_stop)
1689 {
1690 /* If in non-stop mode, only delete the per-thread status of
1691 the current thread. */
1692 clear_proceed_status_thread (inferior_thread ());
1693 }
1694
1695 inferior = current_inferior ();
1696 inferior->stop_soon = NO_STOP_QUIETLY;
1697 }
1698
1699 stop_after_trap = 0;
1700
1701 observer_notify_about_to_proceed ();
1702
1703 if (stop_registers)
1704 {
1705 regcache_xfree (stop_registers);
1706 stop_registers = NULL;
1707 }
1708 }
1709
1710 /* Check the current thread against the thread that reported the most recent
1711 event. If a step-over is required, return TRUE and set the current thread
1712 to the old thread. Otherwise return FALSE.
1713
1714 This should be suitable for any targets that support threads. */
1715
1716 static int
1717 prepare_to_proceed (int step)
1718 {
1719 ptid_t wait_ptid;
1720 struct target_waitstatus wait_status;
1721 int schedlock_enabled;
1722
1723 /* With non-stop mode on, threads are always handled individually. */
1724 gdb_assert (! non_stop);
1725
1726 /* Get the last target status returned by target_wait(). */
1727 get_last_target_status (&wait_ptid, &wait_status);
1728
1729 /* Make sure we were stopped at a breakpoint. */
1730 if (wait_status.kind != TARGET_WAITKIND_STOPPED
1731 || (wait_status.value.sig != TARGET_SIGNAL_TRAP
1732 && wait_status.value.sig != TARGET_SIGNAL_ILL
1733 && wait_status.value.sig != TARGET_SIGNAL_SEGV
1734 && wait_status.value.sig != TARGET_SIGNAL_EMT))
1735 {
1736 return 0;
1737 }
1738
1739 schedlock_enabled = (scheduler_mode == schedlock_on
1740 || (scheduler_mode == schedlock_step
1741 && step));
1742
1743 /* Don't switch over to WAIT_PTID if scheduler locking is on. */
1744 if (schedlock_enabled)
1745 return 0;
1746
1747 /* Don't switch over if we're about to resume some process
1748 other than WAIT_PTID's, and schedule-multiple is off. */
1749 if (!sched_multi
1750 && ptid_get_pid (wait_ptid) != ptid_get_pid (inferior_ptid))
1751 return 0;
1752
1753 /* Have we switched away from WAIT_PTID, the thread that reported the last event? */
1754 if (!ptid_equal (wait_ptid, minus_one_ptid)
1755 && !ptid_equal (inferior_ptid, wait_ptid))
1756 {
1757 struct regcache *regcache = get_thread_regcache (wait_ptid);
1758
1759 if (breakpoint_here_p (get_regcache_aspace (regcache),
1760 regcache_read_pc (regcache)))
1761 {
1762 /* If stepping, remember current thread to switch back to. */
1763 if (step)
1764 deferred_step_ptid = inferior_ptid;
1765
1766 /* Switch back to the WAIT_PTID thread. */
1767 switch_to_thread (wait_ptid);
1768
1769 /* We return 1 to indicate that there is a breakpoint here,
1770 so we need to step over it before continuing to avoid
1771 hitting it straight away. */
1772 return 1;
1773 }
1774 }
1775
1776 return 0;
1777 }
1778
1779 /* Basic routine for continuing the program in various fashions.
1780
1781 ADDR is the address to resume at, or -1 for resume where stopped.
1782 SIGGNAL is the signal to give it, or 0 for none,
1783 or -1 for act according to how it stopped.
1784 STEP is nonzero if we should trap after one instruction.
1785 -1 means return after that and print nothing.
1786 You should probably set various step_... variables
1787 before calling here, if you are stepping.
1788
1789 You should call clear_proceed_status before calling proceed. */
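/* As an illustration only (a sketch of the typical calling pattern,
   not a quote of any particular call site): a command that simply
   resumes the inferior at the stopped PC, with default signal
   handling and no single-step, would do

     clear_proceed_status ();
     proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);

   i.e. reset the per-thread proceed state first and then resume
   "where stopped".  */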
1790
1791 void
1792 proceed (CORE_ADDR addr, enum target_signal siggnal, int step)
1793 {
1794 struct regcache *regcache;
1795 struct gdbarch *gdbarch;
1796 struct thread_info *tp;
1797 CORE_ADDR pc;
1798 struct address_space *aspace;
1799 int oneproc = 0;
1800
1801 /* If we're stopped at a fork/vfork, follow the branch set by the
1802 "set follow-fork-mode" command; otherwise, we'll just proceed
1803 resuming the current thread. */
1804 if (!follow_fork ())
1805 {
1806 /* The target for some reason decided not to resume. */
1807 normal_stop ();
1808 return;
1809 }
1810
1811 regcache = get_current_regcache ();
1812 gdbarch = get_regcache_arch (regcache);
1813 aspace = get_regcache_aspace (regcache);
1814 pc = regcache_read_pc (regcache);
1815
1816 if (step > 0)
1817 step_start_function = find_pc_function (pc);
1818 if (step < 0)
1819 stop_after_trap = 1;
1820
1821 if (addr == (CORE_ADDR) -1)
1822 {
1823 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
1824 && execution_direction != EXEC_REVERSE)
1825 /* There is a breakpoint at the address we will resume at;
1826 step one instruction before inserting breakpoints so that
1827 we do not stop right away (and report a second hit at this
1828 breakpoint).
1829
1830 Note, we don't do this in reverse, because we won't
1831 actually be executing the breakpoint insn anyway.
1832 We'll be (un-)executing the previous instruction. */
1833
1834 oneproc = 1;
1835 else if (gdbarch_single_step_through_delay_p (gdbarch)
1836 && gdbarch_single_step_through_delay (gdbarch,
1837 get_current_frame ()))
1838 /* We stepped onto an instruction that needs to be stepped
1839 again before re-inserting the breakpoint; do so. */
1840 oneproc = 1;
1841 }
1842 else
1843 {
1844 regcache_write_pc (regcache, addr);
1845 }
1846
1847 if (debug_infrun)
1848 fprintf_unfiltered (gdb_stdlog,
1849 "infrun: proceed (addr=%s, signal=%d, step=%d)\n",
1850 paddress (gdbarch, addr), siggnal, step);
1851
1852 /* We're handling a live event, so make sure we're doing live
1853 debugging. If we're looking at traceframes while the target is
1854 running, we're going to need to get back to that mode after
1855 handling the event. */
1856 if (non_stop)
1857 {
1858 make_cleanup_restore_current_traceframe ();
1859 set_traceframe_number (-1);
1860 }
1861
1862 if (non_stop)
1863 /* In non-stop, each thread is handled individually. The context
1864 must already be set to the right thread here. */
1865 ;
1866 else
1867 {
1868 /* In a multi-threaded task we may select another thread and
1869 then continue or step.
1870
1871 But if the old thread was stopped at a breakpoint, resuming
1872 would immediately report another hit of that breakpoint without
1873 any execution (i.e. it would report a breakpoint hit incorrectly).
1874 So we must step over it first.
1875
1876 prepare_to_proceed checks the current thread against the
1877 thread that reported the most recent event. If a step-over
1878 is required it returns TRUE and sets the current thread to
1879 the old thread. */
1880 if (prepare_to_proceed (step))
1881 oneproc = 1;
1882 }
1883
1884 /* prepare_to_proceed may change the current thread. */
1885 tp = inferior_thread ();
1886
1887 if (oneproc)
1888 {
1889 tp->trap_expected = 1;
1890 /* If displaced stepping is enabled, we can step over the
1891 breakpoint without hitting it, so leave all breakpoints
1892 inserted. Otherwise we need to disable all breakpoints, step
1893 one instruction, and then re-add them when that step is
1894 finished. */
1895 if (!use_displaced_stepping (gdbarch))
1896 remove_breakpoints ();
1897 }
1898
1899 /* We can insert breakpoints if we're not trying to step over one,
1900 or if we are stepping over one but we're using displaced stepping
1901 to do so. */
1902 if (! tp->trap_expected || use_displaced_stepping (gdbarch))
1903 insert_breakpoints ();
1904
1905 if (!non_stop)
1906 {
1907 /* Pass the last stop signal to the thread we're resuming,
1908 irrespective of whether the current thread is the thread that
1909 got the last event or not. This was historically GDB's
1910 behaviour before keeping a stop_signal per thread. */
1911
1912 struct thread_info *last_thread;
1913 ptid_t last_ptid;
1914 struct target_waitstatus last_status;
1915
1916 get_last_target_status (&last_ptid, &last_status);
1917 if (!ptid_equal (inferior_ptid, last_ptid)
1918 && !ptid_equal (last_ptid, null_ptid)
1919 && !ptid_equal (last_ptid, minus_one_ptid))
1920 {
1921 last_thread = find_thread_ptid (last_ptid);
1922 if (last_thread)
1923 {
1924 tp->stop_signal = last_thread->stop_signal;
1925 last_thread->stop_signal = TARGET_SIGNAL_0;
1926 }
1927 }
1928 }
1929
1930 if (siggnal != TARGET_SIGNAL_DEFAULT)
1931 tp->stop_signal = siggnal;
1932 /* If this signal should not be seen by the program,
1933 give it zero. Used for debugging signals. */
1934 else if (!signal_program[tp->stop_signal])
1935 tp->stop_signal = TARGET_SIGNAL_0;
1936
1937 annotate_starting ();
1938
1939 /* Make sure that output from GDB appears before output from the
1940 inferior. */
1941 gdb_flush (gdb_stdout);
1942
1943 /* Refresh prev_pc value just prior to resuming. This used to be
1944 done in stop_stepping; however, setting prev_pc there did not handle
1945 scenarios such as inferior function calls or returning from
1946 a function via the return command. In those cases, the prev_pc
1947 value was not set properly for subsequent commands. The prev_pc value
1948 is used to initialize the starting line number in the ecs. With an
1949 invalid value, the gdb next command ends up stopping at the position
1950 represented by the next line table entry past our start position.
1951 On platforms that generate one line table entry per line, this
1952 is not a problem. However, on the ia64, the compiler generates
1953 extraneous line table entries that do not increase the line number.
1954 When we issue the gdb next command on the ia64 after an inferior call
1955 or a return command, we often end up a few instructions forward, still
1956 within the original line we started in.
1957
1958 An attempt was made to refresh the prev_pc at the same time the
1959 execution_control_state is initialized (for instance, just before
1960 waiting for an inferior event). But this approach did not work
1961 because of platforms that use ptrace, where the pc register cannot
1962 be read unless the inferior is stopped. At that point, we are not
1963 guaranteed the inferior is stopped and so the regcache_read_pc() call
1964 can fail. Setting the prev_pc value here ensures the value is updated
1965 correctly when the inferior is stopped. */
1966 tp->prev_pc = regcache_read_pc (get_current_regcache ());
1967
1968 /* Fill in with reasonable starting values. */
1969 init_thread_stepping_state (tp);
1970
1971 /* Reset to normal state. */
1972 init_infwait_state ();
1973
1974 /* Resume inferior. */
1975 resume (oneproc || step || bpstat_should_step (), tp->stop_signal);
1976
1977 /* Wait for it to stop (if not standalone)
1978 and in any case decode why it stopped, and act accordingly. */
1979 /* Do this only if we are not using the event loop, or if the target
1980 does not support asynchronous execution. */
1981 if (!target_can_async_p ())
1982 {
1983 wait_for_inferior (0);
1984 normal_stop ();
1985 }
1986 }
1987 \f
1988
1989 /* Start remote-debugging of a machine over a serial link. */
1990
1991 void
1992 start_remote (int from_tty)
1993 {
1994 struct inferior *inferior;
1995 init_wait_for_inferior ();
1996
1997 inferior = current_inferior ();
1998 inferior->stop_soon = STOP_QUIETLY_REMOTE;
1999
2000 /* Always go on waiting for the target, regardless of the mode. */
2001 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2002 indicate to wait_for_inferior that a target should timeout if
2003 nothing is returned (instead of just blocking). Because of this,
2004 targets expecting an immediate response need to, internally, set
2005 things up so that the target_wait() is forced to eventually
2006 timeout. */
2007 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2008 differentiate to its caller what the state of the target is after
2009 the initial open has been performed. Here we're assuming that
2010 the target has stopped. It should be possible to eventually have
2011 target_open() return to the caller an indication that the target
2012 is currently running and GDB state should be set to the same as
2013 for an async run. */
2014 wait_for_inferior (0);
2015
2016 /* Now that the inferior has stopped, do any bookkeeping like
2017 loading shared libraries. We want to do this before normal_stop,
2018 so that the displayed frame is up to date. */
2019 post_create_inferior (&current_target, from_tty);
2020
2021 normal_stop ();
2022 }
2023
2024 /* Initialize static vars when a new inferior begins. */
2025
2026 void
2027 init_wait_for_inferior (void)
2028 {
2029 /* These are meaningless until the first time through wait_for_inferior. */
2030
2031 breakpoint_init_inferior (inf_starting);
2032
2033 clear_proceed_status ();
2034
2035 stepping_past_singlestep_breakpoint = 0;
2036 deferred_step_ptid = null_ptid;
2037
2038 target_last_wait_ptid = minus_one_ptid;
2039
2040 previous_inferior_ptid = null_ptid;
2041 init_infwait_state ();
2042
2043 /* Discard any skipped inlined frames. */
2044 clear_inline_frame_state (minus_one_ptid);
2045 }
2046
2047 \f
2048 /* This enum encodes possible reasons for doing a target_wait, so that
2049 wfi can call target_wait in one place. (Ultimately the call will be
2050 moved out of the infinite loop entirely.) */
2051
2052 enum infwait_states
2053 {
2054 infwait_normal_state,
2055 infwait_thread_hop_state,
2056 infwait_step_watch_state,
2057 infwait_nonstep_watch_state
2058 };
2059
2060 /* Why did the inferior stop? Used to print the appropriate messages
2061 to the interface from within handle_inferior_event(). */
2062 enum inferior_stop_reason
2063 {
2064 /* Step, next, nexti, stepi finished. */
2065 END_STEPPING_RANGE,
2066 /* Inferior terminated by signal. */
2067 SIGNAL_EXITED,
2068 /* Inferior exited. */
2069 EXITED,
2070 /* Inferior received signal, and user asked to be notified. */
2071 SIGNAL_RECEIVED,
2072 /* Reverse execution -- target ran out of history info. */
2073 NO_HISTORY
2074 };
2075
2076 /* The PTID we'll do a target_wait on. */
2077 ptid_t waiton_ptid;
2078
2079 /* Current inferior wait state. */
2080 enum infwait_states infwait_state;
2081
2082 /* Data to be passed around while handling an event. This data is
2083 discarded between events. */
2084 struct execution_control_state
2085 {
2086 ptid_t ptid;
2087 /* The thread that got the event, if this was a thread event; NULL
2088 otherwise. */
2089 struct thread_info *event_thread;
2090
2091 struct target_waitstatus ws;
2092 int random_signal;
2093 CORE_ADDR stop_func_start;
2094 CORE_ADDR stop_func_end;
2095 char *stop_func_name;
2096 int new_thread_event;
2097 int wait_some_more;
2098 };
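/* A sketch of how this structure is typically consumed within this
   file (not a new interface; it is the recurring pattern used by
   wait_for_inferior and fetch_inferior_event below):

     struct execution_control_state ecss;
     struct execution_control_state *ecs = &ecss;

     memset (ecs, 0, sizeof (*ecs));
     ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
     handle_inferior_event (ecs);
     if (!ecs->wait_some_more)
       normal_stop ();

   The state is zeroed, filled in from target_wait, handed to
   handle_inferior_event, and discarded once no more events are
   expected.  */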
2099
2100 static void handle_inferior_event (struct execution_control_state *ecs);
2101
2102 static void handle_step_into_function (struct gdbarch *gdbarch,
2103 struct execution_control_state *ecs);
2104 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2105 struct execution_control_state *ecs);
2106 static void insert_step_resume_breakpoint_at_frame (struct frame_info *step_frame);
2107 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
2108 static void insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
2109 struct symtab_and_line sr_sal,
2110 struct frame_id sr_id);
2111 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
2112
2113 static void stop_stepping (struct execution_control_state *ecs);
2114 static void prepare_to_wait (struct execution_control_state *ecs);
2115 static void keep_going (struct execution_control_state *ecs);
2116 static void print_stop_reason (enum inferior_stop_reason stop_reason,
2117 int stop_info);
2118
2119 /* Callback for iterate_over_threads. If the thread is stopped, but
2120 the user/frontend doesn't know about that yet, go through
2121 normal_stop, as if the thread had just stopped now. ARG points at
2122 a ptid. If PTID is MINUS_ONE_PTID, this applies to all threads. If
2123 ptid_is_pid(PTID) is true, it applies to all threads of the process
2124 identified by PTID. Otherwise, it applies only to the thread PTID
2125 points to. */
2126
2127 static int
2128 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2129 {
2130 ptid_t ptid = * (ptid_t *) arg;
2131
2132 if ((ptid_equal (info->ptid, ptid)
2133 || ptid_equal (minus_one_ptid, ptid)
2134 || (ptid_is_pid (ptid)
2135 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2136 && is_running (info->ptid)
2137 && !is_executing (info->ptid))
2138 {
2139 struct cleanup *old_chain;
2140 struct execution_control_state ecss;
2141 struct execution_control_state *ecs = &ecss;
2142
2143 memset (ecs, 0, sizeof (*ecs));
2144
2145 old_chain = make_cleanup_restore_current_thread ();
2146
2147 switch_to_thread (info->ptid);
2148
2149 /* Go through handle_inferior_event/normal_stop, so we always
2150 have consistent output as if the stop event had been
2151 reported. */
2152 ecs->ptid = info->ptid;
2153 ecs->event_thread = find_thread_ptid (info->ptid);
2154 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2155 ecs->ws.value.sig = TARGET_SIGNAL_0;
2156
2157 handle_inferior_event (ecs);
2158
2159 if (!ecs->wait_some_more)
2160 {
2161 struct thread_info *tp;
2162
2163 normal_stop ();
2164
2165 /* Finish off the continuations. The continuations
2166 themselves are responsible for realising the thread
2167 didn't finish what it was supposed to do. */
2168 tp = inferior_thread ();
2169 do_all_intermediate_continuations_thread (tp);
2170 do_all_continuations_thread (tp);
2171 }
2172
2173 do_cleanups (old_chain);
2174 }
2175
2176 return 0;
2177 }
2178
2179 /* This function is attached as a "thread_stop_requested" observer.
2180 Clean up local state that assumed the PTID was to be resumed, and
2181 report the stop to the frontend. */
2182
2183 static void
2184 infrun_thread_stop_requested (ptid_t ptid)
2185 {
2186 struct displaced_step_inferior_state *displaced;
2187
2188 /* PTID was requested to stop. Remove it from the displaced
2189 stepping queue, so we don't try to resume it automatically. */
2190
2191 for (displaced = displaced_step_inferior_states;
2192 displaced;
2193 displaced = displaced->next)
2194 {
2195 struct displaced_step_request *it, **prev_next_p;
2196
2197 it = displaced->step_request_queue;
2198 prev_next_p = &displaced->step_request_queue;
2199 while (it)
2200 {
2201 if (ptid_match (it->ptid, ptid))
2202 {
2203 *prev_next_p = it->next;
2204 it->next = NULL;
2205 xfree (it);
2206 }
2207 else
2208 {
2209 prev_next_p = &it->next;
2210 }
2211
2212 it = *prev_next_p;
2213 }
2214 }
2215
2216 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2217 }
2218
2219 static void
2220 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2221 {
2222 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2223 nullify_last_target_wait_ptid ();
2224 }
2225
2226 /* Callback for iterate_over_threads. */
2227
2228 static int
2229 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2230 {
2231 if (is_exited (info->ptid))
2232 return 0;
2233
2234 delete_step_resume_breakpoint (info);
2235 return 0;
2236 }
2237
2238 /* In all-stop, delete the step resume breakpoint of any thread that
2239 had one. In non-stop, delete the step resume breakpoint of the
2240 thread that just stopped. */
2241
2242 static void
2243 delete_step_thread_step_resume_breakpoint (void)
2244 {
2245 if (!target_has_execution
2246 || ptid_equal (inferior_ptid, null_ptid))
2247 /* If the inferior has exited, we have already deleted the step
2248 resume breakpoints out of GDB's lists. */
2249 return;
2250
2251 if (non_stop)
2252 {
2253 /* If in non-stop mode, only delete the step-resume or
2254 longjmp-resume breakpoint of the thread that just stopped
2255 stepping. */
2256 struct thread_info *tp = inferior_thread ();
2257 delete_step_resume_breakpoint (tp);
2258 }
2259 else
2260 /* In all-stop mode, delete all step-resume and longjmp-resume
2261 breakpoints of any thread that had them. */
2262 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2263 }
2264
2265 /* A cleanup wrapper. */
2266
2267 static void
2268 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2269 {
2270 delete_step_thread_step_resume_breakpoint ();
2271 }
2272
2273 /* Pretty print the results of target_wait, for debugging purposes. */
2274
2275 static void
2276 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2277 const struct target_waitstatus *ws)
2278 {
2279 char *status_string = target_waitstatus_to_string (ws);
2280 struct ui_file *tmp_stream = mem_fileopen ();
2281 char *text;
2282
2283 /* The text is split over several lines because it was getting too long.
2284 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2285 output as a unit; we want only one timestamp printed if debug_timestamp
2286 is set. */
2287
2288 fprintf_unfiltered (tmp_stream,
2289 "infrun: target_wait (%d", PIDGET (waiton_ptid));
2290 if (PIDGET (waiton_ptid) != -1)
2291 fprintf_unfiltered (tmp_stream,
2292 " [%s]", target_pid_to_str (waiton_ptid));
2293 fprintf_unfiltered (tmp_stream, ", status) =\n");
2294 fprintf_unfiltered (tmp_stream,
2295 "infrun: %d [%s],\n",
2296 PIDGET (result_ptid), target_pid_to_str (result_ptid));
2297 fprintf_unfiltered (tmp_stream,
2298 "infrun: %s\n",
2299 status_string);
2300
2301 text = ui_file_xstrdup (tmp_stream, NULL);
2302
2303 /* This uses %s in part to handle %'s in the text, but also to avoid
2304 a gcc error: the format attribute requires a string literal. */
2305 fprintf_unfiltered (gdb_stdlog, "%s", text);
2306
2307 xfree (status_string);
2308 xfree (text);
2309 ui_file_delete (tmp_stream);
2310 }
2311
2312 /* Prepare and stabilize the inferior for detaching it. E.g.,
2313 detaching while a thread is displaced stepping is a recipe for
2314 crashing it, as nothing would readjust the PC out of the scratch
2315 pad. */
2316
2317 void
2318 prepare_for_detach (void)
2319 {
2320 struct inferior *inf = current_inferior ();
2321 ptid_t pid_ptid = pid_to_ptid (inf->pid);
2322 struct cleanup *old_chain_1;
2323 struct displaced_step_inferior_state *displaced;
2324
2325 displaced = get_displaced_stepping_state (inf->pid);
2326
2327 /* Is any thread of this process displaced stepping? If not,
2328 there's nothing else to do. */
2329 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
2330 return;
2331
2332 if (debug_infrun)
2333 fprintf_unfiltered (gdb_stdlog,
2334 "displaced-stepping in-process while detaching");
2335
2336 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
2337 inf->detaching = 1;
2338
2339 while (!ptid_equal (displaced->step_ptid, null_ptid))
2340 {
2341 struct cleanup *old_chain_2;
2342 struct execution_control_state ecss;
2343 struct execution_control_state *ecs;
2344
2345 ecs = &ecss;
2346 memset (ecs, 0, sizeof (*ecs));
2347
2348 overlay_cache_invalid = 1;
2349
2350 /* We have to invalidate the registers BEFORE calling
2351 target_wait because they can be loaded from the target while
2352 in target_wait. This makes remote debugging a bit more
2353 efficient for those targets that provide critical registers
2354 as part of their normal status mechanism. */
2355
2356 registers_changed ();
2357
2358 if (deprecated_target_wait_hook)
2359 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
2360 else
2361 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
2362
2363 if (debug_infrun)
2364 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
2365
2366 /* If an error happens while handling the event, propagate GDB's
2367 knowledge of the executing state to the frontend/user running
2368 state. */
2369 old_chain_2 = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2370
2371 /* In non-stop mode, each thread is handled individually.
2372 Switch early, so the global state is set correctly for this
2373 thread. */
2374 if (non_stop
2375 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2376 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2377 context_switch (ecs->ptid);
2378
2379 /* Now figure out what to do with the result. */
2380 handle_inferior_event (ecs);
2381
2382 /* No error, don't finish the state yet. */
2383 discard_cleanups (old_chain_2);
2384
2385 /* Breakpoints and watchpoints are not installed on the target
2386 at this point, and signals are passed directly to the
2387 inferior, so this must mean the process is gone. */
2388 if (!ecs->wait_some_more)
2389 {
2390 discard_cleanups (old_chain_1);
2391 error (_("Program exited while detaching"));
2392 }
2393 }
2394
2395 discard_cleanups (old_chain_1);
2396 }
2397
2398 /* Wait for control to return from inferior to debugger.
2399
2400 If TREAT_EXEC_AS_SIGTRAP is non-zero, then handle EXEC signals
2401 as if they were SIGTRAP signals. This can be useful during
2402 the startup sequence on some targets such as HP/UX, where
2403 we receive an EXEC event instead of the expected SIGTRAP.
2404
2405 If the inferior gets a signal, we may decide to start it up again
2406 instead of returning. That is why there is a loop in this function.
2407 When this function actually returns it means the inferior
2408 should be left stopped and GDB should read more commands. */
2409
2410 void
2411 wait_for_inferior (int treat_exec_as_sigtrap)
2412 {
2413 struct cleanup *old_cleanups;
2414 struct execution_control_state ecss;
2415 struct execution_control_state *ecs;
2416
2417 if (debug_infrun)
2418 fprintf_unfiltered
2419 (gdb_stdlog, "infrun: wait_for_inferior (treat_exec_as_sigtrap=%d)\n",
2420 treat_exec_as_sigtrap);
2421
2422 old_cleanups =
2423 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2424
2425 ecs = &ecss;
2426 memset (ecs, 0, sizeof (*ecs));
2427
2428 /* We'll update this if & when we switch to a new thread. */
2429 previous_inferior_ptid = inferior_ptid;
2430
2431 while (1)
2432 {
2433 struct cleanup *old_chain;
2434
2435 /* We have to invalidate the registers BEFORE calling target_wait
2436 because they can be loaded from the target while in target_wait.
2437 This makes remote debugging a bit more efficient for those
2438 targets that provide critical registers as part of their normal
2439 status mechanism. */
2440
2441 overlay_cache_invalid = 1;
2442 registers_changed ();
2443
2444 if (deprecated_target_wait_hook)
2445 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2446 else
2447 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2448
2449 if (debug_infrun)
2450 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2451
2452 if (treat_exec_as_sigtrap && ecs->ws.kind == TARGET_WAITKIND_EXECD)
2453 {
2454 xfree (ecs->ws.value.execd_pathname);
2455 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2456 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
2457 }
2458
2459 /* If an error happens while handling the event, propagate GDB's
2460 knowledge of the executing state to the frontend/user running
2461 state. */
2462 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2463
2464 if (ecs->ws.kind == TARGET_WAITKIND_SYSCALL_ENTRY
2465 || ecs->ws.kind == TARGET_WAITKIND_SYSCALL_RETURN)
2466 ecs->ws.value.syscall_number = UNKNOWN_SYSCALL;
2467
2468 /* Now figure out what to do with the result. */
2469 handle_inferior_event (ecs);
2470
2471 /* No error, don't finish the state yet. */
2472 discard_cleanups (old_chain);
2473
2474 if (!ecs->wait_some_more)
2475 break;
2476 }
2477
2478 do_cleanups (old_cleanups);
2479 }
2480
2481 /* Asynchronous version of wait_for_inferior. It is called by the
2482 event loop whenever a change of state is detected on the file
2483 descriptor corresponding to the target. It can be called more than
2484 once to complete a single execution command. In such cases we need
2485 to keep the state in a global variable ECSS. If it is the last time
2486 that this function is called for a single execution command, then
2487 report to the user that the inferior has stopped, and do the
2488 necessary cleanups. */
2489
2490 void
2491 fetch_inferior_event (void *client_data)
2492 {
2493 struct execution_control_state ecss;
2494 struct execution_control_state *ecs = &ecss;
2495 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2496 struct cleanup *ts_old_chain;
2497 int was_sync = sync_execution;
2498
2499 memset (ecs, 0, sizeof (*ecs));
2500
2501 /* We'll update this if & when we switch to a new thread. */
2502 previous_inferior_ptid = inferior_ptid;
2503
2504 if (non_stop)
2505 /* In non-stop mode, the user/frontend should not notice a thread
2506 switch due to internal events. Make sure we revert to the
2507 user selected thread and frame after handling the event and
2508 running any breakpoint commands. */
2509 make_cleanup_restore_current_thread ();
2510
2511 /* We have to invalidate the registers BEFORE calling target_wait
2512 because they can be loaded from the target while in target_wait.
2513 This makes remote debugging a bit more efficient for those
2514 targets that provide critical registers as part of their normal
2515 status mechanism. */
2516
2517 overlay_cache_invalid = 1;
2518 registers_changed ();
2519
2520 if (deprecated_target_wait_hook)
2521 ecs->ptid =
2522 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2523 else
2524 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2525
2526 if (debug_infrun)
2527 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2528
2529 if (non_stop
2530 && ecs->ws.kind != TARGET_WAITKIND_IGNORE
2531 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2532 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2533 /* In non-stop mode, each thread is handled individually. Switch
2534 early, so the global state is set correctly for this
2535 thread. */
2536 context_switch (ecs->ptid);
2537
2538 /* If an error happens while handling the event, propagate GDB's
2539 knowledge of the executing state to the frontend/user running
2540 state. */
2541 if (!non_stop)
2542 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2543 else
2544 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2545
2546 /* Now figure out what to do with the result. */
2547 handle_inferior_event (ecs);
2548
2549 if (!ecs->wait_some_more)
2550 {
2551 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2552
2553 delete_step_thread_step_resume_breakpoint ();
2554
2555 /* We may not find an inferior if this was a process exit. */
2556 if (inf == NULL || inf->stop_soon == NO_STOP_QUIETLY)
2557 normal_stop ();
2558
2559 if (target_has_execution
2560 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2561 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2562 && ecs->event_thread->step_multi
2563 && ecs->event_thread->stop_step)
2564 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2565 else
2566 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2567 }
2568
2569 /* No error, don't finish the thread states yet. */
2570 discard_cleanups (ts_old_chain);
2571
2572 /* Revert thread and frame. */
2573 do_cleanups (old_chain);
2574
2575 /* If the inferior was in sync execution mode, and now isn't,
2576 restore the prompt. */
2577 if (was_sync && !sync_execution)
2578 display_gdb_prompt (0);
2579 }
2580
2581 /* Record the frame and location we're currently stepping through. */
2582 void
2583 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2584 {
2585 struct thread_info *tp = inferior_thread ();
2586
2587 tp->step_frame_id = get_frame_id (frame);
2588 tp->step_stack_frame_id = get_stack_frame_id (frame);
2589
2590 tp->current_symtab = sal.symtab;
2591 tp->current_line = sal.line;
2592 }
2593
2594 /* Clear context switchable stepping state. */
2595
2596 void
2597 init_thread_stepping_state (struct thread_info *tss)
2598 {
2599 tss->stepping_over_breakpoint = 0;
2600 tss->step_after_step_resume_breakpoint = 0;
2601 tss->stepping_through_solib_after_catch = 0;
2602 tss->stepping_through_solib_catchpoints = NULL;
2603 }
2604
2605 /* Return the cached copy of the last pid/waitstatus returned by
2606 target_wait()/deprecated_target_wait_hook(). The data is actually
2607 cached by handle_inferior_event(), which gets called immediately
2608 after target_wait()/deprecated_target_wait_hook(). */
2609
2610 void
2611 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
2612 {
2613 *ptidp = target_last_wait_ptid;
2614 *status = target_last_waitstatus;
2615 }
2616
2617 void
2618 nullify_last_target_wait_ptid (void)
2619 {
2620 target_last_wait_ptid = minus_one_ptid;
2621 }
2622
2623 /* Switch thread contexts. */
2624
2625 static void
2626 context_switch (ptid_t ptid)
2627 {
2628 if (debug_infrun)
2629 {
2630 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
2631 target_pid_to_str (inferior_ptid));
2632 fprintf_unfiltered (gdb_stdlog, "to %s\n",
2633 target_pid_to_str (ptid));
2634 }
2635
2636 switch_to_thread (ptid);
2637 }
2638
2639 static void
2640 adjust_pc_after_break (struct execution_control_state *ecs)
2641 {
2642 struct regcache *regcache;
2643 struct gdbarch *gdbarch;
2644 struct address_space *aspace;
2645 CORE_ADDR breakpoint_pc;
2646
2647 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
2648 we aren't, just return.
2649
2650 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
2651 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
2652 implemented by software breakpoints should be handled through the normal
2653 breakpoint layer.
2654
2655 NOTE drow/2004-01-31: On some targets, breakpoints may generate
2656 different signals (SIGILL or SIGEMT for instance), but it is less
2657 clear where the PC is pointing afterwards. It may not match
2658 gdbarch_decr_pc_after_break. I don't know any specific target that
2659 generates these signals at breakpoints (the code has been in GDB since at
2660 least 1992) so I can not guess how to handle them here.
2661
2662 In earlier versions of GDB, a target with
2663 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
2664 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
2665 target with both of these set in GDB history, and it seems unlikely to be
2666 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
2667
2668 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
2669 return;
2670
2671 if (ecs->ws.value.sig != TARGET_SIGNAL_TRAP)
2672 return;
2673
2674 /* In reverse execution, when a breakpoint is hit, the instruction
2675 under it has already been de-executed. The reported PC always
2676 points at the breakpoint address, so adjusting it further would
2677 be wrong. E.g., consider this case on a decr_pc_after_break == 1
2678 architecture:
2679
2680 B1 0x08000000 : INSN1
2681 B2 0x08000001 : INSN2
2682 0x08000002 : INSN3
2683 PC -> 0x08000003 : INSN4
2684
2685 Say you're stopped at 0x08000003 as above. Reverse continuing
2686 from that point should hit B2 as below. Reading the PC when the
2687 SIGTRAP is reported should read 0x08000001 and INSN2 should have
2688 been de-executed already.
2689
2690 B1 0x08000000 : INSN1
2691 B2 PC -> 0x08000001 : INSN2
2692 0x08000002 : INSN3
2693 0x08000003 : INSN4
2694
2695 We can't apply the same logic as for forward execution, because
2696 we would wrongly adjust the PC to 0x08000000, since there's a
2697 breakpoint at PC - 1. We'd then report a hit on B1, although
2698 INSN1 hadn't been de-executed yet. Doing nothing is the correct
2699 behaviour. */
2700 if (execution_direction == EXEC_REVERSE)
2701 return;
2702
2703 /* If this target does not decrement the PC after breakpoints, then
2704 we have nothing to do. */
2705 regcache = get_thread_regcache (ecs->ptid);
2706 gdbarch = get_regcache_arch (regcache);
2707 if (gdbarch_decr_pc_after_break (gdbarch) == 0)
2708 return;
2709
2710 aspace = get_regcache_aspace (regcache);
2711
2712 /* Find the location where (if we've hit a breakpoint) the
2713 breakpoint would be. */
2714 breakpoint_pc = regcache_read_pc (regcache)
2715 - gdbarch_decr_pc_after_break (gdbarch);
2716
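/* As a concrete illustration (an assumption about a typical target,
   not something specific to any port handled here): on an
   architecture where gdbarch_decr_pc_after_break returns 1, such as
   x86, hitting a breakpoint instruction planted at 0x1000 leaves the
   reported PC at 0x1001, so BREAKPOINT_PC is computed back to 0x1000
   above.  */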
2717 /* Check whether there actually is a software breakpoint inserted at
2718 that location.
2719
2720 If in non-stop mode, a race condition is possible where we've
2721 removed a breakpoint, but stop events for that breakpoint were
2722 already queued and arrive later. To suppress those spurious
2723 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
2724 and retire them after a number of stop events are reported. */
2725 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
2726 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
2727 {
2728 struct cleanup *old_cleanups = NULL;
2729 if (RECORD_IS_USED)
2730 old_cleanups = record_gdb_operation_disable_set ();
2731
2732 /* When using hardware single-step, a SIGTRAP is reported for both
2733 a completed single-step and a software breakpoint. Need to
2734 differentiate between the two, as the latter needs adjusting
2735 but the former does not.
2736
2737 The SIGTRAP can be due to a completed hardware single-step only if
2738 - we didn't insert software single-step breakpoints
2739 - the thread to be examined is still the current thread
2740 - this thread is currently being stepped
2741
2742 If any of these events did not occur, we must have stopped due
2743 to hitting a software breakpoint, and have to back up to the
2744 breakpoint address.
2745
2746 As a special case, we could have hardware single-stepped a
2747 software breakpoint. In this case (prev_pc == breakpoint_pc),
2748 we also need to back up to the breakpoint address. */
2749
2750 if (singlestep_breakpoints_inserted_p
2751 || !ptid_equal (ecs->ptid, inferior_ptid)
2752 || !currently_stepping (ecs->event_thread)
2753 || ecs->event_thread->prev_pc == breakpoint_pc)
2754 regcache_write_pc (regcache, breakpoint_pc);
2755
2756 if (RECORD_IS_USED)
2757 do_cleanups (old_cleanups);
2758 }
2759 }
2760
2761 void
2762 init_infwait_state (void)
2763 {
2764 waiton_ptid = pid_to_ptid (-1);
2765 infwait_state = infwait_normal_state;
2766 }
2767
2768 void
2769 error_is_running (void)
2770 {
2771 error (_("\
2772 Cannot execute this command while the selected thread is running."));
2773 }
2774
2775 void
2776 ensure_not_running (void)
2777 {
2778 if (is_running (inferior_ptid))
2779 error_is_running ();
2780 }
2781
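/* Return non-zero if FRAME was stepped into from the frame identified
   by STEP_FRAME_ID, i.e. if STEP_FRAME_ID is found among FRAME's
   callers, walking outward through intervening inlined frames only.  */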
2782 static int
2783 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
2784 {
2785 for (frame = get_prev_frame (frame);
2786 frame != NULL;
2787 frame = get_prev_frame (frame))
2788 {
2789 if (frame_id_eq (get_frame_id (frame), step_frame_id))
2790 return 1;
2791 if (get_frame_type (frame) != INLINE_FRAME)
2792 break;
2793 }
2794
2795 return 0;
2796 }
2797
2798 /* Auxiliary function that handles syscall entry/return events.
2799 It returns 1 if the inferior should keep going (and GDB
2800 should ignore the event), or 0 if the event deserves to be
2801 processed. */
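/* For instance (an illustrative scenario, not an exhaustive
   description): if the user has set a "catch syscall" catchpoint
   matching the syscall just entered, catching_syscall_number returns
   non-zero below, the bpstat explains the stop, and we report it;
   otherwise the event is uninteresting and we silently keep_going.  */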
2802
2803 static int
2804 handle_syscall_event (struct execution_control_state *ecs)
2805 {
2806 struct regcache *regcache;
2807 struct gdbarch *gdbarch;
2808 int syscall_number;
2809
2810 if (!ptid_equal (ecs->ptid, inferior_ptid))
2811 context_switch (ecs->ptid);
2812
2813 regcache = get_thread_regcache (ecs->ptid);
2814 gdbarch = get_regcache_arch (regcache);
2815 syscall_number = gdbarch_get_syscall_number (gdbarch, ecs->ptid);
2816 stop_pc = regcache_read_pc (regcache);
2817
2818 target_last_waitstatus.value.syscall_number = syscall_number;
2819
2820 if (catch_syscall_enabled () > 0
2821 && catching_syscall_number (syscall_number) > 0)
2822 {
2823 if (debug_infrun)
2824 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
2825 syscall_number);
2826
2827 ecs->event_thread->stop_bpstat
2828 = bpstat_stop_status (get_regcache_aspace (regcache),
2829 stop_pc, ecs->ptid);
2830 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
2831
2832 if (!ecs->random_signal)
2833 {
2834 /* Catchpoint hit. */
2835 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
2836 return 0;
2837 }
2838 }
2839
2840 /* If no catchpoint triggered for this, then keep going. */
2841 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
2842 keep_going (ecs);
2843 return 1;
2844 }
2845
2846 /* Given an execution control state that has been freshly filled in
2847 by an event from the inferior, figure out what it means and take
2848 appropriate action. */
2849
2850 static void
2851 handle_inferior_event (struct execution_control_state *ecs)
2852 {
2853 struct frame_info *frame;
2854 struct gdbarch *gdbarch;
2855 int sw_single_step_trap_p = 0;
2856 int stopped_by_watchpoint;
2857 int stepped_after_stopped_by_watchpoint = 0;
2858 struct symtab_and_line stop_pc_sal;
2859 enum stop_kind stop_soon;
2860
2861 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
2862 {
2863 /* We had an event in the inferior, but we are not interested in
2864 handling it at this level. The lower layers have already
2865 done what needs to be done, if anything.
2866
2867 One of the possible circumstances for this is when the
2868 inferior produces output for the console. The inferior has
2869 not stopped, and we are ignoring the event. Another possible
2870 circumstance is any event which the lower level knows will be
2871 reported multiple times without an intervening resume. */
2872 if (debug_infrun)
2873 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
2874 prepare_to_wait (ecs);
2875 return;
2876 }
2877
2878 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
2879 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2880 {
2881 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2882 gdb_assert (inf);
2883 stop_soon = inf->stop_soon;
2884 }
2885 else
2886 stop_soon = NO_STOP_QUIETLY;
2887
2888 /* Cache the last pid/waitstatus. */
2889 target_last_wait_ptid = ecs->ptid;
2890 target_last_waitstatus = ecs->ws;
2891
2892 /* Always clear state belonging to the previous time we stopped. */
2893 stop_stack_dummy = STOP_NONE;
2894
2895 /* If it's a new process, add it to the thread database. */
2896
2897 ecs->new_thread_event = (!ptid_equal (ecs->ptid, inferior_ptid)
2898 && !ptid_equal (ecs->ptid, minus_one_ptid)
2899 && !in_thread_list (ecs->ptid));
2900
2901 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
2902 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED && ecs->new_thread_event)
2903 add_thread (ecs->ptid);
2904
2905 ecs->event_thread = find_thread_ptid (ecs->ptid);
2906
2907 /* Dependent on valid ECS->EVENT_THREAD. */
2908 adjust_pc_after_break (ecs);
2909
2910 /* Dependent on the current PC value modified by adjust_pc_after_break. */
2911 reinit_frame_cache ();
2912
2913 breakpoint_retire_moribund ();
2914
2915 /* First, distinguish signals caused by the debugger from signals
2916 that have to do with the program's own actions. Note that
2917 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
2918 on the operating system version. Here we detect when a SIGILL or
2919 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
2920 something similar for SIGSEGV, since a SIGSEGV will be generated
2921 when we're trying to execute a breakpoint instruction on a
2922 non-executable stack. This happens for call dummy breakpoints
2923 for architectures like SPARC that place call dummies on the
2924 stack. */
2925 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
2926 && (ecs->ws.value.sig == TARGET_SIGNAL_ILL
2927 || ecs->ws.value.sig == TARGET_SIGNAL_SEGV
2928 || ecs->ws.value.sig == TARGET_SIGNAL_EMT))
2929 {
2930 struct regcache *regcache = get_thread_regcache (ecs->ptid);
2931
2932 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
2933 regcache_read_pc (regcache)))
2934 {
2935 if (debug_infrun)
2936 fprintf_unfiltered (gdb_stdlog,
2937 "infrun: Treating signal as SIGTRAP\n");
2938 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
2939 }
2940 }
2941
2942 /* Mark the non-executing threads accordingly. In all-stop, all
2943 threads of all processes are stopped when we get any event
2944 reported. In non-stop mode, only the event thread stops. If
2945 we're handling a process exit in non-stop mode, there's nothing
2946 to do, as threads of the dead process are gone, and threads of
2947 any other process were left running. */
2948 if (!non_stop)
2949 set_executing (minus_one_ptid, 0);
2950 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2951 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
2952 set_executing (inferior_ptid, 0);
2953
2954 switch (infwait_state)
2955 {
2956 case infwait_thread_hop_state:
2957 if (debug_infrun)
2958 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_thread_hop_state\n");
2959 break;
2960
2961 case infwait_normal_state:
2962 if (debug_infrun)
2963 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
2964 break;
2965
2966 case infwait_step_watch_state:
2967 if (debug_infrun)
2968 fprintf_unfiltered (gdb_stdlog,
2969 "infrun: infwait_step_watch_state\n");
2970
2971 stepped_after_stopped_by_watchpoint = 1;
2972 break;
2973
2974 case infwait_nonstep_watch_state:
2975 if (debug_infrun)
2976 fprintf_unfiltered (gdb_stdlog,
2977 "infrun: infwait_nonstep_watch_state\n");
2978 insert_breakpoints ();
2979
2980 /* FIXME-maybe: is this cleaner than setting a flag? Does it
2981 handle things like signals arriving and other things happening
2982 in combination correctly? */
2983 stepped_after_stopped_by_watchpoint = 1;
2984 break;
2985
2986 default:
2987 internal_error (__FILE__, __LINE__, _("bad switch"));
2988 }
2989
2990 infwait_state = infwait_normal_state;
2991 waiton_ptid = pid_to_ptid (-1);
2992
2993 switch (ecs->ws.kind)
2994 {
2995 case TARGET_WAITKIND_LOADED:
2996 if (debug_infrun)
2997 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
2998 /* Ignore gracefully during startup of the inferior, as it might
2999 be the shell which has just loaded some objects; otherwise
3000 add the symbols for the newly loaded objects. Also ignore at
3001 the beginning of an attach or remote session; we will query
3002 the full list of libraries once the connection is
3003 established. */
3004 if (stop_soon == NO_STOP_QUIETLY)
3005 {
3006 /* Check for any newly added shared libraries if we're
3007 supposed to be adding them automatically. Switch
3008 terminal for any messages produced by
3009 breakpoint_re_set. */
3010 target_terminal_ours_for_output ();
3011 /* NOTE: cagney/2003-11-25: Make certain that the target
3012 stack's section table is kept up-to-date. Architectures
3013 (e.g., PPC64) use the section table to perform
3014 operations such as address => section name and hence
3015 require the table to contain all sections (including
3016 those found in shared libraries). */
3017 #ifdef SOLIB_ADD
3018 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
3019 #else
3020 solib_add (NULL, 0, &current_target, auto_solib_add);
3021 #endif
3022 target_terminal_inferior ();
3023
3024 /* If requested, stop when the dynamic linker notifies
3025 gdb of events. This allows the user to get control
3026 and place breakpoints in initializer routines for
3027 dynamically loaded objects (among other things). */
3028 if (stop_on_solib_events)
3029 {
3030 /* Make sure we print "Stopped due to solib-event" in
3031 normal_stop. */
3032 stop_print_frame = 1;
3033
3034 stop_stepping (ecs);
3035 return;
3036 }
3037
3038 /* NOTE drow/2007-05-11: This might be a good place to check
3039 for "catch load". */
3040 }
3041
3042 /* If we are skipping through a shell, or through shared library
3043 loading that we aren't interested in, resume the program. If
3044 we're running the program normally, also resume. But stop if
3045 we're attaching or setting up a remote connection. */
3046 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3047 {
3048 /* Loading of shared libraries might have changed breakpoint
3049 addresses. Make sure new breakpoints are inserted. */
3050 if (stop_soon == NO_STOP_QUIETLY
3051 && !breakpoints_always_inserted_mode ())
3052 insert_breakpoints ();
3053 resume (0, TARGET_SIGNAL_0);
3054 prepare_to_wait (ecs);
3055 return;
3056 }
3057
3058 break;
3059
3060 case TARGET_WAITKIND_SPURIOUS:
3061 if (debug_infrun)
3062 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3063 resume (0, TARGET_SIGNAL_0);
3064 prepare_to_wait (ecs);
3065 return;
3066
3067 case TARGET_WAITKIND_EXITED:
3068 if (debug_infrun)
3069 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXITED\n");
3070 inferior_ptid = ecs->ptid;
3071 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3072 set_current_program_space (current_inferior ()->pspace);
3073 handle_vfork_child_exec_or_exit (0);
3074 target_terminal_ours (); /* Must do this before mourn anyway. */
3075 print_stop_reason (EXITED, ecs->ws.value.integer);
3076
3077 /* Record the exit code in the convenience variable $_exitcode, so
3078 that the user can inspect this again later. */
3079 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3080 (LONGEST) ecs->ws.value.integer);
3081 gdb_flush (gdb_stdout);
3082 target_mourn_inferior ();
3083 singlestep_breakpoints_inserted_p = 0;
3084 stop_print_frame = 0;
3085 stop_stepping (ecs);
3086 return;
3087
3088 case TARGET_WAITKIND_SIGNALLED:
3089 if (debug_infrun)
3090 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SIGNALLED\n");
3091 inferior_ptid = ecs->ptid;
3092 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3093 set_current_program_space (current_inferior ()->pspace);
3094 handle_vfork_child_exec_or_exit (0);
3095 stop_print_frame = 0;
3096 target_terminal_ours (); /* Must do this before mourn anyway. */
3097
3098 /* Note: By definition of TARGET_WAITKIND_SIGNALLED, we shouldn't
3099 reach here unless the inferior is dead. However, for years
3100 target_kill() was called here, which hints that fatal signals aren't
3101 really fatal on some systems. If that's true, then some changes
3102 may be needed. */
3103 target_mourn_inferior ();
3104
3105 print_stop_reason (SIGNAL_EXITED, ecs->ws.value.sig);
3106 singlestep_breakpoints_inserted_p = 0;
3107 stop_stepping (ecs);
3108 return;
3109
3110 /* The following are the only cases in which we keep going;
3111 the above cases end in a continue or goto. */
3112 case TARGET_WAITKIND_FORKED:
3113 case TARGET_WAITKIND_VFORKED:
3114 if (debug_infrun)
3115 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3116
3117 if (!ptid_equal (ecs->ptid, inferior_ptid))
3118 {
3119 context_switch (ecs->ptid);
3120 reinit_frame_cache ();
3121 }
3122
3123 /* Immediately detach breakpoints from the child before there's
3124 any chance of letting the user delete breakpoints from the
3125 breakpoint lists. If we don't do this early, it's easy to
3126 leave leftover traps in the child, viz: "break foo; catch
3127 fork; c; <fork>; del; c; <child calls foo>". We only follow
3128 the fork on the last `continue', and by that time the
3129 breakpoint at "foo" is long gone from the breakpoint table.
3130 If we vforked, then we don't need to unpatch here, since both
3131 parent and child are sharing the same memory pages; we'll
3132 need to unpatch at follow/detach time instead to be certain
3133 that new breakpoints added between catchpoint hit time and
3134 vfork follow are detached. */
3135 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
3136 {
3137 int child_pid = ptid_get_pid (ecs->ws.value.related_pid);
3138
3139 /* This won't actually modify the breakpoint list, but will
3140 physically remove the breakpoints from the child. */
3141 detach_breakpoints (child_pid);
3142 }
3143
3144 /* In case the event is caught by a catchpoint, remember that
3145 the event is to be followed at the next resume of the thread,
3146 and not immediately. */
3147 ecs->event_thread->pending_follow = ecs->ws;
3148
3149 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3150
3151 ecs->event_thread->stop_bpstat
3152 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3153 stop_pc, ecs->ptid);
3154
3155 /* Note that we're interested in knowing whether the bpstat actually
3156 causes a stop, not just if it may explain the signal.
3157 Software watchpoints, for example, always appear in the
3158 bpstat. */
3159 ecs->random_signal = !bpstat_causes_stop (ecs->event_thread->stop_bpstat);
3160
3161 /* If no catchpoint triggered for this, then keep going. */
3162 if (ecs->random_signal)
3163 {
3164 ptid_t parent;
3165 ptid_t child;
3166 int should_resume;
3167 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
3168
3169 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3170
3171 should_resume = follow_fork ();
3172
3173 parent = ecs->ptid;
3174 child = ecs->ws.value.related_pid;
3175
3176 /* In non-stop mode, also resume the other branch. */
3177 if (non_stop && !detach_fork)
3178 {
3179 if (follow_child)
3180 switch_to_thread (parent);
3181 else
3182 switch_to_thread (child);
3183
3184 ecs->event_thread = inferior_thread ();
3185 ecs->ptid = inferior_ptid;
3186 keep_going (ecs);
3187 }
3188
3189 if (follow_child)
3190 switch_to_thread (child);
3191 else
3192 switch_to_thread (parent);
3193
3194 ecs->event_thread = inferior_thread ();
3195 ecs->ptid = inferior_ptid;
3196
3197 if (should_resume)
3198 keep_going (ecs);
3199 else
3200 stop_stepping (ecs);
3201 return;
3202 }
3203 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3204 goto process_event_stop_test;
3205
3206 case TARGET_WAITKIND_VFORK_DONE:
3207 /* Done with the shared memory region. Re-insert breakpoints in
3208 the parent, and keep going. */
3209
3210 if (debug_infrun)
3211 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3212
3213 if (!ptid_equal (ecs->ptid, inferior_ptid))
3214 context_switch (ecs->ptid);
3215
3216 current_inferior ()->waiting_for_vfork_done = 0;
3217 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3218 /* This also takes care of reinserting breakpoints in the
3219 previously locked inferior. */
3220 keep_going (ecs);
3221 return;
3222
3223 case TARGET_WAITKIND_EXECD:
3224 if (debug_infrun)
3225 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3226
3227 if (!ptid_equal (ecs->ptid, inferior_ptid))
3228 {
3229 context_switch (ecs->ptid);
3230 reinit_frame_cache ();
3231 }
3232
3233 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3234
3235 /* Do whatever is necessary to the parent branch of the vfork. */
3236 handle_vfork_child_exec_or_exit (1);
3237
3238 /* This causes the eventpoints and symbol table to be reset.
3239 Must do this now, before trying to determine whether to
3240 stop. */
3241 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3242
3243 ecs->event_thread->stop_bpstat
3244 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3245 stop_pc, ecs->ptid);
3246 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
3247
3248 /* Note that this may be referenced from inside
3249 bpstat_stop_status above, through inferior_has_execd. */
3250 xfree (ecs->ws.value.execd_pathname);
3251 ecs->ws.value.execd_pathname = NULL;
3252
3253 /* If no catchpoint triggered for this, then keep going. */
3254 if (ecs->random_signal)
3255 {
3256 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3257 keep_going (ecs);
3258 return;
3259 }
3260 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3261 goto process_event_stop_test;
3262
3263 /* Be careful not to try to gather much state about a thread
3264 that's in a syscall. It's frequently a losing proposition. */
3265 case TARGET_WAITKIND_SYSCALL_ENTRY:
3266 if (debug_infrun)
3267 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3268 /* Get the current syscall number. */
3269 if (handle_syscall_event (ecs) != 0)
3270 return;
3271 goto process_event_stop_test;
3272
3273 /* Before examining the threads further, step this thread to
3274 get it entirely out of the syscall. (We get notice of the
3275 event when the thread is just on the verge of exiting a
3276 syscall. Stepping one instruction seems to get it back
3277 into user code.) */
3278 case TARGET_WAITKIND_SYSCALL_RETURN:
3279 if (debug_infrun)
3280 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
3281 if (handle_syscall_event (ecs) != 0)
3282 return;
3283 goto process_event_stop_test;
3284
3285 case TARGET_WAITKIND_STOPPED:
3286 if (debug_infrun)
3287 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
3288 ecs->event_thread->stop_signal = ecs->ws.value.sig;
3289 break;
3290
3291 case TARGET_WAITKIND_NO_HISTORY:
3292 /* Reverse execution: target ran out of history info. */
3293 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3294 print_stop_reason (NO_HISTORY, 0);
3295 stop_stepping (ecs);
3296 return;
3297 }
3298
3299 if (ecs->new_thread_event)
3300 {
3301 if (non_stop)
3302 /* Non-stop assumes that the target handles adding new threads
3303 to the thread list. */
3304 internal_error (__FILE__, __LINE__, "\
3305 targets should add new threads to the thread list themselves in non-stop mode.");
3306
3307 /* We may want to consider not doing a resume here in order to
3308 give the user a chance to play with the new thread. It might
3309 be good to make that a user-settable option. */
3310
3311 /* At this point, all threads are stopped (happens automatically
3312 in either the OS or the native code). Therefore we need to
3313 continue all threads in order to make progress. */
3314
3315 if (!ptid_equal (ecs->ptid, inferior_ptid))
3316 context_switch (ecs->ptid);
3317 target_resume (RESUME_ALL, 0, TARGET_SIGNAL_0);
3318 prepare_to_wait (ecs);
3319 return;
3320 }
3321
3322 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED)
3323 {
3324 /* Do we need to clean up the state of a thread that has
3325 completed a displaced single-step? (Doing so usually affects
3326 the PC, so do it here, before we set stop_pc.) */
3327 displaced_step_fixup (ecs->ptid, ecs->event_thread->stop_signal);
3328
3329 /* If we either finished a single-step or hit a breakpoint, but
3330 the user wanted this thread to be stopped, pretend we got a
3331 SIG0 (generic unsignaled stop). */
3332
3333 if (ecs->event_thread->stop_requested
3334 && ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3335 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3336 }
3337
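/* Record the PC the event thread stopped at; most of the stop
   tests below are driven off STOP_PC.  */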
3338 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3339
3340 if (debug_infrun)
3341 {
3342 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3343 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3344 struct cleanup *old_chain = save_inferior_ptid ();
3345
3346 inferior_ptid = ecs->ptid;
3347
3348 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
3349 paddress (gdbarch, stop_pc));
3350 if (target_stopped_by_watchpoint ())
3351 {
3352 CORE_ADDR addr;
3353 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
3354
3355 if (target_stopped_data_address (&current_target, &addr))
3356 fprintf_unfiltered (gdb_stdlog,
3357 "infrun: stopped data address = %s\n",
3358 paddress (gdbarch, addr));
3359 else
3360 fprintf_unfiltered (gdb_stdlog,
3361 "infrun: (no data address available)\n");
3362 }
3363
3364 do_cleanups (old_chain);
3365 }
3366
3367 if (stepping_past_singlestep_breakpoint)
3368 {
3369 gdb_assert (singlestep_breakpoints_inserted_p);
3370 gdb_assert (ptid_equal (singlestep_ptid, ecs->ptid));
3371 gdb_assert (!ptid_equal (singlestep_ptid, saved_singlestep_ptid));
3372
3373 stepping_past_singlestep_breakpoint = 0;
3374
3375 /* We've either finished single-stepping past the single-step
3376 breakpoint, or stopped for some other reason. It would be nice if
3377 we could tell, but we can't reliably. */
3378 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3379 {
3380 if (debug_infrun)
3381 fprintf_unfiltered (gdb_stdlog, "infrun: stepping_past_singlestep_breakpoint\n");
3382 /* Pull the single step breakpoints out of the target. */
3383 remove_single_step_breakpoints ();
3384 singlestep_breakpoints_inserted_p = 0;
3385
3386 ecs->random_signal = 0;
3387 ecs->event_thread->trap_expected = 0;
3388
3389 context_switch (saved_singlestep_ptid);
3390 if (deprecated_context_hook)
3391 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3392
3393 resume (1, TARGET_SIGNAL_0);
3394 prepare_to_wait (ecs);
3395 return;
3396 }
3397 }
3398
3399 if (!ptid_equal (deferred_step_ptid, null_ptid))
3400 {
3401 /* In non-stop mode, there's never a deferred_step_ptid set. */
3402 gdb_assert (!non_stop);
3403
3404 /* If we stopped for some other reason than single-stepping, ignore
3405 the fact that we were supposed to switch back. */
3406 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3407 {
3408 if (debug_infrun)
3409 fprintf_unfiltered (gdb_stdlog,
3410 "infrun: handling deferred step\n");
3411
3412 /* Pull the single step breakpoints out of the target. */
3413 if (singlestep_breakpoints_inserted_p)
3414 {
3415 remove_single_step_breakpoints ();
3416 singlestep_breakpoints_inserted_p = 0;
3417 }
3418
3419 /* Note: We do not call context_switch at this point, as the
3420 context is already set up for stepping the original thread. */
3421 switch_to_thread (deferred_step_ptid);
3422 deferred_step_ptid = null_ptid;
3423 /* Suppress spurious "Switching to ..." message. */
3424 previous_inferior_ptid = inferior_ptid;
3425
3426 resume (1, TARGET_SIGNAL_0);
3427 prepare_to_wait (ecs);
3428 return;
3429 }
3430
3431 deferred_step_ptid = null_ptid;
3432 }
3433
3434 /* See if a thread hit a thread-specific breakpoint that was meant for
3435 another thread. If so, then step that thread past the breakpoint,
3436 and continue it. */
3437
3438 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3439 {
3440 int thread_hop_needed = 0;
3441 struct address_space *aspace =
3442 get_regcache_aspace (get_thread_regcache (ecs->ptid));
3443
3444 /* Check if a regular breakpoint has been hit before checking
3445 for a potential single step breakpoint. Otherwise, GDB will
3446 not see this breakpoint hit when stepping onto breakpoints. */
3447 if (regular_breakpoint_inserted_here_p (aspace, stop_pc))
3448 {
3449 ecs->random_signal = 0;
3450 if (!breakpoint_thread_match (aspace, stop_pc, ecs->ptid))
3451 thread_hop_needed = 1;
3452 }
3453 else if (singlestep_breakpoints_inserted_p)
3454 {
3455 /* We have not context switched yet, so this should be true
3456 no matter which thread hit the singlestep breakpoint. */
3457 gdb_assert (ptid_equal (inferior_ptid, singlestep_ptid));
3458 if (debug_infrun)
3459 fprintf_unfiltered (gdb_stdlog, "infrun: software single step "
3460 "trap for %s\n",
3461 target_pid_to_str (ecs->ptid));
3462
3463 ecs->random_signal = 0;
3464 /* The call to in_thread_list is necessary because PTIDs sometimes
3465 change when we go from single-threaded to multi-threaded. If
3466 the singlestep_ptid is still in the list, assume that it is
3467 really different from ecs->ptid. */
3468 if (!ptid_equal (singlestep_ptid, ecs->ptid)
3469 && in_thread_list (singlestep_ptid))
3470 {
3471 /* If the PC of the thread we were trying to single-step
3472 has changed, discard this event (which we were going
3473 to ignore anyway), and pretend we saw that thread
3474 trap. This prevents us continuously moving the
3475 single-step breakpoint forward, one instruction at a
3476 time. If the PC has changed, then the thread we were
3477 trying to single-step has trapped or been signalled,
3478 but the event has not been reported to GDB yet.
3479
3480 There might be some cases where this loses signal
3481 information, if a signal has arrived at exactly the
3482 same time that the PC changed, but this is the best
3483 we can do with the information available. Perhaps we
3484 should arrange to report all events for all threads
3485 when they stop, or to re-poll the remote looking for
3486 this particular thread (i.e. temporarily enable
3487 schedlock). */
3488
3489 CORE_ADDR new_singlestep_pc
3490 = regcache_read_pc (get_thread_regcache (singlestep_ptid));
3491
3492 if (new_singlestep_pc != singlestep_pc)
3493 {
3494 enum target_signal stop_signal;
3495
3496 if (debug_infrun)
3497 fprintf_unfiltered (gdb_stdlog, "infrun: unexpected thread,"
3498 " but expected thread advanced also\n");
3499
3500 /* The current context still belongs to
3501 singlestep_ptid. Don't swap here, since that's
3502 the context we want to use. Just fudge our
3503 state and continue. */
3504 stop_signal = ecs->event_thread->stop_signal;
3505 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3506 ecs->ptid = singlestep_ptid;
3507 ecs->event_thread = find_thread_ptid (ecs->ptid);
3508 ecs->event_thread->stop_signal = stop_signal;
3509 stop_pc = new_singlestep_pc;
3510 }
3511 else
3512 {
3513 if (debug_infrun)
3514 fprintf_unfiltered (gdb_stdlog,
3515 "infrun: unexpected thread\n");
3516
3517 thread_hop_needed = 1;
3518 stepping_past_singlestep_breakpoint = 1;
3519 saved_singlestep_ptid = singlestep_ptid;
3520 }
3521 }
3522 }
3523
3524 if (thread_hop_needed)
3525 {
3526 struct regcache *thread_regcache;
3527 int remove_status = 0;
3528
3529 if (debug_infrun)
3530 fprintf_unfiltered (gdb_stdlog, "infrun: thread_hop_needed\n");
3531
3532 /* Switch context before touching inferior memory, the
3533 previous thread may have exited. */
3534 if (!ptid_equal (inferior_ptid, ecs->ptid))
3535 context_switch (ecs->ptid);
3536
3537 /* Saw a breakpoint, but it was hit by the wrong thread.
3538 Just continue. */
3539
3540 if (singlestep_breakpoints_inserted_p)
3541 {
3542 /* Pull the single step breakpoints out of the target. */
3543 remove_single_step_breakpoints ();
3544 singlestep_breakpoints_inserted_p = 0;
3545 }
3546
3547 /* If the arch can displace step, don't remove the
3548 breakpoints. */
3549 thread_regcache = get_thread_regcache (ecs->ptid);
3550 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
3551 remove_status = remove_breakpoints ();
3552
3553 /* Did we fail to remove breakpoints? If so, we cannot step
3554 over the breakpoint in this thread, so report an error.
3555 (There's at least one situation in which we can fail to
3556 remove the bp's: On HP-UX's that use ttrace, we can't
3557 change the address space of a vforking child process
3558 until the child exits (well, okay, not then either :-)
3559 or execs.) */
3560 if (remove_status != 0)
3561 error (_("Cannot step over breakpoint hit in wrong thread"));
3562 else
3563 { /* Single step */
3564 if (!non_stop)
3565 {
3566 /* Only need to require the next event from this
3567 thread in all-stop mode. */
3568 waiton_ptid = ecs->ptid;
3569 infwait_state = infwait_thread_hop_state;
3570 }
3571
3572 ecs->event_thread->stepping_over_breakpoint = 1;
3573 keep_going (ecs);
3574 return;
3575 }
3576 }
3577 else if (singlestep_breakpoints_inserted_p)
3578 {
3579 sw_single_step_trap_p = 1;
3580 ecs->random_signal = 0;
3581 }
3582 }
3583 else
3584 ecs->random_signal = 1;
3585
3586 /* See if something interesting happened to the non-current thread. If
3587 so, then switch to that thread. */
3588 if (!ptid_equal (ecs->ptid, inferior_ptid))
3589 {
3590 if (debug_infrun)
3591 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
3592
3593 context_switch (ecs->ptid);
3594
3595 if (deprecated_context_hook)
3596 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3597 }
3598
3599 /* At this point, get hold of the now-current thread's frame. */
3600 frame = get_current_frame ();
3601 gdbarch = get_frame_arch (frame);
3602
3603 if (singlestep_breakpoints_inserted_p)
3604 {
3605 /* Pull the single step breakpoints out of the target. */
3606 remove_single_step_breakpoints ();
3607 singlestep_breakpoints_inserted_p = 0;
3608 }
3609
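/* STEPPED_AFTER_STOPPED_BY_WATCHPOINT is presumably set when the
   previous event finished the single step requested below (the
   infwait_step_watch_state / infwait_nonstep_watch_state handling);
   in that case the watchpoint trigger has already been noted, so
   don't report it again here.  */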
3610 if (stepped_after_stopped_by_watchpoint)
3611 stopped_by_watchpoint = 0;
3612 else
3613 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
3614
3615 /* If necessary, step over this watchpoint. We'll be back to display
3616 it in a moment. */
3617 if (stopped_by_watchpoint
3618 && (target_have_steppable_watchpoint
3619 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
3620 {
3621 /* At this point, we are stopped at an instruction which has
3622 attempted to write to a piece of memory under control of
3623 a watchpoint. The instruction hasn't actually executed
3624 yet. If we were to evaluate the watchpoint expression
3625 now, we would get the old value, and therefore no change
3626 would seem to have occurred.
3627
3628 In order to make watchpoints work `right', we really need
3629 to complete the memory write, and then evaluate the
3630 watchpoint expression. We do this by single-stepping the
3631 target.
3632
3633 It may not be necessary to disable the watchpoint to step over
3634 it. For example, the PA can (with some kernel cooperation)
3635 single step over a watchpoint without disabling the watchpoint.
3636
3637 It is far more common to need to disable a watchpoint to step
3638 the inferior over it. If we have non-steppable watchpoints,
3639 we must disable the current watchpoint; it's simplest to
3640 disable all watchpoints and breakpoints. */
3641 int hw_step = 1;
3642
3643 if (!target_have_steppable_watchpoint)
3644 remove_breakpoints ();
3645 /* Single step */
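/* maybe_software_singlestep presumably returns nonzero when a
   hardware single step is still required, and zero when it has
   already planted software single-step breakpoints; HW_STEP is
   then passed to target_resume as the step flag.  */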
3646 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
3647 target_resume (ecs->ptid, hw_step, TARGET_SIGNAL_0);
3648 waiton_ptid = ecs->ptid;
3649 if (target_have_steppable_watchpoint)
3650 infwait_state = infwait_step_watch_state;
3651 else
3652 infwait_state = infwait_nonstep_watch_state;
3653 prepare_to_wait (ecs);
3654 return;
3655 }
3656
3657 ecs->stop_func_start = 0;
3658 ecs->stop_func_end = 0;
3659 ecs->stop_func_name = 0;
3660 /* Don't care about return value; stop_func_start and stop_func_name
3661 will both be 0 if it doesn't work. */
3662 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3663 &ecs->stop_func_start, &ecs->stop_func_end);
3664 ecs->stop_func_start
3665 += gdbarch_deprecated_function_start_offset (gdbarch);
3666 ecs->event_thread->stepping_over_breakpoint = 0;
3667 bpstat_clear (&ecs->event_thread->stop_bpstat);
3668 ecs->event_thread->stop_step = 0;
3669 stop_print_frame = 1;
3670 ecs->random_signal = 0;
3671 stopped_by_random_signal = 0;
3672
3673 /* Hide inlined functions starting here, unless we just performed stepi or
3674 nexti. After stepi and nexti, always show the innermost frame (not any
3675 inline function call sites). */
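/* A step_range_end of 1 is how a stepi/nexti request is encoded;
   see the explicit stepi/nexti check further below.  */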
3676 if (ecs->event_thread->step_range_end != 1)
3677 skip_inline_frames (ecs->ptid);
3678
3679 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3680 && ecs->event_thread->trap_expected
3681 && gdbarch_single_step_through_delay_p (gdbarch)
3682 && currently_stepping (ecs->event_thread))
3683 {
3684 /* We're trying to step off a breakpoint. Turns out that we're
3685 also on an instruction that needs to be stepped multiple
3686 times before it has been fully executed. E.g., architectures
3687 with a delay slot. It needs to be stepped twice, once for
3688 the instruction and once for the delay slot. */
3689 int step_through_delay
3690 = gdbarch_single_step_through_delay (gdbarch, frame);
3691 if (debug_infrun && step_through_delay)
3692 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
3693 if (ecs->event_thread->step_range_end == 0 && step_through_delay)
3694 {
3695 /* The user issued a continue when stopped at a breakpoint.
3696 Set up for another trap and get out of here. */
3697 ecs->event_thread->stepping_over_breakpoint = 1;
3698 keep_going (ecs);
3699 return;
3700 }
3701 else if (step_through_delay)
3702 {
3703 /* The user issued a step when stopped at a breakpoint.
3704 Maybe we should stop, maybe we should not - the delay
3705 slot *might* correspond to a line of source. In any
3706 case, don't decide that here, just set
3707 ecs->stepping_over_breakpoint, making sure we
3708 single-step again before breakpoints are re-inserted. */
3709 ecs->event_thread->stepping_over_breakpoint = 1;
3710 }
3711 }
3712
3713 /* Look at the cause of the stop, and decide what to do.
3714 The alternatives are:
3715 1) stop_stepping and return; to really stop and return to the debugger,
3716 2) keep_going and return to start up again
3717 (set ecs->event_thread->stepping_over_breakpoint to 1 to single step once)
3718 3) set ecs->random_signal to 1, and the decision between 1 and 2
3719 will be made according to the signal handling tables. */
3720
3721 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3722 || stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_NO_SIGSTOP
3723 || stop_soon == STOP_QUIETLY_REMOTE)
3724 {
3725 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP && stop_after_trap)
3726 {
3727 if (debug_infrun)
3728 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
3729 stop_print_frame = 0;
3730 stop_stepping (ecs);
3731 return;
3732 }
3733
3734 /* This originates from start_remote(), start_inferior() and
3735 shared libraries hook functions. */
3736 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
3737 {
3738 if (debug_infrun)
3739 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3740 stop_stepping (ecs);
3741 return;
3742 }
3743
3744 /* This originates from attach_command(). We need to overwrite
3745 the stop_signal here, because some kernels don't ignore a
3746 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
3747 See more comments in inferior.h. On the other hand, if we
3748 get a non-SIGSTOP, report it to the user - assume the backend
3749 will handle the SIGSTOP if it should show up later.
3750
3751 Also consider that the attach is complete when we see a
3752 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
3753 target extended-remote report it instead of a SIGSTOP
3754 (e.g. gdbserver). We already rely on SIGTRAP being our
3755 signal, so this is no exception.
3756
3757 Also consider that the attach is complete when we see a
3758 TARGET_SIGNAL_0. In non-stop mode, GDB will explicitly tell
3759 the target to stop all threads of the inferior, in case the
3760 low level attach operation doesn't stop them implicitly. If
3761 they weren't stopped implicitly, then the stub will report a
3762 TARGET_SIGNAL_0, meaning: stopped for no particular reason
3763 other than GDB's request. */
3764 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3765 && (ecs->event_thread->stop_signal == TARGET_SIGNAL_STOP
3766 || ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3767 || ecs->event_thread->stop_signal == TARGET_SIGNAL_0))
3768 {
3769 stop_stepping (ecs);
3770 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3771 return;
3772 }
3773
3774 /* See if there is a breakpoint at the current PC. */
3775 ecs->event_thread->stop_bpstat
3776 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3777 stop_pc, ecs->ptid);
3778
3779 /* The following is in case a breakpoint condition called a
3780 function. */
3781 stop_print_frame = 1;
3782
3783 /* This is where we handle "moribund" watchpoints. Unlike
3784 software breakpoint traps, hardware watchpoint traps are
3785 always distinguishable from random traps. If no high-level
3786 watchpoint is associated with the reported stop data address
3787 anymore, then the bpstat does not explain the signal ---
3788 simply make sure to ignore it if `stopped_by_watchpoint' is
3789 set. */
3790
3791 if (debug_infrun
3792 && ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3793 && !bpstat_explains_signal (ecs->event_thread->stop_bpstat)
3794 && stopped_by_watchpoint)
3795 fprintf_unfiltered (gdb_stdlog, "\
3796 infrun: no user watchpoint explains watchpoint SIGTRAP, ignoring\n");
3797
3798 /* NOTE: cagney/2003-03-29: These two checks for a random signal
3799 at one stage in the past included checks for an inferior
3800 function call's call dummy's return breakpoint. The original
3801 comment, that went with the test, read:
3802
3803 ``End of a stack dummy. Some systems (e.g. Sony news) give
3804 another signal besides SIGTRAP, so check here as well as
3805 above.''
3806
3807 If someone ever tries to get call dummys on a
3808 non-executable stack to work (where the target would stop
3809 with something like a SIGSEGV), then those tests might need
3810 to be re-instated. Given, however, that the tests were only
3811 enabled when momentary breakpoints were not being used, I
3812 suspect that it won't be the case.
3813
3814 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
3815 be necessary for call dummies on a non-executable stack on
3816 SPARC. */
3817
3818 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3819 ecs->random_signal
3820 = !(bpstat_explains_signal (ecs->event_thread->stop_bpstat)
3821 || stopped_by_watchpoint
3822 || ecs->event_thread->trap_expected
3823 || (ecs->event_thread->step_range_end
3824 && ecs->event_thread->step_resume_breakpoint == NULL));
3825 else
3826 {
3827 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
3828 if (!ecs->random_signal)
3829 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3830 }
3831 }
3832
3833 /* When we reach this point, we've pretty much decided
3834 that the reason for stopping must've been a random
3835 (unexpected) signal. */
3836
3837 else
3838 ecs->random_signal = 1;
3839
3840 process_event_stop_test:
3841
3842 /* Re-fetch current thread's frame in case we did a
3843 "goto process_event_stop_test" above. */
3844 frame = get_current_frame ();
3845 gdbarch = get_frame_arch (frame);
3846
3847 /* For the program's own signals, act according to
3848 the signal handling tables. */
3849
3850 if (ecs->random_signal)
3851 {
3852 /* Signal not for debugging purposes. */
3853 int printed = 0;
3854 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
3855
3856 if (debug_infrun)
3857 fprintf_unfiltered (gdb_stdlog, "infrun: random signal %d\n",
3858 ecs->event_thread->stop_signal);
3859
3860 stopped_by_random_signal = 1;
3861
3862 if (signal_print[ecs->event_thread->stop_signal])
3863 {
3864 printed = 1;
3865 target_terminal_ours_for_output ();
3866 print_stop_reason (SIGNAL_RECEIVED, ecs->event_thread->stop_signal);
3867 }
3868 /* Always stop on signals if we're either just gaining control
3869 of the program, or the user explicitly requested this thread
3870 to remain stopped. */
3871 if (stop_soon != NO_STOP_QUIETLY
3872 || ecs->event_thread->stop_requested
3873 || (!inf->detaching
3874 && signal_stop_state (ecs->event_thread->stop_signal)))
3875 {
3876 stop_stepping (ecs);
3877 return;
3878 }
3879 /* If not going to stop, give terminal back
3880 if we took it away. */
3881 else if (printed)
3882 target_terminal_inferior ();
3883
3884 /* Clear the signal if it should not be passed. */
3885 if (signal_program[ecs->event_thread->stop_signal] == 0)
3886 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3887
3888 if (ecs->event_thread->prev_pc == stop_pc
3889 && ecs->event_thread->trap_expected
3890 && ecs->event_thread->step_resume_breakpoint == NULL)
3891 {
3892 /* We were just starting a new sequence, attempting to
3893 single-step off of a breakpoint and expecting a SIGTRAP.
3894 Instead this signal arrives. This signal will take us out
3895 of the stepping range so GDB needs to remember to, when
3896 the signal handler returns, resume stepping off that
3897 breakpoint. */
3898 /* To simplify things, "continue" is forced to use the same
3899 code paths as single-step - set a breakpoint at the
3900 signal return address and then, once hit, step off that
3901 breakpoint. */
3902 if (debug_infrun)
3903 fprintf_unfiltered (gdb_stdlog,
3904 "infrun: signal arrived while stepping over "
3905 "breakpoint\n");
3906
3907 insert_step_resume_breakpoint_at_frame (frame);
3908 ecs->event_thread->step_after_step_resume_breakpoint = 1;
3909 keep_going (ecs);
3910 return;
3911 }
3912
3913 if (ecs->event_thread->step_range_end != 0
3914 && ecs->event_thread->stop_signal != TARGET_SIGNAL_0
3915 && (ecs->event_thread->step_range_start <= stop_pc
3916 && stop_pc < ecs->event_thread->step_range_end)
3917 && frame_id_eq (get_stack_frame_id (frame),
3918 ecs->event_thread->step_stack_frame_id)
3919 && ecs->event_thread->step_resume_breakpoint == NULL)
3920 {
3921 /* The inferior is about to take a signal that will take it
3922 out of the single step range. Set a breakpoint at the
3923 current PC (which is presumably where the signal handler
3924 will eventually return) and then allow the inferior to
3925 run free.
3926
3927 Note that this is only needed for a signal delivered
3928 while in the single-step range. Nested signals aren't a
3929 problem as they eventually all return. */
3930 if (debug_infrun)
3931 fprintf_unfiltered (gdb_stdlog,
3932 "infrun: signal may take us out of "
3933 "single-step range\n");
3934
3935 insert_step_resume_breakpoint_at_frame (frame);
3936 keep_going (ecs);
3937 return;
3938 }
3939
3940 /* Note: step_resume_breakpoint may be non-NULL. This occurs
3941 when either there's a nested signal, or when there's a
3942 pending signal enabled just as the signal handler returns
3943 (leaving the inferior at the step-resume-breakpoint without
3944 actually executing it). Either way continue until the
3945 breakpoint is really hit. */
3946 keep_going (ecs);
3947 return;
3948 }
3949
3950 /* Handle cases caused by hitting a breakpoint. */
3951 {
3952 CORE_ADDR jmp_buf_pc;
3953 struct bpstat_what what;
3954
3955 what = bpstat_what (ecs->event_thread->stop_bpstat);
3956
3957 if (what.call_dummy)
3958 {
3959 stop_stack_dummy = what.call_dummy;
3960 }
3961
3962 switch (what.main_action)
3963 {
3964 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
3965 /* If we hit the breakpoint at longjmp while stepping, we
3966 install a momentary breakpoint at the target of the
3967 jmp_buf. */
3968
3969 if (debug_infrun)
3970 fprintf_unfiltered (gdb_stdlog,
3971 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
3972
3973 ecs->event_thread->stepping_over_breakpoint = 1;
3974
3975 if (!gdbarch_get_longjmp_target_p (gdbarch)
3976 || !gdbarch_get_longjmp_target (gdbarch, frame, &jmp_buf_pc))
3977 {
3978 if (debug_infrun)
3979 fprintf_unfiltered (gdb_stdlog, "\
3980 infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME (!gdbarch_get_longjmp_target)\n");
3981 keep_going (ecs);
3982 return;
3983 }
3984
3985 /* We're going to replace the current step-resume breakpoint
3986 with a longjmp-resume breakpoint. */
3987 delete_step_resume_breakpoint (ecs->event_thread);
3988
3989 /* Insert a breakpoint at resume address. */
3990 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
3991
3992 keep_going (ecs);
3993 return;
3994
3995 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
3996 if (debug_infrun)
3997 fprintf_unfiltered (gdb_stdlog,
3998 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
3999
4000 gdb_assert (ecs->event_thread->step_resume_breakpoint != NULL);
4001 delete_step_resume_breakpoint (ecs->event_thread);
4002
4003 ecs->event_thread->stop_step = 1;
4004 print_stop_reason (END_STEPPING_RANGE, 0);
4005 stop_stepping (ecs);
4006 return;
4007
4008 case BPSTAT_WHAT_SINGLE:
4009 if (debug_infrun)
4010 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4011 ecs->event_thread->stepping_over_breakpoint = 1;
4012 /* Still need to check other stuff, at least the case
4013 where we are stepping and step out of the right range. */
4014 break;
4015
4016 case BPSTAT_WHAT_STOP_NOISY:
4017 if (debug_infrun)
4018 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4019 stop_print_frame = 1;
4020
4021 /* We are about to nuke the step_resume_breakpoint via the
4022 cleanup chain, so no need to worry about it here. */
4023
4024 stop_stepping (ecs);
4025 return;
4026
4027 case BPSTAT_WHAT_STOP_SILENT:
4028 if (debug_infrun)
4029 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4030 stop_print_frame = 0;
4031
4032 /* We are about to nuke the step_resume_breakpoint via the
4033 cleanup chain, so no need to worry about it here. */
4034
4035 stop_stepping (ecs);
4036 return;
4037
4038 case BPSTAT_WHAT_STEP_RESUME:
4039 if (debug_infrun)
4040 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4041
4042 delete_step_resume_breakpoint (ecs->event_thread);
4043 if (ecs->event_thread->step_after_step_resume_breakpoint)
4044 {
4045 /* Back when the step-resume breakpoint was inserted, we
4046 were trying to single-step off a breakpoint. Go back
4047 to doing that. */
4048 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4049 ecs->event_thread->stepping_over_breakpoint = 1;
4050 keep_going (ecs);
4051 return;
4052 }
4053 if (stop_pc == ecs->stop_func_start
4054 && execution_direction == EXEC_REVERSE)
4055 {
4056 /* We are stepping over a function call in reverse, and
4057 just hit the step-resume breakpoint at the start
4058 address of the function. Go back to single-stepping,
4059 which should take us back to the function call. */
4060 ecs->event_thread->stepping_over_breakpoint = 1;
4061 keep_going (ecs);
4062 return;
4063 }
4064 break;
4065
4066 case BPSTAT_WHAT_CHECK_SHLIBS:
4067 {
4068 if (debug_infrun)
4069 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_CHECK_SHLIBS\n");
4070
4071 /* Check for any newly added shared libraries if we're
4072 supposed to be adding them automatically. Switch
4073 terminal for any messages produced by
4074 breakpoint_re_set. */
4075 target_terminal_ours_for_output ();
4076 /* NOTE: cagney/2003-11-25: Make certain that the target
4077 stack's section table is kept up-to-date. Architectures,
4078 (e.g., PPC64), use the section table to perform
4079 operations such as address => section name and hence
4080 require the table to contain all sections (including
4081 those found in shared libraries). */
4082 #ifdef SOLIB_ADD
4083 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
4084 #else
4085 solib_add (NULL, 0, &current_target, auto_solib_add);
4086 #endif
4087 target_terminal_inferior ();
4088
4089 /* If requested, stop when the dynamic linker notifies
4090 gdb of events. This allows the user to get control
4091 and place breakpoints in initializer routines for
4092 dynamically loaded objects (among other things). */
4093 if (stop_on_solib_events || stop_stack_dummy)
4094 {
4095 stop_stepping (ecs);
4096 return;
4097 }
4098 else
4099 {
4100 /* We want to step over this breakpoint, then keep going. */
4101 ecs->event_thread->stepping_over_breakpoint = 1;
4102 break;
4103 }
4104 }
4105 break;
4106
4107 case BPSTAT_WHAT_CHECK_JIT:
4108 if (debug_infrun)
4109 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_CHECK_JIT\n");
4110
4111 /* Switch terminal for any messages produced by breakpoint_re_set. */
4112 target_terminal_ours_for_output ();
4113
4114 jit_event_handler (gdbarch);
4115
4116 target_terminal_inferior ();
4117
4118 /* We want to step over this breakpoint, then keep going. */
4119 ecs->event_thread->stepping_over_breakpoint = 1;
4120
4121 break;
4122
4123 case BPSTAT_WHAT_LAST:
4124 /* Not a real code, but listed here to shut up gcc -Wall. */
4125
4126 case BPSTAT_WHAT_KEEP_CHECKING:
4127 break;
4128 }
4129 }
4130
4131 /* We come here if we hit a breakpoint but should not
4132 stop for it. Possibly we also were stepping
4133 and should stop for that. So fall through and
4134 test for stepping. But, if not stepping,
4135 do not stop. */
4136
4137 /* In all-stop mode, if we're currently stepping but have stopped in
4138 some other thread, we need to switch back to the stepped thread. */
4139 if (!non_stop)
4140 {
4141 struct thread_info *tp;
4142 tp = iterate_over_threads (currently_stepping_or_nexting_callback,
4143 ecs->event_thread);
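/* TP is non-NULL if some thread other than the event thread is in
   the middle of a "step" or "next"; see
   currently_stepping_or_nexting_callback below.  */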
4144 if (tp)
4145 {
4146 /* However, if the current thread is blocked on some internal
4147 breakpoint, and we simply need to step over that breakpoint
4148 to get it going again, do that first. */
4149 if ((ecs->event_thread->trap_expected
4150 && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP)
4151 || ecs->event_thread->stepping_over_breakpoint)
4152 {
4153 keep_going (ecs);
4154 return;
4155 }
4156
4157 /* If the stepping thread exited, then don't try to switch
4158 back and resume it, which could fail in several different
4159 ways depending on the target. Instead, just keep going.
4160
4161 We can find a stepping dead thread in the thread list in
4162 two cases:
4163
4164 - The target supports thread exit events, and when the
4165 target tries to delete the thread from the thread list,
4166 inferior_ptid pointed at the exiting thread. In such
4167 case, calling delete_thread does not really remove the
4168 thread from the list; instead, the thread is left listed,
4169 with 'exited' state.
4170
4171 - The target's debug interface does not support thread
4172 exit events, and so we have no idea whatsoever if the
4173 previously stepping thread is still alive. For that
4174 reason, we need to synchronously query the target
4175 now. */
4176 if (is_exited (tp->ptid)
4177 || !target_thread_alive (tp->ptid))
4178 {
4179 if (debug_infrun)
4180 fprintf_unfiltered (gdb_stdlog, "\
4181 infrun: not switching back to stepped thread, it has vanished\n");
4182
4183 delete_thread (tp->ptid);
4184 keep_going (ecs);
4185 return;
4186 }
4187
4188 /* Otherwise, we no longer expect a trap in the current thread.
4189 Clear the trap_expected flag before switching back -- this is
4190 what keep_going would do as well, if we called it. */
4191 ecs->event_thread->trap_expected = 0;
4192
4193 if (debug_infrun)
4194 fprintf_unfiltered (gdb_stdlog,
4195 "infrun: switching back to stepped thread\n");
4196
4197 ecs->event_thread = tp;
4198 ecs->ptid = tp->ptid;
4199 context_switch (ecs->ptid);
4200 keep_going (ecs);
4201 return;
4202 }
4203 }
4204
4205 /* Are we stepping to get the inferior out of the dynamic linker's
4206 hook (and possibly the dld itself) after catching a shlib
4207 event? */
4208 if (ecs->event_thread->stepping_through_solib_after_catch)
4209 {
4210 #if defined(SOLIB_ADD)
4211 /* Have we reached our destination? If not, keep going. */
4212 if (SOLIB_IN_DYNAMIC_LINKER (PIDGET (ecs->ptid), stop_pc))
4213 {
4214 if (debug_infrun)
4215 fprintf_unfiltered (gdb_stdlog, "infrun: stepping in dynamic linker\n");
4216 ecs->event_thread->stepping_over_breakpoint = 1;
4217 keep_going (ecs);
4218 return;
4219 }
4220 #endif
4221 if (debug_infrun)
4222 fprintf_unfiltered (gdb_stdlog, "infrun: step past dynamic linker\n");
4223 /* Else, stop and report the catchpoint(s) whose triggering
4224 caused us to begin stepping. */
4225 ecs->event_thread->stepping_through_solib_after_catch = 0;
4226 bpstat_clear (&ecs->event_thread->stop_bpstat);
4227 ecs->event_thread->stop_bpstat
4228 = bpstat_copy (ecs->event_thread->stepping_through_solib_catchpoints);
4229 bpstat_clear (&ecs->event_thread->stepping_through_solib_catchpoints);
4230 stop_print_frame = 1;
4231 stop_stepping (ecs);
4232 return;
4233 }
4234
4235 if (ecs->event_thread->step_resume_breakpoint)
4236 {
4237 if (debug_infrun)
4238 fprintf_unfiltered (gdb_stdlog,
4239 "infrun: step-resume breakpoint is inserted\n");
4240
4241 /* Having a step-resume breakpoint overrides anything
4242 else having to do with stepping commands until
4243 that breakpoint is reached. */
4244 keep_going (ecs);
4245 return;
4246 }
4247
4248 if (ecs->event_thread->step_range_end == 0)
4249 {
4250 if (debug_infrun)
4251 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4252 /* Likewise if we aren't even stepping. */
4253 keep_going (ecs);
4254 return;
4255 }
4256
4257 /* Re-fetch current thread's frame in case the code above caused
4258 the frame cache to be re-initialized, making our FRAME variable
4259 a dangling pointer. */
4260 frame = get_current_frame ();
4261
4262 /* If stepping through a line, keep going if still within it.
4263
4264 Note that step_range_end is the address of the first instruction
4265 beyond the step range, and NOT the address of the last instruction
4266 within it!
4267
4268 Note also that during reverse execution, we may be stepping
4269 through a function epilogue and therefore must detect when
4270 the current-frame changes in the middle of a line. */
4271
4272 if (stop_pc >= ecs->event_thread->step_range_start
4273 && stop_pc < ecs->event_thread->step_range_end
4274 && (execution_direction != EXEC_REVERSE
4275 || frame_id_eq (get_frame_id (frame),
4276 ecs->event_thread->step_frame_id)))
4277 {
4278 if (debug_infrun)
4279 fprintf_unfiltered
4280 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4281 paddress (gdbarch, ecs->event_thread->step_range_start),
4282 paddress (gdbarch, ecs->event_thread->step_range_end));
4283
4284 /* When stepping backward, stop at beginning of line range
4285 (unless it's the function entry point, in which case
4286 keep going back to the call point). */
4287 if (stop_pc == ecs->event_thread->step_range_start
4288 && stop_pc != ecs->stop_func_start
4289 && execution_direction == EXEC_REVERSE)
4290 {
4291 ecs->event_thread->stop_step = 1;
4292 print_stop_reason (END_STEPPING_RANGE, 0);
4293 stop_stepping (ecs);
4294 }
4295 else
4296 keep_going (ecs);
4297
4298 return;
4299 }
4300
4301 /* We stepped out of the stepping range. */
4302
4303 /* If we are stepping at the source level and entered the runtime
4304 loader dynamic symbol resolution code...
4305
4306 EXEC_FORWARD: we keep on single stepping until we exit the run
4307 time loader code and reach the callee's address.
4308
4309 EXEC_REVERSE: we've already executed the callee (backward), and
4310 the runtime loader code is handled just like any other
4311 undebuggable function call. Now we need only keep stepping
4312 backward through the trampoline code, and that's handled further
4313 down, so there is nothing for us to do here. */
4314
4315 if (execution_direction != EXEC_REVERSE
4316 && ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4317 && in_solib_dynsym_resolve_code (stop_pc))
4318 {
4319 CORE_ADDR pc_after_resolver =
4320 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4321
4322 if (debug_infrun)
4323 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into dynsym resolve code\n");
4324
4325 if (pc_after_resolver)
4326 {
4327 /* Set up a step-resume breakpoint at the address
4328 indicated by SKIP_SOLIB_RESOLVER. */
4329 struct symtab_and_line sr_sal;
4330 init_sal (&sr_sal);
4331 sr_sal.pc = pc_after_resolver;
4332 sr_sal.pspace = get_frame_program_space (frame);
4333
4334 insert_step_resume_breakpoint_at_sal (gdbarch,
4335 sr_sal, null_frame_id);
4336 }
4337
4338 keep_going (ecs);
4339 return;
4340 }
4341
4342 if (ecs->event_thread->step_range_end != 1
4343 && (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4344 || ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4345 && get_frame_type (frame) == SIGTRAMP_FRAME)
4346 {
4347 if (debug_infrun)
4348 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into signal trampoline\n");
4349 /* The inferior, while doing a "step" or "next", has ended up in
4350 a signal trampoline (either by a signal being delivered or by
4351 the signal handler returning). Just single-step until the
4352 inferior leaves the trampoline (either by calling the handler
4353 or returning). */
4354 keep_going (ecs);
4355 return;
4356 }
4357
4358 /* Check for subroutine calls. The check for the current frame
4359 equalling the step ID is not necessary - the check of the
4360 previous frame's ID is sufficient - but it is a common case and
4361 cheaper than checking the previous frame's ID.
4362
4363 NOTE: frame_id_eq will never report two invalid frame IDs as
4364 being equal, so to get into this block, both the current and
4365 previous frame must have valid frame IDs. */
4366 /* The outer_frame_id check is a heuristic to detect stepping
4367 through startup code. If we step over an instruction which
4368 sets the stack pointer from an invalid value to a valid value,
4369 we may detect that as a subroutine call from the mythical
4370 "outermost" function. This could be fixed by marking
4371 outermost frames as !stack_p,code_p,special_p. Then the
4372 initial outermost frame, before sp was valid, would
4373 have code_addr == &_start. See the comment in frame_id_eq
4374 for more. */
4375 if (!frame_id_eq (get_stack_frame_id (frame),
4376 ecs->event_thread->step_stack_frame_id)
4377 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4378 ecs->event_thread->step_stack_frame_id)
4379 && (!frame_id_eq (ecs->event_thread->step_stack_frame_id,
4380 outer_frame_id)
4381 || step_start_function != find_pc_function (stop_pc))))
4382 {
4383 CORE_ADDR real_stop_pc;
4384
4385 if (debug_infrun)
4386 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4387
4388 if ((ecs->event_thread->step_over_calls == STEP_OVER_NONE)
4389 || ((ecs->event_thread->step_range_end == 1)
4390 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4391 ecs->stop_func_start)))
4392 {
4393 /* I presume that step_over_calls is only 0 when we're
4394 supposed to be stepping at the assembly language level
4395 ("stepi"). Just stop. */
4396 /* Also, maybe we just did a "nexti" inside a prolog, so we
4397 thought it was a subroutine call but it was not. Stop as
4398 well. FENN */
4399 /* And this works the same backward as frontward. MVS */
4400 ecs->event_thread->stop_step = 1;
4401 print_stop_reason (END_STEPPING_RANGE, 0);
4402 stop_stepping (ecs);
4403 return;
4404 }
4405
4406 /* Reverse stepping through solib trampolines. */
4407
4408 if (execution_direction == EXEC_REVERSE
4409 && ecs->event_thread->step_over_calls != STEP_OVER_NONE
4410 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4411 || (ecs->stop_func_start == 0
4412 && in_solib_dynsym_resolve_code (stop_pc))))
4413 {
4414 /* Any solib trampoline code can be handled in reverse
4415 by simply continuing to single-step. We have already
4416 executed the solib function (backwards), and a few
4417 steps will take us back through the trampoline to the
4418 caller. */
4419 keep_going (ecs);
4420 return;
4421 }
4422
4423 if (ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4424 {
4425 /* We're doing a "next".
4426
4427 Normal (forward) execution: set a breakpoint at the
4428 callee's return address (the address at which the caller
4429 will resume).
4430
4431 Reverse (backward) execution. set the step-resume
4432 breakpoint at the start of the function that we just
4433 stepped into (backwards), and continue to there. When we
4434 get there, we'll need to single-step back to the caller. */
4435
4436 if (execution_direction == EXEC_REVERSE)
4437 {
4438 struct symtab_and_line sr_sal;
4439
4440 /* Normal function call return (static or dynamic). */
4441 init_sal (&sr_sal);
4442 sr_sal.pc = ecs->stop_func_start;
4443 sr_sal.pspace = get_frame_program_space (frame);
4444 insert_step_resume_breakpoint_at_sal (gdbarch,
4445 sr_sal, null_frame_id);
4446 }
4447 else
4448 insert_step_resume_breakpoint_at_caller (frame);
4449
4450 keep_going (ecs);
4451 return;
4452 }
4453
4454 /* If we are in a function call trampoline (a stub between the
4455 calling routine and the real function), locate the real
4456 function. That's what tells us (a) whether we want to step
4457 into it at all, and (b) what prologue we want to run to the
4458 end of, if we do step into it. */
4459 real_stop_pc = skip_language_trampoline (frame, stop_pc);
4460 if (real_stop_pc == 0)
4461 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4462 if (real_stop_pc != 0)
4463 ecs->stop_func_start = real_stop_pc;
4464
4465 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
4466 {
4467 struct symtab_and_line sr_sal;
4468 init_sal (&sr_sal);
4469 sr_sal.pc = ecs->stop_func_start;
4470 sr_sal.pspace = get_frame_program_space (frame);
4471
4472 insert_step_resume_breakpoint_at_sal (gdbarch,
4473 sr_sal, null_frame_id);
4474 keep_going (ecs);
4475 return;
4476 }
4477
4478 /* If we have line number information for the function we are
4479 thinking of stepping into, step into it.
4480
4481 If there are several symtabs at that PC (e.g. with include
4482 files), just want to know whether *any* of them have line
4483 numbers. find_pc_line handles this. */
4484 {
4485 struct symtab_and_line tmp_sal;
4486
4487 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
4488 tmp_sal.pspace = get_frame_program_space (frame);
4489 if (tmp_sal.line != 0)
4490 {
4491 if (execution_direction == EXEC_REVERSE)
4492 handle_step_into_function_backward (gdbarch, ecs);
4493 else
4494 handle_step_into_function (gdbarch, ecs);
4495 return;
4496 }
4497 }
4498
4499 /* If we have no line number and the step-stop-if-no-debug is
4500 set, we stop the step so that the user has a chance to switch
4501 in assembly mode. */
4502 if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4503 && step_stop_if_no_debug)
4504 {
4505 ecs->event_thread->stop_step = 1;
4506 print_stop_reason (END_STEPPING_RANGE, 0);
4507 stop_stepping (ecs);
4508 return;
4509 }
4510
4511 if (execution_direction == EXEC_REVERSE)
4512 {
4513 /* Set a breakpoint at callee's start address.
4514 From there we can step once and be back in the caller. */
4515 struct symtab_and_line sr_sal;
4516 init_sal (&sr_sal);
4517 sr_sal.pc = ecs->stop_func_start;
4518 sr_sal.pspace = get_frame_program_space (frame);
4519 insert_step_resume_breakpoint_at_sal (gdbarch,
4520 sr_sal, null_frame_id);
4521 }
4522 else
4523 /* Set a breakpoint at callee's return address (the address
4524 at which the caller will resume). */
4525 insert_step_resume_breakpoint_at_caller (frame);
4526
4527 keep_going (ecs);
4528 return;
4529 }
4530
4531 /* Reverse stepping through solib trampolines. */
4532
4533 if (execution_direction == EXEC_REVERSE
4534 && ecs->event_thread->step_over_calls != STEP_OVER_NONE)
4535 {
4536 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4537 || (ecs->stop_func_start == 0
4538 && in_solib_dynsym_resolve_code (stop_pc)))
4539 {
4540 /* Any solib trampoline code can be handled in reverse
4541 by simply continuing to single-step. We have already
4542 executed the solib function (backwards), and a few
4543 steps will take us back through the trampoline to the
4544 caller. */
4545 keep_going (ecs);
4546 return;
4547 }
4548 else if (in_solib_dynsym_resolve_code (stop_pc))
4549 {
4550 /* Stepped backward into the solib dynsym resolver.
4551 Set a breakpoint at its start and continue, then
4552 one more step will take us out. */
4553 struct symtab_and_line sr_sal;
4554 init_sal (&sr_sal);
4555 sr_sal.pc = ecs->stop_func_start;
4556 sr_sal.pspace = get_frame_program_space (frame);
4557 insert_step_resume_breakpoint_at_sal (gdbarch,
4558 sr_sal, null_frame_id);
4559 keep_going (ecs);
4560 return;
4561 }
4562 }
4563
4564 /* If we're in the return path from a shared library trampoline,
4565 we want to proceed through the trampoline when stepping. */
4566 if (gdbarch_in_solib_return_trampoline (gdbarch,
4567 stop_pc, ecs->stop_func_name))
4568 {
4569 /* Determine where this trampoline returns. */
4570 CORE_ADDR real_stop_pc;
4571 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4572
4573 if (debug_infrun)
4574 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into solib return tramp\n");
4575
4576 /* Only proceed through if we know where it's going. */
4577 if (real_stop_pc)
4578 {
4579 /* And put the step-breakpoint there and go until there. */
4580 struct symtab_and_line sr_sal;
4581
4582 init_sal (&sr_sal); /* initialize to zeroes */
4583 sr_sal.pc = real_stop_pc;
4584 sr_sal.section = find_pc_overlay (sr_sal.pc);
4585 sr_sal.pspace = get_frame_program_space (frame);
4586
4587 /* Do not specify what the fp should be when we stop since
4588 on some machines the prologue is where the new fp value
4589 is established. */
4590 insert_step_resume_breakpoint_at_sal (gdbarch,
4591 sr_sal, null_frame_id);
4592
4593 /* Restart without fiddling with the step ranges or
4594 other state. */
4595 keep_going (ecs);
4596 return;
4597 }
4598 }
4599
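/* Map the stop address to a source line; the line-stepping checks
   below compare against this sal.  */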
4600 stop_pc_sal = find_pc_line (stop_pc, 0);
4601
4602 /* NOTE: tausq/2004-05-24: This if block used to be done before all
4603 the trampoline processing logic; however, there are some trampolines
4604 that have no names, so we should do trampoline handling first. */
4605 if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4606 && ecs->stop_func_name == NULL
4607 && stop_pc_sal.line == 0)
4608 {
4609 if (debug_infrun)
4610 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into undebuggable function\n");
4611
4612 /* The inferior just stepped into, or returned to, an
4613 undebuggable function (where there is no debugging information
4614 and no line number corresponding to the address where the
4615 inferior stopped). Since we want to skip this kind of code,
4616 we keep going until the inferior returns from this
4617 function - unless the user has asked us not to (via
4618 set step-mode) or we no longer know how to get back
4619 to the call site. */
4620 if (step_stop_if_no_debug
4621 || !frame_id_p (frame_unwind_caller_id (frame)))
4622 {
4623 /* If we have no line number and the step-stop-if-no-debug
4624 is set, we stop the step so that the user has a chance to
4625 switch in assembly mode. */
4626 ecs->event_thread->stop_step = 1;
4627 print_stop_reason (END_STEPPING_RANGE, 0);
4628 stop_stepping (ecs);
4629 return;
4630 }
4631 else
4632 {
4633 /* Set a breakpoint at callee's return address (the address
4634 at which the caller will resume). */
4635 insert_step_resume_breakpoint_at_caller (frame);
4636 keep_going (ecs);
4637 return;
4638 }
4639 }
4640
4641 if (ecs->event_thread->step_range_end == 1)
4642 {
4643 /* It is stepi or nexti. We always want to stop stepping after
4644 one instruction. */
4645 if (debug_infrun)
4646 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
4647 ecs->event_thread->stop_step = 1;
4648 print_stop_reason (END_STEPPING_RANGE, 0);
4649 stop_stepping (ecs);
4650 return;
4651 }
4652
4653 if (stop_pc_sal.line == 0)
4654 {
4655 /* We have no line number information. That means to stop
4656 stepping (does this always happen right after one instruction,
4657 when we do "s" in a function with no line numbers,
4658 or can this happen as a result of a return or longjmp?). */
4659 if (debug_infrun)
4660 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
4661 ecs->event_thread->stop_step = 1;
4662 print_stop_reason (END_STEPPING_RANGE, 0);
4663 stop_stepping (ecs);
4664 return;
4665 }
4666
4667 /* Look for "calls" to inlined functions, part one. If the inline
4668 frame machinery detected some skipped call sites, we have entered
4669 a new inline function. */
4670
4671 if (frame_id_eq (get_frame_id (get_current_frame ()),
4672 ecs->event_thread->step_frame_id)
4673 && inline_skipped_frames (ecs->ptid))
4674 {
4675 struct symtab_and_line call_sal;
4676
4677 if (debug_infrun)
4678 fprintf_unfiltered (gdb_stdlog,
4679 "infrun: stepped into inlined function\n");
4680
4681 find_frame_sal (get_current_frame (), &call_sal);
4682
4683 if (ecs->event_thread->step_over_calls != STEP_OVER_ALL)
4684 {
4685 /* For "step", we're going to stop. But if the call site
4686 for this inlined function is on the same source line as
4687 we were previously stepping, go down into the function
4688 first. Otherwise stop at the call site. */
4689
4690 if (call_sal.line == ecs->event_thread->current_line
4691 && call_sal.symtab == ecs->event_thread->current_symtab)
4692 step_into_inline_frame (ecs->ptid);
4693
4694 ecs->event_thread->stop_step = 1;
4695 print_stop_reason (END_STEPPING_RANGE, 0);
4696 stop_stepping (ecs);
4697 return;
4698 }
4699 else
4700 {
4701 /* For "next", we should stop at the call site if it is on a
4702 different source line. Otherwise continue through the
4703 inlined function. */
4704 if (call_sal.line == ecs->event_thread->current_line
4705 && call_sal.symtab == ecs->event_thread->current_symtab)
4706 keep_going (ecs);
4707 else
4708 {
4709 ecs->event_thread->stop_step = 1;
4710 print_stop_reason (END_STEPPING_RANGE, 0);
4711 stop_stepping (ecs);
4712 }
4713 return;
4714 }
4715 }
4716
4717 /* Look for "calls" to inlined functions, part two. If we are still
4718 in the same real function we were stepping through, but we have
4719 to go further up to find the exact frame ID, we are stepping
4720 through a more inlined call beyond its call site. */
4721
4722 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
4723 && !frame_id_eq (get_frame_id (get_current_frame ()),
4724 ecs->event_thread->step_frame_id)
4725 && stepped_in_from (get_current_frame (),
4726 ecs->event_thread->step_frame_id))
4727 {
4728 if (debug_infrun)
4729 fprintf_unfiltered (gdb_stdlog,
4730 "infrun: stepping through inlined function\n");
4731
4732 if (ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4733 keep_going (ecs);
4734 else
4735 {
4736 ecs->event_thread->stop_step = 1;
4737 print_stop_reason (END_STEPPING_RANGE, 0);
4738 stop_stepping (ecs);
4739 }
4740 return;
4741 }
4742
4743 if ((stop_pc == stop_pc_sal.pc)
4744 && (ecs->event_thread->current_line != stop_pc_sal.line
4745 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
4746 {
4747 /* We are at the start of a different line. So stop. Note that
4748 we don't stop if we step into the middle of a different line.
4749 That is said to make things like for (;;) statements work
4750 better. */
4751 if (debug_infrun)
4752 fprintf_unfiltered (gdb_stdlog, "infrun: stepped to a different line\n");
4753 ecs->event_thread->stop_step = 1;
4754 print_stop_reason (END_STEPPING_RANGE, 0);
4755 stop_stepping (ecs);
4756 return;
4757 }
4758
4759 /* We aren't done stepping.
4760
4761 Optimize by setting the stepping range to the line.
4762 (We might not be in the original line, but if we entered a
4763 new line in mid-statement, we continue stepping. This makes
4764 things like for(;;) statements work better.) */
4765
4766 ecs->event_thread->step_range_start = stop_pc_sal.pc;
4767 ecs->event_thread->step_range_end = stop_pc_sal.end;
4768 set_step_info (frame, stop_pc_sal);
4769
4770 if (debug_infrun)
4771 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
4772 keep_going (ecs);
4773 }
4774
4775 /* Is thread TP in the middle of single-stepping? */
4776
4777 static int
4778 currently_stepping (struct thread_info *tp)
4779 {
4780 return ((tp->step_range_end && tp->step_resume_breakpoint == NULL)
4781 || tp->trap_expected
4782 || tp->stepping_through_solib_after_catch
4783 || bpstat_should_step ());
4784 }
4785
4786 /* Returns true if any thread *but* the one passed in "data" is in the
4787 middle of stepping or of handling a "next". */
4788
4789 static int
4790 currently_stepping_or_nexting_callback (struct thread_info *tp, void *data)
4791 {
4792 if (tp == data)
4793 return 0;
4794
4795 return (tp->step_range_end
4796 || tp->trap_expected
4797 || tp->stepping_through_solib_after_catch);
4798 }
4799
4800 /* Inferior has stepped into a subroutine call with source code that
4801 we should not step over. Do step to the first line of code in
4802 it. */
4803
4804 static void
4805 handle_step_into_function (struct gdbarch *gdbarch,
4806 struct execution_control_state *ecs)
4807 {
4808 struct symtab *s;
4809 struct symtab_and_line stop_func_sal, sr_sal;
4810
4811 s = find_pc_symtab (stop_pc);
4812 if (s && s->language != language_asm)
4813 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
4814 ecs->stop_func_start);
4815
4816 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
4817 /* Use the step_resume_break to step until the end of the prologue,
4818 even if that involves jumps (as it seems to on the vax under
4819 4.2). */
4820 /* If the prologue ends in the middle of a source line, continue to
4821 the end of that source line (if it is still within the function).
4822 Otherwise, just go to end of prologue. */
4823 if (stop_func_sal.end
4824 && stop_func_sal.pc != ecs->stop_func_start
4825 && stop_func_sal.end < ecs->stop_func_end)
4826 ecs->stop_func_start = stop_func_sal.end;
4827
4828 /* Architectures which require breakpoint adjustment might not be able
4829 to place a breakpoint at the computed address. If so, the test
4830 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
4831 ecs->stop_func_start to an address at which a breakpoint may be
4832 legitimately placed.
4833
4834 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
4835 made, GDB will enter an infinite loop when stepping through
4836 optimized code consisting of VLIW instructions which contain
4837 subinstructions corresponding to different source lines. On
4838 FR-V, it's not permitted to place a breakpoint on any but the
4839 first subinstruction of a VLIW instruction. When a breakpoint is
4840 set, GDB will adjust the breakpoint address to the beginning of
4841 the VLIW instruction. Thus, we need to make the corresponding
4842 adjustment here when computing the stop address. */
4843
4844 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
4845 {
4846 ecs->stop_func_start
4847 = gdbarch_adjust_breakpoint_address (gdbarch,
4848 ecs->stop_func_start);
4849 }
4850
4851 if (ecs->stop_func_start == stop_pc)
4852 {
4853 /* We are already there: stop now. */
4854 ecs->event_thread->stop_step = 1;
4855 print_stop_reason (END_STEPPING_RANGE, 0);
4856 stop_stepping (ecs);
4857 return;
4858 }
4859 else
4860 {
4861 /* Put the step-breakpoint there and go until there. */
4862 init_sal (&sr_sal); /* initialize to zeroes */
4863 sr_sal.pc = ecs->stop_func_start;
4864 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
4865 sr_sal.pspace = get_frame_program_space (get_current_frame ());
4866
4867 /* Do not specify what the fp should be when we stop since on
4868 some machines the prologue is where the new fp value is
4869 established. */
4870 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
4871
4872 /* And make sure stepping stops right away then. */
4873 ecs->event_thread->step_range_end = ecs->event_thread->step_range_start;
4874 }
4875 keep_going (ecs);
4876 }
4877
4878 /* Inferior has stepped backward into a subroutine call with source
4879 code that we should not step over. Do step to the beginning of the
4880 last line of code in it. */
4881
4882 static void
4883 handle_step_into_function_backward (struct gdbarch *gdbarch,
4884 struct execution_control_state *ecs)
4885 {
4886 struct symtab *s;
4887 struct symtab_and_line stop_func_sal, sr_sal;
4888
4889 s = find_pc_symtab (stop_pc);
4890 if (s && s->language != language_asm)
4891 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
4892 ecs->stop_func_start);
4893
4894 stop_func_sal = find_pc_line (stop_pc, 0);
4895
4896 /* OK, we're just going to keep stepping here. */
4897 if (stop_func_sal.pc == stop_pc)
4898 {
4899 /* We're there already. Just stop stepping now. */
4900 ecs->event_thread->stop_step = 1;
4901 print_stop_reason (END_STEPPING_RANGE, 0);
4902 stop_stepping (ecs);
4903 }
4904 else
4905 {
4906 /* Else just reset the step range and keep going.
4907 No step-resume breakpoint, they don't work for
4908 epilogues, which can have multiple entry paths. */
4909 ecs->event_thread->step_range_start = stop_func_sal.pc;
4910 ecs->event_thread->step_range_end = stop_func_sal.end;
4911 keep_going (ecs);
4912 }
4913 return;
4914 }
4915
4916 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
4917 This is used both to skip over functions and to skip over code. */
4918
4919 static void
4920 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
4921 struct symtab_and_line sr_sal,
4922 struct frame_id sr_id)
4923 {
4924 /* There should never be more than one step-resume or longjmp-resume
4925 breakpoint per thread, so we should never be setting a new
4926 step_resume_breakpoint when one is already active. */
4927 gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL);
4928
4929 if (debug_infrun)
4930 fprintf_unfiltered (gdb_stdlog,
4931 "infrun: inserting step-resume breakpoint at %s\n",
4932 paddress (gdbarch, sr_sal.pc));
4933
4934 inferior_thread ()->step_resume_breakpoint
4935 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, bp_step_resume);
4936 }
4937
4938 /* Insert a "step-resume breakpoint" at RETURN_FRAME.pc. This is used
4939 to skip a potential signal handler.
4940
4941 This is called with the interrupted function's frame. The signal
4942 handler, when it returns, will resume the interrupted function at
4943 RETURN_FRAME.pc. */
4944
4945 static void
4946 insert_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
4947 {
4948 struct symtab_and_line sr_sal;
4949 struct gdbarch *gdbarch;
4950
4951 gdb_assert (return_frame != NULL);
4952 init_sal (&sr_sal); /* initialize to zeros */
4953
4954 gdbarch = get_frame_arch (return_frame);
4955 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
4956 sr_sal.section = find_pc_overlay (sr_sal.pc);
4957 sr_sal.pspace = get_frame_program_space (return_frame);
4958
4959 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
4960 get_stack_frame_id (return_frame));
4961 }
4962
4963 /* Similar to insert_step_resume_breakpoint_at_frame, except
4964 it sets the breakpoint at the previous frame's PC. This is used to
4965 skip a function after stepping into it (for "next" or if the called
4966 function has no debugging information).
4967
4968 The current function has almost always been reached by single
4969 stepping a call or return instruction. NEXT_FRAME belongs to the
4970 current function, and the breakpoint will be set at the caller's
4971 resume address.
4972
4973 This is a separate function rather than reusing
4974 insert_step_resume_breakpoint_at_frame in order to avoid
4975 get_prev_frame, which may stop prematurely (see the implementation
4976 of frame_unwind_caller_id for an example). */
4977
4978 static void
4979 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
4980 {
4981 struct symtab_and_line sr_sal;
4982 struct gdbarch *gdbarch;
4983
4984 /* We shouldn't have gotten here if we don't know where the call site
4985 is. */
4986 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
4987
4988 init_sal (&sr_sal); /* initialize to zeros */
4989
4990 gdbarch = frame_unwind_caller_arch (next_frame);
4991 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
4992 frame_unwind_caller_pc (next_frame));
4993 sr_sal.section = find_pc_overlay (sr_sal.pc);
4994 sr_sal.pspace = frame_unwind_program_space (next_frame);
4995
4996 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
4997 frame_unwind_caller_id (next_frame));
4998 }
4999
5000 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
5001 new breakpoint at the target of a jmp_buf. The handling of
5002 longjmp-resume uses the same mechanisms used for handling
5003 "step-resume" breakpoints. */
5004
5005 static void
5006 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
5007 {
5008 /* There should never be more than one step-resume or longjmp-resume
5009 breakpoint per thread, so we should never be setting a new
5010 longjmp_resume_breakpoint when one is already active. */
5011 gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL);
5012
5013 if (debug_infrun)
5014 fprintf_unfiltered (gdb_stdlog,
5015 "infrun: inserting longjmp-resume breakpoint at %s\n",
5016 paddress (gdbarch, pc));
5017
5018 inferior_thread ()->step_resume_breakpoint =
5019 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
5020 }
5021
5022 static void
5023 stop_stepping (struct execution_control_state *ecs)
5024 {
5025 if (debug_infrun)
5026 fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n");
5027
5028 /* Let callers know we don't want to wait for the inferior anymore. */
5029 ecs->wait_some_more = 0;
5030 }
5031
5032 /* This function handles various cases where we need to continue
5033 waiting for the inferior. */
5034 /* (Used to be the keep_going: label in the old wait_for_inferior) */
5035
5036 static void
5037 keep_going (struct execution_control_state *ecs)
5038 {
5039 /* Make sure normal_stop is called if we get a QUIT handled before
5040 reaching resume. */
5041 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
5042
5043 /* Save the pc before execution, to compare with pc after stop. */
5044 ecs->event_thread->prev_pc
5045 = regcache_read_pc (get_thread_regcache (ecs->ptid));
5046
5047 /* Since we are not stopping here, keep running the
5048 inferior and do not return to the debugger. */
5049
5050 if (ecs->event_thread->trap_expected
5051 && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP)
5052 {
5053 /* We took a signal (which we are supposed to pass through to
5054 the inferior, else we'd not get here) and we haven't yet
5055 gotten our trap. Simply continue. */
5056
5057 discard_cleanups (old_cleanups);
5058 resume (currently_stepping (ecs->event_thread),
5059 ecs->event_thread->stop_signal);
5060 }
5061 else
5062 {
5063 /* Either the trap was not expected, but we are continuing
5064 anyway (the user asked that this signal be passed to the
5065 child)
5066 -- or --
5067 The signal was SIGTRAP, e.g. it was our signal, but we
5068 decided we should resume from it.
5069
5070 We're going to run this baby now!
5071
5072 Note that insert_breakpoints won't try to re-insert
5073 already inserted breakpoints. Therefore, we don't
5074 care if breakpoints were already inserted, or not. */
5075
5076 if (ecs->event_thread->stepping_over_breakpoint)
5077 {
5078 struct regcache *thread_regcache = get_thread_regcache (ecs->ptid);
5079 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
5080 /* Since we can't do a displaced step, we have to remove
5081 the breakpoint while we step it. To keep things
5082 simple, we remove them all. */
5083 remove_breakpoints ();
5084 }
5085 else
5086 {
5087 struct gdb_exception e;
5088 /* Stop stepping when inserting breakpoints
5089 has failed. */
5090 TRY_CATCH (e, RETURN_MASK_ERROR)
5091 {
5092 insert_breakpoints ();
5093 }
5094 if (e.reason < 0)
5095 {
5096 exception_print (gdb_stderr, e);
5097 stop_stepping (ecs);
5098 return;
5099 }
5100 }
5101
5102 ecs->event_thread->trap_expected = ecs->event_thread->stepping_over_breakpoint;
5103
5104 /* Do not deliver SIGNAL_TRAP (except when the user explicitly
5105 specifies that such a signal should be delivered to the
5106 target program).
5107
5108 Typically, this would occur when a user is debugging a
5109 target monitor on a simulator: the target monitor sets a
5110 breakpoint; the simulator encounters this break-point and
5111 halts the simulation, handing control to GDB; GDB, noting
5112 that the break-point isn't valid, returns control back to the
5113 simulator; the simulator then delivers the hardware
5114 equivalent of a SIGNAL_TRAP to the program being debugged. */
5115
5116 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
5117 && !signal_program[ecs->event_thread->stop_signal])
5118 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
5119
5120 discard_cleanups (old_cleanups);
5121 resume (currently_stepping (ecs->event_thread),
5122 ecs->event_thread->stop_signal);
5123 }
5124
5125 prepare_to_wait (ecs);
5126 }
5127
5128 /* This function normally comes after a resume, before
5129 handle_inferior_event exits. It takes care of any last bits of
5130 housekeeping, and sets the all-important wait_some_more flag. */
5131
5132 static void
5133 prepare_to_wait (struct execution_control_state *ecs)
5134 {
5135 if (debug_infrun)
5136 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
5137
5138 /* This is the old end of the while loop. Let everybody know we
5139 want to wait for the inferior some more and get called again
5140 soon. */
5141 ecs->wait_some_more = 1;
5142 }
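
/* Illustrative sketch (not part of GDB): the wait_some_more flag set
   here by prepare_to_wait, and cleared by stop_stepping, is what an
   outer driver loop tests to decide whether to wait for another target
   event.  The loop below is a hypothetical, simplified driver; the
   target_wait signature and the ecs->ws field are assumed from
   context, and GDB's real drivers (wait_for_inferior and the async
   event handler) do considerably more.  */
#if 0
static void
demo_event_loop (struct execution_control_state *ecs)
{
  do
    {
      /* Block until the target reports something, then decide what to
	 do about it.  handle_inferior_event ends by calling either
	 prepare_to_wait (which sets wait_some_more) or stop_stepping
	 (which clears it).  */
      ecs->ptid = target_wait (minus_one_ptid, &ecs->ws, 0);
      handle_inferior_event (ecs);
    }
  while (ecs->wait_some_more);
}
#endif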
5143
5144 /* Print why the inferior has stopped. We always print something when
5145 the inferior exits, or receives a signal. The rest of the cases are
5146 dealt with later on in normal_stop() and print_it_typical(). Ideally
5147 there should be a call to this function from handle_inferior_event()
5148 each time stop_stepping() is called. */
5149 static void
5150 print_stop_reason (enum inferior_stop_reason stop_reason, int stop_info)
5151 {
5152 switch (stop_reason)
5153 {
5154 case END_STEPPING_RANGE:
5155 /* We are done with a step/next/si/ni command. */
5156 /* For now print nothing. */
5157 /* Print a message only if not in the middle of doing a "step n"
5158 operation for n > 1 */
5159 if (!inferior_thread ()->step_multi
5160 || !inferior_thread ()->stop_step)
5161 if (ui_out_is_mi_like_p (uiout))
5162 ui_out_field_string
5163 (uiout, "reason",
5164 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
5165 break;
5166 case SIGNAL_EXITED:
5167 /* The inferior was terminated by a signal. */
5168 annotate_signalled ();
5169 if (ui_out_is_mi_like_p (uiout))
5170 ui_out_field_string
5171 (uiout, "reason",
5172 async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
5173 ui_out_text (uiout, "\nProgram terminated with signal ");
5174 annotate_signal_name ();
5175 ui_out_field_string (uiout, "signal-name",
5176 target_signal_to_name (stop_info));
5177 annotate_signal_name_end ();
5178 ui_out_text (uiout, ", ");
5179 annotate_signal_string ();
5180 ui_out_field_string (uiout, "signal-meaning",
5181 target_signal_to_string (stop_info));
5182 annotate_signal_string_end ();
5183 ui_out_text (uiout, ".\n");
5184 ui_out_text (uiout, "The program no longer exists.\n");
5185 break;
5186 case EXITED:
5187 /* The inferior program is finished. */
5188 annotate_exited (stop_info);
5189 if (stop_info)
5190 {
5191 if (ui_out_is_mi_like_p (uiout))
5192 ui_out_field_string (uiout, "reason",
5193 async_reason_lookup (EXEC_ASYNC_EXITED));
5194 ui_out_text (uiout, "\nProgram exited with code ");
5195 ui_out_field_fmt (uiout, "exit-code", "0%o",
5196 (unsigned int) stop_info);
5197 ui_out_text (uiout, ".\n");
5198 }
5199 else
5200 {
5201 if (ui_out_is_mi_like_p (uiout))
5202 ui_out_field_string
5203 (uiout, "reason",
5204 async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
5205 ui_out_text (uiout, "\nProgram exited normally.\n");
5206 }
5207 /* Support the --return-child-result option. */
5208 return_child_result_value = stop_info;
5209 break;
5210 case SIGNAL_RECEIVED:
5211 /* Signal received. The signal table tells us to print about
5212 it. */
5213 annotate_signal ();
5214
5215 if (stop_info == TARGET_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
5216 {
5217 struct thread_info *t = inferior_thread ();
5218
5219 ui_out_text (uiout, "\n[");
5220 ui_out_field_string (uiout, "thread-name",
5221 target_pid_to_str (t->ptid));
5222 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
5223 ui_out_text (uiout, " stopped");
5224 }
5225 else
5226 {
5227 ui_out_text (uiout, "\nProgram received signal ");
5228 annotate_signal_name ();
5229 if (ui_out_is_mi_like_p (uiout))
5230 ui_out_field_string
5231 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
5232 ui_out_field_string (uiout, "signal-name",
5233 target_signal_to_name (stop_info));
5234 annotate_signal_name_end ();
5235 ui_out_text (uiout, ", ");
5236 annotate_signal_string ();
5237 ui_out_field_string (uiout, "signal-meaning",
5238 target_signal_to_string (stop_info));
5239 annotate_signal_string_end ();
5240 }
5241 ui_out_text (uiout, ".\n");
5242 break;
5243 case NO_HISTORY:
5244 /* Reverse execution: target ran out of history info. */
5245 ui_out_text (uiout, "\nNo more reverse-execution history.\n");
5246 break;
5247 default:
5248 internal_error (__FILE__, __LINE__,
5249 _("print_stop_reason: unrecognized enum value"));
5250 break;
5251 }
5252 }
5253 \f
5254
5255 /* Here to return control to GDB when the inferior stops for real.
5256 Print appropriate messages, remove breakpoints, give terminal our modes.
5257
5258 STOP_PRINT_FRAME nonzero means print the executing frame
5259 (pc, function, args, file, line number and line text).
5260 BREAKPOINTS_FAILED nonzero means stop was due to error
5261 attempting to insert breakpoints. */
5262
5263 void
5264 normal_stop (void)
5265 {
5266 struct target_waitstatus last;
5267 ptid_t last_ptid;
5268 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
5269
5270 get_last_target_status (&last_ptid, &last);
5271
5272 /* If an exception is thrown from this point on, make sure to
5273 propagate GDB's knowledge of the executing state to the
5274 frontend/user running state. A QUIT is an easy exception to see
5275 here, so do this before any filtered output. */
5276 if (!non_stop)
5277 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
5278 else if (last.kind != TARGET_WAITKIND_SIGNALLED
5279 && last.kind != TARGET_WAITKIND_EXITED)
5280 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
5281
5282 /* In non-stop mode, we don't want GDB to switch threads behind the
5283 user's back, to avoid races where the user is typing a command to
5284 apply to thread x, but GDB switches to thread y before the user
5285 finishes entering the command. */
5286
5287 /* As with the notification of thread events, we want to delay
5288 notifying the user that we've switched thread context until
5289 the inferior actually stops.
5290
5291 There's no point in saying anything if the inferior has exited.
5292 Note that SIGNALLED here means "exited with a signal", not
5293 "received a signal". */
5294 if (!non_stop
5295 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
5296 && target_has_execution
5297 && last.kind != TARGET_WAITKIND_SIGNALLED
5298 && last.kind != TARGET_WAITKIND_EXITED)
5299 {
5300 target_terminal_ours_for_output ();
5301 printf_filtered (_("[Switching to %s]\n"),
5302 target_pid_to_str (inferior_ptid));
5303 annotate_thread_changed ();
5304 previous_inferior_ptid = inferior_ptid;
5305 }
5306
5307 if (!breakpoints_always_inserted_mode () && target_has_execution)
5308 {
5309 if (remove_breakpoints ())
5310 {
5311 target_terminal_ours_for_output ();
5312 printf_filtered (_("\
5313 Cannot remove breakpoints because program is no longer writable.\n\
5314 Further execution is probably impossible.\n"));
5315 }
5316 }
5317
5318 /* If an auto-display called a function and that got a signal,
5319 delete that auto-display to avoid an infinite recursion. */
5320
5321 if (stopped_by_random_signal)
5322 disable_current_display ();
5323
5324 /* Don't print a message if in the middle of doing a "step n"
5325 operation for n > 1 */
5326 if (target_has_execution
5327 && last.kind != TARGET_WAITKIND_SIGNALLED
5328 && last.kind != TARGET_WAITKIND_EXITED
5329 && inferior_thread ()->step_multi
5330 && inferior_thread ()->stop_step)
5331 goto done;
5332
5333 target_terminal_ours ();
5334
5335 /* Set the current source location. This will also happen if we
5336 display the frame below, but the current SAL will be incorrect
5337 during a user hook-stop function. */
5338 if (has_stack_frames () && !stop_stack_dummy)
5339 set_current_sal_from_frame (get_current_frame (), 1);
5340
5341 /* Let the user/frontend see the threads as stopped. */
5342 do_cleanups (old_chain);
5343
5344 /* Look up the hook_stop and run it (CLI internally handles problem
5345 of stop_command's pre-hook not existing). */
5346 if (stop_command)
5347 catch_errors (hook_stop_stub, stop_command,
5348 "Error while running hook_stop:\n", RETURN_MASK_ALL);
5349
5350 if (!has_stack_frames ())
5351 goto done;
5352
5353 if (last.kind == TARGET_WAITKIND_SIGNALLED
5354 || last.kind == TARGET_WAITKIND_EXITED)
5355 goto done;
5356
5357 /* Select innermost stack frame - i.e., current frame is frame 0,
5358 and current location is based on that.
5359 Don't do this on return from a stack dummy routine,
5360 or if the program has exited. */
5361
5362 if (!stop_stack_dummy)
5363 {
5364 select_frame (get_current_frame ());
5365
5366 /* Print current location without a level number, if
5367 we have changed functions or hit a breakpoint.
5368 Print source line if we have one.
5369 bpstat_print() contains the logic deciding in detail
5370 what to print, based on the event(s) that just occurred. */
5371
5372 /* If --batch-silent is enabled then there's no need to print the current
5373 source location, and trying to do so risks an error message about
5374 missing source files. */
5375 if (stop_print_frame && !batch_silent)
5376 {
5377 int bpstat_ret;
5378 int source_flag;
5379 int do_frame_printing = 1;
5380 struct thread_info *tp = inferior_thread ();
5381
5382 bpstat_ret = bpstat_print (tp->stop_bpstat);
5383 switch (bpstat_ret)
5384 {
5385 case PRINT_UNKNOWN:
5386 /* If we had hit a shared library event breakpoint,
5387 bpstat_print would print out this message. If we hit
5388 an OS-level shared library event, do the same
5389 thing. */
5390 if (last.kind == TARGET_WAITKIND_LOADED)
5391 {
5392 printf_filtered (_("Stopped due to shared library event\n"));
5393 source_flag = SRC_LINE; /* something bogus */
5394 do_frame_printing = 0;
5395 break;
5396 }
5397
5398 /* FIXME: cagney/2002-12-01: Given that a frame ID does
5399 (or should) carry around the function and does (or
5400 should) use that when doing a frame comparison. */
5401 if (tp->stop_step
5402 && frame_id_eq (tp->step_frame_id,
5403 get_frame_id (get_current_frame ()))
5404 && step_start_function == find_pc_function (stop_pc))
5405 source_flag = SRC_LINE; /* finished step, just print source line */
5406 else
5407 source_flag = SRC_AND_LOC; /* print location and source line */
5408 break;
5409 case PRINT_SRC_AND_LOC:
5410 source_flag = SRC_AND_LOC; /* print location and source line */
5411 break;
5412 case PRINT_SRC_ONLY:
5413 source_flag = SRC_LINE;
5414 break;
5415 case PRINT_NOTHING:
5416 source_flag = SRC_LINE; /* something bogus */
5417 do_frame_printing = 0;
5418 break;
5419 default:
5420 internal_error (__FILE__, __LINE__, _("Unknown value."));
5421 }
5422
5423 /* The behavior of this routine with respect to the source
5424 flag is:
5425 SRC_LINE: Print only source line
5426 LOCATION: Print only location
5427 SRC_AND_LOC: Print location and source line */
5428 if (do_frame_printing)
5429 print_stack_frame (get_selected_frame (NULL), 0, source_flag);
5430
5431 /* Display the auto-display expressions. */
5432 do_displays ();
5433 }
5434 }
5435
5436 /* Save the function value return registers, if we care.
5437 We might be about to restore their previous contents. */
5438 if (inferior_thread ()->proceed_to_finish)
5439 {
5440 /* This should not be necessary. */
5441 if (stop_registers)
5442 regcache_xfree (stop_registers);
5443
5444 /* NB: The copy goes through to the target picking up the value of
5445 all the registers. */
5446 stop_registers = regcache_dup (get_current_regcache ());
5447 }
5448
5449 if (stop_stack_dummy == STOP_STACK_DUMMY)
5450 {
5451 /* Pop the empty frame that contains the stack dummy.
5452 This also restores inferior state prior to the call
5453 (struct inferior_thread_state). */
5454 struct frame_info *frame = get_current_frame ();
5455 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
5456 frame_pop (frame);
5457 /* frame_pop() calls reinit_frame_cache as the last thing it does
5458 which means there's currently no selected frame. We don't need
5459 to re-establish a selected frame if the dummy call returns normally,
5460 that will be done by restore_inferior_status. However, we do have
5461 to handle the case where the dummy call is returning after being
5462 stopped (e.g. the dummy call previously hit a breakpoint). We
5463 can't know which case we have so just always re-establish a
5464 selected frame here. */
5465 select_frame (get_current_frame ());
5466 }
5467
5468 done:
5469 annotate_stopped ();
5470
5471 /* Suppress the stop observer if we're in the middle of:
5472
5473 - a step n (n > 1), as there are still more steps to be done.
5474
5475 - a "finish" command, as the observer will be called in
5476 finish_command_continuation, so it can include the inferior
5477 function's return value.
5478
5479 - calling an inferior function, as we pretend the inferior didn't
5480 run at all. The return value of the call is handled by the
5481 expression evaluator, through call_function_by_hand. */
5482
5483 if (!target_has_execution
5484 || last.kind == TARGET_WAITKIND_SIGNALLED
5485 || last.kind == TARGET_WAITKIND_EXITED
5486 || (!inferior_thread ()->step_multi
5487 && !(inferior_thread ()->stop_bpstat
5488 && inferior_thread ()->proceed_to_finish)
5489 && !inferior_thread ()->in_infcall))
5490 {
5491 if (!ptid_equal (inferior_ptid, null_ptid))
5492 observer_notify_normal_stop (inferior_thread ()->stop_bpstat,
5493 stop_print_frame);
5494 else
5495 observer_notify_normal_stop (NULL, stop_print_frame);
5496 }
5497
5498 if (target_has_execution)
5499 {
5500 if (last.kind != TARGET_WAITKIND_SIGNALLED
5501 && last.kind != TARGET_WAITKIND_EXITED)
5502 /* Delete the breakpoint we stopped at, if it wants to be deleted.
5503 Delete any breakpoint that is to be deleted at the next stop. */
5504 breakpoint_auto_delete (inferior_thread ()->stop_bpstat);
5505 }
5506
5507 /* Try to get rid of automatically added inferiors that are no
5508 longer needed. Keeping those around slows down things linearly.
5509 Note that this never removes the current inferior. */
5510 prune_inferiors ();
5511 }
5512
5513 static int
5514 hook_stop_stub (void *cmd)
5515 {
5516 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
5517 return (0);
5518 }
5519 \f
5520 int
5521 signal_stop_state (int signo)
5522 {
5523 return signal_stop[signo];
5524 }
5525
5526 int
5527 signal_print_state (int signo)
5528 {
5529 return signal_print[signo];
5530 }
5531
5532 int
5533 signal_pass_state (int signo)
5534 {
5535 return signal_program[signo];
5536 }
5537
5538 int
5539 signal_stop_update (int signo, int state)
5540 {
5541 int ret = signal_stop[signo];
5542 signal_stop[signo] = state;
5543 return ret;
5544 }
5545
5546 int
5547 signal_print_update (int signo, int state)
5548 {
5549 int ret = signal_print[signo];
5550 signal_print[signo] = state;
5551 return ret;
5552 }
5553
5554 int
5555 signal_pass_update (int signo, int state)
5556 {
5557 int ret = signal_program[signo];
5558 signal_program[signo] = state;
5559 return ret;
5560 }
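
/* Illustrative sketch (not part of GDB): the signal_*_update functions
   above return the previous setting precisely so that a caller can
   override a table entry temporarily and restore it afterwards.  The
   caller below is hypothetical; note that a real caller would also
   have to tell the target about the change, as handle_command does via
   target_notice_signals.  */
#if 0
static void
demo_run_without_stopping_on (int signo)
{
  int old_stop = signal_stop_update (signo, 0);
  int old_print = signal_print_update (signo, 0);

  /* ... resume the inferior and let SIGNO be handled silently ... */

  signal_print_update (signo, old_print);
  signal_stop_update (signo, old_stop);
}
#endif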
5561
5562 static void
5563 sig_print_header (void)
5564 {
5565 printf_filtered (_("\
5566 Signal Stop\tPrint\tPass to program\tDescription\n"));
5567 }
5568
5569 static void
5570 sig_print_info (enum target_signal oursig)
5571 {
5572 const char *name = target_signal_to_name (oursig);
5573 int name_padding = 13 - strlen (name);
5574
5575 if (name_padding <= 0)
5576 name_padding = 0;
5577
5578 printf_filtered ("%s", name);
5579 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
5580 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
5581 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
5582 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
5583 printf_filtered ("%s\n", target_signal_to_string (oursig));
5584 }
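
/* Illustrative sketch (not part of GDB): the padding arithmetic above
   left-justifies the signal name in a fixed-width column, followed by
   the three Yes/No columns and the description.  The standalone
   program below reproduces the same layout with plain printf; the
   signal name, column values and description are made-up sample
   data.  */
#if 0
#include <stdio.h>
#include <string.h>

int
main (void)
{
  const char *name = "SIGINT";
  int name_padding = 13 - (int) strlen (name);

  if (name_padding <= 0)
    name_padding = 0;

  printf ("Signal        Stop\tPrint\tPass to program\tDescription\n");
  printf ("%s", name);
  printf ("%*.*s ", name_padding, name_padding, " ");
  printf ("%s\t", "Yes");		/* stop */
  printf ("%s\t", "Yes");		/* print */
  printf ("%s\t\t", "No");		/* pass to program */
  printf ("%s\n", "made-up description");
  return 0;
}
#endif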
5585
5586 /* Specify how various signals in the inferior should be handled. */
5587
5588 static void
5589 handle_command (char *args, int from_tty)
5590 {
5591 char **argv;
5592 int digits, wordlen;
5593 int sigfirst, signum, siglast;
5594 enum target_signal oursig;
5595 int allsigs;
5596 int nsigs;
5597 unsigned char *sigs;
5598 struct cleanup *old_chain;
5599
5600 if (args == NULL)
5601 {
5602 error_no_arg (_("signal to handle"));
5603 }
5604
5605 /* Allocate and zero an array of flags for which signals to handle. */
5606
5607 nsigs = (int) TARGET_SIGNAL_LAST;
5608 sigs = (unsigned char *) alloca (nsigs);
5609 memset (sigs, 0, nsigs);
5610
5611 /* Break the command line up into args. */
5612
5613 argv = gdb_buildargv (args);
5614 old_chain = make_cleanup_freeargv (argv);
5615
5616 /* Walk through the args, looking for signal oursigs, signal names, and
5617 actions. Signal numbers and signal names may be interspersed with
5618 actions, with the actions being performed for all signals cumulatively
5619 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
5620
5621 while (*argv != NULL)
5622 {
5623 wordlen = strlen (*argv);
5624 for (digits = 0; isdigit ((*argv)[digits]); digits++)
5625 {;
5626 }
5627 allsigs = 0;
5628 sigfirst = siglast = -1;
5629
5630 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
5631 {
5632 /* Apply action to all signals except those used by the
5633 debugger. Silently skip those. */
5634 allsigs = 1;
5635 sigfirst = 0;
5636 siglast = nsigs - 1;
5637 }
5638 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
5639 {
5640 SET_SIGS (nsigs, sigs, signal_stop);
5641 SET_SIGS (nsigs, sigs, signal_print);
5642 }
5643 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
5644 {
5645 UNSET_SIGS (nsigs, sigs, signal_program);
5646 }
5647 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
5648 {
5649 SET_SIGS (nsigs, sigs, signal_print);
5650 }
5651 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
5652 {
5653 SET_SIGS (nsigs, sigs, signal_program);
5654 }
5655 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
5656 {
5657 UNSET_SIGS (nsigs, sigs, signal_stop);
5658 }
5659 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
5660 {
5661 SET_SIGS (nsigs, sigs, signal_program);
5662 }
5663 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
5664 {
5665 UNSET_SIGS (nsigs, sigs, signal_print);
5666 UNSET_SIGS (nsigs, sigs, signal_stop);
5667 }
5668 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
5669 {
5670 UNSET_SIGS (nsigs, sigs, signal_program);
5671 }
5672 else if (digits > 0)
5673 {
5674 /* It is numeric. The numeric signal refers to our own
5675 internal signal numbering from target.h, not to host/target
5676 signal number. This is a feature; users really should be
5677 using symbolic names anyway, and the common ones like
5678 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
5679
5680 sigfirst = siglast = (int)
5681 target_signal_from_command (atoi (*argv));
5682 if ((*argv)[digits] == '-')
5683 {
5684 siglast = (int)
5685 target_signal_from_command (atoi ((*argv) + digits + 1));
5686 }
5687 if (sigfirst > siglast)
5688 {
5689 /* Bet he didn't figure we'd think of this case... */
5690 signum = sigfirst;
5691 sigfirst = siglast;
5692 siglast = signum;
5693 }
5694 }
5695 else
5696 {
5697 oursig = target_signal_from_name (*argv);
5698 if (oursig != TARGET_SIGNAL_UNKNOWN)
5699 {
5700 sigfirst = siglast = (int) oursig;
5701 }
5702 else
5703 {
5704 /* Not a number and not a recognized flag word => complain. */
5705 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
5706 }
5707 }
5708
5709 /* If any signal numbers or symbol names were found, set flags for
5710 which signals to apply actions to. */
5711
5712 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
5713 {
5714 switch ((enum target_signal) signum)
5715 {
5716 case TARGET_SIGNAL_TRAP:
5717 case TARGET_SIGNAL_INT:
5718 if (!allsigs && !sigs[signum])
5719 {
5720 if (query (_("%s is used by the debugger.\n\
5721 Are you sure you want to change it? "), target_signal_to_name ((enum target_signal) signum)))
5722 {
5723 sigs[signum] = 1;
5724 }
5725 else
5726 {
5727 printf_unfiltered (_("Not confirmed, unchanged.\n"));
5728 gdb_flush (gdb_stdout);
5729 }
5730 }
5731 break;
5732 case TARGET_SIGNAL_0:
5733 case TARGET_SIGNAL_DEFAULT:
5734 case TARGET_SIGNAL_UNKNOWN:
5735 /* Make sure that "all" doesn't print these. */
5736 break;
5737 default:
5738 sigs[signum] = 1;
5739 break;
5740 }
5741 }
5742
5743 argv++;
5744 }
5745
5746 for (signum = 0; signum < nsigs; signum++)
5747 if (sigs[signum])
5748 {
5749 target_notice_signals (inferior_ptid);
5750
5751 if (from_tty)
5752 {
5753 /* Show the results. */
5754 sig_print_header ();
5755 for (; signum < nsigs; signum++)
5756 if (sigs[signum])
5757 sig_print_info (signum);
5758 }
5759
5760 break;
5761 }
5762
5763 do_cleanups (old_chain);
5764 }
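
/* Illustrative sketch (not part of GDB): SET_SIGS and UNSET_SIGS are
   macros defined earlier in this file; as used above they presumably
   set or clear, in the given table (signal_stop, signal_print or
   signal_program), the entry for every signal currently selected in
   SIGS.  Equivalent helper functions might look like the following;
   the names are hypothetical.  */
#if 0
static void
demo_set_sigs (int nsigs, const unsigned char *sigs, unsigned char *table)
{
  int i;

  for (i = 0; i < nsigs; i++)
    if (sigs[i])
      table[i] = 1;
}

static void
demo_unset_sigs (int nsigs, const unsigned char *sigs, unsigned char *table)
{
  int i;

  for (i = 0; i < nsigs; i++)
    if (sigs[i])
      table[i] = 0;
}
#endif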
5765
5766 static void
5767 xdb_handle_command (char *args, int from_tty)
5768 {
5769 char **argv;
5770 struct cleanup *old_chain;
5771
5772 if (args == NULL)
5773 error_no_arg (_("xdb command"));
5774
5775 /* Break the command line up into args. */
5776
5777 argv = gdb_buildargv (args);
5778 old_chain = make_cleanup_freeargv (argv);
5779 if (argv[1] != (char *) NULL)
5780 {
5781 char *argBuf;
5782 int bufLen;
5783
5784 bufLen = strlen (argv[0]) + 20;
5785 argBuf = (char *) xmalloc (bufLen);
5786 if (argBuf)
5787 {
5788 int validFlag = 1;
5789 enum target_signal oursig;
5790
5791 oursig = target_signal_from_name (argv[0]);
5792 memset (argBuf, 0, bufLen);
5793 if (strcmp (argv[1], "Q") == 0)
5794 sprintf (argBuf, "%s %s", argv[0], "noprint");
5795 else
5796 {
5797 if (strcmp (argv[1], "s") == 0)
5798 {
5799 if (!signal_stop[oursig])
5800 sprintf (argBuf, "%s %s", argv[0], "stop");
5801 else
5802 sprintf (argBuf, "%s %s", argv[0], "nostop");
5803 }
5804 else if (strcmp (argv[1], "i") == 0)
5805 {
5806 if (!signal_program[oursig])
5807 sprintf (argBuf, "%s %s", argv[0], "pass");
5808 else
5809 sprintf (argBuf, "%s %s", argv[0], "nopass");
5810 }
5811 else if (strcmp (argv[1], "r") == 0)
5812 {
5813 if (!signal_print[oursig])
5814 sprintf (argBuf, "%s %s", argv[0], "print");
5815 else
5816 sprintf (argBuf, "%s %s", argv[0], "noprint");
5817 }
5818 else
5819 validFlag = 0;
5820 }
5821 if (validFlag)
5822 handle_command (argBuf, from_tty);
5823 else
5824 printf_filtered (_("Invalid signal handling flag.\n"));
5825 if (argBuf)
5826 xfree (argBuf);
5827 }
5828 }
5829 do_cleanups (old_chain);
5830 }
5831
5832 /* Print current contents of the tables set by the handle command.
5833 It is possible we should just be printing signals actually used
5834 by the current target (but for things to work right when switching
5835 targets, all signals should be in the signal tables). */
5836
5837 static void
5838 signals_info (char *signum_exp, int from_tty)
5839 {
5840 enum target_signal oursig;
5841 sig_print_header ();
5842
5843 if (signum_exp)
5844 {
5845 /* First see if this is a symbol name. */
5846 oursig = target_signal_from_name (signum_exp);
5847 if (oursig == TARGET_SIGNAL_UNKNOWN)
5848 {
5849 /* No, try numeric. */
5850 oursig =
5851 target_signal_from_command (parse_and_eval_long (signum_exp));
5852 }
5853 sig_print_info (oursig);
5854 return;
5855 }
5856
5857 printf_filtered ("\n");
5858 /* These ugly casts brought to you by the native VAX compiler. */
5859 for (oursig = TARGET_SIGNAL_FIRST;
5860 (int) oursig < (int) TARGET_SIGNAL_LAST;
5861 oursig = (enum target_signal) ((int) oursig + 1))
5862 {
5863 QUIT;
5864
5865 if (oursig != TARGET_SIGNAL_UNKNOWN
5866 && oursig != TARGET_SIGNAL_DEFAULT && oursig != TARGET_SIGNAL_0)
5867 sig_print_info (oursig);
5868 }
5869
5870 printf_filtered (_("\nUse the \"handle\" command to change these tables.\n"));
5871 }
5872
5873 /* The $_siginfo convenience variable is a bit special. We don't know
5874 for sure the type of the value until we actually have a chance to
5875 fetch the data. The type can change depending on gdbarch, so it is
5876 also dependent on which thread you have selected. This is handled by:
5877
5878 1. making $_siginfo be an internalvar that creates a new value on
5879 access.
5880
5881 2. making the value of $_siginfo be an lval_computed value. */
5882
5883 /* This function implements the lval_computed support for reading a
5884 $_siginfo value. */
5885
5886 static void
5887 siginfo_value_read (struct value *v)
5888 {
5889 LONGEST transferred;
5890
5891 transferred =
5892 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
5893 NULL,
5894 value_contents_all_raw (v),
5895 value_offset (v),
5896 TYPE_LENGTH (value_type (v)));
5897
5898 if (transferred != TYPE_LENGTH (value_type (v)))
5899 error (_("Unable to read siginfo"));
5900 }
5901
5902 /* This function implements the lval_computed support for writing a
5903 $_siginfo value. */
5904
5905 static void
5906 siginfo_value_write (struct value *v, struct value *fromval)
5907 {
5908 LONGEST transferred;
5909
5910 transferred = target_write (&current_target,
5911 TARGET_OBJECT_SIGNAL_INFO,
5912 NULL,
5913 value_contents_all_raw (fromval),
5914 value_offset (v),
5915 TYPE_LENGTH (value_type (fromval)));
5916
5917 if (transferred != TYPE_LENGTH (value_type (fromval)))
5918 error (_("Unable to write siginfo"));
5919 }
5920
5921 static struct lval_funcs siginfo_value_funcs =
5922 {
5923 siginfo_value_read,
5924 siginfo_value_write
5925 };
5926
5927 /* Return a new value with the correct type for the siginfo object of
5928 the current thread using architecture GDBARCH. Return a void value
5929 if there's no object available. */
5930
5931 static struct value *
5932 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var)
5933 {
5934 if (target_has_stack
5935 && !ptid_equal (inferior_ptid, null_ptid)
5936 && gdbarch_get_siginfo_type_p (gdbarch))
5937 {
5938 struct type *type = gdbarch_get_siginfo_type (gdbarch);
5939 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
5940 }
5941
5942 return allocate_value (builtin_type (gdbarch)->builtin_void);
5943 }
5944
5945 \f
5946 /* Inferior thread state.
5947 These are details related to the inferior itself, and don't include
5948 things like what frame the user had selected or what gdb was doing
5949 with the target at the time.
5950 For inferior function calls these are things we want to restore
5951 regardless of whether the function call successfully completes
5952 or the dummy frame has to be manually popped. */
5953
5954 struct inferior_thread_state
5955 {
5956 enum target_signal stop_signal;
5957 CORE_ADDR stop_pc;
5958 struct regcache *registers;
5959 };
5960
5961 struct inferior_thread_state *
5962 save_inferior_thread_state (void)
5963 {
5964 struct inferior_thread_state *inf_state = XMALLOC (struct inferior_thread_state);
5965 struct thread_info *tp = inferior_thread ();
5966
5967 inf_state->stop_signal = tp->stop_signal;
5968 inf_state->stop_pc = stop_pc;
5969
5970 inf_state->registers = regcache_dup (get_current_regcache ());
5971
5972 return inf_state;
5973 }
5974
5975 /* Restore inferior session state to INF_STATE. */
5976
5977 void
5978 restore_inferior_thread_state (struct inferior_thread_state *inf_state)
5979 {
5980 struct thread_info *tp = inferior_thread ();
5981
5982 tp->stop_signal = inf_state->stop_signal;
5983 stop_pc = inf_state->stop_pc;
5984
5985 /* The inferior can be gone if the user types "print exit(0)"
5986 (and perhaps other times). */
5987 if (target_has_execution)
5988 /* NB: The register write goes through to the target. */
5989 regcache_cpy (get_current_regcache (), inf_state->registers);
5990 regcache_xfree (inf_state->registers);
5991 xfree (inf_state);
5992 }
5993
5994 static void
5995 do_restore_inferior_thread_state_cleanup (void *state)
5996 {
5997 restore_inferior_thread_state (state);
5998 }
5999
6000 struct cleanup *
6001 make_cleanup_restore_inferior_thread_state (struct inferior_thread_state *inf_state)
6002 {
6003 return make_cleanup (do_restore_inferior_thread_state_cleanup, inf_state);
6004 }
6005
6006 void
6007 discard_inferior_thread_state (struct inferior_thread_state *inf_state)
6008 {
6009 regcache_xfree (inf_state->registers);
6010 xfree (inf_state);
6011 }
6012
6013 struct regcache *
6014 get_inferior_thread_state_regcache (struct inferior_thread_state *inf_state)
6015 {
6016 return inf_state->registers;
6017 }
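
/* Illustrative sketch (not part of GDB): a caller making an inferior
   function call (call_function_by_hand is the real one) would
   typically save this state up front, arrange for it to be restored by
   the cleanup machinery, and let do_cleanups put the registers and
   stop state back whether or not the call completed normally.  The
   function below is hypothetical.  */
#if 0
static void
demo_call_in_inferior (void)
{
  struct inferior_thread_state *saved = save_inferior_thread_state ();
  struct cleanup *old_chain
    = make_cleanup_restore_inferior_thread_state (saved);

  /* ... set up a dummy frame and run the inferior until it returns ... */

  /* Restore the saved registers and stop state (this also frees
     SAVED).  */
  do_cleanups (old_chain);
}
#endif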
6018
6019 /* Session related state for inferior function calls.
6020 These are the additional bits of state that need to be restored
6021 when an inferior function call successfully completes. */
6022
6023 struct inferior_status
6024 {
6025 bpstat stop_bpstat;
6026 int stop_step;
6027 enum stop_stack_kind stop_stack_dummy;
6028 int stopped_by_random_signal;
6029 int stepping_over_breakpoint;
6030 CORE_ADDR step_range_start;
6031 CORE_ADDR step_range_end;
6032 struct frame_id step_frame_id;
6033 struct frame_id step_stack_frame_id;
6034 enum step_over_calls_kind step_over_calls;
6035 CORE_ADDR step_resume_break_address;
6036 int stop_after_trap;
6037 int stop_soon;
6038
6039 /* ID of the selected frame when the inferior function call was made. */
6040 struct frame_id selected_frame_id;
6041
6042 int proceed_to_finish;
6043 int in_infcall;
6044 };
6045
6046 /* Save all of the information associated with the inferior<==>gdb
6047 connection. */
6048
6049 struct inferior_status *
6050 save_inferior_status (void)
6051 {
6052 struct inferior_status *inf_status = XMALLOC (struct inferior_status);
6053 struct thread_info *tp = inferior_thread ();
6054 struct inferior *inf = current_inferior ();
6055
6056 inf_status->stop_step = tp->stop_step;
6057 inf_status->stop_stack_dummy = stop_stack_dummy;
6058 inf_status->stopped_by_random_signal = stopped_by_random_signal;
6059 inf_status->stepping_over_breakpoint = tp->trap_expected;
6060 inf_status->step_range_start = tp->step_range_start;
6061 inf_status->step_range_end = tp->step_range_end;
6062 inf_status->step_frame_id = tp->step_frame_id;
6063 inf_status->step_stack_frame_id = tp->step_stack_frame_id;
6064 inf_status->step_over_calls = tp->step_over_calls;
6065 inf_status->stop_after_trap = stop_after_trap;
6066 inf_status->stop_soon = inf->stop_soon;
6067 /* Save original bpstat chain here; replace it with copy of chain.
6068 If caller's caller is walking the chain, they'll be happier if we
6069 hand them back the original chain when restore_inferior_status is
6070 called. */
6071 inf_status->stop_bpstat = tp->stop_bpstat;
6072 tp->stop_bpstat = bpstat_copy (tp->stop_bpstat);
6073 inf_status->proceed_to_finish = tp->proceed_to_finish;
6074 inf_status->in_infcall = tp->in_infcall;
6075
6076 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
6077
6078 return inf_status;
6079 }
6080
6081 static int
6082 restore_selected_frame (void *args)
6083 {
6084 struct frame_id *fid = (struct frame_id *) args;
6085 struct frame_info *frame;
6086
6087 frame = frame_find_by_id (*fid);
6088
6089 /* If frame_find_by_id returns NULL, the previously selected
6090 frame could not be found in the current stack. */
6091 if (frame == NULL)
6092 {
6093 warning (_("Unable to restore previously selected frame."));
6094 return 0;
6095 }
6096
6097 select_frame (frame);
6098
6099 return (1);
6100 }
6101
6102 /* Restore inferior session state to INF_STATUS. */
6103
6104 void
6105 restore_inferior_status (struct inferior_status *inf_status)
6106 {
6107 struct thread_info *tp = inferior_thread ();
6108 struct inferior *inf = current_inferior ();
6109
6110 tp->stop_step = inf_status->stop_step;
6111 stop_stack_dummy = inf_status->stop_stack_dummy;
6112 stopped_by_random_signal = inf_status->stopped_by_random_signal;
6113 tp->trap_expected = inf_status->stepping_over_breakpoint;
6114 tp->step_range_start = inf_status->step_range_start;
6115 tp->step_range_end = inf_status->step_range_end;
6116 tp->step_frame_id = inf_status->step_frame_id;
6117 tp->step_stack_frame_id = inf_status->step_stack_frame_id;
6118 tp->step_over_calls = inf_status->step_over_calls;
6119 stop_after_trap = inf_status->stop_after_trap;
6120 inf->stop_soon = inf_status->stop_soon;
6121 bpstat_clear (&tp->stop_bpstat);
6122 tp->stop_bpstat = inf_status->stop_bpstat;
6123 inf_status->stop_bpstat = NULL;
6124 tp->proceed_to_finish = inf_status->proceed_to_finish;
6125 tp->in_infcall = inf_status->in_infcall;
6126
6127 if (target_has_stack)
6128 {
6129 /* The point of catch_errors is that if the stack is clobbered,
6130 walking the stack might encounter a garbage pointer and
6131 error() trying to dereference it. */
6132 if (catch_errors
6133 (restore_selected_frame, &inf_status->selected_frame_id,
6134 "Unable to restore previously selected frame:\n",
6135 RETURN_MASK_ERROR) == 0)
6136 /* Error in restoring the selected frame. Select the innermost
6137 frame. */
6138 select_frame (get_current_frame ());
6139 }
6140
6141 xfree (inf_status);
6142 }
6143
6144 static void
6145 do_restore_inferior_status_cleanup (void *sts)
6146 {
6147 restore_inferior_status (sts);
6148 }
6149
6150 struct cleanup *
6151 make_cleanup_restore_inferior_status (struct inferior_status *inf_status)
6152 {
6153 return make_cleanup (do_restore_inferior_status_cleanup, inf_status);
6154 }
6155
6156 void
6157 discard_inferior_status (struct inferior_status *inf_status)
6158 {
6159 /* See save_inferior_status for info on stop_bpstat. */
6160 bpstat_clear (&inf_status->stop_bpstat);
6161 xfree (inf_status);
6162 }
6163 \f
6164 int
6165 inferior_has_forked (ptid_t pid, ptid_t *child_pid)
6166 {
6167 struct target_waitstatus last;
6168 ptid_t last_ptid;
6169
6170 get_last_target_status (&last_ptid, &last);
6171
6172 if (last.kind != TARGET_WAITKIND_FORKED)
6173 return 0;
6174
6175 if (!ptid_equal (last_ptid, pid))
6176 return 0;
6177
6178 *child_pid = last.value.related_pid;
6179 return 1;
6180 }
6181
6182 int
6183 inferior_has_vforked (ptid_t pid, ptid_t *child_pid)
6184 {
6185 struct target_waitstatus last;
6186 ptid_t last_ptid;
6187
6188 get_last_target_status (&last_ptid, &last);
6189
6190 if (last.kind != TARGET_WAITKIND_VFORKED)
6191 return 0;
6192
6193 if (!ptid_equal (last_ptid, pid))
6194 return 0;
6195
6196 *child_pid = last.value.related_pid;
6197 return 1;
6198 }
6199
6200 int
6201 inferior_has_execd (ptid_t pid, char **execd_pathname)
6202 {
6203 struct target_waitstatus last;
6204 ptid_t last_ptid;
6205
6206 get_last_target_status (&last_ptid, &last);
6207
6208 if (last.kind != TARGET_WAITKIND_EXECD)
6209 return 0;
6210
6211 if (!ptid_equal (last_ptid, pid))
6212 return 0;
6213
6214 *execd_pathname = xstrdup (last.value.execd_pathname);
6215 return 1;
6216 }
6217
6218 int
6219 inferior_has_called_syscall (ptid_t pid, int *syscall_number)
6220 {
6221 struct target_waitstatus last;
6222 ptid_t last_ptid;
6223
6224 get_last_target_status (&last_ptid, &last);
6225
6226 if (last.kind != TARGET_WAITKIND_SYSCALL_ENTRY
6227 && last.kind != TARGET_WAITKIND_SYSCALL_RETURN)
6228 return 0;
6229
6230 if (!ptid_equal (last_ptid, pid))
6231 return 0;
6232
6233 *syscall_number = last.value.syscall_number;
6234 return 1;
6235 }
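
/* Illustrative sketch (not part of GDB): the inferior_has_* queries
   above all follow the same pattern -- look at the last recorded
   target event and, if it matches both the event kind and the ptid,
   hand back the event's payload.  A hypothetical caller checking for a
   fork might look like this.  */
#if 0
static void
demo_check_for_fork (void)
{
  ptid_t child;

  if (inferior_has_forked (inferior_ptid, &child))
    printf_filtered (_("Last event was a fork; child is %s.\n"),
		     target_pid_to_str (child));
}
#endif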
6236
6237 /* Oft used ptids */
6238 ptid_t null_ptid;
6239 ptid_t minus_one_ptid;
6240
6241 /* Create a ptid given the necessary PID, LWP, and TID components. */
6242
6243 ptid_t
6244 ptid_build (int pid, long lwp, long tid)
6245 {
6246 ptid_t ptid;
6247
6248 ptid.pid = pid;
6249 ptid.lwp = lwp;
6250 ptid.tid = tid;
6251 return ptid;
6252 }
6253
6254 /* Create a ptid from just a pid. */
6255
6256 ptid_t
6257 pid_to_ptid (int pid)
6258 {
6259 return ptid_build (pid, 0, 0);
6260 }
6261
6262 /* Fetch the pid (process id) component from a ptid. */
6263
6264 int
6265 ptid_get_pid (ptid_t ptid)
6266 {
6267 return ptid.pid;
6268 }
6269
6270 /* Fetch the lwp (lightweight process) component from a ptid. */
6271
6272 long
6273 ptid_get_lwp (ptid_t ptid)
6274 {
6275 return ptid.lwp;
6276 }
6277
6278 /* Fetch the tid (thread id) component from a ptid. */
6279
6280 long
6281 ptid_get_tid (ptid_t ptid)
6282 {
6283 return ptid.tid;
6284 }
6285
6286 /* ptid_equal() is used to test equality of two ptids. */
6287
6288 int
6289 ptid_equal (ptid_t ptid1, ptid_t ptid2)
6290 {
6291 return (ptid1.pid == ptid2.pid && ptid1.lwp == ptid2.lwp
6292 && ptid1.tid == ptid2.tid);
6293 }
6294
6295 /* Returns true if PTID represents a process. */
6296
6297 int
6298 ptid_is_pid (ptid_t ptid)
6299 {
6300 if (ptid_equal (minus_one_ptid, ptid))
6301 return 0;
6302 if (ptid_equal (null_ptid, ptid))
6303 return 0;
6304
6305 return (ptid_get_lwp (ptid) == 0 && ptid_get_tid (ptid) == 0);
6306 }
6307
6308 int
6309 ptid_match (ptid_t ptid, ptid_t filter)
6310 {
6311 /* Since both parameters have the same type, prevent easy mistakes
6312 from happening. */
6313 gdb_assert (!ptid_equal (ptid, minus_one_ptid)
6314 && !ptid_equal (ptid, null_ptid));
6315
6316 if (ptid_equal (filter, minus_one_ptid))
6317 return 1;
6318 if (ptid_is_pid (filter)
6319 && ptid_get_pid (ptid) == ptid_get_pid (filter))
6320 return 1;
6321 else if (ptid_equal (ptid, filter))
6322 return 1;
6323
6324 return 0;
6325 }
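
/* Illustrative sketch (not part of GDB): ptid_match treats FILTER as a
   wildcard pattern -- minus_one_ptid matches every thread, a
   process-only ptid matches every thread of that process, and anything
   else must match exactly.  The checks below, using made-up pid/lwp
   values, all hold given the definitions above.  */
#if 0
static void
demo_ptid_match (void)
{
  ptid_t thread = ptid_build (1234, 7, 0);	/* lwp 7 of process 1234 */

  gdb_assert (ptid_match (thread, minus_one_ptid));	/* wildcard */
  gdb_assert (ptid_match (thread, pid_to_ptid (1234)));	/* whole process */
  gdb_assert (ptid_match (thread, thread));		/* exact match */

  /* A different process does not match.  */
  gdb_assert (!ptid_match (thread, pid_to_ptid (4321)));
}
#endif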
6326
6327 /* restore_inferior_ptid() will be used by the cleanup machinery
6328 to restore the inferior_ptid value saved in a call to
6329 save_inferior_ptid(). */
6330
6331 static void
6332 restore_inferior_ptid (void *arg)
6333 {
6334 ptid_t *saved_ptid_ptr = arg;
6335 inferior_ptid = *saved_ptid_ptr;
6336 xfree (arg);
6337 }
6338
6339 /* Save the value of inferior_ptid so that it may be restored by a
6340 later call to do_cleanups(). Returns the struct cleanup pointer
6341 needed for later doing the cleanup. */
6342
6343 struct cleanup *
6344 save_inferior_ptid (void)
6345 {
6346 ptid_t *saved_ptid_ptr;
6347
6348 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
6349 *saved_ptid_ptr = inferior_ptid;
6350 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
6351 }
6352 \f
6353
6354 /* User interface for reverse debugging:
6355 Set exec-direction / show exec-direction commands
6356 (returns error unless target implements to_set_exec_direction method). */
6357
6358 enum exec_direction_kind execution_direction = EXEC_FORWARD;
6359 static const char exec_forward[] = "forward";
6360 static const char exec_reverse[] = "reverse";
6361 static const char *exec_direction = exec_forward;
6362 static const char *exec_direction_names[] = {
6363 exec_forward,
6364 exec_reverse,
6365 NULL
6366 };
6367
6368 static void
6369 set_exec_direction_func (char *args, int from_tty,
6370 struct cmd_list_element *cmd)
6371 {
6372 if (target_can_execute_reverse)
6373 {
6374 if (!strcmp (exec_direction, exec_forward))
6375 execution_direction = EXEC_FORWARD;
6376 else if (!strcmp (exec_direction, exec_reverse))
6377 execution_direction = EXEC_REVERSE;
6378 }
6379 }
6380
6381 static void
6382 show_exec_direction_func (struct ui_file *out, int from_tty,
6383 struct cmd_list_element *cmd, const char *value)
6384 {
6385 switch (execution_direction) {
6386 case EXEC_FORWARD:
6387 fprintf_filtered (out, _("Forward.\n"));
6388 break;
6389 case EXEC_REVERSE:
6390 fprintf_filtered (out, _("Reverse.\n"));
6391 break;
6392 case EXEC_ERROR:
6393 default:
6394 fprintf_filtered (out,
6395 _("Forward (target `%s' does not support exec-direction).\n"),
6396 target_shortname);
6397 break;
6398 }
6399 }
6400
6401 /* User interface for non-stop mode. */
6402
6403 int non_stop = 0;
6404 static int non_stop_1 = 0;
6405
6406 static void
6407 set_non_stop (char *args, int from_tty,
6408 struct cmd_list_element *c)
6409 {
6410 if (target_has_execution)
6411 {
6412 non_stop_1 = non_stop;
6413 error (_("Cannot change this setting while the inferior is running."));
6414 }
6415
6416 non_stop = non_stop_1;
6417 }
6418
6419 static void
6420 show_non_stop (struct ui_file *file, int from_tty,
6421 struct cmd_list_element *c, const char *value)
6422 {
6423 fprintf_filtered (file,
6424 _("Controlling the inferior in non-stop mode is %s.\n"),
6425 value);
6426 }
6427
6428 static void
6429 show_schedule_multiple (struct ui_file *file, int from_tty,
6430 struct cmd_list_element *c, const char *value)
6431 {
6432 fprintf_filtered (file, _("\
6433 Resuming the execution of threads of all processes is %s.\n"), value);
6434 }
6435
6436 void
6437 _initialize_infrun (void)
6438 {
6439 int i;
6440 int numsigs;
6441 struct cmd_list_element *c;
6442
6443 add_info ("signals", signals_info, _("\
6444 What debugger does when program gets various signals.\n\
6445 Specify a signal as argument to print info on that signal only."));
6446 add_info_alias ("handle", "signals", 0);
6447
6448 add_com ("handle", class_run, handle_command, _("\
6449 Specify how to handle a signal.\n\
6450 Args are signals and actions to apply to those signals.\n\
6451 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6452 from 1-15 are allowed for compatibility with old versions of GDB.\n\
6453 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
6454 The special arg \"all\" is recognized to mean all signals except those\n\
6455 used by the debugger, typically SIGTRAP and SIGINT.\n\
6456 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
6457 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
6458 Stop means reenter debugger if this signal happens (implies print).\n\
6459 Print means print a message if this signal happens.\n\
6460 Pass means let program see this signal; otherwise program doesn't know.\n\
6461 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
6462 Pass and Stop may be combined."));
6463 if (xdb_commands)
6464 {
6465 add_com ("lz", class_info, signals_info, _("\
6466 What debugger does when program gets various signals.\n\
6467 Specify a signal as argument to print info on that signal only."));
6468 add_com ("z", class_run, xdb_handle_command, _("\
6469 Specify how to handle a signal.\n\
6470 Args are signals and actions to apply to those signals.\n\
6471 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6472 from 1-15 are allowed for compatibility with old versions of GDB.\n\
6473 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
6474 The special arg \"all\" is recognized to mean all signals except those\n\
6475 used by the debugger, typically SIGTRAP and SIGINT.\n\
6476 Recognized actions include \"s\" (toggles between stop and nostop), \n\
6477 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
6478 nopass), \"Q\" (noprint)\n\
6479 Stop means reenter debugger if this signal happens (implies print).\n\
6480 Print means print a message if this signal happens.\n\
6481 Pass means let program see this signal; otherwise program doesn't know.\n\
6482 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
6483 Pass and Stop may be combined."));
6484 }
6485
6486 if (!dbx_commands)
6487 stop_command = add_cmd ("stop", class_obscure,
6488 not_just_help_class_command, _("\
6489 There is no `stop' command, but you can set a hook on `stop'.\n\
6490 This allows you to set a list of commands to be run each time execution\n\
6491 of the program stops."), &cmdlist);
6492
6493 add_setshow_zinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
6494 Set inferior debugging."), _("\
6495 Show inferior debugging."), _("\
6496 When non-zero, inferior specific debugging is enabled."),
6497 NULL,
6498 show_debug_infrun,
6499 &setdebuglist, &showdebuglist);
6500
6501 add_setshow_boolean_cmd ("displaced", class_maintenance, &debug_displaced, _("\
6502 Set displaced stepping debugging."), _("\
6503 Show displaced stepping debugging."), _("\
6504 When non-zero, displaced stepping specific debugging is enabled."),
6505 NULL,
6506 show_debug_displaced,
6507 &setdebuglist, &showdebuglist);
6508
6509 add_setshow_boolean_cmd ("non-stop", no_class,
6510 &non_stop_1, _("\
6511 Set whether gdb controls the inferior in non-stop mode."), _("\
6512 Show whether gdb controls the inferior in non-stop mode."), _("\
6513 When debugging a multi-threaded program and this setting is\n\
6514 off (the default, also called all-stop mode), when one thread stops\n\
6515 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
6516 all other threads in the program while you interact with the thread of\n\
6517 interest. When you continue or step a thread, you can allow the other\n\
6518 threads to run, or have them remain stopped, but while you inspect any\n\
6519 thread's state, all threads stop.\n\
6520 \n\
6521 In non-stop mode, when one thread stops, other threads can continue\n\
6522 to run freely. You'll be able to step each thread independently,\n\
6523 leave it stopped or free to run as needed."),
6524 set_non_stop,
6525 show_non_stop,
6526 &setlist,
6527 &showlist);
6528
6529 numsigs = (int) TARGET_SIGNAL_LAST;
6530 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
6531 signal_print = (unsigned char *)
6532 xmalloc (sizeof (signal_print[0]) * numsigs);
6533 signal_program = (unsigned char *)
6534 xmalloc (sizeof (signal_program[0]) * numsigs);
6535 for (i = 0; i < numsigs; i++)
6536 {
6537 signal_stop[i] = 1;
6538 signal_print[i] = 1;
6539 signal_program[i] = 1;
6540 }
6541
6542 /* Signals caused by debugger's own actions
6543 should not be given to the program afterwards. */
6544 signal_program[TARGET_SIGNAL_TRAP] = 0;
6545 signal_program[TARGET_SIGNAL_INT] = 0;
6546
6547 /* Signals that are not errors should not normally enter the debugger. */
6548 signal_stop[TARGET_SIGNAL_ALRM] = 0;
6549 signal_print[TARGET_SIGNAL_ALRM] = 0;
6550 signal_stop[TARGET_SIGNAL_VTALRM] = 0;
6551 signal_print[TARGET_SIGNAL_VTALRM] = 0;
6552 signal_stop[TARGET_SIGNAL_PROF] = 0;
6553 signal_print[TARGET_SIGNAL_PROF] = 0;
6554 signal_stop[TARGET_SIGNAL_CHLD] = 0;
6555 signal_print[TARGET_SIGNAL_CHLD] = 0;
6556 signal_stop[TARGET_SIGNAL_IO] = 0;
6557 signal_print[TARGET_SIGNAL_IO] = 0;
6558 signal_stop[TARGET_SIGNAL_POLL] = 0;
6559 signal_print[TARGET_SIGNAL_POLL] = 0;
6560 signal_stop[TARGET_SIGNAL_URG] = 0;
6561 signal_print[TARGET_SIGNAL_URG] = 0;
6562 signal_stop[TARGET_SIGNAL_WINCH] = 0;
6563 signal_print[TARGET_SIGNAL_WINCH] = 0;
6564
6565 /* These signals are used internally by user-level thread
6566 implementations. (See signal(5) on Solaris.) Like the above
6567 signals, a healthy program receives and handles them as part of
6568 its normal operation. */
6569 signal_stop[TARGET_SIGNAL_LWP] = 0;
6570 signal_print[TARGET_SIGNAL_LWP] = 0;
6571 signal_stop[TARGET_SIGNAL_WAITING] = 0;
6572 signal_print[TARGET_SIGNAL_WAITING] = 0;
6573 signal_stop[TARGET_SIGNAL_CANCEL] = 0;
6574 signal_print[TARGET_SIGNAL_CANCEL] = 0;
6575
6576 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
6577 &stop_on_solib_events, _("\
6578 Set stopping for shared library events."), _("\
6579 Show stopping for shared library events."), _("\
6580 If nonzero, gdb will give control to the user when the dynamic linker\n\
6581 notifies gdb of shared library events. The most common event of interest\n\
6582 to the user would be loading/unloading of a new library."),
6583 NULL,
6584 show_stop_on_solib_events,
6585 &setlist, &showlist);
6586
  add_setshow_enum_cmd ("follow-fork-mode", class_run,
                        follow_fork_mode_kind_names,
                        &follow_fork_mode_string, _("\
Set debugger response to a program call of fork or vfork."), _("\
Show debugger response to a program call of fork or vfork."), _("\
A fork or vfork creates a new process. follow-fork-mode can be:\n\
  parent  - the original process is debugged after a fork\n\
  child   - the new process is debugged after a fork\n\
The unfollowed process will continue to run.\n\
By default, the debugger will follow the parent process."),
                        NULL,
                        show_follow_fork_mode_string,
                        &setlist, &showlist);
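  /* For example, "set follow-fork-mode child" makes gdb follow the new
     child process at the next fork; whether gdb stays attached to the
     unfollowed parent is controlled by detach-on-fork below.  */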

  add_setshow_enum_cmd ("follow-exec-mode", class_run,
                        follow_exec_mode_names,
                        &follow_exec_mode_string, _("\
Set debugger response to a program call of exec."), _("\
Show debugger response to a program call of exec."), _("\
An exec call replaces the program image of a process.\n\
\n\
follow-exec-mode can be:\n\
\n\
new - the debugger creates a new inferior and rebinds the process\n\
to this new inferior. The program the process was running before\n\
the exec call can be restarted afterwards by restarting the original\n\
inferior.\n\
\n\
same - the debugger keeps the process bound to the same inferior.\n\
The new executable image replaces the previous executable loaded in\n\
the inferior. Restarting the inferior after the exec call restarts\n\
the executable the process was running after the exec call.\n\
\n\
By default, the debugger will use the same inferior."),
                        NULL,
                        show_follow_exec_mode_string,
                        &setlist, &showlist);

  add_setshow_enum_cmd ("scheduler-locking", class_run,
                        scheduler_enums, &scheduler_mode, _("\
Set mode for locking scheduler during execution."), _("\
Show mode for locking scheduler during execution."), _("\
off  == no locking (threads may preempt at any time)\n\
on   == full locking (no thread except the current thread may run)\n\
step == scheduler locked during every single-step operation.\n\
        In this mode, no other thread may run during a step command.\n\
        Other threads may run while stepping over a function call ('next')."),
                        set_schedlock_func,	/* traps on target vector */
                        show_scheduler_mode,
                        &setlist, &showlist);

  add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
Set mode for resuming threads of all processes."), _("\
Show mode for resuming threads of all processes."), _("\
When on, execution commands (such as 'continue' or 'next') resume all\n\
threads of all processes. When off (which is the default), execution\n\
commands only resume the threads of the current process. The set of\n\
threads that are resumed is further refined by the scheduler-locking\n\
mode (see help set scheduler-locking)."),
                           NULL,
                           show_schedule_multiple,
                           &setlist, &showlist);

  add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
Set mode of the step operation."), _("\
Show mode of the step operation."), _("\
When set, doing a step over a function without debug line information\n\
will stop at the first instruction of that function. Otherwise, the\n\
function is skipped and the step command stops at a different source line."),
                           NULL,
                           show_step_stop_if_no_debug,
                           &setlist, &showlist);

  add_setshow_enum_cmd ("displaced-stepping", class_run,
                        can_use_displaced_stepping_enum,
                        &can_use_displaced_stepping, _("\
Set debugger's willingness to use displaced stepping."), _("\
Show debugger's willingness to use displaced stepping."), _("\
If on, gdb will use displaced stepping to step over breakpoints if it is\n\
supported by the target architecture. If off, gdb will not use displaced\n\
stepping to step over breakpoints, even if it is supported by the target\n\
architecture. If auto (which is the default), gdb will use displaced stepping\n\
if the target architecture supports it and non-stop mode is active, but will not\n\
use it in all-stop mode (see help set non-stop)."),
                        NULL,
                        show_can_use_displaced_stepping,
                        &setlist, &showlist);

  add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
                        &exec_direction, _("Set direction of execution.\n\
Options are 'forward' or 'reverse'."),
                        _("Show direction of execution (forward/reverse)."),
                        _("Tells gdb whether to execute forward or backward."),
                        set_exec_direction_func, show_exec_direction_func,
                        &setlist, &showlist);

  /* Set/show detach-on-fork: user-settable mode. */

  add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
Set whether gdb will detach the child of a fork."), _("\
Show whether gdb will detach the child of a fork."), _("\
Tells gdb whether to detach the child of a fork."),
                           NULL, NULL, &setlist, &showlist);

  /* ptid initializations */
  null_ptid = ptid_build (0, 0, 0);
  minus_one_ptid = ptid_build (-1, 0, 0);
  inferior_ptid = null_ptid;
  target_last_wait_ptid = minus_one_ptid;

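  /* Let infrun react to thread and inferior lifetime events: ptid
     changes, asynchronous stop requests, thread exits, and inferior
     exits.  */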
  observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
  observer_attach_thread_stop_requested (infrun_thread_stop_requested);
  observer_attach_thread_exit (infrun_thread_thread_exit);
  observer_attach_inferior_exit (infrun_inferior_exit);

  /* Explicitly create without lookup, since that tries to create a
     value with a void typed value, and when we get here, gdbarch
     isn't initialized yet. At this point, we're quite sure there
     isn't another convenience variable of the same name. */
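  /* On targets that can report extra signal information, the user can
     then inspect it with "print $_siginfo".  */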
  create_internalvar_type_lazy ("_siginfo", siginfo_make_value);
}