1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995,
5 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
6 2008, 2009, 2010 Free Software Foundation, Inc.
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "gdb_string.h"
25 #include <ctype.h>
26 #include "symtab.h"
27 #include "frame.h"
28 #include "inferior.h"
29 #include "exceptions.h"
30 #include "breakpoint.h"
31 #include "gdb_wait.h"
32 #include "gdbcore.h"
33 #include "gdbcmd.h"
34 #include "cli/cli-script.h"
35 #include "target.h"
36 #include "gdbthread.h"
37 #include "annotate.h"
38 #include "symfile.h"
39 #include "top.h"
40 #include <signal.h>
41 #include "inf-loop.h"
42 #include "regcache.h"
43 #include "value.h"
44 #include "observer.h"
45 #include "language.h"
46 #include "solib.h"
47 #include "main.h"
48 #include "gdb_assert.h"
49 #include "mi/mi-common.h"
50 #include "event-top.h"
51 #include "record.h"
52 #include "inline-frame.h"
53 #include "jit.h"
54 #include "tracepoint.h"
55
56 /* Prototypes for local functions */
57
58 static void signals_info (char *, int);
59
60 static void handle_command (char *, int);
61
62 static void sig_print_info (enum target_signal);
63
64 static void sig_print_header (void);
65
66 static void resume_cleanups (void *);
67
68 static int hook_stop_stub (void *);
69
70 static int restore_selected_frame (void *);
71
72 static int follow_fork (void);
73
74 static void set_schedlock_func (char *args, int from_tty,
75 struct cmd_list_element *c);
76
77 static int currently_stepping (struct thread_info *tp);
78
79 static int currently_stepping_or_nexting_callback (struct thread_info *tp,
80 void *data);
81
82 static void xdb_handle_command (char *args, int from_tty);
83
84 static int prepare_to_proceed (int);
85
86 static void print_exited_reason (int exitstatus);
87
88 static void print_signal_exited_reason (enum target_signal siggnal);
89
90 static void print_no_history_reason (void);
91
92 static void print_signal_received_reason (enum target_signal siggnal);
93
94 static void print_end_stepping_range_reason (void);
95
96 void _initialize_infrun (void);
97
98 void nullify_last_target_wait_ptid (void);
99
100 /* When set, stop the 'step' command if we enter a function which has
101 no line number information. The normal behavior is that we step
102 over such functions. */
103 int step_stop_if_no_debug = 0;
104 static void
105 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
106 struct cmd_list_element *c, const char *value)
107 {
108 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
109 }
110
111 /* In asynchronous mode, but simulating synchronous execution. */
112
113 int sync_execution = 0;
114
115 /* wait_for_inferior and normal_stop use this to notify the user
116 when the inferior stopped in a different thread than it had been
117 running in. */
118
119 static ptid_t previous_inferior_ptid;
120
121 /* Default behavior is to detach newly forked processes (legacy). */
122 int detach_fork = 1;
123
124 int debug_displaced = 0;
125 static void
126 show_debug_displaced (struct ui_file *file, int from_tty,
127 struct cmd_list_element *c, const char *value)
128 {
129 fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
130 }
131
132 int debug_infrun = 0;
133 static void
134 show_debug_infrun (struct ui_file *file, int from_tty,
135 struct cmd_list_element *c, const char *value)
136 {
137 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
138 }
139
140 /* If the program uses ELF-style shared libraries, then calls to
141 functions in shared libraries go through stubs, which live in a
142 table called the PLT (Procedure Linkage Table). The first time the
143 function is called, the stub sends control to the dynamic linker,
144 which looks up the function's real address, patches the stub so
145 that future calls will go directly to the function, and then passes
146 control to the function.
147
148 If we are stepping at the source level, we don't want to see any of
149 this --- we just want to skip over the stub and the dynamic linker.
150 The simple approach is to single-step until control leaves the
151 dynamic linker.
152
153 However, on some systems (e.g., Red Hat's 5.2 distribution) the
154 dynamic linker calls functions in the shared C library, so you
155 can't tell from the PC alone whether the dynamic linker is still
156 running. In this case, we use a step-resume breakpoint to get us
157 past the dynamic linker, as if we were using "next" to step over a
158 function call.
159
160 in_solib_dynsym_resolve_code() says whether we're in the dynamic
161 linker code or not. Normally, this means we single-step. However,
162 if SKIP_SOLIB_RESOLVER returns non-zero, then its value is an
163 address where we can place a step-resume breakpoint to get past the
164 linker's symbol resolution function.
165
166 in_solib_dynsym_resolve_code() can generally be implemented in a
167 pretty portable way, by comparing the PC against the address ranges
168 of the dynamic linker's sections.
169
170 SKIP_SOLIB_RESOLVER is generally going to be system-specific, since
171 it depends on internal details of the dynamic linker. It's usually
172 not too hard to figure out where to put a breakpoint, but it
173 certainly isn't portable. SKIP_SOLIB_RESOLVER should do plenty of
174 sanity checking. If it can't figure things out, returning zero and
175 getting the (possibly confusing) stepping behavior is better than
176 signalling an error, which will obscure the change in the
177 inferior's state. */
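
/* Illustration only, not part of GDB: a minimal sketch of the decision
   described above, roughly as it is applied elsewhere in this file when
   a step lands in dynamic linker code.  The locals PC and GDBARCH are
   assumed to be in scope; the empty branches stand in for the real
   step-resume / single-step machinery.  */
#if 0
if (in_solib_dynsym_resolve_code (pc))
  {
    CORE_ADDR resolver_return = gdbarch_skip_solib_resolver (gdbarch, pc);

    if (resolver_return != 0)
      {
        /* Plant a step-resume breakpoint at RESOLVER_RETURN and let the
           inferior run past the resolver, as if "next"-ing over a call.  */
      }
    else
      {
        /* Fall back to single-stepping until control leaves the dynamic
           linker.  */
      }
  }
#endif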
178
179 /* This function returns TRUE if pc is the address of an instruction
180 that lies within the dynamic linker (such as the event hook, or the
181 dld itself).
182
183 This function must be used only when a dynamic linker event has
184 been caught, and the inferior is being stepped out of the hook, or
185 undefined results are guaranteed. */
186
187 #ifndef SOLIB_IN_DYNAMIC_LINKER
188 #define SOLIB_IN_DYNAMIC_LINKER(pid,pc) 0
189 #endif
190
191 /* "Observer mode" is somewhat like a more extreme version of
192 non-stop, in which all GDB operations that might affect the
193 target's execution have been disabled. */
194
195 static int non_stop_1 = 0;
196
197 int observer_mode = 0;
198 static int observer_mode_1 = 0;
199
200 static void
201 set_observer_mode (char *args, int from_tty,
202 struct cmd_list_element *c)
203 {
204 extern int pagination_enabled;
205
206 if (target_has_execution)
207 {
208 observer_mode_1 = observer_mode;
209 error (_("Cannot change this setting while the inferior is running."));
210 }
211
212 observer_mode = observer_mode_1;
213
214 may_write_registers = !observer_mode;
215 may_write_memory = !observer_mode;
216 may_insert_breakpoints = !observer_mode;
217 may_insert_tracepoints = !observer_mode;
218 /* We can insert fast tracepoints in or out of observer mode,
219 but enable them if we're going into this mode. */
220 if (observer_mode)
221 may_insert_fast_tracepoints = 1;
222 may_stop = !observer_mode;
223 update_target_permissions ();
224
225 /* Going *into* observer mode we must force non-stop, then
226 going out we leave it that way. */
227 if (observer_mode)
228 {
229 target_async_permitted = 1;
230 pagination_enabled = 0;
231 non_stop = non_stop_1 = 1;
232 }
233
234 if (from_tty)
235 printf_filtered (_("Observer mode is now %s.\n"),
236 (observer_mode ? "on" : "off"));
237 }
238
239 static void
240 show_observer_mode (struct ui_file *file, int from_tty,
241 struct cmd_list_element *c, const char *value)
242 {
243 fprintf_filtered (file, _("Observer mode is %s.\n"), value);
244 }
245
246 /* This updates the value of observer mode based on changes in
247 permissions. Note that we are deliberately ignoring the values of
248 may-write-registers and may-write-memory, since the user may have
249 reason to enable these during a session, for instance to turn on a
250 debugging-related global. */
251
252 void
253 update_observer_mode (void)
254 {
255 int newval;
256
257 newval = (!may_insert_breakpoints
258 && !may_insert_tracepoints
259 && may_insert_fast_tracepoints
260 && !may_stop
261 && non_stop);
262
263 /* Let the user know if things change. */
264 if (newval != observer_mode)
265 printf_filtered (_("Observer mode is now %s.\n"),
266 (newval ? "on" : "off"));
267
268 observer_mode = observer_mode_1 = newval;
269 }
270
271 /* Tables of how to react to signals; the user sets them. */
272
273 static unsigned char *signal_stop;
274 static unsigned char *signal_print;
275 static unsigned char *signal_program;
276
277 #define SET_SIGS(nsigs,sigs,flags) \
278 do { \
279 int signum = (nsigs); \
280 while (signum-- > 0) \
281 if ((sigs)[signum]) \
282 (flags)[signum] = 1; \
283 } while (0)
284
285 #define UNSET_SIGS(nsigs,sigs,flags) \
286 do { \
287 int signum = (nsigs); \
288 while (signum-- > 0) \
289 if ((sigs)[signum]) \
290 (flags)[signum] = 0; \
291 } while (0)
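
/* Illustration only, not part of GDB: a minimal sketch of how the two
   helpers above are meant to be used.  Given a mask of signals selected
   by a "handle" command, they flip the matching entries in the
   per-signal flag arrays.  The locals NSIGS and SIGS below are
   hypothetical.  */
#if 0
{
  int nsigs = (int) TARGET_SIGNAL_LAST;
  unsigned char *sigs = alloca (nsigs);

  memset (sigs, 0, nsigs);
  sigs[TARGET_SIGNAL_INT] = 1;	/* Select just SIGINT.  */

  /* Roughly the effect of "handle SIGINT nostop print".  */
  UNSET_SIGS (nsigs, sigs, signal_stop);
  SET_SIGS (nsigs, sigs, signal_print);
}
#endif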
292
293 /* Value to pass to target_resume() to cause all threads to resume */
294
295 #define RESUME_ALL minus_one_ptid
296
297 /* Command list pointer for the "stop" placeholder. */
298
299 static struct cmd_list_element *stop_command;
300
301 /* Function inferior was in as of last step command. */
302
303 static struct symbol *step_start_function;
304
305 /* Nonzero if we want to give control to the user when we're notified
306 of shared library events by the dynamic linker. */
307 int stop_on_solib_events;
308 static void
309 show_stop_on_solib_events (struct ui_file *file, int from_tty,
310 struct cmd_list_element *c, const char *value)
311 {
312 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
313 value);
314 }
315
316 /* Nonzero means expecting a trace trap
317 and should stop the inferior and return silently when it happens. */
318
319 int stop_after_trap;
320
321 /* Save register contents here when executing a "finish" command, or when we
322 are about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
323 Thus this contains the return value from the called function (assuming
324 values are returned in a register). */
325
326 struct regcache *stop_registers;
327
328 /* Nonzero after stop if current stack frame should be printed. */
329
330 static int stop_print_frame;
331
332 /* This is a cached copy of the pid/waitstatus of the last event
333 returned by target_wait()/deprecated_target_wait_hook(). This
334 information is returned by get_last_target_status(). */
335 static ptid_t target_last_wait_ptid;
336 static struct target_waitstatus target_last_waitstatus;
337
338 static void context_switch (ptid_t ptid);
339
340 void init_thread_stepping_state (struct thread_info *tss);
341
342 void init_infwait_state (void);
343
344 static const char follow_fork_mode_child[] = "child";
345 static const char follow_fork_mode_parent[] = "parent";
346
347 static const char *follow_fork_mode_kind_names[] = {
348 follow_fork_mode_child,
349 follow_fork_mode_parent,
350 NULL
351 };
352
353 static const char *follow_fork_mode_string = follow_fork_mode_parent;
354 static void
355 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
356 struct cmd_list_element *c, const char *value)
357 {
358 fprintf_filtered (file, _("\
359 Debugger response to a program call of fork or vfork is \"%s\".\n"),
360 value);
361 }
362 \f
363
364 /* Tell the target to follow the fork we're stopped at. Returns true
365 if the inferior should be resumed; false, if the target for some
366 reason decided it's best not to resume. */
367
368 static int
369 follow_fork (void)
370 {
371 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
372 int should_resume = 1;
373 struct thread_info *tp;
374
375 /* Copy user stepping state to the new inferior thread. FIXME: the
376 followed fork child thread should have a copy of most of the
377 parent thread structure's run control related fields, not just these.
378 Initialized to avoid "may be used uninitialized" warnings from gcc. */
379 struct breakpoint *step_resume_breakpoint = NULL;
380 CORE_ADDR step_range_start = 0;
381 CORE_ADDR step_range_end = 0;
382 struct frame_id step_frame_id = { 0 };
383
384 if (!non_stop)
385 {
386 ptid_t wait_ptid;
387 struct target_waitstatus wait_status;
388
389 /* Get the last target status returned by target_wait(). */
390 get_last_target_status (&wait_ptid, &wait_status);
391
392 /* If not stopped at a fork event, then there's nothing else to
393 do. */
394 if (wait_status.kind != TARGET_WAITKIND_FORKED
395 && wait_status.kind != TARGET_WAITKIND_VFORKED)
396 return 1;
397
398 /* Check if we switched over from WAIT_PTID, since the event was
399 reported. */
400 if (!ptid_equal (wait_ptid, minus_one_ptid)
401 && !ptid_equal (inferior_ptid, wait_ptid))
402 {
403 /* We did. Switch back to WAIT_PTID thread, to tell the
404 target to follow it (in either direction). We'll
405 afterwards refuse to resume, and inform the user what
406 happened. */
407 switch_to_thread (wait_ptid);
408 should_resume = 0;
409 }
410 }
411
412 tp = inferior_thread ();
413
414 /* If there were any forks/vforks that were caught and are now to be
415 followed, then do so now. */
416 switch (tp->pending_follow.kind)
417 {
418 case TARGET_WAITKIND_FORKED:
419 case TARGET_WAITKIND_VFORKED:
420 {
421 ptid_t parent, child;
422
423 /* If the user did a next/step, etc, over a fork call,
424 preserve the stepping state in the fork child. */
425 if (follow_child && should_resume)
426 {
427 step_resume_breakpoint
428 = clone_momentary_breakpoint (tp->step_resume_breakpoint);
429 step_range_start = tp->step_range_start;
430 step_range_end = tp->step_range_end;
431 step_frame_id = tp->step_frame_id;
432
433 /* For now, delete the parent's sr breakpoint, otherwise,
434 parent/child sr breakpoints are considered duplicates,
435 and the child version will not be installed. Remove
436 this when the breakpoints module becomes aware of
437 inferiors and address spaces. */
438 delete_step_resume_breakpoint (tp);
439 tp->step_range_start = 0;
440 tp->step_range_end = 0;
441 tp->step_frame_id = null_frame_id;
442 }
443
444 parent = inferior_ptid;
445 child = tp->pending_follow.value.related_pid;
446
447 /* Tell the target to do whatever is necessary to follow
448 either parent or child. */
449 if (target_follow_fork (follow_child))
450 {
451 /* Target refused to follow, or there's some other reason
452 we shouldn't resume. */
453 should_resume = 0;
454 }
455 else
456 {
457 /* This pending follow fork event is now handled, one way
458 or another. The previously selected thread may be gone
459 from the lists by now, but if it is still around, we need
460 to clear the pending follow request. */
461 tp = find_thread_ptid (parent);
462 if (tp)
463 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
464
465 /* This makes sure we don't try to apply the "Switched
466 over from WAIT_PID" logic above. */
467 nullify_last_target_wait_ptid ();
468
469 /* If we followed the child, switch to it... */
470 if (follow_child)
471 {
472 switch_to_thread (child);
473
474 /* ... and preserve the stepping state, in case the
475 user was stepping over the fork call. */
476 if (should_resume)
477 {
478 tp = inferior_thread ();
479 tp->step_resume_breakpoint = step_resume_breakpoint;
480 tp->step_range_start = step_range_start;
481 tp->step_range_end = step_range_end;
482 tp->step_frame_id = step_frame_id;
483 }
484 else
485 {
486 /* If we get here, it was because we're trying to
487 resume from a fork catchpoint, but the user
488 has switched threads away from the thread that
489 forked. In that case, the resume command
490 issued is most likely not applicable to the
491 child, so just warn, and refuse to resume. */
492 warning (_("\
493 Not resuming: switched threads before following fork child.\n"));
494 }
495
496 /* Reset breakpoints in the child as appropriate. */
497 follow_inferior_reset_breakpoints ();
498 }
499 else
500 switch_to_thread (parent);
501 }
502 }
503 break;
504 case TARGET_WAITKIND_SPURIOUS:
505 /* Nothing to follow. */
506 break;
507 default:
508 internal_error (__FILE__, __LINE__,
509 "Unexpected pending_follow.kind %d\n",
510 tp->pending_follow.kind);
511 break;
512 }
513
514 return should_resume;
515 }
516
517 void
518 follow_inferior_reset_breakpoints (void)
519 {
520 struct thread_info *tp = inferior_thread ();
521
522 /* Was there a step_resume breakpoint? (There was if the user
523 did a "next" at the fork() call.) If so, explicitly reset its
524 thread number.
525
526 step_resumes are a form of bp that are made to be per-thread.
527 Since we created the step_resume bp when the parent process
528 was being debugged, and now are switching to the child process,
529 from the breakpoint package's viewpoint, that's a switch of
530 "threads". We must update the bp's notion of which thread
531 it is for, or it'll be ignored when it triggers. */
532
533 if (tp->step_resume_breakpoint)
534 breakpoint_re_set_thread (tp->step_resume_breakpoint);
535
536 /* Reinsert all breakpoints in the child. The user may have set
537 breakpoints after catching the fork, in which case those
538 were never set in the child, but only in the parent. This makes
539 sure the inserted breakpoints match the breakpoint list. */
540
541 breakpoint_re_set ();
542 insert_breakpoints ();
543 }
544
545 /* The child has exited or execed: resume threads of the parent the
546 user wanted to be executing. */
547
548 static int
549 proceed_after_vfork_done (struct thread_info *thread,
550 void *arg)
551 {
552 int pid = * (int *) arg;
553
554 if (ptid_get_pid (thread->ptid) == pid
555 && is_running (thread->ptid)
556 && !is_executing (thread->ptid)
557 && !thread->stop_requested
558 && thread->stop_signal == TARGET_SIGNAL_0)
559 {
560 if (debug_infrun)
561 fprintf_unfiltered (gdb_stdlog,
562 "infrun: resuming vfork parent thread %s\n",
563 target_pid_to_str (thread->ptid));
564
565 switch_to_thread (thread->ptid);
566 clear_proceed_status ();
567 proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);
568 }
569
570 return 0;
571 }
572
573 /* Called whenever we notice an exec or exit event, to handle
574 detaching or resuming a vfork parent. */
575
576 static void
577 handle_vfork_child_exec_or_exit (int exec)
578 {
579 struct inferior *inf = current_inferior ();
580
581 if (inf->vfork_parent)
582 {
583 int resume_parent = -1;
584
585 /* This exec or exit marks the end of the shared memory region
586 between the parent and the child. If the user wanted to
587 detach from the parent, now is the time. */
588
589 if (inf->vfork_parent->pending_detach)
590 {
591 struct thread_info *tp;
592 struct cleanup *old_chain;
593 struct program_space *pspace;
594 struct address_space *aspace;
595
596 /* follow-fork child, detach-on-fork on */
597
598 old_chain = make_cleanup_restore_current_thread ();
599
600 /* We're letting go of the parent. */
601 tp = any_live_thread_of_process (inf->vfork_parent->pid);
602 switch_to_thread (tp->ptid);
603
604 /* We're about to detach from the parent, which implicitly
605 removes breakpoints from its address space. There's a
606 catch here: we want to reuse the spaces for the child,
607 but parent and child are still sharing the pspace at this
608 point, although the exec in reality makes the kernel give
609 the child a fresh set of new pages. The problem here is
610 that the breakpoints module, being unaware of this, would
611 likely choose the child process to write to the parent
612 address space. Swapping the child temporarily away from
613 the spaces has the desired effect. Yes, this is "sort
614 of" a hack. */
615
616 pspace = inf->pspace;
617 aspace = inf->aspace;
618 inf->aspace = NULL;
619 inf->pspace = NULL;
620
621 if (debug_infrun || info_verbose)
622 {
623 target_terminal_ours ();
624
625 if (exec)
626 fprintf_filtered (gdb_stdlog,
627 "Detaching vfork parent process %d after child exec.\n",
628 inf->vfork_parent->pid);
629 else
630 fprintf_filtered (gdb_stdlog,
631 "Detaching vfork parent process %d after child exit.\n",
632 inf->vfork_parent->pid);
633 }
634
635 target_detach (NULL, 0);
636
637 /* Put it back. */
638 inf->pspace = pspace;
639 inf->aspace = aspace;
640
641 do_cleanups (old_chain);
642 }
643 else if (exec)
644 {
645 /* We're staying attached to the parent, so really give the
646 child a new address space. */
647 inf->pspace = add_program_space (maybe_new_address_space ());
648 inf->aspace = inf->pspace->aspace;
649 inf->removable = 1;
650 set_current_program_space (inf->pspace);
651
652 resume_parent = inf->vfork_parent->pid;
653
654 /* Break the bonds. */
655 inf->vfork_parent->vfork_child = NULL;
656 }
657 else
658 {
659 struct cleanup *old_chain;
660 struct program_space *pspace;
661
662 /* If this is a vfork child exiting, then the pspace and
663 aspaces were shared with the parent. Since we're
664 reporting the process exit, we'll be mourning all that is
665 found in the address space, and switching to null_ptid,
666 preparing to start a new inferior. But, since we don't
667 want to clobber the parent's address/program spaces, we
668 go ahead and create a new one for this exiting
669 inferior. */
670
671 /* Switch to null_ptid, so that clone_program_space doesn't want
672 to read the selected frame of a dead process. */
673 old_chain = save_inferior_ptid ();
674 inferior_ptid = null_ptid;
675
676 /* This inferior is dead, so avoid giving the breakpoints
677 module the option to write through to it (cloning a
678 program space resets breakpoints). */
679 inf->aspace = NULL;
680 inf->pspace = NULL;
681 pspace = add_program_space (maybe_new_address_space ());
682 set_current_program_space (pspace);
683 inf->removable = 1;
684 clone_program_space (pspace, inf->vfork_parent->pspace);
685 inf->pspace = pspace;
686 inf->aspace = pspace->aspace;
687
688 /* Put back inferior_ptid. We'll continue mourning this
689 inferior. */
690 do_cleanups (old_chain);
691
692 resume_parent = inf->vfork_parent->pid;
693 /* Break the bonds. */
694 inf->vfork_parent->vfork_child = NULL;
695 }
696
697 inf->vfork_parent = NULL;
698
699 gdb_assert (current_program_space == inf->pspace);
700
701 if (non_stop && resume_parent != -1)
702 {
703 /* If the user wanted the parent to be running, let it go
704 free now. */
705 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
706
707 if (debug_infrun)
708 fprintf_unfiltered (gdb_stdlog, "infrun: resuming vfork parent process %d\n",
709 resume_parent);
710
711 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
712
713 do_cleanups (old_chain);
714 }
715 }
716 }
717
718 /* Enum strings for "set|show follow-exec-mode". */
719
720 static const char follow_exec_mode_new[] = "new";
721 static const char follow_exec_mode_same[] = "same";
722 static const char *follow_exec_mode_names[] =
723 {
724 follow_exec_mode_new,
725 follow_exec_mode_same,
726 NULL,
727 };
728
729 static const char *follow_exec_mode_string = follow_exec_mode_same;
730 static void
731 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
732 struct cmd_list_element *c, const char *value)
733 {
734 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
735 }
736
737 /* EXECD_PATHNAME is assumed to be non-NULL. */
738
739 static void
740 follow_exec (ptid_t pid, char *execd_pathname)
741 {
742 struct thread_info *th = inferior_thread ();
743 struct inferior *inf = current_inferior ();
744
745 /* This is an exec event that we actually wish to pay attention to.
746 Refresh our symbol table to the newly exec'd program, remove any
747 momentary bp's, etc.
748
749 If there are breakpoints, they aren't really inserted now,
750 since the exec() transformed our inferior into a fresh set
751 of instructions.
752
753 We want to preserve symbolic breakpoints on the list, since
754 we have hopes that they can be reset after the new a.out's
755 symbol table is read.
756
757 However, any "raw" breakpoints must be removed from the list
758 (e.g., the solib bp's), since their address is probably invalid
759 now.
760
761 And, we DON'T want to call delete_breakpoints() here, since
762 that may write the bp's "shadow contents" (the instruction
763 value that was overwritten with a TRAP instruction). Since
764 we now have a new a.out, those shadow contents aren't valid. */
765
766 mark_breakpoints_out ();
767
768 update_breakpoints_after_exec ();
769
770 /* If there was one, it's gone now. We cannot truly step-to-next
771 statement through an exec(). */
772 th->step_resume_breakpoint = NULL;
773 th->step_range_start = 0;
774 th->step_range_end = 0;
775
776 /* The target reports the exec event to the main thread, even if
777 some other thread does the exec, and even if the main thread was
778 already stopped --- if debugging in non-stop mode, it's possible
779 the user had the main thread held stopped in the previous image
780 --- release it now. This is the same behavior as step-over-exec
781 with scheduler-locking on in all-stop mode. */
782 th->stop_requested = 0;
783
784 /* What is this a.out's name? */
785 printf_unfiltered (_("%s is executing new program: %s\n"),
786 target_pid_to_str (inferior_ptid),
787 execd_pathname);
788
789 /* We've followed the inferior through an exec. Therefore, the
790 inferior has essentially been killed & reborn. */
791
792 gdb_flush (gdb_stdout);
793
794 breakpoint_init_inferior (inf_execd);
795
796 if (gdb_sysroot && *gdb_sysroot)
797 {
798 char *name = alloca (strlen (gdb_sysroot)
799 + strlen (execd_pathname)
800 + 1);
801
802 strcpy (name, gdb_sysroot);
803 strcat (name, execd_pathname);
804 execd_pathname = name;
805 }
806
807 /* Reset the shared library package. This ensures that we get a
808 shlib event when the child reaches "_start", at which point the
809 dld will have had a chance to initialize the child. */
810 /* Also, loading a symbol file below may trigger symbol lookups, and
811 we don't want those to be satisfied by the libraries of the
812 previous incarnation of this process. */
813 no_shared_libraries (NULL, 0);
814
815 if (follow_exec_mode_string == follow_exec_mode_new)
816 {
817 struct program_space *pspace;
818
819 /* The user wants to keep the old inferior and program spaces
820 around. Create a new fresh one, and switch to it. */
821
822 inf = add_inferior (current_inferior ()->pid);
823 pspace = add_program_space (maybe_new_address_space ());
824 inf->pspace = pspace;
825 inf->aspace = pspace->aspace;
826
827 exit_inferior_num_silent (current_inferior ()->num);
828
829 set_current_inferior (inf);
830 set_current_program_space (pspace);
831 }
832
833 gdb_assert (current_program_space == inf->pspace);
834
835 /* That a.out is now the one to use. */
836 exec_file_attach (execd_pathname, 0);
837
838 /* Load the main file's symbols. */
839 symbol_file_add_main (execd_pathname, 0);
840
841 #ifdef SOLIB_CREATE_INFERIOR_HOOK
842 SOLIB_CREATE_INFERIOR_HOOK (PIDGET (inferior_ptid));
843 #else
844 solib_create_inferior_hook (0);
845 #endif
846
847 jit_inferior_created_hook ();
848
849 /* Reinsert all breakpoints. (Those which were symbolic have
850 been reset to the proper address in the new a.out, thanks
851 to symbol_file_command...) */
852 insert_breakpoints ();
853
854 /* The next resume of this inferior should bring it to the shlib
855 startup breakpoints. (If the user had also set bp's on
856 "main" from the old (parent) process, then they'll auto-
857 matically get reset there in the new process.) */
858 }
859
860 /* Non-zero if we are just simulating a single-step. This is needed
861 because we cannot remove the breakpoints in the inferior process
862 until after the `wait' in `wait_for_inferior'. */
863 static int singlestep_breakpoints_inserted_p = 0;
864
865 /* The thread we inserted single-step breakpoints for. */
866 static ptid_t singlestep_ptid;
867
868 /* PC when we started this single-step. */
869 static CORE_ADDR singlestep_pc;
870
871 /* If another thread hit the singlestep breakpoint, we save the original
872 thread here so that we can resume single-stepping it later. */
873 static ptid_t saved_singlestep_ptid;
874 static int stepping_past_singlestep_breakpoint;
875
876 /* If not equal to null_ptid, this means that after stepping over a breakpoint
877 is finished, we need to switch to deferred_step_ptid, and step it.
878
879 The use case is when one thread has hit a breakpoint, and then the user
880 has switched to another thread and issued 'step'. We need to step over
881 breakpoint in the thread which hit the breakpoint, but then continue
882 stepping the thread user has selected. */
883 static ptid_t deferred_step_ptid;
884 \f
885 /* Displaced stepping. */
886
887 /* In non-stop debugging mode, we must take special care to manage
888 breakpoints properly; in particular, the traditional strategy for
889 stepping a thread past a breakpoint it has hit is unsuitable.
890 'Displaced stepping' is a tactic for stepping one thread past a
891 breakpoint it has hit while ensuring that other threads running
892 concurrently will hit the breakpoint as they should.
893
894 The traditional way to step a thread T off a breakpoint in a
895 multi-threaded program in all-stop mode is as follows:
896
897 a0) Initially, all threads are stopped, and breakpoints are not
898 inserted.
899 a1) We single-step T, leaving breakpoints uninserted.
900 a2) We insert breakpoints, and resume all threads.
901
902 In non-stop debugging, however, this strategy is unsuitable: we
903 don't want to have to stop all threads in the system in order to
904 continue or step T past a breakpoint. Instead, we use displaced
905 stepping:
906
907 n0) Initially, T is stopped, other threads are running, and
908 breakpoints are inserted.
909 n1) We copy the instruction "under" the breakpoint to a separate
910 location, outside the main code stream, making any adjustments
911 to the instruction, register, and memory state as directed by
912 T's architecture.
913 n2) We single-step T over the instruction at its new location.
914 n3) We adjust the resulting register and memory state as directed
915 by T's architecture. This includes resetting T's PC to point
916 back into the main instruction stream.
917 n4) We resume T.
918
919 This approach depends on the following gdbarch methods:
920
921 - gdbarch_max_insn_length and gdbarch_displaced_step_location
922 indicate where to copy the instruction, and how much space must
923 be reserved there. We use these in step n1.
924
925 - gdbarch_displaced_step_copy_insn copies an instruction to a new
926 address, and makes any necessary adjustments to the instruction,
927 register contents, and memory. We use this in step n1.
928
929 - gdbarch_displaced_step_fixup adjusts registers and memory after
930 we have successfully single-stepped the instruction, to yield the
931 same effect the instruction would have had if we had executed it
932 at its original address. We use this in step n3.
933
934 - gdbarch_displaced_step_free_closure provides cleanup.
935
936 The gdbarch_displaced_step_copy_insn and
937 gdbarch_displaced_step_fixup functions must be written so that
938 copying an instruction with gdbarch_displaced_step_copy_insn,
939 single-stepping across the copied instruction, and then applying
940 gdbarch_displaced_step_fixup should have the same effects on the
941 thread's memory and registers as stepping the instruction in place
942 would have. Exactly which responsibilities fall to the copy and
943 which fall to the fixup is up to the author of those functions.
944
945 See the comments in gdbarch.sh for details.
946
947 Note that displaced stepping and software single-step cannot
948 currently be used in combination, although with some care I think
949 they could be made to. Software single-step works by placing
950 breakpoints on all possible subsequent instructions; if the
951 displaced instruction is a PC-relative jump, those breakpoints
952 could fall in very strange places --- on pages that aren't
953 executable, or at addresses that are not proper instruction
954 boundaries. (We do generally let other threads run while we wait
955 to hit the software single-step breakpoint, and they might
956 encounter such a corrupted instruction.) One way to work around
957 this would be to have gdbarch_displaced_step_copy_insn fully
958 simulate the effect of PC-relative instructions (and return NULL)
959 on architectures that use software single-stepping.
960
961 In non-stop mode, we can have independent and simultaneous step
962 requests, so more than one thread may need to simultaneously step
963 over a breakpoint. The current implementation assumes there is
964 only one scratch space per process. In this case, we have to
965 serialize access to the scratch space. If thread A wants to step
966 over a breakpoint, but we are currently waiting for some other
967 thread to complete a displaced step, we leave thread A stopped and
968 place it in the displaced_step_request_queue. Whenever a displaced
969 step finishes, we pick the next thread in the queue and start a new
970 displaced step operation on it. See displaced_step_prepare and
971 displaced_step_fixup for details. */
972
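/* Illustration only, not part of GDB: the n1-n4 sequence above expressed
   in terms of the gdbarch hooks it names.  Queueing, error handling and
   cleanups are omitted, and the REGCACHE, GDBARCH and PTID locals are
   assumed; see displaced_step_prepare and displaced_step_fixup below for
   the real implementation.  */
#if 0
{
  CORE_ADDR original = regcache_read_pc (regcache);
  CORE_ADDR copy = gdbarch_displaced_step_location (gdbarch);
  ULONGEST len = gdbarch_max_insn_length (gdbarch);
  gdb_byte *saved = xmalloc (len);
  struct displaced_step_closure *closure;

  read_memory (copy, saved, len);	/* Save the scratch area.  */
  closure = gdbarch_displaced_step_copy_insn (gdbarch, original, copy,
                                              regcache);	/* n1 */
  regcache_write_pc (regcache, copy);
  target_resume (ptid, 1, TARGET_SIGNAL_0);			/* n2 */

  /* ... once the single-step completes ... */
  write_memory (copy, saved, len);	/* Restore the scratch area.  */
  gdbarch_displaced_step_fixup (gdbarch, closure, original, copy,
                                regcache);			/* n3 */
  target_resume (ptid, 0, TARGET_SIGNAL_0);			/* n4 */
}
#endif
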
973 struct displaced_step_request
974 {
975 ptid_t ptid;
976 struct displaced_step_request *next;
977 };
978
979 /* Per-inferior displaced stepping state. */
980 struct displaced_step_inferior_state
981 {
982 /* Pointer to next in linked list. */
983 struct displaced_step_inferior_state *next;
984
985 /* The process this displaced step state refers to. */
986 int pid;
987
988 /* A queue of pending displaced stepping requests. One entry per
989 thread that needs to do a displaced step. */
990 struct displaced_step_request *step_request_queue;
991
992 /* If this is not null_ptid, this is the thread carrying out a
993 displaced single-step in process PID. This thread's state will
994 require fixing up once it has completed its step. */
995 ptid_t step_ptid;
996
997 /* The architecture the thread had when we stepped it. */
998 struct gdbarch *step_gdbarch;
999
1000 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
1001 for post-step cleanup. */
1002 struct displaced_step_closure *step_closure;
1003
1004 /* The address of the original instruction, and the copy we
1005 made. */
1006 CORE_ADDR step_original, step_copy;
1007
1008 /* Saved contents of copy area. */
1009 gdb_byte *step_saved_copy;
1010 };
1011
1012 /* The list of states of processes involved in displaced stepping
1013 presently. */
1014 static struct displaced_step_inferior_state *displaced_step_inferior_states;
1015
1016 /* Get the displaced stepping state of process PID. */
1017
1018 static struct displaced_step_inferior_state *
1019 get_displaced_stepping_state (int pid)
1020 {
1021 struct displaced_step_inferior_state *state;
1022
1023 for (state = displaced_step_inferior_states;
1024 state != NULL;
1025 state = state->next)
1026 if (state->pid == pid)
1027 return state;
1028
1029 return NULL;
1030 }
1031
1032 /* Add a new displaced stepping state for process PID to the displaced
1033 stepping state list, or return a pointer to the existing entry
1034 if one already exists. Never returns NULL. */
1035
1036 static struct displaced_step_inferior_state *
1037 add_displaced_stepping_state (int pid)
1038 {
1039 struct displaced_step_inferior_state *state;
1040
1041 for (state = displaced_step_inferior_states;
1042 state != NULL;
1043 state = state->next)
1044 if (state->pid == pid)
1045 return state;
1046
1047 state = xcalloc (1, sizeof (*state));
1048 state->pid = pid;
1049 state->next = displaced_step_inferior_states;
1050 displaced_step_inferior_states = state;
1051
1052 return state;
1053 }
1054
1055 /* Remove the displaced stepping state of process PID. */
1056
1057 static void
1058 remove_displaced_stepping_state (int pid)
1059 {
1060 struct displaced_step_inferior_state *it, **prev_next_p;
1061
1062 gdb_assert (pid != 0);
1063
1064 it = displaced_step_inferior_states;
1065 prev_next_p = &displaced_step_inferior_states;
1066 while (it)
1067 {
1068 if (it->pid == pid)
1069 {
1070 *prev_next_p = it->next;
1071 xfree (it);
1072 return;
1073 }
1074
1075 prev_next_p = &it->next;
1076 it = *prev_next_p;
1077 }
1078 }
1079
1080 static void
1081 infrun_inferior_exit (struct inferior *inf)
1082 {
1083 remove_displaced_stepping_state (inf->pid);
1084 }
1085
1086 /* Enum strings for "set|show displaced-stepping". */
1087
1088 static const char can_use_displaced_stepping_auto[] = "auto";
1089 static const char can_use_displaced_stepping_on[] = "on";
1090 static const char can_use_displaced_stepping_off[] = "off";
1091 static const char *can_use_displaced_stepping_enum[] =
1092 {
1093 can_use_displaced_stepping_auto,
1094 can_use_displaced_stepping_on,
1095 can_use_displaced_stepping_off,
1096 NULL,
1097 };
1098
1099 /* If ON, and the architecture supports it, GDB will use displaced
1100 stepping to step over breakpoints. If OFF, or if the architecture
1101 doesn't support it, GDB will instead use the traditional
1102 hold-and-step approach. If AUTO (which is the default), GDB will
1103 decide which technique to use to step over breakpoints depending on
1104 which of all-stop or non-stop mode is active --- displaced stepping
1105 in non-stop mode; hold-and-step in all-stop mode. */
1106
1107 static const char *can_use_displaced_stepping =
1108 can_use_displaced_stepping_auto;
1109
1110 static void
1111 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1112 struct cmd_list_element *c,
1113 const char *value)
1114 {
1115 if (can_use_displaced_stepping == can_use_displaced_stepping_auto)
1116 fprintf_filtered (file, _("\
1117 Debugger's willingness to use displaced stepping to step over \
1118 breakpoints is %s (currently %s).\n"),
1119 value, non_stop ? "on" : "off");
1120 else
1121 fprintf_filtered (file, _("\
1122 Debugger's willingness to use displaced stepping to step over \
1123 breakpoints is %s.\n"), value);
1124 }
1125
1126 /* Return non-zero if displaced stepping can/should be used to step
1127 over breakpoints. */
1128
1129 static int
1130 use_displaced_stepping (struct gdbarch *gdbarch)
1131 {
1132 return (((can_use_displaced_stepping == can_use_displaced_stepping_auto
1133 && non_stop)
1134 || can_use_displaced_stepping == can_use_displaced_stepping_on)
1135 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1136 && !RECORD_IS_USED);
1137 }
1138
1139 /* Clean out any stray displaced stepping state. */
1140 static void
1141 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1142 {
1143 /* Indicate that there is no cleanup pending. */
1144 displaced->step_ptid = null_ptid;
1145
1146 if (displaced->step_closure)
1147 {
1148 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1149 displaced->step_closure);
1150 displaced->step_closure = NULL;
1151 }
1152 }
1153
1154 static void
1155 displaced_step_clear_cleanup (void *arg)
1156 {
1157 struct displaced_step_inferior_state *state = arg;
1158
1159 displaced_step_clear (state);
1160 }
1161
1162 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1163 void
1164 displaced_step_dump_bytes (struct ui_file *file,
1165 const gdb_byte *buf,
1166 size_t len)
1167 {
1168 int i;
1169
1170 for (i = 0; i < len; i++)
1171 fprintf_unfiltered (file, "%02x ", buf[i]);
1172 fputs_unfiltered ("\n", file);
1173 }
1174
1175 /* Prepare to single-step, using displaced stepping.
1176
1177 Note that we cannot use displaced stepping when we have a signal to
1178 deliver. If we have a signal to deliver and an instruction to step
1179 over, then after the step, there will be no indication from the
1180 target whether the thread entered a signal handler or ignored the
1181 signal and stepped over the instruction successfully --- both cases
1182 result in a simple SIGTRAP. In the first case we mustn't do a
1183 fixup, and in the second case we must --- but we can't tell which.
1184 Comments in the code for 'random signals' in handle_inferior_event
1185 explain how we handle this case instead.
1186
1187 Returns 1 if preparing was successful -- this thread is going to be
1188 stepped now; or 0 if displaced stepping this thread got queued. */
1189 static int
1190 displaced_step_prepare (ptid_t ptid)
1191 {
1192 struct cleanup *old_cleanups, *ignore_cleanups;
1193 struct regcache *regcache = get_thread_regcache (ptid);
1194 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1195 CORE_ADDR original, copy;
1196 ULONGEST len;
1197 struct displaced_step_closure *closure;
1198 struct displaced_step_inferior_state *displaced;
1199
1200 /* We should never reach this function if the architecture does not
1201 support displaced stepping. */
1202 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1203
1204 /* We have to displaced step one thread at a time, as we only have
1205 access to a single scratch space per inferior. */
1206
1207 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1208
1209 if (!ptid_equal (displaced->step_ptid, null_ptid))
1210 {
1211 /* Already waiting for a displaced step to finish. Defer this
1212 request and place it in the queue. */
1213 struct displaced_step_request *req, *new_req;
1214
1215 if (debug_displaced)
1216 fprintf_unfiltered (gdb_stdlog,
1217 "displaced: deferring step of %s\n",
1218 target_pid_to_str (ptid));
1219
1220 new_req = xmalloc (sizeof (*new_req));
1221 new_req->ptid = ptid;
1222 new_req->next = NULL;
1223
1224 if (displaced->step_request_queue)
1225 {
1226 for (req = displaced->step_request_queue;
1227 req && req->next;
1228 req = req->next)
1229 ;
1230 req->next = new_req;
1231 }
1232 else
1233 displaced->step_request_queue = new_req;
1234
1235 return 0;
1236 }
1237 else
1238 {
1239 if (debug_displaced)
1240 fprintf_unfiltered (gdb_stdlog,
1241 "displaced: stepping %s now\n",
1242 target_pid_to_str (ptid));
1243 }
1244
1245 displaced_step_clear (displaced);
1246
1247 old_cleanups = save_inferior_ptid ();
1248 inferior_ptid = ptid;
1249
1250 original = regcache_read_pc (regcache);
1251
1252 copy = gdbarch_displaced_step_location (gdbarch);
1253 len = gdbarch_max_insn_length (gdbarch);
1254
1255 /* Save the original contents of the copy area. */
1256 displaced->step_saved_copy = xmalloc (len);
1257 ignore_cleanups = make_cleanup (free_current_contents,
1258 &displaced->step_saved_copy);
1259 read_memory (copy, displaced->step_saved_copy, len);
1260 if (debug_displaced)
1261 {
1262 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1263 paddress (gdbarch, copy));
1264 displaced_step_dump_bytes (gdb_stdlog,
1265 displaced->step_saved_copy,
1266 len);
1267 };
1268
1269 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1270 original, copy, regcache);
1271
1272 /* We don't support the fully-simulated case at present. */
1273 gdb_assert (closure);
1274
1275 /* Save the information we need to fix things up if the step
1276 succeeds. */
1277 displaced->step_ptid = ptid;
1278 displaced->step_gdbarch = gdbarch;
1279 displaced->step_closure = closure;
1280 displaced->step_original = original;
1281 displaced->step_copy = copy;
1282
1283 make_cleanup (displaced_step_clear_cleanup, displaced);
1284
1285 /* Resume execution at the copy. */
1286 regcache_write_pc (regcache, copy);
1287
1288 discard_cleanups (ignore_cleanups);
1289
1290 do_cleanups (old_cleanups);
1291
1292 if (debug_displaced)
1293 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1294 paddress (gdbarch, copy));
1295
1296 return 1;
1297 }
1298
1299 static void
1300 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
1301 {
1302 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1303
1304 inferior_ptid = ptid;
1305 write_memory (memaddr, myaddr, len);
1306 do_cleanups (ptid_cleanup);
1307 }
1308
1309 static void
1310 displaced_step_fixup (ptid_t event_ptid, enum target_signal signal)
1311 {
1312 struct cleanup *old_cleanups;
1313 struct displaced_step_inferior_state *displaced
1314 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1315
1316 /* Was any thread of this process doing a displaced step? */
1317 if (displaced == NULL)
1318 return;
1319
1320 /* Was this event for the pid we displaced? */
1321 if (ptid_equal (displaced->step_ptid, null_ptid)
1322 || ! ptid_equal (displaced->step_ptid, event_ptid))
1323 return;
1324
1325 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1326
1327 /* Restore the contents of the copy area. */
1328 {
1329 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1330
1331 write_memory_ptid (displaced->step_ptid, displaced->step_copy,
1332 displaced->step_saved_copy, len);
1333 if (debug_displaced)
1334 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s\n",
1335 paddress (displaced->step_gdbarch,
1336 displaced->step_copy));
1337 }
1338
1339 /* Did the instruction complete successfully? */
1340 if (signal == TARGET_SIGNAL_TRAP)
1341 {
1342 /* Fix up the resulting state. */
1343 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1344 displaced->step_closure,
1345 displaced->step_original,
1346 displaced->step_copy,
1347 get_thread_regcache (displaced->step_ptid));
1348 }
1349 else
1350 {
1351 /* Since the instruction didn't complete, all we can do is
1352 relocate the PC. */
1353 struct regcache *regcache = get_thread_regcache (event_ptid);
1354 CORE_ADDR pc = regcache_read_pc (regcache);
1355
1356 pc = displaced->step_original + (pc - displaced->step_copy);
1357 regcache_write_pc (regcache, pc);
1358 }
1359
1360 do_cleanups (old_cleanups);
1361
1362 displaced->step_ptid = null_ptid;
1363
1364 /* Are there any pending displaced stepping requests? If so, run
1365 one now. Leave the state object around, since we're likely to
1366 need it again soon. */
1367 while (displaced->step_request_queue)
1368 {
1369 struct displaced_step_request *head;
1370 ptid_t ptid;
1371 struct regcache *regcache;
1372 struct gdbarch *gdbarch;
1373 CORE_ADDR actual_pc;
1374 struct address_space *aspace;
1375
1376 head = displaced->step_request_queue;
1377 ptid = head->ptid;
1378 displaced->step_request_queue = head->next;
1379 xfree (head);
1380
1381 context_switch (ptid);
1382
1383 regcache = get_thread_regcache (ptid);
1384 actual_pc = regcache_read_pc (regcache);
1385 aspace = get_regcache_aspace (regcache);
1386
1387 if (breakpoint_here_p (aspace, actual_pc))
1388 {
1389 if (debug_displaced)
1390 fprintf_unfiltered (gdb_stdlog,
1391 "displaced: stepping queued %s now\n",
1392 target_pid_to_str (ptid));
1393
1394 displaced_step_prepare (ptid);
1395
1396 gdbarch = get_regcache_arch (regcache);
1397
1398 if (debug_displaced)
1399 {
1400 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1401 gdb_byte buf[4];
1402
1403 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1404 paddress (gdbarch, actual_pc));
1405 read_memory (actual_pc, buf, sizeof (buf));
1406 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1407 }
1408
1409 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1410 displaced->step_closure))
1411 target_resume (ptid, 1, TARGET_SIGNAL_0);
1412 else
1413 target_resume (ptid, 0, TARGET_SIGNAL_0);
1414
1415 /* Done, we're stepping a thread. */
1416 break;
1417 }
1418 else
1419 {
1420 int step;
1421 struct thread_info *tp = inferior_thread ();
1422
1423 /* The breakpoint we were sitting under has since been
1424 removed. */
1425 tp->trap_expected = 0;
1426
1427 /* Go back to what we were trying to do. */
1428 step = currently_stepping (tp);
1429
1430 if (debug_displaced)
1431 fprintf_unfiltered (gdb_stdlog, "breakpoint is gone %s: step(%d)\n",
1432 target_pid_to_str (tp->ptid), step);
1433
1434 target_resume (ptid, step, TARGET_SIGNAL_0);
1435 tp->stop_signal = TARGET_SIGNAL_0;
1436
1437 /* This request was discarded. See if there's any other
1438 thread waiting for its turn. */
1439 }
1440 }
1441 }
1442
1443 /* Update global variables holding ptids to hold NEW_PTID if they were
1444 holding OLD_PTID. */
1445 static void
1446 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1447 {
1448 struct displaced_step_request *it;
1449 struct displaced_step_inferior_state *displaced;
1450
1451 if (ptid_equal (inferior_ptid, old_ptid))
1452 inferior_ptid = new_ptid;
1453
1454 if (ptid_equal (singlestep_ptid, old_ptid))
1455 singlestep_ptid = new_ptid;
1456
1457 if (ptid_equal (deferred_step_ptid, old_ptid))
1458 deferred_step_ptid = new_ptid;
1459
1460 for (displaced = displaced_step_inferior_states;
1461 displaced;
1462 displaced = displaced->next)
1463 {
1464 if (ptid_equal (displaced->step_ptid, old_ptid))
1465 displaced->step_ptid = new_ptid;
1466
1467 for (it = displaced->step_request_queue; it; it = it->next)
1468 if (ptid_equal (it->ptid, old_ptid))
1469 it->ptid = new_ptid;
1470 }
1471 }
1472
1473 \f
1474 /* Resuming. */
1475
1476 /* Things to clean up if we QUIT out of resume (). */
1477 static void
1478 resume_cleanups (void *ignore)
1479 {
1480 normal_stop ();
1481 }
1482
1483 static const char schedlock_off[] = "off";
1484 static const char schedlock_on[] = "on";
1485 static const char schedlock_step[] = "step";
1486 static const char *scheduler_enums[] = {
1487 schedlock_off,
1488 schedlock_on,
1489 schedlock_step,
1490 NULL
1491 };
1492 static const char *scheduler_mode = schedlock_off;
1493 static void
1494 show_scheduler_mode (struct ui_file *file, int from_tty,
1495 struct cmd_list_element *c, const char *value)
1496 {
1497 fprintf_filtered (file, _("\
1498 Mode for locking scheduler during execution is \"%s\".\n"),
1499 value);
1500 }
1501
1502 static void
1503 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1504 {
1505 if (!target_can_lock_scheduler)
1506 {
1507 scheduler_mode = schedlock_off;
1508 error (_("Target '%s' cannot support this command."), target_shortname);
1509 }
1510 }
1511
1512 /* True if execution commands resume all threads of all processes by
1513 default; otherwise, resume only threads of the current inferior
1514 process. */
1515 int sched_multi = 0;
1516
1517 /* Try to setup for software single stepping over the specified location.
1518 Return 1 if target_resume() should use hardware single step.
1519
1520 GDBARCH the current gdbarch.
1521 PC the location to step over. */
1522
1523 static int
1524 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1525 {
1526 int hw_step = 1;
1527
1528 if (execution_direction == EXEC_FORWARD
1529 && gdbarch_software_single_step_p (gdbarch)
1530 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1531 {
1532 hw_step = 0;
1533 /* Do not pull these breakpoints until after a `wait' in
1534 `wait_for_inferior' */
1535 singlestep_breakpoints_inserted_p = 1;
1536 singlestep_ptid = inferior_ptid;
1537 singlestep_pc = pc;
1538 }
1539 return hw_step;
1540 }
1541
1542 /* Resume the inferior, but allow a QUIT. This is useful if the user
1543 wants to interrupt some lengthy single-stepping operation
1544 (for child processes, the SIGINT goes to the inferior, and so
1545 we get a SIGINT random_signal, but for remote debugging and perhaps
1546 other targets, that's not true).
1547
1548 STEP nonzero if we should step (zero to continue instead).
1549 SIG is the signal to give the inferior (zero for none). */
1550 void
1551 resume (int step, enum target_signal sig)
1552 {
1553 int should_resume = 1;
1554 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1555 struct regcache *regcache = get_current_regcache ();
1556 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1557 struct thread_info *tp = inferior_thread ();
1558 CORE_ADDR pc = regcache_read_pc (regcache);
1559 struct address_space *aspace = get_regcache_aspace (regcache);
1560
1561 QUIT;
1562
1563 if (current_inferior ()->waiting_for_vfork_done)
1564 {
1565 /* Don't try to single-step a vfork parent that is waiting for
1566 the child to get out of the shared memory region (by exec'ing
1567 or exiting). This is particularly important on software
1568 single-step archs, as the child process would trip on the
1569 software single step breakpoint inserted for the parent
1570 process. Since the parent will not actually execute any
1571 instruction until the child is out of the shared region (such
1572 are vfork's semantics), it is safe to simply continue it.
1573 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
1574 the parent, and tell it to `keep_going', which automatically
1575 re-sets it to stepping. */
1576 if (debug_infrun)
1577 fprintf_unfiltered (gdb_stdlog,
1578 "infrun: resume : clear step\n");
1579 step = 0;
1580 }
1581
1582 if (debug_infrun)
1583 fprintf_unfiltered (gdb_stdlog,
1584 "infrun: resume (step=%d, signal=%d), "
1585 "trap_expected=%d\n",
1586 step, sig, tp->trap_expected);
1587
1588 /* Normally, by the time we reach `resume', the breakpoints are either
1589 removed or inserted, as appropriate. The exception is if we're sitting
1590 at a permanent breakpoint; we need to step over it, but permanent
1591 breakpoints can't be removed. So we have to test for it here. */
1592 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
1593 {
1594 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1595 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1596 else
1597 error (_("\
1598 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1599 how to step past a permanent breakpoint on this architecture. Try using\n\
1600 a command like `return' or `jump' to continue execution."));
1601 }
1602
1603 /* If enabled, step over breakpoints by executing a copy of the
1604 instruction at a different address.
1605
1606 We can't use displaced stepping when we have a signal to deliver;
1607 the comments for displaced_step_prepare explain why. The
1608 comments in handle_inferior_event for dealing with 'random
1609 signals' explain what we do instead.
1610
1611 We can't use displaced stepping when we are waiting for a vfork_done
1612 event; displaced stepping would break the vfork child in the same way
1613 a software single-step breakpoint does. */
1614 if (use_displaced_stepping (gdbarch)
1615 && (tp->trap_expected
1616 || (step && gdbarch_software_single_step_p (gdbarch)))
1617 && sig == TARGET_SIGNAL_0
1618 && !current_inferior ()->waiting_for_vfork_done)
1619 {
1620 struct displaced_step_inferior_state *displaced;
1621
1622 if (!displaced_step_prepare (inferior_ptid))
1623 {
1624 /* Got placed in displaced stepping queue. Will be resumed
1625 later when all the currently queued displaced stepping
1626 requests finish. The thread is not executing at this point,
1627 and the call to set_executing will be made later. But we
1628 need to call set_running here, since from the frontend's point of view,
1629 the thread is running. */
1630 set_running (inferior_ptid, 1);
1631 discard_cleanups (old_cleanups);
1632 return;
1633 }
1634
1635 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1636 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
1637 displaced->step_closure);
1638 }
1639
1640 /* Do we need to do it the hard way, w/temp breakpoints? */
1641 else if (step)
1642 step = maybe_software_singlestep (gdbarch, pc);
1643
1644 if (should_resume)
1645 {
1646 ptid_t resume_ptid;
1647
1648 /* If STEP is set, it's a request to use hardware stepping
1649 facilities. But in that case, we should never
1650 use singlestep breakpoint. */
1651 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1652
1653 /* Decide the set of threads to ask the target to resume. Start
1654 by assuming everything will be resumed, then narrow the set
1655 by applying increasingly restrictive conditions. */
1656
1657 /* By default, resume all threads of all processes. */
1658 resume_ptid = RESUME_ALL;
1659
1660 /* Maybe resume only all threads of the current process. */
1661 if (!sched_multi && target_supports_multi_process ())
1662 {
1663 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1664 }
1665
1666 /* Maybe resume a single thread after all. */
1667 if (singlestep_breakpoints_inserted_p
1668 && stepping_past_singlestep_breakpoint)
1669 {
1670 /* The situation here is as follows. In thread T1 we wanted to
1671 single-step. Lacking hardware single-stepping we've set a
1672 breakpoint at the PC of the next instruction -- call it
1673 P. After resuming, we've hit that breakpoint in thread T2.
1674 Now we've removed the original breakpoint, inserted a breakpoint
1675 at P+1, and are trying to step T2 past the breakpoint.
1676 We need to step only T2: if T1 is allowed to run freely,
1677 it can run past P, and if other threads are allowed to run,
1678 they can hit the breakpoint at P+1, and nested hits of
1679 single-step breakpoints are not something we'd want -- that's
1680 complicated to support, and has no value. */
1681 resume_ptid = inferior_ptid;
1682 }
1683 else if ((step || singlestep_breakpoints_inserted_p)
1684 && tp->trap_expected)
1685 {
1686 /* We're allowing a thread to run past a breakpoint it has
1687 hit, by single-stepping the thread with the breakpoint
1688 removed. In that case, we need to single-step only this
1689 thread, and keep the others stopped, as they could miss this
1690 breakpoint if allowed to run.
1691
1692 The current code actually removes all breakpoints when
1693 doing this, not just the one being stepped over, so if we
1694 let other threads run, we can actually miss any
1695 breakpoint, not just the one at PC. */
1696 resume_ptid = inferior_ptid;
1697 }
1698 else if (non_stop)
1699 {
1700 /* With non-stop mode on, threads are always handled
1701 individually. */
1702 resume_ptid = inferior_ptid;
1703 }
1704 else if ((scheduler_mode == schedlock_on)
1705 || (scheduler_mode == schedlock_step
1706 && (step || singlestep_breakpoints_inserted_p)))
1707 {
1708 /* User-settable 'scheduler' mode requires solo thread resume. */
1709 resume_ptid = inferior_ptid;
1710 }
1711
1712 if (gdbarch_cannot_step_breakpoint (gdbarch))
1713 {
1714 /* Most targets can step a breakpoint instruction, thus
1715 executing it normally. But if this one cannot, just
1716 continue and we will hit it anyway. */
1717 if (step && breakpoint_inserted_here_p (aspace, pc))
1718 step = 0;
1719 }
1720
1721 if (debug_displaced
1722 && use_displaced_stepping (gdbarch)
1723 && tp->trap_expected)
1724 {
1725 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1726 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1727 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1728 gdb_byte buf[4];
1729
1730 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1731 paddress (resume_gdbarch, actual_pc));
1732 read_memory (actual_pc, buf, sizeof (buf));
1733 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1734 }
1735
1736 /* Install inferior's terminal modes. */
1737 target_terminal_inferior ();
1738
1739 /* Avoid confusing the next resume, if the next stop/resume
1740 happens to apply to another thread. */
1741 tp->stop_signal = TARGET_SIGNAL_0;
1742
1743 target_resume (resume_ptid, step, sig);
1744 }
1745
1746 discard_cleanups (old_cleanups);
1747 }
1748 \f
1749 /* Proceeding. */
1750
1751 /* Clear out all variables saying what to do when the inferior is continued.
1752 First do this, then set the ones you want, then call `proceed'. */
1753
1754 static void
1755 clear_proceed_status_thread (struct thread_info *tp)
1756 {
1757 if (debug_infrun)
1758 fprintf_unfiltered (gdb_stdlog,
1759 "infrun: clear_proceed_status_thread (%s)\n",
1760 target_pid_to_str (tp->ptid));
1761
1762 tp->trap_expected = 0;
1763 tp->step_range_start = 0;
1764 tp->step_range_end = 0;
1765 tp->step_frame_id = null_frame_id;
1766 tp->step_stack_frame_id = null_frame_id;
1767 tp->step_over_calls = STEP_OVER_UNDEBUGGABLE;
1768 tp->stop_requested = 0;
1769
1770 tp->stop_step = 0;
1771
1772 tp->proceed_to_finish = 0;
1773
1774 /* Discard any remaining commands or status from previous stop. */
1775 bpstat_clear (&tp->stop_bpstat);
1776 }
1777
1778 static int
1779 clear_proceed_status_callback (struct thread_info *tp, void *data)
1780 {
1781 if (is_exited (tp->ptid))
1782 return 0;
1783
1784 clear_proceed_status_thread (tp);
1785 return 0;
1786 }
1787
1788 void
1789 clear_proceed_status (void)
1790 {
1791 if (!non_stop)
1792 {
1793 /* In all-stop mode, delete the per-thread status of all
1794 threads, even if inferior_ptid is null_ptid, there may be
1795 threads on the list. E.g., we may be launching a new
1796 process, while selecting the executable. */
1797 iterate_over_threads (clear_proceed_status_callback, NULL);
1798 }
1799
1800 if (!ptid_equal (inferior_ptid, null_ptid))
1801 {
1802 struct inferior *inferior;
1803
1804 if (non_stop)
1805 {
1806 /* If in non-stop mode, only delete the per-thread status of
1807 the current thread. */
1808 clear_proceed_status_thread (inferior_thread ());
1809 }
1810
1811 inferior = current_inferior ();
1812 inferior->stop_soon = NO_STOP_QUIETLY;
1813 }
1814
1815 stop_after_trap = 0;
1816
1817 observer_notify_about_to_proceed ();
1818
1819 if (stop_registers)
1820 {
1821 regcache_xfree (stop_registers);
1822 stop_registers = NULL;
1823 }
1824 }
1825
1826 /* Check the current thread against the thread that reported the most recent
1827 event. If a step-over is required, return TRUE and switch the current
1828 thread to the thread that reported that event. Otherwise return FALSE.
1829
1830 This should be suitable for any targets that support threads. */
1831
1832 static int
1833 prepare_to_proceed (int step)
1834 {
1835 ptid_t wait_ptid;
1836 struct target_waitstatus wait_status;
1837 int schedlock_enabled;
1838
1839 /* With non-stop mode on, threads are always handled individually. */
1840 gdb_assert (! non_stop);
1841
1842 /* Get the last target status returned by target_wait(). */
1843 get_last_target_status (&wait_ptid, &wait_status);
1844
1845 /* Make sure we were stopped at a breakpoint. */
1846 if (wait_status.kind != TARGET_WAITKIND_STOPPED
1847 || (wait_status.value.sig != TARGET_SIGNAL_TRAP
1848 && wait_status.value.sig != TARGET_SIGNAL_ILL
1849 && wait_status.value.sig != TARGET_SIGNAL_SEGV
1850 && wait_status.value.sig != TARGET_SIGNAL_EMT))
1851 {
1852 return 0;
1853 }
1854
1855 schedlock_enabled = (scheduler_mode == schedlock_on
1856 || (scheduler_mode == schedlock_step
1857 && step));
1858
1859 /* Don't switch over to WAIT_PTID if scheduler locking is on. */
1860 if (schedlock_enabled)
1861 return 0;
1862
1863 /* Don't switch over if we're about to resume some process other
1864 than WAIT_PTID's, and schedule-multiple is off. */
1865 if (!sched_multi
1866 && ptid_get_pid (wait_ptid) != ptid_get_pid (inferior_ptid))
1867 return 0;
1868
1869 /* We have switched to a thread other than WAIT_PTID, the one that reported the last event. */
1870 if (!ptid_equal (wait_ptid, minus_one_ptid)
1871 && !ptid_equal (inferior_ptid, wait_ptid))
1872 {
1873 struct regcache *regcache = get_thread_regcache (wait_ptid);
1874
1875 if (breakpoint_here_p (get_regcache_aspace (regcache),
1876 regcache_read_pc (regcache)))
1877 {
1878 /* If stepping, remember current thread to switch back to. */
1879 if (step)
1880 deferred_step_ptid = inferior_ptid;
1881
1882 /* Switch back to the WAIT_PTID thread. */
1883 switch_to_thread (wait_ptid);
1884
1885 /* We return 1 to indicate that there is a breakpoint here,
1886 so we need to step over it before continuing to avoid
1887 hitting it straight away. */
1888 return 1;
1889 }
1890 }
1891
1892 return 0;
1893 }
1894
1895 /* Basic routine for continuing the program in various fashions.
1896
1897 ADDR is the address to resume at, or -1 for resume where stopped.
1898 SIGGNAL is the signal to give it, or 0 for none,
1899 or -1 for act according to how it stopped.
1900 STEP is nonzero if we should trap after one instruction.
1901 A STEP of -1 means stop after that trap and print nothing.
1902 You should probably set various step_... variables
1903 before calling here, if you are stepping.
1904
1905 You should call clear_proceed_status before calling proceed. */
1906
1907 void
1908 proceed (CORE_ADDR addr, enum target_signal siggnal, int step)
1909 {
1910 struct regcache *regcache;
1911 struct gdbarch *gdbarch;
1912 struct thread_info *tp;
1913 CORE_ADDR pc;
1914 struct address_space *aspace;
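/* Nonzero if we need to single-step one instruction over a breakpoint
(or a delay-slot instruction) at the resume point before breakpoints
can be (re-)inserted; see the uses below. */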
1915 int oneproc = 0;
1916
1917 /* If we're stopped at a fork/vfork, follow the branch set by the
1918 "set follow-fork-mode" command; otherwise, we'll just proceed
1919 resuming the current thread. */
1920 if (!follow_fork ())
1921 {
1922 /* The target for some reason decided not to resume. */
1923 normal_stop ();
1924 return;
1925 }
1926
1927 regcache = get_current_regcache ();
1928 gdbarch = get_regcache_arch (regcache);
1929 aspace = get_regcache_aspace (regcache);
1930 pc = regcache_read_pc (regcache);
1931
1932 if (step > 0)
1933 step_start_function = find_pc_function (pc);
1934 if (step < 0)
1935 stop_after_trap = 1;
1936
1937 if (addr == (CORE_ADDR) -1)
1938 {
1939 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
1940 && execution_direction != EXEC_REVERSE)
1941 /* There is a breakpoint at the address we will resume at,
1942 step one instruction before inserting breakpoints so that
1943 we do not stop right away (and report a second hit at this
1944 breakpoint).
1945
1946 Note, we don't do this in reverse, because we won't
1947 actually be executing the breakpoint insn anyway.
1948 We'll be (un-)executing the previous instruction. */
1949
1950 oneproc = 1;
1951 else if (gdbarch_single_step_through_delay_p (gdbarch)
1952 && gdbarch_single_step_through_delay (gdbarch,
1953 get_current_frame ()))
1954 /* We stepped onto an instruction that needs to be stepped
1955 again before re-inserting the breakpoint, do so. */
1956 oneproc = 1;
1957 }
1958 else
1959 {
1960 regcache_write_pc (regcache, addr);
1961 }
1962
1963 if (debug_infrun)
1964 fprintf_unfiltered (gdb_stdlog,
1965 "infrun: proceed (addr=%s, signal=%d, step=%d)\n",
1966 paddress (gdbarch, addr), siggnal, step);
1967
1968 /* We're handling a live event, so make sure we're doing live
1969 debugging. If we're looking at traceframes while the target is
1970 running, we're going to need to get back to that mode after
1971 handling the event. */
1972 if (non_stop)
1973 {
1974 make_cleanup_restore_current_traceframe ();
1975 set_traceframe_number (-1);
1976 }
1977
1978 if (non_stop)
1979 /* In non-stop, each thread is handled individually. The context
1980 must already be set to the right thread here. */
1981 ;
1982 else
1983 {
1984 /* In a multi-threaded task we may select another thread and
1985 then continue or step.
1986
1987 But if the old thread was stopped at a breakpoint, it will
1988 immediately cause another breakpoint stop without any
1989 execution (i.e. it will report a breakpoint hit incorrectly).
1990 So we must step over it first.
1991
1992 prepare_to_proceed checks the current thread against the
1993 thread that reported the most recent event. If a step-over
1994 is required it returns TRUE and sets the current thread to
1995 the old thread. */
1996 if (prepare_to_proceed (step))
1997 oneproc = 1;
1998 }
1999
2000 /* prepare_to_proceed may change the current thread. */
2001 tp = inferior_thread ();
2002
2003 if (oneproc)
2004 {
2005 tp->trap_expected = 1;
2006 /* If displaced stepping is enabled, we can step over the
2007 breakpoint without hitting it, so leave all breakpoints
2008 inserted. Otherwise we need to disable all breakpoints, step
2009 one instruction, and then re-add them when that step is
2010 finished. */
2011 if (!use_displaced_stepping (gdbarch))
2012 remove_breakpoints ();
2013 }
2014
2015 /* We can insert breakpoints if we're not trying to step over one,
2016 or if we are stepping over one but we're using displaced stepping
2017 to do so. */
2018 if (! tp->trap_expected || use_displaced_stepping (gdbarch))
2019 insert_breakpoints ();
2020
2021 if (!non_stop)
2022 {
2023 /* Pass the last stop signal to the thread we're resuming,
2024 irrespective of whether the current thread is the thread that
2025 got the last event or not. This was historically GDB's
2026 behaviour before keeping a stop_signal per thread. */
2027
2028 struct thread_info *last_thread;
2029 ptid_t last_ptid;
2030 struct target_waitstatus last_status;
2031
2032 get_last_target_status (&last_ptid, &last_status);
2033 if (!ptid_equal (inferior_ptid, last_ptid)
2034 && !ptid_equal (last_ptid, null_ptid)
2035 && !ptid_equal (last_ptid, minus_one_ptid))
2036 {
2037 last_thread = find_thread_ptid (last_ptid);
2038 if (last_thread)
2039 {
2040 tp->stop_signal = last_thread->stop_signal;
2041 last_thread->stop_signal = TARGET_SIGNAL_0;
2042 }
2043 }
2044 }
2045
2046 if (siggnal != TARGET_SIGNAL_DEFAULT)
2047 tp->stop_signal = siggnal;
2048 /* If this signal should not be seen by the program,
2049 give it zero. Used for debugging signals. */
2050 else if (!signal_program[tp->stop_signal])
2051 tp->stop_signal = TARGET_SIGNAL_0;
2052
2053 annotate_starting ();
2054
2055 /* Make sure that output from GDB appears before output from the
2056 inferior. */
2057 gdb_flush (gdb_stdout);
2058
2059 /* Refresh prev_pc value just prior to resuming. This used to be
2060 done in stop_stepping; however, setting prev_pc there did not handle
2061 scenarios such as inferior function calls or returning from
2062 a function via the return command. In those cases, the prev_pc
2063 value was not set properly for subsequent commands. The prev_pc value
2064 is used to initialize the starting line number in the ecs. With an
2065 invalid value, the gdb next command ends up stopping at the position
2066 represented by the next line table entry past our start position.
2067 On platforms that generate one line table entry per line, this
2068 is not a problem. However, on the ia64, the compiler generates
2069 extraneous line table entries that do not increase the line number.
2070 When we issue the gdb next command on the ia64 after an inferior call
2071 or a return command, we often end up a few instructions forward, still
2072 within the original line we started in.
2073
2074 An attempt was made to refresh the prev_pc at the same time the
2075 execution_control_state is initialized (for instance, just before
2076 waiting for an inferior event). But this approach did not work
2077 because of platforms that use ptrace, where the pc register cannot
2078 be read unless the inferior is stopped. At that point, we are not
2079 guaranteed the inferior is stopped and so the regcache_read_pc() call
2080 can fail. Setting the prev_pc value here ensures the value is updated
2081 correctly when the inferior is stopped. */
2082 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2083
2084 /* Fill in with reasonable starting values. */
2085 init_thread_stepping_state (tp);
2086
2087 /* Reset to normal state. */
2088 init_infwait_state ();
2089
2090 /* Resume inferior. */
2091 resume (oneproc || step || bpstat_should_step (), tp->stop_signal);
2092
2093 /* Wait for it to stop (if not standalone)
2094 and in any case decode why it stopped, and act accordingly. */
2095 /* Do this only if we are not using the event loop, or if the target
2096 does not support asynchronous execution. */
2097 if (!target_can_async_p ())
2098 {
2099 wait_for_inferior (0);
2100 normal_stop ();
2101 }
2102 }
2103 \f
2104
2105 /* Start remote-debugging of a machine over a serial link. */
2106
2107 void
2108 start_remote (int from_tty)
2109 {
2110 struct inferior *inferior;
2111
2112 init_wait_for_inferior ();
2113 inferior = current_inferior ();
2114 inferior->stop_soon = STOP_QUIETLY_REMOTE;
2115
2116 /* Always go on waiting for the target, regardless of the mode. */
2117 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2118 indicate to wait_for_inferior that a target should timeout if
2119 nothing is returned (instead of just blocking). Because of this,
2120 targets expecting an immediate response need to, internally, set
2121 things up so that the target_wait() is forced to eventually
2122 timeout. */
2123 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2124 differentiate to its caller what the state of the target is after
2125 the initial open has been performed. Here we're assuming that
2126 the target has stopped. It should be possible to eventually have
2127 target_open() return to the caller an indication that the target
2128 is currently running and GDB state should be set to the same as
2129 for an async run. */
2130 wait_for_inferior (0);
2131
2132 /* Now that the inferior has stopped, do any bookkeeping like
2133 loading shared libraries. We want to do this before normal_stop,
2134 so that the displayed frame is up to date. */
2135 post_create_inferior (&current_target, from_tty);
2136
2137 normal_stop ();
2138 }
2139
2140 /* Initialize static vars when a new inferior begins. */
2141
2142 void
2143 init_wait_for_inferior (void)
2144 {
2145 /* These are meaningless until the first time through wait_for_inferior. */
2146
2147 breakpoint_init_inferior (inf_starting);
2148
2149 clear_proceed_status ();
2150
2151 stepping_past_singlestep_breakpoint = 0;
2152 deferred_step_ptid = null_ptid;
2153
2154 target_last_wait_ptid = minus_one_ptid;
2155
2156 previous_inferior_ptid = null_ptid;
2157 init_infwait_state ();
2158
2159 /* Discard any skipped inlined frames. */
2160 clear_inline_frame_state (minus_one_ptid);
2161 }
2162
2163 \f
2164 /* This enum encodes possible reasons for doing a target_wait, so that
2165 wait_for_inferior can call target_wait in one place. (Ultimately the call will be
2166 moved out of the infinite loop entirely.) */
2167
2168 enum infwait_states
2169 {
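/* No special handling is pending for the next event. */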
2170 infwait_normal_state,
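/* Waiting for the event that completes a "thread hop": a thread
being stepped past a breakpoint so that breakpoints can be
re-inserted (see the thread-hop code elsewhere in this file). */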
2171 infwait_thread_hop_state,
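/* The next event follows a single-step issued after a watchpoint
triggered; stepped_after_stopped_by_watchpoint will be set. */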
2172 infwait_step_watch_state,
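/* Like infwait_step_watch_state, but the inferior was continued
rather than stepped; breakpoints are re-inserted when the event
arrives. */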
2173 infwait_nonstep_watch_state
2174 };
2175
2176 /* The PTID we'll do a target_wait on. */
2177 ptid_t waiton_ptid;
2178
2179 /* Current inferior wait state. */
2180 enum infwait_states infwait_state;
2181
2182 /* Data to be passed around while handling an event. This data is
2183 discarded between events. */
2184 struct execution_control_state
2185 {
2186 ptid_t ptid;
2187 /* The thread that got the event, if this was a thread event; NULL
2188 otherwise. */
2189 struct thread_info *event_thread;
2190
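/* The target_waitstatus describing the event. */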
2191 struct target_waitstatus ws;
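/* Nonzero if the signal that stopped the thread is not explained by
the bpstat; i.e., a "random" signal rather than a GDB breakpoint or
catchpoint hit. */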
2192 int random_signal;
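/* If known, the bounds and name of the function containing the PC
at which the thread stopped. */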
2193 CORE_ADDR stop_func_start;
2194 CORE_ADDR stop_func_end;
2195 char *stop_func_name;
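/* Nonzero if the event came from a ptid that was not yet in GDB's
thread list. */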
2196 int new_thread_event;
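/* Nonzero if handling of this event is not finished and the caller
should wait for (and handle) another target event. */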
2197 int wait_some_more;
2198 };
2199
2200 static void handle_inferior_event (struct execution_control_state *ecs);
2201
2202 static void handle_step_into_function (struct gdbarch *gdbarch,
2203 struct execution_control_state *ecs);
2204 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2205 struct execution_control_state *ecs);
2206 static void insert_step_resume_breakpoint_at_frame (struct frame_info *step_frame);
2207 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
2208 static void insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
2209 struct symtab_and_line sr_sal,
2210 struct frame_id sr_id);
2211 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
2212
2213 static void stop_stepping (struct execution_control_state *ecs);
2214 static void prepare_to_wait (struct execution_control_state *ecs);
2215 static void keep_going (struct execution_control_state *ecs);
2216
2217 /* Callback for iterate_over_threads. If the thread is stopped, but
2218 the user/frontend doesn't know about that yet, go through
2219 normal_stop, as if the thread had just stopped now. ARG points at
2220 a ptid. If PTID is MINUS_ONE_PTID, this applies to all threads. If
2221 ptid_is_pid(PTID) is true, it applies to all threads of the process
2222 identified by PTID. Otherwise, it applies only to the thread
2223 identified by PTID. */
2224
2225 static int
2226 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2227 {
2228 ptid_t ptid = * (ptid_t *) arg;
2229
2230 if ((ptid_equal (info->ptid, ptid)
2231 || ptid_equal (minus_one_ptid, ptid)
2232 || (ptid_is_pid (ptid)
2233 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2234 && is_running (info->ptid)
2235 && !is_executing (info->ptid))
2236 {
2237 struct cleanup *old_chain;
2238 struct execution_control_state ecss;
2239 struct execution_control_state *ecs = &ecss;
2240
2241 memset (ecs, 0, sizeof (*ecs));
2242
2243 old_chain = make_cleanup_restore_current_thread ();
2244
2245 switch_to_thread (info->ptid);
2246
2247 /* Go through handle_inferior_event/normal_stop, so we always
2248 have consistent output as if the stop event had been
2249 reported. */
2250 ecs->ptid = info->ptid;
2251 ecs->event_thread = find_thread_ptid (info->ptid);
2252 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2253 ecs->ws.value.sig = TARGET_SIGNAL_0;
2254
2255 handle_inferior_event (ecs);
2256
2257 if (!ecs->wait_some_more)
2258 {
2259 struct thread_info *tp;
2260
2261 normal_stop ();
2262
2263 /* Finish off the continuations. The continuations
2264 themselves are responsible for realising the thread
2265 didn't finish what it was supposed to do. */
2266 tp = inferior_thread ();
2267 do_all_intermediate_continuations_thread (tp);
2268 do_all_continuations_thread (tp);
2269 }
2270
2271 do_cleanups (old_chain);
2272 }
2273
2274 return 0;
2275 }
2276
2277 /* This function is attached as a "thread_stop_requested" observer.
2278 Clean up local state that assumed the PTID was to be resumed, and
2279 report the stop to the frontend. */
2280
2281 static void
2282 infrun_thread_stop_requested (ptid_t ptid)
2283 {
2284 struct displaced_step_inferior_state *displaced;
2285
2286 /* PTID was requested to stop. Remove it from the displaced
2287 stepping queue, so we don't try to resume it automatically. */
2288
2289 for (displaced = displaced_step_inferior_states;
2290 displaced;
2291 displaced = displaced->next)
2292 {
2293 struct displaced_step_request *it, **prev_next_p;
2294
2295 it = displaced->step_request_queue;
2296 prev_next_p = &displaced->step_request_queue;
2297 while (it)
2298 {
2299 if (ptid_match (it->ptid, ptid))
2300 {
2301 *prev_next_p = it->next;
2302 it->next = NULL;
2303 xfree (it);
2304 }
2305 else
2306 {
2307 prev_next_p = &it->next;
2308 }
2309
2310 it = *prev_next_p;
2311 }
2312 }
2313
2314 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2315 }
2316
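/* Called when thread TP exits: if it was the thread that reported
the last target event, forget that cached ptid. */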
2317 static void
2318 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2319 {
2320 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2321 nullify_last_target_wait_ptid ();
2322 }
2323
2324 /* Callback for iterate_over_threads. */
2325
2326 static int
2327 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2328 {
2329 if (is_exited (info->ptid))
2330 return 0;
2331
2332 delete_step_resume_breakpoint (info);
2333 return 0;
2334 }
2335
2336 /* In all-stop, delete the step resume breakpoint of any thread that
2337 had one. In non-stop, delete the step resume breakpoint of the
2338 thread that just stopped. */
2339
2340 static void
2341 delete_step_thread_step_resume_breakpoint (void)
2342 {
2343 if (!target_has_execution
2344 || ptid_equal (inferior_ptid, null_ptid))
2345 /* If the inferior has exited, we have already deleted the step
2346 resume breakpoints out of GDB's lists. */
2347 return;
2348
2349 if (non_stop)
2350 {
2351 /* If in non-stop mode, only delete the step-resume or
2352 longjmp-resume breakpoint of the thread that just stopped
2353 stepping. */
2354 struct thread_info *tp = inferior_thread ();
2355
2356 delete_step_resume_breakpoint (tp);
2357 }
2358 else
2359 /* In all-stop mode, delete all step-resume and longjmp-resume
2360 breakpoints of any thread that had them. */
2361 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2362 }
2363
2364 /* A cleanup wrapper. */
2365
2366 static void
2367 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2368 {
2369 delete_step_thread_step_resume_breakpoint ();
2370 }
2371
2372 /* Pretty print the results of target_wait, for debugging purposes. */
2373
2374 static void
2375 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2376 const struct target_waitstatus *ws)
2377 {
2378 char *status_string = target_waitstatus_to_string (ws);
2379 struct ui_file *tmp_stream = mem_fileopen ();
2380 char *text;
2381
2382 /* The text is split over several lines because it was getting too long.
2383 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2384 output as a unit; we want only one timestamp printed if debug_timestamp
2385 is set. */
2386
2387 fprintf_unfiltered (tmp_stream,
2388 "infrun: target_wait (%d", PIDGET (waiton_ptid));
2389 if (PIDGET (waiton_ptid) != -1)
2390 fprintf_unfiltered (tmp_stream,
2391 " [%s]", target_pid_to_str (waiton_ptid));
2392 fprintf_unfiltered (tmp_stream, ", status) =\n");
2393 fprintf_unfiltered (tmp_stream,
2394 "infrun: %d [%s],\n",
2395 PIDGET (result_ptid), target_pid_to_str (result_ptid));
2396 fprintf_unfiltered (tmp_stream,
2397 "infrun: %s\n",
2398 status_string);
2399
2400 text = ui_file_xstrdup (tmp_stream, NULL);
2401
2402 /* This uses %s in part to handle %'s in the text, but also to avoid
2403 a gcc error: the format attribute requires a string literal. */
2404 fprintf_unfiltered (gdb_stdlog, "%s", text);
2405
2406 xfree (status_string);
2407 xfree (text);
2408 ui_file_delete (tmp_stream);
2409 }
2410
2411 /* Prepare and stabilize the inferior for detaching it. E.g.,
2412 detaching while a thread is displaced stepping is a recipe for
2413 crashing it, as nothing would readjust the PC out of the scratch
2414 pad. */
2415
2416 void
2417 prepare_for_detach (void)
2418 {
2419 struct inferior *inf = current_inferior ();
2420 ptid_t pid_ptid = pid_to_ptid (inf->pid);
2421 struct cleanup *old_chain_1;
2422 struct displaced_step_inferior_state *displaced;
2423
2424 displaced = get_displaced_stepping_state (inf->pid);
2425
2426 /* Is any thread of this process displaced stepping? If not,
2427 there's nothing else to do. */
2428 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
2429 return;
2430
2431 if (debug_infrun)
2432 fprintf_unfiltered (gdb_stdlog,
2433 "displaced-stepping in-process while detaching");
2434
2435 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
2436 inf->detaching = 1;
2437
2438 while (!ptid_equal (displaced->step_ptid, null_ptid))
2439 {
2440 struct cleanup *old_chain_2;
2441 struct execution_control_state ecss;
2442 struct execution_control_state *ecs;
2443
2444 ecs = &ecss;
2445 memset (ecs, 0, sizeof (*ecs));
2446
2447 overlay_cache_invalid = 1;
2448
2449 /* We have to invalidate the registers BEFORE calling
2450 target_wait because they can be loaded from the target while
2451 in target_wait. This makes remote debugging a bit more
2452 efficient for those targets that provide critical registers
2453 as part of their normal status mechanism. */
2454
2455 registers_changed ();
2456
2457 if (deprecated_target_wait_hook)
2458 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
2459 else
2460 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
2461
2462 if (debug_infrun)
2463 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
2464
2465 /* If an error happens while handling the event, propagate GDB's
2466 knowledge of the executing state to the frontend/user running
2467 state. */
2468 old_chain_2 = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2469
2470 /* In non-stop mode, each thread is handled individually.
2471 Switch early, so the global state is set correctly for this
2472 thread. */
2473 if (non_stop
2474 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2475 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2476 context_switch (ecs->ptid);
2477
2478 /* Now figure out what to do with the result. */
2479 handle_inferior_event (ecs);
2480
2481 /* No error, don't finish the state yet. */
2482 discard_cleanups (old_chain_2);
2483
2484 /* Breakpoints and watchpoints are not installed on the target
2485 at this point, and signals are passed directly to the
2486 inferior, so this must mean the process is gone. */
2487 if (!ecs->wait_some_more)
2488 {
2489 discard_cleanups (old_chain_1);
2490 error (_("Program exited while detaching"));
2491 }
2492 }
2493
2494 discard_cleanups (old_chain_1);
2495 }
2496
2497 /* Wait for control to return from inferior to debugger.
2498
2499 If TREAT_EXEC_AS_SIGTRAP is non-zero, then handle EXEC signals
2500 as if they were SIGTRAP signals. This can be useful during
2501 the startup sequence on some targets such as HP/UX, where
2502 we receive an EXEC event instead of the expected SIGTRAP.
2503
2504 If the inferior gets a signal, we may decide to start it up again
2505 instead of returning. That is why there is a loop in this function.
2506 When this function actually returns it means the inferior
2507 should be left stopped and GDB should read more commands. */
2508
2509 void
2510 wait_for_inferior (int treat_exec_as_sigtrap)
2511 {
2512 struct cleanup *old_cleanups;
2513 struct execution_control_state ecss;
2514 struct execution_control_state *ecs;
2515
2516 if (debug_infrun)
2517 fprintf_unfiltered
2518 (gdb_stdlog, "infrun: wait_for_inferior (treat_exec_as_sigtrap=%d)\n",
2519 treat_exec_as_sigtrap);
2520
2521 old_cleanups =
2522 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2523
2524 ecs = &ecss;
2525 memset (ecs, 0, sizeof (*ecs));
2526
2527 /* We'll update this if & when we switch to a new thread. */
2528 previous_inferior_ptid = inferior_ptid;
2529
2530 while (1)
2531 {
2532 struct cleanup *old_chain;
2533
2534 /* We have to invalidate the registers BEFORE calling target_wait
2535 because they can be loaded from the target while in target_wait.
2536 This makes remote debugging a bit more efficient for those
2537 targets that provide critical registers as part of their normal
2538 status mechanism. */
2539
2540 overlay_cache_invalid = 1;
2541 registers_changed ();
2542
2543 if (deprecated_target_wait_hook)
2544 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2545 else
2546 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2547
2548 if (debug_infrun)
2549 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2550
2551 if (treat_exec_as_sigtrap && ecs->ws.kind == TARGET_WAITKIND_EXECD)
2552 {
2553 xfree (ecs->ws.value.execd_pathname);
2554 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2555 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
2556 }
2557
2558 /* If an error happens while handling the event, propagate GDB's
2559 knowledge of the executing state to the frontend/user running
2560 state. */
2561 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2562
2563 if (ecs->ws.kind == TARGET_WAITKIND_SYSCALL_ENTRY
2564 || ecs->ws.kind == TARGET_WAITKIND_SYSCALL_RETURN)
2565 ecs->ws.value.syscall_number = UNKNOWN_SYSCALL;
2566
2567 /* Now figure out what to do with the result. */
2568 handle_inferior_event (ecs);
2569
2570 /* No error, don't finish the state yet. */
2571 discard_cleanups (old_chain);
2572
2573 if (!ecs->wait_some_more)
2574 break;
2575 }
2576
2577 do_cleanups (old_cleanups);
2578 }
2579
2580 /* Asynchronous version of wait_for_inferior. It is called by the
2581 event loop whenever a change of state is detected on the file
2582 descriptor corresponding to the target. It can be called more than
2583 once to complete a single execution command. In such cases we need
2584 to keep the relevant state between calls. If it is the last time
2585 that this function is called for a single execution command, then
2586 report to the user that the inferior has stopped, and do the
2587 necessary cleanups. */
2588
2589 void
2590 fetch_inferior_event (void *client_data)
2591 {
2592 struct execution_control_state ecss;
2593 struct execution_control_state *ecs = &ecss;
2594 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2595 struct cleanup *ts_old_chain;
2596 int was_sync = sync_execution;
2597
2598 memset (ecs, 0, sizeof (*ecs));
2599
2600 /* We'll update this if & when we switch to a new thread. */
2601 previous_inferior_ptid = inferior_ptid;
2602
2603 if (non_stop)
2604 /* In non-stop mode, the user/frontend should not notice a thread
2605 switch due to internal events. Make sure we revert to the
2606 user selected thread and frame after handling the event and
2607 running any breakpoint commands. */
2608 make_cleanup_restore_current_thread ();
2609
2610 /* We have to invalidate the registers BEFORE calling target_wait
2611 because they can be loaded from the target while in target_wait.
2612 This makes remote debugging a bit more efficient for those
2613 targets that provide critical registers as part of their normal
2614 status mechanism. */
2615
2616 overlay_cache_invalid = 1;
2617 registers_changed ();
2618
2619 if (deprecated_target_wait_hook)
2620 ecs->ptid =
2621 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2622 else
2623 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2624
2625 if (debug_infrun)
2626 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2627
2628 if (non_stop
2629 && ecs->ws.kind != TARGET_WAITKIND_IGNORE
2630 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2631 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2632 /* In non-stop mode, each thread is handled individually. Switch
2633 early, so the global state is set correctly for this
2634 thread. */
2635 context_switch (ecs->ptid);
2636
2637 /* If an error happens while handling the event, propagate GDB's
2638 knowledge of the executing state to the frontend/user running
2639 state. */
2640 if (!non_stop)
2641 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2642 else
2643 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2644
2645 /* Now figure out what to do with the result. */
2646 handle_inferior_event (ecs);
2647
2648 if (!ecs->wait_some_more)
2649 {
2650 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2651
2652 delete_step_thread_step_resume_breakpoint ();
2653
2654 /* We may not find an inferior if this was a process exit. */
2655 if (inf == NULL || inf->stop_soon == NO_STOP_QUIETLY)
2656 normal_stop ();
2657
2658 if (target_has_execution
2659 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2660 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2661 && ecs->event_thread->step_multi
2662 && ecs->event_thread->stop_step)
2663 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2664 else
2665 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2666 }
2667
2668 /* No error, don't finish the thread states yet. */
2669 discard_cleanups (ts_old_chain);
2670
2671 /* Revert thread and frame. */
2672 do_cleanups (old_chain);
2673
2674 /* If the inferior was in sync execution mode, and now isn't,
2675 restore the prompt. */
2676 if (was_sync && !sync_execution)
2677 display_gdb_prompt (0);
2678 }
2679
2680 /* Record the frame and location we're currently stepping through. */
2681 void
2682 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2683 {
2684 struct thread_info *tp = inferior_thread ();
2685
2686 tp->step_frame_id = get_frame_id (frame);
2687 tp->step_stack_frame_id = get_stack_frame_id (frame);
2688
2689 tp->current_symtab = sal.symtab;
2690 tp->current_line = sal.line;
2691 }
2692
2693 /* Clear context switchable stepping state. */
2694
2695 void
2696 init_thread_stepping_state (struct thread_info *tss)
2697 {
2698 tss->stepping_over_breakpoint = 0;
2699 tss->step_after_step_resume_breakpoint = 0;
2700 tss->stepping_through_solib_after_catch = 0;
2701 tss->stepping_through_solib_catchpoints = NULL;
2702 }
2703
2704 /* Return the cached copy of the last pid/waitstatus returned by
2705 target_wait()/deprecated_target_wait_hook(). The data is actually
2706 cached by handle_inferior_event(), which gets called immediately
2707 after target_wait()/deprecated_target_wait_hook(). */
2708
2709 void
2710 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
2711 {
2712 *ptidp = target_last_wait_ptid;
2713 *status = target_last_waitstatus;
2714 }
2715
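/* Forget the cached ptid of the last target event. */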
2716 void
2717 nullify_last_target_wait_ptid (void)
2718 {
2719 target_last_wait_ptid = minus_one_ptid;
2720 }
2721
2722 /* Switch thread contexts. */
2723
2724 static void
2725 context_switch (ptid_t ptid)
2726 {
2727 if (debug_infrun)
2728 {
2729 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
2730 target_pid_to_str (inferior_ptid));
2731 fprintf_unfiltered (gdb_stdlog, "to %s\n",
2732 target_pid_to_str (ptid));
2733 }
2734
2735 switch_to_thread (ptid);
2736 }
2737
2738 static void
2739 adjust_pc_after_break (struct execution_control_state *ecs)
2740 {
2741 struct regcache *regcache;
2742 struct gdbarch *gdbarch;
2743 struct address_space *aspace;
2744 CORE_ADDR breakpoint_pc;
2745
2746 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
2747 we aren't, just return.
2748
2749 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
2750 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
2751 implemented by software breakpoints should be handled through the normal
2752 breakpoint layer.
2753
2754 NOTE drow/2004-01-31: On some targets, breakpoints may generate
2755 different signals (SIGILL or SIGEMT for instance), but it is less
2756 clear where the PC is pointing afterwards. It may not match
2757 gdbarch_decr_pc_after_break. I don't know any specific target that
2758 generates these signals at breakpoints (the code has been in GDB since at
2759 least 1992) so I can not guess how to handle them here.
2760
2761 In earlier versions of GDB, a target with
2762 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
2763 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
2764 target with both of these set in GDB history, and it seems unlikely to be
2765 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
2766
2767 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
2768 return;
2769
2770 if (ecs->ws.value.sig != TARGET_SIGNAL_TRAP)
2771 return;
2772
2773 /* In reverse execution, when a breakpoint is hit, the instruction
2774 under it has already been de-executed. The reported PC always
2775 points at the breakpoint address, so adjusting it further would
2776 be wrong. E.g., consider this case on a decr_pc_after_break == 1
2777 architecture:
2778
2779 B1 0x08000000 : INSN1
2780 B2 0x08000001 : INSN2
2781 0x08000002 : INSN3
2782 PC -> 0x08000003 : INSN4
2783
2784 Say you're stopped at 0x08000003 as above. Reverse continuing
2785 from that point should hit B2 as below. Reading the PC when the
2786 SIGTRAP is reported should read 0x08000001 and INSN2 should have
2787 been de-executed already.
2788
2789 B1 0x08000000 : INSN1
2790 B2 PC -> 0x08000001 : INSN2
2791 0x08000002 : INSN3
2792 0x08000003 : INSN4
2793
2794 We can't apply the same logic as for forward execution, because
2795 we would wrongly adjust the PC to 0x08000000, since there's a
2796 breakpoint at PC - 1. We'd then report a hit on B1, although
2797 INSN1 hadn't been de-executed yet. Doing nothing is the correct
2798 behaviour. */
2799 if (execution_direction == EXEC_REVERSE)
2800 return;
2801
2802 /* If this target does not decrement the PC after breakpoints, then
2803 we have nothing to do. */
2804 regcache = get_thread_regcache (ecs->ptid);
2805 gdbarch = get_regcache_arch (regcache);
2806 if (gdbarch_decr_pc_after_break (gdbarch) == 0)
2807 return;
2808
2809 aspace = get_regcache_aspace (regcache);
2810
2811 /* Find the location where (if we've hit a breakpoint) the
2812 breakpoint would be. */
2813 breakpoint_pc = regcache_read_pc (regcache)
2814 - gdbarch_decr_pc_after_break (gdbarch);
2815
2816 /* Check whether there actually is a software breakpoint inserted at
2817 that location.
2818
2819 If in non-stop mode, a race condition is possible where we've
2820 removed a breakpoint, but stop events for that breakpoint were
2821 already queued and arrive later. To suppress those spurious
2822 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
2823 and retire them after a number of stop events are reported. */
2824 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
2825 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
2826 {
2827 struct cleanup *old_cleanups = NULL;
2828
2829 if (RECORD_IS_USED)
2830 old_cleanups = record_gdb_operation_disable_set ();
2831
2832 /* When using hardware single-step, a SIGTRAP is reported for both
2833 a completed single-step and a software breakpoint. Need to
2834 differentiate between the two, as the latter needs adjusting
2835 but the former does not.
2836
2837 The SIGTRAP can be due to a completed hardware single-step only if
2838 - we didn't insert software single-step breakpoints
2839 - the thread to be examined is still the current thread
2840 - this thread is currently being stepped
2841
2842 If any of these events did not occur, we must have stopped due
2843 to hitting a software breakpoint, and have to back up to the
2844 breakpoint address.
2845
2846 As a special case, we could have hardware single-stepped a
2847 software breakpoint. In this case (prev_pc == breakpoint_pc),
2848 we also need to back up to the breakpoint address. */
2849
2850 if (singlestep_breakpoints_inserted_p
2851 || !ptid_equal (ecs->ptid, inferior_ptid)
2852 || !currently_stepping (ecs->event_thread)
2853 || ecs->event_thread->prev_pc == breakpoint_pc)
2854 regcache_write_pc (regcache, breakpoint_pc);
2855
2856 if (RECORD_IS_USED)
2857 do_cleanups (old_cleanups);
2858 }
2859 }
2860
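/* Reset the inferior-wait state: wait for events from any ptid, with
no special handling pending for the next event. */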
2861 void
2862 init_infwait_state (void)
2863 {
2864 waiton_ptid = pid_to_ptid (-1);
2865 infwait_state = infwait_normal_state;
2866 }
2867
2868 void
2869 error_is_running (void)
2870 {
2871 error (_("\
2872 Cannot execute this command while the selected thread is running."));
2873 }
2874
2875 void
2876 ensure_not_running (void)
2877 {
2878 if (is_running (inferior_ptid))
2879 error_is_running ();
2880 }
2881
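/* Return nonzero if the frame identified by STEP_FRAME_ID is an outer
frame of FRAME reachable without crossing a non-inlined frame, i.e.,
FRAME was apparently stepped into from that frame. */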
2882 static int
2883 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
2884 {
2885 for (frame = get_prev_frame (frame);
2886 frame != NULL;
2887 frame = get_prev_frame (frame))
2888 {
2889 if (frame_id_eq (get_frame_id (frame), step_frame_id))
2890 return 1;
2891 if (get_frame_type (frame) != INLINE_FRAME)
2892 break;
2893 }
2894
2895 return 0;
2896 }
2897
2898 /* Auxiliary function that handles syscall entry/return events.
2899 It returns 1 if the inferior should keep going (and GDB
2900 should ignore the event), or 0 if the event deserves to be
2901 processed. */
2902
2903 static int
2904 handle_syscall_event (struct execution_control_state *ecs)
2905 {
2906 struct regcache *regcache;
2907 struct gdbarch *gdbarch;
2908 int syscall_number;
2909
2910 if (!ptid_equal (ecs->ptid, inferior_ptid))
2911 context_switch (ecs->ptid);
2912
2913 regcache = get_thread_regcache (ecs->ptid);
2914 gdbarch = get_regcache_arch (regcache);
2915 syscall_number = gdbarch_get_syscall_number (gdbarch, ecs->ptid);
2916 stop_pc = regcache_read_pc (regcache);
2917
2918 target_last_waitstatus.value.syscall_number = syscall_number;
2919
2920 if (catch_syscall_enabled () > 0
2921 && catching_syscall_number (syscall_number) > 0)
2922 {
2923 if (debug_infrun)
2924 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
2925 syscall_number);
2926
2927 ecs->event_thread->stop_bpstat
2928 = bpstat_stop_status (get_regcache_aspace (regcache),
2929 stop_pc, ecs->ptid);
2930 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
2931
2932 if (!ecs->random_signal)
2933 {
2934 /* Catchpoint hit. */
2935 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
2936 return 0;
2937 }
2938 }
2939
2940 /* If no catchpoint triggered for this, then keep going. */
2941 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
2942 keep_going (ecs);
2943 return 1;
2944 }
2945
2946 /* Given an execution control state that has been freshly filled in
2947 by an event from the inferior, figure out what it means and take
2948 appropriate action. */
2949
2950 static void
2951 handle_inferior_event (struct execution_control_state *ecs)
2952 {
2953 struct frame_info *frame;
2954 struct gdbarch *gdbarch;
2955 int sw_single_step_trap_p = 0;
2956 int stopped_by_watchpoint;
2957 int stepped_after_stopped_by_watchpoint = 0;
2958 struct symtab_and_line stop_pc_sal;
2959 enum stop_kind stop_soon;
2960
2961 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
2962 {
2963 /* We had an event in the inferior, but we are not interested in
2964 handling it at this level. The lower layers have already
2965 done what needs to be done, if anything.
2966
2967 One of the possible circumstances for this is when the
2968 inferior produces output for the console. The inferior has
2969 not stopped, and we are ignoring the event. Another possible
2970 circumstance is any event which the lower level knows will be
2971 reported multiple times without an intervening resume. */
2972 if (debug_infrun)
2973 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
2974 prepare_to_wait (ecs);
2975 return;
2976 }
2977
2978 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
2979 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2980 {
2981 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2982
2983 gdb_assert (inf);
2984 stop_soon = inf->stop_soon;
2985 }
2986 else
2987 stop_soon = NO_STOP_QUIETLY;
2988
2989 /* Cache the last pid/waitstatus. */
2990 target_last_wait_ptid = ecs->ptid;
2991 target_last_waitstatus = ecs->ws;
2992
2993 /* Always clear state belonging to the previous time we stopped. */
2994 stop_stack_dummy = STOP_NONE;
2995
2996 /* If the event is from a thread GDB doesn't know about yet, add it to the thread database. */
2997
2998 ecs->new_thread_event = (!ptid_equal (ecs->ptid, inferior_ptid)
2999 && !ptid_equal (ecs->ptid, minus_one_ptid)
3000 && !in_thread_list (ecs->ptid));
3001
3002 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3003 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED && ecs->new_thread_event)
3004 add_thread (ecs->ptid);
3005
3006 ecs->event_thread = find_thread_ptid (ecs->ptid);
3007
3008 /* Dependent on valid ECS->EVENT_THREAD. */
3009 adjust_pc_after_break (ecs);
3010
3011 /* Dependent on the current PC value modified by adjust_pc_after_break. */
3012 reinit_frame_cache ();
3013
3014 breakpoint_retire_moribund ();
3015
3016 /* First, distinguish signals caused by the debugger from signals
3017 that have to do with the program's own actions. Note that
3018 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
3019 on the operating system version. Here we detect when a SIGILL or
3020 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
3021 something similar for SIGSEGV, since a SIGSEGV will be generated
3022 when we're trying to execute a breakpoint instruction on a
3023 non-executable stack. This happens for call dummy breakpoints
3024 for architectures like SPARC that place call dummies on the
3025 stack. */
3026 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
3027 && (ecs->ws.value.sig == TARGET_SIGNAL_ILL
3028 || ecs->ws.value.sig == TARGET_SIGNAL_SEGV
3029 || ecs->ws.value.sig == TARGET_SIGNAL_EMT))
3030 {
3031 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3032
3033 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
3034 regcache_read_pc (regcache)))
3035 {
3036 if (debug_infrun)
3037 fprintf_unfiltered (gdb_stdlog,
3038 "infrun: Treating signal as SIGTRAP\n");
3039 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
3040 }
3041 }
3042
3043 /* Mark the non-executing threads accordingly. In all-stop, all
3044 threads of all processes are stopped when we get any event
3045 reported. In non-stop mode, only the event thread stops. If
3046 we're handling a process exit in non-stop mode, there's nothing
3047 to do, as threads of the dead process are gone, and threads of
3048 any other process were left running. */
3049 if (!non_stop)
3050 set_executing (minus_one_ptid, 0);
3051 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3052 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
3053 set_executing (inferior_ptid, 0);
3054
3055 switch (infwait_state)
3056 {
3057 case infwait_thread_hop_state:
3058 if (debug_infrun)
3059 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_thread_hop_state\n");
3060 break;
3061
3062 case infwait_normal_state:
3063 if (debug_infrun)
3064 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
3065 break;
3066
3067 case infwait_step_watch_state:
3068 if (debug_infrun)
3069 fprintf_unfiltered (gdb_stdlog,
3070 "infrun: infwait_step_watch_state\n");
3071
3072 stepped_after_stopped_by_watchpoint = 1;
3073 break;
3074
3075 case infwait_nonstep_watch_state:
3076 if (debug_infrun)
3077 fprintf_unfiltered (gdb_stdlog,
3078 "infrun: infwait_nonstep_watch_state\n");
3079 insert_breakpoints ();
3080
3081 /* FIXME-maybe: is this cleaner than setting a flag? Does it
3082 handle things like signals arriving and other things happening
3083 in combination correctly? */
3084 stepped_after_stopped_by_watchpoint = 1;
3085 break;
3086
3087 default:
3088 internal_error (__FILE__, __LINE__, _("bad switch"));
3089 }
3090
3091 infwait_state = infwait_normal_state;
3092 waiton_ptid = pid_to_ptid (-1);
3093
3094 switch (ecs->ws.kind)
3095 {
3096 case TARGET_WAITKIND_LOADED:
3097 if (debug_infrun)
3098 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
3099 /* Ignore gracefully during startup of the inferior, as it might
3100 be the shell which has just loaded some objects, otherwise
3101 add the symbols for the newly loaded objects. Also ignore at
3102 the beginning of an attach or remote session; we will query
3103 the full list of libraries once the connection is
3104 established. */
3105 if (stop_soon == NO_STOP_QUIETLY)
3106 {
3107 /* Check for any newly added shared libraries if we're
3108 supposed to be adding them automatically. Switch
3109 terminal for any messages produced by
3110 breakpoint_re_set. */
3111 target_terminal_ours_for_output ();
3112 /* NOTE: cagney/2003-11-25: Make certain that the target
3113 stack's section table is kept up-to-date. Architectures,
3114 (e.g., PPC64), use the section table to perform
3115 operations such as address => section name and hence
3116 require the table to contain all sections (including
3117 those found in shared libraries). */
3118 #ifdef SOLIB_ADD
3119 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
3120 #else
3121 solib_add (NULL, 0, &current_target, auto_solib_add);
3122 #endif
3123 target_terminal_inferior ();
3124
3125 /* If requested, stop when the dynamic linker notifies
3126 gdb of events. This allows the user to get control
3127 and place breakpoints in initializer routines for
3128 dynamically loaded objects (among other things). */
3129 if (stop_on_solib_events)
3130 {
3131 /* Make sure we print "Stopped due to solib-event" in
3132 normal_stop. */
3133 stop_print_frame = 1;
3134
3135 stop_stepping (ecs);
3136 return;
3137 }
3138
3139 /* NOTE drow/2007-05-11: This might be a good place to check
3140 for "catch load". */
3141 }
3142
3143 /* If we are skipping through a shell, or through shared library
3144 loading that we aren't interested in, resume the program. If
3145 we're running the program normally, also resume. But stop if
3146 we're attaching or setting up a remote connection. */
3147 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3148 {
3149 /* Loading of shared libraries might have changed breakpoint
3150 addresses. Make sure new breakpoints are inserted. */
3151 if (stop_soon == NO_STOP_QUIETLY
3152 && !breakpoints_always_inserted_mode ())
3153 insert_breakpoints ();
3154 resume (0, TARGET_SIGNAL_0);
3155 prepare_to_wait (ecs);
3156 return;
3157 }
3158
3159 break;
3160
3161 case TARGET_WAITKIND_SPURIOUS:
3162 if (debug_infrun)
3163 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3164 resume (0, TARGET_SIGNAL_0);
3165 prepare_to_wait (ecs);
3166 return;
3167
3168 case TARGET_WAITKIND_EXITED:
3169 if (debug_infrun)
3170 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXITED\n");
3171 inferior_ptid = ecs->ptid;
3172 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3173 set_current_program_space (current_inferior ()->pspace);
3174 handle_vfork_child_exec_or_exit (0);
3175 target_terminal_ours (); /* Must do this before mourn anyway */
3176 print_exited_reason (ecs->ws.value.integer);
3177
3178 /* Record the exit code in the convenience variable $_exitcode, so
3179 that the user can inspect this again later. */
3180 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3181 (LONGEST) ecs->ws.value.integer);
3182 gdb_flush (gdb_stdout);
3183 target_mourn_inferior ();
3184 singlestep_breakpoints_inserted_p = 0;
3185 cancel_single_step_breakpoints ();
3186 stop_print_frame = 0;
3187 stop_stepping (ecs);
3188 return;
3189
3190 case TARGET_WAITKIND_SIGNALLED:
3191 if (debug_infrun)
3192 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SIGNALLED\n");
3193 inferior_ptid = ecs->ptid;
3194 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3195 set_current_program_space (current_inferior ()->pspace);
3196 handle_vfork_child_exec_or_exit (0);
3197 stop_print_frame = 0;
3198 target_terminal_ours (); /* Must do this before mourn anyway */
3199
3200 /* Note: By definition of TARGET_WAITKIND_SIGNALLED, we shouldn't
3201 reach here unless the inferior is dead. However, for years
3202 target_kill() was called here, which hints that fatal signals aren't
3203 really fatal on some systems. If that's true, then some changes
3204 may be needed. */
3205 target_mourn_inferior ();
3206
3207 print_signal_exited_reason (ecs->ws.value.sig);
3208 singlestep_breakpoints_inserted_p = 0;
3209 cancel_single_step_breakpoints ();
3210 stop_stepping (ecs);
3211 return;
3212
3213 /* The following are the only cases in which we keep going;
3214 the above cases end in a continue or goto. */
3215 case TARGET_WAITKIND_FORKED:
3216 case TARGET_WAITKIND_VFORKED:
3217 if (debug_infrun)
3218 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3219
3220 if (!ptid_equal (ecs->ptid, inferior_ptid))
3221 {
3222 context_switch (ecs->ptid);
3223 reinit_frame_cache ();
3224 }
3225
3226 /* Immediately detach breakpoints from the child before there's
3227 any chance of letting the user delete breakpoints from the
3228 breakpoint lists. If we don't do this early, it's easy to
3229 leave leftover traps in the child, viz: "break foo; catch
3230 fork; c; <fork>; del; c; <child calls foo>". We only follow
3231 the fork on the last `continue', and by that time the
3232 breakpoint at "foo" is long gone from the breakpoint table.
3233 If we vforked, then we don't need to unpatch here, since both
3234 parent and child are sharing the same memory pages; we'll
3235 need to unpatch at follow/detach time instead to be certain
3236 that new breakpoints added between catchpoint hit time and
3237 vfork follow are detached. */
3238 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
3239 {
3240 int child_pid = ptid_get_pid (ecs->ws.value.related_pid);
3241
3242 /* This won't actually modify the breakpoint list, but will
3243 physically remove the breakpoints from the child. */
3244 detach_breakpoints (child_pid);
3245 }
3246
3247 if (singlestep_breakpoints_inserted_p)
3248 {
3249 /* Pull the single step breakpoints out of the target. */
3250 remove_single_step_breakpoints ();
3251 singlestep_breakpoints_inserted_p = 0;
3252 }
3253
3254 /* In case the event is caught by a catchpoint, remember that
3255 the event is to be followed at the next resume of the thread,
3256 and not immediately. */
3257 ecs->event_thread->pending_follow = ecs->ws;
3258
3259 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3260
3261 ecs->event_thread->stop_bpstat
3262 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3263 stop_pc, ecs->ptid);
3264
3265 /* Note that we're interested in knowing whether the bpstat
3266 actually causes a stop, not just whether it may explain the signal.
3267 Software watchpoints, for example, always appear in the
3268 bpstat. */
3269 ecs->random_signal = !bpstat_causes_stop (ecs->event_thread->stop_bpstat);
3270
3271 /* If no catchpoint triggered for this, then keep going. */
3272 if (ecs->random_signal)
3273 {
3274 ptid_t parent;
3275 ptid_t child;
3276 int should_resume;
3277 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
3278
3279 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3280
3281 should_resume = follow_fork ();
3282
3283 parent = ecs->ptid;
3284 child = ecs->ws.value.related_pid;
3285
3286 /* In non-stop mode, also resume the other branch. */
3287 if (non_stop && !detach_fork)
3288 {
3289 if (follow_child)
3290 switch_to_thread (parent);
3291 else
3292 switch_to_thread (child);
3293
3294 ecs->event_thread = inferior_thread ();
3295 ecs->ptid = inferior_ptid;
3296 keep_going (ecs);
3297 }
3298
3299 if (follow_child)
3300 switch_to_thread (child);
3301 else
3302 switch_to_thread (parent);
3303
3304 ecs->event_thread = inferior_thread ();
3305 ecs->ptid = inferior_ptid;
3306
3307 if (should_resume)
3308 keep_going (ecs);
3309 else
3310 stop_stepping (ecs);
3311 return;
3312 }
3313 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3314 goto process_event_stop_test;
3315
3316 case TARGET_WAITKIND_VFORK_DONE:
3317 /* Done with the shared memory region. Re-insert breakpoints in
3318 the parent, and keep going. */
3319
3320 if (debug_infrun)
3321 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3322
3323 if (!ptid_equal (ecs->ptid, inferior_ptid))
3324 context_switch (ecs->ptid);
3325
3326 current_inferior ()->waiting_for_vfork_done = 0;
3327 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3328 /* This also takes care of reinserting breakpoints in the
3329 previously locked inferior. */
3330 keep_going (ecs);
3331 return;
3332
3333 case TARGET_WAITKIND_EXECD:
3334 if (debug_infrun)
3335 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3336
3337 if (!ptid_equal (ecs->ptid, inferior_ptid))
3338 {
3339 context_switch (ecs->ptid);
3340 reinit_frame_cache ();
3341 }
3342
3343 singlestep_breakpoints_inserted_p = 0;
3344 cancel_single_step_breakpoints ();
3345
3346 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3347
3348 /* Do whatever is necessary to the parent branch of the vfork. */
3349 handle_vfork_child_exec_or_exit (1);
3350
3351 /* This causes the eventpoints and symbol table to be reset.
3352 Must do this now, before trying to determine whether to
3353 stop. */
3354 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3355
3356 ecs->event_thread->stop_bpstat
3357 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3358 stop_pc, ecs->ptid);
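/* Check whether a catchpoint explains this stop; if nothing in the
bpstat does, the code below treats the trap as a random signal and
simply keeps going. */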
3359 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
3360
3361 /* Note that this may be referenced from inside
3362 bpstat_stop_status above, through inferior_has_execd. */
3363 xfree (ecs->ws.value.execd_pathname);
3364 ecs->ws.value.execd_pathname = NULL;
3365
3366 /* If no catchpoint triggered for this, then keep going. */
3367 if (ecs->random_signal)
3368 {
3369 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3370 keep_going (ecs);
3371 return;
3372 }
3373 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3374 goto process_event_stop_test;
3375
3376 /* Be careful not to try to gather much state about a thread
3377 that's in a syscall. It's frequently a losing proposition. */
3378 case TARGET_WAITKIND_SYSCALL_ENTRY:
3379 if (debug_infrun)
3380 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3381 /* Get the current syscall number. */
3382 if (handle_syscall_event (ecs) != 0)
3383 return;
3384 goto process_event_stop_test;
3385
3386 /* Before examining the threads further, step this thread to
3387 get it entirely out of the syscall. (We get notice of the
3388 event when the thread is just on the verge of exiting a
3389 syscall. Stepping one instruction seems to get it back
3390 into user code.) */
3391 case TARGET_WAITKIND_SYSCALL_RETURN:
3392 if (debug_infrun)
3393 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
3394 if (handle_syscall_event (ecs) != 0)
3395 return;
3396 goto process_event_stop_test;
3397
3398 case TARGET_WAITKIND_STOPPED:
3399 if (debug_infrun)
3400 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
3401 ecs->event_thread->stop_signal = ecs->ws.value.sig;
3402 break;
3403
3404 case TARGET_WAITKIND_NO_HISTORY:
3405 /* Reverse execution: target ran out of history info. */
3406 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3407 print_no_history_reason ();
3408 stop_stepping (ecs);
3409 return;
3410 }
3411
3412 if (ecs->new_thread_event)
3413 {
3414 if (non_stop)
3415 /* Non-stop assumes that the target handles adding new threads
3416 to the thread list. */
3417 internal_error (__FILE__, __LINE__, "\
3418 targets should add new threads to the thread list themselves in non-stop mode.");
3419
3420 /* We may want to consider not doing a resume here in order to
3421 give the user a chance to play with the new thread. It might
3422 be good to make that a user-settable option. */
3423
3424 /* At this point, all threads are stopped (happens automatically
3425 in either the OS or the native code). Therefore we need to
3426 continue all threads in order to make progress. */
3427
3428 if (!ptid_equal (ecs->ptid, inferior_ptid))
3429 context_switch (ecs->ptid);
3430 target_resume (RESUME_ALL, 0, TARGET_SIGNAL_0);
3431 prepare_to_wait (ecs);
3432 return;
3433 }
3434
3435 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED)
3436 {
3437 /* Do we need to clean up the state of a thread that has
3438 completed a displaced single-step? (Doing so usually affects
3439 the PC, so do it here, before we set stop_pc.) */
3440 displaced_step_fixup (ecs->ptid, ecs->event_thread->stop_signal);
3441
3442 /* If we either finished a single-step or hit a breakpoint, but
3443 the user wanted this thread to be stopped, pretend we got a
3444 SIG0 (generic unsignaled stop). */
3445
3446 if (ecs->event_thread->stop_requested
3447 && ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3448 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3449 }
3450
3451 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3452
3453 if (debug_infrun)
3454 {
3455 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3456 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3457 struct cleanup *old_chain = save_inferior_ptid ();
3458
3459 inferior_ptid = ecs->ptid;
3460
3461 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
3462 paddress (gdbarch, stop_pc));
3463 if (target_stopped_by_watchpoint ())
3464 {
3465 CORE_ADDR addr;
3466
3467 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
3468
3469 if (target_stopped_data_address (&current_target, &addr))
3470 fprintf_unfiltered (gdb_stdlog,
3471 "infrun: stopped data address = %s\n",
3472 paddress (gdbarch, addr));
3473 else
3474 fprintf_unfiltered (gdb_stdlog,
3475 "infrun: (no data address available)\n");
3476 }
3477
3478 do_cleanups (old_chain);
3479 }
3480
3481 if (stepping_past_singlestep_breakpoint)
3482 {
3483 gdb_assert (singlestep_breakpoints_inserted_p);
3484 gdb_assert (ptid_equal (singlestep_ptid, ecs->ptid));
3485 gdb_assert (!ptid_equal (singlestep_ptid, saved_singlestep_ptid));
3486
3487 stepping_past_singlestep_breakpoint = 0;
3488
3489 /* We've either finished single-stepping past the single-step
3490 breakpoint, or stopped for some other reason. It would be nice if
3491 we could tell, but we can't reliably. */
3492 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3493 {
3494 if (debug_infrun)
3495 fprintf_unfiltered (gdb_stdlog, "infrun: stepping_past_singlestep_breakpoint\n");
3496 /* Pull the single step breakpoints out of the target. */
3497 remove_single_step_breakpoints ();
3498 singlestep_breakpoints_inserted_p = 0;
3499
3500 ecs->random_signal = 0;
3501 ecs->event_thread->trap_expected = 0;
3502
3503 context_switch (saved_singlestep_ptid);
3504 if (deprecated_context_hook)
3505 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3506
3507 resume (1, TARGET_SIGNAL_0);
3508 prepare_to_wait (ecs);
3509 return;
3510 }
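/* The thread stopped with a signal other than SIGTRAP; fall
through and process the event as an ordinary stop. */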
3511 }
3512
3513 if (!ptid_equal (deferred_step_ptid, null_ptid))
3514 {
3515 /* In non-stop mode, there's never a deferred_step_ptid set. */
3516 gdb_assert (!non_stop);
3517
3518 /* If we stopped for some other reason than single-stepping, ignore
3519 the fact that we were supposed to switch back. */
3520 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3521 {
3522 if (debug_infrun)
3523 fprintf_unfiltered (gdb_stdlog,
3524 "infrun: handling deferred step\n");
3525
3526 /* Pull the single step breakpoints out of the target. */
3527 if (singlestep_breakpoints_inserted_p)
3528 {
3529 remove_single_step_breakpoints ();
3530 singlestep_breakpoints_inserted_p = 0;
3531 }
3532
3533 /* Note: We do not call context_switch at this point, as the
3534 context is already set up for stepping the original thread. */
3535 switch_to_thread (deferred_step_ptid);
3536 deferred_step_ptid = null_ptid;
3537 /* Suppress spurious "Switching to ..." message. */
3538 previous_inferior_ptid = inferior_ptid;
3539
3540 resume (1, TARGET_SIGNAL_0);
3541 prepare_to_wait (ecs);
3542 return;
3543 }
3544
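/* We stopped for some reason other than the deferred single-step;
forget about it and handle this event normally. */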
3545 deferred_step_ptid = null_ptid;
3546 }
3547
3548 /* See if a thread hit a thread-specific breakpoint that was meant for
3549 another thread. If so, then step that thread past the breakpoint,
3550 and continue it. */
3551
3552 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3553 {
3554 int thread_hop_needed = 0;
3555 struct address_space *aspace =
3556 get_regcache_aspace (get_thread_regcache (ecs->ptid));
3557
3558 /* Check if a regular breakpoint has been hit before checking
3559 for a potential single step breakpoint. Otherwise, GDB will
3560 not see this breakpoint hit when stepping onto breakpoints. */
3561 if (regular_breakpoint_inserted_here_p (aspace, stop_pc))
3562 {
3563 ecs->random_signal = 0;
3564 if (!breakpoint_thread_match (aspace, stop_pc, ecs->ptid))
3565 thread_hop_needed = 1;
3566 }
3567 else if (singlestep_breakpoints_inserted_p)
3568 {
3569 /* We have not context switched yet, so this should be true
3570 no matter which thread hit the singlestep breakpoint. */
3571 gdb_assert (ptid_equal (inferior_ptid, singlestep_ptid));
3572 if (debug_infrun)
3573 fprintf_unfiltered (gdb_stdlog, "infrun: software single step "
3574 "trap for %s\n",
3575 target_pid_to_str (ecs->ptid));
3576
3577 ecs->random_signal = 0;
3578 /* The call to in_thread_list is necessary because PTIDs sometimes
3579 change when we go from single-threaded to multi-threaded. If
3580 the singlestep_ptid is still in the list, assume that it is
3581 really different from ecs->ptid. */
3582 if (!ptid_equal (singlestep_ptid, ecs->ptid)
3583 && in_thread_list (singlestep_ptid))
3584 {
3585 /* If the PC of the thread we were trying to single-step
3586 has changed, discard this event (which we were going
3587 to ignore anyway), and pretend we saw that thread
3588 trap. This prevents us continuously moving the
3589 single-step breakpoint forward, one instruction at a
3590 time. If the PC has changed, then the thread we were
3591 trying to single-step has trapped or been signalled,
3592 but the event has not been reported to GDB yet.
3593
3594 There might be some cases where this loses signal
3595 information, if a signal has arrived at exactly the
3596 same time that the PC changed, but this is the best
3597 we can do with the information available. Perhaps we
3598 should arrange to report all events for all threads
3599 when they stop, or to re-poll the remote looking for
3600 this particular thread (i.e. temporarily enable
3601 schedlock). */
3602
3603 CORE_ADDR new_singlestep_pc
3604 = regcache_read_pc (get_thread_regcache (singlestep_ptid));
3605
3606 if (new_singlestep_pc != singlestep_pc)
3607 {
3608 enum target_signal stop_signal;
3609
3610 if (debug_infrun)
3611 fprintf_unfiltered (gdb_stdlog, "infrun: unexpected thread,"
3612 " but expected thread advanced also\n");
3613
3614 /* The current context still belongs to
3615 singlestep_ptid. Don't swap here, since that's
3616 the context we want to use. Just fudge our
3617 state and continue. */
3618 stop_signal = ecs->event_thread->stop_signal;
3619 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3620 ecs->ptid = singlestep_ptid;
3621 ecs->event_thread = find_thread_ptid (ecs->ptid);
3622 ecs->event_thread->stop_signal = stop_signal;
3623 stop_pc = new_singlestep_pc;
3624 }
3625 else
3626 {
3627 if (debug_infrun)
3628 fprintf_unfiltered (gdb_stdlog,
3629 "infrun: unexpected thread\n");
3630
3631 thread_hop_needed = 1;
3632 stepping_past_singlestep_breakpoint = 1;
3633 saved_singlestep_ptid = singlestep_ptid;
3634 }
3635 }
3636 }
3637
3638 if (thread_hop_needed)
3639 {
3640 struct regcache *thread_regcache;
3641 int remove_status = 0;
3642
3643 if (debug_infrun)
3644 fprintf_unfiltered (gdb_stdlog, "infrun: thread_hop_needed\n");
3645
3646 /* Switch context before touching inferior memory, the
3647 previous thread may have exited. */
3648 if (!ptid_equal (inferior_ptid, ecs->ptid))
3649 context_switch (ecs->ptid);
3650
3651 /* Saw a breakpoint, but it was hit by the wrong thread.
3652 Just continue. */
3653
3654 if (singlestep_breakpoints_inserted_p)
3655 {
3656 /* Pull the single step breakpoints out of the target. */
3657 remove_single_step_breakpoints ();
3658 singlestep_breakpoints_inserted_p = 0;
3659 }
3660
3661 /* If the arch can displace step, don't remove the
3662 breakpoints. */
3663 thread_regcache = get_thread_regcache (ecs->ptid);
3664 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
3665 remove_status = remove_breakpoints ();
3666
3667 /* Did we fail to remove breakpoints? If so, try
3668 to set the PC past the bp. (There's at least
3669 one situation in which we can fail to remove
3670 the bp's: On HP-UX's that use ttrace, we can't
3671 change the address space of a vforking child
3672 process until the child exits (well, okay, not
3673 then either :-) or execs. */
3674 if (remove_status != 0)
3675 error (_("Cannot step over breakpoint hit in wrong thread"));
3676 else
3677 { /* Single step */
3678 if (!non_stop)
3679 {
3680 /* Only need to require the next event from this
3681 thread in all-stop mode. */
3682 waiton_ptid = ecs->ptid;
3683 infwait_state = infwait_thread_hop_state;
3684 }
3685
3686 ecs->event_thread->stepping_over_breakpoint = 1;
3687 keep_going (ecs);
3688 return;
3689 }
3690 }
3691 else if (singlestep_breakpoints_inserted_p)
3692 {
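/* No thread hop is needed, but our software single-step
breakpoints are still inserted; assume the SIGTRAP came from one
of them rather than from a random signal. */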
3693 sw_single_step_trap_p = 1;
3694 ecs->random_signal = 0;
3695 }
3696 }
3697 else
3698 ecs->random_signal = 1;
3699
3700 /* See if something interesting happened to the non-current thread. If
3701 so, then switch to that thread. */
3702 if (!ptid_equal (ecs->ptid, inferior_ptid))
3703 {
3704 if (debug_infrun)
3705 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
3706
3707 context_switch (ecs->ptid);
3708
3709 if (deprecated_context_hook)
3710 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3711 }
3712
3713 /* At this point, get hold of the now-current thread's frame. */
3714 frame = get_current_frame ();
3715 gdbarch = get_frame_arch (frame);
3716
3717 if (singlestep_breakpoints_inserted_p)
3718 {
3719 /* Pull the single step breakpoints out of the target. */
3720 remove_single_step_breakpoints ();
3721 singlestep_breakpoints_inserted_p = 0;
3722 }
3723
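/* Work out whether a watchpoint triggered at this stop. If we were
just single-stepping to get past a watchpointed instruction (see the
step-over-watchpoint code below), don't treat this stop as another
watchpoint hit. */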
3724 if (stepped_after_stopped_by_watchpoint)
3725 stopped_by_watchpoint = 0;
3726 else
3727 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
3728
3729 /* If necessary, step over this watchpoint. We'll be back to display
3730 it in a moment. */
3731 if (stopped_by_watchpoint
3732 && (target_have_steppable_watchpoint
3733 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
3734 {
3735 /* At this point, we are stopped at an instruction which has
3736 attempted to write to a piece of memory under control of
3737 a watchpoint. The instruction hasn't actually executed
3738 yet. If we were to evaluate the watchpoint expression
3739 now, we would get the old value, and therefore no change
3740 would seem to have occurred.
3741
3742 In order to make watchpoints work `right', we really need
3743 to complete the memory write, and then evaluate the
3744 watchpoint expression. We do this by single-stepping the
3745 target.
3746
3747 It may not be necessary to disable the watchpoint to step over
3748 it. For example, the PA can (with some kernel cooperation)
3749 single step over a watchpoint without disabling the watchpoint.
3750
3751 It is far more common to need to disable a watchpoint to step
3752 the inferior over it. If we have non-steppable watchpoints,
3753 we must disable the current watchpoint; it's simplest to
3754 disable all watchpoints and breakpoints. */
3755 int hw_step = 1;
3756
3757 if (!target_have_steppable_watchpoint)
3758 remove_breakpoints ();
3759 /* Single step */
3760 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
3761 target_resume (ecs->ptid, hw_step, TARGET_SIGNAL_0);
3762 waiton_ptid = ecs->ptid;
3763 if (target_have_steppable_watchpoint)
3764 infwait_state = infwait_step_watch_state;
3765 else
3766 infwait_state = infwait_nonstep_watch_state;
3767 prepare_to_wait (ecs);
3768 return;
3769 }
3770
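/* Reset the per-stop state that the tests below rely on, and find
out which function the stop PC falls in. */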
3771 ecs->stop_func_start = 0;
3772 ecs->stop_func_end = 0;
3773 ecs->stop_func_name = 0;
3774 /* Don't care about return value; stop_func_start and stop_func_name
3775 will both be 0 if it doesn't work. */
3776 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3777 &ecs->stop_func_start, &ecs->stop_func_end);
3778 ecs->stop_func_start
3779 += gdbarch_deprecated_function_start_offset (gdbarch);
3780 ecs->event_thread->stepping_over_breakpoint = 0;
3781 bpstat_clear (&ecs->event_thread->stop_bpstat);
3782 ecs->event_thread->stop_step = 0;
3783 stop_print_frame = 1;
3784 ecs->random_signal = 0;
3785 stopped_by_random_signal = 0;
3786
3787 /* Hide inlined functions starting here, unless we just performed stepi or
3788 nexti. After stepi and nexti, always show the innermost frame (not any
3789 inline function call sites). */
3790 if (ecs->event_thread->step_range_end != 1)
3791 skip_inline_frames (ecs->ptid);
3792
3793 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3794 && ecs->event_thread->trap_expected
3795 && gdbarch_single_step_through_delay_p (gdbarch)
3796 && currently_stepping (ecs->event_thread))
3797 {
3798 /* We're trying to step off a breakpoint. Turns out that we're
3799 also on an instruction that needs to be stepped multiple
3800 times before it has been fully executed. E.g., architectures
3801 with a delay slot. It needs to be stepped twice, once for
3802 the instruction and once for the delay slot. */
3803 int step_through_delay
3804 = gdbarch_single_step_through_delay (gdbarch, frame);
3805
3806 if (debug_infrun && step_through_delay)
3807 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
3808 if (ecs->event_thread->step_range_end == 0 && step_through_delay)
3809 {
3810 /* The user issued a continue when stopped at a breakpoint.
3811 Set up for another trap and get out of here. */
3812 ecs->event_thread->stepping_over_breakpoint = 1;
3813 keep_going (ecs);
3814 return;
3815 }
3816 else if (step_through_delay)
3817 {
3818 /* The user issued a step when stopped at a breakpoint.
3819 Maybe we should stop, maybe we should not - the delay
3820 slot *might* correspond to a line of source. In any
3821 case, don't decide that here, just set
3822 ecs->stepping_over_breakpoint, making sure we
3823 single-step again before breakpoints are re-inserted. */
3824 ecs->event_thread->stepping_over_breakpoint = 1;
3825 }
3826 }
3827
3828 /* Look at the cause of the stop, and decide what to do.
3829 The alternatives are:
3830 1) stop_stepping and return; to really stop and return to the debugger,
3831 2) keep_going and return to start up again
3832 (set ecs->event_thread->stepping_over_breakpoint to 1 to single step once)
3833 3) set ecs->random_signal to 1, and the decision between 1 and 2
3834 will be made according to the signal handling tables. */
3835
3836 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3837 || stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_NO_SIGSTOP
3838 || stop_soon == STOP_QUIETLY_REMOTE)
3839 {
3840 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP && stop_after_trap)
3841 {
3842 if (debug_infrun)
3843 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
3844 stop_print_frame = 0;
3845 stop_stepping (ecs);
3846 return;
3847 }
3848
3849 /* This originates from start_remote(), start_inferior(), and
3850 the shared library hook functions. */
3851 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
3852 {
3853 if (debug_infrun)
3854 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3855 stop_stepping (ecs);
3856 return;
3857 }
3858
3859 /* This originates from attach_command(). We need to overwrite
3860 the stop_signal here, because some kernels don't ignore a
3861 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
3862 See more comments in inferior.h. On the other hand, if we
3863 get a non-SIGSTOP, report it to the user - assume the backend
3864 will handle the SIGSTOP if it should show up later.
3865
3866 Also consider that the attach is complete when we see a
3867 SIGTRAP. Some systems (e.g. Windows) and stubs supporting
3868 target extended-remote (e.g. gdbserver) report it instead of
3869 a SIGSTOP. We already rely on SIGTRAP being our
3870 signal, so this is no exception.
3871
3872 Also consider that the attach is complete when we see a
3873 TARGET_SIGNAL_0. In non-stop mode, GDB will explicitly tell
3874 the target to stop all threads of the inferior, in case the
3875 low level attach operation doesn't stop them implicitly. If
3876 they weren't stopped implicitly, then the stub will report a
3877 TARGET_SIGNAL_0, meaning: stopped for no particular reason
3878 other than GDB's request. */
3879 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3880 && (ecs->event_thread->stop_signal == TARGET_SIGNAL_STOP
3881 || ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3882 || ecs->event_thread->stop_signal == TARGET_SIGNAL_0))
3883 {
3884 stop_stepping (ecs);
3885 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3886 return;
3887 }
3888
3889 /* See if there is a breakpoint at the current PC. */
3890 ecs->event_thread->stop_bpstat
3891 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3892 stop_pc, ecs->ptid);
3893
3894 /* The following is in case a breakpoint condition called a
3895 function. */
3896 stop_print_frame = 1;
3897
3898 /* This is where we handle "moribund" watchpoints. Unlike
3899 software breakpoint traps, hardware watchpoint traps are
3900 always distinguishable from random traps. If no high-level
3901 watchpoint is associated with the reported stop data address
3902 anymore, then the bpstat does not explain the signal ---
3903 simply make sure to ignore it if `stopped_by_watchpoint' is
3904 set. */
3905
3906 if (debug_infrun
3907 && ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3908 && !bpstat_explains_signal (ecs->event_thread->stop_bpstat)
3909 && stopped_by_watchpoint)
3910 fprintf_unfiltered (gdb_stdlog, "\
3911 infrun: no user watchpoint explains watchpoint SIGTRAP, ignoring\n");
3912
3913 /* NOTE: cagney/2003-03-29: These two checks for a random signal
3914 at one stage in the past included checks for an inferior
3915 function call's call dummy's return breakpoint. The original
3916 comment, that went with the test, read:
3917
3918 ``End of a stack dummy. Some systems (e.g. Sony news) give
3919 another signal besides SIGTRAP, so check here as well as
3920 above.''
3921
3922 If someone ever tries to get call dummies on a
3923 non-executable stack to work (where the target would stop
3924 with something like a SIGSEGV), then those tests might need
3925 to be re-instated. Given, however, that the tests were only
3926 enabled when momentary breakpoints were not being used, I
3927 suspect that it won't be the case.
3928
3929 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
3930 be necessary for call dummies on a non-executable stack on
3931 SPARC. */
3932
3933 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3934 ecs->random_signal
3935 = !(bpstat_explains_signal (ecs->event_thread->stop_bpstat)
3936 || stopped_by_watchpoint
3937 || ecs->event_thread->trap_expected
3938 || (ecs->event_thread->step_range_end
3939 && ecs->event_thread->step_resume_breakpoint == NULL));
3940 else
3941 {
3942 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
3943 if (!ecs->random_signal)
3944 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3945 }
3946 }
3947
3948 /* When we reach this point, we've pretty much decided
3949 that the reason for stopping must've been a random
3950 (unexpected) signal. */
3951
3952 else
3953 ecs->random_signal = 1;
3954
3955 process_event_stop_test:
3956
3957 /* Re-fetch current thread's frame in case we did a
3958 "goto process_event_stop_test" above. */
3959 frame = get_current_frame ();
3960 gdbarch = get_frame_arch (frame);
3961
3962 /* For the program's own signals, act according to
3963 the signal handling tables. */
3964
3965 if (ecs->random_signal)
3966 {
3967 /* Signal not for debugging purposes. */
3968 int printed = 0;
3969 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
3970
3971 if (debug_infrun)
3972 fprintf_unfiltered (gdb_stdlog, "infrun: random signal %d\n",
3973 ecs->event_thread->stop_signal);
3974
3975 stopped_by_random_signal = 1;
3976
3977 if (signal_print[ecs->event_thread->stop_signal])
3978 {
3979 printed = 1;
3980 target_terminal_ours_for_output ();
3981 print_signal_received_reason (ecs->event_thread->stop_signal);
3982 }
3983 /* Always stop on signals if we're either just gaining control
3984 of the program, or the user explicitly requested this thread
3985 to remain stopped. */
3986 if (stop_soon != NO_STOP_QUIETLY
3987 || ecs->event_thread->stop_requested
3988 || (!inf->detaching
3989 && signal_stop_state (ecs->event_thread->stop_signal)))
3990 {
3991 stop_stepping (ecs);
3992 return;
3993 }
3994 /* If not going to stop, give terminal back
3995 if we took it away. */
3996 else if (printed)
3997 target_terminal_inferior ();
3998
3999 /* Clear the signal if it should not be passed. */
4000 if (signal_program[ecs->event_thread->stop_signal] == 0)
4001 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
4002
4003 if (ecs->event_thread->prev_pc == stop_pc
4004 && ecs->event_thread->trap_expected
4005 && ecs->event_thread->step_resume_breakpoint == NULL)
4006 {
4007 /* We were just starting a new sequence, attempting to
4008 single-step off of a breakpoint and expecting a SIGTRAP.
4009 Instead this signal arrives. This signal will take us out
4010 of the stepping range, so GDB needs to remember to resume
4011 stepping off that breakpoint when the signal handler
4012 returns. */
4013 /* To simplify things, "continue" is forced to use the same
4014 code paths as single-step - set a breakpoint at the
4015 signal return address and then, once hit, step off that
4016 breakpoint. */
4017 if (debug_infrun)
4018 fprintf_unfiltered (gdb_stdlog,
4019 "infrun: signal arrived while stepping over "
4020 "breakpoint\n");
4021
4022 insert_step_resume_breakpoint_at_frame (frame);
4023 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4024 keep_going (ecs);
4025 return;
4026 }
4027
4028 if (ecs->event_thread->step_range_end != 0
4029 && ecs->event_thread->stop_signal != TARGET_SIGNAL_0
4030 && (ecs->event_thread->step_range_start <= stop_pc
4031 && stop_pc < ecs->event_thread->step_range_end)
4032 && frame_id_eq (get_stack_frame_id (frame),
4033 ecs->event_thread->step_stack_frame_id)
4034 && ecs->event_thread->step_resume_breakpoint == NULL)
4035 {
4036 /* The inferior is about to take a signal that will take it
4037 out of the single step range. Set a breakpoint at the
4038 current PC (which is presumably where the signal handler
4039 will eventually return) and then allow the inferior to
4040 run free.
4041
4042 Note that this is only needed for a signal delivered
4043 while in the single-step range. Nested signals aren't a
4044 problem as they eventually all return. */
4045 if (debug_infrun)
4046 fprintf_unfiltered (gdb_stdlog,
4047 "infrun: signal may take us out of "
4048 "single-step range\n");
4049
4050 insert_step_resume_breakpoint_at_frame (frame);
4051 keep_going (ecs);
4052 return;
4053 }
4054
4055 /* Note: step_resume_breakpoint may be non-NULL. This occurs
4056 when either there's a nested signal, or when there's a
4057 pending signal enabled just as the signal handler returns
4058 (leaving the inferior at the step-resume-breakpoint without
4059 actually executing it). Either way continue until the
4060 breakpoint is really hit. */
4061 keep_going (ecs);
4062 return;
4063 }
4064
4065 /* Handle cases caused by hitting a breakpoint. */
4066 {
4067 CORE_ADDR jmp_buf_pc;
4068 struct bpstat_what what;
4069
4070 what = bpstat_what (ecs->event_thread->stop_bpstat);
4071
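/* bpstat_what distills the set of breakpoints that were hit into a
single recommended action (what.main_action), handled by the switch
below, plus an indication of whether we stopped in a call dummy. */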
4072 if (what.call_dummy)
4073 {
4074 stop_stack_dummy = what.call_dummy;
4075 }
4076
4077 /* If we hit an internal event that triggers symbol changes, the
4078 current frame will be invalidated within bpstat_what (e.g., if
4079 we hit an internal solib event). Re-fetch it. */
4080 frame = get_current_frame ();
4081 gdbarch = get_frame_arch (frame);
4082
4083 switch (what.main_action)
4084 {
4085 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
4086 /* If we hit the breakpoint at longjmp while stepping, we
4087 install a momentary breakpoint at the target of the
4088 jmp_buf. */
4089
4090 if (debug_infrun)
4091 fprintf_unfiltered (gdb_stdlog,
4092 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
4093
4094 ecs->event_thread->stepping_over_breakpoint = 1;
4095
4096 if (!gdbarch_get_longjmp_target_p (gdbarch)
4097 || !gdbarch_get_longjmp_target (gdbarch, frame, &jmp_buf_pc))
4098 {
4099 if (debug_infrun)
4100 fprintf_unfiltered (gdb_stdlog, "\
4101 infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME (!gdbarch_get_longjmp_target)\n");
4102 keep_going (ecs);
4103 return;
4104 }
4105
4106 /* We're going to replace the current step-resume breakpoint
4107 with a longjmp-resume breakpoint. */
4108 delete_step_resume_breakpoint (ecs->event_thread);
4109
4110 /* Insert a breakpoint at resume address. */
4111 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4112
4113 keep_going (ecs);
4114 return;
4115
4116 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4117 if (debug_infrun)
4118 fprintf_unfiltered (gdb_stdlog,
4119 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4120
4121 gdb_assert (ecs->event_thread->step_resume_breakpoint != NULL);
4122 delete_step_resume_breakpoint (ecs->event_thread);
4123
4124 ecs->event_thread->stop_step = 1;
4125 print_end_stepping_range_reason ();
4126 stop_stepping (ecs);
4127 return;
4128
4129 case BPSTAT_WHAT_SINGLE:
4130 if (debug_infrun)
4131 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4132 ecs->event_thread->stepping_over_breakpoint = 1;
4133 /* Still need to check other stuff, at least the case
4134 where we are stepping and step out of the right range. */
4135 break;
4136
4137 case BPSTAT_WHAT_STOP_NOISY:
4138 if (debug_infrun)
4139 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4140 stop_print_frame = 1;
4141
4142 /* We are about to nuke the step_resume_breakpoint via the
4143 cleanup chain, so no need to worry about it here. */
4144
4145 stop_stepping (ecs);
4146 return;
4147
4148 case BPSTAT_WHAT_STOP_SILENT:
4149 if (debug_infrun)
4150 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4151 stop_print_frame = 0;
4152
4153 /* We are about to nuke the step_resume_breakpoint via the
4154 cleanup chain, so no need to worry about it here. */
4155
4156 stop_stepping (ecs);
4157 return;
4158
4159 case BPSTAT_WHAT_STEP_RESUME:
4160 if (debug_infrun)
4161 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4162
4163 delete_step_resume_breakpoint (ecs->event_thread);
4164 if (ecs->event_thread->step_after_step_resume_breakpoint)
4165 {
4166 /* Back when the step-resume breakpoint was inserted, we
4167 were trying to single-step off a breakpoint. Go back
4168 to doing that. */
4169 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4170 ecs->event_thread->stepping_over_breakpoint = 1;
4171 keep_going (ecs);
4172 return;
4173 }
4174 if (stop_pc == ecs->stop_func_start
4175 && execution_direction == EXEC_REVERSE)
4176 {
4177 /* We are stepping over a function call in reverse, and
4178 just hit the step-resume breakpoint at the start
4179 address of the function. Go back to single-stepping,
4180 which should take us back to the function call. */
4181 ecs->event_thread->stepping_over_breakpoint = 1;
4182 keep_going (ecs);
4183 return;
4184 }
4185 break;
4186
4187 case BPSTAT_WHAT_KEEP_CHECKING:
4188 break;
4189 }
4190 }
4191
4192 /* We come here if we hit a breakpoint but should not
4193 stop for it. Possibly we also were stepping
4194 and should stop for that. So fall through and
4195 test for stepping. But, if not stepping,
4196 do not stop. */
4197
4198 /* In all-stop mode, if we're currently stepping but have stopped in
4199 some other thread, we need to switch back to the stepped thread. */
4200 if (!non_stop)
4201 {
4202 struct thread_info *tp;
4203
4204 tp = iterate_over_threads (currently_stepping_or_nexting_callback,
4205 ecs->event_thread);
4206 if (tp)
4207 {
4208 /* However, if the current thread is blocked on some internal
4209 breakpoint, and we simply need to step over that breakpoint
4210 to get it going again, do that first. */
4211 if ((ecs->event_thread->trap_expected
4212 && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP)
4213 || ecs->event_thread->stepping_over_breakpoint)
4214 {
4215 keep_going (ecs);
4216 return;
4217 }
4218
4219 /* If the stepping thread exited, then don't try to switch
4220 back and resume it, which could fail in several different
4221 ways depending on the target. Instead, just keep going.
4222
4223 We can find a stepping dead thread in the thread list in
4224 two cases:
4225
4226 - The target supports thread exit events, and when the
4227 target tries to delete the thread from the thread list,
4228 inferior_ptid pointed at the exiting thread. In such
4229 case, calling delete_thread does not really remove the
4230 thread from the list; instead, the thread is left listed,
4231 with 'exited' state.
4232
4233 - The target's debug interface does not support thread
4234 exit events, and so we have no idea whatsoever if the
4235 previously stepping thread is still alive. For that
4236 reason, we need to synchronously query the target
4237 now. */
4238 if (is_exited (tp->ptid)
4239 || !target_thread_alive (tp->ptid))
4240 {
4241 if (debug_infrun)
4242 fprintf_unfiltered (gdb_stdlog, "\
4243 infrun: not switching back to stepped thread, it has vanished\n");
4244
4245 delete_thread (tp->ptid);
4246 keep_going (ecs);
4247 return;
4248 }
4249
4250 /* Otherwise, we no longer expect a trap in the current thread.
4251 Clear the trap_expected flag before switching back -- this is
4252 what keep_going would do as well, if we called it. */
4253 ecs->event_thread->trap_expected = 0;
4254
4255 if (debug_infrun)
4256 fprintf_unfiltered (gdb_stdlog,
4257 "infrun: switching back to stepped thread\n");
4258
4259 ecs->event_thread = tp;
4260 ecs->ptid = tp->ptid;
4261 context_switch (ecs->ptid);
4262 keep_going (ecs);
4263 return;
4264 }
4265 }
4266
4267 /* Are we stepping to get the inferior out of the dynamic linker's
4268 hook (and possibly the dld itself) after catching a shlib
4269 event? */
4270 if (ecs->event_thread->stepping_through_solib_after_catch)
4271 {
4272 #if defined(SOLIB_ADD)
4273 /* Have we reached our destination? If not, keep going. */
4274 if (SOLIB_IN_DYNAMIC_LINKER (PIDGET (ecs->ptid), stop_pc))
4275 {
4276 if (debug_infrun)
4277 fprintf_unfiltered (gdb_stdlog, "infrun: stepping in dynamic linker\n");
4278 ecs->event_thread->stepping_over_breakpoint = 1;
4279 keep_going (ecs);
4280 return;
4281 }
4282 #endif
4283 if (debug_infrun)
4284 fprintf_unfiltered (gdb_stdlog, "infrun: step past dynamic linker\n");
4285 /* Else, stop and report the catchpoint(s) whose triggering
4286 caused us to begin stepping. */
4287 ecs->event_thread->stepping_through_solib_after_catch = 0;
4288 bpstat_clear (&ecs->event_thread->stop_bpstat);
4289 ecs->event_thread->stop_bpstat
4290 = bpstat_copy (ecs->event_thread->stepping_through_solib_catchpoints);
4291 bpstat_clear (&ecs->event_thread->stepping_through_solib_catchpoints);
4292 stop_print_frame = 1;
4293 stop_stepping (ecs);
4294 return;
4295 }
4296
4297 if (ecs->event_thread->step_resume_breakpoint)
4298 {
4299 if (debug_infrun)
4300 fprintf_unfiltered (gdb_stdlog,
4301 "infrun: step-resume breakpoint is inserted\n");
4302
4303 /* Having a step-resume breakpoint overrides anything
4304 else having to do with stepping commands until
4305 that breakpoint is reached. */
4306 keep_going (ecs);
4307 return;
4308 }
4309
4310 if (ecs->event_thread->step_range_end == 0)
4311 {
4312 if (debug_infrun)
4313 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4314 /* Likewise if we aren't even stepping. */
4315 keep_going (ecs);
4316 return;
4317 }
4318
4319 /* Re-fetch current thread's frame in case the code above caused
4320 the frame cache to be re-initialized, making our FRAME variable
4321 a dangling pointer. */
4322 frame = get_current_frame ();
4323 gdbarch = get_frame_arch (frame);
4324
4325 /* If stepping through a line, keep going if still within it.
4326
4327 Note that step_range_end is the address of the first instruction
4328 beyond the step range, and NOT the address of the last instruction
4329 within it!
4330
4331 Note also that during reverse execution, we may be stepping
4332 through a function epilogue and therefore must detect when
4333 the current-frame changes in the middle of a line. */
4334
4335 if (stop_pc >= ecs->event_thread->step_range_start
4336 && stop_pc < ecs->event_thread->step_range_end
4337 && (execution_direction != EXEC_REVERSE
4338 || frame_id_eq (get_frame_id (frame),
4339 ecs->event_thread->step_frame_id)))
4340 {
4341 if (debug_infrun)
4342 fprintf_unfiltered
4343 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4344 paddress (gdbarch, ecs->event_thread->step_range_start),
4345 paddress (gdbarch, ecs->event_thread->step_range_end));
4346
4347 /* When stepping backward, stop at beginning of line range
4348 (unless it's the function entry point, in which case
4349 keep going back to the call point). */
4350 if (stop_pc == ecs->event_thread->step_range_start
4351 && stop_pc != ecs->stop_func_start
4352 && execution_direction == EXEC_REVERSE)
4353 {
4354 ecs->event_thread->stop_step = 1;
4355 print_end_stepping_range_reason ();
4356 stop_stepping (ecs);
4357 }
4358 else
4359 keep_going (ecs);
4360
4361 return;
4362 }
4363
4364 /* We stepped out of the stepping range. */
4365
4366 /* If we are stepping at the source level and entered the runtime
4367 loader dynamic symbol resolution code...
4368
4369 EXEC_FORWARD: we keep on single stepping until we exit the run
4370 time loader code and reach the callee's address.
4371
4372 EXEC_REVERSE: we've already executed the callee (backward), and
4373 the runtime loader code is handled just like any other
4374 undebuggable function call. Now we need only keep stepping
4375 backward through the trampoline code, and that's handled further
4376 down, so there is nothing for us to do here. */
4377
4378 if (execution_direction != EXEC_REVERSE
4379 && ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4380 && in_solib_dynsym_resolve_code (stop_pc))
4381 {
4382 CORE_ADDR pc_after_resolver =
4383 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4384
4385 if (debug_infrun)
4386 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into dynsym resolve code\n");
4387
4388 if (pc_after_resolver)
4389 {
4390 /* Set up a step-resume breakpoint at the address
4391 indicated by SKIP_SOLIB_RESOLVER. */
4392 struct symtab_and_line sr_sal;
4393
4394 init_sal (&sr_sal);
4395 sr_sal.pc = pc_after_resolver;
4396 sr_sal.pspace = get_frame_program_space (frame);
4397
4398 insert_step_resume_breakpoint_at_sal (gdbarch,
4399 sr_sal, null_frame_id);
4400 }
4401
4402 keep_going (ecs);
4403 return;
4404 }
4405
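/* In the test below, a step_range_end of 1 identifies a stepi/nexti;
those always stop after one instruction and are handled further
down. */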
4406 if (ecs->event_thread->step_range_end != 1
4407 && (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4408 || ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4409 && get_frame_type (frame) == SIGTRAMP_FRAME)
4410 {
4411 if (debug_infrun)
4412 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into signal trampoline\n");
4413 /* The inferior, while doing a "step" or "next", has ended up in
4414 a signal trampoline (either by a signal being delivered or by
4415 the signal handler returning). Just single-step until the
4416 inferior leaves the trampoline (either by calling the handler
4417 or returning). */
4418 keep_going (ecs);
4419 return;
4420 }
4421
4422 /* Check for subroutine calls. The check for the current frame
4423 equalling the step ID is not necessary - the check of the
4424 previous frame's ID is sufficient - but it is a common case and
4425 cheaper than checking the previous frame's ID.
4426
4427 NOTE: frame_id_eq will never report two invalid frame IDs as
4428 being equal, so to get into this block, both the current and
4429 previous frame must have valid frame IDs. */
4430 /* The outer_frame_id check is a heuristic to detect stepping
4431 through startup code. If we step over an instruction which
4432 sets the stack pointer from an invalid value to a valid value,
4433 we may detect that as a subroutine call from the mythical
4434 "outermost" function. This could be fixed by marking
4435 outermost frames as !stack_p,code_p,special_p. Then the
4436 initial outermost frame, before sp was valid, would
4437 have code_addr == &_start. See the comment in frame_id_eq
4438 for more. */
4439 if (!frame_id_eq (get_stack_frame_id (frame),
4440 ecs->event_thread->step_stack_frame_id)
4441 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4442 ecs->event_thread->step_stack_frame_id)
4443 && (!frame_id_eq (ecs->event_thread->step_stack_frame_id,
4444 outer_frame_id)
4445 || step_start_function != find_pc_function (stop_pc))))
4446 {
4447 CORE_ADDR real_stop_pc;
4448
4449 if (debug_infrun)
4450 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4451
4452 if ((ecs->event_thread->step_over_calls == STEP_OVER_NONE)
4453 || ((ecs->event_thread->step_range_end == 1)
4454 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4455 ecs->stop_func_start)))
4456 {
4457 /* I presume that step_over_calls is only 0 when we're
4458 supposed to be stepping at the assembly language level
4459 ("stepi"). Just stop. */
4460 /* Also, maybe we just did a "nexti" inside a prologue, so we
4461 thought it was a subroutine call but it was not. Stop as
4462 well. FENN */
4463 /* And this works the same backward as frontward. MVS */
4464 ecs->event_thread->stop_step = 1;
4465 print_end_stepping_range_reason ();
4466 stop_stepping (ecs);
4467 return;
4468 }
4469
4470 /* Reverse stepping through solib trampolines. */
4471
4472 if (execution_direction == EXEC_REVERSE
4473 && ecs->event_thread->step_over_calls != STEP_OVER_NONE
4474 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4475 || (ecs->stop_func_start == 0
4476 && in_solib_dynsym_resolve_code (stop_pc))))
4477 {
4478 /* Any solib trampoline code can be handled in reverse
4479 by simply continuing to single-step. We have already
4480 executed the solib function (backwards), and a few
4481 steps will take us back through the trampoline to the
4482 caller. */
4483 keep_going (ecs);
4484 return;
4485 }
4486
4487 if (ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4488 {
4489 /* We're doing a "next".
4490
4491 Normal (forward) execution: set a breakpoint at the
4492 callee's return address (the address at which the caller
4493 will resume).
4494
4495 Reverse (backward) execution: set the step-resume
4496 breakpoint at the start of the function that we just
4497 stepped into (backwards), and continue to there. When we
4498 get there, we'll need to single-step back to the caller. */
4499
4500 if (execution_direction == EXEC_REVERSE)
4501 {
4502 struct symtab_and_line sr_sal;
4503
4504 /* Normal function call return (static or dynamic). */
4505 init_sal (&sr_sal);
4506 sr_sal.pc = ecs->stop_func_start;
4507 sr_sal.pspace = get_frame_program_space (frame);
4508 insert_step_resume_breakpoint_at_sal (gdbarch,
4509 sr_sal, null_frame_id);
4510 }
4511 else
4512 insert_step_resume_breakpoint_at_caller (frame);
4513
4514 keep_going (ecs);
4515 return;
4516 }
4517
4518 /* If we are in a function call trampoline (a stub between the
4519 calling routine and the real function), locate the real
4520 function. That's what tells us (a) whether we want to step
4521 into it at all, and (b) what prologue we want to run to the
4522 end of, if we do step into it. */
4523 real_stop_pc = skip_language_trampoline (frame, stop_pc);
4524 if (real_stop_pc == 0)
4525 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4526 if (real_stop_pc != 0)
4527 ecs->stop_func_start = real_stop_pc;
4528
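/* If the trampoline leads into the dynamic linker's symbol
resolution code, place a step-resume breakpoint at that address and
keep going, rather than single-stepping through the resolver. */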
4529 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
4530 {
4531 struct symtab_and_line sr_sal;
4532
4533 init_sal (&sr_sal);
4534 sr_sal.pc = ecs->stop_func_start;
4535 sr_sal.pspace = get_frame_program_space (frame);
4536
4537 insert_step_resume_breakpoint_at_sal (gdbarch,
4538 sr_sal, null_frame_id);
4539 keep_going (ecs);
4540 return;
4541 }
4542
4543 /* If we have line number information for the function we are
4544 thinking of stepping into, step into it.
4545
4546 If there are several symtabs at that PC (e.g. with include
4547 files), we just want to know whether *any* of them have line
4548 numbers. find_pc_line handles this. */
4549 {
4550 struct symtab_and_line tmp_sal;
4551
4552 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
4553 tmp_sal.pspace = get_frame_program_space (frame);
4554 if (tmp_sal.line != 0)
4555 {
4556 if (execution_direction == EXEC_REVERSE)
4557 handle_step_into_function_backward (gdbarch, ecs);
4558 else
4559 handle_step_into_function (gdbarch, ecs);
4560 return;
4561 }
4562 }
4563
4564 /* If we have no line number and the step-stop-if-no-debug is
4565 set, we stop the step so that the user has a chance to switch
4566 to assembly mode. */
4567 if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4568 && step_stop_if_no_debug)
4569 {
4570 ecs->event_thread->stop_step = 1;
4571 print_end_stepping_range_reason ();
4572 stop_stepping (ecs);
4573 return;
4574 }
4575
4576 if (execution_direction == EXEC_REVERSE)
4577 {
4578 /* Set a breakpoint at callee's start address.
4579 From there we can step once and be back in the caller. */
4580 struct symtab_and_line sr_sal;
4581
4582 init_sal (&sr_sal);
4583 sr_sal.pc = ecs->stop_func_start;
4584 sr_sal.pspace = get_frame_program_space (frame);
4585 insert_step_resume_breakpoint_at_sal (gdbarch,
4586 sr_sal, null_frame_id);
4587 }
4588 else
4589 /* Set a breakpoint at callee's return address (the address
4590 at which the caller will resume). */
4591 insert_step_resume_breakpoint_at_caller (frame);
4592
4593 keep_going (ecs);
4594 return;
4595 }
4596
4597 /* Reverse stepping through solib trampolines. */
4598
4599 if (execution_direction == EXEC_REVERSE
4600 && ecs->event_thread->step_over_calls != STEP_OVER_NONE)
4601 {
4602 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4603 || (ecs->stop_func_start == 0
4604 && in_solib_dynsym_resolve_code (stop_pc)))
4605 {
4606 /* Any solib trampoline code can be handled in reverse
4607 by simply continuing to single-step. We have already
4608 executed the solib function (backwards), and a few
4609 steps will take us back through the trampoline to the
4610 caller. */
4611 keep_going (ecs);
4612 return;
4613 }
4614 else if (in_solib_dynsym_resolve_code (stop_pc))
4615 {
4616 /* Stepped backward into the solib dynsym resolver.
4617 Set a breakpoint at its start and continue, then
4618 one more step will take us out. */
4619 struct symtab_and_line sr_sal;
4620
4621 init_sal (&sr_sal);
4622 sr_sal.pc = ecs->stop_func_start;
4623 sr_sal.pspace = get_frame_program_space (frame);
4624 insert_step_resume_breakpoint_at_sal (gdbarch,
4625 sr_sal, null_frame_id);
4626 keep_going (ecs);
4627 return;
4628 }
4629 }
4630
4631 /* If we're in the return path from a shared library trampoline,
4632 we want to proceed through the trampoline when stepping. */
4633 if (gdbarch_in_solib_return_trampoline (gdbarch,
4634 stop_pc, ecs->stop_func_name))
4635 {
4636 /* Determine where this trampoline returns. */
4637 CORE_ADDR real_stop_pc;
4638
4639 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4640
4641 if (debug_infrun)
4642 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into solib return tramp\n");
4643
4644 /* Only proceed through if we know where it's going. */
4645 if (real_stop_pc)
4646 {
4647 /* And put the step-breakpoint there and go until there. */
4648 struct symtab_and_line sr_sal;
4649
4650 init_sal (&sr_sal); /* initialize to zeroes */
4651 sr_sal.pc = real_stop_pc;
4652 sr_sal.section = find_pc_overlay (sr_sal.pc);
4653 sr_sal.pspace = get_frame_program_space (frame);
4654
4655 /* Do not specify what the fp should be when we stop since
4656 on some machines the prologue is where the new fp value
4657 is established. */
4658 insert_step_resume_breakpoint_at_sal (gdbarch,
4659 sr_sal, null_frame_id);
4660
4661 /* Restart without fiddling with the step ranges or
4662 other state. */
4663 keep_going (ecs);
4664 return;
4665 }
4666 }
4667
4668 stop_pc_sal = find_pc_line (stop_pc, 0);
4669
4670 /* NOTE: tausq/2004-05-24: This if block used to be done before all
4671 the trampoline processing logic, however, there are some trampolines
4672 that have no names, so we should do trampoline handling first. */
4673 if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4674 && ecs->stop_func_name == NULL
4675 && stop_pc_sal.line == 0)
4676 {
4677 if (debug_infrun)
4678 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into undebuggable function\n");
4679
4680 /* The inferior just stepped into, or returned to, an
4681 undebuggable function (where there is no debugging information
4682 and no line number corresponding to the address where the
4683 inferior stopped). Since we want to skip this kind of code,
4684 we keep going until the inferior returns from this
4685 function - unless the user has asked us not to (via
4686 set step-mode) or we no longer know how to get back
4687 to the call site. */
4688 if (step_stop_if_no_debug
4689 || !frame_id_p (frame_unwind_caller_id (frame)))
4690 {
4691 /* If we have no line number and the step-stop-if-no-debug
4692 is set, we stop the step so that the user has a chance to
4693 switch to assembly mode. */
4694 ecs->event_thread->stop_step = 1;
4695 print_end_stepping_range_reason ();
4696 stop_stepping (ecs);
4697 return;
4698 }
4699 else
4700 {
4701 /* Set a breakpoint at callee's return address (the address
4702 at which the caller will resume). */
4703 insert_step_resume_breakpoint_at_caller (frame);
4704 keep_going (ecs);
4705 return;
4706 }
4707 }
4708
4709 if (ecs->event_thread->step_range_end == 1)
4710 {
4711 /* It is stepi or nexti. We always want to stop stepping after
4712 one instruction. */
4713 if (debug_infrun)
4714 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
4715 ecs->event_thread->stop_step = 1;
4716 print_end_stepping_range_reason ();
4717 stop_stepping (ecs);
4718 return;
4719 }
4720
4721 if (stop_pc_sal.line == 0)
4722 {
4723 /* We have no line number information. That means to stop
4724 stepping (does this always happen right after one instruction,
4725 when we do "s" in a function with no line numbers,
4726 or can this happen as a result of a return or longjmp?). */
4727 if (debug_infrun)
4728 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
4729 ecs->event_thread->stop_step = 1;
4730 print_end_stepping_range_reason ();
4731 stop_stepping (ecs);
4732 return;
4733 }
4734
4735 /* Look for "calls" to inlined functions, part one. If the inline
4736 frame machinery detected some skipped call sites, we have entered
4737 a new inline function. */
4738
4739 if (frame_id_eq (get_frame_id (get_current_frame ()),
4740 ecs->event_thread->step_frame_id)
4741 && inline_skipped_frames (ecs->ptid))
4742 {
4743 struct symtab_and_line call_sal;
4744
4745 if (debug_infrun)
4746 fprintf_unfiltered (gdb_stdlog,
4747 "infrun: stepped into inlined function\n");
4748
4749 find_frame_sal (get_current_frame (), &call_sal);
4750
4751 if (ecs->event_thread->step_over_calls != STEP_OVER_ALL)
4752 {
4753 /* For "step", we're going to stop. But if the call site
4754 for this inlined function is on the same source line as
4755 we were previously stepping, go down into the function
4756 first. Otherwise stop at the call site. */
4757
4758 if (call_sal.line == ecs->event_thread->current_line
4759 && call_sal.symtab == ecs->event_thread->current_symtab)
4760 step_into_inline_frame (ecs->ptid);
4761
4762 ecs->event_thread->stop_step = 1;
4763 print_end_stepping_range_reason ();
4764 stop_stepping (ecs);
4765 return;
4766 }
4767 else
4768 {
4769 /* For "next", we should stop at the call site if it is on a
4770 different source line. Otherwise continue through the
4771 inlined function. */
4772 if (call_sal.line == ecs->event_thread->current_line
4773 && call_sal.symtab == ecs->event_thread->current_symtab)
4774 keep_going (ecs);
4775 else
4776 {
4777 ecs->event_thread->stop_step = 1;
4778 print_end_stepping_range_reason ();
4779 stop_stepping (ecs);
4780 }
4781 return;
4782 }
4783 }
4784
4785 /* Look for "calls" to inlined functions, part two. If we are still
4786 in the same real function we were stepping through, but we have
4787 to go further up to find the exact frame ID, we are stepping
4788 through a more inlined call beyond its call site. */
4789
4790 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
4791 && !frame_id_eq (get_frame_id (get_current_frame ()),
4792 ecs->event_thread->step_frame_id)
4793 && stepped_in_from (get_current_frame (),
4794 ecs->event_thread->step_frame_id))
4795 {
4796 if (debug_infrun)
4797 fprintf_unfiltered (gdb_stdlog,
4798 "infrun: stepping through inlined function\n");
4799
4800 if (ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4801 keep_going (ecs);
4802 else
4803 {
4804 ecs->event_thread->stop_step = 1;
4805 print_end_stepping_range_reason ();
4806 stop_stepping (ecs);
4807 }
4808 return;
4809 }
4810
4811 if ((stop_pc == stop_pc_sal.pc)
4812 && (ecs->event_thread->current_line != stop_pc_sal.line
4813 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
4814 {
4815 /* We are at the start of a different line. So stop. Note that
4816 we don't stop if we step into the middle of a different line.
4817 That is said to make things like for (;;) statements work
4818 better. */
4819 if (debug_infrun)
4820 fprintf_unfiltered (gdb_stdlog, "infrun: stepped to a different line\n");
4821 ecs->event_thread->stop_step = 1;
4822 print_end_stepping_range_reason ();
4823 stop_stepping (ecs);
4824 return;
4825 }
4826
4827 /* We aren't done stepping.
4828
4829 Optimize by setting the stepping range to the line.
4830 (We might not be in the original line, but if we entered a
4831 new line in mid-statement, we continue stepping. This makes
4832 things like for(;;) statements work better.) */
4833
4834 ecs->event_thread->step_range_start = stop_pc_sal.pc;
4835 ecs->event_thread->step_range_end = stop_pc_sal.end;
4836 set_step_info (frame, stop_pc_sal);
4837
4838 if (debug_infrun)
4839 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
4840 keep_going (ecs);
4841 }
4842
4843 /* Is thread TP in the middle of single-stepping? */
4844
4845 static int
4846 currently_stepping (struct thread_info *tp)
4847 {
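/* A thread is stepping if it has an active step range with no
step-resume breakpoint pending, if it is in the middle of stepping
over a breakpoint (trap_expected), if it is stepping out of the
dynamic linker after a shlib catchpoint, or if software watchpoints
require single-stepping (bpstat_should_step). */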
4848 return ((tp->step_range_end && tp->step_resume_breakpoint == NULL)
4849 || tp->trap_expected
4850 || tp->stepping_through_solib_after_catch
4851 || bpstat_should_step ());
4852 }
4853
4854 /* Returns true if any thread *but* the one passed in "data" is in the
4855 middle of stepping or of handling a "next". */
4856
4857 static int
4858 currently_stepping_or_nexting_callback (struct thread_info *tp, void *data)
4859 {
4860 if (tp == data)
4861 return 0;
4862
4863 return (tp->step_range_end
4864 || tp->trap_expected
4865 || tp->stepping_through_solib_after_catch);
4866 }
4867
4868 /* Inferior has stepped into a subroutine call with source code that
4869 we should not step over. Do step to the first line of code in
4870 it. */
4871
4872 static void
4873 handle_step_into_function (struct gdbarch *gdbarch,
4874 struct execution_control_state *ecs)
4875 {
4876 struct symtab *s;
4877 struct symtab_and_line stop_func_sal, sr_sal;
4878
4879 s = find_pc_symtab (stop_pc);
4880 if (s && s->language != language_asm)
4881 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
4882 ecs->stop_func_start);
4883
4884 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
4885 /* Use the step_resume_break to step until the end of the prologue,
4886 even if that involves jumps (as it seems to on the vax under
4887 4.2). */
4888 /* If the prologue ends in the middle of a source line, continue to
4889 the end of that source line (if it is still within the function).
4890 Otherwise, just go to end of prologue. */
4891 if (stop_func_sal.end
4892 && stop_func_sal.pc != ecs->stop_func_start
4893 && stop_func_sal.end < ecs->stop_func_end)
4894 ecs->stop_func_start = stop_func_sal.end;
4895
4896 /* Architectures which require breakpoint adjustment might not be able
4897 to place a breakpoint at the computed address. If so, the test
4898 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
4899 ecs->stop_func_start to an address at which a breakpoint may be
4900 legitimately placed.
4901
4902 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
4903 made, GDB will enter an infinite loop when stepping through
4904 optimized code consisting of VLIW instructions which contain
4905 subinstructions corresponding to different source lines. On
4906 FR-V, it's not permitted to place a breakpoint on any but the
4907 first subinstruction of a VLIW instruction. When a breakpoint is
4908 set, GDB will adjust the breakpoint address to the beginning of
4909 the VLIW instruction. Thus, we need to make the corresponding
4910 adjustment here when computing the stop address. */
4911
4912 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
4913 {
4914 ecs->stop_func_start
4915 = gdbarch_adjust_breakpoint_address (gdbarch,
4916 ecs->stop_func_start);
4917 }
4918
4919 if (ecs->stop_func_start == stop_pc)
4920 {
4921 /* We are already there: stop now. */
4922 ecs->event_thread->stop_step = 1;
4923 print_end_stepping_range_reason ();
4924 stop_stepping (ecs);
4925 return;
4926 }
4927 else
4928 {
4929 /* Put the step-breakpoint there and go until there. */
4930 init_sal (&sr_sal); /* initialize to zeroes */
4931 sr_sal.pc = ecs->stop_func_start;
4932 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
4933 sr_sal.pspace = get_frame_program_space (get_current_frame ());
4934
4935 /* Do not specify what the fp should be when we stop since on
4936 some machines the prologue is where the new fp value is
4937 established. */
4938 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
4939
4940 /* And make sure stepping stops right away then. */
4941 ecs->event_thread->step_range_end = ecs->event_thread->step_range_start;
4942 }
4943 keep_going (ecs);
4944 }
4945
4946 /* Inferior has stepped backward into a subroutine call with source
4947 code that we should not step over. Step to the beginning of the
4948 last line of code in it. */
4949
4950 static void
4951 handle_step_into_function_backward (struct gdbarch *gdbarch,
4952 struct execution_control_state *ecs)
4953 {
4954 struct symtab *s;
4955 struct symtab_and_line stop_func_sal;
4956
4957 s = find_pc_symtab (stop_pc);
4958 if (s && s->language != language_asm)
4959 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
4960 ecs->stop_func_start);
4961
4962 stop_func_sal = find_pc_line (stop_pc, 0);
4963
4964 /* OK, we're just going to keep stepping here. */
4965 if (stop_func_sal.pc == stop_pc)
4966 {
4967 /* We're there already. Just stop stepping now. */
4968 ecs->event_thread->stop_step = 1;
4969 print_end_stepping_range_reason ();
4970 stop_stepping (ecs);
4971 }
4972 else
4973 {
4974 /* Else just reset the step range and keep going.
4975 Don't use a step-resume breakpoint; they don't work for
4976 epilogues, which can have multiple entry paths. */
4977 ecs->event_thread->step_range_start = stop_func_sal.pc;
4978 ecs->event_thread->step_range_end = stop_func_sal.end;
4979 keep_going (ecs);
4980 }
4981 return;
4982 }
4983
4984 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
4985 This is used both to step into functions and to skip over code. */
4986
4987 static void
4988 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
4989 struct symtab_and_line sr_sal,
4990 struct frame_id sr_id)
4991 {
4992 /* There should never be more than one step-resume or longjmp-resume
4993 breakpoint per thread, so we should never be setting a new
4994 step_resume_breakpoint when one is already active. */
4995 gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL);
4996
4997 if (debug_infrun)
4998 fprintf_unfiltered (gdb_stdlog,
4999 "infrun: inserting step-resume breakpoint at %s\n",
5000 paddress (gdbarch, sr_sal.pc));
5001
5002 inferior_thread ()->step_resume_breakpoint
5003 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, bp_step_resume);
5004 }
5005
5006 /* Insert a "step-resume breakpoint" at RETURN_FRAME.pc. This is used
5007 to skip a potential signal handler.
5008
5009 This is called with the interrupted function's frame. The signal
5010 handler, when it returns, will resume the interrupted function at
5011 RETURN_FRAME.pc. */
5012
5013 static void
5014 insert_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
5015 {
5016 struct symtab_and_line sr_sal;
5017 struct gdbarch *gdbarch;
5018
5019 gdb_assert (return_frame != NULL);
5020 init_sal (&sr_sal); /* initialize to zeros */
5021
5022 gdbarch = get_frame_arch (return_frame);
5023 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
5024 sr_sal.section = find_pc_overlay (sr_sal.pc);
5025 sr_sal.pspace = get_frame_program_space (return_frame);
5026
5027 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5028 get_stack_frame_id (return_frame));
5029 }
5030
5031 /* Similar to insert_step_resume_breakpoint_at_frame, except it sets
5032 a breakpoint at the previous frame's PC. This is used to
5033 skip a function after stepping into it (for "next" or if the called
5034 function has no debugging information).
5035
5036 The current function has almost always been reached by single
5037 stepping a call or return instruction. NEXT_FRAME belongs to the
5038 current function, and the breakpoint will be set at the caller's
5039 resume address.
5040
5041 This is a separate function rather than reusing
5042 insert_step_resume_breakpoint_at_frame in order to avoid
5043 get_prev_frame, which may stop prematurely (see the implementation
5044 of frame_unwind_caller_id for an example). */
5045
5046 static void
5047 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
5048 {
5049 struct symtab_and_line sr_sal;
5050 struct gdbarch *gdbarch;
5051
5052 /* We shouldn't have gotten here if we don't know where the call site
5053 is. */
5054 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
5055
5056 init_sal (&sr_sal); /* initialize to zeros */
5057
5058 gdbarch = frame_unwind_caller_arch (next_frame);
5059 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
5060 frame_unwind_caller_pc (next_frame));
5061 sr_sal.section = find_pc_overlay (sr_sal.pc);
5062 sr_sal.pspace = frame_unwind_program_space (next_frame);
5063
5064 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5065 frame_unwind_caller_id (next_frame));
5066 }
5067
5068 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
5069 new breakpoint at the target of a jmp_buf. The handling of
5070 longjmp-resume uses the same mechanisms used for handling
5071 "step-resume" breakpoints. */
5072
5073 static void
5074 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
5075 {
5076 /* There should never be more than one step-resume or longjmp-resume
5077 breakpoint per thread, so we should never be setting a new
5078 longjmp_resume_breakpoint when one is already active. */
5079 gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL);
5080
5081 if (debug_infrun)
5082 fprintf_unfiltered (gdb_stdlog,
5083 "infrun: inserting longjmp-resume breakpoint at %s\n",
5084 paddress (gdbarch, pc));
5085
5086 inferior_thread ()->step_resume_breakpoint =
5087 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
5088 }
5089
5090 static void
5091 stop_stepping (struct execution_control_state *ecs)
5092 {
5093 if (debug_infrun)
5094 fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n");
5095
5096 /* Let callers know we don't want to wait for the inferior anymore. */
5097 ecs->wait_some_more = 0;
5098 }
5099
5100 /* This function handles various cases where we need to continue
5101 waiting for the inferior. */
5102 /* (Used to be the keep_going: label in the old wait_for_inferior) */
5103
5104 static void
5105 keep_going (struct execution_control_state *ecs)
5106 {
5107 /* Make sure normal_stop is called if we get a QUIT handled before
5108 reaching resume. */
5109 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
5110
5111 /* Save the pc before execution, to compare with pc after stop. */
5112 ecs->event_thread->prev_pc
5113 = regcache_read_pc (get_thread_regcache (ecs->ptid));
5114
5115 /* If we did not break out above, it means we should keep running
5116 the inferior and not return to the debugger. */
5117
5118 if (ecs->event_thread->trap_expected
5119 && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP)
5120 {
5121 /* We took a signal (which we are supposed to pass through to
5122 the inferior, else we'd not get here) and we haven't yet
5123 gotten our trap. Simply continue. */
5124
5125 discard_cleanups (old_cleanups);
5126 resume (currently_stepping (ecs->event_thread),
5127 ecs->event_thread->stop_signal);
5128 }
5129 else
5130 {
5131 /* Either the trap was not expected, but we are continuing
5132 anyway (the user asked that this signal be passed to the
5133 child)
5134 -- or --
5135 The signal was SIGTRAP, e.g. it was our signal, but we
5136 decided we should resume from it.
5137
5138 We're going to run this baby now!
5139
5140 Note that insert_breakpoints won't try to re-insert
5141 already inserted breakpoints. Therefore, we don't
5142 care if breakpoints were already inserted, or not. */
5143
5144 if (ecs->event_thread->stepping_over_breakpoint)
5145 {
5146 struct regcache *thread_regcache = get_thread_regcache (ecs->ptid);
5147
5148 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
5149 /* Since we can't do a displaced step, we have to remove
5150 the breakpoint while we step it. To keep things
5151 simple, we remove them all. */
5152 remove_breakpoints ();
5153 }
5154 else
5155 {
5156 struct gdb_exception e;
5157
5158 /* Stop stepping when inserting breakpoints
5159 has failed. */
5160 TRY_CATCH (e, RETURN_MASK_ERROR)
5161 {
5162 insert_breakpoints ();
5163 }
5164 if (e.reason < 0)
5165 {
5166 exception_print (gdb_stderr, e);
5167 stop_stepping (ecs);
5168 return;
5169 }
5170 }
5171
5172 ecs->event_thread->trap_expected = ecs->event_thread->stepping_over_breakpoint;
5173
5174 /* Do not deliver SIGNAL_TRAP (except when the user explicitly
5175 specifies that such a signal should be delivered to the
5176 target program).
5177
5178 Typically, this would occur when a user is debugging a
5179 target monitor on a simulator: the target monitor sets a
5180 breakpoint; the simulator encounters this breakpoint and
5181 halts the simulation handing control to GDB; GDB, noting
5182 that the breakpoint isn't valid, returns control back to the
5183 simulator; the simulator then delivers the hardware
5184 equivalent of a SIGNAL_TRAP to the program being debugged. */
5185
5186 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
5187 && !signal_program[ecs->event_thread->stop_signal])
5188 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
5189
5190 discard_cleanups (old_cleanups);
5191 resume (currently_stepping (ecs->event_thread),
5192 ecs->event_thread->stop_signal);
5193 }
5194
5195 prepare_to_wait (ecs);
5196 }
5197
5198 /* This function normally comes after a resume, before
5199 handle_inferior_event exits. It takes care of any last bits of
5200 housekeeping, and sets the all-important wait_some_more flag. */
5201
5202 static void
5203 prepare_to_wait (struct execution_control_state *ecs)
5204 {
5205 if (debug_infrun)
5206 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
5207
5208 /* This is the old end of the while loop. Let everybody know we
5209 want to wait for the inferior some more and get called again
5210 soon. */
5211 ecs->wait_some_more = 1;
5212 }
5213
5214 /* Several print_*_reason functions to print why the inferior has stopped.
5215 We always print something when the inferior exits, or receives a signal.
5216 The rest of the cases are dealt with later on in normal_stop and
5217 print_it_typical. Ideally there should be a call to one of these
5218 print_*_reason functions from handle_inferior_event each time
5219 stop_stepping is called. */
5220
5221 /* Print why the inferior has stopped.
5222 We are done with a step/next/si/ni command, print why the inferior has
5223 stopped. On the CLI we currently print nothing; in MI mode, emit the stop
5224 reason, but only if not in the middle of doing a "step n" operation for n > 1. */
5225
5226 static void
5227 print_end_stepping_range_reason (void)
5228 {
5229 if ((!inferior_thread ()->step_multi || !inferior_thread ()->stop_step)
5230 && ui_out_is_mi_like_p (uiout))
5231 ui_out_field_string (uiout, "reason",
5232 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
5233 }
5234
5235 /* The inferior was terminated by a signal, print why it stopped. */
5236
5237 static void
5238 print_signal_exited_reason (enum target_signal siggnal)
5239 {
5240 annotate_signalled ();
5241 if (ui_out_is_mi_like_p (uiout))
5242 ui_out_field_string
5243 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
5244 ui_out_text (uiout, "\nProgram terminated with signal ");
5245 annotate_signal_name ();
5246 ui_out_field_string (uiout, "signal-name",
5247 target_signal_to_name (siggnal));
5248 annotate_signal_name_end ();
5249 ui_out_text (uiout, ", ");
5250 annotate_signal_string ();
5251 ui_out_field_string (uiout, "signal-meaning",
5252 target_signal_to_string (siggnal));
5253 annotate_signal_string_end ();
5254 ui_out_text (uiout, ".\n");
5255 ui_out_text (uiout, "The program no longer exists.\n");
5256 }
5257
5258 /* The inferior program is finished, print why it stopped. */
5259
5260 static void
5261 print_exited_reason (int exitstatus)
5262 {
5263 annotate_exited (exitstatus);
5264 if (exitstatus)
5265 {
5266 if (ui_out_is_mi_like_p (uiout))
5267 ui_out_field_string (uiout, "reason",
5268 async_reason_lookup (EXEC_ASYNC_EXITED));
5269 ui_out_text (uiout, "\nProgram exited with code ");
5270 ui_out_field_fmt (uiout, "exit-code", "0%o", (unsigned int) exitstatus);
5271 ui_out_text (uiout, ".\n");
5272 }
5273 else
5274 {
5275 if (ui_out_is_mi_like_p (uiout))
5276 ui_out_field_string
5277 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
5278 ui_out_text (uiout, "\nProgram exited normally.\n");
5279 }
5280 /* Support the --return-child-result option. */
5281 return_child_result_value = exitstatus;
5282 }
5283
5284 /* Signal received, print why the inferior has stopped. The signal table
5285 tells us to print about it. */
5286
5287 static void
5288 print_signal_received_reason (enum target_signal siggnal)
5289 {
5290 annotate_signal ();
5291
5292 if (siggnal == TARGET_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
5293 {
5294 struct thread_info *t = inferior_thread ();
5295
5296 ui_out_text (uiout, "\n[");
5297 ui_out_field_string (uiout, "thread-name",
5298 target_pid_to_str (t->ptid));
5299 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
5300 ui_out_text (uiout, " stopped");
5301 }
5302 else
5303 {
5304 ui_out_text (uiout, "\nProgram received signal ");
5305 annotate_signal_name ();
5306 if (ui_out_is_mi_like_p (uiout))
5307 ui_out_field_string
5308 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
5309 ui_out_field_string (uiout, "signal-name",
5310 target_signal_to_name (siggnal));
5311 annotate_signal_name_end ();
5312 ui_out_text (uiout, ", ");
5313 annotate_signal_string ();
5314 ui_out_field_string (uiout, "signal-meaning",
5315 target_signal_to_string (siggnal));
5316 annotate_signal_string_end ();
5317 }
5318 ui_out_text (uiout, ".\n");
5319 }
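
/* Illustrative note (not from the original sources): for a plain CLI
   session the code above produces output along the lines of

       Program received signal SIGINT, Interrupt.

   while an MI frontend would also see a "reason" field, e.g.
   reason="signal-received", alongside the signal-name and
   signal-meaning fields emitted above.  */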
5320
5321 /* Reverse execution: target ran out of history info, print why the inferior
5322 has stopped. */
5323
5324 static void
5325 print_no_history_reason (void)
5326 {
5327 ui_out_text (uiout, "\nNo more reverse-execution history.\n");
5328 }
5329
5330 /* Here to return control to GDB when the inferior stops for real.
5331 Print appropriate messages, remove breakpoints, give terminal our modes.
5332
5333 The global stop_print_frame, when nonzero, means print the executing
5334 frame (pc, function, args, file, line number and line text). */
5337
5338 void
5339 normal_stop (void)
5340 {
5341 struct target_waitstatus last;
5342 ptid_t last_ptid;
5343 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
5344
5345 get_last_target_status (&last_ptid, &last);
5346
5347 /* If an exception is thrown from this point on, make sure to
5348 propagate GDB's knowledge of the executing state to the
5349 frontend/user running state. A QUIT is an easy exception to see
5350 here, so do this before any filtered output. */
5351 if (!non_stop)
5352 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
5353 else if (last.kind != TARGET_WAITKIND_SIGNALLED
5354 && last.kind != TARGET_WAITKIND_EXITED)
5355 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
5356
5357 /* In non-stop mode, we don't want GDB to switch threads behind the
5358 user's back, to avoid races where the user is typing a command to
5359 apply to thread x, but GDB switches to thread y before the user
5360 finishes entering the command. */
5361
5362 /* As with the notification of thread events, we want to delay
5363 notifying the user that we've switched thread context until
5364 the inferior actually stops.
5365
5366 There's no point in saying anything if the inferior has exited.
5367 Note that SIGNALLED here means "exited with a signal", not
5368 "received a signal". */
5369 if (!non_stop
5370 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
5371 && target_has_execution
5372 && last.kind != TARGET_WAITKIND_SIGNALLED
5373 && last.kind != TARGET_WAITKIND_EXITED)
5374 {
5375 target_terminal_ours_for_output ();
5376 printf_filtered (_("[Switching to %s]\n"),
5377 target_pid_to_str (inferior_ptid));
5378 annotate_thread_changed ();
5379 previous_inferior_ptid = inferior_ptid;
5380 }
5381
5382 if (!breakpoints_always_inserted_mode () && target_has_execution)
5383 {
5384 if (remove_breakpoints ())
5385 {
5386 target_terminal_ours_for_output ();
5387 printf_filtered (_("\
5388 Cannot remove breakpoints because program is no longer writable.\n\
5389 Further execution is probably impossible.\n"));
5390 }
5391 }
5392
5393 /* If an auto-display called a function and that got a signal,
5394 delete that auto-display to avoid an infinite recursion. */
5395
5396 if (stopped_by_random_signal)
5397 disable_current_display ();
5398
5399 /* Don't print a message if in the middle of doing a "step n"
5400 operation for n > 1 */
5401 if (target_has_execution
5402 && last.kind != TARGET_WAITKIND_SIGNALLED
5403 && last.kind != TARGET_WAITKIND_EXITED
5404 && inferior_thread ()->step_multi
5405 && inferior_thread ()->stop_step)
5406 goto done;
5407
5408 target_terminal_ours ();
5409
5410 /* Set the current source location. This will also happen if we
5411 display the frame below, but the current SAL will be incorrect
5412 during a user hook-stop function. */
5413 if (has_stack_frames () && !stop_stack_dummy)
5414 set_current_sal_from_frame (get_current_frame (), 1);
5415
5416 /* Let the user/frontend see the threads as stopped. */
5417 do_cleanups (old_chain);
5418
5419 /* Look up the hook_stop and run it (CLI internally handles problem
5420 of stop_command's pre-hook not existing). */
5421 if (stop_command)
5422 catch_errors (hook_stop_stub, stop_command,
5423 "Error while running hook_stop:\n", RETURN_MASK_ALL);
5424
5425 if (!has_stack_frames ())
5426 goto done;
5427
5428 if (last.kind == TARGET_WAITKIND_SIGNALLED
5429 || last.kind == TARGET_WAITKIND_EXITED)
5430 goto done;
5431
5432 /* Select innermost stack frame - i.e., current frame is frame 0,
5433 and current location is based on that.
5434 Don't do this on return from a stack dummy routine,
5435 or if the program has exited. */
5436
5437 if (!stop_stack_dummy)
5438 {
5439 select_frame (get_current_frame ());
5440
5441 /* Print current location without a level number, if
5442 we have changed functions or hit a breakpoint.
5443 Print source line if we have one.
5444 bpstat_print() contains the logic deciding in detail
5445 what to print, based on the event(s) that just occurred. */
5446
5447 /* If --batch-silent is enabled then there's no need to print the current
5448 source location, and trying to do so risks causing an error message
5449 about missing source files. */
5450 if (stop_print_frame && !batch_silent)
5451 {
5452 int bpstat_ret;
5453 int source_flag;
5454 int do_frame_printing = 1;
5455 struct thread_info *tp = inferior_thread ();
5456
5457 bpstat_ret = bpstat_print (tp->stop_bpstat);
5458 switch (bpstat_ret)
5459 {
5460 case PRINT_UNKNOWN:
5461 /* If we had hit a shared library event breakpoint,
5462 bpstat_print would print out this message. If we hit
5463 an OS-level shared library event, do the same
5464 thing. */
5465 if (last.kind == TARGET_WAITKIND_LOADED)
5466 {
5467 printf_filtered (_("Stopped due to shared library event\n"));
5468 source_flag = SRC_LINE; /* something bogus */
5469 do_frame_printing = 0;
5470 break;
5471 }
5472
5473 /* FIXME: cagney/2002-12-01: Given that a frame ID does
5474 (or should) carry around the function and does (or
5475 should) use that when doing a frame comparison. */
5476 if (tp->stop_step
5477 && frame_id_eq (tp->step_frame_id,
5478 get_frame_id (get_current_frame ()))
5479 && step_start_function == find_pc_function (stop_pc))
5480 source_flag = SRC_LINE; /* finished step, just print source line */
5481 else
5482 source_flag = SRC_AND_LOC; /* print location and source line */
5483 break;
5484 case PRINT_SRC_AND_LOC:
5485 source_flag = SRC_AND_LOC; /* print location and source line */
5486 break;
5487 case PRINT_SRC_ONLY:
5488 source_flag = SRC_LINE;
5489 break;
5490 case PRINT_NOTHING:
5491 source_flag = SRC_LINE; /* something bogus */
5492 do_frame_printing = 0;
5493 break;
5494 default:
5495 internal_error (__FILE__, __LINE__, _("Unknown value."));
5496 }
5497
5498 /* The behavior of this routine with respect to the source
5499 flag is:
5500 SRC_LINE: Print only source line
5501 LOCATION: Print only location
5502 SRC_AND_LOC: Print location and source line */
5503 if (do_frame_printing)
5504 print_stack_frame (get_selected_frame (NULL), 0, source_flag);
5505
5506 /* Display the auto-display expressions. */
5507 do_displays ();
5508 }
5509 }
5510
5511 /* Save the function value return registers, if we care.
5512 We might be about to restore their previous contents. */
5513 if (inferior_thread ()->proceed_to_finish)
5514 {
5515 /* This should not be necessary. */
5516 if (stop_registers)
5517 regcache_xfree (stop_registers);
5518
5519 /* NB: The copy goes through to the target picking up the value of
5520 all the registers. */
5521 stop_registers = regcache_dup (get_current_regcache ());
5522 }
5523
5524 if (stop_stack_dummy == STOP_STACK_DUMMY)
5525 {
5526 /* Pop the empty frame that contains the stack dummy.
5527 This also restores inferior state prior to the call
5528 (struct inferior_thread_state). */
5529 struct frame_info *frame = get_current_frame ();
5530
5531 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
5532 frame_pop (frame);
5533 /* frame_pop() calls reinit_frame_cache as the last thing it does
5534 which means there's currently no selected frame. We don't need
5535 to re-establish a selected frame if the dummy call returns normally,
5536 that will be done by restore_inferior_status. However, we do have
5537 to handle the case where the dummy call is returning after being
5538 stopped (e.g. the dummy call previously hit a breakpoint). We
5539 can't know which case we have so just always re-establish a
5540 selected frame here. */
5541 select_frame (get_current_frame ());
5542 }
5543
5544 done:
5545 annotate_stopped ();
5546
5547 /* Suppress the stop observer if we're in the middle of:
5548
5549 - a step n (n > 1), as there are still more steps to be done.
5550
5551 - a "finish" command, as the observer will be called in
5552 finish_command_continuation, so it can include the inferior
5553 function's return value.
5554
5555 - calling an inferior function, as we pretend the inferior didn't
5556 run at all. The return value of the call is handled by the
5557 expression evaluator, through call_function_by_hand. */
5558
5559 if (!target_has_execution
5560 || last.kind == TARGET_WAITKIND_SIGNALLED
5561 || last.kind == TARGET_WAITKIND_EXITED
5562 || (!inferior_thread ()->step_multi
5563 && !(inferior_thread ()->stop_bpstat
5564 && inferior_thread ()->proceed_to_finish)
5565 && !inferior_thread ()->in_infcall))
5566 {
5567 if (!ptid_equal (inferior_ptid, null_ptid))
5568 observer_notify_normal_stop (inferior_thread ()->stop_bpstat,
5569 stop_print_frame);
5570 else
5571 observer_notify_normal_stop (NULL, stop_print_frame);
5572 }
5573
5574 if (target_has_execution)
5575 {
5576 if (last.kind != TARGET_WAITKIND_SIGNALLED
5577 && last.kind != TARGET_WAITKIND_EXITED)
5578 /* Delete the breakpoint we stopped at, if it wants to be deleted.
5579 Delete any breakpoint that is to be deleted at the next stop. */
5580 breakpoint_auto_delete (inferior_thread ()->stop_bpstat);
5581 }
5582
5583 /* Try to get rid of automatically added inferiors that are no
5584 longer needed. Keeping those around slows down things linearly.
5585 Note that this never removes the current inferior. */
5586 prune_inferiors ();
5587 }
5588
5589 static int
5590 hook_stop_stub (void *cmd)
5591 {
5592 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
5593 return (0);
5594 }
5595 \f
5596 int
5597 signal_stop_state (int signo)
5598 {
5599 return signal_stop[signo];
5600 }
5601
5602 int
5603 signal_print_state (int signo)
5604 {
5605 return signal_print[signo];
5606 }
5607
5608 int
5609 signal_pass_state (int signo)
5610 {
5611 return signal_program[signo];
5612 }
5613
5614 int
5615 signal_stop_update (int signo, int state)
5616 {
5617 int ret = signal_stop[signo];
5618
5619 signal_stop[signo] = state;
5620 return ret;
5621 }
5622
5623 int
5624 signal_print_update (int signo, int state)
5625 {
5626 int ret = signal_print[signo];
5627
5628 signal_print[signo] = state;
5629 return ret;
5630 }
5631
5632 int
5633 signal_pass_update (int signo, int state)
5634 {
5635 int ret = signal_program[signo];
5636
5637 signal_program[signo] = state;
5638 return ret;
5639 }
5640
5641 static void
5642 sig_print_header (void)
5643 {
5644 printf_filtered (_("\
5645 Signal Stop\tPrint\tPass to program\tDescription\n"));
5646 }
5647
5648 static void
5649 sig_print_info (enum target_signal oursig)
5650 {
5651 const char *name = target_signal_to_name (oursig);
5652 int name_padding = 13 - strlen (name);
5653
5654 if (name_padding <= 0)
5655 name_padding = 0;
5656
5657 printf_filtered ("%s", name);
5658 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
5659 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
5660 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
5661 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
5662 printf_filtered ("%s\n", target_signal_to_string (oursig));
5663 }
5664
5665 /* Specify how various signals in the inferior should be handled. */
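
/* Illustrative examples of the syntax accepted below (see also the
   "handle" help text registered in _initialize_infrun):

       (gdb) handle SIGUSR1 nostop noprint pass
       (gdb) handle 14-15 ignore

   The first form names a signal symbolically; the second uses the
   numeric <LOW>-<HIGH> range syntax, which refers to GDB's internal
   signal numbering rather than the host's.  */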
5666
5667 static void
5668 handle_command (char *args, int from_tty)
5669 {
5670 char **argv;
5671 int digits, wordlen;
5672 int sigfirst, signum, siglast;
5673 enum target_signal oursig;
5674 int allsigs;
5675 int nsigs;
5676 unsigned char *sigs;
5677 struct cleanup *old_chain;
5678
5679 if (args == NULL)
5680 {
5681 error_no_arg (_("signal to handle"));
5682 }
5683
5684 /* Allocate and zero an array of flags for which signals to handle. */
5685
5686 nsigs = (int) TARGET_SIGNAL_LAST;
5687 sigs = (unsigned char *) alloca (nsigs);
5688 memset (sigs, 0, nsigs);
5689
5690 /* Break the command line up into args. */
5691
5692 argv = gdb_buildargv (args);
5693 old_chain = make_cleanup_freeargv (argv);
5694
5695 /* Walk through the args, looking for signal oursigs, signal names, and
5696 actions. Signal numbers and signal names may be interspersed with
5697 actions, with the actions being performed for all signals cumulatively
5698 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
5699
5700 while (*argv != NULL)
5701 {
5702 wordlen = strlen (*argv);
5703 for (digits = 0; isdigit ((*argv)[digits]); digits++)
5704 {;
5705 }
5706 allsigs = 0;
5707 sigfirst = siglast = -1;
5708
5709 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
5710 {
5711 /* Apply action to all signals except those used by the
5712 debugger. Silently skip those. */
5713 allsigs = 1;
5714 sigfirst = 0;
5715 siglast = nsigs - 1;
5716 }
5717 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
5718 {
5719 SET_SIGS (nsigs, sigs, signal_stop);
5720 SET_SIGS (nsigs, sigs, signal_print);
5721 }
5722 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
5723 {
5724 UNSET_SIGS (nsigs, sigs, signal_program);
5725 }
5726 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
5727 {
5728 SET_SIGS (nsigs, sigs, signal_print);
5729 }
5730 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
5731 {
5732 SET_SIGS (nsigs, sigs, signal_program);
5733 }
5734 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
5735 {
5736 UNSET_SIGS (nsigs, sigs, signal_stop);
5737 }
5738 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
5739 {
5740 SET_SIGS (nsigs, sigs, signal_program);
5741 }
5742 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
5743 {
5744 UNSET_SIGS (nsigs, sigs, signal_print);
5745 UNSET_SIGS (nsigs, sigs, signal_stop);
5746 }
5747 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
5748 {
5749 UNSET_SIGS (nsigs, sigs, signal_program);
5750 }
5751 else if (digits > 0)
5752 {
5753 /* It is numeric. The numeric signal refers to our own
5754 internal signal numbering from target.h, not to host/target
5755 signal number. This is a feature; users really should be
5756 using symbolic names anyway, and the common ones like
5757 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
5758
5759 sigfirst = siglast = (int)
5760 target_signal_from_command (atoi (*argv));
5761 if ((*argv)[digits] == '-')
5762 {
5763 siglast = (int)
5764 target_signal_from_command (atoi ((*argv) + digits + 1));
5765 }
5766 if (sigfirst > siglast)
5767 {
5768 /* Bet he didn't figure we'd think of this case... */
5769 signum = sigfirst;
5770 sigfirst = siglast;
5771 siglast = signum;
5772 }
5773 }
5774 else
5775 {
5776 oursig = target_signal_from_name (*argv);
5777 if (oursig != TARGET_SIGNAL_UNKNOWN)
5778 {
5779 sigfirst = siglast = (int) oursig;
5780 }
5781 else
5782 {
5783 /* Not a number and not a recognized flag word => complain. */
5784 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
5785 }
5786 }
5787
5788 /* If any signal numbers or symbol names were found, set flags for
5789 which signals to apply actions to. */
5790
5791 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
5792 {
5793 switch ((enum target_signal) signum)
5794 {
5795 case TARGET_SIGNAL_TRAP:
5796 case TARGET_SIGNAL_INT:
5797 if (!allsigs && !sigs[signum])
5798 {
5799 if (query (_("%s is used by the debugger.\n\
5800 Are you sure you want to change it? "), target_signal_to_name ((enum target_signal) signum)))
5801 {
5802 sigs[signum] = 1;
5803 }
5804 else
5805 {
5806 printf_unfiltered (_("Not confirmed, unchanged.\n"));
5807 gdb_flush (gdb_stdout);
5808 }
5809 }
5810 break;
5811 case TARGET_SIGNAL_0:
5812 case TARGET_SIGNAL_DEFAULT:
5813 case TARGET_SIGNAL_UNKNOWN:
5814 /* Make sure that "all" doesn't print these. */
5815 break;
5816 default:
5817 sigs[signum] = 1;
5818 break;
5819 }
5820 }
5821
5822 argv++;
5823 }
5824
5825 for (signum = 0; signum < nsigs; signum++)
5826 if (sigs[signum])
5827 {
5828 target_notice_signals (inferior_ptid);
5829
5830 if (from_tty)
5831 {
5832 /* Show the results. */
5833 sig_print_header ();
5834 for (; signum < nsigs; signum++)
5835 if (sigs[signum])
5836 sig_print_info (signum);
5837 }
5838
5839 break;
5840 }
5841
5842 do_cleanups (old_chain);
5843 }
5844
5845 static void
5846 xdb_handle_command (char *args, int from_tty)
5847 {
5848 char **argv;
5849 struct cleanup *old_chain;
5850
5851 if (args == NULL)
5852 error_no_arg (_("xdb command"));
5853
5854 /* Break the command line up into args. */
5855
5856 argv = gdb_buildargv (args);
5857 old_chain = make_cleanup_freeargv (argv);
5858 if (argv[1] != (char *) NULL)
5859 {
5860 char *argBuf;
5861 int bufLen;
5862
5863 bufLen = strlen (argv[0]) + 20;
5864 argBuf = (char *) xmalloc (bufLen);
5865 if (argBuf)
5866 {
5867 int validFlag = 1;
5868 enum target_signal oursig;
5869
5870 oursig = target_signal_from_name (argv[0]);
5871 memset (argBuf, 0, bufLen);
5872 if (strcmp (argv[1], "Q") == 0)
5873 sprintf (argBuf, "%s %s", argv[0], "noprint");
5874 else
5875 {
5876 if (strcmp (argv[1], "s") == 0)
5877 {
5878 if (!signal_stop[oursig])
5879 sprintf (argBuf, "%s %s", argv[0], "stop");
5880 else
5881 sprintf (argBuf, "%s %s", argv[0], "nostop");
5882 }
5883 else if (strcmp (argv[1], "i") == 0)
5884 {
5885 if (!signal_program[oursig])
5886 sprintf (argBuf, "%s %s", argv[0], "pass");
5887 else
5888 sprintf (argBuf, "%s %s", argv[0], "nopass");
5889 }
5890 else if (strcmp (argv[1], "r") == 0)
5891 {
5892 if (!signal_print[oursig])
5893 sprintf (argBuf, "%s %s", argv[0], "print");
5894 else
5895 sprintf (argBuf, "%s %s", argv[0], "noprint");
5896 }
5897 else
5898 validFlag = 0;
5899 }
5900 if (validFlag)
5901 handle_command (argBuf, from_tty);
5902 else
5903 printf_filtered (_("Invalid signal handling flag.\n"));
5904 if (argBuf)
5905 xfree (argBuf);
5906 }
5907 }
5908 do_cleanups (old_chain);
5909 }
5910
5911 /* Print current contents of the tables set by the handle command.
5912 It is possible we should just be printing signals actually used
5913 by the current target (but for things to work right when switching
5914 targets, all signals should be in the signal tables). */
5915
5916 static void
5917 signals_info (char *signum_exp, int from_tty)
5918 {
5919 enum target_signal oursig;
5920
5921 sig_print_header ();
5922
5923 if (signum_exp)
5924 {
5925 /* First see if this is a symbol name. */
5926 oursig = target_signal_from_name (signum_exp);
5927 if (oursig == TARGET_SIGNAL_UNKNOWN)
5928 {
5929 /* No, try numeric. */
5930 oursig =
5931 target_signal_from_command (parse_and_eval_long (signum_exp));
5932 }
5933 sig_print_info (oursig);
5934 return;
5935 }
5936
5937 printf_filtered ("\n");
5938 /* These ugly casts brought to you by the native VAX compiler. */
5939 for (oursig = TARGET_SIGNAL_FIRST;
5940 (int) oursig < (int) TARGET_SIGNAL_LAST;
5941 oursig = (enum target_signal) ((int) oursig + 1))
5942 {
5943 QUIT;
5944
5945 if (oursig != TARGET_SIGNAL_UNKNOWN
5946 && oursig != TARGET_SIGNAL_DEFAULT && oursig != TARGET_SIGNAL_0)
5947 sig_print_info (oursig);
5948 }
5949
5950 printf_filtered (_("\nUse the \"handle\" command to change these tables.\n"));
5951 }
5952
5953 /* The $_siginfo convenience variable is a bit special. We don't know
5954 for sure the type of the value until we actually have a chance to
5955 fetch the data. The type can change depending on gdbarch, so it is
5956 also dependent on which thread you have selected. We deal with this by:
5957
5958 1. making $_siginfo be an internalvar that creates a new value on
5959 access.
5960
5961 2. making the value of $_siginfo be an lval_computed value. */
5962
5963 /* This function implements the lval_computed support for reading a
5964 $_siginfo value. */
5965
5966 static void
5967 siginfo_value_read (struct value *v)
5968 {
5969 LONGEST transferred;
5970
5971 transferred =
5972 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
5973 NULL,
5974 value_contents_all_raw (v),
5975 value_offset (v),
5976 TYPE_LENGTH (value_type (v)));
5977
5978 if (transferred != TYPE_LENGTH (value_type (v)))
5979 error (_("Unable to read siginfo"));
5980 }
5981
5982 /* This function implements the lval_computed support for writing a
5983 $_siginfo value. */
5984
5985 static void
5986 siginfo_value_write (struct value *v, struct value *fromval)
5987 {
5988 LONGEST transferred;
5989
5990 transferred = target_write (&current_target,
5991 TARGET_OBJECT_SIGNAL_INFO,
5992 NULL,
5993 value_contents_all_raw (fromval),
5994 value_offset (v),
5995 TYPE_LENGTH (value_type (fromval)));
5996
5997 if (transferred != TYPE_LENGTH (value_type (fromval)))
5998 error (_("Unable to write siginfo"));
5999 }
6000
6001 static struct lval_funcs siginfo_value_funcs =
6002 {
6003 siginfo_value_read,
6004 siginfo_value_write
6005 };
6006
6007 /* Return a new value with the correct type for the siginfo object of
6008 the current thread using architecture GDBARCH. Return a void value
6009 if there's no object available. */
6010
6011 static struct value *
6012 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var)
6013 {
6014 if (target_has_stack
6015 && !ptid_equal (inferior_ptid, null_ptid)
6016 && gdbarch_get_siginfo_type_p (gdbarch))
6017 {
6018 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6019
6020 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
6021 }
6022
6023 return allocate_value (builtin_type (gdbarch)->builtin_void);
6024 }
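
/* Usage sketch (illustrative, and only meaningful when the target
   supports TARGET_OBJECT_SIGNAL_INFO and the gdbarch provides a
   siginfo type): once the $_siginfo internalvar is hooked up to
   siginfo_make_value, the user can inspect or even modify the last
   signal's data, e.g.:

       (gdb) print $_siginfo.si_signo
       (gdb) print $_siginfo._sifields._sigfault.si_addr

   The member names come from the gdbarch-provided type, so they vary
   from target to target; the ones above assume a Linux-style layout.  */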
6025
6026 \f
6027 /* Inferior thread state.
6028 These are details related to the inferior itself, and don't include
6029 things like what frame the user had selected or what gdb was doing
6030 with the target at the time.
6031 For inferior function calls these are things we want to restore
6032 regardless of whether the function call successfully completes
6033 or the dummy frame has to be manually popped. */
6034
6035 struct inferior_thread_state
6036 {
6037 enum target_signal stop_signal;
6038 CORE_ADDR stop_pc;
6039 struct regcache *registers;
6040 };
6041
6042 struct inferior_thread_state *
6043 save_inferior_thread_state (void)
6044 {
6045 struct inferior_thread_state *inf_state = XMALLOC (struct inferior_thread_state);
6046 struct thread_info *tp = inferior_thread ();
6047
6048 inf_state->stop_signal = tp->stop_signal;
6049 inf_state->stop_pc = stop_pc;
6050
6051 inf_state->registers = regcache_dup (get_current_regcache ());
6052
6053 return inf_state;
6054 }
6055
6056 /* Restore inferior thread state to INF_STATE. */
6057
6058 void
6059 restore_inferior_thread_state (struct inferior_thread_state *inf_state)
6060 {
6061 struct thread_info *tp = inferior_thread ();
6062
6063 tp->stop_signal = inf_state->stop_signal;
6064 stop_pc = inf_state->stop_pc;
6065
6066 /* The inferior can be gone if the user types "print exit(0)"
6067 (and perhaps other times). */
6068 if (target_has_execution)
6069 /* NB: The register write goes through to the target. */
6070 regcache_cpy (get_current_regcache (), inf_state->registers);
6071 regcache_xfree (inf_state->registers);
6072 xfree (inf_state);
6073 }
6074
6075 static void
6076 do_restore_inferior_thread_state_cleanup (void *state)
6077 {
6078 restore_inferior_thread_state (state);
6079 }
6080
6081 struct cleanup *
6082 make_cleanup_restore_inferior_thread_state (struct inferior_thread_state *inf_state)
6083 {
6084 return make_cleanup (do_restore_inferior_thread_state_cleanup, inf_state);
6085 }
6086
6087 void
6088 discard_inferior_thread_state (struct inferior_thread_state *inf_state)
6089 {
6090 regcache_xfree (inf_state->registers);
6091 xfree (inf_state);
6092 }
6093
6094 struct regcache *
6095 get_inferior_thread_state_regcache (struct inferior_thread_state *inf_state)
6096 {
6097 return inf_state->registers;
6098 }
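
/* A minimal usage sketch for the inferior_thread_state functions above
   (not copied from a real caller): exactly one of
   restore_inferior_thread_state or discard_inferior_thread_state must
   eventually run, since both free the saved copy; restore additionally
   writes the saved registers back to the target.

       struct inferior_thread_state *st = save_inferior_thread_state ();
       struct cleanup *old = make_cleanup_restore_inferior_thread_state (st);
       ... run the inferior ...
       do_cleanups (old);
*/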
6099
6100 /* Session related state for inferior function calls.
6101 These are the additional bits of state that need to be restored
6102 when an inferior function call successfully completes. */
6103
6104 struct inferior_status
6105 {
6106 bpstat stop_bpstat;
6107 int stop_step;
6108 enum stop_stack_kind stop_stack_dummy;
6109 int stopped_by_random_signal;
6110 int stepping_over_breakpoint;
6111 CORE_ADDR step_range_start;
6112 CORE_ADDR step_range_end;
6113 struct frame_id step_frame_id;
6114 struct frame_id step_stack_frame_id;
6115 enum step_over_calls_kind step_over_calls;
6116 CORE_ADDR step_resume_break_address;
6117 int stop_after_trap;
6118 int stop_soon;
6119
6120 /* ID of the selected frame when the inferior function call was made. */
6121 struct frame_id selected_frame_id;
6122
6123 int proceed_to_finish;
6124 int in_infcall;
6125 };
6126
6127 /* Save all of the information associated with the inferior<==>gdb
6128 connection. */
6129
6130 struct inferior_status *
6131 save_inferior_status (void)
6132 {
6133 struct inferior_status *inf_status = XMALLOC (struct inferior_status);
6134 struct thread_info *tp = inferior_thread ();
6135 struct inferior *inf = current_inferior ();
6136
6137 inf_status->stop_step = tp->stop_step;
6138 inf_status->stop_stack_dummy = stop_stack_dummy;
6139 inf_status->stopped_by_random_signal = stopped_by_random_signal;
6140 inf_status->stepping_over_breakpoint = tp->trap_expected;
6141 inf_status->step_range_start = tp->step_range_start;
6142 inf_status->step_range_end = tp->step_range_end;
6143 inf_status->step_frame_id = tp->step_frame_id;
6144 inf_status->step_stack_frame_id = tp->step_stack_frame_id;
6145 inf_status->step_over_calls = tp->step_over_calls;
6146 inf_status->stop_after_trap = stop_after_trap;
6147 inf_status->stop_soon = inf->stop_soon;
6148 /* Save original bpstat chain here; replace it with copy of chain.
6149 If caller's caller is walking the chain, they'll be happier if we
6150 hand them back the original chain when restore_inferior_status is
6151 called. */
6152 inf_status->stop_bpstat = tp->stop_bpstat;
6153 tp->stop_bpstat = bpstat_copy (tp->stop_bpstat);
6154 inf_status->proceed_to_finish = tp->proceed_to_finish;
6155 inf_status->in_infcall = tp->in_infcall;
6156
6157 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
6158
6159 return inf_status;
6160 }
6161
6162 static int
6163 restore_selected_frame (void *args)
6164 {
6165 struct frame_id *fid = (struct frame_id *) args;
6166 struct frame_info *frame;
6167
6168 frame = frame_find_by_id (*fid);
6169
6170 /* If frame_find_by_id returns NULL, the previously selected
6171 frame can no longer be found. */
6172 if (frame == NULL)
6173 {
6174 warning (_("Unable to restore previously selected frame."));
6175 return 0;
6176 }
6177
6178 select_frame (frame);
6179
6180 return (1);
6181 }
6182
6183 /* Restore inferior session state to INF_STATUS. */
6184
6185 void
6186 restore_inferior_status (struct inferior_status *inf_status)
6187 {
6188 struct thread_info *tp = inferior_thread ();
6189 struct inferior *inf = current_inferior ();
6190
6191 tp->stop_step = inf_status->stop_step;
6192 stop_stack_dummy = inf_status->stop_stack_dummy;
6193 stopped_by_random_signal = inf_status->stopped_by_random_signal;
6194 tp->trap_expected = inf_status->stepping_over_breakpoint;
6195 tp->step_range_start = inf_status->step_range_start;
6196 tp->step_range_end = inf_status->step_range_end;
6197 tp->step_frame_id = inf_status->step_frame_id;
6198 tp->step_stack_frame_id = inf_status->step_stack_frame_id;
6199 tp->step_over_calls = inf_status->step_over_calls;
6200 stop_after_trap = inf_status->stop_after_trap;
6201 inf->stop_soon = inf_status->stop_soon;
6202 bpstat_clear (&tp->stop_bpstat);
6203 tp->stop_bpstat = inf_status->stop_bpstat;
6204 inf_status->stop_bpstat = NULL;
6205 tp->proceed_to_finish = inf_status->proceed_to_finish;
6206 tp->in_infcall = inf_status->in_infcall;
6207
6208 if (target_has_stack)
6209 {
6210 /* The point of catch_errors is that if the stack is clobbered,
6211 walking the stack might encounter a garbage pointer and
6212 error() trying to dereference it. */
6213 if (catch_errors
6214 (restore_selected_frame, &inf_status->selected_frame_id,
6215 "Unable to restore previously selected frame:\n",
6216 RETURN_MASK_ERROR) == 0)
6217 /* Error in restoring the selected frame. Select the innermost
6218 frame. */
6219 select_frame (get_current_frame ());
6220 }
6221
6222 xfree (inf_status);
6223 }
6224
6225 static void
6226 do_restore_inferior_status_cleanup (void *sts)
6227 {
6228 restore_inferior_status (sts);
6229 }
6230
6231 struct cleanup *
6232 make_cleanup_restore_inferior_status (struct inferior_status *inf_status)
6233 {
6234 return make_cleanup (do_restore_inferior_status_cleanup, inf_status);
6235 }
6236
6237 void
6238 discard_inferior_status (struct inferior_status *inf_status)
6239 {
6240 /* See save_inferior_status for info on stop_bpstat. */
6241 bpstat_clear (&inf_status->stop_bpstat);
6242 xfree (inf_status);
6243 }
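
/* A sketch of how the inferior_status functions above pair up
   (illustrative, not copied from a real caller):

       struct inferior_status *status = save_inferior_status ();
       struct cleanup *c = make_cleanup_restore_inferior_status (status);
       ... make the inferior call ...
       discard_cleanups (c);
       discard_inferior_status (status);

   Because save_inferior_status hands the thread a copy of its bpstat
   chain and keeps the original, exactly one of restore_inferior_status
   or discard_inferior_status must eventually be called, otherwise the
   original chain is leaked.  */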
6244 \f
6245 int
6246 inferior_has_forked (ptid_t pid, ptid_t *child_pid)
6247 {
6248 struct target_waitstatus last;
6249 ptid_t last_ptid;
6250
6251 get_last_target_status (&last_ptid, &last);
6252
6253 if (last.kind != TARGET_WAITKIND_FORKED)
6254 return 0;
6255
6256 if (!ptid_equal (last_ptid, pid))
6257 return 0;
6258
6259 *child_pid = last.value.related_pid;
6260 return 1;
6261 }
6262
6263 int
6264 inferior_has_vforked (ptid_t pid, ptid_t *child_pid)
6265 {
6266 struct target_waitstatus last;
6267 ptid_t last_ptid;
6268
6269 get_last_target_status (&last_ptid, &last);
6270
6271 if (last.kind != TARGET_WAITKIND_VFORKED)
6272 return 0;
6273
6274 if (!ptid_equal (last_ptid, pid))
6275 return 0;
6276
6277 *child_pid = last.value.related_pid;
6278 return 1;
6279 }
6280
6281 int
6282 inferior_has_execd (ptid_t pid, char **execd_pathname)
6283 {
6284 struct target_waitstatus last;
6285 ptid_t last_ptid;
6286
6287 get_last_target_status (&last_ptid, &last);
6288
6289 if (last.kind != TARGET_WAITKIND_EXECD)
6290 return 0;
6291
6292 if (!ptid_equal (last_ptid, pid))
6293 return 0;
6294
6295 *execd_pathname = xstrdup (last.value.execd_pathname);
6296 return 1;
6297 }
6298
6299 int
6300 inferior_has_called_syscall (ptid_t pid, int *syscall_number)
6301 {
6302 struct target_waitstatus last;
6303 ptid_t last_ptid;
6304
6305 get_last_target_status (&last_ptid, &last);
6306
6307 if (last.kind != TARGET_WAITKIND_SYSCALL_ENTRY &&
6308 last.kind != TARGET_WAITKIND_SYSCALL_RETURN)
6309 return 0;
6310
6311 if (!ptid_equal (last_ptid, pid))
6312 return 0;
6313
6314 *syscall_number = last.value.syscall_number;
6315 return 1;
6316 }
6317
6318 /* Oft used ptids */
6319 ptid_t null_ptid;
6320 ptid_t minus_one_ptid;
6321
6322 /* Create a ptid given the necessary PID, LWP, and TID components. */
6323
6324 ptid_t
6325 ptid_build (int pid, long lwp, long tid)
6326 {
6327 ptid_t ptid;
6328
6329 ptid.pid = pid;
6330 ptid.lwp = lwp;
6331 ptid.tid = tid;
6332 return ptid;
6333 }
6334
6335 /* Create a ptid from just a pid. */
6336
6337 ptid_t
6338 pid_to_ptid (int pid)
6339 {
6340 return ptid_build (pid, 0, 0);
6341 }
6342
6343 /* Fetch the pid (process id) component from a ptid. */
6344
6345 int
6346 ptid_get_pid (ptid_t ptid)
6347 {
6348 return ptid.pid;
6349 }
6350
6351 /* Fetch the lwp (lightweight process) component from a ptid. */
6352
6353 long
6354 ptid_get_lwp (ptid_t ptid)
6355 {
6356 return ptid.lwp;
6357 }
6358
6359 /* Fetch the tid (thread id) component from a ptid. */
6360
6361 long
6362 ptid_get_tid (ptid_t ptid)
6363 {
6364 return ptid.tid;
6365 }
6366
6367 /* ptid_equal() is used to test equality of two ptids. */
6368
6369 int
6370 ptid_equal (ptid_t ptid1, ptid_t ptid2)
6371 {
6372 return (ptid1.pid == ptid2.pid && ptid1.lwp == ptid2.lwp
6373 && ptid1.tid == ptid2.tid);
6374 }
6375
6376 /* Returns true if PTID represents a process. */
6377
6378 int
6379 ptid_is_pid (ptid_t ptid)
6380 {
6381 if (ptid_equal (minus_one_ptid, ptid))
6382 return 0;
6383 if (ptid_equal (null_ptid, ptid))
6384 return 0;
6385
6386 return (ptid_get_lwp (ptid) == 0 && ptid_get_tid (ptid) == 0);
6387 }
6388
6389 int
6390 ptid_match (ptid_t ptid, ptid_t filter)
6391 {
6392 /* Since both parameters have the same type, prevent easy mistakes
6393 from happening. */
6394 gdb_assert (!ptid_equal (ptid, minus_one_ptid)
6395 && !ptid_equal (ptid, null_ptid));
6396
6397 if (ptid_equal (filter, minus_one_ptid))
6398 return 1;
6399 if (ptid_is_pid (filter)
6400 && ptid_get_pid (ptid) == ptid_get_pid (filter))
6401 return 1;
6402 else if (ptid_equal (ptid, filter))
6403 return 1;
6404
6405 return 0;
6406 }
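
/* Some worked examples of ptid_match (not assertions from the original
   code, just what the logic above implies):

       ptid_match (ptid_build (42, 7, 0), pid_to_ptid (42))       => 1
       ptid_match (ptid_build (42, 7, 0), minus_one_ptid)         => 1
       ptid_match (ptid_build (42, 7, 0), ptid_build (42, 8, 0))  => 0

   That is, a pid-only filter matches every thread of that process,
   and minus_one_ptid acts as a wildcard.  */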
6407
6408 /* restore_inferior_ptid() will be used by the cleanup machinery
6409 to restore the inferior_ptid value saved in a call to
6410 save_inferior_ptid(). */
6411
6412 static void
6413 restore_inferior_ptid (void *arg)
6414 {
6415 ptid_t *saved_ptid_ptr = arg;
6416
6417 inferior_ptid = *saved_ptid_ptr;
6418 xfree (arg);
6419 }
6420
6421 /* Save the value of inferior_ptid so that it may be restored by a
6422 later call to do_cleanups(). Returns the struct cleanup pointer
6423 needed for later doing the cleanup. */
6424
6425 struct cleanup *
6426 save_inferior_ptid (void)
6427 {
6428 ptid_t *saved_ptid_ptr;
6429
6430 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
6431 *saved_ptid_ptr = inferior_ptid;
6432 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
6433 }
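
/* Usage sketch for the pair above (illustrative; "other_ptid" is just a
   placeholder): temporarily switch inferior_ptid and rely on the
   cleanup to switch it back even if an error is thrown in between:

       struct cleanup *old_chain = save_inferior_ptid ();
       inferior_ptid = other_ptid;
       ... operate on the other thread ...
       do_cleanups (old_chain);
*/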
6434 \f
6435
6436 /* User interface for reverse debugging:
6437 Set exec-direction / show exec-direction commands
6438 (returns an error unless the target supports reverse execution). */
6439
6440 enum exec_direction_kind execution_direction = EXEC_FORWARD;
6441 static const char exec_forward[] = "forward";
6442 static const char exec_reverse[] = "reverse";
6443 static const char *exec_direction = exec_forward;
6444 static const char *exec_direction_names[] = {
6445 exec_forward,
6446 exec_reverse,
6447 NULL
6448 };
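
/* From the user's point of view this boils down to commands such as
   (illustrative transcript):

       (gdb) set exec-direction reverse
       (gdb) show exec-direction
       Reverse.

   The "set" form only succeeds when the target can execute in reverse;
   see set_exec_direction_func below.  */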
6449
6450 static void
6451 set_exec_direction_func (char *args, int from_tty,
6452 struct cmd_list_element *cmd)
6453 {
6454 if (target_can_execute_reverse)
6455 {
6456 if (!strcmp (exec_direction, exec_forward))
6457 execution_direction = EXEC_FORWARD;
6458 else if (!strcmp (exec_direction, exec_reverse))
6459 execution_direction = EXEC_REVERSE;
6460 }
6461 else
6462 {
6463 exec_direction = exec_forward;
6464 error (_("Target does not support this operation."));
6465 }
6466 }
6467
6468 static void
6469 show_exec_direction_func (struct ui_file *out, int from_tty,
6470 struct cmd_list_element *cmd, const char *value)
6471 {
6472 switch (execution_direction) {
6473 case EXEC_FORWARD:
6474 fprintf_filtered (out, _("Forward.\n"));
6475 break;
6476 case EXEC_REVERSE:
6477 fprintf_filtered (out, _("Reverse.\n"));
6478 break;
6479 case EXEC_ERROR:
6480 default:
6481 fprintf_filtered (out,
6482 _("Forward (target `%s' does not support exec-direction).\n"),
6483 target_shortname);
6484 break;
6485 }
6486 }
6487
6488 /* User interface for non-stop mode. */
6489
6490 int non_stop = 0;
6491
6492 static void
6493 set_non_stop (char *args, int from_tty,
6494 struct cmd_list_element *c)
6495 {
6496 if (target_has_execution)
6497 {
6498 non_stop_1 = non_stop;
6499 error (_("Cannot change this setting while the inferior is running."));
6500 }
6501
6502 non_stop = non_stop_1;
6503 }
6504
6505 static void
6506 show_non_stop (struct ui_file *file, int from_tty,
6507 struct cmd_list_element *c, const char *value)
6508 {
6509 fprintf_filtered (file,
6510 _("Controlling the inferior in non-stop mode is %s.\n"),
6511 value);
6512 }
6513
6514 static void
6515 show_schedule_multiple (struct ui_file *file, int from_tty,
6516 struct cmd_list_element *c, const char *value)
6517 {
6518 fprintf_filtered (file, _("\
6519 Resuming the execution of threads of all processes is %s.\n"), value);
6520 }
6521
6522 void
6523 _initialize_infrun (void)
6524 {
6525 int i;
6526 int numsigs;
6527
6528 add_info ("signals", signals_info, _("\
6529 What debugger does when program gets various signals.\n\
6530 Specify a signal as argument to print info on that signal only."));
6531 add_info_alias ("handle", "signals", 0);
6532
6533 add_com ("handle", class_run, handle_command, _("\
6534 Specify how to handle a signal.\n\
6535 Args are signals and actions to apply to those signals.\n\
6536 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6537 from 1-15 are allowed for compatibility with old versions of GDB.\n\
6538 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
6539 The special arg \"all\" is recognized to mean all signals except those\n\
6540 used by the debugger, typically SIGTRAP and SIGINT.\n\
6541 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
6542 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
6543 Stop means reenter debugger if this signal happens (implies print).\n\
6544 Print means print a message if this signal happens.\n\
6545 Pass means let program see this signal; otherwise program doesn't know.\n\
6546 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
6547 Pass and Stop may be combined."));
6548 if (xdb_commands)
6549 {
6550 add_com ("lz", class_info, signals_info, _("\
6551 What debugger does when program gets various signals.\n\
6552 Specify a signal as argument to print info on that signal only."));
6553 add_com ("z", class_run, xdb_handle_command, _("\
6554 Specify how to handle a signal.\n\
6555 Args are signals and actions to apply to those signals.\n\
6556 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6557 from 1-15 are allowed for compatibility with old versions of GDB.\n\
6558 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
6559 The special arg \"all\" is recognized to mean all signals except those\n\
6560 used by the debugger, typically SIGTRAP and SIGINT.\n\
6561 Recognized actions include \"s\" (toggles between stop and nostop),\n\
6562 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
6563 nopass), \"Q\" (noprint)\n\
6564 Stop means reenter debugger if this signal happens (implies print).\n\
6565 Print means print a message if this signal happens.\n\
6566 Pass means let program see this signal; otherwise program doesn't know.\n\
6567 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
6568 Pass and Stop may be combined."));
6569 }
6570
6571 if (!dbx_commands)
6572 stop_command = add_cmd ("stop", class_obscure,
6573 not_just_help_class_command, _("\
6574 There is no `stop' command, but you can set a hook on `stop'.\n\
6575 This allows you to set a list of commands to be run each time execution\n\
6576 of the program stops."), &cmdlist);
6577
6578 add_setshow_zinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
6579 Set inferior debugging."), _("\
6580 Show inferior debugging."), _("\
6581 When non-zero, inferior specific debugging is enabled."),
6582 NULL,
6583 show_debug_infrun,
6584 &setdebuglist, &showdebuglist);
6585
6586 add_setshow_boolean_cmd ("displaced", class_maintenance, &debug_displaced, _("\
6587 Set displaced stepping debugging."), _("\
6588 Show displaced stepping debugging."), _("\
6589 When non-zero, displaced stepping specific debugging is enabled."),
6590 NULL,
6591 show_debug_displaced,
6592 &setdebuglist, &showdebuglist);
6593
6594 add_setshow_boolean_cmd ("non-stop", no_class,
6595 &non_stop_1, _("\
6596 Set whether gdb controls the inferior in non-stop mode."), _("\
6597 Show whether gdb controls the inferior in non-stop mode."), _("\
6598 When debugging a multi-threaded program and this setting is\n\
6599 off (the default, also called all-stop mode), when one thread stops\n\
6600 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
6601 all other threads in the program while you interact with the thread of\n\
6602 interest. When you continue or step a thread, you can allow the other\n\
6603 threads to run, or have them remain stopped, but while you inspect any\n\
6604 thread's state, all threads stop.\n\
6605 \n\
6606 In non-stop mode, when one thread stops, other threads can continue\n\
6607 to run freely. You'll be able to step each thread independently,\n\
6608 leave it stopped or free to run as needed."),
6609 set_non_stop,
6610 show_non_stop,
6611 &setlist,
6612 &showlist);
6613
6614 numsigs = (int) TARGET_SIGNAL_LAST;
6615 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
6616 signal_print = (unsigned char *)
6617 xmalloc (sizeof (signal_print[0]) * numsigs);
6618 signal_program = (unsigned char *)
6619 xmalloc (sizeof (signal_program[0]) * numsigs);
  for (i = 0; i < numsigs; i++)
    {
      signal_stop[i] = 1;
      signal_print[i] = 1;
      signal_program[i] = 1;
    }

  /* Signals caused by debugger's own actions
     should not be given to the program afterwards.  */
  signal_program[TARGET_SIGNAL_TRAP] = 0;
  signal_program[TARGET_SIGNAL_INT] = 0;

  /* Signals that are not errors should not normally enter the debugger.  */
  signal_stop[TARGET_SIGNAL_ALRM] = 0;
  signal_print[TARGET_SIGNAL_ALRM] = 0;
  signal_stop[TARGET_SIGNAL_VTALRM] = 0;
  signal_print[TARGET_SIGNAL_VTALRM] = 0;
  signal_stop[TARGET_SIGNAL_PROF] = 0;
  signal_print[TARGET_SIGNAL_PROF] = 0;
  signal_stop[TARGET_SIGNAL_CHLD] = 0;
  signal_print[TARGET_SIGNAL_CHLD] = 0;
  signal_stop[TARGET_SIGNAL_IO] = 0;
  signal_print[TARGET_SIGNAL_IO] = 0;
  signal_stop[TARGET_SIGNAL_POLL] = 0;
  signal_print[TARGET_SIGNAL_POLL] = 0;
  signal_stop[TARGET_SIGNAL_URG] = 0;
  signal_print[TARGET_SIGNAL_URG] = 0;
  signal_stop[TARGET_SIGNAL_WINCH] = 0;
  signal_print[TARGET_SIGNAL_WINCH] = 0;

  /* These signals are used internally by user-level thread
     implementations.  (See signal(5) on Solaris.)  Like the above
     signals, a healthy program receives and handles them as part of
     its normal operation.  */
  signal_stop[TARGET_SIGNAL_LWP] = 0;
  signal_print[TARGET_SIGNAL_LWP] = 0;
  signal_stop[TARGET_SIGNAL_WAITING] = 0;
  signal_print[TARGET_SIGNAL_WAITING] = 0;
  signal_stop[TARGET_SIGNAL_CANCEL] = 0;
  signal_print[TARGET_SIGNAL_CANCEL] = 0;

  add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
                            &stop_on_solib_events, _("\
Set stopping for shared library events."), _("\
Show stopping for shared library events."), _("\
If nonzero, gdb will give control to the user when the dynamic linker\n\
notifies gdb of shared library events.  The most common event of interest\n\
to the user is the loading or unloading of a shared library."),
                            NULL,
                            show_stop_on_solib_events,
                            &setlist, &showlist);
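
  /* Example (illustrative): "set stop-on-solib-events 1" makes GDB stop
     at each shared library load or unload reported by the dynamic
     linker.  */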

  add_setshow_enum_cmd ("follow-fork-mode", class_run,
                        follow_fork_mode_kind_names,
                        &follow_fork_mode_string, _("\
Set debugger response to a program call of fork or vfork."), _("\
Show debugger response to a program call of fork or vfork."), _("\
A fork or vfork creates a new process.  follow-fork-mode can be:\n\
  parent  - the original process is debugged after a fork\n\
  child   - the new process is debugged after a fork\n\
The unfollowed process will continue to run.\n\
By default, the debugger will follow the parent process."),
                        NULL,
                        show_follow_fork_mode_string,
                        &setlist, &showlist);
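
  /* Example (illustrative): "set follow-fork-mode child" makes GDB stay
     with the child after the next fork; combined with
     "set detach-on-fork off" (registered below), both processes remain
     under GDB's control.  */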

  add_setshow_enum_cmd ("follow-exec-mode", class_run,
                        follow_exec_mode_names,
                        &follow_exec_mode_string, _("\
Set debugger response to a program call of exec."), _("\
Show debugger response to a program call of exec."), _("\
An exec call replaces the program image of a process.\n\
\n\
follow-exec-mode can be:\n\
\n\
  new - the debugger creates a new inferior and rebinds the process\n\
to this new inferior.  The program the process was running before\n\
the exec call can be restarted afterwards by restarting the original\n\
inferior.\n\
\n\
  same - the debugger keeps the process bound to the same inferior.\n\
The new executable image replaces the previous executable loaded in\n\
the inferior.  Restarting the inferior after the exec call restarts\n\
the executable the process was running after the exec call.\n\
\n\
By default, the debugger will use the same inferior."),
                        NULL,
                        show_follow_exec_mode_string,
                        &setlist, &showlist);
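
  /* Example (illustrative): "set follow-exec-mode new" makes GDB create
     a fresh inferior when the program calls exec, so the pre-exec
     program can still be rerun from the original inferior.  */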

  add_setshow_enum_cmd ("scheduler-locking", class_run,
                        scheduler_enums, &scheduler_mode, _("\
Set mode for locking scheduler during execution."), _("\
Show mode for locking scheduler during execution."), _("\
off  == no locking (threads may preempt at any time)\n\
on   == full locking (no thread except the current thread may run)\n\
step == scheduler locked during every single-step operation.\n\
        In this mode, no other thread may run during a step command.\n\
        Other threads may run while stepping over a function call ('next')."),
                        set_schedlock_func,	/* traps on target vector */
                        show_scheduler_mode,
                        &setlist, &showlist);
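
  /* Example (illustrative): "set scheduler-locking step" keeps other
     threads stopped while single-stepping the current thread.  */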

  add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
Set mode for resuming threads of all processes."), _("\
Show mode for resuming threads of all processes."), _("\
When on, execution commands (such as 'continue' or 'next') resume all\n\
threads of all processes.  When off (which is the default), execution\n\
commands only resume the threads of the current process.  The set of\n\
threads that are resumed is further refined by the scheduler-locking\n\
mode (see help set scheduler-locking)."),
                           NULL,
                           show_schedule_multiple,
                           &setlist, &showlist);
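
  /* Example (illustrative): with several inferiors under control,
     "set schedule-multiple on" lets "continue" resume the threads of
     every process rather than only the current one.  */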

  add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
Set mode of the step operation."), _("\
Show mode of the step operation."), _("\
When set, a step into a function that has no debug line information\n\
will stop at the first instruction of that function.  Otherwise, the\n\
function is skipped and the step command stops at a different source line."),
                           NULL,
                           show_step_stop_if_no_debug,
                           &setlist, &showlist);
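
  /* Example (illustrative): "set step-mode on" is handy when stepping
     reaches library code built without debug info, landing on the first
     instruction of such a function instead of skipping over it.  */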

  add_setshow_enum_cmd ("displaced-stepping", class_run,
                        can_use_displaced_stepping_enum,
                        &can_use_displaced_stepping, _("\
Set debugger's willingness to use displaced stepping."), _("\
Show debugger's willingness to use displaced stepping."), _("\
If on, gdb will use displaced stepping to step over breakpoints if it is\n\
supported by the target architecture.  If off, gdb will not use displaced\n\
stepping to step over breakpoints, even if it is supported by the target\n\
architecture.  If auto (which is the default), gdb will use displaced stepping\n\
if the target architecture supports it and non-stop mode is active, but will\n\
not use it in all-stop mode (see help set non-stop)."),
                        NULL,
                        show_can_use_displaced_stepping,
                        &setlist, &showlist);
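
  /* Example (illustrative): "set displaced-stepping off" forces the
     traditional remove-breakpoint/step/reinsert sequence even on
     targets that could step over breakpoints out of line.  */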

  add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
                        &exec_direction, _("Set direction of execution.\n\
Options are 'forward' or 'reverse'."),
                        _("Show direction of execution (forward/reverse)."),
                        _("Tells gdb whether to execute forward or backward."),
                        set_exec_direction_func, show_exec_direction_func,
                        &setlist, &showlist);
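
  /* Example (illustrative): on a target that supports reverse execution
     (such as process record, started with "record"),
     "set exec-direction reverse" makes subsequent step and continue
     commands run the program backward; "set exec-direction forward"
     restores the normal direction.  */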

  /* Set/show detach-on-fork: user-settable mode.  */

  add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
Set whether gdb will detach the child of a fork."), _("\
Show whether gdb will detach the child of a fork."), _("\
Tells gdb whether to detach the child of a fork."),
                           NULL, NULL, &setlist, &showlist);
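
  /* Example (illustrative): "set detach-on-fork off" keeps both the
     parent and the child under GDB's control after a fork, with the
     unfollowed process held suspended.  */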

  /* ptid initializations */
  null_ptid = ptid_build (0, 0, 0);
  minus_one_ptid = ptid_build (-1, 0, 0);
  inferior_ptid = null_ptid;
  target_last_wait_ptid = minus_one_ptid;

  observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
  observer_attach_thread_stop_requested (infrun_thread_stop_requested);
  observer_attach_thread_exit (infrun_thread_thread_exit);
  observer_attach_inferior_exit (infrun_inferior_exit);

  /* Explicitly create without lookup, since lookup would try to create
     a value of void type, and when we get here, gdbarch isn't
     initialized yet.  At this point, we're quite sure there isn't
     another convenience variable of the same name.  */
  create_internalvar_type_lazy ("_siginfo", siginfo_make_value);
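
  /* Example (illustrative): once a signal has been reported, the user
     can inspect this convenience variable with "print $_siginfo" on
     targets that provide signal information.  */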

  add_setshow_boolean_cmd ("observer", no_class,
                           &observer_mode_1, _("\
Set whether gdb controls the inferior in observer mode."), _("\
Show whether gdb controls the inferior in observer mode."), _("\
In observer mode, GDB can get data from the inferior, but not\n\
affect its execution.  Registers and memory may not be changed,\n\
breakpoints may not be set, and the program cannot be interrupted\n\
or signalled."),
                           set_observer_mode,
                           show_observer_mode,
                           &setlist,
                           &showlist);
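
  /* Example (illustrative): "set observer on" puts GDB into this
     read-only mode; "set observer off" restores normal control of the
     inferior.  */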
}