1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995,
5 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
6 2008, 2009, 2010 Free Software Foundation, Inc.
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "gdb_string.h"
25 #include <ctype.h>
26 #include "symtab.h"
27 #include "frame.h"
28 #include "inferior.h"
29 #include "exceptions.h"
30 #include "breakpoint.h"
31 #include "gdb_wait.h"
32 #include "gdbcore.h"
33 #include "gdbcmd.h"
34 #include "cli/cli-script.h"
35 #include "target.h"
36 #include "gdbthread.h"
37 #include "annotate.h"
38 #include "symfile.h"
39 #include "top.h"
40 #include <signal.h>
41 #include "inf-loop.h"
42 #include "regcache.h"
43 #include "value.h"
44 #include "observer.h"
45 #include "language.h"
46 #include "solib.h"
47 #include "main.h"
48 #include "gdb_assert.h"
49 #include "mi/mi-common.h"
50 #include "event-top.h"
51 #include "record.h"
52 #include "inline-frame.h"
53 #include "jit.h"
54 #include "tracepoint.h"
55
56 /* Prototypes for local functions */
57
58 static void signals_info (char *, int);
59
60 static void handle_command (char *, int);
61
62 static void sig_print_info (enum target_signal);
63
64 static void sig_print_header (void);
65
66 static void resume_cleanups (void *);
67
68 static int hook_stop_stub (void *);
69
70 static int restore_selected_frame (void *);
71
72 static int follow_fork (void);
73
74 static void set_schedlock_func (char *args, int from_tty,
75 struct cmd_list_element *c);
76
77 static int currently_stepping (struct thread_info *tp);
78
79 static int currently_stepping_or_nexting_callback (struct thread_info *tp,
80 void *data);
81
82 static void xdb_handle_command (char *args, int from_tty);
83
84 static int prepare_to_proceed (int);
85
86 static void print_exited_reason (int exitstatus);
87
88 static void print_signal_exited_reason (enum target_signal siggnal);
89
90 static void print_no_history_reason (void);
91
92 static void print_signal_received_reason (enum target_signal siggnal);
93
94 static void print_end_stepping_range_reason (void);
95
96 void _initialize_infrun (void);
97
98 void nullify_last_target_wait_ptid (void);
99
100 /* When set, stop the 'step' command if we enter a function which has
101 no line number information. The normal behavior is that we step
102 over such functions. */
103 int step_stop_if_no_debug = 0;
104 static void
105 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
106 struct cmd_list_element *c, const char *value)
107 {
108 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
109 }
110
111 /* In asynchronous mode, but simulating synchronous execution. */
112
113 int sync_execution = 0;
114
115 /* wait_for_inferior and normal_stop use this to notify the user
116 when the inferior stopped in a different thread than it had been
117 running in. */
118
119 static ptid_t previous_inferior_ptid;
120
121 /* Default behavior is to detach newly forked processes (legacy). */
122 int detach_fork = 1;
123
124 int debug_displaced = 0;
125 static void
126 show_debug_displaced (struct ui_file *file, int from_tty,
127 struct cmd_list_element *c, const char *value)
128 {
129 fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
130 }
131
132 int debug_infrun = 0;
133 static void
134 show_debug_infrun (struct ui_file *file, int from_tty,
135 struct cmd_list_element *c, const char *value)
136 {
137 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
138 }
139
140 /* If the program uses ELF-style shared libraries, then calls to
141 functions in shared libraries go through stubs, which live in a
142 table called the PLT (Procedure Linkage Table). The first time the
143 function is called, the stub sends control to the dynamic linker,
144 which looks up the function's real address, patches the stub so
145 that future calls will go directly to the function, and then passes
146 control to the function.
147
148 If we are stepping at the source level, we don't want to see any of
149 this --- we just want to skip over the stub and the dynamic linker.
150 The simple approach is to single-step until control leaves the
151 dynamic linker.
152
153 However, on some systems (e.g., Red Hat's 5.2 distribution) the
154 dynamic linker calls functions in the shared C library, so you
155 can't tell from the PC alone whether the dynamic linker is still
156 running. In this case, we use a step-resume breakpoint to get us
157 past the dynamic linker, as if we were using "next" to step over a
158 function call.
159
160 in_solib_dynsym_resolve_code() says whether we're in the dynamic
161 linker code or not. Normally, this means we single-step. However,
162 if SKIP_SOLIB_RESOLVER returns non-zero, then its value is an
163 address where we can place a step-resume breakpoint to get past the
164 linker's symbol resolution function.
165
166 in_solib_dynsym_resolve_code() can generally be implemented in a
167 pretty portable way, by comparing the PC against the address ranges
168 of the dynamic linker's sections.
169
170 SKIP_SOLIB_RESOLVER is generally going to be system-specific, since
171 it depends on internal details of the dynamic linker. It's usually
172 not too hard to figure out where to put a breakpoint, but it
173 certainly isn't portable. SKIP_SOLIB_RESOLVER should do plenty of
174 sanity checking. If it can't figure things out, returning zero and
175 getting the (possibly confusing) stepping behavior is better than
176 signalling an error, which will obscure the change in the
177 inferior's state. */
178
179 /* This function returns TRUE if pc is the address of an instruction
180 that lies within the dynamic linker (such as the event hook, or the
181 dld itself).
182
183 This function must be used only when a dynamic linker event has
184 been caught, and the inferior is being stepped out of the hook, or
185 undefined results are guaranteed. */
186
187 #ifndef SOLIB_IN_DYNAMIC_LINKER
188 #define SOLIB_IN_DYNAMIC_LINKER(pid,pc) 0
189 #endif
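/* Purely illustrative sketch (an editorial addition, not GDB code):
   how the two hooks described above are meant to cooperate while
   stepping.  GDBARCH and PC stand for the stepping thread's
   architecture and current stop address; the surrounding step logic
   is omitted.

     if (in_solib_dynsym_resolve_code (pc))
       {
         CORE_ADDR resolver_end = gdbarch_skip_solib_resolver (gdbarch, pc);

         if (resolver_end != 0)
           ... plant a step-resume breakpoint at RESOLVER_END and let
               the inferior run to it, much as "next" would ...
         else
           ... keep single-stepping until control leaves the dynamic
               linker ...
       }
*/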
190
191 /* "Observer mode" is somewhat like a more extreme version of
192 non-stop, in which all GDB operations that might affect the
193 target's execution have been disabled. */
194
195 static int non_stop_1 = 0;
196
197 int observer_mode = 0;
198 static int observer_mode_1 = 0;
199
200 static void
201 set_observer_mode (char *args, int from_tty,
202 struct cmd_list_element *c)
203 {
204 extern int pagination_enabled;
205
206 if (target_has_execution)
207 {
208 observer_mode_1 = observer_mode;
209 error (_("Cannot change this setting while the inferior is running."));
210 }
211
212 observer_mode = observer_mode_1;
213
214 may_write_registers = !observer_mode;
215 may_write_memory = !observer_mode;
216 may_insert_breakpoints = !observer_mode;
217 may_insert_tracepoints = !observer_mode;
218 /* We can insert fast tracepoints in or out of observer mode,
219 but enable them if we're going into this mode. */
220 if (observer_mode)
221 may_insert_fast_tracepoints = 1;
222 may_stop = !observer_mode;
223 update_target_permissions ();
224
225 /* Going *into* observer mode, we must force non-stop; when
226 going back out, we leave it that way. */
227 if (observer_mode)
228 {
229 target_async_permitted = 1;
230 pagination_enabled = 0;
231 non_stop = non_stop_1 = 1;
232 }
233
234 if (from_tty)
235 printf_filtered (_("Observer mode is now %s.\n"),
236 (observer_mode ? "on" : "off"));
237 }
238
239 static void
240 show_observer_mode (struct ui_file *file, int from_tty,
241 struct cmd_list_element *c, const char *value)
242 {
243 fprintf_filtered (file, _("Observer mode is %s.\n"), value);
244 }
245
246 /* This updates the value of observer mode based on changes in
247 permissions. Note that we are deliberately ignoring the values of
248 may-write-registers and may-write-memory, since the user may have
249 reason to enable these during a session, for instance to turn on a
250 debugging-related global. */
251
252 void
253 update_observer_mode (void)
254 {
255 int newval;
256
257 newval = (!may_insert_breakpoints
258 && !may_insert_tracepoints
259 && may_insert_fast_tracepoints
260 && !may_stop
261 && non_stop);
262
263 /* Let the user know if things change. */
264 if (newval != observer_mode)
265 printf_filtered (_("Observer mode is now %s.\n"),
266 (newval ? "on" : "off"));
267
268 observer_mode = observer_mode_1 = newval;
269 }
270
271 /* Tables of how to react to signals; the user sets them. */
272
273 static unsigned char *signal_stop;
274 static unsigned char *signal_print;
275 static unsigned char *signal_program;
276
277 #define SET_SIGS(nsigs,sigs,flags) \
278 do { \
279 int signum = (nsigs); \
280 while (signum-- > 0) \
281 if ((sigs)[signum]) \
282 (flags)[signum] = 1; \
283 } while (0)
284
285 #define UNSET_SIGS(nsigs,sigs,flags) \
286 do { \
287 int signum = (nsigs); \
288 while (signum-- > 0) \
289 if ((sigs)[signum]) \
290 (flags)[signum] = 0; \
291 } while (0)
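
/* Illustrative usage sketch (an editorial addition, not GDB code):
   SIGS below is a hypothetical selection array with one entry per
   signal number; the flag tables are the real signal_stop /
   signal_print / signal_program arrays declared above.

     unsigned char sigs[TARGET_SIGNAL_LAST] = { 0 };

     sigs[TARGET_SIGNAL_INT] = 1;

     SET_SIGS (TARGET_SIGNAL_LAST, sigs, signal_stop);
     UNSET_SIGS (TARGET_SIGNAL_LAST, sigs, signal_program);

   This would arrange to stop on SIGINT without passing it on to the
   program, roughly what the "handle" command does internally.  */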
292
293 /* Value to pass to target_resume() to cause all threads to resume */
294
295 #define RESUME_ALL minus_one_ptid
296
297 /* Command list pointer for the "stop" placeholder. */
298
299 static struct cmd_list_element *stop_command;
300
301 /* Function inferior was in as of last step command. */
302
303 static struct symbol *step_start_function;
304
305 /* Nonzero if we want to give control to the user when we're notified
306 of shared library events by the dynamic linker. */
307 int stop_on_solib_events;
308 static void
309 show_stop_on_solib_events (struct ui_file *file, int from_tty,
310 struct cmd_list_element *c, const char *value)
311 {
312 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
313 value);
314 }
315
316 /* Nonzero means we are expecting a trace trap, and should stop
317 the inferior and return silently when it happens. */
318
319 int stop_after_trap;
320
321 /* Save register contents here when executing a "finish" command or when
322 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
323 Thus this contains the return value from the called function (assuming
324 values are returned in a register). */
325
326 struct regcache *stop_registers;
327
328 /* Nonzero after stop if current stack frame should be printed. */
329
330 static int stop_print_frame;
331
332 /* This is a cached copy of the pid/waitstatus of the last event
333 returned by target_wait()/deprecated_target_wait_hook(). This
334 information is returned by get_last_target_status(). */
335 static ptid_t target_last_wait_ptid;
336 static struct target_waitstatus target_last_waitstatus;
337
338 static void context_switch (ptid_t ptid);
339
340 void init_thread_stepping_state (struct thread_info *tss);
341
342 void init_infwait_state (void);
343
344 static const char follow_fork_mode_child[] = "child";
345 static const char follow_fork_mode_parent[] = "parent";
346
347 static const char *follow_fork_mode_kind_names[] = {
348 follow_fork_mode_child,
349 follow_fork_mode_parent,
350 NULL
351 };
352
353 static const char *follow_fork_mode_string = follow_fork_mode_parent;
354 static void
355 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
356 struct cmd_list_element *c, const char *value)
357 {
358 fprintf_filtered (file, _("\
359 Debugger response to a program call of fork or vfork is \"%s\".\n"),
360 value);
361 }
362 \f
363
364 /* Tell the target to follow the fork we're stopped at. Returns true
365 if the inferior should be resumed; false, if the target for some
366 reason decided it's best not to resume. */
367
368 static int
369 follow_fork (void)
370 {
371 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
372 int should_resume = 1;
373 struct thread_info *tp;
374
375 /* Copy user stepping state to the new inferior thread. FIXME: the
376 followed fork child thread should have a copy of most of the
377 parent thread structure's run control related fields, not just these.
378 Initialized to avoid "may be used uninitialized" warnings from gcc. */
379 struct breakpoint *step_resume_breakpoint = NULL;
380 CORE_ADDR step_range_start = 0;
381 CORE_ADDR step_range_end = 0;
382 struct frame_id step_frame_id = { 0 };
383
384 if (!non_stop)
385 {
386 ptid_t wait_ptid;
387 struct target_waitstatus wait_status;
388
389 /* Get the last target status returned by target_wait(). */
390 get_last_target_status (&wait_ptid, &wait_status);
391
392 /* If not stopped at a fork event, then there's nothing else to
393 do. */
394 if (wait_status.kind != TARGET_WAITKIND_FORKED
395 && wait_status.kind != TARGET_WAITKIND_VFORKED)
396 return 1;
397
398 /* Check if we switched over from WAIT_PTID, since the event was
399 reported. */
400 if (!ptid_equal (wait_ptid, minus_one_ptid)
401 && !ptid_equal (inferior_ptid, wait_ptid))
402 {
403 /* We did. Switch back to WAIT_PTID thread, to tell the
404 target to follow it (in either direction). We'll
405 afterwards refuse to resume, and inform the user what
406 happened. */
407 switch_to_thread (wait_ptid);
408 should_resume = 0;
409 }
410 }
411
412 tp = inferior_thread ();
413
414 /* If there were any forks/vforks that were caught and are now to be
415 followed, then do so now. */
416 switch (tp->pending_follow.kind)
417 {
418 case TARGET_WAITKIND_FORKED:
419 case TARGET_WAITKIND_VFORKED:
420 {
421 ptid_t parent, child;
422
423 /* If the user did a next/step, etc, over a fork call,
424 preserve the stepping state in the fork child. */
425 if (follow_child && should_resume)
426 {
427 step_resume_breakpoint
428 = clone_momentary_breakpoint (tp->step_resume_breakpoint);
429 step_range_start = tp->step_range_start;
430 step_range_end = tp->step_range_end;
431 step_frame_id = tp->step_frame_id;
432
433 /* For now, delete the parent's sr breakpoint, otherwise,
434 parent/child sr breakpoints are considered duplicates,
435 and the child version will not be installed. Remove
436 this when the breakpoints module becomes aware of
437 inferiors and address spaces. */
438 delete_step_resume_breakpoint (tp);
439 tp->step_range_start = 0;
440 tp->step_range_end = 0;
441 tp->step_frame_id = null_frame_id;
442 }
443
444 parent = inferior_ptid;
445 child = tp->pending_follow.value.related_pid;
446
447 /* Tell the target to do whatever is necessary to follow
448 either parent or child. */
449 if (target_follow_fork (follow_child))
450 {
451 /* Target refused to follow, or there's some other reason
452 we shouldn't resume. */
453 should_resume = 0;
454 }
455 else
456 {
457 /* This pending follow fork event is now handled, one way
458 or another. The previously selected thread may be gone
459 from the lists by now, but if it is still around, we need
460 to clear the pending follow request. */
461 tp = find_thread_ptid (parent);
462 if (tp)
463 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
464
465 /* This makes sure we don't try to apply the "Switched
466 over from WAIT_PTID" logic above. */
467 nullify_last_target_wait_ptid ();
468
469 /* If we followed the child, switch to it... */
470 if (follow_child)
471 {
472 switch_to_thread (child);
473
474 /* ... and preserve the stepping state, in case the
475 user was stepping over the fork call. */
476 if (should_resume)
477 {
478 tp = inferior_thread ();
479 tp->step_resume_breakpoint = step_resume_breakpoint;
480 tp->step_range_start = step_range_start;
481 tp->step_range_end = step_range_end;
482 tp->step_frame_id = step_frame_id;
483 }
484 else
485 {
486 /* If we get here, it was because we're trying to
487 resume from a fork catchpoint, but, the user
488 has switched threads away from the thread that
489 forked. In that case, the resume command
490 issued is most likely not applicable to the
491 child, so just warn, and refuse to resume. */
492 warning (_("\
493 Not resuming: switched threads before following fork child.\n"));
494 }
495
496 /* Reset breakpoints in the child as appropriate. */
497 follow_inferior_reset_breakpoints ();
498 }
499 else
500 switch_to_thread (parent);
501 }
502 }
503 break;
504 case TARGET_WAITKIND_SPURIOUS:
505 /* Nothing to follow. */
506 break;
507 default:
508 internal_error (__FILE__, __LINE__,
509 "Unexpected pending_follow.kind %d\n",
510 tp->pending_follow.kind);
511 break;
512 }
513
514 return should_resume;
515 }
516
517 void
518 follow_inferior_reset_breakpoints (void)
519 {
520 struct thread_info *tp = inferior_thread ();
521
522 /* Was there a step_resume breakpoint? (There was if the user
523 did a "next" at the fork() call.) If so, explicitly reset its
524 thread number.
525
526 step_resumes are a form of bp that are made to be per-thread.
527 Since we created the step_resume bp when the parent process
528 was being debugged, and now are switching to the child process,
529 from the breakpoint package's viewpoint, that's a switch of
530 "threads". We must update the bp's notion of which thread
531 it is for, or it'll be ignored when it triggers. */
532
533 if (tp->step_resume_breakpoint)
534 breakpoint_re_set_thread (tp->step_resume_breakpoint);
535
536 /* Reinsert all breakpoints in the child. The user may have set
537 breakpoints after catching the fork, in which case those
538 were never set in the child, but only in the parent. This makes
539 sure the inserted breakpoints match the breakpoint list. */
540
541 breakpoint_re_set ();
542 insert_breakpoints ();
543 }
544
545 /* The child has exited or execed: resume threads of the parent the
546 user wanted to be executing. */
547
548 static int
549 proceed_after_vfork_done (struct thread_info *thread,
550 void *arg)
551 {
552 int pid = * (int *) arg;
553
554 if (ptid_get_pid (thread->ptid) == pid
555 && is_running (thread->ptid)
556 && !is_executing (thread->ptid)
557 && !thread->stop_requested
558 && thread->stop_signal == TARGET_SIGNAL_0)
559 {
560 if (debug_infrun)
561 fprintf_unfiltered (gdb_stdlog,
562 "infrun: resuming vfork parent thread %s\n",
563 target_pid_to_str (thread->ptid));
564
565 switch_to_thread (thread->ptid);
566 clear_proceed_status ();
567 proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);
568 }
569
570 return 0;
571 }
572
573 /* Called whenever we notice an exec or exit event, to handle
574 detaching or resuming a vfork parent. */
575
576 static void
577 handle_vfork_child_exec_or_exit (int exec)
578 {
579 struct inferior *inf = current_inferior ();
580
581 if (inf->vfork_parent)
582 {
583 int resume_parent = -1;
584
585 /* This exec or exit marks the end of the shared memory region
586 between the parent and the child. If the user wanted to
587 detach from the parent, now is the time. */
588
589 if (inf->vfork_parent->pending_detach)
590 {
591 struct thread_info *tp;
592 struct cleanup *old_chain;
593 struct program_space *pspace;
594 struct address_space *aspace;
595
596 /* follow-fork child, detach-on-fork on */
597
598 old_chain = make_cleanup_restore_current_thread ();
599
600 /* We're letting go of the parent. */
601 tp = any_live_thread_of_process (inf->vfork_parent->pid);
602 switch_to_thread (tp->ptid);
603
604 /* We're about to detach from the parent, which implicitly
605 removes breakpoints from its address space. There's a
606 catch here: we want to reuse the spaces for the child,
607 but, parent/child are still sharing the pspace at this
608 point, although the exec in reality makes the kernel give
609 the child a fresh set of new pages. The problem here is
610 that the breakpoints module, being unaware of this, would
611 likely choose the child process to write to the parent
612 address space. Swapping the child temporarily away from
613 the spaces has the desired effect. Yes, this is "sort
614 of" a hack. */
615
616 pspace = inf->pspace;
617 aspace = inf->aspace;
618 inf->aspace = NULL;
619 inf->pspace = NULL;
620
621 if (debug_infrun || info_verbose)
622 {
623 target_terminal_ours ();
624
625 if (exec)
626 fprintf_filtered (gdb_stdlog,
627 "Detaching vfork parent process %d after child exec.\n",
628 inf->vfork_parent->pid);
629 else
630 fprintf_filtered (gdb_stdlog,
631 "Detaching vfork parent process %d after child exit.\n",
632 inf->vfork_parent->pid);
633 }
634
635 target_detach (NULL, 0);
636
637 /* Put it back. */
638 inf->pspace = pspace;
639 inf->aspace = aspace;
640
641 do_cleanups (old_chain);
642 }
643 else if (exec)
644 {
645 /* We're staying attached to the parent, so, really give the
646 child a new address space. */
647 inf->pspace = add_program_space (maybe_new_address_space ());
648 inf->aspace = inf->pspace->aspace;
649 inf->removable = 1;
650 set_current_program_space (inf->pspace);
651
652 resume_parent = inf->vfork_parent->pid;
653
654 /* Break the bonds. */
655 inf->vfork_parent->vfork_child = NULL;
656 }
657 else
658 {
659 struct cleanup *old_chain;
660 struct program_space *pspace;
661
662 /* If this is a vfork child exiting, then the pspace and
663 aspaces were shared with the parent. Since we're
664 reporting the process exit, we'll be mourning all that is
665 found in the address space, and switching to null_ptid,
666 preparing to start a new inferior. But, since we don't
667 want to clobber the parent's address/program spaces, we
668 go ahead and create a new one for this exiting
669 inferior. */
670
671 /* Switch to null_ptid, so that clone_program_space doesn't want
672 to read the selected frame of a dead process. */
673 old_chain = save_inferior_ptid ();
674 inferior_ptid = null_ptid;
675
676 /* This inferior is dead, so avoid giving the breakpoints
677 module the option to write through to it (cloning a
678 program space resets breakpoints). */
679 inf->aspace = NULL;
680 inf->pspace = NULL;
681 pspace = add_program_space (maybe_new_address_space ());
682 set_current_program_space (pspace);
683 inf->removable = 1;
684 clone_program_space (pspace, inf->vfork_parent->pspace);
685 inf->pspace = pspace;
686 inf->aspace = pspace->aspace;
687
688 /* Put back inferior_ptid. We'll continue mourning this
689 inferior. */
690 do_cleanups (old_chain);
691
692 resume_parent = inf->vfork_parent->pid;
693 /* Break the bonds. */
694 inf->vfork_parent->vfork_child = NULL;
695 }
696
697 inf->vfork_parent = NULL;
698
699 gdb_assert (current_program_space == inf->pspace);
700
701 if (non_stop && resume_parent != -1)
702 {
703 /* If the user wanted the parent to be running, let it go
704 free now. */
705 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
706
707 if (debug_infrun)
708 fprintf_unfiltered (gdb_stdlog, "infrun: resuming vfork parent process %d\n",
709 resume_parent);
710
711 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
712
713 do_cleanups (old_chain);
714 }
715 }
716 }
717
718 /* Enum strings for "set|show follow-exec-mode". */
719
720 static const char follow_exec_mode_new[] = "new";
721 static const char follow_exec_mode_same[] = "same";
722 static const char *follow_exec_mode_names[] =
723 {
724 follow_exec_mode_new,
725 follow_exec_mode_same,
726 NULL,
727 };
728
729 static const char *follow_exec_mode_string = follow_exec_mode_same;
730 static void
731 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
732 struct cmd_list_element *c, const char *value)
733 {
734 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
735 }
736
737 /* EXECD_PATHNAME is assumed to be non-NULL. */
738
739 static void
740 follow_exec (ptid_t pid, char *execd_pathname)
741 {
742 struct thread_info *th = inferior_thread ();
743 struct inferior *inf = current_inferior ();
744
745 /* This is an exec event that we actually wish to pay attention to.
746 Refresh our symbol table to the newly exec'd program, remove any
747 momentary bp's, etc.
748
749 If there are breakpoints, they aren't really inserted now,
750 since the exec() transformed our inferior into a fresh set
751 of instructions.
752
753 We want to preserve symbolic breakpoints on the list, since
754 we have hopes that they can be reset after the new a.out's
755 symbol table is read.
756
757 However, any "raw" breakpoints must be removed from the list
758 (e.g., the solib bp's), since their address is probably invalid
759 now.
760
761 And, we DON'T want to call delete_breakpoints() here, since
762 that may write the bp's "shadow contents" (the instruction
763 value that was overwritten with a TRAP instruction). Since
764 we now have a new a.out, those shadow contents aren't valid. */
765
766 mark_breakpoints_out ();
767
768 update_breakpoints_after_exec ();
769
770 /* If there was one, it's gone now. We cannot truly step-to-next
771 statement through an exec(). */
772 th->step_resume_breakpoint = NULL;
773 th->step_range_start = 0;
774 th->step_range_end = 0;
775
776 /* The target reports the exec event to the main thread, even if
777 some other thread does the exec, and even if the main thread was
778 already stopped --- if debugging in non-stop mode, it's possible
779 the user had the main thread held stopped in the previous image
780 --- release it now. This is the same behavior as step-over-exec
781 with scheduler-locking on in all-stop mode. */
782 th->stop_requested = 0;
783
784 /* What is this a.out's name? */
785 printf_unfiltered (_("%s is executing new program: %s\n"),
786 target_pid_to_str (inferior_ptid),
787 execd_pathname);
788
789 /* We've followed the inferior through an exec. Therefore, the
790 inferior has essentially been killed & reborn. */
791
792 gdb_flush (gdb_stdout);
793
794 breakpoint_init_inferior (inf_execd);
795
796 if (gdb_sysroot && *gdb_sysroot)
797 {
798 char *name = alloca (strlen (gdb_sysroot)
799 + strlen (execd_pathname)
800 + 1);
801
802 strcpy (name, gdb_sysroot);
803 strcat (name, execd_pathname);
804 execd_pathname = name;
805 }
806
807 /* Reset the shared library package. This ensures that we get a
808 shlib event when the child reaches "_start", at which point the
809 dld will have had a chance to initialize the child. */
810 /* Also, loading a symbol file below may trigger symbol lookups, and
811 we don't want those to be satisfied by the libraries of the
812 previous incarnation of this process. */
813 no_shared_libraries (NULL, 0);
814
815 if (follow_exec_mode_string == follow_exec_mode_new)
816 {
817 struct program_space *pspace;
818
819 /* The user wants to keep the old inferior and program spaces
820 around. Create a new fresh one, and switch to it. */
821
822 inf = add_inferior (current_inferior ()->pid);
823 pspace = add_program_space (maybe_new_address_space ());
824 inf->pspace = pspace;
825 inf->aspace = pspace->aspace;
826
827 exit_inferior_num_silent (current_inferior ()->num);
828
829 set_current_inferior (inf);
830 set_current_program_space (pspace);
831 }
832
833 gdb_assert (current_program_space == inf->pspace);
834
835 /* That a.out is now the one to use. */
836 exec_file_attach (execd_pathname, 0);
837
838 /* Load the main file's symbols. */
839 symbol_file_add_main (execd_pathname, 0);
840
841 #ifdef SOLIB_CREATE_INFERIOR_HOOK
842 SOLIB_CREATE_INFERIOR_HOOK (PIDGET (inferior_ptid));
843 #else
844 solib_create_inferior_hook (0);
845 #endif
846
847 jit_inferior_created_hook ();
848
849 /* Reinsert all breakpoints. (Those which were symbolic have
850 been reset to the proper address in the new a.out, thanks
851 to symbol_file_command...) */
852 insert_breakpoints ();
853
854 /* The next resume of this inferior should bring it to the shlib
855 startup breakpoints. (If the user had also set bp's on
856 "main" from the old (parent) process, then they'll auto-
857 matically get reset there in the new process.) */
858 }
859
860 /* Non-zero if we are just simulating a single-step. This is needed
861 because we cannot remove the breakpoints in the inferior process
862 until after the `wait' in `wait_for_inferior'. */
863 static int singlestep_breakpoints_inserted_p = 0;
864
865 /* The thread we inserted single-step breakpoints for. */
866 static ptid_t singlestep_ptid;
867
868 /* PC when we started this single-step. */
869 static CORE_ADDR singlestep_pc;
870
871 /* If another thread hit the singlestep breakpoint, we save the original
872 thread here so that we can resume single-stepping it later. */
873 static ptid_t saved_singlestep_ptid;
874 static int stepping_past_singlestep_breakpoint;
875
876 /* If not equal to null_ptid, this means that after the step over a
877 breakpoint finishes, we need to switch to deferred_step_ptid and step it.
878
879 The use case is when one thread has hit a breakpoint, and then the user
880 has switched to another thread and issued 'step'. We need to step over
881 the breakpoint in the thread which hit it, but then continue
882 stepping the thread the user has selected.
883 static ptid_t deferred_step_ptid;
884 \f
885 /* Displaced stepping. */
886
887 /* In non-stop debugging mode, we must take special care to manage
888 breakpoints properly; in particular, the traditional strategy for
889 stepping a thread past a breakpoint it has hit is unsuitable.
890 'Displaced stepping' is a tactic for stepping one thread past a
891 breakpoint it has hit while ensuring that other threads running
892 concurrently will hit the breakpoint as they should.
893
894 The traditional way to step a thread T off a breakpoint in a
895 multi-threaded program in all-stop mode is as follows:
896
897 a0) Initially, all threads are stopped, and breakpoints are not
898 inserted.
899 a1) We single-step T, leaving breakpoints uninserted.
900 a2) We insert breakpoints, and resume all threads.
901
902 In non-stop debugging, however, this strategy is unsuitable: we
903 don't want to have to stop all threads in the system in order to
904 continue or step T past a breakpoint. Instead, we use displaced
905 stepping:
906
907 n0) Initially, T is stopped, other threads are running, and
908 breakpoints are inserted.
909 n1) We copy the instruction "under" the breakpoint to a separate
910 location, outside the main code stream, making any adjustments
911 to the instruction, register, and memory state as directed by
912 T's architecture.
913 n2) We single-step T over the instruction at its new location.
914 n3) We adjust the resulting register and memory state as directed
915 by T's architecture. This includes resetting T's PC to point
916 back into the main instruction stream.
917 n4) We resume T.
918
919 This approach depends on the following gdbarch methods:
920
921 - gdbarch_max_insn_length and gdbarch_displaced_step_location
922 indicate where to copy the instruction, and how much space must
923 be reserved there. We use these in step n1.
924
925 - gdbarch_displaced_step_copy_insn copies an instruction to a new
926 address, and makes any necessary adjustments to the instruction,
927 register contents, and memory. We use this in step n1.
928
929 - gdbarch_displaced_step_fixup adjusts registers and memory after
930 we have successfully single-stepped the instruction, to yield the
931 same effect the instruction would have had if we had executed it
932 at its original address. We use this in step n3.
933
934 - gdbarch_displaced_step_free_closure provides cleanup.
935
936 The gdbarch_displaced_step_copy_insn and
937 gdbarch_displaced_step_fixup functions must be written so that
938 copying an instruction with gdbarch_displaced_step_copy_insn,
939 single-stepping across the copied instruction, and then applying
940 gdbarch_displaced_step_fixup should have the same effects on the
941 thread's memory and registers as stepping the instruction in place
942 would have. Exactly which responsibilities fall to the copy and
943 which fall to the fixup is up to the author of those functions.
944
945 See the comments in gdbarch.sh for details.
946
947 Note that displaced stepping and software single-step cannot
948 currently be used in combination, although with some care I think
949 they could be made to. Software single-step works by placing
950 breakpoints on all possible subsequent instructions; if the
951 displaced instruction is a PC-relative jump, those breakpoints
952 could fall in very strange places --- on pages that aren't
953 executable, or at addresses that are not proper instruction
954 boundaries. (We do generally let other threads run while we wait
955 to hit the software single-step breakpoint, and they might
956 encounter such a corrupted instruction.) One way to work around
957 this would be to have gdbarch_displaced_step_copy_insn fully
958 simulate the effect of PC-relative instructions (and return NULL)
959 on architectures that use software single-stepping.
960
961 In non-stop mode, we can have independent and simultaneous step
962 requests, so more than one thread may need to simultaneously step
963 over a breakpoint. The current implementation assumes there is
964 only one scratch space per process. In this case, we have to
965 serialize access to the scratch space. If thread A wants to step
966 over a breakpoint, but we are currently waiting for some other
967 thread to complete a displaced step, we leave thread A stopped and
968 place it in the displaced_step_request_queue. Whenever a displaced
969 step finishes, we pick the next thread in the queue and start a new
970 displaced step operation on it. See displaced_step_prepare and
971 displaced_step_fixup for details. */
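
/* Illustrative sketch (an editorial addition, not GDB code): the
   sequence n1-n4 above, expressed with the gdbarch methods it relies
   on, for a single stopped thread PTID with register cache REGCACHE
   and architecture GDBARCH.  Error handling, saving and restoring
   the scratch area, and the request queueing described above are
   omitted; the real implementation lives in displaced_step_prepare
   and displaced_step_fixup below.

     CORE_ADDR original = regcache_read_pc (regcache);
     CORE_ADDR copy = gdbarch_displaced_step_location (gdbarch);
     struct displaced_step_closure *closure;

     closure = gdbarch_displaced_step_copy_insn (gdbarch, original,
                                                 copy, regcache);        (n1)
     regcache_write_pc (regcache, copy);
     target_resume (ptid, 1, TARGET_SIGNAL_0);                           (n2)
     ... wait for the single-step to complete ...
     gdbarch_displaced_step_fixup (gdbarch, closure, original, copy,
                                   regcache);                            (n3)
     gdbarch_displaced_step_free_closure (gdbarch, closure);
     target_resume (ptid, 0, TARGET_SIGNAL_0);                           (n4)
*/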
972
973 struct displaced_step_request
974 {
975 ptid_t ptid;
976 struct displaced_step_request *next;
977 };
978
979 /* Per-inferior displaced stepping state. */
980 struct displaced_step_inferior_state
981 {
982 /* Pointer to next in linked list. */
983 struct displaced_step_inferior_state *next;
984
985 /* The process this displaced step state refers to. */
986 int pid;
987
988 /* A queue of pending displaced stepping requests. One entry per
989 thread that needs to do a displaced step. */
990 struct displaced_step_request *step_request_queue;
991
992 /* If this is not null_ptid, this is the thread carrying out a
993 displaced single-step in process PID. This thread's state will
994 require fixing up once it has completed its step. */
995 ptid_t step_ptid;
996
997 /* The architecture the thread had when we stepped it. */
998 struct gdbarch *step_gdbarch;
999
1000 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
1001 for post-step cleanup. */
1002 struct displaced_step_closure *step_closure;
1003
1004 /* The address of the original instruction, and the copy we
1005 made. */
1006 CORE_ADDR step_original, step_copy;
1007
1008 /* Saved contents of copy area. */
1009 gdb_byte *step_saved_copy;
1010 };
1011
1012 /* The list of states of processes involved in displaced stepping
1013 presently. */
1014 static struct displaced_step_inferior_state *displaced_step_inferior_states;
1015
1016 /* Get the displaced stepping state of process PID. */
1017
1018 static struct displaced_step_inferior_state *
1019 get_displaced_stepping_state (int pid)
1020 {
1021 struct displaced_step_inferior_state *state;
1022
1023 for (state = displaced_step_inferior_states;
1024 state != NULL;
1025 state = state->next)
1026 if (state->pid == pid)
1027 return state;
1028
1029 return NULL;
1030 }
1031
1032 /* Add a new displaced stepping state for process PID to the displaced
1033 stepping state list, or return a pointer to the existing entry if
1034 one already exists. Never returns NULL.
1035
1036 static struct displaced_step_inferior_state *
1037 add_displaced_stepping_state (int pid)
1038 {
1039 struct displaced_step_inferior_state *state;
1040
1041 for (state = displaced_step_inferior_states;
1042 state != NULL;
1043 state = state->next)
1044 if (state->pid == pid)
1045 return state;
1046
1047 state = xcalloc (1, sizeof (*state));
1048 state->pid = pid;
1049 state->next = displaced_step_inferior_states;
1050 displaced_step_inferior_states = state;
1051
1052 return state;
1053 }
1054
1055 /* Remove the displaced stepping state of process PID. */
1056
1057 static void
1058 remove_displaced_stepping_state (int pid)
1059 {
1060 struct displaced_step_inferior_state *it, **prev_next_p;
1061
1062 gdb_assert (pid != 0);
1063
1064 it = displaced_step_inferior_states;
1065 prev_next_p = &displaced_step_inferior_states;
1066 while (it)
1067 {
1068 if (it->pid == pid)
1069 {
1070 *prev_next_p = it->next;
1071 xfree (it);
1072 return;
1073 }
1074
1075 prev_next_p = &it->next;
1076 it = *prev_next_p;
1077 }
1078 }
1079
1080 static void
1081 infrun_inferior_exit (struct inferior *inf)
1082 {
1083 remove_displaced_stepping_state (inf->pid);
1084 }
1085
1086 /* Enum strings for "set|show displaced-stepping". */
1087
1088 static const char can_use_displaced_stepping_auto[] = "auto";
1089 static const char can_use_displaced_stepping_on[] = "on";
1090 static const char can_use_displaced_stepping_off[] = "off";
1091 static const char *can_use_displaced_stepping_enum[] =
1092 {
1093 can_use_displaced_stepping_auto,
1094 can_use_displaced_stepping_on,
1095 can_use_displaced_stepping_off,
1096 NULL,
1097 };
1098
1099 /* If ON, and the architecture supports it, GDB will use displaced
1100 stepping to step over breakpoints. If OFF, or if the architecture
1101 doesn't support it, GDB will instead use the traditional
1102 hold-and-step approach. If AUTO (which is the default), GDB will
1103 decide which technique to use to step over breakpoints depending on
1104 which of all-stop or non-stop mode is active --- displaced stepping
1105 in non-stop mode; hold-and-step in all-stop mode. */
1106
1107 static const char *can_use_displaced_stepping =
1108 can_use_displaced_stepping_auto;
1109
1110 static void
1111 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1112 struct cmd_list_element *c,
1113 const char *value)
1114 {
1115 if (can_use_displaced_stepping == can_use_displaced_stepping_auto)
1116 fprintf_filtered (file, _("\
1117 Debugger's willingness to use displaced stepping to step over \
1118 breakpoints is %s (currently %s).\n"),
1119 value, non_stop ? "on" : "off");
1120 else
1121 fprintf_filtered (file, _("\
1122 Debugger's willingness to use displaced stepping to step over \
1123 breakpoints is %s.\n"), value);
1124 }
1125
1126 /* Return non-zero if displaced stepping can/should be used to step
1127 over breakpoints. */
1128
1129 static int
1130 use_displaced_stepping (struct gdbarch *gdbarch)
1131 {
1132 return (((can_use_displaced_stepping == can_use_displaced_stepping_auto
1133 && non_stop)
1134 || can_use_displaced_stepping == can_use_displaced_stepping_on)
1135 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1136 && !RECORD_IS_USED);
1137 }
1138
1139 /* Clean out any stray displaced stepping state. */
1140 static void
1141 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1142 {
1143 /* Indicate that there is no cleanup pending. */
1144 displaced->step_ptid = null_ptid;
1145
1146 if (displaced->step_closure)
1147 {
1148 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1149 displaced->step_closure);
1150 displaced->step_closure = NULL;
1151 }
1152 }
1153
1154 static void
1155 displaced_step_clear_cleanup (void *arg)
1156 {
1157 struct displaced_step_inferior_state *state = arg;
1158
1159 displaced_step_clear (state);
1160 }
1161
1162 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1163 void
1164 displaced_step_dump_bytes (struct ui_file *file,
1165 const gdb_byte *buf,
1166 size_t len)
1167 {
1168 int i;
1169
1170 for (i = 0; i < len; i++)
1171 fprintf_unfiltered (file, "%02x ", buf[i]);
1172 fputs_unfiltered ("\n", file);
1173 }
1174
1175 /* Prepare to single-step, using displaced stepping.
1176
1177 Note that we cannot use displaced stepping when we have a signal to
1178 deliver. If we have a signal to deliver and an instruction to step
1179 over, then after the step, there will be no indication from the
1180 target whether the thread entered a signal handler or ignored the
1181 signal and stepped over the instruction successfully --- both cases
1182 result in a simple SIGTRAP. In the first case we mustn't do a
1183 fixup, and in the second case we must --- but we can't tell which.
1184 Comments in the code for 'random signals' in handle_inferior_event
1185 explain how we handle this case instead.
1186
1187 Returns 1 if preparing was successful -- this thread is going to be
1188 stepped now; or 0 if displaced stepping this thread got queued. */
1189 static int
1190 displaced_step_prepare (ptid_t ptid)
1191 {
1192 struct cleanup *old_cleanups, *ignore_cleanups;
1193 struct regcache *regcache = get_thread_regcache (ptid);
1194 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1195 CORE_ADDR original, copy;
1196 ULONGEST len;
1197 struct displaced_step_closure *closure;
1198 struct displaced_step_inferior_state *displaced;
1199
1200 /* We should never reach this function if the architecture does not
1201 support displaced stepping. */
1202 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1203
1204 /* We have to displaced step one thread at a time, as we only have
1205 access to a single scratch space per inferior. */
1206
1207 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1208
1209 if (!ptid_equal (displaced->step_ptid, null_ptid))
1210 {
1211 /* Already waiting for a displaced step to finish. Defer this
1212 request and place in queue. */
1213 struct displaced_step_request *req, *new_req;
1214
1215 if (debug_displaced)
1216 fprintf_unfiltered (gdb_stdlog,
1217 "displaced: defering step of %s\n",
1218 target_pid_to_str (ptid));
1219
1220 new_req = xmalloc (sizeof (*new_req));
1221 new_req->ptid = ptid;
1222 new_req->next = NULL;
1223
1224 if (displaced->step_request_queue)
1225 {
1226 for (req = displaced->step_request_queue;
1227 req && req->next;
1228 req = req->next)
1229 ;
1230 req->next = new_req;
1231 }
1232 else
1233 displaced->step_request_queue = new_req;
1234
1235 return 0;
1236 }
1237 else
1238 {
1239 if (debug_displaced)
1240 fprintf_unfiltered (gdb_stdlog,
1241 "displaced: stepping %s now\n",
1242 target_pid_to_str (ptid));
1243 }
1244
1245 displaced_step_clear (displaced);
1246
1247 old_cleanups = save_inferior_ptid ();
1248 inferior_ptid = ptid;
1249
1250 original = regcache_read_pc (regcache);
1251
1252 copy = gdbarch_displaced_step_location (gdbarch);
1253 len = gdbarch_max_insn_length (gdbarch);
1254
1255 /* Save the original contents of the copy area. */
1256 displaced->step_saved_copy = xmalloc (len);
1257 ignore_cleanups = make_cleanup (free_current_contents,
1258 &displaced->step_saved_copy);
1259 read_memory (copy, displaced->step_saved_copy, len);
1260 if (debug_displaced)
1261 {
1262 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1263 paddress (gdbarch, copy));
1264 displaced_step_dump_bytes (gdb_stdlog,
1265 displaced->step_saved_copy,
1266 len);
1267 };
1268
1269 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1270 original, copy, regcache);
1271
1272 /* We don't support the fully-simulated case at present. */
1273 gdb_assert (closure);
1274
1275 /* Save the information we need to fix things up if the step
1276 succeeds. */
1277 displaced->step_ptid = ptid;
1278 displaced->step_gdbarch = gdbarch;
1279 displaced->step_closure = closure;
1280 displaced->step_original = original;
1281 displaced->step_copy = copy;
1282
1283 make_cleanup (displaced_step_clear_cleanup, displaced);
1284
1285 /* Resume execution at the copy. */
1286 regcache_write_pc (regcache, copy);
1287
1288 discard_cleanups (ignore_cleanups);
1289
1290 do_cleanups (old_cleanups);
1291
1292 if (debug_displaced)
1293 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1294 paddress (gdbarch, copy));
1295
1296 return 1;
1297 }
1298
1299 static void
1300 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
1301 {
1302 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1303
1304 inferior_ptid = ptid;
1305 write_memory (memaddr, myaddr, len);
1306 do_cleanups (ptid_cleanup);
1307 }
1308
1309 static void
1310 displaced_step_fixup (ptid_t event_ptid, enum target_signal signal)
1311 {
1312 struct cleanup *old_cleanups;
1313 struct displaced_step_inferior_state *displaced
1314 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1315
1316 /* Was any thread of this process doing a displaced step? */
1317 if (displaced == NULL)
1318 return;
1319
1320 /* Was this event for the pid we displaced? */
1321 if (ptid_equal (displaced->step_ptid, null_ptid)
1322 || ! ptid_equal (displaced->step_ptid, event_ptid))
1323 return;
1324
1325 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1326
1327 /* Restore the contents of the copy area. */
1328 {
1329 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1330
1331 write_memory_ptid (displaced->step_ptid, displaced->step_copy,
1332 displaced->step_saved_copy, len);
1333 if (debug_displaced)
1334 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s\n",
1335 paddress (displaced->step_gdbarch,
1336 displaced->step_copy));
1337 }
1338
1339 /* Did the instruction complete successfully? */
1340 if (signal == TARGET_SIGNAL_TRAP)
1341 {
1342 /* Fix up the resulting state. */
1343 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1344 displaced->step_closure,
1345 displaced->step_original,
1346 displaced->step_copy,
1347 get_thread_regcache (displaced->step_ptid));
1348 }
1349 else
1350 {
1351 /* Since the instruction didn't complete, all we can do is
1352 relocate the PC. */
1353 struct regcache *regcache = get_thread_regcache (event_ptid);
1354 CORE_ADDR pc = regcache_read_pc (regcache);
1355
1356 pc = displaced->step_original + (pc - displaced->step_copy);
1357 regcache_write_pc (regcache, pc);
1358 }
1359
1360 do_cleanups (old_cleanups);
1361
1362 displaced->step_ptid = null_ptid;
1363
1364 /* Are there any pending displaced stepping requests? If so, run
1365 one now. Leave the state object around, since we're likely to
1366 need it again soon. */
1367 while (displaced->step_request_queue)
1368 {
1369 struct displaced_step_request *head;
1370 ptid_t ptid;
1371 struct regcache *regcache;
1372 struct gdbarch *gdbarch;
1373 CORE_ADDR actual_pc;
1374 struct address_space *aspace;
1375
1376 head = displaced->step_request_queue;
1377 ptid = head->ptid;
1378 displaced->step_request_queue = head->next;
1379 xfree (head);
1380
1381 context_switch (ptid);
1382
1383 regcache = get_thread_regcache (ptid);
1384 actual_pc = regcache_read_pc (regcache);
1385 aspace = get_regcache_aspace (regcache);
1386
1387 if (breakpoint_here_p (aspace, actual_pc))
1388 {
1389 if (debug_displaced)
1390 fprintf_unfiltered (gdb_stdlog,
1391 "displaced: stepping queued %s now\n",
1392 target_pid_to_str (ptid));
1393
1394 displaced_step_prepare (ptid);
1395
1396 gdbarch = get_regcache_arch (regcache);
1397
1398 if (debug_displaced)
1399 {
1400 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1401 gdb_byte buf[4];
1402
1403 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1404 paddress (gdbarch, actual_pc));
1405 read_memory (actual_pc, buf, sizeof (buf));
1406 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1407 }
1408
1409 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1410 displaced->step_closure))
1411 target_resume (ptid, 1, TARGET_SIGNAL_0);
1412 else
1413 target_resume (ptid, 0, TARGET_SIGNAL_0);
1414
1415 /* Done, we're stepping a thread. */
1416 break;
1417 }
1418 else
1419 {
1420 int step;
1421 struct thread_info *tp = inferior_thread ();
1422
1423 /* The breakpoint we were sitting under has since been
1424 removed. */
1425 tp->trap_expected = 0;
1426
1427 /* Go back to what we were trying to do. */
1428 step = currently_stepping (tp);
1429
1430 if (debug_displaced)
1431 fprintf_unfiltered (gdb_stdlog, "breakpoint is gone %s: step(%d)\n",
1432 target_pid_to_str (tp->ptid), step);
1433
1434 target_resume (ptid, step, TARGET_SIGNAL_0);
1435 tp->stop_signal = TARGET_SIGNAL_0;
1436
1437 /* This request was discarded. See if there's any other
1438 thread waiting for its turn. */
1439 }
1440 }
1441 }
1442
1443 /* Update global variables holding ptids to hold NEW_PTID if they were
1444 holding OLD_PTID. */
1445 static void
1446 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1447 {
1448 struct displaced_step_request *it;
1449 struct displaced_step_inferior_state *displaced;
1450
1451 if (ptid_equal (inferior_ptid, old_ptid))
1452 inferior_ptid = new_ptid;
1453
1454 if (ptid_equal (singlestep_ptid, old_ptid))
1455 singlestep_ptid = new_ptid;
1456
1457 if (ptid_equal (deferred_step_ptid, old_ptid))
1458 deferred_step_ptid = new_ptid;
1459
1460 for (displaced = displaced_step_inferior_states;
1461 displaced;
1462 displaced = displaced->next)
1463 {
1464 if (ptid_equal (displaced->step_ptid, old_ptid))
1465 displaced->step_ptid = new_ptid;
1466
1467 for (it = displaced->step_request_queue; it; it = it->next)
1468 if (ptid_equal (it->ptid, old_ptid))
1469 it->ptid = new_ptid;
1470 }
1471 }
1472
1473 \f
1474 /* Resuming. */
1475
1476 /* Things to clean up if we QUIT out of resume (). */
1477 static void
1478 resume_cleanups (void *ignore)
1479 {
1480 normal_stop ();
1481 }
1482
1483 static const char schedlock_off[] = "off";
1484 static const char schedlock_on[] = "on";
1485 static const char schedlock_step[] = "step";
1486 static const char *scheduler_enums[] = {
1487 schedlock_off,
1488 schedlock_on,
1489 schedlock_step,
1490 NULL
1491 };
1492 static const char *scheduler_mode = schedlock_off;
1493 static void
1494 show_scheduler_mode (struct ui_file *file, int from_tty,
1495 struct cmd_list_element *c, const char *value)
1496 {
1497 fprintf_filtered (file, _("\
1498 Mode for locking scheduler during execution is \"%s\".\n"),
1499 value);
1500 }
1501
1502 static void
1503 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1504 {
1505 if (!target_can_lock_scheduler)
1506 {
1507 scheduler_mode = schedlock_off;
1508 error (_("Target '%s' cannot support this command."), target_shortname);
1509 }
1510 }
1511
1512 /* True if execution commands resume all threads of all processes by
1513 default; otherwise, resume only threads of the current inferior
1514 process. */
1515 int sched_multi = 0;
1516
1517 /* Try to set up software single stepping over the specified location.
1518 Return 1 if target_resume() should use hardware single step.
1519
1520 GDBARCH the current gdbarch.
1521 PC the location to step over. */
1522
1523 static int
1524 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1525 {
1526 int hw_step = 1;
1527
1528 if (execution_direction == EXEC_FORWARD
1529 && gdbarch_software_single_step_p (gdbarch)
1530 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1531 {
1532 hw_step = 0;
1533 /* Do not pull these breakpoints until after a `wait' in
1534 `wait_for_inferior' */
1535 singlestep_breakpoints_inserted_p = 1;
1536 singlestep_ptid = inferior_ptid;
1537 singlestep_pc = pc;
1538 }
1539 return hw_step;
1540 }
1541
1542 /* Resume the inferior, but allow a QUIT. This is useful if the user
1543 wants to interrupt some lengthy single-stepping operation
1544 (for child processes, the SIGINT goes to the inferior, and so
1545 we get a SIGINT random_signal, but for remote debugging and perhaps
1546 other targets, that's not true).
1547
1548 STEP nonzero if we should step (zero to continue instead).
1549 SIG is the signal to give the inferior (zero for none). */
1550 void
1551 resume (int step, enum target_signal sig)
1552 {
1553 int should_resume = 1;
1554 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1555 struct regcache *regcache = get_current_regcache ();
1556 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1557 struct thread_info *tp = inferior_thread ();
1558 CORE_ADDR pc = regcache_read_pc (regcache);
1559 struct address_space *aspace = get_regcache_aspace (regcache);
1560
1561 QUIT;
1562
1563 if (debug_infrun)
1564 fprintf_unfiltered (gdb_stdlog,
1565 "infrun: resume (step=%d, signal=%d), "
1566 "trap_expected=%d\n",
1567 step, sig, tp->trap_expected);
1568
1569 /* Normally, by the time we reach `resume', the breakpoints are either
1570 removed or inserted, as appropriate. The exception is if we're sitting
1571 at a permanent breakpoint; we need to step over it, but permanent
1572 breakpoints can't be removed. So we have to test for it here. */
1573 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
1574 {
1575 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1576 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1577 else
1578 error (_("\
1579 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1580 how to step past a permanent breakpoint on this architecture. Try using\n\
1581 a command like `return' or `jump' to continue execution."));
1582 }
1583
1584 /* If enabled, step over breakpoints by executing a copy of the
1585 instruction at a different address.
1586
1587 We can't use displaced stepping when we have a signal to deliver;
1588 the comments for displaced_step_prepare explain why. The
1589 comments in handle_inferior_event for dealing with 'random
1590 signals' explain what we do instead. */
1591 if (use_displaced_stepping (gdbarch)
1592 && (tp->trap_expected
1593 || (step && gdbarch_software_single_step_p (gdbarch)))
1594 && sig == TARGET_SIGNAL_0)
1595 {
1596 struct displaced_step_inferior_state *displaced;
1597
1598 if (!displaced_step_prepare (inferior_ptid))
1599 {
1600 /* Got placed in displaced stepping queue. Will be resumed
1601 later when all the currently queued displaced stepping
1602 requests finish. The thread is not executing at this point,
1603 and the call to set_executing will be made later. But we
1604 need to call set_running here, since from frontend point of view,
1605 the thread is running. */
1606 set_running (inferior_ptid, 1);
1607 discard_cleanups (old_cleanups);
1608 return;
1609 }
1610
1611 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1612 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
1613 displaced->step_closure);
1614 }
1615
1616 /* Do we need to do it the hard way, w/temp breakpoints? */
1617 else if (step)
1618 step = maybe_software_singlestep (gdbarch, pc);
1619
1620 if (should_resume)
1621 {
1622 ptid_t resume_ptid;
1623
1624 /* If STEP is set, it's a request to use hardware stepping
1625 facilities. But in that case, we should never
1626 use a singlestep breakpoint. */
1627 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1628
1629 /* Decide the set of threads to ask the target to resume. Start
1630 by assuming everything will be resumed, then narrow the set
1631 by applying increasingly restrictive conditions. */
1632
1633 /* By default, resume all threads of all processes. */
1634 resume_ptid = RESUME_ALL;
1635
1636 /* Maybe resume only all threads of the current process. */
1637 if (!sched_multi && target_supports_multi_process ())
1638 {
1639 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1640 }
1641
1642 /* Maybe resume a single thread after all. */
1643 if (singlestep_breakpoints_inserted_p
1644 && stepping_past_singlestep_breakpoint)
1645 {
1646 /* The situation here is as follows. In thread T1 we wanted to
1647 single-step. Lacking hardware single-stepping, we've
1648 set a breakpoint at the PC of the next instruction -- call it
1649 P. After resuming, we've hit that breakpoint in thread T2.
1650 Now we've removed the original breakpoint, inserted a breakpoint
1651 at P+1, and try to step to advance T2 past the breakpoint.
1652 We need to step only T2, as if T1 is allowed to run freely,
1653 it can run past P, and if other threads are allowed to run,
1654 they can hit the breakpoint at P+1, and nested hits of single-step
1655 breakpoints are not something we'd want -- that's complicated
1656 to support, and has no value. */
1657 resume_ptid = inferior_ptid;
1658 }
1659 else if ((step || singlestep_breakpoints_inserted_p)
1660 && tp->trap_expected)
1661 {
1662 /* We're allowing a thread to run past a breakpoint it has
1663 hit, by single-stepping the thread with the breakpoint
1664 removed. In that case, we need to single-step only this
1665 thread, and keep others stopped, as they can miss this
1666 breakpoint if allowed to run.
1667
1668 The current code actually removes all breakpoints when
1669 doing this, not just the one being stepped over, so if we
1670 let other threads run, we can actually miss any
1671 breakpoint, not just the one at PC. */
1672 resume_ptid = inferior_ptid;
1673 }
1674 else if (non_stop)
1675 {
1676 /* With non-stop mode on, threads are always handled
1677 individually. */
1678 resume_ptid = inferior_ptid;
1679 }
1680 else if ((scheduler_mode == schedlock_on)
1681 || (scheduler_mode == schedlock_step
1682 && (step || singlestep_breakpoints_inserted_p)))
1683 {
1684 /* User-settable 'scheduler' mode requires solo thread resume. */
1685 resume_ptid = inferior_ptid;
1686 }
1687
1688 if (gdbarch_cannot_step_breakpoint (gdbarch))
1689 {
1690 /* Most targets can step a breakpoint instruction, thus
1691 executing it normally. But if this one cannot, just
1692 continue and we will hit it anyway. */
1693 if (step && breakpoint_inserted_here_p (aspace, pc))
1694 step = 0;
1695 }
1696
1697 if (debug_displaced
1698 && use_displaced_stepping (gdbarch)
1699 && tp->trap_expected)
1700 {
1701 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1702 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1703 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1704 gdb_byte buf[4];
1705
1706 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1707 paddress (resume_gdbarch, actual_pc));
1708 read_memory (actual_pc, buf, sizeof (buf));
1709 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1710 }
1711
1712 /* Install inferior's terminal modes. */
1713 target_terminal_inferior ();
1714
1715 /* Avoid confusing the next resume, if the next stop/resume
1716 happens to apply to another thread. */
1717 tp->stop_signal = TARGET_SIGNAL_0;
1718
1719 target_resume (resume_ptid, step, sig);
1720 }
1721
1722 discard_cleanups (old_cleanups);
1723 }
1724 \f
1725 /* Proceeding. */
1726
1727 /* Clear out all variables saying what to do when the inferior is continued.
1728 First do this, then set the ones you want, then call `proceed'. */
1729
1730 static void
1731 clear_proceed_status_thread (struct thread_info *tp)
1732 {
1733 if (debug_infrun)
1734 fprintf_unfiltered (gdb_stdlog,
1735 "infrun: clear_proceed_status_thread (%s)\n",
1736 target_pid_to_str (tp->ptid));
1737
1738 tp->trap_expected = 0;
1739 tp->step_range_start = 0;
1740 tp->step_range_end = 0;
1741 tp->step_frame_id = null_frame_id;
1742 tp->step_stack_frame_id = null_frame_id;
1743 tp->step_over_calls = STEP_OVER_UNDEBUGGABLE;
1744 tp->stop_requested = 0;
1745
1746 tp->stop_step = 0;
1747
1748 tp->proceed_to_finish = 0;
1749
1750 /* Discard any remaining commands or status from previous stop. */
1751 bpstat_clear (&tp->stop_bpstat);
1752 }
1753
1754 static int
1755 clear_proceed_status_callback (struct thread_info *tp, void *data)
1756 {
1757 if (is_exited (tp->ptid))
1758 return 0;
1759
1760 clear_proceed_status_thread (tp);
1761 return 0;
1762 }
1763
1764 void
1765 clear_proceed_status (void)
1766 {
1767 if (!non_stop)
1768 {
1769 /* In all-stop mode, delete the per-thread status of all
1770 threads. Even if inferior_ptid is null_ptid, there may be
1771 threads on the list. E.g., we may be launching a new
1772 process while selecting the executable. */
1773 iterate_over_threads (clear_proceed_status_callback, NULL);
1774 }
1775
1776 if (!ptid_equal (inferior_ptid, null_ptid))
1777 {
1778 struct inferior *inferior;
1779
1780 if (non_stop)
1781 {
1782 /* If in non-stop mode, only delete the per-thread status of
1783 the current thread. */
1784 clear_proceed_status_thread (inferior_thread ());
1785 }
1786
1787 inferior = current_inferior ();
1788 inferior->stop_soon = NO_STOP_QUIETLY;
1789 }
1790
1791 stop_after_trap = 0;
1792
1793 observer_notify_about_to_proceed ();
1794
1795 if (stop_registers)
1796 {
1797 regcache_xfree (stop_registers);
1798 stop_registers = NULL;
1799 }
1800 }
1801
1802 /* Check the current thread against the thread that reported the most recent
1803 event. If a step-over is required, return TRUE and set the current thread
1804 to the old thread. Otherwise return FALSE.
1805
1806 This should be suitable for any targets that support threads. */
1807
1808 static int
1809 prepare_to_proceed (int step)
1810 {
1811 ptid_t wait_ptid;
1812 struct target_waitstatus wait_status;
1813 int schedlock_enabled;
1814
1815 /* With non-stop mode on, threads are always handled individually. */
1816 gdb_assert (! non_stop);
1817
1818 /* Get the last target status returned by target_wait(). */
1819 get_last_target_status (&wait_ptid, &wait_status);
1820
1821 /* Make sure we were stopped at a breakpoint. */
1822 if (wait_status.kind != TARGET_WAITKIND_STOPPED
1823 || (wait_status.value.sig != TARGET_SIGNAL_TRAP
1824 && wait_status.value.sig != TARGET_SIGNAL_ILL
1825 && wait_status.value.sig != TARGET_SIGNAL_SEGV
1826 && wait_status.value.sig != TARGET_SIGNAL_EMT))
1827 {
1828 return 0;
1829 }
1830
1831 schedlock_enabled = (scheduler_mode == schedlock_on
1832 || (scheduler_mode == schedlock_step
1833 && step));
1834
1835 /* Don't switch over to WAIT_PTID if scheduler locking is on. */
1836 if (schedlock_enabled)
1837 return 0;
1838
1839 /* Don't switch over if we're about to resume a process other
1840 than WAIT_PTID's, and schedule-multiple is off. */
1841 if (!sched_multi
1842 && ptid_get_pid (wait_ptid) != ptid_get_pid (inferior_ptid))
1843 return 0;
1844
1845 /* We have switched away from WAIT_PTID. */
1846 if (!ptid_equal (wait_ptid, minus_one_ptid)
1847 && !ptid_equal (inferior_ptid, wait_ptid))
1848 {
1849 struct regcache *regcache = get_thread_regcache (wait_ptid);
1850
1851 if (breakpoint_here_p (get_regcache_aspace (regcache),
1852 regcache_read_pc (regcache)))
1853 {
1854 /* If stepping, remember current thread to switch back to. */
1855 if (step)
1856 deferred_step_ptid = inferior_ptid;
1857
1858 /* Switch back to the WAIT_PTID thread. */
1859 switch_to_thread (wait_ptid);
1860
1861 /* We return 1 to indicate that there is a breakpoint here,
1862 so we need to step over it before continuing to avoid
1863 hitting it straight away. */
1864 return 1;
1865 }
1866 }
1867
1868 return 0;
1869 }
1870
1871 /* Basic routine for continuing the program in various fashions.
1872
1873 ADDR is the address to resume at, or -1 for resume where stopped.
1874 SIGGNAL is the signal to give it, or 0 for none,
1875 or -1 to act according to how it stopped.
1876 STEP is nonzero if we should trap after one instruction;
1877 -1 means return after that and print nothing.
1878 You should probably set various step_... variables
1879 before calling here, if you are stepping.
1880
1881 You should call clear_proceed_status before calling proceed. */
1882
1883 void
1884 proceed (CORE_ADDR addr, enum target_signal siggnal, int step)
1885 {
1886 struct regcache *regcache;
1887 struct gdbarch *gdbarch;
1888 struct thread_info *tp;
1889 CORE_ADDR pc;
1890 struct address_space *aspace;
1891 int oneproc = 0;
1892
1893 /* If we're stopped at a fork/vfork, follow the branch set by the
1894 "set follow-fork-mode" command; otherwise, we'll just proceed
1895 resuming the current thread. */
1896 if (!follow_fork ())
1897 {
1898 /* The target for some reason decided not to resume. */
1899 normal_stop ();
1900 return;
1901 }
1902
1903 regcache = get_current_regcache ();
1904 gdbarch = get_regcache_arch (regcache);
1905 aspace = get_regcache_aspace (regcache);
1906 pc = regcache_read_pc (regcache);
1907
1908 if (step > 0)
1909 step_start_function = find_pc_function (pc);
1910 if (step < 0)
1911 stop_after_trap = 1;
1912
1913 if (addr == (CORE_ADDR) -1)
1914 {
1915 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
1916 && execution_direction != EXEC_REVERSE)
1917 /* There is a breakpoint at the address we will resume at,
1918 step one instruction before inserting breakpoints so that
1919 we do not stop right away (and report a second hit at this
1920 breakpoint).
1921
1922 Note, we don't do this in reverse, because we won't
1923 actually be executing the breakpoint insn anyway.
1924 We'll be (un-)executing the previous instruction. */
1925
1926 oneproc = 1;
1927 else if (gdbarch_single_step_through_delay_p (gdbarch)
1928 && gdbarch_single_step_through_delay (gdbarch,
1929 get_current_frame ()))
1930 /* We stepped onto an instruction that needs to be stepped
1931 again before re-inserting the breakpoint, do so. */
1932 oneproc = 1;
1933 }
1934 else
1935 {
1936 regcache_write_pc (regcache, addr);
1937 }
1938
1939 if (debug_infrun)
1940 fprintf_unfiltered (gdb_stdlog,
1941 "infrun: proceed (addr=%s, signal=%d, step=%d)\n",
1942 paddress (gdbarch, addr), siggnal, step);
1943
1944 /* We're handling a live event, so make sure we're doing live
1945 debugging. If we're looking at traceframes while the target is
1946 running, we're going to need to get back to that mode after
1947 handling the event. */
1948 if (non_stop)
1949 {
1950 make_cleanup_restore_current_traceframe ();
1951 set_traceframe_number (-1);
1952 }
1953
1954 if (non_stop)
1955 /* In non-stop, each thread is handled individually. The context
1956 must already be set to the right thread here. */
1957 ;
1958 else
1959 {
1960 /* In a multi-threaded task we may select another thread and
1961 then continue or step.
1962
1963 But if the old thread was stopped at a breakpoint, it will
1964 immediately cause another breakpoint stop without any
1965 execution (i.e. it will report a breakpoint hit incorrectly).
1966 So we must step over it first.
1967
1968 prepare_to_proceed checks the current thread against the
1969 thread that reported the most recent event. If a step-over
1970 is required it returns TRUE and sets the current thread to
1971 the old thread. */
1972 if (prepare_to_proceed (step))
1973 oneproc = 1;
1974 }
1975
1976 /* prepare_to_proceed may change the current thread. */
1977 tp = inferior_thread ();
1978
1979 if (oneproc)
1980 {
1981 tp->trap_expected = 1;
1982 /* If displaced stepping is enabled, we can step over the
1983 breakpoint without hitting it, so leave all breakpoints
1984 inserted. Otherwise we need to disable all breakpoints, step
1985 one instruction, and then re-add them when that step is
1986 finished. */
1987 if (!use_displaced_stepping (gdbarch))
1988 remove_breakpoints ();
1989 }
1990
1991 /* We can insert breakpoints if we're not trying to step over one,
1992 or if we are stepping over one but we're using displaced stepping
1993 to do so. */
1994 if (! tp->trap_expected || use_displaced_stepping (gdbarch))
1995 insert_breakpoints ();
1996
1997 if (!non_stop)
1998 {
1999 /* Pass the last stop signal to the thread we're resuming,
2000 irrespective of whether the current thread is the thread that
2001 got the last event or not. This was historically GDB's
2002 behaviour before keeping a stop_signal per thread. */
2003
2004 struct thread_info *last_thread;
2005 ptid_t last_ptid;
2006 struct target_waitstatus last_status;
2007
2008 get_last_target_status (&last_ptid, &last_status);
2009 if (!ptid_equal (inferior_ptid, last_ptid)
2010 && !ptid_equal (last_ptid, null_ptid)
2011 && !ptid_equal (last_ptid, minus_one_ptid))
2012 {
2013 last_thread = find_thread_ptid (last_ptid);
2014 if (last_thread)
2015 {
2016 tp->stop_signal = last_thread->stop_signal;
2017 last_thread->stop_signal = TARGET_SIGNAL_0;
2018 }
2019 }
2020 }
2021
2022 if (siggnal != TARGET_SIGNAL_DEFAULT)
2023 tp->stop_signal = siggnal;
2024 /* If this signal should not be seen by the program,
2025 give it zero. Used for debugging signals. */
2026 else if (!signal_program[tp->stop_signal])
2027 tp->stop_signal = TARGET_SIGNAL_0;
2028
2029 annotate_starting ();
2030
2031 /* Make sure that output from GDB appears before output from the
2032 inferior. */
2033 gdb_flush (gdb_stdout);
2034
2035 /* Refresh prev_pc value just prior to resuming. This used to be
2036 done in stop_stepping; however, setting prev_pc there did not handle
2037 scenarios such as inferior function calls or returning from
2038 a function via the return command. In those cases, the prev_pc
2039 value was not set properly for subsequent commands. The prev_pc value
2040 is used to initialize the starting line number in the ecs. With an
2041 invalid value, the gdb next command ends up stopping at the position
2042 represented by the next line table entry past our start position.
2043 On platforms that generate one line table entry per line, this
2044 is not a problem. However, on the ia64, the compiler generates
2045 extraneous line table entries that do not increase the line number.
2046 When we issue the gdb next command on the ia64 after an inferior call
2047 or a return command, we often end up a few instructions forward, still
2048 within the original line we started in.
2049
2050 An attempt was made to refresh the prev_pc at the same time the
2051 execution_control_state is initialized (for instance, just before
2052 waiting for an inferior event). But this approach did not work
2053 because of platforms that use ptrace, where the pc register cannot
2054 be read unless the inferior is stopped. At that point, we are not
2055 guaranteed the inferior is stopped and so the regcache_read_pc() call
2056 can fail. Setting the prev_pc value here ensures the value is updated
2057 correctly when the inferior is stopped. */
2058 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2059
2060 /* Fill in with reasonable starting values. */
2061 init_thread_stepping_state (tp);
2062
2063 /* Reset to normal state. */
2064 init_infwait_state ();
2065
2066 /* Resume inferior. */
2067 resume (oneproc || step || bpstat_should_step (), tp->stop_signal);
2068
2069 /* Wait for it to stop (if not standalone)
2070 and in any case decode why it stopped, and act accordingly. */
2071 /* Do this only if we are not using the event loop, or if the target
2072 does not support asynchronous execution. */
2073 if (!target_can_async_p ())
2074 {
2075 wait_for_inferior (0);
2076 normal_stop ();
2077 }
2078 }
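/* A minimal illustrative sketch (not part of the actual command
   flow; only the order of calls is taken from the comments above)
   of the documented calling contract -- reset the proceed status
   first, then resume:

       clear_proceed_status ();
       proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);

   Passing -1 as ADDR resumes at the stopped PC, TARGET_SIGNAL_DEFAULT
   leaves the thread's stop_signal alone (the "act according to how
   it stopped" case described above), and STEP == 0 means no
   single-stepping.  */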
2079 \f
2080
2081 /* Start remote-debugging of a machine over a serial link. */
2082
2083 void
2084 start_remote (int from_tty)
2085 {
2086 struct inferior *inferior;
2087
2088 init_wait_for_inferior ();
2089 inferior = current_inferior ();
2090 inferior->stop_soon = STOP_QUIETLY_REMOTE;
2091
2092 /* Always go on waiting for the target, regardless of the mode. */
2093 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2094 indicate to wait_for_inferior that a target should timeout if
2095 nothing is returned (instead of just blocking). Because of this,
2096 targets expecting an immediate response need to, internally, set
2097 things up so that the target_wait() is forced to eventually
2098 timeout. */
2099 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2100 differentiate to its caller what the state of the target is after
2101 the initial open has been performed. Here we're assuming that
2102 the target has stopped. It should be possible to eventually have
2103 target_open() return to the caller an indication that the target
2104 is currently running and GDB state should be set to the same as
2105 for an async run. */
2106 wait_for_inferior (0);
2107
2108 /* Now that the inferior has stopped, do any bookkeeping like
2109 loading shared libraries. We want to do this before normal_stop,
2110 so that the displayed frame is up to date. */
2111 post_create_inferior (&current_target, from_tty);
2112
2113 normal_stop ();
2114 }
2115
2116 /* Initialize static vars when a new inferior begins. */
2117
2118 void
2119 init_wait_for_inferior (void)
2120 {
2121 /* These are meaningless until the first time through wait_for_inferior. */
2122
2123 breakpoint_init_inferior (inf_starting);
2124
2125 clear_proceed_status ();
2126
2127 stepping_past_singlestep_breakpoint = 0;
2128 deferred_step_ptid = null_ptid;
2129
2130 target_last_wait_ptid = minus_one_ptid;
2131
2132 previous_inferior_ptid = null_ptid;
2133 init_infwait_state ();
2134
2135 /* Discard any skipped inlined frames. */
2136 clear_inline_frame_state (minus_one_ptid);
2137 }
2138
2139 \f
2140 /* This enum encodes possible reasons for doing a target_wait, so that
2141 wait_for_inferior (wfi) can call target_wait in one place. (Ultimately
2142 the call will be moved out of the infinite loop entirely.) */
2143
2144 enum infwait_states
2145 {
2146 infwait_normal_state,
2147 infwait_thread_hop_state,
2148 infwait_step_watch_state,
2149 infwait_nonstep_watch_state
2150 };
2151
2152 /* The PTID we'll do a target_wait on.  */
2153 ptid_t waiton_ptid;
2154
2155 /* Current inferior wait state. */
2156 enum infwait_states infwait_state;
2157
2158 /* Data to be passed around while handling an event. This data is
2159 discarded between events. */
2160 struct execution_control_state
2161 {
2162 ptid_t ptid;
2163 /* The thread that got the event, if this was a thread event; NULL
2164 otherwise. */
2165 struct thread_info *event_thread;
2166
2167 struct target_waitstatus ws;
2168 int random_signal;
2169 CORE_ADDR stop_func_start;
2170 CORE_ADDR stop_func_end;
2171 char *stop_func_name;
2172 int new_thread_event;
2173 int wait_some_more;
2174 };
2175
2176 static void handle_inferior_event (struct execution_control_state *ecs);
2177
2178 static void handle_step_into_function (struct gdbarch *gdbarch,
2179 struct execution_control_state *ecs);
2180 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2181 struct execution_control_state *ecs);
2182 static void insert_step_resume_breakpoint_at_frame (struct frame_info *step_frame);
2183 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
2184 static void insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
2185 struct symtab_and_line sr_sal,
2186 struct frame_id sr_id);
2187 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
2188
2189 static void stop_stepping (struct execution_control_state *ecs);
2190 static void prepare_to_wait (struct execution_control_state *ecs);
2191 static void keep_going (struct execution_control_state *ecs);
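/* An illustrative sketch of how the state above is typically used
   (this mirrors the loop bodies of wait_for_inferior and
   fetch_inferior_event below; it is not a separate API):

       struct execution_control_state ecss;
       struct execution_control_state *ecs = &ecss;

       memset (ecs, 0, sizeof (*ecs));
       registers_changed ();
       ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
       handle_inferior_event (ecs);
       if (!ecs->wait_some_more)
         normal_stop ();

   handle_inferior_event fills in ecs->event_thread itself and sets
   ecs->wait_some_more when the inferior was simply resumed and
   should be waited on again.  */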
2192
2193 /* Callback for iterate_over_threads. If the thread is stopped, but
2194 the user/frontend doesn't know about that yet, go through
2195 normal_stop, as if the thread had just stopped now. ARG points at
2196 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2197 ptid_is_pid(PTID) is true, applies to all threads of the process
2198 pointed at by PTID. Otherwise, apply only to the thread pointed at by
2199 PTID. */
2200
2201 static int
2202 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2203 {
2204 ptid_t ptid = * (ptid_t *) arg;
2205
2206 if ((ptid_equal (info->ptid, ptid)
2207 || ptid_equal (minus_one_ptid, ptid)
2208 || (ptid_is_pid (ptid)
2209 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2210 && is_running (info->ptid)
2211 && !is_executing (info->ptid))
2212 {
2213 struct cleanup *old_chain;
2214 struct execution_control_state ecss;
2215 struct execution_control_state *ecs = &ecss;
2216
2217 memset (ecs, 0, sizeof (*ecs));
2218
2219 old_chain = make_cleanup_restore_current_thread ();
2220
2221 switch_to_thread (info->ptid);
2222
2223 /* Go through handle_inferior_event/normal_stop, so we always
2224 have consistent output as if the stop event had been
2225 reported. */
2226 ecs->ptid = info->ptid;
2227 ecs->event_thread = find_thread_ptid (info->ptid);
2228 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2229 ecs->ws.value.sig = TARGET_SIGNAL_0;
2230
2231 handle_inferior_event (ecs);
2232
2233 if (!ecs->wait_some_more)
2234 {
2235 struct thread_info *tp;
2236
2237 normal_stop ();
2238
2239 /* Finish off the continuations. The continuations
2240 themselves are responsible for realising the thread
2241 didn't finish what it was supposed to do. */
2242 tp = inferior_thread ();
2243 do_all_intermediate_continuations_thread (tp);
2244 do_all_continuations_thread (tp);
2245 }
2246
2247 do_cleanups (old_chain);
2248 }
2249
2250 return 0;
2251 }
2252
2253 /* This function is attached as a "thread_stop_requested" observer.
2254 Cleanup local state that assumed the PTID was to be resumed, and
2255 report the stop to the frontend. */
2256
2257 static void
2258 infrun_thread_stop_requested (ptid_t ptid)
2259 {
2260 struct displaced_step_inferior_state *displaced;
2261
2262 /* PTID was requested to stop. Remove it from the displaced
2263 stepping queue, so we don't try to resume it automatically. */
2264
2265 for (displaced = displaced_step_inferior_states;
2266 displaced;
2267 displaced = displaced->next)
2268 {
2269 struct displaced_step_request *it, **prev_next_p;
2270
2271 it = displaced->step_request_queue;
2272 prev_next_p = &displaced->step_request_queue;
2273 while (it)
2274 {
2275 if (ptid_match (it->ptid, ptid))
2276 {
2277 *prev_next_p = it->next;
2278 it->next = NULL;
2279 xfree (it);
2280 }
2281 else
2282 {
2283 prev_next_p = &it->next;
2284 }
2285
2286 it = *prev_next_p;
2287 }
2288 }
2289
2290 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2291 }
2292
2293 static void
2294 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2295 {
2296 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2297 nullify_last_target_wait_ptid ();
2298 }
2299
2300 /* Callback for iterate_over_threads. */
2301
2302 static int
2303 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2304 {
2305 if (is_exited (info->ptid))
2306 return 0;
2307
2308 delete_step_resume_breakpoint (info);
2309 return 0;
2310 }
2311
2312 /* In all-stop, delete the step resume breakpoint of any thread that
2313 had one. In non-stop, delete the step resume breakpoint of the
2314 thread that just stopped. */
2315
2316 static void
2317 delete_step_thread_step_resume_breakpoint (void)
2318 {
2319 if (!target_has_execution
2320 || ptid_equal (inferior_ptid, null_ptid))
2321 /* If the inferior has exited, we have already deleted the step
2322 resume breakpoints out of GDB's lists. */
2323 return;
2324
2325 if (non_stop)
2326 {
2327 /* If in non-stop mode, only delete the step-resume or
2328 longjmp-resume breakpoint of the thread that just stopped
2329 stepping. */
2330 struct thread_info *tp = inferior_thread ();
2331
2332 delete_step_resume_breakpoint (tp);
2333 }
2334 else
2335 /* In all-stop mode, delete all step-resume and longjmp-resume
2336 breakpoints of any thread that had them. */
2337 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2338 }
2339
2340 /* A cleanup wrapper. */
2341
2342 static void
2343 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2344 {
2345 delete_step_thread_step_resume_breakpoint ();
2346 }
2347
2348 /* Pretty print the results of target_wait, for debugging purposes. */
2349
2350 static void
2351 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2352 const struct target_waitstatus *ws)
2353 {
2354 char *status_string = target_waitstatus_to_string (ws);
2355 struct ui_file *tmp_stream = mem_fileopen ();
2356 char *text;
2357
2358 /* The text is split over several lines because it was getting too long.
2359 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2360 output as a unit; we want only one timestamp printed if debug_timestamp
2361 is set. */
2362
2363 fprintf_unfiltered (tmp_stream,
2364 "infrun: target_wait (%d", PIDGET (waiton_ptid));
2365 if (PIDGET (waiton_ptid) != -1)
2366 fprintf_unfiltered (tmp_stream,
2367 " [%s]", target_pid_to_str (waiton_ptid));
2368 fprintf_unfiltered (tmp_stream, ", status) =\n");
2369 fprintf_unfiltered (tmp_stream,
2370 "infrun: %d [%s],\n",
2371 PIDGET (result_ptid), target_pid_to_str (result_ptid));
2372 fprintf_unfiltered (tmp_stream,
2373 "infrun: %s\n",
2374 status_string);
2375
2376 text = ui_file_xstrdup (tmp_stream, NULL);
2377
2378 /* This uses %s in part to handle %'s in the text, but also to avoid
2379 a gcc error: the format attribute requires a string literal. */
2380 fprintf_unfiltered (gdb_stdlog, "%s", text);
2381
2382 xfree (status_string);
2383 xfree (text);
2384 ui_file_delete (tmp_stream);
2385 }
2386
2387 /* Prepare and stabilize the inferior for detaching it. E.g.,
2388 detaching while a thread is displaced stepping is a recipe for
2389 crashing it, as nothing would readjust the PC out of the scratch
2390 pad. */
2391
2392 void
2393 prepare_for_detach (void)
2394 {
2395 struct inferior *inf = current_inferior ();
2396 ptid_t pid_ptid = pid_to_ptid (inf->pid);
2397 struct cleanup *old_chain_1;
2398 struct displaced_step_inferior_state *displaced;
2399
2400 displaced = get_displaced_stepping_state (inf->pid);
2401
2402 /* Is any thread of this process displaced stepping? If not,
2403 there's nothing else to do. */
2404 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
2405 return;
2406
2407 if (debug_infrun)
2408 fprintf_unfiltered (gdb_stdlog,
2409 "displaced-stepping in-process while detaching");
2410
2411 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
2412 inf->detaching = 1;
2413
2414 while (!ptid_equal (displaced->step_ptid, null_ptid))
2415 {
2416 struct cleanup *old_chain_2;
2417 struct execution_control_state ecss;
2418 struct execution_control_state *ecs;
2419
2420 ecs = &ecss;
2421 memset (ecs, 0, sizeof (*ecs));
2422
2423 overlay_cache_invalid = 1;
2424
2425 /* We have to invalidate the registers BEFORE calling
2426 target_wait because they can be loaded from the target while
2427 in target_wait. This makes remote debugging a bit more
2428 efficient for those targets that provide critical registers
2429 as part of their normal status mechanism. */
2430
2431 registers_changed ();
2432
2433 if (deprecated_target_wait_hook)
2434 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
2435 else
2436 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
2437
2438 if (debug_infrun)
2439 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
2440
2441 /* If an error happens while handling the event, propagate GDB's
2442 knowledge of the executing state to the frontend/user running
2443 state. */
2444 old_chain_2 = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2445
2446 /* In non-stop mode, each thread is handled individually.
2447 Switch early, so the global state is set correctly for this
2448 thread. */
2449 if (non_stop
2450 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2451 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2452 context_switch (ecs->ptid);
2453
2454 /* Now figure out what to do with the result. */
2455 handle_inferior_event (ecs);
2456
2457 /* No error, don't finish the state yet. */
2458 discard_cleanups (old_chain_2);
2459
2460 /* Breakpoints and watchpoints are not installed on the target
2461 at this point, and signals are passed directly to the
2462 inferior, so this must mean the process is gone. */
2463 if (!ecs->wait_some_more)
2464 {
2465 discard_cleanups (old_chain_1);
2466 error (_("Program exited while detaching"));
2467 }
2468 }
2469
2470 discard_cleanups (old_chain_1);
2471 }
2472
2473 /* Wait for control to return from the inferior to the debugger.
2474
2475 If TREAT_EXEC_AS_SIGTRAP is non-zero, then handle EXEC signals
2476 as if they were SIGTRAP signals. This can be useful during
2477 the startup sequence on some targets such as HP/UX, where
2478 we receive an EXEC event instead of the expected SIGTRAP.
2479
2480 If the inferior gets a signal, we may decide to start it up again
2481 instead of returning. That is why there is a loop in this function.
2482 When this function actually returns it means the inferior
2483 should be left stopped and GDB should read more commands. */
2484
2485 void
2486 wait_for_inferior (int treat_exec_as_sigtrap)
2487 {
2488 struct cleanup *old_cleanups;
2489 struct execution_control_state ecss;
2490 struct execution_control_state *ecs;
2491
2492 if (debug_infrun)
2493 fprintf_unfiltered
2494 (gdb_stdlog, "infrun: wait_for_inferior (treat_exec_as_sigtrap=%d)\n",
2495 treat_exec_as_sigtrap);
2496
2497 old_cleanups =
2498 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2499
2500 ecs = &ecss;
2501 memset (ecs, 0, sizeof (*ecs));
2502
2503 /* We'll update this if & when we switch to a new thread. */
2504 previous_inferior_ptid = inferior_ptid;
2505
2506 while (1)
2507 {
2508 struct cleanup *old_chain;
2509
2510 /* We have to invalidate the registers BEFORE calling target_wait
2511 because they can be loaded from the target while in target_wait.
2512 This makes remote debugging a bit more efficient for those
2513 targets that provide critical registers as part of their normal
2514 status mechanism. */
2515
2516 overlay_cache_invalid = 1;
2517 registers_changed ();
2518
2519 if (deprecated_target_wait_hook)
2520 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2521 else
2522 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2523
2524 if (debug_infrun)
2525 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2526
2527 if (treat_exec_as_sigtrap && ecs->ws.kind == TARGET_WAITKIND_EXECD)
2528 {
2529 xfree (ecs->ws.value.execd_pathname);
2530 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2531 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
2532 }
2533
2534 /* If an error happens while handling the event, propagate GDB's
2535 knowledge of the executing state to the frontend/user running
2536 state. */
2537 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2538
2539 if (ecs->ws.kind == TARGET_WAITKIND_SYSCALL_ENTRY
2540 || ecs->ws.kind == TARGET_WAITKIND_SYSCALL_RETURN)
2541 ecs->ws.value.syscall_number = UNKNOWN_SYSCALL;
2542
2543 /* Now figure out what to do with the result. */
2544 handle_inferior_event (ecs);
2545
2546 /* No error, don't finish the state yet. */
2547 discard_cleanups (old_chain);
2548
2549 if (!ecs->wait_some_more)
2550 break;
2551 }
2552
2553 do_cleanups (old_cleanups);
2554 }
2555
2556 /* Asynchronous version of wait_for_inferior. It is called by the
2557 event loop whenever a change of state is detected on the file
2558 descriptor corresponding to the target. It can be called more than
2559 once to complete a single execution command. In such cases we need
2560 to keep the state in a global variable ECSS. If it is the last time
2561 that this function is called for a single execution command, then
2562 report to the user that the inferior has stopped, and do the
2563 necessary cleanups. */
2564
2565 void
2566 fetch_inferior_event (void *client_data)
2567 {
2568 struct execution_control_state ecss;
2569 struct execution_control_state *ecs = &ecss;
2570 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2571 struct cleanup *ts_old_chain;
2572 int was_sync = sync_execution;
2573
2574 memset (ecs, 0, sizeof (*ecs));
2575
2576 /* We'll update this if & when we switch to a new thread. */
2577 previous_inferior_ptid = inferior_ptid;
2578
2579 if (non_stop)
2580 /* In non-stop mode, the user/frontend should not notice a thread
2581 switch due to internal events. Make sure we revert to the
2582 user-selected thread and frame after handling the event and
2583 running any breakpoint commands. */
2584 make_cleanup_restore_current_thread ();
2585
2586 /* We have to invalidate the registers BEFORE calling target_wait
2587 because they can be loaded from the target while in target_wait.
2588 This makes remote debugging a bit more efficient for those
2589 targets that provide critical registers as part of their normal
2590 status mechanism. */
2591
2592 overlay_cache_invalid = 1;
2593 registers_changed ();
2594
2595 if (deprecated_target_wait_hook)
2596 ecs->ptid =
2597 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2598 else
2599 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2600
2601 if (debug_infrun)
2602 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2603
2604 if (non_stop
2605 && ecs->ws.kind != TARGET_WAITKIND_IGNORE
2606 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2607 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2608 /* In non-stop mode, each thread is handled individually. Switch
2609 early, so the global state is set correctly for this
2610 thread. */
2611 context_switch (ecs->ptid);
2612
2613 /* If an error happens while handling the event, propagate GDB's
2614 knowledge of the executing state to the frontend/user running
2615 state. */
2616 if (!non_stop)
2617 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2618 else
2619 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2620
2621 /* Now figure out what to do with the result. */
2622 handle_inferior_event (ecs);
2623
2624 if (!ecs->wait_some_more)
2625 {
2626 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2627
2628 delete_step_thread_step_resume_breakpoint ();
2629
2630 /* We may not find an inferior if this was a process exit. */
2631 if (inf == NULL || inf->stop_soon == NO_STOP_QUIETLY)
2632 normal_stop ();
2633
2634 if (target_has_execution
2635 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2636 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2637 && ecs->event_thread->step_multi
2638 && ecs->event_thread->stop_step)
2639 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2640 else
2641 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2642 }
2643
2644 /* No error, don't finish the thread states yet. */
2645 discard_cleanups (ts_old_chain);
2646
2647 /* Revert thread and frame. */
2648 do_cleanups (old_chain);
2649
2650 /* If the inferior was in sync execution mode, and now isn't,
2651 restore the prompt. */
2652 if (was_sync && !sync_execution)
2653 display_gdb_prompt (0);
2654 }
2655
2656 /* Record the frame and location we're currently stepping through. */
2657 void
2658 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2659 {
2660 struct thread_info *tp = inferior_thread ();
2661
2662 tp->step_frame_id = get_frame_id (frame);
2663 tp->step_stack_frame_id = get_stack_frame_id (frame);
2664
2665 tp->current_symtab = sal.symtab;
2666 tp->current_line = sal.line;
2667 }
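/* An illustrative sketch (hypothetical caller; roughly what a
   stepping command does before resuming) of recording where a step
   starts:

       struct frame_info *frame = get_current_frame ();

       set_step_info (frame, find_pc_line (get_frame_pc (frame), 0));

   The recorded frame ids and source line are what later stop
   processing compares against to decide whether the step has left
   the starting line or frame.  */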
2668
2669 /* Clear context switchable stepping state. */
2670
2671 void
2672 init_thread_stepping_state (struct thread_info *tss)
2673 {
2674 tss->stepping_over_breakpoint = 0;
2675 tss->step_after_step_resume_breakpoint = 0;
2676 tss->stepping_through_solib_after_catch = 0;
2677 tss->stepping_through_solib_catchpoints = NULL;
2678 }
2679
2680 /* Return the cached copy of the last pid/waitstatus returned by
2681 target_wait()/deprecated_target_wait_hook(). The data is actually
2682 cached by handle_inferior_event(), which gets called immediately
2683 after target_wait()/deprecated_target_wait_hook(). */
2684
2685 void
2686 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
2687 {
2688 *ptidp = target_last_wait_ptid;
2689 *status = target_last_waitstatus;
2690 }
2691
2692 void
2693 nullify_last_target_wait_ptid (void)
2694 {
2695 target_last_wait_ptid = minus_one_ptid;
2696 }
2697
2698 /* Switch thread contexts. */
2699
2700 static void
2701 context_switch (ptid_t ptid)
2702 {
2703 if (debug_infrun)
2704 {
2705 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
2706 target_pid_to_str (inferior_ptid));
2707 fprintf_unfiltered (gdb_stdlog, "to %s\n",
2708 target_pid_to_str (ptid));
2709 }
2710
2711 switch_to_thread (ptid);
2712 }
2713
2714 static void
2715 adjust_pc_after_break (struct execution_control_state *ecs)
2716 {
2717 struct regcache *regcache;
2718 struct gdbarch *gdbarch;
2719 struct address_space *aspace;
2720 CORE_ADDR breakpoint_pc;
2721
2722 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
2723 we aren't, just return.
2724
2725 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
2726 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
2727 implemented by software breakpoints should be handled through the normal
2728 breakpoint layer.
2729
2730 NOTE drow/2004-01-31: On some targets, breakpoints may generate
2731 different signals (SIGILL or SIGEMT for instance), but it is less
2732 clear where the PC is pointing afterwards. It may not match
2733 gdbarch_decr_pc_after_break. I don't know any specific target that
2734 generates these signals at breakpoints (the code has been in GDB since at
2735 least 1992) so I can not guess how to handle them here.
2736
2737 In earlier versions of GDB, a target with
2738 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
2739 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
2740 target with both of these set in GDB history, and it seems unlikely to be
2741 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
2742
2743 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
2744 return;
2745
2746 if (ecs->ws.value.sig != TARGET_SIGNAL_TRAP)
2747 return;
2748
2749 /* In reverse execution, when a breakpoint is hit, the instruction
2750 under it has already been de-executed. The reported PC always
2751 points at the breakpoint address, so adjusting it further would
2752 be wrong. E.g., consider this case on a decr_pc_after_break == 1
2753 architecture:
2754
2755 B1 0x08000000 : INSN1
2756 B2 0x08000001 : INSN2
2757 0x08000002 : INSN3
2758 PC -> 0x08000003 : INSN4
2759
2760 Say you're stopped at 0x08000003 as above. Reverse continuing
2761 from that point should hit B2 as below. Reading the PC when the
2762 SIGTRAP is reported should read 0x08000001 and INSN2 should have
2763 been de-executed already.
2764
2765 B1 0x08000000 : INSN1
2766 B2 PC -> 0x08000001 : INSN2
2767 0x08000002 : INSN3
2768 0x08000003 : INSN4
2769
2770 We can't apply the same logic as for forward execution, because
2771 we would wrongly adjust the PC to 0x08000000, since there's a
2772 breakpoint at PC - 1. We'd then report a hit on B1, although
2773 INSN1 hadn't been de-executed yet. Doing nothing is the correct
2774 behaviour. */
2775 if (execution_direction == EXEC_REVERSE)
2776 return;
2777
2778 /* If this target does not decrement the PC after breakpoints, then
2779 we have nothing to do. */
2780 regcache = get_thread_regcache (ecs->ptid);
2781 gdbarch = get_regcache_arch (regcache);
2782 if (gdbarch_decr_pc_after_break (gdbarch) == 0)
2783 return;
2784
2785 aspace = get_regcache_aspace (regcache);
2786
2787 /* Find the location where (if we've hit a breakpoint) the
2788 breakpoint would be. */
2789 breakpoint_pc = regcache_read_pc (regcache)
2790 - gdbarch_decr_pc_after_break (gdbarch);
2791
2792 /* Check whether there actually is a software breakpoint inserted at
2793 that location.
2794
2795 If in non-stop mode, a race condition is possible where we've
2796 removed a breakpoint, but stop events for that breakpoint were
2797 already queued and arrive later. To suppress those spurious
2798 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
2799 and retire them after a number of stop events are reported. */
2800 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
2801 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
2802 {
2803 struct cleanup *old_cleanups = NULL;
2804
2805 if (RECORD_IS_USED)
2806 old_cleanups = record_gdb_operation_disable_set ();
2807
2808 /* When using hardware single-step, a SIGTRAP is reported for both
2809 a completed single-step and a software breakpoint. Need to
2810 differentiate between the two, as the latter needs adjusting
2811 but the former does not.
2812
2813 The SIGTRAP can be due to a completed hardware single-step only if
2814 - we didn't insert software single-step breakpoints
2815 - the thread to be examined is still the current thread
2816 - this thread is currently being stepped
2817
2818 If any of these events did not occur, we must have stopped due
2819 to hitting a software breakpoint, and have to back up to the
2820 breakpoint address.
2821
2822 As a special case, we could have hardware single-stepped a
2823 software breakpoint. In this case (prev_pc == breakpoint_pc),
2824 we also need to back up to the breakpoint address. */
2825
2826 if (singlestep_breakpoints_inserted_p
2827 || !ptid_equal (ecs->ptid, inferior_ptid)
2828 || !currently_stepping (ecs->event_thread)
2829 || ecs->event_thread->prev_pc == breakpoint_pc)
2830 regcache_write_pc (regcache, breakpoint_pc);
2831
2832 if (RECORD_IS_USED)
2833 do_cleanups (old_cleanups);
2834 }
2835 }
2836
2837 void
2838 init_infwait_state (void)
2839 {
2840 waiton_ptid = pid_to_ptid (-1);
2841 infwait_state = infwait_normal_state;
2842 }
2843
2844 void
2845 error_is_running (void)
2846 {
2847 error (_("\
2848 Cannot execute this command while the selected thread is running."));
2849 }
2850
2851 void
2852 ensure_not_running (void)
2853 {
2854 if (is_running (inferior_ptid))
2855 error_is_running ();
2856 }
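/* An illustrative sketch (hypothetical command body, not an actual
   GDB command) of the intended use of ensure_not_running -- bail
   out early if the selected thread is still running:

       static void
       example_command (char *args, int from_tty)
       {
         ensure_not_running ();
         ...operate on the stopped thread...
       }

   ensure_not_running raises the "Cannot execute this command while
   the selected thread is running" error via error_is_running.  */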
2857
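/* Return non-zero if the frame with id STEP_FRAME_ID is an outer
   frame of FRAME, and every frame between them is an inline
   frame -- i.e., FRAME was reached from that frame through inlined
   calls rather than real calls.  */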
2858 static int
2859 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
2860 {
2861 for (frame = get_prev_frame (frame);
2862 frame != NULL;
2863 frame = get_prev_frame (frame))
2864 {
2865 if (frame_id_eq (get_frame_id (frame), step_frame_id))
2866 return 1;
2867 if (get_frame_type (frame) != INLINE_FRAME)
2868 break;
2869 }
2870
2871 return 0;
2872 }
2873
2874 /* Auxiliary function that handles syscall entry/return events.
2875 It returns 1 if the inferior should keep going (and GDB
2876 should ignore the event), or 0 if the event deserves to be
2877 processed. */
2878
2879 static int
2880 handle_syscall_event (struct execution_control_state *ecs)
2881 {
2882 struct regcache *regcache;
2883 struct gdbarch *gdbarch;
2884 int syscall_number;
2885
2886 if (!ptid_equal (ecs->ptid, inferior_ptid))
2887 context_switch (ecs->ptid);
2888
2889 regcache = get_thread_regcache (ecs->ptid);
2890 gdbarch = get_regcache_arch (regcache);
2891 syscall_number = gdbarch_get_syscall_number (gdbarch, ecs->ptid);
2892 stop_pc = regcache_read_pc (regcache);
2893
2894 target_last_waitstatus.value.syscall_number = syscall_number;
2895
2896 if (catch_syscall_enabled () > 0
2897 && catching_syscall_number (syscall_number) > 0)
2898 {
2899 if (debug_infrun)
2900 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
2901 syscall_number);
2902
2903 ecs->event_thread->stop_bpstat
2904 = bpstat_stop_status (get_regcache_aspace (regcache),
2905 stop_pc, ecs->ptid);
2906 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
2907
2908 if (!ecs->random_signal)
2909 {
2910 /* Catchpoint hit. */
2911 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
2912 return 0;
2913 }
2914 }
2915
2916 /* If no catchpoint triggered for this, then keep going. */
2917 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
2918 keep_going (ecs);
2919 return 1;
2920 }
2921
2922 /* Given an execution control state that has been freshly filled in
2923 by an event from the inferior, figure out what it means and take
2924 appropriate action. */
2925
2926 static void
2927 handle_inferior_event (struct execution_control_state *ecs)
2928 {
2929 struct frame_info *frame;
2930 struct gdbarch *gdbarch;
2931 int sw_single_step_trap_p = 0;
2932 int stopped_by_watchpoint;
2933 int stepped_after_stopped_by_watchpoint = 0;
2934 struct symtab_and_line stop_pc_sal;
2935 enum stop_kind stop_soon;
2936
2937 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
2938 {
2939 /* We had an event in the inferior, but we are not interested in
2940 handling it at this level. The lower layers have already
2941 done what needs to be done, if anything.
2942
2943 One of the possible circumstances for this is when the
2944 inferior produces output for the console. The inferior has
2945 not stopped, and we are ignoring the event. Another possible
2946 circumstance is any event which the lower level knows will be
2947 reported multiple times without an intervening resume. */
2948 if (debug_infrun)
2949 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
2950 prepare_to_wait (ecs);
2951 return;
2952 }
2953
2954 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
2955 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2956 {
2957 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2958
2959 gdb_assert (inf);
2960 stop_soon = inf->stop_soon;
2961 }
2962 else
2963 stop_soon = NO_STOP_QUIETLY;
2964
2965 /* Cache the last pid/waitstatus. */
2966 target_last_wait_ptid = ecs->ptid;
2967 target_last_waitstatus = ecs->ws;
2968
2969 /* Always clear state belonging to the previous time we stopped. */
2970 stop_stack_dummy = STOP_NONE;
2971
2972 /* If it's a new process, add it to the thread database. */
2973
2974 ecs->new_thread_event = (!ptid_equal (ecs->ptid, inferior_ptid)
2975 && !ptid_equal (ecs->ptid, minus_one_ptid)
2976 && !in_thread_list (ecs->ptid));
2977
2978 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
2979 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED && ecs->new_thread_event)
2980 add_thread (ecs->ptid);
2981
2982 ecs->event_thread = find_thread_ptid (ecs->ptid);
2983
2984 /* Dependent on valid ECS->EVENT_THREAD. */
2985 adjust_pc_after_break (ecs);
2986
2987 /* Dependent on the current PC value modified by adjust_pc_after_break. */
2988 reinit_frame_cache ();
2989
2990 breakpoint_retire_moribund ();
2991
2992 /* First, distinguish signals caused by the debugger from signals
2993 that have to do with the program's own actions. Note that
2994 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
2995 on the operating system version. Here we detect when a SIGILL or
2996 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
2997 something similar for SIGSEGV, since a SIGSEGV will be generated
2998 when we're trying to execute a breakpoint instruction on a
2999 non-executable stack. This happens for call dummy breakpoints
3000 for architectures like SPARC that place call dummies on the
3001 stack. */
3002 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
3003 && (ecs->ws.value.sig == TARGET_SIGNAL_ILL
3004 || ecs->ws.value.sig == TARGET_SIGNAL_SEGV
3005 || ecs->ws.value.sig == TARGET_SIGNAL_EMT))
3006 {
3007 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3008
3009 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
3010 regcache_read_pc (regcache)))
3011 {
3012 if (debug_infrun)
3013 fprintf_unfiltered (gdb_stdlog,
3014 "infrun: Treating signal as SIGTRAP\n");
3015 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
3016 }
3017 }
3018
3019 /* Mark the non-executing threads accordingly. In all-stop, all
3020 threads of all processes are stopped when we get any event
3021 reported. In non-stop mode, only the event thread stops. If
3022 we're handling a process exit in non-stop mode, there's nothing
3023 to do, as threads of the dead process are gone, and threads of
3024 any other process were left running. */
3025 if (!non_stop)
3026 set_executing (minus_one_ptid, 0);
3027 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3028 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
3029 set_executing (inferior_ptid, 0);
3030
3031 switch (infwait_state)
3032 {
3033 case infwait_thread_hop_state:
3034 if (debug_infrun)
3035 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_thread_hop_state\n");
3036 break;
3037
3038 case infwait_normal_state:
3039 if (debug_infrun)
3040 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
3041 break;
3042
3043 case infwait_step_watch_state:
3044 if (debug_infrun)
3045 fprintf_unfiltered (gdb_stdlog,
3046 "infrun: infwait_step_watch_state\n");
3047
3048 stepped_after_stopped_by_watchpoint = 1;
3049 break;
3050
3051 case infwait_nonstep_watch_state:
3052 if (debug_infrun)
3053 fprintf_unfiltered (gdb_stdlog,
3054 "infrun: infwait_nonstep_watch_state\n");
3055 insert_breakpoints ();
3056
3057 /* FIXME-maybe: is this cleaner than setting a flag? Does it
3058 handle things like signals arriving and other things happening
3059 in combination correctly? */
3060 stepped_after_stopped_by_watchpoint = 1;
3061 break;
3062
3063 default:
3064 internal_error (__FILE__, __LINE__, _("bad switch"));
3065 }
3066
3067 infwait_state = infwait_normal_state;
3068 waiton_ptid = pid_to_ptid (-1);
3069
3070 switch (ecs->ws.kind)
3071 {
3072 case TARGET_WAITKIND_LOADED:
3073 if (debug_infrun)
3074 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
3075 /* Ignore gracefully during startup of the inferior, as it might
3076 be the shell which has just loaded some objects; otherwise,
3077 add the symbols for the newly loaded objects. Also ignore at
3078 the beginning of an attach or remote session; we will query
3079 the full list of libraries once the connection is
3080 established. */
3081 if (stop_soon == NO_STOP_QUIETLY)
3082 {
3083 /* Check for any newly added shared libraries if we're
3084 supposed to be adding them automatically. Switch
3085 terminal for any messages produced by
3086 breakpoint_re_set. */
3087 target_terminal_ours_for_output ();
3088 /* NOTE: cagney/2003-11-25: Make certain that the target
3089 stack's section table is kept up-to-date. Architectures,
3090 (e.g., PPC64), use the section table to perform
3091 operations such as address => section name and hence
3092 require the table to contain all sections (including
3093 those found in shared libraries). */
3094 #ifdef SOLIB_ADD
3095 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
3096 #else
3097 solib_add (NULL, 0, &current_target, auto_solib_add);
3098 #endif
3099 target_terminal_inferior ();
3100
3101 /* If requested, stop when the dynamic linker notifies
3102 gdb of events. This allows the user to get control
3103 and place breakpoints in initializer routines for
3104 dynamically loaded objects (among other things). */
3105 if (stop_on_solib_events)
3106 {
3107 /* Make sure we print "Stopped due to solib-event" in
3108 normal_stop. */
3109 stop_print_frame = 1;
3110
3111 stop_stepping (ecs);
3112 return;
3113 }
3114
3115 /* NOTE drow/2007-05-11: This might be a good place to check
3116 for "catch load". */
3117 }
3118
3119 /* If we are skipping through a shell, or through shared library
3120 loading that we aren't interested in, resume the program. If
3121 we're running the program normally, also resume. But stop if
3122 we're attaching or setting up a remote connection. */
3123 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3124 {
3125 /* Loading of shared libraries might have changed breakpoint
3126 addresses. Make sure new breakpoints are inserted. */
3127 if (stop_soon == NO_STOP_QUIETLY
3128 && !breakpoints_always_inserted_mode ())
3129 insert_breakpoints ();
3130 resume (0, TARGET_SIGNAL_0);
3131 prepare_to_wait (ecs);
3132 return;
3133 }
3134
3135 break;
3136
3137 case TARGET_WAITKIND_SPURIOUS:
3138 if (debug_infrun)
3139 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3140 resume (0, TARGET_SIGNAL_0);
3141 prepare_to_wait (ecs);
3142 return;
3143
3144 case TARGET_WAITKIND_EXITED:
3145 if (debug_infrun)
3146 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXITED\n");
3147 inferior_ptid = ecs->ptid;
3148 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3149 set_current_program_space (current_inferior ()->pspace);
3150 handle_vfork_child_exec_or_exit (0);
3151 target_terminal_ours (); /* Must do this before mourn anyway. */
3152 print_exited_reason (ecs->ws.value.integer);
3153
3154 /* Record the exit code in the convenience variable $_exitcode, so
3155 that the user can inspect this again later. */
3156 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3157 (LONGEST) ecs->ws.value.integer);
3158 gdb_flush (gdb_stdout);
3159 target_mourn_inferior ();
3160 singlestep_breakpoints_inserted_p = 0;
3161 cancel_single_step_breakpoints ();
3162 stop_print_frame = 0;
3163 stop_stepping (ecs);
3164 return;
3165
3166 case TARGET_WAITKIND_SIGNALLED:
3167 if (debug_infrun)
3168 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SIGNALLED\n");
3169 inferior_ptid = ecs->ptid;
3170 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3171 set_current_program_space (current_inferior ()->pspace);
3172 handle_vfork_child_exec_or_exit (0);
3173 stop_print_frame = 0;
3174 target_terminal_ours (); /* Must do this before mourn anyway. */
3175
3176 /* Note: By definition of TARGET_WAITKIND_SIGNALLED, we shouldn't
3177 reach here unless the inferior is dead. However, for years
3178 target_kill() was called here, which hints that fatal signals aren't
3179 really fatal on some systems. If that's true, then some changes
3180 may be needed. */
3181 target_mourn_inferior ();
3182
3183 print_signal_exited_reason (ecs->ws.value.sig);
3184 singlestep_breakpoints_inserted_p = 0;
3185 cancel_single_step_breakpoints ();
3186 stop_stepping (ecs);
3187 return;
3188
3189 /* The following are the only cases in which we keep going;
3190 the above cases end in a continue or goto. */
3191 case TARGET_WAITKIND_FORKED:
3192 case TARGET_WAITKIND_VFORKED:
3193 if (debug_infrun)
3194 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3195
3196 if (!ptid_equal (ecs->ptid, inferior_ptid))
3197 {
3198 context_switch (ecs->ptid);
3199 reinit_frame_cache ();
3200 }
3201
3202 /* Immediately detach breakpoints from the child before there's
3203 any chance of letting the user delete breakpoints from the
3204 breakpoint lists. If we don't do this early, it's easy to
3205 leave leftover traps in the child, viz: "break foo; catch
3206 fork; c; <fork>; del; c; <child calls foo>". We only follow
3207 the fork on the last `continue', and by that time the
3208 breakpoint at "foo" is long gone from the breakpoint table.
3209 If we vforked, then we don't need to unpatch here, since both
3210 parent and child are sharing the same memory pages; we'll
3211 need to unpatch at follow/detach time instead to be certain
3212 that new breakpoints added between catchpoint hit time and
3213 vfork follow are detached. */
3214 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
3215 {
3216 int child_pid = ptid_get_pid (ecs->ws.value.related_pid);
3217
3218 /* This won't actually modify the breakpoint list, but will
3219 physically remove the breakpoints from the child. */
3220 detach_breakpoints (child_pid);
3221 }
3222
3223 if (singlestep_breakpoints_inserted_p)
3224 {
3225 /* Pull the single step breakpoints out of the target. */
3226 remove_single_step_breakpoints ();
3227 singlestep_breakpoints_inserted_p = 0;
3228 }
3229
3230 /* In case the event is caught by a catchpoint, remember that
3231 the event is to be followed at the next resume of the thread,
3232 and not immediately. */
3233 ecs->event_thread->pending_follow = ecs->ws;
3234
3235 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3236
3237 ecs->event_thread->stop_bpstat
3238 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3239 stop_pc, ecs->ptid);
3240
3241 /* Note that we're interested in knowing whether the bpstat actually
3242 causes a stop, not just whether it may explain the signal.
3243 Software watchpoints, for example, always appear in the
3244 bpstat. */
3245 ecs->random_signal = !bpstat_causes_stop (ecs->event_thread->stop_bpstat);
3246
3247 /* If no catchpoint triggered for this, then keep going. */
3248 if (ecs->random_signal)
3249 {
3250 ptid_t parent;
3251 ptid_t child;
3252 int should_resume;
3253 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
3254
3255 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3256
3257 should_resume = follow_fork ();
3258
3259 parent = ecs->ptid;
3260 child = ecs->ws.value.related_pid;
3261
3262 /* In non-stop mode, also resume the other branch. */
3263 if (non_stop && !detach_fork)
3264 {
3265 if (follow_child)
3266 switch_to_thread (parent);
3267 else
3268 switch_to_thread (child);
3269
3270 ecs->event_thread = inferior_thread ();
3271 ecs->ptid = inferior_ptid;
3272 keep_going (ecs);
3273 }
3274
3275 if (follow_child)
3276 switch_to_thread (child);
3277 else
3278 switch_to_thread (parent);
3279
3280 ecs->event_thread = inferior_thread ();
3281 ecs->ptid = inferior_ptid;
3282
3283 if (should_resume)
3284 keep_going (ecs);
3285 else
3286 stop_stepping (ecs);
3287 return;
3288 }
3289 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3290 goto process_event_stop_test;
3291
3292 case TARGET_WAITKIND_VFORK_DONE:
3293 /* Done with the shared memory region. Re-insert breakpoints in
3294 the parent, and keep going. */
3295
3296 if (debug_infrun)
3297 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3298
3299 if (!ptid_equal (ecs->ptid, inferior_ptid))
3300 context_switch (ecs->ptid);
3301
3302 current_inferior ()->waiting_for_vfork_done = 0;
3303 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3304 /* This also takes care of reinserting breakpoints in the
3305 previously locked inferior. */
3306 keep_going (ecs);
3307 return;
3308
3309 case TARGET_WAITKIND_EXECD:
3310 if (debug_infrun)
3311 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3312
3313 if (!ptid_equal (ecs->ptid, inferior_ptid))
3314 {
3315 context_switch (ecs->ptid);
3316 reinit_frame_cache ();
3317 }
3318
3319 singlestep_breakpoints_inserted_p = 0;
3320 cancel_single_step_breakpoints ();
3321
3322 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3323
3324 /* Do whatever is necessary to the parent branch of the vfork. */
3325 handle_vfork_child_exec_or_exit (1);
3326
3327 /* This causes the eventpoints and symbol table to be reset.
3328 Must do this now, before trying to determine whether to
3329 stop. */
3330 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3331
3332 ecs->event_thread->stop_bpstat
3333 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3334 stop_pc, ecs->ptid);
3335 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
3336
3337 /* Note that this may be referenced from inside
3338 bpstat_stop_status above, through inferior_has_execd. */
3339 xfree (ecs->ws.value.execd_pathname);
3340 ecs->ws.value.execd_pathname = NULL;
3341
3342 /* If no catchpoint triggered for this, then keep going. */
3343 if (ecs->random_signal)
3344 {
3345 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3346 keep_going (ecs);
3347 return;
3348 }
3349 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3350 goto process_event_stop_test;
3351
3352 /* Be careful not to try to gather much state about a thread
3353 that's in a syscall. It's frequently a losing proposition. */
3354 case TARGET_WAITKIND_SYSCALL_ENTRY:
3355 if (debug_infrun)
3356 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3357       /* Get the current syscall number.  */
3358 if (handle_syscall_event (ecs) != 0)
3359 return;
3360 goto process_event_stop_test;
3361
3362 /* Before examining the threads further, step this thread to
3363 get it entirely out of the syscall. (We get notice of the
3364 event when the thread is just on the verge of exiting a
3365 syscall. Stepping one instruction seems to get it back
3366 into user code.) */
3367 case TARGET_WAITKIND_SYSCALL_RETURN:
3368 if (debug_infrun)
3369 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
3370 if (handle_syscall_event (ecs) != 0)
3371 return;
3372 goto process_event_stop_test;
3373
3374 case TARGET_WAITKIND_STOPPED:
3375 if (debug_infrun)
3376 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
3377 ecs->event_thread->stop_signal = ecs->ws.value.sig;
3378 break;
3379
3380 case TARGET_WAITKIND_NO_HISTORY:
3381 /* Reverse execution: target ran out of history info. */
3382 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3383 print_no_history_reason ();
3384 stop_stepping (ecs);
3385 return;
3386 }
3387
3388 if (ecs->new_thread_event)
3389 {
3390 if (non_stop)
3391 /* Non-stop assumes that the target handles adding new threads
3392 to the thread list. */
3393 internal_error (__FILE__, __LINE__, "\
3394 targets should add new threads to the thread list themselves in non-stop mode.");
3395
3396 /* We may want to consider not doing a resume here in order to
3397 give the user a chance to play with the new thread. It might
3398 be good to make that a user-settable option. */
3399
3400 /* At this point, all threads are stopped (happens automatically
3401 in either the OS or the native code). Therefore we need to
3402 continue all threads in order to make progress. */
3403
3404 if (!ptid_equal (ecs->ptid, inferior_ptid))
3405 context_switch (ecs->ptid);
3406 target_resume (RESUME_ALL, 0, TARGET_SIGNAL_0);
3407 prepare_to_wait (ecs);
3408 return;
3409 }
3410
3411 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED)
3412 {
3413 /* Do we need to clean up the state of a thread that has
3414 completed a displaced single-step? (Doing so usually affects
3415 the PC, so do it here, before we set stop_pc.) */
3416 displaced_step_fixup (ecs->ptid, ecs->event_thread->stop_signal);
3417
3418 /* If we either finished a single-step or hit a breakpoint, but
3419 the user wanted this thread to be stopped, pretend we got a
3420 SIG0 (generic unsignaled stop). */
3421
3422 if (ecs->event_thread->stop_requested
3423 && ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3424 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3425 }
3426
3427 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3428
3429 if (debug_infrun)
3430 {
3431 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3432 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3433 struct cleanup *old_chain = save_inferior_ptid ();
3434
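      /* The watchpoint queries below operate on the current thread, so
	 temporarily make the event thread the current one.  */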
3435 inferior_ptid = ecs->ptid;
3436
3437 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
3438 paddress (gdbarch, stop_pc));
3439 if (target_stopped_by_watchpoint ())
3440 {
3441 CORE_ADDR addr;
3442
3443 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
3444
3445 if (target_stopped_data_address (&current_target, &addr))
3446 fprintf_unfiltered (gdb_stdlog,
3447 "infrun: stopped data address = %s\n",
3448 paddress (gdbarch, addr));
3449 else
3450 fprintf_unfiltered (gdb_stdlog,
3451 "infrun: (no data address available)\n");
3452 }
3453
3454 do_cleanups (old_chain);
3455 }
3456
3457 if (stepping_past_singlestep_breakpoint)
3458 {
3459 gdb_assert (singlestep_breakpoints_inserted_p);
3460 gdb_assert (ptid_equal (singlestep_ptid, ecs->ptid));
3461 gdb_assert (!ptid_equal (singlestep_ptid, saved_singlestep_ptid));
3462
3463 stepping_past_singlestep_breakpoint = 0;
3464
3465 /* We've either finished single-stepping past the single-step
3466 breakpoint, or stopped for some other reason. It would be nice if
3467 we could tell, but we can't reliably. */
3468 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3469 {
3470 if (debug_infrun)
3471 fprintf_unfiltered (gdb_stdlog, "infrun: stepping_past_singlestep_breakpoint\n");
3472 /* Pull the single step breakpoints out of the target. */
3473 remove_single_step_breakpoints ();
3474 singlestep_breakpoints_inserted_p = 0;
3475
3476 ecs->random_signal = 0;
3477 ecs->event_thread->trap_expected = 0;
3478
3479 context_switch (saved_singlestep_ptid);
3480 if (deprecated_context_hook)
3481 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3482
3483 resume (1, TARGET_SIGNAL_0);
3484 prepare_to_wait (ecs);
3485 return;
3486 }
3487 }
3488
3489 if (!ptid_equal (deferred_step_ptid, null_ptid))
3490 {
3491 /* In non-stop mode, there's never a deferred_step_ptid set. */
3492 gdb_assert (!non_stop);
3493
3494 /* If we stopped for some other reason than single-stepping, ignore
3495 the fact that we were supposed to switch back. */
3496 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3497 {
3498 if (debug_infrun)
3499 fprintf_unfiltered (gdb_stdlog,
3500 "infrun: handling deferred step\n");
3501
3502 /* Pull the single step breakpoints out of the target. */
3503 if (singlestep_breakpoints_inserted_p)
3504 {
3505 remove_single_step_breakpoints ();
3506 singlestep_breakpoints_inserted_p = 0;
3507 }
3508
3509 /* Note: We do not call context_switch at this point, as the
3510 context is already set up for stepping the original thread. */
3511 switch_to_thread (deferred_step_ptid);
3512 deferred_step_ptid = null_ptid;
3513 /* Suppress spurious "Switching to ..." message. */
3514 previous_inferior_ptid = inferior_ptid;
3515
3516 resume (1, TARGET_SIGNAL_0);
3517 prepare_to_wait (ecs);
3518 return;
3519 }
3520
3521 deferred_step_ptid = null_ptid;
3522 }
3523
3524 /* See if a thread hit a thread-specific breakpoint that was meant for
3525 another thread. If so, then step that thread past the breakpoint,
3526 and continue it. */
3527
3528 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3529 {
3530 int thread_hop_needed = 0;
3531 struct address_space *aspace =
3532 get_regcache_aspace (get_thread_regcache (ecs->ptid));
3533
3534 /* Check if a regular breakpoint has been hit before checking
3535 for a potential single step breakpoint. Otherwise, GDB will
3536 not see this breakpoint hit when stepping onto breakpoints. */
3537 if (regular_breakpoint_inserted_here_p (aspace, stop_pc))
3538 {
3539 ecs->random_signal = 0;
3540 if (!breakpoint_thread_match (aspace, stop_pc, ecs->ptid))
3541 thread_hop_needed = 1;
3542 }
3543 else if (singlestep_breakpoints_inserted_p)
3544 {
3545 /* We have not context switched yet, so this should be true
3546 no matter which thread hit the singlestep breakpoint. */
3547 gdb_assert (ptid_equal (inferior_ptid, singlestep_ptid));
3548 if (debug_infrun)
3549 fprintf_unfiltered (gdb_stdlog, "infrun: software single step "
3550 "trap for %s\n",
3551 target_pid_to_str (ecs->ptid));
3552
3553 ecs->random_signal = 0;
3554 /* The call to in_thread_list is necessary because PTIDs sometimes
3555 change when we go from single-threaded to multi-threaded. If
3556 the singlestep_ptid is still in the list, assume that it is
3557 really different from ecs->ptid. */
3558 if (!ptid_equal (singlestep_ptid, ecs->ptid)
3559 && in_thread_list (singlestep_ptid))
3560 {
3561 /* If the PC of the thread we were trying to single-step
3562 has changed, discard this event (which we were going
3563 to ignore anyway), and pretend we saw that thread
3564 trap. This prevents us continuously moving the
3565 single-step breakpoint forward, one instruction at a
3566 time. If the PC has changed, then the thread we were
3567 trying to single-step has trapped or been signalled,
3568 but the event has not been reported to GDB yet.
3569
3570 There might be some cases where this loses signal
3571 information, if a signal has arrived at exactly the
3572 same time that the PC changed, but this is the best
3573 we can do with the information available. Perhaps we
3574 should arrange to report all events for all threads
3575 when they stop, or to re-poll the remote looking for
3576 this particular thread (i.e. temporarily enable
3577 schedlock). */
3578
3579 CORE_ADDR new_singlestep_pc
3580 = regcache_read_pc (get_thread_regcache (singlestep_ptid));
3581
3582 if (new_singlestep_pc != singlestep_pc)
3583 {
3584 enum target_signal stop_signal;
3585
3586 if (debug_infrun)
3587 fprintf_unfiltered (gdb_stdlog, "infrun: unexpected thread,"
3588 " but expected thread advanced also\n");
3589
3590 /* The current context still belongs to
3591 singlestep_ptid. Don't swap here, since that's
3592 the context we want to use. Just fudge our
3593 state and continue. */
3594 stop_signal = ecs->event_thread->stop_signal;
3595 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3596 ecs->ptid = singlestep_ptid;
3597 ecs->event_thread = find_thread_ptid (ecs->ptid);
3598 ecs->event_thread->stop_signal = stop_signal;
3599 stop_pc = new_singlestep_pc;
3600 }
3601 else
3602 {
3603 if (debug_infrun)
3604 fprintf_unfiltered (gdb_stdlog,
3605 "infrun: unexpected thread\n");
3606
3607 thread_hop_needed = 1;
3608 stepping_past_singlestep_breakpoint = 1;
3609 saved_singlestep_ptid = singlestep_ptid;
3610 }
3611 }
3612 }
3613
3614 if (thread_hop_needed)
3615 {
3616 struct regcache *thread_regcache;
3617 int remove_status = 0;
3618
3619 if (debug_infrun)
3620 fprintf_unfiltered (gdb_stdlog, "infrun: thread_hop_needed\n");
3621
3622 /* Switch context before touching inferior memory, the
3623 previous thread may have exited. */
3624 if (!ptid_equal (inferior_ptid, ecs->ptid))
3625 context_switch (ecs->ptid);
3626
3627 /* Saw a breakpoint, but it was hit by the wrong thread.
3628 Just continue. */
3629
3630 if (singlestep_breakpoints_inserted_p)
3631 {
3632 /* Pull the single step breakpoints out of the target. */
3633 remove_single_step_breakpoints ();
3634 singlestep_breakpoints_inserted_p = 0;
3635 }
3636
3637 /* If the arch can displace step, don't remove the
3638 breakpoints. */
3639 thread_regcache = get_thread_regcache (ecs->ptid);
3640 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
3641 remove_status = remove_breakpoints ();
3642
3643 /* Did we fail to remove breakpoints? If so, try
3644 to set the PC past the bp. (There's at least
3645 one situation in which we can fail to remove
3646 the bp's: On HP-UX's that use ttrace, we can't
3647 change the address space of a vforking child
3648 process until the child exits (well, okay, not
3649 	     then either :-) or execs.)  */
3650 if (remove_status != 0)
3651 error (_("Cannot step over breakpoint hit in wrong thread"));
3652 else
3653 { /* Single step */
3654 if (!non_stop)
3655 {
3656 /* Only need to require the next event from this
3657 thread in all-stop mode. */
3658 waiton_ptid = ecs->ptid;
3659 infwait_state = infwait_thread_hop_state;
3660 }
3661
3662 ecs->event_thread->stepping_over_breakpoint = 1;
3663 keep_going (ecs);
3664 return;
3665 }
3666 }
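      /* No thread hop is needed, and our software single-step
	 breakpoints are still inserted, so attribute this trap to a
	 single-step breakpoint rather than to a random signal.  */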
3667 else if (singlestep_breakpoints_inserted_p)
3668 {
3669 sw_single_step_trap_p = 1;
3670 ecs->random_signal = 0;
3671 }
3672 }
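  /* The stop signal is not SIGTRAP, so it cannot have come from one of
     our breakpoints; tentatively treat it as a random signal (the
     checks further below may still decide otherwise).  */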
3673 else
3674 ecs->random_signal = 1;
3675
3676 /* See if something interesting happened to the non-current thread. If
3677 so, then switch to that thread. */
3678 if (!ptid_equal (ecs->ptid, inferior_ptid))
3679 {
3680 if (debug_infrun)
3681 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
3682
3683 context_switch (ecs->ptid);
3684
3685 if (deprecated_context_hook)
3686 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3687 }
3688
3689 /* At this point, get hold of the now-current thread's frame. */
3690 frame = get_current_frame ();
3691 gdbarch = get_frame_arch (frame);
3692
3693 if (singlestep_breakpoints_inserted_p)
3694 {
3695 /* Pull the single step breakpoints out of the target. */
3696 remove_single_step_breakpoints ();
3697 singlestep_breakpoints_inserted_p = 0;
3698 }
3699
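  /* If this event is just the completion of the single step we took to
     get past a watchpoint trap (see the watchpoint handling below),
     don't ask the target about triggered watchpoints again.  */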
3700 if (stepped_after_stopped_by_watchpoint)
3701 stopped_by_watchpoint = 0;
3702 else
3703 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
3704
3705 /* If necessary, step over this watchpoint. We'll be back to display
3706 it in a moment. */
3707 if (stopped_by_watchpoint
3708 && (target_have_steppable_watchpoint
3709 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
3710 {
3711 /* At this point, we are stopped at an instruction which has
3712 attempted to write to a piece of memory under control of
3713 a watchpoint. The instruction hasn't actually executed
3714 yet. If we were to evaluate the watchpoint expression
3715 now, we would get the old value, and therefore no change
3716 would seem to have occurred.
3717
3718 In order to make watchpoints work `right', we really need
3719 to complete the memory write, and then evaluate the
3720 watchpoint expression. We do this by single-stepping the
3721 target.
3722
3723 	 It may not be necessary to disable the watchpoint to step over
3724 it. For example, the PA can (with some kernel cooperation)
3725 single step over a watchpoint without disabling the watchpoint.
3726
3727 It is far more common to need to disable a watchpoint to step
3728 the inferior over it. If we have non-steppable watchpoints,
3729 we must disable the current watchpoint; it's simplest to
3730 disable all watchpoints and breakpoints. */
3731 int hw_step = 1;
3732
3733 if (!target_have_steppable_watchpoint)
3734 remove_breakpoints ();
3735 /* Single step */
3736 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
3737 target_resume (ecs->ptid, hw_step, TARGET_SIGNAL_0);
3738 waiton_ptid = ecs->ptid;
3739 if (target_have_steppable_watchpoint)
3740 infwait_state = infwait_step_watch_state;
3741 else
3742 infwait_state = infwait_nonstep_watch_state;
3743 prepare_to_wait (ecs);
3744 return;
3745 }
3746
3747 ecs->stop_func_start = 0;
3748 ecs->stop_func_end = 0;
3749 ecs->stop_func_name = 0;
3750 /* Don't care about return value; stop_func_start and stop_func_name
3751 will both be 0 if it doesn't work. */
3752 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3753 &ecs->stop_func_start, &ecs->stop_func_end);
3754 ecs->stop_func_start
3755 += gdbarch_deprecated_function_start_offset (gdbarch);
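  /* Reset the per-stop state that the checks below will fill in.  */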
3756 ecs->event_thread->stepping_over_breakpoint = 0;
3757 bpstat_clear (&ecs->event_thread->stop_bpstat);
3758 ecs->event_thread->stop_step = 0;
3759 stop_print_frame = 1;
3760 ecs->random_signal = 0;
3761 stopped_by_random_signal = 0;
3762
3763 /* Hide inlined functions starting here, unless we just performed stepi or
3764 nexti. After stepi and nexti, always show the innermost frame (not any
3765 inline function call sites). */
3766 if (ecs->event_thread->step_range_end != 1)
3767 skip_inline_frames (ecs->ptid);
3768
3769 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3770 && ecs->event_thread->trap_expected
3771 && gdbarch_single_step_through_delay_p (gdbarch)
3772 && currently_stepping (ecs->event_thread))
3773 {
3774 /* We're trying to step off a breakpoint. Turns out that we're
3775 also on an instruction that needs to be stepped multiple
3776 	 times before it's been fully executed.  E.g., architectures
3777 with a delay slot. It needs to be stepped twice, once for
3778 the instruction and once for the delay slot. */
3779 int step_through_delay
3780 = gdbarch_single_step_through_delay (gdbarch, frame);
3781
3782 if (debug_infrun && step_through_delay)
3783 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
3784 if (ecs->event_thread->step_range_end == 0 && step_through_delay)
3785 {
3786 /* The user issued a continue when stopped at a breakpoint.
3787 Set up for another trap and get out of here. */
3788 ecs->event_thread->stepping_over_breakpoint = 1;
3789 keep_going (ecs);
3790 return;
3791 }
3792 else if (step_through_delay)
3793 {
3794 /* The user issued a step when stopped at a breakpoint.
3795 Maybe we should stop, maybe we should not - the delay
3796 slot *might* correspond to a line of source. In any
3797 case, don't decide that here, just set
3798 	     ecs->event_thread->stepping_over_breakpoint, making sure we
3799 single-step again before breakpoints are re-inserted. */
3800 ecs->event_thread->stepping_over_breakpoint = 1;
3801 }
3802 }
3803
3804 /* Look at the cause of the stop, and decide what to do.
3805 The alternatives are:
3806 1) stop_stepping and return; to really stop and return to the debugger,
3807 2) keep_going and return to start up again
3808 (set ecs->event_thread->stepping_over_breakpoint to 1 to single step once)
3809 3) set ecs->random_signal to 1, and the decision between 1 and 2
3810 will be made according to the signal handling tables. */
3811
3812 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3813 || stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_NO_SIGSTOP
3814 || stop_soon == STOP_QUIETLY_REMOTE)
3815 {
3816 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP && stop_after_trap)
3817 {
3818 if (debug_infrun)
3819 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
3820 stop_print_frame = 0;
3821 stop_stepping (ecs);
3822 return;
3823 }
3824
3825       /* This originates from start_remote(), start_inferior(), and
3826 	 shared library hook functions.  */
3827 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
3828 {
3829 if (debug_infrun)
3830 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3831 stop_stepping (ecs);
3832 return;
3833 }
3834
3835 /* This originates from attach_command(). We need to overwrite
3836 the stop_signal here, because some kernels don't ignore a
3837 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
3838 See more comments in inferior.h. On the other hand, if we
3839 get a non-SIGSTOP, report it to the user - assume the backend
3840 will handle the SIGSTOP if it should show up later.
3841
3842 Also consider that the attach is complete when we see a
3843 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
3844 target extended-remote report it instead of a SIGSTOP
3845 (e.g. gdbserver). We already rely on SIGTRAP being our
3846 signal, so this is no exception.
3847
3848 Also consider that the attach is complete when we see a
3849 TARGET_SIGNAL_0. In non-stop mode, GDB will explicitly tell
3850 the target to stop all threads of the inferior, in case the
3851 low level attach operation doesn't stop them implicitly. If
3852 they weren't stopped implicitly, then the stub will report a
3853 TARGET_SIGNAL_0, meaning: stopped for no particular reason
3854 other than GDB's request. */
3855 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3856 && (ecs->event_thread->stop_signal == TARGET_SIGNAL_STOP
3857 || ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3858 || ecs->event_thread->stop_signal == TARGET_SIGNAL_0))
3859 {
3860 stop_stepping (ecs);
3861 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3862 return;
3863 }
3864
3865 /* See if there is a breakpoint at the current PC. */
3866 ecs->event_thread->stop_bpstat
3867 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3868 stop_pc, ecs->ptid);
3869
3870       /* The following is needed in case a breakpoint condition
3871 	 called a function.  */
3872 stop_print_frame = 1;
3873
3874 /* This is where we handle "moribund" watchpoints. Unlike
3875 	 software breakpoint traps, hardware watchpoint traps are
3876 always distinguishable from random traps. If no high-level
3877 watchpoint is associated with the reported stop data address
3878 anymore, then the bpstat does not explain the signal ---
3879 simply make sure to ignore it if `stopped_by_watchpoint' is
3880 set. */
3881
3882 if (debug_infrun
3883 && ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3884 && !bpstat_explains_signal (ecs->event_thread->stop_bpstat)
3885 && stopped_by_watchpoint)
3886 fprintf_unfiltered (gdb_stdlog, "\
3887 infrun: no user watchpoint explains watchpoint SIGTRAP, ignoring\n");
3888
3889 /* NOTE: cagney/2003-03-29: These two checks for a random signal
3890 at one stage in the past included checks for an inferior
3891 function call's call dummy's return breakpoint. The original
3892 comment, that went with the test, read:
3893
3894 ``End of a stack dummy. Some systems (e.g. Sony news) give
3895 another signal besides SIGTRAP, so check here as well as
3896 above.''
3897
3898 	 If someone ever tries to get call dummies on a
3899 non-executable stack to work (where the target would stop
3900 with something like a SIGSEGV), then those tests might need
3901 to be re-instated. Given, however, that the tests were only
3902 enabled when momentary breakpoints were not being used, I
3903 suspect that it won't be the case.
3904
3905 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
3906 be necessary for call dummies on a non-executable stack on
3907 SPARC. */
3908
3909 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3910 ecs->random_signal
3911 = !(bpstat_explains_signal (ecs->event_thread->stop_bpstat)
3912 || stopped_by_watchpoint
3913 || ecs->event_thread->trap_expected
3914 || (ecs->event_thread->step_range_end
3915 && ecs->event_thread->step_resume_breakpoint == NULL));
3916 else
3917 {
3918 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
3919 if (!ecs->random_signal)
3920 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3921 }
3922 }
3923
3924 /* When we reach this point, we've pretty much decided
3925 that the reason for stopping must've been a random
3926 (unexpected) signal. */
3927
3928 else
3929 ecs->random_signal = 1;
3930
3931 process_event_stop_test:
3932
3933 /* Re-fetch current thread's frame in case we did a
3934 "goto process_event_stop_test" above. */
3935 frame = get_current_frame ();
3936 gdbarch = get_frame_arch (frame);
3937
3938 /* For the program's own signals, act according to
3939 the signal handling tables. */
3940
3941 if (ecs->random_signal)
3942 {
3943 /* Signal not for debugging purposes. */
3944 int printed = 0;
3945 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
3946
3947 if (debug_infrun)
3948 fprintf_unfiltered (gdb_stdlog, "infrun: random signal %d\n",
3949 ecs->event_thread->stop_signal);
3950
3951 stopped_by_random_signal = 1;
3952
3953 if (signal_print[ecs->event_thread->stop_signal])
3954 {
3955 printed = 1;
3956 target_terminal_ours_for_output ();
3957 print_signal_received_reason (ecs->event_thread->stop_signal);
3958 }
3959 /* Always stop on signals if we're either just gaining control
3960 of the program, or the user explicitly requested this thread
3961 to remain stopped. */
3962 if (stop_soon != NO_STOP_QUIETLY
3963 || ecs->event_thread->stop_requested
3964 || (!inf->detaching
3965 && signal_stop_state (ecs->event_thread->stop_signal)))
3966 {
3967 stop_stepping (ecs);
3968 return;
3969 }
3970 /* If not going to stop, give terminal back
3971 if we took it away. */
3972 else if (printed)
3973 target_terminal_inferior ();
3974
3975 /* Clear the signal if it should not be passed. */
3976 if (signal_program[ecs->event_thread->stop_signal] == 0)
3977 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3978
3979 if (ecs->event_thread->prev_pc == stop_pc
3980 && ecs->event_thread->trap_expected
3981 && ecs->event_thread->step_resume_breakpoint == NULL)
3982 {
3983 /* We were just starting a new sequence, attempting to
3984 single-step off of a breakpoint and expecting a SIGTRAP.
3985 Instead this signal arrives. This signal will take us out
3986 of the stepping range so GDB needs to remember to, when
3987 the signal handler returns, resume stepping off that
3988 breakpoint. */
3989 /* To simplify things, "continue" is forced to use the same
3990 code paths as single-step - set a breakpoint at the
3991 signal return address and then, once hit, step off that
3992 breakpoint. */
3993 if (debug_infrun)
3994 fprintf_unfiltered (gdb_stdlog,
3995 "infrun: signal arrived while stepping over "
3996 "breakpoint\n");
3997
3998 insert_step_resume_breakpoint_at_frame (frame);
3999 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4000 keep_going (ecs);
4001 return;
4002 }
4003
4004 if (ecs->event_thread->step_range_end != 0
4005 && ecs->event_thread->stop_signal != TARGET_SIGNAL_0
4006 && (ecs->event_thread->step_range_start <= stop_pc
4007 && stop_pc < ecs->event_thread->step_range_end)
4008 && frame_id_eq (get_stack_frame_id (frame),
4009 ecs->event_thread->step_stack_frame_id)
4010 && ecs->event_thread->step_resume_breakpoint == NULL)
4011 {
4012 /* The inferior is about to take a signal that will take it
4013 out of the single step range. Set a breakpoint at the
4014 current PC (which is presumably where the signal handler
4015 will eventually return) and then allow the inferior to
4016 run free.
4017
4018 Note that this is only needed for a signal delivered
4019 while in the single-step range. Nested signals aren't a
4020 problem as they eventually all return. */
4021 if (debug_infrun)
4022 fprintf_unfiltered (gdb_stdlog,
4023 "infrun: signal may take us out of "
4024 "single-step range\n");
4025
4026 insert_step_resume_breakpoint_at_frame (frame);
4027 keep_going (ecs);
4028 return;
4029 }
4030
4031       /* Note: step_resume_breakpoint may be non-NULL.  This occurs
4032 when either there's a nested signal, or when there's a
4033 pending signal enabled just as the signal handler returns
4034 (leaving the inferior at the step-resume-breakpoint without
4035 actually executing it). Either way continue until the
4036 breakpoint is really hit. */
4037 keep_going (ecs);
4038 return;
4039 }
4040
4041 /* Handle cases caused by hitting a breakpoint. */
4042 {
4043 CORE_ADDR jmp_buf_pc;
4044 struct bpstat_what what;
4045
4046 what = bpstat_what (ecs->event_thread->stop_bpstat);
4047
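    /* Remember whether this stop is at a call-dummy breakpoint (the
       return breakpoint of an inferior function call); stop_stack_dummy
       is examined later by the code that actually stops.  */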
4048 if (what.call_dummy)
4049 {
4050 stop_stack_dummy = what.call_dummy;
4051 }
4052
4053 /* If we hit an internal event that triggers symbol changes, the
4054 current frame will be invalidated within bpstat_what (e.g., if
4055 we hit an internal solib event). Re-fetch it. */
4056 frame = get_current_frame ();
4057 gdbarch = get_frame_arch (frame);
4058
4059 switch (what.main_action)
4060 {
4061 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
4062 /* If we hit the breakpoint at longjmp while stepping, we
4063 install a momentary breakpoint at the target of the
4064 jmp_buf. */
4065
4066 if (debug_infrun)
4067 fprintf_unfiltered (gdb_stdlog,
4068 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
4069
4070 ecs->event_thread->stepping_over_breakpoint = 1;
4071
4072 if (!gdbarch_get_longjmp_target_p (gdbarch)
4073 || !gdbarch_get_longjmp_target (gdbarch, frame, &jmp_buf_pc))
4074 {
4075 if (debug_infrun)
4076 fprintf_unfiltered (gdb_stdlog, "\
4077 infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME (!gdbarch_get_longjmp_target)\n");
4078 keep_going (ecs);
4079 return;
4080 }
4081
4082 /* We're going to replace the current step-resume breakpoint
4083 with a longjmp-resume breakpoint. */
4084 delete_step_resume_breakpoint (ecs->event_thread);
4085
4086 /* Insert a breakpoint at resume address. */
4087 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4088
4089 keep_going (ecs);
4090 return;
4091
4092 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4093 if (debug_infrun)
4094 fprintf_unfiltered (gdb_stdlog,
4095 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4096
4097 gdb_assert (ecs->event_thread->step_resume_breakpoint != NULL);
4098 delete_step_resume_breakpoint (ecs->event_thread);
4099
4100 ecs->event_thread->stop_step = 1;
4101 print_end_stepping_range_reason ();
4102 stop_stepping (ecs);
4103 return;
4104
4105 case BPSTAT_WHAT_SINGLE:
4106 if (debug_infrun)
4107 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4108 ecs->event_thread->stepping_over_breakpoint = 1;
4109 /* Still need to check other stuff, at least the case
4110 where we are stepping and step out of the right range. */
4111 break;
4112
4113 case BPSTAT_WHAT_STOP_NOISY:
4114 if (debug_infrun)
4115 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4116 stop_print_frame = 1;
4117
4118       /* We are about to nuke the step_resume_breakpoint via the
4119 cleanup chain, so no need to worry about it here. */
4120
4121 stop_stepping (ecs);
4122 return;
4123
4124 case BPSTAT_WHAT_STOP_SILENT:
4125 if (debug_infrun)
4126 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4127 stop_print_frame = 0;
4128
4129       /* We are about to nuke the step_resume_breakpoint via the
4130 cleanup chain, so no need to worry about it here. */
4131
4132 stop_stepping (ecs);
4133 return;
4134
4135 case BPSTAT_WHAT_STEP_RESUME:
4136 if (debug_infrun)
4137 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4138
4139 delete_step_resume_breakpoint (ecs->event_thread);
4140 if (ecs->event_thread->step_after_step_resume_breakpoint)
4141 {
4142 /* Back when the step-resume breakpoint was inserted, we
4143 were trying to single-step off a breakpoint. Go back
4144 to doing that. */
4145 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4146 ecs->event_thread->stepping_over_breakpoint = 1;
4147 keep_going (ecs);
4148 return;
4149 }
4150 if (stop_pc == ecs->stop_func_start
4151 && execution_direction == EXEC_REVERSE)
4152 {
4153 /* We are stepping over a function call in reverse, and
4154 just hit the step-resume breakpoint at the start
4155 address of the function. Go back to single-stepping,
4156 which should take us back to the function call. */
4157 ecs->event_thread->stepping_over_breakpoint = 1;
4158 keep_going (ecs);
4159 return;
4160 }
4161 break;
4162
4163 case BPSTAT_WHAT_KEEP_CHECKING:
4164 break;
4165 }
4166 }
4167
4168 /* We come here if we hit a breakpoint but should not
4169 stop for it. Possibly we also were stepping
4170 and should stop for that. So fall through and
4171 test for stepping. But, if not stepping,
4172 do not stop. */
4173
4174 /* In all-stop mode, if we're currently stepping but have stopped in
4175 some other thread, we need to switch back to the stepped thread. */
4176 if (!non_stop)
4177 {
4178 struct thread_info *tp;
4179
4180 tp = iterate_over_threads (currently_stepping_or_nexting_callback,
4181 ecs->event_thread);
4182 if (tp)
4183 {
4184 /* However, if the current thread is blocked on some internal
4185 breakpoint, and we simply need to step over that breakpoint
4186 to get it going again, do that first. */
4187 if ((ecs->event_thread->trap_expected
4188 && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP)
4189 || ecs->event_thread->stepping_over_breakpoint)
4190 {
4191 keep_going (ecs);
4192 return;
4193 }
4194
4195 /* If the stepping thread exited, then don't try to switch
4196 back and resume it, which could fail in several different
4197 ways depending on the target. Instead, just keep going.
4198
4199 We can find a stepping dead thread in the thread list in
4200 two cases:
4201
4202 - The target supports thread exit events, and when the
4203 target tries to delete the thread from the thread list,
4204 inferior_ptid pointed at the exiting thread. In such
4205 case, calling delete_thread does not really remove the
4206 thread from the list; instead, the thread is left listed,
4207 with 'exited' state.
4208
4209 - The target's debug interface does not support thread
4210 exit events, and so we have no idea whatsoever if the
4211 previously stepping thread is still alive. For that
4212 reason, we need to synchronously query the target
4213 now. */
4214 if (is_exited (tp->ptid)
4215 || !target_thread_alive (tp->ptid))
4216 {
4217 if (debug_infrun)
4218 fprintf_unfiltered (gdb_stdlog, "\
4219 infrun: not switching back to stepped thread, it has vanished\n");
4220
4221 delete_thread (tp->ptid);
4222 keep_going (ecs);
4223 return;
4224 }
4225
4226 /* Otherwise, we no longer expect a trap in the current thread.
4227 Clear the trap_expected flag before switching back -- this is
4228 what keep_going would do as well, if we called it. */
4229 ecs->event_thread->trap_expected = 0;
4230
4231 if (debug_infrun)
4232 fprintf_unfiltered (gdb_stdlog,
4233 "infrun: switching back to stepped thread\n");
4234
4235 ecs->event_thread = tp;
4236 ecs->ptid = tp->ptid;
4237 context_switch (ecs->ptid);
4238 keep_going (ecs);
4239 return;
4240 }
4241 }
4242
4243 /* Are we stepping to get the inferior out of the dynamic linker's
4244 hook (and possibly the dld itself) after catching a shlib
4245 event? */
4246 if (ecs->event_thread->stepping_through_solib_after_catch)
4247 {
4248 #if defined(SOLIB_ADD)
4249 /* Have we reached our destination? If not, keep going. */
4250 if (SOLIB_IN_DYNAMIC_LINKER (PIDGET (ecs->ptid), stop_pc))
4251 {
4252 if (debug_infrun)
4253 fprintf_unfiltered (gdb_stdlog, "infrun: stepping in dynamic linker\n");
4254 ecs->event_thread->stepping_over_breakpoint = 1;
4255 keep_going (ecs);
4256 return;
4257 }
4258 #endif
4259 if (debug_infrun)
4260 fprintf_unfiltered (gdb_stdlog, "infrun: step past dynamic linker\n");
4261 /* Else, stop and report the catchpoint(s) whose triggering
4262 caused us to begin stepping. */
4263 ecs->event_thread->stepping_through_solib_after_catch = 0;
4264 bpstat_clear (&ecs->event_thread->stop_bpstat);
4265 ecs->event_thread->stop_bpstat
4266 = bpstat_copy (ecs->event_thread->stepping_through_solib_catchpoints);
4267 bpstat_clear (&ecs->event_thread->stepping_through_solib_catchpoints);
4268 stop_print_frame = 1;
4269 stop_stepping (ecs);
4270 return;
4271 }
4272
4273 if (ecs->event_thread->step_resume_breakpoint)
4274 {
4275 if (debug_infrun)
4276 fprintf_unfiltered (gdb_stdlog,
4277 "infrun: step-resume breakpoint is inserted\n");
4278
4279 /* Having a step-resume breakpoint overrides anything
4280 else having to do with stepping commands until
4281 that breakpoint is reached. */
4282 keep_going (ecs);
4283 return;
4284 }
4285
4286 if (ecs->event_thread->step_range_end == 0)
4287 {
4288 if (debug_infrun)
4289 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4290 /* Likewise if we aren't even stepping. */
4291 keep_going (ecs);
4292 return;
4293 }
4294
4295 /* Re-fetch current thread's frame in case the code above caused
4296 the frame cache to be re-initialized, making our FRAME variable
4297 a dangling pointer. */
4298 frame = get_current_frame ();
4299 gdbarch = get_frame_arch (frame);
4300
4301 /* If stepping through a line, keep going if still within it.
4302
4303 Note that step_range_end is the address of the first instruction
4304 beyond the step range, and NOT the address of the last instruction
4305 within it!
4306
4307 Note also that during reverse execution, we may be stepping
4308 through a function epilogue and therefore must detect when
4309 the current-frame changes in the middle of a line. */
4310
4311 if (stop_pc >= ecs->event_thread->step_range_start
4312 && stop_pc < ecs->event_thread->step_range_end
4313 && (execution_direction != EXEC_REVERSE
4314 || frame_id_eq (get_frame_id (frame),
4315 ecs->event_thread->step_frame_id)))
4316 {
4317 if (debug_infrun)
4318 fprintf_unfiltered
4319 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4320 paddress (gdbarch, ecs->event_thread->step_range_start),
4321 paddress (gdbarch, ecs->event_thread->step_range_end));
4322
4323 /* When stepping backward, stop at beginning of line range
4324 (unless it's the function entry point, in which case
4325 keep going back to the call point). */
4326 if (stop_pc == ecs->event_thread->step_range_start
4327 && stop_pc != ecs->stop_func_start
4328 && execution_direction == EXEC_REVERSE)
4329 {
4330 ecs->event_thread->stop_step = 1;
4331 print_end_stepping_range_reason ();
4332 stop_stepping (ecs);
4333 }
4334 else
4335 keep_going (ecs);
4336
4337 return;
4338 }
4339
4340 /* We stepped out of the stepping range. */
4341
4342 /* If we are stepping at the source level and entered the runtime
4343 loader dynamic symbol resolution code...
4344
4345 EXEC_FORWARD: we keep on single stepping until we exit the run
4346 time loader code and reach the callee's address.
4347
4348 EXEC_REVERSE: we've already executed the callee (backward), and
4349 the runtime loader code is handled just like any other
4350 undebuggable function call. Now we need only keep stepping
4351 backward through the trampoline code, and that's handled further
4352 down, so there is nothing for us to do here. */
4353
4354 if (execution_direction != EXEC_REVERSE
4355 && ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4356 && in_solib_dynsym_resolve_code (stop_pc))
4357 {
4358 CORE_ADDR pc_after_resolver =
4359 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4360
4361 if (debug_infrun)
4362 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into dynsym resolve code\n");
4363
4364 if (pc_after_resolver)
4365 {
4366 /* Set up a step-resume breakpoint at the address
4367 indicated by SKIP_SOLIB_RESOLVER. */
4368 struct symtab_and_line sr_sal;
4369
4370 init_sal (&sr_sal);
4371 sr_sal.pc = pc_after_resolver;
4372 sr_sal.pspace = get_frame_program_space (frame);
4373
4374 insert_step_resume_breakpoint_at_sal (gdbarch,
4375 sr_sal, null_frame_id);
4376 }
4377
4378 keep_going (ecs);
4379 return;
4380 }
4381
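  /* Unless this is a stepi/nexti (a step_range_end of 1 means a
     single-instruction step range), single-step through a signal
     trampoline that we have ended up in.  */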
4382 if (ecs->event_thread->step_range_end != 1
4383 && (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4384 || ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4385 && get_frame_type (frame) == SIGTRAMP_FRAME)
4386 {
4387 if (debug_infrun)
4388 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into signal trampoline\n");
4389 /* The inferior, while doing a "step" or "next", has ended up in
4390 a signal trampoline (either by a signal being delivered or by
4391 the signal handler returning). Just single-step until the
4392 inferior leaves the trampoline (either by calling the handler
4393 or returning). */
4394 keep_going (ecs);
4395 return;
4396 }
4397
4398 /* Check for subroutine calls. The check for the current frame
4399 equalling the step ID is not necessary - the check of the
4400 previous frame's ID is sufficient - but it is a common case and
4401 cheaper than checking the previous frame's ID.
4402
4403 NOTE: frame_id_eq will never report two invalid frame IDs as
4404 being equal, so to get into this block, both the current and
4405 previous frame must have valid frame IDs. */
4406 /* The outer_frame_id check is a heuristic to detect stepping
4407 through startup code. If we step over an instruction which
4408 sets the stack pointer from an invalid value to a valid value,
4409 we may detect that as a subroutine call from the mythical
4410 "outermost" function. This could be fixed by marking
4411 outermost frames as !stack_p,code_p,special_p. Then the
4412 initial outermost frame, before sp was valid, would
4413 have code_addr == &_start. See the comment in frame_id_eq
4414 for more. */
4415 if (!frame_id_eq (get_stack_frame_id (frame),
4416 ecs->event_thread->step_stack_frame_id)
4417 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4418 ecs->event_thread->step_stack_frame_id)
4419 && (!frame_id_eq (ecs->event_thread->step_stack_frame_id,
4420 outer_frame_id)
4421 || step_start_function != find_pc_function (stop_pc))))
4422 {
4423 CORE_ADDR real_stop_pc;
4424
4425 if (debug_infrun)
4426 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4427
4428 if ((ecs->event_thread->step_over_calls == STEP_OVER_NONE)
4429 || ((ecs->event_thread->step_range_end == 1)
4430 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4431 ecs->stop_func_start)))
4432 {
4433 /* I presume that step_over_calls is only 0 when we're
4434 supposed to be stepping at the assembly language level
4435 ("stepi"). Just stop. */
4436       /* Also, maybe we just did a "nexti" inside a prologue, so we
4437 thought it was a subroutine call but it was not. Stop as
4438 well. FENN */
4439 /* And this works the same backward as frontward. MVS */
4440 ecs->event_thread->stop_step = 1;
4441 print_end_stepping_range_reason ();
4442 stop_stepping (ecs);
4443 return;
4444 }
4445
4446 /* Reverse stepping through solib trampolines. */
4447
4448 if (execution_direction == EXEC_REVERSE
4449 && ecs->event_thread->step_over_calls != STEP_OVER_NONE
4450 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4451 || (ecs->stop_func_start == 0
4452 && in_solib_dynsym_resolve_code (stop_pc))))
4453 {
4454 /* Any solib trampoline code can be handled in reverse
4455 by simply continuing to single-step. We have already
4456 executed the solib function (backwards), and a few
4457 steps will take us back through the trampoline to the
4458 caller. */
4459 keep_going (ecs);
4460 return;
4461 }
4462
4463 if (ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4464 {
4465 /* We're doing a "next".
4466
4467 Normal (forward) execution: set a breakpoint at the
4468 callee's return address (the address at which the caller
4469 will resume).
4470
4471 	     Reverse (backward) execution: set the step-resume
4472 breakpoint at the start of the function that we just
4473 stepped into (backwards), and continue to there. When we
4474 get there, we'll need to single-step back to the caller. */
4475
4476 if (execution_direction == EXEC_REVERSE)
4477 {
4478 struct symtab_and_line sr_sal;
4479
4480 /* Normal function call return (static or dynamic). */
4481 init_sal (&sr_sal);
4482 sr_sal.pc = ecs->stop_func_start;
4483 sr_sal.pspace = get_frame_program_space (frame);
4484 insert_step_resume_breakpoint_at_sal (gdbarch,
4485 sr_sal, null_frame_id);
4486 }
4487 else
4488 insert_step_resume_breakpoint_at_caller (frame);
4489
4490 keep_going (ecs);
4491 return;
4492 }
4493
4494 /* If we are in a function call trampoline (a stub between the
4495 calling routine and the real function), locate the real
4496 function. That's what tells us (a) whether we want to step
4497 into it at all, and (b) what prologue we want to run to the
4498 end of, if we do step into it. */
4499 real_stop_pc = skip_language_trampoline (frame, stop_pc);
4500 if (real_stop_pc == 0)
4501 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4502 if (real_stop_pc != 0)
4503 ecs->stop_func_start = real_stop_pc;
4504
4505 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
4506 {
4507 struct symtab_and_line sr_sal;
4508
4509 init_sal (&sr_sal);
4510 sr_sal.pc = ecs->stop_func_start;
4511 sr_sal.pspace = get_frame_program_space (frame);
4512
4513 insert_step_resume_breakpoint_at_sal (gdbarch,
4514 sr_sal, null_frame_id);
4515 keep_going (ecs);
4516 return;
4517 }
4518
4519 /* If we have line number information for the function we are
4520 thinking of stepping into, step into it.
4521
4522 If there are several symtabs at that PC (e.g. with include
4523 files), just want to know whether *any* of them have line
4524 numbers. find_pc_line handles this. */
4525 {
4526 struct symtab_and_line tmp_sal;
4527
4528 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
4529 tmp_sal.pspace = get_frame_program_space (frame);
4530 if (tmp_sal.line != 0)
4531 {
4532 if (execution_direction == EXEC_REVERSE)
4533 handle_step_into_function_backward (gdbarch, ecs);
4534 else
4535 handle_step_into_function (gdbarch, ecs);
4536 return;
4537 }
4538 }
4539
4540 /* If we have no line number and the step-stop-if-no-debug is
4541 	 set, we stop the step so that the user has a chance to switch
4542 	 to assembly mode.  */
4543 if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4544 && step_stop_if_no_debug)
4545 {
4546 ecs->event_thread->stop_step = 1;
4547 print_end_stepping_range_reason ();
4548 stop_stepping (ecs);
4549 return;
4550 }
4551
4552 if (execution_direction == EXEC_REVERSE)
4553 {
4554 /* Set a breakpoint at callee's start address.
4555 From there we can step once and be back in the caller. */
4556 struct symtab_and_line sr_sal;
4557
4558 init_sal (&sr_sal);
4559 sr_sal.pc = ecs->stop_func_start;
4560 sr_sal.pspace = get_frame_program_space (frame);
4561 insert_step_resume_breakpoint_at_sal (gdbarch,
4562 sr_sal, null_frame_id);
4563 }
4564 else
4565 /* Set a breakpoint at callee's return address (the address
4566 at which the caller will resume). */
4567 insert_step_resume_breakpoint_at_caller (frame);
4568
4569 keep_going (ecs);
4570 return;
4571 }
4572
4573 /* Reverse stepping through solib trampolines. */
4574
4575 if (execution_direction == EXEC_REVERSE
4576 && ecs->event_thread->step_over_calls != STEP_OVER_NONE)
4577 {
4578 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4579 || (ecs->stop_func_start == 0
4580 && in_solib_dynsym_resolve_code (stop_pc)))
4581 {
4582 /* Any solib trampoline code can be handled in reverse
4583 by simply continuing to single-step. We have already
4584 executed the solib function (backwards), and a few
4585 steps will take us back through the trampoline to the
4586 caller. */
4587 keep_going (ecs);
4588 return;
4589 }
4590 else if (in_solib_dynsym_resolve_code (stop_pc))
4591 {
4592 /* Stepped backward into the solib dynsym resolver.
4593 Set a breakpoint at its start and continue, then
4594 one more step will take us out. */
4595 struct symtab_and_line sr_sal;
4596
4597 init_sal (&sr_sal);
4598 sr_sal.pc = ecs->stop_func_start;
4599 sr_sal.pspace = get_frame_program_space (frame);
4600 insert_step_resume_breakpoint_at_sal (gdbarch,
4601 sr_sal, null_frame_id);
4602 keep_going (ecs);
4603 return;
4604 }
4605 }
4606
4607 /* If we're in the return path from a shared library trampoline,
4608 we want to proceed through the trampoline when stepping. */
4609 if (gdbarch_in_solib_return_trampoline (gdbarch,
4610 stop_pc, ecs->stop_func_name))
4611 {
4612 /* Determine where this trampoline returns. */
4613 CORE_ADDR real_stop_pc;
4614
4615 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4616
4617 if (debug_infrun)
4618 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into solib return tramp\n");
4619
4620 /* Only proceed through if we know where it's going. */
4621 if (real_stop_pc)
4622 {
4623 /* And put the step-breakpoint there and go until there. */
4624 struct symtab_and_line sr_sal;
4625
4626 init_sal (&sr_sal); /* initialize to zeroes */
4627 sr_sal.pc = real_stop_pc;
4628 sr_sal.section = find_pc_overlay (sr_sal.pc);
4629 sr_sal.pspace = get_frame_program_space (frame);
4630
4631 /* Do not specify what the fp should be when we stop since
4632 on some machines the prologue is where the new fp value
4633 is established. */
4634 insert_step_resume_breakpoint_at_sal (gdbarch,
4635 sr_sal, null_frame_id);
4636
4637 /* Restart without fiddling with the step ranges or
4638 other state. */
4639 keep_going (ecs);
4640 return;
4641 }
4642 }
4643
4644 stop_pc_sal = find_pc_line (stop_pc, 0);
4645
4646 /* NOTE: tausq/2004-05-24: This if block used to be done before all
4647      the trampoline processing logic; however, there are some trampolines
4648 that have no names, so we should do trampoline handling first. */
4649 if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4650 && ecs->stop_func_name == NULL
4651 && stop_pc_sal.line == 0)
4652 {
4653 if (debug_infrun)
4654 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into undebuggable function\n");
4655
4656 /* The inferior just stepped into, or returned to, an
4657 undebuggable function (where there is no debugging information
4658 and no line number corresponding to the address where the
4659 inferior stopped). Since we want to skip this kind of code,
4660 we keep going until the inferior returns from this
4661 function - unless the user has asked us not to (via
4662 set step-mode) or we no longer know how to get back
4663 to the call site. */
4664 if (step_stop_if_no_debug
4665 || !frame_id_p (frame_unwind_caller_id (frame)))
4666 {
4667 /* If we have no line number and the step-stop-if-no-debug
4668 is set, we stop the step so that the user has a chance to
4669 	 switch to assembly mode.  */
4670 ecs->event_thread->stop_step = 1;
4671 print_end_stepping_range_reason ();
4672 stop_stepping (ecs);
4673 return;
4674 }
4675 else
4676 {
4677 /* Set a breakpoint at callee's return address (the address
4678 at which the caller will resume). */
4679 insert_step_resume_breakpoint_at_caller (frame);
4680 keep_going (ecs);
4681 return;
4682 }
4683 }
4684
4685 if (ecs->event_thread->step_range_end == 1)
4686 {
4687 /* It is stepi or nexti. We always want to stop stepping after
4688 one instruction. */
4689 if (debug_infrun)
4690 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
4691 ecs->event_thread->stop_step = 1;
4692 print_end_stepping_range_reason ();
4693 stop_stepping (ecs);
4694 return;
4695 }
4696
4697 if (stop_pc_sal.line == 0)
4698 {
4699       /* We have no line number information.  That means we need to stop
4700 stepping (does this always happen right after one instruction,
4701 when we do "s" in a function with no line numbers,
4702 or can this happen as a result of a return or longjmp?). */
4703 if (debug_infrun)
4704 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
4705 ecs->event_thread->stop_step = 1;
4706 print_end_stepping_range_reason ();
4707 stop_stepping (ecs);
4708 return;
4709 }
4710
4711 /* Look for "calls" to inlined functions, part one. If the inline
4712 frame machinery detected some skipped call sites, we have entered
4713 a new inline function. */
4714
4715 if (frame_id_eq (get_frame_id (get_current_frame ()),
4716 ecs->event_thread->step_frame_id)
4717 && inline_skipped_frames (ecs->ptid))
4718 {
4719 struct symtab_and_line call_sal;
4720
4721 if (debug_infrun)
4722 fprintf_unfiltered (gdb_stdlog,
4723 "infrun: stepped into inlined function\n");
4724
4725 find_frame_sal (get_current_frame (), &call_sal);
4726
4727 if (ecs->event_thread->step_over_calls != STEP_OVER_ALL)
4728 {
4729 /* For "step", we're going to stop. But if the call site
4730 for this inlined function is on the same source line as
4731 we were previously stepping, go down into the function
4732 first. Otherwise stop at the call site. */
4733
4734 if (call_sal.line == ecs->event_thread->current_line
4735 && call_sal.symtab == ecs->event_thread->current_symtab)
4736 step_into_inline_frame (ecs->ptid);
4737
4738 ecs->event_thread->stop_step = 1;
4739 print_end_stepping_range_reason ();
4740 stop_stepping (ecs);
4741 return;
4742 }
4743 else
4744 {
4745 /* For "next", we should stop at the call site if it is on a
4746 different source line. Otherwise continue through the
4747 inlined function. */
4748 if (call_sal.line == ecs->event_thread->current_line
4749 && call_sal.symtab == ecs->event_thread->current_symtab)
4750 keep_going (ecs);
4751 else
4752 {
4753 ecs->event_thread->stop_step = 1;
4754 print_end_stepping_range_reason ();
4755 stop_stepping (ecs);
4756 }
4757 return;
4758 }
4759 }
4760
4761 /* Look for "calls" to inlined functions, part two. If we are still
4762 in the same real function we were stepping through, but we have
4763 to go further up to find the exact frame ID, we are stepping
4764 through a more inlined call beyond its call site. */
4765
4766 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
4767 && !frame_id_eq (get_frame_id (get_current_frame ()),
4768 ecs->event_thread->step_frame_id)
4769 && stepped_in_from (get_current_frame (),
4770 ecs->event_thread->step_frame_id))
4771 {
4772 if (debug_infrun)
4773 fprintf_unfiltered (gdb_stdlog,
4774 "infrun: stepping through inlined function\n");
4775
4776 if (ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4777 keep_going (ecs);
4778 else
4779 {
4780 ecs->event_thread->stop_step = 1;
4781 print_end_stepping_range_reason ();
4782 stop_stepping (ecs);
4783 }
4784 return;
4785 }
4786
4787 if ((stop_pc == stop_pc_sal.pc)
4788 && (ecs->event_thread->current_line != stop_pc_sal.line
4789 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
4790 {
4791 /* We are at the start of a different line. So stop. Note that
4792 we don't stop if we step into the middle of a different line.
4793 That is said to make things like for (;;) statements work
4794 better. */
4795 if (debug_infrun)
4796 fprintf_unfiltered (gdb_stdlog, "infrun: stepped to a different line\n");
4797 ecs->event_thread->stop_step = 1;
4798 print_end_stepping_range_reason ();
4799 stop_stepping (ecs);
4800 return;
4801 }
4802
4803 /* We aren't done stepping.
4804
4805 Optimize by setting the stepping range to the line.
4806 (We might not be in the original line, but if we entered a
4807 new line in mid-statement, we continue stepping. This makes
4808 things like for(;;) statements work better.) */
4809
4810 ecs->event_thread->step_range_start = stop_pc_sal.pc;
4811 ecs->event_thread->step_range_end = stop_pc_sal.end;
4812 set_step_info (frame, stop_pc_sal);
4813
4814 if (debug_infrun)
4815 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
4816 keep_going (ecs);
4817 }
4818
4819 /* Is thread TP in the middle of single-stepping? */
4820
4821 static int
4822 currently_stepping (struct thread_info *tp)
4823 {
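  /* A thread is stepping if it has an active step range with no
     step-resume breakpoint pending, is being single-stepped off a
     breakpoint (trap_expected), is stepping through a shared library
     hook after a catchpoint, or if a bpstat requires single-stepping
     (e.g. for software watchpoints).  */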
4824 return ((tp->step_range_end && tp->step_resume_breakpoint == NULL)
4825 || tp->trap_expected
4826 || tp->stepping_through_solib_after_catch
4827 || bpstat_should_step ());
4828 }
4829
4830 /* Returns true if any thread *but* the one passed in "data" is in the
4831 middle of stepping or of handling a "next". */
4832
4833 static int
4834 currently_stepping_or_nexting_callback (struct thread_info *tp, void *data)
4835 {
4836 if (tp == data)
4837 return 0;
4838
4839 return (tp->step_range_end
4840 || tp->trap_expected
4841 || tp->stepping_through_solib_after_catch);
4842 }
4843
4844 /* Inferior has stepped into a subroutine call with source code that
4845    we should not step over.  Step to the first line of code in
4846 it. */
4847
4848 static void
4849 handle_step_into_function (struct gdbarch *gdbarch,
4850 struct execution_control_state *ecs)
4851 {
4852 struct symtab *s;
4853 struct symtab_and_line stop_func_sal, sr_sal;
4854
4855 s = find_pc_symtab (stop_pc);
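  /* If we have symbols for the function and it is not written in
     assembly, advance the recorded start address past the prologue.  */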
4856 if (s && s->language != language_asm)
4857 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
4858 ecs->stop_func_start);
4859
4860 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
4861 /* Use the step_resume_break to step until the end of the prologue,
4862 even if that involves jumps (as it seems to on the vax under
4863 4.2). */
4864 /* If the prologue ends in the middle of a source line, continue to
4865 the end of that source line (if it is still within the function).
4866 Otherwise, just go to end of prologue. */
4867 if (stop_func_sal.end
4868 && stop_func_sal.pc != ecs->stop_func_start
4869 && stop_func_sal.end < ecs->stop_func_end)
4870 ecs->stop_func_start = stop_func_sal.end;
4871
4872 /* Architectures which require breakpoint adjustment might not be able
4873 to place a breakpoint at the computed address. If so, the test
4874 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
4875 ecs->stop_func_start to an address at which a breakpoint may be
4876 legitimately placed.
4877
4878 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
4879 made, GDB will enter an infinite loop when stepping through
4880 optimized code consisting of VLIW instructions which contain
4881 subinstructions corresponding to different source lines. On
4882 FR-V, it's not permitted to place a breakpoint on any but the
4883 first subinstruction of a VLIW instruction. When a breakpoint is
4884 set, GDB will adjust the breakpoint address to the beginning of
4885 the VLIW instruction. Thus, we need to make the corresponding
4886 adjustment here when computing the stop address. */
4887
4888 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
4889 {
4890 ecs->stop_func_start
4891 = gdbarch_adjust_breakpoint_address (gdbarch,
4892 ecs->stop_func_start);
4893 }
4894
4895 if (ecs->stop_func_start == stop_pc)
4896 {
4897 /* We are already there: stop now. */
4898 ecs->event_thread->stop_step = 1;
4899 print_end_stepping_range_reason ();
4900 stop_stepping (ecs);
4901 return;
4902 }
4903 else
4904 {
4905 /* Put the step-breakpoint there and go until there. */
4906 init_sal (&sr_sal); /* initialize to zeroes */
4907 sr_sal.pc = ecs->stop_func_start;
4908 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
4909 sr_sal.pspace = get_frame_program_space (get_current_frame ());
4910
4911 /* Do not specify what the fp should be when we stop since on
4912 some machines the prologue is where the new fp value is
4913 established. */
4914 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
4915
4916 /* And make sure stepping stops right away then. */
4917 ecs->event_thread->step_range_end = ecs->event_thread->step_range_start;
4918 }
4919 keep_going (ecs);
4920 }
4921
4922 /* Inferior has stepped backward into a subroutine call with source
4923 code that we should not step over. Do step to the beginning of the
4924 last line of code in it. */
4925
4926 static void
4927 handle_step_into_function_backward (struct gdbarch *gdbarch,
4928 struct execution_control_state *ecs)
4929 {
4930 struct symtab *s;
4931 struct symtab_and_line stop_func_sal;
4932
4933 s = find_pc_symtab (stop_pc);
4934 if (s && s->language != language_asm)
4935 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
4936 ecs->stop_func_start);
4937
4938 stop_func_sal = find_pc_line (stop_pc, 0);
4939
4940 /* OK, we're just going to keep stepping here. */
4941 if (stop_func_sal.pc == stop_pc)
4942 {
4943 /* We're there already. Just stop stepping now. */
4944 ecs->event_thread->stop_step = 1;
4945 print_end_stepping_range_reason ();
4946 stop_stepping (ecs);
4947 }
4948 else
4949 {
4950 /* Else just reset the step range and keep going.
4951 No step-resume breakpoint, they don't work for
4952 epilogues, which can have multiple entry paths. */
4953 ecs->event_thread->step_range_start = stop_func_sal.pc;
4954 ecs->event_thread->step_range_end = stop_func_sal.end;
4955 keep_going (ecs);
4956 }
4957 return;
4958 }
4959
4960 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
4961 This is used both to step into functions and to skip over code. */
4962
4963 static void
4964 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
4965 struct symtab_and_line sr_sal,
4966 struct frame_id sr_id)
4967 {
4968 /* There should never be more than one step-resume or longjmp-resume
4969 breakpoint per thread, so we should never be setting a new
4970 step_resume_breakpoint when one is already active. */
4971 gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL);
4972
4973 if (debug_infrun)
4974 fprintf_unfiltered (gdb_stdlog,
4975 "infrun: inserting step-resume breakpoint at %s\n",
4976 paddress (gdbarch, sr_sal.pc));
4977
4978 inferior_thread ()->step_resume_breakpoint
4979 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, bp_step_resume);
4980 }
4981
4982 /* Insert a "step-resume breakpoint" at RETURN_FRAME.pc. This is used
4983 to skip a potential signal handler.
4984
4985 This is called with the interrupted function's frame. The signal
4986 handler, when it returns, will resume the interrupted function at
4987 RETURN_FRAME.pc. */
4988
4989 static void
4990 insert_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
4991 {
4992 struct symtab_and_line sr_sal;
4993 struct gdbarch *gdbarch;
4994
4995 gdb_assert (return_frame != NULL);
4996 init_sal (&sr_sal); /* initialize to zeros */
4997
4998 gdbarch = get_frame_arch (return_frame);
4999 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
5000 sr_sal.section = find_pc_overlay (sr_sal.pc);
5001 sr_sal.pspace = get_frame_program_space (return_frame);
5002
5003 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5004 get_stack_frame_id (return_frame));
5005 }
5006
5007 /* Similar to insert_step_resume_breakpoint_at_frame, except that it
5008 inserts a breakpoint at the previous frame's PC. This is used to
5009 skip a function after stepping into it (for "next" or if the called
5010 function has no debugging information).
5011
5012 The current function has almost always been reached by single
5013 stepping a call or return instruction. NEXT_FRAME belongs to the
5014 current function, and the breakpoint will be set at the caller's
5015 resume address.
5016
5017 This is a separate function rather than reusing
5018 insert_step_resume_breakpoint_at_frame in order to avoid
5019 get_prev_frame, which may stop prematurely (see the implementation
5020 of frame_unwind_caller_id for an example). */
5021
5022 static void
5023 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
5024 {
5025 struct symtab_and_line sr_sal;
5026 struct gdbarch *gdbarch;
5027
5028 /* We shouldn't have gotten here if we don't know where the call site
5029 is. */
5030 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
5031
5032 init_sal (&sr_sal); /* initialize to zeros */
5033
5034 gdbarch = frame_unwind_caller_arch (next_frame);
5035 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
5036 frame_unwind_caller_pc (next_frame));
5037 sr_sal.section = find_pc_overlay (sr_sal.pc);
5038 sr_sal.pspace = frame_unwind_program_space (next_frame);
5039
5040 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5041 frame_unwind_caller_id (next_frame));
5042 }
5043
5044 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
5045 new breakpoint at the target of a jmp_buf. The handling of
5046 longjmp-resume uses the same mechanisms used for handling
5047 "step-resume" breakpoints. */
5048
5049 static void
5050 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
5051 {
5052 /* There should never be more than one step-resume or longjmp-resume
5053 breakpoint per thread, so we should never be setting a new
5054 longjmp_resume_breakpoint when one is already active. */
5055 gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL);
5056
5057 if (debug_infrun)
5058 fprintf_unfiltered (gdb_stdlog,
5059 "infrun: inserting longjmp-resume breakpoint at %s\n",
5060 paddress (gdbarch, pc));
5061
5062 inferior_thread ()->step_resume_breakpoint =
5063 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
5064 }
5065
5066 static void
5067 stop_stepping (struct execution_control_state *ecs)
5068 {
5069 if (debug_infrun)
5070 fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n");
5071
5072 /* Let callers know we don't want to wait for the inferior anymore. */
5073 ecs->wait_some_more = 0;
5074 }
5075
5076 /* This function handles various cases where we need to continue
5077 waiting for the inferior. */
5078 /* (Used to be the keep_going: label in the old wait_for_inferior) */
5079
5080 static void
5081 keep_going (struct execution_control_state *ecs)
5082 {
5083 /* Make sure normal_stop is called if we get a QUIT handled before
5084 reaching resume. */
5085 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
5086
5087 /* Save the pc before execution, to compare with pc after stop. */
5088 ecs->event_thread->prev_pc
5089 = regcache_read_pc (get_thread_regcache (ecs->ptid));
5090
5091 /* If we did not break out of the event handling above, it means we
5092 should keep running the inferior and not return to the debugger. */
5093
5094 if (ecs->event_thread->trap_expected
5095 && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP)
5096 {
5097 /* We took a signal (which we are supposed to pass through to
5098 the inferior, else we'd not get here) and we haven't yet
5099 gotten our trap. Simply continue. */
5100
5101 discard_cleanups (old_cleanups);
5102 resume (currently_stepping (ecs->event_thread),
5103 ecs->event_thread->stop_signal);
5104 }
5105 else
5106 {
5107 /* Either the trap was not expected, but we are continuing
5108 anyway (the user asked that this signal be passed to the
5109 child)
5110 -- or --
5111 The signal was SIGTRAP, e.g. it was our signal, but we
5112 decided we should resume from it.
5113
5114 We're going to run this baby now!
5115
5116 Note that insert_breakpoints won't try to re-insert
5117 already inserted breakpoints. Therefore, we don't
5118 care if breakpoints were already inserted, or not. */
5119
5120 if (ecs->event_thread->stepping_over_breakpoint)
5121 {
5122 struct regcache *thread_regcache = get_thread_regcache (ecs->ptid);
5123
5124 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
5125 /* Since we can't do a displaced step, we have to remove
5126 the breakpoint while we step it. To keep things
5127 simple, we remove them all. */
5128 remove_breakpoints ();
5129 }
5130 else
5131 {
5132 struct gdb_exception e;
5133
5134 /* Stop stepping when inserting breakpoints
5135 has failed. */
5136 TRY_CATCH (e, RETURN_MASK_ERROR)
5137 {
5138 insert_breakpoints ();
5139 }
5140 if (e.reason < 0)
5141 {
5142 exception_print (gdb_stderr, e);
5143 stop_stepping (ecs);
5144 return;
5145 }
5146 }
5147
5148 ecs->event_thread->trap_expected = ecs->event_thread->stepping_over_breakpoint;
5149
5150 /* Do not deliver SIGNAL_TRAP (except when the user explicitly
5151 specifies that such a signal should be delivered to the
5152 target program).
5153
5154 Typically, this would occur when a user is debugging a
5155 target monitor on a simulator: the target monitor sets a
5156 breakpoint; the simulator encounters this breakpoint and
5157 halts the simulation handing control to GDB; GDB, noting
5158 that the breakpoint isn't valid, returns control back to the
5159 simulator; the simulator then delivers the hardware
5160 equivalent of a SIGNAL_TRAP to the program being debugged. */
5161
5162 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
5163 && !signal_program[ecs->event_thread->stop_signal])
5164 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
5165
5166 discard_cleanups (old_cleanups);
5167 resume (currently_stepping (ecs->event_thread),
5168 ecs->event_thread->stop_signal);
5169 }
5170
5171 prepare_to_wait (ecs);
5172 }
5173
5174 /* This function normally comes after a resume, before
5175 handle_inferior_event exits. It takes care of any last bits of
5176 housekeeping, and sets the all-important wait_some_more flag. */
5177
5178 static void
5179 prepare_to_wait (struct execution_control_state *ecs)
5180 {
5181 if (debug_infrun)
5182 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
5183
5184 /* This is the old end of the while loop. Let everybody know we
5185 want to wait for the inferior some more and get called again
5186 soon. */
5187 ecs->wait_some_more = 1;
5188 }
5189
5190 /* Several print_*_reason functions to print why the inferior has stopped.
5191 We always print something when the inferior exits, or receives a signal.
5192 The rest of the cases are dealt with later on in normal_stop and
5193 print_it_typical. Ideally there should be a call to one of these
5194 print_*_reason functions from handle_inferior_event each time
5195 stop_stepping is called. */
5196
5197 /* Print why the inferior has stopped.
5198 We are done with a step/next/si/ni command. Nothing is printed on the
5199 CLI; in MI mode, emit the stop reason, but only if not in the middle
5200 of doing a "step n" operation for n > 1. */
5201
5202 static void
5203 print_end_stepping_range_reason (void)
5204 {
5205 if ((!inferior_thread ()->step_multi || !inferior_thread ()->stop_step)
5206 && ui_out_is_mi_like_p (uiout))
5207 ui_out_field_string (uiout, "reason",
5208 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
5209 }
5210
5211 /* The inferior was terminated by a signal, print why it stopped. */
5212
5213 static void
5214 print_signal_exited_reason (enum target_signal siggnal)
5215 {
5216 annotate_signalled ();
5217 if (ui_out_is_mi_like_p (uiout))
5218 ui_out_field_string
5219 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
5220 ui_out_text (uiout, "\nProgram terminated with signal ");
5221 annotate_signal_name ();
5222 ui_out_field_string (uiout, "signal-name",
5223 target_signal_to_name (siggnal));
5224 annotate_signal_name_end ();
5225 ui_out_text (uiout, ", ");
5226 annotate_signal_string ();
5227 ui_out_field_string (uiout, "signal-meaning",
5228 target_signal_to_string (siggnal));
5229 annotate_signal_string_end ();
5230 ui_out_text (uiout, ".\n");
5231 ui_out_text (uiout, "The program no longer exists.\n");
5232 }
5233
5234 /* The inferior program is finished, print why it stopped. */
5235
5236 static void
5237 print_exited_reason (int exitstatus)
5238 {
5239 annotate_exited (exitstatus);
5240 if (exitstatus)
5241 {
5242 if (ui_out_is_mi_like_p (uiout))
5243 ui_out_field_string (uiout, "reason",
5244 async_reason_lookup (EXEC_ASYNC_EXITED));
5245 ui_out_text (uiout, "\nProgram exited with code ");
5246 ui_out_field_fmt (uiout, "exit-code", "0%o", (unsigned int) exitstatus);
5247 ui_out_text (uiout, ".\n");
5248 }
5249 else
5250 {
5251 if (ui_out_is_mi_like_p (uiout))
5252 ui_out_field_string
5253 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
5254 ui_out_text (uiout, "\nProgram exited normally.\n");
5255 }
5256 /* Support the --return-child-result option. */
5257 return_child_result_value = exitstatus;
5258 }
5259
5260 /* Signal received, print why the inferior has stopped. The signal table
5261 tells us to print about it. */
5262
5263 static void
5264 print_signal_received_reason (enum target_signal siggnal)
5265 {
5266 annotate_signal ();
5267
5268 if (siggnal == TARGET_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
5269 {
5270 struct thread_info *t = inferior_thread ();
5271
5272 ui_out_text (uiout, "\n[");
5273 ui_out_field_string (uiout, "thread-name",
5274 target_pid_to_str (t->ptid));
5275 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
5276 ui_out_text (uiout, " stopped");
5277 }
5278 else
5279 {
5280 ui_out_text (uiout, "\nProgram received signal ");
5281 annotate_signal_name ();
5282 if (ui_out_is_mi_like_p (uiout))
5283 ui_out_field_string
5284 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
5285 ui_out_field_string (uiout, "signal-name",
5286 target_signal_to_name (siggnal));
5287 annotate_signal_name_end ();
5288 ui_out_text (uiout, ", ");
5289 annotate_signal_string ();
5290 ui_out_field_string (uiout, "signal-meaning",
5291 target_signal_to_string (siggnal));
5292 annotate_signal_string_end ();
5293 }
5294 ui_out_text (uiout, ".\n");
5295 }
5296
5297 /* Reverse execution: target ran out of history info, print why the inferior
5298 has stopped. */
5299
5300 static void
5301 print_no_history_reason (void)
5302 {
5303 ui_out_text (uiout, "\nNo more reverse-execution history.\n");
5304 }
5305
5306 /* Here to return control to GDB when the inferior stops for real.
5307 Print appropriate messages, remove breakpoints, give terminal our modes.
5308
5309 STOP_PRINT_FRAME nonzero means print the executing frame
5310 (pc, function, args, file, line number and line text).
5311 BREAKPOINTS_FAILED nonzero means stop was due to error
5312 attempting to insert breakpoints. */
5313
5314 void
5315 normal_stop (void)
5316 {
5317 struct target_waitstatus last;
5318 ptid_t last_ptid;
5319 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
5320
5321 get_last_target_status (&last_ptid, &last);
5322
5323 /* If an exception is thrown from this point on, make sure to
5324 propagate GDB's knowledge of the executing state to the
5325 frontend/user running state. A QUIT is an easy exception to see
5326 here, so do this before any filtered output. */
5327 if (!non_stop)
5328 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
5329 else if (last.kind != TARGET_WAITKIND_SIGNALLED
5330 && last.kind != TARGET_WAITKIND_EXITED)
5331 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
5332
5333 /* In non-stop mode, we don't want GDB to switch threads behind the
5334 user's back, to avoid races where the user is typing a command to
5335 apply to thread x, but GDB switches to thread y before the user
5336 finishes entering the command. */
5337
5338 /* As with the notification of thread events, we want to delay
5339 notifying the user that we've switched thread context until
5340 the inferior actually stops.
5341
5342 There's no point in saying anything if the inferior has exited.
5343 Note that SIGNALLED here means "exited with a signal", not
5344 "received a signal". */
5345 if (!non_stop
5346 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
5347 && target_has_execution
5348 && last.kind != TARGET_WAITKIND_SIGNALLED
5349 && last.kind != TARGET_WAITKIND_EXITED)
5350 {
5351 target_terminal_ours_for_output ();
5352 printf_filtered (_("[Switching to %s]\n"),
5353 target_pid_to_str (inferior_ptid));
5354 annotate_thread_changed ();
5355 previous_inferior_ptid = inferior_ptid;
5356 }
5357
5358 if (!breakpoints_always_inserted_mode () && target_has_execution)
5359 {
5360 if (remove_breakpoints ())
5361 {
5362 target_terminal_ours_for_output ();
5363 printf_filtered (_("\
5364 Cannot remove breakpoints because program is no longer writable.\n\
5365 Further execution is probably impossible.\n"));
5366 }
5367 }
5368
5369 /* If an auto-display called a function and that got a signal,
5370 delete that auto-display to avoid an infinite recursion. */
5371
5372 if (stopped_by_random_signal)
5373 disable_current_display ();
5374
5375 /* Don't print a message if in the middle of doing a "step n"
5376 operation for n > 1 */
5377 if (target_has_execution
5378 && last.kind != TARGET_WAITKIND_SIGNALLED
5379 && last.kind != TARGET_WAITKIND_EXITED
5380 && inferior_thread ()->step_multi
5381 && inferior_thread ()->stop_step)
5382 goto done;
5383
5384 target_terminal_ours ();
5385
5386 /* Set the current source location. This will also happen if we
5387 display the frame below, but the current SAL will be incorrect
5388 during a user hook-stop function. */
5389 if (has_stack_frames () && !stop_stack_dummy)
5390 set_current_sal_from_frame (get_current_frame (), 1);
5391
5392 /* Let the user/frontend see the threads as stopped. */
5393 do_cleanups (old_chain);
5394
5395 /* Look up the hook_stop and run it (CLI internally handles problem
5396 of stop_command's pre-hook not existing). */
5397 if (stop_command)
5398 catch_errors (hook_stop_stub, stop_command,
5399 "Error while running hook_stop:\n", RETURN_MASK_ALL);
5400
5401 if (!has_stack_frames ())
5402 goto done;
5403
5404 if (last.kind == TARGET_WAITKIND_SIGNALLED
5405 || last.kind == TARGET_WAITKIND_EXITED)
5406 goto done;
5407
5408 /* Select innermost stack frame - i.e., current frame is frame 0,
5409 and current location is based on that.
5410 Don't do this on return from a stack dummy routine,
5411 or if the program has exited. */
5412
5413 if (!stop_stack_dummy)
5414 {
5415 select_frame (get_current_frame ());
5416
5417 /* Print current location without a level number, if
5418 we have changed functions or hit a breakpoint.
5419 Print source line if we have one.
5420 bpstat_print() contains the logic deciding in detail
5421 what to print, based on the event(s) that just occurred. */
5422
5423 /* If --batch-silent is enabled then there's no need to print the current
5424 source location, and trying to do so risks an error message about
5425 missing source files. */
5426 if (stop_print_frame && !batch_silent)
5427 {
5428 int bpstat_ret;
5429 int source_flag;
5430 int do_frame_printing = 1;
5431 struct thread_info *tp = inferior_thread ();
5432
5433 bpstat_ret = bpstat_print (tp->stop_bpstat);
5434 switch (bpstat_ret)
5435 {
5436 case PRINT_UNKNOWN:
5437 /* If we had hit a shared library event breakpoint,
5438 bpstat_print would print out this message. If we hit
5439 an OS-level shared library event, do the same
5440 thing. */
5441 if (last.kind == TARGET_WAITKIND_LOADED)
5442 {
5443 printf_filtered (_("Stopped due to shared library event\n"));
5444 source_flag = SRC_LINE; /* something bogus */
5445 do_frame_printing = 0;
5446 break;
5447 }
5448
5449 /* FIXME: cagney/2002-12-01: Given that a frame ID does
5450 (or should) carry around the function and does (or
5451 should) use that when doing a frame comparison. */
5452 if (tp->stop_step
5453 && frame_id_eq (tp->step_frame_id,
5454 get_frame_id (get_current_frame ()))
5455 && step_start_function == find_pc_function (stop_pc))
5456 source_flag = SRC_LINE; /* finished step, just print source line */
5457 else
5458 source_flag = SRC_AND_LOC; /* print location and source line */
5459 break;
5460 case PRINT_SRC_AND_LOC:
5461 source_flag = SRC_AND_LOC; /* print location and source line */
5462 break;
5463 case PRINT_SRC_ONLY:
5464 source_flag = SRC_LINE;
5465 break;
5466 case PRINT_NOTHING:
5467 source_flag = SRC_LINE; /* something bogus */
5468 do_frame_printing = 0;
5469 break;
5470 default:
5471 internal_error (__FILE__, __LINE__, _("Unknown value."));
5472 }
5473
5474 /* The behavior of this routine with respect to the source
5475 flag is:
5476 SRC_LINE: Print only source line
5477 LOCATION: Print only location
5478 SRC_AND_LOC: Print location and source line */
5479 if (do_frame_printing)
5480 print_stack_frame (get_selected_frame (NULL), 0, source_flag);
5481
5482 /* Display the auto-display expressions. */
5483 do_displays ();
5484 }
5485 }
5486
5487 /* Save the function value return registers, if we care.
5488 We might be about to restore their previous contents. */
5489 if (inferior_thread ()->proceed_to_finish)
5490 {
5491 /* This should not be necessary. */
5492 if (stop_registers)
5493 regcache_xfree (stop_registers);
5494
5495 /* NB: The copy goes through to the target picking up the value of
5496 all the registers. */
5497 stop_registers = regcache_dup (get_current_regcache ());
5498 }
5499
5500 if (stop_stack_dummy == STOP_STACK_DUMMY)
5501 {
5502 /* Pop the empty frame that contains the stack dummy.
5503 This also restores inferior state prior to the call
5504 (struct inferior_thread_state). */
5505 struct frame_info *frame = get_current_frame ();
5506
5507 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
5508 frame_pop (frame);
5509 /* frame_pop() calls reinit_frame_cache as the last thing it does
5510 which means there's currently no selected frame. We don't need
5511 to re-establish a selected frame if the dummy call returns normally,
5512 that will be done by restore_inferior_status. However, we do have
5513 to handle the case where the dummy call is returning after being
5514 stopped (e.g. the dummy call previously hit a breakpoint). We
5515 can't know which case we have so just always re-establish a
5516 selected frame here. */
5517 select_frame (get_current_frame ());
5518 }
5519
5520 done:
5521 annotate_stopped ();
5522
5523 /* Suppress the stop observer if we're in the middle of:
5524
5525 - a step n (n > 1), as there are still more steps to be done.
5526
5527 - a "finish" command, as the observer will be called in
5528 finish_command_continuation, so it can include the inferior
5529 function's return value.
5530
5531 - calling an inferior function, as we pretend the inferior didn't
5532 run at all. The return value of the call is handled by the
5533 expression evaluator, through call_function_by_hand. */
5534
5535 if (!target_has_execution
5536 || last.kind == TARGET_WAITKIND_SIGNALLED
5537 || last.kind == TARGET_WAITKIND_EXITED
5538 || (!inferior_thread ()->step_multi
5539 && !(inferior_thread ()->stop_bpstat
5540 && inferior_thread ()->proceed_to_finish)
5541 && !inferior_thread ()->in_infcall))
5542 {
5543 if (!ptid_equal (inferior_ptid, null_ptid))
5544 observer_notify_normal_stop (inferior_thread ()->stop_bpstat,
5545 stop_print_frame);
5546 else
5547 observer_notify_normal_stop (NULL, stop_print_frame);
5548 }
5549
5550 if (target_has_execution)
5551 {
5552 if (last.kind != TARGET_WAITKIND_SIGNALLED
5553 && last.kind != TARGET_WAITKIND_EXITED)
5554 /* Delete the breakpoint we stopped at, if it wants to be deleted.
5555 Delete any breakpoint that is to be deleted at the next stop. */
5556 breakpoint_auto_delete (inferior_thread ()->stop_bpstat);
5557 }
5558
5559 /* Try to get rid of automatically added inferiors that are no
5560 longer needed. Keeping those around slows down things linearly.
5561 Note that this never removes the current inferior. */
5562 prune_inferiors ();
5563 }
5564
5565 static int
5566 hook_stop_stub (void *cmd)
5567 {
5568 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
5569 return (0);
5570 }
5571 \f
5572 int
5573 signal_stop_state (int signo)
5574 {
5575 return signal_stop[signo];
5576 }
5577
5578 int
5579 signal_print_state (int signo)
5580 {
5581 return signal_print[signo];
5582 }
5583
5584 int
5585 signal_pass_state (int signo)
5586 {
5587 return signal_program[signo];
5588 }
5589
5590 int
5591 signal_stop_update (int signo, int state)
5592 {
5593 int ret = signal_stop[signo];
5594
5595 signal_stop[signo] = state;
5596 return ret;
5597 }
5598
5599 int
5600 signal_print_update (int signo, int state)
5601 {
5602 int ret = signal_print[signo];
5603
5604 signal_print[signo] = state;
5605 return ret;
5606 }
5607
5608 int
5609 signal_pass_update (int signo, int state)
5610 {
5611 int ret = signal_program[signo];
5612
5613 signal_program[signo] = state;
5614 return ret;
5615 }
5616
5617 static void
5618 sig_print_header (void)
5619 {
5620 printf_filtered (_("\
5621 Signal Stop\tPrint\tPass to program\tDescription\n"));
5622 }
5623
5624 static void
5625 sig_print_info (enum target_signal oursig)
5626 {
5627 const char *name = target_signal_to_name (oursig);
5628 int name_padding = 13 - strlen (name);
5629
5630 if (name_padding <= 0)
5631 name_padding = 0;
5632
5633 printf_filtered ("%s", name);
5634 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
5635 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
5636 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
5637 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
5638 printf_filtered ("%s\n", target_signal_to_string (oursig));
5639 }
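/* For illustration, with the default tables set up in _initialize_infrun
   below, one row of "info signals" output looks roughly like this
   (a sketch; the actual column spacing comes from the tabs above):

     Signal        Stop      Print     Pass to program     Description
     SIGSEGV       Yes       Yes       Yes                 Segmentation fault  */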
5640
5641 /* Specify how various signals in the inferior should be handled. */
5642
5643 static void
5644 handle_command (char *args, int from_tty)
5645 {
5646 char **argv;
5647 int digits, wordlen;
5648 int sigfirst, signum, siglast;
5649 enum target_signal oursig;
5650 int allsigs;
5651 int nsigs;
5652 unsigned char *sigs;
5653 struct cleanup *old_chain;
5654
5655 if (args == NULL)
5656 {
5657 error_no_arg (_("signal to handle"));
5658 }
5659
5660 /* Allocate and zero an array of flags for which signals to handle. */
5661
5662 nsigs = (int) TARGET_SIGNAL_LAST;
5663 sigs = (unsigned char *) alloca (nsigs);
5664 memset (sigs, 0, nsigs);
5665
5666 /* Break the command line up into args. */
5667
5668 argv = gdb_buildargv (args);
5669 old_chain = make_cleanup_freeargv (argv);
5670
5671 /* Walk through the args, looking for signal oursigs, signal names, and
5672 actions. Signal numbers and signal names may be interspersed with
5673 actions, with the actions being performed for all signals cumulatively
5674 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
5675
5676 while (*argv != NULL)
5677 {
5678 wordlen = strlen (*argv);
5679 for (digits = 0; isdigit ((*argv)[digits]); digits++)
5680 {;
5681 }
5682 allsigs = 0;
5683 sigfirst = siglast = -1;
5684
5685 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
5686 {
5687 /* Apply action to all signals except those used by the
5688 debugger. Silently skip those. */
5689 allsigs = 1;
5690 sigfirst = 0;
5691 siglast = nsigs - 1;
5692 }
5693 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
5694 {
5695 SET_SIGS (nsigs, sigs, signal_stop);
5696 SET_SIGS (nsigs, sigs, signal_print);
5697 }
5698 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
5699 {
5700 UNSET_SIGS (nsigs, sigs, signal_program);
5701 }
5702 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
5703 {
5704 SET_SIGS (nsigs, sigs, signal_print);
5705 }
5706 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
5707 {
5708 SET_SIGS (nsigs, sigs, signal_program);
5709 }
5710 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
5711 {
5712 UNSET_SIGS (nsigs, sigs, signal_stop);
5713 }
5714 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
5715 {
5716 SET_SIGS (nsigs, sigs, signal_program);
5717 }
5718 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
5719 {
5720 UNSET_SIGS (nsigs, sigs, signal_print);
5721 UNSET_SIGS (nsigs, sigs, signal_stop);
5722 }
5723 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
5724 {
5725 UNSET_SIGS (nsigs, sigs, signal_program);
5726 }
5727 else if (digits > 0)
5728 {
5729 /* It is numeric. The numeric signal refers to our own
5730 internal signal numbering from target.h, not to host/target
5731 signal number. This is a feature; users really should be
5732 using symbolic names anyway, and the common ones like
5733 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
5734
5735 sigfirst = siglast = (int)
5736 target_signal_from_command (atoi (*argv));
5737 if ((*argv)[digits] == '-')
5738 {
5739 siglast = (int)
5740 target_signal_from_command (atoi ((*argv) + digits + 1));
5741 }
5742 if (sigfirst > siglast)
5743 {
5744 /* Bet he didn't figure we'd think of this case... */
5745 signum = sigfirst;
5746 sigfirst = siglast;
5747 siglast = signum;
5748 }
5749 }
5750 else
5751 {
5752 oursig = target_signal_from_name (*argv);
5753 if (oursig != TARGET_SIGNAL_UNKNOWN)
5754 {
5755 sigfirst = siglast = (int) oursig;
5756 }
5757 else
5758 {
5759 /* Not a number and not a recognized flag word => complain. */
5760 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
5761 }
5762 }
5763
5764 /* If any signal numbers or symbol names were found, set flags for
5765 which signals to apply actions to. */
5766
5767 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
5768 {
5769 switch ((enum target_signal) signum)
5770 {
5771 case TARGET_SIGNAL_TRAP:
5772 case TARGET_SIGNAL_INT:
5773 if (!allsigs && !sigs[signum])
5774 {
5775 if (query (_("%s is used by the debugger.\n\
5776 Are you sure you want to change it? "), target_signal_to_name ((enum target_signal) signum)))
5777 {
5778 sigs[signum] = 1;
5779 }
5780 else
5781 {
5782 printf_unfiltered (_("Not confirmed, unchanged.\n"));
5783 gdb_flush (gdb_stdout);
5784 }
5785 }
5786 break;
5787 case TARGET_SIGNAL_0:
5788 case TARGET_SIGNAL_DEFAULT:
5789 case TARGET_SIGNAL_UNKNOWN:
5790 /* Make sure that "all" doesn't print these. */
5791 break;
5792 default:
5793 sigs[signum] = 1;
5794 break;
5795 }
5796 }
5797
5798 argv++;
5799 }
5800
5801 for (signum = 0; signum < nsigs; signum++)
5802 if (sigs[signum])
5803 {
5804 target_notice_signals (inferior_ptid);
5805
5806 if (from_tty)
5807 {
5808 /* Show the results. */
5809 sig_print_header ();
5810 for (; signum < nsigs; signum++)
5811 if (sigs[signum])
5812 sig_print_info (signum);
5813 }
5814
5815 break;
5816 }
5817
5818 do_cleanups (old_chain);
5819 }
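/* Example invocations (illustrative only; the full grammar is described
   in the help text registered in _initialize_infrun below):

     handle SIGUSR1 nostop noprint pass
     handle SIGINT stop print
     handle 5-10 nopass
     handle all print

   Signal names or numbers may be interspersed with actions; each action
   is applied to all signals cumulatively specified so far on the line.  */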
5820
5821 static void
5822 xdb_handle_command (char *args, int from_tty)
5823 {
5824 char **argv;
5825 struct cleanup *old_chain;
5826
5827 if (args == NULL)
5828 error_no_arg (_("xdb command"));
5829
5830 /* Break the command line up into args. */
5831
5832 argv = gdb_buildargv (args);
5833 old_chain = make_cleanup_freeargv (argv);
5834 if (argv[1] != (char *) NULL)
5835 {
5836 char *argBuf;
5837 int bufLen;
5838
5839 bufLen = strlen (argv[0]) + 20;
5840 argBuf = (char *) xmalloc (bufLen);
5841 if (argBuf)
5842 {
5843 int validFlag = 1;
5844 enum target_signal oursig;
5845
5846 oursig = target_signal_from_name (argv[0]);
5847 memset (argBuf, 0, bufLen);
5848 if (strcmp (argv[1], "Q") == 0)
5849 sprintf (argBuf, "%s %s", argv[0], "noprint");
5850 else
5851 {
5852 if (strcmp (argv[1], "s") == 0)
5853 {
5854 if (!signal_stop[oursig])
5855 sprintf (argBuf, "%s %s", argv[0], "stop");
5856 else
5857 sprintf (argBuf, "%s %s", argv[0], "nostop");
5858 }
5859 else if (strcmp (argv[1], "i") == 0)
5860 {
5861 if (!signal_program[oursig])
5862 sprintf (argBuf, "%s %s", argv[0], "pass");
5863 else
5864 sprintf (argBuf, "%s %s", argv[0], "nopass");
5865 }
5866 else if (strcmp (argv[1], "r") == 0)
5867 {
5868 if (!signal_print[oursig])
5869 sprintf (argBuf, "%s %s", argv[0], "print");
5870 else
5871 sprintf (argBuf, "%s %s", argv[0], "noprint");
5872 }
5873 else
5874 validFlag = 0;
5875 }
5876 if (validFlag)
5877 handle_command (argBuf, from_tty);
5878 else
5879 printf_filtered (_("Invalid signal handling flag.\n"));
5880 if (argBuf)
5881 xfree (argBuf);
5882 }
5883 }
5884 do_cleanups (old_chain);
5885 }
5886
5887 /* Print current contents of the tables set by the handle command.
5888 It is possible we should just be printing signals actually used
5889 by the current target (but for things to work right when switching
5890 targets, all signals should be in the signal tables). */
5891
5892 static void
5893 signals_info (char *signum_exp, int from_tty)
5894 {
5895 enum target_signal oursig;
5896
5897 sig_print_header ();
5898
5899 if (signum_exp)
5900 {
5901 /* First see if this is a symbol name. */
5902 oursig = target_signal_from_name (signum_exp);
5903 if (oursig == TARGET_SIGNAL_UNKNOWN)
5904 {
5905 /* No, try numeric. */
5906 oursig =
5907 target_signal_from_command (parse_and_eval_long (signum_exp));
5908 }
5909 sig_print_info (oursig);
5910 return;
5911 }
5912
5913 printf_filtered ("\n");
5914 /* These ugly casts brought to you by the native VAX compiler. */
5915 for (oursig = TARGET_SIGNAL_FIRST;
5916 (int) oursig < (int) TARGET_SIGNAL_LAST;
5917 oursig = (enum target_signal) ((int) oursig + 1))
5918 {
5919 QUIT;
5920
5921 if (oursig != TARGET_SIGNAL_UNKNOWN
5922 && oursig != TARGET_SIGNAL_DEFAULT && oursig != TARGET_SIGNAL_0)
5923 sig_print_info (oursig);
5924 }
5925
5926 printf_filtered (_("\nUse the \"handle\" command to change these tables.\n"));
5927 }
5928
5929 /* The $_siginfo convenience variable is a bit special. We don't know
5930 for sure the type of the value until we actually have a chance to
5931 fetch the data. The type can change depending on gdbarch, so it is
5932 also dependent on which thread you have selected. This is handled by:
5933 
5934 1. making $_siginfo be an internalvar that creates a new value on
5935 access.
5936
5937 2. making the value of $_siginfo be an lval_computed value. */
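
/* For example, from the CLI (a sketch; the available fields depend on
   the gdbarch's siginfo type):

     (gdb) print $_siginfo
     (gdb) print $_siginfo.si_signo
     (gdb) set var $_siginfo.si_signo = 5

   Reads and writes of the value are routed through the lval_computed
   hooks below, which transfer the data as TARGET_OBJECT_SIGNAL_INFO.  */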
5938
5939 /* This function implements the lval_computed support for reading a
5940 $_siginfo value. */
5941
5942 static void
5943 siginfo_value_read (struct value *v)
5944 {
5945 LONGEST transferred;
5946
5947 transferred =
5948 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
5949 NULL,
5950 value_contents_all_raw (v),
5951 value_offset (v),
5952 TYPE_LENGTH (value_type (v)));
5953
5954 if (transferred != TYPE_LENGTH (value_type (v)))
5955 error (_("Unable to read siginfo"));
5956 }
5957
5958 /* This function implements the lval_computed support for writing a
5959 $_siginfo value. */
5960
5961 static void
5962 siginfo_value_write (struct value *v, struct value *fromval)
5963 {
5964 LONGEST transferred;
5965
5966 transferred = target_write (&current_target,
5967 TARGET_OBJECT_SIGNAL_INFO,
5968 NULL,
5969 value_contents_all_raw (fromval),
5970 value_offset (v),
5971 TYPE_LENGTH (value_type (fromval)));
5972
5973 if (transferred != TYPE_LENGTH (value_type (fromval)))
5974 error (_("Unable to write siginfo"));
5975 }
5976
5977 static struct lval_funcs siginfo_value_funcs =
5978 {
5979 siginfo_value_read,
5980 siginfo_value_write
5981 };
5982
5983 /* Return a new value with the correct type for the siginfo object of
5984 the current thread using architecture GDBARCH. Return a void value
5985 if there's no object available. */
5986
5987 static struct value *
5988 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var)
5989 {
5990 if (target_has_stack
5991 && !ptid_equal (inferior_ptid, null_ptid)
5992 && gdbarch_get_siginfo_type_p (gdbarch))
5993 {
5994 struct type *type = gdbarch_get_siginfo_type (gdbarch);
5995
5996 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
5997 }
5998
5999 return allocate_value (builtin_type (gdbarch)->builtin_void);
6000 }
6001
6002 \f
6003 /* Inferior thread state.
6004 These are details related to the inferior itself, and don't include
6005 things like what frame the user had selected or what gdb was doing
6006 with the target at the time.
6007 For inferior function calls these are things we want to restore
6008 regardless of whether the function call successfully completes
6009 or the dummy frame has to be manually popped. */
6010
6011 struct inferior_thread_state
6012 {
6013 enum target_signal stop_signal;
6014 CORE_ADDR stop_pc;
6015 struct regcache *registers;
6016 };
6017
6018 struct inferior_thread_state *
6019 save_inferior_thread_state (void)
6020 {
6021 struct inferior_thread_state *inf_state = XMALLOC (struct inferior_thread_state);
6022 struct thread_info *tp = inferior_thread ();
6023
6024 inf_state->stop_signal = tp->stop_signal;
6025 inf_state->stop_pc = stop_pc;
6026
6027 inf_state->registers = regcache_dup (get_current_regcache ());
6028
6029 return inf_state;
6030 }
6031
6032 /* Restore inferior session state to INF_STATE. */
6033
6034 void
6035 restore_inferior_thread_state (struct inferior_thread_state *inf_state)
6036 {
6037 struct thread_info *tp = inferior_thread ();
6038
6039 tp->stop_signal = inf_state->stop_signal;
6040 stop_pc = inf_state->stop_pc;
6041
6042 /* The inferior can be gone if the user types "print exit(0)"
6043 (and perhaps other times). */
6044 if (target_has_execution)
6045 /* NB: The register write goes through to the target. */
6046 regcache_cpy (get_current_regcache (), inf_state->registers);
6047 regcache_xfree (inf_state->registers);
6048 xfree (inf_state);
6049 }
6050
6051 static void
6052 do_restore_inferior_thread_state_cleanup (void *state)
6053 {
6054 restore_inferior_thread_state (state);
6055 }
6056
6057 struct cleanup *
6058 make_cleanup_restore_inferior_thread_state (struct inferior_thread_state *inf_state)
6059 {
6060 return make_cleanup (do_restore_inferior_thread_state_cleanup, inf_state);
6061 }
6062
6063 void
6064 discard_inferior_thread_state (struct inferior_thread_state *inf_state)
6065 {
6066 regcache_xfree (inf_state->registers);
6067 xfree (inf_state);
6068 }
6069
6070 struct regcache *
6071 get_inferior_thread_state_regcache (struct inferior_thread_state *inf_state)
6072 {
6073 return inf_state->registers;
6074 }
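
/* A minimal usage sketch (not part of GDB) of the save/restore pair
   above; the function below is hypothetical.  */
#if 0
static void
example_save_restore_thread_state (void)
{
  struct inferior_thread_state *saved = save_inferior_thread_state ();
  struct cleanup *chain = make_cleanup_restore_inferior_thread_state (saved);

  /* ... run code that may clobber the stop signal, stop_pc and the
     registers ...  */

  /* Running the cleanup restores the saved state and frees SAVED.  */
  do_cleanups (chain);
}
#endif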
6075
6076 /* Session related state for inferior function calls.
6077 These are the additional bits of state that need to be restored
6078 when an inferior function call successfully completes. */
6079
6080 struct inferior_status
6081 {
6082 bpstat stop_bpstat;
6083 int stop_step;
6084 enum stop_stack_kind stop_stack_dummy;
6085 int stopped_by_random_signal;
6086 int stepping_over_breakpoint;
6087 CORE_ADDR step_range_start;
6088 CORE_ADDR step_range_end;
6089 struct frame_id step_frame_id;
6090 struct frame_id step_stack_frame_id;
6091 enum step_over_calls_kind step_over_calls;
6092 CORE_ADDR step_resume_break_address;
6093 int stop_after_trap;
6094 int stop_soon;
6095
6096 /* ID of the selected frame when the inferior function call was made. */
6097 struct frame_id selected_frame_id;
6098
6099 int proceed_to_finish;
6100 int in_infcall;
6101 };
6102
6103 /* Save all of the information associated with the inferior<==>gdb
6104 connection. */
6105
6106 struct inferior_status *
6107 save_inferior_status (void)
6108 {
6109 struct inferior_status *inf_status = XMALLOC (struct inferior_status);
6110 struct thread_info *tp = inferior_thread ();
6111 struct inferior *inf = current_inferior ();
6112
6113 inf_status->stop_step = tp->stop_step;
6114 inf_status->stop_stack_dummy = stop_stack_dummy;
6115 inf_status->stopped_by_random_signal = stopped_by_random_signal;
6116 inf_status->stepping_over_breakpoint = tp->trap_expected;
6117 inf_status->step_range_start = tp->step_range_start;
6118 inf_status->step_range_end = tp->step_range_end;
6119 inf_status->step_frame_id = tp->step_frame_id;
6120 inf_status->step_stack_frame_id = tp->step_stack_frame_id;
6121 inf_status->step_over_calls = tp->step_over_calls;
6122 inf_status->stop_after_trap = stop_after_trap;
6123 inf_status->stop_soon = inf->stop_soon;
6124 /* Save original bpstat chain here; replace it with copy of chain.
6125 If caller's caller is walking the chain, they'll be happier if we
6126 hand them back the original chain when restore_inferior_status is
6127 called. */
6128 inf_status->stop_bpstat = tp->stop_bpstat;
6129 tp->stop_bpstat = bpstat_copy (tp->stop_bpstat);
6130 inf_status->proceed_to_finish = tp->proceed_to_finish;
6131 inf_status->in_infcall = tp->in_infcall;
6132
6133 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
6134
6135 return inf_status;
6136 }
6137
6138 static int
6139 restore_selected_frame (void *args)
6140 {
6141 struct frame_id *fid = (struct frame_id *) args;
6142 struct frame_info *frame;
6143
6144 frame = frame_find_by_id (*fid);
6145
6146 /* If the previously selected frame can no longer be found, warn and
6147 let the caller fall back to the current frame. */
6148 if (frame == NULL)
6149 {
6150 warning (_("Unable to restore previously selected frame."));
6151 return 0;
6152 }
6153
6154 select_frame (frame);
6155
6156 return (1);
6157 }
6158
6159 /* Restore inferior session state to INF_STATUS. */
6160
6161 void
6162 restore_inferior_status (struct inferior_status *inf_status)
6163 {
6164 struct thread_info *tp = inferior_thread ();
6165 struct inferior *inf = current_inferior ();
6166
6167 tp->stop_step = inf_status->stop_step;
6168 stop_stack_dummy = inf_status->stop_stack_dummy;
6169 stopped_by_random_signal = inf_status->stopped_by_random_signal;
6170 tp->trap_expected = inf_status->stepping_over_breakpoint;
6171 tp->step_range_start = inf_status->step_range_start;
6172 tp->step_range_end = inf_status->step_range_end;
6173 tp->step_frame_id = inf_status->step_frame_id;
6174 tp->step_stack_frame_id = inf_status->step_stack_frame_id;
6175 tp->step_over_calls = inf_status->step_over_calls;
6176 stop_after_trap = inf_status->stop_after_trap;
6177 inf->stop_soon = inf_status->stop_soon;
6178 bpstat_clear (&tp->stop_bpstat);
6179 tp->stop_bpstat = inf_status->stop_bpstat;
6180 inf_status->stop_bpstat = NULL;
6181 tp->proceed_to_finish = inf_status->proceed_to_finish;
6182 tp->in_infcall = inf_status->in_infcall;
6183
6184 if (target_has_stack)
6185 {
6186 /* The point of catch_errors is that if the stack is clobbered,
6187 walking the stack might encounter a garbage pointer and
6188 error() trying to dereference it. */
6189 if (catch_errors
6190 (restore_selected_frame, &inf_status->selected_frame_id,
6191 "Unable to restore previously selected frame:\n",
6192 RETURN_MASK_ERROR) == 0)
6193 /* Error in restoring the selected frame. Select the innermost
6194 frame. */
6195 select_frame (get_current_frame ());
6196 }
6197
6198 xfree (inf_status);
6199 }
6200
6201 static void
6202 do_restore_inferior_status_cleanup (void *sts)
6203 {
6204 restore_inferior_status (sts);
6205 }
6206
6207 struct cleanup *
6208 make_cleanup_restore_inferior_status (struct inferior_status *inf_status)
6209 {
6210 return make_cleanup (do_restore_inferior_status_cleanup, inf_status);
6211 }
6212
6213 void
6214 discard_inferior_status (struct inferior_status *inf_status)
6215 {
6216 /* See save_inferior_status for info on stop_bpstat. */
6217 bpstat_clear (&inf_status->stop_bpstat);
6218 xfree (inf_status);
6219 }
6220 \f
6221 int
6222 inferior_has_forked (ptid_t pid, ptid_t *child_pid)
6223 {
6224 struct target_waitstatus last;
6225 ptid_t last_ptid;
6226
6227 get_last_target_status (&last_ptid, &last);
6228
6229 if (last.kind != TARGET_WAITKIND_FORKED)
6230 return 0;
6231
6232 if (!ptid_equal (last_ptid, pid))
6233 return 0;
6234
6235 *child_pid = last.value.related_pid;
6236 return 1;
6237 }
6238
6239 int
6240 inferior_has_vforked (ptid_t pid, ptid_t *child_pid)
6241 {
6242 struct target_waitstatus last;
6243 ptid_t last_ptid;
6244
6245 get_last_target_status (&last_ptid, &last);
6246
6247 if (last.kind != TARGET_WAITKIND_VFORKED)
6248 return 0;
6249
6250 if (!ptid_equal (last_ptid, pid))
6251 return 0;
6252
6253 *child_pid = last.value.related_pid;
6254 return 1;
6255 }
6256
6257 int
6258 inferior_has_execd (ptid_t pid, char **execd_pathname)
6259 {
6260 struct target_waitstatus last;
6261 ptid_t last_ptid;
6262
6263 get_last_target_status (&last_ptid, &last);
6264
6265 if (last.kind != TARGET_WAITKIND_EXECD)
6266 return 0;
6267
6268 if (!ptid_equal (last_ptid, pid))
6269 return 0;
6270
6271 *execd_pathname = xstrdup (last.value.execd_pathname);
6272 return 1;
6273 }
6274
6275 int
6276 inferior_has_called_syscall (ptid_t pid, int *syscall_number)
6277 {
6278 struct target_waitstatus last;
6279 ptid_t last_ptid;
6280
6281 get_last_target_status (&last_ptid, &last);
6282
6283 if (last.kind != TARGET_WAITKIND_SYSCALL_ENTRY &&
6284 last.kind != TARGET_WAITKIND_SYSCALL_RETURN)
6285 return 0;
6286
6287 if (!ptid_equal (last_ptid, pid))
6288 return 0;
6289
6290 *syscall_number = last.value.syscall_number;
6291 return 1;
6292 }
6293
6294 /* Oft used ptids */
6295 ptid_t null_ptid;
6296 ptid_t minus_one_ptid;
6297
6298 /* Create a ptid given the necessary PID, LWP, and TID components. */
6299
6300 ptid_t
6301 ptid_build (int pid, long lwp, long tid)
6302 {
6303 ptid_t ptid;
6304
6305 ptid.pid = pid;
6306 ptid.lwp = lwp;
6307 ptid.tid = tid;
6308 return ptid;
6309 }
6310
6311 /* Create a ptid from just a pid. */
6312
6313 ptid_t
6314 pid_to_ptid (int pid)
6315 {
6316 return ptid_build (pid, 0, 0);
6317 }
6318
6319 /* Fetch the pid (process id) component from a ptid. */
6320
6321 int
6322 ptid_get_pid (ptid_t ptid)
6323 {
6324 return ptid.pid;
6325 }
6326
6327 /* Fetch the lwp (lightweight process) component from a ptid. */
6328
6329 long
6330 ptid_get_lwp (ptid_t ptid)
6331 {
6332 return ptid.lwp;
6333 }
6334
6335 /* Fetch the tid (thread id) component from a ptid. */
6336
6337 long
6338 ptid_get_tid (ptid_t ptid)
6339 {
6340 return ptid.tid;
6341 }
6342
6343 /* ptid_equal() is used to test equality of two ptids. */
6344
6345 int
6346 ptid_equal (ptid_t ptid1, ptid_t ptid2)
6347 {
6348 return (ptid1.pid == ptid2.pid && ptid1.lwp == ptid2.lwp
6349 && ptid1.tid == ptid2.tid);
6350 }
6351
6352 /* Returns true if PTID represents a process. */
6353
6354 int
6355 ptid_is_pid (ptid_t ptid)
6356 {
6357 if (ptid_equal (minus_one_ptid, ptid))
6358 return 0;
6359 if (ptid_equal (null_ptid, ptid))
6360 return 0;
6361
6362 return (ptid_get_lwp (ptid) == 0 && ptid_get_tid (ptid) == 0);
6363 }
6364
6365 int
6366 ptid_match (ptid_t ptid, ptid_t filter)
6367 {
6368 /* Since both parameters have the same type, require PTID to be a
6369 concrete thread or process, to catch accidentally swapped arguments. */
6370 gdb_assert (!ptid_equal (ptid, minus_one_ptid)
6371 && !ptid_equal (ptid, null_ptid));
6372
6373 if (ptid_equal (filter, minus_one_ptid))
6374 return 1;
6375 if (ptid_is_pid (filter)
6376 && ptid_get_pid (ptid) == ptid_get_pid (filter))
6377 return 1;
6378 else if (ptid_equal (ptid, filter))
6379 return 1;
6380
6381 return 0;
6382 }
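
/* For example (the numeric values are illustrative):

     ptid_match (ptid_build (1234, 5, 0), pid_to_ptid (1234))      => 1
     ptid_match (ptid_build (1234, 5, 0), minus_one_ptid)          => 1
     ptid_match (ptid_build (1234, 5, 0), ptid_build (1234, 6, 0)) => 0
     ptid_match (ptid_build (9999, 1, 0), pid_to_ptid (1234))      => 0  */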
6383
6384 /* restore_inferior_ptid() will be used by the cleanup machinery
6385 to restore the inferior_ptid value saved in a call to
6386 save_inferior_ptid(). */
6387
6388 static void
6389 restore_inferior_ptid (void *arg)
6390 {
6391 ptid_t *saved_ptid_ptr = arg;
6392
6393 inferior_ptid = *saved_ptid_ptr;
6394 xfree (arg);
6395 }
6396
6397 /* Save the value of inferior_ptid so that it may be restored by a
6398 later call to do_cleanups(). Returns the struct cleanup pointer
6399 needed for later doing the cleanup. */
6400
6401 struct cleanup *
6402 save_inferior_ptid (void)
6403 {
6404 ptid_t *saved_ptid_ptr;
6405
6406 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
6407 *saved_ptid_ptr = inferior_ptid;
6408 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
6409 }
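
/* A minimal usage sketch (not part of GDB): temporarily switch
   inferior_ptid and rely on the cleanup to switch it back.  The
   function name and its argument are hypothetical.  */
#if 0
static void
example_with_saved_ptid (ptid_t temporary_ptid)
{
  struct cleanup *old_chain = save_inferior_ptid ();

  inferior_ptid = temporary_ptid;
  /* ... operate while TEMPORARY_PTID is selected ...  */

  /* Restores the original inferior_ptid and frees the saved copy.  */
  do_cleanups (old_chain);
}
#endif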
6410 \f
6411
6412 /* User interface for reverse debugging:
6413 Set exec-direction / show exec-direction commands
6414 (setting it to "reverse" errors unless the target can execute in reverse). */
6415
6416 enum exec_direction_kind execution_direction = EXEC_FORWARD;
6417 static const char exec_forward[] = "forward";
6418 static const char exec_reverse[] = "reverse";
6419 static const char *exec_direction = exec_forward;
6420 static const char *exec_direction_names[] = {
6421 exec_forward,
6422 exec_reverse,
6423 NULL
6424 };
6425
6426 static void
6427 set_exec_direction_func (char *args, int from_tty,
6428 struct cmd_list_element *cmd)
6429 {
6430 if (target_can_execute_reverse)
6431 {
6432 if (!strcmp (exec_direction, exec_forward))
6433 execution_direction = EXEC_FORWARD;
6434 else if (!strcmp (exec_direction, exec_reverse))
6435 execution_direction = EXEC_REVERSE;
6436 }
6437 else
6438 {
6439 exec_direction = exec_forward;
6440 error (_("Target does not support this operation."));
6441 }
6442 }
6443
6444 static void
6445 show_exec_direction_func (struct ui_file *out, int from_tty,
6446 struct cmd_list_element *cmd, const char *value)
6447 {
6448 switch (execution_direction) {
6449 case EXEC_FORWARD:
6450 fprintf_filtered (out, _("Forward.\n"));
6451 break;
6452 case EXEC_REVERSE:
6453 fprintf_filtered (out, _("Reverse.\n"));
6454 break;
6455 case EXEC_ERROR:
6456 default:
6457 fprintf_filtered (out,
6458 _("Forward (target `%s' does not support exec-direction).\n"),
6459 target_shortname);
6460 break;
6461 }
6462 }
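
/* From the CLI this is driven by "set exec-direction reverse" and
   "set exec-direction forward"; the reverse setting is rejected with an
   error unless the target can execute in reverse.  */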
6463
6464 /* User interface for non-stop mode. */
6465
6466 int non_stop = 0;
6467
6468 static void
6469 set_non_stop (char *args, int from_tty,
6470 struct cmd_list_element *c)
6471 {
6472 if (target_has_execution)
6473 {
6474 non_stop_1 = non_stop;
6475 error (_("Cannot change this setting while the inferior is running."));
6476 }
6477
6478 non_stop = non_stop_1;
6479 }
6480
6481 static void
6482 show_non_stop (struct ui_file *file, int from_tty,
6483 struct cmd_list_element *c, const char *value)
6484 {
6485 fprintf_filtered (file,
6486 _("Controlling the inferior in non-stop mode is %s.\n"),
6487 value);
6488 }
6489
6490 static void
6491 show_schedule_multiple (struct ui_file *file, int from_tty,
6492 struct cmd_list_element *c, const char *value)
6493 {
6494 fprintf_filtered (file, _("\
6495 Resuming the execution of threads of all processes is %s.\n"), value);
6496 }
6497
6498 void
6499 _initialize_infrun (void)
6500 {
6501 int i;
6502 int numsigs;
6503
6504 add_info ("signals", signals_info, _("\
6505 What debugger does when program gets various signals.\n\
6506 Specify a signal as argument to print info on that signal only."));
6507 add_info_alias ("handle", "signals", 0);
6508
6509 add_com ("handle", class_run, handle_command, _("\
6510 Specify how to handle a signal.\n\
6511 Args are signals and actions to apply to those signals.\n\
6512 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6513 from 1-15 are allowed for compatibility with old versions of GDB.\n\
6514 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
6515 The special arg \"all\" is recognized to mean all signals except those\n\
6516 used by the debugger, typically SIGTRAP and SIGINT.\n\
6517 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
6518 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
6519 Stop means reenter debugger if this signal happens (implies print).\n\
6520 Print means print a message if this signal happens.\n\
6521 Pass means let program see this signal; otherwise program doesn't know.\n\
6522 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
6523 Pass and Stop may be combined."));
6524 if (xdb_commands)
6525 {
6526 add_com ("lz", class_info, signals_info, _("\
6527 What debugger does when program gets various signals.\n\
6528 Specify a signal as argument to print info on that signal only."));
6529 add_com ("z", class_run, xdb_handle_command, _("\
6530 Specify how to handle a signal.\n\
6531 Args are signals and actions to apply to those signals.\n\
6532 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6533 from 1-15 are allowed for compatibility with old versions of GDB.\n\
6534 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
6535 The special arg \"all\" is recognized to mean all signals except those\n\
6536 used by the debugger, typically SIGTRAP and SIGINT.\n\
6537 Recognized actions include \"s\" (toggles between stop and nostop),\n\
6538 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
6539 nopass), \"Q\" (noprint)\n\
6540 Stop means reenter debugger if this signal happens (implies print).\n\
6541 Print means print a message if this signal happens.\n\
6542 Pass means let program see this signal; otherwise program doesn't know.\n\
6543 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
6544 Pass and Stop may be combined."));
6545 }
6546
6547 if (!dbx_commands)
6548 stop_command = add_cmd ("stop", class_obscure,
6549 not_just_help_class_command, _("\
6550 There is no `stop' command, but you can set a hook on `stop'.\n\
6551 This allows you to set a list of commands to be run each time execution\n\
6552 of the program stops."), &cmdlist);
6553
6554 add_setshow_zinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
6555 Set inferior debugging."), _("\
6556 Show inferior debugging."), _("\
6557 When non-zero, inferior specific debugging is enabled."),
6558 NULL,
6559 show_debug_infrun,
6560 &setdebuglist, &showdebuglist);
6561
6562 add_setshow_boolean_cmd ("displaced", class_maintenance, &debug_displaced, _("\
6563 Set displaced stepping debugging."), _("\
6564 Show displaced stepping debugging."), _("\
6565 When non-zero, displaced stepping specific debugging is enabled."),
6566 NULL,
6567 show_debug_displaced,
6568 &setdebuglist, &showdebuglist);
6569
6570 add_setshow_boolean_cmd ("non-stop", no_class,
6571 &non_stop_1, _("\
6572 Set whether gdb controls the inferior in non-stop mode."), _("\
6573 Show whether gdb controls the inferior in non-stop mode."), _("\
6574 When debugging a multi-threaded program and this setting is\n\
6575 off (the default, also called all-stop mode), when one thread stops\n\
6576 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
6577 all other threads in the program while you interact with the thread of\n\
6578 interest. When you continue or step a thread, you can allow the other\n\
6579 threads to run, or have them remain stopped, but while you inspect any\n\
6580 thread's state, all threads stop.\n\
6581 \n\
6582 In non-stop mode, when one thread stops, other threads can continue\n\
6583 to run freely. You'll be able to step each thread independently,\n\
6584 leave it stopped or free to run as needed."),
6585 set_non_stop,
6586 show_non_stop,
6587 &setlist,
6588 &showlist);
6589
6590 numsigs = (int) TARGET_SIGNAL_LAST;
6591 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
6592 signal_print = (unsigned char *)
6593 xmalloc (sizeof (signal_print[0]) * numsigs);
6594 signal_program = (unsigned char *)
6595 xmalloc (sizeof (signal_program[0]) * numsigs);
6596 for (i = 0; i < numsigs; i++)
6597 {
6598 signal_stop[i] = 1;
6599 signal_print[i] = 1;
6600 signal_program[i] = 1;
6601 }
6602
6603 /* Signals caused by debugger's own actions
6604 should not be given to the program afterwards. */
6605 signal_program[TARGET_SIGNAL_TRAP] = 0;
6606 signal_program[TARGET_SIGNAL_INT] = 0;
6607
6608 /* Signals that are not errors should not normally enter the debugger. */
6609 signal_stop[TARGET_SIGNAL_ALRM] = 0;
6610 signal_print[TARGET_SIGNAL_ALRM] = 0;
6611 signal_stop[TARGET_SIGNAL_VTALRM] = 0;
6612 signal_print[TARGET_SIGNAL_VTALRM] = 0;
6613 signal_stop[TARGET_SIGNAL_PROF] = 0;
6614 signal_print[TARGET_SIGNAL_PROF] = 0;
6615 signal_stop[TARGET_SIGNAL_CHLD] = 0;
6616 signal_print[TARGET_SIGNAL_CHLD] = 0;
6617 signal_stop[TARGET_SIGNAL_IO] = 0;
6618 signal_print[TARGET_SIGNAL_IO] = 0;
6619 signal_stop[TARGET_SIGNAL_POLL] = 0;
6620 signal_print[TARGET_SIGNAL_POLL] = 0;
6621 signal_stop[TARGET_SIGNAL_URG] = 0;
6622 signal_print[TARGET_SIGNAL_URG] = 0;
6623 signal_stop[TARGET_SIGNAL_WINCH] = 0;
6624 signal_print[TARGET_SIGNAL_WINCH] = 0;
6625
6626 /* These signals are used internally by user-level thread
6627 implementations. (See signal(5) on Solaris.) Like the above
6628 signals, a healthy program receives and handles them as part of
6629 its normal operation. */
6630 signal_stop[TARGET_SIGNAL_LWP] = 0;
6631 signal_print[TARGET_SIGNAL_LWP] = 0;
6632 signal_stop[TARGET_SIGNAL_WAITING] = 0;
6633 signal_print[TARGET_SIGNAL_WAITING] = 0;
6634 signal_stop[TARGET_SIGNAL_CANCEL] = 0;
6635 signal_print[TARGET_SIGNAL_CANCEL] = 0;

  add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
                            &stop_on_solib_events, _("\
Set stopping for shared library events."), _("\
Show stopping for shared library events."), _("\
If nonzero, gdb will give control to the user when the dynamic linker\n\
notifies gdb of shared library events. The most common event of interest\n\
to the user would be loading/unloading of a new library."),
                            NULL,
                            show_stop_on_solib_events,
                            &setlist, &showlist);

  add_setshow_enum_cmd ("follow-fork-mode", class_run,
                        follow_fork_mode_kind_names,
                        &follow_fork_mode_string, _("\
Set debugger response to a program call of fork or vfork."), _("\
Show debugger response to a program call of fork or vfork."), _("\
A fork or vfork creates a new process. follow-fork-mode can be:\n\
  parent - the original process is debugged after a fork\n\
  child - the new process is debugged after a fork\n\
The unfollowed process will continue to run.\n\
By default, the debugger will follow the parent process."),
                        NULL,
                        show_follow_fork_mode_string,
                        &setlist, &showlist);

  add_setshow_enum_cmd ("follow-exec-mode", class_run,
                        follow_exec_mode_names,
                        &follow_exec_mode_string, _("\
Set debugger response to a program call of exec."), _("\
Show debugger response to a program call of exec."), _("\
An exec call replaces the program image of a process.\n\
\n\
follow-exec-mode can be:\n\
\n\
  new - the debugger creates a new inferior and rebinds the process\n\
to this new inferior. The program the process was running before\n\
the exec call can be restarted afterwards by restarting the original\n\
inferior.\n\
\n\
  same - the debugger keeps the process bound to the same inferior.\n\
The new executable image replaces the previous executable loaded in\n\
the inferior. Restarting the inferior after the exec call restarts\n\
the executable the process was running after the exec call.\n\
\n\
By default, the debugger will use the same inferior."),
                        NULL,
                        show_follow_exec_mode_string,
                        &setlist, &showlist);

  add_setshow_enum_cmd ("scheduler-locking", class_run,
                        scheduler_enums, &scheduler_mode, _("\
Set mode for locking scheduler during execution."), _("\
Show mode for locking scheduler during execution."), _("\
off == no locking (threads may preempt at any time)\n\
on == full locking (no thread except the current thread may run)\n\
step == scheduler locked during every single-step operation.\n\
In this mode, no other thread may run during a step command.\n\
Other threads may run while stepping over a function call ('next')."),
                        set_schedlock_func, /* traps on target vector */
                        show_scheduler_mode,
                        &setlist, &showlist);

  add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
Set mode for resuming threads of all processes."), _("\
Show mode for resuming threads of all processes."), _("\
When on, execution commands (such as 'continue' or 'next') resume all\n\
threads of all processes. When off (which is the default), execution\n\
commands only resume the threads of the current process. The set of\n\
threads that are resumed is further refined by the scheduler-locking\n\
mode (see help set scheduler-locking)."),
                           NULL,
                           show_schedule_multiple,
                           &setlist, &showlist);

  add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
Set mode of the step operation."), _("\
Show mode of the step operation."), _("\
When set, doing a step over a function without debug line information\n\
will stop at the first instruction of that function. Otherwise, the\n\
function is skipped and the step command stops at a different source line."),
                           NULL,
                           show_step_stop_if_no_debug,
                           &setlist, &showlist);

  add_setshow_enum_cmd ("displaced-stepping", class_run,
                        can_use_displaced_stepping_enum,
                        &can_use_displaced_stepping, _("\
Set debugger's willingness to use displaced stepping."), _("\
Show debugger's willingness to use displaced stepping."), _("\
If on, gdb will use displaced stepping to step over breakpoints if it is\n\
supported by the target architecture. If off, gdb will not use displaced\n\
stepping to step over breakpoints, even if such is supported by the target\n\
architecture. If auto (which is the default), gdb will use displaced stepping\n\
if the target architecture supports it and non-stop mode is active, but will not\n\
use it in all-stop mode (see help set non-stop)."),
                        NULL,
                        show_can_use_displaced_stepping,
                        &setlist, &showlist);

  add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
                        &exec_direction, _("Set direction of execution.\n\
Options are 'forward' or 'reverse'."),
                        _("Show direction of execution (forward/reverse)."),
                        _("Tells gdb whether to execute forward or backward."),
                        set_exec_direction_func, show_exec_direction_func,
                        &setlist, &showlist);

  /* Set/show detach-on-fork: user-settable mode. */

  add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
Set whether gdb will detach the child of a fork."), _("\
Show whether gdb will detach the child of a fork."), _("\
Tells gdb whether to detach the child of a fork."),
                           NULL, NULL, &setlist, &showlist);

  /* ptid initializations */
  null_ptid = ptid_build (0, 0, 0);
  minus_one_ptid = ptid_build (-1, 0, 0);
  inferior_ptid = null_ptid;
  target_last_wait_ptid = minus_one_ptid;

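  /* Let infrun observe thread and inferior lifecycle events: ptid
     changes, stop requests, thread exits and inferior exits.  */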
  observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
  observer_attach_thread_stop_requested (infrun_thread_stop_requested);
  observer_attach_thread_exit (infrun_thread_thread_exit);
  observer_attach_inferior_exit (infrun_inferior_exit);

  /* Explicitly create without lookup, since that tries to create a
     value with a void type, and when we get here, gdbarch isn't
     initialized yet.  At this point, we're quite sure there isn't
     another convenience variable of the same name. */
  create_internalvar_type_lazy ("_siginfo", siginfo_make_value);

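  /* User-settable "observer" mode; as the help text below describes,
     it is meant to let GDB inspect the inferior without affecting its
     execution.  set_observer_mode applies the change.  */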
  add_setshow_boolean_cmd ("observer", no_class,
                           &observer_mode_1, _("\
Set whether gdb controls the inferior in observer mode."), _("\
Show whether gdb controls the inferior in observer mode."), _("\
In observer mode, GDB can get data from the inferior, but not\n\
affect its execution. Registers and memory may not be changed,\n\
breakpoints may not be set, and the program cannot be interrupted\n\
or signalled."),
                           set_observer_mode,
                           show_observer_mode,
                           &setlist,
                           &showlist);
}