binutils-gdb.git: gdb/infrun.c
1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986-2012 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include "gdb_string.h"
23 #include <ctype.h>
24 #include "symtab.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "exceptions.h"
28 #include "breakpoint.h"
29 #include "gdb_wait.h"
30 #include "gdbcore.h"
31 #include "gdbcmd.h"
32 #include "cli/cli-script.h"
33 #include "target.h"
34 #include "gdbthread.h"
35 #include "annotate.h"
36 #include "symfile.h"
37 #include "top.h"
38 #include <signal.h>
39 #include "inf-loop.h"
40 #include "regcache.h"
41 #include "value.h"
42 #include "observer.h"
43 #include "language.h"
44 #include "solib.h"
45 #include "main.h"
46 #include "dictionary.h"
47 #include "block.h"
48 #include "gdb_assert.h"
49 #include "mi/mi-common.h"
50 #include "event-top.h"
51 #include "record.h"
52 #include "inline-frame.h"
53 #include "jit.h"
54 #include "tracepoint.h"
55 #include "continuations.h"
56 #include "interps.h"
57 #include "skip.h"
58
59 /* Prototypes for local functions */
60
61 static void signals_info (char *, int);
62
63 static void handle_command (char *, int);
64
65 static void sig_print_info (enum target_signal);
66
67 static void sig_print_header (void);
68
69 static void resume_cleanups (void *);
70
71 static int hook_stop_stub (void *);
72
73 static int restore_selected_frame (void *);
74
75 static int follow_fork (void);
76
77 static void set_schedlock_func (char *args, int from_tty,
78 struct cmd_list_element *c);
79
80 static int currently_stepping (struct thread_info *tp);
81
82 static int currently_stepping_or_nexting_callback (struct thread_info *tp,
83 void *data);
84
85 static void xdb_handle_command (char *args, int from_tty);
86
87 static int prepare_to_proceed (int);
88
89 static void print_exited_reason (int exitstatus);
90
91 static void print_signal_exited_reason (enum target_signal siggnal);
92
93 static void print_no_history_reason (void);
94
95 static void print_signal_received_reason (enum target_signal siggnal);
96
97 static void print_end_stepping_range_reason (void);
98
99 void _initialize_infrun (void);
100
101 void nullify_last_target_wait_ptid (void);
102
103 static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);
104
105 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
106
107 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
108
109 /* When set, stop the 'step' command if we enter a function which has
110 no line number information. The normal behavior is that we step
111 over such functions. */
112 int step_stop_if_no_debug = 0;
113 static void
114 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
115 struct cmd_list_element *c, const char *value)
116 {
117 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
118 }
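/* Editor's note (illustrative; not part of the original source): the
   flag above backs the user-level "set step-mode" setting, e.g.

       (gdb) set step-mode on
       (gdb) show step-mode

   With it on, "step" stops at the first instruction of a function
   that has no line number information instead of stepping over it.  */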
119
120 /* In asynchronous mode, but simulating synchronous execution. */
121
122 int sync_execution = 0;
123
124 /* wait_for_inferior and normal_stop use this to notify the user
125 when the inferior stopped in a different thread than it had been
126 running in. */
127
128 static ptid_t previous_inferior_ptid;
129
130 /* Default behavior is to detach newly forked processes (legacy). */
131 int detach_fork = 1;
132
133 int debug_displaced = 0;
134 static void
135 show_debug_displaced (struct ui_file *file, int from_tty,
136 struct cmd_list_element *c, const char *value)
137 {
138 fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
139 }
140
141 int debug_infrun = 0;
142 static void
143 show_debug_infrun (struct ui_file *file, int from_tty,
144 struct cmd_list_element *c, const char *value)
145 {
146 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
147 }
148
149
150 /* Support for disabling address space randomization. */
151
152 int disable_randomization = 1;
153
154 static void
155 show_disable_randomization (struct ui_file *file, int from_tty,
156 struct cmd_list_element *c, const char *value)
157 {
158 if (target_supports_disable_randomization ())
159 fprintf_filtered (file,
160 _("Disabling randomization of debuggee's "
161 "virtual address space is %s.\n"),
162 value);
163 else
164 fputs_filtered (_("Disabling randomization of debuggee's "
165 "virtual address space is unsupported on\n"
166 "this platform.\n"), file);
167 }
168
169 static void
170 set_disable_randomization (char *args, int from_tty,
171 struct cmd_list_element *c)
172 {
173 if (!target_supports_disable_randomization ())
174 error (_("Disabling randomization of debuggee's "
175 "virtual address space is unsupported on\n"
176 "this platform."));
177 }
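/* Editor's note (illustrative): the pair of functions above back the
   user-level setting, e.g.

       (gdb) set disable-randomization off
       (gdb) show disable-randomization

   The default ("on") asks the target to disable address space layout
   randomization so that runs are reproducible; targets that cannot do
   so reject the "set" with the error above.  */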
178
179
180 /* If the program uses ELF-style shared libraries, then calls to
181 functions in shared libraries go through stubs, which live in a
182 table called the PLT (Procedure Linkage Table). The first time the
183 function is called, the stub sends control to the dynamic linker,
184 which looks up the function's real address, patches the stub so
185 that future calls will go directly to the function, and then passes
186 control to the function.
187
188 If we are stepping at the source level, we don't want to see any of
189 this --- we just want to skip over the stub and the dynamic linker.
190 The simple approach is to single-step until control leaves the
191 dynamic linker.
192
193 However, on some systems (e.g., Red Hat's 5.2 distribution) the
194 dynamic linker calls functions in the shared C library, so you
195 can't tell from the PC alone whether the dynamic linker is still
196 running. In this case, we use a step-resume breakpoint to get us
197 past the dynamic linker, as if we were using "next" to step over a
198 function call.
199
200 in_solib_dynsym_resolve_code() says whether we're in the dynamic
201 linker code or not. Normally, this means we single-step. However,
202 if SKIP_SOLIB_RESOLVER returns non-zero, its value is an
203 address where we can place a step-resume breakpoint to get past the
204 linker's symbol resolution function.
205
206 in_solib_dynsym_resolve_code() can generally be implemented in a
207 pretty portable way, by comparing the PC against the address ranges
208 of the dynamic linker's sections.
209
210 SKIP_SOLIB_RESOLVER is generally going to be system-specific, since
211 it depends on internal details of the dynamic linker. It's usually
212 not too hard to figure out where to put a breakpoint, but it
213 certainly isn't portable. SKIP_SOLIB_RESOLVER should do plenty of
214 sanity checking. If it can't figure things out, returning zero and
215 getting the (possibly confusing) stepping behavior is better than
216 signalling an error, which will obscure the change in the
217 inferior's state. */
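/* Illustrative sketch (added for exposition; the real handling lives
   in handle_inferior_event further below): when a "step" lands in the
   dynamic linker, the two hooks are consulted roughly like this:

       if (in_solib_dynsym_resolve_code (stop_pc))
         {
           CORE_ADDR pc_after_resolver
             = gdbarch_skip_solib_resolver (gdbarch, stop_pc);

           if (pc_after_resolver)
             ... place a step-resume breakpoint there and continue ...
           else
             ... keep single-stepping through the resolver ...
         }
*/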
218
219 /* This function returns TRUE if pc is the address of an instruction
220 that lies within the dynamic linker (such as the event hook, or the
221 dld itself).
222
223 This function must be used only when a dynamic linker event has
224 been caught, and the inferior is being stepped out of the hook, or
225 undefined results are guaranteed. */
226
227 #ifndef SOLIB_IN_DYNAMIC_LINKER
228 #define SOLIB_IN_DYNAMIC_LINKER(pid,pc) 0
229 #endif
230
231 /* "Observer mode" is somewhat like a more extreme version of
232 non-stop, in which all GDB operations that might affect the
233 target's execution have been disabled. */
234
235 static int non_stop_1 = 0;
236
237 int observer_mode = 0;
238 static int observer_mode_1 = 0;
239
240 static void
241 set_observer_mode (char *args, int from_tty,
242 struct cmd_list_element *c)
243 {
244 extern int pagination_enabled;
245
246 if (target_has_execution)
247 {
248 observer_mode_1 = observer_mode;
249 error (_("Cannot change this setting while the inferior is running."));
250 }
251
252 observer_mode = observer_mode_1;
253
254 may_write_registers = !observer_mode;
255 may_write_memory = !observer_mode;
256 may_insert_breakpoints = !observer_mode;
257 may_insert_tracepoints = !observer_mode;
258 /* We can insert fast tracepoints in or out of observer mode,
259 but enable them if we're going into this mode. */
260 if (observer_mode)
261 may_insert_fast_tracepoints = 1;
262 may_stop = !observer_mode;
263 update_target_permissions ();
264
265 /* Going *into* observer mode we must force non-stop, then
266 going out we leave it that way. */
267 if (observer_mode)
268 {
269 target_async_permitted = 1;
270 pagination_enabled = 0;
271 non_stop = non_stop_1 = 1;
272 }
273
274 if (from_tty)
275 printf_filtered (_("Observer mode is now %s.\n"),
276 (observer_mode ? "on" : "off"));
277 }
278
279 static void
280 show_observer_mode (struct ui_file *file, int from_tty,
281 struct cmd_list_element *c, const char *value)
282 {
283 fprintf_filtered (file, _("Observer mode is %s.\n"), value);
284 }
285
286 /* This updates the value of observer mode based on changes in
287 permissions. Note that we are deliberately ignoring the values of
288 may-write-registers and may-write-memory, since the user may have
289 reason to enable these during a session, for instance to turn on a
290 debugging-related global. */
291
292 void
293 update_observer_mode (void)
294 {
295 int newval;
296
297 newval = (!may_insert_breakpoints
298 && !may_insert_tracepoints
299 && may_insert_fast_tracepoints
300 && !may_stop
301 && non_stop);
302
303 /* Let the user know if things change. */
304 if (newval != observer_mode)
305 printf_filtered (_("Observer mode is now %s.\n"),
306 (newval ? "on" : "off"));
307
308 observer_mode = observer_mode_1 = newval;
309 }
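/* Editor's note (illustrative): observer mode is driven by the
   user-level "set observer on|off" command, whose set/show hooks are
   the functions above; update_observer_mode recomputes the mode when
   one of the individual permissions changes, e.g. after
   "set may-insert-breakpoints off".  */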
310
311 /* Tables of how to react to signals; the user sets them. */
312
313 static unsigned char *signal_stop;
314 static unsigned char *signal_print;
315 static unsigned char *signal_program;
316
317 /* Table of signals that the target may silently handle.
318 This is automatically determined from the flags above,
319 and simply cached here. */
320 static unsigned char *signal_pass;
321
322 #define SET_SIGS(nsigs,sigs,flags) \
323 do { \
324 int signum = (nsigs); \
325 while (signum-- > 0) \
326 if ((sigs)[signum]) \
327 (flags)[signum] = 1; \
328 } while (0)
329
330 #define UNSET_SIGS(nsigs,sigs,flags) \
331 do { \
332 int signum = (nsigs); \
333 while (signum-- > 0) \
334 if ((sigs)[signum]) \
335 (flags)[signum] = 0; \
336 } while (0)
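/* Illustrative usage of the two macros above (the real caller is
   handle_command further below): given an NSIGS-element array SIGS
   marking which signals the user named,

       SET_SIGS (nsigs, sigs, signal_stop);
       SET_SIGS (nsigs, sigs, signal_print);

   marks those signals as "stop" and "print", while

       UNSET_SIGS (nsigs, sigs, signal_program);

   clears their "pass to program" flag.  */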
337
338 /* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
339 this function is to avoid exporting `signal_program'. */
340
341 void
342 update_signals_program_target (void)
343 {
344 target_program_signals ((int) TARGET_SIGNAL_LAST, signal_program);
345 }
346
347 /* Value to pass to target_resume() to cause all threads to resume. */
348
349 #define RESUME_ALL minus_one_ptid
350
351 /* Command list pointer for the "stop" placeholder. */
352
353 static struct cmd_list_element *stop_command;
354
355 /* Function inferior was in as of last step command. */
356
357 static struct symbol *step_start_function;
358
359 /* Nonzero if we want to give control to the user when we're notified
360 of shared library events by the dynamic linker. */
361 int stop_on_solib_events;
362 static void
363 show_stop_on_solib_events (struct ui_file *file, int from_tty,
364 struct cmd_list_element *c, const char *value)
365 {
366 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
367 value);
368 }
369
370 /* Nonzero means we are expecting a trace trap
371 and should stop the inferior and return silently when it happens. */
372
373 int stop_after_trap;
374
375 /* Save register contents here when executing a "finish" command or when
376 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
377 Thus this contains the return value from the called function (assuming
378 values are returned in a register). */
379
380 struct regcache *stop_registers;
381
382 /* Nonzero after stop if current stack frame should be printed. */
383
384 static int stop_print_frame;
385
386 /* This is a cached copy of the pid/waitstatus of the last event
387 returned by target_wait()/deprecated_target_wait_hook(). This
388 information is returned by get_last_target_status(). */
389 static ptid_t target_last_wait_ptid;
390 static struct target_waitstatus target_last_waitstatus;
391
392 static void context_switch (ptid_t ptid);
393
394 void init_thread_stepping_state (struct thread_info *tss);
395
396 void init_infwait_state (void);
397
398 static const char follow_fork_mode_child[] = "child";
399 static const char follow_fork_mode_parent[] = "parent";
400
401 static const char *const follow_fork_mode_kind_names[] = {
402 follow_fork_mode_child,
403 follow_fork_mode_parent,
404 NULL
405 };
406
407 static const char *follow_fork_mode_string = follow_fork_mode_parent;
408 static void
409 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
410 struct cmd_list_element *c, const char *value)
411 {
412 fprintf_filtered (file,
413 _("Debugger response to a program "
414 "call of fork or vfork is \"%s\".\n"),
415 value);
416 }
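/* Editor's note (illustrative): the strings above back the user-level
   "set follow-fork-mode" setting, e.g.

       (gdb) set follow-fork-mode child
       (gdb) show follow-fork-mode

   follow_fork below simply compares follow_fork_mode_string against
   follow_fork_mode_child to decide which side of the fork to stay
   attached to.  */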
417 \f
418
419 /* Tell the target to follow the fork we're stopped at. Returns true
420 if the inferior should be resumed; false, if the target for some
421 reason decided it's best not to resume. */
422
423 static int
424 follow_fork (void)
425 {
426 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
427 int should_resume = 1;
428 struct thread_info *tp;
429
430 /* Copy user stepping state to the new inferior thread. FIXME: the
431 followed fork child thread should have a copy of most of the
432 parent thread structure's run control related fields, not just these.
433 Initialized to avoid "may be used uninitialized" warnings from gcc. */
434 struct breakpoint *step_resume_breakpoint = NULL;
435 struct breakpoint *exception_resume_breakpoint = NULL;
436 CORE_ADDR step_range_start = 0;
437 CORE_ADDR step_range_end = 0;
438 struct frame_id step_frame_id = { 0 };
439
440 if (!non_stop)
441 {
442 ptid_t wait_ptid;
443 struct target_waitstatus wait_status;
444
445 /* Get the last target status returned by target_wait(). */
446 get_last_target_status (&wait_ptid, &wait_status);
447
448 /* If not stopped at a fork event, then there's nothing else to
449 do. */
450 if (wait_status.kind != TARGET_WAITKIND_FORKED
451 && wait_status.kind != TARGET_WAITKIND_VFORKED)
452 return 1;
453
454 /* Check if we switched over from WAIT_PTID, since the event was
455 reported. */
456 if (!ptid_equal (wait_ptid, minus_one_ptid)
457 && !ptid_equal (inferior_ptid, wait_ptid))
458 {
459 /* We did. Switch back to WAIT_PTID thread, to tell the
460 target to follow it (in either direction). We'll
461 afterwards refuse to resume, and inform the user what
462 happened. */
463 switch_to_thread (wait_ptid);
464 should_resume = 0;
465 }
466 }
467
468 tp = inferior_thread ();
469
470 /* If there were any forks/vforks that were caught and are now to be
471 followed, then do so now. */
472 switch (tp->pending_follow.kind)
473 {
474 case TARGET_WAITKIND_FORKED:
475 case TARGET_WAITKIND_VFORKED:
476 {
477 ptid_t parent, child;
478
479 /* If the user did a next/step, etc, over a fork call,
480 preserve the stepping state in the fork child. */
481 if (follow_child && should_resume)
482 {
483 step_resume_breakpoint = clone_momentary_breakpoint
484 (tp->control.step_resume_breakpoint);
485 step_range_start = tp->control.step_range_start;
486 step_range_end = tp->control.step_range_end;
487 step_frame_id = tp->control.step_frame_id;
488 exception_resume_breakpoint
489 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
490
491 /* For now, delete the parent's sr breakpoint, otherwise,
492 parent/child sr breakpoints are considered duplicates,
493 and the child version will not be installed. Remove
494 this when the breakpoints module becomes aware of
495 inferiors and address spaces. */
496 delete_step_resume_breakpoint (tp);
497 tp->control.step_range_start = 0;
498 tp->control.step_range_end = 0;
499 tp->control.step_frame_id = null_frame_id;
500 delete_exception_resume_breakpoint (tp);
501 }
502
503 parent = inferior_ptid;
504 child = tp->pending_follow.value.related_pid;
505
506 /* Tell the target to do whatever is necessary to follow
507 either parent or child. */
508 if (target_follow_fork (follow_child))
509 {
510 /* Target refused to follow, or there's some other reason
511 we shouldn't resume. */
512 should_resume = 0;
513 }
514 else
515 {
516 /* This pending follow fork event is now handled, one way
517 or another. The previous selected thread may be gone
518 from the lists by now, but if it is still around, we need
519 to clear the pending follow request. */
520 tp = find_thread_ptid (parent);
521 if (tp)
522 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
523
524 /* This makes sure we don't try to apply the "Switched
525 over from WAIT_PID" logic above. */
526 nullify_last_target_wait_ptid ();
527
528 /* If we followed the child, switch to it... */
529 if (follow_child)
530 {
531 switch_to_thread (child);
532
533 /* ... and preserve the stepping state, in case the
534 user was stepping over the fork call. */
535 if (should_resume)
536 {
537 tp = inferior_thread ();
538 tp->control.step_resume_breakpoint
539 = step_resume_breakpoint;
540 tp->control.step_range_start = step_range_start;
541 tp->control.step_range_end = step_range_end;
542 tp->control.step_frame_id = step_frame_id;
543 tp->control.exception_resume_breakpoint
544 = exception_resume_breakpoint;
545 }
546 else
547 {
548 /* If we get here, it was because we're trying to
549 resume from a fork catchpoint, but, the user
550 has switched threads away from the thread that
551 forked. In that case, the resume command
552 issued is most likely not applicable to the
553 child, so just warn, and refuse to resume. */
554 warning (_("Not resuming: switched threads "
555 "before following fork child.\n"));
556 }
557
558 /* Reset breakpoints in the child as appropriate. */
559 follow_inferior_reset_breakpoints ();
560 }
561 else
562 switch_to_thread (parent);
563 }
564 }
565 break;
566 case TARGET_WAITKIND_SPURIOUS:
567 /* Nothing to follow. */
568 break;
569 default:
570 internal_error (__FILE__, __LINE__,
571 "Unexpected pending_follow.kind %d\n",
572 tp->pending_follow.kind);
573 break;
574 }
575
576 return should_resume;
577 }
578
579 void
580 follow_inferior_reset_breakpoints (void)
581 {
582 struct thread_info *tp = inferior_thread ();
583
584 /* Was there a step_resume breakpoint? (There was if the user
585 did a "next" at the fork() call.) If so, explicitly reset its
586 thread number.
587
588 step_resumes are a form of bp that are made to be per-thread.
589 Since we created the step_resume bp when the parent process
590 was being debugged, and now are switching to the child process,
591 from the breakpoint package's viewpoint, that's a switch of
592 "threads". We must update the bp's notion of which thread
593 it is for, or it'll be ignored when it triggers. */
594
595 if (tp->control.step_resume_breakpoint)
596 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
597
598 if (tp->control.exception_resume_breakpoint)
599 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
600
601 /* Reinsert all breakpoints in the child. The user may have set
602 breakpoints after catching the fork, in which case those
603 were never set in the child, but only in the parent. This makes
604 sure the inserted breakpoints match the breakpoint list. */
605
606 breakpoint_re_set ();
607 insert_breakpoints ();
608 }
609
610 /* The child has exited or execed: resume threads of the parent the
611 user wanted to be executing. */
612
613 static int
614 proceed_after_vfork_done (struct thread_info *thread,
615 void *arg)
616 {
617 int pid = * (int *) arg;
618
619 if (ptid_get_pid (thread->ptid) == pid
620 && is_running (thread->ptid)
621 && !is_executing (thread->ptid)
622 && !thread->stop_requested
623 && thread->suspend.stop_signal == TARGET_SIGNAL_0)
624 {
625 if (debug_infrun)
626 fprintf_unfiltered (gdb_stdlog,
627 "infrun: resuming vfork parent thread %s\n",
628 target_pid_to_str (thread->ptid));
629
630 switch_to_thread (thread->ptid);
631 clear_proceed_status ();
632 proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);
633 }
634
635 return 0;
636 }
637
638 /* Called whenever we notice an exec or exit event, to handle
639 detaching or resuming a vfork parent. */
640
641 static void
642 handle_vfork_child_exec_or_exit (int exec)
643 {
644 struct inferior *inf = current_inferior ();
645
646 if (inf->vfork_parent)
647 {
648 int resume_parent = -1;
649
650 /* This exec or exit marks the end of the shared memory region
651 between the parent and the child. If the user wanted to
652 detach from the parent, now is the time. */
653
654 if (inf->vfork_parent->pending_detach)
655 {
656 struct thread_info *tp;
657 struct cleanup *old_chain;
658 struct program_space *pspace;
659 struct address_space *aspace;
660
661 /* follow-fork child, detach-on-fork on. */
662
663 old_chain = make_cleanup_restore_current_thread ();
664
665 /* We're letting go of the parent. */
666 tp = any_live_thread_of_process (inf->vfork_parent->pid);
667 switch_to_thread (tp->ptid);
668
669 /* We're about to detach from the parent, which implicitly
670 removes breakpoints from its address space. There's a
671 catch here: we want to reuse the spaces for the child,
672 but, parent/child are still sharing the pspace at this
673 point, although the exec in reality makes the kernel give
674 the child a fresh set of new pages. The problem here is
675 that the breakpoints module, being unaware of this, would
676 likely choose the child process to write to the parent
677 address space. Swapping the child temporarily away from
678 the spaces has the desired effect. Yes, this is "sort
679 of" a hack. */
680
681 pspace = inf->pspace;
682 aspace = inf->aspace;
683 inf->aspace = NULL;
684 inf->pspace = NULL;
685
686 if (debug_infrun || info_verbose)
687 {
688 target_terminal_ours ();
689
690 if (exec)
691 fprintf_filtered (gdb_stdlog,
692 "Detaching vfork parent process "
693 "%d after child exec.\n",
694 inf->vfork_parent->pid);
695 else
696 fprintf_filtered (gdb_stdlog,
697 "Detaching vfork parent process "
698 "%d after child exit.\n",
699 inf->vfork_parent->pid);
700 }
701
702 target_detach (NULL, 0);
703
704 /* Put it back. */
705 inf->pspace = pspace;
706 inf->aspace = aspace;
707
708 do_cleanups (old_chain);
709 }
710 else if (exec)
711 {
712 /* We're staying attached to the parent, so, really give the
713 child a new address space. */
714 inf->pspace = add_program_space (maybe_new_address_space ());
715 inf->aspace = inf->pspace->aspace;
716 inf->removable = 1;
717 set_current_program_space (inf->pspace);
718
719 resume_parent = inf->vfork_parent->pid;
720
721 /* Break the bonds. */
722 inf->vfork_parent->vfork_child = NULL;
723 }
724 else
725 {
726 struct cleanup *old_chain;
727 struct program_space *pspace;
728
729 /* If this is a vfork child exiting, then the pspace and
730 aspaces were shared with the parent. Since we're
731 reporting the process exit, we'll be mourning all that is
732 found in the address space, and switching to null_ptid,
733 preparing to start a new inferior. But, since we don't
734 want to clobber the parent's address/program spaces, we
735 go ahead and create a new one for this exiting
736 inferior. */
737
738 /* Switch to null_ptid, so that clone_program_space doesn't want
739 to read the selected frame of a dead process. */
740 old_chain = save_inferior_ptid ();
741 inferior_ptid = null_ptid;
742
743 /* This inferior is dead, so avoid giving the breakpoints
744 module the option to write through to it (cloning a
745 program space resets breakpoints). */
746 inf->aspace = NULL;
747 inf->pspace = NULL;
748 pspace = add_program_space (maybe_new_address_space ());
749 set_current_program_space (pspace);
750 inf->removable = 1;
751 inf->symfile_flags = SYMFILE_NO_READ;
752 clone_program_space (pspace, inf->vfork_parent->pspace);
753 inf->pspace = pspace;
754 inf->aspace = pspace->aspace;
755
756 /* Put back inferior_ptid. We'll continue mourning this
757 inferior. */
758 do_cleanups (old_chain);
759
760 resume_parent = inf->vfork_parent->pid;
761 /* Break the bonds. */
762 inf->vfork_parent->vfork_child = NULL;
763 }
764
765 inf->vfork_parent = NULL;
766
767 gdb_assert (current_program_space == inf->pspace);
768
769 if (non_stop && resume_parent != -1)
770 {
771 /* If the user wanted the parent to be running, let it go
772 free now. */
773 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
774
775 if (debug_infrun)
776 fprintf_unfiltered (gdb_stdlog,
777 "infrun: resuming vfork parent process %d\n",
778 resume_parent);
779
780 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
781
782 do_cleanups (old_chain);
783 }
784 }
785 }
786
788 /* Enum strings for "set|show follow-exec-mode". */
788
789 static const char follow_exec_mode_new[] = "new";
790 static const char follow_exec_mode_same[] = "same";
791 static const char *const follow_exec_mode_names[] =
792 {
793 follow_exec_mode_new,
794 follow_exec_mode_same,
795 NULL,
796 };
797
798 static const char *follow_exec_mode_string = follow_exec_mode_same;
799 static void
800 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
801 struct cmd_list_element *c, const char *value)
802 {
803 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
804 }
805
806 /* EXECD_PATHNAME is assumed to be non-NULL. */
807
808 static void
809 follow_exec (ptid_t pid, char *execd_pathname)
810 {
811 struct thread_info *th = inferior_thread ();
812 struct inferior *inf = current_inferior ();
813
814 /* This is an exec event that we actually wish to pay attention to.
815 Refresh our symbol table to the newly exec'd program, remove any
816 momentary bp's, etc.
817
818 If there are breakpoints, they aren't really inserted now,
819 since the exec() transformed our inferior into a fresh set
820 of instructions.
821
822 We want to preserve symbolic breakpoints on the list, since
823 we have hopes that they can be reset after the new a.out's
824 symbol table is read.
825
826 However, any "raw" breakpoints must be removed from the list
827 (e.g., the solib bp's), since their address is probably invalid
828 now.
829
830 And, we DON'T want to call delete_breakpoints() here, since
831 that may write the bp's "shadow contents" (the instruction
832 value that was overwritten with a TRAP instruction). Since
833 we now have a new a.out, those shadow contents aren't valid. */
834
835 mark_breakpoints_out ();
836
837 update_breakpoints_after_exec ();
838
839 /* If there was one, it's gone now. We cannot truly step-to-next
840 statement through an exec(). */
841 th->control.step_resume_breakpoint = NULL;
842 th->control.exception_resume_breakpoint = NULL;
843 th->control.step_range_start = 0;
844 th->control.step_range_end = 0;
845
846 /* The target reports the exec event to the main thread, even if
847 some other thread does the exec, and even if the main thread was
848 already stopped --- if debugging in non-stop mode, it's possible
849 the user had the main thread held stopped in the previous image
850 --- release it now. This is the same behavior as step-over-exec
851 with scheduler-locking on in all-stop mode. */
852 th->stop_requested = 0;
853
854 /* What is this a.out's name? */
855 printf_unfiltered (_("%s is executing new program: %s\n"),
856 target_pid_to_str (inferior_ptid),
857 execd_pathname);
858
859 /* We've followed the inferior through an exec. Therefore, the
860 inferior has essentially been killed & reborn. */
861
862 gdb_flush (gdb_stdout);
863
864 breakpoint_init_inferior (inf_execd);
865
866 if (gdb_sysroot && *gdb_sysroot)
867 {
868 char *name = alloca (strlen (gdb_sysroot)
869 + strlen (execd_pathname)
870 + 1);
871
872 strcpy (name, gdb_sysroot);
873 strcat (name, execd_pathname);
874 execd_pathname = name;
875 }
876
877 /* Reset the shared library package. This ensures that we get a
878 shlib event when the child reaches "_start", at which point the
879 dld will have had a chance to initialize the child. */
880 /* Also, loading a symbol file below may trigger symbol lookups, and
881 we don't want those to be satisfied by the libraries of the
882 previous incarnation of this process. */
883 no_shared_libraries (NULL, 0);
884
885 if (follow_exec_mode_string == follow_exec_mode_new)
886 {
887 struct program_space *pspace;
888
889 /* The user wants to keep the old inferior and program spaces
890 around. Create a new fresh one, and switch to it. */
891
892 inf = add_inferior (current_inferior ()->pid);
893 pspace = add_program_space (maybe_new_address_space ());
894 inf->pspace = pspace;
895 inf->aspace = pspace->aspace;
896
897 exit_inferior_num_silent (current_inferior ()->num);
898
899 set_current_inferior (inf);
900 set_current_program_space (pspace);
901 }
902
903 gdb_assert (current_program_space == inf->pspace);
904
905 /* That a.out is now the one to use. */
906 exec_file_attach (execd_pathname, 0);
907
908 /* SYMFILE_DEFER_BP_RESET is used because the proper displacement for a PIE
909 (Position Independent Executable) main symbol file will only get applied by
910 solib_create_inferior_hook below. breakpoint_re_set would fail to insert
911 the breakpoints with the zero displacement. */
912
913 symbol_file_add (execd_pathname,
914 (inf->symfile_flags
915 | SYMFILE_MAINLINE | SYMFILE_DEFER_BP_RESET),
916 NULL, 0);
917
918 if ((inf->symfile_flags & SYMFILE_NO_READ) == 0)
919 set_initial_language ();
920
921 #ifdef SOLIB_CREATE_INFERIOR_HOOK
922 SOLIB_CREATE_INFERIOR_HOOK (PIDGET (inferior_ptid));
923 #else
924 solib_create_inferior_hook (0);
925 #endif
926
927 jit_inferior_created_hook ();
928
929 breakpoint_re_set ();
930
931 /* Reinsert all breakpoints. (Those which were symbolic have
932 been reset to the proper address in the new a.out, thanks
933 to symbol_file_command...). */
934 insert_breakpoints ();
935
936 /* The next resume of this inferior should bring it to the shlib
937 startup breakpoints. (If the user had also set bp's on
938 "main" from the old (parent) process, then they'll auto-
939 matically get reset there in the new process.). */
940 }
941
942 /* Non-zero if we are just simulating a single-step. This is needed
943 because we cannot remove the breakpoints in the inferior process
944 until after the `wait' in `wait_for_inferior'. */
945 static int singlestep_breakpoints_inserted_p = 0;
946
947 /* The thread we inserted single-step breakpoints for. */
948 static ptid_t singlestep_ptid;
949
950 /* PC when we started this single-step. */
951 static CORE_ADDR singlestep_pc;
952
953 /* If another thread hit the singlestep breakpoint, we save the original
954 thread here so that we can resume single-stepping it later. */
955 static ptid_t saved_singlestep_ptid;
956 static int stepping_past_singlestep_breakpoint;
957
958 /* If not equal to null_ptid, this means that after stepping over a breakpoint
959 is finished, we need to switch to deferred_step_ptid, and step it.
960
961 The use case is when one thread has hit a breakpoint, and then the user
962 has switched to another thread and issued 'step'. We need to step over
963 the breakpoint in the thread which hit it, but then continue
964 stepping the thread the user has selected. */
965 static ptid_t deferred_step_ptid;
966 \f
967 /* Displaced stepping. */
968
969 /* In non-stop debugging mode, we must take special care to manage
970 breakpoints properly; in particular, the traditional strategy for
971 stepping a thread past a breakpoint it has hit is unsuitable.
972 'Displaced stepping' is a tactic for stepping one thread past a
973 breakpoint it has hit while ensuring that other threads running
974 concurrently will hit the breakpoint as they should.
975
976 The traditional way to step a thread T off a breakpoint in a
977 multi-threaded program in all-stop mode is as follows:
978
979 a0) Initially, all threads are stopped, and breakpoints are not
980 inserted.
981 a1) We single-step T, leaving breakpoints uninserted.
982 a2) We insert breakpoints, and resume all threads.
983
984 In non-stop debugging, however, this strategy is unsuitable: we
985 don't want to have to stop all threads in the system in order to
986 continue or step T past a breakpoint. Instead, we use displaced
987 stepping:
988
989 n0) Initially, T is stopped, other threads are running, and
990 breakpoints are inserted.
991 n1) We copy the instruction "under" the breakpoint to a separate
992 location, outside the main code stream, making any adjustments
993 to the instruction, register, and memory state as directed by
994 T's architecture.
995 n2) We single-step T over the instruction at its new location.
996 n3) We adjust the resulting register and memory state as directed
997 by T's architecture. This includes resetting T's PC to point
998 back into the main instruction stream.
999 n4) We resume T.
1000
1001 This approach depends on the following gdbarch methods:
1002
1003 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1004 indicate where to copy the instruction, and how much space must
1005 be reserved there. We use these in step n1.
1006
1007 - gdbarch_displaced_step_copy_insn copies an instruction to a new
1008 address, and makes any necessary adjustments to the instruction,
1009 register contents, and memory. We use this in step n1.
1010
1011 - gdbarch_displaced_step_fixup adjusts registers and memory after
1012 we have successfully single-stepped the instruction, to yield the
1013 same effect the instruction would have had if we had executed it
1014 at its original address. We use this in step n3.
1015
1016 - gdbarch_displaced_step_free_closure provides cleanup.
1017
1018 The gdbarch_displaced_step_copy_insn and
1019 gdbarch_displaced_step_fixup functions must be written so that
1020 copying an instruction with gdbarch_displaced_step_copy_insn,
1021 single-stepping across the copied instruction, and then applying
1022 gdbarch_displaced_step_fixup should have the same effects on the
1023 thread's memory and registers as stepping the instruction in place
1024 would have. Exactly which responsibilities fall to the copy and
1025 which fall to the fixup is up to the author of those functions.
1026
1027 See the comments in gdbarch.sh for details.
1028
1029 Note that displaced stepping and software single-step cannot
1030 currently be used in combination, although with some care I think
1031 they could be made to. Software single-step works by placing
1032 breakpoints on all possible subsequent instructions; if the
1033 displaced instruction is a PC-relative jump, those breakpoints
1034 could fall in very strange places --- on pages that aren't
1035 executable, or at addresses that are not proper instruction
1036 boundaries. (We do generally let other threads run while we wait
1037 to hit the software single-step breakpoint, and they might
1038 encounter such a corrupted instruction.) One way to work around
1039 this would be to have gdbarch_displaced_step_copy_insn fully
1040 simulate the effect of PC-relative instructions (and return NULL)
1041 on architectures that use software single-stepping.
1042
1043 In non-stop mode, we can have independent and simultaneous step
1044 requests, so more than one thread may need to simultaneously step
1045 over a breakpoint. The current implementation assumes there is
1046 only one scratch space per process. In this case, we have to
1047 serialize access to the scratch space. If thread A wants to step
1048 over a breakpoint, but we are currently waiting for some other
1049 thread to complete a displaced step, we leave thread A stopped and
1050 place it in the displaced_step_request_queue. Whenever a displaced
1051 step finishes, we pick the next thread in the queue and start a new
1052 displaced step operation on it. See displaced_step_prepare and
1053 displaced_step_fixup for details. */
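/* Illustrative sketch of steps n1-n4 above in terms of the gdbarch
   methods (added for exposition; the real sequence is split between
   displaced_step_prepare and displaced_step_fixup below):

       copy = gdbarch_displaced_step_location (gdbarch);
       len = gdbarch_max_insn_length (gdbarch);
       ... save the LEN bytes at COPY ...
       closure = gdbarch_displaced_step_copy_insn (gdbarch, original,
                                                   copy, regcache);     n1
       regcache_write_pc (regcache, copy);
       ... single-step the thread ...                                   n2
       gdbarch_displaced_step_fixup (gdbarch, closure, original,
                                     copy, regcache);                   n3
       gdbarch_displaced_step_free_closure (gdbarch, closure);
       ... resume the thread normally ...                               n4
*/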
1054
1055 struct displaced_step_request
1056 {
1057 ptid_t ptid;
1058 struct displaced_step_request *next;
1059 };
1060
1061 /* Per-inferior displaced stepping state. */
1062 struct displaced_step_inferior_state
1063 {
1064 /* Pointer to next in linked list. */
1065 struct displaced_step_inferior_state *next;
1066
1067 /* The process this displaced step state refers to. */
1068 int pid;
1069
1070 /* A queue of pending displaced stepping requests. One entry per
1071 thread that needs to do a displaced step. */
1072 struct displaced_step_request *step_request_queue;
1073
1074 /* If this is not null_ptid, this is the thread carrying out a
1075 displaced single-step in process PID. This thread's state will
1076 require fixing up once it has completed its step. */
1077 ptid_t step_ptid;
1078
1079 /* The architecture the thread had when we stepped it. */
1080 struct gdbarch *step_gdbarch;
1081
1082 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
1083 for post-step cleanup. */
1084 struct displaced_step_closure *step_closure;
1085
1086 /* The address of the original instruction, and the copy we
1087 made. */
1088 CORE_ADDR step_original, step_copy;
1089
1090 /* Saved contents of copy area. */
1091 gdb_byte *step_saved_copy;
1092 };
1093
1094 /* The list of states of processes involved in displaced stepping
1095 presently. */
1096 static struct displaced_step_inferior_state *displaced_step_inferior_states;
1097
1098 /* Get the displaced stepping state of process PID. */
1099
1100 static struct displaced_step_inferior_state *
1101 get_displaced_stepping_state (int pid)
1102 {
1103 struct displaced_step_inferior_state *state;
1104
1105 for (state = displaced_step_inferior_states;
1106 state != NULL;
1107 state = state->next)
1108 if (state->pid == pid)
1109 return state;
1110
1111 return NULL;
1112 }
1113
1114 /* Add a new displaced stepping state for process PID to the displaced
1115 stepping state list, or return a pointer to an existing
1116 entry if one already exists. Never returns NULL. */
1117
1118 static struct displaced_step_inferior_state *
1119 add_displaced_stepping_state (int pid)
1120 {
1121 struct displaced_step_inferior_state *state;
1122
1123 for (state = displaced_step_inferior_states;
1124 state != NULL;
1125 state = state->next)
1126 if (state->pid == pid)
1127 return state;
1128
1129 state = xcalloc (1, sizeof (*state));
1130 state->pid = pid;
1131 state->next = displaced_step_inferior_states;
1132 displaced_step_inferior_states = state;
1133
1134 return state;
1135 }
1136
1137 /* If the inferior is displaced stepping, and ADDR equals the starting address
1138 of the copy area, return the corresponding displaced_step_closure. Otherwise,
1139 return NULL. */
1140
1141 struct displaced_step_closure*
1142 get_displaced_step_closure_by_addr (CORE_ADDR addr)
1143 {
1144 struct displaced_step_inferior_state *displaced
1145 = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1146
1147 /* If checking the mode of displaced instruction in copy area. */
1148 if (displaced && !ptid_equal (displaced->step_ptid, null_ptid)
1149 && (displaced->step_copy == addr))
1150 return displaced->step_closure;
1151
1152 return NULL;
1153 }
1154
1155 /* Remove the displaced stepping state of process PID. */
1156
1157 static void
1158 remove_displaced_stepping_state (int pid)
1159 {
1160 struct displaced_step_inferior_state *it, **prev_next_p;
1161
1162 gdb_assert (pid != 0);
1163
1164 it = displaced_step_inferior_states;
1165 prev_next_p = &displaced_step_inferior_states;
1166 while (it)
1167 {
1168 if (it->pid == pid)
1169 {
1170 *prev_next_p = it->next;
1171 xfree (it);
1172 return;
1173 }
1174
1175 prev_next_p = &it->next;
1176 it = *prev_next_p;
1177 }
1178 }
1179
1180 static void
1181 infrun_inferior_exit (struct inferior *inf)
1182 {
1183 remove_displaced_stepping_state (inf->pid);
1184 }
1185
1186 /* Enum strings for "set|show displaced-stepping". */
1187
1188 static const char can_use_displaced_stepping_auto[] = "auto";
1189 static const char can_use_displaced_stepping_on[] = "on";
1190 static const char can_use_displaced_stepping_off[] = "off";
1191 static const char *const can_use_displaced_stepping_enum[] =
1192 {
1193 can_use_displaced_stepping_auto,
1194 can_use_displaced_stepping_on,
1195 can_use_displaced_stepping_off,
1196 NULL,
1197 };
1198
1199 /* If ON, and the architecture supports it, GDB will use displaced
1200 stepping to step over breakpoints. If OFF, or if the architecture
1201 doesn't support it, GDB will instead use the traditional
1202 hold-and-step approach. If AUTO (which is the default), GDB will
1203 decide which technique to use to step over breakpoints depending on
1204 which of all-stop or non-stop mode is active --- displaced stepping
1205 in non-stop mode; hold-and-step in all-stop mode. */
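/* Editor's note (illustrative): the policy described above is chosen
   with the user-level command, e.g.

       (gdb) set displaced-stepping auto
       (gdb) show displaced-stepping

   use_displaced_stepping below combines the chosen policy with the
   gdbarch capability check to give a yes/no answer per step-over.  */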
1206
1207 static const char *can_use_displaced_stepping =
1208 can_use_displaced_stepping_auto;
1209
1210 static void
1211 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1212 struct cmd_list_element *c,
1213 const char *value)
1214 {
1215 if (can_use_displaced_stepping == can_use_displaced_stepping_auto)
1216 fprintf_filtered (file,
1217 _("Debugger's willingness to use displaced stepping "
1218 "to step over breakpoints is %s (currently %s).\n"),
1219 value, non_stop ? "on" : "off");
1220 else
1221 fprintf_filtered (file,
1222 _("Debugger's willingness to use displaced stepping "
1223 "to step over breakpoints is %s.\n"), value);
1224 }
1225
1226 /* Return non-zero if displaced stepping can/should be used to step
1227 over breakpoints. */
1228
1229 static int
1230 use_displaced_stepping (struct gdbarch *gdbarch)
1231 {
1232 return (((can_use_displaced_stepping == can_use_displaced_stepping_auto
1233 && non_stop)
1234 || can_use_displaced_stepping == can_use_displaced_stepping_on)
1235 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1236 && !RECORD_IS_USED);
1237 }
1238
1239 /* Clean out any stray displaced stepping state. */
1240 static void
1241 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1242 {
1243 /* Indicate that there is no cleanup pending. */
1244 displaced->step_ptid = null_ptid;
1245
1246 if (displaced->step_closure)
1247 {
1248 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1249 displaced->step_closure);
1250 displaced->step_closure = NULL;
1251 }
1252 }
1253
1254 static void
1255 displaced_step_clear_cleanup (void *arg)
1256 {
1257 struct displaced_step_inferior_state *state = arg;
1258
1259 displaced_step_clear (state);
1260 }
1261
1262 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1263 void
1264 displaced_step_dump_bytes (struct ui_file *file,
1265 const gdb_byte *buf,
1266 size_t len)
1267 {
1268 int i;
1269
1270 for (i = 0; i < len; i++)
1271 fprintf_unfiltered (file, "%02x ", buf[i]);
1272 fputs_unfiltered ("\n", file);
1273 }
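/* For example, dumping the four bytes 0x55 0x48 0x89 0xe5 with the
   function above produces the line "55 48 89 e5 " (each byte as two
   hex digits followed by a space).  */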
1274
1275 /* Prepare to single-step, using displaced stepping.
1276
1277 Note that we cannot use displaced stepping when we have a signal to
1278 deliver. If we have a signal to deliver and an instruction to step
1279 over, then after the step, there will be no indication from the
1280 target whether the thread entered a signal handler or ignored the
1281 signal and stepped over the instruction successfully --- both cases
1282 result in a simple SIGTRAP. In the first case we mustn't do a
1283 fixup, and in the second case we must --- but we can't tell which.
1284 Comments in the code for 'random signals' in handle_inferior_event
1285 explain how we handle this case instead.
1286
1287 Returns 1 if preparing was successful -- this thread is going to be
1288 stepped now; or 0 if displaced stepping this thread got queued. */
1289 static int
1290 displaced_step_prepare (ptid_t ptid)
1291 {
1292 struct cleanup *old_cleanups, *ignore_cleanups;
1293 struct regcache *regcache = get_thread_regcache (ptid);
1294 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1295 CORE_ADDR original, copy;
1296 ULONGEST len;
1297 struct displaced_step_closure *closure;
1298 struct displaced_step_inferior_state *displaced;
1299 int status;
1300
1301 /* We should never reach this function if the architecture does not
1302 support displaced stepping. */
1303 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1304
1305 /* We have to displaced step one thread at a time, as we only have
1306 access to a single scratch space per inferior. */
1307
1308 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1309
1310 if (!ptid_equal (displaced->step_ptid, null_ptid))
1311 {
1312 /* Already waiting for a displaced step to finish. Defer this
1313 request and place it in the queue. */
1314 struct displaced_step_request *req, *new_req;
1315
1316 if (debug_displaced)
1317 fprintf_unfiltered (gdb_stdlog,
1318 "displaced: defering step of %s\n",
1319 target_pid_to_str (ptid));
1320
1321 new_req = xmalloc (sizeof (*new_req));
1322 new_req->ptid = ptid;
1323 new_req->next = NULL;
1324
1325 if (displaced->step_request_queue)
1326 {
1327 for (req = displaced->step_request_queue;
1328 req && req->next;
1329 req = req->next)
1330 ;
1331 req->next = new_req;
1332 }
1333 else
1334 displaced->step_request_queue = new_req;
1335
1336 return 0;
1337 }
1338 else
1339 {
1340 if (debug_displaced)
1341 fprintf_unfiltered (gdb_stdlog,
1342 "displaced: stepping %s now\n",
1343 target_pid_to_str (ptid));
1344 }
1345
1346 displaced_step_clear (displaced);
1347
1348 old_cleanups = save_inferior_ptid ();
1349 inferior_ptid = ptid;
1350
1351 original = regcache_read_pc (regcache);
1352
1353 copy = gdbarch_displaced_step_location (gdbarch);
1354 len = gdbarch_max_insn_length (gdbarch);
1355
1356 /* Save the original contents of the copy area. */
1357 displaced->step_saved_copy = xmalloc (len);
1358 ignore_cleanups = make_cleanup (free_current_contents,
1359 &displaced->step_saved_copy);
1360 status = target_read_memory (copy, displaced->step_saved_copy, len);
1361 if (status != 0)
1362 throw_error (MEMORY_ERROR,
1363 _("Error accessing memory address %s (%s) for "
1364 "displaced-stepping scratch space."),
1365 paddress (gdbarch, copy), safe_strerror (status));
1366 if (debug_displaced)
1367 {
1368 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1369 paddress (gdbarch, copy));
1370 displaced_step_dump_bytes (gdb_stdlog,
1371 displaced->step_saved_copy,
1372 len);
1373 };
1374
1375 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1376 original, copy, regcache);
1377
1378 /* We don't support the fully-simulated case at present. */
1379 gdb_assert (closure);
1380
1381 /* Save the information we need to fix things up if the step
1382 succeeds. */
1383 displaced->step_ptid = ptid;
1384 displaced->step_gdbarch = gdbarch;
1385 displaced->step_closure = closure;
1386 displaced->step_original = original;
1387 displaced->step_copy = copy;
1388
1389 make_cleanup (displaced_step_clear_cleanup, displaced);
1390
1391 /* Resume execution at the copy. */
1392 regcache_write_pc (regcache, copy);
1393
1394 discard_cleanups (ignore_cleanups);
1395
1396 do_cleanups (old_cleanups);
1397
1398 if (debug_displaced)
1399 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1400 paddress (gdbarch, copy));
1401
1402 return 1;
1403 }
1404
1405 static void
1406 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1407 const gdb_byte *myaddr, int len)
1408 {
1409 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1410
1411 inferior_ptid = ptid;
1412 write_memory (memaddr, myaddr, len);
1413 do_cleanups (ptid_cleanup);
1414 }
1415
1416 /* Restore the contents of the copy area for thread PTID. */
1417
1418 static void
1419 displaced_step_restore (struct displaced_step_inferior_state *displaced,
1420 ptid_t ptid)
1421 {
1422 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1423
1424 write_memory_ptid (ptid, displaced->step_copy,
1425 displaced->step_saved_copy, len);
1426 if (debug_displaced)
1427 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s %s\n",
1428 target_pid_to_str (ptid),
1429 paddress (displaced->step_gdbarch,
1430 displaced->step_copy));
1431 }
1432
1433 static void
1434 displaced_step_fixup (ptid_t event_ptid, enum target_signal signal)
1435 {
1436 struct cleanup *old_cleanups;
1437 struct displaced_step_inferior_state *displaced
1438 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1439
1440 /* Was any thread of this process doing a displaced step? */
1441 if (displaced == NULL)
1442 return;
1443
1444 /* Was this event for the thread we displaced? */
1445 if (ptid_equal (displaced->step_ptid, null_ptid)
1446 || ! ptid_equal (displaced->step_ptid, event_ptid))
1447 return;
1448
1449 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1450
1451 displaced_step_restore (displaced, displaced->step_ptid);
1452
1453 /* Did the instruction complete successfully? */
1454 if (signal == TARGET_SIGNAL_TRAP)
1455 {
1456 /* Fix up the resulting state. */
1457 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1458 displaced->step_closure,
1459 displaced->step_original,
1460 displaced->step_copy,
1461 get_thread_regcache (displaced->step_ptid));
1462 }
1463 else
1464 {
1465 /* Since the instruction didn't complete, all we can do is
1466 relocate the PC. */
1467 struct regcache *regcache = get_thread_regcache (event_ptid);
1468 CORE_ADDR pc = regcache_read_pc (regcache);
1469
1470 pc = displaced->step_original + (pc - displaced->step_copy);
1471 regcache_write_pc (regcache, pc);
1472 }
1473
1474 do_cleanups (old_cleanups);
1475
1476 displaced->step_ptid = null_ptid;
1477
1478 /* Are there any pending displaced stepping requests? If so, run
1479 one now. Leave the state object around, since we're likely to
1480 need it again soon. */
1481 while (displaced->step_request_queue)
1482 {
1483 struct displaced_step_request *head;
1484 ptid_t ptid;
1485 struct regcache *regcache;
1486 struct gdbarch *gdbarch;
1487 CORE_ADDR actual_pc;
1488 struct address_space *aspace;
1489
1490 head = displaced->step_request_queue;
1491 ptid = head->ptid;
1492 displaced->step_request_queue = head->next;
1493 xfree (head);
1494
1495 context_switch (ptid);
1496
1497 regcache = get_thread_regcache (ptid);
1498 actual_pc = regcache_read_pc (regcache);
1499 aspace = get_regcache_aspace (regcache);
1500
1501 if (breakpoint_here_p (aspace, actual_pc))
1502 {
1503 if (debug_displaced)
1504 fprintf_unfiltered (gdb_stdlog,
1505 "displaced: stepping queued %s now\n",
1506 target_pid_to_str (ptid));
1507
1508 displaced_step_prepare (ptid);
1509
1510 gdbarch = get_regcache_arch (regcache);
1511
1512 if (debug_displaced)
1513 {
1514 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1515 gdb_byte buf[4];
1516
1517 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1518 paddress (gdbarch, actual_pc));
1519 read_memory (actual_pc, buf, sizeof (buf));
1520 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1521 }
1522
1523 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1524 displaced->step_closure))
1525 target_resume (ptid, 1, TARGET_SIGNAL_0);
1526 else
1527 target_resume (ptid, 0, TARGET_SIGNAL_0);
1528
1529 /* Done, we're stepping a thread. */
1530 break;
1531 }
1532 else
1533 {
1534 int step;
1535 struct thread_info *tp = inferior_thread ();
1536
1537 /* The breakpoint we were sitting under has since been
1538 removed. */
1539 tp->control.trap_expected = 0;
1540
1541 /* Go back to what we were trying to do. */
1542 step = currently_stepping (tp);
1543
1544 if (debug_displaced)
1545 fprintf_unfiltered (gdb_stdlog,
1546 "breakpoint is gone %s: step(%d)\n",
1547 target_pid_to_str (tp->ptid), step);
1548
1549 target_resume (ptid, step, TARGET_SIGNAL_0);
1550 tp->suspend.stop_signal = TARGET_SIGNAL_0;
1551
1552 /* This request was discarded. See if there's any other
1553 thread waiting for its turn. */
1554 }
1555 }
1556 }
1557
1558 /* Update global variables holding ptids to hold NEW_PTID if they were
1559 holding OLD_PTID. */
1560 static void
1561 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1562 {
1563 struct displaced_step_request *it;
1564 struct displaced_step_inferior_state *displaced;
1565
1566 if (ptid_equal (inferior_ptid, old_ptid))
1567 inferior_ptid = new_ptid;
1568
1569 if (ptid_equal (singlestep_ptid, old_ptid))
1570 singlestep_ptid = new_ptid;
1571
1572 if (ptid_equal (deferred_step_ptid, old_ptid))
1573 deferred_step_ptid = new_ptid;
1574
1575 for (displaced = displaced_step_inferior_states;
1576 displaced;
1577 displaced = displaced->next)
1578 {
1579 if (ptid_equal (displaced->step_ptid, old_ptid))
1580 displaced->step_ptid = new_ptid;
1581
1582 for (it = displaced->step_request_queue; it; it = it->next)
1583 if (ptid_equal (it->ptid, old_ptid))
1584 it->ptid = new_ptid;
1585 }
1586 }
1587
1588 \f
1589 /* Resuming. */
1590
1591 /* Things to clean up if we QUIT out of resume (). */
1592 static void
1593 resume_cleanups (void *ignore)
1594 {
1595 normal_stop ();
1596 }
1597
1598 static const char schedlock_off[] = "off";
1599 static const char schedlock_on[] = "on";
1600 static const char schedlock_step[] = "step";
1601 static const char *const scheduler_enums[] = {
1602 schedlock_off,
1603 schedlock_on,
1604 schedlock_step,
1605 NULL
1606 };
1607 static const char *scheduler_mode = schedlock_off;
1608 static void
1609 show_scheduler_mode (struct ui_file *file, int from_tty,
1610 struct cmd_list_element *c, const char *value)
1611 {
1612 fprintf_filtered (file,
1613 _("Mode for locking scheduler "
1614 "during execution is \"%s\".\n"),
1615 value);
1616 }
1617
1618 static void
1619 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1620 {
1621 if (!target_can_lock_scheduler)
1622 {
1623 scheduler_mode = schedlock_off;
1624 error (_("Target '%s' cannot support this command."), target_shortname);
1625 }
1626 }
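/* Editor's note (illustrative): the enum strings above back the
   user-level "set scheduler-locking" setting, e.g.

       (gdb) set scheduler-locking step

   "off" resumes all threads, "on" resumes only the current thread,
   and "step" locks the scheduler only while stepping; see
   user_visible_resume_ptid below for how the mode is applied.  */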
1627
1628 /* True if execution commands resume all threads of all processes by
1629 default; otherwise, resume only threads of the current inferior
1630 process. */
1631 int sched_multi = 0;
1632
1633 /* Try to set up for software single stepping over the specified location.
1634 Return 1 if target_resume() should use hardware single step.
1635
1636 GDBARCH the current gdbarch.
1637 PC the location to step over. */
1638
1639 static int
1640 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1641 {
1642 int hw_step = 1;
1643
1644 if (execution_direction == EXEC_FORWARD
1645 && gdbarch_software_single_step_p (gdbarch)
1646 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1647 {
1648 hw_step = 0;
1649 /* Do not pull these breakpoints until after a `wait' in
1650 `wait_for_inferior'. */
1651 singlestep_breakpoints_inserted_p = 1;
1652 singlestep_ptid = inferior_ptid;
1653 singlestep_pc = pc;
1654 }
1655 return hw_step;
1656 }
1657
1658 /* Return a ptid representing the set of threads that we will proceed with,
1659 from the perspective of the user/frontend. We may actually resume
1660 fewer threads at first, e.g., if a thread is stopped at a
1661 breakpoint that needs stepping-off, but that should not be visible
1662 to the user/frontend, and neither should the frontend/user be
1663 allowed to proceed any of the threads that happen to be stopped for
1664 internal run control handling, if a previous command wanted them
1665 resumed. */
1666
1667 ptid_t
1668 user_visible_resume_ptid (int step)
1669 {
1670 /* By default, resume all threads of all processes. */
1671 ptid_t resume_ptid = RESUME_ALL;
1672
1673 /* Maybe resume only all threads of the current process. */
1674 if (!sched_multi && target_supports_multi_process ())
1675 {
1676 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1677 }
1678
1679 /* Maybe resume a single thread after all. */
1680 if (non_stop)
1681 {
1682 /* With non-stop mode on, threads are always handled
1683 individually. */
1684 resume_ptid = inferior_ptid;
1685 }
1686 else if ((scheduler_mode == schedlock_on)
1687 || (scheduler_mode == schedlock_step
1688 && (step || singlestep_breakpoints_inserted_p)))
1689 {
1690 /* User-settable 'scheduler' mode requires solo thread resume. */
1691 resume_ptid = inferior_ptid;
1692 }
1693
1694 return resume_ptid;
1695 }
1696
1697 /* Resume the inferior, but allow a QUIT. This is useful if the user
1698 wants to interrupt some lengthy single-stepping operation
1699 (for child processes, the SIGINT goes to the inferior, and so
1700 we get a SIGINT random_signal, but for remote debugging and perhaps
1701 other targets, that's not true).
1702
1703 STEP nonzero if we should step (zero to continue instead).
1704 SIG is the signal to give the inferior (zero for none). */
1705 void
1706 resume (int step, enum target_signal sig)
1707 {
1708 int should_resume = 1;
1709 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1710 struct regcache *regcache = get_current_regcache ();
1711 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1712 struct thread_info *tp = inferior_thread ();
1713 CORE_ADDR pc = regcache_read_pc (regcache);
1714 struct address_space *aspace = get_regcache_aspace (regcache);
1715
1716 QUIT;
1717
1718 if (current_inferior ()->waiting_for_vfork_done)
1719 {
1720 /* Don't try to single-step a vfork parent that is waiting for
1721 the child to get out of the shared memory region (by exec'ing
1722 or exiting). This is particularly important on software
1723 single-step archs, as the child process would trip on the
1724 software single step breakpoint inserted for the parent
1725 process. Since the parent will not actually execute any
1726 instruction until the child is out of the shared region (such
1727 are vfork's semantics), it is safe to simply continue it.
1728 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
1729 the parent, and tell it to `keep_going', which automatically
1730 re-sets it to stepping. */
1731 if (debug_infrun)
1732 fprintf_unfiltered (gdb_stdlog,
1733 "infrun: resume : clear step\n");
1734 step = 0;
1735 }
1736
1737 if (debug_infrun)
1738 fprintf_unfiltered (gdb_stdlog,
1739 "infrun: resume (step=%d, signal=%d), "
1740 "trap_expected=%d, current thread [%s] at %s\n",
1741 step, sig, tp->control.trap_expected,
1742 target_pid_to_str (inferior_ptid),
1743 paddress (gdbarch, pc));
1744
1745 /* Normally, by the time we reach `resume', the breakpoints are either
1746 removed or inserted, as appropriate. The exception is if we're sitting
1747 at a permanent breakpoint; we need to step over it, but permanent
1748 breakpoints can't be removed. So we have to test for it here. */
1749 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
1750 {
1751 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1752 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1753 else
1754 error (_("\
1755 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1756 how to step past a permanent breakpoint on this architecture. Try using\n\
1757 a command like `return' or `jump' to continue execution."));
1758 }
1759
1760 /* If enabled, step over breakpoints by executing a copy of the
1761 instruction at a different address.
1762
1763 We can't use displaced stepping when we have a signal to deliver;
1764 the comments for displaced_step_prepare explain why. The
1765 comments in handle_inferior_event for dealing with 'random
1766 signals' explain what we do instead.
1767
1768 We can't use displaced stepping when we are waiting for a vfork_done
1769 event; displaced stepping breaks the vfork child in the same way a
1770 software single-step breakpoint would. */
1771 if (use_displaced_stepping (gdbarch)
1772 && (tp->control.trap_expected
1773 || (step && gdbarch_software_single_step_p (gdbarch)))
1774 && sig == TARGET_SIGNAL_0
1775 && !current_inferior ()->waiting_for_vfork_done)
1776 {
1777 struct displaced_step_inferior_state *displaced;
1778
1779 if (!displaced_step_prepare (inferior_ptid))
1780 {
1781 /* Got placed in displaced stepping queue. Will be resumed
1782 later when all the currently queued displaced stepping
1783 requests finish. The thread is not executing at this point,
1784 and the call to set_executing will be made later. But we
1785 need to call set_running here, since from the frontend's point of
1786 view, the thread is running. */
1787 set_running (inferior_ptid, 1);
1788 discard_cleanups (old_cleanups);
1789 return;
1790 }
1791
1792 /* Update pc to reflect the new address from which we will execute
1793 instructions due to displaced stepping. */
1794 pc = regcache_read_pc (get_thread_regcache (inferior_ptid));
1795
1796 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1797 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
1798 displaced->step_closure);
1799 }
1800
1801 /* Do we need to do it the hard way, w/temp breakpoints? */
1802 else if (step)
1803 step = maybe_software_singlestep (gdbarch, pc);
1804
1805 /* Currently, our software single-step implementation leads to different
1806 results than hardware single-stepping in one situation: when stepping
1807 into delivering a signal which has an associated signal handler,
1808 hardware single-step will stop at the first instruction of the handler,
1809 while software single-step will simply skip execution of the handler.
1810
1811 For now, this difference in behavior is accepted since there is no
1812 easy way to actually implement single-stepping into a signal handler
1813 without kernel support.
1814
1815 However, there is one scenario where this difference leads to follow-on
1816 problems: if we're stepping off a breakpoint by removing all breakpoints
1817 and then single-stepping. In this case, the software single-step
1818 behavior means that even if there is a *breakpoint* in the signal
1819 handler, GDB still would not stop.
1820
1821 Fortunately, we can at least fix this particular issue. We detect
1822 here the case where we are about to deliver a signal while software
1823 single-stepping with breakpoints removed. In this situation, we
1824 revert the decisions to remove all breakpoints and insert single-
1825 step breakpoints, and instead we install a step-resume breakpoint
1826 at the current address, deliver the signal without stepping, and
1827 once we arrive back at the step-resume breakpoint, actually step
1828 over the breakpoint we originally wanted to step over. */
1829 if (singlestep_breakpoints_inserted_p
1830 && tp->control.trap_expected && sig != TARGET_SIGNAL_0)
1831 {
1832 /* If we have nested signals or a pending signal is delivered
1833 immediately after a handler returns, we might already have
1834 a step-resume breakpoint set on the earlier handler. We cannot
1835 set another step-resume breakpoint; just continue on until the
1836 original breakpoint is hit. */
1837 if (tp->control.step_resume_breakpoint == NULL)
1838 {
1839 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
1840 tp->step_after_step_resume_breakpoint = 1;
1841 }
1842
1843 remove_single_step_breakpoints ();
1844 singlestep_breakpoints_inserted_p = 0;
1845
1846 insert_breakpoints ();
1847 tp->control.trap_expected = 0;
1848 }
1849
1850 if (should_resume)
1851 {
1852 ptid_t resume_ptid;
1853
1854 /* If STEP is set, it's a request to use hardware stepping
1855 facilities. But in that case, we should never have
1856 software single-step breakpoints inserted. */
1857 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1858
1859 /* Decide the set of threads to ask the target to resume. Start
1860 by assuming everything will be resumed, then narrow the set
1861 by applying increasingly restrictive conditions. */
1862 resume_ptid = user_visible_resume_ptid (step);
1863
1864 /* Maybe resume a single thread after all. */
1865 if (singlestep_breakpoints_inserted_p
1866 && stepping_past_singlestep_breakpoint)
1867 {
1868 /* The situation here is as follows. In thread T1 we wanted to
1869 single-step. Lacking hardware single-stepping, we've set a
1870 breakpoint at the PC of the next instruction -- call it P.
1871 After resuming, we've hit that breakpoint in thread T2.
1872 Now we've removed the original breakpoint, inserted a breakpoint
1873 at P+1, and are trying to step to advance T2 past the breakpoint.
1874 We need to step only T2: if T1 is allowed to run freely,
1875 it can run past P, and if other threads are allowed to run,
1876 they can hit the breakpoint at P+1, and nested hits of single-step
1877 breakpoints are not something we'd want -- that's complicated
1878 to support, and has no value. */
1879 resume_ptid = inferior_ptid;
1880 }
1881 else if ((step || singlestep_breakpoints_inserted_p)
1882 && tp->control.trap_expected)
1883 {
1884 /* We're allowing a thread to run past a breakpoint it has
1885 hit, by single-stepping the thread with the breakpoint
1886 removed. In that case, we need to single-step only this
1887 thread, and keep others stopped, as they can miss this
1888 breakpoint if allowed to run.
1889
1890 The current code actually removes all breakpoints when
1891 doing this, not just the one being stepped over, so if we
1892 let other threads run, we can actually miss any
1893 breakpoint, not just the one at PC. */
1894 resume_ptid = inferior_ptid;
1895 }
1896
1897 if (gdbarch_cannot_step_breakpoint (gdbarch))
1898 {
1899 /* Most targets can step a breakpoint instruction, thus
1900 executing it normally. But if this one cannot, just
1901 continue and we will hit it anyway. */
1902 if (step && breakpoint_inserted_here_p (aspace, pc))
1903 step = 0;
1904 }
1905
1906 if (debug_displaced
1907 && use_displaced_stepping (gdbarch)
1908 && tp->control.trap_expected)
1909 {
1910 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1911 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1912 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1913 gdb_byte buf[4];
1914
1915 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1916 paddress (resume_gdbarch, actual_pc));
1917 read_memory (actual_pc, buf, sizeof (buf));
1918 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1919 }
1920
1921 /* Install inferior's terminal modes. */
1922 target_terminal_inferior ();
1923
1924 /* Avoid confusing the next resume, if the next stop/resume
1925 happens to apply to another thread. */
1926 tp->suspend.stop_signal = TARGET_SIGNAL_0;
1927
1928 /* Advise target which signals may be handled silently. If we have
1929 removed breakpoints because we are stepping over one (which can
1930 happen only if we are not using displaced stepping), we need to
1931 receive all signals to avoid accidentally skipping a breakpoint
1932 during execution of a signal handler. */
1933 if ((step || singlestep_breakpoints_inserted_p)
1934 && tp->control.trap_expected
1935 && !use_displaced_stepping (gdbarch))
1936 target_pass_signals (0, NULL);
1937 else
1938 target_pass_signals ((int) TARGET_SIGNAL_LAST, signal_pass);
1939
1940 target_resume (resume_ptid, step, sig);
1941 }
1942
1943 discard_cleanups (old_cleanups);
1944 }
1945 \f
1946 /* Proceeding. */
1947
1948 /* Clear out all variables saying what to do when the inferior is continued.
1949 First do this, then set the ones you want, then call `proceed'. */
1950
1951 static void
1952 clear_proceed_status_thread (struct thread_info *tp)
1953 {
1954 if (debug_infrun)
1955 fprintf_unfiltered (gdb_stdlog,
1956 "infrun: clear_proceed_status_thread (%s)\n",
1957 target_pid_to_str (tp->ptid));
1958
1959 tp->control.trap_expected = 0;
1960 tp->control.step_range_start = 0;
1961 tp->control.step_range_end = 0;
1962 tp->control.step_frame_id = null_frame_id;
1963 tp->control.step_stack_frame_id = null_frame_id;
1964 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
1965 tp->stop_requested = 0;
1966
1967 tp->control.stop_step = 0;
1968
1969 tp->control.proceed_to_finish = 0;
1970
1971 /* Discard any remaining commands or status from previous stop. */
1972 bpstat_clear (&tp->control.stop_bpstat);
1973 }
1974
1975 static int
1976 clear_proceed_status_callback (struct thread_info *tp, void *data)
1977 {
1978 if (is_exited (tp->ptid))
1979 return 0;
1980
1981 clear_proceed_status_thread (tp);
1982 return 0;
1983 }
1984
1985 void
1986 clear_proceed_status (void)
1987 {
1988 if (!non_stop)
1989 {
1990 /* In all-stop mode, delete the per-thread status of all
1991 threads; even if inferior_ptid is null_ptid, there may be
1992 threads on the list. E.g., we may be launching a new
1993 process, while selecting the executable. */
1994 iterate_over_threads (clear_proceed_status_callback, NULL);
1995 }
1996
1997 if (!ptid_equal (inferior_ptid, null_ptid))
1998 {
1999 struct inferior *inferior;
2000
2001 if (non_stop)
2002 {
2003 /* If in non-stop mode, only delete the per-thread status of
2004 the current thread. */
2005 clear_proceed_status_thread (inferior_thread ());
2006 }
2007
2008 inferior = current_inferior ();
2009 inferior->control.stop_soon = NO_STOP_QUIETLY;
2010 }
2011
2012 stop_after_trap = 0;
2013
2014 observer_notify_about_to_proceed ();
2015
2016 if (stop_registers)
2017 {
2018 regcache_xfree (stop_registers);
2019 stop_registers = NULL;
2020 }
2021 }
2022
2023 /* Check the current thread against the thread that reported the most recent
2024 event. If a step-over is required, return TRUE and set the current thread
2025 to the old thread. Otherwise return FALSE.
2026
2027 This should be suitable for any targets that support threads. */
2028
2029 static int
2030 prepare_to_proceed (int step)
2031 {
2032 ptid_t wait_ptid;
2033 struct target_waitstatus wait_status;
2034 int schedlock_enabled;
2035
2036 /* With non-stop mode on, threads are always handled individually. */
2037 gdb_assert (! non_stop);
2038
2039 /* Get the last target status returned by target_wait(). */
2040 get_last_target_status (&wait_ptid, &wait_status);
2041
2042 /* Make sure we were stopped at a breakpoint. */
2043 if (wait_status.kind != TARGET_WAITKIND_STOPPED
2044 || (wait_status.value.sig != TARGET_SIGNAL_TRAP
2045 && wait_status.value.sig != TARGET_SIGNAL_ILL
2046 && wait_status.value.sig != TARGET_SIGNAL_SEGV
2047 && wait_status.value.sig != TARGET_SIGNAL_EMT))
2048 {
2049 return 0;
2050 }
2051
2052 schedlock_enabled = (scheduler_mode == schedlock_on
2053 || (scheduler_mode == schedlock_step
2054 && step));
2055
2056 /* Don't switch over to WAIT_PTID if scheduler locking is on. */
2057 if (schedlock_enabled)
2058 return 0;
2059
2060 /* Don't switch over if we're about to resume some process other
2061 than WAIT_PTID's, and schedule-multiple is off. */
2062 if (!sched_multi
2063 && ptid_get_pid (wait_ptid) != ptid_get_pid (inferior_ptid))
2064 return 0;
2065
2066 /* Switched over from WAIT_PTID. */
2067 if (!ptid_equal (wait_ptid, minus_one_ptid)
2068 && !ptid_equal (inferior_ptid, wait_ptid))
2069 {
2070 struct regcache *regcache = get_thread_regcache (wait_ptid);
2071
2072 if (breakpoint_here_p (get_regcache_aspace (regcache),
2073 regcache_read_pc (regcache)))
2074 {
2075 /* If stepping, remember current thread to switch back to. */
2076 if (step)
2077 deferred_step_ptid = inferior_ptid;
2078
2079 /* Switch back to the WAIT_PTID thread. */
2080 switch_to_thread (wait_ptid);
2081
2082 if (debug_infrun)
2083 fprintf_unfiltered (gdb_stdlog,
2084 "infrun: prepare_to_proceed (step=%d), "
2085 "switched to [%s]\n",
2086 step, target_pid_to_str (inferior_ptid));
2087
2088 /* We return 1 to indicate that there is a breakpoint here,
2089 so we need to step over it before continuing to avoid
2090 hitting it straight away. */
2091 return 1;
2092 }
2093 }
2094
2095 return 0;
2096 }
2097
2098 /* Basic routine for continuing the program in various fashions.
2099
2100 ADDR is the address to resume at, or -1 for resume where stopped.
2101 SIGGNAL is the signal to give it, or 0 for none,
2102 or -1 for act according to how it stopped.
2103 STEP is nonzero if we should trap after one instruction.
2104 -1 means return after that and print nothing.
2105 You should probably set various step_... variables
2106 before calling here, if you are stepping.
2107
2108 You should call clear_proceed_status before calling proceed. */
2109
2110 void
2111 proceed (CORE_ADDR addr, enum target_signal siggnal, int step)
2112 {
2113 struct regcache *regcache;
2114 struct gdbarch *gdbarch;
2115 struct thread_info *tp;
2116 CORE_ADDR pc;
2117 struct address_space *aspace;
2118 int oneproc = 0;
2119
2120 /* If we're stopped at a fork/vfork, follow the branch set by the
2121 "set follow-fork-mode" command; otherwise, we'll just proceed
2122 resuming the current thread. */
2123 if (!follow_fork ())
2124 {
2125 /* The target for some reason decided not to resume. */
2126 normal_stop ();
2127 if (target_can_async_p ())
2128 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2129 return;
2130 }
2131
2132 /* We'll update this if & when we switch to a new thread. */
2133 previous_inferior_ptid = inferior_ptid;
2134
2135 regcache = get_current_regcache ();
2136 gdbarch = get_regcache_arch (regcache);
2137 aspace = get_regcache_aspace (regcache);
2138 pc = regcache_read_pc (regcache);
2139
2140 if (step > 0)
2141 step_start_function = find_pc_function (pc);
2142 if (step < 0)
2143 stop_after_trap = 1;
2144
2145 if (addr == (CORE_ADDR) -1)
2146 {
2147 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
2148 && execution_direction != EXEC_REVERSE)
2149 /* There is a breakpoint at the address we will resume at,
2150 step one instruction before inserting breakpoints so that
2151 we do not stop right away (and report a second hit at this
2152 breakpoint).
2153
2154 Note, we don't do this in reverse, because we won't
2155 actually be executing the breakpoint insn anyway.
2156 We'll be (un-)executing the previous instruction. */
2157
2158 oneproc = 1;
2159 else if (gdbarch_single_step_through_delay_p (gdbarch)
2160 && gdbarch_single_step_through_delay (gdbarch,
2161 get_current_frame ()))
2162 /* We stepped onto an instruction that needs to be stepped
2163 again before re-inserting the breakpoint; do so. */
2164 oneproc = 1;
2165 }
2166 else
2167 {
2168 regcache_write_pc (regcache, addr);
2169 }
2170
2171 if (debug_infrun)
2172 fprintf_unfiltered (gdb_stdlog,
2173 "infrun: proceed (addr=%s, signal=%d, step=%d)\n",
2174 paddress (gdbarch, addr), siggnal, step);
2175
2176 if (non_stop)
2177 /* In non-stop, each thread is handled individually. The context
2178 must already be set to the right thread here. */
2179 ;
2180 else
2181 {
2182 /* In a multi-threaded task we may select another thread and
2183 then continue or step.
2184
2185 But if the old thread was stopped at a breakpoint, it will
2186 immediately cause another breakpoint stop without any
2187 execution (i.e. it will report a breakpoint hit incorrectly).
2188 So we must step over it first.
2189
2190 prepare_to_proceed checks the current thread against the
2191 thread that reported the most recent event. If a step-over
2192 is required it returns TRUE and sets the current thread to
2193 the old thread. */
2194 if (prepare_to_proceed (step))
2195 oneproc = 1;
2196 }
2197
2198 /* prepare_to_proceed may change the current thread. */
2199 tp = inferior_thread ();
2200
2201 if (oneproc)
2202 {
2203 tp->control.trap_expected = 1;
2204 /* If displaced stepping is enabled, we can step over the
2205 breakpoint without hitting it, so leave all breakpoints
2206 inserted. Otherwise we need to disable all breakpoints, step
2207 one instruction, and then re-add them when that step is
2208 finished. */
2209 if (!use_displaced_stepping (gdbarch))
2210 remove_breakpoints ();
2211 }
2212
2213 /* We can insert breakpoints if we're not trying to step over one,
2214 or if we are stepping over one but we're using displaced stepping
2215 to do so. */
2216 if (! tp->control.trap_expected || use_displaced_stepping (gdbarch))
2217 insert_breakpoints ();
2218
2219 if (!non_stop)
2220 {
2221 /* Pass the last stop signal to the thread we're resuming,
2222 irrespective of whether the current thread is the thread that
2223 got the last event or not. This was historically GDB's
2224 behaviour before keeping a stop_signal per thread. */
2225
2226 struct thread_info *last_thread;
2227 ptid_t last_ptid;
2228 struct target_waitstatus last_status;
2229
2230 get_last_target_status (&last_ptid, &last_status);
2231 if (!ptid_equal (inferior_ptid, last_ptid)
2232 && !ptid_equal (last_ptid, null_ptid)
2233 && !ptid_equal (last_ptid, minus_one_ptid))
2234 {
2235 last_thread = find_thread_ptid (last_ptid);
2236 if (last_thread)
2237 {
2238 tp->suspend.stop_signal = last_thread->suspend.stop_signal;
2239 last_thread->suspend.stop_signal = TARGET_SIGNAL_0;
2240 }
2241 }
2242 }
2243
2244 if (siggnal != TARGET_SIGNAL_DEFAULT)
2245 tp->suspend.stop_signal = siggnal;
2246 /* If this signal should not be seen by program,
2247 give it zero. Used for debugging signals. */
2248 else if (!signal_program[tp->suspend.stop_signal])
2249 tp->suspend.stop_signal = TARGET_SIGNAL_0;
2250
2251 annotate_starting ();
2252
2253 /* Make sure that output from GDB appears before output from the
2254 inferior. */
2255 gdb_flush (gdb_stdout);
2256
2257 /* Refresh prev_pc value just prior to resuming. This used to be
2258 done in stop_stepping; however, setting prev_pc there did not handle
2259 scenarios such as inferior function calls or returning from
2260 a function via the return command. In those cases, the prev_pc
2261 value was not set properly for subsequent commands. The prev_pc value
2262 is used to initialize the starting line number in the ecs. With an
2263 invalid value, the gdb next command ends up stopping at the position
2264 represented by the next line table entry past our start position.
2265 On platforms that generate one line table entry per line, this
2266 is not a problem. However, on the ia64, the compiler generates
2267 extraneous line table entries that do not increase the line number.
2268 When we issue the gdb next command on the ia64 after an inferior call
2269 or a return command, we often end up a few instructions forward, still
2270 within the original line we started in.
2271
2272 An attempt was made to refresh the prev_pc at the same time the
2273 execution_control_state is initialized (for instance, just before
2274 waiting for an inferior event). But this approach did not work
2275 because of platforms that use ptrace, where the pc register cannot
2276 be read unless the inferior is stopped. At that point, we are not
2277 guaranteed the inferior is stopped and so the regcache_read_pc() call
2278 can fail. Setting the prev_pc value here ensures the value is updated
2279 correctly when the inferior is stopped. */
2280 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2281
2282 /* Fill in with reasonable starting values. */
2283 init_thread_stepping_state (tp);
2284
2285 /* Reset to normal state. */
2286 init_infwait_state ();
2287
2288 /* Resume inferior. */
2289 resume (oneproc || step || bpstat_should_step (), tp->suspend.stop_signal);
2290
2291 /* Wait for it to stop (if not standalone)
2292 and in any case decode why it stopped, and act accordingly. */
2293 /* Do this only if we are not using the event loop, or if the target
2294 does not support asynchronous execution. */
2295 if (!target_can_async_p ())
2296 {
2297 wait_for_inferior ();
2298 normal_stop ();
2299 }
2300 }
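/* Editor's sketch (not part of infrun.c): the calling convention documented
   above proceed, roughly as an execution command such as "continue" uses
   it -- clear the per-thread proceed state first, then resume from wherever
   the inferior last stopped, letting it keep whatever signal it stopped
   with.  Illustrative only; the real callers live in infcmd.c.  */
#if 0
  clear_proceed_status ();
  proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);
#endif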
2301 \f
2302
2303 /* Start remote-debugging of a machine over a serial link. */
2304
2305 void
2306 start_remote (int from_tty)
2307 {
2308 struct inferior *inferior;
2309
2310 inferior = current_inferior ();
2311 inferior->control.stop_soon = STOP_QUIETLY_REMOTE;
2312
2313 /* Always go on waiting for the target, regardless of the mode. */
2314 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2315 indicate to wait_for_inferior that a target should timeout if
2316 nothing is returned (instead of just blocking). Because of this,
2317 targets expecting an immediate response need to, internally, set
2318 things up so that the target_wait() is forced to eventually
2319 timeout. */
2320 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2321 differentiate to its caller what the state of the target is after
2322 the initial open has been performed. Here we're assuming that
2323 the target has stopped. It should be possible to eventually have
2324 target_open() return to the caller an indication that the target
2325 is currently running and GDB state should be set to the same as
2326 for an async run. */
2327 wait_for_inferior ();
2328
2329 /* Now that the inferior has stopped, do any bookkeeping like
2330 loading shared libraries. We want to do this before normal_stop,
2331 so that the displayed frame is up to date. */
2332 post_create_inferior (&current_target, from_tty);
2333
2334 normal_stop ();
2335 }
2336
2337 /* Initialize static vars when a new inferior begins. */
2338
2339 void
2340 init_wait_for_inferior (void)
2341 {
2342 /* These are meaningless until the first time through wait_for_inferior. */
2343
2344 breakpoint_init_inferior (inf_starting);
2345
2346 clear_proceed_status ();
2347
2348 stepping_past_singlestep_breakpoint = 0;
2349 deferred_step_ptid = null_ptid;
2350
2351 target_last_wait_ptid = minus_one_ptid;
2352
2353 previous_inferior_ptid = inferior_ptid;
2354 init_infwait_state ();
2355
2356 /* Discard any skipped inlined frames. */
2357 clear_inline_frame_state (minus_one_ptid);
2358 }
2359
2360 \f
2361 /* This enum encodes possible reasons for doing a target_wait, so that
2362 wait_for_inferior can call target_wait in one place. (Ultimately the
2363 call will be moved out of the infinite loop entirely.) */
2364
2365 enum infwait_states
2366 {
2367 infwait_normal_state,
2368 infwait_thread_hop_state,
2369 infwait_step_watch_state,
2370 infwait_nonstep_watch_state
2371 };
2372
2373 /* The PTID we'll do a target_wait on. */
2374 ptid_t waiton_ptid;
2375
2376 /* Current inferior wait state. */
2377 enum infwait_states infwait_state;
2378
2379 /* Data to be passed around while handling an event. This data is
2380 discarded between events. */
2381 struct execution_control_state
2382 {
2383 ptid_t ptid;
2384 /* The thread that got the event, if this was a thread event; NULL
2385 otherwise. */
2386 struct thread_info *event_thread;
2387
2388 struct target_waitstatus ws;
2389 int random_signal;
2390 int stop_func_filled_in;
2391 CORE_ADDR stop_func_start;
2392 CORE_ADDR stop_func_end;
2393 const char *stop_func_name;
2394 int new_thread_event;
2395 int wait_some_more;
2396 };
2397
2398 static void handle_inferior_event (struct execution_control_state *ecs);
2399
2400 static void handle_step_into_function (struct gdbarch *gdbarch,
2401 struct execution_control_state *ecs);
2402 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2403 struct execution_control_state *ecs);
2404 static void check_exception_resume (struct execution_control_state *,
2405 struct frame_info *, struct symbol *);
2406
2407 static void stop_stepping (struct execution_control_state *ecs);
2408 static void prepare_to_wait (struct execution_control_state *ecs);
2409 static void keep_going (struct execution_control_state *ecs);
2410
2411 /* Callback for iterate_over_threads. If the thread is stopped, but
2412 the user/frontend doesn't know about that yet, go through
2413 normal_stop, as if the thread had just stopped now. ARG points at
2414 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2415 ptid_is_pid(PTID) is true, applies to all threads of the process
2416 pointed at by PTID. Otherwise, applies only to the thread pointed
2417 at by PTID. */
2418
2419 static int
2420 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2421 {
2422 ptid_t ptid = * (ptid_t *) arg;
2423
2424 if ((ptid_equal (info->ptid, ptid)
2425 || ptid_equal (minus_one_ptid, ptid)
2426 || (ptid_is_pid (ptid)
2427 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2428 && is_running (info->ptid)
2429 && !is_executing (info->ptid))
2430 {
2431 struct cleanup *old_chain;
2432 struct execution_control_state ecss;
2433 struct execution_control_state *ecs = &ecss;
2434
2435 memset (ecs, 0, sizeof (*ecs));
2436
2437 old_chain = make_cleanup_restore_current_thread ();
2438
2439 switch_to_thread (info->ptid);
2440
2441 /* Go through handle_inferior_event/normal_stop, so we always
2442 have consistent output as if the stop event had been
2443 reported. */
2444 ecs->ptid = info->ptid;
2445 ecs->event_thread = find_thread_ptid (info->ptid);
2446 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2447 ecs->ws.value.sig = TARGET_SIGNAL_0;
2448
2449 handle_inferior_event (ecs);
2450
2451 if (!ecs->wait_some_more)
2452 {
2453 struct thread_info *tp;
2454
2455 normal_stop ();
2456
2457 /* Finish off the continuations. */
2458 tp = inferior_thread ();
2459 do_all_intermediate_continuations_thread (tp, 1);
2460 do_all_continuations_thread (tp, 1);
2461 }
2462
2463 do_cleanups (old_chain);
2464 }
2465
2466 return 0;
2467 }
2468
2469 /* This function is attached as a "thread_stop_requested" observer.
2470 Clean up local state that assumed the PTID was to be resumed, and
2471 report the stop to the frontend. */
2472
2473 static void
2474 infrun_thread_stop_requested (ptid_t ptid)
2475 {
2476 struct displaced_step_inferior_state *displaced;
2477
2478 /* PTID was requested to stop. Remove it from the displaced
2479 stepping queue, so we don't try to resume it automatically. */
2480
2481 for (displaced = displaced_step_inferior_states;
2482 displaced;
2483 displaced = displaced->next)
2484 {
2485 struct displaced_step_request *it, **prev_next_p;
2486
2487 it = displaced->step_request_queue;
2488 prev_next_p = &displaced->step_request_queue;
2489 while (it)
2490 {
2491 if (ptid_match (it->ptid, ptid))
2492 {
2493 *prev_next_p = it->next;
2494 it->next = NULL;
2495 xfree (it);
2496 }
2497 else
2498 {
2499 prev_next_p = &it->next;
2500 }
2501
2502 it = *prev_next_p;
2503 }
2504 }
2505
2506 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2507 }
2508
2509 static void
2510 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2511 {
2512 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2513 nullify_last_target_wait_ptid ();
2514 }
2515
2516 /* Callback for iterate_over_threads. */
2517
2518 static int
2519 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2520 {
2521 if (is_exited (info->ptid))
2522 return 0;
2523
2524 delete_step_resume_breakpoint (info);
2525 delete_exception_resume_breakpoint (info);
2526 return 0;
2527 }
2528
2529 /* In all-stop, delete the step resume breakpoint of any thread that
2530 had one. In non-stop, delete the step resume breakpoint of the
2531 thread that just stopped. */
2532
2533 static void
2534 delete_step_thread_step_resume_breakpoint (void)
2535 {
2536 if (!target_has_execution
2537 || ptid_equal (inferior_ptid, null_ptid))
2538 /* If the inferior has exited, we have already deleted the step
2539 resume breakpoints out of GDB's lists. */
2540 return;
2541
2542 if (non_stop)
2543 {
2544 /* If in non-stop mode, only delete the step-resume or
2545 longjmp-resume breakpoint of the thread that just stopped
2546 stepping. */
2547 struct thread_info *tp = inferior_thread ();
2548
2549 delete_step_resume_breakpoint (tp);
2550 delete_exception_resume_breakpoint (tp);
2551 }
2552 else
2553 /* In all-stop mode, delete all step-resume and longjmp-resume
2554 breakpoints of any thread that had them. */
2555 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2556 }
2557
2558 /* A cleanup wrapper. */
2559
2560 static void
2561 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2562 {
2563 delete_step_thread_step_resume_breakpoint ();
2564 }
2565
2566 /* Pretty print the results of target_wait, for debugging purposes. */
2567
2568 static void
2569 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2570 const struct target_waitstatus *ws)
2571 {
2572 char *status_string = target_waitstatus_to_string (ws);
2573 struct ui_file *tmp_stream = mem_fileopen ();
2574 char *text;
2575
2576 /* The text is split over several lines because it was getting too long.
2577 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2578 output as a unit; we want only one timestamp printed if debug_timestamp
2579 is set. */
2580
2581 fprintf_unfiltered (tmp_stream,
2582 "infrun: target_wait (%d", PIDGET (waiton_ptid));
2583 if (PIDGET (waiton_ptid) != -1)
2584 fprintf_unfiltered (tmp_stream,
2585 " [%s]", target_pid_to_str (waiton_ptid));
2586 fprintf_unfiltered (tmp_stream, ", status) =\n");
2587 fprintf_unfiltered (tmp_stream,
2588 "infrun: %d [%s],\n",
2589 PIDGET (result_ptid), target_pid_to_str (result_ptid));
2590 fprintf_unfiltered (tmp_stream,
2591 "infrun: %s\n",
2592 status_string);
2593
2594 text = ui_file_xstrdup (tmp_stream, NULL);
2595
2596 /* This uses %s in part to handle %'s in the text, but also to avoid
2597 a gcc error: the format attribute requires a string literal. */
2598 fprintf_unfiltered (gdb_stdlog, "%s", text);
2599
2600 xfree (status_string);
2601 xfree (text);
2602 ui_file_delete (tmp_stream);
2603 }
2604
2605 /* Prepare and stabilize the inferior for detaching it. E.g.,
2606 detaching while a thread is displaced stepping is a recipe for
2607 crashing it, as nothing would readjust the PC out of the scratch
2608 pad. */
2609
2610 void
2611 prepare_for_detach (void)
2612 {
2613 struct inferior *inf = current_inferior ();
2614 ptid_t pid_ptid = pid_to_ptid (inf->pid);
2615 struct cleanup *old_chain_1;
2616 struct displaced_step_inferior_state *displaced;
2617
2618 displaced = get_displaced_stepping_state (inf->pid);
2619
2620 /* Is any thread of this process displaced stepping? If not,
2621 there's nothing else to do. */
2622 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
2623 return;
2624
2625 if (debug_infrun)
2626 fprintf_unfiltered (gdb_stdlog,
2627 "displaced-stepping in-process while detaching");
2628
2629 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
2630 inf->detaching = 1;
2631
2632 while (!ptid_equal (displaced->step_ptid, null_ptid))
2633 {
2634 struct cleanup *old_chain_2;
2635 struct execution_control_state ecss;
2636 struct execution_control_state *ecs;
2637
2638 ecs = &ecss;
2639 memset (ecs, 0, sizeof (*ecs));
2640
2641 overlay_cache_invalid = 1;
2642
2643 if (deprecated_target_wait_hook)
2644 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
2645 else
2646 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
2647
2648 if (debug_infrun)
2649 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
2650
2651 /* If an error happens while handling the event, propagate GDB's
2652 knowledge of the executing state to the frontend/user running
2653 state. */
2654 old_chain_2 = make_cleanup (finish_thread_state_cleanup,
2655 &minus_one_ptid);
2656
2657 /* In non-stop mode, each thread is handled individually.
2658 Switch early, so the global state is set correctly for this
2659 thread. */
2660 if (non_stop
2661 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2662 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2663 context_switch (ecs->ptid);
2664
2665 /* Now figure out what to do with the result. */
2666 handle_inferior_event (ecs);
2667
2668 /* No error, don't finish the state yet. */
2669 discard_cleanups (old_chain_2);
2670
2671 /* Breakpoints and watchpoints are not installed on the target
2672 at this point, and signals are passed directly to the
2673 inferior, so this must mean the process is gone. */
2674 if (!ecs->wait_some_more)
2675 {
2676 discard_cleanups (old_chain_1);
2677 error (_("Program exited while detaching"));
2678 }
2679 }
2680
2681 discard_cleanups (old_chain_1);
2682 }
2683
2684 /* Wait for control to return from the inferior to the debugger.
2685
2686 If the inferior gets a signal, we may decide to start it up again
2687 instead of returning. That is why there is a loop in this function.
2688 When this function actually returns, it means the inferior
2689 should be left stopped and GDB should read more commands. */
2690
2691 void
2692 wait_for_inferior (void)
2693 {
2694 struct cleanup *old_cleanups;
2695 struct execution_control_state ecss;
2696 struct execution_control_state *ecs;
2697
2698 if (debug_infrun)
2699 fprintf_unfiltered
2700 (gdb_stdlog, "infrun: wait_for_inferior ()\n");
2701
2702 old_cleanups =
2703 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2704
2705 ecs = &ecss;
2706 memset (ecs, 0, sizeof (*ecs));
2707
2708 while (1)
2709 {
2710 struct cleanup *old_chain;
2711
2712 overlay_cache_invalid = 1;
2713
2714 if (deprecated_target_wait_hook)
2715 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2716 else
2717 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2718
2719 if (debug_infrun)
2720 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2721
2722 /* If an error happens while handling the event, propagate GDB's
2723 knowledge of the executing state to the frontend/user running
2724 state. */
2725 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2726
2727 /* Now figure out what to do with the result. */
2728 handle_inferior_event (ecs);
2729
2730 /* No error, don't finish the state yet. */
2731 discard_cleanups (old_chain);
2732
2733 if (!ecs->wait_some_more)
2734 break;
2735 }
2736
2737 do_cleanups (old_cleanups);
2738 }
2739
2740 /* Asynchronous version of wait_for_inferior. It is called by the
2741 event loop whenever a change of state is detected on the file
2742 descriptor corresponding to the target. It can be called more than
2743 once to complete a single execution command. In such cases we need
2744 to keep the state in a global variable ECSS. If it is the last time
2745 that this function is called for a single execution command, then
2746 report to the user that the inferior has stopped, and do the
2747 necessary cleanups. */
2748
2749 void
2750 fetch_inferior_event (void *client_data)
2751 {
2752 struct execution_control_state ecss;
2753 struct execution_control_state *ecs = &ecss;
2754 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2755 struct cleanup *ts_old_chain;
2756 int was_sync = sync_execution;
2757 int cmd_done = 0;
2758
2759 memset (ecs, 0, sizeof (*ecs));
2760
2761 /* We're handling a live event, so make sure we're doing live
2762 debugging. If we're looking at traceframes while the target is
2763 running, we're going to need to get back to that mode after
2764 handling the event. */
2765 if (non_stop)
2766 {
2767 make_cleanup_restore_current_traceframe ();
2768 set_current_traceframe (-1);
2769 }
2770
2771 if (non_stop)
2772 /* In non-stop mode, the user/frontend should not notice a thread
2773 switch due to internal events. Make sure we revert to the
2774 user-selected thread and frame after handling the event and
2775 running any breakpoint commands. */
2776 make_cleanup_restore_current_thread ();
2777
2778 overlay_cache_invalid = 1;
2779
2780 make_cleanup_restore_integer (&execution_direction);
2781 execution_direction = target_execution_direction ();
2782
2783 if (deprecated_target_wait_hook)
2784 ecs->ptid =
2785 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2786 else
2787 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2788
2789 if (debug_infrun)
2790 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2791
2792 if (non_stop
2793 && ecs->ws.kind != TARGET_WAITKIND_IGNORE
2794 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED
2795 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2796 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2797 /* In non-stop mode, each thread is handled individually. Switch
2798 early, so the global state is set correctly for this
2799 thread. */
2800 context_switch (ecs->ptid);
2801
2802 /* If an error happens while handling the event, propagate GDB's
2803 knowledge of the executing state to the frontend/user running
2804 state. */
2805 if (!non_stop)
2806 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2807 else
2808 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2809
2810 /* This cleanup runs before the make_cleanup_restore_current_thread above,
2811 so it still applies to the thread which has thrown the exception. */
2812 make_bpstat_clear_actions_cleanup ();
2813
2814 /* Now figure out what to do with the result. */
2815 handle_inferior_event (ecs);
2816
2817 if (!ecs->wait_some_more)
2818 {
2819 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2820
2821 delete_step_thread_step_resume_breakpoint ();
2822
2823 /* We may not find an inferior if this was a process exit. */
2824 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
2825 normal_stop ();
2826
2827 if (target_has_execution
2828 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED
2829 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2830 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2831 && ecs->event_thread->step_multi
2832 && ecs->event_thread->control.stop_step)
2833 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2834 else
2835 {
2836 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2837 cmd_done = 1;
2838 }
2839 }
2840
2841 /* No error, don't finish the thread states yet. */
2842 discard_cleanups (ts_old_chain);
2843
2844 /* Revert thread and frame. */
2845 do_cleanups (old_chain);
2846
2847 /* If the inferior was in sync execution mode, and now isn't,
2848 restore the prompt (a synchronous execution command has finished,
2849 and we're ready for input). */
2850 if (interpreter_async && was_sync && !sync_execution)
2851 display_gdb_prompt (0);
2852
2853 if (cmd_done
2854 && !was_sync
2855 && exec_done_display_p
2856 && (ptid_equal (inferior_ptid, null_ptid)
2857 || !is_running (inferior_ptid)))
2858 printf_unfiltered (_("completed.\n"));
2859 }
2860
2861 /* Record the frame and location we're currently stepping through. */
2862 void
2863 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2864 {
2865 struct thread_info *tp = inferior_thread ();
2866
2867 tp->control.step_frame_id = get_frame_id (frame);
2868 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
2869
2870 tp->current_symtab = sal.symtab;
2871 tp->current_line = sal.line;
2872 }
2873
2874 /* Clear context switchable stepping state. */
2875
2876 void
2877 init_thread_stepping_state (struct thread_info *tss)
2878 {
2879 tss->stepping_over_breakpoint = 0;
2880 tss->step_after_step_resume_breakpoint = 0;
2881 }
2882
2883 /* Return the cached copy of the last pid/waitstatus returned by
2884 target_wait()/deprecated_target_wait_hook(). The data is actually
2885 cached by handle_inferior_event(), which gets called immediately
2886 after target_wait()/deprecated_target_wait_hook(). */
2887
2888 void
2889 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
2890 {
2891 *ptidp = target_last_wait_ptid;
2892 *status = target_last_waitstatus;
2893 }
2894
2895 void
2896 nullify_last_target_wait_ptid (void)
2897 {
2898 target_last_wait_ptid = minus_one_ptid;
2899 }
2900
2901 /* Switch thread contexts. */
2902
2903 static void
2904 context_switch (ptid_t ptid)
2905 {
2906 if (debug_infrun && !ptid_equal (ptid, inferior_ptid))
2907 {
2908 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
2909 target_pid_to_str (inferior_ptid));
2910 fprintf_unfiltered (gdb_stdlog, "to %s\n",
2911 target_pid_to_str (ptid));
2912 }
2913
2914 switch_to_thread (ptid);
2915 }
2916
2917 static void
2918 adjust_pc_after_break (struct execution_control_state *ecs)
2919 {
2920 struct regcache *regcache;
2921 struct gdbarch *gdbarch;
2922 struct address_space *aspace;
2923 CORE_ADDR breakpoint_pc;
2924
2925 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
2926 we aren't, just return.
2927
2928 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
2929 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
2930 implemented by software breakpoints should be handled through the normal
2931 breakpoint layer.
2932
2933 NOTE drow/2004-01-31: On some targets, breakpoints may generate
2934 different signals (SIGILL or SIGEMT for instance), but it is less
2935 clear where the PC is pointing afterwards. It may not match
2936 gdbarch_decr_pc_after_break. I don't know any specific target that
2937 generates these signals at breakpoints (the code has been in GDB since at
2938 least 1992), so I cannot guess how to handle them here.
2939
2940 In earlier versions of GDB, a target with
2941 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
2942 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
2943 target with both of these set in GDB history, and it seems unlikely to be
2944 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
2945
2946 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
2947 return;
2948
2949 if (ecs->ws.value.sig != TARGET_SIGNAL_TRAP)
2950 return;
2951
2952 /* In reverse execution, when a breakpoint is hit, the instruction
2953 under it has already been de-executed. The reported PC always
2954 points at the breakpoint address, so adjusting it further would
2955 be wrong. E.g., consider this case on a decr_pc_after_break == 1
2956 architecture:
2957
2958 B1 0x08000000 : INSN1
2959 B2 0x08000001 : INSN2
2960 0x08000002 : INSN3
2961 PC -> 0x08000003 : INSN4
2962
2963 Say you're stopped at 0x08000003 as above. Reverse continuing
2964 from that point should hit B2 as below. Reading the PC when the
2965 SIGTRAP is reported should read 0x08000001 and INSN2 should have
2966 been de-executed already.
2967
2968 B1 0x08000000 : INSN1
2969 B2 PC -> 0x08000001 : INSN2
2970 0x08000002 : INSN3
2971 0x08000003 : INSN4
2972
2973 We can't apply the same logic as for forward execution, because
2974 we would wrongly adjust the PC to 0x08000000, since there's a
2975 breakpoint at PC - 1. We'd then report a hit on B1, although
2976 INSN1 hadn't been de-executed yet. Doing nothing is the correct
2977 behaviour. */
2978 if (execution_direction == EXEC_REVERSE)
2979 return;
2980
2981 /* If this target does not decrement the PC after breakpoints, then
2982 we have nothing to do. */
2983 regcache = get_thread_regcache (ecs->ptid);
2984 gdbarch = get_regcache_arch (regcache);
2985 if (gdbarch_decr_pc_after_break (gdbarch) == 0)
2986 return;
2987
2988 aspace = get_regcache_aspace (regcache);
2989
2990 /* Find the location where (if we've hit a breakpoint) the
2991 breakpoint would be. */
2992 breakpoint_pc = regcache_read_pc (regcache)
2993 - gdbarch_decr_pc_after_break (gdbarch);
2994
2995 /* Check whether there actually is a software breakpoint inserted at
2996 that location.
2997
2998 If in non-stop mode, a race condition is possible where we've
2999 removed a breakpoint, but stop events for that breakpoint were
3000 already queued and arrive later. To suppress those spurious
3001 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
3002 and retire them after a number of stop events are reported. */
3003 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
3004 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
3005 {
3006 struct cleanup *old_cleanups = NULL;
3007
3008 if (RECORD_IS_USED)
3009 old_cleanups = record_gdb_operation_disable_set ();
3010
3011 /* When using hardware single-step, a SIGTRAP is reported for both
3012 a completed single-step and a software breakpoint. We need to
3013 differentiate between the two, as the latter needs adjusting
3014 but the former does not.
3015
3016 The SIGTRAP can be due to a completed hardware single-step only if
3017 - we didn't insert software single-step breakpoints
3018 - the thread to be examined is still the current thread
3019 - this thread is currently being stepped
3020
3021 If any of these events did not occur, we must have stopped due
3022 to hitting a software breakpoint, and have to back up to the
3023 breakpoint address.
3024
3025 As a special case, we could have hardware single-stepped a
3026 software breakpoint. In this case (prev_pc == breakpoint_pc),
3027 we also need to back up to the breakpoint address. */
3028
3029 if (singlestep_breakpoints_inserted_p
3030 || !ptid_equal (ecs->ptid, inferior_ptid)
3031 || !currently_stepping (ecs->event_thread)
3032 || ecs->event_thread->prev_pc == breakpoint_pc)
3033 regcache_write_pc (regcache, breakpoint_pc);
3034
3035 if (RECORD_IS_USED)
3036 do_cleanups (old_cleanups);
3037 }
3038 }
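/* Editor's note: a concrete worked example (not from the sources) of the
   adjustment performed above.  On an architecture whose breakpoint
   instruction is one byte and whose trap reports the PC *after* it -- e.g.
   i386, where gdbarch_decr_pc_after_break returns 1 -- a breakpoint planted
   at 0x08048000 makes the target report PC == 0x08048001 on the SIGTRAP.
   adjust_pc_after_break then computes

       breakpoint_pc = 0x08048001 - 1 = 0x08048000

   and, provided a software breakpoint really is inserted there (and we were
   not just hardware single-stepping this thread), rewrites the PC back to
   0x08048000 so the stop is attributed to the right breakpoint location.  */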
3039
3040 void
3041 init_infwait_state (void)
3042 {
3043 waiton_ptid = pid_to_ptid (-1);
3044 infwait_state = infwait_normal_state;
3045 }
3046
3047 void
3048 error_is_running (void)
3049 {
3050 error (_("Cannot execute this command while "
3051 "the selected thread is running."));
3052 }
3053
3054 void
3055 ensure_not_running (void)
3056 {
3057 if (is_running (inferior_ptid))
3058 error_is_running ();
3059 }
3060
3061 static int
3062 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
3063 {
3064 for (frame = get_prev_frame (frame);
3065 frame != NULL;
3066 frame = get_prev_frame (frame))
3067 {
3068 if (frame_id_eq (get_frame_id (frame), step_frame_id))
3069 return 1;
3070 if (get_frame_type (frame) != INLINE_FRAME)
3071 break;
3072 }
3073
3074 return 0;
3075 }
3076
3077 /* Auxiliary function that handles syscall entry/return events.
3078 It returns 1 if the inferior should keep going (and GDB
3079 should ignore the event), or 0 if the event deserves to be
3080 processed. */
3081
3082 static int
3083 handle_syscall_event (struct execution_control_state *ecs)
3084 {
3085 struct regcache *regcache;
3086 struct gdbarch *gdbarch;
3087 int syscall_number;
3088
3089 if (!ptid_equal (ecs->ptid, inferior_ptid))
3090 context_switch (ecs->ptid);
3091
3092 regcache = get_thread_regcache (ecs->ptid);
3093 gdbarch = get_regcache_arch (regcache);
3094 syscall_number = ecs->ws.value.syscall_number;
3095 stop_pc = regcache_read_pc (regcache);
3096
3097 if (catch_syscall_enabled () > 0
3098 && catching_syscall_number (syscall_number) > 0)
3099 {
3100 if (debug_infrun)
3101 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
3102 syscall_number);
3103
3104 ecs->event_thread->control.stop_bpstat
3105 = bpstat_stop_status (get_regcache_aspace (regcache),
3106 stop_pc, ecs->ptid, &ecs->ws);
3107 ecs->random_signal
3108 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat);
3109
3110 if (!ecs->random_signal)
3111 {
3112 /* Catchpoint hit. */
3113 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
3114 return 0;
3115 }
3116 }
3117
3118 /* If no catchpoint triggered for this, then keep going. */
3119 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3120 keep_going (ecs);
3121 return 1;
3122 }
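/* Editor's note: an illustrative usage sketch (not part of infrun.c) of the
   catchpoint path handled above.  On targets that support syscall
   catchpoints (e.g. GNU/Linux native), a session such as

       (gdb) catch syscall close
       (gdb) continue

   arms catch_syscall_enabled () / catching_syscall_number (), so the
   syscall entry and return events for that syscall are reported to the
   user as catchpoint hits (stop_signal forced to TARGET_SIGNAL_TRAP above)
   rather than being silently resumed by keep_going.  */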
3123
3124 /* Clear the supplied execution_control_state's stop_func_* fields. */
3125
3126 static void
3127 clear_stop_func (struct execution_control_state *ecs)
3128 {
3129 ecs->stop_func_filled_in = 0;
3130 ecs->stop_func_start = 0;
3131 ecs->stop_func_end = 0;
3132 ecs->stop_func_name = NULL;
3133 }
3134
3135 /* Lazily fill in the execution_control_state's stop_func_* fields. */
3136
3137 static void
3138 fill_in_stop_func (struct gdbarch *gdbarch,
3139 struct execution_control_state *ecs)
3140 {
3141 if (!ecs->stop_func_filled_in)
3142 {
3143 /* Don't care about return value; stop_func_start and stop_func_name
3144 will both be 0 if it doesn't work. */
3145 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3146 &ecs->stop_func_start, &ecs->stop_func_end);
3147 ecs->stop_func_start
3148 += gdbarch_deprecated_function_start_offset (gdbarch);
3149
3150 ecs->stop_func_filled_in = 1;
3151 }
3152 }
3153
3154 /* Given an execution control state that has been freshly filled in
3155 by an event from the inferior, figure out what it means and take
3156 appropriate action. */
3157
3158 static void
3159 handle_inferior_event (struct execution_control_state *ecs)
3160 {
3161 struct frame_info *frame;
3162 struct gdbarch *gdbarch;
3163 int stopped_by_watchpoint;
3164 int stepped_after_stopped_by_watchpoint = 0;
3165 struct symtab_and_line stop_pc_sal;
3166 enum stop_kind stop_soon;
3167
3168 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
3169 {
3170 /* We had an event in the inferior, but we are not interested in
3171 handling it at this level. The lower layers have already
3172 done what needs to be done, if anything.
3173
3174 One of the possible circumstances for this is when the
3175 inferior produces output for the console. The inferior has
3176 not stopped, and we are ignoring the event. Another possible
3177 circumstance is any event which the lower level knows will be
3178 reported multiple times without an intervening resume. */
3179 if (debug_infrun)
3180 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
3181 prepare_to_wait (ecs);
3182 return;
3183 }
3184
3185 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
3186 && target_can_async_p () && !sync_execution)
3187 {
3188 /* There were no unwaited-for children left in the target, but
3189 we're not synchronously waiting for events either. Just
3190 ignore. Otherwise, if we were running a synchronous
3191 execution command, we need to cancel it and give the user
3192 back the terminal. */
3193 if (debug_infrun)
3194 fprintf_unfiltered (gdb_stdlog,
3195 "infrun: TARGET_WAITKIND_NO_RESUMED (ignoring)\n");
3196 prepare_to_wait (ecs);
3197 return;
3198 }
3199
3200 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3201 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3202 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED)
3203 {
3204 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
3205
3206 gdb_assert (inf);
3207 stop_soon = inf->control.stop_soon;
3208 }
3209 else
3210 stop_soon = NO_STOP_QUIETLY;
3211
3212 /* Cache the last pid/waitstatus. */
3213 target_last_wait_ptid = ecs->ptid;
3214 target_last_waitstatus = ecs->ws;
3215
3216 /* Always clear state belonging to the previous time we stopped. */
3217 stop_stack_dummy = STOP_NONE;
3218
3219 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
3220 {
3221 /* No unwaited-for children left. IOW, all resumed children
3222 have exited. */
3223 if (debug_infrun)
3224 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_RESUMED\n");
3225
3226 stop_print_frame = 0;
3227 stop_stepping (ecs);
3228 return;
3229 }
3230
3231 /* If it's a new process, add it to the thread database. */
3232
3233 ecs->new_thread_event = (!ptid_equal (ecs->ptid, inferior_ptid)
3234 && !ptid_equal (ecs->ptid, minus_one_ptid)
3235 && !in_thread_list (ecs->ptid));
3236
3237 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3238 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED && ecs->new_thread_event)
3239 add_thread (ecs->ptid);
3240
3241 ecs->event_thread = find_thread_ptid (ecs->ptid);
3242
3243 /* Dependent on valid ECS->EVENT_THREAD. */
3244 adjust_pc_after_break (ecs);
3245
3246 /* Dependent on the current PC value modified by adjust_pc_after_break. */
3247 reinit_frame_cache ();
3248
3249 breakpoint_retire_moribund ();
3250
3251 /* First, distinguish signals caused by the debugger from signals
3252 that have to do with the program's own actions. Note that
3253 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
3254 on the operating system version. Here we detect when a SIGILL or
3255 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
3256 something similar for SIGSEGV, since a SIGSEGV will be generated
3257 when we're trying to execute a breakpoint instruction on a
3258 non-executable stack. This happens for call dummy breakpoints
3259 for architectures like SPARC that place call dummies on the
3260 stack. */
3261 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
3262 && (ecs->ws.value.sig == TARGET_SIGNAL_ILL
3263 || ecs->ws.value.sig == TARGET_SIGNAL_SEGV
3264 || ecs->ws.value.sig == TARGET_SIGNAL_EMT))
3265 {
3266 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3267
3268 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
3269 regcache_read_pc (regcache)))
3270 {
3271 if (debug_infrun)
3272 fprintf_unfiltered (gdb_stdlog,
3273 "infrun: Treating signal as SIGTRAP\n");
3274 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
3275 }
3276 }
3277
3278 /* Mark the non-executing threads accordingly. In all-stop, all
3279 threads of all processes are stopped when we get any event
3280 reported. In non-stop mode, only the event thread stops. If
3281 we're handling a process exit in non-stop mode, there's nothing
3282 to do, as threads of the dead process are gone, and threads of
3283 any other process were left running. */
3284 if (!non_stop)
3285 set_executing (minus_one_ptid, 0);
3286 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3287 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
3288 set_executing (ecs->ptid, 0);
3289
3290 switch (infwait_state)
3291 {
3292 case infwait_thread_hop_state:
3293 if (debug_infrun)
3294 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_thread_hop_state\n");
3295 break;
3296
3297 case infwait_normal_state:
3298 if (debug_infrun)
3299 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
3300 break;
3301
3302 case infwait_step_watch_state:
3303 if (debug_infrun)
3304 fprintf_unfiltered (gdb_stdlog,
3305 "infrun: infwait_step_watch_state\n");
3306
3307 stepped_after_stopped_by_watchpoint = 1;
3308 break;
3309
3310 case infwait_nonstep_watch_state:
3311 if (debug_infrun)
3312 fprintf_unfiltered (gdb_stdlog,
3313 "infrun: infwait_nonstep_watch_state\n");
3314 insert_breakpoints ();
3315
3316 /* FIXME-maybe: is this cleaner than setting a flag? Does it
3317 handle things like signals arriving and other things happening
3318 in combination correctly? */
3319 stepped_after_stopped_by_watchpoint = 1;
3320 break;
3321
3322 default:
3323 internal_error (__FILE__, __LINE__, _("bad switch"));
3324 }
3325
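/* Reset the infwait state and the ptid we were waiting on; the code
   below re-arms them when another low-level event has to be collected
   before this one can be fully handled.  */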
3326 infwait_state = infwait_normal_state;
3327 waiton_ptid = pid_to_ptid (-1);
3328
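/* Dispatch on the kind of event the target reported.  Each case either
   resumes and returns, stops, or continues on to the stop tests
   further below.  */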
3329 switch (ecs->ws.kind)
3330 {
3331 case TARGET_WAITKIND_LOADED:
3332 if (debug_infrun)
3333 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
3334 /* Ignore gracefully during startup of the inferior, as it might
3335 be the shell which has just loaded some objects, otherwise
3336 add the symbols for the newly loaded objects. Also ignore at
3337 the beginning of an attach or remote session; we will query
3338 the full list of libraries once the connection is
3339 established. */
3340 if (stop_soon == NO_STOP_QUIETLY)
3341 {
3342 struct regcache *regcache;
3343
3344 if (!ptid_equal (ecs->ptid, inferior_ptid))
3345 context_switch (ecs->ptid);
3346 regcache = get_thread_regcache (ecs->ptid);
3347
3348 handle_solib_event ();
3349
3350 ecs->event_thread->control.stop_bpstat
3351 = bpstat_stop_status (get_regcache_aspace (regcache),
3352 stop_pc, ecs->ptid, &ecs->ws);
3353 ecs->random_signal
3354 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat);
3355
3356 if (!ecs->random_signal)
3357 {
3358 /* A catchpoint triggered. */
3359 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
3360 goto process_event_stop_test;
3361 }
3362
3363 /* If requested, stop when the dynamic linker notifies
3364 gdb of events. This allows the user to get control
3365 and place breakpoints in initializer routines for
3366 dynamically loaded objects (among other things). */
3367 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3368 if (stop_on_solib_events)
3369 {
3370 /* Make sure we print "Stopped due to solib-event" in
3371 normal_stop. */
3372 stop_print_frame = 1;
3373
3374 stop_stepping (ecs);
3375 return;
3376 }
3377 }
3378
3379 /* If we are skipping through a shell, or through shared library
3380 loading that we aren't interested in, resume the program. If
3381 we're running the program normally, also resume. But stop if
3382 we're attaching or setting up a remote connection. */
3383 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3384 {
3385 /* Loading of shared libraries might have changed breakpoint
3386 addresses. Make sure new breakpoints are inserted. */
3387 if (stop_soon == NO_STOP_QUIETLY
3388 && !breakpoints_always_inserted_mode ())
3389 insert_breakpoints ();
3390 resume (0, TARGET_SIGNAL_0);
3391 prepare_to_wait (ecs);
3392 return;
3393 }
3394
3395 break;
3396
3397 case TARGET_WAITKIND_SPURIOUS:
3398 if (debug_infrun)
3399 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3400 resume (0, TARGET_SIGNAL_0);
3401 prepare_to_wait (ecs);
3402 return;
3403
3404 case TARGET_WAITKIND_EXITED:
3405 if (debug_infrun)
3406 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXITED\n");
3407 inferior_ptid = ecs->ptid;
3408 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3409 set_current_program_space (current_inferior ()->pspace);
3410 handle_vfork_child_exec_or_exit (0);
3411 target_terminal_ours (); /* Must do this before mourn anyway. */
3412 print_exited_reason (ecs->ws.value.integer);
3413
3414 /* Record the exit code in the convenience variable $_exitcode, so
3415 that the user can inspect this again later. */
3416 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3417 (LONGEST) ecs->ws.value.integer);
3418
3419 /* Also record this in the inferior itself. */
3420 current_inferior ()->has_exit_code = 1;
3421 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
3422
3423 gdb_flush (gdb_stdout);
3424 target_mourn_inferior ();
3425 singlestep_breakpoints_inserted_p = 0;
3426 cancel_single_step_breakpoints ();
3427 stop_print_frame = 0;
3428 stop_stepping (ecs);
3429 return;
3430
3431 case TARGET_WAITKIND_SIGNALLED:
3432 if (debug_infrun)
3433 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SIGNALLED\n");
3434 inferior_ptid = ecs->ptid;
3435 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3436 set_current_program_space (current_inferior ()->pspace);
3437 handle_vfork_child_exec_or_exit (0);
3438 stop_print_frame = 0;
3439 target_terminal_ours (); /* Must do this before mourn anyway. */
3440
3441 /* Note: By definition of TARGET_WAITKIND_SIGNALLED, we shouldn't
3442 reach here unless the inferior is dead. However, for years
3443 target_kill() was called here, which hints that fatal signals aren't
3444 really fatal on some systems. If that's true, then some changes
3445 may be needed. */
3446 target_mourn_inferior ();
3447
3448 print_signal_exited_reason (ecs->ws.value.sig);
3449 singlestep_breakpoints_inserted_p = 0;
3450 cancel_single_step_breakpoints ();
3451 stop_stepping (ecs);
3452 return;
3453
3454 /* The following are the only cases in which we keep going;
3455 the above cases end in a continue or goto. */
3456 case TARGET_WAITKIND_FORKED:
3457 case TARGET_WAITKIND_VFORKED:
3458 if (debug_infrun)
3459 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3460
3461 /* Check whether the inferior is displaced stepping. */
3462 {
3463 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3464 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3465 struct displaced_step_inferior_state *displaced
3466 = get_displaced_stepping_state (ptid_get_pid (ecs->ptid));
3467
3468 	/* If displaced stepping is in use and thread ecs->ptid is the
3469 	   one that was displaced stepping, finish the fixup here.  */
3470 if (displaced && ptid_equal (displaced->step_ptid, ecs->ptid))
3471 {
3472 struct inferior *parent_inf
3473 = find_inferior_pid (ptid_get_pid (ecs->ptid));
3474 struct regcache *child_regcache;
3475 CORE_ADDR parent_pc;
3476
3477 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
3478 	       indicating that the displaced stepping of the syscall instruction
3479 	       has finished.  Perform cleanup for the parent process here.  Note
3480 that this operation also cleans up the child process for vfork,
3481 because their pages are shared. */
3482 displaced_step_fixup (ecs->ptid, TARGET_SIGNAL_TRAP);
3483
3484 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3485 {
3486 /* Restore scratch pad for child process. */
3487 displaced_step_restore (displaced, ecs->ws.value.related_pid);
3488 }
3489
3490 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
3491 the child's PC is also within the scratchpad. Set the child's PC
3492 to the parent's PC value, which has already been fixed up.
3493 FIXME: we use the parent's aspace here, although we're touching
3494 the child, because the child hasn't been added to the inferior
3495 list yet at this point. */
3496
3497 child_regcache
3498 = get_thread_arch_aspace_regcache (ecs->ws.value.related_pid,
3499 gdbarch,
3500 parent_inf->aspace);
3501 /* Read PC value of parent process. */
3502 parent_pc = regcache_read_pc (regcache);
3503
3504 if (debug_displaced)
3505 fprintf_unfiltered (gdb_stdlog,
3506 "displaced: write child pc from %s to %s\n",
3507 paddress (gdbarch,
3508 regcache_read_pc (child_regcache)),
3509 paddress (gdbarch, parent_pc));
3510
3511 regcache_write_pc (child_regcache, parent_pc);
3512 }
3513 }
3514
3515 if (!ptid_equal (ecs->ptid, inferior_ptid))
3516 {
3517 context_switch (ecs->ptid);
3518 reinit_frame_cache ();
3519 }
3520
3521 /* Immediately detach breakpoints from the child before there's
3522 any chance of letting the user delete breakpoints from the
3523 breakpoint lists. If we don't do this early, it's easy to
3524 	 leave left-over traps in the child, viz.: "break foo; catch
3525 fork; c; <fork>; del; c; <child calls foo>". We only follow
3526 the fork on the last `continue', and by that time the
3527 breakpoint at "foo" is long gone from the breakpoint table.
3528 If we vforked, then we don't need to unpatch here, since both
3529 parent and child are sharing the same memory pages; we'll
3530 need to unpatch at follow/detach time instead to be certain
3531 that new breakpoints added between catchpoint hit time and
3532 vfork follow are detached. */
3533 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
3534 {
3535 int child_pid = ptid_get_pid (ecs->ws.value.related_pid);
3536
3537 /* This won't actually modify the breakpoint list, but will
3538 physically remove the breakpoints from the child. */
3539 detach_breakpoints (child_pid);
3540 }
3541
3542 if (singlestep_breakpoints_inserted_p)
3543 {
3544 /* Pull the single step breakpoints out of the target. */
3545 remove_single_step_breakpoints ();
3546 singlestep_breakpoints_inserted_p = 0;
3547 }
3548
3549 /* In case the event is caught by a catchpoint, remember that
3550 the event is to be followed at the next resume of the thread,
3551 and not immediately. */
3552 ecs->event_thread->pending_follow = ecs->ws;
3553
3554 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3555
3556 ecs->event_thread->control.stop_bpstat
3557 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3558 stop_pc, ecs->ptid, &ecs->ws);
3559
3560       /* Note that we're interested in knowing whether the bpstat
3561 	 actually causes a stop, not just whether it may explain the signal.
3562 Software watchpoints, for example, always appear in the
3563 bpstat. */
3564 ecs->random_signal
3565 = !bpstat_causes_stop (ecs->event_thread->control.stop_bpstat);
3566
3567 /* If no catchpoint triggered for this, then keep going. */
3568 if (ecs->random_signal)
3569 {
3570 ptid_t parent;
3571 ptid_t child;
3572 int should_resume;
3573 int follow_child
3574 = (follow_fork_mode_string == follow_fork_mode_child);
3575
3576 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3577
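	  /* follow_fork switches to the parent or the child as directed
	     by the "set follow-fork-mode" setting, and returns whether
	     we should resume afterwards.  */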
3578 should_resume = follow_fork ();
3579
3580 parent = ecs->ptid;
3581 child = ecs->ws.value.related_pid;
3582
3583 /* In non-stop mode, also resume the other branch. */
3584 if (non_stop && !detach_fork)
3585 {
3586 if (follow_child)
3587 switch_to_thread (parent);
3588 else
3589 switch_to_thread (child);
3590
3591 ecs->event_thread = inferior_thread ();
3592 ecs->ptid = inferior_ptid;
3593 keep_going (ecs);
3594 }
3595
3596 if (follow_child)
3597 switch_to_thread (child);
3598 else
3599 switch_to_thread (parent);
3600
3601 ecs->event_thread = inferior_thread ();
3602 ecs->ptid = inferior_ptid;
3603
3604 if (should_resume)
3605 keep_going (ecs);
3606 else
3607 stop_stepping (ecs);
3608 return;
3609 }
3610 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
3611 goto process_event_stop_test;
3612
3613 case TARGET_WAITKIND_VFORK_DONE:
3614 /* Done with the shared memory region. Re-insert breakpoints in
3615 the parent, and keep going. */
3616
3617 if (debug_infrun)
3618 fprintf_unfiltered (gdb_stdlog,
3619 "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3620
3621 if (!ptid_equal (ecs->ptid, inferior_ptid))
3622 context_switch (ecs->ptid);
3623
3624 current_inferior ()->waiting_for_vfork_done = 0;
3625 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3626 /* This also takes care of reinserting breakpoints in the
3627 previously locked inferior. */
3628 keep_going (ecs);
3629 return;
3630
3631 case TARGET_WAITKIND_EXECD:
3632 if (debug_infrun)
3633 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3634
3635 if (!ptid_equal (ecs->ptid, inferior_ptid))
3636 {
3637 context_switch (ecs->ptid);
3638 reinit_frame_cache ();
3639 }
3640
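      /* The exec replaced the program image, so any software
	 single-step breakpoints we had inserted are stale now.  */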
3641 singlestep_breakpoints_inserted_p = 0;
3642 cancel_single_step_breakpoints ();
3643
3644 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3645
3646 /* Do whatever is necessary to the parent branch of the vfork. */
3647 handle_vfork_child_exec_or_exit (1);
3648
3649 /* This causes the eventpoints and symbol table to be reset.
3650 Must do this now, before trying to determine whether to
3651 stop. */
3652 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3653
3654 ecs->event_thread->control.stop_bpstat
3655 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3656 stop_pc, ecs->ptid, &ecs->ws);
3657 ecs->random_signal
3658 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat);
3659
3660 /* Note that this may be referenced from inside
3661 bpstat_stop_status above, through inferior_has_execd. */
3662 xfree (ecs->ws.value.execd_pathname);
3663 ecs->ws.value.execd_pathname = NULL;
3664
3665 /* If no catchpoint triggered for this, then keep going. */
3666 if (ecs->random_signal)
3667 {
3668 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3669 keep_going (ecs);
3670 return;
3671 }
3672 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
3673 goto process_event_stop_test;
3674
3675 /* Be careful not to try to gather much state about a thread
3676 that's in a syscall. It's frequently a losing proposition. */
3677 case TARGET_WAITKIND_SYSCALL_ENTRY:
3678 if (debug_infrun)
3679 fprintf_unfiltered (gdb_stdlog,
3680 "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3681       /* Get the current syscall number.  */
3682 if (handle_syscall_event (ecs) != 0)
3683 return;
3684 goto process_event_stop_test;
3685
3686 /* Before examining the threads further, step this thread to
3687 get it entirely out of the syscall. (We get notice of the
3688 event when the thread is just on the verge of exiting a
3689 syscall. Stepping one instruction seems to get it back
3690 into user code.) */
3691 case TARGET_WAITKIND_SYSCALL_RETURN:
3692 if (debug_infrun)
3693 fprintf_unfiltered (gdb_stdlog,
3694 "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
3695 if (handle_syscall_event (ecs) != 0)
3696 return;
3697 goto process_event_stop_test;
3698
3699 case TARGET_WAITKIND_STOPPED:
3700 if (debug_infrun)
3701 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
3702 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
3703 break;
3704
3705 case TARGET_WAITKIND_NO_HISTORY:
3706 if (debug_infrun)
3707 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_HISTORY\n");
3708 /* Reverse execution: target ran out of history info. */
3709 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3710 print_no_history_reason ();
3711 stop_stepping (ecs);
3712 return;
3713 }
3714
3715 if (ecs->new_thread_event)
3716 {
3717 if (non_stop)
3718 /* Non-stop assumes that the target handles adding new threads
3719 to the thread list. */
3720 internal_error (__FILE__, __LINE__,
3721 "targets should add new threads to the thread "
3722 "list themselves in non-stop mode.");
3723
3724 /* We may want to consider not doing a resume here in order to
3725 give the user a chance to play with the new thread. It might
3726 be good to make that a user-settable option. */
3727
3728 /* At this point, all threads are stopped (happens automatically
3729 in either the OS or the native code). Therefore we need to
3730 continue all threads in order to make progress. */
3731
3732 if (!ptid_equal (ecs->ptid, inferior_ptid))
3733 context_switch (ecs->ptid);
3734 target_resume (RESUME_ALL, 0, TARGET_SIGNAL_0);
3735 prepare_to_wait (ecs);
3736 return;
3737 }
3738
3739 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED)
3740 {
3741 /* Do we need to clean up the state of a thread that has
3742 completed a displaced single-step? (Doing so usually affects
3743 the PC, so do it here, before we set stop_pc.) */
3744 displaced_step_fixup (ecs->ptid,
3745 ecs->event_thread->suspend.stop_signal);
3746
3747 /* If we either finished a single-step or hit a breakpoint, but
3748 the user wanted this thread to be stopped, pretend we got a
3749 SIG0 (generic unsignaled stop). */
3750
3751 if (ecs->event_thread->stop_requested
3752 && ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3753 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3754 }
3755
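  /* Cache the PC the event thread stopped at; the stop tests below
     compare against STOP_PC repeatedly.  */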
3756 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3757
3758 if (debug_infrun)
3759 {
3760 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3761 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3762 struct cleanup *old_chain = save_inferior_ptid ();
3763
3764 inferior_ptid = ecs->ptid;
3765
3766 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
3767 paddress (gdbarch, stop_pc));
3768 if (target_stopped_by_watchpoint ())
3769 {
3770 CORE_ADDR addr;
3771
3772 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
3773
3774 if (target_stopped_data_address (&current_target, &addr))
3775 fprintf_unfiltered (gdb_stdlog,
3776 "infrun: stopped data address = %s\n",
3777 paddress (gdbarch, addr));
3778 else
3779 fprintf_unfiltered (gdb_stdlog,
3780 "infrun: (no data address available)\n");
3781 }
3782
3783 do_cleanups (old_chain);
3784 }
3785
3786 if (stepping_past_singlestep_breakpoint)
3787 {
3788 gdb_assert (singlestep_breakpoints_inserted_p);
3789 gdb_assert (ptid_equal (singlestep_ptid, ecs->ptid));
3790 gdb_assert (!ptid_equal (singlestep_ptid, saved_singlestep_ptid));
3791
3792 stepping_past_singlestep_breakpoint = 0;
3793
3794 /* We've either finished single-stepping past the single-step
3795 breakpoint, or stopped for some other reason. It would be nice if
3796 we could tell, but we can't reliably. */
3797 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3798 {
3799 if (debug_infrun)
3800 fprintf_unfiltered (gdb_stdlog,
3801 "infrun: stepping_past_"
3802 "singlestep_breakpoint\n");
3803 /* Pull the single step breakpoints out of the target. */
3804 remove_single_step_breakpoints ();
3805 singlestep_breakpoints_inserted_p = 0;
3806
3807 ecs->random_signal = 0;
3808 ecs->event_thread->control.trap_expected = 0;
3809
3810 context_switch (saved_singlestep_ptid);
3811 if (deprecated_context_hook)
3812 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3813
3814 resume (1, TARGET_SIGNAL_0);
3815 prepare_to_wait (ecs);
3816 return;
3817 }
3818 }
3819
3820 if (!ptid_equal (deferred_step_ptid, null_ptid))
3821 {
3822 /* In non-stop mode, there's never a deferred_step_ptid set. */
3823 gdb_assert (!non_stop);
3824
3825 /* If we stopped for some other reason than single-stepping, ignore
3826 the fact that we were supposed to switch back. */
3827 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3828 {
3829 if (debug_infrun)
3830 fprintf_unfiltered (gdb_stdlog,
3831 "infrun: handling deferred step\n");
3832
3833 /* Pull the single step breakpoints out of the target. */
3834 if (singlestep_breakpoints_inserted_p)
3835 {
3836 remove_single_step_breakpoints ();
3837 singlestep_breakpoints_inserted_p = 0;
3838 }
3839
3840 ecs->event_thread->control.trap_expected = 0;
3841
3842 /* Note: We do not call context_switch at this point, as the
3843 context is already set up for stepping the original thread. */
3844 switch_to_thread (deferred_step_ptid);
3845 deferred_step_ptid = null_ptid;
3846 /* Suppress spurious "Switching to ..." message. */
3847 previous_inferior_ptid = inferior_ptid;
3848
3849 resume (1, TARGET_SIGNAL_0);
3850 prepare_to_wait (ecs);
3851 return;
3852 }
3853
3854 deferred_step_ptid = null_ptid;
3855 }
3856
3857 /* See if a thread hit a thread-specific breakpoint that was meant for
3858 another thread. If so, then step that thread past the breakpoint,
3859 and continue it. */
3860
3861 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3862 {
3863 int thread_hop_needed = 0;
3864 struct address_space *aspace =
3865 get_regcache_aspace (get_thread_regcache (ecs->ptid));
3866
3867 /* Check if a regular breakpoint has been hit before checking
3868 for a potential single step breakpoint. Otherwise, GDB will
3869 not see this breakpoint hit when stepping onto breakpoints. */
3870 if (regular_breakpoint_inserted_here_p (aspace, stop_pc))
3871 {
3872 ecs->random_signal = 0;
3873 if (!breakpoint_thread_match (aspace, stop_pc, ecs->ptid))
3874 thread_hop_needed = 1;
3875 }
3876 else if (singlestep_breakpoints_inserted_p)
3877 {
3878 /* We have not context switched yet, so this should be true
3879 no matter which thread hit the singlestep breakpoint. */
3880 gdb_assert (ptid_equal (inferior_ptid, singlestep_ptid));
3881 if (debug_infrun)
3882 fprintf_unfiltered (gdb_stdlog, "infrun: software single step "
3883 "trap for %s\n",
3884 target_pid_to_str (ecs->ptid));
3885
3886 ecs->random_signal = 0;
3887 /* The call to in_thread_list is necessary because PTIDs sometimes
3888 change when we go from single-threaded to multi-threaded. If
3889 the singlestep_ptid is still in the list, assume that it is
3890 really different from ecs->ptid. */
3891 if (!ptid_equal (singlestep_ptid, ecs->ptid)
3892 && in_thread_list (singlestep_ptid))
3893 {
3894 /* If the PC of the thread we were trying to single-step
3895 has changed, discard this event (which we were going
3896 to ignore anyway), and pretend we saw that thread
3897 	       trap.  This prevents us from continuously moving the
3898 single-step breakpoint forward, one instruction at a
3899 time. If the PC has changed, then the thread we were
3900 trying to single-step has trapped or been signalled,
3901 but the event has not been reported to GDB yet.
3902
3903 There might be some cases where this loses signal
3904 information, if a signal has arrived at exactly the
3905 same time that the PC changed, but this is the best
3906 we can do with the information available. Perhaps we
3907 should arrange to report all events for all threads
3908 when they stop, or to re-poll the remote looking for
3909 this particular thread (i.e. temporarily enable
3910 schedlock). */
3911
3912 CORE_ADDR new_singlestep_pc
3913 = regcache_read_pc (get_thread_regcache (singlestep_ptid));
3914
3915 if (new_singlestep_pc != singlestep_pc)
3916 {
3917 enum target_signal stop_signal;
3918
3919 if (debug_infrun)
3920 fprintf_unfiltered (gdb_stdlog, "infrun: unexpected thread,"
3921 " but expected thread advanced also\n");
3922
3923 /* The current context still belongs to
3924 singlestep_ptid. Don't swap here, since that's
3925 the context we want to use. Just fudge our
3926 state and continue. */
3927 stop_signal = ecs->event_thread->suspend.stop_signal;
3928 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3929 ecs->ptid = singlestep_ptid;
3930 ecs->event_thread = find_thread_ptid (ecs->ptid);
3931 ecs->event_thread->suspend.stop_signal = stop_signal;
3932 stop_pc = new_singlestep_pc;
3933 }
3934 else
3935 {
3936 if (debug_infrun)
3937 fprintf_unfiltered (gdb_stdlog,
3938 "infrun: unexpected thread\n");
3939
3940 thread_hop_needed = 1;
3941 stepping_past_singlestep_breakpoint = 1;
3942 saved_singlestep_ptid = singlestep_ptid;
3943 }
3944 }
3945 }
3946
3947 if (thread_hop_needed)
3948 {
3949 struct regcache *thread_regcache;
3950 int remove_status = 0;
3951
3952 if (debug_infrun)
3953 fprintf_unfiltered (gdb_stdlog, "infrun: thread_hop_needed\n");
3954
3955 /* Switch context before touching inferior memory, the
3956 previous thread may have exited. */
3957 if (!ptid_equal (inferior_ptid, ecs->ptid))
3958 context_switch (ecs->ptid);
3959
3960 /* Saw a breakpoint, but it was hit by the wrong thread.
3961 Just continue. */
3962
3963 if (singlestep_breakpoints_inserted_p)
3964 {
3965 /* Pull the single step breakpoints out of the target. */
3966 remove_single_step_breakpoints ();
3967 singlestep_breakpoints_inserted_p = 0;
3968 }
3969
3970 /* If the arch can displace step, don't remove the
3971 breakpoints. */
3972 thread_regcache = get_thread_regcache (ecs->ptid);
3973 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
3974 remove_status = remove_breakpoints ();
3975
3976 /* Did we fail to remove breakpoints? If so, try
3977 to set the PC past the bp. (There's at least
3978 one situation in which we can fail to remove
3979 the bp's: On HP-UX's that use ttrace, we can't
3980 change the address space of a vforking child
3981 process until the child exits (well, okay, not
3982 	     then either :-) or execs.)  */
3983 if (remove_status != 0)
3984 error (_("Cannot step over breakpoint hit in wrong thread"));
3985 else
3986 { /* Single step */
3987 if (!non_stop)
3988 {
3989 /* Only need to require the next event from this
3990 thread in all-stop mode. */
3991 waiton_ptid = ecs->ptid;
3992 infwait_state = infwait_thread_hop_state;
3993 }
3994
3995 ecs->event_thread->stepping_over_breakpoint = 1;
3996 keep_going (ecs);
3997 return;
3998 }
3999 }
4000 else if (singlestep_breakpoints_inserted_p)
4001 {
4002 ecs->random_signal = 0;
4003 }
4004 }
4005 else
4006 ecs->random_signal = 1;
4007
4008 /* See if something interesting happened to the non-current thread. If
4009 so, then switch to that thread. */
4010 if (!ptid_equal (ecs->ptid, inferior_ptid))
4011 {
4012 if (debug_infrun)
4013 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
4014
4015 context_switch (ecs->ptid);
4016
4017 if (deprecated_context_hook)
4018 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
4019 }
4020
4021 /* At this point, get hold of the now-current thread's frame. */
4022 frame = get_current_frame ();
4023 gdbarch = get_frame_arch (frame);
4024
4025 if (singlestep_breakpoints_inserted_p)
4026 {
4027 /* Pull the single step breakpoints out of the target. */
4028 remove_single_step_breakpoints ();
4029 singlestep_breakpoints_inserted_p = 0;
4030 }
4031
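  /* Figure out whether a watchpoint triggered for this event.  If we
     just finished single-stepping so that a watched access could
     complete (see the infwait_*_watch states above), skip the
     step-over-watchpoint handling below.  */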
4032 if (stepped_after_stopped_by_watchpoint)
4033 stopped_by_watchpoint = 0;
4034 else
4035 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
4036
4037 /* If necessary, step over this watchpoint. We'll be back to display
4038 it in a moment. */
4039 if (stopped_by_watchpoint
4040 && (target_have_steppable_watchpoint
4041 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
4042 {
4043 /* At this point, we are stopped at an instruction which has
4044 attempted to write to a piece of memory under control of
4045 a watchpoint. The instruction hasn't actually executed
4046 yet. If we were to evaluate the watchpoint expression
4047 now, we would get the old value, and therefore no change
4048 would seem to have occurred.
4049
4050 In order to make watchpoints work `right', we really need
4051 to complete the memory write, and then evaluate the
4052 watchpoint expression. We do this by single-stepping the
4053 target.
4054
4055 	 It may not be necessary to disable the watchpoint to step over
4056 it. For example, the PA can (with some kernel cooperation)
4057 single step over a watchpoint without disabling the watchpoint.
4058
4059 It is far more common to need to disable a watchpoint to step
4060 the inferior over it. If we have non-steppable watchpoints,
4061 we must disable the current watchpoint; it's simplest to
4062 disable all watchpoints and breakpoints. */
4063 int hw_step = 1;
4064
4065 if (!target_have_steppable_watchpoint)
4066 {
4067 remove_breakpoints ();
4068 /* See comment in resume why we need to stop bypassing signals
4069 while breakpoints have been removed. */
4070 target_pass_signals (0, NULL);
4071 }
4072 /* Single step */
4073 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
4074 target_resume (ecs->ptid, hw_step, TARGET_SIGNAL_0);
4075 waiton_ptid = ecs->ptid;
4076 if (target_have_steppable_watchpoint)
4077 infwait_state = infwait_step_watch_state;
4078 else
4079 infwait_state = infwait_nonstep_watch_state;
4080 prepare_to_wait (ecs);
4081 return;
4082 }
4083
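  /* Clear out per-stop state before running the stop tests below.  */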
4084 clear_stop_func (ecs);
4085 ecs->event_thread->stepping_over_breakpoint = 0;
4086 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
4087 ecs->event_thread->control.stop_step = 0;
4088 stop_print_frame = 1;
4089 ecs->random_signal = 0;
4090 stopped_by_random_signal = 0;
4091
4092 /* Hide inlined functions starting here, unless we just performed stepi or
4093 nexti. After stepi and nexti, always show the innermost frame (not any
4094 inline function call sites). */
4095 if (ecs->event_thread->control.step_range_end != 1)
4096 {
4097 struct address_space *aspace =
4098 get_regcache_aspace (get_thread_regcache (ecs->ptid));
4099
4100 /* skip_inline_frames is expensive, so we avoid it if we can
4101 determine that the address is one where functions cannot have
4102 been inlined. This improves performance with inferiors that
4103 load a lot of shared libraries, because the solib event
4104 breakpoint is defined as the address of a function (i.e. not
4105 inline). Note that we have to check the previous PC as well
4106 as the current one to catch cases when we have just
4107 single-stepped off a breakpoint prior to reinstating it.
4108 Note that we're assuming that the code we single-step to is
4109 not inline, but that's not definitive: there's nothing
4110 preventing the event breakpoint function from containing
4111 inlined code, and the single-step ending up there. If the
4112 user had set a breakpoint on that inlined code, the missing
4113 skip_inline_frames call would break things. Fortunately
4114 that's an extremely unlikely scenario. */
4115 if (!pc_at_non_inline_function (aspace, stop_pc, &ecs->ws)
4116 && !(ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4117 && ecs->event_thread->control.trap_expected
4118 && pc_at_non_inline_function (aspace,
4119 ecs->event_thread->prev_pc,
4120 &ecs->ws)))
4121 skip_inline_frames (ecs->ptid);
4122 }
4123
4124 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4125 && ecs->event_thread->control.trap_expected
4126 && gdbarch_single_step_through_delay_p (gdbarch)
4127 && currently_stepping (ecs->event_thread))
4128 {
4129 /* We're trying to step off a breakpoint. Turns out that we're
4130 also on an instruction that needs to be stepped multiple
4131 	 times before it has fully executed.  E.g., architectures
4132 with a delay slot. It needs to be stepped twice, once for
4133 the instruction and once for the delay slot. */
4134 int step_through_delay
4135 = gdbarch_single_step_through_delay (gdbarch, frame);
4136
4137 if (debug_infrun && step_through_delay)
4138 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
4139 if (ecs->event_thread->control.step_range_end == 0
4140 && step_through_delay)
4141 {
4142 /* The user issued a continue when stopped at a breakpoint.
4143 Set up for another trap and get out of here. */
4144 ecs->event_thread->stepping_over_breakpoint = 1;
4145 keep_going (ecs);
4146 return;
4147 }
4148 else if (step_through_delay)
4149 {
4150 /* The user issued a step when stopped at a breakpoint.
4151 Maybe we should stop, maybe we should not - the delay
4152 slot *might* correspond to a line of source. In any
4153 case, don't decide that here, just set
4154 	     ecs->event_thread->stepping_over_breakpoint, making sure we
4155 single-step again before breakpoints are re-inserted. */
4156 ecs->event_thread->stepping_over_breakpoint = 1;
4157 }
4158 }
4159
4160 /* Look at the cause of the stop, and decide what to do.
4161 The alternatives are:
4162      1) stop_stepping and return, to really stop and return to the debugger,
4163 2) keep_going and return to start up again
4164 (set ecs->event_thread->stepping_over_breakpoint to 1 to single step once)
4165 3) set ecs->random_signal to 1, and the decision between 1 and 2
4166 will be made according to the signal handling tables. */
4167
4168 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4169 || stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_NO_SIGSTOP
4170 || stop_soon == STOP_QUIETLY_REMOTE)
4171 {
4172 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4173 && stop_after_trap)
4174 {
4175 if (debug_infrun)
4176 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
4177 stop_print_frame = 0;
4178 stop_stepping (ecs);
4179 return;
4180 }
4181
4182       /* This originates from start_remote(), start_inferior() and
4183 	 the shared library hook functions.  */
4184 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
4185 {
4186 if (debug_infrun)
4187 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
4188 stop_stepping (ecs);
4189 return;
4190 }
4191
4192 /* This originates from attach_command(). We need to overwrite
4193 the stop_signal here, because some kernels don't ignore a
4194 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
4195 See more comments in inferior.h. On the other hand, if we
4196 get a non-SIGSTOP, report it to the user - assume the backend
4197 will handle the SIGSTOP if it should show up later.
4198
4199 Also consider that the attach is complete when we see a
4200 	 SIGTRAP.  Some systems (e.g. Windows), and stubs supporting
4201 	 target extended-remote (e.g. gdbserver), report it instead of
4202 	 a SIGSTOP.  We already rely on SIGTRAP being our
4203 signal, so this is no exception.
4204
4205 Also consider that the attach is complete when we see a
4206 TARGET_SIGNAL_0. In non-stop mode, GDB will explicitly tell
4207 the target to stop all threads of the inferior, in case the
4208 low level attach operation doesn't stop them implicitly. If
4209 they weren't stopped implicitly, then the stub will report a
4210 TARGET_SIGNAL_0, meaning: stopped for no particular reason
4211 other than GDB's request. */
4212 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
4213 && (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_STOP
4214 || ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4215 || ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_0))
4216 {
4217 stop_stepping (ecs);
4218 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
4219 return;
4220 }
4221
4222 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
4223 handles this event. */
4224 ecs->event_thread->control.stop_bpstat
4225 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
4226 stop_pc, ecs->ptid, &ecs->ws);
4227
4228       /* Needed in case a breakpoint condition called a
4229 	 function.  */
4230 stop_print_frame = 1;
4231
4232 /* This is where we handle "moribund" watchpoints. Unlike
4233 	 software breakpoint traps, hardware watchpoint traps are
4234 always distinguishable from random traps. If no high-level
4235 watchpoint is associated with the reported stop data address
4236 anymore, then the bpstat does not explain the signal ---
4237 simply make sure to ignore it if `stopped_by_watchpoint' is
4238 set. */
4239
4240 if (debug_infrun
4241 && ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4242 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat)
4243 && stopped_by_watchpoint)
4244 fprintf_unfiltered (gdb_stdlog,
4245 "infrun: no user watchpoint explains "
4246 "watchpoint SIGTRAP, ignoring\n");
4247
4248 /* NOTE: cagney/2003-03-29: These two checks for a random signal
4249 at one stage in the past included checks for an inferior
4250 function call's call dummy's return breakpoint. The original
4251 comment, that went with the test, read:
4252
4253 ``End of a stack dummy. Some systems (e.g. Sony news) give
4254 another signal besides SIGTRAP, so check here as well as
4255 above.''
4256
4257 	 If someone ever tries to get call dummies on a
4258 non-executable stack to work (where the target would stop
4259 with something like a SIGSEGV), then those tests might need
4260 to be re-instated. Given, however, that the tests were only
4261 enabled when momentary breakpoints were not being used, I
4262 suspect that it won't be the case.
4263
4264 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
4265 be necessary for call dummies on a non-executable stack on
4266 SPARC. */
4267
4268 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
4269 ecs->random_signal
4270 = !(bpstat_explains_signal (ecs->event_thread->control.stop_bpstat)
4271 || stopped_by_watchpoint
4272 || ecs->event_thread->control.trap_expected
4273 || (ecs->event_thread->control.step_range_end
4274 && (ecs->event_thread->control.step_resume_breakpoint
4275 == NULL)));
4276 else
4277 {
4278 ecs->random_signal = !bpstat_explains_signal
4279 (ecs->event_thread->control.stop_bpstat);
4280 if (!ecs->random_signal)
4281 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
4282 }
4283 }
4284
4285 /* When we reach this point, we've pretty much decided
4286 that the reason for stopping must've been a random
4287 (unexpected) signal. */
4288
4289 else
4290 ecs->random_signal = 1;
4291
4292 process_event_stop_test:
4293
4294 /* Re-fetch current thread's frame in case we did a
4295 "goto process_event_stop_test" above. */
4296 frame = get_current_frame ();
4297 gdbarch = get_frame_arch (frame);
4298
4299 /* For the program's own signals, act according to
4300 the signal handling tables. */
4301
4302 if (ecs->random_signal)
4303 {
4304 /* Signal not for debugging purposes. */
4305 int printed = 0;
4306 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
4307
4308 if (debug_infrun)
4309 fprintf_unfiltered (gdb_stdlog, "infrun: random signal %d\n",
4310 ecs->event_thread->suspend.stop_signal);
4311
4312 stopped_by_random_signal = 1;
4313
4314 if (signal_print[ecs->event_thread->suspend.stop_signal])
4315 {
4316 printed = 1;
4317 target_terminal_ours_for_output ();
4318 print_signal_received_reason
4319 (ecs->event_thread->suspend.stop_signal);
4320 }
4321 /* Always stop on signals if we're either just gaining control
4322 of the program, or the user explicitly requested this thread
4323 to remain stopped. */
4324 if (stop_soon != NO_STOP_QUIETLY
4325 || ecs->event_thread->stop_requested
4326 || (!inf->detaching
4327 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
4328 {
4329 stop_stepping (ecs);
4330 return;
4331 }
4332 /* If not going to stop, give terminal back
4333 if we took it away. */
4334 else if (printed)
4335 target_terminal_inferior ();
4336
4337 /* Clear the signal if it should not be passed. */
4338 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
4339 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
4340
4341 if (ecs->event_thread->prev_pc == stop_pc
4342 && ecs->event_thread->control.trap_expected
4343 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4344 {
4345 /* We were just starting a new sequence, attempting to
4346 single-step off of a breakpoint and expecting a SIGTRAP.
4347 Instead this signal arrives. This signal will take us out
4348 of the stepping range so GDB needs to remember to, when
4349 the signal handler returns, resume stepping off that
4350 breakpoint. */
4351 /* To simplify things, "continue" is forced to use the same
4352 code paths as single-step - set a breakpoint at the
4353 signal return address and then, once hit, step off that
4354 breakpoint. */
4355 if (debug_infrun)
4356 fprintf_unfiltered (gdb_stdlog,
4357 "infrun: signal arrived while stepping over "
4358 "breakpoint\n");
4359
4360 insert_hp_step_resume_breakpoint_at_frame (frame);
4361 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4362 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4363 ecs->event_thread->control.trap_expected = 0;
4364 keep_going (ecs);
4365 return;
4366 }
4367
4368 if (ecs->event_thread->control.step_range_end != 0
4369 && ecs->event_thread->suspend.stop_signal != TARGET_SIGNAL_0
4370 && (ecs->event_thread->control.step_range_start <= stop_pc
4371 && stop_pc < ecs->event_thread->control.step_range_end)
4372 && frame_id_eq (get_stack_frame_id (frame),
4373 ecs->event_thread->control.step_stack_frame_id)
4374 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4375 {
4376 /* The inferior is about to take a signal that will take it
4377 out of the single step range. Set a breakpoint at the
4378 current PC (which is presumably where the signal handler
4379 will eventually return) and then allow the inferior to
4380 run free.
4381
4382 Note that this is only needed for a signal delivered
4383 while in the single-step range. Nested signals aren't a
4384 problem as they eventually all return. */
4385 if (debug_infrun)
4386 fprintf_unfiltered (gdb_stdlog,
4387 "infrun: signal may take us out of "
4388 "single-step range\n");
4389
4390 insert_hp_step_resume_breakpoint_at_frame (frame);
4391 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4392 ecs->event_thread->control.trap_expected = 0;
4393 keep_going (ecs);
4394 return;
4395 }
4396
4397       /* Note: step_resume_breakpoint may be non-NULL.  This occurs
4398 when either there's a nested signal, or when there's a
4399 pending signal enabled just as the signal handler returns
4400 (leaving the inferior at the step-resume-breakpoint without
4401 actually executing it). Either way continue until the
4402 breakpoint is really hit. */
4403 keep_going (ecs);
4404 return;
4405 }
4406
4407 /* Handle cases caused by hitting a breakpoint. */
4408 {
4409 CORE_ADDR jmp_buf_pc;
4410 struct bpstat_what what;
4411
4412 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
4413
4414 if (what.call_dummy)
4415 {
4416 stop_stack_dummy = what.call_dummy;
4417 }
4418
4419 /* If we hit an internal event that triggers symbol changes, the
4420 current frame will be invalidated within bpstat_what (e.g., if
4421 we hit an internal solib event). Re-fetch it. */
4422 frame = get_current_frame ();
4423 gdbarch = get_frame_arch (frame);
4424
4425 switch (what.main_action)
4426 {
4427 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
4428 /* If we hit the breakpoint at longjmp while stepping, we
4429 install a momentary breakpoint at the target of the
4430 jmp_buf. */
4431
4432 if (debug_infrun)
4433 fprintf_unfiltered (gdb_stdlog,
4434 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
4435
4436 ecs->event_thread->stepping_over_breakpoint = 1;
4437
4438 if (what.is_longjmp)
4439 {
4440 if (!gdbarch_get_longjmp_target_p (gdbarch)
4441 || !gdbarch_get_longjmp_target (gdbarch,
4442 frame, &jmp_buf_pc))
4443 {
4444 if (debug_infrun)
4445 fprintf_unfiltered (gdb_stdlog,
4446 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
4447 "(!gdbarch_get_longjmp_target)\n");
4448 keep_going (ecs);
4449 return;
4450 }
4451
4452 /* We're going to replace the current step-resume breakpoint
4453 with a longjmp-resume breakpoint. */
4454 delete_step_resume_breakpoint (ecs->event_thread);
4455
4456 /* Insert a breakpoint at resume address. */
4457 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4458 }
4459 else
4460 {
4461 struct symbol *func = get_frame_function (frame);
4462
4463 if (func)
4464 check_exception_resume (ecs, frame, func);
4465 }
4466 keep_going (ecs);
4467 return;
4468
4469 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4470 if (debug_infrun)
4471 fprintf_unfiltered (gdb_stdlog,
4472 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4473
4474 if (what.is_longjmp)
4475 {
4476 gdb_assert (ecs->event_thread->control.step_resume_breakpoint
4477 != NULL);
4478 delete_step_resume_breakpoint (ecs->event_thread);
4479 }
4480 else
4481 {
4482 /* There are several cases to consider.
4483
4484 1. The initiating frame no longer exists. In this case
4485 we must stop, because the exception has gone too far.
4486
4487 2. The initiating frame exists, and is the same as the
4488 current frame. We stop, because the exception has been
4489 caught.
4490
4491 3. The initiating frame exists and is different from
4492 the current frame. This means the exception has been
4493 caught beneath the initiating frame, so keep going. */
4494 struct frame_info *init_frame
4495 = frame_find_by_id (ecs->event_thread->initiating_frame);
4496
4497 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
4498 != NULL);
4499 delete_exception_resume_breakpoint (ecs->event_thread);
4500
4501 if (init_frame)
4502 {
4503 struct frame_id current_id
4504 = get_frame_id (get_current_frame ());
4505 if (frame_id_eq (current_id,
4506 ecs->event_thread->initiating_frame))
4507 {
4508 /* Case 2. Fall through. */
4509 }
4510 else
4511 {
4512 /* Case 3. */
4513 keep_going (ecs);
4514 return;
4515 }
4516 }
4517
4518 /* For Cases 1 and 2, remove the step-resume breakpoint,
4519 if it exists. */
4520 delete_step_resume_breakpoint (ecs->event_thread);
4521 }
4522
4523 ecs->event_thread->control.stop_step = 1;
4524 print_end_stepping_range_reason ();
4525 stop_stepping (ecs);
4526 return;
4527
4528 case BPSTAT_WHAT_SINGLE:
4529 if (debug_infrun)
4530 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4531 ecs->event_thread->stepping_over_breakpoint = 1;
4532 /* Still need to check other stuff, at least the case
4533 	 where we are stepping and step out of the stepping range.  */
4534 break;
4535
4536 case BPSTAT_WHAT_STEP_RESUME:
4537 if (debug_infrun)
4538 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4539
4540 delete_step_resume_breakpoint (ecs->event_thread);
4541 if (ecs->event_thread->control.proceed_to_finish
4542 && execution_direction == EXEC_REVERSE)
4543 {
4544 struct thread_info *tp = ecs->event_thread;
4545
4546 /* We are finishing a function in reverse, and just hit
4547 the step-resume breakpoint at the start address of the
4548 function, and we're almost there -- just need to back
4549 up by one more single-step, which should take us back
4550 to the function call. */
4551 tp->control.step_range_start = tp->control.step_range_end = 1;
4552 keep_going (ecs);
4553 return;
4554 }
4555 fill_in_stop_func (gdbarch, ecs);
4556 if (stop_pc == ecs->stop_func_start
4557 && execution_direction == EXEC_REVERSE)
4558 {
4559 /* We are stepping over a function call in reverse, and
4560 just hit the step-resume breakpoint at the start
4561 address of the function. Go back to single-stepping,
4562 which should take us back to the function call. */
4563 ecs->event_thread->stepping_over_breakpoint = 1;
4564 keep_going (ecs);
4565 return;
4566 }
4567 break;
4568
4569 case BPSTAT_WHAT_STOP_NOISY:
4570 if (debug_infrun)
4571 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4572 stop_print_frame = 1;
4573
4574       /* We are about to nuke the step_resume_breakpoint via the
4575 cleanup chain, so no need to worry about it here. */
4576
4577 stop_stepping (ecs);
4578 return;
4579
4580 case BPSTAT_WHAT_STOP_SILENT:
4581 if (debug_infrun)
4582 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4583 stop_print_frame = 0;
4584
4585       /* We are about to nuke the step_resume_breakpoint via the
4586 cleanup chain, so no need to worry about it here. */
4587
4588 stop_stepping (ecs);
4589 return;
4590
4591 case BPSTAT_WHAT_HP_STEP_RESUME:
4592 if (debug_infrun)
4593 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
4594
4595 delete_step_resume_breakpoint (ecs->event_thread);
4596 if (ecs->event_thread->step_after_step_resume_breakpoint)
4597 {
4598 /* Back when the step-resume breakpoint was inserted, we
4599 were trying to single-step off a breakpoint. Go back
4600 to doing that. */
4601 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4602 ecs->event_thread->stepping_over_breakpoint = 1;
4603 keep_going (ecs);
4604 return;
4605 }
4606 break;
4607
4608 case BPSTAT_WHAT_KEEP_CHECKING:
4609 break;
4610 }
4611 }
4612
4613 /* We come here if we hit a breakpoint but should not
4614 stop for it. Possibly we also were stepping
4615 and should stop for that. So fall through and
4616 test for stepping. But, if not stepping,
4617 do not stop. */
4618
4619 /* In all-stop mode, if we're currently stepping but have stopped in
4620 some other thread, we need to switch back to the stepped thread. */
4621 if (!non_stop)
4622 {
4623 struct thread_info *tp;
4624
4625 tp = iterate_over_threads (currently_stepping_or_nexting_callback,
4626 ecs->event_thread);
4627 if (tp)
4628 {
4629 /* However, if the current thread is blocked on some internal
4630 breakpoint, and we simply need to step over that breakpoint
4631 to get it going again, do that first. */
4632 if ((ecs->event_thread->control.trap_expected
4633 && ecs->event_thread->suspend.stop_signal != TARGET_SIGNAL_TRAP)
4634 || ecs->event_thread->stepping_over_breakpoint)
4635 {
4636 keep_going (ecs);
4637 return;
4638 }
4639
4640 /* If the stepping thread exited, then don't try to switch
4641 back and resume it, which could fail in several different
4642 ways depending on the target. Instead, just keep going.
4643
4644 We can find a stepping dead thread in the thread list in
4645 two cases:
4646
4647 - The target supports thread exit events, and when the
4648 target tries to delete the thread from the thread list,
4649 inferior_ptid pointed at the exiting thread. In such
4650 case, calling delete_thread does not really remove the
4651 thread from the list; instead, the thread is left listed,
4652 with 'exited' state.
4653
4654 - The target's debug interface does not support thread
4655 exit events, and so we have no idea whatsoever if the
4656 previously stepping thread is still alive. For that
4657 reason, we need to synchronously query the target
4658 now. */
4659 if (is_exited (tp->ptid)
4660 || !target_thread_alive (tp->ptid))
4661 {
4662 if (debug_infrun)
4663 fprintf_unfiltered (gdb_stdlog,
4664 "infrun: not switching back to "
4665 "stepped thread, it has vanished\n");
4666
4667 delete_thread (tp->ptid);
4668 keep_going (ecs);
4669 return;
4670 }
4671
4672 /* Otherwise, we no longer expect a trap in the current thread.
4673 Clear the trap_expected flag before switching back -- this is
4674 what keep_going would do as well, if we called it. */
4675 ecs->event_thread->control.trap_expected = 0;
4676
4677 if (debug_infrun)
4678 fprintf_unfiltered (gdb_stdlog,
4679 "infrun: switching back to stepped thread\n");
4680
4681 ecs->event_thread = tp;
4682 ecs->ptid = tp->ptid;
4683 context_switch (ecs->ptid);
4684 keep_going (ecs);
4685 return;
4686 }
4687 }
4688
4689 if (ecs->event_thread->control.step_resume_breakpoint)
4690 {
4691 if (debug_infrun)
4692 fprintf_unfiltered (gdb_stdlog,
4693 "infrun: step-resume breakpoint is inserted\n");
4694
4695 /* Having a step-resume breakpoint overrides anything
4696 else having to do with stepping commands until
4697 that breakpoint is reached. */
4698 keep_going (ecs);
4699 return;
4700 }
4701
4702 if (ecs->event_thread->control.step_range_end == 0)
4703 {
4704 if (debug_infrun)
4705 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4706 /* Likewise if we aren't even stepping. */
4707 keep_going (ecs);
4708 return;
4709 }
4710
4711 /* Re-fetch current thread's frame in case the code above caused
4712 the frame cache to be re-initialized, making our FRAME variable
4713 a dangling pointer. */
4714 frame = get_current_frame ();
4715 gdbarch = get_frame_arch (frame);
4716 fill_in_stop_func (gdbarch, ecs);
4717
4718 /* If stepping through a line, keep going if still within it.
4719
4720 Note that step_range_end is the address of the first instruction
4721 beyond the step range, and NOT the address of the last instruction
4722 within it!
4723
4724 Note also that during reverse execution, we may be stepping
4725 through a function epilogue and therefore must detect when
4726 the current-frame changes in the middle of a line. */
4727
4728 if (stop_pc >= ecs->event_thread->control.step_range_start
4729 && stop_pc < ecs->event_thread->control.step_range_end
4730 && (execution_direction != EXEC_REVERSE
4731 || frame_id_eq (get_frame_id (frame),
4732 ecs->event_thread->control.step_frame_id)))
4733 {
4734 if (debug_infrun)
4735 fprintf_unfiltered
4736 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4737 paddress (gdbarch, ecs->event_thread->control.step_range_start),
4738 paddress (gdbarch, ecs->event_thread->control.step_range_end));
4739
4740 /* When stepping backward, stop at beginning of line range
4741 (unless it's the function entry point, in which case
4742 keep going back to the call point). */
4743 if (stop_pc == ecs->event_thread->control.step_range_start
4744 && stop_pc != ecs->stop_func_start
4745 && execution_direction == EXEC_REVERSE)
4746 {
4747 ecs->event_thread->control.stop_step = 1;
4748 print_end_stepping_range_reason ();
4749 stop_stepping (ecs);
4750 }
4751 else
4752 keep_going (ecs);
4753
4754 return;
4755 }
4756
4757 /* We stepped out of the stepping range. */
4758
4759 /* If we are stepping at the source level and entered the runtime
4760 loader dynamic symbol resolution code...
4761
4762 EXEC_FORWARD: we keep on single stepping until we exit the run
4763 time loader code and reach the callee's address.
4764
4765 EXEC_REVERSE: we've already executed the callee (backward), and
4766 the runtime loader code is handled just like any other
4767 undebuggable function call. Now we need only keep stepping
4768 backward through the trampoline code, and that's handled further
4769 down, so there is nothing for us to do here. */
4770
4771 if (execution_direction != EXEC_REVERSE
4772 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4773 && in_solib_dynsym_resolve_code (stop_pc))
4774 {
4775 CORE_ADDR pc_after_resolver =
4776 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4777
4778 if (debug_infrun)
4779 fprintf_unfiltered (gdb_stdlog,
4780 "infrun: stepped into dynsym resolve code\n");
4781
4782 if (pc_after_resolver)
4783 {
4784 /* Set up a step-resume breakpoint at the address
4785 indicated by SKIP_SOLIB_RESOLVER. */
4786 struct symtab_and_line sr_sal;
4787
4788 init_sal (&sr_sal);
4789 sr_sal.pc = pc_after_resolver;
4790 sr_sal.pspace = get_frame_program_space (frame);
4791
4792 insert_step_resume_breakpoint_at_sal (gdbarch,
4793 sr_sal, null_frame_id);
4794 }
4795
4796 keep_going (ecs);
4797 return;
4798 }
4799
4800 if (ecs->event_thread->control.step_range_end != 1
4801 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4802 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4803 && get_frame_type (frame) == SIGTRAMP_FRAME)
4804 {
4805 if (debug_infrun)
4806 fprintf_unfiltered (gdb_stdlog,
4807 "infrun: stepped into signal trampoline\n");
4808 /* The inferior, while doing a "step" or "next", has ended up in
4809 a signal trampoline (either by a signal being delivered or by
4810 the signal handler returning). Just single-step until the
4811 inferior leaves the trampoline (either by calling the handler
4812 or returning). */
4813 keep_going (ecs);
4814 return;
4815 }
4816
4817 /* If we're in the return path from a shared library trampoline,
4818 we want to proceed through the trampoline when stepping. */
4819 /* macro/2012-04-25: This needs to come before the subroutine
4820 call check below as on some targets return trampolines look
4821 like subroutine calls (MIPS16 return thunks). */
4822 if (gdbarch_in_solib_return_trampoline (gdbarch,
4823 stop_pc, ecs->stop_func_name)
4824 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
4825 {
4826 /* Determine where this trampoline returns. */
4827 CORE_ADDR real_stop_pc;
4828
4829 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4830
4831 if (debug_infrun)
4832 fprintf_unfiltered (gdb_stdlog,
4833 "infrun: stepped into solib return tramp\n");
4834
4835 /* Only proceed through if we know where it's going. */
4836 if (real_stop_pc)
4837 {
4838 /* And put the step-breakpoint there and go until there. */
4839 struct symtab_and_line sr_sal;
4840
4841 init_sal (&sr_sal); /* initialize to zeroes */
4842 sr_sal.pc = real_stop_pc;
4843 sr_sal.section = find_pc_overlay (sr_sal.pc);
4844 sr_sal.pspace = get_frame_program_space (frame);
4845
4846 /* Do not specify what the fp should be when we stop since
4847 on some machines the prologue is where the new fp value
4848 is established. */
4849 insert_step_resume_breakpoint_at_sal (gdbarch,
4850 sr_sal, null_frame_id);
4851
4852 /* Restart without fiddling with the step ranges or
4853 other state. */
4854 keep_going (ecs);
4855 return;
4856 }
4857 }
4858
4859 /* Check for subroutine calls. The check for the current frame
4860 equalling the step ID is not necessary - the check of the
4861 previous frame's ID is sufficient - but it is a common case and
4862 cheaper than checking the previous frame's ID.
4863
4864 NOTE: frame_id_eq will never report two invalid frame IDs as
4865 being equal, so to get into this block, both the current and
4866 previous frame must have valid frame IDs. */
4867 /* The outer_frame_id check is a heuristic to detect stepping
4868 through startup code. If we step over an instruction which
4869 sets the stack pointer from an invalid value to a valid value,
4870 we may detect that as a subroutine call from the mythical
4871 "outermost" function. This could be fixed by marking
4872 outermost frames as !stack_p,code_p,special_p. Then the
4873 initial outermost frame, before sp was valid, would
4874 have code_addr == &_start. See the comment in frame_id_eq
4875 for more. */
4876 if (!frame_id_eq (get_stack_frame_id (frame),
4877 ecs->event_thread->control.step_stack_frame_id)
4878 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4879 ecs->event_thread->control.step_stack_frame_id)
4880 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
4881 outer_frame_id)
4882 || step_start_function != find_pc_function (stop_pc))))
4883 {
4884 CORE_ADDR real_stop_pc;
4885
4886 if (debug_infrun)
4887 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4888
4889 if ((ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
4890 || ((ecs->event_thread->control.step_range_end == 1)
4891 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4892 ecs->stop_func_start)))
4893 {
4894 /* I presume that step_over_calls is only 0 when we're
4895 supposed to be stepping at the assembly language level
4896 ("stepi"). Just stop. */
4897 	  /* Also, maybe we just did a "nexti" inside a prologue, so we
4898 thought it was a subroutine call but it was not. Stop as
4899 well. FENN */
4900 	  /* And this works the same backward as forward.  MVS */
4901 ecs->event_thread->control.stop_step = 1;
4902 print_end_stepping_range_reason ();
4903 stop_stepping (ecs);
4904 return;
4905 }
4906
4907 /* Reverse stepping through solib trampolines. */
4908
4909 if (execution_direction == EXEC_REVERSE
4910 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
4911 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4912 || (ecs->stop_func_start == 0
4913 && in_solib_dynsym_resolve_code (stop_pc))))
4914 {
4915 /* Any solib trampoline code can be handled in reverse
4916 by simply continuing to single-step. We have already
4917 executed the solib function (backwards), and a few
4918 steps will take us back through the trampoline to the
4919 caller. */
4920 keep_going (ecs);
4921 return;
4922 }
4923
4924 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4925 {
4926 /* We're doing a "next".
4927
4928 Normal (forward) execution: set a breakpoint at the
4929 callee's return address (the address at which the caller
4930 will resume).
4931
4932 	     Reverse (backward) execution: set the step-resume
4933 breakpoint at the start of the function that we just
4934 stepped into (backwards), and continue to there. When we
4935 get there, we'll need to single-step back to the caller. */
4936
4937 if (execution_direction == EXEC_REVERSE)
4938 {
4939 struct symtab_and_line sr_sal;
4940
4941 /* Normal function call return (static or dynamic). */
4942 init_sal (&sr_sal);
4943 sr_sal.pc = ecs->stop_func_start;
4944 sr_sal.pspace = get_frame_program_space (frame);
4945 insert_step_resume_breakpoint_at_sal (gdbarch,
4946 sr_sal, null_frame_id);
4947 }
4948 else
4949 insert_step_resume_breakpoint_at_caller (frame);
4950
4951 keep_going (ecs);
4952 return;
4953 }
4954
4955 /* If we are in a function call trampoline (a stub between the
4956 calling routine and the real function), locate the real
4957 function. That's what tells us (a) whether we want to step
4958 into it at all, and (b) what prologue we want to run to the
4959 end of, if we do step into it. */
4960 real_stop_pc = skip_language_trampoline (frame, stop_pc);
4961 if (real_stop_pc == 0)
4962 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4963 if (real_stop_pc != 0)
4964 ecs->stop_func_start = real_stop_pc;
4965
4966 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
4967 {
4968 struct symtab_and_line sr_sal;
4969
4970 init_sal (&sr_sal);
4971 sr_sal.pc = ecs->stop_func_start;
4972 sr_sal.pspace = get_frame_program_space (frame);
4973
4974 insert_step_resume_breakpoint_at_sal (gdbarch,
4975 sr_sal, null_frame_id);
4976 keep_going (ecs);
4977 return;
4978 }
4979
4980 /* If we have line number information for the function we are
4981 thinking of stepping into and the function isn't on the skip
4982 list, step into it.
4983
4984 If there are several symtabs at that PC (e.g. with include
4985 	 files), we just want to know whether *any* of them have line
4986 numbers. find_pc_line handles this. */
4987 {
4988 struct symtab_and_line tmp_sal;
4989
4990 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
4991 if (tmp_sal.line != 0
4992 && !function_pc_is_marked_for_skip (ecs->stop_func_start))
4993 {
4994 if (execution_direction == EXEC_REVERSE)
4995 handle_step_into_function_backward (gdbarch, ecs);
4996 else
4997 handle_step_into_function (gdbarch, ecs);
4998 return;
4999 }
5000 }
5001
5002 /* If we have no line number and the step-stop-if-no-debug is
5003 set, we stop the step so that the user has a chance to switch
5004 	 to assembly mode.  */
5005 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5006 && step_stop_if_no_debug)
5007 {
5008 ecs->event_thread->control.stop_step = 1;
5009 print_end_stepping_range_reason ();
5010 stop_stepping (ecs);
5011 return;
5012 }
5013
5014 if (execution_direction == EXEC_REVERSE)
5015 {
5016 /* Set a breakpoint at callee's start address.
5017 From there we can step once and be back in the caller. */
5018 struct symtab_and_line sr_sal;
5019
5020 init_sal (&sr_sal);
5021 sr_sal.pc = ecs->stop_func_start;
5022 sr_sal.pspace = get_frame_program_space (frame);
5023 insert_step_resume_breakpoint_at_sal (gdbarch,
5024 sr_sal, null_frame_id);
5025 }
5026 else
5027 /* Set a breakpoint at callee's return address (the address
5028 at which the caller will resume). */
5029 insert_step_resume_breakpoint_at_caller (frame);
5030
5031 keep_going (ecs);
5032 return;
5033 }
5034
5035 /* Reverse stepping through solib trampolines. */
5036
5037 if (execution_direction == EXEC_REVERSE
5038 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
5039 {
5040 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
5041 || (ecs->stop_func_start == 0
5042 && in_solib_dynsym_resolve_code (stop_pc)))
5043 {
5044 /* Any solib trampoline code can be handled in reverse
5045 by simply continuing to single-step. We have already
5046 executed the solib function (backwards), and a few
5047 steps will take us back through the trampoline to the
5048 caller. */
5049 keep_going (ecs);
5050 return;
5051 }
5052 else if (in_solib_dynsym_resolve_code (stop_pc))
5053 {
5054 /* Stepped backward into the solib dynsym resolver.
5055 Set a breakpoint at its start and continue, then
5056 one more step will take us out. */
5057 struct symtab_and_line sr_sal;
5058
5059 init_sal (&sr_sal);
5060 sr_sal.pc = ecs->stop_func_start;
5061 sr_sal.pspace = get_frame_program_space (frame);
5062 insert_step_resume_breakpoint_at_sal (gdbarch,
5063 sr_sal, null_frame_id);
5064 keep_going (ecs);
5065 return;
5066 }
5067 }
5068
5069 stop_pc_sal = find_pc_line (stop_pc, 0);
5070
5071 /* NOTE: tausq/2004-05-24: This if block used to be done before all
5072      the trampoline processing logic; however, there are some trampolines
5073 that have no names, so we should do trampoline handling first. */
5074 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5075 && ecs->stop_func_name == NULL
5076 && stop_pc_sal.line == 0)
5077 {
5078 if (debug_infrun)
5079 fprintf_unfiltered (gdb_stdlog,
5080 "infrun: stepped into undebuggable function\n");
5081
5082 /* The inferior just stepped into, or returned to, an
5083 undebuggable function (where there is no debugging information
5084 and no line number corresponding to the address where the
5085 inferior stopped). Since we want to skip this kind of code,
5086 we keep going until the inferior returns from this
5087 function - unless the user has asked us not to (via
5088 set step-mode) or we no longer know how to get back
5089 to the call site. */
5090 if (step_stop_if_no_debug
5091 || !frame_id_p (frame_unwind_caller_id (frame)))
5092 {
5093 /* If we have no line number and the step-stop-if-no-debug
5094 is set, we stop the step so that the user has a chance to
5095 	     switch to assembly mode.  */
5096 ecs->event_thread->control.stop_step = 1;
5097 print_end_stepping_range_reason ();
5098 stop_stepping (ecs);
5099 return;
5100 }
5101 else
5102 {
5103 /* Set a breakpoint at callee's return address (the address
5104 at which the caller will resume). */
5105 insert_step_resume_breakpoint_at_caller (frame);
5106 keep_going (ecs);
5107 return;
5108 }
5109 }
5110
5111 if (ecs->event_thread->control.step_range_end == 1)
5112 {
5113 /* It is stepi or nexti. We always want to stop stepping after
5114 one instruction. */
5115 if (debug_infrun)
5116 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
5117 ecs->event_thread->control.stop_step = 1;
5118 print_end_stepping_range_reason ();
5119 stop_stepping (ecs);
5120 return;
5121 }
5122
5123 if (stop_pc_sal.line == 0)
5124 {
5125       /* We have no line number information.  That means we should stop
5126 stepping (does this always happen right after one instruction,
5127 when we do "s" in a function with no line numbers,
5128 or can this happen as a result of a return or longjmp?). */
5129 if (debug_infrun)
5130 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
5131 ecs->event_thread->control.stop_step = 1;
5132 print_end_stepping_range_reason ();
5133 stop_stepping (ecs);
5134 return;
5135 }
5136
5137 /* Look for "calls" to inlined functions, part one. If the inline
5138 frame machinery detected some skipped call sites, we have entered
5139 a new inline function. */
5140
5141 if (frame_id_eq (get_frame_id (get_current_frame ()),
5142 ecs->event_thread->control.step_frame_id)
5143 && inline_skipped_frames (ecs->ptid))
5144 {
5145 struct symtab_and_line call_sal;
5146
5147 if (debug_infrun)
5148 fprintf_unfiltered (gdb_stdlog,
5149 "infrun: stepped into inlined function\n");
5150
5151 find_frame_sal (get_current_frame (), &call_sal);
5152
5153 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
5154 {
5155 /* For "step", we're going to stop. But if the call site
5156 for this inlined function is on the same source line as
5157 we were previously stepping, go down into the function
5158 first. Otherwise stop at the call site. */
5159
5160 if (call_sal.line == ecs->event_thread->current_line
5161 && call_sal.symtab == ecs->event_thread->current_symtab)
5162 step_into_inline_frame (ecs->ptid);
5163
5164 ecs->event_thread->control.stop_step = 1;
5165 print_end_stepping_range_reason ();
5166 stop_stepping (ecs);
5167 return;
5168 }
5169 else
5170 {
5171 /* For "next", we should stop at the call site if it is on a
5172 different source line. Otherwise continue through the
5173 inlined function. */
5174 if (call_sal.line == ecs->event_thread->current_line
5175 && call_sal.symtab == ecs->event_thread->current_symtab)
5176 keep_going (ecs);
5177 else
5178 {
5179 ecs->event_thread->control.stop_step = 1;
5180 print_end_stepping_range_reason ();
5181 stop_stepping (ecs);
5182 }
5183 return;
5184 }
5185 }
5186
5187 /* Look for "calls" to inlined functions, part two. If we are still
5188 in the same real function we were stepping through, but we have
5189 to go further up to find the exact frame ID, we are stepping
5190 through a more inlined call beyond its call site. */
5191
5192 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
5193 && !frame_id_eq (get_frame_id (get_current_frame ()),
5194 ecs->event_thread->control.step_frame_id)
5195 && stepped_in_from (get_current_frame (),
5196 ecs->event_thread->control.step_frame_id))
5197 {
5198 if (debug_infrun)
5199 fprintf_unfiltered (gdb_stdlog,
5200 "infrun: stepping through inlined function\n");
5201
5202 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5203 keep_going (ecs);
5204 else
5205 {
5206 ecs->event_thread->control.stop_step = 1;
5207 print_end_stepping_range_reason ();
5208 stop_stepping (ecs);
5209 }
5210 return;
5211 }
5212
5213 if ((stop_pc == stop_pc_sal.pc)
5214 && (ecs->event_thread->current_line != stop_pc_sal.line
5215 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
5216 {
5217 /* We are at the start of a different line. So stop. Note that
5218 we don't stop if we step into the middle of a different line.
5219 That is said to make things like for (;;) statements work
5220 better. */
5221 if (debug_infrun)
5222 fprintf_unfiltered (gdb_stdlog,
5223 "infrun: stepped to a different line\n");
5224 ecs->event_thread->control.stop_step = 1;
5225 print_end_stepping_range_reason ();
5226 stop_stepping (ecs);
5227 return;
5228 }
5229
5230 /* We aren't done stepping.
5231
5232 Optimize by setting the stepping range to the line.
5233 (We might not be in the original line, but if we entered a
5234 new line in mid-statement, we continue stepping. This makes
5235 things like for(;;) statements work better.) */
5236
5237 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
5238 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
5239 set_step_info (frame, stop_pc_sal);
5240
5241 if (debug_infrun)
5242 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
5243 keep_going (ecs);
5244 }
5245
5246 /* Is thread TP in the middle of single-stepping? */
5247
5248 static int
5249 currently_stepping (struct thread_info *tp)
5250 {
5251 return ((tp->control.step_range_end
5252 && tp->control.step_resume_breakpoint == NULL)
5253 || tp->control.trap_expected
5254 || bpstat_should_step ());
5255 }
5256
5257 /* Returns true if any thread *but* the one passed in "data" is in the
5258 middle of stepping or of handling a "next". */
5259
5260 static int
5261 currently_stepping_or_nexting_callback (struct thread_info *tp, void *data)
5262 {
5263 if (tp == data)
5264 return 0;
5265
5266 return (tp->control.step_range_end
5267 || tp->control.trap_expected);
5268 }
5269
5270 /* Inferior has stepped into a subroutine call with source code that
5271    we should not step over.  Step to the first line of code in
5272 it. */
5273
5274 static void
5275 handle_step_into_function (struct gdbarch *gdbarch,
5276 struct execution_control_state *ecs)
5277 {
5278 struct symtab *s;
5279 struct symtab_and_line stop_func_sal, sr_sal;
5280
5281 fill_in_stop_func (gdbarch, ecs);
5282
5283 s = find_pc_symtab (stop_pc);
5284 if (s && s->language != language_asm)
5285 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5286 ecs->stop_func_start);
5287
5288 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
5289 /* Use the step_resume_break to step until the end of the prologue,
5290 even if that involves jumps (as it seems to on the vax under
5291 4.2). */
5292 /* If the prologue ends in the middle of a source line, continue to
5293 the end of that source line (if it is still within the function).
5294 Otherwise, just go to end of prologue. */
5295 if (stop_func_sal.end
5296 && stop_func_sal.pc != ecs->stop_func_start
5297 && stop_func_sal.end < ecs->stop_func_end)
5298 ecs->stop_func_start = stop_func_sal.end;
5299
5300 /* Architectures which require breakpoint adjustment might not be able
5301 to place a breakpoint at the computed address. If so, the test
5302 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
5303 ecs->stop_func_start to an address at which a breakpoint may be
5304 legitimately placed.
5305
5306 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
5307 made, GDB will enter an infinite loop when stepping through
5308 optimized code consisting of VLIW instructions which contain
5309 subinstructions corresponding to different source lines. On
5310 FR-V, it's not permitted to place a breakpoint on any but the
5311 first subinstruction of a VLIW instruction. When a breakpoint is
5312 set, GDB will adjust the breakpoint address to the beginning of
5313 the VLIW instruction. Thus, we need to make the corresponding
5314 adjustment here when computing the stop address. */
5315
5316 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
5317 {
5318 ecs->stop_func_start
5319 = gdbarch_adjust_breakpoint_address (gdbarch,
5320 ecs->stop_func_start);
5321 }
5322
5323 if (ecs->stop_func_start == stop_pc)
5324 {
5325 /* We are already there: stop now. */
5326 ecs->event_thread->control.stop_step = 1;
5327 print_end_stepping_range_reason ();
5328 stop_stepping (ecs);
5329 return;
5330 }
5331 else
5332 {
5333 /* Put the step-breakpoint there and go until there. */
5334 init_sal (&sr_sal); /* initialize to zeroes */
5335 sr_sal.pc = ecs->stop_func_start;
5336 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
5337 sr_sal.pspace = get_frame_program_space (get_current_frame ());
5338
5339 /* Do not specify what the fp should be when we stop since on
5340 some machines the prologue is where the new fp value is
5341 established. */
5342 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
5343
5344 /* And make sure stepping stops right away then. */
5345 ecs->event_thread->control.step_range_end
5346 = ecs->event_thread->control.step_range_start;
5347 }
5348 keep_going (ecs);
5349 }
5350
5351 /* Inferior has stepped backward into a subroutine call with source
5352    code that we should not step over.  Step to the beginning of the
5353 last line of code in it. */
5354
5355 static void
5356 handle_step_into_function_backward (struct gdbarch *gdbarch,
5357 struct execution_control_state *ecs)
5358 {
5359 struct symtab *s;
5360 struct symtab_and_line stop_func_sal;
5361
5362 fill_in_stop_func (gdbarch, ecs);
5363
5364 s = find_pc_symtab (stop_pc);
5365 if (s && s->language != language_asm)
5366 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5367 ecs->stop_func_start);
5368
5369 stop_func_sal = find_pc_line (stop_pc, 0);
5370
5371 /* OK, we're just going to keep stepping here. */
5372 if (stop_func_sal.pc == stop_pc)
5373 {
5374 /* We're there already. Just stop stepping now. */
5375 ecs->event_thread->control.stop_step = 1;
5376 print_end_stepping_range_reason ();
5377 stop_stepping (ecs);
5378 }
5379 else
5380 {
5381 /* Else just reset the step range and keep going.
5382 No step-resume breakpoint, they don't work for
5383 epilogues, which can have multiple entry paths. */
5384 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
5385 ecs->event_thread->control.step_range_end = stop_func_sal.end;
5386 keep_going (ecs);
5387 }
5388 return;
5389 }
5390
5391 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
5392    This is used both to step over functions and to skip over code.  */
5393
5394 static void
5395 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
5396 struct symtab_and_line sr_sal,
5397 struct frame_id sr_id,
5398 enum bptype sr_type)
5399 {
5400 /* There should never be more than one step-resume or longjmp-resume
5401 breakpoint per thread, so we should never be setting a new
5402 step_resume_breakpoint when one is already active. */
5403 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5404 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
5405
5406 if (debug_infrun)
5407 fprintf_unfiltered (gdb_stdlog,
5408 "infrun: inserting step-resume breakpoint at %s\n",
5409 paddress (gdbarch, sr_sal.pc));
5410
5411 inferior_thread ()->control.step_resume_breakpoint
5412 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type);
5413 }
5414
5415 void
5416 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
5417 struct symtab_and_line sr_sal,
5418 struct frame_id sr_id)
5419 {
5420 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
5421 sr_sal, sr_id,
5422 bp_step_resume);
5423 }
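
/* A typical caller (see the several uses earlier in this file) builds
   the SAL and lets the breakpoint float with a null frame ID; a
   minimal sketch:

     struct symtab_and_line sr_sal;

     init_sal (&sr_sal);
     sr_sal.pc = real_stop_pc;
     sr_sal.pspace = get_frame_program_space (frame);
     insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
*/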
5424
5425 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
5426 This is used to skip a potential signal handler.
5427
5428 This is called with the interrupted function's frame. The signal
5429 handler, when it returns, will resume the interrupted function at
5430 RETURN_FRAME.pc. */
5431
5432 static void
5433 insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
5434 {
5435 struct symtab_and_line sr_sal;
5436 struct gdbarch *gdbarch;
5437
5438 gdb_assert (return_frame != NULL);
5439 init_sal (&sr_sal); /* initialize to zeros */
5440
5441 gdbarch = get_frame_arch (return_frame);
5442 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
5443 sr_sal.section = find_pc_overlay (sr_sal.pc);
5444 sr_sal.pspace = get_frame_program_space (return_frame);
5445
5446 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
5447 get_stack_frame_id (return_frame),
5448 bp_hp_step_resume);
5449 }
5450
5451 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
5452 is used to skip a function after stepping into it (for "next" or if
5453 the called function has no debugging information).
5454
5455 The current function has almost always been reached by single
5456 stepping a call or return instruction. NEXT_FRAME belongs to the
5457 current function, and the breakpoint will be set at the caller's
5458 resume address.
5459
5460 This is a separate function rather than reusing
5461 insert_hp_step_resume_breakpoint_at_frame in order to avoid
5462 get_prev_frame, which may stop prematurely (see the implementation
5463 of frame_unwind_caller_id for an example). */
5464
5465 static void
5466 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
5467 {
5468 struct symtab_and_line sr_sal;
5469 struct gdbarch *gdbarch;
5470
5471 /* We shouldn't have gotten here if we don't know where the call site
5472 is. */
5473 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
5474
5475 init_sal (&sr_sal); /* initialize to zeros */
5476
5477 gdbarch = frame_unwind_caller_arch (next_frame);
5478 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
5479 frame_unwind_caller_pc (next_frame));
5480 sr_sal.section = find_pc_overlay (sr_sal.pc);
5481 sr_sal.pspace = frame_unwind_program_space (next_frame);
5482
5483 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5484 frame_unwind_caller_id (next_frame));
5485 }
5486
5487 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
5488 new breakpoint at the target of a jmp_buf. The handling of
5489 longjmp-resume uses the same mechanisms used for handling
5490 "step-resume" breakpoints. */
5491
5492 static void
5493 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
5494 {
5495 /* There should never be more than one step-resume or longjmp-resume
5496 breakpoint per thread, so we should never be setting a new
5497 longjmp_resume_breakpoint when one is already active. */
5498 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5499
5500 if (debug_infrun)
5501 fprintf_unfiltered (gdb_stdlog,
5502 "infrun: inserting longjmp-resume breakpoint at %s\n",
5503 paddress (gdbarch, pc));
5504
5505 inferior_thread ()->control.step_resume_breakpoint =
5506 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
5507 }
5508
5509 /* Insert an exception resume breakpoint. TP is the thread throwing
5510 the exception. The block B is the block of the unwinder debug hook
5511 function. FRAME is the frame corresponding to the call to this
5512 function. SYM is the symbol of the function argument holding the
5513 target PC of the exception. */
5514
5515 static void
5516 insert_exception_resume_breakpoint (struct thread_info *tp,
5517 struct block *b,
5518 struct frame_info *frame,
5519 struct symbol *sym)
5520 {
5521 volatile struct gdb_exception e;
5522
5523 /* We want to ignore errors here. */
5524 TRY_CATCH (e, RETURN_MASK_ERROR)
5525 {
5526 struct symbol *vsym;
5527 struct value *value;
5528 CORE_ADDR handler;
5529 struct breakpoint *bp;
5530
5531 vsym = lookup_symbol (SYMBOL_LINKAGE_NAME (sym), b, VAR_DOMAIN, NULL);
5532 value = read_var_value (vsym, frame);
5533 /* If the value was optimized out, revert to the old behavior. */
5534 if (! value_optimized_out (value))
5535 {
5536 handler = value_as_address (value);
5537
5538 if (debug_infrun)
5539 fprintf_unfiltered (gdb_stdlog,
5540 "infrun: exception resume at %lx\n",
5541 (unsigned long) handler);
5542
5543 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5544 handler, bp_exception_resume);
5545
5546 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
5547 frame = NULL;
5548
5549 bp->thread = tp->num;
5550 inferior_thread ()->control.exception_resume_breakpoint = bp;
5551 }
5552 }
5553 }
5554
5555 /* This is called when an exception has been intercepted. Check to
5556 see whether the exception's destination is of interest, and if so,
5557 set an exception resume breakpoint there. */
5558
5559 static void
5560 check_exception_resume (struct execution_control_state *ecs,
5561 struct frame_info *frame, struct symbol *func)
5562 {
5563 volatile struct gdb_exception e;
5564
5565 TRY_CATCH (e, RETURN_MASK_ERROR)
5566 {
5567 struct block *b;
5568 struct dict_iterator iter;
5569 struct symbol *sym;
5570 int argno = 0;
5571
5572 /* The exception breakpoint is a thread-specific breakpoint on
5573 the unwinder's debug hook, declared as:
5574
5575 void _Unwind_DebugHook (void *cfa, void *handler);
5576
5577 The CFA argument indicates the frame to which control is
5578 about to be transferred. HANDLER is the destination PC.
5579
5580 We ignore the CFA and set a temporary breakpoint at HANDLER.
5581 This is not extremely efficient but it avoids issues in gdb
5582 with computing the DWARF CFA, and it also works even in weird
5583 cases such as throwing an exception from inside a signal
5584 handler. */
5585
5586 b = SYMBOL_BLOCK_VALUE (func);
5587 ALL_BLOCK_SYMBOLS (b, iter, sym)
5588 {
5589 if (!SYMBOL_IS_ARGUMENT (sym))
5590 continue;
5591
5592 if (argno == 0)
5593 ++argno;
5594 else
5595 {
5596 insert_exception_resume_breakpoint (ecs->event_thread,
5597 b, frame, sym);
5598 break;
5599 }
5600 }
5601 }
5602 }
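
/* For illustration only: the unwinder's debug hook described in the
   comment above is, on the GNU unwinder side, an empty function whose
   only purpose is to be a breakpoint target.  A rough sketch (not the
   actual libgcc source) of how the runtime uses it:

     void
     _Unwind_DebugHook (void *cfa, void *handler)
     {
     }

     ...
     _Unwind_DebugHook (target_cfa, handler_pc);
     ... then control is transferred to HANDLER_PC in the frame at CFA.

   GDB's thread-specific breakpoint on the hook therefore fires with
   both arguments available; only the second one is read here.  */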
5603
5604 static void
5605 stop_stepping (struct execution_control_state *ecs)
5606 {
5607 if (debug_infrun)
5608 fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n");
5609
5610 /* Let callers know we don't want to wait for the inferior anymore. */
5611 ecs->wait_some_more = 0;
5612 }
5613
5614 /* This function handles various cases where we need to continue
5615 waiting for the inferior. */
5616 /* (Used to be the keep_going: label in the old wait_for_inferior). */
5617
5618 static void
5619 keep_going (struct execution_control_state *ecs)
5620 {
5621 /* Make sure normal_stop is called if we get a QUIT handled before
5622 reaching resume. */
5623 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
5624
5625 /* Save the pc before execution, to compare with pc after stop. */
5626 ecs->event_thread->prev_pc
5627 = regcache_read_pc (get_thread_regcache (ecs->ptid));
5628
5629   /* If we did not break out above, it means we should keep running the
5630      inferior and not return to the debugger.  */
5631
5632 if (ecs->event_thread->control.trap_expected
5633 && ecs->event_thread->suspend.stop_signal != TARGET_SIGNAL_TRAP)
5634 {
5635 /* We took a signal (which we are supposed to pass through to
5636 the inferior, else we'd not get here) and we haven't yet
5637 gotten our trap. Simply continue. */
5638
5639 discard_cleanups (old_cleanups);
5640 resume (currently_stepping (ecs->event_thread),
5641 ecs->event_thread->suspend.stop_signal);
5642 }
5643 else
5644 {
5645 /* Either the trap was not expected, but we are continuing
5646 anyway (the user asked that this signal be passed to the
5647 child)
5648 -- or --
5649 The signal was SIGTRAP, e.g. it was our signal, but we
5650 decided we should resume from it.
5651
5652 We're going to run this baby now!
5653
5654 Note that insert_breakpoints won't try to re-insert
5655 already inserted breakpoints. Therefore, we don't
5656 care if breakpoints were already inserted, or not. */
5657
5658 if (ecs->event_thread->stepping_over_breakpoint)
5659 {
5660 struct regcache *thread_regcache = get_thread_regcache (ecs->ptid);
5661
5662 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
5663 /* Since we can't do a displaced step, we have to remove
5664 the breakpoint while we step it. To keep things
5665 simple, we remove them all. */
5666 remove_breakpoints ();
5667 }
5668 else
5669 {
5670 volatile struct gdb_exception e;
5671
5672 /* Stop stepping when inserting breakpoints
5673 has failed. */
5674 TRY_CATCH (e, RETURN_MASK_ERROR)
5675 {
5676 insert_breakpoints ();
5677 }
5678 if (e.reason < 0)
5679 {
5680 exception_print (gdb_stderr, e);
5681 stop_stepping (ecs);
5682 return;
5683 }
5684 }
5685
5686 ecs->event_thread->control.trap_expected
5687 = ecs->event_thread->stepping_over_breakpoint;
5688
5689 /* Do not deliver SIGNAL_TRAP (except when the user explicitly
5690 specifies that such a signal should be delivered to the
5691 target program).
5692
5693 	 Typically, this would occur when a user is debugging a
5694 target monitor on a simulator: the target monitor sets a
5695 breakpoint; the simulator encounters this break-point and
5696 	 halts the simulation, handing control to GDB; GDB, noting
5697 that the break-point isn't valid, returns control back to the
5698 simulator; the simulator then delivers the hardware
5699 equivalent of a SIGNAL_TRAP to the program being debugged. */
5700
5701 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
5702 && !signal_program[ecs->event_thread->suspend.stop_signal])
5703 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
5704
5705 discard_cleanups (old_cleanups);
5706 resume (currently_stepping (ecs->event_thread),
5707 ecs->event_thread->suspend.stop_signal);
5708 }
5709
5710 prepare_to_wait (ecs);
5711 }
5712
5713 /* This function normally comes after a resume, before
5714 handle_inferior_event exits. It takes care of any last bits of
5715 housekeeping, and sets the all-important wait_some_more flag. */
5716
5717 static void
5718 prepare_to_wait (struct execution_control_state *ecs)
5719 {
5720 if (debug_infrun)
5721 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
5722
5723 /* This is the old end of the while loop. Let everybody know we
5724 want to wait for the inferior some more and get called again
5725 soon. */
5726 ecs->wait_some_more = 1;
5727 }
5728
5729 /* Several print_*_reason functions to print why the inferior has stopped.
5730 We always print something when the inferior exits, or receives a signal.
5731 The rest of the cases are dealt with later on in normal_stop and
5732 print_it_typical. Ideally there should be a call to one of these
5733    print_*_reason functions from handle_inferior_event each time
5734 stop_stepping is called. */
5735
5736 /* Print why the inferior has stopped.
5737 We are done with a step/next/si/ni command, print why the inferior has
5738 stopped. For now print nothing. Print a message only if not in the middle
5739 of doing a "step n" operation for n > 1. */
5740
5741 static void
5742 print_end_stepping_range_reason (void)
5743 {
5744 if ((!inferior_thread ()->step_multi
5745 || !inferior_thread ()->control.stop_step)
5746 && ui_out_is_mi_like_p (current_uiout))
5747 ui_out_field_string (current_uiout, "reason",
5748 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
5749 }
5750
5751 /* The inferior was terminated by a signal, print why it stopped. */
5752
5753 static void
5754 print_signal_exited_reason (enum target_signal siggnal)
5755 {
5756 struct ui_out *uiout = current_uiout;
5757
5758 annotate_signalled ();
5759 if (ui_out_is_mi_like_p (uiout))
5760 ui_out_field_string
5761 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
5762 ui_out_text (uiout, "\nProgram terminated with signal ");
5763 annotate_signal_name ();
5764 ui_out_field_string (uiout, "signal-name",
5765 target_signal_to_name (siggnal));
5766 annotate_signal_name_end ();
5767 ui_out_text (uiout, ", ");
5768 annotate_signal_string ();
5769 ui_out_field_string (uiout, "signal-meaning",
5770 target_signal_to_string (siggnal));
5771 annotate_signal_string_end ();
5772 ui_out_text (uiout, ".\n");
5773 ui_out_text (uiout, "The program no longer exists.\n");
5774 }
5775
5776 /* The inferior program is finished, print why it stopped. */
5777
5778 static void
5779 print_exited_reason (int exitstatus)
5780 {
5781 struct inferior *inf = current_inferior ();
5782 const char *pidstr = target_pid_to_str (pid_to_ptid (inf->pid));
5783 struct ui_out *uiout = current_uiout;
5784
5785 annotate_exited (exitstatus);
5786 if (exitstatus)
5787 {
5788 if (ui_out_is_mi_like_p (uiout))
5789 ui_out_field_string (uiout, "reason",
5790 async_reason_lookup (EXEC_ASYNC_EXITED));
5791 ui_out_text (uiout, "[Inferior ");
5792 ui_out_text (uiout, plongest (inf->num));
5793 ui_out_text (uiout, " (");
5794 ui_out_text (uiout, pidstr);
5795 ui_out_text (uiout, ") exited with code ");
5796 ui_out_field_fmt (uiout, "exit-code", "0%o", (unsigned int) exitstatus);
5797 ui_out_text (uiout, "]\n");
5798 }
5799 else
5800 {
5801 if (ui_out_is_mi_like_p (uiout))
5802 ui_out_field_string
5803 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
5804 ui_out_text (uiout, "[Inferior ");
5805 ui_out_text (uiout, plongest (inf->num));
5806 ui_out_text (uiout, " (");
5807 ui_out_text (uiout, pidstr);
5808 ui_out_text (uiout, ") exited normally]\n");
5809 }
5810 /* Support the --return-child-result option. */
5811 return_child_result_value = exitstatus;
5812 }
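
/* Illustrative CLI output from the two branches above (inferior
   number, PID and exit status are examples only; the exit code is
   printed in octal):

     [Inferior 1 (process 12345) exited with code 01]
     [Inferior 1 (process 12345) exited normally]
*/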
5813
5814 /* Signal received, print why the inferior has stopped. The signal table
5815    tells us to print information about it.  */
5816
5817 static void
5818 print_signal_received_reason (enum target_signal siggnal)
5819 {
5820 struct ui_out *uiout = current_uiout;
5821
5822 annotate_signal ();
5823
5824 if (siggnal == TARGET_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
5825 {
5826 struct thread_info *t = inferior_thread ();
5827
5828 ui_out_text (uiout, "\n[");
5829 ui_out_field_string (uiout, "thread-name",
5830 target_pid_to_str (t->ptid));
5831 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
5832 ui_out_text (uiout, " stopped");
5833 }
5834 else
5835 {
5836 ui_out_text (uiout, "\nProgram received signal ");
5837 annotate_signal_name ();
5838 if (ui_out_is_mi_like_p (uiout))
5839 ui_out_field_string
5840 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
5841 ui_out_field_string (uiout, "signal-name",
5842 target_signal_to_name (siggnal));
5843 annotate_signal_name_end ();
5844 ui_out_text (uiout, ", ");
5845 annotate_signal_string ();
5846 ui_out_field_string (uiout, "signal-meaning",
5847 target_signal_to_string (siggnal));
5848 annotate_signal_string_end ();
5849 }
5850 ui_out_text (uiout, ".\n");
5851 }
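
/* Illustrative CLI output from the branches above (the thread string
   and signal are examples only):

     [process 12345] #1 stopped.
     Program received signal SIGINT, Interrupt.
*/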
5852
5853 /* Reverse execution: target ran out of history info, print why the inferior
5854 has stopped. */
5855
5856 static void
5857 print_no_history_reason (void)
5858 {
5859 ui_out_text (current_uiout, "\nNo more reverse-execution history.\n");
5860 }
5861
5862 /* Here to return control to GDB when the inferior stops for real.
5863 Print appropriate messages, remove breakpoints, give terminal our modes.
5864
5865 STOP_PRINT_FRAME nonzero means print the executing frame
5866 (pc, function, args, file, line number and line text).
5867 BREAKPOINTS_FAILED nonzero means stop was due to error
5868 attempting to insert breakpoints. */
5869
5870 void
5871 normal_stop (void)
5872 {
5873 struct target_waitstatus last;
5874 ptid_t last_ptid;
5875 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
5876
5877 get_last_target_status (&last_ptid, &last);
5878
5879 /* If an exception is thrown from this point on, make sure to
5880 propagate GDB's knowledge of the executing state to the
5881 frontend/user running state. A QUIT is an easy exception to see
5882 here, so do this before any filtered output. */
5883 if (!non_stop)
5884 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
5885 else if (last.kind != TARGET_WAITKIND_SIGNALLED
5886 && last.kind != TARGET_WAITKIND_EXITED
5887 && last.kind != TARGET_WAITKIND_NO_RESUMED)
5888 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
5889
5890 /* In non-stop mode, we don't want GDB to switch threads behind the
5891 user's back, to avoid races where the user is typing a command to
5892 apply to thread x, but GDB switches to thread y before the user
5893 finishes entering the command. */
5894
5895 /* As with the notification of thread events, we want to delay
5896 notifying the user that we've switched thread context until
5897 the inferior actually stops.
5898
5899 There's no point in saying anything if the inferior has exited.
5900 Note that SIGNALLED here means "exited with a signal", not
5901 "received a signal". */
5902 if (!non_stop
5903 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
5904 && target_has_execution
5905 && last.kind != TARGET_WAITKIND_SIGNALLED
5906 && last.kind != TARGET_WAITKIND_EXITED
5907 && last.kind != TARGET_WAITKIND_NO_RESUMED)
5908 {
5909 target_terminal_ours_for_output ();
5910 printf_filtered (_("[Switching to %s]\n"),
5911 target_pid_to_str (inferior_ptid));
5912 annotate_thread_changed ();
5913 previous_inferior_ptid = inferior_ptid;
5914 }
5915
5916 if (last.kind == TARGET_WAITKIND_NO_RESUMED)
5917 {
5918 gdb_assert (sync_execution || !target_can_async_p ());
5919
5920 target_terminal_ours_for_output ();
5921 printf_filtered (_("No unwaited-for children left.\n"));
5922 }
5923
5924 if (!breakpoints_always_inserted_mode () && target_has_execution)
5925 {
5926 if (remove_breakpoints ())
5927 {
5928 target_terminal_ours_for_output ();
5929 printf_filtered (_("Cannot remove breakpoints because "
5930 "program is no longer writable.\nFurther "
5931 "execution is probably impossible.\n"));
5932 }
5933 }
5934
5935 /* If an auto-display called a function and that got a signal,
5936 delete that auto-display to avoid an infinite recursion. */
5937
5938 if (stopped_by_random_signal)
5939 disable_current_display ();
5940
5941 /* Don't print a message if in the middle of doing a "step n"
5942      operation for n > 1.  */
5943 if (target_has_execution
5944 && last.kind != TARGET_WAITKIND_SIGNALLED
5945 && last.kind != TARGET_WAITKIND_EXITED
5946 && inferior_thread ()->step_multi
5947 && inferior_thread ()->control.stop_step)
5948 goto done;
5949
5950 target_terminal_ours ();
5951 async_enable_stdin ();
5952
5953 /* Set the current source location. This will also happen if we
5954 display the frame below, but the current SAL will be incorrect
5955 during a user hook-stop function. */
5956 if (has_stack_frames () && !stop_stack_dummy)
5957 set_current_sal_from_frame (get_current_frame (), 1);
5958
5959 /* Let the user/frontend see the threads as stopped. */
5960 do_cleanups (old_chain);
5961
5962 /* Look up the hook_stop and run it (CLI internally handles problem
5963 of stop_command's pre-hook not existing). */
5964 if (stop_command)
5965 catch_errors (hook_stop_stub, stop_command,
5966 "Error while running hook_stop:\n", RETURN_MASK_ALL);
5967
5968 if (!has_stack_frames ())
5969 goto done;
5970
5971 if (last.kind == TARGET_WAITKIND_SIGNALLED
5972 || last.kind == TARGET_WAITKIND_EXITED)
5973 goto done;
5974
5975 /* Select innermost stack frame - i.e., current frame is frame 0,
5976 and current location is based on that.
5977 Don't do this on return from a stack dummy routine,
5978 or if the program has exited. */
5979
5980 if (!stop_stack_dummy)
5981 {
5982 select_frame (get_current_frame ());
5983
5984 /* Print current location without a level number, if
5985 we have changed functions or hit a breakpoint.
5986 Print source line if we have one.
5987 bpstat_print() contains the logic deciding in detail
5988 what to print, based on the event(s) that just occurred. */
5989
5990 /* If --batch-silent is enabled then there's no need to print the current
5991 	 source location, and trying to do so risks causing an error message about
5992 missing source files. */
5993 if (stop_print_frame && !batch_silent)
5994 {
5995 int bpstat_ret;
5996 int source_flag;
5997 int do_frame_printing = 1;
5998 struct thread_info *tp = inferior_thread ();
5999
6000 bpstat_ret = bpstat_print (tp->control.stop_bpstat, last.kind);
6001 switch (bpstat_ret)
6002 {
6003 case PRINT_UNKNOWN:
6004 /* FIXME: cagney/2002-12-01: Given that a frame ID does
6005 (or should) carry around the function and does (or
6006 should) use that when doing a frame comparison. */
6007 if (tp->control.stop_step
6008 && frame_id_eq (tp->control.step_frame_id,
6009 get_frame_id (get_current_frame ()))
6010 && step_start_function == find_pc_function (stop_pc))
6011 source_flag = SRC_LINE; /* Finished step, just
6012 print source line. */
6013 else
6014 source_flag = SRC_AND_LOC; /* Print location and
6015 source line. */
6016 break;
6017 case PRINT_SRC_AND_LOC:
6018 source_flag = SRC_AND_LOC; /* Print location and
6019 source line. */
6020 break;
6021 case PRINT_SRC_ONLY:
6022 source_flag = SRC_LINE;
6023 break;
6024 case PRINT_NOTHING:
6025 source_flag = SRC_LINE; /* something bogus */
6026 do_frame_printing = 0;
6027 break;
6028 default:
6029 internal_error (__FILE__, __LINE__, _("Unknown value."));
6030 }
6031
6032 /* The behavior of this routine with respect to the source
6033 flag is:
6034 SRC_LINE: Print only source line
6035 LOCATION: Print only location
6036 SRC_AND_LOC: Print location and source line. */
6037 if (do_frame_printing)
6038 print_stack_frame (get_selected_frame (NULL), 0, source_flag);
6039
6040 /* Display the auto-display expressions. */
6041 do_displays ();
6042 }
6043 }
6044
6045 /* Save the function value return registers, if we care.
6046 We might be about to restore their previous contents. */
6047 if (inferior_thread ()->control.proceed_to_finish
6048 && execution_direction != EXEC_REVERSE)
6049 {
6050 /* This should not be necessary. */
6051 if (stop_registers)
6052 regcache_xfree (stop_registers);
6053
6054 /* NB: The copy goes through to the target picking up the value of
6055 all the registers. */
6056 stop_registers = regcache_dup (get_current_regcache ());
6057 }
6058
6059 if (stop_stack_dummy == STOP_STACK_DUMMY)
6060 {
6061 /* Pop the empty frame that contains the stack dummy.
6062 This also restores inferior state prior to the call
6063 (struct infcall_suspend_state). */
6064 struct frame_info *frame = get_current_frame ();
6065
6066 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
6067 frame_pop (frame);
6068 /* frame_pop() calls reinit_frame_cache as the last thing it
6069 does which means there's currently no selected frame. We
6070 don't need to re-establish a selected frame if the dummy call
6071 returns normally, that will be done by
6072 restore_infcall_control_state. However, we do have to handle
6073 the case where the dummy call is returning after being
6074 stopped (e.g. the dummy call previously hit a breakpoint).
6075 We can't know which case we have so just always re-establish
6076 a selected frame here. */
6077 select_frame (get_current_frame ());
6078 }
6079
6080 done:
6081 annotate_stopped ();
6082
6083 /* Suppress the stop observer if we're in the middle of:
6084
6085      - a step n (n > 1), as there are still more steps to be done.
6086
6087 - a "finish" command, as the observer will be called in
6088 finish_command_continuation, so it can include the inferior
6089 function's return value.
6090
6091      - calling an inferior function, as we pretend the inferior didn't
6092 run at all. The return value of the call is handled by the
6093 expression evaluator, through call_function_by_hand. */
6094
6095 if (!target_has_execution
6096 || last.kind == TARGET_WAITKIND_SIGNALLED
6097 || last.kind == TARGET_WAITKIND_EXITED
6098 || last.kind == TARGET_WAITKIND_NO_RESUMED
6099 || (!(inferior_thread ()->step_multi
6100 && inferior_thread ()->control.stop_step)
6101 && !(inferior_thread ()->control.stop_bpstat
6102 && inferior_thread ()->control.proceed_to_finish)
6103 && !inferior_thread ()->control.in_infcall))
6104 {
6105 if (!ptid_equal (inferior_ptid, null_ptid))
6106 observer_notify_normal_stop (inferior_thread ()->control.stop_bpstat,
6107 stop_print_frame);
6108 else
6109 observer_notify_normal_stop (NULL, stop_print_frame);
6110 }
6111
6112 if (target_has_execution)
6113 {
6114 if (last.kind != TARGET_WAITKIND_SIGNALLED
6115 && last.kind != TARGET_WAITKIND_EXITED)
6116 /* Delete the breakpoint we stopped at, if it wants to be deleted.
6117 Delete any breakpoint that is to be deleted at the next stop. */
6118 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
6119 }
6120
6121 /* Try to get rid of automatically added inferiors that are no
6122 longer needed. Keeping those around slows down things linearly.
6123 Note that this never removes the current inferior. */
6124 prune_inferiors ();
6125 }
6126
6127 static int
6128 hook_stop_stub (void *cmd)
6129 {
6130 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
6131 return (0);
6132 }
6133 \f
6134 int
6135 signal_stop_state (int signo)
6136 {
6137 return signal_stop[signo];
6138 }
6139
6140 int
6141 signal_print_state (int signo)
6142 {
6143 return signal_print[signo];
6144 }
6145
6146 int
6147 signal_pass_state (int signo)
6148 {
6149 return signal_program[signo];
6150 }
6151
6152 static void
6153 signal_cache_update (int signo)
6154 {
6155 if (signo == -1)
6156 {
6157 for (signo = 0; signo < (int) TARGET_SIGNAL_LAST; signo++)
6158 signal_cache_update (signo);
6159
6160 return;
6161 }
6162
6163 signal_pass[signo] = (signal_stop[signo] == 0
6164 && signal_print[signo] == 0
6165 && signal_program[signo] == 1);
6166 }
6167
6168 int
6169 signal_stop_update (int signo, int state)
6170 {
6171 int ret = signal_stop[signo];
6172
6173 signal_stop[signo] = state;
6174 signal_cache_update (signo);
6175 return ret;
6176 }
6177
6178 int
6179 signal_print_update (int signo, int state)
6180 {
6181 int ret = signal_print[signo];
6182
6183 signal_print[signo] = state;
6184 signal_cache_update (signo);
6185 return ret;
6186 }
6187
6188 int
6189 signal_pass_update (int signo, int state)
6190 {
6191 int ret = signal_program[signo];
6192
6193 signal_program[signo] = state;
6194 signal_cache_update (signo);
6195 return ret;
6196 }
6197
6198 static void
6199 sig_print_header (void)
6200 {
6201 printf_filtered (_("Signal Stop\tPrint\tPass "
6202 "to program\tDescription\n"));
6203 }
6204
6205 static void
6206 sig_print_info (enum target_signal oursig)
6207 {
6208 const char *name = target_signal_to_name (oursig);
6209 int name_padding = 13 - strlen (name);
6210
6211 if (name_padding <= 0)
6212 name_padding = 0;
6213
6214 printf_filtered ("%s", name);
6215 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
6216 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
6217 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
6218 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
6219 printf_filtered ("%s\n", target_signal_to_string (oursig));
6220 }
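
/* The two functions above produce output along these lines
   (illustrative rows; the defaults vary by signal and target):

     Signal        Stop	Print	Pass to program	Description
     SIGHUP        Yes	Yes	Yes		Hangup
     SIGTRAP       Yes	Yes	No		Trace/breakpoint trap
*/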
6221
6222 /* Specify how various signals in the inferior should be handled. */
6223
6224 static void
6225 handle_command (char *args, int from_tty)
6226 {
6227 char **argv;
6228 int digits, wordlen;
6229 int sigfirst, signum, siglast;
6230 enum target_signal oursig;
6231 int allsigs;
6232 int nsigs;
6233 unsigned char *sigs;
6234 struct cleanup *old_chain;
6235
6236 if (args == NULL)
6237 {
6238 error_no_arg (_("signal to handle"));
6239 }
6240
6241 /* Allocate and zero an array of flags for which signals to handle. */
6242
6243 nsigs = (int) TARGET_SIGNAL_LAST;
6244 sigs = (unsigned char *) alloca (nsigs);
6245 memset (sigs, 0, nsigs);
6246
6247 /* Break the command line up into args. */
6248
6249 argv = gdb_buildargv (args);
6250 old_chain = make_cleanup_freeargv (argv);
6251
6252 /* Walk through the args, looking for signal oursigs, signal names, and
6253 actions. Signal numbers and signal names may be interspersed with
6254 actions, with the actions being performed for all signals cumulatively
6255 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
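  /* For example, all of the following forms are accepted
     (illustrative):

	handle SIGUSR1 nostop noprint pass
	handle 14 print
	handle 5-10 stop
	handle all nopass

     Numeric arguments use GDB's own signal numbering; see
     target_signal_from_command below.  */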
6256
6257 while (*argv != NULL)
6258 {
6259 wordlen = strlen (*argv);
6260 for (digits = 0; isdigit ((*argv)[digits]); digits++)
6261 {;
6262 }
6263 allsigs = 0;
6264 sigfirst = siglast = -1;
6265
6266 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
6267 {
6268 /* Apply action to all signals except those used by the
6269 debugger. Silently skip those. */
6270 allsigs = 1;
6271 sigfirst = 0;
6272 siglast = nsigs - 1;
6273 }
6274 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
6275 {
6276 SET_SIGS (nsigs, sigs, signal_stop);
6277 SET_SIGS (nsigs, sigs, signal_print);
6278 }
6279 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
6280 {
6281 UNSET_SIGS (nsigs, sigs, signal_program);
6282 }
6283 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
6284 {
6285 SET_SIGS (nsigs, sigs, signal_print);
6286 }
6287 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
6288 {
6289 SET_SIGS (nsigs, sigs, signal_program);
6290 }
6291 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
6292 {
6293 UNSET_SIGS (nsigs, sigs, signal_stop);
6294 }
6295 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
6296 {
6297 SET_SIGS (nsigs, sigs, signal_program);
6298 }
6299 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
6300 {
6301 UNSET_SIGS (nsigs, sigs, signal_print);
6302 UNSET_SIGS (nsigs, sigs, signal_stop);
6303 }
6304 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
6305 {
6306 UNSET_SIGS (nsigs, sigs, signal_program);
6307 }
6308 else if (digits > 0)
6309 {
6310 /* It is numeric. The numeric signal refers to our own
6311 	     internal signal numbering from target.h, not to a host/target
6312 signal number. This is a feature; users really should be
6313 using symbolic names anyway, and the common ones like
6314 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
6315
6316 sigfirst = siglast = (int)
6317 target_signal_from_command (atoi (*argv));
6318 if ((*argv)[digits] == '-')
6319 {
6320 siglast = (int)
6321 target_signal_from_command (atoi ((*argv) + digits + 1));
6322 }
6323 if (sigfirst > siglast)
6324 {
6325 /* Bet he didn't figure we'd think of this case... */
6326 signum = sigfirst;
6327 sigfirst = siglast;
6328 siglast = signum;
6329 }
6330 }
6331 else
6332 {
6333 oursig = target_signal_from_name (*argv);
6334 if (oursig != TARGET_SIGNAL_UNKNOWN)
6335 {
6336 sigfirst = siglast = (int) oursig;
6337 }
6338 else
6339 {
6340 /* Not a number and not a recognized flag word => complain. */
6341 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
6342 }
6343 }
6344
6345 /* If any signal numbers or symbol names were found, set flags for
6346 which signals to apply actions to. */
6347
6348 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
6349 {
6350 switch ((enum target_signal) signum)
6351 {
6352 case TARGET_SIGNAL_TRAP:
6353 case TARGET_SIGNAL_INT:
6354 if (!allsigs && !sigs[signum])
6355 {
6356 if (query (_("%s is used by the debugger.\n\
6357 Are you sure you want to change it? "),
6358 target_signal_to_name ((enum target_signal) signum)))
6359 {
6360 sigs[signum] = 1;
6361 }
6362 else
6363 {
6364 printf_unfiltered (_("Not confirmed, unchanged.\n"));
6365 gdb_flush (gdb_stdout);
6366 }
6367 }
6368 break;
6369 case TARGET_SIGNAL_0:
6370 case TARGET_SIGNAL_DEFAULT:
6371 case TARGET_SIGNAL_UNKNOWN:
6372 /* Make sure that "all" doesn't print these. */
6373 break;
6374 default:
6375 sigs[signum] = 1;
6376 break;
6377 }
6378 }
6379
6380 argv++;
6381 }
6382
6383 for (signum = 0; signum < nsigs; signum++)
6384 if (sigs[signum])
6385 {
6386 signal_cache_update (-1);
6387 target_pass_signals ((int) TARGET_SIGNAL_LAST, signal_pass);
6388 target_program_signals ((int) TARGET_SIGNAL_LAST, signal_program);
6389
6390 if (from_tty)
6391 {
6392 /* Show the results. */
6393 sig_print_header ();
6394 for (; signum < nsigs; signum++)
6395 if (sigs[signum])
6396 sig_print_info (signum);
6397 }
6398
6399 break;
6400 }
6401
6402 do_cleanups (old_chain);
6403 }
6404
6405 static void
6406 xdb_handle_command (char *args, int from_tty)
6407 {
6408 char **argv;
6409 struct cleanup *old_chain;
6410
6411 if (args == NULL)
6412 error_no_arg (_("xdb command"));
6413
6414 /* Break the command line up into args. */
6415
6416 argv = gdb_buildargv (args);
6417 old_chain = make_cleanup_freeargv (argv);
6418 if (argv[1] != (char *) NULL)
6419 {
6420 char *argBuf;
6421 int bufLen;
6422
6423 bufLen = strlen (argv[0]) + 20;
6424 argBuf = (char *) xmalloc (bufLen);
6425 if (argBuf)
6426 {
6427 int validFlag = 1;
6428 enum target_signal oursig;
6429
6430 oursig = target_signal_from_name (argv[0]);
6431 memset (argBuf, 0, bufLen);
6432 if (strcmp (argv[1], "Q") == 0)
6433 sprintf (argBuf, "%s %s", argv[0], "noprint");
6434 else
6435 {
6436 if (strcmp (argv[1], "s") == 0)
6437 {
6438 if (!signal_stop[oursig])
6439 sprintf (argBuf, "%s %s", argv[0], "stop");
6440 else
6441 sprintf (argBuf, "%s %s", argv[0], "nostop");
6442 }
6443 else if (strcmp (argv[1], "i") == 0)
6444 {
6445 if (!signal_program[oursig])
6446 sprintf (argBuf, "%s %s", argv[0], "pass");
6447 else
6448 sprintf (argBuf, "%s %s", argv[0], "nopass");
6449 }
6450 else if (strcmp (argv[1], "r") == 0)
6451 {
6452 if (!signal_print[oursig])
6453 sprintf (argBuf, "%s %s", argv[0], "print");
6454 else
6455 sprintf (argBuf, "%s %s", argv[0], "noprint");
6456 }
6457 else
6458 validFlag = 0;
6459 }
6460 if (validFlag)
6461 handle_command (argBuf, from_tty);
6462 else
6463 printf_filtered (_("Invalid signal handling flag.\n"));
6464 if (argBuf)
6465 xfree (argBuf);
6466 }
6467 }
6468 do_cleanups (old_chain);
6469 }
6470
6471 enum target_signal
6472 target_signal_from_command (int num)
6473 {
6474 if (num >= 1 && num <= 15)
6475 return (enum target_signal) num;
6476 error (_("Only signals 1-15 are valid as numeric signals.\n\
6477 Use \"info signals\" for a list of symbolic signals."));
6478 }
6479
6480 /* Print current contents of the tables set by the handle command.
6481 It is possible we should just be printing signals actually used
6482 by the current target (but for things to work right when switching
6483 targets, all signals should be in the signal tables). */
6484
6485 static void
6486 signals_info (char *signum_exp, int from_tty)
6487 {
6488 enum target_signal oursig;
6489
6490 sig_print_header ();
6491
6492 if (signum_exp)
6493 {
6494 /* First see if this is a symbol name. */
6495 oursig = target_signal_from_name (signum_exp);
6496 if (oursig == TARGET_SIGNAL_UNKNOWN)
6497 {
6498 /* No, try numeric. */
6499 oursig =
6500 target_signal_from_command (parse_and_eval_long (signum_exp));
6501 }
6502 sig_print_info (oursig);
6503 return;
6504 }
6505
6506 printf_filtered ("\n");
6507 /* These ugly casts brought to you by the native VAX compiler. */
6508 for (oursig = TARGET_SIGNAL_FIRST;
6509 (int) oursig < (int) TARGET_SIGNAL_LAST;
6510 oursig = (enum target_signal) ((int) oursig + 1))
6511 {
6512 QUIT;
6513
6514 if (oursig != TARGET_SIGNAL_UNKNOWN
6515 && oursig != TARGET_SIGNAL_DEFAULT && oursig != TARGET_SIGNAL_0)
6516 sig_print_info (oursig);
6517 }
6518
6519 printf_filtered (_("\nUse the \"handle\" command "
6520 "to change these tables.\n"));
6521 }
6522
6523 /* Check if it makes sense to read $_siginfo from the current thread
6524 at this point. If not, throw an error. */
6525
6526 static void
6527 validate_siginfo_access (void)
6528 {
6529 /* No current inferior, no siginfo. */
6530 if (ptid_equal (inferior_ptid, null_ptid))
6531 error (_("No thread selected."));
6532
6533 /* Don't try to read from a dead thread. */
6534 if (is_exited (inferior_ptid))
6535     error (_("The current thread has terminated."));
6536
6537 /* ... or from a spinning thread. */
6538 if (is_running (inferior_ptid))
6539 error (_("Selected thread is running."));
6540 }
6541
6542 /* The $_siginfo convenience variable is a bit special. We don't know
6543 for sure the type of the value until we actually have a chance to
6544 fetch the data. The type can change depending on gdbarch, so it is
6545    also dependent on which thread you have selected.  We handle this by:
6546
6547 1. making $_siginfo be an internalvar that creates a new value on
6548 access.
6549
6550 2. making the value of $_siginfo be an lval_computed value. */
6551
6552 /* This function implements the lval_computed support for reading a
6553 $_siginfo value. */
6554
6555 static void
6556 siginfo_value_read (struct value *v)
6557 {
6558 LONGEST transferred;
6559
6560 validate_siginfo_access ();
6561
6562 transferred =
6563 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
6564 NULL,
6565 value_contents_all_raw (v),
6566 value_offset (v),
6567 TYPE_LENGTH (value_type (v)));
6568
6569 if (transferred != TYPE_LENGTH (value_type (v)))
6570 error (_("Unable to read siginfo"));
6571 }
6572
6573 /* This function implements the lval_computed support for writing a
6574 $_siginfo value. */
6575
6576 static void
6577 siginfo_value_write (struct value *v, struct value *fromval)
6578 {
6579 LONGEST transferred;
6580
6581 validate_siginfo_access ();
6582
6583 transferred = target_write (&current_target,
6584 TARGET_OBJECT_SIGNAL_INFO,
6585 NULL,
6586 value_contents_all_raw (fromval),
6587 value_offset (v),
6588 TYPE_LENGTH (value_type (fromval)));
6589
6590 if (transferred != TYPE_LENGTH (value_type (fromval)))
6591 error (_("Unable to write siginfo"));
6592 }
6593
6594 static const struct lval_funcs siginfo_value_funcs =
6595 {
6596 siginfo_value_read,
6597 siginfo_value_write
6598 };
6599
6600 /* Return a new value with the correct type for the siginfo object of
6601 the current thread using architecture GDBARCH. Return a void value
6602 if there's no object available. */
6603
6604 static struct value *
6605 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var)
6606 {
6607 if (target_has_stack
6608 && !ptid_equal (inferior_ptid, null_ptid)
6609 && gdbarch_get_siginfo_type_p (gdbarch))
6610 {
6611 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6612
6613 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
6614 }
6615
6616 return allocate_value (builtin_type (gdbarch)->builtin_void);
6617 }
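/* Illustrative usage (editorial sketch, not part of the original file):
   once the inferior has stopped with a signal, the lazily-typed
   convenience variable built above can be inspected or modified, e.g.

     (gdb) print $_siginfo.si_signo
     (gdb) print $_siginfo.si_errno

   Reads route through siginfo_value_read and writes through
   siginfo_value_write; the available field names depend on the
   target's gdbarch_get_siginfo_type.  */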
6618
6619 \f
6620 /* infcall_suspend_state contains state about the program itself like its
6621 registers and any signal it received when it last stopped.
6622 This state must be restored regardless of how the inferior function call
6623 ends (either successfully, or after it hits a breakpoint or signal)
6624 if the program is to properly continue where it left off. */
6625
6626 struct infcall_suspend_state
6627 {
6628 struct thread_suspend_state thread_suspend;
6629 struct inferior_suspend_state inferior_suspend;
6630
6631 /* Other fields: */
6632 CORE_ADDR stop_pc;
6633 struct regcache *registers;
6634
6635 /* Format of SIGINFO_DATA or NULL if it is not present. */
6636 struct gdbarch *siginfo_gdbarch;
6637
6638 /* The format of SIGINFO_DATA depends on SIGINFO_GDBARCH, and its length
6639 is TYPE_LENGTH (gdbarch_get_siginfo_type ()).  The content is only
6640 valid for that gdbarch; for a different gdbarch it would be invalid. */
6641 gdb_byte *siginfo_data;
6642 };
6643
6644 struct infcall_suspend_state *
6645 save_infcall_suspend_state (void)
6646 {
6647 struct infcall_suspend_state *inf_state;
6648 struct thread_info *tp = inferior_thread ();
6649 struct inferior *inf = current_inferior ();
6650 struct regcache *regcache = get_current_regcache ();
6651 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6652 gdb_byte *siginfo_data = NULL;
6653
6654 if (gdbarch_get_siginfo_type_p (gdbarch))
6655 {
6656 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6657 size_t len = TYPE_LENGTH (type);
6658 struct cleanup *back_to;
6659
6660 siginfo_data = xmalloc (len);
6661 back_to = make_cleanup (xfree, siginfo_data);
6662
6663 if (target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6664 siginfo_data, 0, len) == len)
6665 discard_cleanups (back_to);
6666 else
6667 {
6668 /* Errors ignored. */
6669 do_cleanups (back_to);
6670 siginfo_data = NULL;
6671 }
6672 }
6673
6674 inf_state = XZALLOC (struct infcall_suspend_state);
6675
6676 if (siginfo_data)
6677 {
6678 inf_state->siginfo_gdbarch = gdbarch;
6679 inf_state->siginfo_data = siginfo_data;
6680 }
6681
6682 inf_state->thread_suspend = tp->suspend;
6683 inf_state->inferior_suspend = inf->suspend;
6684
6685 /* run_inferior_call will not deliver this signal anyway, since its
6686 `proceed' call passes TARGET_SIGNAL_0. */
6687 tp->suspend.stop_signal = TARGET_SIGNAL_0;
6688
6689 inf_state->stop_pc = stop_pc;
6690
6691 inf_state->registers = regcache_dup (regcache);
6692
6693 return inf_state;
6694 }
6695
6696 /* Restore inferior session state to INF_STATE. */
6697
6698 void
6699 restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6700 {
6701 struct thread_info *tp = inferior_thread ();
6702 struct inferior *inf = current_inferior ();
6703 struct regcache *regcache = get_current_regcache ();
6704 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6705
6706 tp->suspend = inf_state->thread_suspend;
6707 inf->suspend = inf_state->inferior_suspend;
6708
6709 stop_pc = inf_state->stop_pc;
6710
6711 if (inf_state->siginfo_gdbarch == gdbarch)
6712 {
6713 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6714 size_t len = TYPE_LENGTH (type);
6715
6716 /* Errors ignored. */
6717 target_write (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6718 inf_state->siginfo_data, 0, len);
6719 }
6720
6721 /* The inferior can be gone if the user types "print exit(0)"
6722 (and perhaps other times). */
6723 if (target_has_execution)
6724 /* NB: The register write goes through to the target. */
6725 regcache_cpy (regcache, inf_state->registers);
6726
6727 discard_infcall_suspend_state (inf_state);
6728 }
6729
6730 static void
6731 do_restore_infcall_suspend_state_cleanup (void *state)
6732 {
6733 restore_infcall_suspend_state (state);
6734 }
6735
6736 struct cleanup *
6737 make_cleanup_restore_infcall_suspend_state
6738 (struct infcall_suspend_state *inf_state)
6739 {
6740 return make_cleanup (do_restore_infcall_suspend_state_cleanup, inf_state);
6741 }
6742
6743 void
6744 discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6745 {
6746 regcache_xfree (inf_state->registers);
6747 xfree (inf_state->siginfo_data);
6748 xfree (inf_state);
6749 }
6750
6751 struct regcache *
6752 get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
6753 {
6754 return inf_state->registers;
6755 }
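/* Editorial sketch (not part of the original file): the typical caller
   pattern for the suspend-state API above.  The function name below is
   hypothetical; in GDB the real user is the inferior-call code.  */

static void
example_run_protected_call (void)
{
  struct infcall_suspend_state *caller_state;
  struct cleanup *old_chain;

  /* Snapshot the registers, stop signal and siginfo of the current
     thread.  */
  caller_state = save_infcall_suspend_state ();
  old_chain = make_cleanup_restore_infcall_suspend_state (caller_state);

  /* ... set up and run the dummy-frame call here; an error thrown in
     this region unwinds through the cleanup and restores the
     snapshot ...  */

  /* Normal completion: restore the caller's state explicitly (this
     also frees CALLER_STATE via discard_infcall_suspend_state).  */
  do_cleanups (old_chain);
}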
6756
6757 /* infcall_control_state contains state regarding gdb's control of the
6758 inferior itself like stepping control. It also contains session state like
6759 the user's currently selected frame. */
6760
6761 struct infcall_control_state
6762 {
6763 struct thread_control_state thread_control;
6764 struct inferior_control_state inferior_control;
6765
6766 /* Other fields: */
6767 enum stop_stack_kind stop_stack_dummy;
6768 int stopped_by_random_signal;
6769 int stop_after_trap;
6770
6771 /* ID of the selected frame when the inferior function call was made. */
6772 struct frame_id selected_frame_id;
6773 };
6774
6775 /* Save all of the information associated with the inferior<==>gdb
6776 connection. */
6777
6778 struct infcall_control_state *
6779 save_infcall_control_state (void)
6780 {
6781 struct infcall_control_state *inf_status = xmalloc (sizeof (*inf_status));
6782 struct thread_info *tp = inferior_thread ();
6783 struct inferior *inf = current_inferior ();
6784
6785 inf_status->thread_control = tp->control;
6786 inf_status->inferior_control = inf->control;
6787
6788 tp->control.step_resume_breakpoint = NULL;
6789 tp->control.exception_resume_breakpoint = NULL;
6790
6791 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
6792 chain. If caller's caller is walking the chain, they'll be happier if we
6793 hand them back the original chain when restore_infcall_control_state is
6794 called. */
6795 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
6796
6797 /* Other fields: */
6798 inf_status->stop_stack_dummy = stop_stack_dummy;
6799 inf_status->stopped_by_random_signal = stopped_by_random_signal;
6800 inf_status->stop_after_trap = stop_after_trap;
6801
6802 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
6803
6804 return inf_status;
6805 }
6806
6807 static int
6808 restore_selected_frame (void *args)
6809 {
6810 struct frame_id *fid = (struct frame_id *) args;
6811 struct frame_info *frame;
6812
6813 frame = frame_find_by_id (*fid);
6814
6815 /* If inf_status->selected_frame_id is NULL, there was no previously
6816 selected frame. */
6817 if (frame == NULL)
6818 {
6819 warning (_("Unable to restore previously selected frame."));
6820 return 0;
6821 }
6822
6823 select_frame (frame);
6824
6825 return 1;
6826 }
6827
6828 /* Restore inferior session state to INF_STATUS. */
6829
6830 void
6831 restore_infcall_control_state (struct infcall_control_state *inf_status)
6832 {
6833 struct thread_info *tp = inferior_thread ();
6834 struct inferior *inf = current_inferior ();
6835
6836 if (tp->control.step_resume_breakpoint)
6837 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
6838
6839 if (tp->control.exception_resume_breakpoint)
6840 tp->control.exception_resume_breakpoint->disposition
6841 = disp_del_at_next_stop;
6842
6843 /* Handle the bpstat_copy of the chain. */
6844 bpstat_clear (&tp->control.stop_bpstat);
6845
6846 tp->control = inf_status->thread_control;
6847 inf->control = inf_status->inferior_control;
6848
6849 /* Other fields: */
6850 stop_stack_dummy = inf_status->stop_stack_dummy;
6851 stopped_by_random_signal = inf_status->stopped_by_random_signal;
6852 stop_after_trap = inf_status->stop_after_trap;
6853
6854 if (target_has_stack)
6855 {
6856 /* The point of catch_errors is that if the stack is clobbered,
6857 walking the stack might encounter a garbage pointer and
6858 error() trying to dereference it. */
6859 if (catch_errors
6860 (restore_selected_frame, &inf_status->selected_frame_id,
6861 "Unable to restore previously selected frame:\n",
6862 RETURN_MASK_ERROR) == 0)
6863 /* Error in restoring the selected frame. Select the innermost
6864 frame. */
6865 select_frame (get_current_frame ());
6866 }
6867
6868 xfree (inf_status);
6869 }
6870
6871 static void
6872 do_restore_infcall_control_state_cleanup (void *sts)
6873 {
6874 restore_infcall_control_state (sts);
6875 }
6876
6877 struct cleanup *
6878 make_cleanup_restore_infcall_control_state
6879 (struct infcall_control_state *inf_status)
6880 {
6881 return make_cleanup (do_restore_infcall_control_state_cleanup, inf_status);
6882 }
6883
6884 void
6885 discard_infcall_control_state (struct infcall_control_state *inf_status)
6886 {
6887 if (inf_status->thread_control.step_resume_breakpoint)
6888 inf_status->thread_control.step_resume_breakpoint->disposition
6889 = disp_del_at_next_stop;
6890
6891 if (inf_status->thread_control.exception_resume_breakpoint)
6892 inf_status->thread_control.exception_resume_breakpoint->disposition
6893 = disp_del_at_next_stop;
6894
6895 /* See save_infcall_control_state for info on stop_bpstat. */
6896 bpstat_clear (&inf_status->thread_control.stop_bpstat);
6897
6898 xfree (inf_status);
6899 }
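/* Editorial sketch (not part of the original file): the control-state
   API is normally paired with a cleanup so that stepping state, the
   bpstat chain and the selected frame are put back if the protected
   region throws; on success the saved copy is simply discarded.  The
   function name below is hypothetical.  */

static void
example_protected_region (void)
{
  struct infcall_control_state *inf_status = save_infcall_control_state ();
  struct cleanup *chain
    = make_cleanup_restore_infcall_control_state (inf_status);

  /* ... code that may throw; an error unwinds through the cleanup and
     calls restore_infcall_control_state ...  */

  /* Success path: keep the new state, drop the cleanup and free the
     saved copy.  */
  discard_cleanups (chain);
  discard_infcall_control_state (inf_status);
}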
6900 \f
6901 int
6902 ptid_match (ptid_t ptid, ptid_t filter)
6903 {
6904 if (ptid_equal (filter, minus_one_ptid))
6905 return 1;
6906 if (ptid_is_pid (filter)
6907 && ptid_get_pid (ptid) == ptid_get_pid (filter))
6908 return 1;
6909 else if (ptid_equal (ptid, filter))
6910 return 1;
6911
6912 return 0;
6913 }
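/* Editorial sketch (not part of the original file): how the FILTER
   argument of ptid_match behaves, using the ptid constructors from
   ptid.h.  The function name is hypothetical.  */

static int
example_event_is_for_process (ptid_t event_ptid, int pid)
{
  /* A pid-only filter matches any thread of that process; a
     fully-specified ptid would require an exact match, and
     minus_one_ptid would match everything.  */
  return ptid_match (event_ptid, pid_to_ptid (pid));
}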
6914
6915 /* restore_inferior_ptid() will be used by the cleanup machinery
6916 to restore the inferior_ptid value saved in a call to
6917 save_inferior_ptid(). */
6918
6919 static void
6920 restore_inferior_ptid (void *arg)
6921 {
6922 ptid_t *saved_ptid_ptr = arg;
6923
6924 inferior_ptid = *saved_ptid_ptr;
6925 xfree (arg);
6926 }
6927
6928 /* Save the value of inferior_ptid so that it may be restored by a
6929 later call to do_cleanups(). Returns the struct cleanup pointer
6930 needed for later doing the cleanup. */
6931
6932 struct cleanup *
6933 save_inferior_ptid (void)
6934 {
6935 ptid_t *saved_ptid_ptr;
6936
6937 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
6938 *saved_ptid_ptr = inferior_ptid;
6939 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
6940 }
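/* Editorial sketch (not part of the original file): the usual pattern
   for temporarily switching inferior_ptid.  The cleanup guarantees the
   old value is restored even if an error is thrown while TEMP_PTID is
   selected.  The function name is hypothetical.  */

static void
example_with_temporary_ptid (ptid_t temp_ptid)
{
  struct cleanup *old_chain = save_inferior_ptid ();

  inferior_ptid = temp_ptid;
  /* ... operate on TEMP_PTID here ...  */

  do_cleanups (old_chain);	/* Restores the saved inferior_ptid.  */
}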
6941 \f
6942
6943 /* User interface for reverse debugging:
6944 Set exec-direction / show exec-direction commands
6945 (gives an error unless the target supports reverse execution). */
6946
6947 int execution_direction = EXEC_FORWARD;
6948 static const char exec_forward[] = "forward";
6949 static const char exec_reverse[] = "reverse";
6950 static const char *exec_direction = exec_forward;
6951 static const char *const exec_direction_names[] = {
6952 exec_forward,
6953 exec_reverse,
6954 NULL
6955 };
6956
6957 static void
6958 set_exec_direction_func (char *args, int from_tty,
6959 struct cmd_list_element *cmd)
6960 {
6961 if (target_can_execute_reverse)
6962 {
6963 if (!strcmp (exec_direction, exec_forward))
6964 execution_direction = EXEC_FORWARD;
6965 else if (!strcmp (exec_direction, exec_reverse))
6966 execution_direction = EXEC_REVERSE;
6967 }
6968 else
6969 {
6970 exec_direction = exec_forward;
6971 error (_("Target does not support this operation."));
6972 }
6973 }
6974
6975 static void
6976 show_exec_direction_func (struct ui_file *out, int from_tty,
6977 struct cmd_list_element *cmd, const char *value)
6978 {
6979 switch (execution_direction) {
6980 case EXEC_FORWARD:
6981 fprintf_filtered (out, _("Forward.\n"));
6982 break;
6983 case EXEC_REVERSE:
6984 fprintf_filtered (out, _("Reverse.\n"));
6985 break;
6986 default:
6987 internal_error (__FILE__, __LINE__,
6988 _("bogus execution_direction value: %d"),
6989 (int) execution_direction);
6990 }
6991 }
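/* Illustrative usage (editorial sketch, not part of the original file):
   reverse execution is only accepted when the target can execute in
   reverse, for example under process record:

     (gdb) record
     (gdb) set exec-direction reverse
     (gdb) step

   Otherwise set_exec_direction_func resets the setting and reports
   "Target does not support this operation.".  */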
6992
6993 /* User interface for non-stop mode. */
6994
6995 int non_stop = 0;
6996
6997 static void
6998 set_non_stop (char *args, int from_tty,
6999 struct cmd_list_element *c)
7000 {
7001 if (target_has_execution)
7002 {
7003 non_stop_1 = non_stop;
7004 error (_("Cannot change this setting while the inferior is running."));
7005 }
7006
7007 non_stop = non_stop_1;
7008 }
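/* Illustrative usage (editorial sketch, not part of the original file):
   because set_non_stop rejects changes while the inferior is executing,
   the mode has to be selected before starting the program, e.g.

     (gdb) set non-stop on
     (gdb) run

   (Depending on the target, asynchronous execution may also need to be
   enabled for non-stop to be usable.)  */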
7009
7010 static void
7011 show_non_stop (struct ui_file *file, int from_tty,
7012 struct cmd_list_element *c, const char *value)
7013 {
7014 fprintf_filtered (file,
7015 _("Controlling the inferior in non-stop mode is %s.\n"),
7016 value);
7017 }
7018
7019 static void
7020 show_schedule_multiple (struct ui_file *file, int from_tty,
7021 struct cmd_list_element *c, const char *value)
7022 {
7023 fprintf_filtered (file, _("Resuming the execution of threads "
7024 "of all processes is %s.\n"), value);
7025 }
7026
7027 void
7028 _initialize_infrun (void)
7029 {
7030 int i;
7031 int numsigs;
7032
7033 add_info ("signals", signals_info, _("\
7034 What debugger does when program gets various signals.\n\
7035 Specify a signal as argument to print info on that signal only."));
7036 add_info_alias ("handle", "signals", 0);
7037
7038 add_com ("handle", class_run, handle_command, _("\
7039 Specify how to handle a signal.\n\
7040 Args are signals and actions to apply to those signals.\n\
7041 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7042 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7043 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7044 The special arg \"all\" is recognized to mean all signals except those\n\
7045 used by the debugger, typically SIGTRAP and SIGINT.\n\
7046 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
7047 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
7048 Stop means reenter debugger if this signal happens (implies print).\n\
7049 Print means print a message if this signal happens.\n\
7050 Pass means let program see this signal; otherwise program doesn't know.\n\
7051 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7052 Pass and Stop may be combined."));
7053 if (xdb_commands)
7054 {
7055 add_com ("lz", class_info, signals_info, _("\
7056 What debugger does when program gets various signals.\n\
7057 Specify a signal as argument to print info on that signal only."));
7058 add_com ("z", class_run, xdb_handle_command, _("\
7059 Specify how to handle a signal.\n\
7060 Args are signals and actions to apply to those signals.\n\
7061 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7062 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7063 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7064 The special arg \"all\" is recognized to mean all signals except those\n\
7065 used by the debugger, typically SIGTRAP and SIGINT.\n\
7066 Recognized actions include \"s\" (toggles between stop and nostop),\n\
7067 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
7068 nopass), \"Q\" (noprint)\n\
7069 Stop means reenter debugger if this signal happens (implies print).\n\
7070 Print means print a message if this signal happens.\n\
7071 Pass means let program see this signal; otherwise program doesn't know.\n\
7072 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7073 Pass and Stop may be combined."));
7074 }
7075
7076 if (!dbx_commands)
7077 stop_command = add_cmd ("stop", class_obscure,
7078 not_just_help_class_command, _("\
7079 There is no `stop' command, but you can set a hook on `stop'.\n\
7080 This allows you to set a list of commands to be run each time execution\n\
7081 of the program stops."), &cmdlist);
7082
7083 add_setshow_zinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
7084 Set inferior debugging."), _("\
7085 Show inferior debugging."), _("\
7086 When non-zero, inferior specific debugging is enabled."),
7087 NULL,
7088 show_debug_infrun,
7089 &setdebuglist, &showdebuglist);
7090
7091 add_setshow_boolean_cmd ("displaced", class_maintenance,
7092 &debug_displaced, _("\
7093 Set displaced stepping debugging."), _("\
7094 Show displaced stepping debugging."), _("\
7095 When non-zero, displaced stepping specific debugging is enabled."),
7096 NULL,
7097 show_debug_displaced,
7098 &setdebuglist, &showdebuglist);
7099
7100 add_setshow_boolean_cmd ("non-stop", no_class,
7101 &non_stop_1, _("\
7102 Set whether gdb controls the inferior in non-stop mode."), _("\
7103 Show whether gdb controls the inferior in non-stop mode."), _("\
7104 When debugging a multi-threaded program and this setting is\n\
7105 off (the default, also called all-stop mode), when one thread stops\n\
7106 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
7107 all other threads in the program while you interact with the thread of\n\
7108 interest. When you continue or step a thread, you can allow the other\n\
7109 threads to run, or have them remain stopped, but while you inspect any\n\
7110 thread's state, all threads stop.\n\
7111 \n\
7112 In non-stop mode, when one thread stops, other threads can continue\n\
7113 to run freely. You'll be able to step each thread independently,\n\
7114 leave it stopped or free to run as needed."),
7115 set_non_stop,
7116 show_non_stop,
7117 &setlist,
7118 &showlist);
7119
7120 numsigs = (int) TARGET_SIGNAL_LAST;
7121 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
7122 signal_print = (unsigned char *)
7123 xmalloc (sizeof (signal_print[0]) * numsigs);
7124 signal_program = (unsigned char *)
7125 xmalloc (sizeof (signal_program[0]) * numsigs);
7126 signal_pass = (unsigned char *)
7127 xmalloc (sizeof (signal_pass[0]) * numsigs);
7128 for (i = 0; i < numsigs; i++)
7129 {
7130 signal_stop[i] = 1;
7131 signal_print[i] = 1;
7132 signal_program[i] = 1;
7133 }
7134
7135 /* Signals caused by debugger's own actions
7136 should not be given to the program afterwards. */
7137 signal_program[TARGET_SIGNAL_TRAP] = 0;
7138 signal_program[TARGET_SIGNAL_INT] = 0;
7139
7140 /* Signals that are not errors should not normally enter the debugger. */
7141 signal_stop[TARGET_SIGNAL_ALRM] = 0;
7142 signal_print[TARGET_SIGNAL_ALRM] = 0;
7143 signal_stop[TARGET_SIGNAL_VTALRM] = 0;
7144 signal_print[TARGET_SIGNAL_VTALRM] = 0;
7145 signal_stop[TARGET_SIGNAL_PROF] = 0;
7146 signal_print[TARGET_SIGNAL_PROF] = 0;
7147 signal_stop[TARGET_SIGNAL_CHLD] = 0;
7148 signal_print[TARGET_SIGNAL_CHLD] = 0;
7149 signal_stop[TARGET_SIGNAL_IO] = 0;
7150 signal_print[TARGET_SIGNAL_IO] = 0;
7151 signal_stop[TARGET_SIGNAL_POLL] = 0;
7152 signal_print[TARGET_SIGNAL_POLL] = 0;
7153 signal_stop[TARGET_SIGNAL_URG] = 0;
7154 signal_print[TARGET_SIGNAL_URG] = 0;
7155 signal_stop[TARGET_SIGNAL_WINCH] = 0;
7156 signal_print[TARGET_SIGNAL_WINCH] = 0;
7157 signal_stop[TARGET_SIGNAL_PRIO] = 0;
7158 signal_print[TARGET_SIGNAL_PRIO] = 0;
7159
7160 /* These signals are used internally by user-level thread
7161 implementations. (See signal(5) on Solaris.) Like the above
7162 signals, a healthy program receives and handles them as part of
7163 its normal operation. */
7164 signal_stop[TARGET_SIGNAL_LWP] = 0;
7165 signal_print[TARGET_SIGNAL_LWP] = 0;
7166 signal_stop[TARGET_SIGNAL_WAITING] = 0;
7167 signal_print[TARGET_SIGNAL_WAITING] = 0;
7168 signal_stop[TARGET_SIGNAL_CANCEL] = 0;
7169 signal_print[TARGET_SIGNAL_CANCEL] = 0;
7170
7171 /* Update cached state. */
7172 signal_cache_update (-1);
7173
7174 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
7175 &stop_on_solib_events, _("\
7176 Set stopping for shared library events."), _("\
7177 Show stopping for shared library events."), _("\
7178 If nonzero, gdb will give control to the user when the dynamic linker\n\
7179 notifies gdb of shared library events. The most common event of interest\n\
7180 to the user would be loading/unloading of a new library."),
7181 NULL,
7182 show_stop_on_solib_events,
7183 &setlist, &showlist);
7184
7185 add_setshow_enum_cmd ("follow-fork-mode", class_run,
7186 follow_fork_mode_kind_names,
7187 &follow_fork_mode_string, _("\
7188 Set debugger response to a program call of fork or vfork."), _("\
7189 Show debugger response to a program call of fork or vfork."), _("\
7190 A fork or vfork creates a new process. follow-fork-mode can be:\n\
7191 parent - the original process is debugged after a fork\n\
7192 child - the new process is debugged after a fork\n\
7193 The unfollowed process will continue to run.\n\
7194 By default, the debugger will follow the parent process."),
7195 NULL,
7196 show_follow_fork_mode_string,
7197 &setlist, &showlist);
7198
7199 add_setshow_enum_cmd ("follow-exec-mode", class_run,
7200 follow_exec_mode_names,
7201 &follow_exec_mode_string, _("\
7202 Set debugger response to a program call of exec."), _("\
7203 Show debugger response to a program call of exec."), _("\
7204 An exec call replaces the program image of a process.\n\
7205 \n\
7206 follow-exec-mode can be:\n\
7207 \n\
7208 new - the debugger creates a new inferior and rebinds the process\n\
7209 to this new inferior. The program the process was running before\n\
7210 the exec call can be restarted afterwards by restarting the original\n\
7211 inferior.\n\
7212 \n\
7213 same - the debugger keeps the process bound to the same inferior.\n\
7214 The new executable image replaces the previous executable loaded in\n\
7215 the inferior. Restarting the inferior after the exec call restarts\n\
7216 the executable the process was running after the exec call.\n\
7217 \n\
7218 By default, the debugger will use the same inferior."),
7219 NULL,
7220 show_follow_exec_mode_string,
7221 &setlist, &showlist);
7222
7223 add_setshow_enum_cmd ("scheduler-locking", class_run,
7224 scheduler_enums, &scheduler_mode, _("\
7225 Set mode for locking scheduler during execution."), _("\
7226 Show mode for locking scheduler during execution."), _("\
7227 off == no locking (threads may preempt at any time)\n\
7228 on == full locking (no thread except the current thread may run)\n\
7229 step == scheduler locked during every single-step operation.\n\
7230 In this mode, no other thread may run during a step command.\n\
7231 Other threads may run while stepping over a function call ('next')."),
7232 set_schedlock_func, /* traps on target vector */
7233 show_scheduler_mode,
7234 &setlist, &showlist);
7235
7236 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
7237 Set mode for resuming threads of all processes."), _("\
7238 Show mode for resuming threads of all processes."), _("\
7239 When on, execution commands (such as 'continue' or 'next') resume all\n\
7240 threads of all processes. When off (which is the default), execution\n\
7241 commands only resume the threads of the current process. The set of\n\
7242 threads that are resumed is further refined by the scheduler-locking\n\
7243 mode (see help set scheduler-locking)."),
7244 NULL,
7245 show_schedule_multiple,
7246 &setlist, &showlist);
7247
7248 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
7249 Set mode of the step operation."), _("\
7250 Show mode of the step operation."), _("\
7251 When set, doing a step over a function without debug line information\n\
7252 will stop at the first instruction of that function. Otherwise, the\n\
7253 function is skipped and the step command stops at a different source line."),
7254 NULL,
7255 show_step_stop_if_no_debug,
7256 &setlist, &showlist);
7257
7258 add_setshow_enum_cmd ("displaced-stepping", class_run,
7259 can_use_displaced_stepping_enum,
7260 &can_use_displaced_stepping, _("\
7261 Set debugger's willingness to use displaced stepping."), _("\
7262 Show debugger's willingness to use displaced stepping."), _("\
7263 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
7264 supported by the target architecture. If off, gdb will not use displaced\n\
7265 stepping to step over breakpoints, even if such is supported by the target\n\
7266 architecture. If auto (which is the default), gdb will use displaced stepping\n\
7267 if the target architecture supports it and non-stop mode is active, but will not\n\
7268 use it in all-stop mode (see help set non-stop)."),
7269 NULL,
7270 show_can_use_displaced_stepping,
7271 &setlist, &showlist);
7272
7273 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
7274 &exec_direction, _("Set direction of execution.\n\
7275 Options are 'forward' or 'reverse'."),
7276 _("Show direction of execution (forward/reverse)."),
7277 _("Tells gdb whether to execute forward or backward."),
7278 set_exec_direction_func, show_exec_direction_func,
7279 &setlist, &showlist);
7280
7281 /* Set/show detach-on-fork: user-settable mode. */
7282
7283 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
7284 Set whether gdb will detach the child of a fork."), _("\
7285 Show whether gdb will detach the child of a fork."), _("\
7286 Tells gdb whether to detach the child of a fork."),
7287 NULL, NULL, &setlist, &showlist);
7288
7289 /* Set/show disable address space randomization mode. */
7290
7291 add_setshow_boolean_cmd ("disable-randomization", class_support,
7292 &disable_randomization, _("\
7293 Set disabling of debuggee's virtual address space randomization."), _("\
7294 Show disabling of debuggee's virtual address space randomization."), _("\
7295 When this mode is on (which is the default), randomization of the virtual\n\
7296 address space is disabled. Standalone programs run with the randomization\n\
7297 enabled by default on some platforms."),
7298 &set_disable_randomization,
7299 &show_disable_randomization,
7300 &setlist, &showlist);
7301
7302 /* ptid initializations */
7303 inferior_ptid = null_ptid;
7304 target_last_wait_ptid = minus_one_ptid;
7305
7306 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
7307 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
7308 observer_attach_thread_exit (infrun_thread_thread_exit);
7309 observer_attach_inferior_exit (infrun_inferior_exit);
7310
7311 /* Explicitly create without lookup, since that tries to create a
7312 value with a void typed value, and when we get here, gdbarch
7313 isn't initialized yet. At this point, we're quite sure there
7314 isn't another convenience variable of the same name. */
7315 create_internalvar_type_lazy ("_siginfo", siginfo_make_value);
7316
7317 add_setshow_boolean_cmd ("observer", no_class,
7318 &observer_mode_1, _("\
7319 Set whether gdb controls the inferior in observer mode."), _("\
7320 Show whether gdb controls the inferior in observer mode."), _("\
7321 In observer mode, GDB can get data from the inferior, but not\n\
7322 affect its execution. Registers and memory may not be changed,\n\
7323 breakpoints may not be set, and the program cannot be interrupted\n\
7324 or signalled."),
7325 set_observer_mode,
7326 show_observer_mode,
7327 &setlist,
7328 &showlist);
7329 }