1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986-2012 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include "gdb_string.h"
23 #include <ctype.h>
24 #include "symtab.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "exceptions.h"
28 #include "breakpoint.h"
29 #include "gdb_wait.h"
30 #include "gdbcore.h"
31 #include "gdbcmd.h"
32 #include "cli/cli-script.h"
33 #include "target.h"
34 #include "gdbthread.h"
35 #include "annotate.h"
36 #include "symfile.h"
37 #include "top.h"
38 #include <signal.h>
39 #include "inf-loop.h"
40 #include "regcache.h"
41 #include "value.h"
42 #include "observer.h"
43 #include "language.h"
44 #include "solib.h"
45 #include "main.h"
46 #include "dictionary.h"
47 #include "block.h"
48 #include "gdb_assert.h"
49 #include "mi/mi-common.h"
50 #include "event-top.h"
51 #include "record.h"
52 #include "inline-frame.h"
53 #include "jit.h"
54 #include "tracepoint.h"
55 #include "continuations.h"
56 #include "interps.h"
57 #include "skip.h"
58
59 /* Prototypes for local functions */
60
61 static void signals_info (char *, int);
62
63 static void handle_command (char *, int);
64
65 static void sig_print_info (enum target_signal);
66
67 static void sig_print_header (void);
68
69 static void resume_cleanups (void *);
70
71 static int hook_stop_stub (void *);
72
73 static int restore_selected_frame (void *);
74
75 static int follow_fork (void);
76
77 static void set_schedlock_func (char *args, int from_tty,
78 struct cmd_list_element *c);
79
80 static int currently_stepping (struct thread_info *tp);
81
82 static int currently_stepping_or_nexting_callback (struct thread_info *tp,
83 void *data);
84
85 static void xdb_handle_command (char *args, int from_tty);
86
87 static int prepare_to_proceed (int);
88
89 static void print_exited_reason (int exitstatus);
90
91 static void print_signal_exited_reason (enum target_signal siggnal);
92
93 static void print_no_history_reason (void);
94
95 static void print_signal_received_reason (enum target_signal siggnal);
96
97 static void print_end_stepping_range_reason (void);
98
99 void _initialize_infrun (void);
100
101 void nullify_last_target_wait_ptid (void);
102
103 static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);
104
105 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
106
107 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
108
109 /* When set, stop the 'step' command if we enter a function which has
110 no line number information. The normal behavior is to step over
111 such functions. */
112 int step_stop_if_no_debug = 0;
113 static void
114 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
115 struct cmd_list_element *c, const char *value)
116 {
117 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
118 }
119
120 /* In asynchronous mode, but simulating synchronous execution. */
121
122 int sync_execution = 0;
123
124 /* wait_for_inferior and normal_stop use this to notify the user
125 when the inferior stopped in a different thread than it had been
126 running in. */
127
128 static ptid_t previous_inferior_ptid;
129
130 /* Default behavior is to detach newly forked processes (legacy). */
131 int detach_fork = 1;
132
133 int debug_displaced = 0;
134 static void
135 show_debug_displaced (struct ui_file *file, int from_tty,
136 struct cmd_list_element *c, const char *value)
137 {
138 fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
139 }
140
141 int debug_infrun = 0;
142 static void
143 show_debug_infrun (struct ui_file *file, int from_tty,
144 struct cmd_list_element *c, const char *value)
145 {
146 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
147 }
148
149
150 /* Support for disabling address space randomization. */
151
152 int disable_randomization = 1;
153
154 static void
155 show_disable_randomization (struct ui_file *file, int from_tty,
156 struct cmd_list_element *c, const char *value)
157 {
158 if (target_supports_disable_randomization ())
159 fprintf_filtered (file,
160 _("Disabling randomization of debuggee's "
161 "virtual address space is %s.\n"),
162 value);
163 else
164 fputs_filtered (_("Disabling randomization of debuggee's "
165 "virtual address space is unsupported on\n"
166 "this platform.\n"), file);
167 }
168
169 static void
170 set_disable_randomization (char *args, int from_tty,
171 struct cmd_list_element *c)
172 {
173 if (!target_supports_disable_randomization ())
174 error (_("Disabling randomization of debuggee's "
175 "virtual address space is unsupported on\n"
176 "this platform."));
177 }
178
179
180 /* If the program uses ELF-style shared libraries, then calls to
181 functions in shared libraries go through stubs, which live in a
182 table called the PLT (Procedure Linkage Table). The first time the
183 function is called, the stub sends control to the dynamic linker,
184 which looks up the function's real address, patches the stub so
185 that future calls will go directly to the function, and then passes
186 control to the function.
187
188 If we are stepping at the source level, we don't want to see any of
189 this --- we just want to skip over the stub and the dynamic linker.
190 The simple approach is to single-step until control leaves the
191 dynamic linker.
192
193 However, on some systems (e.g., Red Hat's 5.2 distribution) the
194 dynamic linker calls functions in the shared C library, so you
195 can't tell from the PC alone whether the dynamic linker is still
196 running. In this case, we use a step-resume breakpoint to get us
197 past the dynamic linker, as if we were using "next" to step over a
198 function call.
199
200 in_solib_dynsym_resolve_code() says whether we're in the dynamic
201 linker code or not. Normally, this means we single-step. However,
202 if SKIP_SOLIB_RESOLVER returns non-zero, then its value is an
203 address where we can place a step-resume breakpoint to get past the
204 linker's symbol resolution function.
205
206 in_solib_dynsym_resolve_code() can generally be implemented in a
207 pretty portable way, by comparing the PC against the address ranges
208 of the dynamic linker's sections.
209
210 SKIP_SOLIB_RESOLVER is generally going to be system-specific, since
211 it depends on internal details of the dynamic linker. It's usually
212 not too hard to figure out where to put a breakpoint, but it
213 certainly isn't portable. SKIP_SOLIB_RESOLVER should do plenty of
214 sanity checking. If it can't figure things out, returning zero and
215 getting the (possibly confusing) stepping behavior is better than
216 signalling an error, which will obscure the change in the
217 inferior's state. */
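
/* As a minimal sketch, the decision described above amounts to the
   following, using the in_solib_dynsym_resolve_code and
   SKIP_SOLIB_RESOLVER hooks named in that comment.  Here
   insert_step_resume_breakpoint_at and keep_single_stepping are
   hypothetical stand-ins for the step-resume breakpoint and stepping
   machinery implemented further down in this file:

     if (in_solib_dynsym_resolve_code (stop_pc))
       {
         CORE_ADDR resolver = SKIP_SOLIB_RESOLVER (stop_pc);

         if (resolver != 0)
           insert_step_resume_breakpoint_at (resolver);
         else
           keep_single_stepping ();
       }  */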
218
219 /* This function returns TRUE if pc is the address of an instruction
220 that lies within the dynamic linker (such as the event hook, or the
221 dld itself).
222
223 This function must be used only when a dynamic linker event has
224 been caught, and the inferior is being stepped out of the hook, or
225 undefined results are guaranteed. */
226
227 #ifndef SOLIB_IN_DYNAMIC_LINKER
228 #define SOLIB_IN_DYNAMIC_LINKER(pid,pc) 0
229 #endif
230
231 /* "Observer mode" is somewhat like a more extreme version of
232 non-stop, in which all GDB operations that might affect the
233 target's execution have been disabled. */
234
235 static int non_stop_1 = 0;
236
237 int observer_mode = 0;
238 static int observer_mode_1 = 0;
239
240 static void
241 set_observer_mode (char *args, int from_tty,
242 struct cmd_list_element *c)
243 {
244 extern int pagination_enabled;
245
246 if (target_has_execution)
247 {
248 observer_mode_1 = observer_mode;
249 error (_("Cannot change this setting while the inferior is running."));
250 }
251
252 observer_mode = observer_mode_1;
253
254 may_write_registers = !observer_mode;
255 may_write_memory = !observer_mode;
256 may_insert_breakpoints = !observer_mode;
257 may_insert_tracepoints = !observer_mode;
258 /* We can insert fast tracepoints in or out of observer mode,
259 but enable them if we're going into this mode. */
260 if (observer_mode)
261 may_insert_fast_tracepoints = 1;
262 may_stop = !observer_mode;
263 update_target_permissions ();
264
265 /* Going *into* observer mode we must force non-stop, then
266 going out we leave it that way. */
267 if (observer_mode)
268 {
269 target_async_permitted = 1;
270 pagination_enabled = 0;
271 non_stop = non_stop_1 = 1;
272 }
273
274 if (from_tty)
275 printf_filtered (_("Observer mode is now %s.\n"),
276 (observer_mode ? "on" : "off"));
277 }
278
279 static void
280 show_observer_mode (struct ui_file *file, int from_tty,
281 struct cmd_list_element *c, const char *value)
282 {
283 fprintf_filtered (file, _("Observer mode is %s.\n"), value);
284 }
285
286 /* This updates the value of observer mode based on changes in
287 permissions. Note that we are deliberately ignoring the values of
288 may-write-registers and may-write-memory, since the user may have
289 reason to enable these during a session, for instance to turn on a
290 debugging-related global. */
291
292 void
293 update_observer_mode (void)
294 {
295 int newval;
296
297 newval = (!may_insert_breakpoints
298 && !may_insert_tracepoints
299 && may_insert_fast_tracepoints
300 && !may_stop
301 && non_stop);
302
303 /* Let the user know if things change. */
304 if (newval != observer_mode)
305 printf_filtered (_("Observer mode is now %s.\n"),
306 (newval ? "on" : "off"));
307
308 observer_mode = observer_mode_1 = newval;
309 }
310
311 /* Tables of how to react to signals; the user sets them. */
312
313 static unsigned char *signal_stop;
314 static unsigned char *signal_print;
315 static unsigned char *signal_program;
316
317 /* Table of signals that the target may silently handle.
318 This is automatically determined from the flags above,
319 and simply cached here. */
320 static unsigned char *signal_pass;
321
322 #define SET_SIGS(nsigs,sigs,flags) \
323 do { \
324 int signum = (nsigs); \
325 while (signum-- > 0) \
326 if ((sigs)[signum]) \
327 (flags)[signum] = 1; \
328 } while (0)
329
330 #define UNSET_SIGS(nsigs,sigs,flags) \
331 do { \
332 int signum = (nsigs); \
333 while (signum-- > 0) \
334 if ((sigs)[signum]) \
335 (flags)[signum] = 0; \
336 } while (0)
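
/* Usage sketch: handle_command (prototyped above) builds a SIGS array
   with a 1 for every signal the user named, and then applies the
   requested keyword to the flag tables, e.g. for "handle ... stop"
   (which implies "print"):

     SET_SIGS (nsigs, sigs, signal_stop);
     SET_SIGS (nsigs, sigs, signal_print);

   and for "handle ... nopass":

     UNSET_SIGS (nsigs, sigs, signal_program);  */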
337
338 /* Value to pass to target_resume() to cause all threads to resume. */
339
340 #define RESUME_ALL minus_one_ptid
341
342 /* Command list pointer for the "stop" placeholder. */
343
344 static struct cmd_list_element *stop_command;
345
346 /* Function the inferior was in as of the last step command. */
347
348 static struct symbol *step_start_function;
349
350 /* Nonzero if we want to give control to the user when we're notified
351 of shared library events by the dynamic linker. */
352 int stop_on_solib_events;
353 static void
354 show_stop_on_solib_events (struct ui_file *file, int from_tty,
355 struct cmd_list_element *c, const char *value)
356 {
357 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
358 value);
359 }
360
361 /* Nonzero means we are expecting a trace trap
362 and should stop the inferior and return silently when it happens. */
363
364 int stop_after_trap;
365
366 /* Save register contents here when executing a "finish" command or when
367 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
368 Thus this contains the return value from the called function (assuming
369 values are returned in a register). */
370
371 struct regcache *stop_registers;
372
373 /* Nonzero after stop if current stack frame should be printed. */
374
375 static int stop_print_frame;
376
377 /* This is a cached copy of the pid/waitstatus of the last event
378 returned by target_wait()/deprecated_target_wait_hook(). This
379 information is returned by get_last_target_status(). */
380 static ptid_t target_last_wait_ptid;
381 static struct target_waitstatus target_last_waitstatus;
382
383 static void context_switch (ptid_t ptid);
384
385 void init_thread_stepping_state (struct thread_info *tss);
386
387 void init_infwait_state (void);
388
389 static const char follow_fork_mode_child[] = "child";
390 static const char follow_fork_mode_parent[] = "parent";
391
392 static const char *follow_fork_mode_kind_names[] = {
393 follow_fork_mode_child,
394 follow_fork_mode_parent,
395 NULL
396 };
397
398 static const char *follow_fork_mode_string = follow_fork_mode_parent;
399 static void
400 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
401 struct cmd_list_element *c, const char *value)
402 {
403 fprintf_filtered (file,
404 _("Debugger response to a program "
405 "call of fork or vfork is \"%s\".\n"),
406 value);
407 }
408 \f
409
410 /* Tell the target to follow the fork we're stopped at. Returns true
411 if the inferior should be resumed; false, if the target for some
412 reason decided it's best not to resume. */
413
414 static int
415 follow_fork (void)
416 {
417 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
418 int should_resume = 1;
419 struct thread_info *tp;
420
421 /* Copy user stepping state to the new inferior thread. FIXME: the
422 followed fork child thread should have a copy of most of the
423 parent thread structure's run control related fields, not just these.
424 Initialized to avoid "may be used uninitialized" warnings from gcc. */
425 struct breakpoint *step_resume_breakpoint = NULL;
426 struct breakpoint *exception_resume_breakpoint = NULL;
427 CORE_ADDR step_range_start = 0;
428 CORE_ADDR step_range_end = 0;
429 struct frame_id step_frame_id = { 0 };
430
431 if (!non_stop)
432 {
433 ptid_t wait_ptid;
434 struct target_waitstatus wait_status;
435
436 /* Get the last target status returned by target_wait(). */
437 get_last_target_status (&wait_ptid, &wait_status);
438
439 /* If not stopped at a fork event, then there's nothing else to
440 do. */
441 if (wait_status.kind != TARGET_WAITKIND_FORKED
442 && wait_status.kind != TARGET_WAITKIND_VFORKED)
443 return 1;
444
445 /* Check if we switched over from WAIT_PTID, since the event was
446 reported. */
447 if (!ptid_equal (wait_ptid, minus_one_ptid)
448 && !ptid_equal (inferior_ptid, wait_ptid))
449 {
450 /* We did. Switch back to WAIT_PTID thread, to tell the
451 target to follow it (in either direction). We'll
452 afterwards refuse to resume, and inform the user what
453 happened. */
454 switch_to_thread (wait_ptid);
455 should_resume = 0;
456 }
457 }
458
459 tp = inferior_thread ();
460
461 /* If there were any forks/vforks that were caught and are now to be
462 followed, then do so now. */
463 switch (tp->pending_follow.kind)
464 {
465 case TARGET_WAITKIND_FORKED:
466 case TARGET_WAITKIND_VFORKED:
467 {
468 ptid_t parent, child;
469
470 /* If the user did a next/step, etc, over a fork call,
471 preserve the stepping state in the fork child. */
472 if (follow_child && should_resume)
473 {
474 step_resume_breakpoint = clone_momentary_breakpoint
475 (tp->control.step_resume_breakpoint);
476 step_range_start = tp->control.step_range_start;
477 step_range_end = tp->control.step_range_end;
478 step_frame_id = tp->control.step_frame_id;
479 exception_resume_breakpoint
480 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
481
482 /* For now, delete the parent's sr breakpoint, otherwise,
483 parent/child sr breakpoints are considered duplicates,
484 and the child version will not be installed. Remove
485 this when the breakpoints module becomes aware of
486 inferiors and address spaces. */
487 delete_step_resume_breakpoint (tp);
488 tp->control.step_range_start = 0;
489 tp->control.step_range_end = 0;
490 tp->control.step_frame_id = null_frame_id;
491 delete_exception_resume_breakpoint (tp);
492 }
493
494 parent = inferior_ptid;
495 child = tp->pending_follow.value.related_pid;
496
497 /* Tell the target to do whatever is necessary to follow
498 either parent or child. */
499 if (target_follow_fork (follow_child))
500 {
501 /* Target refused to follow, or there's some other reason
502 we shouldn't resume. */
503 should_resume = 0;
504 }
505 else
506 {
507 /* This pending follow fork event is now handled, one way
508 or another. The previously selected thread may be gone
509 from the lists by now, but if it is still around, we need
510 to clear the pending follow request. */
511 tp = find_thread_ptid (parent);
512 if (tp)
513 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
514
515 /* This makes sure we don't try to apply the "Switched
516 over from WAIT_PID" logic above. */
517 nullify_last_target_wait_ptid ();
518
519 /* If we followed the child, switch to it... */
520 if (follow_child)
521 {
522 switch_to_thread (child);
523
524 /* ... and preserve the stepping state, in case the
525 user was stepping over the fork call. */
526 if (should_resume)
527 {
528 tp = inferior_thread ();
529 tp->control.step_resume_breakpoint
530 = step_resume_breakpoint;
531 tp->control.step_range_start = step_range_start;
532 tp->control.step_range_end = step_range_end;
533 tp->control.step_frame_id = step_frame_id;
534 tp->control.exception_resume_breakpoint
535 = exception_resume_breakpoint;
536 }
537 else
538 {
539 /* If we get here, it was because we're trying to
540 resume from a fork catchpoint, but, the user
541 has switched threads away from the thread that
542 forked. In that case, the resume command
543 issued is most likely not applicable to the
544 child, so just warn, and refuse to resume. */
545 warning (_("Not resuming: switched threads "
546 "before following fork child.\n"));
547 }
548
549 /* Reset breakpoints in the child as appropriate. */
550 follow_inferior_reset_breakpoints ();
551 }
552 else
553 switch_to_thread (parent);
554 }
555 }
556 break;
557 case TARGET_WAITKIND_SPURIOUS:
558 /* Nothing to follow. */
559 break;
560 default:
561 internal_error (__FILE__, __LINE__,
562 "Unexpected pending_follow.kind %d\n",
563 tp->pending_follow.kind);
564 break;
565 }
566
567 return should_resume;
568 }
569
570 void
571 follow_inferior_reset_breakpoints (void)
572 {
573 struct thread_info *tp = inferior_thread ();
574
575 /* Was there a step_resume breakpoint? (There was if the user
576 did a "next" at the fork() call.) If so, explicitly reset its
577 thread number.
578
579 step_resumes are a form of bp that are made to be per-thread.
580 Since we created the step_resume bp when the parent process
581 was being debugged, and now are switching to the child process,
582 from the breakpoint package's viewpoint, that's a switch of
583 "threads". We must update the bp's notion of which thread
584 it is for, or it'll be ignored when it triggers. */
585
586 if (tp->control.step_resume_breakpoint)
587 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
588
589 if (tp->control.exception_resume_breakpoint)
590 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
591
592 /* Reinsert all breakpoints in the child. The user may have set
593 breakpoints after catching the fork, in which case those
594 were never set in the child, but only in the parent. This makes
595 sure the inserted breakpoints match the breakpoint list. */
596
597 breakpoint_re_set ();
598 insert_breakpoints ();
599 }
600
601 /* The child has exited or execed: resume the threads of the parent
602 that the user wanted to be executing. */
603
604 static int
605 proceed_after_vfork_done (struct thread_info *thread,
606 void *arg)
607 {
608 int pid = * (int *) arg;
609
610 if (ptid_get_pid (thread->ptid) == pid
611 && is_running (thread->ptid)
612 && !is_executing (thread->ptid)
613 && !thread->stop_requested
614 && thread->suspend.stop_signal == TARGET_SIGNAL_0)
615 {
616 if (debug_infrun)
617 fprintf_unfiltered (gdb_stdlog,
618 "infrun: resuming vfork parent thread %s\n",
619 target_pid_to_str (thread->ptid));
620
621 switch_to_thread (thread->ptid);
622 clear_proceed_status ();
623 proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);
624 }
625
626 return 0;
627 }
628
629 /* Called whenever we notice an exec or exit event, to handle
630 detaching or resuming a vfork parent. */
631
632 static void
633 handle_vfork_child_exec_or_exit (int exec)
634 {
635 struct inferior *inf = current_inferior ();
636
637 if (inf->vfork_parent)
638 {
639 int resume_parent = -1;
640
641 /* This exec or exit marks the end of the shared memory region
642 between the parent and the child. If the user wanted to
643 detach from the parent, now is the time. */
644
645 if (inf->vfork_parent->pending_detach)
646 {
647 struct thread_info *tp;
648 struct cleanup *old_chain;
649 struct program_space *pspace;
650 struct address_space *aspace;
651
652 /* follow-fork child, detach-on-fork on. */
653
654 old_chain = make_cleanup_restore_current_thread ();
655
656 /* We're letting go of the parent. */
657 tp = any_live_thread_of_process (inf->vfork_parent->pid);
658 switch_to_thread (tp->ptid);
659
660 /* We're about to detach from the parent, which implicitly
661 removes breakpoints from its address space. There's a
662 catch here: we want to reuse the spaces for the child,
663 but the parent and child are still sharing the pspace at this
664 point, although the exec in reality makes the kernel give
665 the child a fresh set of new pages. The problem here is
666 that the breakpoints module, being unaware of this, would
667 likely choose the child process to write to the parent
668 address space. Swapping the child temporarily away from
669 the spaces has the desired effect. Yes, this is "sort
670 of" a hack. */
671
672 pspace = inf->pspace;
673 aspace = inf->aspace;
674 inf->aspace = NULL;
675 inf->pspace = NULL;
676
677 if (debug_infrun || info_verbose)
678 {
679 target_terminal_ours ();
680
681 if (exec)
682 fprintf_filtered (gdb_stdlog,
683 "Detaching vfork parent process "
684 "%d after child exec.\n",
685 inf->vfork_parent->pid);
686 else
687 fprintf_filtered (gdb_stdlog,
688 "Detaching vfork parent process "
689 "%d after child exit.\n",
690 inf->vfork_parent->pid);
691 }
692
693 target_detach (NULL, 0);
694
695 /* Put it back. */
696 inf->pspace = pspace;
697 inf->aspace = aspace;
698
699 do_cleanups (old_chain);
700 }
701 else if (exec)
702 {
703 /* We're staying attached to the parent, so, really give the
704 child a new address space. */
705 inf->pspace = add_program_space (maybe_new_address_space ());
706 inf->aspace = inf->pspace->aspace;
707 inf->removable = 1;
708 set_current_program_space (inf->pspace);
709
710 resume_parent = inf->vfork_parent->pid;
711
712 /* Break the bonds. */
713 inf->vfork_parent->vfork_child = NULL;
714 }
715 else
716 {
717 struct cleanup *old_chain;
718 struct program_space *pspace;
719
720 /* If this is a vfork child exiting, then the pspace and
721 aspaces were shared with the parent. Since we're
722 reporting the process exit, we'll be mourning all that is
723 found in the address space, and switching to null_ptid,
724 preparing to start a new inferior. But, since we don't
725 want to clobber the parent's address/program spaces, we
726 go ahead and create a new one for this exiting
727 inferior. */
728
729 /* Switch to null_ptid, so that clone_program_space doesn't want
730 to read the selected frame of a dead process. */
731 old_chain = save_inferior_ptid ();
732 inferior_ptid = null_ptid;
733
734 /* This inferior is dead, so avoid giving the breakpoints
735 module the option to write through to it (cloning a
736 program space resets breakpoints). */
737 inf->aspace = NULL;
738 inf->pspace = NULL;
739 pspace = add_program_space (maybe_new_address_space ());
740 set_current_program_space (pspace);
741 inf->removable = 1;
742 clone_program_space (pspace, inf->vfork_parent->pspace);
743 inf->pspace = pspace;
744 inf->aspace = pspace->aspace;
745
746 /* Put back inferior_ptid. We'll continue mourning this
747 inferior. */
748 do_cleanups (old_chain);
749
750 resume_parent = inf->vfork_parent->pid;
751 /* Break the bonds. */
752 inf->vfork_parent->vfork_child = NULL;
753 }
754
755 inf->vfork_parent = NULL;
756
757 gdb_assert (current_program_space == inf->pspace);
758
759 if (non_stop && resume_parent != -1)
760 {
761 /* If the user wanted the parent to be running, let it go
762 free now. */
763 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
764
765 if (debug_infrun)
766 fprintf_unfiltered (gdb_stdlog,
767 "infrun: resuming vfork parent process %d\n",
768 resume_parent);
769
770 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
771
772 do_cleanups (old_chain);
773 }
774 }
775 }
776
777 /* Enum strings for "set|show follow-exec-mode". */
778
779 static const char follow_exec_mode_new[] = "new";
780 static const char follow_exec_mode_same[] = "same";
781 static const char *follow_exec_mode_names[] =
782 {
783 follow_exec_mode_new,
784 follow_exec_mode_same,
785 NULL,
786 };
787
788 static const char *follow_exec_mode_string = follow_exec_mode_same;
789 static void
790 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
791 struct cmd_list_element *c, const char *value)
792 {
793 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
794 }
795
796 /* EXECD_PATHNAME is assumed to be non-NULL. */
797
798 static void
799 follow_exec (ptid_t pid, char *execd_pathname)
800 {
801 struct thread_info *th = inferior_thread ();
802 struct inferior *inf = current_inferior ();
803
804 /* This is an exec event that we actually wish to pay attention to.
805 Refresh our symbol table to the newly exec'd program, remove any
806 momentary bp's, etc.
807
808 If there are breakpoints, they aren't really inserted now,
809 since the exec() transformed our inferior into a fresh set
810 of instructions.
811
812 We want to preserve symbolic breakpoints on the list, since
813 we have hopes that they can be reset after the new a.out's
814 symbol table is read.
815
816 However, any "raw" breakpoints must be removed from the list
817 (e.g., the solib bp's), since their address is probably invalid
818 now.
819
820 And, we DON'T want to call delete_breakpoints() here, since
821 that may write the bp's "shadow contents" (the instruction
822 value that was overwritten with a TRAP instruction). Since
823 we now have a new a.out, those shadow contents aren't valid. */
824
825 mark_breakpoints_out ();
826
827 update_breakpoints_after_exec ();
828
829 /* If there was one, it's gone now. We cannot truly step to the
830 next statement through an exec(). */
831 th->control.step_resume_breakpoint = NULL;
832 th->control.exception_resume_breakpoint = NULL;
833 th->control.step_range_start = 0;
834 th->control.step_range_end = 0;
835
836 /* The target reports the exec event to the main thread, even if
837 some other thread does the exec, and even if the main thread was
838 already stopped --- if debugging in non-stop mode, it's possible
839 the user had the main thread held stopped in the previous image
840 --- release it now. This is the same behavior as step-over-exec
841 with scheduler-locking on in all-stop mode. */
842 th->stop_requested = 0;
843
844 /* What is this a.out's name? */
845 printf_unfiltered (_("%s is executing new program: %s\n"),
846 target_pid_to_str (inferior_ptid),
847 execd_pathname);
848
849 /* We've followed the inferior through an exec. Therefore, the
850 inferior has essentially been killed & reborn. */
851
852 gdb_flush (gdb_stdout);
853
854 breakpoint_init_inferior (inf_execd);
855
856 if (gdb_sysroot && *gdb_sysroot)
857 {
858 char *name = alloca (strlen (gdb_sysroot)
859 + strlen (execd_pathname)
860 + 1);
861
862 strcpy (name, gdb_sysroot);
863 strcat (name, execd_pathname);
864 execd_pathname = name;
865 }
866
867 /* Reset the shared library package. This ensures that we get a
868 shlib event when the child reaches "_start", at which point the
869 dld will have had a chance to initialize the child. */
870 /* Also, loading a symbol file below may trigger symbol lookups, and
871 we don't want those to be satisfied by the libraries of the
872 previous incarnation of this process. */
873 no_shared_libraries (NULL, 0);
874
875 if (follow_exec_mode_string == follow_exec_mode_new)
876 {
877 struct program_space *pspace;
878
879 /* The user wants to keep the old inferior and program spaces
880 around. Create a new fresh one, and switch to it. */
881
882 inf = add_inferior (current_inferior ()->pid);
883 pspace = add_program_space (maybe_new_address_space ());
884 inf->pspace = pspace;
885 inf->aspace = pspace->aspace;
886
887 exit_inferior_num_silent (current_inferior ()->num);
888
889 set_current_inferior (inf);
890 set_current_program_space (pspace);
891 }
892
893 gdb_assert (current_program_space == inf->pspace);
894
895 /* That a.out is now the one to use. */
896 exec_file_attach (execd_pathname, 0);
897
898 /* SYMFILE_DEFER_BP_RESET is used here because the proper displacement
899 for a PIE (Position Independent Executable) main symbol file will only
900 get applied by solib_create_inferior_hook below; breakpoint_re_set
901 would otherwise fail to insert the breakpoints with the zero displacement. */
902
903 symbol_file_add (execd_pathname, SYMFILE_MAINLINE | SYMFILE_DEFER_BP_RESET,
904 NULL, 0);
905
906 set_initial_language ();
907
908 #ifdef SOLIB_CREATE_INFERIOR_HOOK
909 SOLIB_CREATE_INFERIOR_HOOK (PIDGET (inferior_ptid));
910 #else
911 solib_create_inferior_hook (0);
912 #endif
913
914 jit_inferior_created_hook ();
915
916 breakpoint_re_set ();
917
918 /* Reinsert all breakpoints. (Those which were symbolic have
919 been reset to the proper address in the new a.out, thanks
920 to symbol_file_command...). */
921 insert_breakpoints ();
922
923 /* The next resume of this inferior should bring it to the shlib
924 startup breakpoints. (If the user had also set bp's on
925 "main" from the old (parent) process, then they'll auto-
926 matically get reset there in the new process.). */
927 }
928
929 /* Non-zero if we are just simulating a single-step. This is needed
930 because we cannot remove the breakpoints in the inferior process
931 until after the `wait' in `wait_for_inferior'. */
932 static int singlestep_breakpoints_inserted_p = 0;
933
934 /* The thread we inserted single-step breakpoints for. */
935 static ptid_t singlestep_ptid;
936
937 /* PC when we started this single-step. */
938 static CORE_ADDR singlestep_pc;
939
940 /* If another thread hit the singlestep breakpoint, we save the original
941 thread here so that we can resume single-stepping it later. */
942 static ptid_t saved_singlestep_ptid;
943 static int stepping_past_singlestep_breakpoint;
944
945 /* If not equal to null_ptid, this means that after the step over a
946 breakpoint finishes, we need to switch to deferred_step_ptid and step it.
947
948 The use case is when one thread has hit a breakpoint, and then the user
949 has switched to another thread and issued 'step'. We need to step over
950 the breakpoint in the thread which hit it, but then continue
951 stepping the thread the user has selected. */
952 static ptid_t deferred_step_ptid;
953 \f
954 /* Displaced stepping. */
955
956 /* In non-stop debugging mode, we must take special care to manage
957 breakpoints properly; in particular, the traditional strategy for
958 stepping a thread past a breakpoint it has hit is unsuitable.
959 'Displaced stepping' is a tactic for stepping one thread past a
960 breakpoint it has hit while ensuring that other threads running
961 concurrently will hit the breakpoint as they should.
962
963 The traditional way to step a thread T off a breakpoint in a
964 multi-threaded program in all-stop mode is as follows:
965
966 a0) Initially, all threads are stopped, and breakpoints are not
967 inserted.
968 a1) We single-step T, leaving breakpoints uninserted.
969 a2) We insert breakpoints, and resume all threads.
970
971 In non-stop debugging, however, this strategy is unsuitable: we
972 don't want to have to stop all threads in the system in order to
973 continue or step T past a breakpoint. Instead, we use displaced
974 stepping:
975
976 n0) Initially, T is stopped, other threads are running, and
977 breakpoints are inserted.
978 n1) We copy the instruction "under" the breakpoint to a separate
979 location, outside the main code stream, making any adjustments
980 to the instruction, register, and memory state as directed by
981 T's architecture.
982 n2) We single-step T over the instruction at its new location.
983 n3) We adjust the resulting register and memory state as directed
984 by T's architecture. This includes resetting T's PC to point
985 back into the main instruction stream.
986 n4) We resume T.
987
988 This approach depends on the following gdbarch methods:
989
990 - gdbarch_max_insn_length and gdbarch_displaced_step_location
991 indicate where to copy the instruction, and how much space must
992 be reserved there. We use these in step n1.
993
994 - gdbarch_displaced_step_copy_insn copies an instruction to a new
995 address, and makes any necessary adjustments to the instruction,
996 register contents, and memory. We use this in step n1.
997
998 - gdbarch_displaced_step_fixup adjusts registers and memory after
999 we have successfully single-stepped the instruction, to yield the
1000 same effect the instruction would have had if we had executed it
1001 at its original address. We use this in step n3.
1002
1003 - gdbarch_displaced_step_free_closure provides cleanup.
1004
1005 The gdbarch_displaced_step_copy_insn and
1006 gdbarch_displaced_step_fixup functions must be written so that
1007 copying an instruction with gdbarch_displaced_step_copy_insn,
1008 single-stepping across the copied instruction, and then applying
1009 gdbarch_displaced_step_fixup should have the same effects on the
1010 thread's memory and registers as stepping the instruction in place
1011 would have. Exactly which responsibilities fall to the copy and
1012 which fall to the fixup is up to the author of those functions.
1013
1014 See the comments in gdbarch.sh for details.
1015
1016 Note that displaced stepping and software single-step cannot
1017 currently be used in combination, although with some care I think
1018 they could be made to. Software single-step works by placing
1019 breakpoints on all possible subsequent instructions; if the
1020 displaced instruction is a PC-relative jump, those breakpoints
1021 could fall in very strange places --- on pages that aren't
1022 executable, or at addresses that are not proper instruction
1023 boundaries. (We do generally let other threads run while we wait
1024 to hit the software single-step breakpoint, and they might
1025 encounter such a corrupted instruction.) One way to work around
1026 this would be to have gdbarch_displaced_step_copy_insn fully
1027 simulate the effect of PC-relative instructions (and return NULL)
1028 on architectures that use software single-stepping.
1029
1030 In non-stop mode, we can have independent and simultaneous step
1031 requests, so more than one thread may need to simultaneously step
1032 over a breakpoint. The current implementation assumes there is
1033 only one scratch space per process. In this case, we have to
1034 serialize access to the scratch space. If thread A wants to step
1035 over a breakpoint, but we are currently waiting for some other
1036 thread to complete a displaced step, we leave thread A stopped and
1037 place it in the displaced_step_request_queue. Whenever a displaced
1038 step finishes, we pick the next thread in the queue and start a new
1039 displaced step operation on it. See displaced_step_prepare and
1040 displaced_step_fixup for details. */
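
/* Pulling the steps above together, a single displaced step is
   roughly the following call sequence -- a condensed sketch of what
   displaced_step_prepare and displaced_step_fixup below actually do,
   with the request queue, error paths and state bookkeeping omitted:

     copy = gdbarch_displaced_step_location (gdbarch);
     len = gdbarch_max_insn_length (gdbarch);
     read_memory (copy, saved_bytes, len);                [save scratch area]
     closure = gdbarch_displaced_step_copy_insn (gdbarch, original,
                                                 copy, regcache);      [n1]
     regcache_write_pc (regcache, copy);
     target_resume (ptid, 1, TARGET_SIGNAL_0);                          [n2]
     ... wait for the single-step to report back ...
     write_memory (copy, saved_bytes, len);            [restore scratch area]
     gdbarch_displaced_step_fixup (gdbarch, closure, original,
                                   copy, regcache);                    [n3]
     gdbarch_displaced_step_free_closure (gdbarch, closure);
     target_resume (ptid, 0, TARGET_SIGNAL_0);                          [n4]  */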
1041
1042 struct displaced_step_request
1043 {
1044 ptid_t ptid;
1045 struct displaced_step_request *next;
1046 };
1047
1048 /* Per-inferior displaced stepping state. */
1049 struct displaced_step_inferior_state
1050 {
1051 /* Pointer to next in linked list. */
1052 struct displaced_step_inferior_state *next;
1053
1054 /* The process this displaced step state refers to. */
1055 int pid;
1056
1057 /* A queue of pending displaced stepping requests. One entry per
1058 thread that needs to do a displaced step. */
1059 struct displaced_step_request *step_request_queue;
1060
1061 /* If this is not null_ptid, this is the thread carrying out a
1062 displaced single-step in process PID. This thread's state will
1063 require fixing up once it has completed its step. */
1064 ptid_t step_ptid;
1065
1066 /* The architecture the thread had when we stepped it. */
1067 struct gdbarch *step_gdbarch;
1068
1069 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
1070 for post-step cleanup. */
1071 struct displaced_step_closure *step_closure;
1072
1073 /* The address of the original instruction, and the copy we
1074 made. */
1075 CORE_ADDR step_original, step_copy;
1076
1077 /* Saved contents of copy area. */
1078 gdb_byte *step_saved_copy;
1079 };
1080
1081 /* The list of states of processes involved in displaced stepping
1082 presently. */
1083 static struct displaced_step_inferior_state *displaced_step_inferior_states;
1084
1085 /* Get the displaced stepping state of process PID. */
1086
1087 static struct displaced_step_inferior_state *
1088 get_displaced_stepping_state (int pid)
1089 {
1090 struct displaced_step_inferior_state *state;
1091
1092 for (state = displaced_step_inferior_states;
1093 state != NULL;
1094 state = state->next)
1095 if (state->pid == pid)
1096 return state;
1097
1098 return NULL;
1099 }
1100
1101 /* Add a new displaced stepping state for process PID to the displaced
1102 stepping state list, or return a pointer to the existing entry if
1103 one is already present. Never returns NULL. */
1104
1105 static struct displaced_step_inferior_state *
1106 add_displaced_stepping_state (int pid)
1107 {
1108 struct displaced_step_inferior_state *state;
1109
1110 for (state = displaced_step_inferior_states;
1111 state != NULL;
1112 state = state->next)
1113 if (state->pid == pid)
1114 return state;
1115
1116 state = xcalloc (1, sizeof (*state));
1117 state->pid = pid;
1118 state->next = displaced_step_inferior_states;
1119 displaced_step_inferior_states = state;
1120
1121 return state;
1122 }
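
/* The callers below use these as a lookup-or-create pair: the prepare
   path creates the per-process state on demand, while the event/fixup
   path only looks it up and bails out if the process never did a
   displaced step.  A condensed sketch of both patterns:

     displaced = add_displaced_stepping_state (ptid_get_pid (ptid));

     displaced = get_displaced_stepping_state (ptid_get_pid (event_ptid));
     if (displaced == NULL)
       return;  */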
1123
1124 /* If the inferior is doing a displaced step, and ADDR equals the starting
1125 address of the copy area, return the corresponding displaced_step_closure.
1126 Otherwise, return NULL. */
1127
1128 struct displaced_step_closure*
1129 get_displaced_step_closure_by_addr (CORE_ADDR addr)
1130 {
1131 struct displaced_step_inferior_state *displaced
1132 = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1133
1134 /* Check whether a displaced step is in progress and ADDR is the start of its copy area. */
1135 if (displaced && !ptid_equal (displaced->step_ptid, null_ptid)
1136 && (displaced->step_copy == addr))
1137 return displaced->step_closure;
1138
1139 return NULL;
1140 }
1141
1142 /* Remove the displaced stepping state of process PID. */
1143
1144 static void
1145 remove_displaced_stepping_state (int pid)
1146 {
1147 struct displaced_step_inferior_state *it, **prev_next_p;
1148
1149 gdb_assert (pid != 0);
1150
1151 it = displaced_step_inferior_states;
1152 prev_next_p = &displaced_step_inferior_states;
1153 while (it)
1154 {
1155 if (it->pid == pid)
1156 {
1157 *prev_next_p = it->next;
1158 xfree (it);
1159 return;
1160 }
1161
1162 prev_next_p = &it->next;
1163 it = *prev_next_p;
1164 }
1165 }
1166
1167 static void
1168 infrun_inferior_exit (struct inferior *inf)
1169 {
1170 remove_displaced_stepping_state (inf->pid);
1171 }
1172
1173 /* Enum strings for "set|show displaced-stepping". */
1174
1175 static const char can_use_displaced_stepping_auto[] = "auto";
1176 static const char can_use_displaced_stepping_on[] = "on";
1177 static const char can_use_displaced_stepping_off[] = "off";
1178 static const char *can_use_displaced_stepping_enum[] =
1179 {
1180 can_use_displaced_stepping_auto,
1181 can_use_displaced_stepping_on,
1182 can_use_displaced_stepping_off,
1183 NULL,
1184 };
1185
1186 /* If ON, and the architecture supports it, GDB will use displaced
1187 stepping to step over breakpoints. If OFF, or if the architecture
1188 doesn't support it, GDB will instead use the traditional
1189 hold-and-step approach. If AUTO (which is the default), GDB will
1190 decide which technique to use to step over breakpoints depending on
1191 which of all-stop or non-stop mode is active --- displaced stepping
1192 in non-stop mode; hold-and-step in all-stop mode. */
1193
1194 static const char *can_use_displaced_stepping =
1195 can_use_displaced_stepping_auto;
1196
1197 static void
1198 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1199 struct cmd_list_element *c,
1200 const char *value)
1201 {
1202 if (can_use_displaced_stepping == can_use_displaced_stepping_auto)
1203 fprintf_filtered (file,
1204 _("Debugger's willingness to use displaced stepping "
1205 "to step over breakpoints is %s (currently %s).\n"),
1206 value, non_stop ? "on" : "off");
1207 else
1208 fprintf_filtered (file,
1209 _("Debugger's willingness to use displaced stepping "
1210 "to step over breakpoints is %s.\n"), value);
1211 }
1212
1213 /* Return non-zero if displaced stepping can/should be used to step
1214 over breakpoints. */
1215
1216 static int
1217 use_displaced_stepping (struct gdbarch *gdbarch)
1218 {
1219 return (((can_use_displaced_stepping == can_use_displaced_stepping_auto
1220 && non_stop)
1221 || can_use_displaced_stepping == can_use_displaced_stepping_on)
1222 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1223 && !RECORD_IS_USED);
1224 }
1225
1226 /* Clean out any stray displaced stepping state. */
1227 static void
1228 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1229 {
1230 /* Indicate that there is no cleanup pending. */
1231 displaced->step_ptid = null_ptid;
1232
1233 if (displaced->step_closure)
1234 {
1235 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1236 displaced->step_closure);
1237 displaced->step_closure = NULL;
1238 }
1239 }
1240
1241 static void
1242 displaced_step_clear_cleanup (void *arg)
1243 {
1244 struct displaced_step_inferior_state *state = arg;
1245
1246 displaced_step_clear (state);
1247 }
1248
1249 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1250 void
1251 displaced_step_dump_bytes (struct ui_file *file,
1252 const gdb_byte *buf,
1253 size_t len)
1254 {
1255 int i;
1256
1257 for (i = 0; i < len; i++)
1258 fprintf_unfiltered (file, "%02x ", buf[i]);
1259 fputs_unfiltered ("\n", file);
1260 }
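
/* Usage sketch, matching the calls made by the displaced-stepping code
   further below whenever debug_displaced output is enabled:

     gdb_byte buf[4];

     read_memory (pc, buf, sizeof (buf));
     displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));  */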
1261
1262 /* Prepare to single-step, using displaced stepping.
1263
1264 Note that we cannot use displaced stepping when we have a signal to
1265 deliver. If we have a signal to deliver and an instruction to step
1266 over, then after the step, there will be no indication from the
1267 target whether the thread entered a signal handler or ignored the
1268 signal and stepped over the instruction successfully --- both cases
1269 result in a simple SIGTRAP. In the first case we mustn't do a
1270 fixup, and in the second case we must --- but we can't tell which.
1271 Comments in the code for 'random signals' in handle_inferior_event
1272 explain how we handle this case instead.
1273
1274 Returns 1 if preparing was successful -- this thread is going to be
1275 stepped now; or 0 if displaced stepping this thread got queued. */
1276 static int
1277 displaced_step_prepare (ptid_t ptid)
1278 {
1279 struct cleanup *old_cleanups, *ignore_cleanups;
1280 struct regcache *regcache = get_thread_regcache (ptid);
1281 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1282 CORE_ADDR original, copy;
1283 ULONGEST len;
1284 struct displaced_step_closure *closure;
1285 struct displaced_step_inferior_state *displaced;
1286
1287 /* We should never reach this function if the architecture does not
1288 support displaced stepping. */
1289 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1290
1291 /* We have to displaced step one thread at a time, as we only have
1292 access to a single scratch space per inferior. */
1293
1294 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1295
1296 if (!ptid_equal (displaced->step_ptid, null_ptid))
1297 {
1298 /* Already waiting for a displaced step to finish. Defer this
1299 request and place in queue. */
1300 struct displaced_step_request *req, *new_req;
1301
1302 if (debug_displaced)
1303 fprintf_unfiltered (gdb_stdlog,
1304 "displaced: defering step of %s\n",
1305 target_pid_to_str (ptid));
1306
1307 new_req = xmalloc (sizeof (*new_req));
1308 new_req->ptid = ptid;
1309 new_req->next = NULL;
1310
1311 if (displaced->step_request_queue)
1312 {
1313 for (req = displaced->step_request_queue;
1314 req && req->next;
1315 req = req->next)
1316 ;
1317 req->next = new_req;
1318 }
1319 else
1320 displaced->step_request_queue = new_req;
1321
1322 return 0;
1323 }
1324 else
1325 {
1326 if (debug_displaced)
1327 fprintf_unfiltered (gdb_stdlog,
1328 "displaced: stepping %s now\n",
1329 target_pid_to_str (ptid));
1330 }
1331
1332 displaced_step_clear (displaced);
1333
1334 old_cleanups = save_inferior_ptid ();
1335 inferior_ptid = ptid;
1336
1337 original = regcache_read_pc (regcache);
1338
1339 copy = gdbarch_displaced_step_location (gdbarch);
1340 len = gdbarch_max_insn_length (gdbarch);
1341
1342 /* Save the original contents of the copy area. */
1343 displaced->step_saved_copy = xmalloc (len);
1344 ignore_cleanups = make_cleanup (free_current_contents,
1345 &displaced->step_saved_copy);
1346 read_memory (copy, displaced->step_saved_copy, len);
1347 if (debug_displaced)
1348 {
1349 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1350 paddress (gdbarch, copy));
1351 displaced_step_dump_bytes (gdb_stdlog,
1352 displaced->step_saved_copy,
1353 len);
1354 };
1355
1356 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1357 original, copy, regcache);
1358
1359 /* We don't support the fully-simulated case at present. */
1360 gdb_assert (closure);
1361
1362 /* Save the information we need to fix things up if the step
1363 succeeds. */
1364 displaced->step_ptid = ptid;
1365 displaced->step_gdbarch = gdbarch;
1366 displaced->step_closure = closure;
1367 displaced->step_original = original;
1368 displaced->step_copy = copy;
1369
1370 make_cleanup (displaced_step_clear_cleanup, displaced);
1371
1372 /* Resume execution at the copy. */
1373 regcache_write_pc (regcache, copy);
1374
1375 discard_cleanups (ignore_cleanups);
1376
1377 do_cleanups (old_cleanups);
1378
1379 if (debug_displaced)
1380 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1381 paddress (gdbarch, copy));
1382
1383 return 1;
1384 }
1385
1386 static void
1387 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1388 const gdb_byte *myaddr, int len)
1389 {
1390 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1391
1392 inferior_ptid = ptid;
1393 write_memory (memaddr, myaddr, len);
1394 do_cleanups (ptid_cleanup);
1395 }
1396
1397 /* Restore the contents of the copy area for thread PTID. */
1398
1399 static void
1400 displaced_step_restore (struct displaced_step_inferior_state *displaced,
1401 ptid_t ptid)
1402 {
1403 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1404
1405 write_memory_ptid (ptid, displaced->step_copy,
1406 displaced->step_saved_copy, len);
1407 if (debug_displaced)
1408 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s %s\n",
1409 target_pid_to_str (ptid),
1410 paddress (displaced->step_gdbarch,
1411 displaced->step_copy));
1412 }
1413
1414 static void
1415 displaced_step_fixup (ptid_t event_ptid, enum target_signal signal)
1416 {
1417 struct cleanup *old_cleanups;
1418 struct displaced_step_inferior_state *displaced
1419 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1420
1421 /* Was any thread of this process doing a displaced step? */
1422 if (displaced == NULL)
1423 return;
1424
1425 /* Was this event for the thread we displaced? */
1426 if (ptid_equal (displaced->step_ptid, null_ptid)
1427 || ! ptid_equal (displaced->step_ptid, event_ptid))
1428 return;
1429
1430 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1431
1432 displaced_step_restore (displaced, displaced->step_ptid);
1433
1434 /* Did the instruction complete successfully? */
1435 if (signal == TARGET_SIGNAL_TRAP)
1436 {
1437 /* Fix up the resulting state. */
1438 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1439 displaced->step_closure,
1440 displaced->step_original,
1441 displaced->step_copy,
1442 get_thread_regcache (displaced->step_ptid));
1443 }
1444 else
1445 {
1446 /* Since the instruction didn't complete, all we can do is
1447 relocate the PC. */
1448 struct regcache *regcache = get_thread_regcache (event_ptid);
1449 CORE_ADDR pc = regcache_read_pc (regcache);
1450
1451 pc = displaced->step_original + (pc - displaced->step_copy);
1452 regcache_write_pc (regcache, pc);
1453 }
1454
1455 do_cleanups (old_cleanups);
1456
1457 displaced->step_ptid = null_ptid;
1458
1459 /* Are there any pending displaced stepping requests? If so, run
1460 one now. Leave the state object around, since we're likely to
1461 need it again soon. */
1462 while (displaced->step_request_queue)
1463 {
1464 struct displaced_step_request *head;
1465 ptid_t ptid;
1466 struct regcache *regcache;
1467 struct gdbarch *gdbarch;
1468 CORE_ADDR actual_pc;
1469 struct address_space *aspace;
1470
1471 head = displaced->step_request_queue;
1472 ptid = head->ptid;
1473 displaced->step_request_queue = head->next;
1474 xfree (head);
1475
1476 context_switch (ptid);
1477
1478 regcache = get_thread_regcache (ptid);
1479 actual_pc = regcache_read_pc (regcache);
1480 aspace = get_regcache_aspace (regcache);
1481
1482 if (breakpoint_here_p (aspace, actual_pc))
1483 {
1484 if (debug_displaced)
1485 fprintf_unfiltered (gdb_stdlog,
1486 "displaced: stepping queued %s now\n",
1487 target_pid_to_str (ptid));
1488
1489 displaced_step_prepare (ptid);
1490
1491 gdbarch = get_regcache_arch (regcache);
1492
1493 if (debug_displaced)
1494 {
1495 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1496 gdb_byte buf[4];
1497
1498 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1499 paddress (gdbarch, actual_pc));
1500 read_memory (actual_pc, buf, sizeof (buf));
1501 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1502 }
1503
1504 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1505 displaced->step_closure))
1506 target_resume (ptid, 1, TARGET_SIGNAL_0);
1507 else
1508 target_resume (ptid, 0, TARGET_SIGNAL_0);
1509
1510 /* Done, we're stepping a thread. */
1511 break;
1512 }
1513 else
1514 {
1515 int step;
1516 struct thread_info *tp = inferior_thread ();
1517
1518 /* The breakpoint we were sitting under has since been
1519 removed. */
1520 tp->control.trap_expected = 0;
1521
1522 /* Go back to what we were trying to do. */
1523 step = currently_stepping (tp);
1524
1525 if (debug_displaced)
1526 fprintf_unfiltered (gdb_stdlog,
1527 "breakpoint is gone %s: step(%d)\n",
1528 target_pid_to_str (tp->ptid), step);
1529
1530 target_resume (ptid, step, TARGET_SIGNAL_0);
1531 tp->suspend.stop_signal = TARGET_SIGNAL_0;
1532
1533 /* This request was discarded. See if there's any other
1534 thread waiting for its turn. */
1535 }
1536 }
1537 }
1538
1539 /* Update global variables holding ptids to hold NEW_PTID if they were
1540 holding OLD_PTID. */
1541 static void
1542 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1543 {
1544 struct displaced_step_request *it;
1545 struct displaced_step_inferior_state *displaced;
1546
1547 if (ptid_equal (inferior_ptid, old_ptid))
1548 inferior_ptid = new_ptid;
1549
1550 if (ptid_equal (singlestep_ptid, old_ptid))
1551 singlestep_ptid = new_ptid;
1552
1553 if (ptid_equal (deferred_step_ptid, old_ptid))
1554 deferred_step_ptid = new_ptid;
1555
1556 for (displaced = displaced_step_inferior_states;
1557 displaced;
1558 displaced = displaced->next)
1559 {
1560 if (ptid_equal (displaced->step_ptid, old_ptid))
1561 displaced->step_ptid = new_ptid;
1562
1563 for (it = displaced->step_request_queue; it; it = it->next)
1564 if (ptid_equal (it->ptid, old_ptid))
1565 it->ptid = new_ptid;
1566 }
1567 }
1568
1569 \f
1570 /* Resuming. */
1571
1572 /* Things to clean up if we QUIT out of resume (). */
1573 static void
1574 resume_cleanups (void *ignore)
1575 {
1576 normal_stop ();
1577 }
1578
1579 static const char schedlock_off[] = "off";
1580 static const char schedlock_on[] = "on";
1581 static const char schedlock_step[] = "step";
1582 static const char *scheduler_enums[] = {
1583 schedlock_off,
1584 schedlock_on,
1585 schedlock_step,
1586 NULL
1587 };
1588 static const char *scheduler_mode = schedlock_off;
1589 static void
1590 show_scheduler_mode (struct ui_file *file, int from_tty,
1591 struct cmd_list_element *c, const char *value)
1592 {
1593 fprintf_filtered (file,
1594 _("Mode for locking scheduler "
1595 "during execution is \"%s\".\n"),
1596 value);
1597 }
1598
1599 static void
1600 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1601 {
1602 if (!target_can_lock_scheduler)
1603 {
1604 scheduler_mode = schedlock_off;
1605 error (_("Target '%s' cannot support this command."), target_shortname);
1606 }
1607 }
1608
1609 /* True if execution commands resume all threads of all processes by
1610 default; otherwise, resume only threads of the current inferior
1611 process. */
1612 int sched_multi = 0;
1613
1614 /* Try to set up software single stepping over the specified location.
1615 Return 1 if target_resume() should use hardware single step.
1616
1617 GDBARCH the current gdbarch.
1618 PC the location to step over. */
1619
1620 static int
1621 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1622 {
1623 int hw_step = 1;
1624
1625 if (execution_direction == EXEC_FORWARD
1626 && gdbarch_software_single_step_p (gdbarch)
1627 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1628 {
1629 hw_step = 0;
1630 /* Do not pull these breakpoints until after a `wait' in
1631 `wait_for_inferior'. */
1632 singlestep_breakpoints_inserted_p = 1;
1633 singlestep_ptid = inferior_ptid;
1634 singlestep_pc = pc;
1635 }
1636 return hw_step;
1637 }
1638
1639 /* Return a ptid representing the set of threads that we will proceed,
1640 from the perspective of the user/frontend. We may actually resume
1641 fewer threads at first, e.g., if a thread is stopped at a
1642 breakpoint that needs stepping-off, but that should not be visible
1643 to the user/frontend, and neither should the frontend/user be
1644 allowed to proceed any of the threads that happen to be stopped for
1645 internal run control handling, if a previous command wanted them
1646 resumed. */
1647
1648 ptid_t
1649 user_visible_resume_ptid (int step)
1650 {
1651 /* By default, resume all threads of all processes. */
1652 ptid_t resume_ptid = RESUME_ALL;
1653
1654 /* Maybe resume only all threads of the current process. */
1655 if (!sched_multi && target_supports_multi_process ())
1656 {
1657 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1658 }
1659
1660 /* Maybe resume a single thread after all. */
1661 if (non_stop)
1662 {
1663 /* With non-stop mode on, threads are always handled
1664 individually. */
1665 resume_ptid = inferior_ptid;
1666 }
1667 else if ((scheduler_mode == schedlock_on)
1668 || (scheduler_mode == schedlock_step
1669 && (step || singlestep_breakpoints_inserted_p)))
1670 {
1671 /* User-settable 'scheduler' mode requires solo thread resume. */
1672 resume_ptid = inferior_ptid;
1673 }
1674
1675 return resume_ptid;
1676 }
1677
1678 /* Resume the inferior, but allow a QUIT. This is useful if the user
1679 wants to interrupt some lengthy single-stepping operation
1680 (for child processes, the SIGINT goes to the inferior, and so
1681 we get a SIGINT random_signal, but for remote debugging and perhaps
1682 other targets, that's not true).
1683
1684 STEP nonzero if we should step (zero to continue instead).
1685 SIG is the signal to give the inferior (zero for none). */
1686 void
1687 resume (int step, enum target_signal sig)
1688 {
1689 int should_resume = 1;
1690 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1691 struct regcache *regcache = get_current_regcache ();
1692 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1693 struct thread_info *tp = inferior_thread ();
1694 CORE_ADDR pc = regcache_read_pc (regcache);
1695 struct address_space *aspace = get_regcache_aspace (regcache);
1696
1697 QUIT;
1698
1699 if (current_inferior ()->waiting_for_vfork_done)
1700 {
1701 /* Don't try to single-step a vfork parent that is waiting for
1702 the child to get out of the shared memory region (by exec'ing
1703 or exiting). This is particularly important on software
1704 single-step archs, as the child process would trip on the
1705 software single step breakpoint inserted for the parent
1706 process. Since the parent will not actually execute any
1707 instruction until the child is out of the shared region (such
1708 are vfork's semantics), it is safe to simply continue it.
1709 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
1710 the parent, and tell it to `keep_going', which automatically
1711 re-sets stepping for it. */
1712 if (debug_infrun)
1713 fprintf_unfiltered (gdb_stdlog,
1714 "infrun: resume : clear step\n");
1715 step = 0;
1716 }
1717
1718 if (debug_infrun)
1719 fprintf_unfiltered (gdb_stdlog,
1720 "infrun: resume (step=%d, signal=%d), "
1721 "trap_expected=%d, current thread [%s] at %s\n",
1722 step, sig, tp->control.trap_expected,
1723 target_pid_to_str (inferior_ptid),
1724 paddress (gdbarch, pc));
1725
1726 /* Normally, by the time we reach `resume', the breakpoints are either
1727 removed or inserted, as appropriate. The exception is if we're sitting
1728 at a permanent breakpoint; we need to step over it, but permanent
1729 breakpoints can't be removed. So we have to test for it here. */
1730 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
1731 {
1732 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1733 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1734 else
1735 error (_("\
1736 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1737 how to step past a permanent breakpoint on this architecture. Try using\n\
1738 a command like `return' or `jump' to continue execution."));
1739 }
1740
1741 /* If enabled, step over breakpoints by executing a copy of the
1742 instruction at a different address.
1743
1744 We can't use displaced stepping when we have a signal to deliver;
1745 the comments for displaced_step_prepare explain why. The
1746 comments in the handle_inferior event for dealing with 'random
1747 signals' explain what we do instead.
1748
1749 We can't use displaced stepping while we are waiting for a vfork_done
1750 event; displaced stepping would break the vfork child much like a
1751 software single-step breakpoint would. */
1752 if (use_displaced_stepping (gdbarch)
1753 && (tp->control.trap_expected
1754 || (step && gdbarch_software_single_step_p (gdbarch)))
1755 && sig == TARGET_SIGNAL_0
1756 && !current_inferior ()->waiting_for_vfork_done)
1757 {
1758 struct displaced_step_inferior_state *displaced;
1759
1760 if (!displaced_step_prepare (inferior_ptid))
1761 {
1762 /* Got placed in displaced stepping queue. Will be resumed
1763 later when all the currently queued displaced stepping
1764 requests finish. The thread is not executing at this point,
1765 and the call to set_executing will be made later. But we
1766 need to call set_running here, since from frontend point of view,
1767 the thread is running. */
1768 set_running (inferior_ptid, 1);
1769 discard_cleanups (old_cleanups);
1770 return;
1771 }
1772
1773 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1774 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
1775 displaced->step_closure);
1776 }
1777
1778 /* Do we need to do it the hard way, w/temp breakpoints? */
1779 else if (step)
1780 step = maybe_software_singlestep (gdbarch, pc);
1781
1782 /* Currently, our software single-step implementation leads to different
1783 results than hardware single-stepping in one situation: when stepping
1784 into delivering a signal which has an associated signal handler,
1785 hardware single-step will stop at the first instruction of the handler,
1786 while software single-step will simply skip execution of the handler.
1787
1788 For now, this difference in behavior is accepted since there is no
1789 easy way to actually implement single-stepping into a signal handler
1790 without kernel support.
1791
1792 However, there is one scenario where this difference leads to follow-on
1793 problems: if we're stepping off a breakpoint by removing all breakpoints
1794 and then single-stepping. In this case, the software single-step
1795 behavior means that even if there is a *breakpoint* in the signal
1796 handler, GDB still would not stop.
1797
1798 Fortunately, we can at least fix this particular issue. We detect
1799 here the case where we are about to deliver a signal while software
1800 single-stepping with breakpoints removed. In this situation, we
1801 revert the decisions to remove all breakpoints and insert single-
1802 step breakpoints, and instead we install a step-resume breakpoint
1803 at the current address, deliver the signal without stepping, and
1804 once we arrive back at the step-resume breakpoint, actually step
1805 over the breakpoint we originally wanted to step over. */
1806 if (singlestep_breakpoints_inserted_p
1807 && tp->control.trap_expected && sig != TARGET_SIGNAL_0)
1808 {
1809 /* If we have nested signals or a pending signal is delivered
1810 immediately after a handler returns, we might already have
1811 a step-resume breakpoint set on the earlier handler. We cannot
1812 set another step-resume breakpoint; just continue on until the
1813 original breakpoint is hit. */
1814 if (tp->control.step_resume_breakpoint == NULL)
1815 {
1816 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
1817 tp->step_after_step_resume_breakpoint = 1;
1818 }
1819
1820 remove_single_step_breakpoints ();
1821 singlestep_breakpoints_inserted_p = 0;
1822
1823 insert_breakpoints ();
1824 tp->control.trap_expected = 0;
1825 }
1826
1827 if (should_resume)
1828 {
1829 ptid_t resume_ptid;
1830
1831 /* If STEP is set, it's a request to use hardware stepping
1832 facilities. But in that case, we should never
1833 use a singlestep breakpoint. */
1834 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1835
1836 /* Decide the set of threads to ask the target to resume. Start
1837 by assuming everything will be resumed, then narrow the set
1838 by applying increasingly restrictive conditions. */
1839 resume_ptid = user_visible_resume_ptid (step);
1840
1841 /* Maybe resume a single thread after all. */
1842 if (singlestep_breakpoints_inserted_p
1843 && stepping_past_singlestep_breakpoint)
1844 {
1845 /* The situation here is as follows. In thread T1 we wanted to
1846 single-step. Lacking hardware single-stepping we've
1847 set a breakpoint at the PC of the next instruction -- call it
1848 P. After resuming, we've hit that breakpoint in thread T2.
1849 Now we've removed the original breakpoint, inserted a breakpoint
1850 at P+1, and are trying to single-step T2 past that breakpoint.
1851 We need to step only T2: if T1 is allowed to run freely,
1852 it can run past P, and if other threads are allowed to run,
1853 they can hit the breakpoint at P+1, and nested hits of single-step
1854 breakpoints are not something we'd want -- that's complicated
1855 to support, and has no value. */
1856 resume_ptid = inferior_ptid;
1857 }
1858 else if ((step || singlestep_breakpoints_inserted_p)
1859 && tp->control.trap_expected)
1860 {
1861 /* We're allowing a thread to run past a breakpoint it has
1862 hit, by single-stepping the thread with the breakpoint
1863 removed. In which case, we need to single-step only this
1864 thread, and keep others stopped, as they can miss this
1865 breakpoint if allowed to run.
1866
1867 The current code actually removes all breakpoints when
1868 doing this, not just the one being stepped over, so if we
1869 let other threads run, we can actually miss any
1870 breakpoint, not just the one at PC. */
1871 resume_ptid = inferior_ptid;
1872 }
1873
1874 if (gdbarch_cannot_step_breakpoint (gdbarch))
1875 {
1876 /* Most targets can step a breakpoint instruction, thus
1877 executing it normally. But if this one cannot, just
1878 continue and we will hit it anyway. */
1879 if (step && breakpoint_inserted_here_p (aspace, pc))
1880 step = 0;
1881 }
1882
1883 if (debug_displaced
1884 && use_displaced_stepping (gdbarch)
1885 && tp->control.trap_expected)
1886 {
1887 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1888 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1889 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1890 gdb_byte buf[4];
1891
1892 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1893 paddress (resume_gdbarch, actual_pc));
1894 read_memory (actual_pc, buf, sizeof (buf));
1895 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1896 }
1897
1898 /* Install inferior's terminal modes. */
1899 target_terminal_inferior ();
1900
1901 /* Avoid confusing the next resume, if the next stop/resume
1902 happens to apply to another thread. */
1903 tp->suspend.stop_signal = TARGET_SIGNAL_0;
1904
1905 /* Advise target which signals may be handled silently. If we have
1906 removed breakpoints because we are stepping over one (which can
1907 happen only if we are not using displaced stepping), we need to
1908 receive all signals to avoid accidentally skipping a breakpoint
1909 during execution of a signal handler. */
1910 if ((step || singlestep_breakpoints_inserted_p)
1911 && tp->control.trap_expected
1912 && !use_displaced_stepping (gdbarch))
1913 target_pass_signals (0, NULL);
1914 else
1915 target_pass_signals ((int) TARGET_SIGNAL_LAST, signal_pass);
1916
1917 target_resume (resume_ptid, step, sig);
1918 }
1919
1920 discard_cleanups (old_cleanups);
1921 }
1922 \f
1923 /* Proceeding. */
1924
1925 /* Clear out all variables saying what to do when inferior is continued.
1926 First do this, then set the ones you want, then call `proceed'. */
1927
1928 static void
1929 clear_proceed_status_thread (struct thread_info *tp)
1930 {
1931 if (debug_infrun)
1932 fprintf_unfiltered (gdb_stdlog,
1933 "infrun: clear_proceed_status_thread (%s)\n",
1934 target_pid_to_str (tp->ptid));
1935
1936 tp->control.trap_expected = 0;
1937 tp->control.step_range_start = 0;
1938 tp->control.step_range_end = 0;
1939 tp->control.step_frame_id = null_frame_id;
1940 tp->control.step_stack_frame_id = null_frame_id;
1941 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
1942 tp->stop_requested = 0;
1943
1944 tp->control.stop_step = 0;
1945
1946 tp->control.proceed_to_finish = 0;
1947
1948 /* Discard any remaining commands or status from previous stop. */
1949 bpstat_clear (&tp->control.stop_bpstat);
1950 }
1951
1952 static int
1953 clear_proceed_status_callback (struct thread_info *tp, void *data)
1954 {
1955 if (is_exited (tp->ptid))
1956 return 0;
1957
1958 clear_proceed_status_thread (tp);
1959 return 0;
1960 }
1961
1962 void
1963 clear_proceed_status (void)
1964 {
1965 if (!non_stop)
1966 {
1967 /* In all-stop mode, delete the per-thread status of all
1968 threads. Even if inferior_ptid is null_ptid, there may be
1969 threads on the list. E.g., we may be launching a new
1970 process, while selecting the executable. */
1971 iterate_over_threads (clear_proceed_status_callback, NULL);
1972 }
1973
1974 if (!ptid_equal (inferior_ptid, null_ptid))
1975 {
1976 struct inferior *inferior;
1977
1978 if (non_stop)
1979 {
1980 /* If in non-stop mode, only delete the per-thread status of
1981 the current thread. */
1982 clear_proceed_status_thread (inferior_thread ());
1983 }
1984
1985 inferior = current_inferior ();
1986 inferior->control.stop_soon = NO_STOP_QUIETLY;
1987 }
1988
1989 stop_after_trap = 0;
1990
1991 observer_notify_about_to_proceed ();
1992
1993 if (stop_registers)
1994 {
1995 regcache_xfree (stop_registers);
1996 stop_registers = NULL;
1997 }
1998 }
1999
2000 /* Check the current thread against the thread that reported the most recent
2001 event. If a step-over is required return TRUE and set the current thread
2002 to the old thread. Otherwise return FALSE.
2003
2004 This should be suitable for any targets that support threads. */
2005
2006 static int
2007 prepare_to_proceed (int step)
2008 {
2009 ptid_t wait_ptid;
2010 struct target_waitstatus wait_status;
2011 int schedlock_enabled;
2012
2013 /* With non-stop mode on, threads are always handled individually. */
2014 gdb_assert (! non_stop);
2015
2016 /* Get the last target status returned by target_wait(). */
2017 get_last_target_status (&wait_ptid, &wait_status);
2018
2019 /* Make sure we were stopped at a breakpoint. */
2020 if (wait_status.kind != TARGET_WAITKIND_STOPPED
2021 || (wait_status.value.sig != TARGET_SIGNAL_TRAP
2022 && wait_status.value.sig != TARGET_SIGNAL_ILL
2023 && wait_status.value.sig != TARGET_SIGNAL_SEGV
2024 && wait_status.value.sig != TARGET_SIGNAL_EMT))
2025 {
2026 return 0;
2027 }
2028
2029 schedlock_enabled = (scheduler_mode == schedlock_on
2030 || (scheduler_mode == schedlock_step
2031 && step));
2032
2033 /* Don't switch over to WAIT_PTID if scheduler locking is on. */
2034 if (schedlock_enabled)
2035 return 0;
2036
2037 /* Don't switch over if we're about to resume some process other
2038 than WAIT_PTID's, and schedule-multiple is off. */
2039 if (!sched_multi
2040 && ptid_get_pid (wait_ptid) != ptid_get_pid (inferior_ptid))
2041 return 0;
2042
2043 /* Switched over from WAIT_PTID. */
2044 if (!ptid_equal (wait_ptid, minus_one_ptid)
2045 && !ptid_equal (inferior_ptid, wait_ptid))
2046 {
2047 struct regcache *regcache = get_thread_regcache (wait_ptid);
2048
2049 if (breakpoint_here_p (get_regcache_aspace (regcache),
2050 regcache_read_pc (regcache)))
2051 {
2052 /* If stepping, remember current thread to switch back to. */
2053 if (step)
2054 deferred_step_ptid = inferior_ptid;
2055
2056 /* Switch back to the WAIT_PTID thread. */
2057 switch_to_thread (wait_ptid);
2058
2059 if (debug_infrun)
2060 fprintf_unfiltered (gdb_stdlog,
2061 "infrun: prepare_to_proceed (step=%d), "
2062 "switched to [%s]\n",
2063 step, target_pid_to_str (inferior_ptid));
2064
2065 /* We return 1 to indicate that there is a breakpoint here,
2066 so we need to step over it before continuing to avoid
2067 hitting it straight away. */
2068 return 1;
2069 }
2070 }
2071
2072 return 0;
2073 }
2074
2075 /* Basic routine for continuing the program in various fashions.
2076
2077 ADDR is the address to resume at, or -1 for resume where stopped.
2078 SIGGNAL is the signal to give it, or 0 for none,
2079 or -1 for act according to how it stopped.
2080 STEP is nonzero if should trap after one instruction.
2081 -1 means return after that and print nothing.
2082 You should probably set various step_... variables
2083 before calling here, if you are stepping.
2084
2085 You should call clear_proceed_status before calling proceed. */
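
/* For instance, a plain "continue from where we stopped, with the
   default signal disposition, no single-step" boils down to something
   like:

     clear_proceed_status ();
     proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);  */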
2086
2087 void
2088 proceed (CORE_ADDR addr, enum target_signal siggnal, int step)
2089 {
2090 struct regcache *regcache;
2091 struct gdbarch *gdbarch;
2092 struct thread_info *tp;
2093 CORE_ADDR pc;
2094 struct address_space *aspace;
2095 int oneproc = 0;
2096
2097 /* If we're stopped at a fork/vfork, follow the branch set by the
2098 "set follow-fork-mode" command; otherwise, we'll just proceed
2099 resuming the current thread. */
2100 if (!follow_fork ())
2101 {
2102 /* The target for some reason decided not to resume. */
2103 normal_stop ();
2104 if (target_can_async_p ())
2105 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2106 return;
2107 }
2108
2109 /* We'll update this if & when we switch to a new thread. */
2110 previous_inferior_ptid = inferior_ptid;
2111
2112 regcache = get_current_regcache ();
2113 gdbarch = get_regcache_arch (regcache);
2114 aspace = get_regcache_aspace (regcache);
2115 pc = regcache_read_pc (regcache);
2116
2117 if (step > 0)
2118 step_start_function = find_pc_function (pc);
2119 if (step < 0)
2120 stop_after_trap = 1;
2121
2122 if (addr == (CORE_ADDR) -1)
2123 {
2124 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
2125 && execution_direction != EXEC_REVERSE)
2126 /* There is a breakpoint at the address we will resume at;
2127 step one instruction before inserting breakpoints so that
2128 we do not stop right away (and report a second hit at this
2129 breakpoint).
2130
2131 Note, we don't do this in reverse, because we won't
2132 actually be executing the breakpoint insn anyway.
2133 We'll be (un-)executing the previous instruction. */
2134
2135 oneproc = 1;
2136 else if (gdbarch_single_step_through_delay_p (gdbarch)
2137 && gdbarch_single_step_through_delay (gdbarch,
2138 get_current_frame ()))
2139 /* We stepped onto an instruction that needs to be stepped
2140 again before re-inserting the breakpoint; do so. */
2141 oneproc = 1;
2142 }
2143 else
2144 {
2145 regcache_write_pc (regcache, addr);
2146 }
2147
2148 if (debug_infrun)
2149 fprintf_unfiltered (gdb_stdlog,
2150 "infrun: proceed (addr=%s, signal=%d, step=%d)\n",
2151 paddress (gdbarch, addr), siggnal, step);
2152
2153 if (non_stop)
2154 /* In non-stop, each thread is handled individually. The context
2155 must already be set to the right thread here. */
2156 ;
2157 else
2158 {
2159 /* In a multi-threaded task we may select another thread and
2160 then continue or step.
2161
2162 But if the old thread was stopped at a breakpoint, it will
2163 immediately cause another breakpoint stop without any
2164 execution (i.e. it will report a breakpoint hit incorrectly).
2165 So we must step over it first.
2166
2167 prepare_to_proceed checks the current thread against the
2168 thread that reported the most recent event. If a step-over
2169 is required it returns TRUE and sets the current thread to
2170 the old thread. */
2171 if (prepare_to_proceed (step))
2172 oneproc = 1;
2173 }
2174
2175 /* prepare_to_proceed may change the current thread. */
2176 tp = inferior_thread ();
2177
2178 if (oneproc)
2179 {
2180 tp->control.trap_expected = 1;
2181 /* If displaced stepping is enabled, we can step over the
2182 breakpoint without hitting it, so leave all breakpoints
2183 inserted. Otherwise we need to disable all breakpoints, step
2184 one instruction, and then re-add them when that step is
2185 finished. */
2186 if (!use_displaced_stepping (gdbarch))
2187 remove_breakpoints ();
2188 }
2189
2190 /* We can insert breakpoints if we're not trying to step over one,
2191 or if we are stepping over one but we're using displaced stepping
2192 to do so. */
2193 if (! tp->control.trap_expected || use_displaced_stepping (gdbarch))
2194 insert_breakpoints ();
2195
2196 if (!non_stop)
2197 {
2198 /* Pass the last stop signal to the thread we're resuming,
2199 irrespective of whether the current thread is the thread that
2200 got the last event or not. This was historically GDB's
2201 behaviour before keeping a stop_signal per thread. */
2202
2203 struct thread_info *last_thread;
2204 ptid_t last_ptid;
2205 struct target_waitstatus last_status;
2206
2207 get_last_target_status (&last_ptid, &last_status);
2208 if (!ptid_equal (inferior_ptid, last_ptid)
2209 && !ptid_equal (last_ptid, null_ptid)
2210 && !ptid_equal (last_ptid, minus_one_ptid))
2211 {
2212 last_thread = find_thread_ptid (last_ptid);
2213 if (last_thread)
2214 {
2215 tp->suspend.stop_signal = last_thread->suspend.stop_signal;
2216 last_thread->suspend.stop_signal = TARGET_SIGNAL_0;
2217 }
2218 }
2219 }
2220
2221 if (siggnal != TARGET_SIGNAL_DEFAULT)
2222 tp->suspend.stop_signal = siggnal;
2223 /* If this signal should not be seen by the program,
2224 give it zero. Used for debugging signals. */
2225 else if (!signal_program[tp->suspend.stop_signal])
2226 tp->suspend.stop_signal = TARGET_SIGNAL_0;
2227
2228 annotate_starting ();
2229
2230 /* Make sure that output from GDB appears before output from the
2231 inferior. */
2232 gdb_flush (gdb_stdout);
2233
2234 /* Refresh prev_pc value just prior to resuming. This used to be
2235 done in stop_stepping, however, setting prev_pc there did not handle
2236 scenarios such as inferior function calls or returning from
2237 a function via the return command. In those cases, the prev_pc
2238 value was not set properly for subsequent commands. The prev_pc value
2239 is used to initialize the starting line number in the ecs. With an
2240 invalid value, the gdb next command ends up stopping at the position
2241 represented by the next line table entry past our start position.
2242 On platforms that generate one line table entry per line, this
2243 is not a problem. However, on the ia64, the compiler generates
2244 extraneous line table entries that do not increase the line number.
2245 When we issue the gdb next command on the ia64 after an inferior call
2246 or a return command, we often end up a few instructions forward, still
2247 within the original line we started.
2248
2249 An attempt was made to refresh the prev_pc at the same time the
2250 execution_control_state is initialized (for instance, just before
2251 waiting for an inferior event). But this approach did not work
2252 because of platforms that use ptrace, where the pc register cannot
2253 be read unless the inferior is stopped. At that point, we are not
2254 guaranteed the inferior is stopped and so the regcache_read_pc() call
2255 can fail. Setting the prev_pc value here ensures the value is updated
2256 correctly when the inferior is stopped. */
2257 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2258
2259 /* Fill in with reasonable starting values. */
2260 init_thread_stepping_state (tp);
2261
2262 /* Reset to normal state. */
2263 init_infwait_state ();
2264
2265 /* Resume inferior. */
2266 resume (oneproc || step || bpstat_should_step (), tp->suspend.stop_signal);
2267
2268 /* Wait for it to stop (if not standalone)
2269 and in any case decode why it stopped, and act accordingly. */
2270 /* Do this only if we are not using the event loop, or if the target
2271 does not support asynchronous execution. */
2272 if (!target_can_async_p ())
2273 {
2274 wait_for_inferior ();
2275 normal_stop ();
2276 }
2277 }
2278 \f
2279
2280 /* Start remote-debugging of a machine over a serial link. */
2281
2282 void
2283 start_remote (int from_tty)
2284 {
2285 struct inferior *inferior;
2286
2287 inferior = current_inferior ();
2288 inferior->control.stop_soon = STOP_QUIETLY_REMOTE;
2289
2290 /* Always go on waiting for the target, regardless of the mode. */
2291 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2292 indicate to wait_for_inferior that a target should timeout if
2293 nothing is returned (instead of just blocking). Because of this,
2294 targets expecting an immediate response need to, internally, set
2295 things up so that the target_wait() is forced to eventually
2296 timeout. */
2297 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2298 differentiate to its caller what the state of the target is after
2299 the initial open has been performed. Here we're assuming that
2300 the target has stopped. It should be possible to eventually have
2301 target_open() return to the caller an indication that the target
2302 is currently running and GDB state should be set to the same as
2303 for an async run. */
2304 wait_for_inferior ();
2305
2306 /* Now that the inferior has stopped, do any bookkeeping like
2307 loading shared libraries. We want to do this before normal_stop,
2308 so that the displayed frame is up to date. */
2309 post_create_inferior (&current_target, from_tty);
2310
2311 normal_stop ();
2312 }
2313
2314 /* Initialize static vars when a new inferior begins. */
2315
2316 void
2317 init_wait_for_inferior (void)
2318 {
2319 /* These are meaningless until the first time through wait_for_inferior. */
2320
2321 breakpoint_init_inferior (inf_starting);
2322
2323 clear_proceed_status ();
2324
2325 stepping_past_singlestep_breakpoint = 0;
2326 deferred_step_ptid = null_ptid;
2327
2328 target_last_wait_ptid = minus_one_ptid;
2329
2330 previous_inferior_ptid = inferior_ptid;
2331 init_infwait_state ();
2332
2333 /* Discard any skipped inlined frames. */
2334 clear_inline_frame_state (minus_one_ptid);
2335 }
2336
2337 \f
2338 /* This enum encodes possible reasons for doing a target_wait, so that
2339 wfi can call target_wait in one place. (Ultimately the call will be
2340 moved out of the infinite loop entirely.) */
2341
2342 enum infwait_states
2343 {
2344 infwait_normal_state,
2345 infwait_thread_hop_state,
2346 infwait_step_watch_state,
2347 infwait_nonstep_watch_state
2348 };
2349
2350 /* The PTID we'll do a target_wait on. */
2351 ptid_t waiton_ptid;
2352
2353 /* Current inferior wait state. */
2354 enum infwait_states infwait_state;
2355
2356 /* Data to be passed around while handling an event. This data is
2357 discarded between events. */
2358 struct execution_control_state
2359 {
2360 ptid_t ptid;
2361 /* The thread that got the event, if this was a thread event; NULL
2362 otherwise. */
2363 struct thread_info *event_thread;
2364
2365 struct target_waitstatus ws;
2366 int random_signal;
2367 int stop_func_filled_in;
2368 CORE_ADDR stop_func_start;
2369 CORE_ADDR stop_func_end;
2370 char *stop_func_name;
2371 int new_thread_event;
2372 int wait_some_more;
2373 };
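
/* A condensed sketch of how this structure is typically driven (see
   wait_for_inferior and fetch_inferior_event below; cleanups and
   error handling omitted):

     struct execution_control_state ecss;
     struct execution_control_state *ecs = &ecss;

     memset (ecs, 0, sizeof (*ecs));
     ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
     handle_inferior_event (ecs);
     if (!ecs->wait_some_more)
       normal_stop ();  */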
2374
2375 static void handle_inferior_event (struct execution_control_state *ecs);
2376
2377 static void handle_step_into_function (struct gdbarch *gdbarch,
2378 struct execution_control_state *ecs);
2379 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2380 struct execution_control_state *ecs);
2381 static void check_exception_resume (struct execution_control_state *,
2382 struct frame_info *, struct symbol *);
2383
2384 static void stop_stepping (struct execution_control_state *ecs);
2385 static void prepare_to_wait (struct execution_control_state *ecs);
2386 static void keep_going (struct execution_control_state *ecs);
2387
2388 /* Callback for iterate over threads. If the thread is stopped, but
2389 the user/frontend doesn't know about that yet, go through
2390 normal_stop, as if the thread had just stopped now. ARG points at
2391 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2392 ptid_is_pid(PTID) is true, applies to all threads of the process
2393 pointed at by PTID. Otherwise, applies only to the thread pointed at by
2394 PTID. */
2395
2396 static int
2397 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2398 {
2399 ptid_t ptid = * (ptid_t *) arg;
2400
2401 if ((ptid_equal (info->ptid, ptid)
2402 || ptid_equal (minus_one_ptid, ptid)
2403 || (ptid_is_pid (ptid)
2404 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2405 && is_running (info->ptid)
2406 && !is_executing (info->ptid))
2407 {
2408 struct cleanup *old_chain;
2409 struct execution_control_state ecss;
2410 struct execution_control_state *ecs = &ecss;
2411
2412 memset (ecs, 0, sizeof (*ecs));
2413
2414 old_chain = make_cleanup_restore_current_thread ();
2415
2416 switch_to_thread (info->ptid);
2417
2418 /* Go through handle_inferior_event/normal_stop, so we always
2419 have consistent output as if the stop event had been
2420 reported. */
2421 ecs->ptid = info->ptid;
2422 ecs->event_thread = find_thread_ptid (info->ptid);
2423 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2424 ecs->ws.value.sig = TARGET_SIGNAL_0;
2425
2426 handle_inferior_event (ecs);
2427
2428 if (!ecs->wait_some_more)
2429 {
2430 struct thread_info *tp;
2431
2432 normal_stop ();
2433
2434 /* Finish off the continuations. */
2435 tp = inferior_thread ();
2436 do_all_intermediate_continuations_thread (tp, 1);
2437 do_all_continuations_thread (tp, 1);
2438 }
2439
2440 do_cleanups (old_chain);
2441 }
2442
2443 return 0;
2444 }
2445
2446 /* This function is attached as a "thread_stop_requested" observer.
2447 Cleanup local state that assumed the PTID was to be resumed, and
2448 report the stop to the frontend. */
2449
2450 static void
2451 infrun_thread_stop_requested (ptid_t ptid)
2452 {
2453 struct displaced_step_inferior_state *displaced;
2454
2455 /* PTID was requested to stop. Remove it from the displaced
2456 stepping queue, so we don't try to resume it automatically. */
2457
2458 for (displaced = displaced_step_inferior_states;
2459 displaced;
2460 displaced = displaced->next)
2461 {
2462 struct displaced_step_request *it, **prev_next_p;
2463
2464 it = displaced->step_request_queue;
2465 prev_next_p = &displaced->step_request_queue;
2466 while (it)
2467 {
2468 if (ptid_match (it->ptid, ptid))
2469 {
2470 *prev_next_p = it->next;
2471 it->next = NULL;
2472 xfree (it);
2473 }
2474 else
2475 {
2476 prev_next_p = &it->next;
2477 }
2478
2479 it = *prev_next_p;
2480 }
2481 }
2482
2483 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2484 }
2485
2486 static void
2487 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2488 {
2489 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2490 nullify_last_target_wait_ptid ();
2491 }
2492
2493 /* Callback for iterate_over_threads. */
2494
2495 static int
2496 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2497 {
2498 if (is_exited (info->ptid))
2499 return 0;
2500
2501 delete_step_resume_breakpoint (info);
2502 delete_exception_resume_breakpoint (info);
2503 return 0;
2504 }
2505
2506 /* In all-stop, delete the step resume breakpoint of any thread that
2507 had one. In non-stop, delete the step resume breakpoint of the
2508 thread that just stopped. */
2509
2510 static void
2511 delete_step_thread_step_resume_breakpoint (void)
2512 {
2513 if (!target_has_execution
2514 || ptid_equal (inferior_ptid, null_ptid))
2515 /* If the inferior has exited, we have already deleted the step
2516 resume breakpoints out of GDB's lists. */
2517 return;
2518
2519 if (non_stop)
2520 {
2521 /* If in non-stop mode, only delete the step-resume or
2522 longjmp-resume breakpoint of the thread that just stopped
2523 stepping. */
2524 struct thread_info *tp = inferior_thread ();
2525
2526 delete_step_resume_breakpoint (tp);
2527 delete_exception_resume_breakpoint (tp);
2528 }
2529 else
2530 /* In all-stop mode, delete all step-resume and longjmp-resume
2531 breakpoints of any thread that had them. */
2532 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2533 }
2534
2535 /* A cleanup wrapper. */
2536
2537 static void
2538 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2539 {
2540 delete_step_thread_step_resume_breakpoint ();
2541 }
2542
2543 /* Pretty print the results of target_wait, for debugging purposes. */
2544
2545 static void
2546 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2547 const struct target_waitstatus *ws)
2548 {
2549 char *status_string = target_waitstatus_to_string (ws);
2550 struct ui_file *tmp_stream = mem_fileopen ();
2551 char *text;
2552
2553 /* The text is split over several lines because it was getting too long.
2554 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2555 output as a unit; we want only one timestamp printed if debug_timestamp
2556 is set. */
2557
2558 fprintf_unfiltered (tmp_stream,
2559 "infrun: target_wait (%d", PIDGET (waiton_ptid));
2560 if (PIDGET (waiton_ptid) != -1)
2561 fprintf_unfiltered (tmp_stream,
2562 " [%s]", target_pid_to_str (waiton_ptid));
2563 fprintf_unfiltered (tmp_stream, ", status) =\n");
2564 fprintf_unfiltered (tmp_stream,
2565 "infrun: %d [%s],\n",
2566 PIDGET (result_ptid), target_pid_to_str (result_ptid));
2567 fprintf_unfiltered (tmp_stream,
2568 "infrun: %s\n",
2569 status_string);
2570
2571 text = ui_file_xstrdup (tmp_stream, NULL);
2572
2573 /* This uses %s in part to handle %'s in the text, but also to avoid
2574 a gcc error: the format attribute requires a string literal. */
2575 fprintf_unfiltered (gdb_stdlog, "%s", text);
2576
2577 xfree (status_string);
2578 xfree (text);
2579 ui_file_delete (tmp_stream);
2580 }
2581
2582 /* Prepare and stabilize the inferior for detaching it. E.g.,
2583 detaching while a thread is displaced stepping is a recipe for
2584 crashing it, as nothing would readjust the PC out of the scratch
2585 pad. */
2586
2587 void
2588 prepare_for_detach (void)
2589 {
2590 struct inferior *inf = current_inferior ();
2591 ptid_t pid_ptid = pid_to_ptid (inf->pid);
2592 struct cleanup *old_chain_1;
2593 struct displaced_step_inferior_state *displaced;
2594
2595 displaced = get_displaced_stepping_state (inf->pid);
2596
2597 /* Is any thread of this process displaced stepping? If not,
2598 there's nothing else to do. */
2599 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
2600 return;
2601
2602 if (debug_infrun)
2603 fprintf_unfiltered (gdb_stdlog,
2604 "displaced-stepping in-process while detaching");
2605
2606 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
2607 inf->detaching = 1;
2608
2609 while (!ptid_equal (displaced->step_ptid, null_ptid))
2610 {
2611 struct cleanup *old_chain_2;
2612 struct execution_control_state ecss;
2613 struct execution_control_state *ecs;
2614
2615 ecs = &ecss;
2616 memset (ecs, 0, sizeof (*ecs));
2617
2618 overlay_cache_invalid = 1;
2619
2620 if (deprecated_target_wait_hook)
2621 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
2622 else
2623 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
2624
2625 if (debug_infrun)
2626 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
2627
2628 /* If an error happens while handling the event, propagate GDB's
2629 knowledge of the executing state to the frontend/user running
2630 state. */
2631 old_chain_2 = make_cleanup (finish_thread_state_cleanup,
2632 &minus_one_ptid);
2633
2634 /* In non-stop mode, each thread is handled individually.
2635 Switch early, so the global state is set correctly for this
2636 thread. */
2637 if (non_stop
2638 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2639 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2640 context_switch (ecs->ptid);
2641
2642 /* Now figure out what to do with the result. */
2643 handle_inferior_event (ecs);
2644
2645 /* No error, don't finish the state yet. */
2646 discard_cleanups (old_chain_2);
2647
2648 /* Breakpoints and watchpoints are not installed on the target
2649 at this point, and signals are passed directly to the
2650 inferior, so this must mean the process is gone. */
2651 if (!ecs->wait_some_more)
2652 {
2653 discard_cleanups (old_chain_1);
2654 error (_("Program exited while detaching"));
2655 }
2656 }
2657
2658 discard_cleanups (old_chain_1);
2659 }
2660
2661 /* Wait for control to return from inferior to debugger.
2662
2663 If inferior gets a signal, we may decide to start it up again
2664 instead of returning. That is why there is a loop in this function.
2665 When this function actually returns it means the inferior
2666 should be left stopped and GDB should read more commands. */
2667
2668 void
2669 wait_for_inferior (void)
2670 {
2671 struct cleanup *old_cleanups;
2672 struct execution_control_state ecss;
2673 struct execution_control_state *ecs;
2674
2675 if (debug_infrun)
2676 fprintf_unfiltered
2677 (gdb_stdlog, "infrun: wait_for_inferior ()\n");
2678
2679 old_cleanups =
2680 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2681
2682 ecs = &ecss;
2683 memset (ecs, 0, sizeof (*ecs));
2684
2685 while (1)
2686 {
2687 struct cleanup *old_chain;
2688
2689 overlay_cache_invalid = 1;
2690
2691 if (deprecated_target_wait_hook)
2692 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2693 else
2694 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2695
2696 if (debug_infrun)
2697 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2698
2699 /* If an error happens while handling the event, propagate GDB's
2700 knowledge of the executing state to the frontend/user running
2701 state. */
2702 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2703
2704 if (ecs->ws.kind == TARGET_WAITKIND_SYSCALL_ENTRY
2705 || ecs->ws.kind == TARGET_WAITKIND_SYSCALL_RETURN)
2706 ecs->ws.value.syscall_number = UNKNOWN_SYSCALL;
2707
2708 /* Now figure out what to do with the result. */
2709 handle_inferior_event (ecs);
2710
2711 /* No error, don't finish the state yet. */
2712 discard_cleanups (old_chain);
2713
2714 if (!ecs->wait_some_more)
2715 break;
2716 }
2717
2718 do_cleanups (old_cleanups);
2719 }
2720
2721 /* Asynchronous version of wait_for_inferior. It is called by the
2722 event loop whenever a change of state is detected on the file
2723 descriptor corresponding to the target. It can be called more than
2724 once to complete a single execution command. In such cases we need
2725 to keep the state in a global variable ECSS. If it is the last time
2726 that this function is called for a single execution command, then
2727 report to the user that the inferior has stopped, and do the
2728 necessary cleanups. */
2729
2730 void
2731 fetch_inferior_event (void *client_data)
2732 {
2733 struct execution_control_state ecss;
2734 struct execution_control_state *ecs = &ecss;
2735 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2736 struct cleanup *ts_old_chain;
2737 int was_sync = sync_execution;
2738 int cmd_done = 0;
2739
2740 memset (ecs, 0, sizeof (*ecs));
2741
2742 /* We're handling a live event, so make sure we're doing live
2743 debugging. If we're looking at traceframes while the target is
2744 running, we're going to need to get back to that mode after
2745 handling the event. */
2746 if (non_stop)
2747 {
2748 make_cleanup_restore_current_traceframe ();
2749 set_current_traceframe (-1);
2750 }
2751
2752 if (non_stop)
2753 /* In non-stop mode, the user/frontend should not notice a thread
2754 switch due to internal events. Make sure we revert to the
2755 user selected thread and frame after handling the event and
2756 running any breakpoint commands. */
2757 make_cleanup_restore_current_thread ();
2758
2759 overlay_cache_invalid = 1;
2760
2761 make_cleanup_restore_integer (&execution_direction);
2762 execution_direction = target_execution_direction ();
2763
2764 if (deprecated_target_wait_hook)
2765 ecs->ptid =
2766 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2767 else
2768 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2769
2770 if (debug_infrun)
2771 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2772
2773 if (non_stop
2774 && ecs->ws.kind != TARGET_WAITKIND_IGNORE
2775 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED
2776 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2777 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2778 /* In non-stop mode, each thread is handled individually. Switch
2779 early, so the global state is set correctly for this
2780 thread. */
2781 context_switch (ecs->ptid);
2782
2783 /* If an error happens while handling the event, propagate GDB's
2784 knowledge of the executing state to the frontend/user running
2785 state. */
2786 if (!non_stop)
2787 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2788 else
2789 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2790
2791 /* This cleanup gets executed before the make_cleanup_restore_current_thread
2792 above, so it still applies to the thread which has thrown the exception. */
2793 make_bpstat_clear_actions_cleanup ();
2794
2795 /* Now figure out what to do with the result. */
2796 handle_inferior_event (ecs);
2797
2798 if (!ecs->wait_some_more)
2799 {
2800 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2801
2802 delete_step_thread_step_resume_breakpoint ();
2803
2804 /* We may not find an inferior if this was a process exit. */
2805 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
2806 normal_stop ();
2807
2808 if (target_has_execution
2809 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED
2810 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2811 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2812 && ecs->event_thread->step_multi
2813 && ecs->event_thread->control.stop_step)
2814 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2815 else
2816 {
2817 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2818 cmd_done = 1;
2819 }
2820 }
2821
2822 /* No error, don't finish the thread states yet. */
2823 discard_cleanups (ts_old_chain);
2824
2825 /* Revert thread and frame. */
2826 do_cleanups (old_chain);
2827
2828 /* If the inferior was in sync execution mode, and now isn't,
2829 restore the prompt (a synchronous execution command has finished,
2830 and we're ready for input). */
2831 if (interpreter_async && was_sync && !sync_execution)
2832 display_gdb_prompt (0);
2833
2834 if (cmd_done
2835 && !was_sync
2836 && exec_done_display_p
2837 && (ptid_equal (inferior_ptid, null_ptid)
2838 || !is_running (inferior_ptid)))
2839 printf_unfiltered (_("completed.\n"));
2840 }
2841
2842 /* Record the frame and location we're currently stepping through. */
2843 void
2844 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2845 {
2846 struct thread_info *tp = inferior_thread ();
2847
2848 tp->control.step_frame_id = get_frame_id (frame);
2849 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
2850
2851 tp->current_symtab = sal.symtab;
2852 tp->current_line = sal.line;
2853 }
2854
2855 /* Clear context switchable stepping state. */
2856
2857 void
2858 init_thread_stepping_state (struct thread_info *tss)
2859 {
2860 tss->stepping_over_breakpoint = 0;
2861 tss->step_after_step_resume_breakpoint = 0;
2862 }
2863
2864 /* Return the cached copy of the last pid/waitstatus returned by
2865 target_wait()/deprecated_target_wait_hook(). The data is actually
2866 cached by handle_inferior_event(), which gets called immediately
2867 after target_wait()/deprecated_target_wait_hook(). */
2868
2869 void
2870 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
2871 {
2872 *ptidp = target_last_wait_ptid;
2873 *status = target_last_waitstatus;
2874 }
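
/* Typical use, as in prepare_to_proceed above:

     ptid_t wait_ptid;
     struct target_waitstatus wait_status;

     get_last_target_status (&wait_ptid, &wait_status);
     if (wait_status.kind == TARGET_WAITKIND_STOPPED)
       ...  */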
2875
2876 void
2877 nullify_last_target_wait_ptid (void)
2878 {
2879 target_last_wait_ptid = minus_one_ptid;
2880 }
2881
2882 /* Switch thread contexts. */
2883
2884 static void
2885 context_switch (ptid_t ptid)
2886 {
2887 if (debug_infrun && !ptid_equal (ptid, inferior_ptid))
2888 {
2889 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
2890 target_pid_to_str (inferior_ptid));
2891 fprintf_unfiltered (gdb_stdlog, "to %s\n",
2892 target_pid_to_str (ptid));
2893 }
2894
2895 switch_to_thread (ptid);
2896 }
2897
2898 static void
2899 adjust_pc_after_break (struct execution_control_state *ecs)
2900 {
2901 struct regcache *regcache;
2902 struct gdbarch *gdbarch;
2903 struct address_space *aspace;
2904 CORE_ADDR breakpoint_pc;
2905
2906 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
2907 we aren't, just return.
2908
2909 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
2910 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
2911 implemented by software breakpoints should be handled through the normal
2912 breakpoint layer.
2913
2914 NOTE drow/2004-01-31: On some targets, breakpoints may generate
2915 different signals (SIGILL or SIGEMT for instance), but it is less
2916 clear where the PC is pointing afterwards. It may not match
2917 gdbarch_decr_pc_after_break. I don't know any specific target that
2918 generates these signals at breakpoints (the code has been in GDB since at
2919 least 1992) so I cannot guess how to handle them here.
2920
2921 In earlier versions of GDB, a target with
2922 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
2923 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
2924 target with both of these set in GDB history, and it seems unlikely to be
2925 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
2926
2927 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
2928 return;
2929
2930 if (ecs->ws.value.sig != TARGET_SIGNAL_TRAP)
2931 return;
2932
2933 /* In reverse execution, when a breakpoint is hit, the instruction
2934 under it has already been de-executed. The reported PC always
2935 points at the breakpoint address, so adjusting it further would
2936 be wrong. E.g., consider this case on a decr_pc_after_break == 1
2937 architecture:
2938
2939 B1 0x08000000 : INSN1
2940 B2 0x08000001 : INSN2
2941 0x08000002 : INSN3
2942 PC -> 0x08000003 : INSN4
2943
2944 Say you're stopped at 0x08000003 as above. Reverse continuing
2945 from that point should hit B2 as below. Reading the PC when the
2946 SIGTRAP is reported should read 0x08000001 and INSN2 should have
2947 been de-executed already.
2948
2949 B1 0x08000000 : INSN1
2950 B2 PC -> 0x08000001 : INSN2
2951 0x08000002 : INSN3
2952 0x08000003 : INSN4
2953
2954 We can't apply the same logic as for forward execution, because
2955 we would wrongly adjust the PC to 0x08000000, since there's a
2956 breakpoint at PC - 1. We'd then report a hit on B1, although
2957 INSN1 hadn't been de-executed yet. Doing nothing is the correct
2958 behaviour. */
2959 if (execution_direction == EXEC_REVERSE)
2960 return;
2961
2962 /* If this target does not decrement the PC after breakpoints, then
2963 we have nothing to do. */
2964 regcache = get_thread_regcache (ecs->ptid);
2965 gdbarch = get_regcache_arch (regcache);
2966 if (gdbarch_decr_pc_after_break (gdbarch) == 0)
2967 return;
2968
2969 aspace = get_regcache_aspace (regcache);
2970
2971 /* Find the location where (if we've hit a breakpoint) the
2972 breakpoint would be. */
2973 breakpoint_pc = regcache_read_pc (regcache)
2974 - gdbarch_decr_pc_after_break (gdbarch);
2975
2976 /* Check whether there actually is a software breakpoint inserted at
2977 that location.
2978
2979 If in non-stop mode, a race condition is possible where we've
2980 removed a breakpoint, but stop events for that breakpoint were
2981 already queued and arrive later. To suppress those spurious
2982 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
2983 and retire them after a number of stop events are reported. */
2984 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
2985 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
2986 {
2987 struct cleanup *old_cleanups = NULL;
2988
2989 if (RECORD_IS_USED)
2990 old_cleanups = record_gdb_operation_disable_set ();
2991
2992 /* When using hardware single-step, a SIGTRAP is reported for both
2993 a completed single-step and a software breakpoint. Need to
2994 differentiate between the two, as the latter needs adjusting
2995 but the former does not.
2996
2997 The SIGTRAP can be due to a completed hardware single-step only if
2998 - we didn't insert software single-step breakpoints
2999 - the thread to be examined is still the current thread
3000 - this thread is currently being stepped
3001
3002 If any of these events did not occur, we must have stopped due
3003 to hitting a software breakpoint, and have to back up to the
3004 breakpoint address.
3005
3006 As a special case, we could have hardware single-stepped a
3007 software breakpoint. In this case (prev_pc == breakpoint_pc),
3008 we also need to back up to the breakpoint address. */
3009
3010 if (singlestep_breakpoints_inserted_p
3011 || !ptid_equal (ecs->ptid, inferior_ptid)
3012 || !currently_stepping (ecs->event_thread)
3013 || ecs->event_thread->prev_pc == breakpoint_pc)
3014 regcache_write_pc (regcache, breakpoint_pc);
3015
3016 if (RECORD_IS_USED)
3017 do_cleanups (old_cleanups);
3018 }
3019 }
3020
3021 void
3022 init_infwait_state (void)
3023 {
3024 waiton_ptid = pid_to_ptid (-1);
3025 infwait_state = infwait_normal_state;
3026 }
3027
3028 void
3029 error_is_running (void)
3030 {
3031 error (_("Cannot execute this command while "
3032 "the selected thread is running."));
3033 }
3034
3035 void
3036 ensure_not_running (void)
3037 {
3038 if (is_running (inferior_ptid))
3039 error_is_running ();
3040 }
3041
3042 static int
3043 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
3044 {
3045 for (frame = get_prev_frame (frame);
3046 frame != NULL;
3047 frame = get_prev_frame (frame))
3048 {
3049 if (frame_id_eq (get_frame_id (frame), step_frame_id))
3050 return 1;
3051 if (get_frame_type (frame) != INLINE_FRAME)
3052 break;
3053 }
3054
3055 return 0;
3056 }
3057
3058 /* Auxiliary function that handles syscall entry/return events.
3059 It returns 1 if the inferior should keep going (and GDB
3060 should ignore the event), or 0 if the event deserves to be
3061 processed. */
3062
3063 static int
3064 handle_syscall_event (struct execution_control_state *ecs)
3065 {
3066 struct regcache *regcache;
3067 struct gdbarch *gdbarch;
3068 int syscall_number;
3069
3070 if (!ptid_equal (ecs->ptid, inferior_ptid))
3071 context_switch (ecs->ptid);
3072
3073 regcache = get_thread_regcache (ecs->ptid);
3074 gdbarch = get_regcache_arch (regcache);
3075 syscall_number = gdbarch_get_syscall_number (gdbarch, ecs->ptid);
3076 stop_pc = regcache_read_pc (regcache);
3077
3078 target_last_waitstatus.value.syscall_number = syscall_number;
3079
3080 if (catch_syscall_enabled () > 0
3081 && catching_syscall_number (syscall_number) > 0)
3082 {
3083 if (debug_infrun)
3084 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
3085 syscall_number);
3086
3087 ecs->event_thread->control.stop_bpstat
3088 = bpstat_stop_status (get_regcache_aspace (regcache),
3089 stop_pc, ecs->ptid, &ecs->ws);
3090 ecs->random_signal
3091 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat);
3092
3093 if (!ecs->random_signal)
3094 {
3095 /* Catchpoint hit. */
3096 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
3097 return 0;
3098 }
3099 }
3100
3101 /* If no catchpoint triggered for this, then keep going. */
3102 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3103 keep_going (ecs);
3104 return 1;
3105 }
3106
3107 /* Clear the supplied execution_control_state's stop_func_* fields. */
3108
3109 static void
3110 clear_stop_func (struct execution_control_state *ecs)
3111 {
3112 ecs->stop_func_filled_in = 0;
3113 ecs->stop_func_start = 0;
3114 ecs->stop_func_end = 0;
3115 ecs->stop_func_name = NULL;
3116 }
3117
3118 /* Lazily fill in the execution_control_state's stop_func_* fields. */
3119
3120 static void
3121 fill_in_stop_func (struct gdbarch *gdbarch,
3122 struct execution_control_state *ecs)
3123 {
3124 if (!ecs->stop_func_filled_in)
3125 {
3126 /* Don't care about return value; stop_func_start and stop_func_name
3127 will both be 0 if it doesn't work. */
3128 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3129 &ecs->stop_func_start, &ecs->stop_func_end);
3130 ecs->stop_func_start
3131 += gdbarch_deprecated_function_start_offset (gdbarch);
3132
3133 ecs->stop_func_filled_in = 1;
3134 }
3135 }
3136
3137 /* Given an execution control state that has been freshly filled in
3138 by an event from the inferior, figure out what it means and take
3139 appropriate action. */
3140
3141 static void
3142 handle_inferior_event (struct execution_control_state *ecs)
3143 {
3144 struct frame_info *frame;
3145 struct gdbarch *gdbarch;
3146 int stopped_by_watchpoint;
3147 int stepped_after_stopped_by_watchpoint = 0;
3148 struct symtab_and_line stop_pc_sal;
3149 enum stop_kind stop_soon;
3150
3151 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
3152 {
3153 /* We had an event in the inferior, but we are not interested in
3154 handling it at this level. The lower layers have already
3155 done what needs to be done, if anything.
3156
3157 One of the possible circumstances for this is when the
3158 inferior produces output for the console. The inferior has
3159 not stopped, and we are ignoring the event. Another possible
3160 circumstance is any event which the lower level knows will be
3161 reported multiple times without an intervening resume. */
3162 if (debug_infrun)
3163 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
3164 prepare_to_wait (ecs);
3165 return;
3166 }
3167
3168 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
3169 && target_can_async_p () && !sync_execution)
3170 {
3171 /* There were no unwaited-for children left in the target, but
3172 we're not synchronously waiting for events either. Just
3173 ignore. Otherwise, if we were running a synchronous
3174 execution command, we need to cancel it and give the user
3175 back the terminal. */
3176 if (debug_infrun)
3177 fprintf_unfiltered (gdb_stdlog,
3178 "infrun: TARGET_WAITKIND_NO_RESUMED (ignoring)\n");
3179 prepare_to_wait (ecs);
3180 return;
3181 }
3182
3183 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3184 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3185 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED)
3186 {
3187 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
3188
3189 gdb_assert (inf);
3190 stop_soon = inf->control.stop_soon;
3191 }
3192 else
3193 stop_soon = NO_STOP_QUIETLY;
3194
3195 /* Cache the last pid/waitstatus. */
3196 target_last_wait_ptid = ecs->ptid;
3197 target_last_waitstatus = ecs->ws;
3198
3199 /* Always clear state belonging to the previous time we stopped. */
3200 stop_stack_dummy = STOP_NONE;
3201
3202 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
3203 {
3204 /* No unwaited-for children left. IOW, all resumed children
3205 have exited. */
3206 if (debug_infrun)
3207 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_RESUMED\n");
3208
3209 stop_print_frame = 0;
3210 stop_stepping (ecs);
3211 return;
3212 }
3213
3214 /* If it's a new process, add it to the thread database. */
3215
3216 ecs->new_thread_event = (!ptid_equal (ecs->ptid, inferior_ptid)
3217 && !ptid_equal (ecs->ptid, minus_one_ptid)
3218 && !in_thread_list (ecs->ptid));
3219
3220 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3221 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED && ecs->new_thread_event)
3222 add_thread (ecs->ptid);
3223
3224 ecs->event_thread = find_thread_ptid (ecs->ptid);
3225
3226 /* Dependent on valid ECS->EVENT_THREAD. */
3227 adjust_pc_after_break (ecs);
3228
3229 /* Dependent on the current PC value modified by adjust_pc_after_break. */
3230 reinit_frame_cache ();
3231
3232 breakpoint_retire_moribund ();
3233
3234 /* First, distinguish signals caused by the debugger from signals
3235 that have to do with the program's own actions. Note that
3236 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
3237 on the operating system version. Here we detect when a SIGILL or
3238 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
3239 something similar for SIGSEGV, since a SIGSEGV will be generated
3240 when we're trying to execute a breakpoint instruction on a
3241 non-executable stack. This happens for call dummy breakpoints
3242 for architectures like SPARC that place call dummies on the
3243 stack. */
3244 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
3245 && (ecs->ws.value.sig == TARGET_SIGNAL_ILL
3246 || ecs->ws.value.sig == TARGET_SIGNAL_SEGV
3247 || ecs->ws.value.sig == TARGET_SIGNAL_EMT))
3248 {
3249 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3250
3251 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
3252 regcache_read_pc (regcache)))
3253 {
3254 if (debug_infrun)
3255 fprintf_unfiltered (gdb_stdlog,
3256 "infrun: Treating signal as SIGTRAP\n");
3257 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
3258 }
3259 }
3260
3261 /* Mark the non-executing threads accordingly. In all-stop, all
3262 threads of all processes are stopped when we get any event
3263 reported. In non-stop mode, only the event thread stops. If
3264 we're handling a process exit in non-stop mode, there's nothing
3265 to do, as threads of the dead process are gone, and threads of
3266 any other process were left running. */
3267 if (!non_stop)
3268 set_executing (minus_one_ptid, 0);
3269 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3270 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
3271 set_executing (ecs->ptid, 0);
3272
3273 switch (infwait_state)
3274 {
3275 case infwait_thread_hop_state:
3276 if (debug_infrun)
3277 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_thread_hop_state\n");
3278 break;
3279
3280 case infwait_normal_state:
3281 if (debug_infrun)
3282 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
3283 break;
3284
3285 case infwait_step_watch_state:
3286 if (debug_infrun)
3287 fprintf_unfiltered (gdb_stdlog,
3288 "infrun: infwait_step_watch_state\n");
3289
3290 stepped_after_stopped_by_watchpoint = 1;
3291 break;
3292
3293 case infwait_nonstep_watch_state:
3294 if (debug_infrun)
3295 fprintf_unfiltered (gdb_stdlog,
3296 "infrun: infwait_nonstep_watch_state\n");
3297 insert_breakpoints ();
3298
3299 /* FIXME-maybe: is this cleaner than setting a flag? Does it
3300 handle things like signals arriving and other things happening
3301 in combination correctly? */
3302 stepped_after_stopped_by_watchpoint = 1;
3303 break;
3304
3305 default:
3306 internal_error (__FILE__, __LINE__, _("bad switch"));
3307 }
3308
3309 infwait_state = infwait_normal_state;
3310 waiton_ptid = pid_to_ptid (-1);
3311
3312 switch (ecs->ws.kind)
3313 {
3314 case TARGET_WAITKIND_LOADED:
3315 if (debug_infrun)
3316 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
3317 /* Ignore gracefully during startup of the inferior, as it might
3318 be the shell which has just loaded some objects; otherwise
3319 add the symbols for the newly loaded objects. Also ignore at
3320 the beginning of an attach or remote session; we will query
3321 the full list of libraries once the connection is
3322 established. */
3323 if (stop_soon == NO_STOP_QUIETLY)
3324 {
3325 /* Check for any newly added shared libraries if we're
3326 supposed to be adding them automatically. Switch
3327 terminal for any messages produced by
3328 breakpoint_re_set. */
3329 target_terminal_ours_for_output ();
3330 /* NOTE: cagney/2003-11-25: Make certain that the target
3331 stack's section table is kept up-to-date. Architectures
3332 (e.g., PPC64) use the section table to perform
3333 operations such as address => section name and hence
3334 require the table to contain all sections (including
3335 those found in shared libraries). */
3336 #ifdef SOLIB_ADD
3337 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
3338 #else
3339 solib_add (NULL, 0, &current_target, auto_solib_add);
3340 #endif
3341 target_terminal_inferior ();
3342
3343 /* If requested, stop when the dynamic linker notifies
3344 gdb of events. This allows the user to get control
3345 and place breakpoints in initializer routines for
3346 dynamically loaded objects (among other things). */
3347 if (stop_on_solib_events)
3348 {
3349 /* Make sure we print "Stopped due to solib-event" in
3350 normal_stop. */
3351 stop_print_frame = 1;
3352
3353 stop_stepping (ecs);
3354 return;
3355 }
3356
3357 /* NOTE drow/2007-05-11: This might be a good place to check
3358 for "catch load". */
3359 }
3360
3361 /* If we are skipping through a shell, or through shared library
3362 loading that we aren't interested in, resume the program. If
3363 we're running the program normally, also resume. But stop if
3364 we're attaching or setting up a remote connection. */
3365 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3366 {
3367 /* Loading of shared libraries might have changed breakpoint
3368 addresses. Make sure new breakpoints are inserted. */
3369 if (stop_soon == NO_STOP_QUIETLY
3370 && !breakpoints_always_inserted_mode ())
3371 insert_breakpoints ();
3372 resume (0, TARGET_SIGNAL_0);
3373 prepare_to_wait (ecs);
3374 return;
3375 }
3376
3377 break;
3378
3379 case TARGET_WAITKIND_SPURIOUS:
3380 if (debug_infrun)
3381 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3382 resume (0, TARGET_SIGNAL_0);
3383 prepare_to_wait (ecs);
3384 return;
3385
3386 case TARGET_WAITKIND_EXITED:
3387 if (debug_infrun)
3388 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXITED\n");
3389 inferior_ptid = ecs->ptid;
3390 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3391 set_current_program_space (current_inferior ()->pspace);
3392 handle_vfork_child_exec_or_exit (0);
3393 target_terminal_ours (); /* Must do this before mourn anyway. */
3394 print_exited_reason (ecs->ws.value.integer);
3395
3396 /* Record the exit code in the convenience variable $_exitcode, so
3397 that the user can inspect this again later. */
3398 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3399 (LONGEST) ecs->ws.value.integer);
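/* For instance, in a session (output and process id illustrative):

     (gdb) run
     [Inferior 1 (process 1234) exited with code 01]
     (gdb) print $_exitcode
     $1 = 1
*/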
3400
3401 /* Also record this in the inferior itself. */
3402 current_inferior ()->has_exit_code = 1;
3403 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
3404
3405 gdb_flush (gdb_stdout);
3406 target_mourn_inferior ();
3407 singlestep_breakpoints_inserted_p = 0;
3408 cancel_single_step_breakpoints ();
3409 stop_print_frame = 0;
3410 stop_stepping (ecs);
3411 return;
3412
3413 case TARGET_WAITKIND_SIGNALLED:
3414 if (debug_infrun)
3415 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SIGNALLED\n");
3416 inferior_ptid = ecs->ptid;
3417 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3418 set_current_program_space (current_inferior ()->pspace);
3419 handle_vfork_child_exec_or_exit (0);
3420 stop_print_frame = 0;
3421 target_terminal_ours (); /* Must do this before mourn anyway. */
3422
3423 /* Note: By definition of TARGET_WAITKIND_SIGNALLED, we shouldn't
3424 reach here unless the inferior is dead. However, for years
3425 target_kill() was called here, which hints that fatal signals aren't
3426 really fatal on some systems. If that's true, then some changes
3427 may be needed. */
3428 target_mourn_inferior ();
3429
3430 print_signal_exited_reason (ecs->ws.value.sig);
3431 singlestep_breakpoints_inserted_p = 0;
3432 cancel_single_step_breakpoints ();
3433 stop_stepping (ecs);
3434 return;
3435
3436 /* The following are the only cases in which we keep going;
3437 the above cases end in a continue or goto. */
3438 case TARGET_WAITKIND_FORKED:
3439 case TARGET_WAITKIND_VFORKED:
3440 if (debug_infrun)
3441 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3442
3443 /* Check whether the inferior is displaced stepping. */
3444 {
3445 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3446 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3447 struct displaced_step_inferior_state *displaced
3448 = get_displaced_stepping_state (ptid_get_pid (ecs->ptid));
3449
3450 /* If displaced stepping is in use and thread ecs->ptid is the
3451 one currently displaced stepping, fix things up now. */
3452 if (displaced && ptid_equal (displaced->step_ptid, ecs->ptid))
3453 {
3454 struct inferior *parent_inf
3455 = find_inferior_pid (ptid_get_pid (ecs->ptid));
3456 struct regcache *child_regcache;
3457 CORE_ADDR parent_pc;
3458
3459 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
3460 indicating that the displaced stepping of the syscall instruction
3461 has finished. Perform cleanup for the parent process here. Note
3462 that this operation also cleans up the child process for vfork,
3463 because their pages are shared. */
3464 displaced_step_fixup (ecs->ptid, TARGET_SIGNAL_TRAP);
3465
3466 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3467 {
3468 /* Restore scratch pad for child process. */
3469 displaced_step_restore (displaced, ecs->ws.value.related_pid);
3470 }
3471
3472 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
3473 the child's PC is also within the scratchpad. Set the child's PC
3474 to the parent's PC value, which has already been fixed up.
3475 FIXME: we use the parent's aspace here, although we're touching
3476 the child, because the child hasn't been added to the inferior
3477 list yet at this point. */
3478
3479 child_regcache
3480 = get_thread_arch_aspace_regcache (ecs->ws.value.related_pid,
3481 gdbarch,
3482 parent_inf->aspace);
3483 /* Read PC value of parent process. */
3484 parent_pc = regcache_read_pc (regcache);
3485
3486 if (debug_displaced)
3487 fprintf_unfiltered (gdb_stdlog,
3488 "displaced: write child pc from %s to %s\n",
3489 paddress (gdbarch,
3490 regcache_read_pc (child_regcache)),
3491 paddress (gdbarch, parent_pc));
3492
3493 regcache_write_pc (child_regcache, parent_pc);
3494 }
3495 }
3496
3497 if (!ptid_equal (ecs->ptid, inferior_ptid))
3498 {
3499 context_switch (ecs->ptid);
3500 reinit_frame_cache ();
3501 }
3502
3503 /* Immediately detach breakpoints from the child before there's
3504 any chance of letting the user delete breakpoints from the
3505 breakpoint lists. If we don't do this early, it's easy to
3506 leave leftover traps in the child, viz: "break foo; catch
3507 fork; c; <fork>; del; c; <child calls foo>". We only follow
3508 the fork on the last `continue', and by that time the
3509 breakpoint at "foo" is long gone from the breakpoint table.
3510 If we vforked, then we don't need to unpatch here, since both
3511 parent and child are sharing the same memory pages; we'll
3512 need to unpatch at follow/detach time instead to be certain
3513 that new breakpoints added between catchpoint hit time and
3514 vfork follow are detached. */
3515 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
3516 {
3517 int child_pid = ptid_get_pid (ecs->ws.value.related_pid);
3518
3519 /* This won't actually modify the breakpoint list, but will
3520 physically remove the breakpoints from the child. */
3521 detach_breakpoints (child_pid);
3522 }
3523
3524 if (singlestep_breakpoints_inserted_p)
3525 {
3526 /* Pull the single step breakpoints out of the target. */
3527 remove_single_step_breakpoints ();
3528 singlestep_breakpoints_inserted_p = 0;
3529 }
3530
3531 /* In case the event is caught by a catchpoint, remember that
3532 the event is to be followed at the next resume of the thread,
3533 and not immediately. */
3534 ecs->event_thread->pending_follow = ecs->ws;
3535
3536 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3537
3538 ecs->event_thread->control.stop_bpstat
3539 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3540 stop_pc, ecs->ptid, &ecs->ws);
3541
3542 /* Note that we're interested in knowing whether the bpstat
3543 actually causes a stop, not just whether it may explain the signal.
3544 Software watchpoints, for example, always appear in the
3545 bpstat. */
3546 ecs->random_signal
3547 = !bpstat_causes_stop (ecs->event_thread->control.stop_bpstat);
3548
3549 /* If no catchpoint triggered for this, then keep going. */
3550 if (ecs->random_signal)
3551 {
3552 ptid_t parent;
3553 ptid_t child;
3554 int should_resume;
3555 int follow_child
3556 = (follow_fork_mode_string == follow_fork_mode_child);
3557
3558 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3559
3560 should_resume = follow_fork ();
3561
3562 parent = ecs->ptid;
3563 child = ecs->ws.value.related_pid;
3564
3565 /* In non-stop mode, also resume the other branch. */
3566 if (non_stop && !detach_fork)
3567 {
3568 if (follow_child)
3569 switch_to_thread (parent);
3570 else
3571 switch_to_thread (child);
3572
3573 ecs->event_thread = inferior_thread ();
3574 ecs->ptid = inferior_ptid;
3575 keep_going (ecs);
3576 }
3577
3578 if (follow_child)
3579 switch_to_thread (child);
3580 else
3581 switch_to_thread (parent);
3582
3583 ecs->event_thread = inferior_thread ();
3584 ecs->ptid = inferior_ptid;
3585
3586 if (should_resume)
3587 keep_going (ecs);
3588 else
3589 stop_stepping (ecs);
3590 return;
3591 }
3592 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
3593 goto process_event_stop_test;
3594
3595 case TARGET_WAITKIND_VFORK_DONE:
3596 /* Done with the shared memory region. Re-insert breakpoints in
3597 the parent, and keep going. */
3598
3599 if (debug_infrun)
3600 fprintf_unfiltered (gdb_stdlog,
3601 "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3602
3603 if (!ptid_equal (ecs->ptid, inferior_ptid))
3604 context_switch (ecs->ptid);
3605
3606 current_inferior ()->waiting_for_vfork_done = 0;
3607 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3608 /* This also takes care of reinserting breakpoints in the
3609 previously locked inferior. */
3610 keep_going (ecs);
3611 return;
3612
3613 case TARGET_WAITKIND_EXECD:
3614 if (debug_infrun)
3615 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3616
3617 if (!ptid_equal (ecs->ptid, inferior_ptid))
3618 {
3619 context_switch (ecs->ptid);
3620 reinit_frame_cache ();
3621 }
3622
3623 singlestep_breakpoints_inserted_p = 0;
3624 cancel_single_step_breakpoints ();
3625
3626 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3627
3628 /* Do whatever is necessary to the parent branch of the vfork. */
3629 handle_vfork_child_exec_or_exit (1);
3630
3631 /* This causes the eventpoints and symbol table to be reset.
3632 Must do this now, before trying to determine whether to
3633 stop. */
3634 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3635
3636 ecs->event_thread->control.stop_bpstat
3637 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3638 stop_pc, ecs->ptid, &ecs->ws);
3639 ecs->random_signal
3640 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat);
3641
3642 /* Note that this may be referenced from inside
3643 bpstat_stop_status above, through inferior_has_execd. */
3644 xfree (ecs->ws.value.execd_pathname);
3645 ecs->ws.value.execd_pathname = NULL;
3646
3647 /* If no catchpoint triggered for this, then keep going. */
3648 if (ecs->random_signal)
3649 {
3650 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3651 keep_going (ecs);
3652 return;
3653 }
3654 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
3655 goto process_event_stop_test;
3656
3657 /* Be careful not to try to gather much state about a thread
3658 that's in a syscall. It's frequently a losing proposition. */
3659 case TARGET_WAITKIND_SYSCALL_ENTRY:
3660 if (debug_infrun)
3661 fprintf_unfiltered (gdb_stdlog,
3662 "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3663 /* Get the current syscall number. */
3664 if (handle_syscall_event (ecs) != 0)
3665 return;
3666 goto process_event_stop_test;
3667
3668 /* Before examining the threads further, step this thread to
3669 get it entirely out of the syscall. (We get notice of the
3670 event when the thread is just on the verge of exiting a
3671 syscall. Stepping one instruction seems to get it back
3672 into user code.) */
3673 case TARGET_WAITKIND_SYSCALL_RETURN:
3674 if (debug_infrun)
3675 fprintf_unfiltered (gdb_stdlog,
3676 "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
3677 if (handle_syscall_event (ecs) != 0)
3678 return;
3679 goto process_event_stop_test;
3680
3681 case TARGET_WAITKIND_STOPPED:
3682 if (debug_infrun)
3683 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
3684 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
3685 break;
3686
3687 case TARGET_WAITKIND_NO_HISTORY:
3688 if (debug_infrun)
3689 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_HISTORY\n");
3690 /* Reverse execution: target ran out of history info. */
3691 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3692 print_no_history_reason ();
3693 stop_stepping (ecs);
3694 return;
3695 }
3696
3697 if (ecs->new_thread_event)
3698 {
3699 if (non_stop)
3700 /* Non-stop assumes that the target handles adding new threads
3701 to the thread list. */
3702 internal_error (__FILE__, __LINE__,
3703 "targets should add new threads to the thread "
3704 "list themselves in non-stop mode.");
3705
3706 /* We may want to consider not doing a resume here in order to
3707 give the user a chance to play with the new thread. It might
3708 be good to make that a user-settable option. */
3709
3710 /* At this point, all threads are stopped (happens automatically
3711 in either the OS or the native code). Therefore we need to
3712 continue all threads in order to make progress. */
3713
3714 if (!ptid_equal (ecs->ptid, inferior_ptid))
3715 context_switch (ecs->ptid);
3716 target_resume (RESUME_ALL, 0, TARGET_SIGNAL_0);
3717 prepare_to_wait (ecs);
3718 return;
3719 }
3720
3721 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED)
3722 {
3723 /* Do we need to clean up the state of a thread that has
3724 completed a displaced single-step? (Doing so usually affects
3725 the PC, so do it here, before we set stop_pc.) */
3726 displaced_step_fixup (ecs->ptid,
3727 ecs->event_thread->suspend.stop_signal);
3728
3729 /* If we either finished a single-step or hit a breakpoint, but
3730 the user wanted this thread to be stopped, pretend we got a
3731 SIG0 (generic unsignaled stop). */
3732
3733 if (ecs->event_thread->stop_requested
3734 && ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3735 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3736 }
3737
3738 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3739
3740 if (debug_infrun)
3741 {
3742 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3743 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3744 struct cleanup *old_chain = save_inferior_ptid ();
3745
3746 inferior_ptid = ecs->ptid;
3747
3748 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
3749 paddress (gdbarch, stop_pc));
3750 if (target_stopped_by_watchpoint ())
3751 {
3752 CORE_ADDR addr;
3753
3754 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
3755
3756 if (target_stopped_data_address (&current_target, &addr))
3757 fprintf_unfiltered (gdb_stdlog,
3758 "infrun: stopped data address = %s\n",
3759 paddress (gdbarch, addr));
3760 else
3761 fprintf_unfiltered (gdb_stdlog,
3762 "infrun: (no data address available)\n");
3763 }
3764
3765 do_cleanups (old_chain);
3766 }
3767
3768 if (stepping_past_singlestep_breakpoint)
3769 {
3770 gdb_assert (singlestep_breakpoints_inserted_p);
3771 gdb_assert (ptid_equal (singlestep_ptid, ecs->ptid));
3772 gdb_assert (!ptid_equal (singlestep_ptid, saved_singlestep_ptid));
3773
3774 stepping_past_singlestep_breakpoint = 0;
3775
3776 /* We've either finished single-stepping past the single-step
3777 breakpoint, or stopped for some other reason. It would be nice if
3778 we could tell, but we can't reliably. */
3779 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3780 {
3781 if (debug_infrun)
3782 fprintf_unfiltered (gdb_stdlog,
3783 "infrun: stepping_past_"
3784 "singlestep_breakpoint\n");
3785 /* Pull the single step breakpoints out of the target. */
3786 remove_single_step_breakpoints ();
3787 singlestep_breakpoints_inserted_p = 0;
3788
3789 ecs->random_signal = 0;
3790 ecs->event_thread->control.trap_expected = 0;
3791
3792 context_switch (saved_singlestep_ptid);
3793 if (deprecated_context_hook)
3794 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3795
3796 resume (1, TARGET_SIGNAL_0);
3797 prepare_to_wait (ecs);
3798 return;
3799 }
3800 }
3801
3802 if (!ptid_equal (deferred_step_ptid, null_ptid))
3803 {
3804 /* In non-stop mode, there's never a deferred_step_ptid set. */
3805 gdb_assert (!non_stop);
3806
3807 /* If we stopped for some other reason than single-stepping, ignore
3808 the fact that we were supposed to switch back. */
3809 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3810 {
3811 if (debug_infrun)
3812 fprintf_unfiltered (gdb_stdlog,
3813 "infrun: handling deferred step\n");
3814
3815 /* Pull the single step breakpoints out of the target. */
3816 if (singlestep_breakpoints_inserted_p)
3817 {
3818 remove_single_step_breakpoints ();
3819 singlestep_breakpoints_inserted_p = 0;
3820 }
3821
3822 ecs->event_thread->control.trap_expected = 0;
3823
3824 /* Note: We do not call context_switch at this point, as the
3825 context is already set up for stepping the original thread. */
3826 switch_to_thread (deferred_step_ptid);
3827 deferred_step_ptid = null_ptid;
3828 /* Suppress spurious "Switching to ..." message. */
3829 previous_inferior_ptid = inferior_ptid;
3830
3831 resume (1, TARGET_SIGNAL_0);
3832 prepare_to_wait (ecs);
3833 return;
3834 }
3835
3836 deferred_step_ptid = null_ptid;
3837 }
3838
3839 /* See if a thread hit a thread-specific breakpoint that was meant for
3840 another thread. If so, then step that thread past the breakpoint,
3841 and continue it. */
3842
3843 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3844 {
3845 int thread_hop_needed = 0;
3846 struct address_space *aspace =
3847 get_regcache_aspace (get_thread_regcache (ecs->ptid));
3848
3849 /* Check if a regular breakpoint has been hit before checking
3850 for a potential single step breakpoint. Otherwise, GDB will
3851 not see this breakpoint hit when stepping onto breakpoints. */
3852 if (regular_breakpoint_inserted_here_p (aspace, stop_pc))
3853 {
3854 ecs->random_signal = 0;
3855 if (!breakpoint_thread_match (aspace, stop_pc, ecs->ptid))
3856 thread_hop_needed = 1;
3857 }
3858 else if (singlestep_breakpoints_inserted_p)
3859 {
3860 /* We have not context switched yet, so this should be true
3861 no matter which thread hit the singlestep breakpoint. */
3862 gdb_assert (ptid_equal (inferior_ptid, singlestep_ptid));
3863 if (debug_infrun)
3864 fprintf_unfiltered (gdb_stdlog, "infrun: software single step "
3865 "trap for %s\n",
3866 target_pid_to_str (ecs->ptid));
3867
3868 ecs->random_signal = 0;
3869 /* The call to in_thread_list is necessary because PTIDs sometimes
3870 change when we go from single-threaded to multi-threaded. If
3871 the singlestep_ptid is still in the list, assume that it is
3872 really different from ecs->ptid. */
3873 if (!ptid_equal (singlestep_ptid, ecs->ptid)
3874 && in_thread_list (singlestep_ptid))
3875 {
3876 /* If the PC of the thread we were trying to single-step
3877 has changed, discard this event (which we were going
3878 to ignore anyway), and pretend we saw that thread
3879 trap. This prevents us continuously moving the
3880 single-step breakpoint forward, one instruction at a
3881 time. If the PC has changed, then the thread we were
3882 trying to single-step has trapped or been signalled,
3883 but the event has not been reported to GDB yet.
3884
3885 There might be some cases where this loses signal
3886 information, if a signal has arrived at exactly the
3887 same time that the PC changed, but this is the best
3888 we can do with the information available. Perhaps we
3889 should arrange to report all events for all threads
3890 when they stop, or to re-poll the remote looking for
3891 this particular thread (i.e. temporarily enable
3892 schedlock). */
3893
3894 CORE_ADDR new_singlestep_pc
3895 = regcache_read_pc (get_thread_regcache (singlestep_ptid));
3896
3897 if (new_singlestep_pc != singlestep_pc)
3898 {
3899 enum target_signal stop_signal;
3900
3901 if (debug_infrun)
3902 fprintf_unfiltered (gdb_stdlog, "infrun: unexpected thread,"
3903 " but expected thread advanced also\n");
3904
3905 /* The current context still belongs to
3906 singlestep_ptid. Don't swap here, since that's
3907 the context we want to use. Just fudge our
3908 state and continue. */
3909 stop_signal = ecs->event_thread->suspend.stop_signal;
3910 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3911 ecs->ptid = singlestep_ptid;
3912 ecs->event_thread = find_thread_ptid (ecs->ptid);
3913 ecs->event_thread->suspend.stop_signal = stop_signal;
3914 stop_pc = new_singlestep_pc;
3915 }
3916 else
3917 {
3918 if (debug_infrun)
3919 fprintf_unfiltered (gdb_stdlog,
3920 "infrun: unexpected thread\n");
3921
3922 thread_hop_needed = 1;
3923 stepping_past_singlestep_breakpoint = 1;
3924 saved_singlestep_ptid = singlestep_ptid;
3925 }
3926 }
3927 }
3928
3929 if (thread_hop_needed)
3930 {
3931 struct regcache *thread_regcache;
3932 int remove_status = 0;
3933
3934 if (debug_infrun)
3935 fprintf_unfiltered (gdb_stdlog, "infrun: thread_hop_needed\n");
3936
3937 /* Switch context before touching inferior memory, the
3938 previous thread may have exited. */
3939 if (!ptid_equal (inferior_ptid, ecs->ptid))
3940 context_switch (ecs->ptid);
3941
3942 /* Saw a breakpoint, but it was hit by the wrong thread.
3943 Just continue. */
3944
3945 if (singlestep_breakpoints_inserted_p)
3946 {
3947 /* Pull the single step breakpoints out of the target. */
3948 remove_single_step_breakpoints ();
3949 singlestep_breakpoints_inserted_p = 0;
3950 }
3951
3952 /* If the arch can displace step, don't remove the
3953 breakpoints. */
3954 thread_regcache = get_thread_regcache (ecs->ptid);
3955 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
3956 remove_status = remove_breakpoints ();
3957
3958 /* Did we fail to remove breakpoints? If so, try
3959 to set the PC past the bp. (There's at least
3960 one situation in which we can fail to remove
3961 the bp's: On HP-UX's that use ttrace, we can't
3962 change the address space of a vforking child
3963 process until the child exits (well, okay, not
3964 then either :-) or execs. */
3965 if (remove_status != 0)
3966 error (_("Cannot step over breakpoint hit in wrong thread"));
3967 else
3968 { /* Single step */
3969 if (!non_stop)
3970 {
3971 /* Only need to require the next event from this
3972 thread in all-stop mode. */
3973 waiton_ptid = ecs->ptid;
3974 infwait_state = infwait_thread_hop_state;
3975 }
3976
3977 ecs->event_thread->stepping_over_breakpoint = 1;
3978 keep_going (ecs);
3979 return;
3980 }
3981 }
3982 else if (singlestep_breakpoints_inserted_p)
3983 {
3984 ecs->random_signal = 0;
3985 }
3986 }
3987 else
3988 ecs->random_signal = 1;
3989
3990 /* See if something interesting happened to the non-current thread. If
3991 so, then switch to that thread. */
3992 if (!ptid_equal (ecs->ptid, inferior_ptid))
3993 {
3994 if (debug_infrun)
3995 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
3996
3997 context_switch (ecs->ptid);
3998
3999 if (deprecated_context_hook)
4000 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
4001 }
4002
4003 /* At this point, get hold of the now-current thread's frame. */
4004 frame = get_current_frame ();
4005 gdbarch = get_frame_arch (frame);
4006
4007 if (singlestep_breakpoints_inserted_p)
4008 {
4009 /* Pull the single step breakpoints out of the target. */
4010 remove_single_step_breakpoints ();
4011 singlestep_breakpoints_inserted_p = 0;
4012 }
4013
4014 if (stepped_after_stopped_by_watchpoint)
4015 stopped_by_watchpoint = 0;
4016 else
4017 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
4018
4019 /* If necessary, step over this watchpoint. We'll be back to display
4020 it in a moment. */
4021 if (stopped_by_watchpoint
4022 && (target_have_steppable_watchpoint
4023 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
4024 {
4025 /* At this point, we are stopped at an instruction which has
4026 attempted to write to a piece of memory under control of
4027 a watchpoint. The instruction hasn't actually executed
4028 yet. If we were to evaluate the watchpoint expression
4029 now, we would get the old value, and therefore no change
4030 would seem to have occurred.
4031
4032 In order to make watchpoints work `right', we really need
4033 to complete the memory write, and then evaluate the
4034 watchpoint expression. We do this by single-stepping the
4035 target.
4036
4037 It may not be necessary to disable the watchpoint to step over
4038 it. For example, the PA can (with some kernel cooperation)
4039 single step over a watchpoint without disabling the watchpoint.
4040
4041 It is far more common to need to disable a watchpoint to step
4042 the inferior over it. If we have non-steppable watchpoints,
4043 we must disable the current watchpoint; it's simplest to
4044 disable all watchpoints and breakpoints. */
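/* A rough user-level view of this path, assuming a target without
   steppable watchpoints and an illustrative watch expression:

     (gdb) watch global_counter
     (gdb) continue

   GDB removes its breakpoints, single-steps the instruction that
   triggered the watchpoint, and only re-evaluates the watchpoint
   expression after the write has completed, so the old and new
   values are both observed.  */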
4045 int hw_step = 1;
4046
4047 if (!target_have_steppable_watchpoint)
4048 {
4049 remove_breakpoints ();
4050 /* See the comment in resume for why we need to stop bypassing
4051 signals while breakpoints have been removed. */
4052 target_pass_signals (0, NULL);
4053 }
4054 /* Single step */
4055 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
4056 target_resume (ecs->ptid, hw_step, TARGET_SIGNAL_0);
4057 waiton_ptid = ecs->ptid;
4058 if (target_have_steppable_watchpoint)
4059 infwait_state = infwait_step_watch_state;
4060 else
4061 infwait_state = infwait_nonstep_watch_state;
4062 prepare_to_wait (ecs);
4063 return;
4064 }
4065
4066 clear_stop_func (ecs);
4067 ecs->event_thread->stepping_over_breakpoint = 0;
4068 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
4069 ecs->event_thread->control.stop_step = 0;
4070 stop_print_frame = 1;
4071 ecs->random_signal = 0;
4072 stopped_by_random_signal = 0;
4073
4074 /* Hide inlined functions starting here, unless we just performed stepi or
4075 nexti. After stepi and nexti, always show the innermost frame (not any
4076 inline function call sites). */
4077 if (ecs->event_thread->control.step_range_end != 1)
4078 {
4079 struct address_space *aspace =
4080 get_regcache_aspace (get_thread_regcache (ecs->ptid));
4081
4082 /* skip_inline_frames is expensive, so we avoid it if we can
4083 determine that the address is one where functions cannot have
4084 been inlined. This improves performance with inferiors that
4085 load a lot of shared libraries, because the solib event
4086 breakpoint is defined as the address of a function (i.e. not
4087 inline). Note that we have to check the previous PC as well
4088 as the current one to catch cases when we have just
4089 single-stepped off a breakpoint prior to reinstating it.
4090 Note that we're assuming that the code we single-step to is
4091 not inline, but that's not definitive: there's nothing
4092 preventing the event breakpoint function from containing
4093 inlined code, and the single-step ending up there. If the
4094 user had set a breakpoint on that inlined code, the missing
4095 skip_inline_frames call would break things. Fortunately
4096 that's an extremely unlikely scenario. */
4097 if (!pc_at_non_inline_function (aspace, stop_pc, &ecs->ws)
4098 && !(ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4099 && ecs->event_thread->control.trap_expected
4100 && pc_at_non_inline_function (aspace,
4101 ecs->event_thread->prev_pc,
4102 &ecs->ws)))
4103 skip_inline_frames (ecs->ptid);
4104 }
4105
4106 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4107 && ecs->event_thread->control.trap_expected
4108 && gdbarch_single_step_through_delay_p (gdbarch)
4109 && currently_stepping (ecs->event_thread))
4110 {
4111 /* We're trying to step off a breakpoint. Turns out that we're
4112 also on an instruction that needs to be stepped multiple
4113 times before it has fully executed. E.g., architectures
4114 with a delay slot. It needs to be stepped twice, once for
4115 the instruction and once for the delay slot. */
4116 int step_through_delay
4117 = gdbarch_single_step_through_delay (gdbarch, frame);
4118
4119 if (debug_infrun && step_through_delay)
4120 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
4121 if (ecs->event_thread->control.step_range_end == 0
4122 && step_through_delay)
4123 {
4124 /* The user issued a continue when stopped at a breakpoint.
4125 Set up for another trap and get out of here. */
4126 ecs->event_thread->stepping_over_breakpoint = 1;
4127 keep_going (ecs);
4128 return;
4129 }
4130 else if (step_through_delay)
4131 {
4132 /* The user issued a step when stopped at a breakpoint.
4133 Maybe we should stop, maybe we should not - the delay
4134 slot *might* correspond to a line of source. In any
4135 case, don't decide that here, just set
4136 ecs->event_thread->stepping_over_breakpoint, making sure we
4137 single-step again before breakpoints are re-inserted. */
4138 ecs->event_thread->stepping_over_breakpoint = 1;
4139 }
4140 }
4141
4142 /* Look at the cause of the stop, and decide what to do.
4143 The alternatives are:
4144 1) stop_stepping and return, to really stop and return to the debugger;
4145 2) keep_going and return to start up again
4146 (set ecs->event_thread->stepping_over_breakpoint to 1 to single step once)
4147 3) set ecs->random_signal to 1, and the decision between 1 and 2
4148 will be made according to the signal handling tables. */
4149
4150 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4151 || stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_NO_SIGSTOP
4152 || stop_soon == STOP_QUIETLY_REMOTE)
4153 {
4154 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4155 && stop_after_trap)
4156 {
4157 if (debug_infrun)
4158 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
4159 stop_print_frame = 0;
4160 stop_stepping (ecs);
4161 return;
4162 }
4163
4164 /* This originates from start_remote(), start_inferior() and
4165 the shared library hook functions. */
4166 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
4167 {
4168 if (debug_infrun)
4169 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
4170 stop_stepping (ecs);
4171 return;
4172 }
4173
4174 /* This originates from attach_command(). We need to overwrite
4175 the stop_signal here, because some kernels don't ignore a
4176 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
4177 See more comments in inferior.h. On the other hand, if we
4178 get a non-SIGSTOP, report it to the user - assume the backend
4179 will handle the SIGSTOP if it should show up later.
4180
4181 Also consider that the attach is complete when we see a
4182 SIGTRAP. Some systems (e.g. Windows) and some stubs supporting
4183 target extended-remote (e.g. gdbserver) report a SIGTRAP instead
4184 of a SIGSTOP. We already rely on SIGTRAP being our
4185 signal, so this is no exception.
4186
4187 Also consider that the attach is complete when we see a
4188 TARGET_SIGNAL_0. In non-stop mode, GDB will explicitly tell
4189 the target to stop all threads of the inferior, in case the
4190 low level attach operation doesn't stop them implicitly. If
4191 they weren't stopped implicitly, then the stub will report a
4192 TARGET_SIGNAL_0, meaning: stopped for no particular reason
4193 other than GDB's request. */
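/* Illustrative attach session for the case handled just below:

     (gdb) attach 1234

   The initial stop typically shows up as SIGSTOP (e.g. ptrace-based
   GNU/Linux targets), but may instead be reported as SIGTRAP
   (e.g. Windows, gdbserver extended-remote) or as TARGET_SIGNAL_0
   (non-stop remote with threads already stopped); all three are
   treated as "attach complete" here.  */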
4194 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
4195 && (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_STOP
4196 || ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4197 || ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_0))
4198 {
4199 stop_stepping (ecs);
4200 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
4201 return;
4202 }
4203
4204 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
4205 handles this event. */
4206 ecs->event_thread->control.stop_bpstat
4207 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
4208 stop_pc, ecs->ptid, &ecs->ws);
4209
4210 /* The following is needed in case a breakpoint condition
4211 called a function. */
4212 stop_print_frame = 1;
4213
4214 /* This is where we handle "moribund" watchpoints. Unlike
4215 software breakpoint traps, hardware watchpoint traps are
4216 always distinguishable from random traps. If no high-level
4217 watchpoint is associated with the reported stop data address
4218 anymore, then the bpstat does not explain the signal ---
4219 simply make sure to ignore it if `stopped_by_watchpoint' is
4220 set. */
4221
4222 if (debug_infrun
4223 && ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4224 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat)
4225 && stopped_by_watchpoint)
4226 fprintf_unfiltered (gdb_stdlog,
4227 "infrun: no user watchpoint explains "
4228 "watchpoint SIGTRAP, ignoring\n");
4229
4230 /* NOTE: cagney/2003-03-29: These two checks for a random signal
4231 at one stage in the past included checks for an inferior
4232 function call's call dummy's return breakpoint. The original
4233 comment, that went with the test, read:
4234
4235 ``End of a stack dummy. Some systems (e.g. Sony news) give
4236 another signal besides SIGTRAP, so check here as well as
4237 above.''
4238
4239 If someone ever tries to get call dummies on a
4240 non-executable stack to work (where the target would stop
4241 with something like a SIGSEGV), then those tests might need
4242 to be re-instated. Given, however, that the tests were only
4243 enabled when momentary breakpoints were not being used, I
4244 suspect that it won't be the case.
4245
4246 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
4247 be necessary for call dummies on a non-executable stack on
4248 SPARC. */
4249
4250 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
4251 ecs->random_signal
4252 = !(bpstat_explains_signal (ecs->event_thread->control.stop_bpstat)
4253 || stopped_by_watchpoint
4254 || ecs->event_thread->control.trap_expected
4255 || (ecs->event_thread->control.step_range_end
4256 && (ecs->event_thread->control.step_resume_breakpoint
4257 == NULL)));
4258 else
4259 {
4260 ecs->random_signal = !bpstat_explains_signal
4261 (ecs->event_thread->control.stop_bpstat);
4262 if (!ecs->random_signal)
4263 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
4264 }
4265 }
4266
4267 /* When we reach this point, we've pretty much decided
4268 that the reason for stopping must've been a random
4269 (unexpected) signal. */
4270
4271 else
4272 ecs->random_signal = 1;
4273
4274 process_event_stop_test:
4275
4276 /* Re-fetch current thread's frame in case we did a
4277 "goto process_event_stop_test" above. */
4278 frame = get_current_frame ();
4279 gdbarch = get_frame_arch (frame);
4280
4281 /* For the program's own signals, act according to
4282 the signal handling tables. */
4283
4284 if (ecs->random_signal)
4285 {
4286 /* Signal not for debugging purposes. */
4287 int printed = 0;
4288 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
4289
4290 if (debug_infrun)
4291 fprintf_unfiltered (gdb_stdlog, "infrun: random signal %d\n",
4292 ecs->event_thread->suspend.stop_signal);
4293
4294 stopped_by_random_signal = 1;
4295
4296 if (signal_print[ecs->event_thread->suspend.stop_signal])
4297 {
4298 printed = 1;
4299 target_terminal_ours_for_output ();
4300 print_signal_received_reason
4301 (ecs->event_thread->suspend.stop_signal);
4302 }
4303 /* Always stop on signals if we're either just gaining control
4304 of the program, or the user explicitly requested this thread
4305 to remain stopped. */
4306 if (stop_soon != NO_STOP_QUIETLY
4307 || ecs->event_thread->stop_requested
4308 || (!inf->detaching
4309 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
4310 {
4311 stop_stepping (ecs);
4312 return;
4313 }
4314 /* If not going to stop, give terminal back
4315 if we took it away. */
4316 else if (printed)
4317 target_terminal_inferior ();
4318
4319 /* Clear the signal if it should not be passed. */
4320 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
4321 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
4322
4323 if (ecs->event_thread->prev_pc == stop_pc
4324 && ecs->event_thread->control.trap_expected
4325 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4326 {
4327 /* We were just starting a new sequence, attempting to
4328 single-step off of a breakpoint and expecting a SIGTRAP.
4329 Instead, this signal arrives. This signal will take us out
4330 of the stepping range, so GDB needs to remember, when the
4331 signal handler returns, to resume stepping off that
4332 breakpoint. */
4333 /* To simplify things, "continue" is forced to use the same
4334 code paths as single-step - set a breakpoint at the
4335 signal return address and then, once hit, step off that
4336 breakpoint. */
4337 if (debug_infrun)
4338 fprintf_unfiltered (gdb_stdlog,
4339 "infrun: signal arrived while stepping over "
4340 "breakpoint\n");
4341
4342 insert_hp_step_resume_breakpoint_at_frame (frame);
4343 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4344 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4345 ecs->event_thread->control.trap_expected = 0;
4346 keep_going (ecs);
4347 return;
4348 }
4349
4350 if (ecs->event_thread->control.step_range_end != 0
4351 && ecs->event_thread->suspend.stop_signal != TARGET_SIGNAL_0
4352 && (ecs->event_thread->control.step_range_start <= stop_pc
4353 && stop_pc < ecs->event_thread->control.step_range_end)
4354 && frame_id_eq (get_stack_frame_id (frame),
4355 ecs->event_thread->control.step_stack_frame_id)
4356 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4357 {
4358 /* The inferior is about to take a signal that will take it
4359 out of the single step range. Set a breakpoint at the
4360 current PC (which is presumably where the signal handler
4361 will eventually return) and then allow the inferior to
4362 run free.
4363
4364 Note that this is only needed for a signal delivered
4365 while in the single-step range. Nested signals aren't a
4366 problem as they eventually all return. */
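/* Concretely (signal name illustrative): while "next"ing through a
   line, SIGALRM arrives and its handler runs code far outside the
   step range.  The breakpoint inserted below at the current PC is
   hit when the handler returns, and stepping then resumes as if the
   signal had never been delivered.  */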
4367 if (debug_infrun)
4368 fprintf_unfiltered (gdb_stdlog,
4369 "infrun: signal may take us out of "
4370 "single-step range\n");
4371
4372 insert_hp_step_resume_breakpoint_at_frame (frame);
4373 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4374 ecs->event_thread->control.trap_expected = 0;
4375 keep_going (ecs);
4376 return;
4377 }
4378
4379 /* Note: step_resume_breakpoint may be non-NULL. This occurs
4380 when either there's a nested signal, or when there's a
4381 pending signal enabled just as the signal handler returns
4382 (leaving the inferior at the step-resume-breakpoint without
4383 actually executing it). Either way continue until the
4384 breakpoint is really hit. */
4385 keep_going (ecs);
4386 return;
4387 }
4388
4389 /* Handle cases caused by hitting a breakpoint. */
4390 {
4391 CORE_ADDR jmp_buf_pc;
4392 struct bpstat_what what;
4393
4394 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
4395
4396 if (what.call_dummy)
4397 {
4398 stop_stack_dummy = what.call_dummy;
4399 }
4400
4401 /* If we hit an internal event that triggers symbol changes, the
4402 current frame will be invalidated within bpstat_what (e.g., if
4403 we hit an internal solib event). Re-fetch it. */
4404 frame = get_current_frame ();
4405 gdbarch = get_frame_arch (frame);
4406
4407 switch (what.main_action)
4408 {
4409 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
4410 /* If we hit the breakpoint at longjmp while stepping, we
4411 install a momentary breakpoint at the target of the
4412 jmp_buf. */
4413
4414 if (debug_infrun)
4415 fprintf_unfiltered (gdb_stdlog,
4416 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
4417
4418 ecs->event_thread->stepping_over_breakpoint = 1;
4419
4420 if (what.is_longjmp)
4421 {
4422 if (!gdbarch_get_longjmp_target_p (gdbarch)
4423 || !gdbarch_get_longjmp_target (gdbarch,
4424 frame, &jmp_buf_pc))
4425 {
4426 if (debug_infrun)
4427 fprintf_unfiltered (gdb_stdlog,
4428 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
4429 "(!gdbarch_get_longjmp_target)\n");
4430 keep_going (ecs);
4431 return;
4432 }
4433
4434 /* We're going to replace the current step-resume breakpoint
4435 with a longjmp-resume breakpoint. */
4436 delete_step_resume_breakpoint (ecs->event_thread);
4437
4438 /* Insert a breakpoint at resume address. */
4439 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4440 }
4441 else
4442 {
4443 struct symbol *func = get_frame_function (frame);
4444
4445 if (func)
4446 check_exception_resume (ecs, frame, func);
4447 }
4448 keep_going (ecs);
4449 return;
4450
4451 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4452 if (debug_infrun)
4453 fprintf_unfiltered (gdb_stdlog,
4454 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4455
4456 if (what.is_longjmp)
4457 {
4458 gdb_assert (ecs->event_thread->control.step_resume_breakpoint
4459 != NULL);
4460 delete_step_resume_breakpoint (ecs->event_thread);
4461 }
4462 else
4463 {
4464 /* There are several cases to consider.
4465
4466 1. The initiating frame no longer exists. In this case
4467 we must stop, because the exception has gone too far.
4468
4469 2. The initiating frame exists, and is the same as the
4470 current frame. We stop, because the exception has been
4471 caught.
4472
4473 3. The initiating frame exists and is different from
4474 the current frame. This means the exception has been
4475 caught beneath the initiating frame, so keep going. */
4476 struct frame_info *init_frame
4477 = frame_find_by_id (ecs->event_thread->initiating_frame);
4478
4479 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
4480 != NULL);
4481 delete_exception_resume_breakpoint (ecs->event_thread);
4482
4483 if (init_frame)
4484 {
4485 struct frame_id current_id
4486 = get_frame_id (get_current_frame ());
4487 if (frame_id_eq (current_id,
4488 ecs->event_thread->initiating_frame))
4489 {
4490 /* Case 2. Fall through. */
4491 }
4492 else
4493 {
4494 /* Case 3. */
4495 keep_going (ecs);
4496 return;
4497 }
4498 }
4499
4500 /* For Cases 1 and 2, remove the step-resume breakpoint,
4501 if it exists. */
4502 delete_step_resume_breakpoint (ecs->event_thread);
4503 }
4504
4505 ecs->event_thread->control.stop_step = 1;
4506 print_end_stepping_range_reason ();
4507 stop_stepping (ecs);
4508 return;
4509
4510 case BPSTAT_WHAT_SINGLE:
4511 if (debug_infrun)
4512 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4513 ecs->event_thread->stepping_over_breakpoint = 1;
4514 /* Still need to check other stuff, at least the case
4515 where we are stepping and step out of the right range. */
4516 break;
4517
4518 case BPSTAT_WHAT_STEP_RESUME:
4519 if (debug_infrun)
4520 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4521
4522 delete_step_resume_breakpoint (ecs->event_thread);
4523 if (ecs->event_thread->control.proceed_to_finish
4524 && execution_direction == EXEC_REVERSE)
4525 {
4526 struct thread_info *tp = ecs->event_thread;
4527
4528 /* We are finishing a function in reverse, and just hit
4529 the step-resume breakpoint at the start address of the
4530 function, and we're almost there -- just need to back
4531 up by one more single-step, which should take us back
4532 to the function call. */
4533 tp->control.step_range_start = tp->control.step_range_end = 1;
4534 keep_going (ecs);
4535 return;
4536 }
4537 fill_in_stop_func (gdbarch, ecs);
4538 if (stop_pc == ecs->stop_func_start
4539 && execution_direction == EXEC_REVERSE)
4540 {
4541 /* We are stepping over a function call in reverse, and
4542 just hit the step-resume breakpoint at the start
4543 address of the function. Go back to single-stepping,
4544 which should take us back to the function call. */
4545 ecs->event_thread->stepping_over_breakpoint = 1;
4546 keep_going (ecs);
4547 return;
4548 }
4549 break;
4550
4551 case BPSTAT_WHAT_STOP_NOISY:
4552 if (debug_infrun)
4553 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4554 stop_print_frame = 1;
4555
4556 /* We are about to nuke the step_resume_breakpoint via the
4557 cleanup chain, so no need to worry about it here. */
4558
4559 stop_stepping (ecs);
4560 return;
4561
4562 case BPSTAT_WHAT_STOP_SILENT:
4563 if (debug_infrun)
4564 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4565 stop_print_frame = 0;
4566
4567 /* We are about to nuke the step_resume_breakpoint via the
4568 cleanup chain, so no need to worry about it here. */
4569
4570 stop_stepping (ecs);
4571 return;
4572
4573 case BPSTAT_WHAT_HP_STEP_RESUME:
4574 if (debug_infrun)
4575 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
4576
4577 delete_step_resume_breakpoint (ecs->event_thread);
4578 if (ecs->event_thread->step_after_step_resume_breakpoint)
4579 {
4580 /* Back when the step-resume breakpoint was inserted, we
4581 were trying to single-step off a breakpoint. Go back
4582 to doing that. */
4583 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4584 ecs->event_thread->stepping_over_breakpoint = 1;
4585 keep_going (ecs);
4586 return;
4587 }
4588 break;
4589
4590 case BPSTAT_WHAT_KEEP_CHECKING:
4591 break;
4592 }
4593 }
4594
4595 /* We come here if we hit a breakpoint but should not
4596 stop for it. Possibly we also were stepping
4597 and should stop for that. So fall through and
4598 test for stepping. But, if not stepping,
4599 do not stop. */
4600
4601 /* In all-stop mode, if we're currently stepping but have stopped in
4602 some other thread, we need to switch back to the stepped thread. */
4603 if (!non_stop)
4604 {
4605 struct thread_info *tp;
4606
4607 tp = iterate_over_threads (currently_stepping_or_nexting_callback,
4608 ecs->event_thread);
4609 if (tp)
4610 {
4611 /* However, if the current thread is blocked on some internal
4612 breakpoint, and we simply need to step over that breakpoint
4613 to get it going again, do that first. */
4614 if ((ecs->event_thread->control.trap_expected
4615 && ecs->event_thread->suspend.stop_signal != TARGET_SIGNAL_TRAP)
4616 || ecs->event_thread->stepping_over_breakpoint)
4617 {
4618 keep_going (ecs);
4619 return;
4620 }
4621
4622 /* If the stepping thread exited, then don't try to switch
4623 back and resume it, which could fail in several different
4624 ways depending on the target. Instead, just keep going.
4625
4626 We can find a stepping dead thread in the thread list in
4627 two cases:
4628
4629 - The target supports thread exit events, and when the
4630 target tries to delete the thread from the thread list,
4631 inferior_ptid pointed at the exiting thread. In such a
4632 case, calling delete_thread does not really remove the
4633 thread from the list; instead, the thread is left listed,
4634 with 'exited' state.
4635
4636 - The target's debug interface does not support thread
4637 exit events, and so we have no idea whatsoever if the
4638 previously stepping thread is still alive. For that
4639 reason, we need to synchronously query the target
4640 now. */
4641 if (is_exited (tp->ptid)
4642 || !target_thread_alive (tp->ptid))
4643 {
4644 if (debug_infrun)
4645 fprintf_unfiltered (gdb_stdlog,
4646 "infrun: not switching back to "
4647 "stepped thread, it has vanished\n");
4648
4649 delete_thread (tp->ptid);
4650 keep_going (ecs);
4651 return;
4652 }
4653
4654 /* Otherwise, we no longer expect a trap in the current thread.
4655 Clear the trap_expected flag before switching back -- this is
4656 what keep_going would do as well, if we called it. */
4657 ecs->event_thread->control.trap_expected = 0;
4658
4659 if (debug_infrun)
4660 fprintf_unfiltered (gdb_stdlog,
4661 "infrun: switching back to stepped thread\n");
4662
4663 ecs->event_thread = tp;
4664 ecs->ptid = tp->ptid;
4665 context_switch (ecs->ptid);
4666 keep_going (ecs);
4667 return;
4668 }
4669 }
4670
4671 if (ecs->event_thread->control.step_resume_breakpoint)
4672 {
4673 if (debug_infrun)
4674 fprintf_unfiltered (gdb_stdlog,
4675 "infrun: step-resume breakpoint is inserted\n");
4676
4677 /* Having a step-resume breakpoint overrides anything
4678 else having to do with stepping commands until
4679 that breakpoint is reached. */
4680 keep_going (ecs);
4681 return;
4682 }
4683
4684 if (ecs->event_thread->control.step_range_end == 0)
4685 {
4686 if (debug_infrun)
4687 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4688 /* Likewise if we aren't even stepping. */
4689 keep_going (ecs);
4690 return;
4691 }
4692
4693 /* Re-fetch current thread's frame in case the code above caused
4694 the frame cache to be re-initialized, making our FRAME variable
4695 a dangling pointer. */
4696 frame = get_current_frame ();
4697 gdbarch = get_frame_arch (frame);
4698 fill_in_stop_func (gdbarch, ecs);
4699
4700 /* If stepping through a line, keep going if still within it.
4701
4702 Note that step_range_end is the address of the first instruction
4703 beyond the step range, and NOT the address of the last instruction
4704 within it!
4705
4706 Note also that during reverse execution, we may be stepping
4707 through a function epilogue and therefore must detect when
4708 the current-frame changes in the middle of a line. */
4709
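/* For example (addresses illustrative): if the current source line
   occupies 0x1000..0x100b, then step_range_start == 0x1000 and
   step_range_end == 0x100c; a stop_pc of 0x1008 is still inside the
   range, while 0x100c is the first address beyond it.  */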
4710 if (stop_pc >= ecs->event_thread->control.step_range_start
4711 && stop_pc < ecs->event_thread->control.step_range_end
4712 && (execution_direction != EXEC_REVERSE
4713 || frame_id_eq (get_frame_id (frame),
4714 ecs->event_thread->control.step_frame_id)))
4715 {
4716 if (debug_infrun)
4717 fprintf_unfiltered
4718 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4719 paddress (gdbarch, ecs->event_thread->control.step_range_start),
4720 paddress (gdbarch, ecs->event_thread->control.step_range_end));
4721
4722 /* When stepping backward, stop at beginning of line range
4723 (unless it's the function entry point, in which case
4724 keep going back to the call point). */
4725 if (stop_pc == ecs->event_thread->control.step_range_start
4726 && stop_pc != ecs->stop_func_start
4727 && execution_direction == EXEC_REVERSE)
4728 {
4729 ecs->event_thread->control.stop_step = 1;
4730 print_end_stepping_range_reason ();
4731 stop_stepping (ecs);
4732 }
4733 else
4734 keep_going (ecs);
4735
4736 return;
4737 }
4738
4739 /* We stepped out of the stepping range. */
4740
4741 /* If we are stepping at the source level and entered the runtime
4742 loader dynamic symbol resolution code...
4743
4744 EXEC_FORWARD: we keep on single stepping until we exit the run
4745 time loader code and reach the callee's address.
4746
4747 EXEC_REVERSE: we've already executed the callee (backward), and
4748 the runtime loader code is handled just like any other
4749 undebuggable function call. Now we need only keep stepping
4750 backward through the trampoline code, and that's handled further
4751 down, so there is nothing for us to do here. */
4752
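/* E.g. (GNU/Linux-style dynamic linking assumed): "step"ping into
   the first call to a not-yet-resolved PLT entry lands in the
   dynamic linker's resolver stub; gdbarch_skip_solib_resolver below
   reports where execution will continue once the resolver is done,
   and the step-resume breakpoint set there skips over the resolver
   code.  */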
4753 if (execution_direction != EXEC_REVERSE
4754 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4755 && in_solib_dynsym_resolve_code (stop_pc))
4756 {
4757 CORE_ADDR pc_after_resolver =
4758 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4759
4760 if (debug_infrun)
4761 fprintf_unfiltered (gdb_stdlog,
4762 "infrun: stepped into dynsym resolve code\n");
4763
4764 if (pc_after_resolver)
4765 {
4766 /* Set up a step-resume breakpoint at the address
4767 indicated by SKIP_SOLIB_RESOLVER. */
4768 struct symtab_and_line sr_sal;
4769
4770 init_sal (&sr_sal);
4771 sr_sal.pc = pc_after_resolver;
4772 sr_sal.pspace = get_frame_program_space (frame);
4773
4774 insert_step_resume_breakpoint_at_sal (gdbarch,
4775 sr_sal, null_frame_id);
4776 }
4777
4778 keep_going (ecs);
4779 return;
4780 }
4781
4782 if (ecs->event_thread->control.step_range_end != 1
4783 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4784 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4785 && get_frame_type (frame) == SIGTRAMP_FRAME)
4786 {
4787 if (debug_infrun)
4788 fprintf_unfiltered (gdb_stdlog,
4789 "infrun: stepped into signal trampoline\n");
4790 /* The inferior, while doing a "step" or "next", has ended up in
4791 a signal trampoline (either by a signal being delivered or by
4792 the signal handler returning). Just single-step until the
4793 inferior leaves the trampoline (either by calling the handler
4794 or returning). */
4795 keep_going (ecs);
4796 return;
4797 }
4798
4799 /* Check for subroutine calls. The check for the current frame
4800 equalling the step ID is not necessary - the check of the
4801 previous frame's ID is sufficient - but it is a common case and
4802 cheaper than checking the previous frame's ID.
4803
4804 NOTE: frame_id_eq will never report two invalid frame IDs as
4805 being equal, so to get into this block, both the current and
4806 previous frame must have valid frame IDs. */
4807 /* The outer_frame_id check is a heuristic to detect stepping
4808 through startup code. If we step over an instruction which
4809 sets the stack pointer from an invalid value to a valid value,
4810 we may detect that as a subroutine call from the mythical
4811 "outermost" function. This could be fixed by marking
4812 outermost frames as !stack_p,code_p,special_p. Then the
4813 initial outermost frame, before sp was valid, would
4814 have code_addr == &_start. See the comment in frame_id_eq
4815 for more. */
4816 if (!frame_id_eq (get_stack_frame_id (frame),
4817 ecs->event_thread->control.step_stack_frame_id)
4818 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4819 ecs->event_thread->control.step_stack_frame_id)
4820 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
4821 outer_frame_id)
4822 || step_start_function != find_pc_function (stop_pc))))
4823 {
4824 CORE_ADDR real_stop_pc;
4825
4826 if (debug_infrun)
4827 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4828
4829 if ((ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
4830 || ((ecs->event_thread->control.step_range_end == 1)
4831 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4832 ecs->stop_func_start)))
4833 {
4834 /* I presume that step_over_calls is only 0 when we're
4835 supposed to be stepping at the assembly language level
4836 ("stepi"). Just stop. */
4837 /* Also, maybe we just did a "nexti" inside a prolog, so we
4838 thought it was a subroutine call but it was not. Stop as
4839 well. FENN */
4840 /* And this works the same backward as frontward. MVS */
4841 ecs->event_thread->control.stop_step = 1;
4842 print_end_stepping_range_reason ();
4843 stop_stepping (ecs);
4844 return;
4845 }
4846
4847 /* Reverse stepping through solib trampolines. */
4848
4849 if (execution_direction == EXEC_REVERSE
4850 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
4851 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4852 || (ecs->stop_func_start == 0
4853 && in_solib_dynsym_resolve_code (stop_pc))))
4854 {
4855 /* Any solib trampoline code can be handled in reverse
4856 by simply continuing to single-step. We have already
4857 executed the solib function (backwards), and a few
4858 steps will take us back through the trampoline to the
4859 caller. */
4860 keep_going (ecs);
4861 return;
4862 }
4863
4864 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4865 {
4866 /* We're doing a "next".
4867
4868 Normal (forward) execution: set a breakpoint at the
4869 callee's return address (the address at which the caller
4870 will resume).
4871
4872 Reverse (backward) execution: set the step-resume
4873 breakpoint at the start of the function that we just
4874 stepped into (backwards), and continue to there. When we
4875 get there, we'll need to single-step back to the caller. */
4876
4877 if (execution_direction == EXEC_REVERSE)
4878 {
4879 struct symtab_and_line sr_sal;
4880
4881 /* Normal function call return (static or dynamic). */
4882 init_sal (&sr_sal);
4883 sr_sal.pc = ecs->stop_func_start;
4884 sr_sal.pspace = get_frame_program_space (frame);
4885 insert_step_resume_breakpoint_at_sal (gdbarch,
4886 sr_sal, null_frame_id);
4887 }
4888 else
4889 insert_step_resume_breakpoint_at_caller (frame);
4890
4891 keep_going (ecs);
4892 return;
4893 }
4894
4895 /* If we are in a function call trampoline (a stub between the
4896 calling routine and the real function), locate the real
4897 function. That's what tells us (a) whether we want to step
4898 into it at all, and (b) what prologue we want to run to the
4899 end of, if we do step into it. */
4900 real_stop_pc = skip_language_trampoline (frame, stop_pc);
4901 if (real_stop_pc == 0)
4902 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4903 if (real_stop_pc != 0)
4904 ecs->stop_func_start = real_stop_pc;
4905
4906 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
4907 {
4908 struct symtab_and_line sr_sal;
4909
4910 init_sal (&sr_sal);
4911 sr_sal.pc = ecs->stop_func_start;
4912 sr_sal.pspace = get_frame_program_space (frame);
4913
4914 insert_step_resume_breakpoint_at_sal (gdbarch,
4915 sr_sal, null_frame_id);
4916 keep_going (ecs);
4917 return;
4918 }
4919
4920 /* If we have line number information for the function we are
4921 thinking of stepping into and the function isn't on the skip
4922 list, step into it.
4923
4924 If there are several symtabs at that PC (e.g. with include
4925 files), we just want to know whether *any* of them have line
4926 numbers. find_pc_line handles this. */
4927 {
4928 struct symtab_and_line tmp_sal;
4929
4930 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
4931 if (tmp_sal.line != 0
4932 && !function_pc_is_marked_for_skip (ecs->stop_func_start))
4933 {
4934 if (execution_direction == EXEC_REVERSE)
4935 handle_step_into_function_backward (gdbarch, ecs);
4936 else
4937 handle_step_into_function (gdbarch, ecs);
4938 return;
4939 }
4940 }
4941
4942 /* If we have no line number and the step-stop-if-no-debug is
4943 set, we stop the step so that the user has a chance to switch
4944 to assembly mode. */
4945 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4946 && step_stop_if_no_debug)
4947 {
4948 ecs->event_thread->control.stop_step = 1;
4949 print_end_stepping_range_reason ();
4950 stop_stepping (ecs);
4951 return;
4952 }
4953
4954 if (execution_direction == EXEC_REVERSE)
4955 {
4956 /* Set a breakpoint at callee's start address.
4957 From there we can step once and be back in the caller. */
4958 struct symtab_and_line sr_sal;
4959
4960 init_sal (&sr_sal);
4961 sr_sal.pc = ecs->stop_func_start;
4962 sr_sal.pspace = get_frame_program_space (frame);
4963 insert_step_resume_breakpoint_at_sal (gdbarch,
4964 sr_sal, null_frame_id);
4965 }
4966 else
4967 /* Set a breakpoint at callee's return address (the address
4968 at which the caller will resume). */
4969 insert_step_resume_breakpoint_at_caller (frame);
4970
4971 keep_going (ecs);
4972 return;
4973 }
4974
4975 /* Reverse stepping through solib trampolines. */
4976
4977 if (execution_direction == EXEC_REVERSE
4978 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
4979 {
4980 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4981 || (ecs->stop_func_start == 0
4982 && in_solib_dynsym_resolve_code (stop_pc)))
4983 {
4984 /* Any solib trampoline code can be handled in reverse
4985 by simply continuing to single-step. We have already
4986 executed the solib function (backwards), and a few
4987 steps will take us back through the trampoline to the
4988 caller. */
4989 keep_going (ecs);
4990 return;
4991 }
4992 else if (in_solib_dynsym_resolve_code (stop_pc))
4993 {
4994 /* Stepped backward into the solib dynsym resolver.
4995 Set a breakpoint at its start and continue, then
4996 one more step will take us out. */
4997 struct symtab_and_line sr_sal;
4998
4999 init_sal (&sr_sal);
5000 sr_sal.pc = ecs->stop_func_start;
5001 sr_sal.pspace = get_frame_program_space (frame);
5002 insert_step_resume_breakpoint_at_sal (gdbarch,
5003 sr_sal, null_frame_id);
5004 keep_going (ecs);
5005 return;
5006 }
5007 }
5008
5009 /* If we're in the return path from a shared library trampoline,
5010 we want to proceed through the trampoline when stepping. */
5011 if (gdbarch_in_solib_return_trampoline (gdbarch,
5012 stop_pc, ecs->stop_func_name))
5013 {
5014 /* Determine where this trampoline returns. */
5015 CORE_ADDR real_stop_pc;
5016
5017 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
5018
5019 if (debug_infrun)
5020 fprintf_unfiltered (gdb_stdlog,
5021 "infrun: stepped into solib return tramp\n");
5022
5023 /* Only proceed through if we know where it's going. */
5024 if (real_stop_pc)
5025 {
5026 /* And put the step-breakpoint there and go until there. */
5027 struct symtab_and_line sr_sal;
5028
5029 init_sal (&sr_sal); /* initialize to zeroes */
5030 sr_sal.pc = real_stop_pc;
5031 sr_sal.section = find_pc_overlay (sr_sal.pc);
5032 sr_sal.pspace = get_frame_program_space (frame);
5033
5034 /* Do not specify what the fp should be when we stop since
5035 on some machines the prologue is where the new fp value
5036 is established. */
5037 insert_step_resume_breakpoint_at_sal (gdbarch,
5038 sr_sal, null_frame_id);
5039
5040 /* Restart without fiddling with the step ranges or
5041 other state. */
5042 keep_going (ecs);
5043 return;
5044 }
5045 }
5046
5047 stop_pc_sal = find_pc_line (stop_pc, 0);
5048
5049 /* NOTE: tausq/2004-05-24: This if block used to be done before all
5050 the trampoline processing logic, however, there are some trampolines
5051 that have no names, so we should do trampoline handling first. */
5052 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5053 && ecs->stop_func_name == NULL
5054 && stop_pc_sal.line == 0)
5055 {
5056 if (debug_infrun)
5057 fprintf_unfiltered (gdb_stdlog,
5058 "infrun: stepped into undebuggable function\n");
5059
5060 /* The inferior just stepped into, or returned to, an
5061 undebuggable function (where there is no debugging information
5062 and no line number corresponding to the address where the
5063 inferior stopped). Since we want to skip this kind of code,
5064 we keep going until the inferior returns from this
5065 function - unless the user has asked us not to (via
5066 set step-mode) or we no longer know how to get back
5067 to the call site. */
5068 if (step_stop_if_no_debug
5069 || !frame_id_p (frame_unwind_caller_id (frame)))
5070 {
5071 /* If we have no line number and the step-stop-if-no-debug
5072 is set, we stop the step so that the user has a chance to
5073 switch to assembly mode. */
5074 ecs->event_thread->control.stop_step = 1;
5075 print_end_stepping_range_reason ();
5076 stop_stepping (ecs);
5077 return;
5078 }
5079 else
5080 {
5081 /* Set a breakpoint at callee's return address (the address
5082 at which the caller will resume). */
5083 insert_step_resume_breakpoint_at_caller (frame);
5084 keep_going (ecs);
5085 return;
5086 }
5087 }
5088
5089 if (ecs->event_thread->control.step_range_end == 1)
5090 {
5091 /* It is stepi or nexti. We always want to stop stepping after
5092 one instruction. */
5093 if (debug_infrun)
5094 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
5095 ecs->event_thread->control.stop_step = 1;
5096 print_end_stepping_range_reason ();
5097 stop_stepping (ecs);
5098 return;
5099 }
5100
5101 if (stop_pc_sal.line == 0)
5102 {
5103 /* We have no line number information. That means to stop
5104 stepping (does this always happen right after one instruction,
5105 when we do "s" in a function with no line numbers,
5106 or can this happen as a result of a return or longjmp?). */
5107 if (debug_infrun)
5108 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
5109 ecs->event_thread->control.stop_step = 1;
5110 print_end_stepping_range_reason ();
5111 stop_stepping (ecs);
5112 return;
5113 }
5114
5115 /* Look for "calls" to inlined functions, part one. If the inline
5116 frame machinery detected some skipped call sites, we have entered
5117 a new inline function. */
5118
5119 if (frame_id_eq (get_frame_id (get_current_frame ()),
5120 ecs->event_thread->control.step_frame_id)
5121 && inline_skipped_frames (ecs->ptid))
5122 {
5123 struct symtab_and_line call_sal;
5124
5125 if (debug_infrun)
5126 fprintf_unfiltered (gdb_stdlog,
5127 "infrun: stepped into inlined function\n");
5128
5129 find_frame_sal (get_current_frame (), &call_sal);
5130
5131 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
5132 {
5133 /* For "step", we're going to stop. But if the call site
5134 for this inlined function is on the same source line as
5135 we were previously stepping, go down into the function
5136 first. Otherwise stop at the call site. */
5137
5138 if (call_sal.line == ecs->event_thread->current_line
5139 && call_sal.symtab == ecs->event_thread->current_symtab)
5140 step_into_inline_frame (ecs->ptid);
5141
5142 ecs->event_thread->control.stop_step = 1;
5143 print_end_stepping_range_reason ();
5144 stop_stepping (ecs);
5145 return;
5146 }
5147 else
5148 {
5149 /* For "next", we should stop at the call site if it is on a
5150 different source line. Otherwise continue through the
5151 inlined function. */
5152 if (call_sal.line == ecs->event_thread->current_line
5153 && call_sal.symtab == ecs->event_thread->current_symtab)
5154 keep_going (ecs);
5155 else
5156 {
5157 ecs->event_thread->control.stop_step = 1;
5158 print_end_stepping_range_reason ();
5159 stop_stepping (ecs);
5160 }
5161 return;
5162 }
5163 }
5164
5165 /* Look for "calls" to inlined functions, part two. If we are still
5166 in the same real function we were stepping through, but we have
5167 to go further up to find the exact frame ID, we are stepping
5168 through a more inlined call beyond its call site. */
5169
5170 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
5171 && !frame_id_eq (get_frame_id (get_current_frame ()),
5172 ecs->event_thread->control.step_frame_id)
5173 && stepped_in_from (get_current_frame (),
5174 ecs->event_thread->control.step_frame_id))
5175 {
5176 if (debug_infrun)
5177 fprintf_unfiltered (gdb_stdlog,
5178 "infrun: stepping through inlined function\n");
5179
5180 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5181 keep_going (ecs);
5182 else
5183 {
5184 ecs->event_thread->control.stop_step = 1;
5185 print_end_stepping_range_reason ();
5186 stop_stepping (ecs);
5187 }
5188 return;
5189 }
5190
5191 if ((stop_pc == stop_pc_sal.pc)
5192 && (ecs->event_thread->current_line != stop_pc_sal.line
5193 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
5194 {
5195 /* We are at the start of a different line. So stop. Note that
5196 we don't stop if we step into the middle of a different line.
5197 That is said to make things like for (;;) statements work
5198 better. */
5199 if (debug_infrun)
5200 fprintf_unfiltered (gdb_stdlog,
5201 "infrun: stepped to a different line\n");
5202 ecs->event_thread->control.stop_step = 1;
5203 print_end_stepping_range_reason ();
5204 stop_stepping (ecs);
5205 return;
5206 }
5207
5208 /* We aren't done stepping.
5209
5210 Optimize by setting the stepping range to the line.
5211 (We might not be in the original line, but if we entered a
5212 new line in mid-statement, we continue stepping. This makes
5213 things like for(;;) statements work better.) */
5214
5215 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
5216 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
5217 set_step_info (frame, stop_pc_sal);
5218
5219 if (debug_infrun)
5220 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
5221 keep_going (ecs);
5222 }
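
/* Editorial note (not part of the original listing): the step range
   set at the end of the function above is treated as a half-open
   interval; stepping continues while
   step_range_start <= stop_pc && stop_pc < step_range_end.  The
   special value step_range_end == 1 is how stepi/nexti is encoded, as
   tested earlier in the function.  */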
5223
5224 /* Is thread TP in the middle of single-stepping? */
5225
5226 static int
5227 currently_stepping (struct thread_info *tp)
5228 {
5229 return ((tp->control.step_range_end
5230 && tp->control.step_resume_breakpoint == NULL)
5231 || tp->control.trap_expected
5232 || bpstat_should_step ());
5233 }
5234
5235 /* Returns true if any thread *but* the one passed in "data" is in the
5236 middle of stepping or of handling a "next". */
5237
5238 static int
5239 currently_stepping_or_nexting_callback (struct thread_info *tp, void *data)
5240 {
5241 if (tp == data)
5242 return 0;
5243
5244 return (tp->control.step_range_end
5245 || tp->control.trap_expected);
5246 }
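
/* Editorial sketch (not part of the original listing): a predicate
   like the callback above is normally driven through
   iterate_over_threads from gdbthread.h, which walks all threads and
   returns the first one for which the callback returns nonzero, or
   NULL.  The wrapper name below is hypothetical; only the callback and
   iterate_over_threads come from the surrounding sources.  */
#if 0
static struct thread_info *
find_other_stepping_thread (struct thread_info *current)
{
  /* Report any thread other than CURRENT that is still in the middle
     of a step or next.  */
  return iterate_over_threads (currently_stepping_or_nexting_callback,
			       current);
}
#endif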
5247
5248 /* Inferior has stepped into a subroutine call with source code that
5249 we should not step over. Do step to the first line of code in
5250 it. */
5251
5252 static void
5253 handle_step_into_function (struct gdbarch *gdbarch,
5254 struct execution_control_state *ecs)
5255 {
5256 struct symtab *s;
5257 struct symtab_and_line stop_func_sal, sr_sal;
5258
5259 fill_in_stop_func (gdbarch, ecs);
5260
5261 s = find_pc_symtab (stop_pc);
5262 if (s && s->language != language_asm)
5263 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5264 ecs->stop_func_start);
5265
5266 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
5267 /* Use the step_resume_break to step until the end of the prologue,
5268 even if that involves jumps (as it seems to on the vax under
5269 4.2). */
5270 /* If the prologue ends in the middle of a source line, continue to
5271 the end of that source line (if it is still within the function).
5272 Otherwise, just go to end of prologue. */
5273 if (stop_func_sal.end
5274 && stop_func_sal.pc != ecs->stop_func_start
5275 && stop_func_sal.end < ecs->stop_func_end)
5276 ecs->stop_func_start = stop_func_sal.end;
5277
5278 /* Architectures which require breakpoint adjustment might not be able
5279 to place a breakpoint at the computed address. If so, the test
5280 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
5281 ecs->stop_func_start to an address at which a breakpoint may be
5282 legitimately placed.
5283
5284 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
5285 made, GDB will enter an infinite loop when stepping through
5286 optimized code consisting of VLIW instructions which contain
5287 subinstructions corresponding to different source lines. On
5288 FR-V, it's not permitted to place a breakpoint on any but the
5289 first subinstruction of a VLIW instruction. When a breakpoint is
5290 set, GDB will adjust the breakpoint address to the beginning of
5291 the VLIW instruction. Thus, we need to make the corresponding
5292 adjustment here when computing the stop address. */
5293
5294 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
5295 {
5296 ecs->stop_func_start
5297 = gdbarch_adjust_breakpoint_address (gdbarch,
5298 ecs->stop_func_start);
5299 }
5300
5301 if (ecs->stop_func_start == stop_pc)
5302 {
5303 /* We are already there: stop now. */
5304 ecs->event_thread->control.stop_step = 1;
5305 print_end_stepping_range_reason ();
5306 stop_stepping (ecs);
5307 return;
5308 }
5309 else
5310 {
5311 /* Put the step-breakpoint there and go until there. */
5312 init_sal (&sr_sal); /* initialize to zeroes */
5313 sr_sal.pc = ecs->stop_func_start;
5314 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
5315 sr_sal.pspace = get_frame_program_space (get_current_frame ());
5316
5317 /* Do not specify what the fp should be when we stop since on
5318 some machines the prologue is where the new fp value is
5319 established. */
5320 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
5321
5322 /* And make sure stepping stops right away then. */
5323 ecs->event_thread->control.step_range_end
5324 = ecs->event_thread->control.step_range_start;
5325 }
5326 keep_going (ecs);
5327 }
5328
5329 /* Inferior has stepped backward into a subroutine call with source
5330 code that we should not step over. Do step to the beginning of the
5331 last line of code in it. */
5332
5333 static void
5334 handle_step_into_function_backward (struct gdbarch *gdbarch,
5335 struct execution_control_state *ecs)
5336 {
5337 struct symtab *s;
5338 struct symtab_and_line stop_func_sal;
5339
5340 fill_in_stop_func (gdbarch, ecs);
5341
5342 s = find_pc_symtab (stop_pc);
5343 if (s && s->language != language_asm)
5344 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5345 ecs->stop_func_start);
5346
5347 stop_func_sal = find_pc_line (stop_pc, 0);
5348
5349 /* OK, we're just going to keep stepping here. */
5350 if (stop_func_sal.pc == stop_pc)
5351 {
5352 /* We're there already. Just stop stepping now. */
5353 ecs->event_thread->control.stop_step = 1;
5354 print_end_stepping_range_reason ();
5355 stop_stepping (ecs);
5356 }
5357 else
5358 {
5359 /* Else just reset the step range and keep going.
5360 Don't use a step-resume breakpoint; they don't work for
5361 epilogues, which can have multiple entry paths. */
5362 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
5363 ecs->event_thread->control.step_range_end = stop_func_sal.end;
5364 keep_going (ecs);
5365 }
5366 return;
5367 }
5368
5369 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
5370 This is used both to skip functions and to skip over code. */
5371
5372 static void
5373 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
5374 struct symtab_and_line sr_sal,
5375 struct frame_id sr_id,
5376 enum bptype sr_type)
5377 {
5378 /* There should never be more than one step-resume or longjmp-resume
5379 breakpoint per thread, so we should never be setting a new
5380 step_resume_breakpoint when one is already active. */
5381 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5382 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
5383
5384 if (debug_infrun)
5385 fprintf_unfiltered (gdb_stdlog,
5386 "infrun: inserting step-resume breakpoint at %s\n",
5387 paddress (gdbarch, sr_sal.pc));
5388
5389 inferior_thread ()->control.step_resume_breakpoint
5390 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type);
5391 }
5392
5393 void
5394 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
5395 struct symtab_and_line sr_sal,
5396 struct frame_id sr_id)
5397 {
5398 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
5399 sr_sal, sr_id,
5400 bp_step_resume);
5401 }
5402
5403 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
5404 This is used to skip a potential signal handler.
5405
5406 This is called with the interrupted function's frame. The signal
5407 handler, when it returns, will resume the interrupted function at
5408 RETURN_FRAME.pc. */
5409
5410 static void
5411 insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
5412 {
5413 struct symtab_and_line sr_sal;
5414 struct gdbarch *gdbarch;
5415
5416 gdb_assert (return_frame != NULL);
5417 init_sal (&sr_sal); /* initialize to zeros */
5418
5419 gdbarch = get_frame_arch (return_frame);
5420 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
5421 sr_sal.section = find_pc_overlay (sr_sal.pc);
5422 sr_sal.pspace = get_frame_program_space (return_frame);
5423
5424 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
5425 get_stack_frame_id (return_frame),
5426 bp_hp_step_resume);
5427 }
5428
5429 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
5430 is used to skip a function after stepping into it (for "next" or if
5431 the called function has no debugging information).
5432
5433 The current function has almost always been reached by single
5434 stepping a call or return instruction. NEXT_FRAME belongs to the
5435 current function, and the breakpoint will be set at the caller's
5436 resume address.
5437
5438 This is a separate function rather than reusing
5439 insert_hp_step_resume_breakpoint_at_frame in order to avoid
5440 get_prev_frame, which may stop prematurely (see the implementation
5441 of frame_unwind_caller_id for an example). */
5442
5443 static void
5444 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
5445 {
5446 struct symtab_and_line sr_sal;
5447 struct gdbarch *gdbarch;
5448
5449 /* We shouldn't have gotten here if we don't know where the call site
5450 is. */
5451 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
5452
5453 init_sal (&sr_sal); /* initialize to zeros */
5454
5455 gdbarch = frame_unwind_caller_arch (next_frame);
5456 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
5457 frame_unwind_caller_pc (next_frame));
5458 sr_sal.section = find_pc_overlay (sr_sal.pc);
5459 sr_sal.pspace = frame_unwind_program_space (next_frame);
5460
5461 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5462 frame_unwind_caller_id (next_frame));
5463 }
5464
5465 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
5466 new breakpoint at the target of a jmp_buf. The handling of
5467 longjmp-resume uses the same mechanisms used for handling
5468 "step-resume" breakpoints. */
5469
5470 static void
5471 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
5472 {
5473 /* There should never be more than one step-resume or longjmp-resume
5474 breakpoint per thread, so we should never be setting a new
5475 longjmp_resume_breakpoint when one is already active. */
5476 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5477
5478 if (debug_infrun)
5479 fprintf_unfiltered (gdb_stdlog,
5480 "infrun: inserting longjmp-resume breakpoint at %s\n",
5481 paddress (gdbarch, pc));
5482
5483 inferior_thread ()->control.step_resume_breakpoint =
5484 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
5485 }
5486
5487 /* Insert an exception resume breakpoint. TP is the thread throwing
5488 the exception. The block B is the block of the unwinder debug hook
5489 function. FRAME is the frame corresponding to the call to this
5490 function. SYM is the symbol of the function argument holding the
5491 target PC of the exception. */
5492
5493 static void
5494 insert_exception_resume_breakpoint (struct thread_info *tp,
5495 struct block *b,
5496 struct frame_info *frame,
5497 struct symbol *sym)
5498 {
5499 volatile struct gdb_exception e;
5500
5501 /* We want to ignore errors here. */
5502 TRY_CATCH (e, RETURN_MASK_ERROR)
5503 {
5504 struct symbol *vsym;
5505 struct value *value;
5506 CORE_ADDR handler;
5507 struct breakpoint *bp;
5508
5509 vsym = lookup_symbol (SYMBOL_LINKAGE_NAME (sym), b, VAR_DOMAIN, NULL);
5510 value = read_var_value (vsym, frame);
5511 /* If the value was optimized out, revert to the old behavior. */
5512 if (! value_optimized_out (value))
5513 {
5514 handler = value_as_address (value);
5515
5516 if (debug_infrun)
5517 fprintf_unfiltered (gdb_stdlog,
5518 "infrun: exception resume at %lx\n",
5519 (unsigned long) handler);
5520
5521 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5522 handler, bp_exception_resume);
5523 bp->thread = tp->num;
5524 inferior_thread ()->control.exception_resume_breakpoint = bp;
5525 }
5526 }
5527 }
5528
5529 /* This is called when an exception has been intercepted. Check to
5530 see whether the exception's destination is of interest, and if so,
5531 set an exception resume breakpoint there. */
5532
5533 static void
5534 check_exception_resume (struct execution_control_state *ecs,
5535 struct frame_info *frame, struct symbol *func)
5536 {
5537 volatile struct gdb_exception e;
5538
5539 TRY_CATCH (e, RETURN_MASK_ERROR)
5540 {
5541 struct block *b;
5542 struct dict_iterator iter;
5543 struct symbol *sym;
5544 int argno = 0;
5545
5546 /* The exception breakpoint is a thread-specific breakpoint on
5547 the unwinder's debug hook, declared as:
5548
5549 void _Unwind_DebugHook (void *cfa, void *handler);
5550
5551 The CFA argument indicates the frame to which control is
5552 about to be transferred. HANDLER is the destination PC.
5553
5554 We ignore the CFA and set a temporary breakpoint at HANDLER.
5555 This is not extremely efficient but it avoids issues in gdb
5556 with computing the DWARF CFA, and it also works even in weird
5557 cases such as throwing an exception from inside a signal
5558 handler. */
5559
5560 b = SYMBOL_BLOCK_VALUE (func);
5561 ALL_BLOCK_SYMBOLS (b, iter, sym)
5562 {
5563 if (!SYMBOL_IS_ARGUMENT (sym))
5564 continue;
5565
5566 if (argno == 0)
5567 ++argno;
5568 else
5569 {
5570 insert_exception_resume_breakpoint (ecs->event_thread,
5571 b, frame, sym);
5572 break;
5573 }
5574 }
5575 }
5576 }
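
/* Editorial sketch (not part of the original listing): the symbol that
   check_exception_resume breakpoints is provided by the unwinder
   runtime, not by GDB.  Assuming a libgcc-style runtime, a minimal
   definition looks roughly like the following: an empty, out-of-line
   function that exists only so a debugger can plant a breakpoint on it
   and read its two arguments.  The exact attributes and body in any
   given runtime may differ.  */
#if 0
void
_Unwind_DebugHook (void *cfa __attribute__ ((unused)),
                   void *handler __attribute__ ((unused)))
{
  /* Deliberately empty; the asm keeps the function from being
     optimized away so the breakpoint is reliably hit.  */
  asm ("");
}
#endif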
5577
5578 static void
5579 stop_stepping (struct execution_control_state *ecs)
5580 {
5581 if (debug_infrun)
5582 fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n");
5583
5584 /* Let callers know we don't want to wait for the inferior anymore. */
5585 ecs->wait_some_more = 0;
5586 }
5587
5588 /* This function handles various cases where we need to continue
5589 waiting for the inferior. */
5590 /* (Used to be the keep_going: label in the old wait_for_inferior). */
5591
5592 static void
5593 keep_going (struct execution_control_state *ecs)
5594 {
5595 /* Make sure normal_stop is called if we get a QUIT handled before
5596 reaching resume. */
5597 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
5598
5599 /* Save the pc before execution, to compare with pc after stop. */
5600 ecs->event_thread->prev_pc
5601 = regcache_read_pc (get_thread_regcache (ecs->ptid));
5602
5603 /* If we did not do break;, it means we should keep running the
5604 inferior and not return to the debugger. */
5605
5606 if (ecs->event_thread->control.trap_expected
5607 && ecs->event_thread->suspend.stop_signal != TARGET_SIGNAL_TRAP)
5608 {
5609 /* We took a signal (which we are supposed to pass through to
5610 the inferior, else we'd not get here) and we haven't yet
5611 gotten our trap. Simply continue. */
5612
5613 discard_cleanups (old_cleanups);
5614 resume (currently_stepping (ecs->event_thread),
5615 ecs->event_thread->suspend.stop_signal);
5616 }
5617 else
5618 {
5619 /* Either the trap was not expected, but we are continuing
5620 anyway (the user asked that this signal be passed to the
5621 child)
5622 -- or --
5623 The signal was SIGTRAP, e.g. it was our signal, but we
5624 decided we should resume from it.
5625
5626 We're going to run this baby now!
5627
5628 Note that insert_breakpoints won't try to re-insert
5629 already inserted breakpoints. Therefore, we don't
5630 care if breakpoints were already inserted, or not. */
5631
5632 if (ecs->event_thread->stepping_over_breakpoint)
5633 {
5634 struct regcache *thread_regcache = get_thread_regcache (ecs->ptid);
5635
5636 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
5637 /* Since we can't do a displaced step, we have to remove
5638 the breakpoint while we step it. To keep things
5639 simple, we remove them all. */
5640 remove_breakpoints ();
5641 }
5642 else
5643 {
5644 volatile struct gdb_exception e;
5645
5646 /* Stop stepping when inserting breakpoints
5647 has failed. */
5648 TRY_CATCH (e, RETURN_MASK_ERROR)
5649 {
5650 insert_breakpoints ();
5651 }
5652 if (e.reason < 0)
5653 {
5654 exception_print (gdb_stderr, e);
5655 stop_stepping (ecs);
5656 return;
5657 }
5658 }
5659
5660 ecs->event_thread->control.trap_expected
5661 = ecs->event_thread->stepping_over_breakpoint;
5662
5663 /* Do not deliver SIGNAL_TRAP (except when the user explicitly
5664 specifies that such a signal should be delivered to the
5665 target program).
5666
5667 Typically, this would occur when a user is debugging a
5668 target monitor on a simulator: the target monitor sets a
5669 breakpoint; the simulator encounters this breakpoint and
5670 halts the simulation, handing control to GDB; GDB, noting
5671 that the breakpoint isn't valid, returns control back to the
5672 simulator; the simulator then delivers the hardware
5673 equivalent of a SIGNAL_TRAP to the program being debugged. */
5674
5675 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
5676 && !signal_program[ecs->event_thread->suspend.stop_signal])
5677 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
5678
5679 discard_cleanups (old_cleanups);
5680 resume (currently_stepping (ecs->event_thread),
5681 ecs->event_thread->suspend.stop_signal);
5682 }
5683
5684 prepare_to_wait (ecs);
5685 }
5686
5687 /* This function normally comes after a resume, before
5688 handle_inferior_event exits. It takes care of any last bits of
5689 housekeeping, and sets the all-important wait_some_more flag. */
5690
5691 static void
5692 prepare_to_wait (struct execution_control_state *ecs)
5693 {
5694 if (debug_infrun)
5695 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
5696
5697 /* This is the old end of the while loop. Let everybody know we
5698 want to wait for the inferior some more and get called again
5699 soon. */
5700 ecs->wait_some_more = 1;
5701 }
5702
5703 /* Several print_*_reason functions to print why the inferior has stopped.
5704 We always print something when the inferior exits, or receives a signal.
5705 The rest of the cases are dealt with later on in normal_stop and
5706 print_it_typical. Ideally there should be a call to one of these
5707 print_*_reason functions from handle_inferior_event each time
5708 stop_stepping is called. */
5709
5710 /* We are done with a step/next/si/ni command, so print why the
5711 inferior has stopped.  For the CLI this prints nothing; for MI it
5712 emits the stop reason, and only if we are not in the middle of
5713 doing a "step n" operation for n > 1. */
5714
5715 static void
5716 print_end_stepping_range_reason (void)
5717 {
5718 if ((!inferior_thread ()->step_multi
5719 || !inferior_thread ()->control.stop_step)
5720 && ui_out_is_mi_like_p (current_uiout))
5721 ui_out_field_string (current_uiout, "reason",
5722 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
5723 }
5724
5725 /* The inferior was terminated by a signal, print why it stopped. */
5726
5727 static void
5728 print_signal_exited_reason (enum target_signal siggnal)
5729 {
5730 struct ui_out *uiout = current_uiout;
5731
5732 annotate_signalled ();
5733 if (ui_out_is_mi_like_p (uiout))
5734 ui_out_field_string
5735 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
5736 ui_out_text (uiout, "\nProgram terminated with signal ");
5737 annotate_signal_name ();
5738 ui_out_field_string (uiout, "signal-name",
5739 target_signal_to_name (siggnal));
5740 annotate_signal_name_end ();
5741 ui_out_text (uiout, ", ");
5742 annotate_signal_string ();
5743 ui_out_field_string (uiout, "signal-meaning",
5744 target_signal_to_string (siggnal));
5745 annotate_signal_string_end ();
5746 ui_out_text (uiout, ".\n");
5747 ui_out_text (uiout, "The program no longer exists.\n");
5748 }
5749
5750 /* The inferior program is finished, print why it stopped. */
5751
5752 static void
5753 print_exited_reason (int exitstatus)
5754 {
5755 struct inferior *inf = current_inferior ();
5756 const char *pidstr = target_pid_to_str (pid_to_ptid (inf->pid));
5757 struct ui_out *uiout = current_uiout;
5758
5759 annotate_exited (exitstatus);
5760 if (exitstatus)
5761 {
5762 if (ui_out_is_mi_like_p (uiout))
5763 ui_out_field_string (uiout, "reason",
5764 async_reason_lookup (EXEC_ASYNC_EXITED));
5765 ui_out_text (uiout, "[Inferior ");
5766 ui_out_text (uiout, plongest (inf->num));
5767 ui_out_text (uiout, " (");
5768 ui_out_text (uiout, pidstr);
5769 ui_out_text (uiout, ") exited with code ");
5770 ui_out_field_fmt (uiout, "exit-code", "0%o", (unsigned int) exitstatus);
5771 ui_out_text (uiout, "]\n");
5772 }
5773 else
5774 {
5775 if (ui_out_is_mi_like_p (uiout))
5776 ui_out_field_string
5777 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
5778 ui_out_text (uiout, "[Inferior ");
5779 ui_out_text (uiout, plongest (inf->num));
5780 ui_out_text (uiout, " (");
5781 ui_out_text (uiout, pidstr);
5782 ui_out_text (uiout, ") exited normally]\n");
5783 }
5784 /* Support the --return-child-result option. */
5785 return_child_result_value = exitstatus;
5786 }
5787
5788 /* Signal received, print why the inferior has stopped. The signal table
5789 tells us to print about it. */
5790
5791 static void
5792 print_signal_received_reason (enum target_signal siggnal)
5793 {
5794 struct ui_out *uiout = current_uiout;
5795
5796 annotate_signal ();
5797
5798 if (siggnal == TARGET_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
5799 {
5800 struct thread_info *t = inferior_thread ();
5801
5802 ui_out_text (uiout, "\n[");
5803 ui_out_field_string (uiout, "thread-name",
5804 target_pid_to_str (t->ptid));
5805 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
5806 ui_out_text (uiout, " stopped");
5807 }
5808 else
5809 {
5810 ui_out_text (uiout, "\nProgram received signal ");
5811 annotate_signal_name ();
5812 if (ui_out_is_mi_like_p (uiout))
5813 ui_out_field_string
5814 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
5815 ui_out_field_string (uiout, "signal-name",
5816 target_signal_to_name (siggnal));
5817 annotate_signal_name_end ();
5818 ui_out_text (uiout, ", ");
5819 annotate_signal_string ();
5820 ui_out_field_string (uiout, "signal-meaning",
5821 target_signal_to_string (siggnal));
5822 annotate_signal_string_end ();
5823 }
5824 ui_out_text (uiout, ".\n");
5825 }
5826
5827 /* Reverse execution: target ran out of history info, print why the inferior
5828 has stopped. */
5829
5830 static void
5831 print_no_history_reason (void)
5832 {
5833 ui_out_text (current_uiout, "\nNo more reverse-execution history.\n");
5834 }
5835
5836 /* Here to return control to GDB when the inferior stops for real.
5837 Print appropriate messages, remove breakpoints, give terminal our modes.
5838
5839 STOP_PRINT_FRAME nonzero means print the executing frame
5840 (pc, function, args, file, line number and line text).
5841 BREAKPOINTS_FAILED nonzero means stop was due to error
5842 attempting to insert breakpoints. */
5843
5844 void
5845 normal_stop (void)
5846 {
5847 struct target_waitstatus last;
5848 ptid_t last_ptid;
5849 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
5850
5851 get_last_target_status (&last_ptid, &last);
5852
5853 /* If an exception is thrown from this point on, make sure to
5854 propagate GDB's knowledge of the executing state to the
5855 frontend/user running state. A QUIT is an easy exception to see
5856 here, so do this before any filtered output. */
5857 if (!non_stop)
5858 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
5859 else if (last.kind != TARGET_WAITKIND_SIGNALLED
5860 && last.kind != TARGET_WAITKIND_EXITED
5861 && last.kind != TARGET_WAITKIND_NO_RESUMED)
5862 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
5863
5864 /* In non-stop mode, we don't want GDB to switch threads behind the
5865 user's back, to avoid races where the user is typing a command to
5866 apply to thread x, but GDB switches to thread y before the user
5867 finishes entering the command. */
5868
5869 /* As with the notification of thread events, we want to delay
5870 notifying the user that we've switched thread context until
5871 the inferior actually stops.
5872
5873 There's no point in saying anything if the inferior has exited.
5874 Note that SIGNALLED here means "exited with a signal", not
5875 "received a signal". */
5876 if (!non_stop
5877 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
5878 && target_has_execution
5879 && last.kind != TARGET_WAITKIND_SIGNALLED
5880 && last.kind != TARGET_WAITKIND_EXITED
5881 && last.kind != TARGET_WAITKIND_NO_RESUMED)
5882 {
5883 target_terminal_ours_for_output ();
5884 printf_filtered (_("[Switching to %s]\n"),
5885 target_pid_to_str (inferior_ptid));
5886 annotate_thread_changed ();
5887 previous_inferior_ptid = inferior_ptid;
5888 }
5889
5890 if (last.kind == TARGET_WAITKIND_NO_RESUMED)
5891 {
5892 gdb_assert (sync_execution || !target_can_async_p ());
5893
5894 target_terminal_ours_for_output ();
5895 printf_filtered (_("No unwaited-for children left.\n"));
5896 }
5897
5898 if (!breakpoints_always_inserted_mode () && target_has_execution)
5899 {
5900 if (remove_breakpoints ())
5901 {
5902 target_terminal_ours_for_output ();
5903 printf_filtered (_("Cannot remove breakpoints because "
5904 "program is no longer writable.\nFurther "
5905 "execution is probably impossible.\n"));
5906 }
5907 }
5908
5909 /* If an auto-display called a function and that got a signal,
5910 delete that auto-display to avoid an infinite recursion. */
5911
5912 if (stopped_by_random_signal)
5913 disable_current_display ();
5914
5915 /* Don't print a message if in the middle of doing a "step n"
5916 operation for n > 1. */
5917 if (target_has_execution
5918 && last.kind != TARGET_WAITKIND_SIGNALLED
5919 && last.kind != TARGET_WAITKIND_EXITED
5920 && inferior_thread ()->step_multi
5921 && inferior_thread ()->control.stop_step)
5922 goto done;
5923
5924 target_terminal_ours ();
5925 async_enable_stdin ();
5926
5927 /* Set the current source location. This will also happen if we
5928 display the frame below, but the current SAL will be incorrect
5929 during a user hook-stop function. */
5930 if (has_stack_frames () && !stop_stack_dummy)
5931 set_current_sal_from_frame (get_current_frame (), 1);
5932
5933 /* Let the user/frontend see the threads as stopped. */
5934 do_cleanups (old_chain);
5935
5936 /* Look up the hook_stop and run it (CLI internally handles problem
5937 of stop_command's pre-hook not existing). */
5938 if (stop_command)
5939 catch_errors (hook_stop_stub, stop_command,
5940 "Error while running hook_stop:\n", RETURN_MASK_ALL);
5941
5942 if (!has_stack_frames ())
5943 goto done;
5944
5945 if (last.kind == TARGET_WAITKIND_SIGNALLED
5946 || last.kind == TARGET_WAITKIND_EXITED)
5947 goto done;
5948
5949 /* Select innermost stack frame - i.e., current frame is frame 0,
5950 and current location is based on that.
5951 Don't do this on return from a stack dummy routine,
5952 or if the program has exited. */
5953
5954 if (!stop_stack_dummy)
5955 {
5956 select_frame (get_current_frame ());
5957
5958 /* Print current location without a level number, if
5959 we have changed functions or hit a breakpoint.
5960 Print source line if we have one.
5961 bpstat_print() contains the logic deciding in detail
5962 what to print, based on the event(s) that just occurred. */
5963
5964 /* If --batch-silent is enabled then there's no need to print the current
5965 source location, and trying to do so risks causing an error message about
5966 missing source files. */
5967 if (stop_print_frame && !batch_silent)
5968 {
5969 int bpstat_ret;
5970 int source_flag;
5971 int do_frame_printing = 1;
5972 struct thread_info *tp = inferior_thread ();
5973
5974 bpstat_ret = bpstat_print (tp->control.stop_bpstat, last.kind);
5975 switch (bpstat_ret)
5976 {
5977 case PRINT_UNKNOWN:
5978 /* FIXME: cagney/2002-12-01: Given that a frame ID does
5979 (or should) carry around the function and does (or
5980 should) use that when doing a frame comparison. */
5981 if (tp->control.stop_step
5982 && frame_id_eq (tp->control.step_frame_id,
5983 get_frame_id (get_current_frame ()))
5984 && step_start_function == find_pc_function (stop_pc))
5985 source_flag = SRC_LINE; /* Finished step, just
5986 print source line. */
5987 else
5988 source_flag = SRC_AND_LOC; /* Print location and
5989 source line. */
5990 break;
5991 case PRINT_SRC_AND_LOC:
5992 source_flag = SRC_AND_LOC; /* Print location and
5993 source line. */
5994 break;
5995 case PRINT_SRC_ONLY:
5996 source_flag = SRC_LINE;
5997 break;
5998 case PRINT_NOTHING:
5999 source_flag = SRC_LINE; /* something bogus */
6000 do_frame_printing = 0;
6001 break;
6002 default:
6003 internal_error (__FILE__, __LINE__, _("Unknown value."));
6004 }
6005
6006 /* The behavior of this routine with respect to the source
6007 flag is:
6008 SRC_LINE: Print only source line
6009 LOCATION: Print only location
6010 SRC_AND_LOC: Print location and source line. */
6011 if (do_frame_printing)
6012 print_stack_frame (get_selected_frame (NULL), 0, source_flag);
6013
6014 /* Display the auto-display expressions. */
6015 do_displays ();
6016 }
6017 }
6018
6019 /* Save the function value return registers, if we care.
6020 We might be about to restore their previous contents. */
6021 if (inferior_thread ()->control.proceed_to_finish
6022 && execution_direction != EXEC_REVERSE)
6023 {
6024 /* This should not be necessary. */
6025 if (stop_registers)
6026 regcache_xfree (stop_registers);
6027
6028 /* NB: The copy goes through to the target picking up the value of
6029 all the registers. */
6030 stop_registers = regcache_dup (get_current_regcache ());
6031 }
6032
6033 if (stop_stack_dummy == STOP_STACK_DUMMY)
6034 {
6035 /* Pop the empty frame that contains the stack dummy.
6036 This also restores inferior state prior to the call
6037 (struct infcall_suspend_state). */
6038 struct frame_info *frame = get_current_frame ();
6039
6040 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
6041 frame_pop (frame);
6042 /* frame_pop() calls reinit_frame_cache as the last thing it
6043 does which means there's currently no selected frame. We
6044 don't need to re-establish a selected frame if the dummy call
6045 returns normally, that will be done by
6046 restore_infcall_control_state. However, we do have to handle
6047 the case where the dummy call is returning after being
6048 stopped (e.g. the dummy call previously hit a breakpoint).
6049 We can't know which case we have so just always re-establish
6050 a selected frame here. */
6051 select_frame (get_current_frame ());
6052 }
6053
6054 done:
6055 annotate_stopped ();
6056
6057 /* Suppress the stop observer if we're in the middle of:
6058
6059 - a step n (n > 1), as there are still more steps to be done.
6060
6061 - a "finish" command, as the observer will be called in
6062 finish_command_continuation, so it can include the inferior
6063 function's return value.
6064
6065 - calling an inferior function, as we pretend the inferior didn't
6066 run at all. The return value of the call is handled by the
6067 expression evaluator, through call_function_by_hand. */
6068
6069 if (!target_has_execution
6070 || last.kind == TARGET_WAITKIND_SIGNALLED
6071 || last.kind == TARGET_WAITKIND_EXITED
6072 || last.kind == TARGET_WAITKIND_NO_RESUMED
6073 || (!(inferior_thread ()->step_multi
6074 && inferior_thread ()->control.stop_step)
6075 && !(inferior_thread ()->control.stop_bpstat
6076 && inferior_thread ()->control.proceed_to_finish)
6077 && !inferior_thread ()->control.in_infcall))
6078 {
6079 if (!ptid_equal (inferior_ptid, null_ptid))
6080 observer_notify_normal_stop (inferior_thread ()->control.stop_bpstat,
6081 stop_print_frame);
6082 else
6083 observer_notify_normal_stop (NULL, stop_print_frame);
6084 }
6085
6086 if (target_has_execution)
6087 {
6088 if (last.kind != TARGET_WAITKIND_SIGNALLED
6089 && last.kind != TARGET_WAITKIND_EXITED)
6090 /* Delete the breakpoint we stopped at, if it wants to be deleted.
6091 Delete any breakpoint that is to be deleted at the next stop. */
6092 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
6093 }
6094
6095 /* Try to get rid of automatically added inferiors that are no
6096 longer needed. Keeping those around slows down things linearly.
6097 Note that this never removes the current inferior. */
6098 prune_inferiors ();
6099 }
6100
6101 static int
6102 hook_stop_stub (void *cmd)
6103 {
6104 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
6105 return (0);
6106 }
6107 \f
6108 int
6109 signal_stop_state (int signo)
6110 {
6111 return signal_stop[signo];
6112 }
6113
6114 int
6115 signal_print_state (int signo)
6116 {
6117 return signal_print[signo];
6118 }
6119
6120 int
6121 signal_pass_state (int signo)
6122 {
6123 return signal_program[signo];
6124 }
6125
6126 static void
6127 signal_cache_update (int signo)
6128 {
6129 if (signo == -1)
6130 {
6131 for (signo = 0; signo < (int) TARGET_SIGNAL_LAST; signo++)
6132 signal_cache_update (signo);
6133
6134 return;
6135 }
6136
6137 signal_pass[signo] = (signal_stop[signo] == 0
6138 && signal_print[signo] == 0
6139 && signal_program[signo] == 1);
6140 }
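
/* Editorial sketch (not part of the original listing): a worked
   example of the boolean above.  The function name is hypothetical and
   TARGET_SIGNAL_ALRM is assumed to be the target.h constant for
   SIGALRM.  */
#if 0
static void
example_pass_sigalrm_silently (void)
{
  /* Equivalent of "handle SIGALRM nostop noprint pass".  */
  signal_stop[TARGET_SIGNAL_ALRM] = 0;
  signal_print[TARGET_SIGNAL_ALRM] = 0;
  signal_program[TARGET_SIGNAL_ALRM] = 1;
  signal_cache_update (TARGET_SIGNAL_ALRM);
  /* signal_pass[TARGET_SIGNAL_ALRM] is now 1, so target_pass_signals
     can let the target deliver SIGALRM without reporting to GDB.  */
}
#endif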
6141
6142 int
6143 signal_stop_update (int signo, int state)
6144 {
6145 int ret = signal_stop[signo];
6146
6147 signal_stop[signo] = state;
6148 signal_cache_update (signo);
6149 return ret;
6150 }
6151
6152 int
6153 signal_print_update (int signo, int state)
6154 {
6155 int ret = signal_print[signo];
6156
6157 signal_print[signo] = state;
6158 signal_cache_update (signo);
6159 return ret;
6160 }
6161
6162 int
6163 signal_pass_update (int signo, int state)
6164 {
6165 int ret = signal_program[signo];
6166
6167 signal_program[signo] = state;
6168 signal_cache_update (signo);
6169 return ret;
6170 }
6171
6172 static void
6173 sig_print_header (void)
6174 {
6175 printf_filtered (_("Signal Stop\tPrint\tPass "
6176 "to program\tDescription\n"));
6177 }
6178
6179 static void
6180 sig_print_info (enum target_signal oursig)
6181 {
6182 const char *name = target_signal_to_name (oursig);
6183 int name_padding = 13 - strlen (name);
6184
6185 if (name_padding <= 0)
6186 name_padding = 0;
6187
6188 printf_filtered ("%s", name);
6189 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
6190 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
6191 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
6192 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
6193 printf_filtered ("%s\n", target_signal_to_string (oursig));
6194 }
6195
6196 /* Specify how various signals in the inferior should be handled. */
6197
6198 static void
6199 handle_command (char *args, int from_tty)
6200 {
6201 char **argv;
6202 int digits, wordlen;
6203 int sigfirst, signum, siglast;
6204 enum target_signal oursig;
6205 int allsigs;
6206 int nsigs;
6207 unsigned char *sigs;
6208 struct cleanup *old_chain;
6209
6210 if (args == NULL)
6211 {
6212 error_no_arg (_("signal to handle"));
6213 }
6214
6215 /* Allocate and zero an array of flags for which signals to handle. */
6216
6217 nsigs = (int) TARGET_SIGNAL_LAST;
6218 sigs = (unsigned char *) alloca (nsigs);
6219 memset (sigs, 0, nsigs);
6220
6221 /* Break the command line up into args. */
6222
6223 argv = gdb_buildargv (args);
6224 old_chain = make_cleanup_freeargv (argv);
6225
6226 /* Walk through the args, looking for signal oursigs, signal names, and
6227 actions. Signal numbers and signal names may be interspersed with
6228 actions, with the actions being performed for all signals cumulatively
6229 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
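
/* Editorial usage sketch (not part of the original listing): the loop
   below accepts command lines such as

       handle SIGUSR1 nostop noprint pass
       handle 14-15 stop print

   The first silently passes SIGUSR1 through to the program; the second
   makes GDB stop and announce the signals whose internal numbers are
   14 and 15 (internal numbering, not host signal numbers).  */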
6230
6231 while (*argv != NULL)
6232 {
6233 wordlen = strlen (*argv);
6234 for (digits = 0; isdigit ((*argv)[digits]); digits++)
6235 {;
6236 }
6237 allsigs = 0;
6238 sigfirst = siglast = -1;
6239
6240 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
6241 {
6242 /* Apply action to all signals except those used by the
6243 debugger. Silently skip those. */
6244 allsigs = 1;
6245 sigfirst = 0;
6246 siglast = nsigs - 1;
6247 }
6248 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
6249 {
6250 SET_SIGS (nsigs, sigs, signal_stop);
6251 SET_SIGS (nsigs, sigs, signal_print);
6252 }
6253 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
6254 {
6255 UNSET_SIGS (nsigs, sigs, signal_program);
6256 }
6257 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
6258 {
6259 SET_SIGS (nsigs, sigs, signal_print);
6260 }
6261 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
6262 {
6263 SET_SIGS (nsigs, sigs, signal_program);
6264 }
6265 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
6266 {
6267 UNSET_SIGS (nsigs, sigs, signal_stop);
6268 }
6269 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
6270 {
6271 SET_SIGS (nsigs, sigs, signal_program);
6272 }
6273 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
6274 {
6275 UNSET_SIGS (nsigs, sigs, signal_print);
6276 UNSET_SIGS (nsigs, sigs, signal_stop);
6277 }
6278 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
6279 {
6280 UNSET_SIGS (nsigs, sigs, signal_program);
6281 }
6282 else if (digits > 0)
6283 {
6284 /* It is numeric. The numeric signal refers to our own
6285 internal signal numbering from target.h, not to the host/target
6286 signal number.  This is a feature; users really should be
6287 using symbolic names anyway, and the common ones like
6288 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
6289
6290 sigfirst = siglast = (int)
6291 target_signal_from_command (atoi (*argv));
6292 if ((*argv)[digits] == '-')
6293 {
6294 siglast = (int)
6295 target_signal_from_command (atoi ((*argv) + digits + 1));
6296 }
6297 if (sigfirst > siglast)
6298 {
6299 /* Bet he didn't figure we'd think of this case... */
6300 signum = sigfirst;
6301 sigfirst = siglast;
6302 siglast = signum;
6303 }
6304 }
6305 else
6306 {
6307 oursig = target_signal_from_name (*argv);
6308 if (oursig != TARGET_SIGNAL_UNKNOWN)
6309 {
6310 sigfirst = siglast = (int) oursig;
6311 }
6312 else
6313 {
6314 /* Not a number and not a recognized flag word => complain. */
6315 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
6316 }
6317 }
6318
6319 /* If any signal numbers or symbol names were found, set flags for
6320 which signals to apply actions to. */
6321
6322 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
6323 {
6324 switch ((enum target_signal) signum)
6325 {
6326 case TARGET_SIGNAL_TRAP:
6327 case TARGET_SIGNAL_INT:
6328 if (!allsigs && !sigs[signum])
6329 {
6330 if (query (_("%s is used by the debugger.\n\
6331 Are you sure you want to change it? "),
6332 target_signal_to_name ((enum target_signal) signum)))
6333 {
6334 sigs[signum] = 1;
6335 }
6336 else
6337 {
6338 printf_unfiltered (_("Not confirmed, unchanged.\n"));
6339 gdb_flush (gdb_stdout);
6340 }
6341 }
6342 break;
6343 case TARGET_SIGNAL_0:
6344 case TARGET_SIGNAL_DEFAULT:
6345 case TARGET_SIGNAL_UNKNOWN:
6346 /* Make sure that "all" doesn't print these. */
6347 break;
6348 default:
6349 sigs[signum] = 1;
6350 break;
6351 }
6352 }
6353
6354 argv++;
6355 }
6356
6357 for (signum = 0; signum < nsigs; signum++)
6358 if (sigs[signum])
6359 {
6360 signal_cache_update (-1);
6361 target_pass_signals ((int) TARGET_SIGNAL_LAST, signal_pass);
6362
6363 if (from_tty)
6364 {
6365 /* Show the results. */
6366 sig_print_header ();
6367 for (; signum < nsigs; signum++)
6368 if (sigs[signum])
6369 sig_print_info (signum);
6370 }
6371
6372 break;
6373 }
6374
6375 do_cleanups (old_chain);
6376 }
6377
6378 static void
6379 xdb_handle_command (char *args, int from_tty)
6380 {
6381 char **argv;
6382 struct cleanup *old_chain;
6383
6384 if (args == NULL)
6385 error_no_arg (_("xdb command"));
6386
6387 /* Break the command line up into args. */
6388
6389 argv = gdb_buildargv (args);
6390 old_chain = make_cleanup_freeargv (argv);
6391 if (argv[1] != (char *) NULL)
6392 {
6393 char *argBuf;
6394 int bufLen;
6395
6396 bufLen = strlen (argv[0]) + 20;
6397 argBuf = (char *) xmalloc (bufLen);
6398 if (argBuf)
6399 {
6400 int validFlag = 1;
6401 enum target_signal oursig;
6402
6403 oursig = target_signal_from_name (argv[0]);
6404 memset (argBuf, 0, bufLen);
6405 if (strcmp (argv[1], "Q") == 0)
6406 sprintf (argBuf, "%s %s", argv[0], "noprint");
6407 else
6408 {
6409 if (strcmp (argv[1], "s") == 0)
6410 {
6411 if (!signal_stop[oursig])
6412 sprintf (argBuf, "%s %s", argv[0], "stop");
6413 else
6414 sprintf (argBuf, "%s %s", argv[0], "nostop");
6415 }
6416 else if (strcmp (argv[1], "i") == 0)
6417 {
6418 if (!signal_program[oursig])
6419 sprintf (argBuf, "%s %s", argv[0], "pass");
6420 else
6421 sprintf (argBuf, "%s %s", argv[0], "nopass");
6422 }
6423 else if (strcmp (argv[1], "r") == 0)
6424 {
6425 if (!signal_print[oursig])
6426 sprintf (argBuf, "%s %s", argv[0], "print");
6427 else
6428 sprintf (argBuf, "%s %s", argv[0], "noprint");
6429 }
6430 else
6431 validFlag = 0;
6432 }
6433 if (validFlag)
6434 handle_command (argBuf, from_tty);
6435 else
6436 printf_filtered (_("Invalid signal handling flag.\n"));
6437 if (argBuf)
6438 xfree (argBuf);
6439 }
6440 }
6441 do_cleanups (old_chain);
6442 }
6443
6444 /* Print current contents of the tables set by the handle command.
6445 It is possible we should just be printing signals actually used
6446 by the current target (but for things to work right when switching
6447 targets, all signals should be in the signal tables). */
6448
6449 static void
6450 signals_info (char *signum_exp, int from_tty)
6451 {
6452 enum target_signal oursig;
6453
6454 sig_print_header ();
6455
6456 if (signum_exp)
6457 {
6458 /* First see if this is a symbol name. */
6459 oursig = target_signal_from_name (signum_exp);
6460 if (oursig == TARGET_SIGNAL_UNKNOWN)
6461 {
6462 /* No, try numeric. */
6463 oursig =
6464 target_signal_from_command (parse_and_eval_long (signum_exp));
6465 }
6466 sig_print_info (oursig);
6467 return;
6468 }
6469
6470 printf_filtered ("\n");
6471 /* These ugly casts brought to you by the native VAX compiler. */
6472 for (oursig = TARGET_SIGNAL_FIRST;
6473 (int) oursig < (int) TARGET_SIGNAL_LAST;
6474 oursig = (enum target_signal) ((int) oursig + 1))
6475 {
6476 QUIT;
6477
6478 if (oursig != TARGET_SIGNAL_UNKNOWN
6479 && oursig != TARGET_SIGNAL_DEFAULT && oursig != TARGET_SIGNAL_0)
6480 sig_print_info (oursig);
6481 }
6482
6483 printf_filtered (_("\nUse the \"handle\" command "
6484 "to change these tables.\n"));
6485 }
6486
6487 /* Check if it makes sense to read $_siginfo from the current thread
6488 at this point. If not, throw an error. */
6489
6490 static void
6491 validate_siginfo_access (void)
6492 {
6493 /* No current inferior, no siginfo. */
6494 if (ptid_equal (inferior_ptid, null_ptid))
6495 error (_("No thread selected."));
6496
6497 /* Don't try to read from a dead thread. */
6498 if (is_exited (inferior_ptid))
6499 error (_("The current thread has terminated."));
6500
6501 /* ... or from a spinning thread. */
6502 if (is_running (inferior_ptid))
6503 error (_("Selected thread is running."));
6504 }
6505
6506 /* The $_siginfo convenience variable is a bit special. We don't know
6507 for sure the type of the value until we actually have a chance to
6508 fetch the data. The type can change depending on gdbarch, so it is
6509 also dependent on which thread you have selected.  We handle this
6510 with a combination of:
6511 1. making $_siginfo be an internalvar that creates a new value on
6512 access.
6513
6514 2. making the value of $_siginfo be an lval_computed value. */
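
/* Editorial usage sketch (not part of the original listing): the
   computed-value plumbing below is what makes session commands such as
   the following work, assuming a target and gdbarch that expose
   siginfo (e.g. GNU/Linux):

       (gdb) print $_siginfo.si_signo
       (gdb) print $_siginfo._sifields._sigfault.si_addr
       (gdb) set $_siginfo.si_signo = 5

   Reads go through siginfo_value_read and writes through
   siginfo_value_write, both defined below.  */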
6515
6516 /* This function implements the lval_computed support for reading a
6517 $_siginfo value. */
6518
6519 static void
6520 siginfo_value_read (struct value *v)
6521 {
6522 LONGEST transferred;
6523
6524 validate_siginfo_access ();
6525
6526 transferred =
6527 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
6528 NULL,
6529 value_contents_all_raw (v),
6530 value_offset (v),
6531 TYPE_LENGTH (value_type (v)));
6532
6533 if (transferred != TYPE_LENGTH (value_type (v)))
6534 error (_("Unable to read siginfo"));
6535 }
6536
6537 /* This function implements the lval_computed support for writing a
6538 $_siginfo value. */
6539
6540 static void
6541 siginfo_value_write (struct value *v, struct value *fromval)
6542 {
6543 LONGEST transferred;
6544
6545 validate_siginfo_access ();
6546
6547 transferred = target_write (&current_target,
6548 TARGET_OBJECT_SIGNAL_INFO,
6549 NULL,
6550 value_contents_all_raw (fromval),
6551 value_offset (v),
6552 TYPE_LENGTH (value_type (fromval)));
6553
6554 if (transferred != TYPE_LENGTH (value_type (fromval)))
6555 error (_("Unable to write siginfo"));
6556 }
6557
6558 static const struct lval_funcs siginfo_value_funcs =
6559 {
6560 siginfo_value_read,
6561 siginfo_value_write
6562 };
6563
6564 /* Return a new value with the correct type for the siginfo object of
6565 the current thread using architecture GDBARCH. Return a void value
6566 if there's no object available. */
6567
6568 static struct value *
6569 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var)
6570 {
6571 if (target_has_stack
6572 && !ptid_equal (inferior_ptid, null_ptid)
6573 && gdbarch_get_siginfo_type_p (gdbarch))
6574 {
6575 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6576
6577 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
6578 }
6579
6580 return allocate_value (builtin_type (gdbarch)->builtin_void);
6581 }
6582
6583 \f
6584 /* infcall_suspend_state contains state about the program itself like its
6585 registers and any signal it received when it last stopped.
6586 This state must be restored regardless of how the inferior function call
6587 ends (either successfully, or after it hits a breakpoint or signal)
6588 if the program is to properly continue where it left off. */
6589
6590 struct infcall_suspend_state
6591 {
6592 struct thread_suspend_state thread_suspend;
6593 struct inferior_suspend_state inferior_suspend;
6594
6595 /* Other fields: */
6596 CORE_ADDR stop_pc;
6597 struct regcache *registers;
6598
6599 /* Gdbarch describing the format of SIGINFO_DATA; NULL if not present. */
6600 struct gdbarch *siginfo_gdbarch;
6601
6602 /* The siginfo data itself; its format depends on SIGINFO_GDBARCH and its
6603 length is TYPE_LENGTH (gdbarch_get_siginfo_type ()). For a different
6604 gdbarch the content would be invalid. */
6605 gdb_byte *siginfo_data;
6606 };
6607
6608 struct infcall_suspend_state *
6609 save_infcall_suspend_state (void)
6610 {
6611 struct infcall_suspend_state *inf_state;
6612 struct thread_info *tp = inferior_thread ();
6613 struct inferior *inf = current_inferior ();
6614 struct regcache *regcache = get_current_regcache ();
6615 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6616 gdb_byte *siginfo_data = NULL;
6617
6618 if (gdbarch_get_siginfo_type_p (gdbarch))
6619 {
6620 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6621 size_t len = TYPE_LENGTH (type);
6622 struct cleanup *back_to;
6623
6624 siginfo_data = xmalloc (len);
6625 back_to = make_cleanup (xfree, siginfo_data);
6626
6627 if (target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6628 siginfo_data, 0, len) == len)
6629 discard_cleanups (back_to);
6630 else
6631 {
6632 /* Errors ignored. */
6633 do_cleanups (back_to);
6634 siginfo_data = NULL;
6635 }
6636 }
6637
6638 inf_state = XZALLOC (struct infcall_suspend_state);
6639
6640 if (siginfo_data)
6641 {
6642 inf_state->siginfo_gdbarch = gdbarch;
6643 inf_state->siginfo_data = siginfo_data;
6644 }
6645
6646 inf_state->thread_suspend = tp->suspend;
6647 inf_state->inferior_suspend = inf->suspend;
6648
6649 /* run_inferior_call will not use the signal anyway, since it calls
6650 `proceed' with TARGET_SIGNAL_0. */
6651 tp->suspend.stop_signal = TARGET_SIGNAL_0;
6652
6653 inf_state->stop_pc = stop_pc;
6654
6655 inf_state->registers = regcache_dup (regcache);
6656
6657 return inf_state;
6658 }
6659
6660 /* Restore inferior session state to INF_STATE. */
6661
6662 void
6663 restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6664 {
6665 struct thread_info *tp = inferior_thread ();
6666 struct inferior *inf = current_inferior ();
6667 struct regcache *regcache = get_current_regcache ();
6668 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6669
6670 tp->suspend = inf_state->thread_suspend;
6671 inf->suspend = inf_state->inferior_suspend;
6672
6673 stop_pc = inf_state->stop_pc;
6674
6675 if (inf_state->siginfo_gdbarch == gdbarch)
6676 {
6677 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6678 size_t len = TYPE_LENGTH (type);
6679
6680 /* Errors ignored. */
6681 target_write (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6682 inf_state->siginfo_data, 0, len);
6683 }
6684
6685 /* The inferior can be gone if the user types "print exit(0)"
6686 (and perhaps other times). */
6687 if (target_has_execution)
6688 /* NB: The register write goes through to the target. */
6689 regcache_cpy (regcache, inf_state->registers);
6690
6691 discard_infcall_suspend_state (inf_state);
6692 }
6693
6694 static void
6695 do_restore_infcall_suspend_state_cleanup (void *state)
6696 {
6697 restore_infcall_suspend_state (state);
6698 }
6699
6700 struct cleanup *
6701 make_cleanup_restore_infcall_suspend_state
6702 (struct infcall_suspend_state *inf_state)
6703 {
6704 return make_cleanup (do_restore_infcall_suspend_state_cleanup, inf_state);
6705 }
6706
6707 void
6708 discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6709 {
6710 regcache_xfree (inf_state->registers);
6711 xfree (inf_state->siginfo_data);
6712 xfree (inf_state);
6713 }
6714
6715 struct regcache *
6716 get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
6717 {
6718 return inf_state->registers;
6719 }
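
/* Usage sketch (illustrative; not lifted from any particular caller):
   an inferior function call typically brackets the run of the dummy
   frame with this pair, so that registers, the stop signal and
   stop_pc are put back no matter how the call ends:

       struct infcall_suspend_state *caller_state
         = save_infcall_suspend_state ();
       struct cleanup *chain
         = make_cleanup_restore_infcall_suspend_state (caller_state);

       ... run the dummy call ...

       do_cleanups (chain);

   do_cleanups restores the saved state and then discards it.  A path
   that instead wants to keep the inferior's new state would call
   discard_cleanups (chain) and then
   discard_infcall_suspend_state (caller_state).  */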
6720
6721 /* infcall_control_state contains state regarding gdb's control of the
6722 inferior itself like stepping control. It also contains session state like
6723 the user's currently selected frame. */
6724
6725 struct infcall_control_state
6726 {
6727 struct thread_control_state thread_control;
6728 struct inferior_control_state inferior_control;
6729
6730 /* Other fields: */
6731 enum stop_stack_kind stop_stack_dummy;
6732 int stopped_by_random_signal;
6733 int stop_after_trap;
6734
6735 /* ID of the selected frame when the inferior function call was made. */
6736 struct frame_id selected_frame_id;
6737 };
6738
6739 /* Save all of the information associated with the inferior<==>gdb
6740 connection. */
6741
6742 struct infcall_control_state *
6743 save_infcall_control_state (void)
6744 {
6745 struct infcall_control_state *inf_status = xmalloc (sizeof (*inf_status));
6746 struct thread_info *tp = inferior_thread ();
6747 struct inferior *inf = current_inferior ();
6748
6749 inf_status->thread_control = tp->control;
6750 inf_status->inferior_control = inf->control;
6751
6752 tp->control.step_resume_breakpoint = NULL;
6753 tp->control.exception_resume_breakpoint = NULL;
6754
6755 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
6756 chain. If caller's caller is walking the chain, they'll be happier if we
6757 hand them back the original chain when restore_infcall_control_state is
6758 called. */
6759 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
6760
6761 /* Other fields: */
6762 inf_status->stop_stack_dummy = stop_stack_dummy;
6763 inf_status->stopped_by_random_signal = stopped_by_random_signal;
6764 inf_status->stop_after_trap = stop_after_trap;
6765
6766 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
6767
6768 return inf_status;
6769 }
6770
6771 static int
6772 restore_selected_frame (void *args)
6773 {
6774 struct frame_id *fid = (struct frame_id *) args;
6775 struct frame_info *frame;
6776
6777 frame = frame_find_by_id (*fid);
6778
6779 /* If frame_find_by_id cannot find the saved frame again (e.g. because
6780 the stack has been clobbered), there is no frame to restore. */
6781 if (frame == NULL)
6782 {
6783 warning (_("Unable to restore previously selected frame."));
6784 return 0;
6785 }
6786
6787 select_frame (frame);
6788
6789 return 1;
6790 }
6791
6792 /* Restore inferior session state to INF_STATUS. */
6793
6794 void
6795 restore_infcall_control_state (struct infcall_control_state *inf_status)
6796 {
6797 struct thread_info *tp = inferior_thread ();
6798 struct inferior *inf = current_inferior ();
6799
6800 if (tp->control.step_resume_breakpoint)
6801 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
6802
6803 if (tp->control.exception_resume_breakpoint)
6804 tp->control.exception_resume_breakpoint->disposition
6805 = disp_del_at_next_stop;
6806
6807 /* Handle the bpstat_copy of the chain. */
6808 bpstat_clear (&tp->control.stop_bpstat);
6809
6810 tp->control = inf_status->thread_control;
6811 inf->control = inf_status->inferior_control;
6812
6813 /* Other fields: */
6814 stop_stack_dummy = inf_status->stop_stack_dummy;
6815 stopped_by_random_signal = inf_status->stopped_by_random_signal;
6816 stop_after_trap = inf_status->stop_after_trap;
6817
6818 if (target_has_stack)
6819 {
6820 /* The point of catch_errors is that if the stack is clobbered,
6821 walking the stack might encounter a garbage pointer and
6822 error() trying to dereference it. */
6823 if (catch_errors
6824 (restore_selected_frame, &inf_status->selected_frame_id,
6825 "Unable to restore previously selected frame:\n",
6826 RETURN_MASK_ERROR) == 0)
6827 /* Error in restoring the selected frame. Select the innermost
6828 frame. */
6829 select_frame (get_current_frame ());
6830 }
6831
6832 xfree (inf_status);
6833 }
6834
6835 static void
6836 do_restore_infcall_control_state_cleanup (void *sts)
6837 {
6838 restore_infcall_control_state (sts);
6839 }
6840
6841 struct cleanup *
6842 make_cleanup_restore_infcall_control_state
6843 (struct infcall_control_state *inf_status)
6844 {
6845 return make_cleanup (do_restore_infcall_control_state_cleanup, inf_status);
6846 }
6847
6848 void
6849 discard_infcall_control_state (struct infcall_control_state *inf_status)
6850 {
6851 if (inf_status->thread_control.step_resume_breakpoint)
6852 inf_status->thread_control.step_resume_breakpoint->disposition
6853 = disp_del_at_next_stop;
6854
6855 if (inf_status->thread_control.exception_resume_breakpoint)
6856 inf_status->thread_control.exception_resume_breakpoint->disposition
6857 = disp_del_at_next_stop;
6858
6859 /* See save_infcall_control_state for info on stop_bpstat. */
6860 bpstat_clear (&inf_status->thread_control.stop_bpstat);
6861
6862 xfree (inf_status);
6863 }
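
/* Usage sketch (illustrative; a rough outline of how a caller such as
   the inferior-call code might use these routines): save the control
   state, arrange a cleanup so an error restores it, and discard both
   the cleanup and the state on the normal path so the new stop state
   is kept:

       struct infcall_control_state *inf_status
         = save_infcall_control_state ();
       struct cleanup *chain
         = make_cleanup_restore_infcall_control_state (inf_status);

       ... proceed with the call ...

       discard_cleanups (chain);
       discard_infcall_control_state (inf_status);  */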
6864 \f
6865 int
6866 inferior_has_forked (ptid_t pid, ptid_t *child_pid)
6867 {
6868 struct target_waitstatus last;
6869 ptid_t last_ptid;
6870
6871 get_last_target_status (&last_ptid, &last);
6872
6873 if (last.kind != TARGET_WAITKIND_FORKED)
6874 return 0;
6875
6876 if (!ptid_equal (last_ptid, pid))
6877 return 0;
6878
6879 *child_pid = last.value.related_pid;
6880 return 1;
6881 }
6882
6883 int
6884 inferior_has_vforked (ptid_t pid, ptid_t *child_pid)
6885 {
6886 struct target_waitstatus last;
6887 ptid_t last_ptid;
6888
6889 get_last_target_status (&last_ptid, &last);
6890
6891 if (last.kind != TARGET_WAITKIND_VFORKED)
6892 return 0;
6893
6894 if (!ptid_equal (last_ptid, pid))
6895 return 0;
6896
6897 *child_pid = last.value.related_pid;
6898 return 1;
6899 }
6900
6901 int
6902 inferior_has_execd (ptid_t pid, char **execd_pathname)
6903 {
6904 struct target_waitstatus last;
6905 ptid_t last_ptid;
6906
6907 get_last_target_status (&last_ptid, &last);
6908
6909 if (last.kind != TARGET_WAITKIND_EXECD)
6910 return 0;
6911
6912 if (!ptid_equal (last_ptid, pid))
6913 return 0;
6914
6915 *execd_pathname = xstrdup (last.value.execd_pathname);
6916 return 1;
6917 }
6918
6919 int
6920 inferior_has_called_syscall (ptid_t pid, int *syscall_number)
6921 {
6922 struct target_waitstatus last;
6923 ptid_t last_ptid;
6924
6925 get_last_target_status (&last_ptid, &last);
6926
6927 if (last.kind != TARGET_WAITKIND_SYSCALL_ENTRY
6928 && last.kind != TARGET_WAITKIND_SYSCALL_RETURN)
6929 return 0;
6930
6931 if (!ptid_equal (last_ptid, pid))
6932 return 0;
6933
6934 *syscall_number = last.value.syscall_number;
6935 return 1;
6936 }
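
/* Usage sketch (illustrative; the surrounding code is hypothetical):
   after a stop, a caller can ask whether the last event reported for
   a given ptid was a fork and retrieve the child, e.g.:

       ptid_t child;

       if (inferior_has_forked (inferior_ptid, &child))
         printf_filtered (_("child pid is %d\n"), ptid_get_pid (child));

   The other inferior_has_* queries above follow the same pattern.  */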
6937
6938 int
6939 ptid_match (ptid_t ptid, ptid_t filter)
6940 {
6941 if (ptid_equal (filter, minus_one_ptid))
6942 return 1;
6943 if (ptid_is_pid (filter)
6944 && ptid_get_pid (ptid) == ptid_get_pid (filter))
6945 return 1;
6946 else if (ptid_equal (ptid, filter))
6947 return 1;
6948
6949 return 0;
6950 }
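
/* For illustration (the ptids are made up); with the usual ptid
   conventions:

       ptid_match (ptid_build (42, 7, 0), minus_one_ptid)         => 1
       ptid_match (ptid_build (42, 7, 0), pid_to_ptid (42))       => 1
       ptid_match (ptid_build (42, 7, 0), pid_to_ptid (43))       => 0
       ptid_match (ptid_build (42, 7, 0), ptid_build (42, 8, 0))  => 0  */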
6951
6952 /* restore_inferior_ptid() will be used by the cleanup machinery
6953 to restore the inferior_ptid value saved in a call to
6954 save_inferior_ptid(). */
6955
6956 static void
6957 restore_inferior_ptid (void *arg)
6958 {
6959 ptid_t *saved_ptid_ptr = arg;
6960
6961 inferior_ptid = *saved_ptid_ptr;
6962 xfree (arg);
6963 }
6964
6965 /* Save the value of inferior_ptid so that it may be restored by a
6966 later call to do_cleanups(). Returns the struct cleanup pointer
6967 needed for later doing the cleanup. */
6968
6969 struct cleanup *
6970 save_inferior_ptid (void)
6971 {
6972 ptid_t *saved_ptid_ptr;
6973
6974 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
6975 *saved_ptid_ptr = inferior_ptid;
6976 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
6977 }
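
/* Usage sketch (illustrative; TMP_PTID is a made-up variable): to work
   temporarily on another thread without disturbing the caller's notion
   of the current thread:

       struct cleanup *old_chain = save_inferior_ptid ();

       inferior_ptid = tmp_ptid;
       ... do the per-thread work ...
       do_cleanups (old_chain);

   do_cleanups restores the previous inferior_ptid and frees the saved
   copy.  */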
6978 \f
6979
6980 /* User interface for reverse debugging:
6981 Set exec-direction / show exec-direction commands
6982 (returns error unless the target supports reverse execution). */
6983
6984 int execution_direction = EXEC_FORWARD;
6985 static const char exec_forward[] = "forward";
6986 static const char exec_reverse[] = "reverse";
6987 static const char *exec_direction = exec_forward;
6988 static const char *exec_direction_names[] = {
6989 exec_forward,
6990 exec_reverse,
6991 NULL
6992 };
6993
6994 static void
6995 set_exec_direction_func (char *args, int from_tty,
6996 struct cmd_list_element *cmd)
6997 {
6998 if (target_can_execute_reverse)
6999 {
7000 if (!strcmp (exec_direction, exec_forward))
7001 execution_direction = EXEC_FORWARD;
7002 else if (!strcmp (exec_direction, exec_reverse))
7003 execution_direction = EXEC_REVERSE;
7004 }
7005 else
7006 {
7007 exec_direction = exec_forward;
7008 error (_("Target does not support this operation."));
7009 }
7010 }
7011
7012 static void
7013 show_exec_direction_func (struct ui_file *out, int from_tty,
7014 struct cmd_list_element *cmd, const char *value)
7015 {
7016 switch (execution_direction) {
7017 case EXEC_FORWARD:
7018 fprintf_filtered (out, _("Forward.\n"));
7019 break;
7020 case EXEC_REVERSE:
7021 fprintf_filtered (out, _("Reverse.\n"));
7022 break;
7023 default:
7024 internal_error (__FILE__, __LINE__,
7025 _("bogus execution_direction value: %d"),
7026 (int) execution_direction);
7027 }
7028 }
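
/* Usage sketch (CLI, illustrative): on a target that can execute in
   reverse, for instance after enabling process record with the
   "record" command, the direction is switched with:

       (gdb) record
       (gdb) set exec-direction reverse
       (gdb) step
       (gdb) set exec-direction forward

   On targets without reverse execution, set_exec_direction_func above
   rejects the change.  */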
7029
7030 /* User interface for non-stop mode. */
7031
7032 int non_stop = 0;
7033
7034 static void
7035 set_non_stop (char *args, int from_tty,
7036 struct cmd_list_element *c)
7037 {
7038 if (target_has_execution)
7039 {
7040 non_stop_1 = non_stop;
7041 error (_("Cannot change this setting while the inferior is running."));
7042 }
7043
7044 non_stop = non_stop_1;
7045 }
7046
7047 static void
7048 show_non_stop (struct ui_file *file, int from_tty,
7049 struct cmd_list_element *c, const char *value)
7050 {
7051 fprintf_filtered (file,
7052 _("Controlling the inferior in non-stop mode is %s.\n"),
7053 value);
7054 }
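
/* Usage sketch (CLI, illustrative): non-stop mode must be selected
   before the program is running (see set_non_stop above) and needs the
   target to be asynchronous, typically:

       (gdb) set target-async on
       (gdb) set pagination off
       (gdb) set non-stop on
       (gdb) run &

   Afterwards individual threads can be stepped, continued and
   interrupted while the remaining threads keep running.  */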
7055
7056 static void
7057 show_schedule_multiple (struct ui_file *file, int from_tty,
7058 struct cmd_list_element *c, const char *value)
7059 {
7060 fprintf_filtered (file, _("Resuming the execution of threads "
7061 "of all processes is %s.\n"), value);
7062 }
7063
7064 void
7065 _initialize_infrun (void)
7066 {
7067 int i;
7068 int numsigs;
7069
7070 add_info ("signals", signals_info, _("\
7071 What debugger does when program gets various signals.\n\
7072 Specify a signal as argument to print info on that signal only."));
7073 add_info_alias ("handle", "signals", 0);
7074
7075 add_com ("handle", class_run, handle_command, _("\
7076 Specify how to handle a signal.\n\
7077 Args are signals and actions to apply to those signals.\n\
7078 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7079 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7080 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7081 The special arg \"all\" is recognized to mean all signals except those\n\
7082 used by the debugger, typically SIGTRAP and SIGINT.\n\
7083 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
7084 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
7085 Stop means reenter debugger if this signal happens (implies print).\n\
7086 Print means print a message if this signal happens.\n\
7087 Pass means let program see this signal; otherwise program doesn't know.\n\
7088 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7089 Pass and Stop may be combined."));
7090 if (xdb_commands)
7091 {
7092 add_com ("lz", class_info, signals_info, _("\
7093 What debugger does when program gets various signals.\n\
7094 Specify a signal as argument to print info on that signal only."));
7095 add_com ("z", class_run, xdb_handle_command, _("\
7096 Specify how to handle a signal.\n\
7097 Args are signals and actions to apply to those signals.\n\
7098 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7099 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7100 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7101 The special arg \"all\" is recognized to mean all signals except those\n\
7102 used by the debugger, typically SIGTRAP and SIGINT.\n\
7103 Recognized actions include \"s\" (toggles between stop and nostop),\n\
7104 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
7105 nopass), \"Q\" (noprint)\n\
7106 Stop means reenter debugger if this signal happens (implies print).\n\
7107 Print means print a message if this signal happens.\n\
7108 Pass means let program see this signal; otherwise program doesn't know.\n\
7109 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7110 Pass and Stop may be combined."));
7111 }
7112
7113 if (!dbx_commands)
7114 stop_command = add_cmd ("stop", class_obscure,
7115 not_just_help_class_command, _("\
7116 There is no `stop' command, but you can set a hook on `stop'.\n\
7117 This allows you to set a list of commands to be run each time execution\n\
7118 of the program stops."), &cmdlist);
7119
7120 add_setshow_zinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
7121 Set inferior debugging."), _("\
7122 Show inferior debugging."), _("\
7123 When non-zero, inferior specific debugging is enabled."),
7124 NULL,
7125 show_debug_infrun,
7126 &setdebuglist, &showdebuglist);
7127
7128 add_setshow_boolean_cmd ("displaced", class_maintenance,
7129 &debug_displaced, _("\
7130 Set displaced stepping debugging."), _("\
7131 Show displaced stepping debugging."), _("\
7132 When non-zero, displaced stepping specific debugging is enabled."),
7133 NULL,
7134 show_debug_displaced,
7135 &setdebuglist, &showdebuglist);
7136
7137 add_setshow_boolean_cmd ("non-stop", no_class,
7138 &non_stop_1, _("\
7139 Set whether gdb controls the inferior in non-stop mode."), _("\
7140 Show whether gdb controls the inferior in non-stop mode."), _("\
7141 When debugging a multi-threaded program and this setting is\n\
7142 off (the default, also called all-stop mode), when one thread stops\n\
7143 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
7144 all other threads in the program while you interact with the thread of\n\
7145 interest. When you continue or step a thread, you can allow the other\n\
7146 threads to run, or have them remain stopped, but while you inspect any\n\
7147 thread's state, all threads stop.\n\
7148 \n\
7149 In non-stop mode, when one thread stops, other threads can continue\n\
7150 to run freely. You'll be able to step each thread independently,\n\
7151 leave it stopped or free to run as needed."),
7152 set_non_stop,
7153 show_non_stop,
7154 &setlist,
7155 &showlist);
7156
7157 numsigs = (int) TARGET_SIGNAL_LAST;
7158 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
7159 signal_print = (unsigned char *)
7160 xmalloc (sizeof (signal_print[0]) * numsigs);
7161 signal_program = (unsigned char *)
7162 xmalloc (sizeof (signal_program[0]) * numsigs);
7163 signal_pass = (unsigned char *)
7164 xmalloc (sizeof (signal_pass[0]) * numsigs);
7165 for (i = 0; i < numsigs; i++)
7166 {
7167 signal_stop[i] = 1;
7168 signal_print[i] = 1;
7169 signal_program[i] = 1;
7170 }
7171
7172 /* Signals caused by debugger's own actions
7173 should not be given to the program afterwards. */
7174 signal_program[TARGET_SIGNAL_TRAP] = 0;
7175 signal_program[TARGET_SIGNAL_INT] = 0;
7176
7177 /* Signals that are not errors should not normally enter the debugger. */
7178 signal_stop[TARGET_SIGNAL_ALRM] = 0;
7179 signal_print[TARGET_SIGNAL_ALRM] = 0;
7180 signal_stop[TARGET_SIGNAL_VTALRM] = 0;
7181 signal_print[TARGET_SIGNAL_VTALRM] = 0;
7182 signal_stop[TARGET_SIGNAL_PROF] = 0;
7183 signal_print[TARGET_SIGNAL_PROF] = 0;
7184 signal_stop[TARGET_SIGNAL_CHLD] = 0;
7185 signal_print[TARGET_SIGNAL_CHLD] = 0;
7186 signal_stop[TARGET_SIGNAL_IO] = 0;
7187 signal_print[TARGET_SIGNAL_IO] = 0;
7188 signal_stop[TARGET_SIGNAL_POLL] = 0;
7189 signal_print[TARGET_SIGNAL_POLL] = 0;
7190 signal_stop[TARGET_SIGNAL_URG] = 0;
7191 signal_print[TARGET_SIGNAL_URG] = 0;
7192 signal_stop[TARGET_SIGNAL_WINCH] = 0;
7193 signal_print[TARGET_SIGNAL_WINCH] = 0;
7194 signal_stop[TARGET_SIGNAL_PRIO] = 0;
7195 signal_print[TARGET_SIGNAL_PRIO] = 0;
7196
7197 /* These signals are used internally by user-level thread
7198 implementations. (See signal(5) on Solaris.) Like the above
7199 signals, a healthy program receives and handles them as part of
7200 its normal operation. */
7201 signal_stop[TARGET_SIGNAL_LWP] = 0;
7202 signal_print[TARGET_SIGNAL_LWP] = 0;
7203 signal_stop[TARGET_SIGNAL_WAITING] = 0;
7204 signal_print[TARGET_SIGNAL_WAITING] = 0;
7205 signal_stop[TARGET_SIGNAL_CANCEL] = 0;
7206 signal_print[TARGET_SIGNAL_CANCEL] = 0;
7207
7208 /* Update cached state. */
7209 signal_cache_update (-1);
7210
7211 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
7212 &stop_on_solib_events, _("\
7213 Set stopping for shared library events."), _("\
7214 Show stopping for shared library events."), _("\
7215 If nonzero, gdb will give control to the user when the dynamic linker\n\
7216 notifies gdb of shared library events. The most common event of interest\n\
7217 to the user would be loading/unloading of a new library."),
7218 NULL,
7219 show_stop_on_solib_events,
7220 &setlist, &showlist);
7221
7222 add_setshow_enum_cmd ("follow-fork-mode", class_run,
7223 follow_fork_mode_kind_names,
7224 &follow_fork_mode_string, _("\
7225 Set debugger response to a program call of fork or vfork."), _("\
7226 Show debugger response to a program call of fork or vfork."), _("\
7227 A fork or vfork creates a new process. follow-fork-mode can be:\n\
7228 parent - the original process is debugged after a fork\n\
7229 child - the new process is debugged after a fork\n\
7230 The unfollowed process will continue to run.\n\
7231 By default, the debugger will follow the parent process."),
7232 NULL,
7233 show_follow_fork_mode_string,
7234 &setlist, &showlist);
7235
7236 add_setshow_enum_cmd ("follow-exec-mode", class_run,
7237 follow_exec_mode_names,
7238 &follow_exec_mode_string, _("\
7239 Set debugger response to a program call of exec."), _("\
7240 Show debugger response to a program call of exec."), _("\
7241 An exec call replaces the program image of a process.\n\
7242 \n\
7243 follow-exec-mode can be:\n\
7244 \n\
7245 new - the debugger creates a new inferior and rebinds the process\n\
7246 to this new inferior. The program the process was running before\n\
7247 the exec call can be restarted afterwards by restarting the original\n\
7248 inferior.\n\
7249 \n\
7250 same - the debugger keeps the process bound to the same inferior.\n\
7251 The new executable image replaces the previous executable loaded in\n\
7252 the inferior. Restarting the inferior after the exec call restarts\n\
7253 the executable the process was running after the exec call.\n\
7254 \n\
7255 By default, the debugger will use the same inferior."),
7256 NULL,
7257 show_follow_exec_mode_string,
7258 &setlist, &showlist);
7259
7260 add_setshow_enum_cmd ("scheduler-locking", class_run,
7261 scheduler_enums, &scheduler_mode, _("\
7262 Set mode for locking scheduler during execution."), _("\
7263 Show mode for locking scheduler during execution."), _("\
7264 off == no locking (threads may preempt at any time)\n\
7265 on == full locking (no thread except the current thread may run)\n\
7266 step == scheduler locked during every single-step operation.\n\
7267 In this mode, no other thread may run during a step command.\n\
7268 Other threads may run while stepping over a function call ('next')."),
7269 set_schedlock_func, /* traps on target vector */
7270 show_scheduler_mode,
7271 &setlist, &showlist);
7272
7273 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
7274 Set mode for resuming threads of all processes."), _("\
7275 Show mode for resuming threads of all processes."), _("\
7276 When on, execution commands (such as 'continue' or 'next') resume all\n\
7277 threads of all processes. When off (which is the default), execution\n\
7278 commands only resume the threads of the current process. The set of\n\
7279 threads that are resumed is further refined by the scheduler-locking\n\
7280 mode (see help set scheduler-locking)."),
7281 NULL,
7282 show_schedule_multiple,
7283 &setlist, &showlist);
7284
7285 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
7286 Set mode of the step operation."), _("\
7287 Show mode of the step operation."), _("\
7288 When set, doing a step over a function without debug line information\n\
7289 will stop at the first instruction of that function. Otherwise, the\n\
7290 function is skipped and the step command stops at a different source line."),
7291 NULL,
7292 show_step_stop_if_no_debug,
7293 &setlist, &showlist);
7294
7295 add_setshow_enum_cmd ("displaced-stepping", class_run,
7296 can_use_displaced_stepping_enum,
7297 &can_use_displaced_stepping, _("\
7298 Set debugger's willingness to use displaced stepping."), _("\
7299 Show debugger's willingness to use displaced stepping."), _("\
7300 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
7301 supported by the target architecture. If off, gdb will not use displaced\n\
7302 stepping to step over breakpoints, even if such is supported by the target\n\
7303 architecture. If auto (which is the default), gdb will use displaced stepping\n\
7304 if the target architecture supports it and non-stop mode is active, but will not\n\
7305 use it in all-stop mode (see help set non-stop)."),
7306 NULL,
7307 show_can_use_displaced_stepping,
7308 &setlist, &showlist);
7309
7310 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
7311 &exec_direction, _("Set direction of execution.\n\
7312 Options are 'forward' or 'reverse'."),
7313 _("Show direction of execution (forward/reverse)."),
7314 _("Tells gdb whether to execute forward or backward."),
7315 set_exec_direction_func, show_exec_direction_func,
7316 &setlist, &showlist);
7317
7318 /* Set/show detach-on-fork: user-settable mode. */
7319
7320 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
7321 Set whether gdb will detach the child of a fork."), _("\
7322 Show whether gdb will detach the child of a fork."), _("\
7323 Tells gdb whether to detach the child of a fork."),
7324 NULL, NULL, &setlist, &showlist);
7325
7326 /* Set/show disable address space randomization mode. */
7327
7328 add_setshow_boolean_cmd ("disable-randomization", class_support,
7329 &disable_randomization, _("\
7330 Set disabling of debuggee's virtual address space randomization."), _("\
7331 Show disabling of debuggee's virtual address space randomization."), _("\
7332 When this mode is on (which is the default), randomization of the virtual\n\
7333 address space is disabled. Standalone programs run with the randomization\n\
7334 enabled by default on some platforms."),
7335 &set_disable_randomization,
7336 &show_disable_randomization,
7337 &setlist, &showlist);
7338
7339 /* ptid initializations */
7340 inferior_ptid = null_ptid;
7341 target_last_wait_ptid = minus_one_ptid;
7342
7343 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
7344 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
7345 observer_attach_thread_exit (infrun_thread_thread_exit);
7346 observer_attach_inferior_exit (infrun_inferior_exit);
7347
7348 /* Explicitly create without lookup, since lookup would try to create
7349 a value with a void type, and gdbarch isn't initialized yet when we
7350 get here. At this point, we're quite sure there isn't another
7351 convenience variable of the same name. */
7352 create_internalvar_type_lazy ("_siginfo", siginfo_make_value);
7353
7354 add_setshow_boolean_cmd ("observer", no_class,
7355 &observer_mode_1, _("\
7356 Set whether gdb controls the inferior in observer mode."), _("\
7357 Show whether gdb controls the inferior in observer mode."), _("\
7358 In observer mode, GDB can get data from the inferior, but not\n\
7359 affect its execution. Registers and memory may not be changed,\n\
7360 breakpoints may not be set, and the program cannot be interrupted\n\
7361 or signalled."),
7362 set_observer_mode,
7363 show_observer_mode,
7364 &setlist,
7365 &showlist);
7366 }