1 /* GNU/Linux native-dependent code common to multiple platforms.
3 Copyright (C) 2001-2022 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "nat/linux-nat.h"
25 #include "nat/linux-waitpid.h"
26 #include "gdbsupport/gdb_wait.h"
28 #include <sys/syscall.h>
29 #include "nat/gdb_ptrace.h"
30 #include "linux-nat.h"
31 #include "nat/linux-ptrace.h"
32 #include "nat/linux-procfs.h"
33 #include "nat/linux-personality.h"
34 #include "linux-fork.h"
35 #include "gdbthread.h"
39 #include "inf-child.h"
40 #include "inf-ptrace.h"
42 #include <sys/procfs.h> /* for elf_gregset etc. */
43 #include "elf-bfd.h" /* for elfcore_write_* */
44 #include "gregset.h" /* for gregset */
45 #include "gdbcore.h" /* for get_exec_file */
46 #include <ctype.h> /* for isdigit */
47 #include <sys/stat.h> /* for struct stat */
48 #include <fcntl.h> /* for O_RDONLY */
50 #include "gdbsupport/event-loop.h"
51 #include "event-top.h"
53 #include <sys/types.h>
55 #include "xml-support.h"
58 #include "nat/linux-osdata.h"
59 #include "linux-tdep.h"
61 #include "gdbsupport/agent.h"
62 #include "tracepoint.h"
63 #include "gdbsupport/buffer.h"
64 #include "target-descriptions.h"
65 #include "gdbsupport/filestuff.h"
67 #include "nat/linux-namespaces.h"
68 #include "gdbsupport/fileio.h"
69 #include "gdbsupport/scope-exit.h"
70 #include "gdbsupport/gdb-sigmask.h"
71 #include "gdbsupport/common-debug.h"
72 #include <unordered_map>
74 /* This comment documents high-level logic of this file.
76 Waiting for events in sync mode
77 ===============================
79 When waiting for an event in a specific thread, we just use waitpid,
80 passing the specific pid, and not passing WNOHANG.
82 When waiting for an event in all threads, waitpid is not quite good:
84 - If the thread group leader exits while other threads in the thread
85 group still exist, waitpid(TGID, ...) hangs. That waitpid won't
86 return an exit status until the other threads in the group are
89 - When a non-leader thread execs, that thread just vanishes without
90 reporting an exit (so we'd hang if we waited for it explicitly in
91 that case). The exec event is instead reported to the TGID pid.
93 The solution is to always use -1 and WNOHANG, together with
96 First, we use non-blocking waitpid to check for events. If nothing is
97 found, we use sigsuspend to wait for SIGCHLD. When SIGCHLD arrives,
98 it means something happened to a child process. As soon as we know
99 there's an event, we get back to calling nonblocking waitpid.
101 Note that SIGCHLD should be blocked between waitpid and sigsuspend
102 calls, so that we don't miss a signal. If SIGCHLD arrives in between,
103 when it's blocked, the signal becomes pending and sigsuspend
104 immediately notices it and returns.
106 Waiting for events in async mode (TARGET_WNOHANG)
107 =================================================
109 In async mode, GDB should always be ready to handle both user input
110 and target events, so neither blocking waitpid nor sigsuspend are
111 viable options. Instead, we should asynchronously notify the GDB main
112 event loop whenever there's an unprocessed event from the target. We
113 detect asynchronous target events by handling SIGCHLD signals. To
114 notify the event loop about target events, the self-pipe trick is used
115 --- a pipe is registered as waitable event source in the event loop,
116 the event loop select/poll's on the read end of this pipe (as well on
117 other event sources, e.g., stdin), and the SIGCHLD handler writes a
118 byte to this pipe. This is more portable than relying on
119 pselect/ppoll, since on kernels that lack those syscalls, libc
120 emulates them with select/poll+sigprocmask, and that is racy
121 (a.k.a. plain broken).
123 Obviously, if we fail to notify the event loop if there's a target
124 event, it's bad. OTOH, if we notify the event loop when there's no
125 event from the target, linux_nat_wait will detect that there's no real
126 event to report, and return event of type TARGET_WAITKIND_IGNORE.
127 This is mostly harmless, but it will waste time and is better avoided.
129 The main design point is that every time GDB is outside linux-nat.c,
130 we have a SIGCHLD handler installed that is called when something
131 happens to the target and notifies the GDB event loop. Whenever GDB
132 core decides to handle the event, and calls into linux-nat.c, we
133 process things as in sync mode, except that the we never block in
136 While processing an event, we may end up momentarily blocked in
137 waitpid calls. Those waitpid calls, while blocking, are guaranteed to
138 return quickly. E.g., in all-stop mode, before reporting to the core
139 that an LWP hit a breakpoint, all LWPs are stopped by sending them
140 SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
141 Note that this is different from blocking indefinitely waiting for the
142 next event --- here, we're already handling an event.
147 We stop threads by sending a SIGSTOP. The use of SIGSTOP instead of another
148 signal is not entirely significant; we just need for a signal to be delivered,
149 so that we can intercept it. SIGSTOP's advantage is that it can not be
150 blocked. A disadvantage is that it is not a real-time signal, so it can only
151 be queued once; we do not keep track of other sources of SIGSTOP.
153 Two other signals that can't be blocked are SIGCONT and SIGKILL. But we can't
154 use them, because they have special behavior when the signal is generated -
155 not when it is delivered. SIGCONT resumes the entire thread group and SIGKILL
156 kills the entire thread group.
158 A delivered SIGSTOP would stop the entire thread group, not just the thread we
159 tkill'd. But we never let the SIGSTOP be delivered; we always intercept and
160 cancel it (by PTRACE_CONT without passing SIGSTOP).
162 We could use a real-time signal instead. This would solve those problems; we
163 could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
164 But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
165 generates it, and there are races with trying to find a signal that is not
171 The case of a thread group (process) with 3 or more threads, and a
172 thread other than the leader execs is worth detailing:
174 On an exec, the Linux kernel destroys all threads except the execing
175 one in the thread group, and resets the execing thread's tid to the
176 tgid. No exit notification is sent for the execing thread -- from the
177 ptracer's perspective, it appears as though the execing thread just
178 vanishes. Until we reap all other threads except the leader and the
179 execing thread, the leader will be zombie, and the execing thread will
180 be in `D (disc sleep)' state. As soon as all other threads are
181 reaped, the execing thread changes its tid to the tgid, and the
182 previous (zombie) leader vanishes, giving place to the "new"
186 #define O_LARGEFILE 0
189 struct linux_nat_target
*linux_target
;
191 /* Does the current host support PTRACE_GETREGSET? */
192 enum tribool have_ptrace_getregset
= TRIBOOL_UNKNOWN
;
194 /* When true, print debug messages relating to the linux native target. */
196 static bool debug_linux_nat
;
198 /* Implement 'show debug lin-lwp'. */
201 show_debug_linux_nat (struct ui_file
*file
, int from_tty
,
202 struct cmd_list_element
*c
, const char *value
)
204 fprintf_filtered (file
, _("Debugging of GNU/Linux lwp module is %s.\n"),
208 /* Print a linux-nat debug statement. */
210 #define linux_nat_debug_printf(fmt, ...) \
211 debug_prefixed_printf_cond (debug_linux_nat, "linux-nat", fmt, ##__VA_ARGS__)
213 /* Print "linux-nat" enter/exit debug statements. */
215 #define LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT \
216 scoped_debug_enter_exit (debug_linux_nat, "linux-nat")
218 struct simple_pid_list
222 struct simple_pid_list
*next
;
224 static struct simple_pid_list
*stopped_pids
;
226 /* Whether target_thread_events is in effect. */
227 static int report_thread_events
;
229 /* Async mode support. */
231 /* The read/write ends of the pipe registered as waitable file in the
233 static int linux_nat_event_pipe
[2] = { -1, -1 };
235 /* True if we're currently in async mode. */
236 #define linux_is_async_p() (linux_nat_event_pipe[0] != -1)
238 /* Flush the event pipe. */
241 async_file_flush (void)
248 ret
= read (linux_nat_event_pipe
[0], &buf
, 1);
250 while (ret
>= 0 || (ret
== -1 && errno
== EINTR
));
253 /* Put something (anything, doesn't matter what, or how much) in event
254 pipe, so that the select/poll in the event-loop realizes we have
255 something to process. */
258 async_file_mark (void)
262 /* It doesn't really matter what the pipe contains, as long we end
263 up with something in it. Might as well flush the previous
269 ret
= write (linux_nat_event_pipe
[1], "+", 1);
271 while (ret
== -1 && errno
== EINTR
);
273 /* Ignore EAGAIN. If the pipe is full, the event loop will already
274 be awakened anyway. */
277 static int kill_lwp (int lwpid
, int signo
);
279 static int stop_callback (struct lwp_info
*lp
);
281 static void block_child_signals (sigset_t
*prev_mask
);
282 static void restore_child_signals_mask (sigset_t
*prev_mask
);
285 static struct lwp_info
*add_lwp (ptid_t ptid
);
286 static void purge_lwp_list (int pid
);
287 static void delete_lwp (ptid_t ptid
);
288 static struct lwp_info
*find_lwp_pid (ptid_t ptid
);
290 static int lwp_status_pending_p (struct lwp_info
*lp
);
292 static void save_stop_reason (struct lwp_info
*lp
);
294 static void close_proc_mem_file (pid_t pid
);
295 static void open_proc_mem_file (ptid_t ptid
);
300 /* See nat/linux-nat.h. */
303 ptid_of_lwp (struct lwp_info
*lwp
)
308 /* See nat/linux-nat.h. */
311 lwp_set_arch_private_info (struct lwp_info
*lwp
,
312 struct arch_lwp_info
*info
)
314 lwp
->arch_private
= info
;
317 /* See nat/linux-nat.h. */
319 struct arch_lwp_info
*
320 lwp_arch_private_info (struct lwp_info
*lwp
)
322 return lwp
->arch_private
;
325 /* See nat/linux-nat.h. */
328 lwp_is_stopped (struct lwp_info
*lwp
)
333 /* See nat/linux-nat.h. */
335 enum target_stop_reason
336 lwp_stop_reason (struct lwp_info
*lwp
)
338 return lwp
->stop_reason
;
341 /* See nat/linux-nat.h. */
344 lwp_is_stepping (struct lwp_info
*lwp
)
350 /* Trivial list manipulation functions to keep track of a list of
351 new stopped processes. */
353 add_to_pid_list (struct simple_pid_list
**listp
, int pid
, int status
)
355 struct simple_pid_list
*new_pid
= XNEW (struct simple_pid_list
);
358 new_pid
->status
= status
;
359 new_pid
->next
= *listp
;
364 pull_pid_from_list (struct simple_pid_list
**listp
, int pid
, int *statusp
)
366 struct simple_pid_list
**p
;
368 for (p
= listp
; *p
!= NULL
; p
= &(*p
)->next
)
369 if ((*p
)->pid
== pid
)
371 struct simple_pid_list
*next
= (*p
)->next
;
373 *statusp
= (*p
)->status
;
381 /* Return the ptrace options that we want to try to enable. */
384 linux_nat_ptrace_options (int attached
)
389 options
|= PTRACE_O_EXITKILL
;
391 options
|= (PTRACE_O_TRACESYSGOOD
392 | PTRACE_O_TRACEVFORKDONE
393 | PTRACE_O_TRACEVFORK
395 | PTRACE_O_TRACEEXEC
);
400 /* Initialize ptrace and procfs warnings and check for supported
401 ptrace features given PID.
403 ATTACHED should be nonzero iff we attached to the inferior. */
406 linux_init_ptrace_procfs (pid_t pid
, int attached
)
408 int options
= linux_nat_ptrace_options (attached
);
410 linux_enable_event_reporting (pid
, options
);
411 linux_ptrace_init_warnings ();
412 linux_proc_init_warnings ();
415 linux_nat_target::~linux_nat_target ()
419 linux_nat_target::post_attach (int pid
)
421 linux_init_ptrace_procfs (pid
, 1);
424 /* Implement the virtual inf_ptrace_target::post_startup_inferior method. */
427 linux_nat_target::post_startup_inferior (ptid_t ptid
)
429 linux_init_ptrace_procfs (ptid
.pid (), 0);
432 /* Return the number of known LWPs in the tgid given by PID. */
439 for (const lwp_info
*lp ATTRIBUTE_UNUSED
: all_lwps ())
440 if (lp
->ptid
.pid () == pid
)
446 /* Deleter for lwp_info unique_ptr specialisation. */
450 void operator() (struct lwp_info
*lwp
) const
452 delete_lwp (lwp
->ptid
);
456 /* A unique_ptr specialisation for lwp_info. */
458 typedef std::unique_ptr
<struct lwp_info
, lwp_deleter
> lwp_info_up
;
460 /* Target hook for follow_fork. */
463 linux_nat_target::follow_fork (inferior
*child_inf
, ptid_t child_ptid
,
464 target_waitkind fork_kind
, bool follow_child
,
467 inf_ptrace_target::follow_fork (child_inf
, child_ptid
, fork_kind
,
468 follow_child
, detach_fork
);
472 bool has_vforked
= fork_kind
== TARGET_WAITKIND_VFORKED
;
473 ptid_t parent_ptid
= inferior_ptid
;
474 int parent_pid
= parent_ptid
.lwp ();
475 int child_pid
= child_ptid
.lwp ();
477 /* We're already attached to the parent, by default. */
478 lwp_info
*child_lp
= add_lwp (child_ptid
);
479 child_lp
->stopped
= 1;
480 child_lp
->last_resume_kind
= resume_stop
;
482 /* Detach new forked process? */
485 int child_stop_signal
= 0;
486 bool detach_child
= true;
488 /* Move CHILD_LP into a unique_ptr and clear the source pointer
489 to prevent us doing anything stupid with it. */
490 lwp_info_up
child_lp_ptr (child_lp
);
493 linux_target
->low_prepare_to_resume (child_lp_ptr
.get ());
495 /* When debugging an inferior in an architecture that supports
496 hardware single stepping on a kernel without commit
497 6580807da14c423f0d0a708108e6df6ebc8bc83d, the vfork child
498 process starts with the TIF_SINGLESTEP/X86_EFLAGS_TF bits
499 set if the parent process had them set.
500 To work around this, single step the child process
501 once before detaching to clear the flags. */
503 /* Note that we consult the parent's architecture instead of
504 the child's because there's no inferior for the child at
506 if (!gdbarch_software_single_step_p (target_thread_architecture
511 linux_disable_event_reporting (child_pid
);
512 if (ptrace (PTRACE_SINGLESTEP
, child_pid
, 0, 0) < 0)
513 perror_with_name (_("Couldn't do single step"));
514 if (my_waitpid (child_pid
, &status
, 0) < 0)
515 perror_with_name (_("Couldn't wait vfork process"));
518 detach_child
= WIFSTOPPED (status
);
519 child_stop_signal
= WSTOPSIG (status
);
525 int signo
= child_stop_signal
;
528 && !signal_pass_state (gdb_signal_from_host (signo
)))
530 ptrace (PTRACE_DETACH
, child_pid
, 0, signo
);
532 close_proc_mem_file (child_pid
);
538 struct lwp_info
*parent_lp
;
540 parent_lp
= find_lwp_pid (parent_ptid
);
541 gdb_assert (linux_supports_tracefork () >= 0);
543 if (linux_supports_tracevforkdone ())
545 linux_nat_debug_printf ("waiting for VFORK_DONE on %d",
547 parent_lp
->stopped
= 1;
549 /* We'll handle the VFORK_DONE event like any other
550 event, in target_wait. */
554 /* We can't insert breakpoints until the child has
555 finished with the shared memory region. We need to
556 wait until that happens. Ideal would be to just
558 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
559 - waitpid (parent_pid, &status, __WALL);
560 However, most architectures can't handle a syscall
561 being traced on the way out if it wasn't traced on
564 We might also think to loop, continuing the child
565 until it exits or gets a SIGTRAP. One problem is
566 that the child might call ptrace with PTRACE_TRACEME.
568 There's no simple and reliable way to figure out when
569 the vforked child will be done with its copy of the
570 shared memory. We could step it out of the syscall,
571 two instructions, let it go, and then single-step the
572 parent once. When we have hardware single-step, this
573 would work; with software single-step it could still
574 be made to work but we'd have to be able to insert
575 single-step breakpoints in the child, and we'd have
576 to insert -just- the single-step breakpoint in the
577 parent. Very awkward.
579 In the end, the best we can do is to make sure it
580 runs for a little while. Hopefully it will be out of
581 range of any breakpoints we reinsert. Usually this
582 is only the single-step breakpoint at vfork's return
585 linux_nat_debug_printf ("no VFORK_DONE support, sleeping a bit");
589 /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
590 and leave it pending. The next linux_nat_resume call
591 will notice a pending event, and bypasses actually
592 resuming the inferior. */
593 parent_lp
->status
= 0;
594 parent_lp
->waitstatus
.set_vfork_done ();
595 parent_lp
->stopped
= 1;
597 /* If we're in async mode, need to tell the event loop
598 there's something here to process. */
599 if (target_is_async_p ())
606 struct lwp_info
*child_lp
;
608 child_lp
= add_lwp (child_ptid
);
609 child_lp
->stopped
= 1;
610 child_lp
->last_resume_kind
= resume_stop
;
616 linux_nat_target::insert_fork_catchpoint (int pid
)
618 return !linux_supports_tracefork ();
622 linux_nat_target::remove_fork_catchpoint (int pid
)
628 linux_nat_target::insert_vfork_catchpoint (int pid
)
630 return !linux_supports_tracefork ();
634 linux_nat_target::remove_vfork_catchpoint (int pid
)
640 linux_nat_target::insert_exec_catchpoint (int pid
)
642 return !linux_supports_tracefork ();
646 linux_nat_target::remove_exec_catchpoint (int pid
)
652 linux_nat_target::set_syscall_catchpoint (int pid
, bool needed
, int any_count
,
653 gdb::array_view
<const int> syscall_counts
)
655 if (!linux_supports_tracesysgood ())
658 /* On GNU/Linux, we ignore the arguments. It means that we only
659 enable the syscall catchpoints, but do not disable them.
661 Also, we do not use the `syscall_counts' information because we do not
662 filter system calls here. We let GDB do the logic for us. */
666 /* List of known LWPs, keyed by LWP PID. This speeds up the common
667 case of mapping a PID returned from the kernel to our corresponding
668 lwp_info data structure. */
669 static htab_t lwp_lwpid_htab
;
671 /* Calculate a hash from a lwp_info's LWP PID. */
674 lwp_info_hash (const void *ap
)
676 const struct lwp_info
*lp
= (struct lwp_info
*) ap
;
677 pid_t pid
= lp
->ptid
.lwp ();
679 return iterative_hash_object (pid
, 0);
682 /* Equality function for the lwp_info hash table. Compares the LWP's
686 lwp_lwpid_htab_eq (const void *a
, const void *b
)
688 const struct lwp_info
*entry
= (const struct lwp_info
*) a
;
689 const struct lwp_info
*element
= (const struct lwp_info
*) b
;
691 return entry
->ptid
.lwp () == element
->ptid
.lwp ();
694 /* Create the lwp_lwpid_htab hash table. */
697 lwp_lwpid_htab_create (void)
699 lwp_lwpid_htab
= htab_create (100, lwp_info_hash
, lwp_lwpid_htab_eq
, NULL
);
702 /* Add LP to the hash table. */
705 lwp_lwpid_htab_add_lwp (struct lwp_info
*lp
)
709 slot
= htab_find_slot (lwp_lwpid_htab
, lp
, INSERT
);
710 gdb_assert (slot
!= NULL
&& *slot
== NULL
);
714 /* Head of doubly-linked list of known LWPs. Sorted by reverse
715 creation order. This order is assumed in some cases. E.g.,
716 reaping status after killing all lwps of a process: the leader LWP
717 must be reaped last. */
719 static intrusive_list
<lwp_info
> lwp_list
;
721 /* See linux-nat.h. */
726 return lwp_info_range (lwp_list
.begin ());
729 /* See linux-nat.h. */
734 return lwp_info_safe_range (lwp_list
.begin ());
737 /* Add LP to sorted-by-reverse-creation-order doubly-linked list. */
740 lwp_list_add (struct lwp_info
*lp
)
742 lwp_list
.push_front (*lp
);
745 /* Remove LP from sorted-by-reverse-creation-order doubly-linked
749 lwp_list_remove (struct lwp_info
*lp
)
751 /* Remove from sorted-by-creation-order list. */
752 lwp_list
.erase (lwp_list
.iterator_to (*lp
));
757 /* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
758 _initialize_linux_nat. */
759 static sigset_t suspend_mask
;
761 /* Signals to block to make that sigsuspend work. */
762 static sigset_t blocked_mask
;
764 /* SIGCHLD action. */
765 static struct sigaction sigchld_action
;
767 /* Block child signals (SIGCHLD and linux threads signals), and store
768 the previous mask in PREV_MASK. */
771 block_child_signals (sigset_t
*prev_mask
)
773 /* Make sure SIGCHLD is blocked. */
774 if (!sigismember (&blocked_mask
, SIGCHLD
))
775 sigaddset (&blocked_mask
, SIGCHLD
);
777 gdb_sigmask (SIG_BLOCK
, &blocked_mask
, prev_mask
);
780 /* Restore child signals mask, previously returned by
781 block_child_signals. */
784 restore_child_signals_mask (sigset_t
*prev_mask
)
786 gdb_sigmask (SIG_SETMASK
, prev_mask
, NULL
);
789 /* Mask of signals to pass directly to the inferior. */
790 static sigset_t pass_mask
;
792 /* Update signals to pass to the inferior. */
794 linux_nat_target::pass_signals
795 (gdb::array_view
<const unsigned char> pass_signals
)
799 sigemptyset (&pass_mask
);
801 for (signo
= 1; signo
< NSIG
; signo
++)
803 int target_signo
= gdb_signal_from_host (signo
);
804 if (target_signo
< pass_signals
.size () && pass_signals
[target_signo
])
805 sigaddset (&pass_mask
, signo
);
811 /* Prototypes for local functions. */
812 static int stop_wait_callback (struct lwp_info
*lp
);
813 static int resume_stopped_resumed_lwps (struct lwp_info
*lp
, const ptid_t wait_ptid
);
814 static int check_ptrace_stopped_lwp_gone (struct lwp_info
*lp
);
818 /* Destroy and free LP. */
820 lwp_info::~lwp_info ()
822 /* Let the arch specific bits release arch_lwp_info. */
823 linux_target
->low_delete_thread (this->arch_private
);
826 /* Traversal function for purge_lwp_list. */
829 lwp_lwpid_htab_remove_pid (void **slot
, void *info
)
831 struct lwp_info
*lp
= (struct lwp_info
*) *slot
;
832 int pid
= *(int *) info
;
834 if (lp
->ptid
.pid () == pid
)
836 htab_clear_slot (lwp_lwpid_htab
, slot
);
837 lwp_list_remove (lp
);
844 /* Remove all LWPs belong to PID from the lwp list. */
847 purge_lwp_list (int pid
)
849 htab_traverse_noresize (lwp_lwpid_htab
, lwp_lwpid_htab_remove_pid
, &pid
);
852 /* Add the LWP specified by PTID to the list. PTID is the first LWP
853 in the process. Return a pointer to the structure describing the
856 This differs from add_lwp in that we don't let the arch specific
857 bits know about this new thread. Current clients of this callback
858 take the opportunity to install watchpoints in the new thread, and
859 we shouldn't do that for the first thread. If we're spawning a
860 child ("run"), the thread executes the shell wrapper first, and we
861 shouldn't touch it until it execs the program we want to debug.
862 For "attach", it'd be okay to call the callback, but it's not
863 necessary, because watchpoints can't yet have been inserted into
866 static struct lwp_info
*
867 add_initial_lwp (ptid_t ptid
)
869 gdb_assert (ptid
.lwp_p ());
871 lwp_info
*lp
= new lwp_info (ptid
);
874 /* Add to sorted-by-reverse-creation-order list. */
877 /* Add to keyed-by-pid htab. */
878 lwp_lwpid_htab_add_lwp (lp
);
883 /* Add the LWP specified by PID to the list. Return a pointer to the
884 structure describing the new LWP. The LWP should already be
887 static struct lwp_info
*
888 add_lwp (ptid_t ptid
)
892 lp
= add_initial_lwp (ptid
);
894 /* Let the arch specific bits know about this new thread. Current
895 clients of this callback take the opportunity to install
896 watchpoints in the new thread. We don't do this for the first
897 thread though. See add_initial_lwp. */
898 linux_target
->low_new_thread (lp
);
903 /* Remove the LWP specified by PID from the list. */
906 delete_lwp (ptid_t ptid
)
908 lwp_info
dummy (ptid
);
910 void **slot
= htab_find_slot (lwp_lwpid_htab
, &dummy
, NO_INSERT
);
914 lwp_info
*lp
= *(struct lwp_info
**) slot
;
915 gdb_assert (lp
!= NULL
);
917 htab_clear_slot (lwp_lwpid_htab
, slot
);
919 /* Remove from sorted-by-creation-order list. */
920 lwp_list_remove (lp
);
926 /* Return a pointer to the structure describing the LWP corresponding
927 to PID. If no corresponding LWP could be found, return NULL. */
929 static struct lwp_info
*
930 find_lwp_pid (ptid_t ptid
)
939 lwp_info
dummy (ptid_t (0, lwp
));
940 return (struct lwp_info
*) htab_find (lwp_lwpid_htab
, &dummy
);
943 /* See nat/linux-nat.h. */
946 iterate_over_lwps (ptid_t filter
,
947 gdb::function_view
<iterate_over_lwps_ftype
> callback
)
949 for (lwp_info
*lp
: all_lwps_safe ())
951 if (lp
->ptid
.matches (filter
))
953 if (callback (lp
) != 0)
961 /* Update our internal state when changing from one checkpoint to
962 another indicated by NEW_PTID. We can only switch single-threaded
963 applications, so we only create one new LWP, and the previous list
967 linux_nat_switch_fork (ptid_t new_ptid
)
971 purge_lwp_list (inferior_ptid
.pid ());
973 lp
= add_lwp (new_ptid
);
976 /* This changes the thread's ptid while preserving the gdb thread
977 num. Also changes the inferior pid, while preserving the
979 thread_change_ptid (linux_target
, inferior_ptid
, new_ptid
);
981 /* We've just told GDB core that the thread changed target id, but,
982 in fact, it really is a different thread, with different register
984 registers_changed ();
987 /* Handle the exit of a single thread LP. */
990 exit_lwp (struct lwp_info
*lp
)
992 struct thread_info
*th
= find_thread_ptid (linux_target
, lp
->ptid
);
996 if (print_thread_events
)
997 printf_unfiltered (_("[%s exited]\n"),
998 target_pid_to_str (lp
->ptid
).c_str ());
1003 delete_lwp (lp
->ptid
);
1006 /* Wait for the LWP specified by LP, which we have just attached to.
1007 Returns a wait status for that LWP, to cache. */
1010 linux_nat_post_attach_wait (ptid_t ptid
, int *signalled
)
1012 pid_t new_pid
, pid
= ptid
.lwp ();
1015 if (linux_proc_pid_is_stopped (pid
))
1017 linux_nat_debug_printf ("Attaching to a stopped process");
1019 /* The process is definitely stopped. It is in a job control
1020 stop, unless the kernel predates the TASK_STOPPED /
1021 TASK_TRACED distinction, in which case it might be in a
1022 ptrace stop. Make sure it is in a ptrace stop; from there we
1023 can kill it, signal it, et cetera.
1025 First make sure there is a pending SIGSTOP. Since we are
1026 already attached, the process can not transition from stopped
1027 to running without a PTRACE_CONT; so we know this signal will
1028 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1029 probably already in the queue (unless this kernel is old
1030 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
1031 is not an RT signal, it can only be queued once. */
1032 kill_lwp (pid
, SIGSTOP
);
1034 /* Finally, resume the stopped process. This will deliver the SIGSTOP
1035 (or a higher priority signal, just like normal PTRACE_ATTACH). */
1036 ptrace (PTRACE_CONT
, pid
, 0, 0);
1039 /* Make sure the initial process is stopped. The user-level threads
1040 layer might want to poke around in the inferior, and that won't
1041 work if things haven't stabilized yet. */
1042 new_pid
= my_waitpid (pid
, &status
, __WALL
);
1043 gdb_assert (pid
== new_pid
);
1045 if (!WIFSTOPPED (status
))
1047 /* The pid we tried to attach has apparently just exited. */
1048 linux_nat_debug_printf ("Failed to stop %d: %s", pid
,
1049 status_to_str (status
).c_str ());
1053 if (WSTOPSIG (status
) != SIGSTOP
)
1056 linux_nat_debug_printf ("Received %s after attaching",
1057 status_to_str (status
).c_str ());
1064 linux_nat_target::create_inferior (const char *exec_file
,
1065 const std::string
&allargs
,
1066 char **env
, int from_tty
)
1068 maybe_disable_address_space_randomization restore_personality
1069 (disable_randomization
);
1071 /* The fork_child mechanism is synchronous and calls target_wait, so
1072 we have to mask the async mode. */
1074 /* Make sure we report all signals during startup. */
1077 inf_ptrace_target::create_inferior (exec_file
, allargs
, env
, from_tty
);
1079 open_proc_mem_file (inferior_ptid
);
1082 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1083 already attached. Returns true if a new LWP is found, false
1087 attach_proc_task_lwp_callback (ptid_t ptid
)
1089 struct lwp_info
*lp
;
1091 /* Ignore LWPs we're already attached to. */
1092 lp
= find_lwp_pid (ptid
);
1095 int lwpid
= ptid
.lwp ();
1097 if (ptrace (PTRACE_ATTACH
, lwpid
, 0, 0) < 0)
1101 /* Be quiet if we simply raced with the thread exiting.
1102 EPERM is returned if the thread's task still exists, and
1103 is marked as exited or zombie, as well as other
1104 conditions, so in that case, confirm the status in
1105 /proc/PID/status. */
1107 || (err
== EPERM
&& linux_proc_pid_is_gone (lwpid
)))
1109 linux_nat_debug_printf
1110 ("Cannot attach to lwp %d: thread is gone (%d: %s)",
1111 lwpid
, err
, safe_strerror (err
));
1117 = linux_ptrace_attach_fail_reason_string (ptid
, err
);
1119 warning (_("Cannot attach to lwp %d: %s"),
1120 lwpid
, reason
.c_str ());
1125 linux_nat_debug_printf ("PTRACE_ATTACH %s, 0, 0 (OK)",
1126 target_pid_to_str (ptid
).c_str ());
1128 lp
= add_lwp (ptid
);
1130 /* The next time we wait for this LWP we'll see a SIGSTOP as
1131 PTRACE_ATTACH brings it to a halt. */
1134 /* We need to wait for a stop before being able to make the
1135 next ptrace call on this LWP. */
1136 lp
->must_set_ptrace_flags
= 1;
1138 /* So that wait collects the SIGSTOP. */
1141 /* Also add the LWP to gdb's thread list, in case a
1142 matching libthread_db is not found (or the process uses
1144 add_thread (linux_target
, lp
->ptid
);
1145 set_running (linux_target
, lp
->ptid
, true);
1146 set_executing (linux_target
, lp
->ptid
, true);
1155 linux_nat_target::attach (const char *args
, int from_tty
)
1157 struct lwp_info
*lp
;
1161 /* Make sure we report all signals during attach. */
1166 inf_ptrace_target::attach (args
, from_tty
);
1168 catch (const gdb_exception_error
&ex
)
1170 pid_t pid
= parse_pid_to_attach (args
);
1171 std::string reason
= linux_ptrace_attach_fail_reason (pid
);
1173 if (!reason
.empty ())
1174 throw_error (ex
.error
, "warning: %s\n%s", reason
.c_str (),
1177 throw_error (ex
.error
, "%s", ex
.what ());
1180 /* The ptrace base target adds the main thread with (pid,0,0)
1181 format. Decorate it with lwp info. */
1182 ptid
= ptid_t (inferior_ptid
.pid (),
1183 inferior_ptid
.pid ());
1184 thread_change_ptid (linux_target
, inferior_ptid
, ptid
);
1186 /* Add the initial process as the first LWP to the list. */
1187 lp
= add_initial_lwp (ptid
);
1189 status
= linux_nat_post_attach_wait (lp
->ptid
, &lp
->signalled
);
1190 if (!WIFSTOPPED (status
))
1192 if (WIFEXITED (status
))
1194 int exit_code
= WEXITSTATUS (status
);
1196 target_terminal::ours ();
1197 target_mourn_inferior (inferior_ptid
);
1199 error (_("Unable to attach: program exited normally."));
1201 error (_("Unable to attach: program exited with code %d."),
1204 else if (WIFSIGNALED (status
))
1206 enum gdb_signal signo
;
1208 target_terminal::ours ();
1209 target_mourn_inferior (inferior_ptid
);
1211 signo
= gdb_signal_from_host (WTERMSIG (status
));
1212 error (_("Unable to attach: program terminated with signal "
1214 gdb_signal_to_name (signo
),
1215 gdb_signal_to_string (signo
));
1218 internal_error (__FILE__
, __LINE__
,
1219 _("unexpected status %d for PID %ld"),
1220 status
, (long) ptid
.lwp ());
1225 open_proc_mem_file (lp
->ptid
);
1227 /* Save the wait status to report later. */
1229 linux_nat_debug_printf ("waitpid %ld, saving status %s",
1230 (long) lp
->ptid
.pid (),
1231 status_to_str (status
).c_str ());
1233 lp
->status
= status
;
1235 /* We must attach to every LWP. If /proc is mounted, use that to
1236 find them now. The inferior may be using raw clone instead of
1237 using pthreads. But even if it is using pthreads, thread_db
1238 walks structures in the inferior's address space to find the list
1239 of threads/LWPs, and those structures may well be corrupted.
1240 Note that once thread_db is loaded, we'll still use it to list
1241 threads and associate pthread info with each LWP. */
1242 linux_proc_attach_tgid_threads (lp
->ptid
.pid (),
1243 attach_proc_task_lwp_callback
);
1245 if (target_can_async_p ())
1249 /* Ptrace-detach the thread with pid PID. */
1252 detach_one_pid (int pid
, int signo
)
1254 if (ptrace (PTRACE_DETACH
, pid
, 0, signo
) < 0)
1256 int save_errno
= errno
;
1258 /* We know the thread exists, so ESRCH must mean the lwp is
1259 zombie. This can happen if one of the already-detached
1260 threads exits the whole thread group. In that case we're
1261 still attached, and must reap the lwp. */
1262 if (save_errno
== ESRCH
)
1266 ret
= my_waitpid (pid
, &status
, __WALL
);
1269 warning (_("Couldn't reap LWP %d while detaching: %s"),
1270 pid
, safe_strerror (errno
));
1272 else if (!WIFEXITED (status
) && !WIFSIGNALED (status
))
1274 warning (_("Reaping LWP %d while detaching "
1275 "returned unexpected status 0x%x"),
1280 error (_("Can't detach %d: %s"),
1281 pid
, safe_strerror (save_errno
));
1284 linux_nat_debug_printf ("PTRACE_DETACH (%d, %s, 0) (OK)",
1285 pid
, strsignal (signo
));
1288 /* Get pending signal of THREAD as a host signal number, for detaching
1289 purposes. This is the signal the thread last stopped for, which we
1290 need to deliver to the thread when detaching, otherwise, it'd be
1294 get_detach_signal (struct lwp_info
*lp
)
1296 enum gdb_signal signo
= GDB_SIGNAL_0
;
1298 /* If we paused threads momentarily, we may have stored pending
1299 events in lp->status or lp->waitstatus (see stop_wait_callback),
1300 and GDB core hasn't seen any signal for those threads.
1301 Otherwise, the last signal reported to the core is found in the
1302 thread object's stop_signal.
1304 There's a corner case that isn't handled here at present. Only
1305 if the thread stopped with a TARGET_WAITKIND_STOPPED does
1306 stop_signal make sense as a real signal to pass to the inferior.
1307 Some catchpoint related events, like
1308 TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
1309 to GDB_SIGNAL_SIGTRAP when the catchpoint triggers. But,
1310 those traps are debug API (ptrace in our case) related and
1311 induced; the inferior wouldn't see them if it wasn't being
1312 traced. Hence, we should never pass them to the inferior, even
1313 when set to pass state. Since this corner case isn't handled by
1314 infrun.c when proceeding with a signal, for consistency, neither
1315 do we handle it here (or elsewhere in the file we check for
1316 signal pass state). Normally SIGTRAP isn't set to pass state, so
1317 this is really a corner case. */
1319 if (lp
->waitstatus
.kind () != TARGET_WAITKIND_IGNORE
)
1320 signo
= GDB_SIGNAL_0
; /* a pending ptrace event, not a real signal. */
1321 else if (lp
->status
)
1322 signo
= gdb_signal_from_host (WSTOPSIG (lp
->status
));
1325 struct thread_info
*tp
= find_thread_ptid (linux_target
, lp
->ptid
);
1327 if (target_is_non_stop_p () && !tp
->executing ())
1329 if (tp
->has_pending_waitstatus ())
1331 /* If the thread has a pending event, and it was stopped with a
1332 signal, use that signal to resume it. If it has a pending
1333 event of another kind, it was not stopped with a signal, so
1334 resume it without a signal. */
1335 if (tp
->pending_waitstatus ().kind () == TARGET_WAITKIND_STOPPED
)
1336 signo
= tp
->pending_waitstatus ().sig ();
1338 signo
= GDB_SIGNAL_0
;
1341 signo
= tp
->stop_signal ();
1343 else if (!target_is_non_stop_p ())
1346 process_stratum_target
*last_target
;
1348 get_last_target_status (&last_target
, &last_ptid
, nullptr);
1350 if (last_target
== linux_target
1351 && lp
->ptid
.lwp () == last_ptid
.lwp ())
1352 signo
= tp
->stop_signal ();
1356 if (signo
== GDB_SIGNAL_0
)
1358 linux_nat_debug_printf ("lwp %s has no pending signal",
1359 target_pid_to_str (lp
->ptid
).c_str ());
1361 else if (!signal_pass_state (signo
))
1363 linux_nat_debug_printf
1364 ("lwp %s had signal %s but it is in no pass state",
1365 target_pid_to_str (lp
->ptid
).c_str (), gdb_signal_to_string (signo
));
1369 linux_nat_debug_printf ("lwp %s has pending signal %s",
1370 target_pid_to_str (lp
->ptid
).c_str (),
1371 gdb_signal_to_string (signo
));
1373 return gdb_signal_to_host (signo
);
1379 /* Detach from LP. If SIGNO_P is non-NULL, then it points to the
1380 signal number that should be passed to the LWP when detaching.
1381 Otherwise pass any pending signal the LWP may have, if any. */
1384 detach_one_lwp (struct lwp_info
*lp
, int *signo_p
)
1386 int lwpid
= lp
->ptid
.lwp ();
1389 gdb_assert (lp
->status
== 0 || WIFSTOPPED (lp
->status
));
1391 /* If the lwp/thread we are about to detach has a pending fork event,
1392 there is a process GDB is attached to that the core of GDB doesn't know
1393 about. Detach from it. */
1395 /* Check in lwp_info::status. */
1396 if (WIFSTOPPED (lp
->status
) && linux_is_extended_waitstatus (lp
->status
))
1398 int event
= linux_ptrace_get_extended_event (lp
->status
);
1400 if (event
== PTRACE_EVENT_FORK
|| event
== PTRACE_EVENT_VFORK
)
1402 unsigned long child_pid
;
1403 int ret
= ptrace (PTRACE_GETEVENTMSG
, lp
->ptid
.lwp (), 0, &child_pid
);
1405 detach_one_pid (child_pid
, 0);
1407 perror_warning_with_name (_("Failed to detach fork child"));
1411 /* Check in lwp_info::waitstatus. */
1412 if (lp
->waitstatus
.kind () == TARGET_WAITKIND_VFORKED
1413 || lp
->waitstatus
.kind () == TARGET_WAITKIND_FORKED
)
1414 detach_one_pid (lp
->waitstatus
.child_ptid ().pid (), 0);
1417 /* Check in thread_info::pending_waitstatus. */
1418 thread_info
*tp
= find_thread_ptid (linux_target
, lp
->ptid
);
1419 if (tp
->has_pending_waitstatus ())
1421 const target_waitstatus
&ws
= tp
->pending_waitstatus ();
1423 if (ws
.kind () == TARGET_WAITKIND_VFORKED
1424 || ws
.kind () == TARGET_WAITKIND_FORKED
)
1425 detach_one_pid (ws
.child_ptid ().pid (), 0);
1428 /* Check in thread_info::pending_follow. */
1429 if (tp
->pending_follow
.kind () == TARGET_WAITKIND_VFORKED
1430 || tp
->pending_follow
.kind () == TARGET_WAITKIND_FORKED
)
1431 detach_one_pid (tp
->pending_follow
.child_ptid ().pid (), 0);
1433 if (lp
->status
!= 0)
1434 linux_nat_debug_printf ("Pending %s for %s on detach.",
1435 strsignal (WSTOPSIG (lp
->status
)),
1436 target_pid_to_str (lp
->ptid
).c_str ());
1438 /* If there is a pending SIGSTOP, get rid of it. */
1441 linux_nat_debug_printf ("Sending SIGCONT to %s",
1442 target_pid_to_str (lp
->ptid
).c_str ());
1444 kill_lwp (lwpid
, SIGCONT
);
1448 if (signo_p
== NULL
)
1450 /* Pass on any pending signal for this LWP. */
1451 signo
= get_detach_signal (lp
);
1456 /* Preparing to resume may try to write registers, and fail if the
1457 lwp is zombie. If that happens, ignore the error. We'll handle
1458 it below, when detach fails with ESRCH. */
1461 linux_target
->low_prepare_to_resume (lp
);
1463 catch (const gdb_exception_error
&ex
)
1465 if (!check_ptrace_stopped_lwp_gone (lp
))
1469 detach_one_pid (lwpid
, signo
);
1471 delete_lwp (lp
->ptid
);
1475 detach_callback (struct lwp_info
*lp
)
1477 /* We don't actually detach from the thread group leader just yet.
1478 If the thread group exits, we must reap the zombie clone lwps
1479 before we're able to reap the leader. */
1480 if (lp
->ptid
.lwp () != lp
->ptid
.pid ())
1481 detach_one_lwp (lp
, NULL
);
1486 linux_nat_target::detach (inferior
*inf
, int from_tty
)
1488 struct lwp_info
*main_lwp
;
1491 /* Don't unregister from the event loop, as there may be other
1492 inferiors running. */
1494 /* Stop all threads before detaching. ptrace requires that the
1495 thread is stopped to successfully detach. */
1496 iterate_over_lwps (ptid_t (pid
), stop_callback
);
1497 /* ... and wait until all of them have reported back that
1498 they're no longer running. */
1499 iterate_over_lwps (ptid_t (pid
), stop_wait_callback
);
1501 /* We can now safely remove breakpoints. We don't this in earlier
1502 in common code because this target doesn't currently support
1503 writing memory while the inferior is running. */
1504 remove_breakpoints_inf (current_inferior ());
1506 iterate_over_lwps (ptid_t (pid
), detach_callback
);
1508 /* Only the initial process should be left right now. */
1509 gdb_assert (num_lwps (pid
) == 1);
1511 main_lwp
= find_lwp_pid (ptid_t (pid
));
1513 if (forks_exist_p ())
1515 /* Multi-fork case. The current inferior_ptid is being detached
1516 from, but there are other viable forks to debug. Detach from
1517 the current fork, and context-switch to the first
1519 linux_fork_detach (from_tty
);
1523 target_announce_detach (from_tty
);
1525 /* Pass on any pending signal for the last LWP. */
1526 int signo
= get_detach_signal (main_lwp
);
1528 detach_one_lwp (main_lwp
, &signo
);
1530 detach_success (inf
);
1533 close_proc_mem_file (pid
);
1536 /* Resume execution of the inferior process. If STEP is nonzero,
1537 single-step it. If SIGNAL is nonzero, give it that signal. */
1540 linux_resume_one_lwp_throw (struct lwp_info
*lp
, int step
,
1541 enum gdb_signal signo
)
1545 /* stop_pc doubles as the PC the LWP had when it was last resumed.
1546 We only presently need that if the LWP is stepped though (to
1547 handle the case of stepping a breakpoint instruction). */
1550 struct regcache
*regcache
= get_thread_regcache (linux_target
, lp
->ptid
);
1552 lp
->stop_pc
= regcache_read_pc (regcache
);
1557 linux_target
->low_prepare_to_resume (lp
);
1558 linux_target
->low_resume (lp
->ptid
, step
, signo
);
1560 /* Successfully resumed. Clear state that no longer makes sense,
1561 and mark the LWP as running. Must not do this before resuming
1562 otherwise if that fails other code will be confused. E.g., we'd
1563 later try to stop the LWP and hang forever waiting for a stop
1564 status. Note that we must not throw after this is cleared,
1565 otherwise handle_zombie_lwp_error would get confused. */
1568 lp
->stop_reason
= TARGET_STOPPED_BY_NO_REASON
;
1569 registers_changed_ptid (linux_target
, lp
->ptid
);
1572 /* Called when we try to resume a stopped LWP and that errors out. If
1573 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
1574 or about to become), discard the error, clear any pending status
1575 the LWP may have, and return true (we'll collect the exit status
1576 soon enough). Otherwise, return false. */
1579 check_ptrace_stopped_lwp_gone (struct lwp_info
*lp
)
1581 /* If we get an error after resuming the LWP successfully, we'd
1582 confuse !T state for the LWP being gone. */
1583 gdb_assert (lp
->stopped
);
1585 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
1586 because even if ptrace failed with ESRCH, the tracee may be "not
1587 yet fully dead", but already refusing ptrace requests. In that
1588 case the tracee has 'R (Running)' state for a little bit
1589 (observed in Linux 3.18). See also the note on ESRCH in the
1590 ptrace(2) man page. Instead, check whether the LWP has any state
1591 other than ptrace-stopped. */
1593 /* Don't assume anything if /proc/PID/status can't be read. */
1594 if (linux_proc_pid_is_trace_stopped_nowarn (lp
->ptid
.lwp ()) == 0)
1596 lp
->stop_reason
= TARGET_STOPPED_BY_NO_REASON
;
1598 lp
->waitstatus
.set_ignore ();
1604 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
1605 disappears while we try to resume it. */
1608 linux_resume_one_lwp (struct lwp_info
*lp
, int step
, enum gdb_signal signo
)
1612 linux_resume_one_lwp_throw (lp
, step
, signo
);
1614 catch (const gdb_exception_error
&ex
)
1616 if (!check_ptrace_stopped_lwp_gone (lp
))
1624 resume_lwp (struct lwp_info
*lp
, int step
, enum gdb_signal signo
)
1628 struct inferior
*inf
= find_inferior_ptid (linux_target
, lp
->ptid
);
1630 if (inf
->vfork_child
!= NULL
)
1632 linux_nat_debug_printf ("Not resuming %s (vfork parent)",
1633 target_pid_to_str (lp
->ptid
).c_str ());
1635 else if (!lwp_status_pending_p (lp
))
1637 linux_nat_debug_printf ("Resuming sibling %s, %s, %s",
1638 target_pid_to_str (lp
->ptid
).c_str (),
1639 (signo
!= GDB_SIGNAL_0
1640 ? strsignal (gdb_signal_to_host (signo
))
1642 step
? "step" : "resume");
1644 linux_resume_one_lwp (lp
, step
, signo
);
1648 linux_nat_debug_printf ("Not resuming sibling %s (has pending)",
1649 target_pid_to_str (lp
->ptid
).c_str ());
1653 linux_nat_debug_printf ("Not resuming sibling %s (not stopped)",
1654 target_pid_to_str (lp
->ptid
).c_str ());
1657 /* Callback for iterate_over_lwps. If LWP is EXCEPT, do nothing.
1658 Resume LWP with the last stop signal, if it is in pass state. */
1661 linux_nat_resume_callback (struct lwp_info
*lp
, struct lwp_info
*except
)
1663 enum gdb_signal signo
= GDB_SIGNAL_0
;
1670 struct thread_info
*thread
;
1672 thread
= find_thread_ptid (linux_target
, lp
->ptid
);
1675 signo
= thread
->stop_signal ();
1676 thread
->set_stop_signal (GDB_SIGNAL_0
);
1680 resume_lwp (lp
, 0, signo
);
1685 resume_clear_callback (struct lwp_info
*lp
)
1688 lp
->last_resume_kind
= resume_stop
;
1693 resume_set_callback (struct lwp_info
*lp
)
1696 lp
->last_resume_kind
= resume_continue
;
1701 linux_nat_target::resume (ptid_t ptid
, int step
, enum gdb_signal signo
)
1703 struct lwp_info
*lp
;
1706 linux_nat_debug_printf ("Preparing to %s %s, %s, inferior_ptid %s",
1707 step
? "step" : "resume",
1708 target_pid_to_str (ptid
).c_str (),
1709 (signo
!= GDB_SIGNAL_0
1710 ? strsignal (gdb_signal_to_host (signo
)) : "0"),
1711 target_pid_to_str (inferior_ptid
).c_str ());
1713 /* A specific PTID means `step only this process id'. */
1714 resume_many
= (minus_one_ptid
== ptid
1717 /* Mark the lwps we're resuming as resumed and update their
1718 last_resume_kind to resume_continue. */
1719 iterate_over_lwps (ptid
, resume_set_callback
);
1721 /* See if it's the current inferior that should be handled
1724 lp
= find_lwp_pid (inferior_ptid
);
1726 lp
= find_lwp_pid (ptid
);
1727 gdb_assert (lp
!= NULL
);
1729 /* Remember if we're stepping. */
1730 lp
->last_resume_kind
= step
? resume_step
: resume_continue
;
1732 /* If we have a pending wait status for this thread, there is no
1733 point in resuming the process. But first make sure that
1734 linux_nat_wait won't preemptively handle the event - we
1735 should never take this short-circuit if we are going to
1736 leave LP running, since we have skipped resuming all the
1737 other threads. This bit of code needs to be synchronized
1738 with linux_nat_wait. */
1740 if (lp
->status
&& WIFSTOPPED (lp
->status
))
1743 && WSTOPSIG (lp
->status
)
1744 && sigismember (&pass_mask
, WSTOPSIG (lp
->status
)))
1746 linux_nat_debug_printf
1747 ("Not short circuiting for ignored status 0x%x", lp
->status
);
1749 /* FIXME: What should we do if we are supposed to continue
1750 this thread with a signal? */
1751 gdb_assert (signo
== GDB_SIGNAL_0
);
1752 signo
= gdb_signal_from_host (WSTOPSIG (lp
->status
));
1757 if (lwp_status_pending_p (lp
))
1759 /* FIXME: What should we do if we are supposed to continue
1760 this thread with a signal? */
1761 gdb_assert (signo
== GDB_SIGNAL_0
);
1763 linux_nat_debug_printf ("Short circuiting for status 0x%x",
1766 if (target_can_async_p ())
1769 /* Tell the event loop we have something to process. */
1776 iterate_over_lwps (ptid
, [=] (struct lwp_info
*info
)
1778 return linux_nat_resume_callback (info
, lp
);
1781 linux_nat_debug_printf ("%s %s, %s (resume event thread)",
1782 step
? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1783 target_pid_to_str (lp
->ptid
).c_str (),
1784 (signo
!= GDB_SIGNAL_0
1785 ? strsignal (gdb_signal_to_host (signo
)) : "0"));
1787 linux_resume_one_lwp (lp
, step
, signo
);
1789 if (target_can_async_p ())
1793 /* Send a signal to an LWP. */
1796 kill_lwp (int lwpid
, int signo
)
1801 ret
= syscall (__NR_tkill
, lwpid
, signo
);
1802 if (errno
== ENOSYS
)
1804 /* If tkill fails, then we are not using nptl threads, a
1805 configuration we no longer support. */
1806 perror_with_name (("tkill"));
1811 /* Handle a GNU/Linux syscall trap wait response. If we see a syscall
1812 event, check if the core is interested in it: if not, ignore the
1813 event, and keep waiting; otherwise, we need to toggle the LWP's
1814 syscall entry/exit status, since the ptrace event itself doesn't
1815 indicate it, and report the trap to higher layers. */
1818 linux_handle_syscall_trap (struct lwp_info
*lp
, int stopping
)
1820 struct target_waitstatus
*ourstatus
= &lp
->waitstatus
;
1821 struct gdbarch
*gdbarch
= target_thread_architecture (lp
->ptid
);
1822 thread_info
*thread
= find_thread_ptid (linux_target
, lp
->ptid
);
1823 int syscall_number
= (int) gdbarch_get_syscall_number (gdbarch
, thread
);
1827 /* If we're stopping threads, there's a SIGSTOP pending, which
1828 makes it so that the LWP reports an immediate syscall return,
1829 followed by the SIGSTOP. Skip seeing that "return" using
1830 PTRACE_CONT directly, and let stop_wait_callback collect the
1831 SIGSTOP. Later when the thread is resumed, a new syscall
1832 entry event. If we didn't do this (and returned 0), we'd
1833 leave a syscall entry pending, and our caller, by using
1834 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
1835 itself. Later, when the user re-resumes this LWP, we'd see
1836 another syscall entry event and we'd mistake it for a return.
1838 If stop_wait_callback didn't force the SIGSTOP out of the LWP
1839 (leaving immediately with LWP->signalled set, without issuing
1840 a PTRACE_CONT), it would still be problematic to leave this
1841 syscall enter pending, as later when the thread is resumed,
1842 it would then see the same syscall exit mentioned above,
1843 followed by the delayed SIGSTOP, while the syscall didn't
1844 actually get to execute. It seems it would be even more
1845 confusing to the user. */
1847 linux_nat_debug_printf
1848 ("ignoring syscall %d for LWP %ld (stopping threads), resuming with "
1849 "PTRACE_CONT for SIGSTOP", syscall_number
, lp
->ptid
.lwp ());
1851 lp
->syscall_state
= TARGET_WAITKIND_IGNORE
;
1852 ptrace (PTRACE_CONT
, lp
->ptid
.lwp (), 0, 0);
1857 /* Always update the entry/return state, even if this particular
1858 syscall isn't interesting to the core now. In async mode,
1859 the user could install a new catchpoint for this syscall
1860 between syscall enter/return, and we'll need to know to
1861 report a syscall return if that happens. */
1862 lp
->syscall_state
= (lp
->syscall_state
== TARGET_WAITKIND_SYSCALL_ENTRY
1863 ? TARGET_WAITKIND_SYSCALL_RETURN
1864 : TARGET_WAITKIND_SYSCALL_ENTRY
);
1866 if (catch_syscall_enabled ())
1868 if (catching_syscall_number (syscall_number
))
1870 /* Alright, an event to report. */
1871 if (lp
->syscall_state
== TARGET_WAITKIND_SYSCALL_ENTRY
)
1872 ourstatus
->set_syscall_entry (syscall_number
);
1873 else if (lp
->syscall_state
== TARGET_WAITKIND_SYSCALL_RETURN
)
1874 ourstatus
->set_syscall_return (syscall_number
);
1876 gdb_assert_not_reached ("unexpected syscall state");
1878 linux_nat_debug_printf
1879 ("stopping for %s of syscall %d for LWP %ld",
1880 (lp
->syscall_state
== TARGET_WAITKIND_SYSCALL_ENTRY
1881 ? "entry" : "return"), syscall_number
, lp
->ptid
.lwp ());
1886 linux_nat_debug_printf
1887 ("ignoring %s of syscall %d for LWP %ld",
1888 (lp
->syscall_state
== TARGET_WAITKIND_SYSCALL_ENTRY
1889 ? "entry" : "return"), syscall_number
, lp
->ptid
.lwp ());
1893 /* If we had been syscall tracing, and hence used PT_SYSCALL
1894 before on this LWP, it could happen that the user removes all
1895 syscall catchpoints before we get to process this event.
1896 There are two noteworthy issues here:
1898 - When stopped at a syscall entry event, resuming with
1899 PT_STEP still resumes executing the syscall and reports a
1902 - Only PT_SYSCALL catches syscall enters. If we last
1903 single-stepped this thread, then this event can't be a
1904 syscall enter. If we last single-stepped this thread, this
1905 has to be a syscall exit.
1907 The points above mean that the next resume, be it PT_STEP or
1908 PT_CONTINUE, can not trigger a syscall trace event. */
1909 linux_nat_debug_printf
1910 ("caught syscall event with no syscall catchpoints. %d for LWP %ld, "
1911 "ignoring", syscall_number
, lp
->ptid
.lwp ());
1912 lp
->syscall_state
= TARGET_WAITKIND_IGNORE
;
1915 /* The core isn't interested in this event. For efficiency, avoid
1916 stopping all threads only to have the core resume them all again.
1917 Since we're not stopping threads, if we're still syscall tracing
1918 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
1919 subsequent syscall. Simply resume using the inf-ptrace layer,
1920 which knows when to use PT_SYSCALL or PT_CONTINUE. */
1922 linux_resume_one_lwp (lp
, lp
->step
, GDB_SIGNAL_0
);
1926 /* Handle a GNU/Linux extended wait response. If we see a clone
1927 event, we need to add the new LWP to our list (and not report the
1928 trap to higher layers). This function returns non-zero if the
1929 event should be ignored and we should wait again. If STOPPING is
1930 true, the new LWP remains stopped, otherwise it is continued. */
1933 linux_handle_extended_wait (struct lwp_info
*lp
, int status
)
1935 int pid
= lp
->ptid
.lwp ();
1936 struct target_waitstatus
*ourstatus
= &lp
->waitstatus
;
1937 int event
= linux_ptrace_get_extended_event (status
);
1939 /* All extended events we currently use are mid-syscall. Only
1940 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
1941 you have to be using PTRACE_SEIZE to get that. */
1942 lp
->syscall_state
= TARGET_WAITKIND_SYSCALL_ENTRY
;
1944 if (event
== PTRACE_EVENT_FORK
|| event
== PTRACE_EVENT_VFORK
1945 || event
== PTRACE_EVENT_CLONE
)
1947 unsigned long new_pid
;
1950 ptrace (PTRACE_GETEVENTMSG
, pid
, 0, &new_pid
);
1952 /* If we haven't already seen the new PID stop, wait for it now. */
1953 if (! pull_pid_from_list (&stopped_pids
, new_pid
, &status
))
1955 /* The new child has a pending SIGSTOP. We can't affect it until it
1956 hits the SIGSTOP, but we're already attached. */
1957 ret
= my_waitpid (new_pid
, &status
, __WALL
);
1959 perror_with_name (_("waiting for new child"));
1960 else if (ret
!= new_pid
)
1961 internal_error (__FILE__
, __LINE__
,
1962 _("wait returned unexpected PID %d"), ret
);
1963 else if (!WIFSTOPPED (status
))
1964 internal_error (__FILE__
, __LINE__
,
1965 _("wait returned unexpected status 0x%x"), status
);
1968 ptid_t
child_ptid (new_pid
, new_pid
);
1970 if (event
== PTRACE_EVENT_FORK
|| event
== PTRACE_EVENT_VFORK
)
1972 open_proc_mem_file (child_ptid
);
1974 /* The arch-specific native code may need to know about new
1975 forks even if those end up never mapped to an
1977 linux_target
->low_new_fork (lp
, new_pid
);
1979 else if (event
== PTRACE_EVENT_CLONE
)
1981 linux_target
->low_new_clone (lp
, new_pid
);
1984 if (event
== PTRACE_EVENT_FORK
1985 && linux_fork_checkpointing_p (lp
->ptid
.pid ()))
1987 /* Handle checkpointing by linux-fork.c here as a special
1988 case. We don't want the follow-fork-mode or 'catch fork'
1989 to interfere with this. */
1991 /* This won't actually modify the breakpoint list, but will
1992 physically remove the breakpoints from the child. */
1993 detach_breakpoints (ptid_t (new_pid
, new_pid
));
1995 /* Retain child fork in ptrace (stopped) state. */
1996 if (!find_fork_pid (new_pid
))
1999 /* Report as spurious, so that infrun doesn't want to follow
2000 this fork. We're actually doing an infcall in
2002 ourstatus
->set_spurious ();
2004 /* Report the stop to the core. */
2008 if (event
== PTRACE_EVENT_FORK
)
2009 ourstatus
->set_forked (child_ptid
);
2010 else if (event
== PTRACE_EVENT_VFORK
)
2011 ourstatus
->set_vforked (child_ptid
);
2012 else if (event
== PTRACE_EVENT_CLONE
)
2014 struct lwp_info
*new_lp
;
2016 ourstatus
->set_ignore ();
2018 linux_nat_debug_printf
2019 ("Got clone event from LWP %d, new child is LWP %ld", pid
, new_pid
);
2021 new_lp
= add_lwp (ptid_t (lp
->ptid
.pid (), new_pid
));
2022 new_lp
->stopped
= 1;
2023 new_lp
->resumed
= 1;
2025 /* If the thread_db layer is active, let it record the user
2026 level thread id and status, and add the thread to GDB's
2028 if (!thread_db_notice_clone (lp
->ptid
, new_lp
->ptid
))
2030 /* The process is not using thread_db. Add the LWP to
2032 target_post_attach (new_lp
->ptid
.lwp ());
2033 add_thread (linux_target
, new_lp
->ptid
);
2036 /* Even if we're stopping the thread for some reason
2037 internal to this module, from the perspective of infrun
2038 and the user/frontend, this new thread is running until
2039 it next reports a stop. */
2040 set_running (linux_target
, new_lp
->ptid
, true);
2041 set_executing (linux_target
, new_lp
->ptid
, true);
2043 if (WSTOPSIG (status
) != SIGSTOP
)
2045 /* This can happen if someone starts sending signals to
2046 the new thread before it gets a chance to run, which
2047 have a lower number than SIGSTOP (e.g. SIGUSR1).
2048 This is an unlikely case, and harder to handle for
2049 fork / vfork than for clone, so we do not try - but
2050 we handle it for clone events here. */
2052 new_lp
->signalled
= 1;
2054 /* We created NEW_LP so it cannot yet contain STATUS. */
2055 gdb_assert (new_lp
->status
== 0);
2057 /* Save the wait status to report later. */
2058 linux_nat_debug_printf
2059 ("waitpid of new LWP %ld, saving status %s",
2060 (long) new_lp
->ptid
.lwp (), status_to_str (status
).c_str ());
2061 new_lp
->status
= status
;
2063 else if (report_thread_events
)
2065 new_lp
->waitstatus
.set_thread_created ();
2066 new_lp
->status
= status
;
2075 if (event
== PTRACE_EVENT_EXEC
)
2077 linux_nat_debug_printf ("Got exec event from LWP %ld", lp
->ptid
.lwp ());
2079 /* Close the previous /proc/PID/mem file for this inferior,
2080 which was using the address space which is now gone.
2081 Reading/writing from this file would return 0/EOF. */
2082 close_proc_mem_file (lp
->ptid
.pid ());
2084 /* Open a new file for the new address space. */
2085 open_proc_mem_file (lp
->ptid
);
2087 ourstatus
->set_execd
2088 (make_unique_xstrdup (linux_proc_pid_to_exec_file (pid
)));
2090 /* The thread that execed must have been resumed, but, when a
2091 thread execs, it changes its tid to the tgid, and the old
2092 tgid thread might have not been resumed. */
2097 if (event
== PTRACE_EVENT_VFORK_DONE
)
2099 if (current_inferior ()->waiting_for_vfork_done
)
2101 linux_nat_debug_printf
2102 ("Got expected PTRACE_EVENT_VFORK_DONE from LWP %ld: stopping",
2105 ourstatus
->set_vfork_done ();
2109 linux_nat_debug_printf
2110 ("Got PTRACE_EVENT_VFORK_DONE from LWP %ld: ignoring", lp
->ptid
.lwp ());
2115 internal_error (__FILE__
, __LINE__
,
2116 _("unknown ptrace event %d"), event
);
2119 /* Suspend waiting for a signal. We're mostly interested in
2125 linux_nat_debug_printf ("about to sigsuspend");
2126 sigsuspend (&suspend_mask
);
2128 /* If the quit flag is set, it means that the user pressed Ctrl-C
2129 and we're debugging a process that is running on a separate
2130 terminal, so we must forward the Ctrl-C to the inferior. (If the
2131 inferior is sharing GDB's terminal, then the Ctrl-C reaches the
2132 inferior directly.) We must do this here because functions that
2133 need to block waiting for a signal loop forever until there's an
2134 event to report before returning back to the event loop. */
2135 if (!target_terminal::is_ours ())
2137 if (check_quit_flag ())
2138 target_pass_ctrlc ();
2142 /* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2146 wait_lwp (struct lwp_info
*lp
)
2150 int thread_dead
= 0;
2153 gdb_assert (!lp
->stopped
);
2154 gdb_assert (lp
->status
== 0);
2156 /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below. */
2157 block_child_signals (&prev_mask
);
2161 pid
= my_waitpid (lp
->ptid
.lwp (), &status
, __WALL
| WNOHANG
);
2162 if (pid
== -1 && errno
== ECHILD
)
2164 /* The thread has previously exited. We need to delete it
2165 now because if this was a non-leader thread execing, we
2166 won't get an exit event. See comments on exec events at
2167 the top of the file. */
2169 linux_nat_debug_printf ("%s vanished.",
2170 target_pid_to_str (lp
->ptid
).c_str ());
2175 /* Bugs 10970, 12702.
2176 Thread group leader may have exited in which case we'll lock up in
2177 waitpid if there are other threads, even if they are all zombies too.
2178 Basically, we're not supposed to use waitpid this way.
2179 tkill(pid,0) cannot be used here as it gets ESRCH for both
2180 for zombie and running processes.
2182 As a workaround, check if we're waiting for the thread group leader and
2183 if it's a zombie, and avoid calling waitpid if it is.
2185 This is racy, what if the tgl becomes a zombie right after we check?
2186 Therefore always use WNOHANG with sigsuspend - it is equivalent to
2187 waiting waitpid but linux_proc_pid_is_zombie is safe this way. */
2189 if (lp
->ptid
.pid () == lp
->ptid
.lwp ()
2190 && linux_proc_pid_is_zombie (lp
->ptid
.lwp ()))
2193 linux_nat_debug_printf ("Thread group leader %s vanished.",
2194 target_pid_to_str (lp
->ptid
).c_str ());
2198 /* Wait for next SIGCHLD and try again. This may let SIGCHLD handlers
2199 get invoked despite our caller had them intentionally blocked by
2200 block_child_signals. This is sensitive only to the loop of
2201 linux_nat_wait_1 and there if we get called my_waitpid gets called
2202 again before it gets to sigsuspend so we can safely let the handlers
2203 get executed here. */
2207 restore_child_signals_mask (&prev_mask
);
2211 gdb_assert (pid
== lp
->ptid
.lwp ());
2213 linux_nat_debug_printf ("waitpid %s received %s",
2214 target_pid_to_str (lp
->ptid
).c_str (),
2215 status_to_str (status
).c_str ());
2217 /* Check if the thread has exited. */
2218 if (WIFEXITED (status
) || WIFSIGNALED (status
))
2220 if (report_thread_events
2221 || lp
->ptid
.pid () == lp
->ptid
.lwp ())
2223 linux_nat_debug_printf ("LWP %d exited.", lp
->ptid
.pid ());
2225 /* If this is the leader exiting, it means the whole
2226 process is gone. Store the status to report to the
2227 core. Store it in lp->waitstatus, because lp->status
2228 would be ambiguous (W_EXITCODE(0,0) == 0). */
2229 lp
->waitstatus
= host_status_to_waitstatus (status
);
2234 linux_nat_debug_printf ("%s exited.",
2235 target_pid_to_str (lp
->ptid
).c_str ());
2245 gdb_assert (WIFSTOPPED (status
));
2248 if (lp
->must_set_ptrace_flags
)
2250 inferior
*inf
= find_inferior_pid (linux_target
, lp
->ptid
.pid ());
2251 int options
= linux_nat_ptrace_options (inf
->attach_flag
);
2253 linux_enable_event_reporting (lp
->ptid
.lwp (), options
);
2254 lp
->must_set_ptrace_flags
= 0;
2257 /* Handle GNU/Linux's syscall SIGTRAPs. */
2258 if (WIFSTOPPED (status
) && WSTOPSIG (status
) == SYSCALL_SIGTRAP
)
2260 /* No longer need the sysgood bit. The ptrace event ends up
2261 recorded in lp->waitstatus if we care for it. We can carry
2262 on handling the event like a regular SIGTRAP from here
2264 status
= W_STOPCODE (SIGTRAP
);
2265 if (linux_handle_syscall_trap (lp
, 1))
2266 return wait_lwp (lp
);
2270 /* Almost all other ptrace-stops are known to be outside of system
2271 calls, with further exceptions in linux_handle_extended_wait. */
2272 lp
->syscall_state
= TARGET_WAITKIND_IGNORE
;
2275 /* Handle GNU/Linux's extended waitstatus for trace events. */
2276 if (WIFSTOPPED (status
) && WSTOPSIG (status
) == SIGTRAP
2277 && linux_is_extended_waitstatus (status
))
2279 linux_nat_debug_printf ("Handling extended status 0x%06x", status
);
2280 linux_handle_extended_wait (lp
, status
);
2287 /* Send a SIGSTOP to LP. */
2290 stop_callback (struct lwp_info
*lp
)
2292 if (!lp
->stopped
&& !lp
->signalled
)
2296 linux_nat_debug_printf ("kill %s **<SIGSTOP>**",
2297 target_pid_to_str (lp
->ptid
).c_str ());
2300 ret
= kill_lwp (lp
->ptid
.lwp (), SIGSTOP
);
2301 linux_nat_debug_printf ("lwp kill %d %s", ret
,
2302 errno
? safe_strerror (errno
) : "ERRNO-OK");
2305 gdb_assert (lp
->status
== 0);
2311 /* Request a stop on LWP. */
2314 linux_stop_lwp (struct lwp_info
*lwp
)
2316 stop_callback (lwp
);
2319 /* See linux-nat.h */
2322 linux_stop_and_wait_all_lwps (void)
2324 /* Stop all LWP's ... */
2325 iterate_over_lwps (minus_one_ptid
, stop_callback
);
2327 /* ... and wait until all of them have reported back that
2328 they're no longer running. */
2329 iterate_over_lwps (minus_one_ptid
, stop_wait_callback
);
2332 /* See linux-nat.h */
2335 linux_unstop_all_lwps (void)
2337 iterate_over_lwps (minus_one_ptid
,
2338 [] (struct lwp_info
*info
)
2340 return resume_stopped_resumed_lwps (info
, minus_one_ptid
);
2344 /* Return non-zero if LWP PID has a pending SIGINT. */
2347 linux_nat_has_pending_sigint (int pid
)
2349 sigset_t pending
, blocked
, ignored
;
2351 linux_proc_pending_signals (pid
, &pending
, &blocked
, &ignored
);
2353 if (sigismember (&pending
, SIGINT
)
2354 && !sigismember (&ignored
, SIGINT
))
2360 /* Set a flag in LP indicating that we should ignore its next SIGINT. */
2363 set_ignore_sigint (struct lwp_info
*lp
)
2365 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2366 flag to consume the next one. */
2367 if (lp
->stopped
&& lp
->status
!= 0 && WIFSTOPPED (lp
->status
)
2368 && WSTOPSIG (lp
->status
) == SIGINT
)
2371 lp
->ignore_sigint
= 1;
2376 /* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2377 This function is called after we know the LWP has stopped; if the LWP
2378 stopped before the expected SIGINT was delivered, then it will never have
2379 arrived. Also, if the signal was delivered to a shared queue and consumed
2380 by a different thread, it will never be delivered to this LWP. */
2383 maybe_clear_ignore_sigint (struct lwp_info
*lp
)
2385 if (!lp
->ignore_sigint
)
2388 if (!linux_nat_has_pending_sigint (lp
->ptid
.lwp ()))
2390 linux_nat_debug_printf ("Clearing bogus flag for %s",
2391 target_pid_to_str (lp
->ptid
).c_str ());
2392 lp
->ignore_sigint
= 0;
2396 /* Fetch the possible triggered data watchpoint info and store it in
2399 On some archs, like x86, that use debug registers to set
2400 watchpoints, it's possible that the way to know which watched
2401 address trapped, is to check the register that is used to select
2402 which address to watch. Problem is, between setting the watchpoint
2403 and reading back which data address trapped, the user may change
2404 the set of watchpoints, and, as a consequence, GDB changes the
2405 debug registers in the inferior. To avoid reading back a stale
2406 stopped-data-address when that happens, we cache in LP the fact
2407 that a watchpoint trapped, and the corresponding data address, as
2408 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2409 registers meanwhile, we have the cached data we can rely on. */
2412 check_stopped_by_watchpoint (struct lwp_info
*lp
)
2414 scoped_restore save_inferior_ptid
= make_scoped_restore (&inferior_ptid
);
2415 inferior_ptid
= lp
->ptid
;
2417 if (linux_target
->low_stopped_by_watchpoint ())
2419 lp
->stop_reason
= TARGET_STOPPED_BY_WATCHPOINT
;
2420 lp
->stopped_data_address_p
2421 = linux_target
->low_stopped_data_address (&lp
->stopped_data_address
);
2424 return lp
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
;
2427 /* Returns true if the LWP had stopped for a watchpoint. */
2430 linux_nat_target::stopped_by_watchpoint ()
2432 struct lwp_info
*lp
= find_lwp_pid (inferior_ptid
);
2434 gdb_assert (lp
!= NULL
);
2436 return lp
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
;
2440 linux_nat_target::stopped_data_address (CORE_ADDR
*addr_p
)
2442 struct lwp_info
*lp
= find_lwp_pid (inferior_ptid
);
2444 gdb_assert (lp
!= NULL
);
2446 *addr_p
= lp
->stopped_data_address
;
2448 return lp
->stopped_data_address_p
;
2451 /* Commonly any breakpoint / watchpoint generate only SIGTRAP. */
2454 linux_nat_target::low_status_is_event (int status
)
2456 return WIFSTOPPED (status
) && WSTOPSIG (status
) == SIGTRAP
;
2459 /* Wait until LP is stopped. */
2462 stop_wait_callback (struct lwp_info
*lp
)
2464 inferior
*inf
= find_inferior_ptid (linux_target
, lp
->ptid
);
2466 /* If this is a vfork parent, bail out, it is not going to report
2467 any SIGSTOP until the vfork is done with. */
2468 if (inf
->vfork_child
!= NULL
)
2475 status
= wait_lwp (lp
);
2479 if (lp
->ignore_sigint
&& WIFSTOPPED (status
)
2480 && WSTOPSIG (status
) == SIGINT
)
2482 lp
->ignore_sigint
= 0;
2485 ptrace (PTRACE_CONT
, lp
->ptid
.lwp (), 0, 0);
2487 linux_nat_debug_printf
2488 ("PTRACE_CONT %s, 0, 0 (%s) (discarding SIGINT)",
2489 target_pid_to_str (lp
->ptid
).c_str (),
2490 errno
? safe_strerror (errno
) : "OK");
2492 return stop_wait_callback (lp
);
2495 maybe_clear_ignore_sigint (lp
);
2497 if (WSTOPSIG (status
) != SIGSTOP
)
2499 /* The thread was stopped with a signal other than SIGSTOP. */
2501 linux_nat_debug_printf ("Pending event %s in %s",
2502 status_to_str ((int) status
).c_str (),
2503 target_pid_to_str (lp
->ptid
).c_str ());
2505 /* Save the sigtrap event. */
2506 lp
->status
= status
;
2507 gdb_assert (lp
->signalled
);
2508 save_stop_reason (lp
);
2512 /* We caught the SIGSTOP that we intended to catch. */
2514 linux_nat_debug_printf ("Expected SIGSTOP caught for %s.",
2515 target_pid_to_str (lp
->ptid
).c_str ());
2519 /* If we are waiting for this stop so we can report the thread
2520 stopped then we need to record this status. Otherwise, we can
2521 now discard this stop event. */
2522 if (lp
->last_resume_kind
== resume_stop
)
2524 lp
->status
= status
;
2525 save_stop_reason (lp
);
2533 /* Return non-zero if LP has a wait status pending. Discard the
2534 pending event and resume the LWP if the event that originally
2535 caused the stop became uninteresting. */
2538 status_callback (struct lwp_info
*lp
)
2540 /* Only report a pending wait status if we pretend that this has
2541 indeed been resumed. */
2545 if (!lwp_status_pending_p (lp
))
2548 if (lp
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
2549 || lp
->stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
)
2551 struct regcache
*regcache
= get_thread_regcache (linux_target
, lp
->ptid
);
2555 pc
= regcache_read_pc (regcache
);
2557 if (pc
!= lp
->stop_pc
)
2559 linux_nat_debug_printf ("PC of %s changed. was=%s, now=%s",
2560 target_pid_to_str (lp
->ptid
).c_str (),
2561 paddress (target_gdbarch (), lp
->stop_pc
),
2562 paddress (target_gdbarch (), pc
));
2566 #if !USE_SIGTRAP_SIGINFO
2567 else if (!breakpoint_inserted_here_p (regcache
->aspace (), pc
))
2569 linux_nat_debug_printf ("previous breakpoint of %s, at %s gone",
2570 target_pid_to_str (lp
->ptid
).c_str (),
2571 paddress (target_gdbarch (), lp
->stop_pc
));
2579 linux_nat_debug_printf ("pending event of %s cancelled.",
2580 target_pid_to_str (lp
->ptid
).c_str ());
2583 linux_resume_one_lwp (lp
, lp
->step
, GDB_SIGNAL_0
);
2591 /* Count the LWP's that have had events. */
2594 count_events_callback (struct lwp_info
*lp
, int *count
)
2596 gdb_assert (count
!= NULL
);
2598 /* Select only resumed LWPs that have an event pending. */
2599 if (lp
->resumed
&& lwp_status_pending_p (lp
))
2605 /* Select the LWP (if any) that is currently being single-stepped. */
2608 select_singlestep_lwp_callback (struct lwp_info
*lp
)
2610 if (lp
->last_resume_kind
== resume_step
2617 /* Returns true if LP has a status pending. */
2620 lwp_status_pending_p (struct lwp_info
*lp
)
2622 /* We check for lp->waitstatus in addition to lp->status, because we
2623 can have pending process exits recorded in lp->status and
2624 W_EXITCODE(0,0) happens to be 0. */
2625 return lp
->status
!= 0 || lp
->waitstatus
.kind () != TARGET_WAITKIND_IGNORE
;
2628 /* Select the Nth LWP that has had an event. */
2631 select_event_lwp_callback (struct lwp_info
*lp
, int *selector
)
2633 gdb_assert (selector
!= NULL
);
2635 /* Select only resumed LWPs that have an event pending. */
2636 if (lp
->resumed
&& lwp_status_pending_p (lp
))
2637 if ((*selector
)-- == 0)
2643 /* Called when the LWP stopped for a signal/trap. If it stopped for a
2644 trap check what caused it (breakpoint, watchpoint, trace, etc.),
2645 and save the result in the LWP's stop_reason field. If it stopped
2646 for a breakpoint, decrement the PC if necessary on the lwp's
2650 save_stop_reason (struct lwp_info
*lp
)
2652 struct regcache
*regcache
;
2653 struct gdbarch
*gdbarch
;
2656 #if USE_SIGTRAP_SIGINFO
2660 gdb_assert (lp
->stop_reason
== TARGET_STOPPED_BY_NO_REASON
);
2661 gdb_assert (lp
->status
!= 0);
2663 if (!linux_target
->low_status_is_event (lp
->status
))
2666 regcache
= get_thread_regcache (linux_target
, lp
->ptid
);
2667 gdbarch
= regcache
->arch ();
2669 pc
= regcache_read_pc (regcache
);
2670 sw_bp_pc
= pc
- gdbarch_decr_pc_after_break (gdbarch
);
2672 #if USE_SIGTRAP_SIGINFO
2673 if (linux_nat_get_siginfo (lp
->ptid
, &siginfo
))
2675 if (siginfo
.si_signo
== SIGTRAP
)
2677 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo
.si_code
)
2678 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo
.si_code
))
2680 /* The si_code is ambiguous on this arch -- check debug
2682 if (!check_stopped_by_watchpoint (lp
))
2683 lp
->stop_reason
= TARGET_STOPPED_BY_SW_BREAKPOINT
;
2685 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo
.si_code
))
2687 /* If we determine the LWP stopped for a SW breakpoint,
2688 trust it. Particularly don't check watchpoint
2689 registers, because, at least on s390, we'd find
2690 stopped-by-watchpoint as long as there's a watchpoint
2692 lp
->stop_reason
= TARGET_STOPPED_BY_SW_BREAKPOINT
;
2694 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo
.si_code
))
2696 /* This can indicate either a hardware breakpoint or
2697 hardware watchpoint. Check debug registers. */
2698 if (!check_stopped_by_watchpoint (lp
))
2699 lp
->stop_reason
= TARGET_STOPPED_BY_HW_BREAKPOINT
;
2701 else if (siginfo
.si_code
== TRAP_TRACE
)
2703 linux_nat_debug_printf ("%s stopped by trace",
2704 target_pid_to_str (lp
->ptid
).c_str ());
2706 /* We may have single stepped an instruction that
2707 triggered a watchpoint. In that case, on some
2708 architectures (such as x86), instead of TRAP_HWBKPT,
2709 si_code indicates TRAP_TRACE, and we need to check
2710 the debug registers separately. */
2711 check_stopped_by_watchpoint (lp
);
2716 if ((!lp
->step
|| lp
->stop_pc
== sw_bp_pc
)
2717 && software_breakpoint_inserted_here_p (regcache
->aspace (),
2720 /* The LWP was either continued, or stepped a software
2721 breakpoint instruction. */
2722 lp
->stop_reason
= TARGET_STOPPED_BY_SW_BREAKPOINT
;
2725 if (hardware_breakpoint_inserted_here_p (regcache
->aspace (), pc
))
2726 lp
->stop_reason
= TARGET_STOPPED_BY_HW_BREAKPOINT
;
2728 if (lp
->stop_reason
== TARGET_STOPPED_BY_NO_REASON
)
2729 check_stopped_by_watchpoint (lp
);
2732 if (lp
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
)
2734 linux_nat_debug_printf ("%s stopped by software breakpoint",
2735 target_pid_to_str (lp
->ptid
).c_str ());
2737 /* Back up the PC if necessary. */
2739 regcache_write_pc (regcache
, sw_bp_pc
);
2741 /* Update this so we record the correct stop PC below. */
2744 else if (lp
->stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
)
2746 linux_nat_debug_printf ("%s stopped by hardware breakpoint",
2747 target_pid_to_str (lp
->ptid
).c_str ());
2749 else if (lp
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
)
2751 linux_nat_debug_printf ("%s stopped by hardware watchpoint",
2752 target_pid_to_str (lp
->ptid
).c_str ());
2759 /* Returns true if the LWP had stopped for a software breakpoint. */
2762 linux_nat_target::stopped_by_sw_breakpoint ()
2764 struct lwp_info
*lp
= find_lwp_pid (inferior_ptid
);
2766 gdb_assert (lp
!= NULL
);
2768 return lp
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
;
2771 /* Implement the supports_stopped_by_sw_breakpoint method. */
2774 linux_nat_target::supports_stopped_by_sw_breakpoint ()
2776 return USE_SIGTRAP_SIGINFO
;
2779 /* Returns true if the LWP had stopped for a hardware
2780 breakpoint/watchpoint. */
2783 linux_nat_target::stopped_by_hw_breakpoint ()
2785 struct lwp_info
*lp
= find_lwp_pid (inferior_ptid
);
2787 gdb_assert (lp
!= NULL
);
2789 return lp
->stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
;
2792 /* Implement the supports_stopped_by_hw_breakpoint method. */
2795 linux_nat_target::supports_stopped_by_hw_breakpoint ()
2797 return USE_SIGTRAP_SIGINFO
;
2800 /* Select one LWP out of those that have events pending. */
2803 select_event_lwp (ptid_t filter
, struct lwp_info
**orig_lp
, int *status
)
2806 int random_selector
;
2807 struct lwp_info
*event_lp
= NULL
;
2809 /* Record the wait status for the original LWP. */
2810 (*orig_lp
)->status
= *status
;
2812 /* In all-stop, give preference to the LWP that is being
2813 single-stepped. There will be at most one, and it will be the
2814 LWP that the core is most interested in. If we didn't do this,
2815 then we'd have to handle pending step SIGTRAPs somehow in case
2816 the core later continues the previously-stepped thread, as
2817 otherwise we'd report the pending SIGTRAP then, and the core, not
2818 having stepped the thread, wouldn't understand what the trap was
2819 for, and therefore would report it to the user as a random
2821 if (!target_is_non_stop_p ())
2823 event_lp
= iterate_over_lwps (filter
, select_singlestep_lwp_callback
);
2824 if (event_lp
!= NULL
)
2826 linux_nat_debug_printf ("Select single-step %s",
2827 target_pid_to_str (event_lp
->ptid
).c_str ());
2831 if (event_lp
== NULL
)
2833 /* Pick one at random, out of those which have had events. */
2835 /* First see how many events we have. */
2836 iterate_over_lwps (filter
,
2837 [&] (struct lwp_info
*info
)
2839 return count_events_callback (info
, &num_events
);
2841 gdb_assert (num_events
> 0);
2843 /* Now randomly pick a LWP out of those that have had
2845 random_selector
= (int)
2846 ((num_events
* (double) rand ()) / (RAND_MAX
+ 1.0));
2849 linux_nat_debug_printf ("Found %d events, selecting #%d",
2850 num_events
, random_selector
);
2853 = (iterate_over_lwps
2855 [&] (struct lwp_info
*info
)
2857 return select_event_lwp_callback (info
,
2862 if (event_lp
!= NULL
)
2864 /* Switch the event LWP. */
2865 *orig_lp
= event_lp
;
2866 *status
= event_lp
->status
;
2869 /* Flush the wait status for the event LWP. */
2870 (*orig_lp
)->status
= 0;
2873 /* Return non-zero if LP has been resumed. */
2876 resumed_callback (struct lwp_info
*lp
)
2881 /* Check if we should go on and pass this event to common code.
2883 If so, save the status to the lwp_info structure associated to LWPID. */
2886 linux_nat_filter_event (int lwpid
, int status
)
2888 struct lwp_info
*lp
;
2889 int event
= linux_ptrace_get_extended_event (status
);
2891 lp
= find_lwp_pid (ptid_t (lwpid
));
2893 /* Check for stop events reported by a process we didn't already
2894 know about - anything not already in our LWP list.
2896 If we're expecting to receive stopped processes after
2897 fork, vfork, and clone events, then we'll just add the
2898 new one to our list and go back to waiting for the event
2899 to be reported - the stopped process might be returned
2900 from waitpid before or after the event is.
2902 But note the case of a non-leader thread exec'ing after the
2903 leader having exited, and gone from our lists. The non-leader
2904 thread changes its tid to the tgid. */
2906 if (WIFSTOPPED (status
) && lp
== NULL
2907 && (WSTOPSIG (status
) == SIGTRAP
&& event
== PTRACE_EVENT_EXEC
))
2909 /* A multi-thread exec after we had seen the leader exiting. */
2910 linux_nat_debug_printf ("Re-adding thread group leader LWP %d.", lwpid
);
2912 lp
= add_lwp (ptid_t (lwpid
, lwpid
));
2915 add_thread (linux_target
, lp
->ptid
);
2918 if (WIFSTOPPED (status
) && !lp
)
2920 linux_nat_debug_printf ("saving LWP %ld status %s in stopped_pids list",
2921 (long) lwpid
, status_to_str (status
).c_str ());
2922 add_to_pid_list (&stopped_pids
, lwpid
, status
);
2926 /* Make sure we don't report an event for the exit of an LWP not in
2927 our list, i.e. not part of the current process. This can happen
2928 if we detach from a program we originally forked and then it
2930 if (!WIFSTOPPED (status
) && !lp
)
2933 /* This LWP is stopped now. (And if dead, this prevents it from
2934 ever being continued.) */
2937 if (WIFSTOPPED (status
) && lp
->must_set_ptrace_flags
)
2939 inferior
*inf
= find_inferior_pid (linux_target
, lp
->ptid
.pid ());
2940 int options
= linux_nat_ptrace_options (inf
->attach_flag
);
2942 linux_enable_event_reporting (lp
->ptid
.lwp (), options
);
2943 lp
->must_set_ptrace_flags
= 0;
2946 /* Handle GNU/Linux's syscall SIGTRAPs. */
2947 if (WIFSTOPPED (status
) && WSTOPSIG (status
) == SYSCALL_SIGTRAP
)
2949 /* No longer need the sysgood bit. The ptrace event ends up
2950 recorded in lp->waitstatus if we care for it. We can carry
2951 on handling the event like a regular SIGTRAP from here
2953 status
= W_STOPCODE (SIGTRAP
);
2954 if (linux_handle_syscall_trap (lp
, 0))
2959 /* Almost all other ptrace-stops are known to be outside of system
2960 calls, with further exceptions in linux_handle_extended_wait. */
2961 lp
->syscall_state
= TARGET_WAITKIND_IGNORE
;
2964 /* Handle GNU/Linux's extended waitstatus for trace events. */
2965 if (WIFSTOPPED (status
) && WSTOPSIG (status
) == SIGTRAP
2966 && linux_is_extended_waitstatus (status
))
2968 linux_nat_debug_printf ("Handling extended status 0x%06x", status
);
2970 if (linux_handle_extended_wait (lp
, status
))
2974 /* Check if the thread has exited. */
2975 if (WIFEXITED (status
) || WIFSIGNALED (status
))
2977 if (!report_thread_events
2978 && num_lwps (lp
->ptid
.pid ()) > 1)
2980 linux_nat_debug_printf ("%s exited.",
2981 target_pid_to_str (lp
->ptid
).c_str ());
2983 /* If there is at least one more LWP, then the exit signal
2984 was not the end of the debugged application and should be
2990 /* Note that even if the leader was ptrace-stopped, it can still
2991 exit, if e.g., some other thread brings down the whole
2992 process (calls `exit'). So don't assert that the lwp is
2994 linux_nat_debug_printf ("LWP %ld exited (resumed=%d)",
2995 lp
->ptid
.lwp (), lp
->resumed
);
2997 /* Dead LWP's aren't expected to reported a pending sigstop. */
3000 /* Store the pending event in the waitstatus, because
3001 W_EXITCODE(0,0) == 0. */
3002 lp
->waitstatus
= host_status_to_waitstatus (status
);
3006 /* Make sure we don't report a SIGSTOP that we sent ourselves in
3007 an attempt to stop an LWP. */
3009 && WIFSTOPPED (status
) && WSTOPSIG (status
) == SIGSTOP
)
3013 if (lp
->last_resume_kind
== resume_stop
)
3015 linux_nat_debug_printf ("resume_stop SIGSTOP caught for %s.",
3016 target_pid_to_str (lp
->ptid
).c_str ());
3020 /* This is a delayed SIGSTOP. Filter out the event. */
3022 linux_nat_debug_printf
3023 ("%s %s, 0, 0 (discard delayed SIGSTOP)",
3024 lp
->step
? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3025 target_pid_to_str (lp
->ptid
).c_str ());
3027 linux_resume_one_lwp (lp
, lp
->step
, GDB_SIGNAL_0
);
3028 gdb_assert (lp
->resumed
);
3033 /* Make sure we don't report a SIGINT that we have already displayed
3034 for another thread. */
3035 if (lp
->ignore_sigint
3036 && WIFSTOPPED (status
) && WSTOPSIG (status
) == SIGINT
)
3038 linux_nat_debug_printf ("Delayed SIGINT caught for %s.",
3039 target_pid_to_str (lp
->ptid
).c_str ());
3041 /* This is a delayed SIGINT. */
3042 lp
->ignore_sigint
= 0;
3044 linux_resume_one_lwp (lp
, lp
->step
, GDB_SIGNAL_0
);
3045 linux_nat_debug_printf ("%s %s, 0, 0 (discard SIGINT)",
3046 lp
->step
? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3047 target_pid_to_str (lp
->ptid
).c_str ());
3048 gdb_assert (lp
->resumed
);
3050 /* Discard the event. */
3054 /* Don't report signals that GDB isn't interested in, such as
3055 signals that are neither printed nor stopped upon. Stopping all
3056 threads can be a bit time-consuming, so if we want decent
3057 performance with heavily multi-threaded programs, especially when
3058 they're using a high frequency timer, we'd better avoid it if we
3060 if (WIFSTOPPED (status
))
3062 enum gdb_signal signo
= gdb_signal_from_host (WSTOPSIG (status
));
3064 if (!target_is_non_stop_p ())
3066 /* Only do the below in all-stop, as we currently use SIGSTOP
3067 to implement target_stop (see linux_nat_stop) in
3069 if (signo
== GDB_SIGNAL_INT
&& signal_pass_state (signo
) == 0)
3071 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3072 forwarded to the entire process group, that is, all LWPs
3073 will receive it - unless they're using CLONE_THREAD to
3074 share signals. Since we only want to report it once, we
3075 mark it as ignored for all LWPs except this one. */
3076 iterate_over_lwps (ptid_t (lp
->ptid
.pid ()), set_ignore_sigint
);
3077 lp
->ignore_sigint
= 0;
3080 maybe_clear_ignore_sigint (lp
);
3083 /* When using hardware single-step, we need to report every signal.
3084 Otherwise, signals in pass_mask may be short-circuited
3085 except signals that might be caused by a breakpoint, or SIGSTOP
3086 if we sent the SIGSTOP and are waiting for it to arrive. */
3088 && WSTOPSIG (status
) && sigismember (&pass_mask
, WSTOPSIG (status
))
3089 && (WSTOPSIG (status
) != SIGSTOP
3090 || !find_thread_ptid (linux_target
, lp
->ptid
)->stop_requested
)
3091 && !linux_wstatus_maybe_breakpoint (status
))
3093 linux_resume_one_lwp (lp
, lp
->step
, signo
);
3094 linux_nat_debug_printf
3095 ("%s %s, %s (preempt 'handle')",
3096 lp
->step
? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3097 target_pid_to_str (lp
->ptid
).c_str (),
3098 (signo
!= GDB_SIGNAL_0
3099 ? strsignal (gdb_signal_to_host (signo
)) : "0"));
3104 /* An interesting event. */
3106 lp
->status
= status
;
3107 save_stop_reason (lp
);
3110 /* Detect zombie thread group leaders, and "exit" them. We can't reap
3111 their exits until all other threads in the group have exited. */
3114 check_zombie_leaders (void)
3116 for (inferior
*inf
: all_inferiors ())
3118 struct lwp_info
*leader_lp
;
3123 leader_lp
= find_lwp_pid (ptid_t (inf
->pid
));
3124 if (leader_lp
!= NULL
3125 /* Check if there are other threads in the group, as we may
3126 have raced with the inferior simply exiting. */
3127 && num_lwps (inf
->pid
) > 1
3128 && linux_proc_pid_is_zombie (inf
->pid
))
3130 linux_nat_debug_printf ("Thread group leader %d zombie "
3131 "(it exited, or another thread execd).",
3134 /* A leader zombie can mean one of two things:
3136 - It exited, and there's an exit status pending
3137 available, or only the leader exited (not the whole
3138 program). In the latter case, we can't waitpid the
3139 leader's exit status until all other threads are gone.
3141 - There are 3 or more threads in the group, and a thread
3142 other than the leader exec'd. See comments on exec
3143 events at the top of the file. We could try
3144 distinguishing the exit and exec cases, by waiting once
3145 more, and seeing if something comes out, but it doesn't
3146 sound useful. The previous leader _does_ go away, and
3147 we'll re-add the new one once we see the exec event
3148 (which is just the same as what would happen if the
3149 previous leader did exit voluntarily before some other
3152 linux_nat_debug_printf ("Thread group leader %d vanished.", inf
->pid
);
3153 exit_lwp (leader_lp
);
3158 /* Convenience function that is called when the kernel reports an exit
3159 event. This decides whether to report the event to GDB as a
3160 process exit event, a thread exit event, or to suppress the
3164 filter_exit_event (struct lwp_info
*event_child
,
3165 struct target_waitstatus
*ourstatus
)
3167 ptid_t ptid
= event_child
->ptid
;
3169 if (num_lwps (ptid
.pid ()) > 1)
3171 if (report_thread_events
)
3172 ourstatus
->set_thread_exited (0);
3174 ourstatus
->set_ignore ();
3176 exit_lwp (event_child
);
3183 linux_nat_wait_1 (ptid_t ptid
, struct target_waitstatus
*ourstatus
,
3184 target_wait_flags target_options
)
3187 enum resume_kind last_resume_kind
;
3188 struct lwp_info
*lp
;
3191 linux_nat_debug_printf ("enter");
3193 /* The first time we get here after starting a new inferior, we may
3194 not have added it to the LWP list yet - this is the earliest
3195 moment at which we know its PID. */
3196 if (ptid
.is_pid () && find_lwp_pid (ptid
) == nullptr)
3198 ptid_t
lwp_ptid (ptid
.pid (), ptid
.pid ());
3200 /* Upgrade the main thread's ptid. */
3201 thread_change_ptid (linux_target
, ptid
, lwp_ptid
);
3202 lp
= add_initial_lwp (lwp_ptid
);
3206 /* Make sure SIGCHLD is blocked until the sigsuspend below. */
3207 block_child_signals (&prev_mask
);
3209 /* First check if there is a LWP with a wait status pending. */
3210 lp
= iterate_over_lwps (ptid
, status_callback
);
3213 linux_nat_debug_printf ("Using pending wait status %s for %s.",
3214 status_to_str (lp
->status
).c_str (),
3215 target_pid_to_str (lp
->ptid
).c_str ());
3218 /* But if we don't find a pending event, we'll have to wait. Always
3219 pull all events out of the kernel. We'll randomly select an
3220 event LWP out of all that have events, to prevent starvation. */
3226 /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
3229 - If the thread group leader exits while other threads in the
3230 thread group still exist, waitpid(TGID, ...) hangs. That
3231 waitpid won't return an exit status until the other threads
3232 in the group are reaped.
3234 - When a non-leader thread execs, that thread just vanishes
3235 without reporting an exit (so we'd hang if we waited for it
3236 explicitly in that case). The exec event is reported to
3240 lwpid
= my_waitpid (-1, &status
, __WALL
| WNOHANG
);
3242 linux_nat_debug_printf ("waitpid(-1, ...) returned %d, %s",
3244 errno
? safe_strerror (errno
) : "ERRNO-OK");
3248 linux_nat_debug_printf ("waitpid %ld received %s",
3250 status_to_str (status
).c_str ());
3252 linux_nat_filter_event (lwpid
, status
);
3253 /* Retry until nothing comes out of waitpid. A single
3254 SIGCHLD can indicate more than one child stopped. */
3258 /* Now that we've pulled all events out of the kernel, resume
3259 LWPs that don't have an interesting event to report. */
3260 iterate_over_lwps (minus_one_ptid
,
3261 [] (struct lwp_info
*info
)
3263 return resume_stopped_resumed_lwps (info
, minus_one_ptid
);
3266 /* ... and find an LWP with a status to report to the core, if
3268 lp
= iterate_over_lwps (ptid
, status_callback
);
3272 /* Check for zombie thread group leaders. Those can't be reaped
3273 until all other threads in the thread group are. */
3274 check_zombie_leaders ();
3276 /* If there are no resumed children left, bail. We'd be stuck
3277 forever in the sigsuspend call below otherwise. */
3278 if (iterate_over_lwps (ptid
, resumed_callback
) == NULL
)
3280 linux_nat_debug_printf ("exit (no resumed LWP)");
3282 ourstatus
->set_no_resumed ();
3284 restore_child_signals_mask (&prev_mask
);
3285 return minus_one_ptid
;
3288 /* No interesting event to report to the core. */
3290 if (target_options
& TARGET_WNOHANG
)
3292 linux_nat_debug_printf ("exit (ignore)");
3294 ourstatus
->set_ignore ();
3295 restore_child_signals_mask (&prev_mask
);
3296 return minus_one_ptid
;
3299 /* We shouldn't end up here unless we want to try again. */
3300 gdb_assert (lp
== NULL
);
3302 /* Block until we get an event reported with SIGCHLD. */
3308 status
= lp
->status
;
3311 if (!target_is_non_stop_p ())
3313 /* Now stop all other LWP's ... */
3314 iterate_over_lwps (minus_one_ptid
, stop_callback
);
3316 /* ... and wait until all of them have reported back that
3317 they're no longer running. */
3318 iterate_over_lwps (minus_one_ptid
, stop_wait_callback
);
3321 /* If we're not waiting for a specific LWP, choose an event LWP from
3322 among those that have had events. Giving equal priority to all
3323 LWPs that have had events helps prevent starvation. */
3324 if (ptid
== minus_one_ptid
|| ptid
.is_pid ())
3325 select_event_lwp (ptid
, &lp
, &status
);
3327 gdb_assert (lp
!= NULL
);
3329 /* Now that we've selected our final event LWP, un-adjust its PC if
3330 it was a software breakpoint, and we can't reliably support the
3331 "stopped by software breakpoint" stop reason. */
3332 if (lp
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
3333 && !USE_SIGTRAP_SIGINFO
)
3335 struct regcache
*regcache
= get_thread_regcache (linux_target
, lp
->ptid
);
3336 struct gdbarch
*gdbarch
= regcache
->arch ();
3337 int decr_pc
= gdbarch_decr_pc_after_break (gdbarch
);
3343 pc
= regcache_read_pc (regcache
);
3344 regcache_write_pc (regcache
, pc
+ decr_pc
);
3348 /* We'll need this to determine whether to report a SIGSTOP as
3349 GDB_SIGNAL_0. Need to take a copy because resume_clear_callback
3351 last_resume_kind
= lp
->last_resume_kind
;
3353 if (!target_is_non_stop_p ())
3355 /* In all-stop, from the core's perspective, all LWPs are now
3356 stopped until a new resume action is sent over. */
3357 iterate_over_lwps (minus_one_ptid
, resume_clear_callback
);
3361 resume_clear_callback (lp
);
3364 if (linux_target
->low_status_is_event (status
))
3366 linux_nat_debug_printf ("trap ptid is %s.",
3367 target_pid_to_str (lp
->ptid
).c_str ());
3370 if (lp
->waitstatus
.kind () != TARGET_WAITKIND_IGNORE
)
3372 *ourstatus
= lp
->waitstatus
;
3373 lp
->waitstatus
.set_ignore ();
3376 *ourstatus
= host_status_to_waitstatus (status
);
3378 linux_nat_debug_printf ("exit");
3380 restore_child_signals_mask (&prev_mask
);
3382 if (last_resume_kind
== resume_stop
3383 && ourstatus
->kind () == TARGET_WAITKIND_STOPPED
3384 && WSTOPSIG (status
) == SIGSTOP
)
3386 /* A thread that has been requested to stop by GDB with
3387 target_stop, and it stopped cleanly, so report as SIG0. The
3388 use of SIGSTOP is an implementation detail. */
3389 ourstatus
->set_stopped (GDB_SIGNAL_0
);
3392 if (ourstatus
->kind () == TARGET_WAITKIND_EXITED
3393 || ourstatus
->kind () == TARGET_WAITKIND_SIGNALLED
)
3396 lp
->core
= linux_common_core_of_thread (lp
->ptid
);
3398 if (ourstatus
->kind () == TARGET_WAITKIND_EXITED
)
3399 return filter_exit_event (lp
, ourstatus
);
3404 /* Resume LWPs that are currently stopped without any pending status
3405 to report, but are resumed from the core's perspective. */
3408 resume_stopped_resumed_lwps (struct lwp_info
*lp
, const ptid_t wait_ptid
)
3412 linux_nat_debug_printf ("NOT resuming LWP %s, not stopped",
3413 target_pid_to_str (lp
->ptid
).c_str ());
3415 else if (!lp
->resumed
)
3417 linux_nat_debug_printf ("NOT resuming LWP %s, not resumed",
3418 target_pid_to_str (lp
->ptid
).c_str ());
3420 else if (lwp_status_pending_p (lp
))
3422 linux_nat_debug_printf ("NOT resuming LWP %s, has pending status",
3423 target_pid_to_str (lp
->ptid
).c_str ());
3427 struct regcache
*regcache
= get_thread_regcache (linux_target
, lp
->ptid
);
3428 struct gdbarch
*gdbarch
= regcache
->arch ();
3432 CORE_ADDR pc
= regcache_read_pc (regcache
);
3433 int leave_stopped
= 0;
3435 /* Don't bother if there's a breakpoint at PC that we'd hit
3436 immediately, and we're not waiting for this LWP. */
3437 if (!lp
->ptid
.matches (wait_ptid
))
3439 if (breakpoint_inserted_here_p (regcache
->aspace (), pc
))
3445 linux_nat_debug_printf
3446 ("resuming stopped-resumed LWP %s at %s: step=%d",
3447 target_pid_to_str (lp
->ptid
).c_str (), paddress (gdbarch
, pc
),
3450 linux_resume_one_lwp_throw (lp
, lp
->step
, GDB_SIGNAL_0
);
3453 catch (const gdb_exception_error
&ex
)
3455 if (!check_ptrace_stopped_lwp_gone (lp
))
/* Implement the target_ops::wait method for the GNU/Linux native
   target.  NOTE(review): this extract has non-contiguous original line
   numbers, so statements are missing from view; the comments below
   describe only the code that is visible here.  */
3464 linux_nat_target::wait (ptid_t ptid
, struct target_waitstatus
*ourstatus
,
3465 target_wait_flags target_options
)
/* Log the incoming request when linux-nat debugging is enabled.  */
3469 linux_nat_debug_printf ("[%s], [%s]", target_pid_to_str (ptid
).c_str (),
3470 target_options_to_string (target_options
).c_str ());
3472 /* Flush the async file first. */
3473 if (target_is_async_p ())
3474 async_file_flush ();
3476 /* Resume LWPs that are currently stopped without any pending status
3477 to report, but are resumed from the core's perspective. LWPs get
3478 in this state if we find them stopping at a time we're not
3479 interested in reporting the event (target_wait on a
3480 specific_process, for example, see linux_nat_wait_1), and
3481 meanwhile the event became uninteresting. Don't bother resuming
3482 LWPs we're not going to wait for if they'd stop immediately. */
3483 if (target_is_non_stop_p ())
3484 iterate_over_lwps (minus_one_ptid
,
3485 [=] (struct lwp_info
*info
)
3487 return resume_stopped_resumed_lwps (info
, ptid
);
/* Do the actual wait; linux_nat_wait_1 fills in OURSTATUS.  */
3490 event_ptid
= linux_nat_wait_1 (ptid
, ourstatus
, target_options
);
3492 /* If we requested any event, and something came out, assume there
3493 may be more. If we requested a specific lwp or process, also
3494 assume there may be more. */
3495 if (target_is_async_p ()
3496 && ((ourstatus
->kind () != TARGET_WAITKIND_IGNORE
3497 && ourstatus
->kind () != TARGET_WAITKIND_NO_RESUMED
)
3498 || ptid
!= minus_one_ptid
))
/* Send SIGKILL (and then PTRACE_KILL, for kernels that ignore SIGKILL
   on ptraced processes) to the LWP with lwpid PID.  */
3507 kill_one_lwp (pid_t pid
)
3509 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
3512 kill_lwp (pid
, SIGKILL
);
3514 if (debug_linux_nat
)
/* Save errno before the debug call can clobber it.  */
3516 int save_errno
= errno
;
3518 linux_nat_debug_printf
3519 ("kill (SIGKILL) %ld, 0, 0 (%s)", (long) pid
,
3520 save_errno
!= 0 ? safe_strerror (save_errno
) : "OK");
3523 /* Some kernels ignore even SIGKILL for processes under ptrace. */
3526 ptrace (PTRACE_KILL
, pid
, 0, 0);
3527 if (debug_linux_nat
)
3529 int save_errno
= errno
;
3531 linux_nat_debug_printf
3532 ("PTRACE_KILL %ld, 0, 0 (%s)", (long) pid
,
3533 save_errno
? safe_strerror (save_errno
) : "OK");
3537 /* Wait for an LWP to die. */
3540 kill_wait_one_lwp (pid_t pid
)
3544 /* We must make sure that there are no pending events (delayed
3545 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
3546 program doesn't interfere with any following debugging session. */
/* Loop body (loop header elided in this extract): reap the LWP.  */
3550 res
= my_waitpid (pid
, NULL
, __WALL
);
3551 if (res
!= (pid_t
) -1)
3553 linux_nat_debug_printf ("wait %ld received unknown.", (long) pid
);
3555 /* The Linux kernel sometimes fails to kill a thread
3556 completely after PTRACE_KILL; that goes from the stop
3557 point in do_fork out to the one in get_signal_to_deliver
3558 and waits again. So kill it again. */
/* On exit from the loop, the LWP must be fully reaped.  */
3564 gdb_assert (res
== -1 && errno
== ECHILD
);
3567 /* Callback for iterate_over_lwps. */
3570 kill_callback (struct lwp_info
*lp
)
3572 kill_one_lwp (lp
->ptid
.lwp ());
3576 /* Callback for iterate_over_lwps. */
3579 kill_wait_callback (struct lwp_info
*lp
)
3581 kill_wait_one_lwp (lp
->ptid
.lwp ());
3585 /* Kill the fork children of any threads of inferior INF that are
3586 stopped at a fork event. */
3589 kill_unfollowed_fork_children (struct inferior
*inf
)
3591 for (thread_info
*thread
: inf
->non_exited_threads ())
/* A pending fork/vfork event records the not-yet-followed child.  */
3593 struct target_waitstatus
*ws
= &thread
->pending_follow
;
3595 if (ws
->kind () == TARGET_WAITKIND_FORKED
3596 || ws
->kind () == TARGET_WAITKIND_VFORKED
)
3598 ptid_t child_ptid
= ws
->child_ptid ();
3599 int child_pid
= child_ptid
.pid ();
3600 int child_lwp
= child_ptid
.lwp ();
/* Kill the child LWP and wait for it to be reaped.  */
3602 kill_one_lwp (child_lwp
);
3603 kill_wait_one_lwp (child_lwp
);
3605 /* Let the arch-specific native code know this process is
3607 linux_target
->low_forget_process (child_pid
);
/* Implement the target_ops::kill method: kill every LWP of the
   current inferior, then mourn it.  */
3613 linux_nat_target::kill ()
3615 /* If we're stopped while forking and we haven't followed yet,
3616 kill the other task. We need to do this first because the
3617 parent will be sleeping if this is a vfork. */
3618 kill_unfollowed_fork_children (current_inferior ());
3620 if (forks_exist_p ())
3621 linux_fork_killall ();
/* Build a wildcard ptid matching all LWPs of this process.  */
3624 ptid_t ptid
= ptid_t (inferior_ptid
.pid ());
3626 /* Stop all threads before killing them, since ptrace requires
3627 that the thread is stopped to successfully PTRACE_KILL. */
3628 iterate_over_lwps (ptid
, stop_callback
);
3629 /* ... and wait until all of them have reported back that
3630 they're no longer running. */
3631 iterate_over_lwps (ptid
, stop_wait_callback
);
3633 /* Kill all LWP's ... */
3634 iterate_over_lwps (ptid
, kill_callback
);
3636 /* ... and wait until we've flushed all events. */
3637 iterate_over_lwps (ptid
, kill_wait_callback
);
3640 target_mourn_inferior (inferior_ptid
);
/* Implement the target_ops::mourn_inferior method: clean up per-process
   state (LWP list, /proc/PID/mem file) and hand off to the generic
   ptrace mourn, or switch to another fork if forks remain.  */
3644 linux_nat_target::mourn_inferior ()
3646 int pid
= inferior_ptid
.pid ();
3648 purge_lwp_list (pid
);
/* Drop the cached /proc/PID/mem file descriptor for this process.  */
3650 close_proc_mem_file (pid
);
3652 if (! forks_exist_p ())
3653 /* Normal case, no other forks available. */
3654 inf_ptrace_target::mourn_inferior ();
3656 /* Multi-fork case. The current inferior_ptid has exited, but
3657 there are other viable forks to debug. Delete the exiting
3658 one and context-switch to the first available. */
3659 linux_fork_mourn_inferior ();
3661 /* Let the arch-specific native code know this process is gone. */
3662 linux_target
->low_forget_process (pid
);
3665 /* Convert a native/host siginfo object, into/from the siginfo in the
3666 layout of the inferiors' architecture. */
3669 siginfo_fixup (siginfo_t
*siginfo
, gdb_byte
*inf_siginfo
, int direction
)
3671 /* If the low target didn't do anything, then just do a straight
/* low_siginfo_fixup returns false when no conversion was needed, in
   which case we fall back to a raw memcpy in the requested
   DIRECTION (0 = host -> inferior buffer, 1 = inferior -> host,
   per the callers in linux_xfer_siginfo).  */
3673 if (!linux_target
->low_siginfo_fixup (siginfo
, inf_siginfo
, direction
))
3676 memcpy (siginfo
, inf_siginfo
, sizeof (siginfo_t
));
3678 memcpy (inf_siginfo
, siginfo
, sizeof (siginfo_t
));
/* Transfer the TARGET_OBJECT_SIGNAL_INFO object: read or write the
   siginfo of the current LWP via PTRACE_GETSIGINFO/PTRACE_SETSIGINFO,
   converting between host and inferior siginfo layouts.  */
3682 static enum target_xfer_status
3683 linux_xfer_siginfo (enum target_object object
,
3684 const char *annex
, gdb_byte
*readbuf
,
3685 const gdb_byte
*writebuf
, ULONGEST offset
, ULONGEST len
,
3686 ULONGEST
*xfered_len
)
/* Scratch buffer holding the siginfo in the inferior's layout.  */
3690 gdb_byte inf_siginfo
[sizeof (siginfo_t
)];
3692 gdb_assert (object
== TARGET_OBJECT_SIGNAL_INFO
);
3693 gdb_assert (readbuf
|| writebuf
);
/* Prefer the LWP id when inferior_ptid carries one.  */
3695 pid
= inferior_ptid
.lwp ();
3697 pid
= inferior_ptid
.pid ();
3699 if (offset
> sizeof (siginfo
))
3700 return TARGET_XFER_E_IO
;
3703 ptrace (PTRACE_GETSIGINFO
, pid
, (PTRACE_TYPE_ARG3
) 0, &siginfo
);
/* Error path for the ptrace call above (condition elided in this
   extract).  */
3705 return TARGET_XFER_E_IO
;
3707 /* When GDB is built as a 64-bit application, ptrace writes into
3708 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
3709 inferior with a 64-bit GDB should look the same as debugging it
3710 with a 32-bit GDB, we need to convert it. GDB core always sees
3711 the converted layout, so any read/write will have to be done
3713 siginfo_fixup (&siginfo
, inf_siginfo
, 0);
/* Clamp the requested window to the siginfo size.  */
3715 if (offset
+ len
> sizeof (siginfo
))
3716 len
= sizeof (siginfo
) - offset
;
3718 if (readbuf
!= NULL
)
3719 memcpy (readbuf
, inf_siginfo
+ offset
, len
);
3722 memcpy (inf_siginfo
+ offset
, writebuf
, len
);
3724 /* Convert back to ptrace layout before flushing it out. */
3725 siginfo_fixup (&siginfo
, inf_siginfo
, 1);
3728 ptrace (PTRACE_SETSIGINFO
, pid
, (PTRACE_TYPE_ARG3
) 0, &siginfo
);
3730 return TARGET_XFER_E_IO
;
3734 return TARGET_XFER_OK
;
/* Forward declarations for helpers defined later in this file, needed
   by xfer_partial below.  */
3737 static enum target_xfer_status
3738 linux_nat_xfer_osdata (enum target_object object
,
3739 const char *annex
, gdb_byte
*readbuf
,
3740 const gdb_byte
*writebuf
, ULONGEST offset
, ULONGEST len
,
3741 ULONGEST
*xfered_len
);
3743 static enum target_xfer_status
3744 linux_proc_xfer_memory_partial (gdb_byte
*readbuf
, const gdb_byte
*writebuf
,
3745 ULONGEST offset
, LONGEST len
, ULONGEST
*xfered_len
);
/* Implement the target_ops::xfer_partial method: dispatch on OBJECT to
   the appropriate Linux-specific transfer helper, falling back to the
   inf-ptrace implementation for everything else.  */
3747 enum target_xfer_status
3748 linux_nat_target::xfer_partial (enum target_object object
,
3749 const char *annex
, gdb_byte
*readbuf
,
3750 const gdb_byte
*writebuf
,
3751 ULONGEST offset
, ULONGEST len
, ULONGEST
*xfered_len
)
3753 if (object
== TARGET_OBJECT_SIGNAL_INFO
)
3754 return linux_xfer_siginfo (object
, annex
, readbuf
, writebuf
,
3755 offset
, len
, xfered_len
);
3757 /* The target is connected but no live inferior is selected. Pass
3758 this request down to a lower stratum (e.g., the executable
3760 if (object
== TARGET_OBJECT_MEMORY
&& inferior_ptid
== null_ptid
)
3761 return TARGET_XFER_EOF
;
3763 if (object
== TARGET_OBJECT_AUXV
)
3764 return memory_xfer_auxv (this, object
, annex
, readbuf
, writebuf
,
3765 offset
, len
, xfered_len
);
3767 if (object
== TARGET_OBJECT_OSDATA
)
3768 return linux_nat_xfer_osdata (object
, annex
, readbuf
, writebuf
,
3769 offset
, len
, xfered_len
);
3771 if (object
== TARGET_OBJECT_MEMORY
)
3773 /* GDB calculates all addresses in the largest possible address
3774 width. The address width must be masked before its final use
3775 by linux_proc_xfer_partial.
3777 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
3778 int addr_bit
= gdbarch_addr_bit (target_gdbarch ());
3780 if (addr_bit
< (sizeof (ULONGEST
) * HOST_CHAR_BIT
))
3781 offset
&= ((ULONGEST
) 1 << addr_bit
) - 1;
3783 return linux_proc_xfer_memory_partial (readbuf
, writebuf
,
3784 offset
, len
, xfered_len
);
/* Anything else: defer to the base ptrace target.  */
3787 return inf_ptrace_target::xfer_partial (object
, annex
, readbuf
, writebuf
,
3788 offset
, len
, xfered_len
);
/* Implement target_ops::thread_alive: a thread is considered alive as
   long as it is still present in our LWP list.  */
3792 linux_nat_target::thread_alive (ptid_t ptid
)
3794 /* As long as a PTID is in lwp list, consider it alive. */
3795 return find_lwp_pid (ptid
) != NULL
;
3798 /* Implement the to_update_thread_list target method for this
3802 linux_nat_target::update_thread_list ()
3804 /* We add/delete threads from the list as clone/exit events are
3805 processed, so just try deleting exited threads still in the
3807 delete_exited_threads ();
3809 /* Update the processor core that each lwp/thread was last seen
3811 for (lwp_info
*lwp
: all_lwps ())
3813 /* Avoid accessing /proc if the thread hasn't run since we last
3814 time we fetched the thread's core. Accessing /proc becomes
3815 noticeably expensive when we have thousands of LWPs. */
3816 if (lwp
->core
== -1)
3817 lwp
->core
= linux_common_core_of_thread (lwp
->ptid
);
/* Implement target_ops::pid_to_str: use "LWP N" for real threads,
   otherwise the normal process notation.  */
3822 linux_nat_target::pid_to_str (ptid_t ptid
)
3825 && (ptid
.pid () != ptid
.lwp ()
3826 || num_lwps (ptid
.pid ()) > 1))
3827 return string_printf ("LWP %ld", ptid
.lwp ());
3829 return normal_pid_to_str (ptid
);
/* Implement target_ops::thread_name via /proc/PID/task/TID/comm.  */
3833 linux_nat_target::thread_name (struct thread_info
*thr
)
3835 return linux_proc_tid_get_name (thr
->ptid
);
3838 /* Accepts an integer PID; Returns a string representing a file that
3839 can be opened to get the symbols for the child process. */
3842 linux_nat_target::pid_to_exec_file (int pid
)
3844 return linux_proc_pid_to_exec_file (pid
);
3847 /* Object representing an /proc/PID/mem open file. We keep one such
3848 file open per inferior.
3850 It might be tempting to think about only ever opening one file at
3851 most for all inferiors, closing/reopening the file as we access
3852 memory of different inferiors, to minimize number of file
3853 descriptors open, which can otherwise run into resource limits.
3854 However, that does not work correctly -- if the inferior execs and
3855 we haven't processed the exec event yet, and, we opened a
3856 /proc/PID/mem file, we will get a mem file accessing the post-exec
3857 address space, thinking we're opening it for the pre-exec address
3858 space. That is dangerous as we can poke memory (e.g. clearing
3859 breakpoints) in the post-exec memory by mistake, corrupting the
3860 inferior. For that reason, we open the mem file as early as
3861 possible, right after spawning, forking or attaching to the
3862 inferior, when the inferior is stopped and thus before it has a
3865 Note that after opening the file, even if the thread we opened it
3866 for subsequently exits, the open file is still usable for accessing
3867 memory. It's only when the whole process exits or execs that the
3868 file becomes invalid, at which point reads/writes return EOF. */
/* Constructor: record the LWP and take ownership of FD.  */
3873 proc_mem_file (ptid_t ptid
, int fd
)
3874 : m_ptid (ptid
), m_fd (fd
)
3876 gdb_assert (m_fd
!= -1);
/* Destructor body (header elided in this extract): log the close.  */
3881 linux_nat_debug_printf ("closing fd %d for /proc/%d/task/%ld/mem",
3882 m_fd
, m_ptid
.pid (), m_ptid
.lwp ());
3886 DISABLE_COPY_AND_ASSIGN (proc_mem_file
);
3894 /* The LWP this file was opened for. Just for debugging
3898 /* The file descriptor. */
3902 /* The map between an inferior process id, and the open /proc/PID/mem
3903 file. This is stored in a map instead of in a per-inferior
3904 structure because we need to be able to access memory of processes
3905 which don't have a corresponding struct inferior object. E.g.,
3906 with "detach-on-fork on" (the default), and "follow-fork parent"
3907 (also default), we don't create an inferior for the fork child, but
3908 we still need to remove breakpoints from the fork child's
3910 static std::unordered_map
<int, proc_mem_file
> proc_mem_file_map
;
3912 /* Close the /proc/PID/mem file for PID. */
3915 close_proc_mem_file (pid_t pid
)
/* Erasing the map entry runs proc_mem_file's destructor, which
   closes the fd.  */
3917 proc_mem_file_map
.erase (pid
);
3920 /* Open the /proc/PID/mem file for the process (thread group) of PTID.
3921 We actually open /proc/PID/task/LWP/mem, as that's the LWP we know
3922 exists and is stopped right now. We prefer the
3923 /proc/PID/task/LWP/mem form over /proc/LWP/mem to avoid tid-reuse
3924 races, just in case this is ever called on an already-waited
3928 open_proc_mem_file (ptid_t ptid
)
/* There must not be a file already open for this process.  */
3930 auto iter
= proc_mem_file_map
.find (ptid
.pid ());
3931 gdb_assert (iter
== proc_mem_file_map
.end ());
3934 xsnprintf (filename
, sizeof filename
,
3935 "/proc/%d/task/%ld/mem", ptid
.pid (), ptid
.lwp ());
3937 int fd
= gdb_open_cloexec (filename
, O_RDWR
| O_LARGEFILE
, 0).release ();
/* Failure path (condition elided in this extract): warn and bail.  */
3941 warning (_("opening /proc/PID/mem file for lwp %d.%ld failed: %s (%d)"),
3942 ptid
.pid (), ptid
.lwp (),
3943 safe_strerror (errno
), errno
);
/* Construct the proc_mem_file in place, keyed by process id.  */
3947 proc_mem_file_map
.emplace (std::piecewise_construct
,
3948 std::forward_as_tuple (ptid
.pid ()),
3949 std::forward_as_tuple (ptid
, fd
));
3951 linux_nat_debug_printf ("opened fd %d for lwp %d.%ld\n",
3952 fd
, ptid
.pid (), ptid
.lwp ());
3955 /* Implement the to_xfer_partial target method using /proc/PID/mem.
3956 Because we can use a single read/write call, this can be much more
3957 efficient than banging away at PTRACE_PEEKTEXT. Also, unlike
3958 PTRACE_PEEKTEXT/PTRACE_POKETEXT, this works with running
3961 static enum target_xfer_status
3962 linux_proc_xfer_memory_partial (gdb_byte
*readbuf
, const gdb_byte
*writebuf
,
3963 ULONGEST offset
, LONGEST len
,
3964 ULONGEST
*xfered_len
)
/* Look up the mem file previously opened for this process.  */
3968 auto iter
= proc_mem_file_map
.find (inferior_ptid
.pid ());
3969 if (iter
== proc_mem_file_map
.end ())
3970 return TARGET_XFER_EOF
;
3972 int fd
= iter
->second
.fd ();
3974 gdb_assert (fd
!= -1);
3976 /* Use pread64/pwrite64 if available, since they save a syscall and can
3977 handle 64-bit offsets even on 32-bit platforms (for instance, SPARC
3978 debugging a SPARC64 application). */
3980 ret
= (readbuf
? pread64 (fd
, readbuf
, len
, offset
)
3981 : pwrite64 (fd
, writebuf
, len
, offset
));
/* Fallback path (preprocessor guard elided in this extract):
   lseek + plain read/write.  */
3983 ret
= lseek (fd
, offset
, SEEK_SET
);
3985 ret
= (readbuf
? read (fd
, readbuf
, len
)
3986 : write (fd
, writebuf
, len
));
/* Error path (condition elided): report and treat as EOF.  */
3991 linux_nat_debug_printf ("accessing fd %d for pid %d failed: %s (%d)\n",
3992 fd
, inferior_ptid
.pid (),
3993 safe_strerror (errno
), errno
);
3994 return TARGET_XFER_EOF
;
3998 /* EOF means the address space is gone, the whole process exited
4000 linux_nat_debug_printf ("accessing fd %d for pid %d got EOF\n",
4001 fd
, inferior_ptid
.pid ());
4002 return TARGET_XFER_EOF
;
4007 return TARGET_XFER_OK
;
4011 /* Parse LINE as a signal set and add its set bits to SIGS. */
4014 add_line_to_sigset (const char *line
, sigset_t
*sigs
)
/* LINE is expected to end in a newline; LEN indexes its last
   character before the newline check below.  */
4016 int len
= strlen (line
) - 1;
4020 if (line
[len
] != '\n')
4021 error (_("Could not parse signal set: %s"), line
);
/* Parse hex digits (loop header elided in this extract).  */
4029 if (*p
>= '0' && *p
<= '9')
4031 else if (*p
>= 'a' && *p
<= 'f')
4032 digit
= *p
- 'a' + 10;
4034 error (_("Could not parse signal set: %s"), line
);
/* Each hex digit encodes four consecutive signal numbers; add the
   ones whose bit is set (bit-test conditions elided in this
   extract).  */
4039 sigaddset (sigs
, signum
+ 1);
4041 sigaddset (sigs
, signum
+ 2);
4043 sigaddset (sigs
, signum
+ 3);
4045 sigaddset (sigs
, signum
+ 4);
4051 /* Find process PID's pending signals from /proc/pid/status and set
4055 linux_proc_pending_signals (int pid
, sigset_t
*pending
,
4056 sigset_t
*blocked
, sigset_t
*ignored
)
4058 char buffer
[PATH_MAX
], fname
[PATH_MAX
];
/* Start from empty sets; the loop below fills them in.  */
4060 sigemptyset (pending
);
4061 sigemptyset (blocked
);
4062 sigemptyset (ignored
);
4063 xsnprintf (fname
, sizeof fname
, "/proc/%d/status", pid
);
4064 gdb_file_up procfile
= gdb_fopen_cloexec (fname
, "r");
4065 if (procfile
== NULL
)
4066 error (_("Could not open %s"), fname
);
4068 while (fgets (buffer
, PATH_MAX
, procfile
.get ()) != NULL
)
4070 /* Normal queued signals are on the SigPnd line in the status
4071 file. However, 2.6 kernels also have a "shared" pending
4072 queue for delivering signals to a thread group, so check for
4075 Unfortunately some Red Hat kernels include the shared pending
4076 queue but not the ShdPnd status field. */
4078 if (startswith (buffer
, "SigPnd:\t"))
4079 add_line_to_sigset (buffer
+ 8, pending
);
4080 else if (startswith (buffer
, "ShdPnd:\t"))
4081 add_line_to_sigset (buffer
+ 8, pending
);
4082 else if (startswith (buffer
, "SigBlk:\t"))
4083 add_line_to_sigset (buffer
+ 8, blocked
);
4084 else if (startswith (buffer
, "SigIgn:\t"))
4085 add_line_to_sigset (buffer
+ 8, ignored
);
/* Transfer the TARGET_OBJECT_OSDATA object by delegating to the common
   linux osdata reader.  Write requests are not handled here.  */
4089 static enum target_xfer_status
4090 linux_nat_xfer_osdata (enum target_object object
,
4091 const char *annex
, gdb_byte
*readbuf
,
4092 const gdb_byte
*writebuf
, ULONGEST offset
, ULONGEST len
,
4093 ULONGEST
*xfered_len
)
4095 gdb_assert (object
== TARGET_OBJECT_OSDATA
);
4097 *xfered_len
= linux_common_xfer_osdata (annex
, readbuf
, offset
, len
);
4098 if (*xfered_len
== 0)
4099 return TARGET_XFER_EOF
;
4101 return TARGET_XFER_OK
;
/* Implement target_ops::static_tracepoint_markers_by_strid: query the
   in-process agent with qTfSTM/qTsSTM packets and collect the markers
   whose string id matches STRID (or all markers if STRID is NULL).  */
4104 std::vector
<static_tracepoint_marker
>
4105 linux_nat_target::static_tracepoint_markers_by_strid (const char *strid
)
4107 char s
[IPA_CMD_BUF_SIZE
];
4108 int pid
= inferior_ptid
.pid ();
4109 std::vector
<static_tracepoint_marker
> markers
;
4111 ptid_t ptid
= ptid_t (pid
, 0);
4112 static_tracepoint_marker marker
;
/* Ask the agent for the first marker.  */
4117 memcpy (s
, "qTfSTM", sizeof ("qTfSTM"));
4118 s
[sizeof ("qTfSTM")] = 0;
4120 agent_run_command (pid
, s
, strlen (s
) + 1);
/* Make sure the process is resumed again on scope exit.  */
4123 SCOPE_EXIT
{ target_continue_no_signal (ptid
); };
/* Parse each reply and keep matching markers (loop structure elided
   in this extract).  */
4129 parse_static_tracepoint_marker_definition (p
, &p
, &marker
);
4131 if (strid
== NULL
|| marker
.str_id
== strid
)
4132 markers
.push_back (std::move (marker
));
4134 while (*p
++ == ','); /* comma-separated list */
/* Fetch subsequent markers with qTsSTM.  */
4136 memcpy (s
, "qTsSTM", sizeof ("qTsSTM"));
4137 s
[sizeof ("qTsSTM")] = 0;
4138 agent_run_command (pid
, s
, strlen (s
) + 1);
4145 /* target_is_async_p implementation. */
4148 linux_nat_target::is_async_p ()
4150 return linux_is_async_p ();
4153 /* target_can_async_p implementation. */
4156 linux_nat_target::can_async_p ()
4158 /* This flag should be checked in the common target.c code. */
4159 gdb_assert (target_async_permitted
);
4161 /* Otherwise, this targets is always able to support async mode. */
/* Capability predicates; return values elided in this extract.  */
4166 linux_nat_target::supports_non_stop ()
4171 /* to_always_non_stop_p implementation. */
4174 linux_nat_target::always_non_stop_p ()
4180 linux_nat_target::supports_multi_process ()
4186 linux_nat_target::supports_disable_randomization ()
4191 /* SIGCHLD handler that serves two purposes: In non-stop/async mode,
4192 so we notice when any child changes state, and notify the
4193 event-loop; it allows us to use sigsuspend in linux_nat_wait_1
4194 above to wait for the arrival of a SIGCHLD. */
4197 sigchld_handler (int signo
)
/* Preserve errno: this runs in signal context and must not clobber
   the interrupted code's errno.  */
4199 int old_errno
= errno
;
4201 if (debug_linux_nat
)
/* write_async_safe is usable from a signal handler, unlike the
   normal debug printf path.  */
4202 gdb_stdlog
->write_async_safe ("sigchld\n", sizeof ("sigchld\n") - 1);
4204 if (signo
== SIGCHLD
4205 && linux_nat_event_pipe
[0] != -1)
4206 async_file_mark (); /* Let the event loop know that there are
4207 events to handle. */
4212 /* Callback registered with the target events file descriptor. */
4215 handle_target_event (int error
, gdb_client_data client_data
)
4217 inferior_event_handler (INF_REG_EVENT
);
4220 /* Create/destroy the target events pipe. Returns previous state. */
4223 linux_async_pipe (int enable
)
4225 int previous
= linux_is_async_p ();
4227 if (previous
!= enable
)
4231 /* Block child signals while we create/destroy the pipe, as
4232 their handler writes to it. */
4233 block_child_signals (&prev_mask
);
/* Enable path: create a non-blocking close-on-exec pipe.  */
4237 if (gdb_pipe_cloexec (linux_nat_event_pipe
) == -1)
4238 internal_error (__FILE__
, __LINE__
,
4239 "creating event pipe failed.");
4241 fcntl (linux_nat_event_pipe
[0], F_SETFL
, O_NONBLOCK
);
4242 fcntl (linux_nat_event_pipe
[1], F_SETFL
, O_NONBLOCK
);
/* Disable path: close both ends and mark the pipe invalid.  */
4246 close (linux_nat_event_pipe
[0]);
4247 close (linux_nat_event_pipe
[1]);
4248 linux_nat_event_pipe
[0] = -1;
4249 linux_nat_event_pipe
[1] = -1;
4252 restore_child_signals_mask (&prev_mask
);
/* Return the read end of the event pipe for the event loop.  */
4259 linux_nat_target::async_wait_fd ()
4261 return linux_nat_event_pipe
[0];
4264 /* target_async implementation. */
4267 linux_nat_target::async (int enable
)
4271 if (!linux_async_pipe (1))
4273 add_file_handler (linux_nat_event_pipe
[0],
4274 handle_target_event
, NULL
,
4276 /* There may be pending events to handle. Tell the event loop
/* Disable path: unregister from the loop and tear down the pipe.  */
4283 delete_file_handler (linux_nat_event_pipe
[0]);
4284 linux_async_pipe (0);
4289 /* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
4293 linux_nat_stop_lwp (struct lwp_info
*lwp
)
/* Running LWP: log and request a stop.  */
4297 linux_nat_debug_printf ("running -> suspending %s",
4298 target_pid_to_str (lwp
->ptid
).c_str ());
4301 if (lwp
->last_resume_kind
== resume_stop
)
4303 linux_nat_debug_printf ("already stopping LWP %ld at GDB's request",
/* Not yet stopping at GDB's request: send the stop and record it.  */
4308 stop_callback (lwp
);
4309 lwp
->last_resume_kind
= resume_stop
;
4313 /* Already known to be stopped; do nothing. */
4315 if (debug_linux_nat
)
4317 if (find_thread_ptid (linux_target
, lwp
->ptid
)->stop_requested
)
4318 linux_nat_debug_printf ("already stopped/stop_requested %s",
4319 target_pid_to_str (lwp
->ptid
).c_str ());
4321 linux_nat_debug_printf ("already stopped/no stop_requested yet %s",
4322 target_pid_to_str (lwp
->ptid
).c_str ());
/* Implement target_ops::stop: apply linux_nat_stop_lwp to every LWP
   matching PTID.  */
4329 linux_nat_target::stop (ptid_t ptid
)
4331 LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT
;
4332 iterate_over_lwps (ptid
, linux_nat_stop_lwp
);
/* Implement target_ops::close: detach from the event loop, then let
   the base class close.  */
4336 linux_nat_target::close ()
4338 /* Unregister from the event loop. */
4342 inf_ptrace_target::close ();
4345 /* When requests are passed down from the linux-nat layer to the
4346 single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
4347 used. The address space pointer is stored in the inferior object,
4348 but the common code that is passed such ptid can't tell whether
4349 lwpid is a "main" process id or not (it assumes so). We reverse
4350 look up the "main" process id from the lwp here. */
4352 struct address_space
*
4353 linux_nat_target::thread_address_space (ptid_t ptid
)
4355 struct lwp_info
*lwp
;
4356 struct inferior
*inf
;
4359 if (ptid
.lwp () == 0)
4361 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
4363 lwp
= find_lwp_pid (ptid
);
4364 pid
= lwp
->ptid
.pid ();
4368 /* A (pid,lwpid,0) ptid. */
/* PID now identifies the thread group leader; find its inferior.  */
4372 inf
= find_inferior_pid (this, pid
);
4373 gdb_assert (inf
!= NULL
);
4377 /* Return the cached value of the processor core for thread PTID. */
4380 linux_nat_target::core_of_thread (ptid_t ptid
)
4382 struct lwp_info
*info
= find_lwp_pid (ptid
);
4389 /* Implementation of to_filesystem_is_local. */
4392 linux_nat_target::filesystem_is_local ()
4394 struct inferior
*inf
= current_inferior ();
/* No usable pid: treat the filesystem as local (return value of this
   branch elided in this extract).  */
4396 if (inf
->fake_pid_p
|| inf
->pid
== 0)
/* Compare the inferior's mount namespace against GDB's own.  */
4399 return linux_ns_same (inf
->pid
, LINUX_NS_MNT
);
4402 /* Convert the INF argument passed to a to_fileio_* method
4403 to a process ID suitable for passing to its corresponding
4404 linux_mntns_* function. If INF is non-NULL then the
4405 caller is requesting the filesystem seen by INF. If INF
4406 is NULL then the caller is requesting the filesystem seen
4407 by the GDB. We fall back to GDB's filesystem in the case
4408 that INF is non-NULL but its PID is unknown. */
4411 linux_nat_fileio_pid_of (struct inferior
*inf
)
4413 if (inf
== NULL
|| inf
->fake_pid_p
|| inf
->pid
== 0)
4419 /* Implementation of to_fileio_open. */
4422 linux_nat_target::fileio_open (struct inferior
*inf
, const char *filename
,
4423 int flags
, int mode
, int warn_if_slow
,
/* Translate target fileio flags/mode into host values; reject
   anything untranslatable.  */
4430 if (fileio_to_host_openflags (flags
, &nat_flags
) == -1
4431 || fileio_to_host_mode (mode
, &nat_mode
) == -1)
4433 *target_errno
= FILEIO_EINVAL
;
/* Open the file inside the inferior's mount namespace.  */
4437 fd
= linux_mntns_open_cloexec (linux_nat_fileio_pid_of (inf
),
4438 filename
, nat_flags
, nat_mode
);
4440 *target_errno
= host_to_fileio_error (errno
);
4445 /* Implementation of to_fileio_readlink. */
4447 gdb::optional
<std::string
>
4448 linux_nat_target::fileio_readlink (struct inferior
*inf
, const char *filename
,
4454 len
= linux_mntns_readlink (linux_nat_fileio_pid_of (inf
),
4455 filename
, buf
, sizeof (buf
));
/* Error path (condition elided in this extract).  */
4458 *target_errno
= host_to_fileio_error (errno
);
4462 return std::string (buf
, len
);
4465 /* Implementation of to_fileio_unlink. */
4468 linux_nat_target::fileio_unlink (struct inferior
*inf
, const char *filename
,
4473 ret
= linux_mntns_unlink (linux_nat_fileio_pid_of (inf
),
4476 *target_errno
= host_to_fileio_error (errno
);
4481 /* Implementation of the to_thread_events method. */
4484 linux_nat_target::thread_events (int enable
)
4486 report_thread_events
= enable
;
4489 linux_nat_target::linux_nat_target ()
4491 /* We don't change the stratum; this target will sit at
4492 process_stratum and thread_db will set at thread_stratum. This
4493 is a little strange, since this is a multi-threaded-capable
4494 target, but we want to be on the stack below thread_db, and we
4495 also want to be used for single-threaded processes. */
4498 /* See linux-nat.h. */
4501 linux_nat_get_siginfo (ptid_t ptid
, siginfo_t
*siginfo
)
/* Fetch the siginfo of PTID's LWP; on failure (check elided in this
   extract) clear *SIGINFO.  */
4510 ptrace (PTRACE_GETSIGINFO
, pid
, (PTRACE_TYPE_ARG3
) 0, siginfo
);
4513 memset (siginfo
, 0, sizeof (*siginfo
));
4519 /* See nat/linux-nat.h. */
4522 current_lwp_ptid (void)
4524 gdb_assert (inferior_ptid
.lwp_p ());
4525 return inferior_ptid
;
/* Module initializer: register debug commands, install the SIGCHLD
   handler, and set up the signal masks and LWP hash table.  */
4528 void _initialize_linux_nat ();
4530 _initialize_linux_nat ()
4532 add_setshow_boolean_cmd ("lin-lwp", class_maintenance
,
4533 &debug_linux_nat
, _("\
4534 Set debugging of GNU/Linux native target."), _(" \
4535 Show debugging of GNU/Linux native target."), _(" \
4536 When on, print debug messages relating to the GNU/Linux native target."),
4538 show_debug_linux_nat
,
4539 &setdebuglist
, &showdebuglist
);
4541 add_setshow_boolean_cmd ("linux-namespaces", class_maintenance
,
4542 &debug_linux_namespaces
, _("\
4543 Set debugging of GNU/Linux namespaces module."), _("\
4544 Show debugging of GNU/Linux namespaces module."), _("\
4545 Enables printf debugging output."),
4548 &setdebuglist
, &showdebuglist
);
4550 /* Install a SIGCHLD handler. */
4551 sigchld_action
.sa_handler
= sigchld_handler
;
4552 sigemptyset (&sigchld_action
.sa_mask
);
4553 sigchld_action
.sa_flags
= SA_RESTART
;
4555 /* Make it the default. */
4556 sigaction (SIGCHLD
, &sigchld_action
, NULL
);
4558 /* Make sure we don't block SIGCHLD during a sigsuspend. */
4559 gdb_sigmask (SIG_SETMASK
, NULL
, &suspend_mask
);
4560 sigdelset (&suspend_mask
, SIGCHLD
);
/* blocked_mask starts empty; signals get added as they are blocked.  */
4562 sigemptyset (&blocked_mask
);
4564 lwp_lwpid_htab_create ();
4568 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
4569 the GNU/Linux Threads library and therefore doesn't really belong
4572 /* NPTL reserves the first two RT signals, but does not provide any
4573 way for the debugger to query the signal numbers - fortunately
4574 they don't change. */
4575 static int lin_thread_signals
[] = { __SIGRTMIN
, __SIGRTMIN
+ 1 };
4577 /* See linux-nat.h. */
/* Return the number of signals NPTL reserves (the array length).  */
4580 lin_thread_get_thread_signal_num (void)
4582 return sizeof (lin_thread_signals
) / sizeof (lin_thread_signals
[0]);
4585 /* See linux-nat.h. */
4588 lin_thread_get_thread_signal (unsigned int i
)
4590 gdb_assert (i
< lin_thread_get_thread_signal_num ());
4591 return lin_thread_signals
[i
];