gdb/linux-nat.c — GNU/Linux native-dependent code common to multiple platforms (from binutils-gdb.git)
1 /* GNU/Linux native-dependent code common to multiple platforms.
2
3 Copyright (C) 2001-2022 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "inferior.h"
22 #include "infrun.h"
23 #include "target.h"
24 #include "nat/linux-nat.h"
25 #include "nat/linux-waitpid.h"
26 #include "gdbsupport/gdb_wait.h"
27 #include <unistd.h>
28 #include <sys/syscall.h>
29 #include "nat/gdb_ptrace.h"
30 #include "linux-nat.h"
31 #include "nat/linux-ptrace.h"
32 #include "nat/linux-procfs.h"
33 #include "nat/linux-personality.h"
34 #include "linux-fork.h"
35 #include "gdbthread.h"
36 #include "gdbcmd.h"
37 #include "regcache.h"
38 #include "regset.h"
39 #include "inf-child.h"
40 #include "inf-ptrace.h"
41 #include "auxv.h"
42 #include <sys/procfs.h> /* for elf_gregset etc. */
43 #include "elf-bfd.h" /* for elfcore_write_* */
44 #include "gregset.h" /* for gregset */
45 #include "gdbcore.h" /* for get_exec_file */
46 #include <ctype.h> /* for isdigit */
47 #include <sys/stat.h> /* for struct stat */
48 #include <fcntl.h> /* for O_RDONLY */
49 #include "inf-loop.h"
50 #include "gdbsupport/event-loop.h"
51 #include "event-top.h"
52 #include <pwd.h>
53 #include <sys/types.h>
54 #include <dirent.h>
55 #include "xml-support.h"
56 #include <sys/vfs.h>
57 #include "solib.h"
58 #include "nat/linux-osdata.h"
59 #include "linux-tdep.h"
60 #include "symfile.h"
61 #include "gdbsupport/agent.h"
62 #include "tracepoint.h"
63 #include "gdbsupport/buffer.h"
64 #include "target-descriptions.h"
65 #include "gdbsupport/filestuff.h"
66 #include "objfiles.h"
67 #include "nat/linux-namespaces.h"
68 #include "gdbsupport/fileio.h"
69 #include "gdbsupport/scope-exit.h"
70 #include "gdbsupport/gdb-sigmask.h"
71 #include "gdbsupport/common-debug.h"
72 #include <unordered_map>
73
74 /* This comment documents high-level logic of this file.
75
76 Waiting for events in sync mode
77 ===============================
78
79 When waiting for an event in a specific thread, we just use waitpid,
80 passing the specific pid, and not passing WNOHANG.
81
82 When waiting for an event in all threads, waitpid is not quite good:
83
84 - If the thread group leader exits while other threads in the thread
85 group still exist, waitpid(TGID, ...) hangs. That waitpid won't
86 return an exit status until the other threads in the group are
87 reaped.
88
89 - When a non-leader thread execs, that thread just vanishes without
90 reporting an exit (so we'd hang if we waited for it explicitly in
91 that case). The exec event is instead reported to the TGID pid.
92
93 The solution is to always use -1 and WNOHANG, together with
94 sigsuspend.
95
96 First, we use non-blocking waitpid to check for events. If nothing is
97 found, we use sigsuspend to wait for SIGCHLD. When SIGCHLD arrives,
98 it means something happened to a child process. As soon as we know
99 there's an event, we get back to calling nonblocking waitpid.
100
101 Note that SIGCHLD should be blocked between waitpid and sigsuspend
102 calls, so that we don't miss a signal. If SIGCHLD arrives in between,
103 when it's blocked, the signal becomes pending and sigsuspend
104 immediately notices it and returns.
105
106 Waiting for events in async mode (TARGET_WNOHANG)
107 =================================================
108
109 In async mode, GDB should always be ready to handle both user input
110 and target events, so neither blocking waitpid nor sigsuspend are
111 viable options. Instead, we should asynchronously notify the GDB main
112 event loop whenever there's an unprocessed event from the target. We
113 detect asynchronous target events by handling SIGCHLD signals. To
114 notify the event loop about target events, the self-pipe trick is used
115 --- a pipe is registered as waitable event source in the event loop,
116 the event loop select/poll's on the read end of this pipe (as well on
117 other event sources, e.g., stdin), and the SIGCHLD handler writes a
118 byte to this pipe. This is more portable than relying on
119 pselect/ppoll, since on kernels that lack those syscalls, libc
120 emulates them with select/poll+sigprocmask, and that is racy
121 (a.k.a. plain broken).
122
123 Obviously, if we fail to notify the event loop if there's a target
124 event, it's bad. OTOH, if we notify the event loop when there's no
125 event from the target, linux_nat_wait will detect that there's no real
126 event to report, and return event of type TARGET_WAITKIND_IGNORE.
127 This is mostly harmless, but it will waste time and is better avoided.
128
129 The main design point is that every time GDB is outside linux-nat.c,
130 we have a SIGCHLD handler installed that is called when something
131 happens to the target and notifies the GDB event loop. Whenever GDB
132 core decides to handle the event, and calls into linux-nat.c, we
133 process things as in sync mode, except that the we never block in
134 sigsuspend.
135
136 While processing an event, we may end up momentarily blocked in
137 waitpid calls. Those waitpid calls, while blocking, are guarantied to
138 return quickly. E.g., in all-stop mode, before reporting to the core
139 that an LWP hit a breakpoint, all LWPs are stopped by sending them
140 SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
141 Note that this is different from blocking indefinitely waiting for the
142 next event --- here, we're already handling an event.
143
144 Use of signals
145 ==============
146
147 We stop threads by sending a SIGSTOP. The use of SIGSTOP instead of another
148 signal is not entirely significant; we just need for a signal to be delivered,
149 so that we can intercept it. SIGSTOP's advantage is that it can not be
150 blocked. A disadvantage is that it is not a real-time signal, so it can only
151 be queued once; we do not keep track of other sources of SIGSTOP.
152
153 Two other signals that can't be blocked are SIGCONT and SIGKILL. But we can't
154 use them, because they have special behavior when the signal is generated -
155 not when it is delivered. SIGCONT resumes the entire thread group and SIGKILL
156 kills the entire thread group.
157
158 A delivered SIGSTOP would stop the entire thread group, not just the thread we
159 tkill'd. But we never let the SIGSTOP be delivered; we always intercept and
160 cancel it (by PTRACE_CONT without passing SIGSTOP).
161
162 We could use a real-time signal instead. This would solve those problems; we
163 could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
164 But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
165 generates it, and there are races with trying to find a signal that is not
166 blocked.
167
168 Exec events
169 ===========
170
171 The case of a thread group (process) with 3 or more threads, and a
172 thread other than the leader execs is worth detailing:
173
174 On an exec, the Linux kernel destroys all threads except the execing
175 one in the thread group, and resets the execing thread's tid to the
176 tgid. No exit notification is sent for the execing thread -- from the
177 ptracer's perspective, it appears as though the execing thread just
178 vanishes. Until we reap all other threads except the leader and the
179 execing thread, the leader will be zombie, and the execing thread will
180 be in `D (disc sleep)' state. As soon as all other threads are
181 reaped, the execing thread changes its tid to the tgid, and the
182 previous (zombie) leader vanishes, giving place to the "new"
183 leader. */
184
185 #ifndef O_LARGEFILE
186 #define O_LARGEFILE 0
187 #endif
188
189 struct linux_nat_target *linux_target;
190
191 /* Does the current host support PTRACE_GETREGSET? */
192 enum tribool have_ptrace_getregset = TRIBOOL_UNKNOWN;
193
194 /* When true, print debug messages relating to the linux native target. */
195
196 static bool debug_linux_nat;
197
/* Implement 'show debug linux-nat'.

   FILE is the output stream, VALUE the current textual setting of the
   "debug linux-nat" variable; FROM_TTY and C are unused here.  */

static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
		      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux native targets is %s.\n"),
		    value);
}
207
208 /* Print a linux-nat debug statement. */
209
210 #define linux_nat_debug_printf(fmt, ...) \
211 debug_prefixed_printf_cond (debug_linux_nat, "linux-nat", fmt, ##__VA_ARGS__)
212
213 /* Print "linux-nat" enter/exit debug statements. */
214
215 #define LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT \
216 scoped_debug_enter_exit (debug_linux_nat, "linux-nat")
217
/* Node of a singly-linked list recording a child pid whose stop we
   have consumed from the kernel but not yet reported to the core.  */
struct simple_pid_list
{
  int pid;			/* Process/LWP id, as returned by waitpid.  */
  int status;			/* Raw waitpid status for PID.  */
  struct simple_pid_list *next;	/* Next node, or NULL.  */
};
/* Head of the list of early-stopped pids.  */
static struct simple_pid_list *stopped_pids;
225
226 /* Whether target_thread_events is in effect. */
227 static int report_thread_events;
228
229 /* Async mode support. */
230
231 /* The read/write ends of the pipe registered as waitable file in the
232 event loop. */
233 static int linux_nat_event_pipe[2] = { -1, -1 };
234
235 /* True if we're currently in async mode. */
236 #define linux_is_async_p() (linux_nat_event_pipe[0] != -1)
237
238 /* Flush the event pipe. */
239
240 static void
241 async_file_flush (void)
242 {
243 int ret;
244 char buf;
245
246 do
247 {
248 ret = read (linux_nat_event_pipe[0], &buf, 1);
249 }
250 while (ret >= 0 || (ret == -1 && errno == EINTR));
251 }
252
253 /* Put something (anything, doesn't matter what, or how much) in event
254 pipe, so that the select/poll in the event-loop realizes we have
255 something to process. */
256
257 static void
258 async_file_mark (void)
259 {
260 int ret;
261
262 /* It doesn't really matter what the pipe contains, as long we end
263 up with something in it. Might as well flush the previous
264 left-overs. */
265 async_file_flush ();
266
267 do
268 {
269 ret = write (linux_nat_event_pipe[1], "+", 1);
270 }
271 while (ret == -1 && errno == EINTR);
272
273 /* Ignore EAGAIN. If the pipe is full, the event loop will already
274 be awakened anyway. */
275 }
276
277 static int kill_lwp (int lwpid, int signo);
278
279 static int stop_callback (struct lwp_info *lp);
280
281 static void block_child_signals (sigset_t *prev_mask);
282 static void restore_child_signals_mask (sigset_t *prev_mask);
283
284 struct lwp_info;
285 static struct lwp_info *add_lwp (ptid_t ptid);
286 static void purge_lwp_list (int pid);
287 static void delete_lwp (ptid_t ptid);
288 static struct lwp_info *find_lwp_pid (ptid_t ptid);
289
290 static int lwp_status_pending_p (struct lwp_info *lp);
291
292 static void save_stop_reason (struct lwp_info *lp);
293
294 static void close_proc_mem_file (pid_t pid);
295 static void open_proc_mem_file (ptid_t ptid);
296
297 \f
298 /* LWP accessors. */
299
/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  /* Accessor: the ptid identifying LWP.  */
  return lwp->ptid;
}
307
/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  /* Store the arch-specific per-LWP data; ownership passes to LWP
     (released via low_delete_thread in ~lwp_info).  */
  lwp->arch_private = info;
}
316
/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  /* Accessor: arch-specific per-LWP data, may be NULL.  */
  return lwp->arch_private;
}
324
/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  /* Nonzero iff we believe LWP is in a ptrace stop.  */
  return lwp->stopped;
}
332
/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  /* Accessor: why LWP last stopped (breakpoint, watchpoint, ...).  */
  return lwp->stop_reason;
}
340
/* See nat/linux-nat.h.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  /* Nonzero iff LWP was last resumed with a single-step request.  */
  return lwp->step;
}
348
349 \f
350 /* Trivial list manipulation functions to keep track of a list of
351 new stopped processes. */
352 static void
353 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
354 {
355 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
356
357 new_pid->pid = pid;
358 new_pid->status = status;
359 new_pid->next = *listp;
360 *listp = new_pid;
361 }
362
363 static int
364 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
365 {
366 struct simple_pid_list **p;
367
368 for (p = listp; *p != NULL; p = &(*p)->next)
369 if ((*p)->pid == pid)
370 {
371 struct simple_pid_list *next = (*p)->next;
372
373 *statusp = (*p)->status;
374 xfree (*p);
375 *p = next;
376 return 1;
377 }
378 return 0;
379 }
380
/* Return the ptrace options that we want to try to enable.

   ATTACHED is nonzero if we attached to an already-running inferior
   (in which case it must not be killed when GDB exits).  */

static int
linux_nat_ptrace_options (int attached)
{
  /* Always trace syscalls, fork/vfork/exec events, and vfork-done.  */
  int options = (PTRACE_O_TRACESYSGOOD
		 | PTRACE_O_TRACEVFORKDONE
		 | PTRACE_O_TRACEVFORK
		 | PTRACE_O_TRACEFORK
		 | PTRACE_O_TRACEEXEC);

  /* Only processes we spawned ourselves should die along with GDB.  */
  if (!attached)
    options |= PTRACE_O_EXITKILL;

  return options;
}
399
/* Initialize ptrace and procfs warnings and check for supported
   ptrace features given PID.

   ATTACHED should be nonzero iff we attached to the inferior.  */

static void
linux_init_ptrace_procfs (pid_t pid, int attached)
{
  /* Which PTRACE_O_* event options to request for this inferior.  */
  int options = linux_nat_ptrace_options (attached);

  linux_enable_event_reporting (pid, options);
  /* Emit one-time warnings about missing kernel/proc support.  */
  linux_ptrace_init_warnings ();
  linux_proc_init_warnings ();
}
414
/* Virtual destructor; no state of its own beyond the base class.  */
linux_nat_target::~linux_nat_target ()
{}
417
/* Target hook called after attaching to PID: enable ptrace event
   reporting (ATTACHED == 1, so no PTRACE_O_EXITKILL).  */
void
linux_nat_target::post_attach (int pid)
{
  linux_init_ptrace_procfs (pid, 1);
}
423
/* Implement the virtual inf_ptrace_target::post_startup_inferior method.
   Enable ptrace event reporting for a freshly spawned inferior
   (ATTACHED == 0, so PTRACE_O_EXITKILL is requested too).  */

void
linux_nat_target::post_startup_inferior (ptid_t ptid)
{
  linux_init_ptrace_procfs (ptid.pid (), 0);
}
431
432 /* Return the number of known LWPs in the tgid given by PID. */
433
434 static int
435 num_lwps (int pid)
436 {
437 int count = 0;
438
439 for (const lwp_info *lp ATTRIBUTE_UNUSED : all_lwps ())
440 if (lp->ptid.pid () == pid)
441 count++;
442
443 return count;
444 }
445
/* Deleter for lwp_info unique_ptr specialisation.  Removes the LWP
   from the global lists/htab and frees it via delete_lwp, rather than
   plain `delete'.  */

struct lwp_deleter
{
  void operator() (struct lwp_info *lwp) const
  {
    delete_lwp (lwp->ptid);
  }
};
455
456 /* A unique_ptr specialisation for lwp_info. */
457
458 typedef std::unique_ptr<struct lwp_info, lwp_deleter> lwp_info_up;
459
/* Target hook for follow_fork.

   CHILD_INF/CHILD_PTID identify the new (v)fork child; FORK_KIND is
   TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED.  If FOLLOW_CHILD
   we stay attached to the child; if DETACH_FORK the non-followed side
   is detached.  */

void
linux_nat_target::follow_fork (inferior *child_inf, ptid_t child_ptid,
			       target_waitkind fork_kind, bool follow_child,
			       bool detach_fork)
{
  /* Let the common ptrace layer update inferior/thread bookkeeping
     first.  */
  inf_ptrace_target::follow_fork (child_inf, child_ptid, fork_kind,
				  follow_child, detach_fork);

  if (!follow_child)
    {
      bool has_vforked = fork_kind == TARGET_WAITKIND_VFORKED;
      ptid_t parent_ptid = inferior_ptid;
      int parent_pid = parent_ptid.lwp ();
      int child_pid = child_ptid.lwp ();

      /* We're already attached to the parent, by default.  */
      lwp_info *child_lp = add_lwp (child_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;

      /* Detach new forked process?  */
      if (detach_fork)
	{
	  int child_stop_signal = 0;
	  bool detach_child = true;

	  /* Move CHILD_LP into a unique_ptr and clear the source pointer
	     to prevent us doing anything stupid with it.  */
	  lwp_info_up child_lp_ptr (child_lp);
	  child_lp = nullptr;

	  linux_target->low_prepare_to_resume (child_lp_ptr.get ());

	  /* When debugging an inferior in an architecture that supports
	     hardware single stepping on a kernel without commit
	     6580807da14c423f0d0a708108e6df6ebc8bc83d, the vfork child
	     process starts with the TIF_SINGLESTEP/X86_EFLAGS_TF bits
	     set if the parent process had them set.
	     To work around this, single step the child process
	     once before detaching to clear the flags.  */

	  /* Note that we consult the parent's architecture instead of
	     the child's because there's no inferior for the child at
	     this point.  */
	  if (!gdbarch_software_single_step_p (target_thread_architecture
					       (parent_ptid)))
	    {
	      int status;

	      linux_disable_event_reporting (child_pid);
	      if (ptrace (PTRACE_SINGLESTEP, child_pid, 0, 0) < 0)
		perror_with_name (_("Couldn't do single step"));
	      if (my_waitpid (child_pid, &status, 0) < 0)
		perror_with_name (_("Couldn't wait vfork process"));
	      else
		{
		  /* Only detach if the step left the child stopped;
		     forward whatever signal stopped it.  */
		  detach_child = WIFSTOPPED (status);
		  child_stop_signal = WSTOPSIG (status);
		}
	    }

	  if (detach_child)
	    {
	      int signo = child_stop_signal;

	      /* Don't deliver a stop signal the user has asked GDB to
		 suppress.  */
	      if (signo != 0
		  && !signal_pass_state (gdb_signal_from_host (signo)))
		signo = 0;
	      ptrace (PTRACE_DETACH, child_pid, 0, signo);

	      close_proc_mem_file (child_pid);
	    }
	}

      if (has_vforked)
	{
	  struct lwp_info *parent_lp;

	  parent_lp = find_lwp_pid (parent_ptid);
	  gdb_assert (linux_supports_tracefork () >= 0);

	  if (linux_supports_tracevforkdone ())
	    {
	      linux_nat_debug_printf ("waiting for VFORK_DONE on %d",
				      parent_pid);
	      parent_lp->stopped = 1;

	      /* We'll handle the VFORK_DONE event like any other
		 event, in target_wait.  */
	    }
	  else
	    {
	      /* We can't insert breakpoints until the child has
		 finished with the shared memory region.  We need to
		 wait until that happens.  Ideal would be to just
		 call:
		 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
		 - waitpid (parent_pid, &status, __WALL);
		 However, most architectures can't handle a syscall
		 being traced on the way out if it wasn't traced on
		 the way in.

		 We might also think to loop, continuing the child
		 until it exits or gets a SIGTRAP.  One problem is
		 that the child might call ptrace with PTRACE_TRACEME.

		 There's no simple and reliable way to figure out when
		 the vforked child will be done with its copy of the
		 shared memory.  We could step it out of the syscall,
		 two instructions, let it go, and then single-step the
		 parent once.  When we have hardware single-step, this
		 would work; with software single-step it could still
		 be made to work but we'd have to be able to insert
		 single-step breakpoints in the child, and we'd have
		 to insert -just- the single-step breakpoint in the
		 parent.  Very awkward.

		 In the end, the best we can do is to make sure it
		 runs for a little while.  Hopefully it will be out of
		 range of any breakpoints we reinsert.  Usually this
		 is only the single-step breakpoint at vfork's return
		 point.  */

	      linux_nat_debug_printf ("no VFORK_DONE support, sleeping a bit");

	      usleep (10000);

	      /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
		 and leave it pending.  The next linux_nat_resume call
		 will notice a pending event, and bypasses actually
		 resuming the inferior.  */
	      parent_lp->status = 0;
	      parent_lp->waitstatus.set_vfork_done ();
	      parent_lp->stopped = 1;

	      /* If we're in async mode, need to tell the event loop
		 there's something here to process.  */
	      if (target_is_async_p ())
		async_file_mark ();
	    }
	}
    }
  else
    {
      struct lwp_info *child_lp;

      /* Following the child: record an lwp_info for it; the child is
	 already ptrace-stopped at this point.  */
      child_lp = add_lwp (child_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;
    }
}
613
614 \f
/* Fork catchpoints need PTRACE_O_TRACEFORK; return nonzero (failure)
   when the kernel lacks fork tracing support.  */
int
linux_nat_target::insert_fork_catchpoint (int pid)
{
  return !linux_supports_tracefork ();
}
620
/* Nothing to undo for fork catchpoints; always succeeds.  */
int
linux_nat_target::remove_fork_catchpoint (int pid)
{
  return 0;
}
626
/* Vfork catchpoints need PTRACE_O_TRACEVFORK; return nonzero
   (failure) when the kernel lacks fork tracing support.  */
int
linux_nat_target::insert_vfork_catchpoint (int pid)
{
  return !linux_supports_tracefork ();
}
632
/* Nothing to undo for vfork catchpoints; always succeeds.  */
int
linux_nat_target::remove_vfork_catchpoint (int pid)
{
  return 0;
}
638
/* Exec catchpoints need PTRACE_O_TRACEEXEC; return nonzero (failure)
   when the kernel lacks fork/exec tracing support.  */
int
linux_nat_target::insert_exec_catchpoint (int pid)
{
  return !linux_supports_tracefork ();
}
644
/* Nothing to undo for exec catchpoints; always succeeds.  */
int
linux_nat_target::remove_exec_catchpoint (int pid)
{
  return 0;
}
650
/* Enable syscall catchpoints for PID.  Returns 0 on success, nonzero
   if the kernel lacks PTRACE_O_TRACESYSGOOD support.  */
int
linux_nat_target::set_syscall_catchpoint (int pid, bool needed, int any_count,
					  gdb::array_view<const int> syscall_counts)
{
  if (!linux_supports_tracesysgood ())
    return 1;

  /* On GNU/Linux, we ignore the arguments.  It means that we only
     enable the syscall catchpoints, but do not disable them.

     Also, we do not use the `syscall_counts' information because we do not
     filter system calls here.  We let GDB do the logic for us.  */
  return 0;
}
665
666 /* List of known LWPs, keyed by LWP PID. This speeds up the common
667 case of mapping a PID returned from the kernel to our corresponding
668 lwp_info data structure. */
669 static htab_t lwp_lwpid_htab;
670
671 /* Calculate a hash from a lwp_info's LWP PID. */
672
673 static hashval_t
674 lwp_info_hash (const void *ap)
675 {
676 const struct lwp_info *lp = (struct lwp_info *) ap;
677 pid_t pid = lp->ptid.lwp ();
678
679 return iterative_hash_object (pid, 0);
680 }
681
682 /* Equality function for the lwp_info hash table. Compares the LWP's
683 PID. */
684
685 static int
686 lwp_lwpid_htab_eq (const void *a, const void *b)
687 {
688 const struct lwp_info *entry = (const struct lwp_info *) a;
689 const struct lwp_info *element = (const struct lwp_info *) b;
690
691 return entry->ptid.lwp () == element->ptid.lwp ();
692 }
693
/* Create the lwp_lwpid_htab hash table.  */

static void
lwp_lwpid_htab_create (void)
{
  /* 100 is just an initial size hint; the table grows as needed.  No
     element delete function -- lwp_info lifetime is managed by
     delete_lwp/purge_lwp_list.  */
  lwp_lwpid_htab = htab_create (100, lwp_info_hash, lwp_lwpid_htab_eq, NULL);
}
701
702 /* Add LP to the hash table. */
703
704 static void
705 lwp_lwpid_htab_add_lwp (struct lwp_info *lp)
706 {
707 void **slot;
708
709 slot = htab_find_slot (lwp_lwpid_htab, lp, INSERT);
710 gdb_assert (slot != NULL && *slot == NULL);
711 *slot = lp;
712 }
713
714 /* Head of doubly-linked list of known LWPs. Sorted by reverse
715 creation order. This order is assumed in some cases. E.g.,
716 reaping status after killing alls lwps of a process: the leader LWP
717 must be reaped last. */
718
719 static intrusive_list<lwp_info> lwp_list;
720
/* See linux-nat.h.  */

lwp_info_range
all_lwps ()
{
  /* Iterates lwp_list in reverse creation order.  */
  return lwp_info_range (lwp_list.begin ());
}
728
/* See linux-nat.h.  */

lwp_info_safe_range
all_lwps_safe ()
{
  /* Like all_lwps, but safe against deletion of the current element
     while iterating.  */
  return lwp_info_safe_range (lwp_list.begin ());
}
736
/* Add LP to sorted-by-reverse-creation-order doubly-linked list.  */

static void
lwp_list_add (struct lwp_info *lp)
{
  /* Newest LWP goes first, preserving the reverse-creation order that
     other code (e.g. kill-time reaping) relies on.  */
  lwp_list.push_front (*lp);
}
744
/* Remove LP from sorted-by-reverse-creation-order doubly-linked
   list.  LP itself is not freed here.  */

static void
lwp_list_remove (struct lwp_info *lp)
{
  /* Remove from sorted-by-creation-order list.  */
  lwp_list.erase (lwp_list.iterator_to (*lp));
}
754
755 \f
756
757 /* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
758 _initialize_linux_nat. */
759 static sigset_t suspend_mask;
760
761 /* Signals to block to make that sigsuspend work. */
762 static sigset_t blocked_mask;
763
764 /* SIGCHLD action. */
765 static struct sigaction sigchld_action;
766
/* Block child signals (SIGCHLD and linux threads signals), and store
   the previous mask in PREV_MASK.  */

static void
block_child_signals (sigset_t *prev_mask)
{
  /* Make sure SIGCHLD is blocked.  BLOCKED_MASK is built lazily here
     rather than at initialization.  */
  if (!sigismember (&blocked_mask, SIGCHLD))
    sigaddset (&blocked_mask, SIGCHLD);

  gdb_sigmask (SIG_BLOCK, &blocked_mask, prev_mask);
}
779
/* Restore child signals mask, previously returned by
   block_child_signals.  */

static void
restore_child_signals_mask (sigset_t *prev_mask)
{
  /* SIG_SETMASK (not SIG_UNBLOCK) so the mask is restored exactly.  */
  gdb_sigmask (SIG_SETMASK, prev_mask, NULL);
}
788
789 /* Mask of signals to pass directly to the inferior. */
790 static sigset_t pass_mask;
791
/* Update signals to pass to the inferior.  PASS_SIGNALS is indexed by
   GDB signal number; a nonzero entry means deliver that signal
   directly without stopping.  */
void
linux_nat_target::pass_signals
  (gdb::array_view<const unsigned char> pass_signals)
{
  int signo;

  /* Rebuild the global PASS_MASK from scratch.  */
  sigemptyset (&pass_mask);

  /* Map each host signal to its GDB signal and consult the caller's
     table.  */
  for (signo = 1; signo < NSIG; signo++)
    {
      int target_signo = gdb_signal_from_host (signo);
      if (target_signo < pass_signals.size () && pass_signals[target_signo])
	sigaddset (&pass_mask, signo);
    }
}
808
809 \f
810
811 /* Prototypes for local functions. */
812 static int stop_wait_callback (struct lwp_info *lp);
813 static int resume_stopped_resumed_lwps (struct lwp_info *lp, const ptid_t wait_ptid);
814 static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
815
816 \f
817
/* Destroy and free LP.  */

lwp_info::~lwp_info ()
{
  /* Let the arch specific bits release arch_lwp_info.  */
  linux_target->low_delete_thread (this->arch_private);
}
825
826 /* Traversal function for purge_lwp_list. */
827
828 static int
829 lwp_lwpid_htab_remove_pid (void **slot, void *info)
830 {
831 struct lwp_info *lp = (struct lwp_info *) *slot;
832 int pid = *(int *) info;
833
834 if (lp->ptid.pid () == pid)
835 {
836 htab_clear_slot (lwp_lwpid_htab, slot);
837 lwp_list_remove (lp);
838 delete lp;
839 }
840
841 return 1;
842 }
843
/* Remove all LWPs belong to PID from the lwp list.  */

static void
purge_lwp_list (int pid)
{
  /* noresize: deletions only, so the table never needs to grow.  */
  htab_traverse_noresize (lwp_lwpid_htab, lwp_lwpid_htab_remove_pid, &pid);
}
851
/* Add the LWP specified by PTID to the list.  PTID is the first LWP
   in the process.  Return a pointer to the structure describing the
   new LWP.

   This differs from add_lwp in that we don't let the arch specific
   bits know about this new thread.  Current clients of this callback
   take the opportunity to install watchpoints in the new thread, and
   we shouldn't do that for the first thread.  If we're spawning a
   child ("run"), the thread executes the shell wrapper first, and we
   shouldn't touch it until it execs the program we want to debug.
   For "attach", it'd be okay to call the callback, but it's not
   necessary, because watchpoints can't yet have been inserted into
   the inferior.  */

static struct lwp_info *
add_initial_lwp (ptid_t ptid)
{
  /* Callers must pass a fully-qualified ptid with an lwp id.  */
  gdb_assert (ptid.lwp_p ());

  lwp_info *lp = new lwp_info (ptid);


  /* Add to sorted-by-reverse-creation-order list.  */
  lwp_list_add (lp);

  /* Add to keyed-by-pid htab.  */
  lwp_lwpid_htab_add_lwp (lp);

  return lp;
}
882
883 /* Add the LWP specified by PID to the list. Return a pointer to the
884 structure describing the new LWP. The LWP should already be
885 stopped. */
886
887 static struct lwp_info *
888 add_lwp (ptid_t ptid)
889 {
890 struct lwp_info *lp;
891
892 lp = add_initial_lwp (ptid);
893
894 /* Let the arch specific bits know about this new thread. Current
895 clients of this callback take the opportunity to install
896 watchpoints in the new thread. We don't do this for the first
897 thread though. See add_initial_lwp. */
898 linux_target->low_new_thread (lp);
899
900 return lp;
901 }
902
/* Remove the LWP specified by PID from the list.  A no-op if no such
   LWP is known.  */

static void
delete_lwp (ptid_t ptid)
{
  /* Stack-allocated key used only for the hash lookup.  */
  lwp_info dummy (ptid);

  void **slot = htab_find_slot (lwp_lwpid_htab, &dummy, NO_INSERT);
  if (slot == NULL)
    return;

  lwp_info *lp = *(struct lwp_info **) slot;
  gdb_assert (lp != NULL);

  htab_clear_slot (lwp_lwpid_htab, slot);

  /* Remove from sorted-by-creation-order list.  */
  lwp_list_remove (lp);

  /* Release.  */
  delete lp;
}
925
926 /* Return a pointer to the structure describing the LWP corresponding
927 to PID. If no corresponding LWP could be found, return NULL. */
928
929 static struct lwp_info *
930 find_lwp_pid (ptid_t ptid)
931 {
932 int lwp;
933
934 if (ptid.lwp_p ())
935 lwp = ptid.lwp ();
936 else
937 lwp = ptid.pid ();
938
939 lwp_info dummy (ptid_t (0, lwp));
940 return (struct lwp_info *) htab_find (lwp_lwpid_htab, &dummy);
941 }
942
943 /* See nat/linux-nat.h. */
944
945 struct lwp_info *
946 iterate_over_lwps (ptid_t filter,
947 gdb::function_view<iterate_over_lwps_ftype> callback)
948 {
949 for (lwp_info *lp : all_lwps_safe ())
950 {
951 if (lp->ptid.matches (filter))
952 {
953 if (callback (lp) != 0)
954 return lp;
955 }
956 }
957
958 return NULL;
959 }
960
/* Update our internal state when changing from one checkpoint to
   another indicated by NEW_PTID.  We can only switch single-threaded
   applications, so we only create one new LWP, and the previous list
   is discarded.  */

void
linux_nat_switch_fork (ptid_t new_ptid)
{
  struct lwp_info *lp;

  /* Drop every LWP of the current inferior; the checkpoint is
     single-threaded.  */
  purge_lwp_list (inferior_ptid.pid ());

  lp = add_lwp (new_ptid);
  lp->stopped = 1;

  /* This changes the thread's ptid while preserving the gdb thread
     num.  Also changes the inferior pid, while preserving the
     inferior num.  */
  thread_change_ptid (linux_target, inferior_ptid, new_ptid);

  /* We've just told GDB core that the thread changed target id, but,
     in fact, it really is a different thread, with different register
     contents.  */
  registers_changed ();
}
986
987 /* Handle the exit of a single thread LP. */
988
989 static void
990 exit_lwp (struct lwp_info *lp)
991 {
992 struct thread_info *th = find_thread_ptid (linux_target, lp->ptid);
993
994 if (th)
995 {
996 if (print_thread_events)
997 printf_unfiltered (_("[%s exited]\n"),
998 target_pid_to_str (lp->ptid).c_str ());
999
1000 delete_thread (th);
1001 }
1002
1003 delete_lwp (lp->ptid);
1004 }
1005
/* Wait for the LWP specified by PTID, which we have just attached to.
   Returns a wait status for that LWP, to cache.  *SIGNALLED is set to
   1 if the LWP stopped with a signal other than the expected
   SIGSTOP.  */

static int
linux_nat_post_attach_wait (ptid_t ptid, int *signalled)
{
  pid_t new_pid, pid = ptid.lwp ();
  int status;

  if (linux_proc_pid_is_stopped (pid))
    {
      linux_nat_debug_printf ("Attaching to a stopped process");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
	 is not an RT signal, it can only be queued once.  */
      kill_lwp (pid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the SIGSTOP
	 (or a higher priority signal, just like normal PTRACE_ATTACH).  */
      ptrace (PTRACE_CONT, pid, 0, 0);
    }

  /* Make sure the initial process is stopped.  The user-level threads
     layer might want to poke around in the inferior, and that won't
     work if things haven't stabilized yet.  */
  new_pid = my_waitpid (pid, &status, __WALL);
  gdb_assert (pid == new_pid);

  if (!WIFSTOPPED (status))
    {
      /* The pid we tried to attach has apparently just exited.  */
      linux_nat_debug_printf ("Failed to stop %d: %s", pid,
			      status_to_str (status).c_str ());
      return status;
    }

  if (WSTOPSIG (status) != SIGSTOP)
    {
      /* Some other signal beat our SIGSTOP; let the caller know.  */
      *signalled = 1;
      linux_nat_debug_printf ("Received %s after attaching",
			      status_to_str (status).c_str ());
    }

  return status;
}
1062
/* Start a new inferior running EXEC_FILE with arguments ALLARGS and
   environment ENV, then open its /proc/PID/mem file.  */
void
linux_nat_target::create_inferior (const char *exec_file,
				   const std::string &allargs,
				   char **env, int from_tty)
{
  /* Optionally disable ASLR for the child; restored on scope exit.  */
  maybe_disable_address_space_randomization restore_personality
    (disable_randomization);

  /* The fork_child mechanism is synchronous and calls target_wait, so
     we have to mask the async mode.  */

  /* Make sure we report all signals during startup.  */
  pass_signals ({});

  inf_ptrace_target::create_inferior (exec_file, allargs, env, from_tty);

  open_proc_mem_file (inferior_ptid);
}
1081
1082 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1083 already attached. Returns true if a new LWP is found, false
1084 otherwise. */
1085
1086 static int
1087 attach_proc_task_lwp_callback (ptid_t ptid)
1088 {
1089 struct lwp_info *lp;
1090
1091 /* Ignore LWPs we're already attached to. */
1092 lp = find_lwp_pid (ptid);
1093 if (lp == NULL)
1094 {
1095 int lwpid = ptid.lwp ();
1096
1097 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
1098 {
1099 int err = errno;
1100
1101 /* Be quiet if we simply raced with the thread exiting.
1102 EPERM is returned if the thread's task still exists, and
1103 is marked as exited or zombie, as well as other
1104 conditions, so in that case, confirm the status in
1105 /proc/PID/status. */
1106 if (err == ESRCH
1107 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1108 {
1109 linux_nat_debug_printf
1110 ("Cannot attach to lwp %d: thread is gone (%d: %s)",
1111 lwpid, err, safe_strerror (err));
1112
1113 }
1114 else
1115 {
1116 std::string reason
1117 = linux_ptrace_attach_fail_reason_string (ptid, err);
1118
1119 warning (_("Cannot attach to lwp %d: %s"),
1120 lwpid, reason.c_str ());
1121 }
1122 }
1123 else
1124 {
1125 linux_nat_debug_printf ("PTRACE_ATTACH %s, 0, 0 (OK)",
1126 ptid.to_string ().c_str ());
1127
1128 lp = add_lwp (ptid);
1129
1130 /* The next time we wait for this LWP we'll see a SIGSTOP as
1131 PTRACE_ATTACH brings it to a halt. */
1132 lp->signalled = 1;
1133
1134 /* We need to wait for a stop before being able to make the
1135 next ptrace call on this LWP. */
1136 lp->must_set_ptrace_flags = 1;
1137
1138 /* So that wait collects the SIGSTOP. */
1139 lp->resumed = 1;
1140
1141 /* Also add the LWP to gdb's thread list, in case a
1142 matching libthread_db is not found (or the process uses
1143 raw clone). */
1144 add_thread (linux_target, lp->ptid);
1145 set_running (linux_target, lp->ptid, true);
1146 set_executing (linux_target, lp->ptid, true);
1147 }
1148
1149 return 1;
1150 }
1151 return 0;
1152 }
1153
/* Implement the "attach" target_ops method: attach to the running
   process whose pid is given by ARGS, and take control of all of its
   LWPs.  */

void
linux_nat_target::attach (const char *args, int from_tty)
{
  struct lwp_info *lp;
  int status;
  ptid_t ptid;

  /* Make sure we report all signals during attach.  */
  pass_signals ({});

  try
    {
      inf_ptrace_target::attach (args, from_tty);
    }
  catch (const gdb_exception_error &ex)
    {
      /* Augment the generic ptrace error with a Linux-specific
	 failure reason gathered from /proc, if one is available.  */
      pid_t pid = parse_pid_to_attach (args);
      std::string reason = linux_ptrace_attach_fail_reason (pid);

      if (!reason.empty ())
	throw_error (ex.error, "warning: %s\n%s", reason.c_str (),
		     ex.what ());
      else
	throw_error (ex.error, "%s", ex.what ());
    }

  /* The ptrace base target adds the main thread with (pid,0,0)
     format.  Decorate it with lwp info.  */
  ptid = ptid_t (inferior_ptid.pid (),
		 inferior_ptid.pid ());
  thread_change_ptid (linux_target, inferior_ptid, ptid);

  /* Add the initial process as the first LWP to the list.  */
  lp = add_initial_lwp (ptid);

  status = linux_nat_post_attach_wait (lp->ptid, &lp->signalled);
  if (!WIFSTOPPED (status))
    {
      /* The process died before we could stop it; report why the
	 attach failed, after mourning the inferior.  */
      if (WIFEXITED (status))
	{
	  int exit_code = WEXITSTATUS (status);

	  target_terminal::ours ();
	  target_mourn_inferior (inferior_ptid);
	  if (exit_code == 0)
	    error (_("Unable to attach: program exited normally."));
	  else
	    error (_("Unable to attach: program exited with code %d."),
		   exit_code);
	}
      else if (WIFSIGNALED (status))
	{
	  enum gdb_signal signo;

	  target_terminal::ours ();
	  target_mourn_inferior (inferior_ptid);

	  signo = gdb_signal_from_host (WTERMSIG (status));
	  error (_("Unable to attach: program terminated with signal "
		   "%s, %s."),
		 gdb_signal_to_name (signo),
		 gdb_signal_to_string (signo));
	}

      /* Not stopped, not exited, not signalled: should not happen.  */
      internal_error (__FILE__, __LINE__,
		      _("unexpected status %d for PID %ld"),
		      status, (long) ptid.lwp ());
    }

  lp->stopped = 1;

  open_proc_mem_file (lp->ptid);

  /* Save the wait status to report later.  */
  lp->resumed = 1;
  linux_nat_debug_printf ("waitpid %ld, saving status %s",
			  (long) lp->ptid.pid (),
			  status_to_str (status).c_str ());

  lp->status = status;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  The inferior may be using raw clone instead of
     using pthreads.  But even if it is using pthreads, thread_db
     walks structures in the inferior's address space to find the list
     of threads/LWPs, and those structures may well be corrupted.
     Note that once thread_db is loaded, we'll still use it to list
     threads and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (lp->ptid.pid (),
				  attach_proc_task_lwp_callback);

  if (target_can_async_p ())
    target_async (1);
}
1248
1249 /* Ptrace-detach the thread with pid PID. */
1250
1251 static void
1252 detach_one_pid (int pid, int signo)
1253 {
1254 if (ptrace (PTRACE_DETACH, pid, 0, signo) < 0)
1255 {
1256 int save_errno = errno;
1257
1258 /* We know the thread exists, so ESRCH must mean the lwp is
1259 zombie. This can happen if one of the already-detached
1260 threads exits the whole thread group. In that case we're
1261 still attached, and must reap the lwp. */
1262 if (save_errno == ESRCH)
1263 {
1264 int ret, status;
1265
1266 ret = my_waitpid (pid, &status, __WALL);
1267 if (ret == -1)
1268 {
1269 warning (_("Couldn't reap LWP %d while detaching: %s"),
1270 pid, safe_strerror (errno));
1271 }
1272 else if (!WIFEXITED (status) && !WIFSIGNALED (status))
1273 {
1274 warning (_("Reaping LWP %d while detaching "
1275 "returned unexpected status 0x%x"),
1276 pid, status);
1277 }
1278 }
1279 else
1280 error (_("Can't detach %d: %s"),
1281 pid, safe_strerror (save_errno));
1282 }
1283 else
1284 linux_nat_debug_printf ("PTRACE_DETACH (%d, %s, 0) (OK)",
1285 pid, strsignal (signo));
1286 }
1287
1288 /* Get pending signal of THREAD as a host signal number, for detaching
1289 purposes. This is the signal the thread last stopped for, which we
1290 need to deliver to the thread when detaching, otherwise, it'd be
1291 suppressed/lost. */
1292
static int
get_detach_signal (struct lwp_info *lp)
{
  enum gdb_signal signo = GDB_SIGNAL_0;

  /* If we paused threads momentarily, we may have stored pending
     events in lp->status or lp->waitstatus (see stop_wait_callback),
     and GDB core hasn't seen any signal for those threads.
     Otherwise, the last signal reported to the core is found in the
     thread object's stop_signal.

     There's a corner case that isn't handled here at present.  Only
     if the thread stopped with a TARGET_WAITKIND_STOPPED does
     stop_signal make sense as a real signal to pass to the inferior.
     Some catchpoint related events, like
     TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
     to GDB_SIGNAL_SIGTRAP when the catchpoint triggers.  But,
     those traps are debug API (ptrace in our case) related and
     induced; the inferior wouldn't see them if it wasn't being
     traced.  Hence, we should never pass them to the inferior, even
     when set to pass state.  Since this corner case isn't handled by
     infrun.c when proceeding with a signal, for consistency, neither
     do we handle it here (or elsewhere in the file we check for
     signal pass state).  Normally SIGTRAP isn't set to pass state, so
     this is really a corner case.  */

  if (lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
    signo = GDB_SIGNAL_0; /* a pending ptrace event, not a real signal.  */
  else if (lp->status)
    signo = gdb_signal_from_host (WSTOPSIG (lp->status));
  else
    {
      /* No event stashed in the lwp; consult the thread object.
	 NOTE(review): TP is dereferenced without a NULL check below,
	 so the thread is assumed to still be on GDB's list.  */
      struct thread_info *tp = find_thread_ptid (linux_target, lp->ptid);

      if (target_is_non_stop_p () && !tp->executing ())
	{
	  if (tp->has_pending_waitstatus ())
	    {
	      /* If the thread has a pending event, and it was stopped with a
		 signal, use that signal to resume it.  If it has a pending
		 event of another kind, it was not stopped with a signal, so
		 resume it without a signal.  */
	      if (tp->pending_waitstatus ().kind () == TARGET_WAITKIND_STOPPED)
		signo = tp->pending_waitstatus ().sig ();
	      else
		signo = GDB_SIGNAL_0;
	    }
	  else
	    signo = tp->stop_signal ();
	}
      else if (!target_is_non_stop_p ())
	{
	  ptid_t last_ptid;
	  process_stratum_target *last_target;

	  get_last_target_status (&last_target, &last_ptid, nullptr);

	  /* In all-stop, only the last-reported thread's stop signal
	     has been seen by the core; pass it on only for that
	     thread.  */
	  if (last_target == linux_target
	      && lp->ptid.lwp () == last_ptid.lwp ())
	    signo = tp->stop_signal ();
	}
    }

  if (signo == GDB_SIGNAL_0)
    {
      linux_nat_debug_printf ("lwp %s has no pending signal",
			      lp->ptid.to_string ().c_str ());
    }
  else if (!signal_pass_state (signo))
    {
      linux_nat_debug_printf
	("lwp %s had signal %s but it is in no pass state",
	 lp->ptid.to_string ().c_str (), gdb_signal_to_string (signo));
    }
  else
    {
      linux_nat_debug_printf ("lwp %s has pending signal %s",
			      lp->ptid.to_string ().c_str (),
			      gdb_signal_to_string (signo));

      /* Convert to a host signal number for the ptrace detach.  */
      return gdb_signal_to_host (signo);
    }

  /* Nothing to deliver.  */
  return 0;
}
1378
1379 /* Detach from LP. If SIGNO_P is non-NULL, then it points to the
1380 signal number that should be passed to the LWP when detaching.
1381 Otherwise pass any pending signal the LWP may have, if any. */
1382
static void
detach_one_lwp (struct lwp_info *lp, int *signo_p)
{
  int lwpid = lp->ptid.lwp ();
  int signo;

  /* LP either has no pending event, or is in a ptrace stop.  */
  gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));

  /* If the lwp/thread we are about to detach has a pending fork event,
     there is a process GDB is attached to that the core of GDB doesn't know
     about.  Detach from it.  */

  /* Check in lwp_info::status.  */
  if (WIFSTOPPED (lp->status) && linux_is_extended_waitstatus (lp->status))
    {
      int event = linux_ptrace_get_extended_event (lp->status);

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  /* The child's pid is in the ptrace event message.  */
	  unsigned long child_pid;
	  int ret = ptrace (PTRACE_GETEVENTMSG, lp->ptid.lwp (), 0, &child_pid);
	  if (ret == 0)
	    detach_one_pid (child_pid, 0);
	  else
	    perror_warning_with_name (_("Failed to detach fork child"));
	}
    }

  /* Check in lwp_info::waitstatus.  */
  if (lp->waitstatus.kind () == TARGET_WAITKIND_VFORKED
      || lp->waitstatus.kind () == TARGET_WAITKIND_FORKED)
    detach_one_pid (lp->waitstatus.child_ptid ().pid (), 0);


  /* Check in thread_info::pending_waitstatus.  */
  thread_info *tp = find_thread_ptid (linux_target, lp->ptid);
  if (tp->has_pending_waitstatus ())
    {
      const target_waitstatus &ws = tp->pending_waitstatus ();

      if (ws.kind () == TARGET_WAITKIND_VFORKED
	  || ws.kind () == TARGET_WAITKIND_FORKED)
	detach_one_pid (ws.child_ptid ().pid (), 0);
    }

  /* Check in thread_info::pending_follow.  */
  if (tp->pending_follow.kind () == TARGET_WAITKIND_VFORKED
      || tp->pending_follow.kind () == TARGET_WAITKIND_FORKED)
    detach_one_pid (tp->pending_follow.child_ptid ().pid (), 0);

  if (lp->status != 0)
    linux_nat_debug_printf ("Pending %s for %s on detach.",
			    strsignal (WSTOPSIG (lp->status)),
			    lp->ptid.to_string ().c_str ());

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lp->signalled)
    {
      linux_nat_debug_printf ("Sending SIGCONT to %s",
			      lp->ptid.to_string ().c_str ());

      kill_lwp (lwpid, SIGCONT);
      lp->signalled = 0;
    }

  if (signo_p == NULL)
    {
      /* Pass on any pending signal for this LWP.  */
      signo = get_detach_signal (lp);
    }
  else
    signo = *signo_p;

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      linux_target->low_prepare_to_resume (lp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lp))
	throw;
    }

  /* Finally detach LP itself, delivering SIGNO if any.  */
  detach_one_pid (lwpid, signo);

  delete_lwp (lp->ptid);
}
1473
1474 static int
1475 detach_callback (struct lwp_info *lp)
1476 {
1477 /* We don't actually detach from the thread group leader just yet.
1478 If the thread group exits, we must reap the zombie clone lwps
1479 before we're able to reap the leader. */
1480 if (lp->ptid.lwp () != lp->ptid.pid ())
1481 detach_one_lwp (lp, NULL);
1482 return 0;
1483 }
1484
/* Implement the "detach" target_ops method: detach GDB from INF,
   leaving the process (and all its LWPs) running freely.  */

void
linux_nat_target::detach (inferior *inf, int from_tty)
{
  struct lwp_info *main_lwp;
  int pid = inf->pid;

  /* Don't unregister from the event loop, as there may be other
     inferiors running. */

  /* Stop all threads before detaching.  ptrace requires that the
     thread is stopped to successfully detach.  */
  iterate_over_lwps (ptid_t (pid), stop_callback);
  /* ... and wait until all of them have reported back that
     they're no longer running.  */
  iterate_over_lwps (ptid_t (pid), stop_wait_callback);

  /* We can now safely remove breakpoints.  We don't do this earlier
     in common code because this target doesn't currently support
     writing memory while the inferior is running.  */
  remove_breakpoints_inf (current_inferior ());

  iterate_over_lwps (ptid_t (pid), detach_callback);

  /* Only the initial process should be left right now.  */
  gdb_assert (num_lwps (pid) == 1);

  main_lwp = find_lwp_pid (ptid_t (pid));

  if (forks_exist_p ())
    {
      /* Multi-fork case.  The current inferior_ptid is being detached
	 from, but there are other viable forks to debug.  Detach from
	 the current fork, and context-switch to the first
	 available.  */
      linux_fork_detach (from_tty);
    }
  else
    {
      target_announce_detach (from_tty);

      /* Pass on any pending signal for the last LWP.  */
      int signo = get_detach_signal (main_lwp);

      detach_one_lwp (main_lwp, &signo);

      detach_success (inf);
    }

  close_proc_mem_file (pid);
}
1535
/* Resume execution of LWP LP.  If STEP is nonzero, single-step it;
   if SIGNO is not GDB_SIGNAL_0, deliver that signal on resume.  May
   propagate an error from the low target (e.g. if the LWP is gone);
   see linux_resume_one_lwp for the variant that swallows that
   case.  */

static void
linux_resume_one_lwp_throw (struct lwp_info *lp, int step,
			    enum gdb_signal signo)
{
  lp->step = step;

  /* stop_pc doubles as the PC the LWP had when it was last resumed.
     We only presently need that if the LWP is stepped though (to
     handle the case of stepping a breakpoint instruction).  */
  if (step)
    {
      struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);

      lp->stop_pc = regcache_read_pc (regcache);
    }
  else
    lp->stop_pc = 0;

  linux_target->low_prepare_to_resume (lp);
  linux_target->low_resume (lp->ptid, step, signo);

  /* Successfully resumed.  Clear state that no longer makes sense,
     and mark the LWP as running.  Must not do this before resuming
     otherwise if that fails other code will be confused.  E.g., we'd
     later try to stop the LWP and hang forever waiting for a stop
     status.  Note that we must not throw after this is cleared,
     otherwise handle_zombie_lwp_error would get confused.  */
  lp->stopped = 0;
  lp->core = -1;
  lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
  registers_changed_ptid (linux_target, lp->ptid);
}
1571
1572 /* Called when we try to resume a stopped LWP and that errors out. If
1573 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
1574 or about to become), discard the error, clear any pending status
1575 the LWP may have, and return true (we'll collect the exit status
1576 soon enough). Otherwise, return false. */
1577
1578 static int
1579 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
1580 {
1581 /* If we get an error after resuming the LWP successfully, we'd
1582 confuse !T state for the LWP being gone. */
1583 gdb_assert (lp->stopped);
1584
1585 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
1586 because even if ptrace failed with ESRCH, the tracee may be "not
1587 yet fully dead", but already refusing ptrace requests. In that
1588 case the tracee has 'R (Running)' state for a little bit
1589 (observed in Linux 3.18). See also the note on ESRCH in the
1590 ptrace(2) man page. Instead, check whether the LWP has any state
1591 other than ptrace-stopped. */
1592
1593 /* Don't assume anything if /proc/PID/status can't be read. */
1594 if (linux_proc_pid_is_trace_stopped_nowarn (lp->ptid.lwp ()) == 0)
1595 {
1596 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
1597 lp->status = 0;
1598 lp->waitstatus.set_ignore ();
1599 return 1;
1600 }
1601 return 0;
1602 }
1603
1604 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
1605 disappears while we try to resume it. */
1606
1607 static void
1608 linux_resume_one_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
1609 {
1610 try
1611 {
1612 linux_resume_one_lwp_throw (lp, step, signo);
1613 }
1614 catch (const gdb_exception_error &ex)
1615 {
1616 if (!check_ptrace_stopped_lwp_gone (lp))
1617 throw;
1618 }
1619 }
1620
1621 /* Resume LP. */
1622
1623 static void
1624 resume_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
1625 {
1626 if (lp->stopped)
1627 {
1628 struct inferior *inf = find_inferior_ptid (linux_target, lp->ptid);
1629
1630 if (inf->vfork_child != NULL)
1631 {
1632 linux_nat_debug_printf ("Not resuming %s (vfork parent)",
1633 lp->ptid.to_string ().c_str ());
1634 }
1635 else if (!lwp_status_pending_p (lp))
1636 {
1637 linux_nat_debug_printf ("Resuming sibling %s, %s, %s",
1638 lp->ptid.to_string ().c_str (),
1639 (signo != GDB_SIGNAL_0
1640 ? strsignal (gdb_signal_to_host (signo))
1641 : "0"),
1642 step ? "step" : "resume");
1643
1644 linux_resume_one_lwp (lp, step, signo);
1645 }
1646 else
1647 {
1648 linux_nat_debug_printf ("Not resuming sibling %s (has pending)",
1649 lp->ptid.to_string ().c_str ());
1650 }
1651 }
1652 else
1653 linux_nat_debug_printf ("Not resuming sibling %s (not stopped)",
1654 lp->ptid.to_string ().c_str ());
1655 }
1656
1657 /* Callback for iterate_over_lwps. If LWP is EXCEPT, do nothing.
1658 Resume LWP with the last stop signal, if it is in pass state. */
1659
1660 static int
1661 linux_nat_resume_callback (struct lwp_info *lp, struct lwp_info *except)
1662 {
1663 enum gdb_signal signo = GDB_SIGNAL_0;
1664
1665 if (lp == except)
1666 return 0;
1667
1668 if (lp->stopped)
1669 {
1670 struct thread_info *thread;
1671
1672 thread = find_thread_ptid (linux_target, lp->ptid);
1673 if (thread != NULL)
1674 {
1675 signo = thread->stop_signal ();
1676 thread->set_stop_signal (GDB_SIGNAL_0);
1677 }
1678 }
1679
1680 resume_lwp (lp, 0, signo);
1681 return 0;
1682 }
1683
/* Mark LP as not resumed and record a stop as its last resume
   request.  Returns 0, for use as an iterate_over_lwps-style
   callback.  */

static int
resume_clear_callback (struct lwp_info *lp)
{
  lp->resumed = 0;
  lp->last_resume_kind = resume_stop;
  return 0;
}
1691
/* Mark LP as resumed and record a continue as its last resume
   request.  Returns 0 so iterate_over_lwps visits every LWP.  */

static int
resume_set_callback (struct lwp_info *lp)
{
  lp->resumed = 1;
  lp->last_resume_kind = resume_continue;
  return 0;
}
1699
/* Implement the "resume" target_ops method: resume PTID (a single
   LWP, a whole process, or all LWPs for minus_one_ptid), stepping if
   STEP and delivering SIGNO if it is not GDB_SIGNAL_0.  */

void
linux_nat_target::resume (ptid_t ptid, int step, enum gdb_signal signo)
{
  struct lwp_info *lp;
  int resume_many;

  linux_nat_debug_printf ("Preparing to %s %s, %s, inferior_ptid %s",
			  step ? "step" : "resume",
			  ptid.to_string ().c_str (),
			  (signo != GDB_SIGNAL_0
			   ? strsignal (gdb_signal_to_host (signo)) : "0"),
			  inferior_ptid.to_string ().c_str ());

  /* A specific PTID means `step only this process id'.  */
  resume_many = (minus_one_ptid == ptid
		 || ptid.is_pid ());

  /* Mark the lwps we're resuming as resumed and update their
     last_resume_kind to resume_continue.  */
  iterate_over_lwps (ptid, resume_set_callback);

  /* See if it's the current inferior that should be handled
     specially.  */
  if (resume_many)
    lp = find_lwp_pid (inferior_ptid);
  else
    lp = find_lwp_pid (ptid);
  gdb_assert (lp != NULL);

  /* Remember if we're stepping.  */
  lp->last_resume_kind = step ? resume_step : resume_continue;

  /* If we have a pending wait status for this thread, there is no
     point in resuming the process.  But first make sure that
     linux_nat_wait won't preemptively handle the event - we
     should never take this short-circuit if we are going to
     leave LP running, since we have skipped resuming all the
     other threads.  This bit of code needs to be synchronized
     with linux_nat_wait.  */

  if (lp->status && WIFSTOPPED (lp->status))
    {
      if (!lp->step
	  && WSTOPSIG (lp->status)
	  && sigismember (&pass_mask, WSTOPSIG (lp->status)))
	{
	  linux_nat_debug_printf
	    ("Not short circuiting for ignored status 0x%x", lp->status);

	  /* FIXME: What should we do if we are supposed to continue
	     this thread with a signal?  */
	  gdb_assert (signo == GDB_SIGNAL_0);
	  signo = gdb_signal_from_host (WSTOPSIG (lp->status));
	  lp->status = 0;
	}
    }

  if (lwp_status_pending_p (lp))
    {
      /* FIXME: What should we do if we are supposed to continue
	 this thread with a signal?  */
      gdb_assert (signo == GDB_SIGNAL_0);

      linux_nat_debug_printf ("Short circuiting for status 0x%x",
			      lp->status);

      if (target_can_async_p ())
	{
	  target_async (1);
	  /* Tell the event loop we have something to process.  */
	  async_file_mark ();
	}
      return;
    }

  /* Resume every other LWP matching PTID, except the event thread,
     which goes last below.  */
  if (resume_many)
    iterate_over_lwps (ptid, [=] (struct lwp_info *info)
			     {
			       return linux_nat_resume_callback (info, lp);
			     });

  linux_nat_debug_printf ("%s %s, %s (resume event thread)",
			  step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
			  lp->ptid.to_string ().c_str (),
			  (signo != GDB_SIGNAL_0
			   ? strsignal (gdb_signal_to_host (signo)) : "0"));

  linux_resume_one_lwp (lp, step, signo);

  if (target_can_async_p ())
    target_async (1);
}
1792
1793 /* Send a signal to an LWP. */
1794
1795 static int
1796 kill_lwp (int lwpid, int signo)
1797 {
1798 int ret;
1799
1800 errno = 0;
1801 ret = syscall (__NR_tkill, lwpid, signo);
1802 if (errno == ENOSYS)
1803 {
1804 /* If tkill fails, then we are not using nptl threads, a
1805 configuration we no longer support. */
1806 perror_with_name (("tkill"));
1807 }
1808 return ret;
1809 }
1810
1811 /* Handle a GNU/Linux syscall trap wait response. If we see a syscall
1812 event, check if the core is interested in it: if not, ignore the
1813 event, and keep waiting; otherwise, we need to toggle the LWP's
1814 syscall entry/exit status, since the ptrace event itself doesn't
1815 indicate it, and report the trap to higher layers. */
1816
static int
linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
{
  struct target_waitstatus *ourstatus = &lp->waitstatus;
  struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
  thread_info *thread = find_thread_ptid (linux_target, lp->ptid);
  int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, thread);

  if (stopping)
    {
      /* If we're stopping threads, there's a SIGSTOP pending, which
	 makes it so that the LWP reports an immediate syscall return,
	 followed by the SIGSTOP.  Skip seeing that "return" using
	 PTRACE_CONT directly, and let stop_wait_callback collect the
	 SIGSTOP.  Later when the thread is resumed, a new syscall
	 entry event.  If we didn't do this (and returned 0), we'd
	 leave a syscall entry pending, and our caller, by using
	 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
	 itself.  Later, when the user re-resumes this LWP, we'd see
	 another syscall entry event and we'd mistake it for a return.

	 If stop_wait_callback didn't force the SIGSTOP out of the LWP
	 (leaving immediately with LWP->signalled set, without issuing
	 a PTRACE_CONT), it would still be problematic to leave this
	 syscall enter pending, as later when the thread is resumed,
	 it would then see the same syscall exit mentioned above,
	 followed by the delayed SIGSTOP, while the syscall didn't
	 actually get to execute.  It seems it would be even more
	 confusing to the user.  */

      linux_nat_debug_printf
	("ignoring syscall %d for LWP %ld (stopping threads), resuming with "
	 "PTRACE_CONT for SIGSTOP", syscall_number, lp->ptid.lwp ());

      /* TARGET_WAITKIND_IGNORE here means "no syscall event
	 pending".  */
      lp->syscall_state = TARGET_WAITKIND_IGNORE;
      ptrace (PTRACE_CONT, lp->ptid.lwp (), 0, 0);
      lp->stopped = 0;
      return 1;
    }

  /* Always update the entry/return state, even if this particular
     syscall isn't interesting to the core now.  In async mode,
     the user could install a new catchpoint for this syscall
     between syscall enter/return, and we'll need to know to
     report a syscall return if that happens.  */
  lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
		       ? TARGET_WAITKIND_SYSCALL_RETURN
		       : TARGET_WAITKIND_SYSCALL_ENTRY);

  if (catch_syscall_enabled ())
    {
      /* The core is catching at least some syscalls; check whether
	 this one is among them.  */
      if (catching_syscall_number (syscall_number))
	{
	  /* Alright, an event to report.  */
	  if (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
	    ourstatus->set_syscall_entry (syscall_number);
	  else if (lp->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
	    ourstatus->set_syscall_return (syscall_number);
	  else
	    gdb_assert_not_reached ("unexpected syscall state");

	  linux_nat_debug_printf
	    ("stopping for %s of syscall %d for LWP %ld",
	     (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
	      ? "entry" : "return"), syscall_number, lp->ptid.lwp ());

	  /* Report this stop to higher layers.  */
	  return 0;
	}

      linux_nat_debug_printf
	("ignoring %s of syscall %d for LWP %ld",
	 (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
	  ? "entry" : "return"), syscall_number, lp->ptid.lwp ());
    }
  else
    {
      /* If we had been syscall tracing, and hence used PT_SYSCALL
	 before on this LWP, it could happen that the user removes all
	 syscall catchpoints before we get to process this event.
	 There are two noteworthy issues here:

	 - When stopped at a syscall entry event, resuming with
	   PT_STEP still resumes executing the syscall and reports a
	   syscall return.

	 - Only PT_SYSCALL catches syscall enters.  If we last
	   single-stepped this thread, then this event can't be a
	   syscall enter.  If we last single-stepped this thread, this
	   has to be a syscall exit.

	 The points above mean that the next resume, be it PT_STEP or
	 PT_CONTINUE, can not trigger a syscall trace event.  */
      linux_nat_debug_printf
	("caught syscall event with no syscall catchpoints. %d for LWP %ld, "
	 "ignoring", syscall_number, lp->ptid.lwp ());
      lp->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* The core isn't interested in this event.  For efficiency, avoid
     stopping all threads only to have the core resume them all again.
     Since we're not stopping threads, if we're still syscall tracing
     and not stepping, we can't use PTRACE_CONT here, as we'd miss any
     subsequent syscall.  Simply resume using the inf-ptrace layer,
     which knows when to use PT_SYSCALL or PT_CONTINUE.  */

  linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
  return 1;
}
1925
1926 /* Handle a GNU/Linux extended wait response. If we see a clone
1927 event, we need to add the new LWP to our list (and not report the
1928 trap to higher layers). This function returns non-zero if the
1929 event should be ignored and we should wait again. If STOPPING is
1930 true, the new LWP remains stopped, otherwise it is continued. */
1931
1932 static int
1933 linux_handle_extended_wait (struct lwp_info *lp, int status)
1934 {
1935 int pid = lp->ptid.lwp ();
1936 struct target_waitstatus *ourstatus = &lp->waitstatus;
1937 int event = linux_ptrace_get_extended_event (status);
1938
1939 /* All extended events we currently use are mid-syscall. Only
1940 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
1941 you have to be using PTRACE_SEIZE to get that. */
1942 lp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
1943
1944 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
1945 || event == PTRACE_EVENT_CLONE)
1946 {
1947 unsigned long new_pid;
1948 int ret;
1949
1950 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
1951
1952 /* If we haven't already seen the new PID stop, wait for it now. */
1953 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
1954 {
1955 /* The new child has a pending SIGSTOP. We can't affect it until it
1956 hits the SIGSTOP, but we're already attached. */
1957 ret = my_waitpid (new_pid, &status, __WALL);
1958 if (ret == -1)
1959 perror_with_name (_("waiting for new child"));
1960 else if (ret != new_pid)
1961 internal_error (__FILE__, __LINE__,
1962 _("wait returned unexpected PID %d"), ret);
1963 else if (!WIFSTOPPED (status))
1964 internal_error (__FILE__, __LINE__,
1965 _("wait returned unexpected status 0x%x"), status);
1966 }
1967
1968 ptid_t child_ptid (new_pid, new_pid);
1969
1970 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
1971 {
1972 open_proc_mem_file (child_ptid);
1973
1974 /* The arch-specific native code may need to know about new
1975 forks even if those end up never mapped to an
1976 inferior. */
1977 linux_target->low_new_fork (lp, new_pid);
1978 }
1979 else if (event == PTRACE_EVENT_CLONE)
1980 {
1981 linux_target->low_new_clone (lp, new_pid);
1982 }
1983
1984 if (event == PTRACE_EVENT_FORK
1985 && linux_fork_checkpointing_p (lp->ptid.pid ()))
1986 {
1987 /* Handle checkpointing by linux-fork.c here as a special
1988 case. We don't want the follow-fork-mode or 'catch fork'
1989 to interfere with this. */
1990
1991 /* This won't actually modify the breakpoint list, but will
1992 physically remove the breakpoints from the child. */
1993 detach_breakpoints (ptid_t (new_pid, new_pid));
1994
1995 /* Retain child fork in ptrace (stopped) state. */
1996 if (!find_fork_pid (new_pid))
1997 add_fork (new_pid);
1998
1999 /* Report as spurious, so that infrun doesn't want to follow
2000 this fork. We're actually doing an infcall in
2001 linux-fork.c. */
2002 ourstatus->set_spurious ();
2003
2004 /* Report the stop to the core. */
2005 return 0;
2006 }
2007
2008 if (event == PTRACE_EVENT_FORK)
2009 ourstatus->set_forked (child_ptid);
2010 else if (event == PTRACE_EVENT_VFORK)
2011 ourstatus->set_vforked (child_ptid);
2012 else if (event == PTRACE_EVENT_CLONE)
2013 {
2014 struct lwp_info *new_lp;
2015
2016 ourstatus->set_ignore ();
2017
2018 linux_nat_debug_printf
2019 ("Got clone event from LWP %d, new child is LWP %ld", pid, new_pid);
2020
2021 new_lp = add_lwp (ptid_t (lp->ptid.pid (), new_pid));
2022 new_lp->stopped = 1;
2023 new_lp->resumed = 1;
2024
2025 /* If the thread_db layer is active, let it record the user
2026 level thread id and status, and add the thread to GDB's
2027 list. */
2028 if (!thread_db_notice_clone (lp->ptid, new_lp->ptid))
2029 {
2030 /* The process is not using thread_db. Add the LWP to
2031 GDB's list. */
2032 target_post_attach (new_lp->ptid.lwp ());
2033 add_thread (linux_target, new_lp->ptid);
2034 }
2035
2036 /* Even if we're stopping the thread for some reason
2037 internal to this module, from the perspective of infrun
2038 and the user/frontend, this new thread is running until
2039 it next reports a stop. */
2040 set_running (linux_target, new_lp->ptid, true);
2041 set_executing (linux_target, new_lp->ptid, true);
2042
2043 if (WSTOPSIG (status) != SIGSTOP)
2044 {
2045 /* This can happen if someone starts sending signals to
2046 the new thread before it gets a chance to run, which
2047 have a lower number than SIGSTOP (e.g. SIGUSR1).
2048 This is an unlikely case, and harder to handle for
2049 fork / vfork than for clone, so we do not try - but
2050 we handle it for clone events here. */
2051
2052 new_lp->signalled = 1;
2053
2054 /* We created NEW_LP so it cannot yet contain STATUS. */
2055 gdb_assert (new_lp->status == 0);
2056
2057 /* Save the wait status to report later. */
2058 linux_nat_debug_printf
2059 ("waitpid of new LWP %ld, saving status %s",
2060 (long) new_lp->ptid.lwp (), status_to_str (status).c_str ());
2061 new_lp->status = status;
2062 }
2063 else if (report_thread_events)
2064 {
2065 new_lp->waitstatus.set_thread_created ();
2066 new_lp->status = status;
2067 }
2068
2069 return 1;
2070 }
2071
2072 return 0;
2073 }
2074
2075 if (event == PTRACE_EVENT_EXEC)
2076 {
2077 linux_nat_debug_printf ("Got exec event from LWP %ld", lp->ptid.lwp ());
2078
2079 /* Close the previous /proc/PID/mem file for this inferior,
2080 which was using the address space which is now gone.
2081 Reading/writing from this file would return 0/EOF. */
2082 close_proc_mem_file (lp->ptid.pid ());
2083
2084 /* Open a new file for the new address space. */
2085 open_proc_mem_file (lp->ptid);
2086
2087 ourstatus->set_execd
2088 (make_unique_xstrdup (linux_proc_pid_to_exec_file (pid)));
2089
2090 /* The thread that execed must have been resumed, but, when a
2091 thread execs, it changes its tid to the tgid, and the old
2092 tgid thread might have not been resumed. */
2093 lp->resumed = 1;
2094 return 0;
2095 }
2096
2097 if (event == PTRACE_EVENT_VFORK_DONE)
2098 {
2099 if (current_inferior ()->waiting_for_vfork_done)
2100 {
2101 linux_nat_debug_printf
2102 ("Got expected PTRACE_EVENT_VFORK_DONE from LWP %ld: stopping",
2103 lp->ptid.lwp ());
2104
2105 ourstatus->set_vfork_done ();
2106 return 0;
2107 }
2108
2109 linux_nat_debug_printf
2110 ("Got PTRACE_EVENT_VFORK_DONE from LWP %ld: ignoring", lp->ptid.lwp ());
2111
2112 return 1;
2113 }
2114
2115 internal_error (__FILE__, __LINE__,
2116 _("unknown ptrace event %d"), event);
2117 }
2118
2119 /* Suspend waiting for a signal. We're mostly interested in
2120 SIGCHLD/SIGINT. */
2121
2122 static void
2123 wait_for_signal ()
2124 {
2125 linux_nat_debug_printf ("about to sigsuspend");
2126 sigsuspend (&suspend_mask);
2127
2128 /* If the quit flag is set, it means that the user pressed Ctrl-C
2129 and we're debugging a process that is running on a separate
2130 terminal, so we must forward the Ctrl-C to the inferior. (If the
2131 inferior is sharing GDB's terminal, then the Ctrl-C reaches the
2132 inferior directly.) We must do this here because functions that
2133 need to block waiting for a signal loop forever until there's an
2134 event to report before returning back to the event loop. */
2135 if (!target_terminal::is_ours ())
2136 {
2137 if (check_quit_flag ())
2138 target_pass_ctrlc ();
2139 }
2140 }
2141
2142 /* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2143 exited. */
2144
static int
wait_lwp (struct lwp_info *lp)
{
  pid_t pid;
  int status = 0;
  int thread_dead = 0;
  sigset_t prev_mask;

  /* Only meaningful for an LWP we believe is running and for which no
     event has already been collected.  */
  gdb_assert (!lp->stopped);
  gdb_assert (lp->status == 0);

  /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below.  */
  block_child_signals (&prev_mask);

  for (;;)
    {
      /* WNOHANG so we never block in waitpid itself; blocking is done
	 in wait_for_signal below (see the race note further down).  */
      pid = my_waitpid (lp->ptid.lwp (), &status, __WALL | WNOHANG);
      if (pid == -1 && errno == ECHILD)
	{
	  /* The thread has previously exited.  We need to delete it
	     now because if this was a non-leader thread execing, we
	     won't get an exit event.  See comments on exec events at
	     the top of the file.  */
	  thread_dead = 1;
	  linux_nat_debug_printf ("%s vanished.",
				  lp->ptid.to_string ().c_str ());
	}
      if (pid != 0)
	break;

      /* Bugs 10970, 12702.
	 Thread group leader may have exited in which case we'll lock up in
	 waitpid if there are other threads, even if they are all zombies too.
	 Basically, we're not supposed to use waitpid this way.
	 tkill(pid,0) cannot be used here as it gets ESRCH for both
	 for zombie and running processes.

	 As a workaround, check if we're waiting for the thread group leader and
	 if it's a zombie, and avoid calling waitpid if it is.

	 This is racy, what if the tgl becomes a zombie right after we check?
	 Therefore always use WNOHANG with sigsuspend - it is equivalent to
	 waiting waitpid but linux_proc_pid_is_zombie is safe this way.  */

      if (lp->ptid.pid () == lp->ptid.lwp ()
	  && linux_proc_pid_is_zombie (lp->ptid.lwp ()))
	{
	  thread_dead = 1;
	  linux_nat_debug_printf ("Thread group leader %s vanished.",
				  lp->ptid.to_string ().c_str ());
	  break;
	}

      /* Wait for next SIGCHLD and try again.  This may let SIGCHLD handlers
	 get invoked despite our caller had them intentionally blocked by
	 block_child_signals.  This is sensitive only to the loop of
	 linux_nat_wait_1 and there if we get called my_waitpid gets called
	 again before it gets to sigsuspend so we can safely let the handlers
	 get executed here.  */
      wait_for_signal ();
    }

  restore_child_signals_mask (&prev_mask);

  if (!thread_dead)
    {
      gdb_assert (pid == lp->ptid.lwp ());

      linux_nat_debug_printf ("waitpid %s received %s",
			      lp->ptid.to_string ().c_str (),
			      status_to_str (status).c_str ());

      /* Check if the thread has exited.  */
      if (WIFEXITED (status) || WIFSIGNALED (status))
	{
	  if (report_thread_events
	      || lp->ptid.pid () == lp->ptid.lwp ())
	    {
	      linux_nat_debug_printf ("LWP %d exited.", lp->ptid.pid ());

	      /* If this is the leader exiting, it means the whole
		 process is gone.  Store the status to report to the
		 core.  Store it in lp->waitstatus, because lp->status
		 would be ambiguous (W_EXITCODE(0,0) == 0).  */
	      lp->waitstatus = host_status_to_waitstatus (status);
	      return 0;
	    }

	  thread_dead = 1;
	  linux_nat_debug_printf ("%s exited.",
				  lp->ptid.to_string ().c_str ());
	}
    }

  if (thread_dead)
    {
      exit_lwp (lp);
      return 0;
    }

  gdb_assert (WIFSTOPPED (status));
  lp->stopped = 1;

  /* Lazily enable ptrace event reporting if attach raced with the
     thread being created.  */
  if (lp->must_set_ptrace_flags)
    {
      inferior *inf = find_inferior_pid (linux_target, lp->ptid.pid ());
      int options = linux_nat_ptrace_options (inf->attach_flag);

      linux_enable_event_reporting (lp->ptid.lwp (), options);
      lp->must_set_ptrace_flags = 0;
    }

  /* Handle GNU/Linux's syscall SIGTRAPs.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
    {
      /* No longer need the sysgood bit.  The ptrace event ends up
	 recorded in lp->waitstatus if we care for it.  We can carry
	 on handling the event like a regular SIGTRAP from here
	 on.  */
      status = W_STOPCODE (SIGTRAP);
      if (linux_handle_syscall_trap (lp, 1))
	return wait_lwp (lp);
    }
  else
    {
      /* Almost all other ptrace-stops are known to be outside of system
	 calls, with further exceptions in linux_handle_extended_wait.  */
      lp->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* Handle GNU/Linux's extended waitstatus for trace events.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
      && linux_is_extended_waitstatus (status))
    {
      linux_nat_debug_printf ("Handling extended status 0x%06x", status);
      linux_handle_extended_wait (lp, status);
      return 0;
    }

  return status;
}
2286
2287 /* Send a SIGSTOP to LP. */
2288
2289 static int
2290 stop_callback (struct lwp_info *lp)
2291 {
2292 if (!lp->stopped && !lp->signalled)
2293 {
2294 int ret;
2295
2296 linux_nat_debug_printf ("kill %s **<SIGSTOP>**",
2297 lp->ptid.to_string ().c_str ());
2298
2299 errno = 0;
2300 ret = kill_lwp (lp->ptid.lwp (), SIGSTOP);
2301 linux_nat_debug_printf ("lwp kill %d %s", ret,
2302 errno ? safe_strerror (errno) : "ERRNO-OK");
2303
2304 lp->signalled = 1;
2305 gdb_assert (lp->status == 0);
2306 }
2307
2308 return 0;
2309 }
2310
2311 /* Request a stop on LWP. */
2312
2313 void
2314 linux_stop_lwp (struct lwp_info *lwp)
2315 {
2316 stop_callback (lwp);
2317 }
2318
2319 /* See linux-nat.h */
2320
2321 void
2322 linux_stop_and_wait_all_lwps (void)
2323 {
2324 /* Stop all LWP's ... */
2325 iterate_over_lwps (minus_one_ptid, stop_callback);
2326
2327 /* ... and wait until all of them have reported back that
2328 they're no longer running. */
2329 iterate_over_lwps (minus_one_ptid, stop_wait_callback);
2330 }
2331
2332 /* See linux-nat.h */
2333
2334 void
2335 linux_unstop_all_lwps (void)
2336 {
2337 iterate_over_lwps (minus_one_ptid,
2338 [] (struct lwp_info *info)
2339 {
2340 return resume_stopped_resumed_lwps (info, minus_one_ptid);
2341 });
2342 }
2343
2344 /* Return non-zero if LWP PID has a pending SIGINT. */
2345
2346 static int
2347 linux_nat_has_pending_sigint (int pid)
2348 {
2349 sigset_t pending, blocked, ignored;
2350
2351 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2352
2353 if (sigismember (&pending, SIGINT)
2354 && !sigismember (&ignored, SIGINT))
2355 return 1;
2356
2357 return 0;
2358 }
2359
2360 /* Set a flag in LP indicating that we should ignore its next SIGINT. */
2361
2362 static int
2363 set_ignore_sigint (struct lwp_info *lp)
2364 {
2365 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2366 flag to consume the next one. */
2367 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2368 && WSTOPSIG (lp->status) == SIGINT)
2369 lp->status = 0;
2370 else
2371 lp->ignore_sigint = 1;
2372
2373 return 0;
2374 }
2375
2376 /* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2377 This function is called after we know the LWP has stopped; if the LWP
2378 stopped before the expected SIGINT was delivered, then it will never have
2379 arrived. Also, if the signal was delivered to a shared queue and consumed
2380 by a different thread, it will never be delivered to this LWP. */
2381
2382 static void
2383 maybe_clear_ignore_sigint (struct lwp_info *lp)
2384 {
2385 if (!lp->ignore_sigint)
2386 return;
2387
2388 if (!linux_nat_has_pending_sigint (lp->ptid.lwp ()))
2389 {
2390 linux_nat_debug_printf ("Clearing bogus flag for %s",
2391 lp->ptid.to_string ().c_str ());
2392 lp->ignore_sigint = 0;
2393 }
2394 }
2395
2396 /* Fetch the possible triggered data watchpoint info and store it in
2397 LP.
2398
2399 On some archs, like x86, that use debug registers to set
2400 watchpoints, it's possible that the way to know which watched
2401 address trapped, is to check the register that is used to select
2402 which address to watch. Problem is, between setting the watchpoint
2403 and reading back which data address trapped, the user may change
2404 the set of watchpoints, and, as a consequence, GDB changes the
2405 debug registers in the inferior. To avoid reading back a stale
2406 stopped-data-address when that happens, we cache in LP the fact
2407 that a watchpoint trapped, and the corresponding data address, as
2408 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2409 registers meanwhile, we have the cached data we can rely on. */
2410
2411 static int
2412 check_stopped_by_watchpoint (struct lwp_info *lp)
2413 {
2414 scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
2415 inferior_ptid = lp->ptid;
2416
2417 if (linux_target->low_stopped_by_watchpoint ())
2418 {
2419 lp->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2420 lp->stopped_data_address_p
2421 = linux_target->low_stopped_data_address (&lp->stopped_data_address);
2422 }
2423
2424 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2425 }
2426
2427 /* Returns true if the LWP had stopped for a watchpoint. */
2428
2429 bool
2430 linux_nat_target::stopped_by_watchpoint ()
2431 {
2432 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2433
2434 gdb_assert (lp != NULL);
2435
2436 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2437 }
2438
2439 bool
2440 linux_nat_target::stopped_data_address (CORE_ADDR *addr_p)
2441 {
2442 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2443
2444 gdb_assert (lp != NULL);
2445
2446 *addr_p = lp->stopped_data_address;
2447
2448 return lp->stopped_data_address_p;
2449 }
2450
2451 /* Commonly any breakpoint / watchpoint generate only SIGTRAP. */
2452
2453 bool
2454 linux_nat_target::low_status_is_event (int status)
2455 {
2456 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2457 }
2458
2459 /* Wait until LP is stopped. */
2460
static int
stop_wait_callback (struct lwp_info *lp)
{
  inferior *inf = find_inferior_ptid (linux_target, lp->ptid);

  /* If this is a vfork parent, bail out, it is not going to report
     any SIGSTOP until the vfork is done with.  */
  if (inf->vfork_child != NULL)
    return 0;

  if (!lp->stopped)
    {
      int status;

      status = wait_lwp (lp);
      /* A zero status means the LWP exited and was already cleaned
	 up by wait_lwp.  */
      if (status == 0)
	return 0;

      if (lp->ignore_sigint && WIFSTOPPED (status)
	  && WSTOPSIG (status) == SIGINT)
	{
	  /* A SIGINT we were told to swallow (see set_ignore_sigint):
	     discard it, re-resume the LWP, and keep waiting for the
	     expected SIGSTOP.  */
	  lp->ignore_sigint = 0;

	  errno = 0;
	  ptrace (PTRACE_CONT, lp->ptid.lwp (), 0, 0);
	  lp->stopped = 0;
	  linux_nat_debug_printf
	    ("PTRACE_CONT %s, 0, 0 (%s) (discarding SIGINT)",
	     lp->ptid.to_string ().c_str (),
	     errno ? safe_strerror (errno) : "OK");

	  return stop_wait_callback (lp);
	}

      maybe_clear_ignore_sigint (lp);

      if (WSTOPSIG (status) != SIGSTOP)
	{
	  /* The thread was stopped with a signal other than SIGSTOP.  */

	  linux_nat_debug_printf ("Pending event %s in %s",
				  status_to_str ((int) status).c_str (),
				  lp->ptid.to_string ().c_str ());

	  /* Save the sigtrap event.  */
	  lp->status = status;
	  gdb_assert (lp->signalled);
	  save_stop_reason (lp);
	}
      else
	{
	  /* We caught the SIGSTOP that we intended to catch.  */

	  linux_nat_debug_printf ("Expected SIGSTOP caught for %s.",
				  lp->ptid.to_string ().c_str ());

	  lp->signalled = 0;

	  /* If we are waiting for this stop so we can report the thread
	     stopped then we need to record this status.  Otherwise, we can
	     now discard this stop event.  */
	  if (lp->last_resume_kind == resume_stop)
	    {
	      lp->status = status;
	      save_stop_reason (lp);
	    }
	}
    }

  return 0;
}
2532
2533 /* Return non-zero if LP has a wait status pending. Discard the
2534 pending event and resume the LWP if the event that originally
2535 caused the stop became uninteresting. */
2536
static int
status_callback (struct lwp_info *lp)
{
  /* Only report a pending wait status if we pretend that this has
     indeed been resumed.  */
  if (!lp->resumed)
    return 0;

  if (!lwp_status_pending_p (lp))
    return 0;

  if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
      || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    {
      /* The pending event was a breakpoint hit.  If the thread has
	 moved since (or, without siginfo support, the breakpoint is
	 no longer inserted at the stop PC), the event is stale:
	 discard it and quietly re-resume the LWP.  */
      struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
      CORE_ADDR pc;
      int discard = 0;

      pc = regcache_read_pc (regcache);

      if (pc != lp->stop_pc)
	{
	  linux_nat_debug_printf ("PC of %s changed. was=%s, now=%s",
				  lp->ptid.to_string ().c_str (),
				  paddress (target_gdbarch (), lp->stop_pc),
				  paddress (target_gdbarch (), pc));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      else if (!breakpoint_inserted_here_p (regcache->aspace (), pc))
	{
	  linux_nat_debug_printf ("previous breakpoint of %s, at %s gone",
				  lp->ptid.to_string ().c_str (),
				  paddress (target_gdbarch (), lp->stop_pc));

	  discard = 1;
	}
#endif

      if (discard)
	{
	  linux_nat_debug_printf ("pending event of %s cancelled.",
				  lp->ptid.to_string ().c_str ());

	  lp->status = 0;
	  linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
	  return 0;
	}
    }

  return 1;
}
2590
2591 /* Count the LWP's that have had events. */
2592
2593 static int
2594 count_events_callback (struct lwp_info *lp, int *count)
2595 {
2596 gdb_assert (count != NULL);
2597
2598 /* Select only resumed LWPs that have an event pending. */
2599 if (lp->resumed && lwp_status_pending_p (lp))
2600 (*count)++;
2601
2602 return 0;
2603 }
2604
2605 /* Select the LWP (if any) that is currently being single-stepped. */
2606
2607 static int
2608 select_singlestep_lwp_callback (struct lwp_info *lp)
2609 {
2610 if (lp->last_resume_kind == resume_step
2611 && lp->status != 0)
2612 return 1;
2613 else
2614 return 0;
2615 }
2616
2617 /* Returns true if LP has a status pending. */
2618
2619 static int
2620 lwp_status_pending_p (struct lwp_info *lp)
2621 {
2622 /* We check for lp->waitstatus in addition to lp->status, because we
2623 can have pending process exits recorded in lp->status and
2624 W_EXITCODE(0,0) happens to be 0. */
2625 return lp->status != 0 || lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE;
2626 }
2627
2628 /* Select the Nth LWP that has had an event. */
2629
2630 static int
2631 select_event_lwp_callback (struct lwp_info *lp, int *selector)
2632 {
2633 gdb_assert (selector != NULL);
2634
2635 /* Select only resumed LWPs that have an event pending. */
2636 if (lp->resumed && lwp_status_pending_p (lp))
2637 if ((*selector)-- == 0)
2638 return 1;
2639
2640 return 0;
2641 }
2642
2643 /* Called when the LWP stopped for a signal/trap. If it stopped for a
2644 trap check what caused it (breakpoint, watchpoint, trace, etc.),
2645 and save the result in the LWP's stop_reason field. If it stopped
2646 for a breakpoint, decrement the PC if necessary on the lwp's
2647 architecture. */
2648
static void
save_stop_reason (struct lwp_info *lp)
{
  struct regcache *regcache;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  CORE_ADDR sw_bp_pc;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  gdb_assert (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON);
  gdb_assert (lp->status != 0);

  if (!linux_target->low_status_is_event (lp->status))
    return;

  regcache = get_thread_regcache (linux_target, lp->ptid);
  gdbarch = regcache->arch ();

  pc = regcache_read_pc (regcache);
  /* On archs where the PC advances past the breakpoint instruction,
     the breakpoint address is PC minus the decr_pc adjustment.  */
  sw_bp_pc = pc - gdbarch_decr_pc_after_break (gdbarch);

#if USE_SIGTRAP_SIGINFO
  if (linux_nat_get_siginfo (lp->ptid, &siginfo))
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
	      && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* The si_code is ambiguous on this arch -- check debug
		 registers.  */
	      if (!check_stopped_by_watchpoint (lp))
		lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
	    {
	      /* If we determine the LWP stopped for a SW breakpoint,
		 trust it.  Particularly don't check watchpoint
		 registers, because, at least on s390, we'd find
		 stopped-by-watchpoint as long as there's a watchpoint
		 set.  */
	      lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* This can indicate either a hardware breakpoint or
		 hardware watchpoint.  Check debug registers.  */
	      if (!check_stopped_by_watchpoint (lp))
		lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      linux_nat_debug_printf ("%s stopped by trace",
				      lp->ptid.to_string ().c_str ());

	      /* We may have single stepped an instruction that
		 triggered a watchpoint.  In that case, on some
		 architectures (such as x86), instead of TRAP_HWBKPT,
		 si_code indicates TRAP_TRACE, and we need to check
		 the debug registers separately.  */
	      check_stopped_by_watchpoint (lp);
	    }
	}
    }
#else
  /* No usable siginfo: fall back to inspecting what is inserted at
     the stop address.  */
  if ((!lp->step || lp->stop_pc == sw_bp_pc)
      && software_breakpoint_inserted_here_p (regcache->aspace (),
					      sw_bp_pc))
    {
      /* The LWP was either continued, or stepped a software
	 breakpoint instruction.  */
      lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
    }

  if (hardware_breakpoint_inserted_here_p (regcache->aspace (), pc))
    lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lp);
#endif

  if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      linux_nat_debug_printf ("%s stopped by software breakpoint",
			      lp->ptid.to_string ().c_str ());

      /* Back up the PC if necessary.  */
      if (pc != sw_bp_pc)
	regcache_write_pc (regcache, sw_bp_pc);

      /* Update this so we record the correct stop PC below.  */
      pc = sw_bp_pc;
    }
  else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    {
      linux_nat_debug_printf ("%s stopped by hardware breakpoint",
			      lp->ptid.to_string ().c_str ());
    }
  else if (lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    {
      linux_nat_debug_printf ("%s stopped by hardware watchpoint",
			      lp->ptid.to_string ().c_str ());
    }

  lp->stop_pc = pc;
}
2757
2758
2759 /* Returns true if the LWP had stopped for a software breakpoint. */
2760
2761 bool
2762 linux_nat_target::stopped_by_sw_breakpoint ()
2763 {
2764 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2765
2766 gdb_assert (lp != NULL);
2767
2768 return lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2769 }
2770
2771 /* Implement the supports_stopped_by_sw_breakpoint method. */
2772
2773 bool
2774 linux_nat_target::supports_stopped_by_sw_breakpoint ()
2775 {
2776 return USE_SIGTRAP_SIGINFO;
2777 }
2778
2779 /* Returns true if the LWP had stopped for a hardware
2780 breakpoint/watchpoint. */
2781
2782 bool
2783 linux_nat_target::stopped_by_hw_breakpoint ()
2784 {
2785 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2786
2787 gdb_assert (lp != NULL);
2788
2789 return lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2790 }
2791
2792 /* Implement the supports_stopped_by_hw_breakpoint method. */
2793
2794 bool
2795 linux_nat_target::supports_stopped_by_hw_breakpoint ()
2796 {
2797 return USE_SIGTRAP_SIGINFO;
2798 }
2799
2800 /* Select one LWP out of those that have events pending. */
2801
static void
select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
{
  int num_events = 0;
  int random_selector;
  struct lwp_info *event_lp = NULL;

  /* Record the wait status for the original LWP.  */
  (*orig_lp)->status = *status;

  /* In all-stop, give preference to the LWP that is being
     single-stepped.  There will be at most one, and it will be the
     LWP that the core is most interested in.  If we didn't do this,
     then we'd have to handle pending step SIGTRAPs somehow in case
     the core later continues the previously-stepped thread, as
     otherwise we'd report the pending SIGTRAP then, and the core, not
     having stepped the thread, wouldn't understand what the trap was
     for, and therefore would report it to the user as a random
     signal.  */
  if (!target_is_non_stop_p ())
    {
      event_lp = iterate_over_lwps (filter, select_singlestep_lwp_callback);
      if (event_lp != NULL)
	{
	  linux_nat_debug_printf ("Select single-step %s",
				  event_lp->ptid.to_string ().c_str ());
	}
    }

  if (event_lp == NULL)
    {
      /* Pick one at random, out of those which have had events.
	 Randomizing avoids starving LWPs that always report events
	 "earlier" in the iteration order.  */

      /* First see how many events we have.  */
      iterate_over_lwps (filter,
			 [&] (struct lwp_info *info)
			 {
			   return count_events_callback (info, &num_events);
			 });
      gdb_assert (num_events > 0);

      /* Now randomly pick a LWP out of those that have had
	 events.  */
      random_selector = (int)
	((num_events * (double) rand ()) / (RAND_MAX + 1.0));

      if (num_events > 1)
	linux_nat_debug_printf ("Found %d events, selecting #%d",
				num_events, random_selector);

      event_lp
	= (iterate_over_lwps
	   (filter,
	    [&] (struct lwp_info *info)
	    {
	      return select_event_lwp_callback (info,
						&random_selector);
	    }));
    }

  if (event_lp != NULL)
    {
      /* Switch the event LWP.  */
      *orig_lp = event_lp;
      *status = event_lp->status;
    }

  /* Flush the wait status for the event LWP.  Note *ORIG_LP may have
     just been repointed at EVENT_LP above, so this clears the status
     of whichever LWP is actually being reported.  */
  (*orig_lp)->status = 0;
}
2872
2873 /* Return non-zero if LP has been resumed. */
2874
2875 static int
2876 resumed_callback (struct lwp_info *lp)
2877 {
2878 return lp->resumed;
2879 }
2880
2881 /* Check if we should go on and pass this event to common code.
2882
2883 If so, save the status to the lwp_info structure associated to LWPID. */
2884
static void
linux_nat_filter_event (int lwpid, int status)
{
  struct lwp_info *lp;
  int event = linux_ptrace_get_extended_event (status);

  lp = find_lwp_pid (ptid_t (lwpid));

  /* Check for stop events reported by a process we didn't already
     know about - anything not already in our LWP list.

     If we're expecting to receive stopped processes after
     fork, vfork, and clone events, then we'll just add the
     new one to our list and go back to waiting for the event
     to be reported - the stopped process might be returned
     from waitpid before or after the event is.

     But note the case of a non-leader thread exec'ing after the
     leader having exited, and gone from our lists.  The non-leader
     thread changes its tid to the tgid.  */

  if (WIFSTOPPED (status) && lp == NULL
      && (WSTOPSIG (status) == SIGTRAP && event == PTRACE_EVENT_EXEC))
    {
      /* A multi-thread exec after we had seen the leader exiting.  */
      linux_nat_debug_printf ("Re-adding thread group leader LWP %d.", lwpid);

      lp = add_lwp (ptid_t (lwpid, lwpid));
      lp->stopped = 1;
      lp->resumed = 1;
      add_thread (linux_target, lp->ptid);
    }

  if (WIFSTOPPED (status) && !lp)
    {
      /* Unknown stopped LWP: stash the status for when the matching
	 fork/vfork/clone event arrives.  */
      linux_nat_debug_printf ("saving LWP %ld status %s in stopped_pids list",
			      (long) lwpid, status_to_str (status).c_str ());
      add_to_pid_list (&stopped_pids, lwpid, status);
      return;
    }

  /* Make sure we don't report an event for the exit of an LWP not in
     our list, i.e. not part of the current process.  This can happen
     if we detach from a program we originally forked and then it
     exits.  */
  if (!WIFSTOPPED (status) && !lp)
    return;

  /* This LWP is stopped now.  (And if dead, this prevents it from
     ever being continued.)  */
  lp->stopped = 1;

  /* Lazily enable ptrace event reporting if attach raced with the
     thread being created.  */
  if (WIFSTOPPED (status) && lp->must_set_ptrace_flags)
    {
      inferior *inf = find_inferior_pid (linux_target, lp->ptid.pid ());
      int options = linux_nat_ptrace_options (inf->attach_flag);

      linux_enable_event_reporting (lp->ptid.lwp (), options);
      lp->must_set_ptrace_flags = 0;
    }

  /* Handle GNU/Linux's syscall SIGTRAPs.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
    {
      /* No longer need the sysgood bit.  The ptrace event ends up
	 recorded in lp->waitstatus if we care for it.  We can carry
	 on handling the event like a regular SIGTRAP from here
	 on.  */
      status = W_STOPCODE (SIGTRAP);
      if (linux_handle_syscall_trap (lp, 0))
	return;
    }
  else
    {
      /* Almost all other ptrace-stops are known to be outside of system
	 calls, with further exceptions in linux_handle_extended_wait.  */
      lp->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* Handle GNU/Linux's extended waitstatus for trace events.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
      && linux_is_extended_waitstatus (status))
    {
      linux_nat_debug_printf ("Handling extended status 0x%06x", status);

      if (linux_handle_extended_wait (lp, status))
	return;
    }

  /* Check if the thread has exited.  */
  if (WIFEXITED (status) || WIFSIGNALED (status))
    {
      if (!report_thread_events
	  && num_lwps (lp->ptid.pid ()) > 1)
	{
	  linux_nat_debug_printf ("%s exited.",
				  lp->ptid.to_string ().c_str ());

	  /* If there is at least one more LWP, then the exit signal
	     was not the end of the debugged application and should be
	     ignored.  */
	  exit_lwp (lp);
	  return;
	}

      /* Note that even if the leader was ptrace-stopped, it can still
	 exit, if e.g., some other thread brings down the whole
	 process (calls `exit').  So don't assert that the lwp is
	 resumed.  */
      linux_nat_debug_printf ("LWP %ld exited (resumed=%d)",
			      lp->ptid.lwp (), lp->resumed);

      /* Dead LWPs aren't expected to report a pending SIGSTOP.  */
      lp->signalled = 0;

      /* Store the pending event in the waitstatus, because
	 W_EXITCODE(0,0) == 0.  */
      lp->waitstatus = host_status_to_waitstatus (status);
      return;
    }

  /* Make sure we don't report a SIGSTOP that we sent ourselves in
     an attempt to stop an LWP.  */
  if (lp->signalled
      && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
    {
      lp->signalled = 0;

      if (lp->last_resume_kind == resume_stop)
	{
	  /* The core explicitly asked for this stop; report it.  */
	  linux_nat_debug_printf ("resume_stop SIGSTOP caught for %s.",
				  lp->ptid.to_string ().c_str ());
	}
      else
	{
	  /* This is a delayed SIGSTOP.  Filter out the event.  */

	  linux_nat_debug_printf
	    ("%s %s, 0, 0 (discard delayed SIGSTOP)",
	     lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
	     lp->ptid.to_string ().c_str ());

	  linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
	  gdb_assert (lp->resumed);
	  return;
	}
    }

  /* Make sure we don't report a SIGINT that we have already displayed
     for another thread.  */
  if (lp->ignore_sigint
      && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
    {
      linux_nat_debug_printf ("Delayed SIGINT caught for %s.",
			      lp->ptid.to_string ().c_str ());

      /* This is a delayed SIGINT.  */
      lp->ignore_sigint = 0;

      linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
      linux_nat_debug_printf ("%s %s, 0, 0 (discard SIGINT)",
			      lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
			      lp->ptid.to_string ().c_str ());
      gdb_assert (lp->resumed);

      /* Discard the event.  */
      return;
    }

  /* Don't report signals that GDB isn't interested in, such as
     signals that are neither printed nor stopped upon.  Stopping all
     threads can be a bit time-consuming, so if we want decent
     performance with heavily multi-threaded programs, especially when
     they're using a high frequency timer, we'd better avoid it if we
     can.  */
  if (WIFSTOPPED (status))
    {
      enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));

      if (!target_is_non_stop_p ())
	{
	  /* Only do the below in all-stop, as we currently use SIGSTOP
	     to implement target_stop (see linux_nat_stop) in
	     non-stop.  */
	  if (signo == GDB_SIGNAL_INT && signal_pass_state (signo) == 0)
	    {
	      /* If ^C/BREAK is typed at the tty/console, SIGINT gets
		 forwarded to the entire process group, that is, all LWPs
		 will receive it - unless they're using CLONE_THREAD to
		 share signals.  Since we only want to report it once, we
		 mark it as ignored for all LWPs except this one.  */
	      iterate_over_lwps (ptid_t (lp->ptid.pid ()), set_ignore_sigint);
	      lp->ignore_sigint = 0;
	    }
	  else
	    maybe_clear_ignore_sigint (lp);
	}

      /* When using hardware single-step, we need to report every signal.
	 Otherwise, signals in pass_mask may be short-circuited
	 except signals that might be caused by a breakpoint, or SIGSTOP
	 if we sent the SIGSTOP and are waiting for it to arrive.  */
      if (!lp->step
	  && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status))
	  && (WSTOPSIG (status) != SIGSTOP
	      || !find_thread_ptid (linux_target, lp->ptid)->stop_requested)
	  && !linux_wstatus_maybe_breakpoint (status))
	{
	  linux_resume_one_lwp (lp, lp->step, signo);
	  linux_nat_debug_printf
	    ("%s %s, %s (preempt 'handle')",
	     lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
	     lp->ptid.to_string ().c_str (),
	     (signo != GDB_SIGNAL_0
	      ? strsignal (gdb_signal_to_host (signo)) : "0"));
	  return;
	}
    }

  /* An interesting event.  */
  gdb_assert (lp);
  lp->status = status;
  save_stop_reason (lp);
}
3109
3110 /* Detect zombie thread group leaders, and "exit" them. We can't reap
3111 their exits until all other threads in the group have exited. */
3112
3113 static void
3114 check_zombie_leaders (void)
3115 {
3116 for (inferior *inf : all_inferiors ())
3117 {
3118 struct lwp_info *leader_lp;
3119
3120 if (inf->pid == 0)
3121 continue;
3122
3123 leader_lp = find_lwp_pid (ptid_t (inf->pid));
3124 if (leader_lp != NULL
3125 /* Check if there are other threads in the group, as we may
3126 have raced with the inferior simply exiting. */
3127 && num_lwps (inf->pid) > 1
3128 && linux_proc_pid_is_zombie (inf->pid))
3129 {
3130 linux_nat_debug_printf ("Thread group leader %d zombie "
3131 "(it exited, or another thread execd).",
3132 inf->pid);
3133
3134 /* A leader zombie can mean one of two things:
3135
3136 - It exited, and there's an exit status pending
3137 available, or only the leader exited (not the whole
3138 program). In the latter case, we can't waitpid the
3139 leader's exit status until all other threads are gone.
3140
3141 - There are 3 or more threads in the group, and a thread
3142 other than the leader exec'd. See comments on exec
3143 events at the top of the file. We could try
3144 distinguishing the exit and exec cases, by waiting once
3145 more, and seeing if something comes out, but it doesn't
3146 sound useful. The previous leader _does_ go away, and
3147 we'll re-add the new one once we see the exec event
3148 (which is just the same as what would happen if the
3149 previous leader did exit voluntarily before some other
3150 thread execs). */
3151
3152 linux_nat_debug_printf ("Thread group leader %d vanished.", inf->pid);
3153 exit_lwp (leader_lp);
3154 }
3155 }
3156 }
3157
3158 /* Convenience function that is called when the kernel reports an exit
3159 event. This decides whether to report the event to GDB as a
3160 process exit event, a thread exit event, or to suppress the
3161 event. */
3162
3163 static ptid_t
3164 filter_exit_event (struct lwp_info *event_child,
3165 struct target_waitstatus *ourstatus)
3166 {
3167 ptid_t ptid = event_child->ptid;
3168
3169 if (num_lwps (ptid.pid ()) > 1)
3170 {
3171 if (report_thread_events)
3172 ourstatus->set_thread_exited (0);
3173 else
3174 ourstatus->set_ignore ();
3175
3176 exit_lwp (event_child);
3177 }
3178
3179 return ptid;
3180 }
3181
/* Workhorse of the Linux native target's wait method.  Pulls all
   pending events out of the kernel with waitpid, filters them, and
   selects one LWP's event to report to the core in OURSTATUS.
   Returns the ptid of the event LWP, or minus_one_ptid with
   OURSTATUS set to TARGET_WAITKIND_IGNORE/TARGET_WAITKIND_NO_RESUMED
   when there is nothing to report.  */
static ptid_t
linux_nat_wait_1 (ptid_t ptid, struct target_waitstatus *ourstatus,
		  target_wait_flags target_options)
{
  sigset_t prev_mask;
  enum resume_kind last_resume_kind;
  struct lwp_info *lp;
  int status;

  linux_nat_debug_printf ("enter");

  /* The first time we get here after starting a new inferior, we may
     not have added it to the LWP list yet - this is the earliest
     moment at which we know its PID.  */
  if (ptid.is_pid () && find_lwp_pid (ptid) == nullptr)
    {
      ptid_t lwp_ptid (ptid.pid (), ptid.pid ());

      /* Upgrade the main thread's ptid.  */
      thread_change_ptid (linux_target, ptid, lwp_ptid);
      lp = add_initial_lwp (lwp_ptid);
      lp->resumed = 1;
    }

  /* Make sure SIGCHLD is blocked until the sigsuspend below.  */
  block_child_signals (&prev_mask);

  /* First check if there is a LWP with a wait status pending.  */
  lp = iterate_over_lwps (ptid, status_callback);
  if (lp != NULL)
    {
      linux_nat_debug_printf ("Using pending wait status %s for %s.",
			      status_to_str (lp->status).c_str (),
			      lp->ptid.to_string ().c_str ());
    }

  /* But if we don't find a pending event, we'll have to wait.  Always
     pull all events out of the kernel.  We'll randomly select an
     event LWP out of all that have events, to prevent starvation.  */

  while (lp == NULL)
    {
      pid_t lwpid;

      /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
	 quirks:

	 - If the thread group leader exits while other threads in the
	   thread group still exist, waitpid(TGID, ...) hangs.  That
	   waitpid won't return an exit status until the other threads
	   in the group are reaped.

	 - When a non-leader thread execs, that thread just vanishes
	   without reporting an exit (so we'd hang if we waited for it
	   explicitly in that case).  The exec event is reported to
	   the TGID pid.  */

      errno = 0;
      lwpid = my_waitpid (-1, &status, __WALL | WNOHANG);

      linux_nat_debug_printf ("waitpid(-1, ...) returned %d, %s",
			      lwpid,
			      errno ? safe_strerror (errno) : "ERRNO-OK");

      if (lwpid > 0)
	{
	  linux_nat_debug_printf ("waitpid %ld received %s",
				  (long) lwpid,
				  status_to_str (status).c_str ());

	  linux_nat_filter_event (lwpid, status);
	  /* Retry until nothing comes out of waitpid.  A single
	     SIGCHLD can indicate more than one child stopped.  */
	  continue;
	}

      /* Now that we've pulled all events out of the kernel, resume
	 LWPs that don't have an interesting event to report.  */
      iterate_over_lwps (minus_one_ptid,
			 [] (struct lwp_info *info)
			 {
			   return resume_stopped_resumed_lwps (info, minus_one_ptid);
			 });

      /* ... and find an LWP with a status to report to the core, if
	 any.  */
      lp = iterate_over_lwps (ptid, status_callback);
      if (lp != NULL)
	break;

      /* Check for zombie thread group leaders.  Those can't be reaped
	 until all other threads in the thread group are.  */
      check_zombie_leaders ();

      /* If there are no resumed children left, bail.  We'd be stuck
	 forever in the sigsuspend call below otherwise.  */
      if (iterate_over_lwps (ptid, resumed_callback) == NULL)
	{
	  linux_nat_debug_printf ("exit (no resumed LWP)");

	  ourstatus->set_no_resumed ();

	  restore_child_signals_mask (&prev_mask);
	  return minus_one_ptid;
	}

      /* No interesting event to report to the core.  */

      if (target_options & TARGET_WNOHANG)
	{
	  linux_nat_debug_printf ("exit (ignore)");

	  ourstatus->set_ignore ();
	  restore_child_signals_mask (&prev_mask);
	  return minus_one_ptid;
	}

      /* We shouldn't end up here unless we want to try again.  */
      gdb_assert (lp == NULL);

      /* Block until we get an event reported with SIGCHLD.  */
      wait_for_signal ();
    }

  gdb_assert (lp);

  /* Consume the pending status of the chosen LWP.  */
  status = lp->status;
  lp->status = 0;

  if (!target_is_non_stop_p ())
    {
      /* Now stop all other LWP's ...  */
      iterate_over_lwps (minus_one_ptid, stop_callback);

      /* ... and wait until all of them have reported back that
	 they're no longer running.  */
      iterate_over_lwps (minus_one_ptid, stop_wait_callback);
    }

  /* If we're not waiting for a specific LWP, choose an event LWP from
     among those that have had events.  Giving equal priority to all
     LWPs that have had events helps prevent starvation.  */
  if (ptid == minus_one_ptid || ptid.is_pid ())
    select_event_lwp (ptid, &lp, &status);

  gdb_assert (lp != NULL);

  /* Now that we've selected our final event LWP, un-adjust its PC if
     it was a software breakpoint, and we can't reliably support the
     "stopped by software breakpoint" stop reason.  */
  if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
      && !USE_SIGTRAP_SIGINFO)
    {
      struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
      struct gdbarch *gdbarch = regcache->arch ();
      int decr_pc = gdbarch_decr_pc_after_break (gdbarch);

      if (decr_pc != 0)
	{
	  CORE_ADDR pc;

	  pc = regcache_read_pc (regcache);
	  regcache_write_pc (regcache, pc + decr_pc);
	}
    }

  /* We'll need this to determine whether to report a SIGSTOP as
     GDB_SIGNAL_0.  Need to take a copy because resume_clear_callback
     clears it.  */
  last_resume_kind = lp->last_resume_kind;

  if (!target_is_non_stop_p ())
    {
      /* In all-stop, from the core's perspective, all LWPs are now
	 stopped until a new resume action is sent over.  */
      iterate_over_lwps (minus_one_ptid, resume_clear_callback);
    }
  else
    {
      resume_clear_callback (lp);
    }

  if (linux_target->low_status_is_event (status))
    {
      linux_nat_debug_printf ("trap ptid is %s.",
			      lp->ptid.to_string ().c_str ());
    }

  /* An extended waitstatus recorded on the LWP (e.g. by the event
     filter) takes precedence over the raw host status.  */
  if (lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
    {
      *ourstatus = lp->waitstatus;
      lp->waitstatus.set_ignore ();
    }
  else
    *ourstatus = host_status_to_waitstatus (status);

  linux_nat_debug_printf ("exit");

  restore_child_signals_mask (&prev_mask);

  if (last_resume_kind == resume_stop
      && ourstatus->kind () == TARGET_WAITKIND_STOPPED
      && WSTOPSIG (status) == SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with
	 target_stop, and it stopped cleanly, so report as SIG0.  The
	 use of SIGSTOP is an implementation detail.  */
      ourstatus->set_stopped (GDB_SIGNAL_0);
    }

  if (ourstatus->kind () == TARGET_WAITKIND_EXITED
      || ourstatus->kind () == TARGET_WAITKIND_SIGNALLED)
    lp->core = -1;
  else
    lp->core = linux_common_core_of_thread (lp->ptid);

  if (ourstatus->kind () == TARGET_WAITKIND_EXITED)
    return filter_exit_event (lp, ourstatus);

  return lp->ptid;
}
3403
3404 /* Resume LWPs that are currently stopped without any pending status
3405 to report, but are resumed from the core's perspective. */
3406
3407 static int
3408 resume_stopped_resumed_lwps (struct lwp_info *lp, const ptid_t wait_ptid)
3409 {
3410 if (!lp->stopped)
3411 {
3412 linux_nat_debug_printf ("NOT resuming LWP %s, not stopped",
3413 lp->ptid.to_string ().c_str ());
3414 }
3415 else if (!lp->resumed)
3416 {
3417 linux_nat_debug_printf ("NOT resuming LWP %s, not resumed",
3418 lp->ptid.to_string ().c_str ());
3419 }
3420 else if (lwp_status_pending_p (lp))
3421 {
3422 linux_nat_debug_printf ("NOT resuming LWP %s, has pending status",
3423 lp->ptid.to_string ().c_str ());
3424 }
3425 else
3426 {
3427 struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
3428 struct gdbarch *gdbarch = regcache->arch ();
3429
3430 try
3431 {
3432 CORE_ADDR pc = regcache_read_pc (regcache);
3433 int leave_stopped = 0;
3434
3435 /* Don't bother if there's a breakpoint at PC that we'd hit
3436 immediately, and we're not waiting for this LWP. */
3437 if (!lp->ptid.matches (wait_ptid))
3438 {
3439 if (breakpoint_inserted_here_p (regcache->aspace (), pc))
3440 leave_stopped = 1;
3441 }
3442
3443 if (!leave_stopped)
3444 {
3445 linux_nat_debug_printf
3446 ("resuming stopped-resumed LWP %s at %s: step=%d",
3447 lp->ptid.to_string ().c_str (), paddress (gdbarch, pc),
3448 lp->step);
3449
3450 linux_resume_one_lwp_throw (lp, lp->step, GDB_SIGNAL_0);
3451 }
3452 }
3453 catch (const gdb_exception_error &ex)
3454 {
3455 if (!check_ptrace_stopped_lwp_gone (lp))
3456 throw;
3457 }
3458 }
3459
3460 return 0;
3461 }
3462
3463 ptid_t
3464 linux_nat_target::wait (ptid_t ptid, struct target_waitstatus *ourstatus,
3465 target_wait_flags target_options)
3466 {
3467 ptid_t event_ptid;
3468
3469 linux_nat_debug_printf ("[%s], [%s]", ptid.to_string ().c_str (),
3470 target_options_to_string (target_options).c_str ());
3471
3472 /* Flush the async file first. */
3473 if (target_is_async_p ())
3474 async_file_flush ();
3475
3476 /* Resume LWPs that are currently stopped without any pending status
3477 to report, but are resumed from the core's perspective. LWPs get
3478 in this state if we find them stopping at a time we're not
3479 interested in reporting the event (target_wait on a
3480 specific_process, for example, see linux_nat_wait_1), and
3481 meanwhile the event became uninteresting. Don't bother resuming
3482 LWPs we're not going to wait for if they'd stop immediately. */
3483 if (target_is_non_stop_p ())
3484 iterate_over_lwps (minus_one_ptid,
3485 [=] (struct lwp_info *info)
3486 {
3487 return resume_stopped_resumed_lwps (info, ptid);
3488 });
3489
3490 event_ptid = linux_nat_wait_1 (ptid, ourstatus, target_options);
3491
3492 /* If we requested any event, and something came out, assume there
3493 may be more. If we requested a specific lwp or process, also
3494 assume there may be more. */
3495 if (target_is_async_p ()
3496 && ((ourstatus->kind () != TARGET_WAITKIND_IGNORE
3497 && ourstatus->kind () != TARGET_WAITKIND_NO_RESUMED)
3498 || ptid != minus_one_ptid))
3499 async_file_mark ();
3500
3501 return event_ptid;
3502 }
3503
3504 /* Kill one LWP. */
3505
3506 static void
3507 kill_one_lwp (pid_t pid)
3508 {
3509 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
3510
3511 errno = 0;
3512 kill_lwp (pid, SIGKILL);
3513
3514 if (debug_linux_nat)
3515 {
3516 int save_errno = errno;
3517
3518 linux_nat_debug_printf
3519 ("kill (SIGKILL) %ld, 0, 0 (%s)", (long) pid,
3520 save_errno != 0 ? safe_strerror (save_errno) : "OK");
3521 }
3522
3523 /* Some kernels ignore even SIGKILL for processes under ptrace. */
3524
3525 errno = 0;
3526 ptrace (PTRACE_KILL, pid, 0, 0);
3527 if (debug_linux_nat)
3528 {
3529 int save_errno = errno;
3530
3531 linux_nat_debug_printf
3532 ("PTRACE_KILL %ld, 0, 0 (%s)", (long) pid,
3533 save_errno ? safe_strerror (save_errno) : "OK");
3534 }
3535 }
3536
3537 /* Wait for an LWP to die. */
3538
3539 static void
3540 kill_wait_one_lwp (pid_t pid)
3541 {
3542 pid_t res;
3543
3544 /* We must make sure that there are no pending events (delayed
3545 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
3546 program doesn't interfere with any following debugging session. */
3547
3548 do
3549 {
3550 res = my_waitpid (pid, NULL, __WALL);
3551 if (res != (pid_t) -1)
3552 {
3553 linux_nat_debug_printf ("wait %ld received unknown.", (long) pid);
3554
3555 /* The Linux kernel sometimes fails to kill a thread
3556 completely after PTRACE_KILL; that goes from the stop
3557 point in do_fork out to the one in get_signal_to_deliver
3558 and waits again. So kill it again. */
3559 kill_one_lwp (pid);
3560 }
3561 }
3562 while (res == pid);
3563
3564 gdb_assert (res == -1 && errno == ECHILD);
3565 }
3566
3567 /* Callback for iterate_over_lwps. */
3568
3569 static int
3570 kill_callback (struct lwp_info *lp)
3571 {
3572 kill_one_lwp (lp->ptid.lwp ());
3573 return 0;
3574 }
3575
3576 /* Callback for iterate_over_lwps. */
3577
3578 static int
3579 kill_wait_callback (struct lwp_info *lp)
3580 {
3581 kill_wait_one_lwp (lp->ptid.lwp ());
3582 return 0;
3583 }
3584
3585 /* Kill the fork children of any threads of inferior INF that are
3586 stopped at a fork event. */
3587
3588 static void
3589 kill_unfollowed_fork_children (struct inferior *inf)
3590 {
3591 for (thread_info *thread : inf->non_exited_threads ())
3592 {
3593 struct target_waitstatus *ws = &thread->pending_follow;
3594
3595 if (ws->kind () == TARGET_WAITKIND_FORKED
3596 || ws->kind () == TARGET_WAITKIND_VFORKED)
3597 {
3598 ptid_t child_ptid = ws->child_ptid ();
3599 int child_pid = child_ptid.pid ();
3600 int child_lwp = child_ptid.lwp ();
3601
3602 kill_one_lwp (child_lwp);
3603 kill_wait_one_lwp (child_lwp);
3604
3605 /* Let the arch-specific native code know this process is
3606 gone. */
3607 linux_target->low_forget_process (child_pid);
3608 }
3609 }
3610 }
3611
3612 void
3613 linux_nat_target::kill ()
3614 {
3615 /* If we're stopped while forking and we haven't followed yet,
3616 kill the other task. We need to do this first because the
3617 parent will be sleeping if this is a vfork. */
3618 kill_unfollowed_fork_children (current_inferior ());
3619
3620 if (forks_exist_p ())
3621 linux_fork_killall ();
3622 else
3623 {
3624 ptid_t ptid = ptid_t (inferior_ptid.pid ());
3625
3626 /* Stop all threads before killing them, since ptrace requires
3627 that the thread is stopped to successfully PTRACE_KILL. */
3628 iterate_over_lwps (ptid, stop_callback);
3629 /* ... and wait until all of them have reported back that
3630 they're no longer running. */
3631 iterate_over_lwps (ptid, stop_wait_callback);
3632
3633 /* Kill all LWP's ... */
3634 iterate_over_lwps (ptid, kill_callback);
3635
3636 /* ... and wait until we've flushed all events. */
3637 iterate_over_lwps (ptid, kill_wait_callback);
3638 }
3639
3640 target_mourn_inferior (inferior_ptid);
3641 }
3642
3643 void
3644 linux_nat_target::mourn_inferior ()
3645 {
3646 int pid = inferior_ptid.pid ();
3647
3648 purge_lwp_list (pid);
3649
3650 close_proc_mem_file (pid);
3651
3652 if (! forks_exist_p ())
3653 /* Normal case, no other forks available. */
3654 inf_ptrace_target::mourn_inferior ();
3655 else
3656 /* Multi-fork case. The current inferior_ptid has exited, but
3657 there are other viable forks to debug. Delete the exiting
3658 one and context-switch to the first available. */
3659 linux_fork_mourn_inferior ();
3660
3661 /* Let the arch-specific native code know this process is gone. */
3662 linux_target->low_forget_process (pid);
3663 }
3664
3665 /* Convert a native/host siginfo object, into/from the siginfo in the
3666 layout of the inferiors' architecture. */
3667
3668 static void
3669 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
3670 {
3671 /* If the low target didn't do anything, then just do a straight
3672 memcpy. */
3673 if (!linux_target->low_siginfo_fixup (siginfo, inf_siginfo, direction))
3674 {
3675 if (direction == 1)
3676 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
3677 else
3678 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
3679 }
3680 }
3681
/* Transfer helper for TARGET_OBJECT_SIGNAL_INFO: read or write the
   current LWP's siginfo via PTRACE_GETSIGINFO/PTRACE_SETSIGINFO.
   At least one of READBUF/WRITEBUF must be non-NULL (asserted);
   READBUF takes priority.  Writes are done read-modify-write so a
   partial write only touches the [OFFSET, OFFSET+LEN) byte range.
   Returns TARGET_XFER_E_IO on ptrace failure or out-of-range
   OFFSET.  */
static enum target_xfer_status
linux_xfer_siginfo (enum target_object object,
		    const char *annex, gdb_byte *readbuf,
		    const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
		    ULONGEST *xfered_len)
{
  int pid;
  siginfo_t siginfo;
  gdb_byte inf_siginfo[sizeof (siginfo_t)];

  gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
  gdb_assert (readbuf || writebuf);

  /* Prefer the LWP id; fall back to the pid when no LWP is set.  */
  pid = inferior_ptid.lwp ();
  if (pid == 0)
    pid = inferior_ptid.pid ();

  if (offset > sizeof (siginfo))
    return TARGET_XFER_E_IO;

  errno = 0;
  ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
  if (errno != 0)
    return TARGET_XFER_E_IO;

  /* When GDB is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDB should look the same as debugging it
     with a 32-bit GDB, we need to convert it.  GDB core always sees
     the converted layout, so any read/write will have to be done
     post-conversion.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp the request to the end of the object.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      errno = 0;
      ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
      if (errno != 0)
	return TARGET_XFER_E_IO;
    }

  *xfered_len = len;
  return TARGET_XFER_OK;
}
3736
3737 static enum target_xfer_status
3738 linux_nat_xfer_osdata (enum target_object object,
3739 const char *annex, gdb_byte *readbuf,
3740 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
3741 ULONGEST *xfered_len);
3742
3743 static enum target_xfer_status
3744 linux_proc_xfer_memory_partial (gdb_byte *readbuf, const gdb_byte *writebuf,
3745 ULONGEST offset, LONGEST len, ULONGEST *xfered_len);
3746
3747 enum target_xfer_status
3748 linux_nat_target::xfer_partial (enum target_object object,
3749 const char *annex, gdb_byte *readbuf,
3750 const gdb_byte *writebuf,
3751 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
3752 {
3753 if (object == TARGET_OBJECT_SIGNAL_INFO)
3754 return linux_xfer_siginfo (object, annex, readbuf, writebuf,
3755 offset, len, xfered_len);
3756
3757 /* The target is connected but no live inferior is selected. Pass
3758 this request down to a lower stratum (e.g., the executable
3759 file). */
3760 if (object == TARGET_OBJECT_MEMORY && inferior_ptid == null_ptid)
3761 return TARGET_XFER_EOF;
3762
3763 if (object == TARGET_OBJECT_AUXV)
3764 return memory_xfer_auxv (this, object, annex, readbuf, writebuf,
3765 offset, len, xfered_len);
3766
3767 if (object == TARGET_OBJECT_OSDATA)
3768 return linux_nat_xfer_osdata (object, annex, readbuf, writebuf,
3769 offset, len, xfered_len);
3770
3771 if (object == TARGET_OBJECT_MEMORY)
3772 {
3773 /* GDB calculates all addresses in the largest possible address
3774 width. The address width must be masked before its final use
3775 by linux_proc_xfer_partial.
3776
3777 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
3778 int addr_bit = gdbarch_addr_bit (target_gdbarch ());
3779
3780 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
3781 offset &= ((ULONGEST) 1 << addr_bit) - 1;
3782
3783 return linux_proc_xfer_memory_partial (readbuf, writebuf,
3784 offset, len, xfered_len);
3785 }
3786
3787 return inf_ptrace_target::xfer_partial (object, annex, readbuf, writebuf,
3788 offset, len, xfered_len);
3789 }
3790
3791 bool
3792 linux_nat_target::thread_alive (ptid_t ptid)
3793 {
3794 /* As long as a PTID is in lwp list, consider it alive. */
3795 return find_lwp_pid (ptid) != NULL;
3796 }
3797
3798 /* Implement the to_update_thread_list target method for this
3799 target. */
3800
3801 void
3802 linux_nat_target::update_thread_list ()
3803 {
3804 /* We add/delete threads from the list as clone/exit events are
3805 processed, so just try deleting exited threads still in the
3806 thread list. */
3807 delete_exited_threads ();
3808
3809 /* Update the processor core that each lwp/thread was last seen
3810 running on. */
3811 for (lwp_info *lwp : all_lwps ())
3812 {
3813 /* Avoid accessing /proc if the thread hasn't run since we last
3814 time we fetched the thread's core. Accessing /proc becomes
3815 noticeably expensive when we have thousands of LWPs. */
3816 if (lwp->core == -1)
3817 lwp->core = linux_common_core_of_thread (lwp->ptid);
3818 }
3819 }
3820
std::string
linux_nat_target::pid_to_str (ptid_t ptid)
{
  /* Use the "LWP N" form whenever the LWP id carries information
     beyond the pid: either it differs from the pid, or the process
     has multiple LWPs.  */
  bool show_lwp = (ptid.lwp_p ()
		   && (ptid.pid () != ptid.lwp ()
		       || num_lwps (ptid.pid ()) > 1));

  if (show_lwp)
    return string_printf ("LWP %ld", ptid.lwp ());

  return normal_pid_to_str (ptid);
}
3831
3832 const char *
3833 linux_nat_target::thread_name (struct thread_info *thr)
3834 {
3835 return linux_proc_tid_get_name (thr->ptid);
3836 }
3837
3838 /* Accepts an integer PID; Returns a string representing a file that
3839 can be opened to get the symbols for the child process. */
3840
3841 char *
3842 linux_nat_target::pid_to_exec_file (int pid)
3843 {
3844 return linux_proc_pid_to_exec_file (pid);
3845 }
3846
3847 /* Object representing an /proc/PID/mem open file. We keep one such
3848 file open per inferior.
3849
3850 It might be tempting to think about only ever opening one file at
3851 most for all inferiors, closing/reopening the file as we access
3852 memory of different inferiors, to minimize number of file
3853 descriptors open, which can otherwise run into resource limits.
3854 However, that does not work correctly -- if the inferior execs and
3855 we haven't processed the exec event yet, and, we opened a
3856 /proc/PID/mem file, we will get a mem file accessing the post-exec
3857 address space, thinking we're opening it for the pre-exec address
3858 space. That is dangerous as we can poke memory (e.g. clearing
3859 breakpoints) in the post-exec memory by mistake, corrupting the
3860 inferior. For that reason, we open the mem file as early as
3861 possible, right after spawning, forking or attaching to the
3862 inferior, when the inferior is stopped and thus before it has a
3863 chance of execing.
3864
3865 Note that after opening the file, even if the thread we opened it
3866 for subsequently exits, the open file is still usable for accessing
3867 memory. It's only when the whole process exits or execs that the
3868 file becomes invalid, at which point reads/writes return EOF. */
3869
3870 class proc_mem_file
3871 {
3872 public:
3873 proc_mem_file (ptid_t ptid, int fd)
3874 : m_ptid (ptid), m_fd (fd)
3875 {
3876 gdb_assert (m_fd != -1);
3877 }
3878
3879 ~proc_mem_file ()
3880 {
3881 linux_nat_debug_printf ("closing fd %d for /proc/%d/task/%ld/mem",
3882 m_fd, m_ptid.pid (), m_ptid.lwp ());
3883 close (m_fd);
3884 }
3885
3886 DISABLE_COPY_AND_ASSIGN (proc_mem_file);
3887
3888 int fd ()
3889 {
3890 return m_fd;
3891 }
3892
3893 private:
3894 /* The LWP this file was opened for. Just for debugging
3895 purposes. */
3896 ptid_t m_ptid;
3897
3898 /* The file descriptor. */
3899 int m_fd = -1;
3900 };
3901
/* The map between an inferior process id, and the open /proc/PID/mem
   file.  This is stored in a map instead of in a per-inferior
   structure because we need to be able to access memory of processes
   which don't have a corresponding struct inferior object.  E.g.,
   with "detach-on-fork on" (the default), and "follow-fork parent"
   (also default), we don't create an inferior for the fork child, but
   we still need to remove breakpoints from the fork child's
   memory.  Erasing an entry destroys the proc_mem_file, whose
   destructor closes the underlying file descriptor.  */
static std::unordered_map<int, proc_mem_file> proc_mem_file_map;
3911
3912 /* Close the /proc/PID/mem file for PID. */
3913
3914 static void
3915 close_proc_mem_file (pid_t pid)
3916 {
3917 proc_mem_file_map.erase (pid);
3918 }
3919
3920 /* Open the /proc/PID/mem file for the process (thread group) of PTID.
3921 We actually open /proc/PID/task/LWP/mem, as that's the LWP we know
3922 exists and is stopped right now. We prefer the
3923 /proc/PID/task/LWP/mem form over /proc/LWP/mem to avoid tid-reuse
3924 races, just in case this is ever called on an already-waited
3925 LWP. */
3926
3927 static void
3928 open_proc_mem_file (ptid_t ptid)
3929 {
3930 auto iter = proc_mem_file_map.find (ptid.pid ());
3931 gdb_assert (iter == proc_mem_file_map.end ());
3932
3933 char filename[64];
3934 xsnprintf (filename, sizeof filename,
3935 "/proc/%d/task/%ld/mem", ptid.pid (), ptid.lwp ());
3936
3937 int fd = gdb_open_cloexec (filename, O_RDWR | O_LARGEFILE, 0).release ();
3938
3939 if (fd == -1)
3940 {
3941 warning (_("opening /proc/PID/mem file for lwp %d.%ld failed: %s (%d)"),
3942 ptid.pid (), ptid.lwp (),
3943 safe_strerror (errno), errno);
3944 return;
3945 }
3946
3947 proc_mem_file_map.emplace (std::piecewise_construct,
3948 std::forward_as_tuple (ptid.pid ()),
3949 std::forward_as_tuple (ptid, fd));
3950
3951 linux_nat_debug_printf ("opened fd %d for lwp %d.%ld\n",
3952 fd, ptid.pid (), ptid.lwp ());
3953 }
3954
3955 /* Implement the to_xfer_partial target method using /proc/PID/mem.
3956 Because we can use a single read/write call, this can be much more
3957 efficient than banging away at PTRACE_PEEKTEXT. Also, unlike
3958 PTRACE_PEEKTEXT/PTRACE_POKETEXT, this works with running
3959 threads. */
3960
3961 static enum target_xfer_status
3962 linux_proc_xfer_memory_partial (gdb_byte *readbuf, const gdb_byte *writebuf,
3963 ULONGEST offset, LONGEST len,
3964 ULONGEST *xfered_len)
3965 {
3966 ssize_t ret;
3967
3968 auto iter = proc_mem_file_map.find (inferior_ptid.pid ());
3969 if (iter == proc_mem_file_map.end ())
3970 return TARGET_XFER_EOF;
3971
3972 int fd = iter->second.fd ();
3973
3974 gdb_assert (fd != -1);
3975
3976 /* Use pread64/pwrite64 if available, since they save a syscall and can
3977 handle 64-bit offsets even on 32-bit platforms (for instance, SPARC
3978 debugging a SPARC64 application). */
3979 #ifdef HAVE_PREAD64
3980 ret = (readbuf ? pread64 (fd, readbuf, len, offset)
3981 : pwrite64 (fd, writebuf, len, offset));
3982 #else
3983 ret = lseek (fd, offset, SEEK_SET);
3984 if (ret != -1)
3985 ret = (readbuf ? read (fd, readbuf, len)
3986 : write (fd, writebuf, len));
3987 #endif
3988
3989 if (ret == -1)
3990 {
3991 linux_nat_debug_printf ("accessing fd %d for pid %d failed: %s (%d)\n",
3992 fd, inferior_ptid.pid (),
3993 safe_strerror (errno), errno);
3994 return TARGET_XFER_EOF;
3995 }
3996 else if (ret == 0)
3997 {
3998 /* EOF means the address space is gone, the whole process exited
3999 or execed. */
4000 linux_nat_debug_printf ("accessing fd %d for pid %d got EOF\n",
4001 fd, inferior_ptid.pid ());
4002 return TARGET_XFER_EOF;
4003 }
4004 else
4005 {
4006 *xfered_len = ret;
4007 return TARGET_XFER_OK;
4008 }
4009 }
4010
4011 /* Parse LINE as a signal set and add its set bits to SIGS. */
4012
4013 static void
4014 add_line_to_sigset (const char *line, sigset_t *sigs)
4015 {
4016 int len = strlen (line) - 1;
4017 const char *p;
4018 int signum;
4019
4020 if (line[len] != '\n')
4021 error (_("Could not parse signal set: %s"), line);
4022
4023 p = line;
4024 signum = len * 4;
4025 while (len-- > 0)
4026 {
4027 int digit;
4028
4029 if (*p >= '0' && *p <= '9')
4030 digit = *p - '0';
4031 else if (*p >= 'a' && *p <= 'f')
4032 digit = *p - 'a' + 10;
4033 else
4034 error (_("Could not parse signal set: %s"), line);
4035
4036 signum -= 4;
4037
4038 if (digit & 1)
4039 sigaddset (sigs, signum + 1);
4040 if (digit & 2)
4041 sigaddset (sigs, signum + 2);
4042 if (digit & 4)
4043 sigaddset (sigs, signum + 3);
4044 if (digit & 8)
4045 sigaddset (sigs, signum + 4);
4046
4047 p++;
4048 }
4049 }
4050
4051 /* Find process PID's pending signals from /proc/pid/status and set
4052 SIGS to match. */
4053
4054 void
4055 linux_proc_pending_signals (int pid, sigset_t *pending,
4056 sigset_t *blocked, sigset_t *ignored)
4057 {
4058 char buffer[PATH_MAX], fname[PATH_MAX];
4059
4060 sigemptyset (pending);
4061 sigemptyset (blocked);
4062 sigemptyset (ignored);
4063 xsnprintf (fname, sizeof fname, "/proc/%d/status", pid);
4064 gdb_file_up procfile = gdb_fopen_cloexec (fname, "r");
4065 if (procfile == NULL)
4066 error (_("Could not open %s"), fname);
4067
4068 while (fgets (buffer, PATH_MAX, procfile.get ()) != NULL)
4069 {
4070 /* Normal queued signals are on the SigPnd line in the status
4071 file. However, 2.6 kernels also have a "shared" pending
4072 queue for delivering signals to a thread group, so check for
4073 a ShdPnd line also.
4074
4075 Unfortunately some Red Hat kernels include the shared pending
4076 queue but not the ShdPnd status field. */
4077
4078 if (startswith (buffer, "SigPnd:\t"))
4079 add_line_to_sigset (buffer + 8, pending);
4080 else if (startswith (buffer, "ShdPnd:\t"))
4081 add_line_to_sigset (buffer + 8, pending);
4082 else if (startswith (buffer, "SigBlk:\t"))
4083 add_line_to_sigset (buffer + 8, blocked);
4084 else if (startswith (buffer, "SigIgn:\t"))
4085 add_line_to_sigset (buffer + 8, ignored);
4086 }
4087 }
4088
4089 static enum target_xfer_status
4090 linux_nat_xfer_osdata (enum target_object object,
4091 const char *annex, gdb_byte *readbuf,
4092 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4093 ULONGEST *xfered_len)
4094 {
4095 gdb_assert (object == TARGET_OBJECT_OSDATA);
4096
4097 *xfered_len = linux_common_xfer_osdata (annex, readbuf, offset, len);
4098 if (*xfered_len == 0)
4099 return TARGET_XFER_EOF;
4100 else
4101 return TARGET_XFER_OK;
4102 }
4103
/* Fetch the static tracepoint markers known to the in-process agent,
   keeping only those whose string id matches STRID (or all of them
   when STRID is NULL).  */

std::vector<static_tracepoint_marker>
linux_nat_target::static_tracepoint_markers_by_strid (const char *strid)
{
  char s[IPA_CMD_BUF_SIZE];
  int pid = inferior_ptid.pid ();
  std::vector<static_tracepoint_marker> markers;
  const char *p = s;
  ptid_t ptid = ptid_t (pid, 0);
  static_tracepoint_marker marker;

  /* Pause all */
  target_stop (ptid);

  /* Ask the agent for the first chunk of the marker list; the reply
     is written back into S.  */
  memcpy (s, "qTfSTM", sizeof ("qTfSTM"));
  s[sizeof ("qTfSTM")] = 0;

  agent_run_command (pid, s, strlen (s) + 1);

  /* Unpause all.  */
  SCOPE_EXIT { target_continue_no_signal (ptid); };

  /* Replies start with 'm' while markers remain; each reply carries
     one or more marker definitions, parsed in place.  */
  while (*p++ == 'm')
    {
      do
	{
	  parse_static_tracepoint_marker_definition (p, &p, &marker);

	  if (strid == NULL || marker.str_id == strid)
	    markers.push_back (std::move (marker));
	}
      while (*p++ == ',');	/* comma-separated list */

      /* Request the next chunk of the list.  */
      memcpy (s, "qTsSTM", sizeof ("qTsSTM"));
      s[sizeof ("qTsSTM")] = 0;
      agent_run_command (pid, s, strlen (s) + 1);
      p = s;
    }

  return markers;
}
4144
/* target_is_async_p implementation.  */

bool
linux_nat_target::is_async_p ()
{
  /* Async mode is tracked by whether the event pipe exists (see
     linux_async_pipe).  */
  return linux_is_async_p ();
}
4152
/* target_can_async_p implementation.  */

bool
linux_nat_target::can_async_p ()
{
  /* This flag should be checked in the common target.c code.  */
  gdb_assert (target_async_permitted);

  /* Otherwise, this target is always able to support async mode.  */
  return true;
}
4164
/* target_supports_non_stop implementation.  */

bool
linux_nat_target::supports_non_stop ()
{
  return true;
}
4170
/* to_always_non_stop_p implementation.  */

bool
linux_nat_target::always_non_stop_p ()
{
  /* The low level is always managed in non-stop mode; see
     target.h for the contract of this method.  */
  return true;
}
4178
/* target_supports_multi_process implementation.  */

bool
linux_nat_target::supports_multi_process ()
{
  return true;
}
4184
/* target_supports_disable_randomization implementation.  */

bool
linux_nat_target::supports_disable_randomization ()
{
  return true;
}
4190
/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
   so we notice when any child changes state, and notify the
   event-loop; it allows us to use sigsuspend in linux_nat_wait_1
   above to wait for the arrival of a SIGCHLD.  Note that only
   async-signal-safe operations may be performed here.  */

static void
sigchld_handler (int signo)
{
  /* The calls below may clobber errno, and the interrupted code may
     depend on it, so save and restore it around the handler.  */
  int old_errno = errno;

  if (debug_linux_nat)
    gdb_stdlog->write_async_safe ("sigchld\n", sizeof ("sigchld\n") - 1);

  if (signo == SIGCHLD
      && linux_nat_event_pipe[0] != -1)
    async_file_mark (); /* Let the event loop know that there are
			   events to handle.  */

  errno = old_errno;
}
4211
/* Callback registered with the target events file descriptor.
   ERROR and CLIENT_DATA are unused; we simply hand control to the
   inferior event handler.  */

static void
handle_target_event (int error, gdb_client_data client_data)
{
  inferior_event_handler (INF_REG_EVENT);
}
4219
4220 /* Create/destroy the target events pipe. Returns previous state. */
4221
4222 static int
4223 linux_async_pipe (int enable)
4224 {
4225 int previous = linux_is_async_p ();
4226
4227 if (previous != enable)
4228 {
4229 sigset_t prev_mask;
4230
4231 /* Block child signals while we create/destroy the pipe, as
4232 their handler writes to it. */
4233 block_child_signals (&prev_mask);
4234
4235 if (enable)
4236 {
4237 if (gdb_pipe_cloexec (linux_nat_event_pipe) == -1)
4238 internal_error (__FILE__, __LINE__,
4239 "creating event pipe failed.");
4240
4241 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
4242 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
4243 }
4244 else
4245 {
4246 close (linux_nat_event_pipe[0]);
4247 close (linux_nat_event_pipe[1]);
4248 linux_nat_event_pipe[0] = -1;
4249 linux_nat_event_pipe[1] = -1;
4250 }
4251
4252 restore_child_signals_mask (&prev_mask);
4253 }
4254
4255 return previous;
4256 }
4257
/* target_async_wait_fd implementation: the read end of the event
   pipe, which the SIGCHLD handler marks via async_file_mark.  */

int
linux_nat_target::async_wait_fd ()
{
  return linux_nat_event_pipe[0];
}
4263
4264 /* target_async implementation. */
4265
4266 void
4267 linux_nat_target::async (int enable)
4268 {
4269 if (enable)
4270 {
4271 if (!linux_async_pipe (1))
4272 {
4273 add_file_handler (linux_nat_event_pipe[0],
4274 handle_target_event, NULL,
4275 "linux-nat");
4276 /* There may be pending events to handle. Tell the event loop
4277 to poll them. */
4278 async_file_mark ();
4279 }
4280 }
4281 else
4282 {
4283 delete_file_handler (linux_nat_event_pipe[0]);
4284 linux_async_pipe (0);
4285 }
4286 return;
4287 }
4288
/* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
   event came out.  Callback for iterate_over_lwps; always returns 0
   so that iteration continues over the remaining LWPs.  */

static int
linux_nat_stop_lwp (struct lwp_info *lwp)
{
  if (!lwp->stopped)
    {
      linux_nat_debug_printf ("running -> suspending %s",
			      lwp->ptid.to_string ().c_str ());


      /* A stop was already requested for this LWP; don't request
	 another one.  */
      if (lwp->last_resume_kind == resume_stop)
	{
	  linux_nat_debug_printf ("already stopping LWP %ld at GDB's request",
				  lwp->ptid.lwp ());
	  return 0;
	}

      stop_callback (lwp);
      lwp->last_resume_kind = resume_stop;
    }
  else
    {
      /* Already known to be stopped; do nothing.  */

      if (debug_linux_nat)
	{
	  if (find_thread_ptid (linux_target, lwp->ptid)->stop_requested)
	    linux_nat_debug_printf ("already stopped/stop_requested %s",
				    lwp->ptid.to_string ().c_str ());
	  else
	    linux_nat_debug_printf ("already stopped/no stop_requested yet %s",
				    lwp->ptid.to_string ().c_str ());
	}
    }
  return 0;
}
4327
/* target_stop implementation.  Request a stop of every LWP
   matching PTID.  */

void
linux_nat_target::stop (ptid_t ptid)
{
  LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;
  iterate_over_lwps (ptid, linux_nat_stop_lwp);
}
4334
/* target_close implementation.  Tear down async mode before
   chaining to the ptrace layer's close.  */

void
linux_nat_target::close ()
{
  /* Unregister from the event loop.  */
  if (is_async_p ())
    async (0);

  inf_ptrace_target::close ();
}
4344
4345 /* When requests are passed down from the linux-nat layer to the
4346 single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
4347 used. The address space pointer is stored in the inferior object,
4348 but the common code that is passed such ptid can't tell whether
4349 lwpid is a "main" process id or not (it assumes so). We reverse
4350 look up the "main" process id from the lwp here. */
4351
4352 struct address_space *
4353 linux_nat_target::thread_address_space (ptid_t ptid)
4354 {
4355 struct lwp_info *lwp;
4356 struct inferior *inf;
4357 int pid;
4358
4359 if (ptid.lwp () == 0)
4360 {
4361 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
4362 tgid. */
4363 lwp = find_lwp_pid (ptid);
4364 pid = lwp->ptid.pid ();
4365 }
4366 else
4367 {
4368 /* A (pid,lwpid,0) ptid. */
4369 pid = ptid.pid ();
4370 }
4371
4372 inf = find_inferior_pid (this, pid);
4373 gdb_assert (inf != NULL);
4374 return inf->aspace;
4375 }
4376
4377 /* Return the cached value of the processor core for thread PTID. */
4378
4379 int
4380 linux_nat_target::core_of_thread (ptid_t ptid)
4381 {
4382 struct lwp_info *info = find_lwp_pid (ptid);
4383
4384 if (info)
4385 return info->core;
4386 return -1;
4387 }
4388
4389 /* Implementation of to_filesystem_is_local. */
4390
4391 bool
4392 linux_nat_target::filesystem_is_local ()
4393 {
4394 struct inferior *inf = current_inferior ();
4395
4396 if (inf->fake_pid_p || inf->pid == 0)
4397 return true;
4398
4399 return linux_ns_same (inf->pid, LINUX_NS_MNT);
4400 }
4401
4402 /* Convert the INF argument passed to a to_fileio_* method
4403 to a process ID suitable for passing to its corresponding
4404 linux_mntns_* function. If INF is non-NULL then the
4405 caller is requesting the filesystem seen by INF. If INF
4406 is NULL then the caller is requesting the filesystem seen
4407 by the GDB. We fall back to GDB's filesystem in the case
4408 that INF is non-NULL but its PID is unknown. */
4409
4410 static pid_t
4411 linux_nat_fileio_pid_of (struct inferior *inf)
4412 {
4413 if (inf == NULL || inf->fake_pid_p || inf->pid == 0)
4414 return getpid ();
4415 else
4416 return inf->pid;
4417 }
4418
4419 /* Implementation of to_fileio_open. */
4420
4421 int
4422 linux_nat_target::fileio_open (struct inferior *inf, const char *filename,
4423 int flags, int mode, int warn_if_slow,
4424 int *target_errno)
4425 {
4426 int nat_flags;
4427 mode_t nat_mode;
4428 int fd;
4429
4430 if (fileio_to_host_openflags (flags, &nat_flags) == -1
4431 || fileio_to_host_mode (mode, &nat_mode) == -1)
4432 {
4433 *target_errno = FILEIO_EINVAL;
4434 return -1;
4435 }
4436
4437 fd = linux_mntns_open_cloexec (linux_nat_fileio_pid_of (inf),
4438 filename, nat_flags, nat_mode);
4439 if (fd == -1)
4440 *target_errno = host_to_fileio_error (errno);
4441
4442 return fd;
4443 }
4444
4445 /* Implementation of to_fileio_readlink. */
4446
4447 gdb::optional<std::string>
4448 linux_nat_target::fileio_readlink (struct inferior *inf, const char *filename,
4449 int *target_errno)
4450 {
4451 char buf[PATH_MAX];
4452 int len;
4453
4454 len = linux_mntns_readlink (linux_nat_fileio_pid_of (inf),
4455 filename, buf, sizeof (buf));
4456 if (len < 0)
4457 {
4458 *target_errno = host_to_fileio_error (errno);
4459 return {};
4460 }
4461
4462 return std::string (buf, len);
4463 }
4464
4465 /* Implementation of to_fileio_unlink. */
4466
4467 int
4468 linux_nat_target::fileio_unlink (struct inferior *inf, const char *filename,
4469 int *target_errno)
4470 {
4471 int ret;
4472
4473 ret = linux_mntns_unlink (linux_nat_fileio_pid_of (inf),
4474 filename);
4475 if (ret == -1)
4476 *target_errno = host_to_fileio_error (errno);
4477
4478 return ret;
4479 }
4480
/* Implementation of the to_thread_events method.  Record whether
   thread create/exit events should be reported.  */

void
linux_nat_target::thread_events (int enable)
{
  report_thread_events = enable;
}
4488
/* Default constructor.  */

linux_nat_target::linux_nat_target ()
{
  /* We don't change the stratum; this target will sit at
     process_stratum and thread_db will set at thread_stratum.  This
     is a little strange, since this is a multi-threaded-capable
     target, but we want to be on the stack below thread_db, and we
     also want to be used for single-threaded processes.  */
}
4497
4498 /* See linux-nat.h. */
4499
4500 int
4501 linux_nat_get_siginfo (ptid_t ptid, siginfo_t *siginfo)
4502 {
4503 int pid;
4504
4505 pid = ptid.lwp ();
4506 if (pid == 0)
4507 pid = ptid.pid ();
4508
4509 errno = 0;
4510 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, siginfo);
4511 if (errno != 0)
4512 {
4513 memset (siginfo, 0, sizeof (*siginfo));
4514 return 0;
4515 }
4516 return 1;
4517 }
4518
/* See nat/linux-nat.h.  */

ptid_t
current_lwp_ptid (void)
{
  /* The current ptid must identify an LWP, not a bare process.  */
  gdb_assert (inferior_ptid.lwp_p ());
  return inferior_ptid;
}
4527
4528 void _initialize_linux_nat ();
4529 void
4530 _initialize_linux_nat ()
4531 {
4532 add_setshow_boolean_cmd ("linux-nat", class_maintenance,
4533 &debug_linux_nat, _("\
4534 Set debugging of GNU/Linux native target."), _(" \
4535 Show debugging of GNU/Linux native target."), _(" \
4536 When on, print debug messages relating to the GNU/Linux native target."),
4537 nullptr,
4538 show_debug_linux_nat,
4539 &setdebuglist, &showdebuglist);
4540
4541 add_setshow_boolean_cmd ("linux-namespaces", class_maintenance,
4542 &debug_linux_namespaces, _("\
4543 Set debugging of GNU/Linux namespaces module."), _("\
4544 Show debugging of GNU/Linux namespaces module."), _("\
4545 Enables printf debugging output."),
4546 NULL,
4547 NULL,
4548 &setdebuglist, &showdebuglist);
4549
4550 /* Install a SIGCHLD handler. */
4551 sigchld_action.sa_handler = sigchld_handler;
4552 sigemptyset (&sigchld_action.sa_mask);
4553 sigchld_action.sa_flags = SA_RESTART;
4554
4555 /* Make it the default. */
4556 sigaction (SIGCHLD, &sigchld_action, NULL);
4557
4558 /* Make sure we don't block SIGCHLD during a sigsuspend. */
4559 gdb_sigmask (SIG_SETMASK, NULL, &suspend_mask);
4560 sigdelset (&suspend_mask, SIGCHLD);
4561
4562 sigemptyset (&blocked_mask);
4563
4564 lwp_lwpid_htab_create ();
4565 }
4566 \f
4567
4568 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
4569 the GNU/Linux Threads library and therefore doesn't really belong
4570 here. */
4571
/* NPTL reserves the first two RT signals, but does not provide any
   way for the debugger to query the signal numbers - fortunately
   they don't change.  __SIGRTMIN is glibc's name for the lowest
   real-time signal number.  */
static int lin_thread_signals[] = { __SIGRTMIN, __SIGRTMIN + 1 };
4576
/* See linux-nat.h.  */

unsigned int
lin_thread_get_thread_signal_num (void)
{
  /* Number of entries in the lin_thread_signals array above.  */
  return sizeof (lin_thread_signals) / sizeof (lin_thread_signals[0]);
}
4584
/* See linux-nat.h.  */

int
lin_thread_get_thread_signal (unsigned int i)
{
  /* I must be below lin_thread_get_thread_signal_num ().  */
  gdb_assert (i < lin_thread_get_thread_signal_num ());
  return lin_thread_signals[i];
}