/* GNU/Linux native-dependent code common to multiple platforms.

   Copyright (C) 2001-2015 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "inferior.h"
#include "infrun.h"
#include "target.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#ifdef HAVE_TKILL_SYSCALL
#include <unistd.h>
#include <sys/syscall.h>
#endif
#include <sys/ptrace.h>
#include "linux-nat.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "linux-fork.h"
#include "gdbthread.h"
#include "gdbcmd.h"
#include "regcache.h"
#include "regset.h"
#include "inf-child.h"
#include "inf-ptrace.h"
#include "auxv.h"
#include <sys/procfs.h>		/* for elf_gregset etc.  */
#include "elf-bfd.h"		/* for elfcore_write_* */
#include "gregset.h"		/* for gregset */
#include "gdbcore.h"		/* for get_exec_file */
#include <ctype.h>		/* for isdigit */
#include <sys/stat.h>		/* for struct stat */
#include <fcntl.h>		/* for O_RDONLY */
#include "inf-loop.h"
#include "event-loop.h"
#include "event-top.h"
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include "xml-support.h"
#include <sys/vfs.h>
#include "solib.h"
#include "nat/linux-osdata.h"
#include "linux-tdep.h"
#include "symfile.h"
#include "agent.h"
#include "tracepoint.h"
#include "buffer.h"
#include "target-descriptions.h"
#include "filestuff.h"
#include "objfiles.h"

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif /* HAVE_PERSONALITY */
/* This comment documents the high-level logic of this file.

Waiting for events in sync mode
===============================

When waiting for an event in a specific thread, we just use waitpid,
passing the specific pid, and not passing WNOHANG.

When waiting for an event in all threads, waitpid is not quite good
enough.  Prior to version 2.4, Linux could wait for events either in
the main thread or in secondary threads, but not in both with a
single call.  (2.4 added the __WALL flag.)  So, if we used blocking
waitpid, we might miss an event.  The solution is to use non-blocking
waitpid, together with sigsuspend.  First, we use non-blocking waitpid
to check for an event in the main process, if any.  Second, we use
non-blocking waitpid with the __WCLONE flag to check for events in
cloned processes.  If nothing is found, we use sigsuspend to wait for
SIGCHLD.  When SIGCHLD arrives, it means something happened to a child
process -- and SIGCHLD will be delivered both for events in the main
debugged process and in cloned processes.  As soon as we know there's
an event, we get back to calling non-blocking waitpid with and without
__WCLONE.

Note that SIGCHLD should be blocked between the waitpid and sigsuspend
calls, so that we don't miss a signal.  If SIGCHLD arrives in between,
while it's blocked, the signal becomes pending and sigsuspend
immediately notices it and returns.

Waiting for events in async mode
================================

In async mode, GDB should always be ready to handle both user input
and target events, so neither blocking waitpid nor sigsuspend are
viable options.  Instead, we should asynchronously notify the GDB main
event loop whenever there's an unprocessed event from the target.  We
detect asynchronous target events by handling SIGCHLD signals.  To
notify the event loop about target events, the self-pipe trick is used
--- a pipe is registered as a waitable event source in the event loop,
the event loop select/poll's on the read end of this pipe (as well as
on other event sources, e.g., stdin), and the SIGCHLD handler writes a
byte to this pipe.  This is more portable than relying on
pselect/ppoll, since on kernels that lack those syscalls, libc
emulates them with select/poll+sigprocmask, and that is racy
(a.k.a. plain broken).

Obviously, if we fail to notify the event loop when there's a target
event, it's bad.  OTOH, if we notify the event loop when there's no
event from the target, linux_nat_wait will detect that there's no real
event to report, and return an event of type TARGET_WAITKIND_IGNORE.
This is mostly harmless, but it will waste time and is better avoided.

The main design point is that every time GDB is outside linux-nat.c,
we have a SIGCHLD handler installed that is called when something
happens to the target and notifies the GDB event loop.  Whenever GDB
core decides to handle the event, and calls into linux-nat.c, we
process things as in sync mode, except that we never block in
sigsuspend.

While processing an event, we may end up momentarily blocked in
waitpid calls.  Those waitpid calls, while blocking, are guaranteed to
return quickly.  E.g., in all-stop mode, before reporting to the core
that an LWP hit a breakpoint, all LWPs are stopped by sending them
SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
Note that this is different from blocking indefinitely waiting for the
next event --- here, we're already handling an event.

Use of signals
==============

We stop threads by sending a SIGSTOP.  The use of SIGSTOP instead of
another signal is not entirely significant; we just need a signal to
be delivered, so that we can intercept it.  SIGSTOP's advantage is
that it cannot be blocked.  A disadvantage is that it is not a
real-time signal, so it can only be queued once; we do not keep track
of other sources of SIGSTOP.

Two other signals that can't be blocked are SIGCONT and SIGKILL.  But
we can't use them, because they have special behavior when the signal
is generated - not when it is delivered.  SIGCONT resumes the entire
thread group and SIGKILL kills the entire thread group.

A delivered SIGSTOP would stop the entire thread group, not just the
thread we tkill'd.  But we never let the SIGSTOP be delivered; we
always intercept and cancel it (by PTRACE_CONT without passing
SIGSTOP).

We could use a real-time signal instead.  This would solve those
problems; we could use PTRACE_GETSIGINFO to locate the specific stop
signals sent by GDB.  But we would still have to have some support for
SIGSTOP, since PTRACE_ATTACH generates it, and there are races with
trying to find a signal that is not blocked.  */
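
/* For illustration only: a minimal, self-contained sketch of the
   self-pipe trick described above, kept out of the build with #if 0.
   The demo_* names are hypothetical; in this file the same pattern is
   implemented by the SIGCHLD handler together with async_file_mark
   and async_file_flush below.  */
#if 0
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <signal.h>
#include <unistd.h>

static int demo_event_pipe[2];

/* Async-signal-safe SIGCHLD handler: just leave a byte in the pipe
   so that poll wakes up.  */

static void
demo_sigchld_handler (int signo)
{
  char c = '+';

  write (demo_event_pipe[1], &c, 1);
}

static void
demo_event_loop (void)
{
  struct pollfd fds[2];

  pipe (demo_event_pipe);
  fcntl (demo_event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (demo_event_pipe[1], F_SETFL, O_NONBLOCK);
  signal (SIGCHLD, demo_sigchld_handler);

  fds[0].fd = STDIN_FILENO;		/* user input */
  fds[0].events = POLLIN;
  fds[1].fd = demo_event_pipe[0];	/* target events */
  fds[1].events = POLLIN;

  while (1)
    {
      /* No race here: a SIGCHLD delivered at any point leaves a byte
	 in the pipe, so poll wakes up instead of sleeping with the
	 event unnoticed.  */
      if (poll (fds, 2, -1) < 0 && errno == EINTR)
	continue;

      if (fds[1].revents & POLLIN)
	{
	  char buf[64];

	  /* Drain the pipe; a real implementation would now collect
	     target events with non-blocking waitpid.  */
	  while (read (demo_event_pipe[0], buf, sizeof buf) > 0)
	    ;
	}
    }
}
#endif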

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* The single-threaded native GNU/Linux target_ops.  We save a pointer for
   the use of the multi-threaded target.  */
static struct target_ops *linux_ops;
static struct target_ops linux_ops_saved;

/* The method to call, if any, when a new thread is attached.  */
static void (*linux_nat_new_thread) (struct lwp_info *);

/* The method to call, if any, when a new fork is attached.  */
static linux_nat_new_fork_ftype *linux_nat_new_fork;

/* The method to call, if any, when a process is no longer
   attached.  */
static linux_nat_forget_process_ftype *linux_nat_forget_process_hook;

/* Hook to call prior to resuming a thread.  */
static void (*linux_nat_prepare_to_resume) (struct lwp_info *);

/* The method to call, if any, when the siginfo object needs to be
   converted between the layout returned by ptrace, and the layout in
   the architecture of the inferior.  */
static int (*linux_nat_siginfo_fixup) (siginfo_t *,
				       gdb_byte *,
				       int);

/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
   Called by our to_xfer_partial.  */
static target_xfer_partial_ftype *super_xfer_partial;

/* The saved to_close method, inherited from inf-ptrace.c.
   Called by our to_close.  */
static void (*super_close) (struct target_ops *);

static unsigned int debug_linux_nat;

static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
		      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
		    value);
}

struct simple_pid_list
{
  int pid;
  int status;
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Async mode support.  */

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_nat_event_pipe[2] = { -1, -1 };

/* Flush the event pipe.  */

static void
async_file_flush (void)
{
  int ret;
  char buf;

  /* The event pipe is registered non-blocking, so once it has been
     drained, read returns -1 with EAGAIN and the loop stops.  */
  do
    {
      ret = read (linux_nat_event_pipe[0], &buf, 1);
    }
  while (ret >= 0 || (ret == -1 && errno == EINTR));
}

/* Put something (anything, doesn't matter what, or how much) in the
   event pipe, so that the select/poll in the event loop realizes we
   have something to process.  */

static void
async_file_mark (void)
{
  int ret;

  /* It doesn't really matter what the pipe contains, as long as we
     end up with something in it.  Might as well flush the previous
     left-overs.  */
  async_file_flush ();

  do
    {
      ret = write (linux_nat_event_pipe[1], "+", 1);
    }
  while (ret == -1 && errno == EINTR);

  /* Ignore EAGAIN.  If the pipe is full, the event loop will already
     be awakened anyway.  */
}

static int kill_lwp (int lwpid, int signo);

static int stop_callback (struct lwp_info *lp, void *data);

static void block_child_signals (sigset_t *prev_mask);
static void restore_child_signals_mask (sigset_t *prev_mask);

struct lwp_info;
static struct lwp_info *add_lwp (ptid_t ptid);
static void purge_lwp_list (int pid);
static void delete_lwp (ptid_t ptid);
static struct lwp_info *find_lwp_pid (ptid_t ptid);
\f
/* Trivial list manipulation functions to keep track of a list of
   new stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
in_pid_list_p (struct simple_pid_list *list, int pid)
{
  struct simple_pid_list *p;

  for (p = list; p != NULL; p = p->next)
    if (p->pid == pid)
      return 1;
  return 0;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}

/* Initialize ptrace warnings and check for supported ptrace
   features given PID.

   ATTACHED should be nonzero iff we attached to the inferior.  */

static void
linux_init_ptrace (pid_t pid, int attached)
{
  linux_enable_event_reporting (pid, attached);
  linux_ptrace_init_warnings ();
}

static void
linux_child_post_attach (struct target_ops *self, int pid)
{
  linux_init_ptrace (pid, 1);
}

static void
linux_child_post_startup_inferior (struct target_ops *self, ptid_t ptid)
{
  linux_init_ptrace (ptid_get_pid (ptid), 0);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;
  struct lwp_info *lp;

  for (lp = lwp_list; lp; lp = lp->next)
    if (ptid_get_pid (lp->ptid) == pid)
      count++;

  return count;
}

/* Call delete_lwp with a prototype compatible with make_cleanup.  */

static void
delete_lwp_cleanup (void *lp_voidp)
{
  struct lwp_info *lp = lp_voidp;

  delete_lwp (lp->ptid);
}

/* Target hook for follow_fork.  On entry inferior_ptid must be the
   ptid of the followed inferior.  At return, inferior_ptid will be
   unchanged.  */

static int
linux_child_follow_fork (struct target_ops *ops, int follow_child,
			 int detach_fork)
{
  if (!follow_child)
    {
      struct lwp_info *child_lp = NULL;
      int status = W_STOPCODE (0);
      struct cleanup *old_chain;
      int has_vforked;
      int parent_pid, child_pid;

      has_vforked = (inferior_thread ()->pending_follow.kind
		     == TARGET_WAITKIND_VFORKED);
      parent_pid = ptid_get_lwp (inferior_ptid);
      if (parent_pid == 0)
	parent_pid = ptid_get_pid (inferior_ptid);
      child_pid
	= ptid_get_pid (inferior_thread ()->pending_follow.value.related_pid);

      /* We're already attached to the parent, by default.  */
      old_chain = save_inferior_ptid ();
      inferior_ptid = ptid_build (child_pid, child_pid, 0);
      child_lp = add_lwp (inferior_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;

      /* Detach new forked process?  */
      if (detach_fork)
	{
	  make_cleanup (delete_lwp_cleanup, child_lp);

	  if (linux_nat_prepare_to_resume != NULL)
	    linux_nat_prepare_to_resume (child_lp);

	  /* When debugging an inferior in an architecture that supports
	     hardware single stepping on a kernel without commit
	     6580807da14c423f0d0a708108e6df6ebc8bc83d, the vfork child
	     process starts with the TIF_SINGLESTEP/X86_EFLAGS_TF bits
	     set if the parent process had them set.
	     To work around this, single step the child process
	     once before detaching to clear the flags.  */

	  if (!gdbarch_software_single_step_p (target_thread_architecture
					       (child_lp->ptid)))
	    {
	      linux_disable_event_reporting (child_pid);
	      if (ptrace (PTRACE_SINGLESTEP, child_pid, 0, 0) < 0)
		perror_with_name (_("Couldn't do single step"));
	      if (my_waitpid (child_pid, &status, 0) < 0)
		perror_with_name (_("Couldn't wait vfork process"));
	    }

	  if (WIFSTOPPED (status))
	    {
	      int signo;

	      signo = WSTOPSIG (status);
	      if (signo != 0
		  && !signal_pass_state (gdb_signal_from_host (signo)))
		signo = 0;
	      ptrace (PTRACE_DETACH, child_pid, 0, signo);
	    }

	  /* Resets value of inferior_ptid to parent ptid.  */
	  do_cleanups (old_chain);
	}
      else
	{
	  /* Let the thread_db layer learn about this new process.  */
	  check_for_thread_db ();
	}

      do_cleanups (old_chain);

      if (has_vforked)
	{
	  struct lwp_info *parent_lp;

	  parent_lp = find_lwp_pid (pid_to_ptid (parent_pid));
	  gdb_assert (linux_supports_tracefork () >= 0);

	  if (linux_supports_tracevforkdone ())
	    {
	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "LCFF: waiting for VFORK_DONE on %d\n",
				    parent_pid);
	      parent_lp->stopped = 1;

	      /* We'll handle the VFORK_DONE event like any other
		 event, in target_wait.  */
	    }
	  else
	    {
	      /* We can't insert breakpoints until the child has
		 finished with the shared memory region.  We need to
		 wait until that happens.  Ideal would be to just
		 call:
		 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
		 - waitpid (parent_pid, &status, __WALL);
		 However, most architectures can't handle a syscall
		 being traced on the way out if it wasn't traced on
		 the way in.

		 We might also think to loop, continuing the child
		 until it exits or gets a SIGTRAP.  One problem is
		 that the child might call ptrace with PTRACE_TRACEME.

		 There's no simple and reliable way to figure out when
		 the vforked child will be done with its copy of the
		 shared memory.  We could step it out of the syscall,
		 two instructions, let it go, and then single-step the
		 parent once.  When we have hardware single-step, this
		 would work; with software single-step it could still
		 be made to work but we'd have to be able to insert
		 single-step breakpoints in the child, and we'd have
		 to insert -just- the single-step breakpoint in the
		 parent.  Very awkward.

		 In the end, the best we can do is to make sure it
		 runs for a little while.  Hopefully it will be out of
		 range of any breakpoints we reinsert.  Usually this
		 is only the single-step breakpoint at vfork's return
		 point.  */

	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "LCFF: no VFORK_DONE "
				    "support, sleeping a bit\n");

	      usleep (10000);

	      /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
		 and leave it pending.  The next linux_nat_resume call
		 will notice a pending event, and bypass actually
		 resuming the inferior.  */
	      parent_lp->status = 0;
	      parent_lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
	      parent_lp->stopped = 1;

	      /* If we're in async mode, need to tell the event loop
		 there's something here to process.  */
	      if (target_can_async_p ())
		async_file_mark ();
	    }
	}
    }
  else
    {
      struct lwp_info *child_lp;

      child_lp = add_lwp (inferior_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;

      /* Let the thread_db layer learn about this new process.  */
      check_for_thread_db ();
    }

  return 0;
}

\f
static int
linux_child_insert_fork_catchpoint (struct target_ops *self, int pid)
{
  return !linux_supports_tracefork ();
}

static int
linux_child_remove_fork_catchpoint (struct target_ops *self, int pid)
{
  return 0;
}

static int
linux_child_insert_vfork_catchpoint (struct target_ops *self, int pid)
{
  return !linux_supports_tracefork ();
}

static int
linux_child_remove_vfork_catchpoint (struct target_ops *self, int pid)
{
  return 0;
}

static int
linux_child_insert_exec_catchpoint (struct target_ops *self, int pid)
{
  return !linux_supports_tracefork ();
}

static int
linux_child_remove_exec_catchpoint (struct target_ops *self, int pid)
{
  return 0;
}

static int
linux_child_set_syscall_catchpoint (struct target_ops *self,
				    int pid, int needed, int any_count,
				    int table_size, int *table)
{
  if (!linux_supports_tracesysgood ())
    return 1;

  /* On GNU/Linux, we ignore the arguments.  It means that we only
     enable the syscall catchpoints, but do not disable them.

     Also, we do not use the `table' information because we do not
     filter system calls here.  We let GDB do the logic for us.  */
  return 0;
}

/* On GNU/Linux there are no real LWP's.  The closest thing to LWP's
   are processes sharing the same VM space.  A multi-threaded process
   is basically a group of such processes.  However, such a grouping
   is almost entirely a user-space issue; the kernel doesn't enforce
   such a grouping at all (this might change in the future).  In
   general, we'll rely on the threads library (i.e. the GNU/Linux
   Threads library) to provide such a grouping.

   It is perfectly possible to write a multi-threaded application
   without the assistance of a threads library, by using the clone
   system call directly.  This module should be able to give some
   rudimentary support for debugging such applications if developers
   specify the CLONE_PTRACE flag in the clone system call, and are
   using Linux kernel 2.4 or above.

   Note that there are some peculiarities in GNU/Linux that affect
   this code:

   - In general one should specify the __WCLONE flag to waitpid in
     order to make it report events for any of the cloned processes
     (and leave it out for the initial process).  However, if a cloned
     process has exited the exit status is only reported if the
     __WCLONE flag is absent.  Linux kernel 2.4 has a __WALL flag, but
     we cannot use it since GDB must work on older systems too.  (A
     short sketch of this two-step dance follows this comment.)

   - When a traced, cloned process exits and is waited for by the
     debugger, the kernel reassigns it to the original parent and
     keeps it around as a "zombie".  Somehow, the GNU/Linux Threads
     library doesn't notice this, which leads to the "zombie problem":
     when debugged, a multi-threaded process that spawns a lot of
     threads will run out of processes, even if the threads exit,
     because the "zombies" stay around.  */
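
/* For illustration only: a minimal sketch of the two-step waitpid
   dance implied by the __WCLONE notes above, kept out of the build
   with #if 0.  The helper name demo_wait_any_flavor is hypothetical;
   the real code applies the same pattern with my_waitpid (see
   lin_lwp_attach_lwp and linux_nat_post_attach_wait below).  */
#if 0
#include <errno.h>
#include <sys/wait.h>

static pid_t
demo_wait_any_flavor (pid_t pid, int *status)
{
  /* First try the plain flavor, which reports events for ordinary
     (non-clone) children...  */
  pid_t ret = waitpid (pid, status, WNOHANG);

  /* ... and if the kernel says PID is not such a child, retry with
     __WCLONE to pick up events from cloned LWPs.  */
  if (ret == -1 && errno == ECHILD)
    ret = waitpid (pid, status, __WCLONE | WNOHANG);

  return ret;
}
#endif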

/* List of known LWPs.  */
struct lwp_info *lwp_list;
\f

/* Original signal mask.  */
static sigset_t normal_mask;

/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
   _initialize_linux_nat.  */
static sigset_t suspend_mask;

/* Signals to block to make sigsuspend work.  */
static sigset_t blocked_mask;

/* SIGCHLD action.  */
struct sigaction sigchld_action;

/* Block child signals (SIGCHLD and linux threads signals), and store
   the previous mask in PREV_MASK.  */

static void
block_child_signals (sigset_t *prev_mask)
{
  /* Make sure SIGCHLD is blocked.  */
  if (!sigismember (&blocked_mask, SIGCHLD))
    sigaddset (&blocked_mask, SIGCHLD);

  sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
}

/* Restore the child signals mask, previously returned by
   block_child_signals.  */

static void
restore_child_signals_mask (sigset_t *prev_mask)
{
  sigprocmask (SIG_SETMASK, prev_mask, NULL);
}

/* Mask of signals to pass directly to the inferior.  */
static sigset_t pass_mask;

/* Update signals to pass to the inferior.  */

static void
linux_nat_pass_signals (struct target_ops *self,
			int numsigs, unsigned char *pass_signals)
{
  int signo;

  sigemptyset (&pass_mask);

  for (signo = 1; signo < NSIG; signo++)
    {
      int target_signo = gdb_signal_from_host (signo);

      if (target_signo < numsigs && pass_signals[target_signo])
	sigaddset (&pass_mask, signo);
    }
}

\f

/* Prototypes for local functions.  */
static int stop_wait_callback (struct lwp_info *lp, void *data);
static int linux_thread_alive (ptid_t ptid);
static char *linux_child_pid_to_exec_file (struct target_ops *self, int pid);

\f

/* Destroy and free LP.  */

static void
lwp_free (struct lwp_info *lp)
{
  xfree (lp->arch_private);
  xfree (lp);
}

/* Remove all LWPs belonging to PID from the lwp list.  */

static void
purge_lwp_list (int pid)
{
  struct lwp_info *lp, *lpprev, *lpnext;

  lpprev = NULL;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;

      if (ptid_get_pid (lp->ptid) == pid)
	{
	  if (lp == lwp_list)
	    lwp_list = lp->next;
	  else
	    lpprev->next = lp->next;

	  lwp_free (lp);
	}
      else
	lpprev = lp;
    }
}

/* Add the LWP specified by PTID to the list.  PTID is the first LWP
   in the process.  Return a pointer to the structure describing the
   new LWP.

   This differs from add_lwp in that we don't let the arch specific
   bits know about this new thread.  Current clients of this callback
   take the opportunity to install watchpoints in the new thread, and
   we shouldn't do that for the first thread.  If we're spawning a
   child ("run"), the thread executes the shell wrapper first, and we
   shouldn't touch it until it execs the program we want to debug.
   For "attach", it'd be okay to call the callback, but it's not
   necessary, because watchpoints can't yet have been inserted into
   the inferior.  */

static struct lwp_info *
add_initial_lwp (ptid_t ptid)
{
  struct lwp_info *lp;

  gdb_assert (ptid_lwp_p (ptid));

  lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));

  memset (lp, 0, sizeof (struct lwp_info));

  lp->last_resume_kind = resume_continue;
  lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  lp->ptid = ptid;
  lp->core = -1;

  lp->next = lwp_list;
  lwp_list = lp;

  return lp;
}

/* Add the LWP specified by PTID to the list.  Return a pointer to the
   structure describing the new LWP.  The LWP should already be
   stopped.  */

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lp;

  lp = add_initial_lwp (ptid);

  /* Let the arch specific bits know about this new thread.  Current
     clients of this callback take the opportunity to install
     watchpoints in the new thread.  We don't do this for the first
     thread though.  See add_initial_lwp.  */
  if (linux_nat_new_thread != NULL)
    linux_nat_new_thread (lp);

  return lp;
}

/* Remove the LWP specified by PTID from the list.  */

static void
delete_lwp (ptid_t ptid)
{
  struct lwp_info *lp, *lpprev;

  lpprev = NULL;

  for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
    if (ptid_equal (lp->ptid, ptid))
      break;

  if (!lp)
    return;

  if (lpprev)
    lpprev->next = lp->next;
  else
    lwp_list = lp->next;

  lwp_free (lp);
}

/* Return a pointer to the structure describing the LWP corresponding
   to PTID.  If no corresponding LWP could be found, return NULL.  */

static struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct lwp_info *lp;
  int lwp;

  if (ptid_lwp_p (ptid))
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  for (lp = lwp_list; lp; lp = lp->next)
    if (lwp == ptid_get_lwp (lp->ptid))
      return lp;

  return NULL;
}

/* Call CALLBACK with its second argument set to DATA for every LWP in
   the list.  If CALLBACK returns 1 for a particular LWP, return a
   pointer to the structure describing that LWP immediately.
   Otherwise return NULL.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
		   int (*callback) (struct lwp_info *, void *),
		   void *data)
{
  struct lwp_info *lp, *lpnext;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;

      if (ptid_match (lp->ptid, filter))
	{
	  if ((*callback) (lp, data))
	    return lp;
	}
    }

  return NULL;
}

/* Update our internal state when changing from one checkpoint to
   another indicated by NEW_PTID.  We can only switch single-threaded
   applications, so we only create one new LWP, and the previous list
   is discarded.  */

void
linux_nat_switch_fork (ptid_t new_ptid)
{
  struct lwp_info *lp;

  purge_lwp_list (ptid_get_pid (inferior_ptid));

  lp = add_lwp (new_ptid);
  lp->stopped = 1;

  /* This changes the thread's ptid while preserving the gdb thread
     num.  Also changes the inferior pid, while preserving the
     inferior num.  */
  thread_change_ptid (inferior_ptid, new_ptid);

  /* We've just told GDB core that the thread changed target id, but,
     in fact, it really is a different thread, with different register
     contents.  */
  registers_changed ();
}

/* Handle the exit of a single thread LP.  */

static void
exit_lwp (struct lwp_info *lp)
{
  struct thread_info *th = find_thread_ptid (lp->ptid);

  if (th)
    {
      if (print_thread_events)
	printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));

      delete_thread (lp->ptid);
    }

  delete_lwp (lp->ptid);
}

/* Wait for the LWP specified by LP, which we have just attached to.
   Returns a wait status for that LWP, to cache.  */

static int
linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
			    int *signalled)
{
  pid_t new_pid, pid = ptid_get_lwp (ptid);
  int status;

  if (linux_proc_pid_is_stopped (pid))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNPAW: Attaching to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
	 is not an RT signal, it can only be queued once.  */
      kill_lwp (pid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the SIGSTOP
	 (or a higher priority signal, just like normal PTRACE_ATTACH).  */
      ptrace (PTRACE_CONT, pid, 0, 0);
    }

  /* Make sure the initial process is stopped.  The user-level threads
     layer might want to poke around in the inferior, and that won't
     work if things haven't stabilized yet.  */
  new_pid = my_waitpid (pid, &status, 0);
  if (new_pid == -1 && errno == ECHILD)
    {
      if (first)
	warning (_("%s is a cloned process"), target_pid_to_str (ptid));

      /* Try again with __WCLONE to check cloned processes.  */
      new_pid = my_waitpid (pid, &status, __WCLONE);
      *cloned = 1;
    }

  gdb_assert (pid == new_pid);

  if (!WIFSTOPPED (status))
    {
      /* The pid we tried to attach has apparently just exited.  */
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
			    pid, status_to_str (status));
      return status;
    }

  if (WSTOPSIG (status) != SIGSTOP)
    {
      *signalled = 1;
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNPAW: Received %s after attaching\n",
			    status_to_str (status));
    }

  return status;
}

/* Attach to the LWP specified by PTID.  Return 0 if successful, -1 if
   the new LWP could not be attached, or 1 if we're already auto
   attached to this thread, but haven't processed the
   PTRACE_EVENT_CLONE event of its parent thread, so we just ignore
   its existence, without considering it an error.  */

int
lin_lwp_attach_lwp (ptid_t ptid)
{
  struct lwp_info *lp;
  int lwpid;

  gdb_assert (ptid_lwp_p (ptid));

  lp = find_lwp_pid (ptid);
  lwpid = ptid_get_lwp (ptid);

  /* We assume that we're already attached to any LWP that has an id
     equal to the overall process id, and to any LWP that is already
     in our list of LWPs.  If we're not seeing exit events from threads
     and we've had PID wraparound since we last tried to stop all threads,
     this assumption might be wrong; fortunately, this is very unlikely
     to happen.  */
  if (lwpid != ptid_get_pid (ptid) && lp == NULL)
    {
      int status, cloned = 0, signalled = 0;

      if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
	{
	  if (linux_supports_tracefork ())
	    {
	      /* If we haven't stopped all threads when we get here,
		 we may have seen a thread listed in thread_db's list,
		 but not processed the PTRACE_EVENT_CLONE yet.  If
		 that's the case, ignore this new thread, and let
		 normal event handling discover it later.  */
	      if (in_pid_list_p (stopped_pids, lwpid))
		{
		  /* We've already seen this thread stop, but we
		     haven't seen the PTRACE_EVENT_CLONE extended
		     event yet.  */
		  return 0;
		}
	      else
		{
		  int new_pid;
		  int status;

		  /* See if we've got a stop for this new child
		     pending.  If so, we're already attached.  */
		  new_pid = my_waitpid (lwpid, &status, WNOHANG);
		  if (new_pid == -1 && errno == ECHILD)
		    new_pid = my_waitpid (lwpid, &status, __WCLONE | WNOHANG);
		  if (new_pid != -1)
		    {
		      if (WIFSTOPPED (status))
			add_to_pid_list (&stopped_pids, lwpid, status);
		      return 1;
		    }
		}
	    }

	  /* If we fail to attach to the thread, issue a warning,
	     but continue.  One way this can happen is if thread
	     creation is interrupted; as of Linux kernel 2.6.19, a
	     bug may place threads in the thread list and then fail
	     to create them.  */
	  warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
		   safe_strerror (errno));
	  return -1;
	}

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
			    target_pid_to_str (ptid));

      status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
      if (!WIFSTOPPED (status))
	return 1;

      lp = add_lwp (ptid);
      lp->stopped = 1;
      lp->cloned = cloned;
      lp->signalled = signalled;
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  lp->resumed = 1;
	  lp->status = status;
	}

      target_post_attach (ptid_get_lwp (lp->ptid));

      if (debug_linux_nat)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "LLAL: waitpid %s received %s\n",
			      target_pid_to_str (ptid),
			      status_to_str (status));
	}
    }
  else
    {
      /* We assume that the LWP representing the original process is
	 already stopped.  Mark it as stopped in the data structure
	 that the GNU/linux ptrace layer uses to keep track of
	 threads.  Note that this won't have already been done since
	 the main thread will have, we assume, been stopped by an
	 attach from a different layer.  */
      if (lp == NULL)
	lp = add_lwp (ptid);
      lp->stopped = 1;
    }

  lp->last_resume_kind = resume_stop;
  return 0;
}

static void
linux_nat_create_inferior (struct target_ops *ops,
			   char *exec_file, char *allargs, char **env,
			   int from_tty)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif /* HAVE_PERSONALITY */

  /* The fork_child mechanism is synchronous and calls target_wait, so
     we have to mask the async mode.  */

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
	{
	  personality_set = 1;
	  personality (personality_orig | ADDR_NO_RANDOMIZE);
	}
      if (errno != 0 || (personality_set
			 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
	warning (_("Error disabling address space randomization: %s"),
		 safe_strerror (errno));
    }
#endif /* HAVE_PERSONALITY */

  /* Make sure we report all signals during startup.  */
  linux_nat_pass_signals (ops, 0, NULL);

  linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
	warning (_("Error restoring address space randomization: %s"),
		 safe_strerror (errno));
    }
#endif /* HAVE_PERSONALITY */
}

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  struct lwp_info *lp;

  /* Ignore LWPs we're already attached to.  */
  lp = find_lwp_pid (ptid);
  if (lp == NULL)
    {
      int lwpid = ptid_get_lwp (ptid);

      if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
	{
	  int err = errno;

	  /* Be quiet if we simply raced with the thread exiting.
	     EPERM is returned if the thread's task still exists, and
	     is marked as exited or zombie, as well as other
	     conditions, so in that case, confirm the status in
	     /proc/PID/status.  */
	  if (err == ESRCH
	      || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	    {
	      if (debug_linux_nat)
		{
		  fprintf_unfiltered (gdb_stdlog,
				      "Cannot attach to lwp %d: "
				      "thread is gone (%d: %s)\n",
				      lwpid, err, safe_strerror (err));
		}
	    }
	  else
	    {
	      warning (_("Cannot attach to lwp %d: %s\n"),
		       lwpid,
		       linux_ptrace_attach_fail_reason_string (ptid,
							       err));
	    }
	}
      else
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"PTRACE_ATTACH %s, 0, 0 (OK)\n",
				target_pid_to_str (ptid));

	  lp = add_lwp (ptid);
	  lp->cloned = 1;

	  /* The next time we wait for this LWP we'll see a SIGSTOP as
	     PTRACE_ATTACH brings it to a halt.  */
	  lp->signalled = 1;

	  /* We need to wait for a stop before being able to make the
	     next ptrace call on this LWP.  */
	  lp->must_set_ptrace_flags = 1;
	}

      return 1;
    }

  return 0;
}

static void
linux_nat_attach (struct target_ops *ops, const char *args, int from_tty)
{
  struct lwp_info *lp;
  int status;
  ptid_t ptid;
  volatile struct gdb_exception ex;

  /* Make sure we report all signals during attach.  */
  linux_nat_pass_signals (ops, 0, NULL);

  TRY_CATCH (ex, RETURN_MASK_ERROR)
    {
      linux_ops->to_attach (ops, args, from_tty);
    }
  if (ex.reason < 0)
    {
      pid_t pid = parse_pid_to_attach (args);
      struct buffer buffer;
      char *message, *buffer_s;

      message = xstrdup (ex.message);
      make_cleanup (xfree, message);

      buffer_init (&buffer);
      linux_ptrace_attach_fail_reason (pid, &buffer);

      buffer_grow_str0 (&buffer, "");
      buffer_s = buffer_finish (&buffer);
      make_cleanup (xfree, buffer_s);

      if (*buffer_s != '\0')
	throw_error (ex.error, "warning: %s\n%s", buffer_s, message);
      else
	throw_error (ex.error, "%s", message);
    }

  /* The ptrace base target adds the main thread with (pid,0,0)
     format.  Decorate it with lwp info.  */
  ptid = ptid_build (ptid_get_pid (inferior_ptid),
		     ptid_get_pid (inferior_ptid),
		     0);
  thread_change_ptid (inferior_ptid, ptid);

  /* Add the initial process as the first LWP to the list.  */
  lp = add_initial_lwp (ptid);

  status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
				       &lp->signalled);
  if (!WIFSTOPPED (status))
    {
      if (WIFEXITED (status))
	{
	  int exit_code = WEXITSTATUS (status);

	  target_terminal_ours ();
	  target_mourn_inferior ();
	  if (exit_code == 0)
	    error (_("Unable to attach: program exited normally."));
	  else
	    error (_("Unable to attach: program exited with code %d."),
		   exit_code);
	}
      else if (WIFSIGNALED (status))
	{
	  enum gdb_signal signo;

	  target_terminal_ours ();
	  target_mourn_inferior ();

	  signo = gdb_signal_from_host (WTERMSIG (status));
	  error (_("Unable to attach: program terminated with signal "
		   "%s, %s."),
		 gdb_signal_to_name (signo),
		 gdb_signal_to_string (signo));
	}

      internal_error (__FILE__, __LINE__,
		      _("unexpected status %d for PID %ld"),
		      status, (long) ptid_get_lwp (ptid));
    }

  lp->stopped = 1;

  /* Save the wait status to report later.  */
  lp->resumed = 1;
  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LNA: waitpid %ld, saving status %s\n",
			(long) ptid_get_pid (lp->ptid), status_to_str (status));

  lp->status = status;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  The inferior may be using raw clone instead of
     using pthreads.  But even if it is using pthreads, thread_db
     walks structures in the inferior's address space to find the list
     of threads/LWPs, and those structures may well be corrupted.
     Note that once thread_db is loaded, we'll still use it to list
     threads and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (ptid_get_pid (lp->ptid),
				  attach_proc_task_lwp_callback);

  if (target_can_async_p ())
    target_async (inferior_event_handler, 0);
}

/* Get the pending status of LP.  */

static int
get_pending_status (struct lwp_info *lp, int *status)
{
  enum gdb_signal signo = GDB_SIGNAL_0;

  /* If we paused threads momentarily, we may have stored pending
     events in lp->status or lp->waitstatus (see stop_wait_callback),
     and GDB core hasn't seen any signal for those threads.
     Otherwise, the last signal reported to the core is found in the
     thread object's stop_signal.

     There's a corner case that isn't handled here at present.  Only
     if the thread stopped with a TARGET_WAITKIND_STOPPED does
     stop_signal make sense as a real signal to pass to the inferior.
     Some catchpoint related events, like
     TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
     to GDB_SIGNAL_TRAP when the catchpoint triggers.  But, those
     traps are debug API (ptrace in our case) related and induced; the
     inferior wouldn't see them if it wasn't being traced.  Hence, we
     should never pass them to the inferior, even when set to pass
     state.  Since this corner case isn't handled by infrun.c when
     proceeding with a signal, for consistency, neither do we handle
     it here (or elsewhere in the file we check for signal pass
     state).  Normally SIGTRAP isn't set to pass state, so this is
     really a corner case.  */

  if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
    signo = GDB_SIGNAL_0;	/* a pending ptrace event, not a real signal.  */
  else if (lp->status)
    signo = gdb_signal_from_host (WSTOPSIG (lp->status));
  else if (non_stop && !is_executing (lp->ptid))
    {
      struct thread_info *tp = find_thread_ptid (lp->ptid);

      signo = tp->suspend.stop_signal;
    }
  else if (!non_stop)
    {
      struct target_waitstatus last;
      ptid_t last_ptid;

      get_last_target_status (&last_ptid, &last);

      if (ptid_get_lwp (lp->ptid) == ptid_get_lwp (last_ptid))
	{
	  struct thread_info *tp = find_thread_ptid (lp->ptid);

	  signo = tp->suspend.stop_signal;
	}
    }

  *status = 0;

  if (signo == GDB_SIGNAL_0)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "GPT: lwp %s has no pending signal\n",
			    target_pid_to_str (lp->ptid));
    }
  else if (!signal_pass_state (signo))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "GPT: lwp %s had signal %s, "
			    "but it is in no pass state\n",
			    target_pid_to_str (lp->ptid),
			    gdb_signal_to_string (signo));
    }
  else
    {
      *status = W_STOPCODE (gdb_signal_to_host (signo));

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "GPT: lwp %s has pending signal %s\n",
			    target_pid_to_str (lp->ptid),
			    gdb_signal_to_string (signo));
    }

  return 0;
}

static int
detach_callback (struct lwp_info *lp, void *data)
{
  gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));

  if (debug_linux_nat && lp->status)
    fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
			strsignal (WSTOPSIG (lp->status)),
			target_pid_to_str (lp->ptid));

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lp->signalled)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "DC: Sending SIGCONT to %s\n",
			    target_pid_to_str (lp->ptid));

      kill_lwp (ptid_get_lwp (lp->ptid), SIGCONT);
      lp->signalled = 0;
    }

  /* We don't actually detach from the LWP that has an id equal to the
     overall process id just yet.  */
  if (ptid_get_lwp (lp->ptid) != ptid_get_pid (lp->ptid))
    {
      int status = 0;

      /* Pass on any pending signal for this LWP.  */
      get_pending_status (lp, &status);

      if (linux_nat_prepare_to_resume != NULL)
	linux_nat_prepare_to_resume (lp);
      errno = 0;
      if (ptrace (PTRACE_DETACH, ptid_get_lwp (lp->ptid), 0,
		  WSTOPSIG (status)) < 0)
	error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
	       safe_strerror (errno));

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "PTRACE_DETACH (%s, %s, 0) (OK)\n",
			    target_pid_to_str (lp->ptid),
			    strsignal (WSTOPSIG (status)));

      delete_lwp (lp->ptid);
    }

  return 0;
}

static void
linux_nat_detach (struct target_ops *ops, const char *args, int from_tty)
{
  int pid;
  int status;
  struct lwp_info *main_lwp;

  pid = ptid_get_pid (inferior_ptid);

  /* Don't unregister from the event loop, as there may be other
     inferiors running.  */

  /* Stop all threads before detaching.  ptrace requires that the
     thread is stopped to successfully detach.  */
  iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
  /* ... and wait until all of them have reported back that
     they're no longer running.  */
  iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);

  iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);

  /* Only the initial process should be left right now.  */
  gdb_assert (num_lwps (ptid_get_pid (inferior_ptid)) == 1);

  main_lwp = find_lwp_pid (pid_to_ptid (pid));

  /* Pass on any pending signal for the last LWP.  */
  if ((args == NULL || *args == '\0')
      && get_pending_status (main_lwp, &status) != -1
      && WIFSTOPPED (status))
    {
      char *tem;

      /* Put the signal number in ARGS so that inf_ptrace_detach will
	 pass it along with PTRACE_DETACH.  */
      tem = alloca (8);
      xsnprintf (tem, 8, "%d", (int) WSTOPSIG (status));
      args = tem;
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LND: Sending signal %s to %s\n",
			    args,
			    target_pid_to_str (main_lwp->ptid));
    }

  if (linux_nat_prepare_to_resume != NULL)
    linux_nat_prepare_to_resume (main_lwp);
  delete_lwp (main_lwp->ptid);

  if (forks_exist_p ())
    {
      /* Multi-fork case.  The current inferior_ptid is being detached
	 from, but there are other viable forks to debug.  Detach from
	 the current fork, and context-switch to the first
	 available.  */
      linux_fork_detach (args, from_tty);
    }
  else
    linux_ops->to_detach (ops, args, from_tty);
}

/* Resume LP.  */

static void
resume_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
{
  if (lp->stopped)
    {
      struct inferior *inf = find_inferior_ptid (lp->ptid);

      if (inf->vfork_child != NULL)
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"RC: Not resuming %s (vfork parent)\n",
				target_pid_to_str (lp->ptid));
	}
      else if (lp->status == 0
	       && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"RC: Resuming sibling %s, %s, %s\n",
				target_pid_to_str (lp->ptid),
				(signo != GDB_SIGNAL_0
				 ? strsignal (gdb_signal_to_host (signo))
				 : "0"),
				step ? "step" : "resume");

	  if (linux_nat_prepare_to_resume != NULL)
	    linux_nat_prepare_to_resume (lp);
	  linux_ops->to_resume (linux_ops,
				pid_to_ptid (ptid_get_lwp (lp->ptid)),
				step, signo);
	  lp->stopped = 0;
	  lp->step = step;
	  lp->stopped_by_watchpoint = 0;
	}
      else
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"RC: Not resuming sibling %s (has pending)\n",
				target_pid_to_str (lp->ptid));
	}
    }
  else
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "RC: Not resuming sibling %s (not stopped)\n",
			    target_pid_to_str (lp->ptid));
    }
}

/* Callback for iterate_over_lwps.  If LWP is EXCEPT, do nothing.
   Resume LWP with the last stop signal, if it is in pass state.  */

static int
linux_nat_resume_callback (struct lwp_info *lp, void *except)
{
  enum gdb_signal signo = GDB_SIGNAL_0;

  if (lp == except)
    return 0;

  if (lp->stopped)
    {
      struct thread_info *thread;

      thread = find_thread_ptid (lp->ptid);
      if (thread != NULL)
	{
	  signo = thread->suspend.stop_signal;
	  thread->suspend.stop_signal = GDB_SIGNAL_0;
	}
    }

  resume_lwp (lp, 0, signo);
  return 0;
}

static int
resume_clear_callback (struct lwp_info *lp, void *data)
{
  lp->resumed = 0;
  lp->last_resume_kind = resume_stop;
  return 0;
}

static int
resume_set_callback (struct lwp_info *lp, void *data)
{
  lp->resumed = 1;
  lp->last_resume_kind = resume_continue;
  return 0;
}

static void
linux_nat_resume (struct target_ops *ops,
		  ptid_t ptid, int step, enum gdb_signal signo)
{
  struct lwp_info *lp;
  int resume_many;

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
			step ? "step" : "resume",
			target_pid_to_str (ptid),
			(signo != GDB_SIGNAL_0
			 ? strsignal (gdb_signal_to_host (signo)) : "0"),
			target_pid_to_str (inferior_ptid));

  /* A specific PTID means `step only this process id'.  */
  resume_many = (ptid_equal (minus_one_ptid, ptid)
		 || ptid_is_pid (ptid));

  /* Mark the lwps we're resuming as resumed.  */
  iterate_over_lwps (ptid, resume_set_callback, NULL);

  /* See if it's the current inferior that should be handled
     specially.  */
  if (resume_many)
    lp = find_lwp_pid (inferior_ptid);
  else
    lp = find_lwp_pid (ptid);
  gdb_assert (lp != NULL);

  /* Remember if we're stepping.  */
  lp->step = step;
  lp->last_resume_kind = step ? resume_step : resume_continue;

  /* If we have a pending wait status for this thread, there is no
     point in resuming the process.  But first make sure that
     linux_nat_wait won't preemptively handle the event - we
     should never take this short-circuit if we are going to
     leave LP running, since we have skipped resuming all the
     other threads.  This bit of code needs to be synchronized
     with linux_nat_wait.  */

  if (lp->status && WIFSTOPPED (lp->status))
    {
      if (!lp->step
	  && WSTOPSIG (lp->status)
	  && sigismember (&pass_mask, WSTOPSIG (lp->status)))
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LLR: Not short circuiting for ignored "
				"status 0x%x\n", lp->status);

	  /* FIXME: What should we do if we are supposed to continue
	     this thread with a signal?  */
	  gdb_assert (signo == GDB_SIGNAL_0);
	  signo = gdb_signal_from_host (WSTOPSIG (lp->status));
	  lp->status = 0;
	}
    }

  if (lp->status || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
    {
      /* FIXME: What should we do if we are supposed to continue
	 this thread with a signal?  */
      gdb_assert (signo == GDB_SIGNAL_0);

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLR: Short circuiting for status 0x%x\n",
			    lp->status);

      if (target_can_async_p ())
	{
	  target_async (inferior_event_handler, 0);
	  /* Tell the event loop we have something to process.  */
	  async_file_mark ();
	}
      return;
    }

  if (resume_many)
    iterate_over_lwps (ptid, linux_nat_resume_callback, lp);

  /* Convert to something the lower layer understands.  */
  ptid = pid_to_ptid (ptid_get_lwp (lp->ptid));

  if (linux_nat_prepare_to_resume != NULL)
    linux_nat_prepare_to_resume (lp);
  linux_ops->to_resume (linux_ops, ptid, step, signo);
  lp->stopped_by_watchpoint = 0;
  lp->stopped = 0;

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LLR: %s %s, %s (resume event thread)\n",
			step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
			target_pid_to_str (ptid),
			(signo != GDB_SIGNAL_0
			 ? strsignal (gdb_signal_to_host (signo)) : "0"));

  if (target_can_async_p ())
    target_async (inferior_event_handler, 0);
}

/* Send a signal to an LWP.  */

static int
kill_lwp (int lwpid, int signo)
{
  /* Use tkill, if possible, in case we are using NPTL threads.  If tkill
     fails, then we are not using NPTL threads and we should be using kill.  */

#ifdef HAVE_TKILL_SYSCALL
  {
    static int tkill_failed;

    if (!tkill_failed)
      {
	int ret;

	errno = 0;
	ret = syscall (__NR_tkill, lwpid, signo);
	if (errno != ENOSYS)
	  return ret;
	tkill_failed = 1;
      }
  }
#endif

  return kill (lwpid, signo);
}
1740
1741 /* Handle a GNU/Linux syscall trap wait response. If we see a syscall
1742 event, check if the core is interested in it: if not, ignore the
1743 event, and keep waiting; otherwise, we need to toggle the LWP's
1744 syscall entry/exit status, since the ptrace event itself doesn't
1745 indicate it, and report the trap to higher layers. */
1746
1747 static int
1748 linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
1749 {
1750 struct target_waitstatus *ourstatus = &lp->waitstatus;
1751 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
1752 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);
1753
1754 if (stopping)
1755 {
1756 /* If we're stopping threads, there's a SIGSTOP pending, which
1757 makes it so that the LWP reports an immediate syscall return,
1758 followed by the SIGSTOP. Skip seeing that "return" using
1759 PTRACE_CONT directly, and let stop_wait_callback collect the
1760 SIGSTOP. Later when the thread is resumed, a new syscall
1761 entry event. If we didn't do this (and returned 0), we'd
1762 leave a syscall entry pending, and our caller, by using
1763 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
1764 itself. Later, when the user re-resumes this LWP, we'd see
1765 another syscall entry event and we'd mistake it for a return.
1766
1767 If stop_wait_callback didn't force the SIGSTOP out of the LWP
1768 (leaving immediately with LWP->signalled set, without issuing
1769 a PTRACE_CONT), it would still be problematic to leave this
1770 syscall enter pending, as later when the thread is resumed,
1771 it would then see the same syscall exit mentioned above,
1772 followed by the delayed SIGSTOP, while the syscall didn't
1773 actually get to execute. It seems it would be even more
1774 confusing to the user. */
1775
1776 if (debug_linux_nat)
1777 fprintf_unfiltered (gdb_stdlog,
1778 "LHST: ignoring syscall %d "
1779 "for LWP %ld (stopping threads), "
1780 "resuming with PTRACE_CONT for SIGSTOP\n",
1781 syscall_number,
1782 ptid_get_lwp (lp->ptid));
1783
1784 lp->syscall_state = TARGET_WAITKIND_IGNORE;
1785 ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
1786 lp->stopped = 0;
1787 return 1;
1788 }
1789
1790 if (catch_syscall_enabled ())
1791 {
1792 /* Always update the entry/return state, even if this particular
1793 syscall isn't interesting to the core now. In async mode,
1794 the user could install a new catchpoint for this syscall
1795 between syscall enter/return, and we'll need to know to
1796 report a syscall return if that happens. */
1797 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1798 ? TARGET_WAITKIND_SYSCALL_RETURN
1799 : TARGET_WAITKIND_SYSCALL_ENTRY);
1800
1801 if (catching_syscall_number (syscall_number))
1802 {
1803 /* Alright, an event to report. */
1804 ourstatus->kind = lp->syscall_state;
1805 ourstatus->value.syscall_number = syscall_number;
1806
1807 if (debug_linux_nat)
1808 fprintf_unfiltered (gdb_stdlog,
1809 "LHST: stopping for %s of syscall %d"
1810 " for LWP %ld\n",
1811 lp->syscall_state
1812 == TARGET_WAITKIND_SYSCALL_ENTRY
1813 ? "entry" : "return",
1814 syscall_number,
1815 ptid_get_lwp (lp->ptid));
1816 return 0;
1817 }
1818
1819 if (debug_linux_nat)
1820 fprintf_unfiltered (gdb_stdlog,
1821 "LHST: ignoring %s of syscall %d "
1822 "for LWP %ld\n",
1823 lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1824 ? "entry" : "return",
1825 syscall_number,
1826 ptid_get_lwp (lp->ptid));
1827 }
1828 else
1829 {
1830 /* If we had been syscall tracing, and hence used PT_SYSCALL
1831 before on this LWP, it could happen that the user removes all
1832 syscall catchpoints before we get to process this event.
1833 There are two noteworthy issues here:
1834
1835 - When stopped at a syscall entry event, resuming with
1836 PT_STEP still resumes executing the syscall and reports a
1837 syscall return.
1838
1839 - Only PT_SYSCALL catches syscall enters. If we last
1840 single-stepped this thread, then this event can't be a
1841 syscall enter; having been resumed with PT_STEP, it
1842 has to be a syscall exit.
1843
1844 The points above mean that the next resume, be it PT_STEP or
1845 PT_CONTINUE, cannot trigger a syscall trace event. */
1846 if (debug_linux_nat)
1847 fprintf_unfiltered (gdb_stdlog,
1848 "LHST: caught syscall event "
1849 "with no syscall catchpoints."
1850 " %d for LWP %ld, ignoring\n",
1851 syscall_number,
1852 ptid_get_lwp (lp->ptid));
1853 lp->syscall_state = TARGET_WAITKIND_IGNORE;
1854 }
1855
1856 /* The core isn't interested in this event. For efficiency, avoid
1857 stopping all threads only to have the core resume them all again.
1858 Since we're not stopping threads, if we're still syscall tracing
1859 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
1860 subsequent syscall. Simply resume using the inf-ptrace layer,
1861 which knows when to use PT_SYSCALL or PT_CONTINUE. */
1862
1863 /* Note that gdbarch_get_syscall_number may access registers, hence
1864 fill a regcache. */
1865 registers_changed ();
1866 if (linux_nat_prepare_to_resume != NULL)
1867 linux_nat_prepare_to_resume (lp);
1868 linux_ops->to_resume (linux_ops, pid_to_ptid (ptid_get_lwp (lp->ptid)),
1869 lp->step, GDB_SIGNAL_0);
1870 lp->stopped = 0;
1871 return 1;
1872 }
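
/* A minimal sketch of the bookkeeping above, with illustrative
   names (these are not GDB APIs): a PTRACE_SYSCALL stop does not
   itself say whether the thread is entering or leaving a syscall,
   so the tracer flips a per-LWP state on every such stop.  */
#if 0
enum sketch_syscall_state { SKETCH_SYSCALL_ENTRY, SKETCH_SYSCALL_RETURN };

static enum sketch_syscall_state
sketch_toggle_syscall_state (enum sketch_syscall_state prev)
{
  /* Entry and return stops strictly alternate as long as the thread
     is always resumed with PTRACE_SYSCALL; resuming with PTRACE_CONT
     or an interleaved SIGSTOP breaks the alternation, which is what
     the code above has to work around.  */
  return (prev == SKETCH_SYSCALL_ENTRY
	  ? SKETCH_SYSCALL_RETURN
	  : SKETCH_SYSCALL_ENTRY);
}
#endif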
1873
1874 /* Handle a GNU/Linux extended wait response. If we see a clone
1875 event, we need to add the new LWP to our list (and not report the
1876 trap to higher layers). This function returns non-zero if the
1877 event should be ignored and we should wait again. If STOPPING is
1878 true, the new LWP remains stopped, otherwise it is continued. */
1879
1880 static int
1881 linux_handle_extended_wait (struct lwp_info *lp, int status,
1882 int stopping)
1883 {
1884 int pid = ptid_get_lwp (lp->ptid);
1885 struct target_waitstatus *ourstatus = &lp->waitstatus;
1886 int event = linux_ptrace_get_extended_event (status);
1887
1888 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
1889 || event == PTRACE_EVENT_CLONE)
1890 {
1891 unsigned long new_pid;
1892 int ret;
1893
1894 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
1895
1896 /* If we haven't already seen the new PID stop, wait for it now. */
1897 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
1898 {
1899 /* The new child has a pending SIGSTOP. We can't affect it until it
1900 hits the SIGSTOP, but we're already attached. */
1901 ret = my_waitpid (new_pid, &status,
1902 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
1903 if (ret == -1)
1904 perror_with_name (_("waiting for new child"));
1905 else if (ret != new_pid)
1906 internal_error (__FILE__, __LINE__,
1907 _("wait returned unexpected PID %d"), ret);
1908 else if (!WIFSTOPPED (status))
1909 internal_error (__FILE__, __LINE__,
1910 _("wait returned unexpected status 0x%x"), status);
1911 }
1912
1913 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
1914
1915 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
1916 {
1917 /* The arch-specific native code may need to know about new
1918 forks even if those end up never mapped to an
1919 inferior. */
1920 if (linux_nat_new_fork != NULL)
1921 linux_nat_new_fork (lp, new_pid);
1922 }
1923
1924 if (event == PTRACE_EVENT_FORK
1925 && linux_fork_checkpointing_p (ptid_get_pid (lp->ptid)))
1926 {
1927 /* Handle checkpointing by linux-fork.c here as a special
1928 case. We don't want the follow-fork-mode or 'catch fork'
1929 to interfere with this. */
1930
1931 /* This won't actually modify the breakpoint list, but will
1932 physically remove the breakpoints from the child. */
1933 detach_breakpoints (ptid_build (new_pid, new_pid, 0));
1934
1935 /* Retain child fork in ptrace (stopped) state. */
1936 if (!find_fork_pid (new_pid))
1937 add_fork (new_pid);
1938
1939 /* Report as spurious, so that infrun doesn't want to follow
1940 this fork. We're actually doing an infcall in
1941 linux-fork.c. */
1942 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
1943
1944 /* Report the stop to the core. */
1945 return 0;
1946 }
1947
1948 if (event == PTRACE_EVENT_FORK)
1949 ourstatus->kind = TARGET_WAITKIND_FORKED;
1950 else if (event == PTRACE_EVENT_VFORK)
1951 ourstatus->kind = TARGET_WAITKIND_VFORKED;
1952 else
1953 {
1954 struct lwp_info *new_lp;
1955
1956 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1957
1958 if (debug_linux_nat)
1959 fprintf_unfiltered (gdb_stdlog,
1960 "LHEW: Got clone event "
1961 "from LWP %d, new child is LWP %ld\n",
1962 pid, new_pid);
1963
1964 new_lp = add_lwp (ptid_build (ptid_get_pid (lp->ptid), new_pid, 0));
1965 new_lp->cloned = 1;
1966 new_lp->stopped = 1;
1967
1968 if (WSTOPSIG (status) != SIGSTOP)
1969 {
1970 /* This can happen if someone starts sending signals with
1971 a lower number than SIGSTOP (e.g. SIGUSR1) to the new
1972 thread before it gets a chance to run. This is an
1973 unlikely case, and harder to handle for fork / vfork
1974 than for clone, so we do not try for those - but we
1975 handle it for clone events here. We'll pass the
1976 other signal on to the thread below. */
1977
1978 new_lp->signalled = 1;
1979 }
1980 else
1981 {
1982 struct thread_info *tp;
1983
1984 /* When we stop for an event in some other thread, and
1985 pull the thread list just as this thread has cloned,
1986 we'll have seen the new thread in the thread_db list
1987 before handling the CLONE event (glibc's
1988 pthread_create adds the new thread to the thread list
1989 before clone'ing, and has the kernel fill in the
1990 thread's tid on the clone call with
1991 CLONE_PARENT_SETTID). If that happened, and the core
1992 had requested the new thread to stop, we'll have
1993 killed it with SIGSTOP. But since SIGSTOP is not an
1994 RT signal, it can only be queued once. We need to be
1995 careful to not resume the LWP if we wanted it to
1996 stop. In that case, we'll leave the SIGSTOP pending.
1997 It will later be reported as GDB_SIGNAL_0. */
1998 tp = find_thread_ptid (new_lp->ptid);
1999 if (tp != NULL && tp->stop_requested)
2000 new_lp->last_resume_kind = resume_stop;
2001 else
2002 status = 0;
2003 }
2004
2005 if (non_stop)
2006 {
2007 /* Add the new thread to GDB's lists as soon as possible
2008 so that:
2009
2010 1) the frontend doesn't have to wait for a stop to
2011 display them, and,
2012
2013 2) we tag it with the correct running state. */
2014
2015 /* If the thread_db layer is active, let it know about
2016 this new thread, and add it to GDB's list. */
2017 if (!thread_db_attach_lwp (new_lp->ptid))
2018 {
2019 /* We're not using thread_db. Add it to GDB's
2020 list. */
2021 target_post_attach (ptid_get_lwp (new_lp->ptid));
2022 add_thread (new_lp->ptid);
2023 }
2024
2025 if (!stopping)
2026 {
2027 set_running (new_lp->ptid, 1);
2028 set_executing (new_lp->ptid, 1);
2029 /* thread_db_attach_lwp -> lin_lwp_attach_lwp forced
2030 resume_stop. */
2031 new_lp->last_resume_kind = resume_continue;
2032 }
2033 }
2034
2035 if (status != 0)
2036 {
2037 /* We created NEW_LP so it cannot yet contain STATUS. */
2038 gdb_assert (new_lp->status == 0);
2039
2040 /* Save the wait status to report later. */
2041 if (debug_linux_nat)
2042 fprintf_unfiltered (gdb_stdlog,
2043 "LHEW: waitpid of new LWP %ld, "
2044 "saving status %s\n",
2045 (long) ptid_get_lwp (new_lp->ptid),
2046 status_to_str (status));
2047 new_lp->status = status;
2048 }
2049
2050 /* Note the need to use the low target ops to resume, to
2051 handle resuming with PT_SYSCALL if we have syscall
2052 catchpoints. */
2053 if (!stopping)
2054 {
2055 new_lp->resumed = 1;
2056
2057 if (status == 0)
2058 {
2059 gdb_assert (new_lp->last_resume_kind == resume_continue);
2060 if (debug_linux_nat)
2061 fprintf_unfiltered (gdb_stdlog,
2062 "LHEW: resuming new LWP %ld\n",
2063 ptid_get_lwp (new_lp->ptid));
2064 if (linux_nat_prepare_to_resume != NULL)
2065 linux_nat_prepare_to_resume (new_lp);
2066 linux_ops->to_resume (linux_ops, pid_to_ptid (new_pid),
2067 0, GDB_SIGNAL_0);
2068 new_lp->stopped = 0;
2069 }
2070 }
2071
2072 if (debug_linux_nat)
2073 fprintf_unfiltered (gdb_stdlog,
2074 "LHEW: resuming parent LWP %d\n", pid);
2075 if (linux_nat_prepare_to_resume != NULL)
2076 linux_nat_prepare_to_resume (lp);
2077 linux_ops->to_resume (linux_ops,
2078 pid_to_ptid (ptid_get_lwp (lp->ptid)),
2079 0, GDB_SIGNAL_0);
2080 lp->stopped = 0;
2081 return 1;
2082 }
2083
2084 return 0;
2085 }
2086
2087 if (event == PTRACE_EVENT_EXEC)
2088 {
2089 if (debug_linux_nat)
2090 fprintf_unfiltered (gdb_stdlog,
2091 "LHEW: Got exec event from LWP %ld\n",
2092 ptid_get_lwp (lp->ptid));
2093
2094 ourstatus->kind = TARGET_WAITKIND_EXECD;
2095 ourstatus->value.execd_pathname
2096 = xstrdup (linux_child_pid_to_exec_file (NULL, pid));
2097
2098 return 0;
2099 }
2100
2101 if (event == PTRACE_EVENT_VFORK_DONE)
2102 {
2103 if (current_inferior ()->waiting_for_vfork_done)
2104 {
2105 if (debug_linux_nat)
2106 fprintf_unfiltered (gdb_stdlog,
2107 "LHEW: Got expected PTRACE_EVENT_"
2108 "VFORK_DONE from LWP %ld: stopping\n",
2109 ptid_get_lwp (lp->ptid));
2110
2111 ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
2112 return 0;
2113 }
2114
2115 if (debug_linux_nat)
2116 fprintf_unfiltered (gdb_stdlog,
2117 "LHEW: Got PTRACE_EVENT_VFORK_DONE "
2118 "from LWP %ld: resuming\n",
2119 ptid_get_lwp (lp->ptid));
2120 ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
2121 return 1;
2122 }
2123
2124 internal_error (__FILE__, __LINE__,
2125 _("unknown ptrace event %d"), event);
2126 }
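
/* For illustration, the core of the clone-event handshake above in
   isolation, with illustrative names: the ptracer learns the new
   LWP's id from PTRACE_GETEVENTMSG and then waits for the new LWP's
   initial stop.  Error handling is elided.  */
#if 0
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static pid_t
sketch_handle_clone_event (pid_t parent_lwp)
{
  unsigned long new_pid;
  int status;

  ptrace (PTRACE_GETEVENTMSG, parent_lwp, 0, &new_pid);
  /* The new LWP starts with a pending SIGSTOP; collect it (clones
     need __WCLONE).  */
  waitpid ((pid_t) new_pid, &status, __WCLONE);
  return (pid_t) new_pid;
}
#endif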
2127
2128 /* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2129 exited. */
2130
2131 static int
2132 wait_lwp (struct lwp_info *lp)
2133 {
2134 pid_t pid;
2135 int status = 0;
2136 int thread_dead = 0;
2137 sigset_t prev_mask;
2138
2139 gdb_assert (!lp->stopped);
2140 gdb_assert (lp->status == 0);
2141
2142 /* Make sure SIGCHLD is blocked for sigsuspend, avoiding a race below. */
2143 block_child_signals (&prev_mask);
2144
2145 for (;;)
2146 {
2147 /* If my_waitpid returns 0 it means the __WCLONE vs. non-__WCLONE kind
2148 was right and we should just call sigsuspend. */
2149
2150 pid = my_waitpid (ptid_get_lwp (lp->ptid), &status, WNOHANG);
2151 if (pid == -1 && errno == ECHILD)
2152 pid = my_waitpid (ptid_get_lwp (lp->ptid), &status, __WCLONE | WNOHANG);
2153 if (pid == -1 && errno == ECHILD)
2154 {
2155 /* The thread has previously exited. We need to delete it
2156 now because, for some vendor 2.4 kernels with NPTL
2157 support backported, there won't be an exit event unless
2158 it is the main thread. 2.6 kernels will report an exit
2159 event for each thread that exits, as expected. */
2160 thread_dead = 1;
2161 if (debug_linux_nat)
2162 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
2163 target_pid_to_str (lp->ptid));
2164 }
2165 if (pid != 0)
2166 break;
2167
2168 /* Bugs 10970, 12702.
2169 The thread group leader may have exited, in which case we'll lock up
2170 in waitpid if there are other threads, even if they are all zombies.
2171 Basically, we're not supposed to use waitpid this way.
2172 __WCLONE is not applicable for the leader so we can't use that.
2173 LINUX_NAT_THREAD_ALIVE cannot be used here as it requires a STOPPED
2174 process; it gets ESRCH both for the zombie and for running processes.
2175
2176 As a workaround, check if we're waiting for the thread group leader and
2177 if it's a zombie, and avoid calling waitpid if it is.
2178
2179 This is racy: what if the leader becomes a zombie right after we check?
2180 Therefore always use WNOHANG with sigsuspend - it is equivalent to a
2181 blocking waitpid, but linux_proc_pid_is_zombie is safe this way. */
2182
2183 if (ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid)
2184 && linux_proc_pid_is_zombie (ptid_get_lwp (lp->ptid)))
2185 {
2186 thread_dead = 1;
2187 if (debug_linux_nat)
2188 fprintf_unfiltered (gdb_stdlog,
2189 "WL: Thread group leader %s vanished.\n",
2190 target_pid_to_str (lp->ptid));
2191 break;
2192 }
2193
2194 /* Wait for the next SIGCHLD and try again. This may let SIGCHLD
2195 handlers run even though our caller intentionally blocked them
2196 with block_child_signals. Only the loop in linux_nat_wait_1 is
2197 sensitive to this, and there my_waitpid gets called again before
2198 reaching sigsuspend whenever we get called, so we can safely let
2199 the handlers execute here. */
2200
2201 if (debug_linux_nat)
2202 fprintf_unfiltered (gdb_stdlog, "WL: about to sigsuspend\n");
2203 sigsuspend (&suspend_mask);
2204 }
2205
2206 restore_child_signals_mask (&prev_mask);
2207
2208 if (!thread_dead)
2209 {
2210 gdb_assert (pid == ptid_get_lwp (lp->ptid));
2211
2212 if (debug_linux_nat)
2213 {
2214 fprintf_unfiltered (gdb_stdlog,
2215 "WL: waitpid %s received %s\n",
2216 target_pid_to_str (lp->ptid),
2217 status_to_str (status));
2218 }
2219
2220 /* Check if the thread has exited. */
2221 if (WIFEXITED (status) || WIFSIGNALED (status))
2222 {
2223 thread_dead = 1;
2224 if (debug_linux_nat)
2225 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2226 target_pid_to_str (lp->ptid));
2227 }
2228 }
2229
2230 if (thread_dead)
2231 {
2232 exit_lwp (lp);
2233 return 0;
2234 }
2235
2236 gdb_assert (WIFSTOPPED (status));
2237 lp->stopped = 1;
2238
2239 if (lp->must_set_ptrace_flags)
2240 {
2241 struct inferior *inf = find_inferior_pid (ptid_get_pid (lp->ptid));
2242
2243 linux_enable_event_reporting (ptid_get_lwp (lp->ptid), inf->attach_flag);
2244 lp->must_set_ptrace_flags = 0;
2245 }
2246
2247 /* Handle GNU/Linux's syscall SIGTRAPs. */
2248 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2249 {
2250 /* No longer need the sysgood bit. The ptrace event ends up
2251 recorded in lp->waitstatus if we care for it. We can carry
2252 on handling the event like a regular SIGTRAP from here
2253 on. */
2254 status = W_STOPCODE (SIGTRAP);
2255 if (linux_handle_syscall_trap (lp, 1))
2256 return wait_lwp (lp);
2257 }
2258
2259 /* Handle GNU/Linux's extended waitstatus for trace events. */
2260 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
2261 && linux_is_extended_waitstatus (status))
2262 {
2263 if (debug_linux_nat)
2264 fprintf_unfiltered (gdb_stdlog,
2265 "WL: Handling extended status 0x%06x\n",
2266 status);
2267 if (linux_handle_extended_wait (lp, status, 1))
2268 return wait_lwp (lp);
2269 }
2270
2271 return status;
2272 }
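
/* A condensed sketch of the wait loop above, with illustrative
   names: poll with WNOHANG under both waitpid "kinds", then block in
   sigsuspend until the next SIGCHLD if nothing is pending.  Assumes
   the glibc __WCLONE extension, that SIGCHLD is blocked on entry,
   and that SUSPEND_MASK unblocks it, as in the real code.  */
#if 0
#include <errno.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>

static int
sketch_wait_lwp (pid_t lwpid, const sigset_t *suspend_mask)
{
  int status;
  pid_t pid;

  for (;;)
    {
      pid = waitpid (lwpid, &status, WNOHANG);
      if (pid == -1 && errno == ECHILD)
	pid = waitpid (lwpid, &status, __WCLONE | WNOHANG);
      if (pid > 0)
	return status;		/* Got an event for LWPID.  */

      /* Nothing pending; sleep until the next SIGCHLD.  */
      sigsuspend (suspend_mask);
    }
}
#endif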
2273
2274 /* Send a SIGSTOP to LP. */
2275
2276 static int
2277 stop_callback (struct lwp_info *lp, void *data)
2278 {
2279 if (!lp->stopped && !lp->signalled)
2280 {
2281 int ret;
2282
2283 if (debug_linux_nat)
2284 {
2285 fprintf_unfiltered (gdb_stdlog,
2286 "SC: kill %s **<SIGSTOP>**\n",
2287 target_pid_to_str (lp->ptid));
2288 }
2289 errno = 0;
2290 ret = kill_lwp (ptid_get_lwp (lp->ptid), SIGSTOP);
2291 if (debug_linux_nat)
2292 {
2293 fprintf_unfiltered (gdb_stdlog,
2294 "SC: lwp kill %d %s\n",
2295 ret,
2296 errno ? safe_strerror (errno) : "ERRNO-OK");
2297 }
2298
2299 lp->signalled = 1;
2300 gdb_assert (lp->status == 0);
2301 }
2302
2303 return 0;
2304 }
2305
2306 /* Request a stop on LWP. */
2307
2308 void
2309 linux_stop_lwp (struct lwp_info *lwp)
2310 {
2311 stop_callback (lwp, NULL);
2312 }
2313
2314 /* Return non-zero if LWP PID has a pending SIGINT. */
2315
2316 static int
2317 linux_nat_has_pending_sigint (int pid)
2318 {
2319 sigset_t pending, blocked, ignored;
2320
2321 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2322
2323 if (sigismember (&pending, SIGINT)
2324 && !sigismember (&ignored, SIGINT))
2325 return 1;
2326
2327 return 0;
2328 }
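
/* A rough sketch of what a helper like linux_proc_pending_signals
   boils down to: parse the hex signal-mask lines out of
   /proc/PID/status.  The field names are kernel ABI; the parsing
   here is deliberately simplified.  Bit (SIGNO - 1) set in the
   result means signal SIGNO is pending.  */
#if 0
#include <stdio.h>
#include <string.h>

static unsigned long long
sketch_pending_signal_mask (int pid)
{
  char path[64], line[256];
  unsigned long long pending = 0, shared = 0;
  FILE *f;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;
  while (fgets (line, sizeof (line), f) != NULL)
    {
      if (strncmp (line, "SigPnd:", 7) == 0)	/* Per-thread queue.  */
	sscanf (line + 7, "%llx", &pending);
      else if (strncmp (line, "ShdPnd:", 7) == 0)	/* Shared queue.  */
	sscanf (line + 7, "%llx", &shared);
    }
  fclose (f);
  return pending | shared;
}
#endif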
2329
2330 /* Set a flag in LP indicating that we should ignore its next SIGINT. */
2331
2332 static int
2333 set_ignore_sigint (struct lwp_info *lp, void *data)
2334 {
2335 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2336 flag to consume the next one. */
2337 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2338 && WSTOPSIG (lp->status) == SIGINT)
2339 lp->status = 0;
2340 else
2341 lp->ignore_sigint = 1;
2342
2343 return 0;
2344 }
2345
2346 /* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2347 This function is called after we know the LWP has stopped; if the LWP
2348 stopped before the expected SIGINT was delivered, then it will never have
2349 arrived. Also, if the signal was delivered to a shared queue and consumed
2350 by a different thread, it will never be delivered to this LWP. */
2351
2352 static void
2353 maybe_clear_ignore_sigint (struct lwp_info *lp)
2354 {
2355 if (!lp->ignore_sigint)
2356 return;
2357
2358 if (!linux_nat_has_pending_sigint (ptid_get_lwp (lp->ptid)))
2359 {
2360 if (debug_linux_nat)
2361 fprintf_unfiltered (gdb_stdlog,
2362 "MCIS: Clearing bogus flag for %s\n",
2363 target_pid_to_str (lp->ptid));
2364 lp->ignore_sigint = 0;
2365 }
2366 }
2367
2368 /* Fetch the info for the data watchpoint that may have triggered,
2369 and store it in LP.
2370
2371 On some archs, like x86, that use debug registers to set
2372 watchpoints, the way to know which watched address trapped is to
2373 check the register that is used to select which address to
2374 watch. The problem is, between setting the watchpoint and
2375 reading back which data address trapped, the user may change
2376 the set of watchpoints, and, as a consequence, GDB changes the
2377 debug registers in the inferior. To avoid reading back a stale
2378 stopped-data-address when that happens, we cache in LP the fact
2379 that a watchpoint trapped, and the corresponding data address, as
2380 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2381 registers meanwhile, we have the cached data we can rely on. */
2382
2383 static void
2384 save_sigtrap (struct lwp_info *lp)
2385 {
2386 struct cleanup *old_chain;
2387
2388 if (linux_ops->to_stopped_by_watchpoint == NULL)
2389 {
2390 lp->stopped_by_watchpoint = 0;
2391 return;
2392 }
2393
2394 old_chain = save_inferior_ptid ();
2395 inferior_ptid = lp->ptid;
2396
2397 lp->stopped_by_watchpoint = linux_ops->to_stopped_by_watchpoint (linux_ops);
2398
2399 if (lp->stopped_by_watchpoint)
2400 {
2401 if (linux_ops->to_stopped_data_address != NULL)
2402 lp->stopped_data_address_p =
2403 linux_ops->to_stopped_data_address (&current_target,
2404 &lp->stopped_data_address);
2405 else
2406 lp->stopped_data_address_p = 0;
2407 }
2408
2409 do_cleanups (old_chain);
2410 }
2411
2412 /* See save_sigtrap. */
2413
2414 static int
2415 linux_nat_stopped_by_watchpoint (struct target_ops *ops)
2416 {
2417 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2418
2419 gdb_assert (lp != NULL);
2420
2421 return lp->stopped_by_watchpoint;
2422 }
2423
2424 static int
2425 linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
2426 {
2427 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2428
2429 gdb_assert (lp != NULL);
2430
2431 *addr_p = lp->stopped_data_address;
2432
2433 return lp->stopped_data_address_p;
2434 }
2435
2436 /* Commonly any breakpoint / watchpoint generates only SIGTRAP. */
2437
2438 static int
2439 sigtrap_is_event (int status)
2440 {
2441 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2442 }
2443
2444 /* SIGTRAP-like events recognizer. */
2445
2446 static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
2447
2448 /* Check for SIGTRAP-like events in LP. */
2449
2450 static int
2451 linux_nat_lp_status_is_event (struct lwp_info *lp)
2452 {
2453 /* We check for lp->waitstatus in addition to lp->status, because we can
2454 have pending process exits recorded in lp->status
2455 and W_EXITCODE(0,0) == 0. We should probably have an additional
2456 lp->status_p flag. */
2457
2458 return (lp->waitstatus.kind == TARGET_WAITKIND_IGNORE
2459 && linux_nat_status_is_event (lp->status));
2460 }
2461
2462 /* Set an alternative recognizer for SIGTRAP-like events. If
2463 breakpoint_inserted_here_p reports a breakpoint at the stop address,
2464 then gdbarch_decr_pc_after_break will be applied. */
2465
2466 void
2467 linux_nat_set_status_is_event (struct target_ops *t,
2468 int (*status_is_event) (int status))
2469 {
2470 linux_nat_status_is_event = status_is_event;
2471 }
2472
2473 /* Wait until LP is stopped. */
2474
2475 static int
2476 stop_wait_callback (struct lwp_info *lp, void *data)
2477 {
2478 struct inferior *inf = find_inferior_ptid (lp->ptid);
2479
2480 /* If this is a vfork parent, bail out; it is not going to report
2481 any SIGSTOP until the vfork is done. */
2482 if (inf->vfork_child != NULL)
2483 return 0;
2484
2485 if (!lp->stopped)
2486 {
2487 int status;
2488
2489 status = wait_lwp (lp);
2490 if (status == 0)
2491 return 0;
2492
2493 if (lp->ignore_sigint && WIFSTOPPED (status)
2494 && WSTOPSIG (status) == SIGINT)
2495 {
2496 lp->ignore_sigint = 0;
2497
2498 errno = 0;
2499 ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
2500 lp->stopped = 0;
2501 if (debug_linux_nat)
2502 fprintf_unfiltered (gdb_stdlog,
2503 "PTRACE_CONT %s, 0, 0 (%s) "
2504 "(discarding SIGINT)\n",
2505 target_pid_to_str (lp->ptid),
2506 errno ? safe_strerror (errno) : "OK");
2507
2508 return stop_wait_callback (lp, NULL);
2509 }
2510
2511 maybe_clear_ignore_sigint (lp);
2512
2513 if (WSTOPSIG (status) != SIGSTOP)
2514 {
2515 /* The thread was stopped with a signal other than SIGSTOP. */
2516
2517 save_sigtrap (lp);
2518
2519 if (debug_linux_nat)
2520 fprintf_unfiltered (gdb_stdlog,
2521 "SWC: Pending event %s in %s\n",
2522 status_to_str ((int) status),
2523 target_pid_to_str (lp->ptid));
2524
2525 /* Save the sigtrap event. */
2526 lp->status = status;
2527 gdb_assert (lp->signalled);
2528 }
2529 else
2530 {
2531 /* We caught the SIGSTOP that we intended to catch, so
2532 there's no SIGSTOP pending. */
2533
2534 if (debug_linux_nat)
2535 fprintf_unfiltered (gdb_stdlog,
2536 "SWC: Delayed SIGSTOP caught for %s.\n",
2537 target_pid_to_str (lp->ptid));
2538
2539 /* Reset SIGNALLED only after the stop_wait_callback call
2540 above as it does gdb_assert on SIGNALLED. */
2541 lp->signalled = 0;
2542 }
2543 }
2544
2545 return 0;
2546 }
2547
2548 /* Return non-zero if LP has a wait status pending. */
2549
2550 static int
2551 status_callback (struct lwp_info *lp, void *data)
2552 {
2553 /* Only report a pending wait status if we pretend that this has
2554 indeed been resumed. */
2555 if (!lp->resumed)
2556 return 0;
2557
2558 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2559 {
2560 /* A ptrace event, like PTRACE_FORK|VFORK|EXEC, syscall event,
2561 or a pending process exit. Note that `W_EXITCODE(0,0) ==
2562 0', so a clean process exit cannot be stored pending in
2563 lp->status; it is indistinguishable from
2564 no-pending-status. */
2565 return 1;
2566 }
2567
2568 if (lp->status != 0)
2569 return 1;
2570
2571 return 0;
2572 }
2573
2574 /* Return non-zero if LP isn't stopped. */
2575
2576 static int
2577 running_callback (struct lwp_info *lp, void *data)
2578 {
2579 return (!lp->stopped
2580 || ((lp->status != 0
2581 || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2582 && lp->resumed));
2583 }
2584
2585 /* Count the LWPs that have had events. */
2586
2587 static int
2588 count_events_callback (struct lwp_info *lp, void *data)
2589 {
2590 int *count = data;
2591
2592 gdb_assert (count != NULL);
2593
2594 /* Count only resumed LWPs that have a SIGTRAP event pending. */
2595 if (lp->resumed && linux_nat_lp_status_is_event (lp))
2596 (*count)++;
2597
2598 return 0;
2599 }
2600
2601 /* Select the LWP (if any) that is currently being single-stepped. */
2602
2603 static int
2604 select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2605 {
2606 if (lp->last_resume_kind == resume_step
2607 && lp->status != 0)
2608 return 1;
2609 else
2610 return 0;
2611 }
2612
2613 /* Select the Nth LWP that has had a SIGTRAP event. */
2614
2615 static int
2616 select_event_lwp_callback (struct lwp_info *lp, void *data)
2617 {
2618 int *selector = data;
2619
2620 gdb_assert (selector != NULL);
2621
2622 /* Select only resumed LWPs that have a SIGTRAP event pending. */
2623 if (lp->resumed && linux_nat_lp_status_is_event (lp))
2624 if ((*selector)-- == 0)
2625 return 1;
2626
2627 return 0;
2628 }
2629
2630 static int
2631 cancel_breakpoint (struct lwp_info *lp)
2632 {
2633 /* Arrange for a breakpoint to be hit again later. We don't keep
2634 the SIGTRAP status and don't forward the SIGTRAP signal to the
2635 LWP. We will handle the current event, eventually we will resume
2636 this LWP, and this breakpoint will trap again.
2637
2638 If we do not do this, then we run the risk that the user will
2639 delete or disable the breakpoint, but the LWP will have already
2640 tripped on it. */
2641
2642 struct regcache *regcache = get_thread_regcache (lp->ptid);
2643 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2644 CORE_ADDR pc;
2645
2646 pc = regcache_read_pc (regcache) - target_decr_pc_after_break (gdbarch);
2647 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
2648 {
2649 if (debug_linux_nat)
2650 fprintf_unfiltered (gdb_stdlog,
2651 "CB: Push back breakpoint for %s\n",
2652 target_pid_to_str (lp->ptid));
2653
2654 /* Back up the PC if necessary. */
2655 if (target_decr_pc_after_break (gdbarch))
2656 regcache_write_pc (regcache, pc);
2657
2658 return 1;
2659 }
2660 return 0;
2661 }
2662
2663 static int
2664 cancel_breakpoints_callback (struct lwp_info *lp, void *data)
2665 {
2666 struct lwp_info *event_lp = data;
2667
2668 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2669 if (lp == event_lp)
2670 return 0;
2671
2672 /* If a LWP other than the LWP that we're reporting an event for has
2673 hit a GDB breakpoint (as opposed to some random trap signal),
2674 then just arrange for it to hit it again later. We don't keep
2675 the SIGTRAP status and don't forward the SIGTRAP signal to the
2676 LWP. We will handle the current event, eventually we will resume
2677 all LWPs, and this one will get its breakpoint trap again.
2678
2679 If we do not do this, then we run the risk that the user will
2680 delete or disable the breakpoint, but the LWP will have already
2681 tripped on it. */
2682
2683 if (linux_nat_lp_status_is_event (lp)
2684 && cancel_breakpoint (lp))
2685 /* Throw away the SIGTRAP. */
2686 lp->status = 0;
2687
2688 return 0;
2689 }
2690
2691 /* Select one LWP out of those that have events pending. */
2692
2693 static void
2694 select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
2695 {
2696 int num_events = 0;
2697 int random_selector;
2698 struct lwp_info *event_lp;
2699
2700 /* Record the wait status for the original LWP. */
2701 (*orig_lp)->status = *status;
2702
2703 /* Give preference to any LWP that is being single-stepped. */
2704 event_lp = iterate_over_lwps (filter,
2705 select_singlestep_lwp_callback, NULL);
2706 if (event_lp != NULL)
2707 {
2708 if (debug_linux_nat)
2709 fprintf_unfiltered (gdb_stdlog,
2710 "SEL: Select single-step %s\n",
2711 target_pid_to_str (event_lp->ptid));
2712 }
2713 else
2714 {
2715 /* No single-stepping LWP. Select one at random, out of those
2716 which have had SIGTRAP events. */
2717
2718 /* First see how many SIGTRAP events we have. */
2719 iterate_over_lwps (filter, count_events_callback, &num_events);
2720
2721 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2722 random_selector = (int)
2723 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2724
2725 if (debug_linux_nat && num_events > 1)
2726 fprintf_unfiltered (gdb_stdlog,
2727 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2728 num_events, random_selector);
2729
2730 event_lp = iterate_over_lwps (filter,
2731 select_event_lwp_callback,
2732 &random_selector);
2733 }
2734
2735 if (event_lp != NULL)
2736 {
2737 /* Switch the event LWP. */
2738 *orig_lp = event_lp;
2739 *status = event_lp->status;
2740 }
2741
2742 /* Flush the wait status for the event LWP. */
2743 (*orig_lp)->status = 0;
2744 }
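
/* The selector above maps rand () uniformly onto [0, num_events):
   scaling by num_events / (RAND_MAX + 1.0) splits the rand () range
   into num_events equal buckets, avoiding the slight bias of the
   more obvious rand () % num_events.  A sketch, with an
   illustrative name:  */
#if 0
#include <stdlib.h>

static int
sketch_pick_event (int num_events)
{
  return (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
}
#endif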
2745
2746 /* Return non-zero if LP has been resumed. */
2747
2748 static int
2749 resumed_callback (struct lwp_info *lp, void *data)
2750 {
2751 return lp->resumed;
2752 }
2753
2754 /* Stop an active thread, verify it still exists, then resume it. If
2755 the thread ends up with a pending status, then it is not resumed,
2756 and *DATA (really a pointer to int) is set. */
2757
2758 static int
2759 stop_and_resume_callback (struct lwp_info *lp, void *data)
2760 {
2761 int *new_pending_p = data;
2762
2763 if (!lp->stopped)
2764 {
2765 ptid_t ptid = lp->ptid;
2766
2767 stop_callback (lp, NULL);
2768 stop_wait_callback (lp, NULL);
2769
2770 /* Resume if the lwp still exists, and the core wanted it
2771 running. */
2772 lp = find_lwp_pid (ptid);
2773 if (lp != NULL)
2774 {
2775 if (lp->last_resume_kind == resume_stop
2776 && lp->status == 0)
2777 {
2778 /* The core wanted the LWP to stop. Even if it stopped
2779 cleanly (with SIGSTOP), leave the event pending. */
2780 if (debug_linux_nat)
2781 fprintf_unfiltered (gdb_stdlog,
2782 "SARC: core wanted LWP %ld stopped "
2783 "(leaving SIGSTOP pending)\n",
2784 ptid_get_lwp (lp->ptid));
2785 lp->status = W_STOPCODE (SIGSTOP);
2786 }
2787
2788 if (lp->status == 0)
2789 {
2790 if (debug_linux_nat)
2791 fprintf_unfiltered (gdb_stdlog,
2792 "SARC: re-resuming LWP %ld\n",
2793 ptid_get_lwp (lp->ptid));
2794 resume_lwp (lp, lp->step, GDB_SIGNAL_0);
2795 }
2796 else
2797 {
2798 if (debug_linux_nat)
2799 fprintf_unfiltered (gdb_stdlog,
2800 "SARC: not re-resuming LWP %ld "
2801 "(has pending)\n",
2802 ptid_get_lwp (lp->ptid));
2803 if (new_pending_p)
2804 *new_pending_p = 1;
2805 }
2806 }
2807 }
2808 return 0;
2809 }
2810
2811 /* Check if we should go on and pass this event to common code.
2812 Return the affected lwp if we are, or NULL otherwise. If we stop
2813 all lwps temporarily, we may end up with new pending events in some
2814 other lwp. In that case set *NEW_PENDING_P to true. */
2815
2816 static struct lwp_info *
2817 linux_nat_filter_event (int lwpid, int status, int *new_pending_p)
2818 {
2819 struct lwp_info *lp;
2820 int event = linux_ptrace_get_extended_event (status);
2821
2822 *new_pending_p = 0;
2823
2824 lp = find_lwp_pid (pid_to_ptid (lwpid));
2825
2826 /* Check for stop events reported by a process we didn't already
2827 know about - anything not already in our LWP list.
2828
2829 If we're expecting to receive stopped processes after
2830 fork, vfork, and clone events, then we'll just add the
2831 new one to our list and go back to waiting for the event
2832 to be reported - the stopped process might be returned
2833 from waitpid before or after the event is.
2834
2835 But note the case of a non-leader thread exec'ing after the
2836 leader has exited and gone from our lists. The non-leader
2837 thread changes its tid to the tgid. */
2838
2839 if (WIFSTOPPED (status) && lp == NULL
2840 && (WSTOPSIG (status) == SIGTRAP && event == PTRACE_EVENT_EXEC))
2841 {
2842 /* A multi-thread exec after we had seen the leader exiting. */
2843 if (debug_linux_nat)
2844 fprintf_unfiltered (gdb_stdlog,
2845 "LLW: Re-adding thread group leader LWP %d.\n",
2846 lwpid);
2847
2848 lp = add_lwp (ptid_build (lwpid, lwpid, 0));
2849 lp->stopped = 1;
2850 lp->resumed = 1;
2851 add_thread (lp->ptid);
2852 }
2853
2854 if (WIFSTOPPED (status) && !lp)
2855 {
2856 add_to_pid_list (&stopped_pids, lwpid, status);
2857 return NULL;
2858 }
2859
2860 /* Make sure we don't report an event for the exit of an LWP not in
2861 our list, i.e. not part of the current process. This can happen
2862 if we detach from a program we originally forked and then it
2863 exits. */
2864 if (!WIFSTOPPED (status) && !lp)
2865 return NULL;
2866
2867 /* This LWP is stopped now. (And if dead, this prevents it from
2868 ever being continued.) */
2869 lp->stopped = 1;
2870
2871 if (WIFSTOPPED (status) && lp->must_set_ptrace_flags)
2872 {
2873 struct inferior *inf = find_inferior_pid (ptid_get_pid (lp->ptid));
2874
2875 linux_enable_event_reporting (ptid_get_lwp (lp->ptid), inf->attach_flag);
2876 lp->must_set_ptrace_flags = 0;
2877 }
2878
2879 /* Handle GNU/Linux's syscall SIGTRAPs. */
2880 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2881 {
2882 /* No longer need the sysgood bit. The ptrace event ends up
2883 recorded in lp->waitstatus if we care for it. We can carry
2884 on handling the event like a regular SIGTRAP from here
2885 on. */
2886 status = W_STOPCODE (SIGTRAP);
2887 if (linux_handle_syscall_trap (lp, 0))
2888 return NULL;
2889 }
2890
2891 /* Handle GNU/Linux's extended waitstatus for trace events. */
2892 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
2893 && linux_is_extended_waitstatus (status))
2894 {
2895 if (debug_linux_nat)
2896 fprintf_unfiltered (gdb_stdlog,
2897 "LLW: Handling extended status 0x%06x\n",
2898 status);
2899 if (linux_handle_extended_wait (lp, status, 0))
2900 return NULL;
2901 }
2902
2903 if (linux_nat_status_is_event (status))
2904 save_sigtrap (lp);
2905
2906 /* Check if the thread has exited. */
2907 if ((WIFEXITED (status) || WIFSIGNALED (status))
2908 && num_lwps (ptid_get_pid (lp->ptid)) > 1)
2909 {
2910 /* If this is the main thread, we must stop all threads and verify
2911 if they are still alive. This is because in the NPTL thread model
2912 on Linux 2.4, there is no signal issued for exiting LWPs
2913 other than the main thread. We only get the main thread exit
2914 signal once all child threads have already exited. If we
2915 stop all the threads and use the stop_wait_callback to check
2916 if they have exited, we can determine whether this signal
2917 should be ignored or whether it means the end of the debugged
2918 application, regardless of which threading model is being
2919 used. */
2920 if (ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid))
2921 {
2922 iterate_over_lwps (pid_to_ptid (ptid_get_pid (lp->ptid)),
2923 stop_and_resume_callback, new_pending_p);
2924 }
2925
2926 if (debug_linux_nat)
2927 fprintf_unfiltered (gdb_stdlog,
2928 "LLW: %s exited.\n",
2929 target_pid_to_str (lp->ptid));
2930
2931 if (num_lwps (ptid_get_pid (lp->ptid)) > 1)
2932 {
2933 /* If there is at least one more LWP, then the exit signal
2934 was not the end of the debugged application and should be
2935 ignored. */
2936 exit_lwp (lp);
2937 return NULL;
2938 }
2939 }
2940
2941 /* Check if the current LWP has previously exited. In the NPTL
2942 thread model, LWPs other than the main thread do not issue
2943 signals when they exit, so we must check whenever the thread has
2944 stopped. A similar check is made in stop_wait_callback(). */
2945 if (num_lwps (ptid_get_pid (lp->ptid)) > 1 && !linux_thread_alive (lp->ptid))
2946 {
2947 ptid_t ptid = pid_to_ptid (ptid_get_pid (lp->ptid));
2948
2949 if (debug_linux_nat)
2950 fprintf_unfiltered (gdb_stdlog,
2951 "LLW: %s exited.\n",
2952 target_pid_to_str (lp->ptid));
2953
2954 exit_lwp (lp);
2955
2956 /* Make sure there is at least one thread running. */
2957 gdb_assert (iterate_over_lwps (ptid, running_callback, NULL));
2958
2959 /* Discard the event. */
2960 return NULL;
2961 }
2962
2963 /* Make sure we don't report a SIGSTOP that we sent ourselves in
2964 an attempt to stop an LWP. */
2965 if (lp->signalled
2966 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
2967 {
2968 if (debug_linux_nat)
2969 fprintf_unfiltered (gdb_stdlog,
2970 "LLW: Delayed SIGSTOP caught for %s.\n",
2971 target_pid_to_str (lp->ptid));
2972
2973 lp->signalled = 0;
2974
2975 if (lp->last_resume_kind != resume_stop)
2976 {
2977 /* This is a delayed SIGSTOP. */
2978
2979 registers_changed ();
2980
2981 if (linux_nat_prepare_to_resume != NULL)
2982 linux_nat_prepare_to_resume (lp);
2983 linux_ops->to_resume (linux_ops,
2984 pid_to_ptid (ptid_get_lwp (lp->ptid)),
2985 lp->step, GDB_SIGNAL_0);
2986 if (debug_linux_nat)
2987 fprintf_unfiltered (gdb_stdlog,
2988 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
2989 lp->step ?
2990 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2991 target_pid_to_str (lp->ptid));
2992
2993 lp->stopped = 0;
2994 gdb_assert (lp->resumed);
2995
2996 /* Discard the event. */
2997 return NULL;
2998 }
2999 }
3000
3001 /* Make sure we don't report a SIGINT that we have already displayed
3002 for another thread. */
3003 if (lp->ignore_sigint
3004 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
3005 {
3006 if (debug_linux_nat)
3007 fprintf_unfiltered (gdb_stdlog,
3008 "LLW: Delayed SIGINT caught for %s.\n",
3009 target_pid_to_str (lp->ptid));
3010
3011 /* This is a delayed SIGINT. */
3012 lp->ignore_sigint = 0;
3013
3014 registers_changed ();
3015 if (linux_nat_prepare_to_resume != NULL)
3016 linux_nat_prepare_to_resume (lp);
3017 linux_ops->to_resume (linux_ops, pid_to_ptid (ptid_get_lwp (lp->ptid)),
3018 lp->step, GDB_SIGNAL_0);
3019 if (debug_linux_nat)
3020 fprintf_unfiltered (gdb_stdlog,
3021 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
3022 lp->step ?
3023 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3024 target_pid_to_str (lp->ptid));
3025
3026 lp->stopped = 0;
3027 gdb_assert (lp->resumed);
3028
3029 /* Discard the event. */
3030 return NULL;
3031 }
3032
3033 /* An interesting event. */
3034 gdb_assert (lp);
3035 lp->status = status;
3036 return lp;
3037 }
3038
3039 /* Detect zombie thread group leaders, and "exit" them. We can't reap
3040 their exits until all other threads in the group have exited. */
3041
3042 static void
3043 check_zombie_leaders (void)
3044 {
3045 struct inferior *inf;
3046
3047 ALL_INFERIORS (inf)
3048 {
3049 struct lwp_info *leader_lp;
3050
3051 if (inf->pid == 0)
3052 continue;
3053
3054 leader_lp = find_lwp_pid (pid_to_ptid (inf->pid));
3055 if (leader_lp != NULL
3056 /* Check if there are other threads in the group, as we may
3057 have raced with the inferior simply exiting. */
3058 && num_lwps (inf->pid) > 1
3059 && linux_proc_pid_is_zombie (inf->pid))
3060 {
3061 if (debug_linux_nat)
3062 fprintf_unfiltered (gdb_stdlog,
3063 "CZL: Thread group leader %d zombie "
3064 "(it exited, or another thread execd).\n",
3065 inf->pid);
3066
3067 /* A leader zombie can mean one of two things:
3068
3069 - It exited, and there's an exit status pending
3070 and available, or only the leader exited (not the whole
3071 program). In the latter case, we can't waitpid the
3072 leader's exit status until all other threads are gone.
3073
3074 - There are 3 or more threads in the group, and a thread
3075 other than the leader exec'd. On an exec, the Linux
3076 kernel destroys all other threads (except the execing
3077 one) in the thread group, and resets the execing thread's
3078 tid to the tgid. No exit notification is sent for the
3079 execing thread -- from the ptracer's perspective, it
3080 appears as though the execing thread just vanishes.
3081 Until we reap all other threads except the leader and the
3082 execing thread, the leader will be zombie, and the
3083 execing thread will be in `D (disc sleep)'. As soon as
3084 all other threads are reaped, the execing thread changes
3085 it's tid to the tgid, and the previous (zombie) leader
3086 vanishes, giving place to the "new" leader. We could try
3087 distinguishing the exit and exec cases, by waiting once
3088 more, and seeing if something comes out, but it doesn't
3089 sound useful. The previous leader _does_ go away, and
3090 we'll re-add the new one once we see the exec event
3091 (which is just the same as what would happen if the
3092 previous leader did exit voluntarily before some other
3093 thread execs). */
3094
3095 if (debug_linux_nat)
3096 fprintf_unfiltered (gdb_stdlog,
3097 "CZL: Thread group leader %d vanished.\n",
3098 inf->pid);
3099 exit_lwp (leader_lp);
3100 }
3101 }
3102 }
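
/* A rough sketch of the zombie test this relies on: a helper like
   linux_proc_pid_is_zombie can be implemented by reading the State:
   line of /proc/PID/status and checking for `Z'.  Simplified, with
   an illustrative name and no error reporting.  */
#if 0
#include <stdio.h>
#include <string.h>

static int
sketch_pid_is_zombie (int pid)
{
  char path[64], line[256], state = '\0';
  FILE *f;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;
  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
	sscanf (line + 6, " %c", &state);	/* First state letter.  */
	break;
      }
  fclose (f);
  return state == 'Z';
}
#endif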
3103
3104 static ptid_t
3105 linux_nat_wait_1 (struct target_ops *ops,
3106 ptid_t ptid, struct target_waitstatus *ourstatus,
3107 int target_options)
3108 {
3109 sigset_t prev_mask;
3110 enum resume_kind last_resume_kind;
3111 struct lwp_info *lp;
3112 int status;
3113
3114 if (debug_linux_nat)
3115 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
3116
3117 /* The first time we get here after starting a new inferior, we may
3118 not have added it to the LWP list yet - this is the earliest
3119 moment at which we know its PID. */
3120 if (ptid_is_pid (inferior_ptid))
3121 {
3122 /* Upgrade the main thread's ptid. */
3123 thread_change_ptid (inferior_ptid,
3124 ptid_build (ptid_get_pid (inferior_ptid),
3125 ptid_get_pid (inferior_ptid), 0));
3126
3127 lp = add_initial_lwp (inferior_ptid);
3128 lp->resumed = 1;
3129 }
3130
3131 /* Make sure SIGCHLD is blocked until the sigsuspend below. */
3132 block_child_signals (&prev_mask);
3133
3134 retry:
3135 lp = NULL;
3136 status = 0;
3137
3138 /* First check if there is a LWP with a wait status pending. */
3139 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
3140 {
3141 /* Any LWP in the PTID group that's been resumed will do. */
3142 lp = iterate_over_lwps (ptid, status_callback, NULL);
3143 if (lp)
3144 {
3145 if (debug_linux_nat && lp->status)
3146 fprintf_unfiltered (gdb_stdlog,
3147 "LLW: Using pending wait status %s for %s.\n",
3148 status_to_str (lp->status),
3149 target_pid_to_str (lp->ptid));
3150 }
3151 }
3152 else if (ptid_lwp_p (ptid))
3153 {
3154 if (debug_linux_nat)
3155 fprintf_unfiltered (gdb_stdlog,
3156 "LLW: Waiting for specific LWP %s.\n",
3157 target_pid_to_str (ptid));
3158
3159 /* We have a specific LWP to check. */
3160 lp = find_lwp_pid (ptid);
3161 gdb_assert (lp);
3162
3163 if (debug_linux_nat && lp->status)
3164 fprintf_unfiltered (gdb_stdlog,
3165 "LLW: Using pending wait status %s for %s.\n",
3166 status_to_str (lp->status),
3167 target_pid_to_str (lp->ptid));
3168
3169 /* We check for lp->waitstatus in addition to lp->status,
3170 because we can have pending process exits recorded in
3171 lp->status and W_EXITCODE(0,0) == 0. We should probably have
3172 an additional lp->status_p flag. */
3173 if (lp->status == 0 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
3174 lp = NULL;
3175 }
3176
3177 if (!target_can_async_p ())
3178 {
3179 /* Causes SIGINT to be passed on to the attached process. */
3180 set_sigint_trap ();
3181 }
3182
3183 /* But if we don't find a pending event, we'll have to wait. */
3184
3185 while (lp == NULL)
3186 {
3187 pid_t lwpid;
3188
3189 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
3190 quirks:
3191
3192 - If the thread group leader exits while other threads in the
3193 thread group still exist, waitpid(TGID, ...) hangs. That
3194 waitpid won't return an exit status until the other threads
3195 in the group are reaped.
3196
3197 - When a non-leader thread execs, that thread just vanishes
3198 without reporting an exit (so we'd hang if we waited for it
3199 explicitly in that case). The exec event is reported to
3200 the TGID pid. */
3201
3202 errno = 0;
3203 lwpid = my_waitpid (-1, &status, __WCLONE | WNOHANG);
3204 if (lwpid == 0 || (lwpid == -1 && errno == ECHILD))
3205 lwpid = my_waitpid (-1, &status, WNOHANG);
3206
3207 if (debug_linux_nat)
3208 fprintf_unfiltered (gdb_stdlog,
3209 "LNW: waitpid(-1, ...) returned %d, %s\n",
3210 lwpid, errno ? safe_strerror (errno) : "ERRNO-OK");
3211
3212 if (lwpid > 0)
3213 {
3214 /* If this is true, then we paused LWPs momentarily, and may
3215 now have pending events to handle. */
3216 int new_pending;
3217
3218 if (debug_linux_nat)
3219 {
3220 fprintf_unfiltered (gdb_stdlog,
3221 "LLW: waitpid %ld received %s\n",
3222 (long) lwpid, status_to_str (status));
3223 }
3224
3225 lp = linux_nat_filter_event (lwpid, status, &new_pending);
3226
3227 /* STATUS is now no longer valid, use LP->STATUS instead. */
3228 status = 0;
3229
3230 if (lp && !ptid_match (lp->ptid, ptid))
3231 {
3232 gdb_assert (lp->resumed);
3233
3234 if (debug_linux_nat)
3235 fprintf_unfiltered (gdb_stdlog,
3236 "LWP %ld got an event %06x, "
3237 "leaving pending.\n",
3238 ptid_get_lwp (lp->ptid), lp->status);
3239
3240 if (WIFSTOPPED (lp->status))
3241 {
3242 if (WSTOPSIG (lp->status) != SIGSTOP)
3243 {
3244 /* Cancel breakpoint hits. The breakpoint may
3245 be removed before we fetch events from this
3246 process to report to the core. It is best
3247 not to assume the moribund breakpoints
3248 heuristic always handles these cases --- it
3249 could be that too many events go through to the
3250 core before this one is handled. All-stop
3251 always cancels breakpoint hits in all
3252 threads. */
3253 if (non_stop
3254 && linux_nat_lp_status_is_event (lp)
3255 && cancel_breakpoint (lp))
3256 {
3257 /* Throw away the SIGTRAP. */
3258 lp->status = 0;
3259
3260 if (debug_linux_nat)
3261 fprintf_unfiltered (gdb_stdlog,
3262 "LLW: LWP %ld hit a "
3263 "breakpoint while "
3264 "waiting for another "
3265 "process; "
3266 "cancelled it\n",
3267 ptid_get_lwp (lp->ptid));
3268 }
3269 }
3270 else
3271 lp->signalled = 0;
3272 }
3273 else if (WIFEXITED (lp->status) || WIFSIGNALED (lp->status))
3274 {
3275 if (debug_linux_nat)
3276 fprintf_unfiltered (gdb_stdlog,
3277 "Process %ld exited while stopping "
3278 "LWPs\n",
3279 ptid_get_lwp (lp->ptid));
3280
3281 /* This was the last lwp in the process. Events
3282 are serialized to the GDB core and we can't
3283 report this one right now, but the core and the
3284 other target layers will want to be notified
3285 about the exit code/signal, so leave the status
3286 pending for the next time we're able to report
3287 it. */
3288
3289 /* Dead LWPs aren't expected to report a pending
3290 SIGSTOP. */
3291 lp->signalled = 0;
3292
3293 /* Store the pending event in the waitstatus as
3294 well, because W_EXITCODE(0,0) == 0. */
3295 store_waitstatus (&lp->waitstatus, lp->status);
3296 }
3297
3298 /* Keep looking. */
3299 lp = NULL;
3300 }
3301
3302 if (new_pending)
3303 {
3304 /* Some LWP now has a pending event. Go all the way
3305 back to check it. */
3306 goto retry;
3307 }
3308
3309 if (lp)
3310 {
3311 /* We got an event to report to the core. */
3312 break;
3313 }
3314
3315 /* Retry until nothing comes out of waitpid. A single
3316 SIGCHLD can indicate more than one child stopped. */
3317 continue;
3318 }
3319
3320 /* Check for zombie thread group leaders. Those can't be reaped
3321 until all other threads in the thread group are. */
3322 check_zombie_leaders ();
3323
3324 /* If there are no resumed children left, bail. We'd be stuck
3325 forever in the sigsuspend call below otherwise. */
3326 if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
3327 {
3328 if (debug_linux_nat)
3329 fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");
3330
3331 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3332
3333 if (!target_can_async_p ())
3334 clear_sigint_trap ();
3335
3336 restore_child_signals_mask (&prev_mask);
3337 return minus_one_ptid;
3338 }
3339
3340 /* No interesting event to report to the core. */
3341
3342 if (target_options & TARGET_WNOHANG)
3343 {
3344 if (debug_linux_nat)
3345 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3346
3347 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3348 restore_child_signals_mask (&prev_mask);
3349 return minus_one_ptid;
3350 }
3351
3352 /* We shouldn't end up here unless we want to try again. */
3353 gdb_assert (lp == NULL);
3354
3355 /* Block until we get an event reported with SIGCHLD. */
3356 if (debug_linux_nat)
3357 fprintf_unfiltered (gdb_stdlog, "LNW: about to sigsuspend\n");
3358 sigsuspend (&suspend_mask);
3359 }
3360
3361 if (!target_can_async_p ())
3362 clear_sigint_trap ();
3363
3364 gdb_assert (lp);
3365
3366 status = lp->status;
3367 lp->status = 0;
3368
3369 /* Don't report signals that GDB isn't interested in, such as
3370 signals that are neither printed nor stopped upon. Stopping all
3371 threads can be a bit time-consuming, so if we want decent
3372 performance with heavily multi-threaded programs, especially when
3373 they're using a high frequency timer, we'd better avoid it if we
3374 can. */
3375
3376 if (WIFSTOPPED (status))
3377 {
3378 enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));
3379
3380 /* When using hardware single-step, we need to report every signal.
3381 Otherwise, signals in pass_mask may be short-circuited. */
3382 if (!lp->step
3383 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status)))
3384 {
3385 /* FIXME: kettenis/2001-06-06: Should we resume all threads
3386 here? It is not clear we should. GDB may not expect
3387 other threads to run. On the other hand, not resuming
3388 newly attached threads may cause an unwanted delay in
3389 getting them running. */
3390 registers_changed ();
3391 if (linux_nat_prepare_to_resume != NULL)
3392 linux_nat_prepare_to_resume (lp);
3393 linux_ops->to_resume (linux_ops,
3394 pid_to_ptid (ptid_get_lwp (lp->ptid)),
3395 lp->step, signo);
3396 if (debug_linux_nat)
3397 fprintf_unfiltered (gdb_stdlog,
3398 "LLW: %s %s, %s (preempt 'handle')\n",
3399 lp->step ?
3400 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3401 target_pid_to_str (lp->ptid),
3402 (signo != GDB_SIGNAL_0
3403 ? strsignal (gdb_signal_to_host (signo))
3404 : "0"));
3405 lp->stopped = 0;
3406 goto retry;
3407 }
3408
3409 if (!non_stop)
3410 {
3411 /* Only do the below in all-stop, as we currently use SIGSTOP
3412 to implement target_stop (see linux_nat_stop) in
3413 non-stop. */
3414 if (signo == GDB_SIGNAL_INT && signal_pass_state (signo) == 0)
3415 {
3416 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3417 forwarded to the entire process group, that is, all LWPs
3418 will receive it - unless they're using CLONE_THREAD to
3419 share signals. Since we only want to report it once, we
3420 mark it as ignored for all LWPs except this one. */
3421 iterate_over_lwps (pid_to_ptid (ptid_get_pid (ptid)),
3422 set_ignore_sigint, NULL);
3423 lp->ignore_sigint = 0;
3424 }
3425 else
3426 maybe_clear_ignore_sigint (lp);
3427 }
3428 }
3429
3430 /* This LWP is stopped now. */
3431 lp->stopped = 1;
3432
3433 if (debug_linux_nat)
3434 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
3435 status_to_str (status), target_pid_to_str (lp->ptid));
3436
3437 if (!non_stop)
3438 {
3439 /* Now stop all other LWPs ... */
3440 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
3441
3442 /* ... and wait until all of them have reported back that
3443 they're no longer running. */
3444 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
3445
3446 /* If we're not waiting for a specific LWP, choose an event LWP
3447 from among those that have had events. Giving equal priority
3448 to all LWPs that have had events helps prevent
3449 starvation. */
3450 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
3451 select_event_lwp (ptid, &lp, &status);
3452
3453 /* Now that we've selected our final event LWP, cancel any
3454 breakpoints in other LWPs that have hit a GDB breakpoint.
3455 See the comment in cancel_breakpoints_callback to find out
3456 why. */
3457 iterate_over_lwps (minus_one_ptid, cancel_breakpoints_callback, lp);
3458
3459 /* We'll need this to determine whether to report a SIGSTOP as
3460 GDB_SIGNAL_0. Need to take a copy because
3461 resume_clear_callback clears it. */
3462 last_resume_kind = lp->last_resume_kind;
3463
3464 /* In all-stop, from the core's perspective, all LWPs are now
3465 stopped until a new resume action is sent over. */
3466 iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
3467 }
3468 else
3469 {
3470 /* See above. */
3471 last_resume_kind = lp->last_resume_kind;
3472 resume_clear_callback (lp, NULL);
3473 }
3474
3475 if (linux_nat_status_is_event (status))
3476 {
3477 if (debug_linux_nat)
3478 fprintf_unfiltered (gdb_stdlog,
3479 "LLW: trap ptid is %s.\n",
3480 target_pid_to_str (lp->ptid));
3481 }
3482
3483 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3484 {
3485 *ourstatus = lp->waitstatus;
3486 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3487 }
3488 else
3489 store_waitstatus (ourstatus, status);
3490
3491 if (debug_linux_nat)
3492 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3493
3494 restore_child_signals_mask (&prev_mask);
3495
3496 if (last_resume_kind == resume_stop
3497 && ourstatus->kind == TARGET_WAITKIND_STOPPED
3498 && WSTOPSIG (status) == SIGSTOP)
3499 {
3500 /* This thread was requested to stop by GDB with target_stop,
3501 and it stopped cleanly, so report it as GDB_SIGNAL_0. The
3502 use of SIGSTOP is an implementation detail. */
3503 ourstatus->value.sig = GDB_SIGNAL_0;
3504 }
3505
3506 if (ourstatus->kind == TARGET_WAITKIND_EXITED
3507 || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
3508 lp->core = -1;
3509 else
3510 lp->core = linux_common_core_of_thread (lp->ptid);
3511
3512 return lp->ptid;
3513 }
3514
3515 /* Resume LWPs that are currently stopped without any pending status
3516 to report, but are resumed from the core's perspective. */
3517
3518 static int
3519 resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
3520 {
3521 ptid_t *wait_ptid_p = data;
3522
3523 if (lp->stopped
3524 && lp->resumed
3525 && lp->status == 0
3526 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
3527 {
3528 struct regcache *regcache = get_thread_regcache (lp->ptid);
3529 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3530 CORE_ADDR pc = regcache_read_pc (regcache);
3531
3532 gdb_assert (is_executing (lp->ptid));
3533
3534 /* Don't bother if there's a breakpoint at PC that we'd hit
3535 immediately, and we're not waiting for this LWP. */
3536 if (!ptid_match (lp->ptid, *wait_ptid_p))
3537 {
3538 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3539 return 0;
3540 }
3541
3542 if (debug_linux_nat)
3543 fprintf_unfiltered (gdb_stdlog,
3544 "RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
3545 target_pid_to_str (lp->ptid),
3546 paddress (gdbarch, pc),
3547 lp->step);
3548
3549 registers_changed ();
3550 if (linux_nat_prepare_to_resume != NULL)
3551 linux_nat_prepare_to_resume (lp);
3552 linux_ops->to_resume (linux_ops, pid_to_ptid (ptid_get_lwp (lp->ptid)),
3553 lp->step, GDB_SIGNAL_0);
3554 lp->stopped = 0;
3555 lp->stopped_by_watchpoint = 0;
3556 }
3557
3558 return 0;
3559 }
3560
3561 static ptid_t
3562 linux_nat_wait (struct target_ops *ops,
3563 ptid_t ptid, struct target_waitstatus *ourstatus,
3564 int target_options)
3565 {
3566 ptid_t event_ptid;
3567
3568 if (debug_linux_nat)
3569 {
3570 char *options_string;
3571
3572 options_string = target_options_to_string (target_options);
3573 fprintf_unfiltered (gdb_stdlog,
3574 "linux_nat_wait: [%s], [%s]\n",
3575 target_pid_to_str (ptid),
3576 options_string);
3577 xfree (options_string);
3578 }
3579
3580 /* Flush the async file first. */
3581 if (target_can_async_p ())
3582 async_file_flush ();
3583
3584 /* Resume LWPs that are currently stopped without any pending status
3585 to report, but are resumed from the core's perspective. LWPs get
3586 in this state if we find them stopping at a time we're not
3587 interested in reporting the event (target_wait on a
3588 specific_process, for example, see linux_nat_wait_1), and
3589 meanwhile the event became uninteresting. Don't bother resuming
3590 LWPs we're not going to wait for if they'd stop immediately. */
3591 if (non_stop)
3592 iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
3593
3594 event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);
3595
3596 /* If we requested any event, and something came out, assume there
3597 may be more. If we requested a specific lwp or process, also
3598 assume there may be more. */
3599 if (target_can_async_p ()
3600 && ((ourstatus->kind != TARGET_WAITKIND_IGNORE
3601 && ourstatus->kind != TARGET_WAITKIND_NO_RESUMED)
3602 || !ptid_equal (ptid, minus_one_ptid)))
3603 async_file_mark ();
3604
3605 /* Get ready for the next event. */
3606 if (target_can_async_p ())
3607 target_async (inferior_event_handler, 0);
3608
3609 return event_ptid;
3610 }
3611
3612 static int
3613 kill_callback (struct lwp_info *lp, void *data)
3614 {
3615 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
3616
3617 errno = 0;
3618 kill_lwp (ptid_get_lwp (lp->ptid), SIGKILL);
3619 if (debug_linux_nat)
3620 {
3621 int save_errno = errno;
3622
3623 fprintf_unfiltered (gdb_stdlog,
3624 "KC: kill (SIGKILL) %s, 0, 0 (%s)\n",
3625 target_pid_to_str (lp->ptid),
3626 save_errno ? safe_strerror (save_errno) : "OK");
3627 }
3628
3629 /* Some kernels ignore even SIGKILL for processes under ptrace. */
3630
3631 errno = 0;
3632 ptrace (PTRACE_KILL, ptid_get_lwp (lp->ptid), 0, 0);
3633 if (debug_linux_nat)
3634 {
3635 int save_errno = errno;
3636
3637 fprintf_unfiltered (gdb_stdlog,
3638 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
3639 target_pid_to_str (lp->ptid),
3640 save_errno ? safe_strerror (save_errno) : "OK");
3641 }
3642
3643 return 0;
3644 }
3645
3646 static int
3647 kill_wait_callback (struct lwp_info *lp, void *data)
3648 {
3649 pid_t pid;
3650
3651   /* We must make sure that there are no pending events (delayed
3652      SIGSTOPs, pending SIGTRAPs, etc.) left behind, so that the current
3653      program doesn't interfere with any following debugging session.  */
3654
3655   /* For cloned processes we must check both with __WCLONE and
3656      without, since the exit status of a cloned process isn't always
3657      reported with __WCLONE.  */
3658 if (lp->cloned)
3659 {
3660 do
3661 {
3662 pid = my_waitpid (ptid_get_lwp (lp->ptid), NULL, __WCLONE);
3663 if (pid != (pid_t) -1)
3664 {
3665 if (debug_linux_nat)
3666 fprintf_unfiltered (gdb_stdlog,
3667 "KWC: wait %s received unknown.\n",
3668 target_pid_to_str (lp->ptid));
3669 		  /* The Linux kernel sometimes fails to kill a thread
3670 		     completely after PTRACE_KILL; the thread goes from the
3671 		     stop point in do_fork out to the one in
3672 		     get_signal_to_deliver and waits again.  So kill it
3673 		     again.  */
3674 kill_callback (lp, NULL);
3675 }
3676 }
3677 while (pid == ptid_get_lwp (lp->ptid));
3678
3679 gdb_assert (pid == -1 && errno == ECHILD);
3680 }
3681
3682 do
3683 {
3684 pid = my_waitpid (ptid_get_lwp (lp->ptid), NULL, 0);
3685 if (pid != (pid_t) -1)
3686 {
3687 if (debug_linux_nat)
3688 fprintf_unfiltered (gdb_stdlog,
3689 "KWC: wait %s received unk.\n",
3690 target_pid_to_str (lp->ptid));
3691 /* See the call to kill_callback above. */
3692 kill_callback (lp, NULL);
3693 }
3694 }
3695 while (pid == ptid_get_lwp (lp->ptid));
3696
3697 gdb_assert (pid == -1 && errno == ECHILD);
3698 return 0;
3699 }
3700
3701 static void
3702 linux_nat_kill (struct target_ops *ops)
3703 {
3704 struct target_waitstatus last;
3705 ptid_t last_ptid;
3706 int status;
3707
3708 /* If we're stopped while forking and we haven't followed yet,
3709 kill the other task. We need to do this first because the
3710 parent will be sleeping if this is a vfork. */
3711
3712 get_last_target_status (&last_ptid, &last);
3713
3714 if (last.kind == TARGET_WAITKIND_FORKED
3715 || last.kind == TARGET_WAITKIND_VFORKED)
3716 {
3717 ptrace (PT_KILL, ptid_get_pid (last.value.related_pid), 0, 0);
3718 wait (&status);
3719
3720 /* Let the arch-specific native code know this process is
3721 gone. */
3722 linux_nat_forget_process (ptid_get_pid (last.value.related_pid));
3723 }
3724
3725 if (forks_exist_p ())
3726 linux_fork_killall ();
3727 else
3728 {
3729 ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
3730
3731       /* Stop all threads before killing them, since ptrace requires
3732 	 that the thread is stopped to successfully PTRACE_KILL.  */
3733 iterate_over_lwps (ptid, stop_callback, NULL);
3734 /* ... and wait until all of them have reported back that
3735 they're no longer running. */
3736 iterate_over_lwps (ptid, stop_wait_callback, NULL);
3737
3738 /* Kill all LWP's ... */
3739 iterate_over_lwps (ptid, kill_callback, NULL);
3740
3741 /* ... and wait until we've flushed all events. */
3742 iterate_over_lwps (ptid, kill_wait_callback, NULL);
3743 }
3744
3745 target_mourn_inferior ();
3746 }
3747
3748 static void
3749 linux_nat_mourn_inferior (struct target_ops *ops)
3750 {
3751 int pid = ptid_get_pid (inferior_ptid);
3752
3753 purge_lwp_list (pid);
3754
3755 if (! forks_exist_p ())
3756 /* Normal case, no other forks available. */
3757 linux_ops->to_mourn_inferior (ops);
3758 else
3759 /* Multi-fork case. The current inferior_ptid has exited, but
3760 there are other viable forks to debug. Delete the exiting
3761 one and context-switch to the first available. */
3762 linux_fork_mourn_inferior ();
3763
3764 /* Let the arch-specific native code know this process is gone. */
3765 linux_nat_forget_process (pid);
3766 }
3767
3768 /* Convert a native/host siginfo object, into/from the siginfo in the
3769 layout of the inferiors' architecture. */
3770
3771 static void
3772 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
3773 {
3774 int done = 0;
3775
3776 if (linux_nat_siginfo_fixup != NULL)
3777 done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
3778
3779 /* If there was no callback, or the callback didn't do anything,
3780 then just do a straight memcpy. */
3781 if (!done)
3782 {
3783 if (direction == 1)
3784 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
3785 else
3786 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
3787 }
3788 }
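/* A usage sketch: when reading the inferior's signal info, the native
   siginfo_t fetched with PTRACE_GETSIGINFO is converted with
   siginfo_fixup (&siginfo, inf_siginfo, 0) into the inferior's layout;
   when writing it back, siginfo_fixup (&siginfo, inf_siginfo, 1)
   converts the inferior's layout to the native one before
   PTRACE_SETSIGINFO.  See linux_xfer_siginfo below.  */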
3789
3790 static enum target_xfer_status
3791 linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
3792 const char *annex, gdb_byte *readbuf,
3793 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
3794 ULONGEST *xfered_len)
3795 {
3796 int pid;
3797 siginfo_t siginfo;
3798 gdb_byte inf_siginfo[sizeof (siginfo_t)];
3799
3800 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
3801 gdb_assert (readbuf || writebuf);
3802
3803 pid = ptid_get_lwp (inferior_ptid);
3804 if (pid == 0)
3805 pid = ptid_get_pid (inferior_ptid);
3806
3807 if (offset > sizeof (siginfo))
3808 return TARGET_XFER_E_IO;
3809
3810 errno = 0;
3811 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3812 if (errno != 0)
3813 return TARGET_XFER_E_IO;
3814
3815 /* When GDB is built as a 64-bit application, ptrace writes into
3816 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
3817 inferior with a 64-bit GDB should look the same as debugging it
3818 with a 32-bit GDB, we need to convert it. GDB core always sees
3819 the converted layout, so any read/write will have to be done
3820 post-conversion. */
3821 siginfo_fixup (&siginfo, inf_siginfo, 0);
3822
3823 if (offset + len > sizeof (siginfo))
3824 len = sizeof (siginfo) - offset;
3825
3826 if (readbuf != NULL)
3827 memcpy (readbuf, inf_siginfo + offset, len);
3828 else
3829 {
3830 memcpy (inf_siginfo + offset, writebuf, len);
3831
3832 /* Convert back to ptrace layout before flushing it out. */
3833 siginfo_fixup (&siginfo, inf_siginfo, 1);
3834
3835 errno = 0;
3836 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3837 if (errno != 0)
3838 return TARGET_XFER_E_IO;
3839 }
3840
3841 *xfered_len = len;
3842 return TARGET_XFER_OK;
3843 }
3844
3845 static enum target_xfer_status
3846 linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
3847 const char *annex, gdb_byte *readbuf,
3848 const gdb_byte *writebuf,
3849 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
3850 {
3851 struct cleanup *old_chain;
3852 enum target_xfer_status xfer;
3853
3854 if (object == TARGET_OBJECT_SIGNAL_INFO)
3855 return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
3856 offset, len, xfered_len);
3857
3858 /* The target is connected but no live inferior is selected. Pass
3859 this request down to a lower stratum (e.g., the executable
3860 file). */
3861 if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
3862 return TARGET_XFER_EOF;
3863
3864 old_chain = save_inferior_ptid ();
3865
3866 if (ptid_lwp_p (inferior_ptid))
3867 inferior_ptid = pid_to_ptid (ptid_get_lwp (inferior_ptid));
3868
3869 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
3870 offset, len, xfered_len);
3871
3872 do_cleanups (old_chain);
3873 return xfer;
3874 }
3875
3876 static int
3877 linux_thread_alive (ptid_t ptid)
3878 {
3879 int err, tmp_errno;
3880
3881 gdb_assert (ptid_lwp_p (ptid));
3882
3883   /* Send signal 0 instead of using ptrace, because ptracing a
3884      running thread errors out claiming that the thread doesn't
3885      exist.  */
3886 err = kill_lwp (ptid_get_lwp (ptid), 0);
3887 tmp_errno = errno;
3888 if (debug_linux_nat)
3889 fprintf_unfiltered (gdb_stdlog,
3890 "LLTA: KILL(SIG0) %s (%s)\n",
3891 target_pid_to_str (ptid),
3892 err ? safe_strerror (tmp_errno) : "OK");
3893
3894 if (err != 0)
3895 return 0;
3896
3897 return 1;
3898 }
3899
3900 static int
3901 linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
3902 {
3903 return linux_thread_alive (ptid);
3904 }
3905
3906 static char *
3907 linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
3908 {
3909 static char buf[64];
3910
3911 if (ptid_lwp_p (ptid)
3912 && (ptid_get_pid (ptid) != ptid_get_lwp (ptid)
3913 || num_lwps (ptid_get_pid (ptid)) > 1))
3914 {
3915 snprintf (buf, sizeof (buf), "LWP %ld", ptid_get_lwp (ptid));
3916 return buf;
3917 }
3918
3919 return normal_pid_to_str (ptid);
3920 }
3921
3922 static char *
3923 linux_nat_thread_name (struct target_ops *self, struct thread_info *thr)
3924 {
3925 int pid = ptid_get_pid (thr->ptid);
3926 long lwp = ptid_get_lwp (thr->ptid);
3927 #define FORMAT "/proc/%d/task/%ld/comm"
3928 char buf[sizeof (FORMAT) + 30];
3929 FILE *comm_file;
3930 char *result = NULL;
3931
3932 snprintf (buf, sizeof (buf), FORMAT, pid, lwp);
3933 comm_file = gdb_fopen_cloexec (buf, "r");
3934 if (comm_file)
3935 {
3936 /* Not exported by the kernel, so we define it here. */
3937 #define COMM_LEN 16
3938 static char line[COMM_LEN + 1];
3939
3940 if (fgets (line, sizeof (line), comm_file))
3941 {
3942 char *nl = strchr (line, '\n');
3943
3944 if (nl)
3945 *nl = '\0';
3946 if (*line != '\0')
3947 result = line;
3948 }
3949
3950 fclose (comm_file);
3951 }
3952
3953 #undef COMM_LEN
3954 #undef FORMAT
3955
3956 return result;
3957 }
3958
3959 /* Accepts an integer PID; returns a string representing a file that
3960    can be opened to get the symbols for the child process.  */
3961
3962 static char *
3963 linux_child_pid_to_exec_file (struct target_ops *self, int pid)
3964 {
3965 static char buf[PATH_MAX];
3966 char name[PATH_MAX];
3967
3968 xsnprintf (name, PATH_MAX, "/proc/%d/exe", pid);
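  /* readlink does not NUL-terminate what it writes, so zero BUF first
     to guarantee the result is terminated.  */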
3969 memset (buf, 0, PATH_MAX);
3970 if (readlink (name, buf, PATH_MAX - 1) <= 0)
3971 strcpy (buf, name);
3972
3973 return buf;
3974 }
3975
3976 /* Implement the to_xfer_partial interface for memory reads using the /proc
3977 filesystem. Because we can use a single read() call for /proc, this
3978 can be much more efficient than banging away at PTRACE_PEEKTEXT,
3979 but it doesn't support writes. */
3980
3981 static enum target_xfer_status
3982 linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
3983 const char *annex, gdb_byte *readbuf,
3984 const gdb_byte *writebuf,
3985 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
3986 {
3987 LONGEST ret;
3988 int fd;
3989 char filename[64];
3990
3991 if (object != TARGET_OBJECT_MEMORY || !readbuf)
3992     return TARGET_XFER_EOF;
3993
3994   /* Don't bother for transfers of less than three words.  */
3995 if (len < 3 * sizeof (long))
3996 return TARGET_XFER_EOF;
3997
3998 /* We could keep this file open and cache it - possibly one per
3999 thread. That requires some juggling, but is even faster. */
4000 xsnprintf (filename, sizeof filename, "/proc/%d/mem",
4001 ptid_get_pid (inferior_ptid));
4002 fd = gdb_open_cloexec (filename, O_RDONLY | O_LARGEFILE, 0);
4003 if (fd == -1)
4004 return TARGET_XFER_EOF;
4005
4006 /* If pread64 is available, use it. It's faster if the kernel
4007 supports it (only one syscall), and it's 64-bit safe even on
4008 32-bit platforms (for instance, SPARC debugging a SPARC64
4009 application). */
4010 #ifdef HAVE_PREAD64
4011 if (pread64 (fd, readbuf, len, offset) != len)
4012 #else
4013 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
4014 #endif
4015 ret = 0;
4016 else
4017 ret = len;
4018
4019 close (fd);
4020
4021 if (ret == 0)
4022 return TARGET_XFER_EOF;
4023 else
4024 {
4025 *xfered_len = ret;
4026 return TARGET_XFER_OK;
4027 }
4028 }
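/* Note: a TARGET_XFER_EOF result from the function above makes
   linux_xfer_partial (below) fall through to super_xfer_partial,
   i.e. to the ptrace-based transfer of the layer underneath.  */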
4029
4030
4031 /* Enumerate spufs IDs for process PID. */
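/* Specifically: scan the descriptors listed under /proc/PID/fd, and
   for each one that refers to a directory on a spufs mount (an SPU
   context), store the descriptor number as a 4-byte integer in target
   byte order into BUF, honoring the OFFSET/LEN window.  Returns the
   number of bytes written, or -1 if /proc/PID/fd cannot be read.  */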
4032 static LONGEST
4033 spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, ULONGEST len)
4034 {
4035 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
4036 LONGEST pos = 0;
4037 LONGEST written = 0;
4038 char path[128];
4039 DIR *dir;
4040 struct dirent *entry;
4041
4042 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
4043 dir = opendir (path);
4044 if (!dir)
4045 return -1;
4046
4047 rewinddir (dir);
4048 while ((entry = readdir (dir)) != NULL)
4049 {
4050 struct stat st;
4051 struct statfs stfs;
4052 int fd;
4053
4054 fd = atoi (entry->d_name);
4055 if (!fd)
4056 continue;
4057
4058 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
4059 if (stat (path, &st) != 0)
4060 continue;
4061 if (!S_ISDIR (st.st_mode))
4062 continue;
4063
4064 if (statfs (path, &stfs) != 0)
4065 continue;
4066 if (stfs.f_type != SPUFS_MAGIC)
4067 continue;
4068
4069 if (pos >= offset && pos + 4 <= offset + len)
4070 {
4071 store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
4072 written += 4;
4073 }
4074 pos += 4;
4075 }
4076
4077 closedir (dir);
4078 return written;
4079 }
4080
4081 /* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
4082 object type, using the /proc file system. */
4083
4084 static enum target_xfer_status
4085 linux_proc_xfer_spu (struct target_ops *ops, enum target_object object,
4086 const char *annex, gdb_byte *readbuf,
4087 const gdb_byte *writebuf,
4088 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
4089 {
4090 char buf[128];
4091 int fd = 0;
4092 int ret = -1;
4093 int pid = ptid_get_pid (inferior_ptid);
4094
4095 if (!annex)
4096 {
4097 if (!readbuf)
4098 return TARGET_XFER_E_IO;
4099 else
4100 {
4101 LONGEST l = spu_enumerate_spu_ids (pid, readbuf, offset, len);
4102
4103 if (l < 0)
4104 return TARGET_XFER_E_IO;
4105 else if (l == 0)
4106 return TARGET_XFER_EOF;
4107 else
4108 {
4109 *xfered_len = (ULONGEST) l;
4110 return TARGET_XFER_OK;
4111 }
4112 }
4113 }
4114
4115 xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
4116   fd = gdb_open_cloexec (buf, writebuf ? O_WRONLY : O_RDONLY, 0);
4117 if (fd <= 0)
4118 return TARGET_XFER_E_IO;
4119
4120 if (offset != 0
4121 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4122 {
4123 close (fd);
4124 return TARGET_XFER_EOF;
4125 }
4126
4127 if (writebuf)
4128 ret = write (fd, writebuf, (size_t) len);
4129 else if (readbuf)
4130 ret = read (fd, readbuf, (size_t) len);
4131
4132 close (fd);
4133
4134 if (ret < 0)
4135 return TARGET_XFER_E_IO;
4136 else if (ret == 0)
4137 return TARGET_XFER_EOF;
4138 else
4139 {
4140 *xfered_len = (ULONGEST) ret;
4141 return TARGET_XFER_OK;
4142 }
4143 }
4144
4145
4146 /* Parse LINE as a signal set and add its set bits to SIGS. */
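/* For example, given the line "0000000000010000\n" (with the
   "SigBlk:\t" prefix already stripped by the caller), only bit 16 is
   set, i.e. the mask 1 << (17 - 1), so only signal 17, SIGCHLD on most
   Linux targets, is added to SIGS.  */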
4147
4148 static void
4149 add_line_to_sigset (const char *line, sigset_t *sigs)
4150 {
4151 int len = strlen (line) - 1;
4152 const char *p;
4153 int signum;
4154
4155 if (line[len] != '\n')
4156 error (_("Could not parse signal set: %s"), line);
4157
4158 p = line;
4159 signum = len * 4;
4160 while (len-- > 0)
4161 {
4162 int digit;
4163
4164 if (*p >= '0' && *p <= '9')
4165 digit = *p - '0';
4166 else if (*p >= 'a' && *p <= 'f')
4167 digit = *p - 'a' + 10;
4168 else
4169 error (_("Could not parse signal set: %s"), line);
4170
4171 signum -= 4;
4172
4173 if (digit & 1)
4174 sigaddset (sigs, signum + 1);
4175 if (digit & 2)
4176 sigaddset (sigs, signum + 2);
4177 if (digit & 4)
4178 sigaddset (sigs, signum + 3);
4179 if (digit & 8)
4180 sigaddset (sigs, signum + 4);
4181
4182 p++;
4183 }
4184 }
4185
4186 /* Find process PID's pending signals from /proc/pid/status and set
4187 SIGS to match. */
4188
4189 void
4190 linux_proc_pending_signals (int pid, sigset_t *pending,
4191 sigset_t *blocked, sigset_t *ignored)
4192 {
4193 FILE *procfile;
4194 char buffer[PATH_MAX], fname[PATH_MAX];
4195 struct cleanup *cleanup;
4196
4197 sigemptyset (pending);
4198 sigemptyset (blocked);
4199 sigemptyset (ignored);
4200 xsnprintf (fname, sizeof fname, "/proc/%d/status", pid);
4201 procfile = gdb_fopen_cloexec (fname, "r");
4202 if (procfile == NULL)
4203 error (_("Could not open %s"), fname);
4204 cleanup = make_cleanup_fclose (procfile);
4205
4206 while (fgets (buffer, PATH_MAX, procfile) != NULL)
4207 {
4208 /* Normal queued signals are on the SigPnd line in the status
4209 file. However, 2.6 kernels also have a "shared" pending
4210 queue for delivering signals to a thread group, so check for
4211 a ShdPnd line also.
4212
4213 Unfortunately some Red Hat kernels include the shared pending
4214 queue but not the ShdPnd status field. */
4215
4216 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
4217 add_line_to_sigset (buffer + 8, pending);
4218 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
4219 add_line_to_sigset (buffer + 8, pending);
4220 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
4221 add_line_to_sigset (buffer + 8, blocked);
4222 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
4223 add_line_to_sigset (buffer + 8, ignored);
4224 }
4225
4226 do_cleanups (cleanup);
4227 }
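/* A minimal usage sketch (hypothetical caller; PID is assumed to be a
   valid process id):

     sigset_t pending, blocked, ignored;

     linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
     if (sigismember (&pending, SIGINT))
       ... PID has a SIGINT queued ...

   Note that the function throws an error if /proc/PID/status cannot
   be opened.  */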
4228
4229 static enum target_xfer_status
4230 linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
4231 const char *annex, gdb_byte *readbuf,
4232 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4233 ULONGEST *xfered_len)
4234 {
4235 gdb_assert (object == TARGET_OBJECT_OSDATA);
4236
4237 *xfered_len = linux_common_xfer_osdata (annex, readbuf, offset, len);
4238 if (*xfered_len == 0)
4239 return TARGET_XFER_EOF;
4240 else
4241 return TARGET_XFER_OK;
4242 }
4243
4244 static enum target_xfer_status
4245 linux_xfer_partial (struct target_ops *ops, enum target_object object,
4246 const char *annex, gdb_byte *readbuf,
4247 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4248 ULONGEST *xfered_len)
4249 {
4250 enum target_xfer_status xfer;
4251
4252 if (object == TARGET_OBJECT_AUXV)
4253 return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
4254 offset, len, xfered_len);
4255
4256 if (object == TARGET_OBJECT_OSDATA)
4257 return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
4258 offset, len, xfered_len);
4259
4260 if (object == TARGET_OBJECT_SPU)
4261 return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,
4262 offset, len, xfered_len);
4263
4264   /* GDB calculates all addresses in a possibly larger width than the
4265      target's; the address needs to be masked before its final use -
4266      either by linux_proc_xfer_partial or inf_ptrace_xfer_partial.
4267
4268      Compare ADDR_BIT first to avoid a compiler warning on shift overflow.  */
4269
4270 if (object == TARGET_OBJECT_MEMORY)
4271 {
4272 int addr_bit = gdbarch_addr_bit (target_gdbarch ());
4273
4274 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
4275 offset &= ((ULONGEST) 1 << addr_bit) - 1;
4276 }
4277
4278 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
4279 offset, len, xfered_len);
4280 if (xfer != TARGET_XFER_EOF)
4281 return xfer;
4282
4283 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
4284 offset, len, xfered_len);
4285 }
4286
4287 static void
4288 cleanup_target_stop (void *arg)
4289 {
4290 ptid_t *ptid = (ptid_t *) arg;
4291
4292 gdb_assert (arg != NULL);
4293
4294   /* Unpause all.  */
4295 target_resume (*ptid, 0, GDB_SIGNAL_0);
4296 }
4297
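/* Fetch the static tracepoint markers defined in the target's
   in-process agent, optionally restricting the result to markers whose
   string id equals STRID.  The qTfSTM/qTsSTM commands mirror the
   remote protocol packets of the same names: a reply starting with 'm'
   carries one or more comma-separated marker definitions, and qTsSTM
   is reissued until a reply that does not start with 'm' marks the end
   of the list.  All threads are paused while the agent is queried.  */
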
4298 static VEC(static_tracepoint_marker_p) *
4299 linux_child_static_tracepoint_markers_by_strid (struct target_ops *self,
4300 const char *strid)
4301 {
4302 char s[IPA_CMD_BUF_SIZE];
4303 struct cleanup *old_chain;
4304 int pid = ptid_get_pid (inferior_ptid);
4305 VEC(static_tracepoint_marker_p) *markers = NULL;
4306 struct static_tracepoint_marker *marker = NULL;
4307 char *p = s;
4308 ptid_t ptid = ptid_build (pid, 0, 0);
4309
4310   /* Pause all.  */
4311 target_stop (ptid);
4312
4313 memcpy (s, "qTfSTM", sizeof ("qTfSTM"));
4314 s[sizeof ("qTfSTM")] = 0;
4315
4316 agent_run_command (pid, s, strlen (s) + 1);
4317
4318 old_chain = make_cleanup (free_current_marker, &marker);
4319 make_cleanup (cleanup_target_stop, &ptid);
4320
4321 while (*p++ == 'm')
4322 {
4323 if (marker == NULL)
4324 marker = XCNEW (struct static_tracepoint_marker);
4325
4326 do
4327 {
4328 parse_static_tracepoint_marker_definition (p, &p, marker);
4329
4330 if (strid == NULL || strcmp (strid, marker->str_id) == 0)
4331 {
4332 VEC_safe_push (static_tracepoint_marker_p,
4333 markers, marker);
4334 marker = NULL;
4335 }
4336 else
4337 {
4338 release_static_tracepoint_marker (marker);
4339 memset (marker, 0, sizeof (*marker));
4340 }
4341 }
4342 while (*p++ == ','); /* comma-separated list */
4343
4344 memcpy (s, "qTsSTM", sizeof ("qTsSTM"));
4345 s[sizeof ("qTsSTM")] = 0;
4346 agent_run_command (pid, s, strlen (s) + 1);
4347 p = s;
4348 }
4349
4350 do_cleanups (old_chain);
4351
4352 return markers;
4353 }
4354
4355 /* Create a prototype generic GNU/Linux target. The client can override
4356 it with local methods. */
4357
4358 static void
4359 linux_target_install_ops (struct target_ops *t)
4360 {
4361 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
4362 t->to_remove_fork_catchpoint = linux_child_remove_fork_catchpoint;
4363 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
4364 t->to_remove_vfork_catchpoint = linux_child_remove_vfork_catchpoint;
4365 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
4366 t->to_remove_exec_catchpoint = linux_child_remove_exec_catchpoint;
4367 t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
4368 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
4369 t->to_post_startup_inferior = linux_child_post_startup_inferior;
4370 t->to_post_attach = linux_child_post_attach;
4371 t->to_follow_fork = linux_child_follow_fork;
4372
4373 super_xfer_partial = t->to_xfer_partial;
4374 t->to_xfer_partial = linux_xfer_partial;
4375
4376 t->to_static_tracepoint_markers_by_strid
4377 = linux_child_static_tracepoint_markers_by_strid;
4378 }
4379
4380 struct target_ops *
4381 linux_target (void)
4382 {
4383 struct target_ops *t;
4384
4385 t = inf_ptrace_target ();
4386 linux_target_install_ops (t);
4387
4388 return t;
4389 }
4390
4391 struct target_ops *
4392 linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
4393 {
4394 struct target_ops *t;
4395
4396 t = inf_ptrace_trad_target (register_u_offset);
4397 linux_target_install_ops (t);
4398
4399 return t;
4400 }
4401
4402 /* target_is_async_p implementation. */
4403
4404 static int
4405 linux_nat_is_async_p (struct target_ops *ops)
4406 {
4407 /* NOTE: palves 2008-03-21: We're only async when the user requests
4408 it explicitly with the "set target-async" command.
4409 Someday, linux will always be async. */
4410 return target_async_permitted;
4411 }
4412
4413 /* target_can_async_p implementation. */
4414
4415 static int
4416 linux_nat_can_async_p (struct target_ops *ops)
4417 {
4418 /* NOTE: palves 2008-03-21: We're only async when the user requests
4419 it explicitly with the "set target-async" command.
4420 Someday, linux will always be async. */
4421 return target_async_permitted;
4422 }
4423
4424 static int
4425 linux_nat_supports_non_stop (struct target_ops *self)
4426 {
4427 return 1;
4428 }
4429
4430 /* True if we want to support multi-process. To be removed when GDB
4431 supports multi-exec. */
4432
4433 int linux_multi_process = 1;
4434
4435 static int
4436 linux_nat_supports_multi_process (struct target_ops *self)
4437 {
4438 return linux_multi_process;
4439 }
4440
4441 static int
4442 linux_nat_supports_disable_randomization (struct target_ops *self)
4443 {
4444 #ifdef HAVE_PERSONALITY
4445 return 1;
4446 #else
4447 return 0;
4448 #endif
4449 }
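/* (This only reports availability.  The disabling itself happens at
   inferior creation time, roughly
   personality (personality (0xffffffff) | ADDR_NO_RANDOMIZE) around
   the exec; see linux_nat_create_inferior.)  */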
4450
4451 static int async_terminal_is_ours = 1;
4452
4453 /* target_terminal_inferior implementation.
4454
4455 This is a wrapper around child_terminal_inferior to add async support. */
4456
4457 static void
4458 linux_nat_terminal_inferior (struct target_ops *self)
4459 {
4460 if (!target_is_async_p ())
4461 {
4462 /* Async mode is disabled. */
4463 child_terminal_inferior (self);
4464 return;
4465 }
4466
4467 child_terminal_inferior (self);
4468
4469 /* Calls to target_terminal_*() are meant to be idempotent. */
4470 if (!async_terminal_is_ours)
4471 return;
4472
4473 delete_file_handler (input_fd);
4474 async_terminal_is_ours = 0;
4475 set_sigint_trap ();
4476 }
4477
4478 /* target_terminal_ours implementation.
4479
4480 This is a wrapper around child_terminal_ours to add async support (and
4481 implement the target_terminal_ours vs target_terminal_ours_for_output
4482 distinction). child_terminal_ours is currently no different than
4483 child_terminal_ours_for_output.
4484 We leave target_terminal_ours_for_output alone, leaving it to
4485 child_terminal_ours_for_output. */
4486
4487 static void
4488 linux_nat_terminal_ours (struct target_ops *self)
4489 {
4490 if (!target_is_async_p ())
4491 {
4492 /* Async mode is disabled. */
4493 child_terminal_ours (self);
4494 return;
4495 }
4496
4497   /* GDB should never give the terminal to the inferior if the
4498      inferior is running in the background (run&, continue&, etc.),
4499      but claiming it back for GDB sure should.  */
4500 child_terminal_ours (self);
4501
4502 if (async_terminal_is_ours)
4503 return;
4504
4505 clear_sigint_trap ();
4506 add_file_handler (input_fd, stdin_event_handler, 0);
4507 async_terminal_is_ours = 1;
4508 }
4509
4510 static void (*async_client_callback) (enum inferior_event_type event_type,
4511 void *context);
4512 static void *async_client_context;
4513
4514 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
4515    it notifies the event loop when any child changes state; in sync
4516    mode, it allows us to use sigsuspend in linux_nat_wait_1 above to
4517    wait for the arrival of a SIGCHLD.  */
4518
4519 static void
4520 sigchld_handler (int signo)
4521 {
4522 int old_errno = errno;
4523
4524 if (debug_linux_nat)
4525 ui_file_write_async_safe (gdb_stdlog,
4526 "sigchld\n", sizeof ("sigchld\n") - 1);
4527
4528 if (signo == SIGCHLD
4529 && linux_nat_event_pipe[0] != -1)
4530 async_file_mark (); /* Let the event loop know that there are
4531 events to handle. */
4532
4533 errno = old_errno;
4534 }
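/* Note that everything the handler above does must be
   async-signal-safe: ui_file_write_async_safe is, by design, and
   async_file_mark just writes a byte to the non-blocking event pipe
   created by linux_async_pipe below.  */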
4535
4536 /* Callback registered with the target events file descriptor. */
4537
4538 static void
4539 handle_target_event (int error, gdb_client_data client_data)
4540 {
4541 (*async_client_callback) (INF_REG_EVENT, async_client_context);
4542 }
4543
4544 /* Create/destroy the target events pipe. Returns previous state. */
4545
4546 static int
4547 linux_async_pipe (int enable)
4548 {
4549 int previous = (linux_nat_event_pipe[0] != -1);
4550
4551 if (previous != enable)
4552 {
4553 sigset_t prev_mask;
4554
4555 /* Block child signals while we create/destroy the pipe, as
4556 their handler writes to it. */
4557 block_child_signals (&prev_mask);
4558
4559 if (enable)
4560 {
4561 if (gdb_pipe_cloexec (linux_nat_event_pipe) == -1)
4562 internal_error (__FILE__, __LINE__,
4563 "creating event pipe failed.");
4564
4565 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
4566 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
4567 }
4568 else
4569 {
4570 close (linux_nat_event_pipe[0]);
4571 close (linux_nat_event_pipe[1]);
4572 linux_nat_event_pipe[0] = -1;
4573 linux_nat_event_pipe[1] = -1;
4574 }
4575
4576 restore_child_signals_mask (&prev_mask);
4577 }
4578
4579 return previous;
4580 }
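/* Both pipe ends are set to O_NONBLOCK above so that async_file_mark
   never blocks inside sigchld_handler even when the pipe is already
   full; one pending byte is enough to wake up the event loop, so
   repeated marks simply coalesce.  */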
4581
4582 /* target_async implementation. */
4583
4584 static void
4585 linux_nat_async (struct target_ops *ops,
4586 void (*callback) (enum inferior_event_type event_type,
4587 void *context),
4588 void *context)
4589 {
4590 if (callback != NULL)
4591 {
4592 async_client_callback = callback;
4593 async_client_context = context;
4594 if (!linux_async_pipe (1))
4595 {
4596 add_file_handler (linux_nat_event_pipe[0],
4597 handle_target_event, NULL);
4598 /* There may be pending events to handle. Tell the event loop
4599 to poll them. */
4600 async_file_mark ();
4601 }
4602 }
4603 else
4604 {
4605 async_client_callback = callback;
4606 async_client_context = context;
4607 delete_file_handler (linux_nat_event_pipe[0]);
4608 linux_async_pipe (0);
4609 }
4610 return;
4611 }
4612
4613 /* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
4614 event came out. */
4615
4616 static int
4617 linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
4618 {
4619 if (!lwp->stopped)
4620 {
4621 if (debug_linux_nat)
4622 fprintf_unfiltered (gdb_stdlog,
4623 "LNSL: running -> suspending %s\n",
4624 target_pid_to_str (lwp->ptid));
4625
4627 if (lwp->last_resume_kind == resume_stop)
4628 {
4629 if (debug_linux_nat)
4630 fprintf_unfiltered (gdb_stdlog,
4631 "linux-nat: already stopping LWP %ld at "
4632 "GDB's request\n",
4633 ptid_get_lwp (lwp->ptid));
4634 return 0;
4635 }
4636
4637 stop_callback (lwp, NULL);
4638 lwp->last_resume_kind = resume_stop;
4639 }
4640 else
4641 {
4642 /* Already known to be stopped; do nothing. */
4643
4644 if (debug_linux_nat)
4645 {
4646 if (find_thread_ptid (lwp->ptid)->stop_requested)
4647 fprintf_unfiltered (gdb_stdlog,
4648 "LNSL: already stopped/stop_requested %s\n",
4649 target_pid_to_str (lwp->ptid));
4650 else
4651 fprintf_unfiltered (gdb_stdlog,
4652 "LNSL: already stopped/no "
4653 "stop_requested yet %s\n",
4654 target_pid_to_str (lwp->ptid));
4655 }
4656 }
4657 return 0;
4658 }
4659
4660 static void
4661 linux_nat_stop (struct target_ops *self, ptid_t ptid)
4662 {
4663 if (non_stop)
4664 iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
4665 else
4666 linux_ops->to_stop (linux_ops, ptid);
4667 }
4668
4669 static void
4670 linux_nat_close (struct target_ops *self)
4671 {
4672 /* Unregister from the event loop. */
4673 if (linux_nat_is_async_p (self))
4674 linux_nat_async (self, NULL, NULL);
4675
4676 if (linux_ops->to_close)
4677 linux_ops->to_close (linux_ops);
4678
4679 super_close (self);
4680 }
4681
4682 /* When requests are passed down from the linux-nat layer to the
4683 single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
4684 used. The address space pointer is stored in the inferior object,
4685 but the common code that is passed such ptid can't tell whether
4686 lwpid is a "main" process id or not (it assumes so). We reverse
4687 look up the "main" process id from the lwp here. */
4688
4689 static struct address_space *
4690 linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
4691 {
4692 struct lwp_info *lwp;
4693 struct inferior *inf;
4694 int pid;
4695
4696 if (ptid_get_lwp (ptid) == 0)
4697 {
4698 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
4699 tgid. */
4700 lwp = find_lwp_pid (ptid);
4701 pid = ptid_get_pid (lwp->ptid);
4702 }
4703 else
4704 {
4705 /* A (pid,lwpid,0) ptid. */
4706 pid = ptid_get_pid (ptid);
4707 }
4708
4709 inf = find_inferior_pid (pid);
4710 gdb_assert (inf != NULL);
4711 return inf->aspace;
4712 }
4713
4714 /* Return the cached value of the processor core for thread PTID. */
4715
4716 static int
4717 linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
4718 {
4719 struct lwp_info *info = find_lwp_pid (ptid);
4720
4721 if (info)
4722 return info->core;
4723 return -1;
4724 }
4725
4726 void
4727 linux_nat_add_target (struct target_ops *t)
4728 {
4729 /* Save the provided single-threaded target. We save this in a separate
4730 variable because another target we've inherited from (e.g. inf-ptrace)
4731 may have saved a pointer to T; we want to use it for the final
4732 process stratum target. */
4733 linux_ops_saved = *t;
4734 linux_ops = &linux_ops_saved;
4735
4736 /* Override some methods for multithreading. */
4737 t->to_create_inferior = linux_nat_create_inferior;
4738 t->to_attach = linux_nat_attach;
4739 t->to_detach = linux_nat_detach;
4740 t->to_resume = linux_nat_resume;
4741 t->to_wait = linux_nat_wait;
4742 t->to_pass_signals = linux_nat_pass_signals;
4743 t->to_xfer_partial = linux_nat_xfer_partial;
4744 t->to_kill = linux_nat_kill;
4745 t->to_mourn_inferior = linux_nat_mourn_inferior;
4746 t->to_thread_alive = linux_nat_thread_alive;
4747 t->to_pid_to_str = linux_nat_pid_to_str;
4748 t->to_thread_name = linux_nat_thread_name;
4749 t->to_has_thread_control = tc_schedlock;
4750 t->to_thread_address_space = linux_nat_thread_address_space;
4751 t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
4752 t->to_stopped_data_address = linux_nat_stopped_data_address;
4753
4754 t->to_can_async_p = linux_nat_can_async_p;
4755 t->to_is_async_p = linux_nat_is_async_p;
4756 t->to_supports_non_stop = linux_nat_supports_non_stop;
4757 t->to_async = linux_nat_async;
4758 t->to_terminal_inferior = linux_nat_terminal_inferior;
4759 t->to_terminal_ours = linux_nat_terminal_ours;
4760
4761 super_close = t->to_close;
4762 t->to_close = linux_nat_close;
4763
4764 /* Methods for non-stop support. */
4765 t->to_stop = linux_nat_stop;
4766
4767 t->to_supports_multi_process = linux_nat_supports_multi_process;
4768
4769 t->to_supports_disable_randomization
4770 = linux_nat_supports_disable_randomization;
4771
4772 t->to_core_of_thread = linux_nat_core_of_thread;
4773
4774 /* We don't change the stratum; this target will sit at
4775      process_stratum and thread_db will sit at thread_stratum.  This
4776 is a little strange, since this is a multi-threaded-capable
4777 target, but we want to be on the stack below thread_db, and we
4778 also want to be used for single-threaded processes. */
4779
4780 add_target (t);
4781 }
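/* A typical architecture backend wires itself up roughly like this (a
   sketch; the arch_* names are hypothetical):

     void
     _initialize_arch_linux_nat (void)
     {
       struct target_ops *t = linux_target ();

       t->to_fetch_registers = arch_linux_fetch_registers;
       t->to_store_registers = arch_linux_store_registers;
       linux_nat_add_target (t);
       linux_nat_set_new_thread (t, arch_linux_new_thread);
     }
*/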
4782
4783 /* Register a method to call whenever a new thread is attached. */
4784 void
4785 linux_nat_set_new_thread (struct target_ops *t,
4786 void (*new_thread) (struct lwp_info *))
4787 {
4788 /* Save the pointer. We only support a single registered instance
4789 of the GNU/Linux native target, so we do not need to map this to
4790 T. */
4791 linux_nat_new_thread = new_thread;
4792 }
4793
4794 /* See declaration in linux-nat.h. */
4795
4796 void
4797 linux_nat_set_new_fork (struct target_ops *t,
4798 linux_nat_new_fork_ftype *new_fork)
4799 {
4800 /* Save the pointer. */
4801 linux_nat_new_fork = new_fork;
4802 }
4803
4804 /* See declaration in linux-nat.h. */
4805
4806 void
4807 linux_nat_set_forget_process (struct target_ops *t,
4808 linux_nat_forget_process_ftype *fn)
4809 {
4810 /* Save the pointer. */
4811 linux_nat_forget_process_hook = fn;
4812 }
4813
4814 /* See declaration in linux-nat.h. */
4815
4816 void
4817 linux_nat_forget_process (pid_t pid)
4818 {
4819 if (linux_nat_forget_process_hook != NULL)
4820 linux_nat_forget_process_hook (pid);
4821 }
4822
4823 /* Register a method that converts a siginfo object between the layout
4824 that ptrace returns, and the layout in the architecture of the
4825 inferior. */
4826 void
4827 linux_nat_set_siginfo_fixup (struct target_ops *t,
4828 int (*siginfo_fixup) (siginfo_t *,
4829 gdb_byte *,
4830 int))
4831 {
4832 /* Save the pointer. */
4833 linux_nat_siginfo_fixup = siginfo_fixup;
4834 }
4835
4836 /* Register a method to call prior to resuming a thread. */
4837
4838 void
4839 linux_nat_set_prepare_to_resume (struct target_ops *t,
4840 void (*prepare_to_resume) (struct lwp_info *))
4841 {
4842 /* Save the pointer. */
4843 linux_nat_prepare_to_resume = prepare_to_resume;
4844 }
4845
4846 /* See linux-nat.h. */
4847
4848 int
4849 linux_nat_get_siginfo (ptid_t ptid, siginfo_t *siginfo)
4850 {
4851 int pid;
4852
4853 pid = ptid_get_lwp (ptid);
4854 if (pid == 0)
4855 pid = ptid_get_pid (ptid);
4856
4857 errno = 0;
4858 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, siginfo);
4859 if (errno != 0)
4860 {
4861 memset (siginfo, 0, sizeof (*siginfo));
4862 return 0;
4863 }
4864 return 1;
4865 }
4866
4867 /* Provide a prototype to silence -Wmissing-prototypes. */
4868 extern initialize_file_ftype _initialize_linux_nat;
4869
4870 void
4871 _initialize_linux_nat (void)
4872 {
4873 add_setshow_zuinteger_cmd ("lin-lwp", class_maintenance,
4874 &debug_linux_nat, _("\
4875 Set debugging of GNU/Linux lwp module."), _("\
4876 Show debugging of GNU/Linux lwp module."), _("\
4877 Enables printf debugging output."),
4878 NULL,
4879 show_debug_linux_nat,
4880 &setdebuglist, &showdebuglist);
4881
4882 /* Save this mask as the default. */
4883 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
4884
4885 /* Install a SIGCHLD handler. */
4886 sigchld_action.sa_handler = sigchld_handler;
4887 sigemptyset (&sigchld_action.sa_mask);
4888 sigchld_action.sa_flags = SA_RESTART;
4889
4890 /* Make it the default. */
4891 sigaction (SIGCHLD, &sigchld_action, NULL);
4892
4893 /* Make sure we don't block SIGCHLD during a sigsuspend. */
4894 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
4895 sigdelset (&suspend_mask, SIGCHLD);
4896
4897 sigemptyset (&blocked_mask);
4898
4899 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to
4900 support read-only process state. */
4901 linux_ptrace_set_additional_flags (PTRACE_O_TRACESYSGOOD
4902 | PTRACE_O_TRACEVFORKDONE
4903 | PTRACE_O_TRACEVFORK
4904 | PTRACE_O_TRACEFORK
4905 | PTRACE_O_TRACEEXEC);
4906 }
4907 \f
4908
4909 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
4910 the GNU/Linux Threads library and therefore doesn't really belong
4911 here. */
4912
4913 /* Read variable NAME in the target and return its value if found.
4914 Otherwise return zero. It is assumed that the type of the variable
4915 is `int'. */
4916
4917 static int
4918 get_signo (const char *name)
4919 {
4920 struct bound_minimal_symbol ms;
4921 int signo;
4922
4923 ms = lookup_minimal_symbol (name, NULL, NULL);
4924 if (ms.minsym == NULL)
4925 return 0;
4926
4927 if (target_read_memory (BMSYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
4928 sizeof (signo)) != 0)
4929 return 0;
4930
4931 return signo;
4932 }
4933
4934 /* Return the set of signals used by the threads library in *SET. */
4935
4936 void
4937 lin_thread_get_thread_signals (sigset_t *set)
4938 {
4939 struct sigaction action;
4940 int restart, cancel;
4941
4942 sigemptyset (&blocked_mask);
4943 sigemptyset (set);
4944
4945 restart = get_signo ("__pthread_sig_restart");
4946 cancel = get_signo ("__pthread_sig_cancel");
4947
4948 /* LinuxThreads normally uses the first two RT signals, but in some legacy
4949 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
4950 not provide any way for the debugger to query the signal numbers -
4951 fortunately they don't change! */
4952
4953 if (restart == 0)
4954 restart = __SIGRTMIN;
4955
4956 if (cancel == 0)
4957 cancel = __SIGRTMIN + 1;
4958
4959 sigaddset (set, restart);
4960 sigaddset (set, cancel);
4961
4962 /* The GNU/Linux Threads library makes terminating threads send a
4963 special "cancel" signal instead of SIGCHLD. Make sure we catch
4964 those (to prevent them from terminating GDB itself, which is
4965 likely to be their default action) and treat them the same way as
4966 SIGCHLD. */
4967
4968 action.sa_handler = sigchld_handler;
4969 sigemptyset (&action.sa_mask);
4970 action.sa_flags = SA_RESTART;
4971 sigaction (cancel, &action, NULL);
4972
4973 /* We block the "cancel" signal throughout this code ... */
4974 sigaddset (&blocked_mask, cancel);
4975 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
4976
4977 /* ... except during a sigsuspend. */
4978 sigdelset (&suspend_mask, cancel);
4979 }