1 /* GNU/Linux native-dependent code common to multiple platforms.
2
3 Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 51 Franklin Street, Fifth Floor,
21 Boston, MA 02110-1301, USA. */
22
23 #include "defs.h"
24 #include "inferior.h"
25 #include "target.h"
26 #include "gdb_string.h"
27 #include "gdb_wait.h"
28 #include "gdb_assert.h"
29 #ifdef HAVE_TKILL_SYSCALL
30 #include <unistd.h>
31 #include <sys/syscall.h>
32 #endif
33 #include <sys/ptrace.h>
34 #include "linux-nat.h"
35 #include "linux-fork.h"
36 #include "gdbthread.h"
37 #include "gdbcmd.h"
38 #include "regcache.h"
39 #include "regset.h"
40 #include "inf-ptrace.h"
41 #include "auxv.h"
42 #include <sys/param.h> /* for MAXPATHLEN */
43 #include <sys/procfs.h> /* for elf_gregset etc. */
44 #include "elf-bfd.h" /* for elfcore_write_* */
45 #include "gregset.h" /* for gregset */
46 #include "gdbcore.h" /* for get_exec_file */
47 #include <ctype.h> /* for isdigit */
48 #include "gdbthread.h" /* for struct thread_info etc. */
49 #include "gdb_stat.h" /* for struct stat */
50 #include <fcntl.h> /* for O_RDONLY */
51
52 #ifndef O_LARGEFILE
53 #define O_LARGEFILE 0
54 #endif
55
56 /* If the system headers did not provide the constants, hard-code the normal
57 values. */
58 #ifndef PTRACE_EVENT_FORK
59
60 #define PTRACE_SETOPTIONS 0x4200
61 #define PTRACE_GETEVENTMSG 0x4201
62
63 /* options set using PTRACE_SETOPTIONS */
64 #define PTRACE_O_TRACESYSGOOD 0x00000001
65 #define PTRACE_O_TRACEFORK 0x00000002
66 #define PTRACE_O_TRACEVFORK 0x00000004
67 #define PTRACE_O_TRACECLONE 0x00000008
68 #define PTRACE_O_TRACEEXEC 0x00000010
69 #define PTRACE_O_TRACEVFORKDONE 0x00000020
70 #define PTRACE_O_TRACEEXIT 0x00000040
71
72 /* Wait extended result codes for the above trace options. */
73 #define PTRACE_EVENT_FORK 1
74 #define PTRACE_EVENT_VFORK 2
75 #define PTRACE_EVENT_CLONE 3
76 #define PTRACE_EVENT_EXEC 4
77 #define PTRACE_EVENT_VFORK_DONE 5
78 #define PTRACE_EVENT_EXIT 6
79
80 #endif /* PTRACE_EVENT_FORK */
81
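/* Illustrative sketch (added for exposition; not part of the original
   file): when one of the PTRACE_O_TRACE* options above is enabled, the
   kernel reports the event in the high 16 bits of the waitpid status,
   and PTRACE_GETEVENTMSG retrieves the event's data - for fork, vfork
   and clone events, the new child's PID.  The helper name below is
   hypothetical.  */
#if 0   /* Example only; not compiled.  */
static void
example_decode_extended_event (int pid, int status)
{
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
    {
      int event = status >> 16;

      if (event == PTRACE_EVENT_FORK
          || event == PTRACE_EVENT_VFORK
          || event == PTRACE_EVENT_CLONE)
        {
          unsigned long new_pid = 0;

          /* For fork/vfork/clone the event message is the new PID.  */
          ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
        }
    }
}
#endif
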
82 /* We can't always assume that this flag is available, but all systems
83 with the ptrace event handlers also have __WALL, so it's safe to use
84 here. */
85 #ifndef __WALL
86 #define __WALL 0x40000000 /* Wait for any child. */
87 #endif
88
89 /* The single-threaded native GNU/Linux target_ops. We save a pointer for
90 the use of the multi-threaded target. */
91 static struct target_ops *linux_ops;
92 static struct target_ops linux_ops_saved;
93
94 /* The saved to_xfer_partial method, inherited from inf-ptrace.c.
95 Called by our to_xfer_partial. */
96 static LONGEST (*super_xfer_partial) (struct target_ops *,
97 enum target_object,
98 const char *, gdb_byte *,
99 const gdb_byte *,
100 ULONGEST, LONGEST);
101
102 static int debug_linux_nat;
103 static void
104 show_debug_linux_nat (struct ui_file *file, int from_tty,
105 struct cmd_list_element *c, const char *value)
106 {
107 fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
108 value);
109 }
110
111 static int linux_parent_pid;
112
113 struct simple_pid_list
114 {
115 int pid;
116 int status;
117 struct simple_pid_list *next;
118 };
119 struct simple_pid_list *stopped_pids;
120
121 /* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
122 cannot be used, 1 if it can. */
123
124 static int linux_supports_tracefork_flag = -1;
125
126 /* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
127 PTRACE_O_TRACEVFORKDONE. */
128
129 static int linux_supports_tracevforkdone_flag = -1;
130
131 \f
132 /* Trivial list manipulation functions to keep track of a list of
133 new stopped processes. */
134 static void
135 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
136 {
137 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
138 new_pid->pid = pid;
139 new_pid->status = status;
140 new_pid->next = *listp;
141 *listp = new_pid;
142 }
143
144 static int
145 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *status)
146 {
147 struct simple_pid_list **p;
148
149 for (p = listp; *p != NULL; p = &(*p)->next)
150 if ((*p)->pid == pid)
151 {
152 struct simple_pid_list *next = (*p)->next;
153 *status = (*p)->status;
154 xfree (*p);
155 *p = next;
156 return 1;
157 }
158 return 0;
159 }
160
161 static void
162 linux_record_stopped_pid (int pid, int status)
163 {
164 add_to_pid_list (&stopped_pids, pid, status);
165 }
166
167 \f
168 /* A helper function for linux_test_for_tracefork, called after fork (). */
169
170 static void
171 linux_tracefork_child (void)
172 {
173 int ret;
174
175 ptrace (PTRACE_TRACEME, 0, 0, 0);
176 kill (getpid (), SIGSTOP);
177 fork ();
178 _exit (0);
179 }
180
181 /* Wrapper function for waitpid which handles EINTR. */
182
183 static int
184 my_waitpid (int pid, int *status, int flags)
185 {
186 int ret;
187 do
188 {
189 ret = waitpid (pid, status, flags);
190 }
191 while (ret == -1 && errno == EINTR);
192
193 return ret;
194 }
195
196 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.
197
198 First, we try to enable fork tracing on ORIGINAL_PID. If this fails,
199 we know that the feature is not available. This may change the tracing
200 options for ORIGINAL_PID, but we'll be setting them shortly anyway.
201
202 However, if it succeeds, we don't know for sure that the feature is
203 available; old versions of PTRACE_SETOPTIONS ignored unknown options. We
204 create a child process, attach to it, use PTRACE_SETOPTIONS to enable
205 fork tracing, and let it fork. If the process exits, we assume that we
206 can't use TRACEFORK; if we get the fork notification, and we can extract
207 the new child's PID, then we assume that we can. */
208
209 static void
210 linux_test_for_tracefork (int original_pid)
211 {
212 int child_pid, ret, status;
213 long second_pid;
214
215 linux_supports_tracefork_flag = 0;
216 linux_supports_tracevforkdone_flag = 0;
217
218 ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
219 if (ret != 0)
220 return;
221
222 child_pid = fork ();
223 if (child_pid == -1)
224 perror_with_name (("fork"));
225
226 if (child_pid == 0)
227 linux_tracefork_child ();
228
229 ret = my_waitpid (child_pid, &status, 0);
230 if (ret == -1)
231 perror_with_name (("waitpid"));
232 else if (ret != child_pid)
233 error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
234 if (! WIFSTOPPED (status))
235 error (_("linux_test_for_tracefork: waitpid: unexpected status %d."), status);
236
237 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
238 if (ret != 0)
239 {
240 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
241 if (ret != 0)
242 {
243 warning (_("linux_test_for_tracefork: failed to kill child"));
244 return;
245 }
246
247 ret = my_waitpid (child_pid, &status, 0);
248 if (ret != child_pid)
249 warning (_("linux_test_for_tracefork: failed to wait for killed child"));
250 else if (!WIFSIGNALED (status))
251 warning (_("linux_test_for_tracefork: unexpected wait status 0x%x from "
252 "killed child"), status);
253
254 return;
255 }
256
257 /* Check whether PTRACE_O_TRACEVFORKDONE is available. */
258 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
259 PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
260 linux_supports_tracevforkdone_flag = (ret == 0);
261
262 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
263 if (ret != 0)
264 warning (_("linux_test_for_tracefork: failed to resume child"));
265
266 ret = my_waitpid (child_pid, &status, 0);
267
268 if (ret == child_pid && WIFSTOPPED (status)
269 && status >> 16 == PTRACE_EVENT_FORK)
270 {
271 second_pid = 0;
272 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
273 if (ret == 0 && second_pid != 0)
274 {
275 int second_status;
276
277 linux_supports_tracefork_flag = 1;
278 my_waitpid (second_pid, &second_status, 0);
279 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
280 if (ret != 0)
281 warning (_("linux_test_for_tracefork: failed to kill second child"));
282 my_waitpid (second_pid, &status, 0);
283 }
284 }
285 else
286 warning (_("linux_test_for_tracefork: unexpected result from waitpid "
287 "(%d, status 0x%x)"), ret, status);
288
289 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
290 if (ret != 0)
291 warning (_("linux_test_for_tracefork: failed to kill child"));
292 my_waitpid (child_pid, &status, 0);
293 }
294
295 /* Return non-zero iff we have tracefork functionality available.
296 This function also sets linux_supports_tracefork_flag. */
297
298 static int
299 linux_supports_tracefork (int pid)
300 {
301 if (linux_supports_tracefork_flag == -1)
302 linux_test_for_tracefork (pid);
303 return linux_supports_tracefork_flag;
304 }
305
306 static int
307 linux_supports_tracevforkdone (int pid)
308 {
309 if (linux_supports_tracefork_flag == -1)
310 linux_test_for_tracefork (pid);
311 return linux_supports_tracevforkdone_flag;
312 }
313
314 \f
315 void
316 linux_enable_event_reporting (ptid_t ptid)
317 {
318 int pid = ptid_get_lwp (ptid);
319 int options;
320
321 if (pid == 0)
322 pid = ptid_get_pid (ptid);
323
324 if (! linux_supports_tracefork (pid))
325 return;
326
327 options = PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK | PTRACE_O_TRACEEXEC
328 | PTRACE_O_TRACECLONE;
329 if (linux_supports_tracevforkdone (pid))
330 options |= PTRACE_O_TRACEVFORKDONE;
331
332 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
333 read-only process state. */
334
335 ptrace (PTRACE_SETOPTIONS, pid, 0, options);
336 }
337
338 static void
339 linux_child_post_attach (int pid)
340 {
341 linux_enable_event_reporting (pid_to_ptid (pid));
342 check_for_thread_db ();
343 }
344
345 static void
346 linux_child_post_startup_inferior (ptid_t ptid)
347 {
348 linux_enable_event_reporting (ptid);
349 check_for_thread_db ();
350 }
351
352 static int
353 linux_child_follow_fork (struct target_ops *ops, int follow_child)
354 {
355 ptid_t last_ptid;
356 struct target_waitstatus last_status;
357 int has_vforked;
358 int parent_pid, child_pid;
359
360 get_last_target_status (&last_ptid, &last_status);
361 has_vforked = (last_status.kind == TARGET_WAITKIND_VFORKED);
362 parent_pid = ptid_get_lwp (last_ptid);
363 if (parent_pid == 0)
364 parent_pid = ptid_get_pid (last_ptid);
365 child_pid = last_status.value.related_pid;
366
367 if (! follow_child)
368 {
369 /* We're already attached to the parent, by default. */
370
371 /* Before detaching from the child, remove all breakpoints from
372 it. (This won't actually modify the breakpoint list, but will
373 physically remove the breakpoints from the child.) */
374 /* If we vforked this will remove the breakpoints from the parent
375 also, but they'll be reinserted below. */
376 detach_breakpoints (child_pid);
377
378 /* Detach new forked process? */
379 if (detach_fork)
380 {
381 if (debug_linux_nat)
382 {
383 target_terminal_ours ();
384 fprintf_filtered (gdb_stdlog,
385 "Detaching after fork from child process %d.\n",
386 child_pid);
387 }
388
389 ptrace (PTRACE_DETACH, child_pid, 0, 0);
390 }
391 else
392 {
393 struct fork_info *fp;
394 /* Retain child fork in ptrace (stopped) state. */
395 fp = find_fork_pid (child_pid);
396 if (!fp)
397 fp = add_fork (child_pid);
398 fork_save_infrun_state (fp, 0);
399 }
400
401 if (has_vforked)
402 {
403 gdb_assert (linux_supports_tracefork_flag >= 0);
404 if (linux_supports_tracevforkdone (0))
405 {
406 int status;
407
408 ptrace (PTRACE_CONT, parent_pid, 0, 0);
409 my_waitpid (parent_pid, &status, __WALL);
410 if ((status >> 16) != PTRACE_EVENT_VFORK_DONE)
411 warning (_("Unexpected waitpid result %06x when waiting for "
412 "vfork-done"), status);
413 }
414 else
415 {
416 /* We can't insert breakpoints until the child has
417 finished with the shared memory region. We need to
418 wait until that happens. Ideal would be to just
419 call:
420 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
421 - waitpid (parent_pid, &status, __WALL);
422 However, most architectures can't handle a syscall
423 being traced on the way out if it wasn't traced on
424 the way in.
425
426 We might also think to loop, continuing the child
427 until it exits or gets a SIGTRAP. One problem is
428 that the child might call ptrace with PTRACE_TRACEME.
429
430 There's no simple and reliable way to figure out when
431 the vforked child will be done with its copy of the
432 shared memory. We could step it out of the syscall,
433 two instructions, let it go, and then single-step the
434 parent once. When we have hardware single-step, this
435 would work; with software single-step it could still
436 be made to work but we'd have to be able to insert
437 single-step breakpoints in the child, and we'd have
438 to insert -just- the single-step breakpoint in the
439 parent. Very awkward.
440
441 In the end, the best we can do is to make sure it
442 runs for a little while. Hopefully it will be out of
443 range of any breakpoints we reinsert. Usually this
444 is only the single-step breakpoint at vfork's return
445 point. */
446
447 usleep (10000);
448 }
449
450 /* Since we vforked, breakpoints were removed in the parent
451 too. Put them back. */
452 reattach_breakpoints (parent_pid);
453 }
454 }
455 else
456 {
457 char child_pid_spelling[40];
458
459 /* Needed to keep the breakpoint lists in sync. */
460 if (! has_vforked)
461 detach_breakpoints (child_pid);
462
463 /* Before detaching from the parent, remove all breakpoints from it. */
464 remove_breakpoints ();
465
466 if (debug_linux_nat)
467 {
468 target_terminal_ours ();
469 fprintf_filtered (gdb_stdlog,
470 "Attaching after fork to child process %d.\n",
471 child_pid);
472 }
473
474 /* If we're vforking, we may want to hold on to the parent until
475 the child exits or execs. At exec time we can remove the old
476 breakpoints from the parent and detach it; at exit time we
477 could do the same (or even, sneakily, resume debugging it - the
478 child's exec has failed, or something similar).
479
480 This doesn't clean up "properly", because we can't call
481 target_detach, but that's OK; if the current target is "child",
482 then it doesn't need any further cleanups, and lin_lwp will
483 generally not encounter vfork (vfork is defined to fork
484 in libpthread.so).
485
486 The holding part is very easy if we have VFORKDONE events;
487 but keeping track of both processes is beyond GDB at the
488 moment. So we don't expose the parent to the rest of GDB.
489 Instead we quietly hold onto it until such time as we can
490 safely resume it. */
491
492 if (has_vforked)
493 linux_parent_pid = parent_pid;
494 else if (!detach_fork)
495 {
496 struct fork_info *fp;
497 /* Retain parent fork in ptrace (stopped) state. */
498 fp = find_fork_pid (parent_pid);
499 if (!fp)
500 fp = add_fork (parent_pid);
501 fork_save_infrun_state (fp, 0);
502 }
503 else
504 {
505 target_detach (NULL, 0);
506 }
507
508 inferior_ptid = pid_to_ptid (child_pid);
509
510 /* Reinstall ourselves, since we might have been removed in
511 target_detach (which does other necessary cleanup). */
512
513 push_target (ops);
514
515 /* Reset breakpoints in the child as appropriate. */
516 follow_inferior_reset_breakpoints ();
517 }
518
519 return 0;
520 }
521
522 \f
523 static void
524 linux_child_insert_fork_catchpoint (int pid)
525 {
526 if (! linux_supports_tracefork (pid))
527 error (_("Your system does not support fork catchpoints."));
528 }
529
530 static void
531 linux_child_insert_vfork_catchpoint (int pid)
532 {
533 if (!linux_supports_tracefork (pid))
534 error (_("Your system does not support vfork catchpoints."));
535 }
536
537 static void
538 linux_child_insert_exec_catchpoint (int pid)
539 {
540 if (!linux_supports_tracefork (pid))
541 error (_("Your system does not support exec catchpoints."));
542 }
543
544 /* On GNU/Linux there are no real LWP's. The closest thing to LWP's
545 are processes sharing the same VM space. A multi-threaded process
546 is basically a group of such processes. However, such a grouping
547 is almost entirely a user-space issue; the kernel doesn't enforce
548 such a grouping at all (this might change in the future). In
549 general, we'll rely on the threads library (i.e. the GNU/Linux
550 Threads library) to provide such a grouping.
551
552 It is perfectly possible to write a multi-threaded application
553 without the assistance of a threads library, by using the clone
554 system call directly. This module should be able to give some
555 rudimentary support for debugging such applications if developers
556 specify the CLONE_PTRACE flag in the clone system call, and are
557 using the Linux kernel 2.4 or above.
558
559 Note that there are some peculiarities in GNU/Linux that affect
560 this code:
561
562 - In general one should specify the __WCLONE flag to waitpid in
563 order to make it report events for any of the cloned processes
564 (and leave it out for the initial process). However, if a cloned
565 process has exited the exit status is only reported if the
566 __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
567 we cannot use it since GDB must work on older systems too.
568
569 - When a traced, cloned process exits and is waited for by the
570 debugger, the kernel reassigns it to the original parent and
571 keeps it around as a "zombie". Somehow, the GNU/Linux Threads
572 library doesn't notice this, which leads to the "zombie problem":
573 When debugged, a multi-threaded process that spawns a lot of
574 threads will run out of processes, even if the threads exit,
575 because the "zombies" stay around. */
576
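/* Illustrative sketch (added for exposition; not part of the original
   file): the pattern this file uses to cope with the __WCLONE
   peculiarity described above - wait without __WCLONE first, and if
   the kernel reports ECHILD, retry with __WCLONE and remember that the
   LWP is a cloned process.  The helper name is hypothetical; see
   lin_lwp_attach_lwp and wait_lwp below for the real uses.  */
#if 0   /* Example only; not compiled.  */
static int
example_wait_initial_or_clone (int lwpid, int *statusp, int *clonedp)
{
  int pid = my_waitpid (lwpid, statusp, 0);

  if (pid == -1 && errno == ECHILD)
    {
      /* Not visible to a plain waitpid; try the cloned case.  */
      pid = my_waitpid (lwpid, statusp, __WCLONE);
      *clonedp = 1;
    }

  return pid;
}
#endif
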
577 /* List of known LWPs. */
578 static struct lwp_info *lwp_list;
579
580 /* Number of LWPs in the list. */
581 static int num_lwps;
582 \f
583
584 #define GET_LWP(ptid) ptid_get_lwp (ptid)
585 #define GET_PID(ptid) ptid_get_pid (ptid)
586 #define is_lwp(ptid) (GET_LWP (ptid) != 0)
587 #define BUILD_LWP(lwp, pid) ptid_build (pid, lwp, 0)
588
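/* Illustrative note (added for exposition; not part of the original
   file): how this module encodes an LWP in a ptid - the overall
   process id goes in the pid field and the kernel thread id in the lwp
   field, so is_lwp distinguishes LWP ptids from plain process ptids.
   The numbers below are made up.  */
#if 0   /* Example only; not compiled.  */
static void
example_ptid_layout (void)
{
  ptid_t example = BUILD_LWP (4711, 4242);  /* LWP 4711 of process 4242.  */

  gdb_assert (GET_PID (example) == 4242);
  gdb_assert (GET_LWP (example) == 4711);
  gdb_assert (is_lwp (example));
}
#endif
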
589 /* If the last reported event was a SIGTRAP, this variable is set to
590 the process id of the LWP/thread that got it. */
591 ptid_t trap_ptid;
592 \f
593
594 /* Since we cannot wait (in linux_nat_wait) for the initial process and
595 any cloned processes with a single call to waitpid, we have to use
596 the WNOHANG flag and call waitpid in a loop. To optimize
597 things a bit we use `sigsuspend' to wake us up when a process has
598 something to report (it will send us a SIGCHLD if it has). To make
599 this work we have to juggle with the signal mask. We save the
600 original signal mask such that we can restore it before creating a
601 new process in order to avoid blocking certain signals in the
602 inferior. We then block SIGCHLD during the waitpid/sigsuspend
603 loop. */
604
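/* Illustrative sketch (added for exposition; not part of the original
   file): the shape of the waitpid/sigsuspend loop described above -
   block SIGCHLD, poll the children with WNOHANG, and sleep in
   sigsuspend (with SIGCHLD unblocked) until a child has something to
   report.  Names are hypothetical; linux_nat_wait below is the real
   implementation, which also has to alternate the __WCLONE flag.  */
#if 0   /* Example only; not compiled.  */
static int
example_wait_for_event (int *statusp)
{
  sigset_t chld_mask, wait_mask;
  int pid;

  /* Block SIGCHLD so it can only be delivered inside sigsuspend.  */
  sigemptyset (&chld_mask);
  sigaddset (&chld_mask, SIGCHLD);
  sigprocmask (SIG_BLOCK, &chld_mask, &wait_mask);

  /* While suspended, use the previous mask with SIGCHLD unblocked.  */
  sigdelset (&wait_mask, SIGCHLD);

  while ((pid = waitpid (-1, statusp, WNOHANG)) == 0)
    sigsuspend (&wait_mask);    /* Woken up by SIGCHLD.  */

  return pid;
}
#endif
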
605 /* Original signal mask. */
606 static sigset_t normal_mask;
607
608 /* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
609 _initialize_linux_nat. */
610 static sigset_t suspend_mask;
611
612 /* Signals to block to make sigsuspend work. */
613 static sigset_t blocked_mask;
614 \f
615
616 /* Prototypes for local functions. */
617 static int stop_wait_callback (struct lwp_info *lp, void *data);
618 static int linux_nat_thread_alive (ptid_t ptid);
619 static char *linux_child_pid_to_exec_file (int pid);
620 \f
621 /* Convert wait status STATUS to a string. Used for printing debug
622 messages only. */
623
624 static char *
625 status_to_str (int status)
626 {
627 static char buf[64];
628
629 if (WIFSTOPPED (status))
630 snprintf (buf, sizeof (buf), "%s (stopped)",
631 strsignal (WSTOPSIG (status)));
632 else if (WIFSIGNALED (status))
633 snprintf (buf, sizeof (buf), "%s (terminated)",
634 strsignal (WTERMSIG (status)));
635 else
636 snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));
637
638 return buf;
639 }
640
641 /* Initialize the list of LWPs. Note that this module, contrary to
642 what GDB's generic threads layer does for its thread list,
643 re-initializes the LWP list whenever we mourn the inferior or
644 detach from it (detaching does not involve mourning). */
645
646 static void
647 init_lwp_list (void)
648 {
649 struct lwp_info *lp, *lpnext;
650
651 for (lp = lwp_list; lp; lp = lpnext)
652 {
653 lpnext = lp->next;
654 xfree (lp);
655 }
656
657 lwp_list = NULL;
658 num_lwps = 0;
659 }
660
661 /* Add the LWP specified by PTID to the list. Return a pointer to the
662 structure describing the new LWP. */
663
664 static struct lwp_info *
665 add_lwp (ptid_t ptid)
666 {
667 struct lwp_info *lp;
668
669 gdb_assert (is_lwp (ptid));
670
671 lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
672
673 memset (lp, 0, sizeof (struct lwp_info));
674
675 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
676
677 lp->ptid = ptid;
678
679 lp->next = lwp_list;
680 lwp_list = lp;
681 ++num_lwps;
682
683 return lp;
684 }
685
686 /* Remove the LWP specified by PTID from the list. */
687
688 static void
689 delete_lwp (ptid_t ptid)
690 {
691 struct lwp_info *lp, *lpprev;
692
693 lpprev = NULL;
694
695 for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
696 if (ptid_equal (lp->ptid, ptid))
697 break;
698
699 if (!lp)
700 return;
701
702 num_lwps--;
703
704 if (lpprev)
705 lpprev->next = lp->next;
706 else
707 lwp_list = lp->next;
708
709 xfree (lp);
710 }
711
712 /* Return a pointer to the structure describing the LWP corresponding
713 to PTID. If no corresponding LWP could be found, return NULL. */
714
715 static struct lwp_info *
716 find_lwp_pid (ptid_t ptid)
717 {
718 struct lwp_info *lp;
719 int lwp;
720
721 if (is_lwp (ptid))
722 lwp = GET_LWP (ptid);
723 else
724 lwp = GET_PID (ptid);
725
726 for (lp = lwp_list; lp; lp = lp->next)
727 if (lwp == GET_LWP (lp->ptid))
728 return lp;
729
730 return NULL;
731 }
732
733 /* Call CALLBACK with its second argument set to DATA for every LWP in
734 the list. If CALLBACK returns 1 for a particular LWP, return a
735 pointer to the structure describing that LWP immediately.
736 Otherwise return NULL. */
737
738 struct lwp_info *
739 iterate_over_lwps (int (*callback) (struct lwp_info *, void *), void *data)
740 {
741 struct lwp_info *lp, *lpnext;
742
743 for (lp = lwp_list; lp; lp = lpnext)
744 {
745 lpnext = lp->next;
746 if ((*callback) (lp, data))
747 return lp;
748 }
749
750 return NULL;
751 }
752
753 /* Update our internal state when changing from one fork (checkpoint,
754 et cetera) to another indicated by NEW_PTID. We can only switch
755 single-threaded applications, so we only create one new LWP, and
756 the previous list is discarded. */
757
758 void
759 linux_nat_switch_fork (ptid_t new_ptid)
760 {
761 struct lwp_info *lp;
762
763 init_lwp_list ();
764 lp = add_lwp (new_ptid);
765 lp->stopped = 1;
766 }
767
768 /* Record a PTID for later deletion. */
769
770 struct saved_ptids
771 {
772 ptid_t ptid;
773 struct saved_ptids *next;
774 };
775 static struct saved_ptids *threads_to_delete;
776
777 static void
778 record_dead_thread (ptid_t ptid)
779 {
780 struct saved_ptids *p = xmalloc (sizeof (struct saved_ptids));
781 p->ptid = ptid;
782 p->next = threads_to_delete;
783 threads_to_delete = p;
784 }
785
786 /* Delete any dead threads which are not the current thread. */
787
788 static void
789 prune_lwps (void)
790 {
791 struct saved_ptids **p = &threads_to_delete;
792
793 while (*p)
794 if (! ptid_equal ((*p)->ptid, inferior_ptid))
795 {
796 struct saved_ptids *tmp = *p;
797 delete_thread (tmp->ptid);
798 *p = tmp->next;
799 xfree (tmp);
800 }
801 else
802 p = &(*p)->next;
803 }
804
805 /* Callback for iterate_over_threads that finds a thread corresponding
806 to the given LWP. */
807
808 static int
809 find_thread_from_lwp (struct thread_info *thr, void *dummy)
810 {
811 ptid_t *ptid_p = dummy;
812
813 if (GET_LWP (thr->ptid) && GET_LWP (thr->ptid) == GET_LWP (*ptid_p))
814 return 1;
815 else
816 return 0;
817 }
818
819 /* Handle the exit of a single thread LP. */
820
821 static void
822 exit_lwp (struct lwp_info *lp)
823 {
824 if (in_thread_list (lp->ptid))
825 {
826 /* Core GDB cannot deal with us deleting the current thread. */
827 if (!ptid_equal (lp->ptid, inferior_ptid))
828 delete_thread (lp->ptid);
829 else
830 record_dead_thread (lp->ptid);
831 printf_unfiltered (_("[%s exited]\n"),
832 target_pid_to_str (lp->ptid));
833 }
834 else
835 {
836 /* Even if LP->PTID is not in the global GDB thread list, the
837 LWP may be - with an additional thread ID. We don't need
838 to print anything in this case; thread_db is in use and
839 already took care of that. But it didn't delete the thread
840 in order to handle zombies correctly. */
841
842 struct thread_info *thr;
843
844 thr = iterate_over_threads (find_thread_from_lwp, &lp->ptid);
845 if (thr)
846 {
847 if (!ptid_equal (thr->ptid, inferior_ptid))
848 delete_thread (thr->ptid);
849 else
850 record_dead_thread (thr->ptid);
851 }
852 }
853
854 delete_lwp (lp->ptid);
855 }
856
857 /* Attach to the LWP specified by PID. If VERBOSE is non-zero, print
858 a message telling the user that a new LWP has been added to the
859 process. Return 0 if successful or -1 if the new LWP could not
860 be attached. */
861
862 int
863 lin_lwp_attach_lwp (ptid_t ptid, int verbose)
864 {
865 struct lwp_info *lp;
866
867 gdb_assert (is_lwp (ptid));
868
869 /* Make sure SIGCHLD is blocked. We don't want SIGCHLD events
870 to interrupt either the ptrace() or waitpid() calls below. */
871 if (!sigismember (&blocked_mask, SIGCHLD))
872 {
873 sigaddset (&blocked_mask, SIGCHLD);
874 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
875 }
876
877 lp = find_lwp_pid (ptid);
878
879 /* We assume that we're already attached to any LWP that has an id
880 equal to the overall process id, and to any LWP that is already
881 in our list of LWPs. If we're not seeing exit events from threads
882 and we've had PID wraparound since we last tried to stop all threads,
883 this assumption might be wrong; fortunately, this is very unlikely
884 to happen. */
885 if (GET_LWP (ptid) != GET_PID (ptid) && lp == NULL)
886 {
887 pid_t pid;
888 int status;
889
890 if (ptrace (PTRACE_ATTACH, GET_LWP (ptid), 0, 0) < 0)
891 {
892 /* If we fail to attach to the thread, issue a warning,
893 but continue. One way this can happen is if thread
894 creation is interrupted; as of Linux 2.6.19, a kernel
895 bug may place threads in the thread list and then fail
896 to create them. */
897 warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
898 safe_strerror (errno));
899 return -1;
900 }
901
902 if (lp == NULL)
903 lp = add_lwp (ptid);
904
905 if (debug_linux_nat)
906 fprintf_unfiltered (gdb_stdlog,
907 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
908 target_pid_to_str (ptid));
909
910 pid = my_waitpid (GET_LWP (ptid), &status, 0);
911 if (pid == -1 && errno == ECHILD)
912 {
913 /* Try again with __WCLONE to check cloned processes. */
914 pid = my_waitpid (GET_LWP (ptid), &status, __WCLONE);
915 lp->cloned = 1;
916 }
917
918 gdb_assert (pid == GET_LWP (ptid)
919 && WIFSTOPPED (status) && WSTOPSIG (status));
920
921 target_post_attach (pid);
922
923 lp->stopped = 1;
924
925 if (debug_linux_nat)
926 {
927 fprintf_unfiltered (gdb_stdlog,
928 "LLAL: waitpid %s received %s\n",
929 target_pid_to_str (ptid),
930 status_to_str (status));
931 }
932 }
933 else
934 {
935 /* We assume that the LWP representing the original process is
936 already stopped. Mark it as stopped in the data structure
937 that the GNU/Linux ptrace layer uses to keep track of
938 threads. Note that this won't have been done already, since
939 the main thread will have been stopped, we assume, by an
940 attach from a different layer. */
941 if (lp == NULL)
942 lp = add_lwp (ptid);
943 lp->stopped = 1;
944 }
945
946 if (verbose)
947 printf_filtered (_("[New %s]\n"), target_pid_to_str (ptid));
948
949 return 0;
950 }
951
952 static void
953 linux_nat_attach (char *args, int from_tty)
954 {
955 struct lwp_info *lp;
956 pid_t pid;
957 int status;
958
959 /* FIXME: We should probably accept a list of process ids, and
960 attach all of them. */
961 linux_ops->to_attach (args, from_tty);
962
963 /* Add the initial process as the first LWP to the list. */
964 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
965 lp = add_lwp (inferior_ptid);
966
967 /* Make sure the initial process is stopped. The user-level threads
968 layer might want to poke around in the inferior, and that won't
969 work if things haven't stabilized yet. */
970 pid = my_waitpid (GET_PID (inferior_ptid), &status, 0);
971 if (pid == -1 && errno == ECHILD)
972 {
973 warning (_("%s is a cloned process"), target_pid_to_str (inferior_ptid));
974
975 /* Try again with __WCLONE to check cloned processes. */
976 pid = my_waitpid (GET_PID (inferior_ptid), &status, __WCLONE);
977 lp->cloned = 1;
978 }
979
980 gdb_assert (pid == GET_PID (inferior_ptid)
981 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP);
982
983 lp->stopped = 1;
984
985 /* Fake the SIGSTOP that core GDB expects. */
986 lp->status = W_STOPCODE (SIGSTOP);
987 lp->resumed = 1;
988 if (debug_linux_nat)
989 {
990 fprintf_unfiltered (gdb_stdlog,
991 "LLA: waitpid %ld, faking SIGSTOP\n", (long) pid);
992 }
993 }
994
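/* Illustrative note (added for exposition; not part of the original
   file): the status faked in linux_nat_attach above is built with
   W_STOPCODE, which packs a stop signal into a wait status the way the
   kernel would - on glibc it is essentially (sig << 8) | 0x7f - so the
   usual WIFSTOPPED/WSTOPSIG macros recover SIGSTOP from it.  */
#if 0   /* Example only; not compiled.  */
static void
example_faked_sigstop_status (void)
{
  int faked = W_STOPCODE (SIGSTOP);

  gdb_assert (WIFSTOPPED (faked));
  gdb_assert (WSTOPSIG (faked) == SIGSTOP);
}
#endif
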
995 static int
996 detach_callback (struct lwp_info *lp, void *data)
997 {
998 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
999
1000 if (debug_linux_nat && lp->status)
1001 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1002 strsignal (WSTOPSIG (lp->status)),
1003 target_pid_to_str (lp->ptid));
1004
1005 while (lp->signalled && lp->stopped)
1006 {
1007 errno = 0;
1008 if (ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0,
1009 WSTOPSIG (lp->status)) < 0)
1010 error (_("Can't continue %s: %s"), target_pid_to_str (lp->ptid),
1011 safe_strerror (errno));
1012
1013 if (debug_linux_nat)
1014 fprintf_unfiltered (gdb_stdlog,
1015 "DC: PTRACE_CONTINUE (%s, 0, %s) (OK)\n",
1016 target_pid_to_str (lp->ptid),
1017 status_to_str (lp->status));
1018
1019 lp->stopped = 0;
1020 lp->signalled = 0;
1021 lp->status = 0;
1022 /* FIXME drow/2003-08-26: There was a call to stop_wait_callback
1023 here. But since lp->signalled was cleared above,
1024 stop_wait_callback didn't do anything; the process was left
1025 running. Shouldn't we be waiting for it to stop?
1026 I've removed the call, since stop_wait_callback now does do
1027 something when called with lp->signalled == 0. */
1028
1029 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1030 }
1031
1032 /* We don't actually detach from the LWP that has an id equal to the
1033 overall process id just yet. */
1034 if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
1035 {
1036 errno = 0;
1037 if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
1038 WSTOPSIG (lp->status)) < 0)
1039 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
1040 safe_strerror (errno));
1041
1042 if (debug_linux_nat)
1043 fprintf_unfiltered (gdb_stdlog,
1044 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1045 target_pid_to_str (lp->ptid),
1046 strsignal (WSTOPSIG (lp->status)));
1047
1048 delete_lwp (lp->ptid);
1049 }
1050
1051 return 0;
1052 }
1053
1054 static void
1055 linux_nat_detach (char *args, int from_tty)
1056 {
1057 iterate_over_lwps (detach_callback, NULL);
1058
1059 /* Only the initial process should be left right now. */
1060 gdb_assert (num_lwps == 1);
1061
1062 trap_ptid = null_ptid;
1063
1064 /* Destroy LWP info; it's no longer valid. */
1065 init_lwp_list ();
1066
1067 /* Restore the original signal mask. */
1068 sigprocmask (SIG_SETMASK, &normal_mask, NULL);
1069 sigemptyset (&blocked_mask);
1070
1071 inferior_ptid = pid_to_ptid (GET_PID (inferior_ptid));
1072 linux_ops->to_detach (args, from_tty);
1073 }
1074
1075 /* Resume LP. */
1076
1077 static int
1078 resume_callback (struct lwp_info *lp, void *data)
1079 {
1080 if (lp->stopped && lp->status == 0)
1081 {
1082 struct thread_info *tp;
1083
1084 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
1085 0, TARGET_SIGNAL_0);
1086 if (debug_linux_nat)
1087 fprintf_unfiltered (gdb_stdlog,
1088 "RC: PTRACE_CONT %s, 0, 0 (resume sibling)\n",
1089 target_pid_to_str (lp->ptid));
1090 lp->stopped = 0;
1091 lp->step = 0;
1092 }
1093
1094 return 0;
1095 }
1096
1097 static int
1098 resume_clear_callback (struct lwp_info *lp, void *data)
1099 {
1100 lp->resumed = 0;
1101 return 0;
1102 }
1103
1104 static int
1105 resume_set_callback (struct lwp_info *lp, void *data)
1106 {
1107 lp->resumed = 1;
1108 return 0;
1109 }
1110
1111 static void
1112 linux_nat_resume (ptid_t ptid, int step, enum target_signal signo)
1113 {
1114 struct lwp_info *lp;
1115 int resume_all;
1116
1117 if (debug_linux_nat)
1118 fprintf_unfiltered (gdb_stdlog,
1119 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1120 step ? "step" : "resume",
1121 target_pid_to_str (ptid),
1122 signo ? strsignal (signo) : "0",
1123 target_pid_to_str (inferior_ptid));
1124
1125 prune_lwps ();
1126
1127 /* A specific PTID means `step only this process id'. */
1128 resume_all = (PIDGET (ptid) == -1);
1129
1130 if (resume_all)
1131 iterate_over_lwps (resume_set_callback, NULL);
1132 else
1133 iterate_over_lwps (resume_clear_callback, NULL);
1134
1135 /* If PID is -1, it's the current inferior that should be
1136 handled specially. */
1137 if (PIDGET (ptid) == -1)
1138 ptid = inferior_ptid;
1139
1140 lp = find_lwp_pid (ptid);
1141 if (lp)
1142 {
1143 ptid = pid_to_ptid (GET_LWP (lp->ptid));
1144
1145 /* Remember if we're stepping. */
1146 lp->step = step;
1147
1148 /* Mark this LWP as resumed. */
1149 lp->resumed = 1;
1150
1151 /* If we have a pending wait status for this thread, there is no
1152 point in resuming the process. But first make sure that
1153 linux_nat_wait won't preemptively handle the event - we
1154 should never take this short-circuit if we are going to
1155 leave LP running, since we have skipped resuming all the
1156 other threads. This bit of code needs to be synchronized
1157 with linux_nat_wait. */
1158
1159 if (lp->status && WIFSTOPPED (lp->status))
1160 {
1161 int saved_signo = target_signal_from_host (WSTOPSIG (lp->status));
1162
1163 if (signal_stop_state (saved_signo) == 0
1164 && signal_print_state (saved_signo) == 0
1165 && signal_pass_state (saved_signo) == 1)
1166 {
1167 if (debug_linux_nat)
1168 fprintf_unfiltered (gdb_stdlog,
1169 "LLR: Not short circuiting for ignored "
1170 "status 0x%x\n", lp->status);
1171
1172 /* FIXME: What should we do if we are supposed to continue
1173 this thread with a signal? */
1174 gdb_assert (signo == TARGET_SIGNAL_0);
1175 signo = saved_signo;
1176 lp->status = 0;
1177 }
1178 }
1179
1180 if (lp->status)
1181 {
1182 /* FIXME: What should we do if we are supposed to continue
1183 this thread with a signal? */
1184 gdb_assert (signo == TARGET_SIGNAL_0);
1185
1186 if (debug_linux_nat)
1187 fprintf_unfiltered (gdb_stdlog,
1188 "LLR: Short circuiting for status 0x%x\n",
1189 lp->status);
1190
1191 return;
1192 }
1193
1194 /* Mark LWP as not stopped to prevent it from being continued by
1195 resume_callback. */
1196 lp->stopped = 0;
1197 }
1198
1199 if (resume_all)
1200 iterate_over_lwps (resume_callback, NULL);
1201
1202 linux_ops->to_resume (ptid, step, signo);
1203 if (debug_linux_nat)
1204 fprintf_unfiltered (gdb_stdlog,
1205 "LLR: %s %s, %s (resume event thread)\n",
1206 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1207 target_pid_to_str (ptid),
1208 signo ? strsignal (signo) : "0");
1209 }
1210
1211 /* Issue kill to specified lwp. */
1212
1213 static int tkill_failed;
1214
1215 static int
1216 kill_lwp (int lwpid, int signo)
1217 {
1218 errno = 0;
1219
1220 /* Use tkill, if possible, in case we are using nptl threads. If tkill
1221 fails, then we are not using nptl threads and we should be using kill. */
1222
1223 #ifdef HAVE_TKILL_SYSCALL
1224 if (!tkill_failed)
1225 {
1226 int ret = syscall (__NR_tkill, lwpid, signo);
1227 if (errno != ENOSYS)
1228 return ret;
1229 errno = 0;
1230 tkill_failed = 1;
1231 }
1232 #endif
1233
1234 return kill (lwpid, signo);
1235 }
1236
1237 /* Handle a GNU/Linux extended wait response. If we see a clone
1238 event, we need to add the new LWP to our list (and not report the
1239 trap to higher layers). This function returns non-zero if the
1240 event should be ignored and we should wait again. If STOPPING is
1241 true, the new LWP remains stopped, otherwise it is continued. */
1242
1243 static int
1244 linux_handle_extended_wait (struct lwp_info *lp, int status,
1245 int stopping)
1246 {
1247 int pid = GET_LWP (lp->ptid);
1248 struct target_waitstatus *ourstatus = &lp->waitstatus;
1249 struct lwp_info *new_lp = NULL;
1250 int event = status >> 16;
1251
1252 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
1253 || event == PTRACE_EVENT_CLONE)
1254 {
1255 unsigned long new_pid;
1256 int ret;
1257
1258 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
1259
1260 /* If we haven't already seen the new PID stop, wait for it now. */
1261 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
1262 {
1263 /* The new child has a pending SIGSTOP. We can't affect it until it
1264 hits the SIGSTOP, but we're already attached. */
1265 ret = my_waitpid (new_pid, &status,
1266 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
1267 if (ret == -1)
1268 perror_with_name (_("waiting for new child"));
1269 else if (ret != new_pid)
1270 internal_error (__FILE__, __LINE__,
1271 _("wait returned unexpected PID %d"), ret);
1272 else if (!WIFSTOPPED (status))
1273 internal_error (__FILE__, __LINE__,
1274 _("wait returned unexpected status 0x%x"), status);
1275 }
1276
1277 ourstatus->value.related_pid = new_pid;
1278
1279 if (event == PTRACE_EVENT_FORK)
1280 ourstatus->kind = TARGET_WAITKIND_FORKED;
1281 else if (event == PTRACE_EVENT_VFORK)
1282 ourstatus->kind = TARGET_WAITKIND_VFORKED;
1283 else
1284 {
1285 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1286 new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (inferior_ptid)));
1287 new_lp->cloned = 1;
1288
1289 if (WSTOPSIG (status) != SIGSTOP)
1290 {
1291 /* This can happen if someone starts sending signals with
1292 a lower number than SIGSTOP (e.g. SIGUSR1) to the new
1293 thread before it gets a chance to run.
1294 This is an unlikely case, and harder to handle for
1295 fork / vfork than for clone, so we do not try - but
1296 we handle it for clone events here. We'll send
1297 the other signal on to the thread below. */
1298
1299 new_lp->signalled = 1;
1300 }
1301 else
1302 status = 0;
1303
1304 if (stopping)
1305 new_lp->stopped = 1;
1306 else
1307 {
1308 new_lp->resumed = 1;
1309 ptrace (PTRACE_CONT, lp->waitstatus.value.related_pid, 0,
1310 status ? WSTOPSIG (status) : 0);
1311 }
1312
1313 if (debug_linux_nat)
1314 fprintf_unfiltered (gdb_stdlog,
1315 "LHEW: Got clone event from LWP %ld, resuming\n",
1316 GET_LWP (lp->ptid));
1317 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1318
1319 return 1;
1320 }
1321
1322 return 0;
1323 }
1324
1325 if (event == PTRACE_EVENT_EXEC)
1326 {
1327 ourstatus->kind = TARGET_WAITKIND_EXECD;
1328 ourstatus->value.execd_pathname
1329 = xstrdup (linux_child_pid_to_exec_file (pid));
1330
1331 if (linux_parent_pid)
1332 {
1333 detach_breakpoints (linux_parent_pid);
1334 ptrace (PTRACE_DETACH, linux_parent_pid, 0, 0);
1335
1336 linux_parent_pid = 0;
1337 }
1338
1339 return 0;
1340 }
1341
1342 internal_error (__FILE__, __LINE__,
1343 _("unknown ptrace event %d"), event);
1344 }
1345
1346 /* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
1347 exited. */
1348
1349 static int
1350 wait_lwp (struct lwp_info *lp)
1351 {
1352 pid_t pid;
1353 int status;
1354 int thread_dead = 0;
1355
1356 gdb_assert (!lp->stopped);
1357 gdb_assert (lp->status == 0);
1358
1359 pid = my_waitpid (GET_LWP (lp->ptid), &status, 0);
1360 if (pid == -1 && errno == ECHILD)
1361 {
1362 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE);
1363 if (pid == -1 && errno == ECHILD)
1364 {
1365 /* The thread has previously exited. We need to delete it
1366 now because, for some vendor 2.4 kernels with NPTL
1367 support backported, there won't be an exit event unless
1368 it is the main thread. 2.6 kernels will report an exit
1369 event for each thread that exits, as expected. */
1370 thread_dead = 1;
1371 if (debug_linux_nat)
1372 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
1373 target_pid_to_str (lp->ptid));
1374 }
1375 }
1376
1377 if (!thread_dead)
1378 {
1379 gdb_assert (pid == GET_LWP (lp->ptid));
1380
1381 if (debug_linux_nat)
1382 {
1383 fprintf_unfiltered (gdb_stdlog,
1384 "WL: waitpid %s received %s\n",
1385 target_pid_to_str (lp->ptid),
1386 status_to_str (status));
1387 }
1388 }
1389
1390 /* Check if the thread has exited. */
1391 if (WIFEXITED (status) || WIFSIGNALED (status))
1392 {
1393 thread_dead = 1;
1394 if (debug_linux_nat)
1395 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
1396 target_pid_to_str (lp->ptid));
1397 }
1398
1399 if (thread_dead)
1400 {
1401 exit_lwp (lp);
1402 return 0;
1403 }
1404
1405 gdb_assert (WIFSTOPPED (status));
1406
1407 /* Handle GNU/Linux's extended waitstatus for trace events. */
1408 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
1409 {
1410 if (debug_linux_nat)
1411 fprintf_unfiltered (gdb_stdlog,
1412 "WL: Handling extended status 0x%06x\n",
1413 status);
1414 if (linux_handle_extended_wait (lp, status, 1))
1415 return wait_lwp (lp);
1416 }
1417
1418 return status;
1419 }
1420
1421 /* Send a SIGSTOP to LP. */
1422
1423 static int
1424 stop_callback (struct lwp_info *lp, void *data)
1425 {
1426 if (!lp->stopped && !lp->signalled)
1427 {
1428 int ret;
1429
1430 if (debug_linux_nat)
1431 {
1432 fprintf_unfiltered (gdb_stdlog,
1433 "SC: kill %s **<SIGSTOP>**\n",
1434 target_pid_to_str (lp->ptid));
1435 }
1436 errno = 0;
1437 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
1438 if (debug_linux_nat)
1439 {
1440 fprintf_unfiltered (gdb_stdlog,
1441 "SC: lwp kill %d %s\n",
1442 ret,
1443 errno ? safe_strerror (errno) : "ERRNO-OK");
1444 }
1445
1446 lp->signalled = 1;
1447 gdb_assert (lp->status == 0);
1448 }
1449
1450 return 0;
1451 }
1452
1453 /* Wait until LP is stopped. If DATA is non-null it is interpreted as
1454 a pointer to a set of signals to be flushed immediately. */
1455
1456 static int
1457 stop_wait_callback (struct lwp_info *lp, void *data)
1458 {
1459 sigset_t *flush_mask = data;
1460
1461 if (!lp->stopped)
1462 {
1463 int status;
1464
1465 status = wait_lwp (lp);
1466 if (status == 0)
1467 return 0;
1468
1469 /* Ignore any signals in FLUSH_MASK. */
1470 if (flush_mask && sigismember (flush_mask, WSTOPSIG (status)))
1471 {
1472 if (!lp->signalled)
1473 {
1474 lp->stopped = 1;
1475 return 0;
1476 }
1477
1478 errno = 0;
1479 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1480 if (debug_linux_nat)
1481 fprintf_unfiltered (gdb_stdlog,
1482 "PTRACE_CONT %s, 0, 0 (%s)\n",
1483 target_pid_to_str (lp->ptid),
1484 errno ? safe_strerror (errno) : "OK");
1485
1486 return stop_wait_callback (lp, flush_mask);
1487 }
1488
1489 if (WSTOPSIG (status) != SIGSTOP)
1490 {
1491 if (WSTOPSIG (status) == SIGTRAP)
1492 {
1493 /* If a LWP other than the LWP that we're reporting an
1494 event for has hit a GDB breakpoint (as opposed to
1495 some random trap signal), then just arrange for it to
1496 hit it again later. We don't keep the SIGTRAP status
1497 and don't forward the SIGTRAP signal to the LWP. We
1498 will handle the current event, eventually we will
1499 resume all LWPs, and this one will get its breakpoint
1500 trap again.
1501
1502 If we do not do this, then we run the risk that the
1503 user will delete or disable the breakpoint, but the
1504 thread will have already tripped on it. */
1505
1506 /* Now resume this LWP and get the SIGSTOP event. */
1507 errno = 0;
1508 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1509 if (debug_linux_nat)
1510 {
1511 fprintf_unfiltered (gdb_stdlog,
1512 "PTRACE_CONT %s, 0, 0 (%s)\n",
1513 target_pid_to_str (lp->ptid),
1514 errno ? safe_strerror (errno) : "OK");
1515
1516 fprintf_unfiltered (gdb_stdlog,
1517 "SWC: Candidate SIGTRAP event in %s\n",
1518 target_pid_to_str (lp->ptid));
1519 }
1520 /* Hold the SIGTRAP for handling by linux_nat_wait. */
1521 stop_wait_callback (lp, data);
1522 /* If there's another event, throw it back into the queue. */
1523 if (lp->status)
1524 {
1525 if (debug_linux_nat)
1526 {
1527 fprintf_unfiltered (gdb_stdlog,
1528 "SWC: kill %s, %s\n",
1529 target_pid_to_str (lp->ptid),
1530 status_to_str ((int) status));
1531 }
1532 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
1533 }
1534 /* Save the sigtrap event. */
1535 lp->status = status;
1536 return 0;
1537 }
1538 else
1539 {
1540 /* The thread was stopped with a signal other than
1541 SIGSTOP, and didn't accidentally trip a breakpoint. */
1542
1543 if (debug_linux_nat)
1544 {
1545 fprintf_unfiltered (gdb_stdlog,
1546 "SWC: Pending event %s in %s\n",
1547 status_to_str ((int) status),
1548 target_pid_to_str (lp->ptid));
1549 }
1550 /* Now resume this LWP and get the SIGSTOP event. */
1551 errno = 0;
1552 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1553 if (debug_linux_nat)
1554 fprintf_unfiltered (gdb_stdlog,
1555 "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
1556 target_pid_to_str (lp->ptid),
1557 errno ? safe_strerror (errno) : "OK");
1558
1559 /* Hold this event/waitstatus while we check to see if
1560 there are any more (we still want to get that SIGSTOP). */
1561 stop_wait_callback (lp, data);
1562 /* If the lp->status field is still empty, use it to hold
1563 this event. If not, then this event must be returned
1564 to the event queue of the LWP. */
1565 if (lp->status == 0)
1566 lp->status = status;
1567 else
1568 {
1569 if (debug_linux_nat)
1570 {
1571 fprintf_unfiltered (gdb_stdlog,
1572 "SWC: kill %s, %s\n",
1573 target_pid_to_str (lp->ptid),
1574 status_to_str ((int) status));
1575 }
1576 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
1577 }
1578 return 0;
1579 }
1580 }
1581 else
1582 {
1583 /* We caught the SIGSTOP that we intended to catch, so
1584 there's no SIGSTOP pending. */
1585 lp->stopped = 1;
1586 lp->signalled = 0;
1587 }
1588 }
1589
1590 return 0;
1591 }
1592
1593 /* Check whether PID has any pending signals in FLUSH_MASK. If so set
1594 the appropriate bits in PENDING, and return 1 - otherwise return 0. */
1595
1596 static int
1597 linux_nat_has_pending (int pid, sigset_t *pending, sigset_t *flush_mask)
1598 {
1599 sigset_t blocked, ignored;
1600 int i;
1601
1602 linux_proc_pending_signals (pid, pending, &blocked, &ignored);
1603
1604 if (!flush_mask)
1605 return 0;
1606
1607 for (i = 1; i < NSIG; i++)
1608 if (sigismember (pending, i))
1609 if (!sigismember (flush_mask, i)
1610 || sigismember (&blocked, i)
1611 || sigismember (&ignored, i))
1612 sigdelset (pending, i);
1613
1614 if (sigisemptyset (pending))
1615 return 0;
1616
1617 return 1;
1618 }
1619
1620 /* DATA is interpreted as a mask of signals to flush. If LP has
1621 signals pending, and they are all in the flush mask, then arrange
1622 to flush them. LP should be stopped, as should all other threads
1623 it might share a signal queue with. */
1624
1625 static int
1626 flush_callback (struct lwp_info *lp, void *data)
1627 {
1628 sigset_t *flush_mask = data;
1629 sigset_t pending, intersection, blocked, ignored;
1630 int pid, status;
1631
1632 /* Normally, when an LWP exits, it is removed from the LWP list. The
1633 last LWP isn't removed till later, however. So if there is only
1634 one LWP on the list, make sure it's alive. */
1635 if (lwp_list == lp && lp->next == NULL)
1636 if (!linux_nat_thread_alive (lp->ptid))
1637 return 0;
1638
1639 /* Just because the LWP is stopped doesn't mean that new signals
1640 can't arrive from outside, so this function must be careful of
1641 race conditions. However, because all threads are stopped, we
1642 can assume that the pending mask will not shrink unless we resume
1643 the LWP, and that it will then get another signal. We can't
1644 control which one, however. */
1645
1646 if (lp->status)
1647 {
1648 if (debug_linux_nat)
1649 printf_unfiltered (_("FC: LP has pending status %06x\n"), lp->status);
1650 if (WIFSTOPPED (lp->status) && sigismember (flush_mask, WSTOPSIG (lp->status)))
1651 lp->status = 0;
1652 }
1653
1654 /* While there is a pending signal we would like to flush, continue
1655 the inferior and collect another signal. But if there's already
1656 a saved status that we don't want to flush, we can't resume the
1657 inferior - if it stopped for some other reason we wouldn't have
1658 anywhere to save the new status. In that case, we must leave the
1659 signal unflushed (and possibly generate an extra SIGINT stop).
1660 That's much less bad than losing a signal. */
1661 while (lp->status == 0
1662 && linux_nat_has_pending (GET_LWP (lp->ptid), &pending, flush_mask))
1663 {
1664 int ret;
1665
1666 errno = 0;
1667 ret = ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1668 if (debug_linux_nat)
1669 fprintf_unfiltered (gdb_stderr,
1670 "FC: Sent PTRACE_CONT, ret %d %d\n", ret, errno);
1671
1672 lp->stopped = 0;
1673 stop_wait_callback (lp, flush_mask);
1674 if (debug_linux_nat)
1675 fprintf_unfiltered (gdb_stderr,
1676 "FC: Wait finished; saved status is %d\n",
1677 lp->status);
1678 }
1679
1680 return 0;
1681 }
1682
1683 /* Return non-zero if LP has a wait status pending. */
1684
1685 static int
1686 status_callback (struct lwp_info *lp, void *data)
1687 {
1688 /* Only report a pending wait status if we pretend that this has
1689 indeed been resumed. */
1690 return (lp->status != 0 && lp->resumed);
1691 }
1692
1693 /* Return non-zero if LP isn't stopped. */
1694
1695 static int
1696 running_callback (struct lwp_info *lp, void *data)
1697 {
1698 return (lp->stopped == 0 || (lp->status != 0 && lp->resumed));
1699 }
1700
1701 /* Count the LWPs that have had events. */
1702
1703 static int
1704 count_events_callback (struct lwp_info *lp, void *data)
1705 {
1706 int *count = data;
1707
1708 gdb_assert (count != NULL);
1709
1710 /* Count only LWPs that have a SIGTRAP event pending. */
1711 if (lp->status != 0
1712 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
1713 (*count)++;
1714
1715 return 0;
1716 }
1717
1718 /* Select the LWP (if any) that is currently being single-stepped. */
1719
1720 static int
1721 select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
1722 {
1723 if (lp->step && lp->status != 0)
1724 return 1;
1725 else
1726 return 0;
1727 }
1728
1729 /* Select the Nth LWP that has had a SIGTRAP event. */
1730
1731 static int
1732 select_event_lwp_callback (struct lwp_info *lp, void *data)
1733 {
1734 int *selector = data;
1735
1736 gdb_assert (selector != NULL);
1737
1738 /* Select only LWPs that have a SIGTRAP event pending. */
1739 if (lp->status != 0
1740 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
1741 if ((*selector)-- == 0)
1742 return 1;
1743
1744 return 0;
1745 }
1746
1747 static int
1748 cancel_breakpoints_callback (struct lwp_info *lp, void *data)
1749 {
1750 struct lwp_info *event_lp = data;
1751
1752 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
1753 if (lp == event_lp)
1754 return 0;
1755
1756 /* If a LWP other than the LWP that we're reporting an event for has
1757 hit a GDB breakpoint (as opposed to some random trap signal),
1758 then just arrange for it to hit it again later. We don't keep
1759 the SIGTRAP status and don't forward the SIGTRAP signal to the
1760 LWP. We will handle the current event, eventually we will resume
1761 all LWPs, and this one will get its breakpoint trap again.
1762
1763 If we do not do this, then we run the risk that the user will
1764 delete or disable the breakpoint, but the LWP will have already
1765 tripped on it. */
1766
1767 if (lp->status != 0
1768 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP
1769 && breakpoint_inserted_here_p (read_pc_pid (lp->ptid) -
1770 DECR_PC_AFTER_BREAK))
1771 {
1772 if (debug_linux_nat)
1773 fprintf_unfiltered (gdb_stdlog,
1774 "CBC: Push back breakpoint for %s\n",
1775 target_pid_to_str (lp->ptid));
1776
1777 /* Back up the PC if necessary. */
1778 if (DECR_PC_AFTER_BREAK)
1779 write_pc_pid (read_pc_pid (lp->ptid) - DECR_PC_AFTER_BREAK, lp->ptid);
1780
1781 /* Throw away the SIGTRAP. */
1782 lp->status = 0;
1783 }
1784
1785 return 0;
1786 }
1787
1788 /* Select one LWP out of those that have events pending. */
1789
1790 static void
1791 select_event_lwp (struct lwp_info **orig_lp, int *status)
1792 {
1793 int num_events = 0;
1794 int random_selector;
1795 struct lwp_info *event_lp;
1796
1797 /* Record the wait status for the original LWP. */
1798 (*orig_lp)->status = *status;
1799
1800 /* Give preference to any LWP that is being single-stepped. */
1801 event_lp = iterate_over_lwps (select_singlestep_lwp_callback, NULL);
1802 if (event_lp != NULL)
1803 {
1804 if (debug_linux_nat)
1805 fprintf_unfiltered (gdb_stdlog,
1806 "SEL: Select single-step %s\n",
1807 target_pid_to_str (event_lp->ptid));
1808 }
1809 else
1810 {
1811 /* No single-stepping LWP. Select one at random, out of those
1812 which have had SIGTRAP events. */
1813
1814 /* First see how many SIGTRAP events we have. */
1815 iterate_over_lwps (count_events_callback, &num_events);
1816
1817 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
1818 random_selector = (int)
1819 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
1820
1821 if (debug_linux_nat && num_events > 1)
1822 fprintf_unfiltered (gdb_stdlog,
1823 "SEL: Found %d SIGTRAP events, selecting #%d\n",
1824 num_events, random_selector);
1825
1826 event_lp = iterate_over_lwps (select_event_lwp_callback,
1827 &random_selector);
1828 }
1829
1830 if (event_lp != NULL)
1831 {
1832 /* Switch the event LWP. */
1833 *orig_lp = event_lp;
1834 *status = event_lp->status;
1835 }
1836
1837 /* Flush the wait status for the event LWP. */
1838 (*orig_lp)->status = 0;
1839 }
1840
1841 /* Return non-zero if LP has been resumed. */
1842
1843 static int
1844 resumed_callback (struct lwp_info *lp, void *data)
1845 {
1846 return lp->resumed;
1847 }
1848
1849 /* Stop an active thread, verify it still exists, then resume it. */
1850
1851 static int
1852 stop_and_resume_callback (struct lwp_info *lp, void *data)
1853 {
1854 struct lwp_info *ptr;
1855
1856 if (!lp->stopped && !lp->signalled)
1857 {
1858 stop_callback (lp, NULL);
1859 stop_wait_callback (lp, NULL);
1860 /* Resume if the lwp still exists. */
1861 for (ptr = lwp_list; ptr; ptr = ptr->next)
1862 if (lp == ptr)
1863 {
1864 resume_callback (lp, NULL);
1865 resume_set_callback (lp, NULL);
1866 }
1867 }
1868 return 0;
1869 }
1870
1871 static ptid_t
1872 linux_nat_wait (ptid_t ptid, struct target_waitstatus *ourstatus)
1873 {
1874 struct lwp_info *lp = NULL;
1875 int options = 0;
1876 int status = 0;
1877 pid_t pid = PIDGET (ptid);
1878 sigset_t flush_mask;
1879
1880 /* The first time we get here after starting a new inferior, we may
1881 not have added it to the LWP list yet - this is the earliest
1882 moment at which we know its PID. */
1883 if (num_lwps == 0)
1884 {
1885 gdb_assert (!is_lwp (inferior_ptid));
1886
1887 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
1888 GET_PID (inferior_ptid));
1889 lp = add_lwp (inferior_ptid);
1890 lp->resumed = 1;
1891 }
1892
1893 sigemptyset (&flush_mask);
1894
1895 /* Make sure SIGCHLD is blocked. */
1896 if (!sigismember (&blocked_mask, SIGCHLD))
1897 {
1898 sigaddset (&blocked_mask, SIGCHLD);
1899 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
1900 }
1901
1902 retry:
1903
1904 /* Make sure there is at least one LWP that has been resumed. */
1905 gdb_assert (iterate_over_lwps (resumed_callback, NULL));
1906
1907 /* First check if there is a LWP with a wait status pending. */
1908 if (pid == -1)
1909 {
1910 /* Any LWP that's been resumed will do. */
1911 lp = iterate_over_lwps (status_callback, NULL);
1912 if (lp)
1913 {
1914 status = lp->status;
1915 lp->status = 0;
1916
1917 if (debug_linux_nat && status)
1918 fprintf_unfiltered (gdb_stdlog,
1919 "LLW: Using pending wait status %s for %s.\n",
1920 status_to_str (status),
1921 target_pid_to_str (lp->ptid));
1922 }
1923
1924 /* But if we don't find one, we'll have to wait, and check both
1925 cloned and uncloned processes. We start with the cloned
1926 processes. */
1927 options = __WCLONE | WNOHANG;
1928 }
1929 else if (is_lwp (ptid))
1930 {
1931 if (debug_linux_nat)
1932 fprintf_unfiltered (gdb_stdlog,
1933 "LLW: Waiting for specific LWP %s.\n",
1934 target_pid_to_str (ptid));
1935
1936 /* We have a specific LWP to check. */
1937 lp = find_lwp_pid (ptid);
1938 gdb_assert (lp);
1939 status = lp->status;
1940 lp->status = 0;
1941
1942 if (debug_linux_nat && status)
1943 fprintf_unfiltered (gdb_stdlog,
1944 "LLW: Using pending wait status %s for %s.\n",
1945 status_to_str (status),
1946 target_pid_to_str (lp->ptid));
1947
1948 /* If we have to wait, take into account whether PID is a cloned
1949 process or not. And we have to convert it to something that
1950 the layer beneath us can understand. */
1951 options = lp->cloned ? __WCLONE : 0;
1952 pid = GET_LWP (ptid);
1953 }
1954
1955 if (status && lp->signalled)
1956 {
1957 /* A pending SIGSTOP may interfere with the normal stream of
1958 events. In a typical case where interference is a problem,
1959 we have a SIGSTOP signal pending for LWP A while
1960 single-stepping it, encounter an event in LWP B, and take the
1961 pending SIGSTOP while trying to stop LWP A. After processing
1962 the event in LWP B, LWP A is continued, and we'll never see
1963 the SIGTRAP associated with the last time we were
1964 single-stepping LWP A. */
1965
1966 /* Resume the thread. It should halt immediately, returning the
1967 pending SIGSTOP. */
1968 registers_changed ();
1969 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
1970 lp->step, TARGET_SIGNAL_0);
1971 if (debug_linux_nat)
1972 fprintf_unfiltered (gdb_stdlog,
1973 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
1974 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1975 target_pid_to_str (lp->ptid));
1976 lp->stopped = 0;
1977 gdb_assert (lp->resumed);
1978
1979 /* This should catch the pending SIGSTOP. */
1980 stop_wait_callback (lp, NULL);
1981 }
1982
1983 set_sigint_trap (); /* Causes SIGINT to be passed on to the
1984 attached process. */
1985 set_sigio_trap ();
1986
1987 while (status == 0)
1988 {
1989 pid_t lwpid;
1990
1991 lwpid = my_waitpid (pid, &status, options);
1992 if (lwpid > 0)
1993 {
1994 gdb_assert (pid == -1 || lwpid == pid);
1995
1996 if (debug_linux_nat)
1997 {
1998 fprintf_unfiltered (gdb_stdlog,
1999 "LLW: waitpid %ld received %s\n",
2000 (long) lwpid, status_to_str (status));
2001 }
2002
2003 lp = find_lwp_pid (pid_to_ptid (lwpid));
2004
2005 /* Check for stop events reported by a process we didn't
2006 already know about - anything not already in our LWP
2007 list.
2008
2009 If we're expecting to receive stopped processes after
2010 fork, vfork, and clone events, then we'll just add the
2011 new one to our list and go back to waiting for the event
2012 to be reported - the stopped process might be returned
2013 from waitpid before or after the event is. */
2014 if (WIFSTOPPED (status) && !lp)
2015 {
2016 linux_record_stopped_pid (lwpid, status);
2017 status = 0;
2018 continue;
2019 }
2020
2021 /* Make sure we don't report an event for the exit of an LWP not in
2022 our list, i.e. not part of the current process. This can happen
2023 if we detach from a program we originally forked and then it
2024 exits. */
2025 if (!WIFSTOPPED (status) && !lp)
2026 {
2027 status = 0;
2028 continue;
2029 }
2030
2031 /* NOTE drow/2003-06-17: This code seems to be meant for debugging
2032 CLONE_PTRACE processes which do not use the thread library -
2033 otherwise we wouldn't find the new LWP this way. That doesn't
2034 currently work, and the following code is currently unreachable
2035 due to the two blocks above. If it's fixed some day, this code
2036 should be broken out into a function so that we can also pick up
2037 LWPs from the new interface. */
2038 if (!lp)
2039 {
2040 lp = add_lwp (BUILD_LWP (lwpid, GET_PID (inferior_ptid)));
2041 if (options & __WCLONE)
2042 lp->cloned = 1;
2043
2044 gdb_assert (WIFSTOPPED (status)
2045 && WSTOPSIG (status) == SIGSTOP);
2046 lp->signalled = 1;
2047
2048 if (!in_thread_list (inferior_ptid))
2049 {
2050 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
2051 GET_PID (inferior_ptid));
2052 add_thread (inferior_ptid);
2053 }
2054
2055 add_thread (lp->ptid);
2056 printf_unfiltered (_("[New %s]\n"),
2057 target_pid_to_str (lp->ptid));
2058 }
2059
2060 /* Handle GNU/Linux's extended waitstatus for trace events. */
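/* With the PTRACE_O_TRACEFORK family of options enabled, the kernel
   reports such events as a SIGTRAP stop whose PTRACE_EVENT_* code is
   stored in bits 16 and up of the waitpid status; that is what the
   status >> 16 test below detects.  */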
2061 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2062 {
2063 if (debug_linux_nat)
2064 fprintf_unfiltered (gdb_stdlog,
2065 "LLW: Handling extended status 0x%06x\n",
2066 status);
2067 if (linux_handle_extended_wait (lp, status, 0))
2068 {
2069 status = 0;
2070 continue;
2071 }
2072 }
2073
2074 /* Check if the thread has exited. */
2075 if ((WIFEXITED (status) || WIFSIGNALED (status)) && num_lwps > 1)
2076 {
2077 /* If this is the main thread, we must stop all threads and
2078 verify if they are still alive. This is because in the nptl
2079 thread model, there is no signal issued for exiting LWPs
2080 other than the main thread. We only get the main thread
2081 exit signal once all child threads have already exited.
2082 If we stop all the threads and use the stop_wait_callback
2083 to check if they have exited we can determine whether this
2084 signal should be ignored or whether it means the end of the
2085 debugged application, regardless of which threading model
2086 is being used. */
2087 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
2088 {
2089 lp->stopped = 1;
2090 iterate_over_lwps (stop_and_resume_callback, NULL);
2091 }
2092
2093 if (debug_linux_nat)
2094 fprintf_unfiltered (gdb_stdlog,
2095 "LLW: %s exited.\n",
2096 target_pid_to_str (lp->ptid));
2097
2098 exit_lwp (lp);
2099
2100 /* If there is at least one more LWP, then the exit signal
2101 was not the end of the debugged application and should be
2102 ignored. */
2103 if (num_lwps > 0)
2104 {
2105 /* Make sure there is at least one thread running. */
2106 gdb_assert (iterate_over_lwps (running_callback, NULL));
2107
2108 /* Discard the event. */
2109 status = 0;
2110 continue;
2111 }
2112 }
2113
2114 /* Check if the current LWP has previously exited. In the nptl
2115 thread model, LWPs other than the main thread do not issue
2116 signals when they exit so we must check whenever the thread
2117 has stopped. A similar check is made in stop_wait_callback(). */
2118 if (num_lwps > 1 && !linux_nat_thread_alive (lp->ptid))
2119 {
2120 if (debug_linux_nat)
2121 fprintf_unfiltered (gdb_stdlog,
2122 "LLW: %s exited.\n",
2123 target_pid_to_str (lp->ptid));
2124
2125 exit_lwp (lp);
2126
2127 /* Make sure there is at least one thread running. */
2128 gdb_assert (iterate_over_lwps (running_callback, NULL));
2129
2130 /* Discard the event. */
2131 status = 0;
2132 continue;
2133 }
2134
2135 /* Make sure we don't report a SIGSTOP that we sent
2136 ourselves in an attempt to stop an LWP. */
2137 if (lp->signalled
2138 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
2139 {
2140 if (debug_linux_nat)
2141 fprintf_unfiltered (gdb_stdlog,
2142 "LLW: Delayed SIGSTOP caught for %s.\n",
2143 target_pid_to_str (lp->ptid));
2144
2145 /* This is a delayed SIGSTOP. */
2146 lp->signalled = 0;
2147
2148 registers_changed ();
2149 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2150 lp->step, TARGET_SIGNAL_0);
2151 if (debug_linux_nat)
2152 fprintf_unfiltered (gdb_stdlog,
2153 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
2154 lp->step ?
2155 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2156 target_pid_to_str (lp->ptid));
2157
2158 lp->stopped = 0;
2159 gdb_assert (lp->resumed);
2160
2161 /* Discard the event. */
2162 status = 0;
2163 continue;
2164 }
2165
2166 break;
2167 }
2168
2169 if (pid == -1)
2170 {
2171 /* Alternate between checking cloned and uncloned processes. */
2172 options ^= __WCLONE;
2173
2174 /* And suspend every time we have checked both. */
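/* sigsuspend swaps in suspend_mask, which has SIGCHLD removed, for
   the duration of the call, so the otherwise-blocked SIGCHLD can be
   delivered and its do-nothing handler wakes us up as soon as some
   child changes state.  */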
2175 if (options & __WCLONE)
2176 sigsuspend (&suspend_mask);
2177 }
2178
2179 /* We shouldn't end up here unless we want to try again. */
2180 gdb_assert (status == 0);
2181 }
2182
2183 clear_sigio_trap ();
2184 clear_sigint_trap ();
2185
2186 gdb_assert (lp);
2187
2188 /* Don't report signals that GDB isn't interested in, such as
2189 signals that are neither printed nor stopped upon. Stopping all
2190 threads can be a bit time-consuming so if we want decent
2191 performance with heavily multi-threaded programs, especially when
2192 they're using a high frequency timer, we'd better avoid it if we
2193 can. */
2194
2195 if (WIFSTOPPED (status))
2196 {
2197 int signo = target_signal_from_host (WSTOPSIG (status));
2198
2199 /* If we get a signal while single-stepping, we may need special
2200 care, e.g. to skip the signal handler. Defer to common code. */
2201 if (!lp->step
2202 && signal_stop_state (signo) == 0
2203 && signal_print_state (signo) == 0
2204 && signal_pass_state (signo) == 1)
2205 {
2206 /* FIXME: kettenis/2001-06-06: Should we resume all threads
2207 here? It is not clear we should. GDB may not expect
2208 other threads to run. On the other hand, not resuming
2209 newly attached threads may cause an unwanted delay in
2210 getting them running. */
2211 registers_changed ();
2212 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2213 lp->step, signo);
2214 if (debug_linux_nat)
2215 fprintf_unfiltered (gdb_stdlog,
2216 "LLW: %s %s, %s (preempt 'handle')\n",
2217 lp->step ?
2218 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2219 target_pid_to_str (lp->ptid),
2220 signo ? strsignal (signo) : "0");
2221 lp->stopped = 0;
2222 status = 0;
2223 goto retry;
2224 }
2225
2226 if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
2227 {
2228 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
2229 forwarded to the entire process group, that is, all LWP's
2230 will receive it. Since we only want to report it once,
2231 we try to flush it from all LWPs except this one. */
2232 sigaddset (&flush_mask, SIGINT);
2233 }
2234 }
2235
2236 /* This LWP is stopped now. */
2237 lp->stopped = 1;
2238
2239 if (debug_linux_nat)
2240 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
2241 status_to_str (status), target_pid_to_str (lp->ptid));
2242
2243 /* Now stop all other LWP's ... */
2244 iterate_over_lwps (stop_callback, NULL);
2245
2246 /* ... and wait until all of them have reported back that they're no
2247 longer running. */
2248 iterate_over_lwps (stop_wait_callback, &flush_mask);
2249 iterate_over_lwps (flush_callback, &flush_mask);
2250
2251 /* If we're not waiting for a specific LWP, choose an event LWP from
2252 among those that have had events. Giving equal priority to all
2253 LWPs that have had events helps prevent starvation. */
2254 if (pid == -1)
2255 select_event_lwp (&lp, &status);
2256
2257 /* Now that we've selected our final event LWP, cancel any
2258 breakpoints in other LWPs that have hit a GDB breakpoint. See
2259 the comment in cancel_breakpoints_callback to find out why. */
2260 iterate_over_lwps (cancel_breakpoints_callback, lp);
2261
2262 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
2263 {
2264 trap_ptid = lp->ptid;
2265 if (debug_linux_nat)
2266 fprintf_unfiltered (gdb_stdlog,
2267 "LLW: trap_ptid is %s.\n",
2268 target_pid_to_str (trap_ptid));
2269 }
2270 else
2271 trap_ptid = null_ptid;
2272
2273 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2274 {
2275 *ourstatus = lp->waitstatus;
2276 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
2277 }
2278 else
2279 store_waitstatus (ourstatus, status);
2280
2281 return lp->ptid;
2282 }
2283
2284 static int
2285 kill_callback (struct lwp_info *lp, void *data)
2286 {
2287 errno = 0;
2288 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
2289 if (debug_linux_nat)
2290 fprintf_unfiltered (gdb_stdlog,
2291 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
2292 target_pid_to_str (lp->ptid),
2293 errno ? safe_strerror (errno) : "OK");
2294
2295 return 0;
2296 }
2297
2298 static int
2299 kill_wait_callback (struct lwp_info *lp, void *data)
2300 {
2301 pid_t pid;
2302
2303 /* We must make sure that there are no pending events (delayed
2304 SIGSTOPs, pending SIGTRAPs, etc.) so that the current program
2305 doesn't interfere with any following debugging session. */
2306
2307 /* For cloned processes we must check both with __WCLONE and
2308 without, since the exit status of a cloned process isn't reported
2309 with __WCLONE. */
2310 if (lp->cloned)
2311 {
2312 do
2313 {
2314 pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
2315 if (pid != (pid_t) -1 && debug_linux_nat)
2316 {
2317 fprintf_unfiltered (gdb_stdlog,
2318 "KWC: wait %s received unknown.\n",
2319 target_pid_to_str (lp->ptid));
2320 }
2321 }
2322 while (pid == GET_LWP (lp->ptid));
2323
2324 gdb_assert (pid == -1 && errno == ECHILD);
2325 }
2326
2327 do
2328 {
2329 pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
2330 if (pid != (pid_t) -1 && debug_linux_nat)
2331 {
2332 fprintf_unfiltered (gdb_stdlog,
2333 "KWC: wait %s received unk.\n",
2334 target_pid_to_str (lp->ptid));
2335 }
2336 }
2337 while (pid == GET_LWP (lp->ptid));
2338
2339 gdb_assert (pid == -1 && errno == ECHILD);
2340 return 0;
2341 }
2342
2343 static void
2344 linux_nat_kill (void)
2345 {
2346 struct target_waitstatus last;
2347 ptid_t last_ptid;
2348 int status;
2349
2350 /* If we're stopped while forking and we haven't followed yet,
2351 kill the other task. We need to do this first because the
2352 parent will be sleeping if this is a vfork. */
2353
2354 get_last_target_status (&last_ptid, &last);
2355
2356 if (last.kind == TARGET_WAITKIND_FORKED
2357 || last.kind == TARGET_WAITKIND_VFORKED)
2358 {
2359 ptrace (PT_KILL, last.value.related_pid, 0, 0);
2360 wait (&status);
2361 }
2362
2363 if (forks_exist_p ())
2364 linux_fork_killall ();
2365 else
2366 {
2367 /* Kill all LWP's ... */
2368 iterate_over_lwps (kill_callback, NULL);
2369
2370 /* ... and wait until we've flushed all events. */
2371 iterate_over_lwps (kill_wait_callback, NULL);
2372 }
2373
2374 target_mourn_inferior ();
2375 }
2376
2377 static void
2378 linux_nat_mourn_inferior (void)
2379 {
2380 trap_ptid = null_ptid;
2381
2382 /* Destroy LWP info; it's no longer valid. */
2383 init_lwp_list ();
2384
2385 /* Restore the original signal mask. */
2386 sigprocmask (SIG_SETMASK, &normal_mask, NULL);
2387 sigemptyset (&blocked_mask);
2388
2389 if (! forks_exist_p ())
2390 /* Normal case, no other forks available. */
2391 linux_ops->to_mourn_inferior ();
2392 else
2393 /* Multi-fork case. The current inferior_ptid has exited, but
2394 there are other viable forks to debug. Delete the exiting
2395 one and context-switch to the first available. */
2396 linux_fork_mourn_inferior ();
2397 }
2398
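/* Target to_xfer_partial method for the threaded layer.  The
   single-threaded target beneath us works in terms of plain process
   ids, so temporarily present inferior_ptid that way before
   deferring to it.  */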
2399 static LONGEST
2400 linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
2401 const char *annex, gdb_byte *readbuf,
2402 const gdb_byte *writebuf,
2403 ULONGEST offset, LONGEST len)
2404 {
2405 struct cleanup *old_chain = save_inferior_ptid ();
2406 LONGEST xfer;
2407
2408 if (is_lwp (inferior_ptid))
2409 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
2410
2411 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
2412 offset, len);
2413
2414 do_cleanups (old_chain);
2415 return xfer;
2416 }
2417
2418 static int
2419 linux_nat_thread_alive (ptid_t ptid)
2420 {
2421 gdb_assert (is_lwp (ptid));
2422
2423 errno = 0;
2424 ptrace (PTRACE_PEEKUSER, GET_LWP (ptid), 0, 0);
2425 if (debug_linux_nat)
2426 fprintf_unfiltered (gdb_stdlog,
2427 "LLTA: PTRACE_PEEKUSER %s, 0, 0 (%s)\n",
2428 target_pid_to_str (ptid),
2429 errno ? safe_strerror (errno) : "OK");
2430
2431 /* Not every Linux kernel implements PTRACE_PEEKUSER. But we can
2432 handle that case gracefully since ptrace will first do a lookup
2433 for the process based upon the passed-in pid. If that fails we
2434 will get either -ESRCH or -EPERM, otherwise the child exists and
2435 is alive. */
2436 if (errno == ESRCH || errno == EPERM)
2437 return 0;
2438
2439 return 1;
2440 }
2441
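/* Target to_pid_to_str method.  Once more than one LWP is known we
   print e.g. "LWP 21734" (the number is only illustrative); otherwise
   we defer to normal_pid_to_str.  */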
2442 static char *
2443 linux_nat_pid_to_str (ptid_t ptid)
2444 {
2445 static char buf[64];
2446
2447 if (lwp_list && lwp_list->next && is_lwp (ptid))
2448 {
2449 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
2450 return buf;
2451 }
2452
2453 return normal_pid_to_str (ptid);
2454 }
2455
2456 static void
2457 sigchld_handler (int signo)
2458 {
2459 /* Do nothing. The only reason for this handler is that it allows
2460 us to use sigsuspend in linux_nat_wait above to wait for the
2461 arrival of a SIGCHLD. */
2462 }
2463
2464 /* Accepts an integer PID; returns a string representing a file that
2465 can be opened to get the symbols for the child process. */
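/* On GNU/Linux this is simply a readlink of the "/proc/PID/exe"
   symlink; if the link cannot be read, fall back to returning the
   "/proc/PID/exe" path itself.  */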
2466
2467 static char *
2468 linux_child_pid_to_exec_file (int pid)
2469 {
2470 char *name1, *name2;
2471
2472 name1 = xmalloc (MAXPATHLEN);
2473 name2 = xmalloc (MAXPATHLEN);
2474 make_cleanup (xfree, name1);
2475 make_cleanup (xfree, name2);
2476 memset (name2, 0, MAXPATHLEN);
2477
2478 sprintf (name1, "/proc/%d/exe", pid);
2479 if (readlink (name1, name2, MAXPATHLEN) > 0)
2480 return name2;
2481 else
2482 return name1;
2483 }
2484
2485 /* Service function for corefiles and info proc. */
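/* Each line of /proc/PID/maps has the form

     08048000-0804c000 r-xp 00000000 03:01 123456   /bin/cat

   i.e. start and end address, permissions, file offset, device,
   inode and, optionally, the name of the backing file.  The values
   above are only an illustrative example.  */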
2486
2487 static int
2488 read_mapping (FILE *mapfile,
2489 long long *addr,
2490 long long *endaddr,
2491 char *permissions,
2492 long long *offset,
2493 char *device, long long *inode, char *filename)
2494 {
2495 int ret = fscanf (mapfile, "%llx-%llx %s %llx %s %llx",
2496 addr, endaddr, permissions, offset, device, inode);
2497
2498 filename[0] = '\0';
2499 if (ret > 0 && ret != EOF)
2500 {
2501 /* Eat everything up to EOL for the filename. This will prevent
2502 weird filenames (such as one with embedded whitespace) from
2503 confusing this code. It also makes this code more robust with
2504 respect to annotations the kernel may add after the filename.
2505
2506 Note the filename is used for informational purposes
2507 only. */
2508 ret += fscanf (mapfile, "%[^\n]\n", filename);
2509 }
2510
2511 return (ret != 0 && ret != EOF);
2512 }
2513
2514 /* Fills the "to_find_memory_regions" target vector. Lists the memory
2515 regions in the inferior for a corefile. */
2516
2517 static int
2518 linux_nat_find_memory_regions (int (*func) (CORE_ADDR,
2519 unsigned long,
2520 int, int, int, void *), void *obfd)
2521 {
2522 long long pid = PIDGET (inferior_ptid);
2523 char mapsfilename[MAXPATHLEN];
2524 FILE *mapsfile;
2525 long long addr, endaddr, size, offset, inode;
2526 char permissions[8], device[8], filename[MAXPATHLEN];
2527 int read, write, exec;
2528 int ret;
2529
2530 /* Compose the filename for the /proc memory map, and open it. */
2531 sprintf (mapsfilename, "/proc/%lld/maps", pid);
2532 if ((mapsfile = fopen (mapsfilename, "r")) == NULL)
2533 error (_("Could not open %s."), mapsfilename);
2534
2535 if (info_verbose)
2536 fprintf_filtered (gdb_stdout,
2537 "Reading memory regions from %s\n", mapsfilename);
2538
2539 /* Now iterate until end-of-file. */
2540 while (read_mapping (mapsfile, &addr, &endaddr, &permissions[0],
2541 &offset, &device[0], &inode, &filename[0]))
2542 {
2543 size = endaddr - addr;
2544
2545 /* Get the segment's permissions. */
2546 read = (strchr (permissions, 'r') != 0);
2547 write = (strchr (permissions, 'w') != 0);
2548 exec = (strchr (permissions, 'x') != 0);
2549
2550 if (info_verbose)
2551 {
2552 fprintf_filtered (gdb_stdout,
2553 "Save segment, %lld bytes at 0x%s (%c%c%c)",
2554 size, paddr_nz (addr),
2555 read ? 'r' : ' ',
2556 write ? 'w' : ' ', exec ? 'x' : ' ');
2557 if (filename[0])
2558 fprintf_filtered (gdb_stdout, " for %s", filename);
2559 fprintf_filtered (gdb_stdout, "\n");
2560 }
2561
2562 /* Invoke the callback function to create the corefile
2563 segment. */
2564 func (addr, size, read, write, exec, obfd);
2565 }
2566 fclose (mapsfile);
2567 return 0;
2568 }
2569
2570 /* Records the thread's register state for the corefile note
2571 section. */
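/* The general-purpose registers end up in an NT_PRSTATUS note, the
   floating-point registers in an NT_FPREGSET note and, where
   FILL_FPXREGSET is defined, the extended FP state in an NT_PRXFPREG
   note, via BFD's elfcore_write_* helpers.  */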
2572
2573 static char *
2574 linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid,
2575 char *note_data, int *note_size)
2576 {
2577 gdb_gregset_t gregs;
2578 gdb_fpregset_t fpregs;
2579 #ifdef FILL_FPXREGSET
2580 gdb_fpxregset_t fpxregs;
2581 #endif
2582 unsigned long lwp = ptid_get_lwp (ptid);
2583 struct gdbarch *gdbarch = current_gdbarch;
2584 const struct regset *regset;
2585 int core_regset_p;
2586
2587 core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
2588 if (core_regset_p
2589 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
2590 sizeof (gregs))) != NULL
2591 && regset->collect_regset != NULL)
2592 regset->collect_regset (regset, current_regcache, -1,
2593 &gregs, sizeof (gregs));
2594 else
2595 fill_gregset (current_regcache, &gregs, -1);
2596
2597 note_data = (char *) elfcore_write_prstatus (obfd,
2598 note_data,
2599 note_size,
2600 lwp,
2601 stop_signal, &gregs);
2602
2603 if (core_regset_p
2604 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
2605 sizeof (fpregs))) != NULL
2606 && regset->collect_regset != NULL)
2607 regset->collect_regset (regset, current_regcache, -1,
2608 &fpregs, sizeof (fpregs));
2609 else
2610 fill_fpregset (current_regcache, &fpregs, -1);
2611
2612 note_data = (char *) elfcore_write_prfpreg (obfd,
2613 note_data,
2614 note_size,
2615 &fpregs, sizeof (fpregs));
2616
2617 #ifdef FILL_FPXREGSET
2618 if (core_regset_p
2619 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg-xfp",
2620 sizeof (fpxregs))) != NULL
2621 && regset->collect_regset != NULL)
2622 regset->collect_regset (regset, current_regcache, -1,
2623 &fpxregs, sizeof (fpxregs));
2624 else
2625 fill_fpxregset (current_regcache, &fpxregs, -1);
2626
2627 note_data = (char *) elfcore_write_prxfpreg (obfd,
2628 note_data,
2629 note_size,
2630 &fpxregs, sizeof (fpxregs));
2631 #endif
2632 return note_data;
2633 }
2634
2635 struct linux_nat_corefile_thread_data
2636 {
2637 bfd *obfd;
2638 char *note_data;
2639 int *note_size;
2640 int num_notes;
2641 };
2642
2643 /* Called once per LWP by iterate_over_lwps. Records the thread's
2644 register state for the corefile note section. */
2645
2646 static int
2647 linux_nat_corefile_thread_callback (struct lwp_info *ti, void *data)
2648 {
2649 struct linux_nat_corefile_thread_data *args = data;
2650 ptid_t saved_ptid = inferior_ptid;
2651
2652 inferior_ptid = ti->ptid;
2653 registers_changed ();
2654 /* FIXME should not be necessary; fill_gregset should do it automatically. */
2655 target_fetch_registers (current_regcache, -1);
2656 args->note_data = linux_nat_do_thread_registers (args->obfd,
2657 ti->ptid,
2658 args->note_data,
2659 args->note_size);
2660 args->num_notes++;
2661 inferior_ptid = saved_ptid;
2662 registers_changed ();
2663 /* FIXME should not be necessary; fill_gregset should do it automatically. */
2664 target_fetch_registers (current_regcache, -1);
2665
2666 return 0;
2667 }
2668
2669 /* Records the register state for the corefile note section. */
2670
2671 static char *
2672 linux_nat_do_registers (bfd *obfd, ptid_t ptid,
2673 char *note_data, int *note_size)
2674 {
2675 registers_changed ();
2676 /* FIXME should not be necessary; fill_gregset should do it automatically. */
2677 target_fetch_registers (current_regcache, -1);
2678 return linux_nat_do_thread_registers (obfd,
2679 ptid_build (ptid_get_pid (inferior_ptid),
2680 ptid_get_pid (inferior_ptid),
2681 0),
2682 note_data, note_size);
2684 }
2685
2686 /* Fills the "to_make_corefile_notes" target vector. Builds the note
2687 section for a corefile, and returns it in a malloc buffer. */
2688
2689 static char *
2690 linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
2691 {
2692 struct linux_nat_corefile_thread_data thread_args;
2693 struct cleanup *old_chain;
2694 char fname[16] = { '\0' };
2695 char psargs[80] = { '\0' };
2696 char *note_data = NULL;
2697 ptid_t current_ptid = inferior_ptid;
2698 gdb_byte *auxv;
2699 int auxv_len;
2700
2701 if (get_exec_file (0))
2702 {
2703 strncpy (fname, strrchr (get_exec_file (0), '/') + 1, sizeof (fname));
2704 strncpy (psargs, get_exec_file (0), sizeof (psargs));
2705 if (get_inferior_args ())
2706 {
2707 strncat (psargs, " ", sizeof (psargs) - strlen (psargs));
2708 strncat (psargs, get_inferior_args (),
2709 sizeof (psargs) - strlen (psargs));
2710 }
2711 note_data = (char *) elfcore_write_prpsinfo (obfd,
2712 note_data,
2713 note_size, fname, psargs);
2714 }
2715
2716 /* Dump information for threads. */
2717 thread_args.obfd = obfd;
2718 thread_args.note_data = note_data;
2719 thread_args.note_size = note_size;
2720 thread_args.num_notes = 0;
2721 iterate_over_lwps (linux_nat_corefile_thread_callback, &thread_args);
2722 if (thread_args.num_notes == 0)
2723 {
2724 /* iterate_over_lwps didn't come up with any LWPs; just
2725 use inferior_ptid. */
2726 note_data = linux_nat_do_registers (obfd, inferior_ptid,
2727 note_data, note_size);
2728 }
2729 else
2730 {
2731 note_data = thread_args.note_data;
2732 }
2733
2734 auxv_len = target_read_alloc (&current_target, TARGET_OBJECT_AUXV,
2735 NULL, &auxv);
2736 if (auxv_len > 0)
2737 {
2738 note_data = elfcore_write_note (obfd, note_data, note_size,
2739 "CORE", NT_AUXV, auxv, auxv_len);
2740 xfree (auxv);
2741 }
2742
2743 make_cleanup (xfree, note_data);
2744 return note_data;
2745 }
2746
2747 /* Implement the "info proc" command. */
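/* Typical invocations from the GDB prompt, per the argument parsing
   below:

     (gdb) info proc
     (gdb) info proc mappings
     (gdb) info proc 1234 all  */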
2748
2749 static void
2750 linux_nat_info_proc_cmd (char *args, int from_tty)
2751 {
2752 long long pid = PIDGET (inferior_ptid);
2753 FILE *procfile;
2754 char **argv = NULL;
2755 char buffer[MAXPATHLEN];
2756 char fname1[MAXPATHLEN], fname2[MAXPATHLEN];
2757 int cmdline_f = 1;
2758 int cwd_f = 1;
2759 int exe_f = 1;
2760 int mappings_f = 0;
2761 int environ_f = 0;
2762 int status_f = 0;
2763 int stat_f = 0;
2764 int all = 0;
2765 struct stat dummy;
2766
2767 if (args)
2768 {
2769 /* Break up 'args' into an argv array. */
2770 if ((argv = buildargv (args)) == NULL)
2771 nomem (0);
2772 else
2773 make_cleanup_freeargv (argv);
2774 }
2775 while (argv != NULL && *argv != NULL)
2776 {
2777 if (isdigit (argv[0][0]))
2778 {
2779 pid = strtoul (argv[0], NULL, 10);
2780 }
2781 else if (strncmp (argv[0], "mappings", strlen (argv[0])) == 0)
2782 {
2783 mappings_f = 1;
2784 }
2785 else if (strcmp (argv[0], "status") == 0)
2786 {
2787 status_f = 1;
2788 }
2789 else if (strcmp (argv[0], "stat") == 0)
2790 {
2791 stat_f = 1;
2792 }
2793 else if (strcmp (argv[0], "cmd") == 0)
2794 {
2795 cmdline_f = 1;
2796 }
2797 else if (strncmp (argv[0], "exe", strlen (argv[0])) == 0)
2798 {
2799 exe_f = 1;
2800 }
2801 else if (strcmp (argv[0], "cwd") == 0)
2802 {
2803 cwd_f = 1;
2804 }
2805 else if (strncmp (argv[0], "all", strlen (argv[0])) == 0)
2806 {
2807 all = 1;
2808 }
2809 else
2810 {
2811 /* [...] (future options here) */
2812 }
2813 argv++;
2814 }
2815 if (pid == 0)
2816 error (_("No current process: you must name one."));
2817
2818 sprintf (fname1, "/proc/%lld", pid);
2819 if (stat (fname1, &dummy) != 0)
2820 error (_("No /proc directory: '%s'"), fname1);
2821
2822 printf_filtered (_("process %lld\n"), pid);
2823 if (cmdline_f || all)
2824 {
2825 sprintf (fname1, "/proc/%lld/cmdline", pid);
2826 if ((procfile = fopen (fname1, "r")) != NULL)
2827 {
2828 fgets (buffer, sizeof (buffer), procfile);
2829 printf_filtered ("cmdline = '%s'\n", buffer);
2830 fclose (procfile);
2831 }
2832 else
2833 warning (_("unable to open /proc file '%s'"), fname1);
2834 }
2835 if (cwd_f || all)
2836 {
2837 sprintf (fname1, "/proc/%lld/cwd", pid);
2838 memset (fname2, 0, sizeof (fname2));
2839 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
2840 printf_filtered ("cwd = '%s'\n", fname2);
2841 else
2842 warning (_("unable to read link '%s'"), fname1);
2843 }
2844 if (exe_f || all)
2845 {
2846 sprintf (fname1, "/proc/%lld/exe", pid);
2847 memset (fname2, 0, sizeof (fname2));
2848 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
2849 printf_filtered ("exe = '%s'\n", fname2);
2850 else
2851 warning (_("unable to read link '%s'"), fname1);
2852 }
2853 if (mappings_f || all)
2854 {
2855 sprintf (fname1, "/proc/%lld/maps", pid);
2856 if ((procfile = fopen (fname1, "r")) != NULL)
2857 {
2858 long long addr, endaddr, size, offset, inode;
2859 char permissions[8], device[8], filename[MAXPATHLEN];
2860
2861 printf_filtered (_("Mapped address spaces:\n\n"));
2862 if (TARGET_ADDR_BIT == 32)
2863 {
2864 printf_filtered ("\t%10s %10s %10s %10s %7s\n",
2865 "Start Addr",
2866 " End Addr",
2867 " Size", " Offset", "objfile");
2868 }
2869 else
2870 {
2871 printf_filtered (" %18s %18s %10s %10s %7s\n",
2872 "Start Addr",
2873 " End Addr",
2874 " Size", " Offset", "objfile");
2875 }
2876
2877 while (read_mapping (procfile, &addr, &endaddr, &permissions[0],
2878 &offset, &device[0], &inode, &filename[0]))
2879 {
2880 size = endaddr - addr;
2881
2882 /* FIXME: carlton/2003-08-27: Maybe the printf_filtered
2883 calls here (and possibly above) should be abstracted
2884 out into their own functions? Andrew suggests using
2885 a generic local_address_string instead to print out
2886 the addresses; that makes sense to me, too. */
2887
2888 if (TARGET_ADDR_BIT == 32)
2889 {
2890 printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n",
2891 (unsigned long) addr, /* FIXME: pr_addr */
2892 (unsigned long) endaddr,
2893 (int) size,
2894 (unsigned int) offset,
2895 filename[0] ? filename : "");
2896 }
2897 else
2898 {
2899 printf_filtered (" %#18lx %#18lx %#10x %#10x %7s\n",
2900 (unsigned long) addr, /* FIXME: pr_addr */
2901 (unsigned long) endaddr,
2902 (int) size,
2903 (unsigned int) offset,
2904 filename[0] ? filename : "");
2905 }
2906 }
2907
2908 fclose (procfile);
2909 }
2910 else
2911 warning (_("unable to open /proc file '%s'"), fname1);
2912 }
2913 if (status_f || all)
2914 {
2915 sprintf (fname1, "/proc/%lld/status", pid);
2916 if ((procfile = fopen (fname1, "r")) != NULL)
2917 {
2918 while (fgets (buffer, sizeof (buffer), procfile) != NULL)
2919 puts_filtered (buffer);
2920 fclose (procfile);
2921 }
2922 else
2923 warning (_("unable to open /proc file '%s'"), fname1);
2924 }
2925 if (stat_f || all)
2926 {
2927 sprintf (fname1, "/proc/%lld/stat", pid);
2928 if ((procfile = fopen (fname1, "r")) != NULL)
2929 {
2930 int itmp;
2931 char ctmp;
2932
2933 if (fscanf (procfile, "%d ", &itmp) > 0)
2934 printf_filtered (_("Process: %d\n"), itmp);
2935 if (fscanf (procfile, "%s ", &buffer[0]) > 0)
2936 printf_filtered (_("Exec file: %s\n"), buffer);
2937 if (fscanf (procfile, "%c ", &ctmp) > 0)
2938 printf_filtered (_("State: %c\n"), ctmp);
2939 if (fscanf (procfile, "%d ", &itmp) > 0)
2940 printf_filtered (_("Parent process: %d\n"), itmp);
2941 if (fscanf (procfile, "%d ", &itmp) > 0)
2942 printf_filtered (_("Process group: %d\n"), itmp);
2943 if (fscanf (procfile, "%d ", &itmp) > 0)
2944 printf_filtered (_("Session id: %d\n"), itmp);
2945 if (fscanf (procfile, "%d ", &itmp) > 0)
2946 printf_filtered (_("TTY: %d\n"), itmp);
2947 if (fscanf (procfile, "%d ", &itmp) > 0)
2948 printf_filtered (_("TTY owner process group: %d\n"), itmp);
2949 if (fscanf (procfile, "%u ", &itmp) > 0)
2950 printf_filtered (_("Flags: 0x%x\n"), itmp);
2951 if (fscanf (procfile, "%u ", &itmp) > 0)
2952 printf_filtered (_("Minor faults (no memory page): %u\n"),
2953 (unsigned int) itmp);
2954 if (fscanf (procfile, "%u ", &itmp) > 0)
2955 printf_filtered (_("Minor faults, children: %u\n"),
2956 (unsigned int) itmp);
2957 if (fscanf (procfile, "%u ", &itmp) > 0)
2958 printf_filtered (_("Major faults (memory page faults): %u\n"),
2959 (unsigned int) itmp);
2960 if (fscanf (procfile, "%u ", &itmp) > 0)
2961 printf_filtered (_("Major faults, children: %u\n"),
2962 (unsigned int) itmp);
2963 if (fscanf (procfile, "%d ", &itmp) > 0)
2964 printf_filtered ("utime: %d\n", itmp);
2965 if (fscanf (procfile, "%d ", &itmp) > 0)
2966 printf_filtered ("stime: %d\n", itmp);
2967 if (fscanf (procfile, "%d ", &itmp) > 0)
2968 printf_filtered ("utime, children: %d\n", itmp);
2969 if (fscanf (procfile, "%d ", &itmp) > 0)
2970 printf_filtered ("stime, children: %d\n", itmp);
2971 if (fscanf (procfile, "%d ", &itmp) > 0)
2972 printf_filtered (_("jiffies remaining in current time slice: %d\n"),
2973 itmp);
2974 if (fscanf (procfile, "%d ", &itmp) > 0)
2975 printf_filtered ("'nice' value: %d\n", itmp);
2976 if (fscanf (procfile, "%u ", &itmp) > 0)
2977 printf_filtered (_("jiffies until next timeout: %u\n"),
2978 (unsigned int) itmp);
2979 if (fscanf (procfile, "%u ", &itmp) > 0)
2980 printf_filtered ("jiffies until next SIGALRM: %u\n",
2981 (unsigned int) itmp);
2982 if (fscanf (procfile, "%d ", &itmp) > 0)
2983 printf_filtered (_("start time (jiffies since system boot): %d\n"),
2984 itmp);
2985 if (fscanf (procfile, "%u ", &itmp) > 0)
2986 printf_filtered (_("Virtual memory size: %u\n"),
2987 (unsigned int) itmp);
2988 if (fscanf (procfile, "%u ", &itmp) > 0)
2989 printf_filtered (_("Resident set size: %u\n"), (unsigned int) itmp);
2990 if (fscanf (procfile, "%u ", &itmp) > 0)
2991 printf_filtered ("rlim: %u\n", (unsigned int) itmp);
2992 if (fscanf (procfile, "%u ", &itmp) > 0)
2993 printf_filtered (_("Start of text: 0x%x\n"), itmp);
2994 if (fscanf (procfile, "%u ", &itmp) > 0)
2995 printf_filtered (_("End of text: 0x%x\n"), itmp);
2996 if (fscanf (procfile, "%u ", &itmp) > 0)
2997 printf_filtered (_("Start of stack: 0x%x\n"), itmp);
2998 #if 0 /* Don't know how architecture-dependent the rest is...
2999 Anyway the signal bitmap info is available from "status". */
3000 if (fscanf (procfile, "%u ", &itmp) > 0) /* FIXME arch? */
3001 printf_filtered (_("Kernel stack pointer: 0x%x\n"), itmp);
3002 if (fscanf (procfile, "%u ", &itmp) > 0) /* FIXME arch? */
3003 printf_filtered (_("Kernel instr pointer: 0x%x\n"), itmp);
3004 if (fscanf (procfile, "%d ", &itmp) > 0)
3005 printf_filtered (_("Pending signals bitmap: 0x%x\n"), itmp);
3006 if (fscanf (procfile, "%d ", &itmp) > 0)
3007 printf_filtered (_("Blocked signals bitmap: 0x%x\n"), itmp);
3008 if (fscanf (procfile, "%d ", &itmp) > 0)
3009 printf_filtered (_("Ignored signals bitmap: 0x%x\n"), itmp);
3010 if (fscanf (procfile, "%d ", &itmp) > 0)
3011 printf_filtered (_("Catched signals bitmap: 0x%x\n"), itmp);
3012 if (fscanf (procfile, "%u ", &itmp) > 0) /* FIXME arch? */
3013 printf_filtered (_("wchan (system call): 0x%x\n"), itmp);
3014 #endif
3015 fclose (procfile);
3016 }
3017 else
3018 warning (_("unable to open /proc file '%s'"), fname1);
3019 }
3020 }
3021
3022 /* Implement the to_xfer_partial interface for memory reads using the /proc
3023 filesystem. Because we can use a single read() call for /proc, this
3024 can be much more efficient than banging away at PTRACE_PEEKTEXT,
3025 but it doesn't support writes. */
3026
3027 static LONGEST
3028 linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
3029 const char *annex, gdb_byte *readbuf,
3030 const gdb_byte *writebuf,
3031 ULONGEST offset, LONGEST len)
3032 {
3033 LONGEST ret;
3034 int fd;
3035 char filename[64];
3036
3037 if (object != TARGET_OBJECT_MEMORY || !readbuf)
3038 return 0;
3039
3040 /* Don't bother for short transfers of a word or two. */
3041 if (len < 3 * sizeof (long))
3042 return 0;
3043
3044 /* We could keep this file open and cache it - possibly one per
3045 thread. That requires some juggling, but is even faster. */
3046 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
3047 fd = open (filename, O_RDONLY | O_LARGEFILE);
3048 if (fd == -1)
3049 return 0;
3050
3051 /* If pread64 is available, use it. It's faster if the kernel
3052 supports it (only one syscall), and it's 64-bit safe even on
3053 32-bit platforms (for instance, SPARC debugging a SPARC64
3054 application). */
3055 #ifdef HAVE_PREAD64
3056 if (pread64 (fd, readbuf, len, offset) != len)
3057 #else
3058 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
3059 #endif
3060 ret = 0;
3061 else
3062 ret = len;
3063
3064 close (fd);
3065 return ret;
3066 }
3067
3068 /* Parse LINE as a signal set and add its set bits to SIGS. */
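/* For example, the mask part of a /proc status line such as
   "0000000000000200" (value 0x200, i.e. bit 9 set) marks signal 10
   (SIGUSR1 on most GNU/Linux targets) as a member, since bit N of the
   mask corresponds to signal N + 1.  */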
3069
3070 static void
3071 add_line_to_sigset (const char *line, sigset_t *sigs)
3072 {
3073 int len = strlen (line) - 1;
3074 const char *p;
3075 int signum;
3076
3077 if (line[len] != '\n')
3078 error (_("Could not parse signal set: %s"), line);
3079
3080 p = line;
3081 signum = len * 4;
3082 while (len-- > 0)
3083 {
3084 int digit;
3085
3086 if (*p >= '0' && *p <= '9')
3087 digit = *p - '0';
3088 else if (*p >= 'a' && *p <= 'f')
3089 digit = *p - 'a' + 10;
3090 else
3091 error (_("Could not parse signal set: %s"), line);
3092
3093 signum -= 4;
3094
3095 if (digit & 1)
3096 sigaddset (sigs, signum + 1);
3097 if (digit & 2)
3098 sigaddset (sigs, signum + 2);
3099 if (digit & 4)
3100 sigaddset (sigs, signum + 3);
3101 if (digit & 8)
3102 sigaddset (sigs, signum + 4);
3103
3104 p++;
3105 }
3106 }
3107
3108 /* Find process PID's pending signals from /proc/pid/status and set
3109 SIGS to match. */
3110
3111 void
3112 linux_proc_pending_signals (int pid, sigset_t *pending, sigset_t *blocked, sigset_t *ignored)
3113 {
3114 FILE *procfile;
3115 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
3116 int signum;
3117
3118 sigemptyset (pending);
3119 sigemptyset (blocked);
3120 sigemptyset (ignored);
3121 sprintf (fname, "/proc/%d/status", pid);
3122 procfile = fopen (fname, "r");
3123 if (procfile == NULL)
3124 error (_("Could not open %s"), fname);
3125
3126 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
3127 {
3128 /* Normal queued signals are on the SigPnd line in the status
3129 file. However, 2.6 kernels also have a "shared" pending
3130 queue for delivering signals to a thread group, so check for
3131 a ShdPnd line also.
3132
3133 Unfortunately some Red Hat kernels include the shared pending
3134 queue but not the ShdPnd status field. */
3135
3136 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
3137 add_line_to_sigset (buffer + 8, pending);
3138 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
3139 add_line_to_sigset (buffer + 8, pending);
3140 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
3141 add_line_to_sigset (buffer + 8, blocked);
3142 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
3143 add_line_to_sigset (buffer + 8, ignored);
3144 }
3145
3146 fclose (procfile);
3147 }
3148
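/* Target to_xfer_partial method for the generic GNU/Linux target.
   AUXV requests go through procfs_xfer_auxv, memory reads are first
   attempted via the fast /proc path above, and everything else falls
   through to the method we inherited.  */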
3149 static LONGEST
3150 linux_xfer_partial (struct target_ops *ops, enum target_object object,
3151 const char *annex, gdb_byte *readbuf,
3152 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
3153 {
3154 LONGEST xfer;
3155
3156 if (object == TARGET_OBJECT_AUXV)
3157 return procfs_xfer_auxv (ops, object, annex, readbuf, writebuf,
3158 offset, len);
3159
3160 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
3161 offset, len);
3162 if (xfer != 0)
3163 return xfer;
3164
3165 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
3166 offset, len);
3167 }
3168
3169 /* Create a prototype generic Linux target. The client can override
3170 it with local methods. */
3171
3172 static void
3173 linux_target_install_ops (struct target_ops *t)
3174 {
3175 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
3176 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
3177 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
3178 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
3179 t->to_post_startup_inferior = linux_child_post_startup_inferior;
3180 t->to_post_attach = linux_child_post_attach;
3181 t->to_follow_fork = linux_child_follow_fork;
3182 t->to_find_memory_regions = linux_nat_find_memory_regions;
3183 t->to_make_corefile_notes = linux_nat_make_corefile_notes;
3184
3185 super_xfer_partial = t->to_xfer_partial;
3186 t->to_xfer_partial = linux_xfer_partial;
3187 }
3188
3189 struct target_ops *
3190 linux_target (void)
3191 {
3192 struct target_ops *t;
3193
3194 t = inf_ptrace_target ();
3195 linux_target_install_ops (t);
3196
3197 return t;
3198 }
3199
3200 struct target_ops *
3201 linux_trad_target (CORE_ADDR (*register_u_offset)(int))
3202 {
3203 struct target_ops *t;
3204
3205 t = inf_ptrace_trad_target (register_u_offset);
3206 linux_target_install_ops (t);
3207
3208 return t;
3209 }
3210
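/* A rough sketch, with illustrative arch_linux_* names, of how an
   architecture-specific native file is expected to combine the
   target constructors above with linux_nat_add_target below:

     void
     _initialize_arch_linux_nat (void)
     {
       struct target_ops *t = linux_target ();

       t->to_fetch_registers = arch_linux_fetch_inferior_registers;
       t->to_store_registers = arch_linux_store_inferior_registers;

       linux_nat_add_target (t);
     }

   linux_nat_add_target then layers the multi-threaded methods below
   on top of the single-threaded ops it is given.  */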
3211 void
3212 linux_nat_add_target (struct target_ops *t)
3213 {
3214 /* Save the provided single-threaded target. We save this in a separate
3215 variable because another target we've inherited from (e.g. inf-ptrace)
3216 may have saved a pointer to T; we want to use it for the final
3217 process stratum target. */
3218 linux_ops_saved = *t;
3219 linux_ops = &linux_ops_saved;
3220
3221 /* Override some methods for multithreading. */
3222 t->to_attach = linux_nat_attach;
3223 t->to_detach = linux_nat_detach;
3224 t->to_resume = linux_nat_resume;
3225 t->to_wait = linux_nat_wait;
3226 t->to_xfer_partial = linux_nat_xfer_partial;
3227 t->to_kill = linux_nat_kill;
3228 t->to_mourn_inferior = linux_nat_mourn_inferior;
3229 t->to_thread_alive = linux_nat_thread_alive;
3230 t->to_pid_to_str = linux_nat_pid_to_str;
3231 t->to_has_thread_control = tc_schedlock;
3232
3233 /* We don't change the stratum; this target will sit at
3234 process_stratum and thread_db will sit at thread_stratum. This
3235 is a little strange, since this is a multi-threaded-capable
3236 target, but we want to be on the stack below thread_db, and we
3237 also want to be used for single-threaded processes. */
3238
3239 add_target (t);
3240
3241 /* TODO: Eliminate this and have libthread_db use
3242 find_target_beneath. */
3243 thread_db_init (t);
3244 }
3245
3246 void
3247 _initialize_linux_nat (void)
3248 {
3249 struct sigaction action;
3250
3251 add_info ("proc", linux_nat_info_proc_cmd, _("\
3252 Show /proc process information about any running process.\n\
3253 Specify any process id, or use the program being debugged by default.\n\
3254 Specify any of the following keywords for detailed info:\n\
3255 mappings -- list of mapped memory regions.\n\
3256 stat -- list a bunch of random process info.\n\
3257 status -- list a different bunch of random process info.\n\
3258 all -- list all available /proc info."));
3259
3260 /* Save the original signal mask. */
3261 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
3262
3263 action.sa_handler = sigchld_handler;
3264 sigemptyset (&action.sa_mask);
3265 action.sa_flags = SA_RESTART;
3266 sigaction (SIGCHLD, &action, NULL);
3267
3268 /* Make sure we don't block SIGCHLD during a sigsuspend. */
3269 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
3270 sigdelset (&suspend_mask, SIGCHLD);
3271
3272 sigemptyset (&blocked_mask);
3273
3274 add_setshow_zinteger_cmd ("lin-lwp", no_class, &debug_linux_nat, _("\
3275 Set debugging of GNU/Linux lwp module."), _("\
3276 Show debugging of GNU/Linux lwp module."), _("\
3277 Enables printf debugging output."),
3278 NULL,
3279 show_debug_linux_nat,
3280 &setdebuglist, &showdebuglist);
3281 }
3282 \f
3283
3284 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
3285 the GNU/Linux Threads library and therefore doesn't really belong
3286 here. */
3287
3288 /* Read variable NAME in the target and return its value if found.
3289 Otherwise return zero. It is assumed that the type of the variable
3290 is `int'. */
3291
3292 static int
3293 get_signo (const char *name)
3294 {
3295 struct minimal_symbol *ms;
3296 int signo;
3297
3298 ms = lookup_minimal_symbol (name, NULL, NULL);
3299 if (ms == NULL)
3300 return 0;
3301
3302 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
3303 sizeof (signo)) != 0)
3304 return 0;
3305
3306 return signo;
3307 }
3308
3309 /* Return the set of signals used by the threads library in *SET. */
3310
3311 void
3312 lin_thread_get_thread_signals (sigset_t *set)
3313 {
3314 struct sigaction action;
3315 int restart, cancel;
3316
3317 sigemptyset (set);
3318
3319 restart = get_signo ("__pthread_sig_restart");
3320 cancel = get_signo ("__pthread_sig_cancel");
3321
3322 /* LinuxThreads normally uses the first two RT signals, but in some legacy
3323 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
3324 not provide any way for the debugger to query the signal numbers -
3325 fortunately they don't change! */
3326
3327 if (restart == 0)
3328 restart = __SIGRTMIN;
3329
3330 if (cancel == 0)
3331 cancel = __SIGRTMIN + 1;
3332
3333 sigaddset (set, restart);
3334 sigaddset (set, cancel);
3335
3336 /* The GNU/Linux Threads library makes terminating threads send a
3337 special "cancel" signal instead of SIGCHLD. Make sure we catch
3338 those (to prevent them from terminating GDB itself, which is
3339 likely to be their default action) and treat them the same way as
3340 SIGCHLD. */
3341
3342 action.sa_handler = sigchld_handler;
3343 sigemptyset (&action.sa_mask);
3344 action.sa_flags = SA_RESTART;
3345 sigaction (cancel, &action, NULL);
3346
3347 /* We block the "cancel" signal throughout this code ... */
3348 sigaddset (&blocked_mask, cancel);
3349 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
3350
3351 /* ... except during a sigsuspend. */
3352 sigdelset (&suspend_mask, cancel);
3353 }
3354