/* GNU/Linux native-dependent code common to multiple platforms.

   Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
   Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "inferior.h"
#include "target.h"
#include "gdb_string.h"
#include "gdb_wait.h"
#include "gdb_assert.h"
#ifdef HAVE_TKILL_SYSCALL
#include <unistd.h>
#include <sys/syscall.h>
#endif
#include <sys/ptrace.h>
#include "linux-nat.h"
#include "linux-fork.h"
#include "gdbthread.h"
#include "gdbcmd.h"
#include "regcache.h"
#include "regset.h"
#include "inf-ptrace.h"
#include "auxv.h"
#include <sys/param.h>		/* for MAXPATHLEN */
#include <sys/procfs.h>		/* for elf_gregset etc. */
#include "elf-bfd.h"		/* for elfcore_write_* */
#include "gregset.h"		/* for gregset */
#include "gdbcore.h"		/* for get_exec_file */
#include <ctype.h>		/* for isdigit */
#include "gdbthread.h"		/* for struct thread_info etc. */
#include "gdb_stat.h"		/* for struct stat */
#include <fcntl.h>		/* for O_RDONLY */
#include "inf-loop.h"
#include "event-loop.h"
#include "event-top.h"

/* This comment documents the high-level logic of this file.

Waiting for events in sync mode
===============================

When waiting for an event in a specific thread, we just use waitpid,
passing the specific pid, and not passing WNOHANG.

When waiting for an event in all threads, waitpid is not quite adequate.
Prior to version 2.4, Linux could wait for events either in the main
thread or in secondary threads, but not in both at once (2.4 added the
__WALL flag).  So, if we used blocking waitpid, we might miss an event.
The solution is to use non-blocking waitpid, together with sigsuspend.
First, we use non-blocking waitpid to get an event in the main process,
if any.  Second, we use non-blocking waitpid with the __WCLONE flag to
check for events in cloned processes.  If nothing is found, we use
sigsuspend to wait for SIGCHLD.  When SIGCHLD arrives, it means
something happened to a child process -- and SIGCHLD will be delivered
both for events in the main debugged process and in cloned processes.
As soon as we know there's an event, we get back to calling nonblocking
waitpid with and without __WCLONE.

Note that SIGCHLD should be blocked between the waitpid and sigsuspend
calls, so that we don't miss a signal.  If SIGCHLD arrives in between,
while it's blocked, the signal becomes pending and sigsuspend
immediately notices it and returns.
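
As an illustrative sketch only (the real logic lives in linux_nat_wait
and my_waitpid below; chld_mask and prev_mask are placeholder names),
the pattern is roughly:

    sigprocmask (SIG_BLOCK, &chld_mask, &prev_mask);
    for (;;)
      {
        if (waitpid (-1, &status, WNOHANG) > 0)             [main process]
          break;
        if (waitpid (-1, &status, WNOHANG | __WCLONE) > 0)  [clones]
          break;
        sigsuspend (&prev_mask);                            [wait for SIGCHLD]
      }

A SIGCHLD that arrived while it was blocked is left pending, so the
sigsuspend above returns immediately and the wakeup is not lost.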

Waiting for events in async mode
================================

In async mode, GDB should always be ready to handle both user input and
target events, so neither blocking waitpid nor sigsuspend are viable
options.  Instead, we should notify the GDB main event loop whenever
there's an unprocessed event from the target.  The only way to notify
this event loop is to make it wait on input from a pipe, and write
something to the pipe whenever there's an event.  Obviously, failing to
notify the event loop when there is a target event is bad.  Notifying
the event loop when there is no event from the target is merely
wasteful: linux-nat.c will detect that there's no event and report an
event of type TARGET_WAITKIND_IGNORE, but the wasted round trip is
better avoided.

The main design point is that every time GDB is outside linux-nat.c, we
have a SIGCHLD handler installed that is called when something happens
to the target and notifies the GDB event loop.  Also, the event is
extracted from the target using waitpid and stored for future use.
Whenever the GDB core decides to handle the event, and calls into
linux-nat.c, we disable SIGCHLD and process things as in sync mode,
except that before the waitpid call we check if there are any
previously read events.

It could happen that during event processing, we'll try to get more
events than there are events in the local queue, which will result in a
waitpid call.  Those waitpid calls, while blocking, are guaranteed to
always have something for waitpid to return.  E.g., stopping a thread
with SIGSTOP, and waiting for the lwp to stop.

The event loop is notified about new events using a pipe.  The SIGCHLD
handler does waitpid and writes the results into the pipe.  The GDB
event loop has the other end of the pipe among its event sources.  When
the event loop starts to process the event and calls a function in
linux-nat.c, all events from the pipe are transferred into a local
queue and SIGCHLD is blocked.  Further processing goes as in sync mode.
Before we return from linux_nat_wait, we transfer all unprocessed
events from the local queue back to the pipe, so that when we get back
to the event loop, the event loop will notice there's something more to
do.
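
As a rough sketch of that handler (illustrative only; the real handler
below also records, for each event, the waitpid options that produced
it, and __WALL is used here just for brevity):

    static void
    sigchld_handler (int signo)
    {
      int pid, status;

      while ((pid = waitpid (-1, &status, WNOHANG | __WALL)) > 0)
        write (linux_nat_event_pipe[1], &status, sizeof (status));
    }

Each write wakes the event loop, which is selecting on the read end of
linux_nat_event_pipe.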

SIGCHLD is blocked when we're inside target_wait, so that should we
actually want to wait for some more events, the SIGCHLD handler does
not steal them from us.  Technically, it would be possible to add new
events to the local queue instead, but it's about the same amount of
work as blocking SIGCHLD.

This moving of events from the pipe into a local queue and back into
the pipe when we enter/leave linux-nat.c is somewhat ugly.
Unfortunately, the GDB event loop is home-grown and incapable of
waiting on any kind of queue.

Use of signals
==============

We stop threads by sending a SIGSTOP.  The use of SIGSTOP instead of
another signal is not entirely significant; we just need a signal that
will be delivered, so that we can intercept it.  SIGSTOP's advantage is
that it can not be blocked.  A disadvantage is that it is not a
real-time signal, so it can only be queued once; we do not keep track
of other sources of SIGSTOP.

Two other signals that can't be blocked are SIGCONT and SIGKILL.  But
we can't use them, because they have special behavior when the signal
is generated - not when it is delivered.  SIGCONT resumes the entire
thread group and SIGKILL kills the entire thread group.

A delivered SIGSTOP would stop the entire thread group, not just the
thread we tkill'd.  But we never let the SIGSTOP be delivered; we
always intercept and cancel it (by PTRACE_CONT without passing
SIGSTOP).
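
Concretely, the stop/cancel dance is, schematically (a sketch, not the
exact code; see stop_callback and stop_wait_callback below):

    tkill (lwpid, SIGSTOP);                [ask that thread to stop]
    waitpid (lwpid, &status, ...);         [observe the SIGSTOP stop;
                                            __WCLONE for clone LWPs]
    ...
    ptrace (PTRACE_CONT, lwpid, 0, 0);     [resume; passing 0 instead of
                                            SIGSTOP cancels the signal]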

We could use a real-time signal instead.  This would solve those
problems; we could use PTRACE_GETSIGINFO to locate the specific stop
signals sent by GDB.  But we would still have to have some support for
SIGSTOP, since PTRACE_ATTACH generates it, and there are races with
trying to find a signal that is not blocked.  */

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* If the system headers did not provide the constants, hard-code the
   normal values.  */
#ifndef PTRACE_EVENT_FORK

#define PTRACE_SETOPTIONS	0x4200
#define PTRACE_GETEVENTMSG	0x4201

/* options set using PTRACE_SETOPTIONS */
#define PTRACE_O_TRACESYSGOOD	0x00000001
#define PTRACE_O_TRACEFORK	0x00000002
#define PTRACE_O_TRACEVFORK	0x00000004
#define PTRACE_O_TRACECLONE	0x00000008
#define PTRACE_O_TRACEEXEC	0x00000010
#define PTRACE_O_TRACEVFORKDONE	0x00000020
#define PTRACE_O_TRACEEXIT	0x00000040

/* Wait extended result codes for the above trace options.  */
#define PTRACE_EVENT_FORK	1
#define PTRACE_EVENT_VFORK	2
#define PTRACE_EVENT_CLONE	3
#define PTRACE_EVENT_EXEC	4
#define PTRACE_EVENT_VFORK_DONE	5
#define PTRACE_EVENT_EXIT	6

#endif /* PTRACE_EVENT_FORK */
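
/* When one of the above events fires, the kernel reports it as a
   SIGTRAP stop and encodes the event code in the upper half of the
   wait status; schematically, this is how the decoding below works
   (e.g. in linux_handle_extended_wait and wait_lwp):

     if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
         && status >> 16 != 0)
       event = status >> 16;     [one of the PTRACE_EVENT_* codes]  */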

/* We can't always assume that this flag is available, but all systems
   with the ptrace event handlers also have __WALL, so it's safe to use
   here.  */
#ifndef __WALL
#define __WALL          0x40000000 /* Wait for any child.  */
#endif

#ifndef PTRACE_GETSIGINFO
#define PTRACE_GETSIGINFO    0x4202
#endif

/* The single-threaded native GNU/Linux target_ops.  We save a pointer for
   the use of the multi-threaded target.  */
static struct target_ops *linux_ops;
static struct target_ops linux_ops_saved;

/* The method to call, if any, when a new thread is attached.  */
static void (*linux_nat_new_thread) (ptid_t);

/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
   Called by our to_xfer_partial.  */
static LONGEST (*super_xfer_partial) (struct target_ops *,
				      enum target_object,
				      const char *, gdb_byte *,
				      const gdb_byte *,
				      ULONGEST, LONGEST);

static int debug_linux_nat;
static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
		      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
		    value);
}

static int debug_linux_nat_async = 0;
static void
show_debug_linux_nat_async (struct ui_file *file, int from_tty,
			    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux async lwp module is %s.\n"),
		    value);
}

static int linux_parent_pid;

struct simple_pid_list
{
  int pid;
  int status;
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
   can not be used, 1 if it can.  */

static int linux_supports_tracefork_flag = -1;

/* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
   PTRACE_O_TRACEVFORKDONE.  */

static int linux_supports_tracevforkdone_flag = -1;

/* Async mode support */

/* True if async mode is currently on.  */
static int linux_nat_async_enabled;

/* Zero if the async mode, although enabled, is masked, which means
   linux_nat_wait should behave as if async mode was off.  */
static int linux_nat_async_mask_value = 1;

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_nat_event_pipe[2] = { -1, -1 };

/* Number of queued events in the pipe.  */
static volatile int linux_nat_num_queued_events;

/* If async mode is on, true if we're listening for events; false if
   target events are blocked.  */
static int linux_nat_async_events_enabled;

static int linux_nat_async_events (int enable);
static void pipe_to_local_event_queue (void);
static void local_event_queue_to_pipe (void);
static void linux_nat_event_pipe_push (int pid, int status, int options);
static int linux_nat_event_pipe_pop (int* ptr_status, int* ptr_options);
static void linux_nat_set_async_mode (int on);
static void linux_nat_async (void (*callback)
			     (enum inferior_event_type event_type,
			      void *context),
			     void *context);
static int linux_nat_async_mask (int mask);
static int kill_lwp (int lwpid, int signo);

/* Captures the result of a successful waitpid call, along with the
   options used in that call.  */
struct waitpid_result
{
  int pid;
  int status;
  int options;
  struct waitpid_result *next;
};

/* A singly-linked list of the results of the waitpid calls performed
   in the async SIGCHLD handler.  */
static struct waitpid_result *waitpid_queue = NULL;

static int
queued_waitpid (int pid, int *status, int flags)
{
  struct waitpid_result *msg = waitpid_queue, *prev = NULL;

  if (debug_linux_nat_async)
    fprintf_unfiltered (gdb_stdlog,
			"\
QWPID: linux_nat_async_events_enabled(%d), linux_nat_num_queued_events(%d)\n",
			linux_nat_async_events_enabled,
			linux_nat_num_queued_events);

  if (flags & __WALL)
    {
      for (; msg; prev = msg, msg = msg->next)
	if (pid == -1 || pid == msg->pid)
	  break;
    }
  else if (flags & __WCLONE)
    {
      for (; msg; prev = msg, msg = msg->next)
	if (msg->options & __WCLONE
	    && (pid == -1 || pid == msg->pid))
	  break;
    }
  else
    {
      for (; msg; prev = msg, msg = msg->next)
	if ((msg->options & __WCLONE) == 0
	    && (pid == -1 || pid == msg->pid))
	  break;
    }

  if (msg)
    {
      int pid;

      if (prev)
	prev->next = msg->next;
      else
	waitpid_queue = msg->next;

      msg->next = NULL;
      if (status)
	*status = msg->status;
      pid = msg->pid;

      if (debug_linux_nat_async)
	fprintf_unfiltered (gdb_stdlog, "QWPID: pid(%d), status(%x)\n",
			    pid, msg->status);
      xfree (msg);

      return pid;
    }

  if (debug_linux_nat_async)
    fprintf_unfiltered (gdb_stdlog, "QWPID: miss\n");

  if (status)
    *status = 0;
  return -1;
}

static void
push_waitpid (int pid, int status, int options)
{
  struct waitpid_result *event, *new_event;

  new_event = xmalloc (sizeof (*new_event));
  new_event->pid = pid;
  new_event->status = status;
  new_event->options = options;
  new_event->next = NULL;

  if (waitpid_queue)
    {
      for (event = waitpid_queue;
	   event && event->next;
	   event = event->next)
	;

      event->next = new_event;
    }
  else
    waitpid_queue = new_event;
}

/* Drain all queued events of PID.  If PID is -1, the effect is of
   draining all events.  */
static void
drain_queued_events (int pid)
{
  while (queued_waitpid (pid, NULL, __WALL) != -1)
    ;
}

\f
/* Trivial list manipulation functions to keep track of a list of
   new stopped processes.  */
static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *status)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;
	*status = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}

static void
linux_record_stopped_pid (int pid, int status)
{
  add_to_pid_list (&stopped_pids, pid, status);
}

\f
/* A helper function for linux_test_for_tracefork, called after fork ().  */

static void
linux_tracefork_child (void)
{
  int ret;

  ptrace (PTRACE_TRACEME, 0, 0, 0);
  kill (getpid (), SIGSTOP);
  fork ();
  _exit (0);
}

/* Wrapper function for waitpid which handles EINTR, and checks for
   locally queued events.  */

static int
my_waitpid (int pid, int *status, int flags)
{
  int ret;

  /* There should be no concurrent calls to waitpid.  */
  gdb_assert (!linux_nat_async_events_enabled);

  ret = queued_waitpid (pid, status, flags);
  if (ret != -1)
    return ret;

  do
    {
      ret = waitpid (pid, status, flags);
    }
  while (ret == -1 && errno == EINTR);

  return ret;
}

/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.

   First, we try to enable fork tracing on ORIGINAL_PID.  If this fails,
   we know that the feature is not available.  This may change the tracing
   options for ORIGINAL_PID, but we'll be setting them shortly anyway.

   However, if it succeeds, we don't know for sure that the feature is
   available; old versions of PTRACE_SETOPTIONS ignored unknown options.  We
   create a child process, attach to it, use PTRACE_SETOPTIONS to enable
   fork tracing, and let it fork.  If the process exits, we assume that we
   can't use TRACEFORK; if we get the fork notification, and we can extract
   the new child's PID, then we assume that we can.  */

static void
linux_test_for_tracefork (int original_pid)
{
  int child_pid, ret, status;
  long second_pid;

  linux_supports_tracefork_flag = 0;
  linux_supports_tracevforkdone_flag = 0;

  ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    return;

  child_pid = fork ();
  if (child_pid == -1)
    perror_with_name (("fork"));

  if (child_pid == 0)
    linux_tracefork_child ();

  ret = my_waitpid (child_pid, &status, 0);
  if (ret == -1)
    perror_with_name (("waitpid"));
  else if (ret != child_pid)
    error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
  if (! WIFSTOPPED (status))
    error (_("linux_test_for_tracefork: waitpid: unexpected status %d."),
	   status);

  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
	{
	  warning (_("linux_test_for_tracefork: failed to kill child"));
	  return;
	}

      ret = my_waitpid (child_pid, &status, 0);
      if (ret != child_pid)
	warning (_("linux_test_for_tracefork: failed to wait for killed child"));
      else if (!WIFSIGNALED (status))
	warning (_("linux_test_for_tracefork: unexpected wait status 0x%x from "
		 "killed child"), status);

      return;
    }

  /* Check whether PTRACE_O_TRACEVFORKDONE is available.  */
  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
		PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
  linux_supports_tracevforkdone_flag = (ret == 0);

  ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to resume child"));

  ret = my_waitpid (child_pid, &status, 0);

  if (ret == child_pid && WIFSTOPPED (status)
      && status >> 16 == PTRACE_EVENT_FORK)
    {
      second_pid = 0;
      ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
      if (ret == 0 && second_pid != 0)
	{
	  int second_status;

	  linux_supports_tracefork_flag = 1;
	  my_waitpid (second_pid, &second_status, 0);
	  ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
	  if (ret != 0)
	    warning (_("linux_test_for_tracefork: failed to kill second child"));
	  my_waitpid (second_pid, &status, 0);
	}
    }
  else
    warning (_("linux_test_for_tracefork: unexpected result from waitpid "
	     "(%d, status 0x%x)"), ret, status);

  ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to kill child"));
  my_waitpid (child_pid, &status, 0);
}

/* Return non-zero iff we have tracefork functionality available.
   This function also sets linux_supports_tracefork_flag.  */

static int
linux_supports_tracefork (int pid)
{
  if (linux_supports_tracefork_flag == -1)
    linux_test_for_tracefork (pid);
  return linux_supports_tracefork_flag;
}

static int
linux_supports_tracevforkdone (int pid)
{
  if (linux_supports_tracefork_flag == -1)
    linux_test_for_tracefork (pid);
  return linux_supports_tracevforkdone_flag;
}

\f
void
linux_enable_event_reporting (ptid_t ptid)
{
  int pid = ptid_get_lwp (ptid);
  int options;

  if (pid == 0)
    pid = ptid_get_pid (ptid);

  if (! linux_supports_tracefork (pid))
    return;

  options = PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK | PTRACE_O_TRACEEXEC
    | PTRACE_O_TRACECLONE;
  if (linux_supports_tracevforkdone (pid))
    options |= PTRACE_O_TRACEVFORKDONE;

  /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
     read-only process state.  */

  ptrace (PTRACE_SETOPTIONS, pid, 0, options);
}

static void
linux_child_post_attach (int pid)
{
  linux_enable_event_reporting (pid_to_ptid (pid));
  check_for_thread_db ();
}

static void
linux_child_post_startup_inferior (ptid_t ptid)
{
  linux_enable_event_reporting (ptid);
  check_for_thread_db ();
}

static int
linux_child_follow_fork (struct target_ops *ops, int follow_child)
{
  ptid_t last_ptid;
  struct target_waitstatus last_status;
  int has_vforked;
  int parent_pid, child_pid;

  if (target_can_async_p ())
    target_async (NULL, 0);

  get_last_target_status (&last_ptid, &last_status);
  has_vforked = (last_status.kind == TARGET_WAITKIND_VFORKED);
  parent_pid = ptid_get_lwp (last_ptid);
  if (parent_pid == 0)
    parent_pid = ptid_get_pid (last_ptid);
  child_pid = last_status.value.related_pid;

  if (! follow_child)
    {
      /* We're already attached to the parent, by default.  */

      /* Before detaching from the child, remove all breakpoints from
	 it.  (This won't actually modify the breakpoint list, but will
	 physically remove the breakpoints from the child.)  */
      /* If we vforked this will remove the breakpoints from the parent
	 also, but they'll be reinserted below.  */
      detach_breakpoints (child_pid);

      /* Detach new forked process?  */
      if (detach_fork)
	{
	  if (info_verbose || debug_linux_nat)
	    {
	      target_terminal_ours ();
	      fprintf_filtered (gdb_stdlog,
				"Detaching after fork from child process %d.\n",
				child_pid);
	    }

	  ptrace (PTRACE_DETACH, child_pid, 0, 0);
	}
      else
	{
	  struct fork_info *fp;
	  /* Retain child fork in ptrace (stopped) state.  */
	  fp = find_fork_pid (child_pid);
	  if (!fp)
	    fp = add_fork (child_pid);
	  fork_save_infrun_state (fp, 0);
	}

      if (has_vforked)
	{
	  gdb_assert (linux_supports_tracefork_flag >= 0);
	  if (linux_supports_tracevforkdone (0))
	    {
	      int status;

	      ptrace (PTRACE_CONT, parent_pid, 0, 0);
	      my_waitpid (parent_pid, &status, __WALL);
	      if ((status >> 16) != PTRACE_EVENT_VFORK_DONE)
		warning (_("Unexpected waitpid result %06x when waiting for "
			 "vfork-done"), status);
	    }
	  else
	    {
	      /* We can't insert breakpoints until the child has
		 finished with the shared memory region.  We need to
		 wait until that happens.  Ideal would be to just
		 call:
		 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
		 - waitpid (parent_pid, &status, __WALL);
		 However, most architectures can't handle a syscall
		 being traced on the way out if it wasn't traced on
		 the way in.

		 We might also think to loop, continuing the child
		 until it exits or gets a SIGTRAP.  One problem is
		 that the child might call ptrace with PTRACE_TRACEME.

		 There's no simple and reliable way to figure out when
		 the vforked child will be done with its copy of the
		 shared memory.  We could step it out of the syscall,
		 two instructions, let it go, and then single-step the
		 parent once.  When we have hardware single-step, this
		 would work; with software single-step it could still
		 be made to work but we'd have to be able to insert
		 single-step breakpoints in the child, and we'd have
		 to insert -just- the single-step breakpoint in the
		 parent.  Very awkward.

		 In the end, the best we can do is to make sure it
		 runs for a little while.  Hopefully it will be out of
		 range of any breakpoints we reinsert.  Usually this
		 is only the single-step breakpoint at vfork's return
		 point.  */

	      usleep (10000);
	    }

	  /* Since we vforked, breakpoints were removed in the parent
	     too.  Put them back.  */
	  reattach_breakpoints (parent_pid);
	}
    }
  else
    {
      char child_pid_spelling[40];

      /* Needed to keep the breakpoint lists in sync.  */
      if (! has_vforked)
	detach_breakpoints (child_pid);

      /* Before detaching from the parent, remove all breakpoints from it.  */
      remove_breakpoints ();

      if (info_verbose || debug_linux_nat)
	{
	  target_terminal_ours ();
	  fprintf_filtered (gdb_stdlog,
			    "Attaching after fork to child process %d.\n",
			    child_pid);
	}

      /* If we're vforking, we may want to hold on to the parent until
	 the child exits or execs.  At exec time we can remove the old
	 breakpoints from the parent and detach it; at exit time we
	 could do the same (or even, sneakily, resume debugging it - the
	 child's exec has failed, or something similar).

	 This doesn't clean up "properly", because we can't call
	 target_detach, but that's OK; if the current target is "child",
	 then it doesn't need any further cleanups, and lin_lwp will
	 generally not encounter vfork (vfork is defined to fork
	 in libpthread.so).

	 The holding part is very easy if we have VFORKDONE events;
	 but keeping track of both processes is beyond GDB at the
	 moment.  So we don't expose the parent to the rest of GDB.
	 Instead we quietly hold onto it until such time as we can
	 safely resume it.  */

      if (has_vforked)
	linux_parent_pid = parent_pid;
      else if (!detach_fork)
	{
	  struct fork_info *fp;
	  /* Retain parent fork in ptrace (stopped) state.  */
	  fp = find_fork_pid (parent_pid);
	  if (!fp)
	    fp = add_fork (parent_pid);
	  fork_save_infrun_state (fp, 0);
	}
      else
	target_detach (NULL, 0);

      inferior_ptid = ptid_build (child_pid, child_pid, 0);

      /* Reinstall ourselves, since we might have been removed in
	 target_detach (which does other necessary cleanup).  */

      push_target (ops);
      linux_nat_switch_fork (inferior_ptid);
      check_for_thread_db ();

      /* Reset breakpoints in the child as appropriate.  */
      follow_inferior_reset_breakpoints ();
    }

  if (target_can_async_p ())
    target_async (inferior_event_handler, 0);

  return 0;
}

\f
static void
linux_child_insert_fork_catchpoint (int pid)
{
  if (! linux_supports_tracefork (pid))
    error (_("Your system does not support fork catchpoints."));
}

static void
linux_child_insert_vfork_catchpoint (int pid)
{
  if (!linux_supports_tracefork (pid))
    error (_("Your system does not support vfork catchpoints."));
}

static void
linux_child_insert_exec_catchpoint (int pid)
{
  if (!linux_supports_tracefork (pid))
    error (_("Your system does not support exec catchpoints."));
}

/* On GNU/Linux there are no real LWP's.  The closest thing to LWP's
   are processes sharing the same VM space.  A multi-threaded process
   is basically a group of such processes.  However, such a grouping
   is almost entirely a user-space issue; the kernel doesn't enforce
   such a grouping at all (this might change in the future).  In
   general, we'll rely on the threads library (i.e. the GNU/Linux
   Threads library) to provide such a grouping.

   It is perfectly possible to write a multi-threaded application
   without the assistance of a threads library, by using the clone
   system call directly.  This module should be able to give some
   rudimentary support for debugging such applications if developers
   specify the CLONE_PTRACE flag in the clone system call, and are
   using the Linux kernel 2.4 or above.

   Note that there are some peculiarities in GNU/Linux that affect
   this code:

   - In general one should specify the __WCLONE flag to waitpid in
     order to make it report events for any of the cloned processes
     (and leave it out for the initial process).  However, if a cloned
     process has exited the exit status is only reported if the
     __WCLONE flag is absent.  Linux kernel 2.4 has a __WALL flag, but
     we cannot use it since GDB must work on older systems too.

   - When a traced, cloned process exits and is waited for by the
     debugger, the kernel reassigns it to the original parent and
     keeps it around as a "zombie".  Somehow, the GNU/Linux Threads
     library doesn't notice this, which leads to the "zombie problem":
     when debugging a multi-threaded process that spawns a lot of
     threads, we will run out of processes, even if the threads exit,
     because the "zombies" stay around.  */
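
/* For reference, such a raw-clone thread would be created along these
   lines (an illustrative sketch only, not code GDB itself runs; the
   omitted termination signal in the flags is what makes the child
   waitable only with __WCLONE):

     stack = malloc (STACK_SIZE);
     clone (thread_fn, stack + STACK_SIZE,
	    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND
	    | CLONE_PTRACE,
	    arg);  */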

/* List of known LWPs.  */
struct lwp_info *lwp_list;

/* Number of LWPs in the list.  */
static int num_lwps;
\f

/* Original signal mask.  */
static sigset_t normal_mask;

/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
   _initialize_linux_nat.  */
static sigset_t suspend_mask;

/* SIGCHLD action for synchronous mode.  */
struct sigaction sync_sigchld_action;

/* SIGCHLD action for asynchronous mode.  */
static struct sigaction async_sigchld_action;
\f

/* Prototypes for local functions.  */
static int stop_wait_callback (struct lwp_info *lp, void *data);
static int linux_nat_thread_alive (ptid_t ptid);
static char *linux_child_pid_to_exec_file (int pid);
static int cancel_breakpoint (struct lwp_info *lp);

\f
/* Convert wait status STATUS to a string.  Used for printing debug
   messages only.  */

static char *
status_to_str (int status)
{
  static char buf[64];

  if (WIFSTOPPED (status))
    snprintf (buf, sizeof (buf), "%s (stopped)",
	      strsignal (WSTOPSIG (status)));
  else if (WIFSIGNALED (status))
    snprintf (buf, sizeof (buf), "%s (terminated)",
	      strsignal (WTERMSIG (status)));
  else
    snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));

  return buf;
}

/* Initialize the list of LWPs.  Note that this module, contrary to
   what GDB's generic threads layer does for its thread list,
   re-initializes the LWP lists whenever we mourn or detach (which
   doesn't involve mourning) the inferior.  */

static void
init_lwp_list (void)
{
  struct lwp_info *lp, *lpnext;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;
      xfree (lp);
    }

  lwp_list = NULL;
  num_lwps = 0;
}

/* Add the LWP specified by PID to the list.  Return a pointer to the
   structure describing the new LWP.  The LWP should already be stopped
   (with an exception for the very first LWP).  */

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lp;

  gdb_assert (is_lwp (ptid));

  lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));

  memset (lp, 0, sizeof (struct lwp_info));

  lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  lp->ptid = ptid;

  lp->next = lwp_list;
  lwp_list = lp;
  ++num_lwps;

  if (num_lwps > 1 && linux_nat_new_thread != NULL)
    linux_nat_new_thread (ptid);

  return lp;
}

/* Remove the LWP specified by PID from the list.  */

static void
delete_lwp (ptid_t ptid)
{
  struct lwp_info *lp, *lpprev;

  lpprev = NULL;

  for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
    if (ptid_equal (lp->ptid, ptid))
      break;

  if (!lp)
    return;

  num_lwps--;

  if (lpprev)
    lpprev->next = lp->next;
  else
    lwp_list = lp->next;

  xfree (lp);
}

/* Return a pointer to the structure describing the LWP corresponding
   to PID.  If no corresponding LWP could be found, return NULL.  */

static struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct lwp_info *lp;
  int lwp;

  if (is_lwp (ptid))
    lwp = GET_LWP (ptid);
  else
    lwp = GET_PID (ptid);

  for (lp = lwp_list; lp; lp = lp->next)
    if (lwp == GET_LWP (lp->ptid))
      return lp;

  return NULL;
}

/* Call CALLBACK with its second argument set to DATA for every LWP in
   the list.  If CALLBACK returns 1 for a particular LWP, return a
   pointer to the structure describing that LWP immediately.
   Otherwise return NULL.  */

struct lwp_info *
iterate_over_lwps (int (*callback) (struct lwp_info *, void *), void *data)
{
  struct lwp_info *lp, *lpnext;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;
      if ((*callback) (lp, data))
	return lp;
    }

  return NULL;
}

/* Update our internal state when changing from one fork (checkpoint,
   et cetera) to another indicated by NEW_PTID.  We can only switch
   single-threaded applications, so we only create one new LWP, and
   the previous list is discarded.  */

void
linux_nat_switch_fork (ptid_t new_ptid)
{
  struct lwp_info *lp;

  init_thread_list ();
  init_lwp_list ();
  lp = add_lwp (new_ptid);
  add_thread_silent (new_ptid);
  lp->stopped = 1;
}

/* Record a PTID for later deletion.  */

struct saved_ptids
{
  ptid_t ptid;
  struct saved_ptids *next;
};
static struct saved_ptids *threads_to_delete;

static void
record_dead_thread (ptid_t ptid)
{
  struct saved_ptids *p = xmalloc (sizeof (struct saved_ptids));
  p->ptid = ptid;
  p->next = threads_to_delete;
  threads_to_delete = p;
}

/* Delete any dead threads which are not the current thread.  */

static void
prune_lwps (void)
{
  struct saved_ptids **p = &threads_to_delete;

  while (*p)
    if (! ptid_equal ((*p)->ptid, inferior_ptid))
      {
	struct saved_ptids *tmp = *p;
	delete_thread (tmp->ptid);
	*p = tmp->next;
	xfree (tmp);
      }
    else
      p = &(*p)->next;
}

/* Handle the exit of a single thread LP.  */

static void
exit_lwp (struct lwp_info *lp)
{
  struct thread_info *th = find_thread_pid (lp->ptid);

  if (th)
    {
      if (print_thread_events)
	printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));

      /* Core GDB cannot deal with us deleting the current thread.  */
      if (!ptid_equal (lp->ptid, inferior_ptid))
	delete_thread (lp->ptid);
      else
	record_dead_thread (lp->ptid);
    }

  delete_lwp (lp->ptid);
}

/* Detect `T (stopped)' in `/proc/PID/status'.
   Other states including `T (tracing stop)' are reported as false.  */

static int
pid_is_stopped (pid_t pid)
{
  FILE *status_file;
  char buf[100];
  int retval = 0;

  snprintf (buf, sizeof (buf), "/proc/%d/status", (int) pid);
  status_file = fopen (buf, "r");
  if (status_file != NULL)
    {
      int have_state = 0;

      while (fgets (buf, sizeof (buf), status_file))
	{
	  if (strncmp (buf, "State:", 6) == 0)
	    {
	      have_state = 1;
	      break;
	    }
	}
      if (have_state && strstr (buf, "T (stopped)") != NULL)
	retval = 1;
      fclose (status_file);
    }
  return retval;
}
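
/* For reference, the relevant part of /proc/PID/status looks roughly
   like this for a job-control-stopped process (abbreviated):

     Name:   foo
     State:  T (stopped)

   whereas a process in a ptrace stop on recent kernels shows
   "T (tracing stop)" instead, which pid_is_stopped deliberately does
   not match.  */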

/* Wait for the LWP specified by LP, which we have just attached to.
   Returns a wait status for that LWP, to cache.  */

static int
linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
			    int *signalled)
{
  pid_t new_pid, pid = GET_LWP (ptid);
  int status;

  if (pid_is_stopped (pid))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNPAW: Attaching to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
	 is not an RT signal, it can only be queued once.  */
      kill_lwp (pid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the SIGSTOP
	 (or a higher priority signal, just like normal PTRACE_ATTACH).  */
      ptrace (PTRACE_CONT, pid, 0, 0);
    }

  /* Make sure the initial process is stopped.  The user-level threads
     layer might want to poke around in the inferior, and that won't
     work if things haven't stabilized yet.  */
  new_pid = my_waitpid (pid, &status, 0);
  if (new_pid == -1 && errno == ECHILD)
    {
      if (first)
	warning (_("%s is a cloned process"), target_pid_to_str (ptid));

      /* Try again with __WCLONE to check cloned processes.  */
      new_pid = my_waitpid (pid, &status, __WCLONE);
      *cloned = 1;
    }

  gdb_assert (pid == new_pid && WIFSTOPPED (status));

  if (WSTOPSIG (status) != SIGSTOP)
    {
      *signalled = 1;
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNPAW: Received %s after attaching\n",
			    status_to_str (status));
    }

  return status;
}

/* Attach to the LWP specified by PID.  Return 0 if successful or -1
   if the new LWP could not be attached.  */

int
lin_lwp_attach_lwp (ptid_t ptid)
{
  struct lwp_info *lp;
  int async_events_were_enabled = 0;

  gdb_assert (is_lwp (ptid));

  if (target_can_async_p ())
    async_events_were_enabled = linux_nat_async_events (0);

  lp = find_lwp_pid (ptid);

  /* We assume that we're already attached to any LWP that has an id
     equal to the overall process id, and to any LWP that is already
     in our list of LWPs.  If we're not seeing exit events from threads
     and we've had PID wraparound since we last tried to stop all threads,
     this assumption might be wrong; fortunately, this is very unlikely
     to happen.  */
  if (GET_LWP (ptid) != GET_PID (ptid) && lp == NULL)
    {
      int status, cloned = 0, signalled = 0;

      if (ptrace (PTRACE_ATTACH, GET_LWP (ptid), 0, 0) < 0)
	{
	  /* If we fail to attach to the thread, issue a warning,
	     but continue.  One way this can happen is if thread
	     creation is interrupted; as of Linux kernel 2.6.19, a
	     bug may place threads in the thread list and then fail
	     to create them.  */
	  warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
		   safe_strerror (errno));
	  return -1;
	}

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
			    target_pid_to_str (ptid));

      status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
      lp = add_lwp (ptid);
      lp->stopped = 1;
      lp->cloned = cloned;
      lp->signalled = signalled;
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  lp->resumed = 1;
	  lp->status = status;
	}

      target_post_attach (GET_LWP (lp->ptid));

      if (debug_linux_nat)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "LLAL: waitpid %s received %s\n",
			      target_pid_to_str (ptid),
			      status_to_str (status));
	}
    }
  else
    {
      /* We assume that the LWP representing the original process is
	 already stopped.  Mark it as stopped in the data structure
	 that the GNU/linux ptrace layer uses to keep track of
	 threads.  Note that this won't have already been done since
	 the main thread will have, we assume, been stopped by an
	 attach from a different layer.  */
      if (lp == NULL)
	lp = add_lwp (ptid);
      lp->stopped = 1;
    }

  if (async_events_were_enabled)
    linux_nat_async_events (1);

  return 0;
}

static void
linux_nat_create_inferior (char *exec_file, char *allargs, char **env,
			   int from_tty)
{
  int saved_async = 0;

  /* The fork_child mechanism is synchronous and calls target_wait, so
     we have to mask the async mode.  */

  if (target_can_async_p ())
    saved_async = linux_nat_async_mask (0);
  else
    {
      /* Restore the original signal mask.  */
      sigprocmask (SIG_SETMASK, &normal_mask, NULL);
      /* Make sure we don't block SIGCHLD during a sigsuspend.  */
      suspend_mask = normal_mask;
      sigdelset (&suspend_mask, SIGCHLD);
    }

  linux_ops->to_create_inferior (exec_file, allargs, env, from_tty);

  if (saved_async)
    linux_nat_async_mask (saved_async);
}

static void
linux_nat_attach (char *args, int from_tty)
{
  struct lwp_info *lp;
  int status;

  /* FIXME: We should probably accept a list of process id's, and
     attach all of them.  */
  linux_ops->to_attach (args, from_tty);

  if (!target_can_async_p ())
    {
      /* Restore the original signal mask.  */
      sigprocmask (SIG_SETMASK, &normal_mask, NULL);
      /* Make sure we don't block SIGCHLD during a sigsuspend.  */
      suspend_mask = normal_mask;
      sigdelset (&suspend_mask, SIGCHLD);
    }

  /* Add the initial process as the first LWP to the list.  */
  inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
  lp = add_lwp (inferior_ptid);

  status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
				       &lp->signalled);
  lp->stopped = 1;

  /* If this process is not using thread_db, then we still don't
     detect any other threads, but add at least this one.  */
  add_thread_silent (lp->ptid);

  /* Save the wait status to report later.  */
  lp->resumed = 1;
  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LNA: waitpid %ld, saving status %s\n",
			(long) GET_PID (lp->ptid), status_to_str (status));

  if (!target_can_async_p ())
    lp->status = status;
  else
    {
      /* We already waited for this LWP, so put the wait result on the
	 pipe.  The event loop will wake up and get us to handle this
	 event.  */
      linux_nat_event_pipe_push (GET_PID (lp->ptid), status,
				 lp->cloned ? __WCLONE : 0);
      /* Register in the event loop.  */
      target_async (inferior_event_handler, 0);
    }
}

/* Get pending status of LP.  */
static int
get_pending_status (struct lwp_info *lp, int *status)
{
  struct target_waitstatus last;
  ptid_t last_ptid;

  get_last_target_status (&last_ptid, &last);

  /* If this lwp is the ptid that GDB is processing an event from, the
     signal will be in stop_signal.  Otherwise, in all-stop + sync
     mode, we may cache pending events in lp->status while trying to
     stop all threads (see stop_wait_callback).  In async mode, the
     events are always cached in waitpid_queue.  */

  *status = 0;
  if (GET_LWP (lp->ptid) == GET_LWP (last_ptid))
    {
      if (stop_signal != TARGET_SIGNAL_0
	  && signal_pass_state (stop_signal))
	*status = W_STOPCODE (target_signal_to_host (stop_signal));
    }
  else if (target_can_async_p ())
    queued_waitpid (GET_LWP (lp->ptid), status, __WALL);
  else
    *status = lp->status;

  return 0;
}

static int
detach_callback (struct lwp_info *lp, void *data)
{
  gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));

  if (debug_linux_nat && lp->status)
    fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
			strsignal (WSTOPSIG (lp->status)),
			target_pid_to_str (lp->ptid));

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lp->signalled)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "DC: Sending SIGCONT to %s\n",
			    target_pid_to_str (lp->ptid));

      kill_lwp (GET_LWP (lp->ptid), SIGCONT);
      lp->signalled = 0;
    }

  /* We don't actually detach from the LWP that has an id equal to the
     overall process id just yet.  */
  if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
    {
      int status = 0;

      /* Pass on any pending signal for this LWP.  */
      get_pending_status (lp, &status);

      errno = 0;
      if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
		  WSTOPSIG (status)) < 0)
	error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
	       safe_strerror (errno));

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "PTRACE_DETACH (%s, %s, 0) (OK)\n",
			    target_pid_to_str (lp->ptid),
			    strsignal (WSTOPSIG (lp->status)));

      delete_lwp (lp->ptid);
    }

  return 0;
}

static void
linux_nat_detach (char *args, int from_tty)
{
  int pid;
  int status;
  enum target_signal sig;

  if (target_can_async_p ())
    linux_nat_async (NULL, 0);

  iterate_over_lwps (detach_callback, NULL);

  /* Only the initial process should be left right now.  */
  gdb_assert (num_lwps == 1);

  /* Pass on any pending signal for the last LWP.  */
  if ((args == NULL || *args == '\0')
      && get_pending_status (lwp_list, &status) != -1
      && WIFSTOPPED (status))
    {
      /* Put the signal number in ARGS so that inf_ptrace_detach will
	 pass it along with PTRACE_DETACH.  */
      args = alloca (8);
      sprintf (args, "%d", (int) WSTOPSIG (status));
      fprintf_unfiltered (gdb_stdlog,
			  "LND: Sending signal %s to %s\n",
			  args,
			  target_pid_to_str (lwp_list->ptid));
    }

  /* Destroy LWP info; it's no longer valid.  */
  init_lwp_list ();

  pid = GET_PID (inferior_ptid);
  inferior_ptid = pid_to_ptid (pid);
  linux_ops->to_detach (args, from_tty);

  if (target_can_async_p ())
    drain_queued_events (pid);
}

/* Resume LP.  */

static int
resume_callback (struct lwp_info *lp, void *data)
{
  if (lp->stopped && lp->status == 0)
    {
      linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
			    0, TARGET_SIGNAL_0);
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "RC: PTRACE_CONT %s, 0, 0 (resume sibling)\n",
			    target_pid_to_str (lp->ptid));
      lp->stopped = 0;
      lp->step = 0;
      memset (&lp->siginfo, 0, sizeof (lp->siginfo));
    }

  return 0;
}

static int
resume_clear_callback (struct lwp_info *lp, void *data)
{
  lp->resumed = 0;
  return 0;
}

static int
resume_set_callback (struct lwp_info *lp, void *data)
{
  lp->resumed = 1;
  return 0;
}

static void
linux_nat_resume (ptid_t ptid, int step, enum target_signal signo)
{
  struct lwp_info *lp;
  int resume_all;

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
			step ? "step" : "resume",
			target_pid_to_str (ptid),
			signo ? strsignal (signo) : "0",
			target_pid_to_str (inferior_ptid));

  prune_lwps ();

  if (target_can_async_p ())
    /* Block events while we're here.  */
    linux_nat_async_events (0);

  /* A specific PTID means `step only this process id'.  */
  resume_all = (PIDGET (ptid) == -1);

  if (resume_all)
    iterate_over_lwps (resume_set_callback, NULL);
  else
    iterate_over_lwps (resume_clear_callback, NULL);

  /* If PID is -1, it's the current inferior that should be
     handled specially.  */
  if (PIDGET (ptid) == -1)
    ptid = inferior_ptid;

  lp = find_lwp_pid (ptid);
  gdb_assert (lp != NULL);

  ptid = pid_to_ptid (GET_LWP (lp->ptid));

  /* Remember if we're stepping.  */
  lp->step = step;

  /* Mark this LWP as resumed.  */
  lp->resumed = 1;

  /* If we have a pending wait status for this thread, there is no
     point in resuming the process.  But first make sure that
     linux_nat_wait won't preemptively handle the event - we
     should never take this short-circuit if we are going to
     leave LP running, since we have skipped resuming all the
     other threads.  This bit of code needs to be synchronized
     with linux_nat_wait.  */

  /* In async mode, we never have pending wait status.  */
  if (target_can_async_p () && lp->status)
    internal_error (__FILE__, __LINE__, "Pending status in async mode");

  if (lp->status && WIFSTOPPED (lp->status))
    {
      int saved_signo = target_signal_from_host (WSTOPSIG (lp->status));

      if (signal_stop_state (saved_signo) == 0
	  && signal_print_state (saved_signo) == 0
	  && signal_pass_state (saved_signo) == 1)
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LLR: Not short circuiting for ignored "
				"status 0x%x\n", lp->status);

	  /* FIXME: What should we do if we are supposed to continue
	     this thread with a signal?  */
	  gdb_assert (signo == TARGET_SIGNAL_0);
	  signo = saved_signo;
	  lp->status = 0;
	}
    }

  if (lp->status)
    {
      /* FIXME: What should we do if we are supposed to continue
	 this thread with a signal?  */
      gdb_assert (signo == TARGET_SIGNAL_0);

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLR: Short circuiting for status 0x%x\n",
			    lp->status);

      return;
    }

  /* Mark LWP as not stopped to prevent it from being continued by
     resume_callback.  */
  lp->stopped = 0;

  if (resume_all)
    iterate_over_lwps (resume_callback, NULL);

  linux_ops->to_resume (ptid, step, signo);
  memset (&lp->siginfo, 0, sizeof (lp->siginfo));

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LLR: %s %s, %s (resume event thread)\n",
			step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
			target_pid_to_str (ptid),
			signo ? strsignal (signo) : "0");

  if (target_can_async_p ())
    {
      target_executing = 1;
      target_async (inferior_event_handler, 0);
    }
}

/* Issue kill to specified lwp.  */

static int tkill_failed;

static int
kill_lwp (int lwpid, int signo)
{
  errno = 0;

  /* Use tkill, if possible, in case we are using nptl threads.  If tkill
     fails, then we are not using nptl threads and we should be using kill.  */

#ifdef HAVE_TKILL_SYSCALL
  if (!tkill_failed)
    {
      int ret = syscall (__NR_tkill, lwpid, signo);
      if (errno != ENOSYS)
	return ret;
      errno = 0;
      tkill_failed = 1;
    }
#endif

  return kill (lwpid, signo);
}

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  This function returns non-zero if the
   event should be ignored and we should wait again.  If STOPPING is
   true, the new LWP remains stopped, otherwise it is continued.  */

static int
linux_handle_extended_wait (struct lwp_info *lp, int status,
			    int stopping)
{
  int pid = GET_LWP (lp->ptid);
  struct target_waitstatus *ourstatus = &lp->waitstatus;
  struct lwp_info *new_lp = NULL;
  int event = status >> 16;

  if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
      || event == PTRACE_EVENT_CLONE)
    {
      unsigned long new_pid;
      int ret;

      ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */
	  ret = my_waitpid (new_pid, &status,
			    (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
	  if (ret == -1)
	    perror_with_name (_("waiting for new child"));
	  else if (ret != new_pid)
	    internal_error (__FILE__, __LINE__,
			    _("wait returned unexpected PID %d"), ret);
	  else if (!WIFSTOPPED (status))
	    internal_error (__FILE__, __LINE__,
			    _("wait returned unexpected status 0x%x"), status);
	}

      ourstatus->value.related_pid = new_pid;

      if (event == PTRACE_EVENT_FORK)
	ourstatus->kind = TARGET_WAITKIND_FORKED;
      else if (event == PTRACE_EVENT_VFORK)
	ourstatus->kind = TARGET_WAITKIND_VFORKED;
      else
	{
	  ourstatus->kind = TARGET_WAITKIND_IGNORE;
	  new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (inferior_ptid)));
	  new_lp->cloned = 1;

	  if (WSTOPSIG (status) != SIGSTOP)
	    {
	      /* This can happen if someone starts sending signals to
		 the new thread before it gets a chance to run, which
		 have a lower number than SIGSTOP (e.g. SIGUSR1).
		 This is an unlikely case, and harder to handle for
		 fork / vfork than for clone, so we do not try - but
		 we handle it for clone events here.  We'll send
		 the other signal on to the thread below.  */

	      new_lp->signalled = 1;
	    }
	  else
	    status = 0;

	  if (stopping)
	    new_lp->stopped = 1;
	  else
	    {
	      new_lp->resumed = 1;
	      ptrace (PTRACE_CONT, lp->waitstatus.value.related_pid, 0,
		      status ? WSTOPSIG (status) : 0);
	    }

	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LHEW: Got clone event from LWP %ld, resuming\n",
				GET_LWP (lp->ptid));
	  ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);

	  return 1;
	}

      return 0;
    }

  if (event == PTRACE_EVENT_EXEC)
    {
      ourstatus->kind = TARGET_WAITKIND_EXECD;
      ourstatus->value.execd_pathname
	= xstrdup (linux_child_pid_to_exec_file (pid));

      if (linux_parent_pid)
	{
	  detach_breakpoints (linux_parent_pid);
	  ptrace (PTRACE_DETACH, linux_parent_pid, 0, 0);

	  linux_parent_pid = 0;
	}

      return 0;
    }

  internal_error (__FILE__, __LINE__,
		  _("unknown ptrace event %d"), event);
}

/* Wait for LP to stop.  Returns the wait status, or 0 if the LWP has
   exited.  */

static int
wait_lwp (struct lwp_info *lp)
{
  pid_t pid;
  int status;
  int thread_dead = 0;

  gdb_assert (!lp->stopped);
  gdb_assert (lp->status == 0);

  pid = my_waitpid (GET_LWP (lp->ptid), &status, 0);
  if (pid == -1 && errno == ECHILD)
    {
      pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE);
      if (pid == -1 && errno == ECHILD)
	{
	  /* The thread has previously exited.  We need to delete it
	     now because, for some vendor 2.4 kernels with NPTL
	     support backported, there won't be an exit event unless
	     it is the main thread.  2.6 kernels will report an exit
	     event for each thread that exits, as expected.  */
	  thread_dead = 1;
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
				target_pid_to_str (lp->ptid));
	}
    }

  if (!thread_dead)
    {
      gdb_assert (pid == GET_LWP (lp->ptid));

      if (debug_linux_nat)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "WL: waitpid %s received %s\n",
			      target_pid_to_str (lp->ptid),
			      status_to_str (status));
	}
    }

  /* Check if the thread has exited.  */
  if (WIFEXITED (status) || WIFSIGNALED (status))
    {
      thread_dead = 1;
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
			    target_pid_to_str (lp->ptid));
    }

  if (thread_dead)
    {
      exit_lwp (lp);
      return 0;
    }

  gdb_assert (WIFSTOPPED (status));

  /* Handle GNU/Linux's extended waitstatus for trace events.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "WL: Handling extended status 0x%06x\n",
			    status);
      if (linux_handle_extended_wait (lp, status, 1))
	return wait_lwp (lp);
    }

  return status;
}

/* Save the most recent siginfo for LP.  This is currently only called
   for SIGTRAP; some ports use the si_addr field for
   target_stopped_data_address.  In the future, it may also be used to
   restore the siginfo of requeued signals.  */

static void
save_siginfo (struct lwp_info *lp)
{
  errno = 0;
  ptrace (PTRACE_GETSIGINFO, GET_LWP (lp->ptid),
	  (PTRACE_TYPE_ARG3) 0, &lp->siginfo);

  if (errno != 0)
    memset (&lp->siginfo, 0, sizeof (lp->siginfo));
}

/* Send a SIGSTOP to LP.  */

static int
stop_callback (struct lwp_info *lp, void *data)
{
  if (!lp->stopped && !lp->signalled)
    {
      int ret;

      if (debug_linux_nat)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "SC: kill %s **<SIGSTOP>**\n",
			      target_pid_to_str (lp->ptid));
	}
      errno = 0;
      ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
      if (debug_linux_nat)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "SC: lwp kill %d %s\n",
			      ret,
			      errno ? safe_strerror (errno) : "ERRNO-OK");
	}

      lp->signalled = 1;
      gdb_assert (lp->status == 0);
    }

  return 0;
}
1869
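/* kill_lwp (defined earlier in this file) must deliver SIGSTOP to one
   specific thread, not the whole process.  Where the tkill syscall
   exists, that is exactly what it does; plain kill is the fallback
   for old kernels where LWP ids double as pids.  A sketch of that
   logic (illustrative; the real kill_lwp caches whether tkill is
   usable):  */
#if 0 /* Illustrative sketch; not built.  */
#include <errno.h>
#include <signal.h>
#include <unistd.h>
#include <sys/syscall.h>

static int
sketch_kill_lwp (long lwpid, int signo)
{
#ifdef __NR_tkill
  int ret = syscall (__NR_tkill, lwpid, signo);

  if (ret == 0 || errno != ENOSYS)
    return ret;
#endif
  return kill ((pid_t) lwpid, signo);
}
#endif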
1870 /* Wait until LP is stopped. If DATA is non-null it is interpreted as
1871 a pointer to a set of signals to be flushed immediately. */
1872
1873 static int
1874 stop_wait_callback (struct lwp_info *lp, void *data)
1875 {
1876 sigset_t *flush_mask = data;
1877
1878 if (!lp->stopped)
1879 {
1880 int status;
1881
1882 status = wait_lwp (lp);
1883 if (status == 0)
1884 return 0;
1885
1886 /* Ignore any signals in FLUSH_MASK. */
1887 if (flush_mask && sigismember (flush_mask, WSTOPSIG (status)))
1888 {
1889 if (!lp->signalled)
1890 {
1891 lp->stopped = 1;
1892 return 0;
1893 }
1894
1895 errno = 0;
1896 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1897 if (debug_linux_nat)
1898 fprintf_unfiltered (gdb_stdlog,
1899 "PTRACE_CONT %s, 0, 0 (%s)\n",
1900 target_pid_to_str (lp->ptid),
1901 errno ? safe_strerror (errno) : "OK");
1902
1903 return stop_wait_callback (lp, flush_mask);
1904 }
1905
1906 if (WSTOPSIG (status) != SIGSTOP)
1907 {
1908 if (WSTOPSIG (status) == SIGTRAP)
1909 {
1910 /* If a LWP other than the LWP that we're reporting an
1911 event for has hit a GDB breakpoint (as opposed to
1912 some random trap signal), then just arrange for it to
1913 hit it again later. We don't keep the SIGTRAP status
1914 and don't forward the SIGTRAP signal to the LWP. We
1915 will handle the current event, eventually we will
1916 resume all LWPs, and this one will get its breakpoint
1917 trap again.
1918
1919 If we do not do this, then we run the risk that the
1920 user will delete or disable the breakpoint, but the
1921 thread will have already tripped on it. */
1922
1923 /* Save the trap's siginfo in case we need it later. */
1924 save_siginfo (lp);
1925
1926 /* Now resume this LWP and get the SIGSTOP event. */
1927 errno = 0;
1928 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1929 if (debug_linux_nat)
1930 {
1931 fprintf_unfiltered (gdb_stdlog,
1932 "PTRACE_CONT %s, 0, 0 (%s)\n",
1933 target_pid_to_str (lp->ptid),
1934 errno ? safe_strerror (errno) : "OK");
1935
1936 fprintf_unfiltered (gdb_stdlog,
1937 "SWC: Candidate SIGTRAP event in %s\n",
1938 target_pid_to_str (lp->ptid));
1939 }
1940 /* Hold this event/waitstatus while we check to see if
1941 there are any more (we still want to get that SIGSTOP). */
1942 stop_wait_callback (lp, data);
1943
1944 if (target_can_async_p ())
1945 {
1946 /* Don't leave a pending wait status in async mode.
1947 Retrigger the breakpoint. */
1948 if (!cancel_breakpoint (lp))
1949 {
1950 /* There was no gdb breakpoint set at pc. Put
1951 the event back in the queue. */
1952 if (debug_linux_nat)
1953 fprintf_unfiltered (gdb_stdlog,
1954 "SWC: kill %s, %s\n",
1955 target_pid_to_str (lp->ptid),
1956 status_to_str ((int) status));
1957 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
1958 }
1959 }
1960 else
1961 {
1962 /* Hold the SIGTRAP for handling by
1963 linux_nat_wait. */
1964 /* If there's another event, throw it back into the
1965 queue. */
1966 if (lp->status)
1967 {
1968 if (debug_linux_nat)
1969 fprintf_unfiltered (gdb_stdlog,
1970 "SWC: kill %s, %s\n",
1971 target_pid_to_str (lp->ptid),
1972 status_to_str (lp->status));
1973 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
1974 }
1975 /* Save the sigtrap event. */
1976 lp->status = status;
1977 }
1978 return 0;
1979 }
1980 else
1981 {
1982 /* The thread was stopped with a signal other than
1983 SIGSTOP, and didn't accidentally trip a breakpoint. */
1984
1985 if (debug_linux_nat)
1986 {
1987 fprintf_unfiltered (gdb_stdlog,
1988 "SWC: Pending event %s in %s\n",
1989 status_to_str ((int) status),
1990 target_pid_to_str (lp->ptid));
1991 }
1992 /* Now resume this LWP and get the SIGSTOP event. */
1993 errno = 0;
1994 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1995 if (debug_linux_nat)
1996 fprintf_unfiltered (gdb_stdlog,
1997 "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
1998 target_pid_to_str (lp->ptid),
1999 errno ? safe_strerror (errno) : "OK");
2000
2001 /* Hold this event/waitstatus while we check to see if
2002 there are any more (we still want to get that SIGSTOP). */
2003 stop_wait_callback (lp, data);
2004
2005 /* If the lp->status field is still empty, use it to
2006 hold this event. If not, then this event must be
2007 returned to the event queue of the LWP. */
2008 if (lp->status || target_can_async_p ())
2009 {
2010 if (debug_linux_nat)
2011 {
2012 fprintf_unfiltered (gdb_stdlog,
2013 "SWC: kill %s, %s\n",
2014 target_pid_to_str (lp->ptid),
2015 status_to_str ((int) status));
2016 }
2017 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
2018 }
2019 else
2020 lp->status = status;
2021 return 0;
2022 }
2023 }
2024 else
2025 {
2026 /* We caught the SIGSTOP that we intended to catch, so
2027 there's no SIGSTOP pending. */
2028 lp->stopped = 1;
2029 lp->signalled = 0;
2030 }
2031 }
2032
2033 return 0;
2034 }
2035
2036 /* Check whether PID has any pending signals in FLUSH_MASK. If so,
2037 set the appropriate bits in PENDING and return 1; otherwise return 0. */
2038
2039 static int
2040 linux_nat_has_pending (int pid, sigset_t *pending, sigset_t *flush_mask)
2041 {
2042 sigset_t blocked, ignored;
2043 int i;
2044
2045 linux_proc_pending_signals (pid, pending, &blocked, &ignored);
2046
2047 if (!flush_mask)
2048 return 0;
2049
2050 for (i = 1; i < NSIG; i++)
2051 if (sigismember (pending, i))
2052 if (!sigismember (flush_mask, i)
2053 || sigismember (&blocked, i)
2054 || sigismember (&ignored, i))
2055 sigdelset (pending, i);
2056
2057 if (sigisemptyset (pending))
2058 return 0;
2059
2060 return 1;
2061 }
2062
2063 /* DATA is interpreted as a mask of signals to flush. If LP has
2064 signals pending, and they are all in the flush mask, then arrange
2065 to flush them. LP should be stopped, as should all other threads
2066 it might share a signal queue with. */
2067
2068 static int
2069 flush_callback (struct lwp_info *lp, void *data)
2070 {
2071 sigset_t *flush_mask = data;
2072 sigset_t pending;
2074
2075 /* Normally, when an LWP exits, it is removed from the LWP list. The
2076 last LWP isn't removed till later, however. So if there is only
2077 one LWP on the list, make sure it's alive. */
2078 if (lwp_list == lp && lp->next == NULL)
2079 if (!linux_nat_thread_alive (lp->ptid))
2080 return 0;
2081
2082 /* Just because the LWP is stopped doesn't mean that new signals
2083 can't arrive from outside, so this function must be careful of
2084 race conditions. However, because all threads are stopped, we
2085 can assume that the pending mask will not shrink unless we resume
2086 the LWP, and that it will then get another signal. We can't
2087 control which one, however. */
2088
2089 if (lp->status)
2090 {
2091 if (debug_linux_nat)
2092 fprintf_unfiltered (gdb_stdlog, "FC: LP has pending status %06x\n", lp->status);
2093 if (WIFSTOPPED (lp->status) && sigismember (flush_mask, WSTOPSIG (lp->status)))
2094 lp->status = 0;
2095 }
2096
2097 /* While there is a pending signal we would like to flush, continue
2098 the inferior and collect another signal. But if there's already
2099 a saved status that we don't want to flush, we can't resume the
2100 inferior - if it stopped for some other reason we wouldn't have
2101 anywhere to save the new status. In that case, we must leave the
2102 signal unflushed (and possibly generate an extra SIGINT stop).
2103 That's much less bad than losing a signal. */
2104 while (lp->status == 0
2105 && linux_nat_has_pending (GET_LWP (lp->ptid), &pending, flush_mask))
2106 {
2107 int ret;
2108
2109 errno = 0;
2110 ret = ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2111 if (debug_linux_nat)
2112 fprintf_unfiltered (gdb_stdlog,
2113 "FC: Sent PTRACE_CONT, ret %d %d\n", ret, errno);
2114
2115 lp->stopped = 0;
2116 stop_wait_callback (lp, flush_mask);
2117 if (debug_linux_nat)
2118 fprintf_unfiltered (gdb_stdlog,
2119 "FC: Wait finished; saved status is %d\n",
2120 lp->status);
2121 }
2122
2123 return 0;
2124 }
2125
2126 /* Return non-zero if LP has a wait status pending. */
2127
2128 static int
2129 status_callback (struct lwp_info *lp, void *data)
2130 {
2131 /* Only report a pending wait status if the LWP has been marked as
2132 resumed. */
2133 return (lp->status != 0 && lp->resumed);
2134 }
2135
2136 /* Return non-zero if LP isn't stopped. */
2137
2138 static int
2139 running_callback (struct lwp_info *lp, void *data)
2140 {
2141 return (lp->stopped == 0 || (lp->status != 0 && lp->resumed));
2142 }
2143
2144 /* Count the LWPs that have had events. */
2145
2146 static int
2147 count_events_callback (struct lwp_info *lp, void *data)
2148 {
2149 int *count = data;
2150
2151 gdb_assert (count != NULL);
2152
2153 /* Count only LWPs that have a SIGTRAP event pending. */
2154 if (lp->status != 0
2155 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
2156 (*count)++;
2157
2158 return 0;
2159 }
2160
2161 /* Select the LWP (if any) that is currently being single-stepped. */
2162
2163 static int
2164 select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2165 {
2166 if (lp->step && lp->status != 0)
2167 return 1;
2168 else
2169 return 0;
2170 }
2171
2172 /* Select the Nth LWP that has had a SIGTRAP event. */
2173
2174 static int
2175 select_event_lwp_callback (struct lwp_info *lp, void *data)
2176 {
2177 int *selector = data;
2178
2179 gdb_assert (selector != NULL);
2180
2181 /* Select only LWPs that have a SIGTRAP event pending. */
2182 if (lp->status != 0
2183 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
2184 if ((*selector)-- == 0)
2185 return 1;
2186
2187 return 0;
2188 }
2189
2190 static int
2191 cancel_breakpoint (struct lwp_info *lp)
2192 {
2193 /* Arrange for a breakpoint to be hit again later. We don't keep
2194 the SIGTRAP status and don't forward the SIGTRAP signal to the
2195 LWP. We will handle the current event, eventually we will resume
2196 this LWP, and this breakpoint will trap again.
2197
2198 If we do not do this, then we run the risk that the user will
2199 delete or disable the breakpoint, but the LWP will have already
2200 tripped on it. */
2201
2202 struct regcache *regcache = get_thread_regcache (lp->ptid);
2203 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2204 CORE_ADDR pc;
2205
2206 pc = regcache_read_pc (regcache) - gdbarch_decr_pc_after_break (gdbarch);
2207 if (breakpoint_inserted_here_p (pc))
2208 {
2209 if (debug_linux_nat)
2210 fprintf_unfiltered (gdb_stdlog,
2211 "CB: Push back breakpoint for %s\n",
2212 target_pid_to_str (lp->ptid));
2213
2214 /* Back up the PC if necessary. */
2215 if (gdbarch_decr_pc_after_break (gdbarch))
2216 regcache_write_pc (regcache, pc);
2217
2218 return 1;
2219 }
2220 return 0;
2221 }
2222
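/* The PC rewind in cancel_breakpoint exists because on some
   architectures (x86 is the classic case) a breakpoint trap reports a
   PC just past the breakpoint instruction.  A worked example with a
   hypothetical one-byte breakpoint like x86's int3 (illustrative
   only):  */
#if 0 /* Illustrative sketch; not built.  */
static void
sketch_breakpoint_pc (void)
{
  CORE_ADDR reported_pc = 0x8048501;	/* Trap PC, one past the int3.  */
  int decr_pc = 1;			/* gdbarch_decr_pc_after_break.  */
  CORE_ADDR breakpoint_pc = reported_pc - decr_pc;	/* 0x8048500.  */

  (void) breakpoint_pc;		/* This is where the breakpoint lives.  */
}
#endif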
2223 static int
2224 cancel_breakpoints_callback (struct lwp_info *lp, void *data)
2225 {
2226 struct lwp_info *event_lp = data;
2227
2228 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2229 if (lp == event_lp)
2230 return 0;
2231
2232 /* If a LWP other than the LWP that we're reporting an event for has
2233 hit a GDB breakpoint (as opposed to some random trap signal),
2234 then just arrange for it to hit it again later. We don't keep
2235 the SIGTRAP status and don't forward the SIGTRAP signal to the
2236 LWP. We will handle the current event, eventually we will resume
2237 all LWPs, and this one will get its breakpoint trap again.
2238
2239 If we do not do this, then we run the risk that the user will
2240 delete or disable the breakpoint, but the LWP will have already
2241 tripped on it. */
2242
2243 if (lp->status != 0
2244 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP
2245 && cancel_breakpoint (lp))
2246 /* Throw away the SIGTRAP. */
2247 lp->status = 0;
2248
2249 return 0;
2250 }
2251
2252 /* Select one LWP out of those that have events pending. */
2253
2254 static void
2255 select_event_lwp (struct lwp_info **orig_lp, int *status)
2256 {
2257 int num_events = 0;
2258 int random_selector;
2259 struct lwp_info *event_lp;
2260
2261 /* Record the wait status for the original LWP. */
2262 (*orig_lp)->status = *status;
2263
2264 /* Give preference to any LWP that is being single-stepped. */
2265 event_lp = iterate_over_lwps (select_singlestep_lwp_callback, NULL);
2266 if (event_lp != NULL)
2267 {
2268 if (debug_linux_nat)
2269 fprintf_unfiltered (gdb_stdlog,
2270 "SEL: Select single-step %s\n",
2271 target_pid_to_str (event_lp->ptid));
2272 }
2273 else
2274 {
2275 /* No single-stepping LWP. Select one at random, out of those
2276 which have had SIGTRAP events. */
2277
2278 /* First see how many SIGTRAP events we have. */
2279 iterate_over_lwps (count_events_callback, &num_events);
2280
2281 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2282 random_selector = (int)
2283 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2284
2285 if (debug_linux_nat && num_events > 1)
2286 fprintf_unfiltered (gdb_stdlog,
2287 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2288 num_events, random_selector);
2289
2290 event_lp = iterate_over_lwps (select_event_lwp_callback,
2291 &random_selector);
2292 }
2293
2294 if (event_lp != NULL)
2295 {
2296 /* Switch the event LWP. */
2297 *orig_lp = event_lp;
2298 *status = event_lp->status;
2299 }
2300
2301 /* Flush the wait status for the event LWP. */
2302 (*orig_lp)->status = 0;
2303 }
2304
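/* The random_selector expression above maps rand () uniformly onto
   [0, num_events), avoiding the modulo bias of rand () % num_events,
   because rand () / (RAND_MAX + 1.0) is uniform in [0, 1).  A
   self-contained demo (illustrative only):  */
#if 0 /* Illustrative sketch; not built.  */
#include <stdio.h>
#include <stdlib.h>

int
main (void)
{
  int num_events = 3, i, hist[3] = { 0, 0, 0 };

  for (i = 0; i < 30000; i++)
    hist[(int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0))]++;

  /* Each bucket should come out near 10000.  */
  printf ("%d %d %d\n", hist[0], hist[1], hist[2]);
  return 0;
}
#endif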
2305 /* Return non-zero if LP has been resumed. */
2306
2307 static int
2308 resumed_callback (struct lwp_info *lp, void *data)
2309 {
2310 return lp->resumed;
2311 }
2312
2313 /* Stop an active thread, verify it still exists, then resume it. */
2314
2315 static int
2316 stop_and_resume_callback (struct lwp_info *lp, void *data)
2317 {
2318 struct lwp_info *ptr;
2319
2320 if (!lp->stopped && !lp->signalled)
2321 {
2322 stop_callback (lp, NULL);
2323 stop_wait_callback (lp, NULL);
2324 /* Resume if the lwp still exists. */
2325 for (ptr = lwp_list; ptr; ptr = ptr->next)
2326 if (lp == ptr)
2327 {
2328 resume_callback (lp, NULL);
2329 resume_set_callback (lp, NULL);
2330 }
2331 }
2332 return 0;
2333 }
2334
2335 /* Check if we should go on and pass this event to common code.
2336 Return the affected lwp if we are, or NULL otherwise. */
2337 static struct lwp_info *
2338 linux_nat_filter_event (int lwpid, int status, int options)
2339 {
2340 struct lwp_info *lp;
2341
2342 lp = find_lwp_pid (pid_to_ptid (lwpid));
2343
2344 /* Check for stop events reported by a process we didn't already
2345 know about - anything not already in our LWP list.
2346
2347 If we're expecting to receive stopped processes after
2348 fork, vfork, and clone events, then we'll just add the
2349 new one to our list and go back to waiting for the event
2350 to be reported - the stopped process might be returned
2351 from waitpid before or after the event is. */
2352 if (WIFSTOPPED (status) && !lp)
2353 {
2354 linux_record_stopped_pid (lwpid, status);
2355 return NULL;
2356 }
2357
2358 /* Make sure we don't report an event for the exit of an LWP not in
2359 our list, i.e. not part of the current process. This can happen
2360 if we detach from a program we originally forked and then it
2361 exits. */
2362 if (!WIFSTOPPED (status) && !lp)
2363 return NULL;
2364
2365 /* NOTE drow/2003-06-17: This code seems to be meant for debugging
2366 CLONE_PTRACE processes which do not use the thread library -
2367 otherwise we wouldn't find the new LWP this way. That doesn't
2368 currently work, and the following code is currently unreachable
2369 due to the two blocks above. If it's fixed some day, this code
2370 should be broken out into a function so that we can also pick up
2371 LWPs from the new interface. */
2372 if (!lp)
2373 {
2374 lp = add_lwp (BUILD_LWP (lwpid, GET_PID (inferior_ptid)));
2375 if (options & __WCLONE)
2376 lp->cloned = 1;
2377
2378 gdb_assert (WIFSTOPPED (status)
2379 && WSTOPSIG (status) == SIGSTOP);
2380 lp->signalled = 1;
2381
2382 if (!in_thread_list (inferior_ptid))
2383 {
2384 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
2385 GET_PID (inferior_ptid));
2386 add_thread (inferior_ptid);
2387 }
2388
2389 add_thread (lp->ptid);
2390 }
2391
2392 /* Save the trap's siginfo in case we need it later. */
2393 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
2394 save_siginfo (lp);
2395
2396 /* Handle GNU/Linux's extended waitstatus for trace events. */
2397 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2398 {
2399 if (debug_linux_nat)
2400 fprintf_unfiltered (gdb_stdlog,
2401 "LLW: Handling extended status 0x%06x\n",
2402 status);
2403 if (linux_handle_extended_wait (lp, status, 0))
2404 return NULL;
2405 }
2406
2407 /* Check if the thread has exited. */
2408 if ((WIFEXITED (status) || WIFSIGNALED (status)) && num_lwps > 1)
2409 {
2410 /* If this is the main thread, we must stop all threads and
2411 verify whether they are still alive. This is because in the NPTL
2412 thread model, there is no signal issued for exiting LWPs
2413 other than the main thread. We only get the main thread exit
2414 signal once all child threads have already exited. If we
2415 stop all the threads and use the stop_wait_callback to check
2416 if they have exited we can determine whether this signal
2417 should be ignored or whether it means the end of the debugged
2418 application, regardless of which threading model is being
2419 used. */
2420 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
2421 {
2422 lp->stopped = 1;
2423 iterate_over_lwps (stop_and_resume_callback, NULL);
2424 }
2425
2426 if (debug_linux_nat)
2427 fprintf_unfiltered (gdb_stdlog,
2428 "LLW: %s exited.\n",
2429 target_pid_to_str (lp->ptid));
2430
2431 exit_lwp (lp);
2432
2433 /* If there is at least one more LWP, then the exit signal was
2434 not the end of the debugged application and should be
2435 ignored. */
2436 if (num_lwps > 0)
2437 {
2438 /* Make sure there is at least one thread running. */
2439 gdb_assert (iterate_over_lwps (running_callback, NULL));
2440
2441 /* Discard the event. */
2442 return NULL;
2443 }
2444 }
2445
2446 /* Check if the current LWP has previously exited. In the NPTL
2447 thread model, LWPs other than the main thread do not issue
2448 signals when they exit so we must check whenever the thread has
2449 stopped. A similar check is made in stop_wait_callback(). */
2450 if (num_lwps > 1 && !linux_nat_thread_alive (lp->ptid))
2451 {
2452 if (debug_linux_nat)
2453 fprintf_unfiltered (gdb_stdlog,
2454 "LLW: %s exited.\n",
2455 target_pid_to_str (lp->ptid));
2456
2457 exit_lwp (lp);
2458
2459 /* Make sure there is at least one thread running. */
2460 gdb_assert (iterate_over_lwps (running_callback, NULL));
2461
2462 /* Discard the event. */
2463 return NULL;
2464 }
2465
2466 /* Make sure we don't report a SIGSTOP that we sent ourselves in
2467 an attempt to stop an LWP. */
2468 if (lp->signalled
2469 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
2470 {
2471 if (debug_linux_nat)
2472 fprintf_unfiltered (gdb_stdlog,
2473 "LLW: Delayed SIGSTOP caught for %s.\n",
2474 target_pid_to_str (lp->ptid));
2475
2476 /* This is a delayed SIGSTOP. */
2477 lp->signalled = 0;
2478
2479 registers_changed ();
2480
2481 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2482 lp->step, TARGET_SIGNAL_0);
2483 if (debug_linux_nat)
2484 fprintf_unfiltered (gdb_stdlog,
2485 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
2486 lp->step ?
2487 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2488 target_pid_to_str (lp->ptid));
2489
2490 lp->stopped = 0;
2491 gdb_assert (lp->resumed);
2492
2493 /* Discard the event. */
2494 return NULL;
2495 }
2496
2497 /* An interesting event. */
2498 gdb_assert (lp);
2499 return lp;
2500 }
2501
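/* linux_nat_filter_event dissects raw waitpid statuses with the W*
   macros; every status falls into exactly one of the three cases
   below.  A decoder in the spirit of status_to_str (defined earlier
   in this file; the real one formats differently) might read:  */
#if 0 /* Illustrative sketch; not built.  */
#include <stdio.h>
#include <sys/wait.h>

static const char *
sketch_status_to_str (int status, char *buf, size_t len)
{
  if (WIFSTOPPED (status))
    snprintf (buf, len, "stopped by signal %d", WSTOPSIG (status));
  else if (WIFEXITED (status))
    snprintf (buf, len, "exited with code %d", WEXITSTATUS (status));
  else
    snprintf (buf, len, "killed by signal %d", WTERMSIG (status));

  return buf;
}
#endif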
2502 /* Get the events stored in the pipe into the local queue, so they are
2503 accessible to queued_waitpid. We need to do this, since it is not
2504 always the case that the event at the head of the pipe is the event
2505 we want. */
2506
2507 static void
2508 pipe_to_local_event_queue (void)
2509 {
2510 if (debug_linux_nat_async)
2511 fprintf_unfiltered (gdb_stdlog,
2512 "PTLEQ: linux_nat_num_queued_events(%d)\n",
2513 linux_nat_num_queued_events);
2514 while (linux_nat_num_queued_events)
2515 {
2516 int lwpid, status, options;
2517 lwpid = linux_nat_event_pipe_pop (&status, &options);
2518 gdb_assert (lwpid > 0);
2519 push_waitpid (lwpid, status, options);
2520 }
2521 }
2522
2523 /* Get the unprocessed events stored in the local queue back into the
2524 pipe, so the event loop realizes there's something else to
2525 process. */
2526
2527 static void
2528 local_event_queue_to_pipe (void)
2529 {
2530 struct waitpid_result *w = waitpid_queue;
2531 while (w)
2532 {
2533 struct waitpid_result *next = w->next;
2534 linux_nat_event_pipe_push (w->pid,
2535 w->status,
2536 w->options);
2537 xfree (w);
2538 w = next;
2539 }
2540 waitpid_queue = NULL;
2541
2542 if (debug_linux_nat_async)
2543 fprintf_unfiltered (gdb_stdlog,
2544 "LEQTP: linux_nat_num_queued_events(%d)\n",
2545 linux_nat_num_queued_events);
2546 }
2547
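/* The two helpers above assume an event pipe: waitpid triples are
   written to a pipe whose read end is registered with GDB's event
   loop, so target activity wakes the loop even while GDB is reading
   user input.  A minimal sketch of such a push/pop pair over a
   fixed-size record (hypothetical names; the real pipe code lives
   elsewhere in this file):  */
#if 0 /* Illustrative sketch; not built.  */
#include <unistd.h>

struct sketch_event { int pid; int status; int options; };

static int sketch_pipe[2];	/* Created once with pipe ().  */

static void
sketch_event_push (int pid, int status, int options)
{
  struct sketch_event ev;

  ev.pid = pid;
  ev.status = status;
  ev.options = options;
  /* A short write would lose an event; ignoring the result is
     acceptable only in a sketch.  */
  (void) write (sketch_pipe[1], &ev, sizeof (ev));
}

static int
sketch_event_pop (int *status, int *options)
{
  struct sketch_event ev;

  if (read (sketch_pipe[0], &ev, sizeof (ev)) != sizeof (ev))
    return -1;
  *status = ev.status;
  *options = ev.options;
  return ev.pid;
}
#endif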
2548 static ptid_t
2549 linux_nat_wait (ptid_t ptid, struct target_waitstatus *ourstatus)
2550 {
2551 struct lwp_info *lp = NULL;
2552 int options = 0;
2553 int status = 0;
2554 pid_t pid = PIDGET (ptid);
2555 sigset_t flush_mask;
2556
2557 if (debug_linux_nat_async)
2558 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
2559
2560 /* The first time we get here after starting a new inferior, we may
2561 not have added it to the LWP list yet - this is the earliest
2562 moment at which we know its PID. */
2563 if (num_lwps == 0)
2564 {
2565 gdb_assert (!is_lwp (inferior_ptid));
2566
2567 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
2568 GET_PID (inferior_ptid));
2569 lp = add_lwp (inferior_ptid);
2570 lp->resumed = 1;
2571 /* Add the main thread to GDB's thread list. */
2572 add_thread_silent (lp->ptid);
2573 }
2574
2575 sigemptyset (&flush_mask);
2576
2577 if (target_can_async_p ())
2578 /* Block events while we're here. */
2579 target_async (NULL, 0);
2580
2581 retry:
2582
2583 /* Make sure there is at least one LWP that has been resumed. */
2584 gdb_assert (iterate_over_lwps (resumed_callback, NULL));
2585
2586 /* First check if there is a LWP with a wait status pending. */
2587 if (pid == -1)
2588 {
2589 /* Any LWP that's been resumed will do. */
2590 lp = iterate_over_lwps (status_callback, NULL);
2591 if (lp)
2592 {
2593 if (target_can_async_p ())
2594 internal_error (__FILE__, __LINE__,
2595 "Found an LWP with a pending status in async mode.");
2596
2597 status = lp->status;
2598 lp->status = 0;
2599
2600 if (debug_linux_nat && status)
2601 fprintf_unfiltered (gdb_stdlog,
2602 "LLW: Using pending wait status %s for %s.\n",
2603 status_to_str (status),
2604 target_pid_to_str (lp->ptid));
2605 }
2606
2607 /* But if we don't find one, we'll have to wait, and check both
2608 cloned and uncloned processes. We start with the cloned
2609 processes. */
2610 options = __WCLONE | WNOHANG;
2611 }
2612 else if (is_lwp (ptid))
2613 {
2614 if (debug_linux_nat)
2615 fprintf_unfiltered (gdb_stdlog,
2616 "LLW: Waiting for specific LWP %s.\n",
2617 target_pid_to_str (ptid));
2618
2619 /* We have a specific LWP to check. */
2620 lp = find_lwp_pid (ptid);
2621 gdb_assert (lp);
2622 status = lp->status;
2623 lp->status = 0;
2624
2625 if (debug_linux_nat && status)
2626 fprintf_unfiltered (gdb_stdlog,
2627 "LLW: Using pending wait status %s for %s.\n",
2628 status_to_str (status),
2629 target_pid_to_str (lp->ptid));
2630
2631 /* If we have to wait, take into account whether PID is a cloned
2632 process or not. And we have to convert it to something that
2633 the layer beneath us can understand. */
2634 options = lp->cloned ? __WCLONE : 0;
2635 pid = GET_LWP (ptid);
2636 }
2637
2638 if (status && lp->signalled)
2639 {
2640 /* A pending SIGSTOP may interfere with the normal stream of
2641 events. In a typical case where interference is a problem,
2642 we have a SIGSTOP signal pending for LWP A while
2643 single-stepping it, encounter an event in LWP B, and take the
2644 pending SIGSTOP while trying to stop LWP A. After processing
2645 the event in LWP B, LWP A is continued, and we'll never see
2646 the SIGTRAP associated with the last time we were
2647 single-stepping LWP A. */
2648
2649 /* Resume the thread. It should halt immediately, returning the
2650 pending SIGSTOP. */
2651 registers_changed ();
2652 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2653 lp->step, TARGET_SIGNAL_0);
2654 if (debug_linux_nat)
2655 fprintf_unfiltered (gdb_stdlog,
2656 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
2657 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2658 target_pid_to_str (lp->ptid));
2659 lp->stopped = 0;
2660 gdb_assert (lp->resumed);
2661
2662 /* This should catch the pending SIGSTOP. */
2663 stop_wait_callback (lp, NULL);
2664 }
2665
2666 if (!target_can_async_p ())
2667 {
2668 /* Causes SIGINT to be passed on to the attached process. */
2669 set_sigint_trap ();
2670 set_sigio_trap ();
2671 }
2672
2673 while (status == 0)
2674 {
2675 pid_t lwpid;
2676
2677 if (target_can_async_p ())
2678 /* In async mode, don't ever block. Only look at the locally
2679 queued events. */
2680 lwpid = queued_waitpid (pid, &status, options);
2681 else
2682 lwpid = my_waitpid (pid, &status, options);
2683
2684 if (lwpid > 0)
2685 {
2686 gdb_assert (pid == -1 || lwpid == pid);
2687
2688 if (debug_linux_nat)
2689 {
2690 fprintf_unfiltered (gdb_stdlog,
2691 "LLW: waitpid %ld received %s\n",
2692 (long) lwpid, status_to_str (status));
2693 }
2694
2695 lp = linux_nat_filter_event (lwpid, status, options);
2696 if (!lp)
2697 {
2698 /* A discarded event. */
2699 status = 0;
2700 continue;
2701 }
2702
2703 break;
2704 }
2705
2706 if (pid == -1)
2707 {
2708 /* Alternate between checking cloned and uncloned processes. */
2709 options ^= __WCLONE;
2710
2711 /* And every time we have checked both:
2712 In async mode, return to event loop;
2713 In sync mode, suspend waiting for a SIGCHLD signal. */
2714 if (options & __WCLONE)
2715 {
2716 if (target_can_async_p ())
2717 {
2718 /* No interesting event. */
2719 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2720
2721 /* Get ready for the next event. */
2722 target_async (inferior_event_handler, 0);
2723
2724 if (debug_linux_nat_async)
2725 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
2726
2727 return minus_one_ptid;
2728 }
2729
2730 sigsuspend (&suspend_mask);
2731 }
2732 }
2733
2734 /* We shouldn't end up here unless we want to try again. */
2735 gdb_assert (status == 0);
2736 }
2737
2738 if (!target_can_async_p ())
2739 {
2740 clear_sigio_trap ();
2741 clear_sigint_trap ();
2742 }
2743
2744 gdb_assert (lp);
2745
2746 /* Don't report signals that GDB isn't interested in, such as
2747 signals that are neither printed nor stopped upon. Stopping all
2748 threads can be a bit time-consuming so if we want decent
2749 performance with heavily multi-threaded programs, especially when
2750 they're using a high frequency timer, we'd better avoid it if we
2751 can. */
2752
2753 if (WIFSTOPPED (status))
2754 {
2755 int signo = target_signal_from_host (WSTOPSIG (status));
2756
2757 /* If we get a signal while single-stepping, we may need special
2758 care, e.g. to skip the signal handler. Defer to common code. */
2759 if (!lp->step
2760 && signal_stop_state (signo) == 0
2761 && signal_print_state (signo) == 0
2762 && signal_pass_state (signo) == 1)
2763 {
2764 /* FIXME: kettenis/2001-06-06: Should we resume all threads
2765 here? It is not clear we should. GDB may not expect
2766 other threads to run. On the other hand, not resuming
2767 newly attached threads may cause an unwanted delay in
2768 getting them running. */
2769 registers_changed ();
2770 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2771 lp->step, signo);
2772 if (debug_linux_nat)
2773 fprintf_unfiltered (gdb_stdlog,
2774 "LLW: %s %s, %s (preempt 'handle')\n",
2775 lp->step ?
2776 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2777 target_pid_to_str (lp->ptid),
2778 signo ? strsignal (signo) : "0");
2779 lp->stopped = 0;
2780 status = 0;
2781 goto retry;
2782 }
2783
2784 if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
2785 {
2786 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
2787 forwarded to the entire process group, that is, all LWPs
2788 will receive it. Since we only want to report it once,
2789 we try to flush it from all LWPs except this one. */
2790 sigaddset (&flush_mask, SIGINT);
2791 }
2792 }
2793
2794 /* This LWP is stopped now. */
2795 lp->stopped = 1;
2796
2797 if (debug_linux_nat)
2798 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
2799 status_to_str (status), target_pid_to_str (lp->ptid));
2800
2801 /* Now stop all other LWPs ... */
2802 iterate_over_lwps (stop_callback, NULL);
2803
2804 /* ... and wait until all of them have reported back that they're no
2805 longer running. */
2806 iterate_over_lwps (stop_wait_callback, &flush_mask);
2807 iterate_over_lwps (flush_callback, &flush_mask);
2808
2809 /* If we're not waiting for a specific LWP, choose an event LWP from
2810 among those that have had events. Giving equal priority to all
2811 LWPs that have had events helps prevent starvation. */
2812 if (pid == -1)
2813 select_event_lwp (&lp, &status);
2814
2815 /* Now that we've selected our final event LWP, cancel any
2816 breakpoints in other LWPs that have hit a GDB breakpoint. See
2817 the comment in cancel_breakpoints_callback to find out why. */
2818 iterate_over_lwps (cancel_breakpoints_callback, lp);
2819
2820 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
2821 {
2822 if (debug_linux_nat)
2823 fprintf_unfiltered (gdb_stdlog,
2824 "LLW: trap ptid is %s.\n",
2825 target_pid_to_str (lp->ptid));
2826 }
2827
2828 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2829 {
2830 *ourstatus = lp->waitstatus;
2831 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
2832 }
2833 else
2834 store_waitstatus (ourstatus, status);
2835
2836 /* Get ready for the next event. */
2837 if (target_can_async_p ())
2838 target_async (inferior_event_handler, 0);
2839
2840 if (debug_linux_nat_async)
2841 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
2842
2843 return lp->ptid;
2844 }
2845
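/* The sigsuspend (&suspend_mask) in the loop above is the classic
   race-free wait for SIGCHLD: the signal stays blocked while waitpid
   (..., WNOHANG) polls, and sigsuspend atomically unblocks it and
   sleeps, so a SIGCHLD delivered between the poll and the sleep is
   never lost.  Note that sigsuspend only returns once a signal with a
   handler is delivered, which is why this file installs
   sigchld_handler even though the handler does nothing.  The pattern
   in isolation (illustrative sketch; the real masks are set up
   elsewhere in this file):  */
#if 0 /* Illustrative sketch; not built.  */
#include <signal.h>
#include <sys/wait.h>

static void
sketch_wait_for_sigchld (void)
{
  sigset_t block, suspend;

  sigemptyset (&block);
  sigaddset (&block, SIGCHLD);
  sigprocmask (SIG_BLOCK, &block, &suspend);	/* Save the old mask.  */
  sigdelset (&suspend, SIGCHLD);	/* SIGCHLD unblocked while asleep.  */

  /* Poll without blocking; sleep until some handled signal arrives.  */
  while (waitpid (-1, NULL, WNOHANG) == 0)
    sigsuspend (&suspend);

  sigprocmask (SIG_UNBLOCK, &block, NULL);
}
#endif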
2846 static int
2847 kill_callback (struct lwp_info *lp, void *data)
2848 {
2849 errno = 0;
2850 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
2851 if (debug_linux_nat)
2852 fprintf_unfiltered (gdb_stdlog,
2853 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
2854 target_pid_to_str (lp->ptid),
2855 errno ? safe_strerror (errno) : "OK");
2856
2857 return 0;
2858 }
2859
2860 static int
2861 kill_wait_callback (struct lwp_info *lp, void *data)
2862 {
2863 pid_t pid;
2864
2865 /* We must make sure that there are no pending events (delayed
2866 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
2867 program doesn't interfere with any following debugging session. */
2868
2869 /* For cloned processes we must check both with __WCLONE and
2870 without, since the exit status of a cloned process isn't reported
2871 with __WCLONE. */
2872 if (lp->cloned)
2873 {
2874 do
2875 {
2876 pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
2877 if (pid != (pid_t) -1)
2878 {
2879 if (debug_linux_nat)
2880 fprintf_unfiltered (gdb_stdlog,
2881 "KWC: wait %s received unknown.\n",
2882 target_pid_to_str (lp->ptid));
2883 /* The Linux kernel sometimes fails to kill a thread
2884 completely after PTRACE_KILL; the thread goes from the stop
2885 point in do_fork out to the one in
2886 get_signal_to_deliver and waits again. So kill it
2887 again. */
2888 kill_callback (lp, NULL);
2889 }
2890 }
2891 while (pid == GET_LWP (lp->ptid));
2892
2893 gdb_assert (pid == -1 && errno == ECHILD);
2894 }
2895
2896 do
2897 {
2898 pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
2899 if (pid != (pid_t) -1)
2900 {
2901 if (debug_linux_nat)
2902 fprintf_unfiltered (gdb_stdlog,
2903 "KWC: wait %s received unk.\n",
2904 target_pid_to_str (lp->ptid));
2905 /* See the call to kill_callback above. */
2906 kill_callback (lp, NULL);
2907 }
2908 }
2909 while (pid == GET_LWP (lp->ptid));
2910
2911 gdb_assert (pid == -1 && errno == ECHILD);
2912 return 0;
2913 }
2914
2915 static void
2916 linux_nat_kill (void)
2917 {
2918 struct target_waitstatus last;
2919 ptid_t last_ptid;
2920 int status;
2921
2922 if (target_can_async_p ())
2923 target_async (NULL, 0);
2924
2925 /* If we're stopped while forking and we haven't followed yet,
2926 kill the other task. We need to do this first because the
2927 parent will be sleeping if this is a vfork. */
2928
2929 get_last_target_status (&last_ptid, &last);
2930
2931 if (last.kind == TARGET_WAITKIND_FORKED
2932 || last.kind == TARGET_WAITKIND_VFORKED)
2933 {
2934 ptrace (PTRACE_KILL, last.value.related_pid, 0, 0);
2935 wait (&status);
2936 }
2937
2938 if (forks_exist_p ())
2939 {
2940 linux_fork_killall ();
2941 drain_queued_events (-1);
2942 }
2943 else
2944 {
2945 /* Kill all LWPs ... */
2946 iterate_over_lwps (kill_callback, NULL);
2947
2948 /* ... and wait until we've flushed all events. */
2949 iterate_over_lwps (kill_wait_callback, NULL);
2950 }
2951
2952 target_mourn_inferior ();
2953 }
2954
2955 static void
2956 linux_nat_mourn_inferior (void)
2957 {
2958 /* Destroy LWP info; it's no longer valid. */
2959 init_lwp_list ();
2960
2961 if (! forks_exist_p ())
2962 {
2963 /* Normal case, no other forks available. */
2964 if (target_can_async_p ())
2965 linux_nat_async (NULL, 0);
2966 linux_ops->to_mourn_inferior ();
2967 }
2968 else
2969 /* Multi-fork case. The current inferior_ptid has exited, but
2970 there are other viable forks to debug. Delete the exiting
2971 one and context-switch to the first available. */
2972 linux_fork_mourn_inferior ();
2973 }
2974
2975 static LONGEST
2976 linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
2977 const char *annex, gdb_byte *readbuf,
2978 const gdb_byte *writebuf,
2979 ULONGEST offset, LONGEST len)
2980 {
2981 struct cleanup *old_chain = save_inferior_ptid ();
2982 LONGEST xfer;
2983
2984 if (is_lwp (inferior_ptid))
2985 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
2986
2987 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
2988 offset, len);
2989
2990 do_cleanups (old_chain);
2991 return xfer;
2992 }
2993
2994 static int
2995 linux_nat_thread_alive (ptid_t ptid)
2996 {
2997 gdb_assert (is_lwp (ptid));
2998
2999 errno = 0;
3000 ptrace (PTRACE_PEEKUSER, GET_LWP (ptid), 0, 0);
3001 if (debug_linux_nat)
3002 fprintf_unfiltered (gdb_stdlog,
3003 "LLTA: PTRACE_PEEKUSER %s, 0, 0 (%s)\n",
3004 target_pid_to_str (ptid),
3005 errno ? safe_strerror (errno) : "OK");
3006
3007 /* Not every Linux kernel implements PTRACE_PEEKUSER. But we can
3008 handle that case gracefully since ptrace will first do a lookup
3009 for the process based upon the passed-in pid. If that fails we
3010 will get ESRCH or EPERM in errno; otherwise the child exists and
3011 is alive. */
3012 if (errno == ESRCH || errno == EPERM)
3013 return 0;
3014
3015 return 1;
3016 }
3017
3018 static char *
3019 linux_nat_pid_to_str (ptid_t ptid)
3020 {
3021 static char buf[64];
3022
3023 if (is_lwp (ptid)
3024 && ((lwp_list && lwp_list->next)
3025 || GET_PID (ptid) != GET_LWP (ptid)))
3026 {
3027 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
3028 return buf;
3029 }
3030
3031 return normal_pid_to_str (ptid);
3032 }
3033
3034 static void
3035 sigchld_handler (int signo)
3036 {
3037 if (linux_nat_async_enabled
3038 && linux_nat_async_events_enabled
3039 && signo == SIGCHLD)
3040 /* It is *always* a bug to hit this. */
3041 internal_error (__FILE__, __LINE__,
3042 "sigchld_handler called when async events are enabled");
3043
3044 /* Do nothing. The only reason for this handler is that it allows
3045 us to use sigsuspend in linux_nat_wait above to wait for the
3046 arrival of a SIGCHLD. */
3047 }
3048
3049 /* Accepts an integer PID; returns a string representing a file that
3050 can be opened to get the symbols for the child process. */
3051
3052 static char *
3053 linux_child_pid_to_exec_file (int pid)
3054 {
3055 char *name1, *name2;
3056
3057 name1 = xmalloc (MAXPATHLEN);
3058 name2 = xmalloc (MAXPATHLEN);
3059 make_cleanup (xfree, name1);
3060 make_cleanup (xfree, name2);
3061 memset (name2, 0, MAXPATHLEN);
3062
3063 sprintf (name1, "/proc/%d/exe", pid);
3064 if (readlink (name1, name2, MAXPATHLEN - 1) > 0)
3065 return name2;
3066 else
3067 return name1;
3068 }
3069
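/* readlink does not NUL-terminate its result, which is why name2 is
   zeroed above before the call.  A self-contained wrapper making that
   contract explicit (illustrative only):  */
#if 0 /* Illustrative sketch; not built.  */
#include <unistd.h>

static int
sketch_readlink_z (const char *path, char *buf, size_t len)
{
  ssize_t n = readlink (path, buf, len - 1);

  if (n < 0)
    return -1;
  buf[n] = '\0';	/* readlink leaves termination to the caller.  */
  return 0;
}
#endif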
3070 /* Service function for corefiles and info proc. */
3071
3072 static int
3073 read_mapping (FILE *mapfile,
3074 long long *addr,
3075 long long *endaddr,
3076 char *permissions,
3077 long long *offset,
3078 char *device, long long *inode, char *filename)
3079 {
3080 int ret = fscanf (mapfile, "%llx-%llx %s %llx %s %llx",
3081 addr, endaddr, permissions, offset, device, inode);
3082
3083 filename[0] = '\0';
3084 if (ret > 0 && ret != EOF)
3085 {
3086 /* Eat everything up to EOL for the filename. This will prevent
3087 weird filenames (such as one with embedded whitespace) from
3088 confusing this code. It also makes this code more robust in
3089 respect to annotations the kernel may add after the filename.
3090
3091 Note the filename is used for informational purposes
3092 only. */
3093 ret += fscanf (mapfile, "%[^\n]\n", filename);
3094 }
3095
3096 return (ret != 0 && ret != EOF);
3097 }
3098
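/* For reference, a typical /proc/PID/maps line that the fscanf format
   above matches (layout as of 2.6-era kernels; newer kernels may
   append fields, which the %[^\n] slurp tolerates):

   08048000-0804c000 r-xp 00000000 08:01 285465    /bin/cat

   parses as addr = 0x08048000, endaddr = 0x0804c000, permissions =
   "r-xp", offset = 0, device = "08:01", inode = 285465 and filename =
   "/bin/cat".  */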
3099 /* Fills the "to_find_memory_regions" target vector. Lists the memory
3100 regions in the inferior for a corefile. */
3101
3102 static int
3103 linux_nat_find_memory_regions (int (*func) (CORE_ADDR,
3104 unsigned long,
3105 int, int, int, void *), void *obfd)
3106 {
3107 long long pid = PIDGET (inferior_ptid);
3108 char mapsfilename[MAXPATHLEN];
3109 FILE *mapsfile;
3110 long long addr, endaddr, size, offset, inode;
3111 char permissions[8], device[8], filename[MAXPATHLEN];
3112 int read, write, exec;
3113 int ret;
3114
3115 /* Compose the filename for the /proc memory map, and open it. */
3116 sprintf (mapsfilename, "/proc/%lld/maps", pid);
3117 if ((mapsfile = fopen (mapsfilename, "r")) == NULL)
3118 error (_("Could not open %s."), mapsfilename);
3119
3120 if (info_verbose)
3121 fprintf_filtered (gdb_stdout,
3122 "Reading memory regions from %s\n", mapsfilename);
3123
3124 /* Now iterate until end-of-file. */
3125 while (read_mapping (mapsfile, &addr, &endaddr, &permissions[0],
3126 &offset, &device[0], &inode, &filename[0]))
3127 {
3128 size = endaddr - addr;
3129
3130 /* Get the segment's permissions. */
3131 read = (strchr (permissions, 'r') != 0);
3132 write = (strchr (permissions, 'w') != 0);
3133 exec = (strchr (permissions, 'x') != 0);
3134
3135 if (info_verbose)
3136 {
3137 fprintf_filtered (gdb_stdout,
3138 "Save segment, %lld bytes at 0x%s (%c%c%c)",
3139 size, paddr_nz (addr),
3140 read ? 'r' : ' ',
3141 write ? 'w' : ' ', exec ? 'x' : ' ');
3142 if (filename[0])
3143 fprintf_filtered (gdb_stdout, " for %s", filename);
3144 fprintf_filtered (gdb_stdout, "\n");
3145 }
3146
3147 /* Invoke the callback function to create the corefile
3148 segment. */
3149 func (addr, size, read, write, exec, obfd);
3150 }
3151 fclose (mapsfile);
3152 return 0;
3153 }
3154
3155 /* Records the thread's register state for the corefile note
3156 section. */
3157
3158 static char *
3159 linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid,
3160 char *note_data, int *note_size)
3161 {
3162 gdb_gregset_t gregs;
3163 gdb_fpregset_t fpregs;
3164 unsigned long lwp = ptid_get_lwp (ptid);
3165 struct regcache *regcache = get_thread_regcache (ptid);
3166 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3167 const struct regset *regset;
3168 int core_regset_p;
3169 struct cleanup *old_chain;
3170 struct core_regset_section *sect_list;
3171 char *gdb_regset;
3172
3173 old_chain = save_inferior_ptid ();
3174 inferior_ptid = ptid;
3175 target_fetch_registers (regcache, -1);
3176 do_cleanups (old_chain);
3177
3178 core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
3179 sect_list = gdbarch_core_regset_sections (gdbarch);
3180
3181 if (core_regset_p
3182 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
3183 sizeof (gregs))) != NULL
3184 && regset->collect_regset != NULL)
3185 regset->collect_regset (regset, regcache, -1,
3186 &gregs, sizeof (gregs));
3187 else
3188 fill_gregset (regcache, &gregs, -1);
3189
3190 note_data = (char *) elfcore_write_prstatus (obfd,
3191 note_data,
3192 note_size,
3193 lwp,
3194 stop_signal, &gregs);
3195
3196 /* The loop below uses the new struct core_regset_section, which stores
3197 the supported section names and sizes for the core file. Note that
3198 note PRSTATUS needs to be treated specially. But the other notes are
3199 structurally the same, so they can benefit from the new struct. */
3200 if (core_regset_p && sect_list != NULL)
3201 while (sect_list->sect_name != NULL)
3202 {
3203 /* .reg was already handled above. */
3204 if (strcmp (sect_list->sect_name, ".reg") == 0)
3205 {
3206 sect_list++;
3207 continue;
3208 }
3209 regset = gdbarch_regset_from_core_section (gdbarch,
3210 sect_list->sect_name,
3211 sect_list->size);
3212 gdb_assert (regset && regset->collect_regset);
3213 gdb_regset = xmalloc (sect_list->size);
3214 regset->collect_regset (regset, regcache, -1,
3215 gdb_regset, sect_list->size);
3216 note_data = (char *) elfcore_write_register_note (obfd,
3217 note_data,
3218 note_size,
3219 sect_list->sect_name,
3220 gdb_regset,
3221 sect_list->size);
3222 xfree (gdb_regset);
3223 sect_list++;
3224 }
3225
3226 /* For architectures that do not have struct core_regset_section
3227 implemented, we use the old method. When all the architectures have
3228 the new support, the code below should be deleted. */
3229 else
3230 {
3231 if (core_regset_p
3232 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
3233 sizeof (fpregs))) != NULL
3234 && regset->collect_regset != NULL)
3235 regset->collect_regset (regset, regcache, -1,
3236 &fpregs, sizeof (fpregs));
3237 else
3238 fill_fpregset (regcache, &fpregs, -1);
3239
3240 note_data = (char *) elfcore_write_prfpreg (obfd,
3241 note_data,
3242 note_size,
3243 &fpregs, sizeof (fpregs));
3244 }
3245
3246 return note_data;
3247 }
3248
3249 struct linux_nat_corefile_thread_data
3250 {
3251 bfd *obfd;
3252 char *note_data;
3253 int *note_size;
3254 int num_notes;
3255 };
3256
3257 /* Called by gdbthread.c once per thread. Records the thread's
3258 register state for the corefile note section. */
3259
3260 static int
3261 linux_nat_corefile_thread_callback (struct lwp_info *ti, void *data)
3262 {
3263 struct linux_nat_corefile_thread_data *args = data;
3264
3265 args->note_data = linux_nat_do_thread_registers (args->obfd,
3266 ti->ptid,
3267 args->note_data,
3268 args->note_size);
3269 args->num_notes++;
3270
3271 return 0;
3272 }
3273
3274 /* Records the register state for the corefile note section. */
3275
3276 static char *
3277 linux_nat_do_registers (bfd *obfd, ptid_t ptid,
3278 char *note_data, int *note_size)
3279 {
3280 return linux_nat_do_thread_registers (obfd,
3281 ptid_build (ptid_get_pid (inferior_ptid),
3282 ptid_get_pid (inferior_ptid),
3283 0),
3284 note_data, note_size);
3285 }
3286
3287 /* Fills the "to_make_corefile_note" target vector. Builds the note
3288 section for a corefile, and returns it in a malloc buffer. */
3289
3290 static char *
3291 linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
3292 {
3293 struct linux_nat_corefile_thread_data thread_args;
3294 struct cleanup *old_chain;
3295 /* The variable size must be >= sizeof (prpsinfo_t.pr_fname). */
3296 char fname[16] = { '\0' };
3297 /* The variable size must be >= sizeof (prpsinfo_t.pr_psargs). */
3298 char psargs[80] = { '\0' };
3299 char *note_data = NULL;
3300 ptid_t current_ptid = inferior_ptid;
3301 gdb_byte *auxv;
3302 int auxv_len;
3303
3304 if (get_exec_file (0))
3305 {
3306 /* The exec file name may have no directory component, in which
case strrchr returns NULL; fall back to the whole name. */
const char *exec_slash = strrchr (get_exec_file (0), '/');
strncpy (fname, exec_slash ? exec_slash + 1 : get_exec_file (0),
sizeof (fname));
3307 strncpy (psargs, get_exec_file (0), sizeof (psargs));
3308 if (get_inferior_args ())
3309 {
3310 char *string_end;
3311 char *psargs_end = psargs + sizeof (psargs);
3312
3313 /* linux_elfcore_write_prpsinfo () handles zero unterminated
3314 strings fine. */
3315 string_end = memchr (psargs, 0, sizeof (psargs));
3316 if (string_end != NULL)
3317 {
3318 *string_end++ = ' ';
3319 strncpy (string_end, get_inferior_args (),
3320 psargs_end - string_end);
3321 }
3322 }
3323 note_data = (char *) elfcore_write_prpsinfo (obfd,
3324 note_data,
3325 note_size, fname, psargs);
3326 }
3327
3328 /* Dump information for threads. */
3329 thread_args.obfd = obfd;
3330 thread_args.note_data = note_data;
3331 thread_args.note_size = note_size;
3332 thread_args.num_notes = 0;
3333 iterate_over_lwps (linux_nat_corefile_thread_callback, &thread_args);
3334 if (thread_args.num_notes == 0)
3335 {
3336 /* iterate_over_lwps didn't come up with any LWPs; just
3337 use inferior_ptid. */
3338 note_data = linux_nat_do_registers (obfd, inferior_ptid,
3339 note_data, note_size);
3340 }
3341 else
3342 {
3343 note_data = thread_args.note_data;
3344 }
3345
3346 auxv_len = target_read_alloc (&current_target, TARGET_OBJECT_AUXV,
3347 NULL, &auxv);
3348 if (auxv_len > 0)
3349 {
3350 note_data = elfcore_write_note (obfd, note_data, note_size,
3351 "CORE", NT_AUXV, auxv, auxv_len);
3352 xfree (auxv);
3353 }
3354
3355 make_cleanup (xfree, note_data);
3356 return note_data;
3357 }
3358
3359 /* Implement the "info proc" command. */
3360
3361 static void
3362 linux_nat_info_proc_cmd (char *args, int from_tty)
3363 {
3364 long long pid = PIDGET (inferior_ptid);
3365 FILE *procfile;
3366 char **argv = NULL;
3367 char buffer[MAXPATHLEN];
3368 char fname1[MAXPATHLEN], fname2[MAXPATHLEN];
3369 int cmdline_f = 1;
3370 int cwd_f = 1;
3371 int exe_f = 1;
3372 int mappings_f = 0;
3373 int environ_f = 0;
3374 int status_f = 0;
3375 int stat_f = 0;
3376 int all = 0;
3377 struct stat dummy;
3378
3379 if (args)
3380 {
3381 /* Break up 'args' into an argv array. */
3382 if ((argv = buildargv (args)) == NULL)
3383 nomem (0);
3384 else
3385 make_cleanup_freeargv (argv);
3386 }
3387 while (argv != NULL && *argv != NULL)
3388 {
3389 if (isdigit (argv[0][0]))
3390 {
3391 pid = strtoul (argv[0], NULL, 10);
3392 }
3393 else if (strncmp (argv[0], "mappings", strlen (argv[0])) == 0)
3394 {
3395 mappings_f = 1;
3396 }
3397 else if (strcmp (argv[0], "status") == 0)
3398 {
3399 status_f = 1;
3400 }
3401 else if (strcmp (argv[0], "stat") == 0)
3402 {
3403 stat_f = 1;
3404 }
3405 else if (strcmp (argv[0], "cmd") == 0)
3406 {
3407 cmdline_f = 1;
3408 }
3409 else if (strncmp (argv[0], "exe", strlen (argv[0])) == 0)
3410 {
3411 exe_f = 1;
3412 }
3413 else if (strcmp (argv[0], "cwd") == 0)
3414 {
3415 cwd_f = 1;
3416 }
3417 else if (strncmp (argv[0], "all", strlen (argv[0])) == 0)
3418 {
3419 all = 1;
3420 }
3421 else
3422 {
3423 /* [...] (future options here) */
3424 }
3425 argv++;
3426 }
3427 if (pid == 0)
3428 error (_("No current process: you must name one."));
3429
3430 sprintf (fname1, "/proc/%lld", pid);
3431 if (stat (fname1, &dummy) != 0)
3432 error (_("No /proc directory: '%s'"), fname1);
3433
3434 printf_filtered (_("process %lld\n"), pid);
3435 if (cmdline_f || all)
3436 {
3437 sprintf (fname1, "/proc/%lld/cmdline", pid);
3438 if ((procfile = fopen (fname1, "r")) != NULL)
3439 {
3440 fgets (buffer, sizeof (buffer), procfile);
3441 printf_filtered ("cmdline = '%s'\n", buffer);
3442 fclose (procfile);
3443 }
3444 else
3445 warning (_("unable to open /proc file '%s'"), fname1);
3446 }
3447 if (cwd_f || all)
3448 {
3449 sprintf (fname1, "/proc/%lld/cwd", pid);
3450 memset (fname2, 0, sizeof (fname2));
3451 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
3452 printf_filtered ("cwd = '%s'\n", fname2);
3453 else
3454 warning (_("unable to read link '%s'"), fname1);
3455 }
3456 if (exe_f || all)
3457 {
3458 sprintf (fname1, "/proc/%lld/exe", pid);
3459 memset (fname2, 0, sizeof (fname2));
3460 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
3461 printf_filtered ("exe = '%s'\n", fname2);
3462 else
3463 warning (_("unable to read link '%s'"), fname1);
3464 }
3465 if (mappings_f || all)
3466 {
3467 sprintf (fname1, "/proc/%lld/maps", pid);
3468 if ((procfile = fopen (fname1, "r")) != NULL)
3469 {
3470 long long addr, endaddr, size, offset, inode;
3471 char permissions[8], device[8], filename[MAXPATHLEN];
3472
3473 printf_filtered (_("Mapped address spaces:\n\n"));
3474 if (gdbarch_addr_bit (current_gdbarch) == 32)
3475 {
3476 printf_filtered ("\t%10s %10s %10s %10s %7s\n",
3477 "Start Addr",
3478 " End Addr",
3479 " Size", " Offset", "objfile");
3480 }
3481 else
3482 {
3483 printf_filtered (" %18s %18s %10s %10s %7s\n",
3484 "Start Addr",
3485 " End Addr",
3486 " Size", " Offset", "objfile");
3487 }
3488
3489 while (read_mapping (procfile, &addr, &endaddr, &permissions[0],
3490 &offset, &device[0], &inode, &filename[0]))
3491 {
3492 size = endaddr - addr;
3493
3494 /* FIXME: carlton/2003-08-27: Maybe the printf_filtered
3495 calls here (and possibly above) should be abstracted
3496 out into their own functions? Andrew suggests using
3497 a generic local_address_string instead to print out
3498 the addresses; that makes sense to me, too. */
3499
3500 if (gdbarch_addr_bit (current_gdbarch) == 32)
3501 {
3502 printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n",
3503 (unsigned long) addr, /* FIXME: pr_addr */
3504 (unsigned long) endaddr,
3505 (int) size,
3506 (unsigned int) offset,
3507 filename[0] ? filename : "");
3508 }
3509 else
3510 {
3511 printf_filtered (" %#18lx %#18lx %#10x %#10x %7s\n",
3512 (unsigned long) addr, /* FIXME: pr_addr */
3513 (unsigned long) endaddr,
3514 (int) size,
3515 (unsigned int) offset,
3516 filename[0] ? filename : "");
3517 }
3518 }
3519
3520 fclose (procfile);
3521 }
3522 else
3523 warning (_("unable to open /proc file '%s'"), fname1);
3524 }
3525 if (status_f || all)
3526 {
3527 sprintf (fname1, "/proc/%lld/status", pid);
3528 if ((procfile = fopen (fname1, "r")) != NULL)
3529 {
3530 while (fgets (buffer, sizeof (buffer), procfile) != NULL)
3531 puts_filtered (buffer);
3532 fclose (procfile);
3533 }
3534 else
3535 warning (_("unable to open /proc file '%s'"), fname1);
3536 }
3537 if (stat_f || all)
3538 {
3539 sprintf (fname1, "/proc/%lld/stat", pid);
3540 if ((procfile = fopen (fname1, "r")) != NULL)
3541 {
3542 int itmp;
3543 char ctmp;
3544 long ltmp;
3545
3546 if (fscanf (procfile, "%d ", &itmp) > 0)
3547 printf_filtered (_("Process: %d\n"), itmp);
3548 if (fscanf (procfile, "(%[^)]) ", &buffer[0]) > 0)
3549 printf_filtered (_("Exec file: %s\n"), buffer);
3550 if (fscanf (procfile, "%c ", &ctmp) > 0)
3551 printf_filtered (_("State: %c\n"), ctmp);
3552 if (fscanf (procfile, "%d ", &itmp) > 0)
3553 printf_filtered (_("Parent process: %d\n"), itmp);
3554 if (fscanf (procfile, "%d ", &itmp) > 0)
3555 printf_filtered (_("Process group: %d\n"), itmp);
3556 if (fscanf (procfile, "%d ", &itmp) > 0)
3557 printf_filtered (_("Session id: %d\n"), itmp);
3558 if (fscanf (procfile, "%d ", &itmp) > 0)
3559 printf_filtered (_("TTY: %d\n"), itmp);
3560 if (fscanf (procfile, "%d ", &itmp) > 0)
3561 printf_filtered (_("TTY owner process group: %d\n"), itmp);
3562 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3563 printf_filtered (_("Flags: 0x%lx\n"), ltmp);
3564 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3565 printf_filtered (_("Minor faults (no memory page): %lu\n"),
3566 (unsigned long) ltmp);
3567 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3568 printf_filtered (_("Minor faults, children: %lu\n"),
3569 (unsigned long) ltmp);
3570 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3571 printf_filtered (_("Major faults (memory page faults): %lu\n"),
3572 (unsigned long) ltmp);
3573 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3574 printf_filtered (_("Major faults, children: %lu\n"),
3575 (unsigned long) ltmp);
3576 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3577 printf_filtered (_("utime: %ld\n"), ltmp);
3578 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3579 printf_filtered (_("stime: %ld\n"), ltmp);
3580 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3581 printf_filtered (_("utime, children: %ld\n"), ltmp);
3582 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3583 printf_filtered (_("stime, children: %ld\n"), ltmp);
3584 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3585 printf_filtered (_("jiffies remaining in current time slice: %ld\n"),
3586 ltmp);
3587 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3588 printf_filtered (_("'nice' value: %ld\n"), ltmp);
3589 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3590 printf_filtered (_("jiffies until next timeout: %lu\n"),
3591 (unsigned long) ltmp);
3592 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3593 printf_filtered (_("jiffies until next SIGALRM: %lu\n"),
3594 (unsigned long) ltmp);
3595 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3596 printf_filtered (_("start time (jiffies since system boot): %ld\n"),
3597 ltmp);
3598 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3599 printf_filtered (_("Virtual memory size: %lu\n"),
3600 (unsigned long) ltmp);
3601 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3602 printf_filtered (_("Resident set size: %lu\n"), (unsigned long) ltmp);
3603 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3604 printf_filtered (_("rlim: %lu\n"), (unsigned long) ltmp);
3605 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3606 printf_filtered (_("Start of text: 0x%lx\n"), ltmp);
3607 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3608 printf_filtered (_("End of text: 0x%lx\n"), ltmp);
3609 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3610 printf_filtered (_("Start of stack: 0x%lx\n"), ltmp);
3611 #if 0 /* Don't know how architecture-dependent the rest is...
3612 Anyway the signal bitmap info is available from "status". */
3613 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
3614 printf_filtered (_("Kernel stack pointer: 0x%lx\n"), ltmp);
3615 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
3616 printf_filtered (_("Kernel instr pointer: 0x%lx\n"), ltmp);
3617 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3618 printf_filtered (_("Pending signals bitmap: 0x%lx\n"), ltmp);
3619 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3620 printf_filtered (_("Blocked signals bitmap: 0x%lx\n"), ltmp);
3621 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3622 printf_filtered (_("Ignored signals bitmap: 0x%lx\n"), ltmp);
3623 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3624 printf_filtered (_("Catched signals bitmap: 0x%lx\n"), ltmp);
3625 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
3626 printf_filtered (_("wchan (system call): 0x%lx\n"), ltmp);
3627 #endif
3628 fclose (procfile);
3629 }
3630 else
3631 warning (_("unable to open /proc file '%s'"), fname1);
3632 }
3633 }
3634
3635 /* Implement the to_xfer_partial interface for memory reads using the /proc
3636 filesystem. Because we can use a single read() call for /proc, this
3637 can be much more efficient than banging away at PTRACE_PEEKTEXT,
3638 but it doesn't support writes. */
3639
3640 static LONGEST
3641 linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
3642 const char *annex, gdb_byte *readbuf,
3643 const gdb_byte *writebuf,
3644 ULONGEST offset, LONGEST len)
3645 {
3646 LONGEST ret;
3647 int fd;
3648 char filename[64];
3649
3650 if (object != TARGET_OBJECT_MEMORY || !readbuf)
3651 return 0;
3652
3653 /* Don't bother for small transfers; falling back to PTRACE_PEEKTEXT is cheaper for a word or two. */
3654 if (len < 3 * sizeof (long))
3655 return 0;
3656
3657 /* We could keep this file open and cache it - possibly one per
3658 thread. That requires some juggling, but is even faster. */
3659 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
3660 fd = open (filename, O_RDONLY | O_LARGEFILE);
3661 if (fd == -1)
3662 return 0;
3663
3664 /* If pread64 is available, use it. It's faster if the kernel
3665 supports it (only one syscall), and it's 64-bit safe even on
3666 32-bit platforms (for instance, SPARC debugging a SPARC64
3667 application). */
3668 #ifdef HAVE_PREAD64
3669 if (pread64 (fd, readbuf, len, offset) != len)
3670 #else
3671 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
3672 #endif
3673 ret = 0;
3674 else
3675 ret = len;
3676
3677 close (fd);
3678 return ret;
3679 }
3680
3681 /* Parse LINE as a signal set and add its set bits to SIGS. */
3682
3683 static void
3684 add_line_to_sigset (const char *line, sigset_t *sigs)
3685 {
3686 int len = strlen (line) - 1;
3687 const char *p;
3688 int signum;
3689
3690 if (line[len] != '\n')
3691 error (_("Could not parse signal set: %s"), line);
3692
3693 p = line;
3694 signum = len * 4;
3695 while (len-- > 0)
3696 {
3697 int digit;
3698
3699 if (*p >= '0' && *p <= '9')
3700 digit = *p - '0';
3701 else if (*p >= 'a' && *p <= 'f')
3702 digit = *p - 'a' + 10;
3703 else
3704 error (_("Could not parse signal set: %s"), line);
3705
3706 signum -= 4;
3707
3708 if (digit & 1)
3709 sigaddset (sigs, signum + 1);
3710 if (digit & 2)
3711 sigaddset (sigs, signum + 2);
3712 if (digit & 4)
3713 sigaddset (sigs, signum + 3);
3714 if (digit & 8)
3715 sigaddset (sigs, signum + 4);
3716
3717 p++;
3718 }
3719 }
3720
3721 /* Find process PID's pending, blocked and ignored signals from
3722 /proc/pid/status, and set PENDING, BLOCKED and IGNORED to match. */
3723
3724 void
3725 linux_proc_pending_signals (int pid, sigset_t *pending, sigset_t *blocked, sigset_t *ignored)
3726 {
3727 FILE *procfile;
3728 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
3730
3731 sigemptyset (pending);
3732 sigemptyset (blocked);
3733 sigemptyset (ignored);
3734 sprintf (fname, "/proc/%d/status", pid);
3735 procfile = fopen (fname, "r");
3736 if (procfile == NULL)
3737 error (_("Could not open %s"), fname);
3738
3739 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
3740 {
3741 /* Normal queued signals are on the SigPnd line in the status
3742 file. However, 2.6 kernels also have a "shared" pending
3743 queue for delivering signals to a thread group, so check for
3744 a ShdPnd line also.
3745
3746 Unfortunately some Red Hat kernels include the shared pending
3747 queue but not the ShdPnd status field. */
3748
3749 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
3750 add_line_to_sigset (buffer + 8, pending);
3751 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
3752 add_line_to_sigset (buffer + 8, pending);
3753 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
3754 add_line_to_sigset (buffer + 8, blocked);
3755 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
3756 add_line_to_sigset (buffer + 8, ignored);
3757 }
3758
3759 fclose (procfile);
3760 }
3761
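/* Target to_xfer_partial implementation.  Route AUXV requests to
   procfs, try the fast /proc/PID/mem path for memory reads, and fall
   back to the method of the target beneath for everything else.  */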
3762 static LONGEST
3763 linux_xfer_partial (struct target_ops *ops, enum target_object object,
3764 const char *annex, gdb_byte *readbuf,
3765 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
3766 {
3767 LONGEST xfer;
3768
3769 if (object == TARGET_OBJECT_AUXV)
3770 return procfs_xfer_auxv (ops, object, annex, readbuf, writebuf,
3771 offset, len);
3772
3773 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
3774 offset, len);
3775 if (xfer != 0)
3776 return xfer;
3777
3778 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
3779 offset, len);
3780 }
3781
3782 /* Create a prototype generic GNU/Linux target. The client can override
3783 it with local methods. */
3784
3785 static void
3786 linux_target_install_ops (struct target_ops *t)
3787 {
3788 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
3789 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
3790 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
3791 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
3792 t->to_post_startup_inferior = linux_child_post_startup_inferior;
3793 t->to_post_attach = linux_child_post_attach;
3794 t->to_follow_fork = linux_child_follow_fork;
3795 t->to_find_memory_regions = linux_nat_find_memory_regions;
3796 t->to_make_corefile_notes = linux_nat_make_corefile_notes;
3797
3798 super_xfer_partial = t->to_xfer_partial;
3799 t->to_xfer_partial = linux_xfer_partial;
3800 }
3801
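/* Return a generic GNU/Linux target, based on inf-ptrace, with the
   Linux-specific methods installed.  */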
3802 struct target_ops *
3803 linux_target (void)
3804 {
3805 struct target_ops *t;
3806
3807 t = inf_ptrace_target ();
3808 linux_target_install_ops (t);
3809
3810 return t;
3811 }
3812
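/* Like linux_target, but for targets that access the inferior's
   registers through U-area offsets; REGISTER_U_OFFSET computes the
   offset of a given register.  */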
3813 struct target_ops *
3814 linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
3815 {
3816 struct target_ops *t;
3817
3818 t = inf_ptrace_trad_target (register_u_offset);
3819 linux_target_install_ops (t);
3820
3821 return t;
3822 }
3823
3824 /* Controls if async mode is permitted. */
3825 static int linux_async_permitted = 0;
3826
3827 /* The set command writes to this variable. If the inferior is
3828 executing, linux_async_permitted is *not* updated. */
3829 static int linux_async_permitted_1 = 0;
3830
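/* The set hook for "maintenance set linux-async".  Reject the change
   while the inferior is executing; otherwise commit the new value and
   switch the async mode accordingly.  */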
3831 static void
3832 set_maintenance_linux_async_permitted (char *args, int from_tty,
3833 struct cmd_list_element *c)
3834 {
3835 if (target_has_execution)
3836 {
3837 linux_async_permitted_1 = linux_async_permitted;
3838 error (_("Cannot change this setting while the inferior is running."));
3839 }
3840
3841 linux_async_permitted = linux_async_permitted_1;
3842 linux_nat_set_async_mode (linux_async_permitted);
3843 }
3844
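/* The show hook for "maintenance show linux-async".  */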
3845 static void
3846 show_maintenance_linux_async_permitted (struct ui_file *file, int from_tty,
3847 struct cmd_list_element *c, const char *value)
3848 {
3849 fprintf_filtered (file, _("\
3850 Controlling the GNU/Linux inferior in asynchronous mode is %s.\n"),
3851 value);
3852 }
3853
3854 /* target_is_async_p implementation. */
3855
3856 static int
3857 linux_nat_is_async_p (void)
3858 {
3859 /* NOTE: palves 2008-03-21: We're only async when the user requests
3860 it explicitly with the "maintenance set linux-async" command.
3861 Someday, linux will always be async. */
3862 if (!linux_async_permitted)
3863 return 0;
3864
3865 return 1;
3866 }
3867
3868 /* target_can_async_p implementation. */
3869
3870 static int
3871 linux_nat_can_async_p (void)
3872 {
3873 /* NOTE: palves 2008-03-21: We're only async when the user requests
3874 it explicitly with the "maintenance set linux-async" command.
3875 Someday, linux will always be async. */
3876 if (!linux_async_permitted)
3877 return 0;
3878
3879 /* See target.h/target_async_mask. */
3880 return linux_nat_async_mask_value;
3881 }
3882
3883 /* target_async_mask implementation. */
3884
3885 static int
3886 linux_nat_async_mask (int mask)
3887 {
3888 int current_state;
3889 current_state = linux_nat_async_mask_value;
3890
3891 if (current_state != mask)
3892 {
3893 if (mask == 0)
3894 {
3895 linux_nat_async (NULL, 0);
3896 linux_nat_async_mask_value = mask;
3897 /* We're in sync mode. Make sure SIGCHLD isn't handled by
3898 async_sigchld_handler when we come out of sigsuspend in
3899 linux_nat_wait. */
3900 sigaction (SIGCHLD, &sync_sigchld_action, NULL);
3901 }
3902 else
3903 {
3904 /* Restore the async handler. */
3905 sigaction (SIGCHLD, &async_sigchld_action, NULL);
3906 linux_nat_async_mask_value = mask;
3907 linux_nat_async (inferior_event_handler, 0);
3908 }
3909 }
3910
3911 return current_state;
3912 }
3913
3914 /* Pop an event from the event pipe. */
3915
3916 static int
3917 linux_nat_event_pipe_pop (int *ptr_status, int *ptr_options)
3918 {
3919 struct waitpid_result event = {0};
3920 int ret;
3921
3922 do
3923 {
3924 ret = read (linux_nat_event_pipe[0], &event, sizeof (event));
3925 }
3926 while (ret == -1 && errno == EINTR);
3927
3928 gdb_assert (ret == sizeof (event));
3929
3930 *ptr_status = event.status;
3931 *ptr_options = event.options;
3932
3933 linux_nat_num_queued_events--;
3934
3935 return event.pid;
3936 }
3937
3938 /* Push an event into the event pipe. */
3939
3940 static void
3941 linux_nat_event_pipe_push (int pid, int status, int options)
3942 {
3943 int ret;
3944 struct waitpid_result event = {0};
3945 event.pid = pid;
3946 event.status = status;
3947 event.options = options;
3948
3949 do
3950 {
3951 ret = write (linux_nat_event_pipe[1], &event, sizeof (event));
3952 gdb_assert ((ret == -1 && errno == EINTR) || ret == sizeof (event));
3953 } while (ret == -1 && errno == EINTR);
3954
3955 linux_nat_num_queued_events++;
3956 }
3957
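/* Drain every event currently reported by waitpid and push it into
   the event pipe.  Called from the async SIGCHLD handler, and when
   async events are re-enabled.  */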
3958 static void
3959 get_pending_events (void)
3960 {
3961 int status, options, pid;
3962
3963 if (!linux_nat_async_enabled || !linux_nat_async_events_enabled)
3964 internal_error (__FILE__, __LINE__,
3965 "get_pending_events called with async masked");
3966
3967 while (1)
3968 {
3969 status = 0;
3970 options = __WCLONE | WNOHANG;
3971
3972 do
3973 {
3974 pid = waitpid (-1, &status, options);
3975 }
3976 while (pid == -1 && errno == EINTR);
3977
3978 if (pid <= 0)
3979 {
3980 options = WNOHANG;
3981 do
3982 {
3983 pid = waitpid (-1, &status, options);
3984 }
3985 while (pid == -1 && errno == EINTR);
3986 }
3987
3988 if (pid <= 0)
3989 /* No more children reporting events. */
3990 break;
3991
3992 if (debug_linux_nat_async)
3993 fprintf_unfiltered (gdb_stdlog, "\
3994 get_pending_events: pid(%d), status(%x), options(%x)\n",
3995 pid, status, options);
3996
3997 linux_nat_event_pipe_push (pid, status, options);
3998 }
3999
4000 if (debug_linux_nat_async)
4001 fprintf_unfiltered (gdb_stdlog, "\
4002 get_pending_events: linux_nat_num_queued_events(%d)\n",
4003 linux_nat_num_queued_events);
4004 }
4005
4006 /* SIGCHLD handler for async mode. */
4007
4008 static void
4009 async_sigchld_handler (int signo)
4010 {
4011 if (debug_linux_nat_async)
4012 fprintf_unfiltered (gdb_stdlog, "async_sigchld_handler\n");
4013
4014 get_pending_events ();
4015 }
4016
4017 /* Enable or disable async SIGCHLD handling. */
4018
4019 static int
4020 linux_nat_async_events (int enable)
4021 {
4022 int current_state = linux_nat_async_events_enabled;
4023
4024 if (debug_linux_nat_async)
4025 fprintf_unfiltered (gdb_stdlog,
4026 "LNAE: enable(%d): linux_nat_async_events_enabled(%d), "
4027 "linux_nat_num_queued_events(%d)\n",
4028 enable, linux_nat_async_events_enabled,
4029 linux_nat_num_queued_events);
4030
4031 if (current_state != enable)
4032 {
4033 sigset_t mask;
4034 sigemptyset (&mask);
4035 sigaddset (&mask, SIGCHLD);
4036 if (enable)
4037 {
4038 /* Unblock target events. */
4039 linux_nat_async_events_enabled = 1;
4040
4041 local_event_queue_to_pipe ();
4042 /* While in masked async, we may not have collected all the
4043 pending events. Get them out now. */
4044 get_pending_events ();
4045 sigprocmask (SIG_UNBLOCK, &mask, NULL);
4046 }
4047 else
4048 {
4049 /* Block target events. */
4050 sigprocmask (SIG_BLOCK, &mask, NULL);
4051 linux_nat_async_events_enabled = 0;
4052 /* Get events out of queue, and make them available to
4053 queued_waitpid / my_waitpid. */
4054 pipe_to_local_event_queue ();
4055 }
4056 }
4057
4058 return current_state;
4059 }
4060
4061 static int async_terminal_is_ours = 1;
4062
4063 /* target_terminal_inferior implementation. */
4064
4065 static void
4066 linux_nat_terminal_inferior (void)
4067 {
4068 if (!target_is_async_p ())
4069 {
4070 /* Async mode is disabled. */
4071 terminal_inferior ();
4072 return;
4073 }
4074
4075 /* GDB should never give the terminal to the inferior if the
4076 inferior is running in the background (run&, continue&, etc.).
4077 This check can be removed when the common code is fixed. */
4078 if (!sync_execution)
4079 return;
4080
4081 terminal_inferior ();
4082
4083 if (!async_terminal_is_ours)
4084 return;
4085
4086 delete_file_handler (input_fd);
4087 async_terminal_is_ours = 0;
4088 set_sigint_trap ();
4089 }
4090
4091 /* target_terminal_ours implementation. */
4092
4093 void
4094 linux_nat_terminal_ours (void)
4095 {
4096 if (!target_is_async_p ())
4097 {
4098 /* Async mode is disabled. */
4099 terminal_ours ();
4100 return;
4101 }
4102
4103 /* GDB should never give the terminal to the inferior if the
4104 inferior is running in the background (run&, continue&, etc.),
4105 but it should always be able to claim the terminal back. */
4106 terminal_ours ();
4107
4108 if (!sync_execution)
4109 return;
4110
4111 if (async_terminal_is_ours)
4112 return;
4113
4114 clear_sigint_trap ();
4115 add_file_handler (input_fd, stdin_event_handler, 0);
4116 async_terminal_is_ours = 1;
4117 }
4118
4119 static void (*async_client_callback) (enum inferior_event_type event_type,
4120 void *context);
4121 static void *async_client_context;
4122
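/* Event-loop callback for the event pipe.  Invoke the registered
   client callback whenever the pipe becomes readable.  */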
4123 static void
4124 linux_nat_async_file_handler (int error, gdb_client_data client_data)
4125 {
4126 async_client_callback (INF_REG_EVENT, async_client_context);
4127 }
4128
4129 /* target_async implementation. */
4130
4131 static void
4132 linux_nat_async (void (*callback) (enum inferior_event_type event_type,
4133 void *context), void *context)
4134 {
4135 if (linux_nat_async_mask_value == 0 || !linux_nat_async_enabled)
4136 internal_error (__FILE__, __LINE__,
4137 "Calling target_async when async is masked");
4138
4139 if (callback != NULL)
4140 {
4141 async_client_callback = callback;
4142 async_client_context = context;
4143 add_file_handler (linux_nat_event_pipe[0],
4144 linux_nat_async_file_handler, NULL);
4145
4146 linux_nat_async_events (1);
4147 }
4148 else
4149 {
4150 async_client_callback = callback;
4151 async_client_context = context;
4152
4153 linux_nat_async_events (0);
4154 delete_file_handler (linux_nat_event_pipe[0]);
4155 }
4157 }
4158
4159 /* Enable/Disable async mode. */
4160
4161 static void
4162 linux_nat_set_async_mode (int on)
4163 {
4164 if (linux_nat_async_enabled != on)
4165 {
4166 if (on)
4167 {
4168 gdb_assert (waitpid_queue == NULL);
4169 sigaction (SIGCHLD, &async_sigchld_action, NULL);
4170
4171 if (pipe (linux_nat_event_pipe) == -1)
4172 internal_error (__FILE__, __LINE__,
4173 "creating event pipe failed.");
4174
4175 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
4176 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
4177 }
4178 else
4179 {
4180 sigaction (SIGCHLD, &sync_sigchld_action, NULL);
4181
4182 drain_queued_events (-1);
4183
4184 linux_nat_num_queued_events = 0;
4185 close (linux_nat_event_pipe[0]);
4186 close (linux_nat_event_pipe[1]);
4187 linux_nat_event_pipe[0] = linux_nat_event_pipe[1] = -1;
4189 }
4190 }
4191 linux_nat_async_enabled = on;
4192 }
4193
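/* Turn the single-threaded native target T into the multi-threaded
   GNU/Linux native target, and register it.  */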
4194 void
4195 linux_nat_add_target (struct target_ops *t)
4196 {
4197 /* Save the provided single-threaded target. We save this in a separate
4198 variable because another target we've inherited from (e.g. inf-ptrace)
4199 may have saved a pointer to T; we want to use it for the final
4200 process stratum target. */
4201 linux_ops_saved = *t;
4202 linux_ops = &linux_ops_saved;
4203
4204 /* Override some methods for multithreading. */
4205 t->to_create_inferior = linux_nat_create_inferior;
4206 t->to_attach = linux_nat_attach;
4207 t->to_detach = linux_nat_detach;
4208 t->to_resume = linux_nat_resume;
4209 t->to_wait = linux_nat_wait;
4210 t->to_xfer_partial = linux_nat_xfer_partial;
4211 t->to_kill = linux_nat_kill;
4212 t->to_mourn_inferior = linux_nat_mourn_inferior;
4213 t->to_thread_alive = linux_nat_thread_alive;
4214 t->to_pid_to_str = linux_nat_pid_to_str;
4215 t->to_has_thread_control = tc_schedlock;
4216
4217 t->to_can_async_p = linux_nat_can_async_p;
4218 t->to_is_async_p = linux_nat_is_async_p;
4219 t->to_async = linux_nat_async;
4220 t->to_async_mask = linux_nat_async_mask;
4221 t->to_terminal_inferior = linux_nat_terminal_inferior;
4222 t->to_terminal_ours = linux_nat_terminal_ours;
4223
4224 /* We don't change the stratum; this target will sit at
4225 process_stratum and thread_db will sit at thread_stratum. This
4226 is a little strange, since this is a multi-threaded-capable
4227 target, but we want to be on the stack below thread_db, and we
4228 also want to be used for single-threaded processes. */
4229
4230 add_target (t);
4231
4232 /* TODO: Eliminate this and have libthread_db use
4233 find_target_beneath. */
4234 thread_db_init (t);
4235 }
4236
4237 /* Register a method to call whenever a new thread is attached. */
4238 void
4239 linux_nat_set_new_thread (struct target_ops *t, void (*new_thread) (ptid_t))
4240 {
4241 /* Save the pointer. We only support a single registered instance
4242 of the GNU/Linux native target, so we do not need to map this to
4243 T. */
4244 linux_nat_new_thread = new_thread;
4245 }
4246
4247 /* Return the saved siginfo associated with PTID. */
4248 struct siginfo *
4249 linux_nat_get_siginfo (ptid_t ptid)
4250 {
4251 struct lwp_info *lp = find_lwp_pid (ptid);
4252
4253 gdb_assert (lp != NULL);
4254
4255 return &lp->siginfo;
4256 }
4257
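/* Module initialization: register the "info proc" and maintenance
   commands, and set up the synchronous and asynchronous SIGCHLD
   handlers and signal masks.  */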
4258 void
4259 _initialize_linux_nat (void)
4260 {
4261 sigset_t mask;
4262
4263 add_info ("proc", linux_nat_info_proc_cmd, _("\
4264 Show /proc process information about any running process.\n\
4265 Specify any process id, or use the program being debugged by default.\n\
4266 Specify any of the following keywords for detailed info:\n\
4267 mappings -- list of mapped memory regions.\n\
4268 stat -- list process statistics from /proc/PID/stat.\n\
4269 status -- list process status information from /proc/PID/status.\n\
4270 all -- list all available /proc info."));
4271
4272 add_setshow_zinteger_cmd ("lin-lwp", class_maintenance,
4273 &debug_linux_nat, _("\
4274 Set debugging of GNU/Linux lwp module."), _("\
4275 Show debugging of GNU/Linux lwp module."), _("\
4276 Enables printf debugging output."),
4277 NULL,
4278 show_debug_linux_nat,
4279 &setdebuglist, &showdebuglist);
4280
4281 add_setshow_zinteger_cmd ("lin-lwp-async", class_maintenance,
4282 &debug_linux_nat_async, _("\
4283 Set debugging of GNU/Linux async lwp module."), _("\
4284 Show debugging of GNU/Linux async lwp module."), _("\
4285 Enables printf debugging output."),
4286 NULL,
4287 show_debug_linux_nat_async,
4288 &setdebuglist, &showdebuglist);
4289
4290 add_setshow_boolean_cmd ("linux-async", class_maintenance,
4291 &linux_async_permitted_1, _("\
4292 Set whether gdb controls the GNU/Linux inferior in asynchronous mode."), _("\
4293 Show whether gdb controls the GNU/Linux inferior in asynchronous mode."), _("\
4294 Tells gdb whether to control the GNU/Linux inferior in asynchronous mode."),
4295 set_maintenance_linux_async_permitted,
4296 show_maintenance_linux_async_permitted,
4297 &maintenance_set_cmdlist,
4298 &maintenance_show_cmdlist);
4299
4300 /* Block SIGCHLD by default. Doing this early prevents it from
4301 being unblocked if an exception is thrown due to an error while
4302 the inferior is starting (sigsetjmp/siglongjmp). */
4303 sigemptyset (&mask);
4304 sigaddset (&mask, SIGCHLD);
4305 sigprocmask (SIG_BLOCK, &mask, NULL);
4306
4307 /* Save this mask as the default. */
4308 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
4309
4310 /* The synchronous SIGCHLD handler. */
4311 sync_sigchld_action.sa_handler = sigchld_handler;
4312 sigemptyset (&sync_sigchld_action.sa_mask);
4313 sync_sigchld_action.sa_flags = SA_RESTART;
4314
4315 /* Make it the default. */
4316 sigaction (SIGCHLD, &sync_sigchld_action, NULL);
4317
4318 /* Make sure we don't block SIGCHLD during a sigsuspend. */
4319 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
4320 sigdelset (&suspend_mask, SIGCHLD);
4321
4322 /* SIGCHLD handler for async mode. */
4323 async_sigchld_action.sa_handler = async_sigchld_handler;
4324 sigemptyset (&async_sigchld_action.sa_mask);
4325 async_sigchld_action.sa_flags = SA_RESTART;
4326
4327 /* Install the default mode. */
4328 linux_nat_set_async_mode (linux_async_permitted);
4329 }
4330 \f
4331
4332 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
4333 the GNU/Linux Threads library and therefore doesn't really belong
4334 here. */
4335
4336 /* Read variable NAME in the target and return its value if found.
4337 Otherwise return zero. It is assumed that the type of the variable
4338 is `int'. */
4339
4340 static int
4341 get_signo (const char *name)
4342 {
4343 struct minimal_symbol *ms;
4344 int signo;
4345
4346 ms = lookup_minimal_symbol (name, NULL, NULL);
4347 if (ms == NULL)
4348 return 0;
4349
4350 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
4351 sizeof (signo)) != 0)
4352 return 0;
4353
4354 return signo;
4355 }
4356
4357 /* Return the set of signals used by the threads library in *SET. */
4358
4359 void
4360 lin_thread_get_thread_signals (sigset_t *set)
4361 {
4362 struct sigaction action;
4363 int restart, cancel;
4364 sigset_t blocked_mask;
4365
4366 sigemptyset (&blocked_mask);
4367 sigemptyset (set);
4368
4369 restart = get_signo ("__pthread_sig_restart");
4370 cancel = get_signo ("__pthread_sig_cancel");
4371
4372 /* LinuxThreads normally uses the first two RT signals, but in some legacy
4373 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
4374 not provide any way for the debugger to query the signal numbers -
4375 fortunately they don't change! */
4376
4377 if (restart == 0)
4378 restart = __SIGRTMIN;
4379
4380 if (cancel == 0)
4381 cancel = __SIGRTMIN + 1;
4382
4383 sigaddset (set, restart);
4384 sigaddset (set, cancel);
4385
4386 /* The GNU/Linux Threads library makes terminating threads send a
4387 special "cancel" signal instead of SIGCHLD. Make sure we catch
4388 those (to prevent them from terminating GDB itself, which is
4389 likely to be their default action) and treat them the same way as
4390 SIGCHLD. */
4391
4392 action.sa_handler = sigchld_handler;
4393 sigemptyset (&action.sa_mask);
4394 action.sa_flags = SA_RESTART;
4395 sigaction (cancel, &action, NULL);
4396
4397 /* We block the "cancel" signal throughout this code ... */
4398 sigaddset (&blocked_mask, cancel);
4399 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
4400
4401 /* ... except during a sigsuspend. */
4402 sigdelset (&suspend_mask, cancel);
4403 }