/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
   2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

20 #include "server.h"
21 #include "linux-low.h"
22
23 #include <sys/wait.h>
24 #include <stdio.h>
25 #include <sys/param.h>
26 #include <sys/ptrace.h>
27 #include <signal.h>
28 #include <sys/ioctl.h>
29 #include <fcntl.h>
30 #include <string.h>
31 #include <stdlib.h>
32 #include <unistd.h>
33 #include <errno.h>
34 #include <sys/syscall.h>
35 #include <sched.h>
36 #include <ctype.h>
37 #include <pwd.h>
38 #include <sys/types.h>
39 #include <dirent.h>
40 #include <sys/stat.h>
41 #include <sys/vfs.h>
42 #include <sys/uio.h>
43 #ifndef ELFMAG0
44 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
45 then ELFMAG0 will have been defined. If it didn't get included by
46 gdb_proc_service.h then including it will likely introduce a duplicate
47 definition of elf_fpregset_t. */
48 #include <elf.h>
49 #endif
50
51 #ifndef SPUFS_MAGIC
52 #define SPUFS_MAGIC 0x23c9b64e
53 #endif
54
55 #ifndef PTRACE_GETSIGINFO
56 # define PTRACE_GETSIGINFO 0x4202
57 # define PTRACE_SETSIGINFO 0x4203
58 #endif
59
60 #ifndef O_LARGEFILE
61 #define O_LARGEFILE 0
62 #endif
63
64 /* If the system headers did not provide the constants, hard-code the normal
65 values. */
66 #ifndef PTRACE_EVENT_FORK
67
68 #define PTRACE_SETOPTIONS 0x4200
69 #define PTRACE_GETEVENTMSG 0x4201
70
71 /* options set using PTRACE_SETOPTIONS */
72 #define PTRACE_O_TRACESYSGOOD 0x00000001
73 #define PTRACE_O_TRACEFORK 0x00000002
74 #define PTRACE_O_TRACEVFORK 0x00000004
75 #define PTRACE_O_TRACECLONE 0x00000008
76 #define PTRACE_O_TRACEEXEC 0x00000010
77 #define PTRACE_O_TRACEVFORKDONE 0x00000020
78 #define PTRACE_O_TRACEEXIT 0x00000040
79
80 /* Wait extended result codes for the above trace options. */
81 #define PTRACE_EVENT_FORK 1
82 #define PTRACE_EVENT_VFORK 2
83 #define PTRACE_EVENT_CLONE 3
84 #define PTRACE_EVENT_EXEC 4
85 #define PTRACE_EVENT_VFORK_DONE 5
86 #define PTRACE_EVENT_EXIT 6
87
88 #endif /* PTRACE_EVENT_FORK */
89
90 /* We can't always assume that this flag is available, but all systems
91 with the ptrace event handlers also have __WALL, so it's safe to use
92 in some contexts. */
93 #ifndef __WALL
94 #define __WALL 0x40000000 /* Wait for any child. */
95 #endif
96
97 #ifndef W_STOPCODE
98 #define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
99 #endif
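
/* For illustration: W_STOPCODE composes a raw wait status that looks
   like "stopped by signal SIG".  Assuming x86 GNU/Linux, where SIGSTOP
   is 19, W_STOPCODE (SIGSTOP) == (19 << 8 | 0x7f) == 0x137f, for which
   WIFSTOPPED () is true and WSTOPSIG () recovers SIGSTOP.
   handle_extended_wait below uses exactly this to synthesize a SIGSTOP
   status for a new clone child whose stop has not been collected yet.  */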

#ifdef __UCLIBC__
#if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
#define HAS_NOMMU
#endif
#endif

/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
   representation of the thread ID.

   ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
   the same as the LWP ID.

   ``all_processes'' is keyed by the "overall process ID", which
   GNU/Linux calls tgid, "thread group ID".  */

struct inferior_list all_lwps;

/* A list of all unknown processes which receive stop signals.  Some other
   process will presumably claim each of these as forked children
   momentarily.  */

struct inferior_list stopped_pids;

/* FIXME this is a bit of a hack, and could be removed.  */
int stopping_threads;

/* FIXME make into a target method?  */
int using_threads = 1;

/* This flag is true iff we've just created or attached to our first
   inferior but it has not stopped yet.  As soon as it does, we need
   to call the low target's arch_setup callback.  Doing this only on
   the first inferior avoids reinitializing the architecture on every
   inferior, and avoids messing with the register caches of the
   already running inferiors.  NOTE: this assumes all inferiors under
   control of gdbserver have the same architecture.  */
static int new_inferior;

static void linux_resume_one_lwp (struct lwp_info *lwp,
                                  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (void);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static void *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int linux_core_of_thread (ptid_t ptid);
static void proceed_all_lwps (void);
static void unstop_all_lwps (struct lwp_info *except);
static int finish_step_over (struct lwp_info *lwp);
static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

#define PTRACE_ARG3_TYPE void *
#define PTRACE_ARG4_TYPE void *
#define PTRACE_XFER_TYPE long

#ifdef HAVE_LINUX_REGSETS
static char *disabled_regsets;
static int num_regsets;
#endif

/* The read/write ends of the pipe registered as a waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct inferior_list_entry *entry);
static void wait_for_sigstop (struct inferior_list_entry *entry);

/* Accepts an integer PID; returns a string representing a file that
   can be opened to get info for the child process.
   Space for the result is malloc'd; the caller must free it.  */

char *
linux_child_pid_to_exec_file (int pid)
{
  char *name1, *name2;

  name1 = xmalloc (MAXPATHLEN);
  name2 = xmalloc (MAXPATHLEN);
  memset (name2, 0, MAXPATHLEN);

  sprintf (name1, "/proc/%d/exe", pid);
  if (readlink (name1, name2, MAXPATHLEN) > 0)
    {
      free (name1);
      return name2;
    }
  else
    {
      free (name2);
      return name1;
    }
}
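
/* For illustration (a sketch; nothing here calls it this way): the
   result is either the target of the /proc/PID/exe symlink or, when
   readlink fails, the "/proc/PID/exe" path itself.  Either way the
   string is malloc'd and the caller owns it:

     char *file = linux_child_pid_to_exec_file (pid);
     ...use file...
     free (file);
*/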

/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header)
{
  return (header->e_ident[EI_MAG0] == ELFMAG0
          && header->e_ident[EI_MAG1] == ELFMAG1
          && header->e_ident[EI_MAG2] == ELFMAG2
          && header->e_ident[EI_MAG3] == ELFMAG3
          && header->e_ident[EI_CLASS] == ELFCLASS64);
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

int
elf_64_file_p (const char *file)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header);
}
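
/* For illustration, combining the two helpers above to pick 32- vs
   64-bit handling for a live inferior (a sketch under the assumption
   that a caller wants the distinction; this file itself does not use
   them together):

     char *file = linux_child_pid_to_exec_file (pid);
     int is_elf64 = elf_64_file_p (file);
     free (file);
     if (is_elf64 < 0)
       ...the file was unreadable; fall back to a default...
*/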

static void
delete_lwp (struct lwp_info *lwp)
{
  remove_thread (get_lwp_thread (lwp));
  remove_inferior (&all_lwps, &lwp->head);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  /* Is this the first process?  If so, then set the arch.  */
  if (all_processes.head == NULL)
    new_inferior = 1;

  proc = add_process (pid, attached);
  proc->private = xcalloc (1, sizeof (*proc->private));

  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}

/* Remove a process from the common process list,
   also freeing all private data.  */

static void
linux_remove_process (struct process_info *process)
{
  struct process_info_private *priv = process->private;

  free (priv->arch_private);
  free (priv);
  remove_process (process);
}

/* Wrapper function for waitpid which handles EINTR, and emulates
   __WALL for systems where that is not available.  */

static int
my_waitpid (int pid, int *status, int flags)
{
  int ret, out_errno;

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);

  if (flags & __WALL)
    {
      sigset_t block_mask, org_mask, wake_mask;
      int wnohang;

      wnohang = (flags & WNOHANG) != 0;
      flags &= ~(__WALL | __WCLONE);
      flags |= WNOHANG;

      /* Block all signals while here.  This avoids knowing about
         LinuxThread's signals.  */
      sigfillset (&block_mask);
      sigprocmask (SIG_BLOCK, &block_mask, &org_mask);

      /* ... except during the sigsuspend below.  */
      sigemptyset (&wake_mask);

      while (1)
        {
          /* Since all signals are blocked, there's no need to check
             for EINTR here.  */
          ret = waitpid (pid, status, flags);
          out_errno = errno;

          if (ret == -1 && out_errno != ECHILD)
            break;
          else if (ret > 0)
            break;

          if (flags & __WCLONE)
            {
              /* We've tried both flavors now.  If WNOHANG is set,
                 there's nothing else to do, just bail out.  */
              if (wnohang)
                break;

              if (debug_threads)
                fprintf (stderr, "blocking\n");

              /* Block waiting for signals.  */
              sigsuspend (&wake_mask);
            }

          flags ^= __WCLONE;
        }

      sigprocmask (SIG_SETMASK, &org_mask, NULL);
    }
  else
    {
      do
        ret = waitpid (pid, status, flags);
      while (ret == -1 && errno == EINTR);
      out_errno = errno;
    }

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
             pid, flags, status ? *status : -1, ret);

  errno = out_errno;
  return ret;
}
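
/* For illustration: on a host without native __WALL, the loop above
   alternates WNOHANG polls between "normal" children and (with
   __WCLONE) clone children, using sigsuspend so that a SIGCHLD from
   either flavor wakes it.  A typical call from this file looks like

     int status;
     int ret = my_waitpid (-1, &status, __WALL | WNOHANG);

   where ret == 0 means "no stopped child yet" rather than an error.  */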

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = wstat >> 16;
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status = W_STOPCODE (SIGSTOP);

      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (! pull_pid_from_list (&stopped_pids, new_pid))
        {
          /* The new child has a pending SIGSTOP.  We can't affect it until it
             hits the SIGSTOP, but we're already attached.  */

          ret = my_waitpid (new_pid, &status, __WALL);

          if (ret == -1)
            perror_with_name ("waiting for new child");
          else if (ret != new_pid)
            warning ("wait returned unexpected PID %d", ret);
          else if (!WIFSTOPPED (status))
            warning ("wait returned unexpected status 0x%x", status);
        }

      ptrace (PTRACE_SETOPTIONS, new_pid, 0,
              (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);

      ptid = ptid_build (pid_of (event_child), new_pid, 0);
      new_lwp = (struct lwp_info *) add_lwp (ptid);
      add_thread (ptid, new_lwp);

      /* Either we're going to immediately resume the new thread
         or leave it stopped.  linux_resume_one_lwp is a nop if it
         thinks the thread is currently running, so set this first
         before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
         we might get another signal delivered to the group first.
         If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
        {
          if (stopping_threads)
            new_lwp->stop_pc = get_stop_pc (new_lwp);
          else
            linux_resume_one_lwp (new_lwp, 0, 0, NULL);
        }
      else
        {
          new_lwp->stop_expected = 1;

          if (stopping_threads)
            {
              new_lwp->stop_pc = get_stop_pc (new_lwp);
              new_lwp->status_pending_p = 1;
              new_lwp->status_pending = status;
            }
          else
            /* Pass the signal on.  This is what GDB does - except
               shouldn't we really report it instead?  */
            linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
        }

      /* Always resume the current thread.  If we are stopping
         threads, it will have a pending SIGSTOP; we may as well
         collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}
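
/* For illustration: with PTRACE_O_TRACECLONE set, a clone by a traced
   LWP stops it with a status whose low half looks like a SIGTRAP stop
   and whose high half carries the event number.  Assuming SIGTRAP is
   5 (as on x86 GNU/Linux), the raw status is

     0x7f | (SIGTRAP | PTRACE_EVENT_CLONE << 8) << 8  ==  0x3057f

   so WIFSTOPPED holds, WSTOPSIG (wstat) == SIGTRAP, and
   wstat >> 16 == PTRACE_EVENT_CLONE selects the branch above.  The new
   thread's id is then fetched with PTRACE_GETEVENTMSG.  */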

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_inferior, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    fprintf (stderr, "pc is 0x%lx\n", (long) pc);

  current_inferior = saved_inferior;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:
   If we were single-stepping this process using PTRACE_SINGLESTEP,
   we will get only the one SIGTRAP (even if the instruction we
   stepped over was a breakpoint).  The value of $eip will be the
   next instruction.
   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If we cancel the SIGTRAP, we
   must resume at the decremented PC.

   (Presumably, not yet tested) On a non-decr_pc_after_break machine
   with hardware or kernel single-step:
   If we single-step over a breakpoint instruction, our PC will
   point at the following instruction.  If we continue and hit a
   breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static CORE_ADDR
get_stop_pc (struct lwp_info *lwp)
{
  CORE_ADDR stop_pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  stop_pc = get_pc (lwp);

  if (WSTOPSIG (lwp->last_status) == SIGTRAP
      && !lwp->stepping
      && !lwp->stopped_by_watchpoint
      && lwp->last_status >> 16 == 0)
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}
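
/* A worked example, assuming i386, where the breakpoint instruction
   (int3, 0xCC) is one byte and decr_pc_after_break is 1: if a
   breakpoint is planted at the (hypothetical) address 0x8048500 and
   the inferior runs into it under PTRACE_CONT, the kernel reports
   SIGTRAP with $eip == 0x8048501.  get_stop_pc then yields 0x8048500,
   the breakpoint address itself, which is what
   breakpoint_inserted_here and cancel_breakpoint below compare
   against.  */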

static void *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  lwp->head.id = ptid;

  lwp->last_resume_kind = resume_continue;

  if (the_low_target.new_thread != NULL)
    lwp->arch_private = the_low_target.new_thread ();

  add_inferior_to_list (&all_lwps, &lwp->head);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      ptrace (PTRACE_TRACEME, 0, 0, 0);

#ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      execv (program, allargs);
      if (errno == ENOENT)
        execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
               strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  add_thread (ptid, new_lwp);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

/* Attach to an inferior process.  */

static void
linux_attach_lwp_1 (unsigned long lwpid, int initial)
{
  ptid_t ptid;
  struct lwp_info *new_lwp;

  if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
    {
      if (!initial)
        {
          /* If we fail to attach to an LWP, just warn.  */
          fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
                   strerror (errno), errno);
          fflush (stderr);
          return;
        }
      else
        /* If we fail to attach to a process, report an error.  */
        error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid,
               strerror (errno), errno);
    }

  if (initial)
    /* NOTE/FIXME: This lwp might not have been the tgid.  */
    ptid = ptid_build (lwpid, lwpid, 0);
  else
    {
      /* Note that extracting the pid from the current inferior is
         safe, since we're always called in the context of the same
         process as this new thread.  */
      int pid = pid_of (get_thread_lwp (current_inferior));
      ptid = ptid_build (pid, lwpid, 0);
    }

  new_lwp = (struct lwp_info *) add_lwp (ptid);
  add_thread (ptid, new_lwp);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
        of a new thread that is being created.
        In this case we should ignore that SIGSTOP and resume the
        process.  This is handled below by setting stop_expected = 1,
        and the fact that add_lwp sets last_resume_kind ==
        resume_continue.

     2) This is the first thread (the process thread), and we're attaching
        to it via attach_inferior.
        In this case we want the process thread to stop.
        This is handled by having linux_attach set last_resume_kind ==
        resume_stop after we return.
        ??? If the process already has several threads we leave the other
        threads running.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
        existing threads.
        In this case we want the thread to stop.
        FIXME: This case is currently not properly handled.
        We should wait for the SIGSTOP but don't.  Things work apparently
        because enough time passes between when we ptrace (ATTACH) and when
        gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;
}

void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}

int
linux_attach (unsigned long pid)
{
  struct lwp_info *lwp;

  linux_attach_lwp_1 (pid, 1);

  linux_add_process (pid, 1);

  if (!non_stop)
    {
      /* Don't ignore the initial SIGSTOP if we just attached to this
         process.  It will be collected by wait shortly.  */
      lwp = (struct lwp_info *) find_inferior_id (&all_lwps,
                                                  ptid_build (pid, pid, 0));
      lwp->last_resume_kind = resume_stop;
    }

  return 0;
}

struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
        return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (struct thread_info *thread)
{
  ptid_t ptid = ((struct inferior_list_entry *) thread)->id;
  int pid = ptid_get_pid (ptid);
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
                         second_thread_of_pid_p, &counter) == NULL);
}
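
/* For illustration: second_thread_of_pid_p makes find_inferior stop at
   the second thread it visits with a matching pid, so
   last_thread_of_process_p is true exactly when the process has one
   thread left.  E.g. with (hypothetical) threads {1001, 1002} in
   process 1001, the walk stops at 1002 and the predicate is false;
   once 1002 exits, find_inferior returns NULL and it is true.  */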

/* Kill the inferior lwp.  */

static int
linux_kill_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int wstat;
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (lwp) == pid)
    {
      if (debug_threads)
        fprintf (stderr, "lkop: is last of process %s\n",
                 target_pid_to_str (entry->id));
      return 0;
    }

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  if (!lwp->stopped)
    send_sigstop (&lwp->head);

  do
    {
      ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (pid > 0 && WIFSTOPPED (wstat));

  return 0;
}

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;
  struct thread_info *thread;
  int wstat;
  int lwpid;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  find_inferior (&all_threads, linux_kill_one_lwp, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));
  thread = get_lwp_thread (lwp);

  if (debug_threads)
    fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
             lwpid_of (lwp), pid);

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  if (!lwp->stopped)
    send_sigstop (&lwp->head);

  do
    {
      ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (lwpid > 0 && WIFSTOPPED (wstat));

#ifdef USE_THREAD_DB
  thread_db_free (process, 0);
#endif
  delete_lwp (lwp);
  linux_remove_process (process);
  return 0;
}

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If we're detaching from a running inferior, make sure it is
     stopped first, as PTRACE_DETACH will not work otherwise.  */
  if (!lwp->stopped)
    {
      int lwpid = lwpid_of (lwp);

      stopping_threads = 1;
      send_sigstop (&lwp->head);

      /* If this detects a new thread through a clone event, the new
         thread is appended to the end of the lwp list, so we'll
         eventually detach from it.  */
      wait_for_sigstop (&lwp->head);
      stopping_threads = 0;

      /* If LWP exits while we're trying to stop it, there's nothing
         left to do.  */
      lwp = find_lwp_pid (pid_to_ptid (lwpid));
      if (lwp == NULL)
        return 0;
    }

  /* If this process is stopped but is expecting a SIGSTOP, then make
     sure we take care of that now.  This isn't absolutely guaranteed
     to collect the SIGSTOP, but is fairly likely to.  */
  if (lwp->stop_expected)
    {
      int wstat;
      /* Clear stop_expected, so that the SIGSTOP will be reported.  */
      lwp->stop_expected = 0;
      if (lwp->stopped)
        linux_resume_one_lwp (lwp, 0, 0, NULL);
      linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
                           get_lwp_thread (lwp));

  /* Finally, let it resume.  */
  ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0);

  delete_lwp (lwp);
  return 0;
}

static int
any_thread_of (struct inferior_list_entry *entry, void *args)
{
  int *pid_p = args;

  if (ptid_get_pid (entry->id) == *pid_p)
    return 1;

  return 0;
}

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

#ifdef USE_THREAD_DB
  thread_db_free (process, 1);
#endif

  current_inferior =
    (struct thread_info *) find_inferior (&all_threads, any_thread_of, &pid);

  delete_all_breakpoints ();
  find_inferior (&all_threads, linux_detach_one_lwp, &pid);
  linux_remove_process (process);
  return 0;
}

static void
linux_join (int pid)
{
  int status, ret;
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  ptid_t ptid = * (ptid_t *) arg;
  struct thread_info *thread;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
    return 0;

  thread = get_lwp_thread (lwp);

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (lwp->last_resume_kind == resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_STOPPED)
    return 0;

  return lwp->status_pending_p;
}

static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  return (struct lwp_info *) find_inferior (&all_lwps, same_lwp, &ptid);
}
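
/* For illustration: find_lwp_pid accepts either a full (pid, lwp)
   ptid or a bare pid.  find_lwp_pid (pid_to_ptid (1234)) matches the
   LWP whose lwpid is 1234 (a hypothetical id), because same_lwp falls
   back to the pid field when the ptid carries no lwp component.  */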

static struct lwp_info *
linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
{
  int ret;
  int to_wait_for = -1;
  struct lwp_info *child = NULL;

  if (debug_threads)
    fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));

  if (ptid_equal (ptid, minus_one_ptid))
    to_wait_for = -1;			/* any child */
  else
    to_wait_for = ptid_get_lwp (ptid);	/* this lwp only */

  options |= __WALL;

 retry:

  ret = my_waitpid (to_wait_for, wstatp, options);
  if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
    return NULL;
  else if (ret == -1)
    perror_with_name ("waitpid");

  if (debug_threads
      && (!WIFSTOPPED (*wstatp)
          || (WSTOPSIG (*wstatp) != 32
              && WSTOPSIG (*wstatp) != 33)))
    fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);

  child = find_lwp_pid (pid_to_ptid (ret));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
       was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (*wstatp))
    {
      add_pid_to_list (&stopped_pids, ret);
      goto retry;
    }
  else if (child == NULL)
    goto retry;

  child->stopped = 1;

  child->last_status = *wstatp;

  /* Architecture-specific setup after inferior is running.
     This needs to happen after we have attached to the inferior
     and it is stopped for the first time, but before we access
     any inferior registers.  */
  if (new_inferior)
    {
      the_low_target.arch_setup ();
#ifdef HAVE_LINUX_REGSETS
      memset (disabled_regsets, 0, num_regsets);
#endif
      new_inferior = 0;
    }

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, it's possible that the way to know which watched
     address trapped, is to check the register that is used to select
     which address to watch.  Problem is, between setting the
     watchpoint and reading back which data address trapped, the user
     may change the set of watchpoints, and, as a consequence, GDB
     changes the debug registers in the inferior.  To avoid reading
     back a stale stopped-data-address when that happens, we cache in
     LP the fact that a watchpoint trapped, and the corresponding data
     address, as soon as we see CHILD stop with a SIGTRAP.  If GDB
     changes the debug registers meanwhile, we have the cached data we
     can rely on.  */

  if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
    {
      if (the_low_target.stopped_by_watchpoint == NULL)
        {
          child->stopped_by_watchpoint = 0;
        }
      else
        {
          struct thread_info *saved_inferior;

          saved_inferior = current_inferior;
          current_inferior = get_lwp_thread (child);

          child->stopped_by_watchpoint
            = the_low_target.stopped_by_watchpoint ();

          if (child->stopped_by_watchpoint)
            {
              if (the_low_target.stopped_data_address != NULL)
                child->stopped_data_address
                  = the_low_target.stopped_data_address ();
              else
                child->stopped_data_address = 0;
            }

          current_inferior = saved_inferior;
        }
    }

  /* Store the STOP_PC, with adjustment applied.  This depends on the
     architecture being defined already (so that CHILD has a valid
     regcache), and on LAST_STATUS being set (to check for SIGTRAP or
     not).  */
  if (WIFSTOPPED (*wstatp))
    child->stop_pc = get_stop_pc (child);

  if (debug_threads
      && WIFSTOPPED (*wstatp)
      && the_low_target.get_pc != NULL)
    {
      struct thread_info *saved_inferior = current_inferior;
      struct regcache *regcache;
      CORE_ADDR pc;

      current_inferior = get_lwp_thread (child);
      regcache = get_thread_regcache (current_inferior, 1);
      pc = (*the_low_target.get_pc) (regcache);
      fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
      current_inferior = saved_inferior;
    }

  return child;
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return nonzero if a
   tracepoint event was handled, zero otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  if (tpoint_related_event)
    {
      if (debug_threads)
        fprintf (stderr, "got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Arrange for a breakpoint to be hit again later.  We don't keep the
   SIGTRAP status and don't forward the SIGTRAP signal to the LWP.  We
   will handle the current event, eventually we will resume this LWP,
   and this breakpoint will trap again.  */

static int
cancel_breakpoint (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;

  /* There's nothing to do if we don't support breakpoints.  */
  if (!supports_breakpoints ())
    return 0;

  /* breakpoint_at reads from current inferior.  */
  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
    {
      if (debug_threads)
        fprintf (stderr,
                 "CB: Push back breakpoint for %s\n",
                 target_pid_to_str (ptid_of (lwp)));

      /* Back up the PC if necessary.  */
      if (the_low_target.decr_pc_after_break)
        {
          struct regcache *regcache
            = get_thread_regcache (current_inferior, 1);
          (*the_low_target.set_pc) (regcache, lwp->stop_pc);
        }

      current_inferior = saved_inferior;
      return 1;
    }
  else
    {
      if (debug_threads)
        fprintf (stderr,
                 "CB: No breakpoint found at %s for [%s]\n",
                 paddress (lwp->stop_pc),
                 target_pid_to_str (ptid_of (lwp)));
    }

  current_inferior = saved_inferior;
  return 0;
}

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* Wait for an event from child PID.  If PID is -1, wait for any
   child.  Store the stop status through the status pointer WSTAT.
   OPTIONS is passed to the waitpid call.  Return 0 if no child stop
   event was found and OPTIONS contains WNOHANG.  Return the PID of
   the stopped child otherwise.  */

static int
linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options)
{
  struct lwp_info *event_child, *requested_child;

  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (ptid_equal (ptid, minus_one_ptid)
      || ptid_equal (pid_to_ptid (ptid_get_pid (ptid)), ptid))
    {
      event_child = (struct lwp_info *)
        find_inferior (&all_lwps, status_pending_p_callback, &ptid);
      if (debug_threads && event_child)
        fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
    }
  else
    {
      requested_child = find_lwp_pid (ptid);

      if (requested_child->status_pending_p)
        event_child = requested_child;
    }

  if (event_child != NULL)
    {
      if (debug_threads)
        fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
                 lwpid_of (event_child), event_child->status_pending);
      *wstat = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      current_inferior = get_lwp_thread (event_child);
      return lwpid_of (event_child);
    }

  /* We only enter this loop if no process has a pending wait status.  Thus
     any action taken in response to a wait status inside this loop is
     responding as soon as we detect the status, not after any pending
     events.  */
  while (1)
    {
      event_child = linux_wait_for_lwp (ptid, wstat, options);

      if ((options & WNOHANG) && event_child == NULL)
        {
          if (debug_threads)
            fprintf (stderr, "WNOHANG set, no event found\n");
          return 0;
        }

      if (event_child == NULL)
        error ("event from unknown child");

      current_inferior = get_lwp_thread (event_child);

      /* Check for thread exit.  */
      if (! WIFSTOPPED (*wstat))
        {
          if (debug_threads)
            fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));

          /* If the last thread is exiting, just return.  */
          if (last_thread_of_process_p (current_inferior))
            {
              if (debug_threads)
                fprintf (stderr, "LWP %ld is last lwp of process\n",
                         lwpid_of (event_child));
              return lwpid_of (event_child);
            }

          if (!non_stop)
            {
              current_inferior = (struct thread_info *) all_threads.head;
              if (debug_threads)
                fprintf (stderr, "Current inferior is now %ld\n",
                         lwpid_of (get_thread_lwp (current_inferior)));
            }
          else
            {
              current_inferior = NULL;
              if (debug_threads)
                fprintf (stderr, "Current inferior is now <NULL>\n");
            }

          /* If we were waiting for this particular child to do something...
             well, it did something.  */
          if (requested_child != NULL)
            {
              int lwpid = lwpid_of (event_child);

              /* Cancel the step-over operation --- the thread that
                 started it is gone.  */
              if (finish_step_over (event_child))
                unstop_all_lwps (event_child);
              delete_lwp (event_child);
              return lwpid;
            }

          delete_lwp (event_child);

          /* Wait for a more interesting event.  */
          continue;
        }

      if (event_child->must_set_ptrace_flags)
        {
          ptrace (PTRACE_SETOPTIONS, lwpid_of (event_child),
                  0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
          event_child->must_set_ptrace_flags = 0;
        }

      if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
          && *wstat >> 16 != 0)
        {
          handle_extended_wait (event_child, *wstat);
          continue;
        }

      /* If GDB is not interested in this signal, don't stop other
         threads, and don't report it to GDB.  Just resume the
         inferior right away.  We do this for threading-related
         signals as well as any that GDB specifically requested we
         ignore.  But never ignore SIGSTOP if we sent it ourselves,
         and do not ignore signals when stepping - they may require
         special handling to skip the signal handler.  */
      /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
         thread library?  */
      if (WIFSTOPPED (*wstat)
          && !event_child->stepping
          && (
#if defined (USE_THREAD_DB) && defined (__SIGRTMIN)
              (current_process ()->private->thread_db != NULL
               && (WSTOPSIG (*wstat) == __SIGRTMIN
                   || WSTOPSIG (*wstat) == __SIGRTMIN + 1))
              ||
#endif
              (pass_signals[target_signal_from_host (WSTOPSIG (*wstat))]
               && !(WSTOPSIG (*wstat) == SIGSTOP
                    && event_child->stop_expected))))
        {
          siginfo_t info, *info_p;

          if (debug_threads)
            fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
                     WSTOPSIG (*wstat), lwpid_of (event_child));

          if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
            info_p = &info;
          else
            info_p = NULL;
          linux_resume_one_lwp (event_child, event_child->stepping,
                                WSTOPSIG (*wstat), info_p);
          continue;
        }

      if (WIFSTOPPED (*wstat)
          && WSTOPSIG (*wstat) == SIGSTOP
          && event_child->stop_expected)
        {
          int should_stop;

          if (debug_threads)
            fprintf (stderr, "Expected stop.\n");
          event_child->stop_expected = 0;

          should_stop = (event_child->last_resume_kind == resume_stop
                         || stopping_threads);

          if (!should_stop)
            {
              linux_resume_one_lwp (event_child,
                                    event_child->stepping, 0, NULL);
              continue;
            }
        }

      return lwpid_of (event_child);
    }

  /* NOTREACHED */
  return 0;
}

static int
linux_wait_for_event (ptid_t ptid, int *wstat, int options)
{
  ptid_t wait_ptid;

  if (ptid_is_pid (ptid))
    {
      /* A request to wait for a specific tgid.  This is not possible
         with waitpid, so instead, we wait for any child, and leave
         children we're not interested in right now with a pending
         status to report later.  */
      wait_ptid = minus_one_ptid;
    }
  else
    wait_ptid = ptid;

  while (1)
    {
      int event_pid;

      event_pid = linux_wait_for_event_1 (wait_ptid, wstat, options);

      if (event_pid > 0
          && ptid_is_pid (ptid) && ptid_get_pid (ptid) != event_pid)
        {
          struct lwp_info *event_child = find_lwp_pid (pid_to_ptid (event_pid));

          if (! WIFSTOPPED (*wstat))
            mark_lwp_dead (event_child, *wstat);
          else
            {
              event_child->status_pending_p = 1;
              event_child->status_pending = *wstat;
            }
        }
      else
        return event_pid;
    }
}


/* Count the LWP's that have had events.  */

static int
count_events_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  int *count = data;

  gdb_assert (count != NULL);

  /* Count only resumed LWPs that have a SIGTRAP event pending that
     should be reported to GDB.  */
  if (get_lwp_thread (lp)->last_status.kind == TARGET_WAITKIND_IGNORE
      && lp->last_resume_kind != resume_stop
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !breakpoint_inserted_here (lp->stop_pc))
    (*count)++;

  return 0;
}

/* Select the LWP (if any) that is currently being single-stepped.  */

static int
select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;

  if (get_lwp_thread (lp)->last_status.kind == TARGET_WAITKIND_IGNORE
      && lp->last_resume_kind == resume_step
      && lp->status_pending_p)
    return 1;
  else
    return 0;
}

/* Select the Nth LWP that has had a SIGTRAP event that should be
   reported to GDB.  */

static int
select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  int *selector = data;

  gdb_assert (selector != NULL);

  /* Select only resumed LWPs that have a SIGTRAP event pending.  */
  if (lp->last_resume_kind != resume_stop
      && get_lwp_thread (lp)->last_status.kind == TARGET_WAITKIND_IGNORE
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !breakpoint_inserted_here (lp->stop_pc))
    if ((*selector)-- == 0)
      return 1;

  return 0;
}

static int
cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct lwp_info *event_lp = data;

  /* Leave the LWP that has been elected to receive a SIGTRAP alone.  */
  if (lp == event_lp)
    return 0;

  /* If a LWP other than the LWP that we're reporting an event for has
     hit a GDB breakpoint (as opposed to some random trap signal),
     then just arrange for it to hit it again later.  We don't keep
     the SIGTRAP status and don't forward the SIGTRAP signal to the
     LWP.  We will handle the current event, eventually we will resume
     all LWPs, and this one will get its breakpoint trap again.

     If we do not do this, then we run the risk that the user will
     delete or disable the breakpoint, but the LWP will have already
     tripped on it.  */

  if (lp->last_resume_kind != resume_stop
      && get_lwp_thread (lp)->last_status.kind == TARGET_WAITKIND_IGNORE
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !lp->stepping
      && !lp->stopped_by_watchpoint
      && cancel_breakpoint (lp))
    /* Throw away the SIGTRAP.  */
    lp->status_pending_p = 0;

  return 0;
}

/* Select one LWP out of those that have events pending.  */

static void
select_event_lwp (struct lwp_info **orig_lp)
{
  int num_events = 0;
  int random_selector;
  struct lwp_info *event_lp;

  /* Give preference to any LWP that is being single-stepped.  */
  event_lp
    = (struct lwp_info *) find_inferior (&all_lwps,
                                         select_singlestep_lwp_callback,
                                         NULL);
  if (event_lp != NULL)
    {
      if (debug_threads)
        fprintf (stderr,
                 "SEL: Select single-step %s\n",
                 target_pid_to_str (ptid_of (event_lp)));
    }
  else
    {
      /* No single-stepping LWP.  Select one at random, out of those
         which have had SIGTRAP events.  */

      /* First see how many SIGTRAP events we have.  */
      find_inferior (&all_lwps, count_events_callback, &num_events);

      /* Now randomly pick a LWP out of those that have had a SIGTRAP.  */
      random_selector = (int)
        ((num_events * (double) rand ()) / (RAND_MAX + 1.0));

      if (debug_threads && num_events > 1)
        fprintf (stderr,
                 "SEL: Found %d SIGTRAP events, selecting #%d\n",
                 num_events, random_selector);

      event_lp = (struct lwp_info *) find_inferior (&all_lwps,
                                                    select_event_lwp_callback,
                                                    &random_selector);
    }

  if (event_lp != NULL)
    {
      /* Switch the event LWP.  */
      *orig_lp = event_lp;
    }
}

/* Set this inferior LWP's state as "want-stopped".  We won't resume
   this LWP until the client gives us another action for it.  */

static void
gdb_wants_lwp_stopped (struct inferior_list_entry *entry)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lwp);

  /* Most threads are stopped implicitly (all-stop); tag that with
     signal 0.  The thread being explicitly reported stopped to the
     client gets its status fixed up afterwards.  */
  thread->last_status.kind = TARGET_WAITKIND_STOPPED;
  thread->last_status.value.sig = TARGET_SIGNAL_0;

  lwp->last_resume_kind = resume_stop;
}

/* Set all LWP's states as "want-stopped".  */

static void
gdb_wants_all_stopped (void)
{
  for_each_inferior (&all_lwps, gdb_wants_lwp_stopped);
}

/* Wait for process, returns status.  */

static ptid_t
linux_wait_1 (ptid_t ptid,
              struct target_waitstatus *ourstatus, int target_options)
{
  int w;
  struct lwp_info *event_child;
  int options;
  int pid;
  int step_over_finished;
  int bp_explains_trap;
  int maybe_internal_trap;
  int report_to_gdb;
  int trace_event;

  /* Translate generic target options into linux options.  */
  options = __WALL;
  if (target_options & TARGET_WNOHANG)
    options |= WNOHANG;

 retry:
  ourstatus->kind = TARGET_WAITKIND_IGNORE;

  /* If we were only supposed to resume one thread, only wait for
     that thread - if it's still alive.  If it died, however - which
     can happen if we're coming from the thread death case below -
     then we need to make sure we restart the other threads.  We could
     pick a thread at random or restart all; restarting all is less
     arbitrary.  */
  if (!non_stop
      && !ptid_equal (cont_thread, null_ptid)
      && !ptid_equal (cont_thread, minus_one_ptid))
    {
      struct thread_info *thread;

      thread = (struct thread_info *) find_inferior_id (&all_threads,
                                                        cont_thread);

      /* No stepping, no signal - unless one is pending already, of course.  */
      if (thread == NULL)
        {
          struct thread_resume resume_info;
          resume_info.thread = minus_one_ptid;
          resume_info.kind = resume_continue;
          resume_info.sig = 0;
          linux_resume (&resume_info, 1);
        }
      else
        ptid = cont_thread;
    }

  if (ptid_equal (step_over_bkpt, null_ptid))
    pid = linux_wait_for_event (ptid, &w, options);
  else
    {
      if (debug_threads)
        fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
                 target_pid_to_str (step_over_bkpt));
      pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
    }

  if (pid == 0) /* only if TARGET_WNOHANG */
    return null_ptid;

  event_child = get_thread_lwp (current_inferior);

  /* If we are waiting for a particular child, and it exited,
     linux_wait_for_event will return its exit status.  Similarly if
     the last child exited.  If this is not the last child, however,
     do not report it as exited until there is a 'thread exited' response
     available in the remote protocol.  Instead, just wait for another event.
     This should be safe, because if the thread crashed we will already
     have reported the termination signal to GDB; that should stop any
     in-progress stepping operations, etc.

     Report the exit status of the last thread to exit.  This matches
     LinuxThreads' behavior.  */

  if (last_thread_of_process_p (current_inferior))
    {
      if (WIFEXITED (w) || WIFSIGNALED (w))
        {
          int pid = pid_of (event_child);
          struct process_info *process = find_process_pid (pid);

#ifdef USE_THREAD_DB
          thread_db_free (process, 0);
#endif
          delete_lwp (event_child);
          linux_remove_process (process);

          current_inferior = NULL;

          if (WIFEXITED (w))
            {
              ourstatus->kind = TARGET_WAITKIND_EXITED;
              ourstatus->value.integer = WEXITSTATUS (w);

              if (debug_threads)
                fprintf (stderr, "\nChild exited with retcode = %x \n",
                         WEXITSTATUS (w));
            }
          else
            {
              ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
              ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));

              if (debug_threads)
                fprintf (stderr, "\nChild terminated with signal = %x \n",
                         WTERMSIG (w));
            }

          return pid_to_ptid (pid);
        }
    }
  else
    {
      if (!WIFSTOPPED (w))
        goto retry;
    }

  /* If this event was not handled before, and is not a SIGTRAP, we
     report it.  SIGILL and SIGSEGV are also treated as traps in case
     a breakpoint is inserted at the current PC.  If this target does
     not support internal breakpoints at all, we also report the
     SIGTRAP without further processing; it's of no concern to us.  */
  maybe_internal_trap
    = (supports_breakpoints ()
       && (WSTOPSIG (w) == SIGTRAP
           || ((WSTOPSIG (w) == SIGILL
                || WSTOPSIG (w) == SIGSEGV)
               && (*the_low_target.breakpoint_at) (event_child->stop_pc))));

  if (maybe_internal_trap)
    {
      /* Handle anything that requires bookkeeping before deciding to
         report the event or continue waiting.  */

      /* First check if we can explain the SIGTRAP with an internal
         breakpoint, or if we should possibly report the event to GDB.
         Do this before anything that may remove or insert a
         breakpoint.  */
      bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);

      /* We have a SIGTRAP, possibly a step-over dance has just
         finished.  If so, tweak the state machine accordingly,
         reinsert breakpoints and delete any reinsert (software
         single-step) breakpoints.  */
      step_over_finished = finish_step_over (event_child);

      /* Now invoke the callbacks of any internal breakpoints there.  */
      check_breakpoints (event_child->stop_pc);

      /* Handle tracepoint data collecting.  This may overflow the
         trace buffer, and cause a tracing stop, removing
         breakpoints.  */
      trace_event = handle_tracepoints (event_child);

      if (bp_explains_trap)
        {
          /* If we stepped or ran into an internal breakpoint, we've
             already handled it.  So next time we resume (from this
             PC), we should step over it.  */
          if (debug_threads)
            fprintf (stderr, "Hit a gdbserver breakpoint.\n");

          if (breakpoint_here (event_child->stop_pc))
            event_child->need_step_over = 1;
        }
    }
  else
    {
      /* We have some other signal, possibly a step-over dance was in
         progress, and it should be cancelled too.  */
      step_over_finished = finish_step_over (event_child);

      trace_event = 0;
    }

  /* We have all the data we need.  Either report the event to GDB, or
     resume threads and keep waiting for more.  */

  /* Check if GDB would be interested in this event.  If GDB wanted
     this thread to single step, we always want to report the SIGTRAP,
     and let GDB handle it.  Watchpoints should always be reported.
     So should signals we can't explain.  A SIGTRAP we can't explain
     could be a GDB breakpoint --- we may or may not support Z0
     breakpoints.  If we do, we'll be able to handle GDB breakpoints
     on top of internal breakpoints, by handling the internal
     breakpoint and still reporting the event to GDB.  If we don't,
     we're out of luck; GDB won't see the breakpoint hit.  */
  report_to_gdb = (!maybe_internal_trap
                   || event_child->last_resume_kind == resume_step
                   || event_child->stopped_by_watchpoint
                   || (!step_over_finished && !bp_explains_trap && !trace_event)
                   || gdb_breakpoint_here (event_child->stop_pc));

  /* We found no reason GDB would want us to stop.  We either hit one
     of our own breakpoints, or finished an internal step GDB
     shouldn't know about.  */
  if (!report_to_gdb)
    {
      if (debug_threads)
        {
          if (bp_explains_trap)
            fprintf (stderr, "Hit a gdbserver breakpoint.\n");
          if (step_over_finished)
            fprintf (stderr, "Step-over finished.\n");
          if (trace_event)
            fprintf (stderr, "Tracepoint event.\n");
        }

      /* We're not reporting this breakpoint to GDB, so apply the
         decr_pc_after_break adjustment to the inferior's regcache
         ourselves.  */

      if (the_low_target.set_pc != NULL)
        {
          struct regcache *regcache
            = get_thread_regcache (get_lwp_thread (event_child), 1);
          (*the_low_target.set_pc) (regcache, event_child->stop_pc);
        }

      /* We've finished stepping over a breakpoint.  We've stopped all
         LWPs momentarily except the stepping one.  This is where we
         resume them all again.  We're going to keep waiting, so use
         proceed, which handles stepping over the next breakpoint.  */
      if (debug_threads)
        fprintf (stderr, "proceeding all threads.\n");
      proceed_all_lwps ();
      goto retry;
    }

  if (debug_threads)
    {
      if (event_child->last_resume_kind == resume_step)
        fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
      if (event_child->stopped_by_watchpoint)
        fprintf (stderr, "Stopped by watchpoint.\n");
      if (gdb_breakpoint_here (event_child->stop_pc))
        fprintf (stderr, "Stopped by GDB breakpoint.\n");
      fprintf (stderr, "Hit a non-gdbserver trap event.\n");
    }
1855
1856 /* Alright, we're going to report a stop. */
1857
1858 if (!non_stop)
1859 {
1860 /* In all-stop, stop all threads. */
1861 stop_all_lwps ();
1862
1863 /* If we're not waiting for a specific LWP, choose an event LWP
1864 from among those that have had events. Giving equal priority
1865 to all LWPs that have had events helps prevent
1866 starvation. */
1867 if (ptid_equal (ptid, minus_one_ptid))
1868 {
1869 event_child->status_pending_p = 1;
1870 event_child->status_pending = w;
1871
1872 select_event_lwp (&event_child);
1873
1874 event_child->status_pending_p = 0;
1875 w = event_child->status_pending;
1876 }
1877
1878 /* Now that we've selected our final event LWP, cancel any
1879 breakpoints in other LWPs that have hit a GDB breakpoint.
1880 See the comment in cancel_breakpoints_callback to find out
1881 why. */
1882 find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
1883 }
1884 else
1885 {
1886 /* If we just finished a step-over, then all threads had been
1887 momentarily paused. In all-stop, that's fine, we want
1888 threads stopped by now anyway. In non-stop, we need to
1889 re-resume threads that GDB wanted to be running. */
1890 if (step_over_finished)
1891 unstop_all_lwps (event_child);
1892 }
1893
1894 ourstatus->kind = TARGET_WAITKIND_STOPPED;
1895
1896 /* Do this before the gdb_wants_all_stopped calls below, since they
1897 always set last_resume_kind to resume_stop. */
1898 if (event_child->last_resume_kind == resume_stop && WSTOPSIG (w) == SIGSTOP)
1899 {
1900 /* A thread that has been requested to stop by GDB with vCont;t,
1901 and that stopped cleanly, so report it as SIG0. The use of
1902 SIGSTOP is an implementation detail. */
1903 ourstatus->value.sig = TARGET_SIGNAL_0;
1904 }
1905 else if (event_child->last_resume_kind == resume_stop && WSTOPSIG (w) != SIGSTOP)
1906 {
1907 /* A thread that has been requested to stop by GDB with vCont;t,
1908 but stopped for some other reason. */
1909 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
1910 }
1911 else
1912 {
1913 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
1914 }
1915
1916 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
1917
1918 if (!non_stop)
1919 {
1920 /* From GDB's perspective, all-stop mode always stops all
1921 threads implicitly. Tag all threads as "want-stopped". */
1922 gdb_wants_all_stopped ();
1923 }
1924 else
1925 {
1926 /* We're reporting this LWP as stopped. Update its
1927 "want-stopped" state to what the client wants, until it gets
1928 a new resume action. */
1929 gdb_wants_lwp_stopped (&event_child->head);
1930 }
1931
1932 if (debug_threads)
1933 fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
1934 target_pid_to_str (ptid_of (event_child)),
1935 ourstatus->kind,
1936 ourstatus->value.sig);
1937
1938 get_lwp_thread (event_child)->last_status = *ourstatus;
1939 return ptid_of (event_child);
1940 }
1941
1942 /* Get rid of any pending event in the pipe. */
1943 static void
1944 async_file_flush (void)
1945 {
1946 int ret;
1947 char buf;
1948
1949 do
1950 ret = read (linux_event_pipe[0], &buf, 1);
1951 while (ret >= 0 || (ret == -1 && errno == EINTR));
1952 }
1953
1954 /* Put something in the pipe, so the event loop wakes up. */
1955 static void
1956 async_file_mark (void)
1957 {
1958 int ret;
1959
1960 async_file_flush ();
1961
1962 do
1963 ret = write (linux_event_pipe[1], "+", 1);
1964 while (ret == 0 || (ret == -1 && errno == EINTR));
1965
1966 /* Ignore EAGAIN. If the pipe is full, the event loop will already
1967 be awakened anyway. */
1968 }
1969
1970 static ptid_t
1971 linux_wait (ptid_t ptid,
1972 struct target_waitstatus *ourstatus, int target_options)
1973 {
1974 ptid_t event_ptid;
1975
1976 if (debug_threads)
1977 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
1978
1979 /* Flush the async file first. */
1980 if (target_is_async_p ())
1981 async_file_flush ();
1982
1983 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
1984
1985 /* If at least one stop was reported, there may be more. A single
1986 SIGCHLD can signal more than one child stop. */
1987 if (target_is_async_p ()
1988 && (target_options & TARGET_WNOHANG) != 0
1989 && !ptid_equal (event_ptid, null_ptid))
1990 async_file_mark ();
1991
1992 return event_ptid;
1993 }
1994
1995 /* Send a signal to an LWP. */
1996
1997 static int
1998 kill_lwp (unsigned long lwpid, int signo)
1999 {
2000 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2001 fails, then we are not using nptl threads and we should be using kill. */
2002
2003 #ifdef __NR_tkill
2004 {
2005 static int tkill_failed;
2006
2007 if (!tkill_failed)
2008 {
2009 int ret;
2010
2011 errno = 0;
2012 ret = syscall (__NR_tkill, lwpid, signo);
2013 if (errno != ENOSYS)
2014 return ret;
2015 tkill_failed = 1;
2016 }
2017 }
2018 #endif
2019
2020 return kill (lwpid, signo);
2021 }
2022
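/* Ask the LWP designated by ENTRY to stop, by sending it a SIGSTOP,
unless it is already stopped or already has a stop request pending.
The stop is collected later by wait_for_sigstop.  */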
2023 static void
2024 send_sigstop (struct inferior_list_entry *entry)
2025 {
2026 struct lwp_info *lwp = (struct lwp_info *) entry;
2027 int pid;
2028
2029 if (lwp->stopped)
2030 return;
2031
2032 pid = lwpid_of (lwp);
2033
2034 /* If we already have a pending stop signal for this LWP, don't
2035 send another. */
2036 if (lwp->stop_expected)
2037 {
2038 if (debug_threads)
2039 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
2040
2041 return;
2042 }
2043
2044 if (debug_threads)
2045 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
2046
2047 lwp->stop_expected = 1;
2048 kill_lwp (pid, SIGSTOP);
2049 }
2050
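/* Record that LWP is gone: remember its wait status WSTAT so it can
be reported later, and make sure we neither try to stop it again nor
expect any further stops from it.  */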
2051 static void
2052 mark_lwp_dead (struct lwp_info *lwp, int wstat)
2053 {
2054 /* It's dead, really. */
2055 lwp->dead = 1;
2056
2057 /* Store the exit status for later. */
2058 lwp->status_pending_p = 1;
2059 lwp->status_pending = wstat;
2060
2061 /* Prevent trying to stop it. */
2062 lwp->stopped = 1;
2063
2064 /* No further stops are expected from a dead lwp. */
2065 lwp->stop_expected = 0;
2066 }
2067
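/* Wait for the LWP designated by ENTRY to report a stop, normally the
SIGSTOP sent by send_sigstop.  If it stops with a different signal,
record that status as pending so it can be reported to GDB later.  */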
2068 static void
2069 wait_for_sigstop (struct inferior_list_entry *entry)
2070 {
2071 struct lwp_info *lwp = (struct lwp_info *) entry;
2072 struct thread_info *saved_inferior;
2073 int wstat;
2074 ptid_t saved_tid;
2075 ptid_t ptid;
2076 int pid;
2077
2078 if (lwp->stopped)
2079 {
2080 if (debug_threads)
2081 fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
2082 lwpid_of (lwp));
2083 return;
2084 }
2085
2086 saved_inferior = current_inferior;
2087 if (saved_inferior != NULL)
2088 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
2089 else
2090 saved_tid = null_ptid; /* avoid bogus unused warning */
2091
2092 ptid = lwp->head.id;
2093
2094 if (debug_threads)
2095 fprintf (stderr, "wait_for_sigstop: pulling one event\n");
2096
2097 pid = linux_wait_for_event (ptid, &wstat, __WALL);
2098
2099 /* If we stopped with a non-SIGSTOP signal, save it for later
2100 and record the pending SIGSTOP. If the process exited, just
2101 return. */
2102 if (WIFSTOPPED (wstat))
2103 {
2104 if (debug_threads)
2105 fprintf (stderr, "LWP %ld stopped with signal %d\n",
2106 lwpid_of (lwp), WSTOPSIG (wstat));
2107
2108 if (WSTOPSIG (wstat) != SIGSTOP)
2109 {
2110 if (debug_threads)
2111 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
2112 lwpid_of (lwp), wstat);
2113
2114 lwp->status_pending_p = 1;
2115 lwp->status_pending = wstat;
2116 }
2117 }
2118 else
2119 {
2120 if (debug_threads)
2121 fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);
2122
2123 lwp = find_lwp_pid (pid_to_ptid (pid));
2124 if (lwp)
2125 {
2126 /* Leave this status pending for the next time we're able to
2127 report it. In the meantime, we'll report this lwp as
2128 dead to GDB, so GDB doesn't try to read registers and
2129 memory from it. This can only happen if this was the
2130 last thread of the process; otherwise, PID is removed
2131 from the thread tables before linux_wait_for_event
2132 returns. */
2133 mark_lwp_dead (lwp, wstat);
2134 }
2135 }
2136
2137 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
2138 current_inferior = saved_inferior;
2139 else
2140 {
2141 if (debug_threads)
2142 fprintf (stderr, "Previously current thread died.\n");
2143
2144 if (non_stop)
2145 {
2146 /* We can't change the current inferior behind GDB's back,
2147 otherwise, a subsequent command may apply to the wrong
2148 process. */
2149 current_inferior = NULL;
2150 }
2151 else
2152 {
2153 /* Set a valid thread as current. */
2154 set_desired_inferior (0);
2155 }
2156 }
2157 }
2158
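/* Stop all LWPs: send a SIGSTOP to every LWP that is not already
stopped, then wait until each of them has reported it back.  */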
2159 static void
2160 stop_all_lwps (void)
2161 {
2162 stopping_threads = 1;
2163 for_each_inferior (&all_lwps, send_sigstop);
2164 for_each_inferior (&all_lwps, wait_for_sigstop);
2165 stopping_threads = 0;
2166 }
2167
2168 /* Resume execution of the inferior process.
2169 If STEP is nonzero, single-step it.
2170 If SIGNAL is nonzero, give it that signal. */
2171
2172 static void
2173 linux_resume_one_lwp (struct lwp_info *lwp,
2174 int step, int signal, siginfo_t *info)
2175 {
2176 struct thread_info *saved_inferior;
2177
2178 if (lwp->stopped == 0)
2179 return;
2180
2181 /* Cancel actions that rely on GDB not changing the PC (e.g., the
2182 user used the "jump" command, or "set $pc = foo"). */
2183 if (lwp->stop_pc != get_pc (lwp))
2184 {
2185 /* Collecting 'while-stepping' actions doesn't make sense
2186 anymore. */
2187 release_while_stepping_state_list (get_lwp_thread (lwp));
2188 }
2189
2190 /* If we have pending signals or status, and a new signal, enqueue the
2191 signal. Also enqueue the signal if we are waiting to reinsert a
2192 breakpoint; it will be picked up again below. */
2193 if (signal != 0
2194 && (lwp->status_pending_p || lwp->pending_signals != NULL
2195 || lwp->bp_reinsert != 0))
2196 {
2197 struct pending_signals *p_sig;
2198 p_sig = xmalloc (sizeof (*p_sig));
2199 p_sig->prev = lwp->pending_signals;
2200 p_sig->signal = signal;
2201 if (info == NULL)
2202 memset (&p_sig->info, 0, sizeof (siginfo_t));
2203 else
2204 memcpy (&p_sig->info, info, sizeof (siginfo_t));
2205 lwp->pending_signals = p_sig;
2206 }
2207
2208 if (lwp->status_pending_p)
2209 {
2210 if (debug_threads)
2211 fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
2212 " has pending status\n",
2213 lwpid_of (lwp), step ? "step" : "continue", signal,
2214 lwp->stop_expected ? "expected" : "not expected");
2215 return;
2216 }
2217
2218 saved_inferior = current_inferior;
2219 current_inferior = get_lwp_thread (lwp);
2220
2221 if (debug_threads)
2222 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
2223 lwpid_of (lwp), step ? "step" : "continue", signal,
2224 lwp->stop_expected ? "expected" : "not expected");
2225
2226 /* This bit needs some thinking about. If we get a signal that
2227 we must report while a single-step reinsert is still pending,
2228 we often end up resuming the thread. It might be better to
2229 (ew) allow a stack of pending events; then we could be sure that
2230 the reinsert happened right away and not lose any signals.
2231
2232 Making this stack would also shrink the window in which breakpoints are
2233 uninserted (see comment in linux_wait_for_lwp) but not enough for
2234 complete correctness, so it won't solve that problem. It may be
2235 worthwhile just to solve this one, however. */
2236 if (lwp->bp_reinsert != 0)
2237 {
2238 if (debug_threads)
2239 fprintf (stderr, " pending reinsert at 0x%s\n",
2240 paddress (lwp->bp_reinsert));
2241
2242 if (can_hardware_single_step ())
2243 {
2244 if (step == 0)
2245 fprintf (stderr, "BAD - reinserting but not stepping.\n");
2246
2247 step = 1;
2248 }
2249
2250 /* Postpone any pending signal. It was enqueued above. */
2251 signal = 0;
2252 }
2253
2254 /* If we have while-stepping actions in this thread, set it stepping.
2255 If we have a signal to deliver, it may or may not be set to
2256 SIG_IGN, we don't know. Assume so, and allow collecting
2257 while-stepping into a signal handler. A possible smart thing to
2258 do would be to set an internal breakpoint at the signal return
2259 address, continue, and carry on catching this while-stepping
2260 action only when that breakpoint is hit. A future
2261 enhancement. */
2262 if (get_lwp_thread (lwp)->while_stepping != NULL
2263 && can_hardware_single_step ())
2264 {
2265 if (debug_threads)
2266 fprintf (stderr,
2267 "lwp %ld has a while-stepping action -> forcing step.\n",
2268 lwpid_of (lwp));
2269 step = 1;
2270 }
2271
2272 if (debug_threads && the_low_target.get_pc != NULL)
2273 {
2274 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
2275 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
2276 fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
2277 }
2278
2279 /* If we have pending signals, consume one unless we are trying to reinsert
2280 a breakpoint. */
2281 if (lwp->pending_signals != NULL && lwp->bp_reinsert == 0)
2282 {
2283 struct pending_signals **p_sig;
2284
2285 p_sig = &lwp->pending_signals;
2286 while ((*p_sig)->prev != NULL)
2287 p_sig = &(*p_sig)->prev;
2288
2289 signal = (*p_sig)->signal;
2290 if ((*p_sig)->info.si_signo != 0)
2291 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
2292
2293 free (*p_sig);
2294 *p_sig = NULL;
2295 }
2296
2297 if (the_low_target.prepare_to_resume != NULL)
2298 the_low_target.prepare_to_resume (lwp);
2299
2300 regcache_invalidate_one ((struct inferior_list_entry *)
2301 get_lwp_thread (lwp));
2302 errno = 0;
2303 lwp->stopped = 0;
2304 lwp->stopped_by_watchpoint = 0;
2305 lwp->stepping = step;
2306 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0,
2307 /* Coerce to a uintptr_t first to avoid potential gcc warning
2308 of coercing an 8 byte integer to a 4 byte pointer. */
2309 (PTRACE_ARG4_TYPE) (uintptr_t) signal);
2310
2311 current_inferior = saved_inferior;
2312 if (errno)
2313 {
2314 /* ESRCH from ptrace either means that the thread was already
2315 running (an error) or that it is gone (a race condition). If
2316 it's gone, we will get a notification the next time we wait,
2317 so we can ignore the error. We could differentiate these
2318 two, but it's tricky without waiting; the thread still exists
2319 as a zombie, so sending it signal 0 would succeed. So just
2320 ignore ESRCH. */
2321 if (errno == ESRCH)
2322 return;
2323
2324 perror_with_name ("ptrace");
2325 }
2326 }
2327
2328 struct thread_resume_array
2329 {
2330 struct thread_resume *resume;
2331 size_t n;
2332 };
2333
2334 /* This function is called once per thread. We look up the thread
2335 in RESUME_PTR, and mark the thread with a pointer to the appropriate
2336 resume request.
2337
2338 This algorithm is O(threads * resume elements), but resume elements
2339 is small (and will remain small at least until GDB supports thread
2340 suspension). */
2341 static int
2342 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
2343 {
2344 struct lwp_info *lwp;
2345 struct thread_info *thread;
2346 int ndx;
2347 struct thread_resume_array *r;
2348
2349 thread = (struct thread_info *) entry;
2350 lwp = get_thread_lwp (thread);
2351 r = arg;
2352
2353 for (ndx = 0; ndx < r->n; ndx++)
2354 {
2355 ptid_t ptid = r->resume[ndx].thread;
2356 if (ptid_equal (ptid, minus_one_ptid)
2357 || ptid_equal (ptid, entry->id)
2358 || (ptid_is_pid (ptid)
2359 && (ptid_get_pid (ptid) == pid_of (lwp)))
2360 || (ptid_get_lwp (ptid) == -1
2361 && (ptid_get_pid (ptid) == pid_of (lwp))))
2362 {
2363 if (r->resume[ndx].kind == resume_stop
2364 && lwp->last_resume_kind == resume_stop)
2365 {
2366 if (debug_threads)
2367 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
2368 thread->last_status.kind == TARGET_WAITKIND_STOPPED
2369 ? "stopped"
2370 : "stopping",
2371 lwpid_of (lwp));
2372
2373 continue;
2374 }
2375
2376 lwp->resume = &r->resume[ndx];
2377 lwp->last_resume_kind = lwp->resume->kind;
2378 return 0;
2379 }
2380 }
2381
2382 /* No resume action for this thread. */
2383 lwp->resume = NULL;
2384
2385 return 0;
2386 }
2387
2388
2389 /* Set *FLAG_P if this lwp has an interesting status pending. */
2390 static int
2391 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
2392 {
2393 struct lwp_info *lwp = (struct lwp_info *) entry;
2394
2395 /* LWPs which will not be resumed are not interesting, because
2396 we might not wait for them next time through linux_wait. */
2397 if (lwp->resume == NULL)
2398 return 0;
2399
2400 if (lwp->status_pending_p)
2401 * (int *) flag_p = 1;
2402
2403 return 0;
2404 }
2405
2406 /* Return 1 if this lwp that GDB wants running is stopped at an
2407 internal breakpoint that we need to step over. It assumes that any
2408 required STOP_PC adjustment has already been propagated to the
2409 inferior's regcache. */
2410
2411 static int
2412 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
2413 {
2414 struct lwp_info *lwp = (struct lwp_info *) entry;
2415 struct thread_info *saved_inferior;
2416 CORE_ADDR pc;
2417
2418 /* LWPs which will not be resumed are not interesting, because we
2419 might not wait for them next time through linux_wait. */
2420
2421 if (!lwp->stopped)
2422 {
2423 if (debug_threads)
2424 fprintf (stderr,
2425 "Need step over [LWP %ld]? Ignoring, not stopped\n",
2426 lwpid_of (lwp));
2427 return 0;
2428 }
2429
2430 if (lwp->last_resume_kind == resume_stop)
2431 {
2432 if (debug_threads)
2433 fprintf (stderr,
2434 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
2435 lwpid_of (lwp));
2436 return 0;
2437 }
2438
2439 if (!lwp->need_step_over)
2440 {
2441 if (debug_threads)
2442 fprintf (stderr,
2443 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
2444 }
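/* Note there is no early return here: even with no step-over request
recorded, the thread may still be sitting at an internal breakpoint
(for instance if its own hit of the breakpoint was cancelled while
another thread's event was reported), and the breakpoint check
below is what decides.  */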
2445
2446 if (lwp->status_pending_p)
2447 {
2448 if (debug_threads)
2449 fprintf (stderr,
2450 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
2451 lwpid_of (lwp));
2452 return 0;
2453 }
2454
2455 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
2456 or we have. */
2457 pc = get_pc (lwp);
2458
2459 /* If the PC has changed since we stopped, then don't do anything,
2460 and let the breakpoint/tracepoint be hit. This happens if, for
2461 instance, GDB handled the decr_pc_after_break subtraction itself,
2462 GDB is OOL stepping this thread, or the user has issued a "jump"
2463 command, or poked the thread's registers herself. */
2464 if (pc != lwp->stop_pc)
2465 {
2466 if (debug_threads)
2467 fprintf (stderr,
2468 "Need step over [LWP %ld]? Cancelling, PC was changed. "
2469 "Old stop_pc was 0x%s, PC is now 0x%s\n",
2470 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
2471
2472 lwp->need_step_over = 0;
2473 return 0;
2474 }
2475
2476 saved_inferior = current_inferior;
2477 current_inferior = get_lwp_thread (lwp);
2478
2479 /* We can only step over breakpoints we know about. */
2480 if (breakpoint_here (pc))
2481 {
2482 /* Don't step over a breakpoint that GDB expects to hit
2483 though. */
2484 if (gdb_breakpoint_here (pc))
2485 {
2486 if (debug_threads)
2487 fprintf (stderr,
2488 "Need step over [LWP %ld]? yes, but found"
2489 " GDB breakpoint at 0x%s; skipping step over\n",
2490 lwpid_of (lwp), paddress (pc));
2491
2492 current_inferior = saved_inferior;
2493 return 0;
2494 }
2495 else
2496 {
2497 if (debug_threads)
2498 fprintf (stderr,
2499 "Need step over [LWP %ld]? yes, found breakpoint at 0x%s\n",
2500 lwpid_of (lwp), paddress (pc));
2501
2502 /* We've found an lwp that needs stepping over --- return 1 so
2503 that find_inferior stops looking. */
2504 current_inferior = saved_inferior;
2505
2506 /* If the step over is cancelled, this is set again. */
2507 lwp->need_step_over = 0;
2508 return 1;
2509 }
2510 }
2511
2512 current_inferior = saved_inferior;
2513
2514 if (debug_threads)
2515 fprintf (stderr,
2516 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
2517 lwpid_of (lwp), paddress (pc));
2518
2519 return 0;
2520 }
2521
2522 /* Start a step-over operation on LWP. When LWP is stopped at a
2523 breakpoint, we need to move the breakpoint out of the way to make
2524 progress. If we let other threads run while we do that, they may
2525 pass by the breakpoint location and miss hitting it. To avoid
2526 that, a step-over momentarily stops all threads while LWP is
2527 single-stepped with the breakpoint temporarily uninserted from
2528 the inferior. When the single-step finishes, we reinsert the
2529 breakpoint, and let all threads that are supposed to be running
2530 run again.
2531
2532 On targets that don't support hardware single-step, we don't
2533 currently support full software single-stepping. Instead, we only
2534 support stepping over the thread event breakpoint, by asking the
2535 low target where to place a reinsert breakpoint. Since this
2536 routine assumes the breakpoint being stepped over is a thread event
2537 breakpoint, it usually assumes the return address of the current
2538 function is a good enough place to set the reinsert breakpoint. */
2539
2540 static int
2541 start_step_over (struct lwp_info *lwp)
2542 {
2543 struct thread_info *saved_inferior;
2544 CORE_ADDR pc;
2545 int step;
2546
2547 if (debug_threads)
2548 fprintf (stderr,
2549 "Starting step-over on LWP %ld. Stopping all threads\n",
2550 lwpid_of (lwp));
2551
2552 stop_all_lwps ();
2553
2554 if (debug_threads)
2555 fprintf (stderr, "Done stopping all threads for step-over.\n");
2556
2557 /* Note, we should always reach here with an already adjusted PC,
2558 either by GDB (if we're resuming due to GDB's request), or by our
2559 caller, if we just finished handling an internal breakpoint GDB
2560 shouldn't care about. */
2561 pc = get_pc (lwp);
2562
2563 saved_inferior = current_inferior;
2564 current_inferior = get_lwp_thread (lwp);
2565
2566 lwp->bp_reinsert = pc;
2567 uninsert_breakpoints_at (pc);
2568
2569 if (can_hardware_single_step ())
2570 {
2571 step = 1;
2572 }
2573 else
2574 {
2575 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
2576 set_reinsert_breakpoint (raddr);
2577 step = 0;
2578 }
2579
2580 current_inferior = saved_inferior;
2581
2582 linux_resume_one_lwp (lwp, step, 0, NULL);
2583
2584 /* Require next event from this LWP. */
2585 step_over_bkpt = lwp->head.id;
2586 return 1;
2587 }
2588
2589 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
2590 start_step_over, if still there, and delete any reinsert
2591 breakpoints we've set, on non hardware single-step targets. */
2592
2593 static int
2594 finish_step_over (struct lwp_info *lwp)
2595 {
2596 if (lwp->bp_reinsert != 0)
2597 {
2598 if (debug_threads)
2599 fprintf (stderr, "Finished step over.\n");
2600
2601 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
2602 may be no breakpoint to reinsert there by now. */
2603 reinsert_breakpoints_at (lwp->bp_reinsert);
2604
2605 lwp->bp_reinsert = 0;
2606
2607 /* Delete any software-single-step reinsert breakpoints. No
2608 longer needed. We don't have to worry about other threads
2609 hitting this trap, and later not being able to explain it,
2610 because we were stepping over a breakpoint, and we hold all
2611 threads but LWP stopped while doing that. */
2612 if (!can_hardware_single_step ())
2613 delete_reinsert_breakpoints ();
2614
2615 step_over_bkpt = null_ptid;
2616 return 1;
2617 }
2618 else
2619 return 0;
2620 }
2621
2622 /* This function is called once per thread. We check the thread's resume
2623 request, which will tell us whether to resume, step, or leave the thread
2624 stopped; and what signal, if any, it should be sent.
2625
2626 For threads which we aren't explicitly told otherwise, we preserve
2627 the stepping flag; this is used for stepping over gdbserver-placed
2628 breakpoints.
2629
2630 If a pending status was set in any thread, we queue any needed
2631 signals, since we won't actually resume. We already have a pending
2632 event to report, so we don't need to preserve any step requests;
2633 they should be re-issued if necessary. */
2634
2635 static int
2636 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
2637 {
2638 struct lwp_info *lwp;
2639 struct thread_info *thread;
2640 int step;
2641 int leave_all_stopped = * (int *) arg;
2642 int leave_pending;
2643
2644 thread = (struct thread_info *) entry;
2645 lwp = get_thread_lwp (thread);
2646
2647 if (lwp->resume == NULL)
2648 return 0;
2649
2650 if (lwp->resume->kind == resume_stop)
2651 {
2652 if (debug_threads)
2653 fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));
2654
2655 if (!lwp->stopped)
2656 {
2657 if (debug_threads)
2658 fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));
2659
2660 /* Stop the thread, and wait for the event asynchronously,
2661 through the event loop. */
2662 send_sigstop (&lwp->head);
2663 }
2664 else
2665 {
2666 if (debug_threads)
2667 fprintf (stderr, "already stopped LWP %ld\n",
2668 lwpid_of (lwp));
2669
2670 /* The LWP may have been stopped in an internal event that
2671 was not meant to be notified back to GDB (e.g., gdbserver
2672 breakpoint), so we should be reporting a stop event in
2673 this case too. */
2674
2675 /* If the thread already has a pending SIGSTOP, this is a
2676 no-op. Otherwise, something later will presumably resume
2677 the thread and this will cause it to cancel any pending
2678 operation, due to last_resume_kind == resume_stop. If
2679 the thread already has a pending status to report, we
2680 will still report it the next time we wait - see
2681 status_pending_p_callback. */
2682 send_sigstop (&lwp->head);
2683 }
2684
2685 /* For stop requests, we're done. */
2686 lwp->resume = NULL;
2687 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
2688 return 0;
2689 }
2690
2691 /* If this thread which is about to be resumed has a pending status,
2692 then don't resume any threads - we can just report the pending
2693 status. Make sure to queue any signals that would otherwise be
2694 sent. In all-stop mode, we make this decision based on whether
2695 *any* thread has a pending status. If there's a thread that needs the
2696 step-over-breakpoint dance, then don't resume any other thread
2697 but that particular one. */
2698 leave_pending = (lwp->status_pending_p || leave_all_stopped);
2699
2700 if (!leave_pending)
2701 {
2702 if (debug_threads)
2703 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
2704
2705 step = (lwp->resume->kind == resume_step);
2706 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
2707 }
2708 else
2709 {
2710 if (debug_threads)
2711 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
2712
2713 /* If we have a new signal, enqueue the signal. */
2714 if (lwp->resume->sig != 0)
2715 {
2716 struct pending_signals *p_sig;
2717 p_sig = xmalloc (sizeof (*p_sig));
2718 p_sig->prev = lwp->pending_signals;
2719 p_sig->signal = lwp->resume->sig;
2720 memset (&p_sig->info, 0, sizeof (siginfo_t));
2721
2722 /* If this is the same signal we were previously stopped by,
2723 make sure to queue its siginfo. We can ignore the return
2724 value of ptrace; if it fails, we'll skip
2725 PTRACE_SETSIGINFO. */
2726 if (WIFSTOPPED (lwp->last_status)
2727 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
2728 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
2729
2730 lwp->pending_signals = p_sig;
2731 }
2732 }
2733
2734 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
2735 lwp->resume = NULL;
2736 return 0;
2737 }
2738
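/* Resume the inferior threads according to the N resume requests in
RESUME_INFO.  If any thread has a pending status to report, or needs
to step over a breakpoint first, leave all threads stopped and deal
with that before resuming anything.  */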
2739 static void
2740 linux_resume (struct thread_resume *resume_info, size_t n)
2741 {
2742 struct thread_resume_array array = { resume_info, n };
2743 struct lwp_info *need_step_over = NULL;
2744 int any_pending;
2745 int leave_all_stopped;
2746
2747 find_inferior (&all_threads, linux_set_resume_request, &array);
2748
2749 /* If there is a thread which would otherwise be resumed, which has
2750 a pending status, then don't resume any threads - we can just
2751 report the pending status. Make sure to queue any signals that
2752 would otherwise be sent. In non-stop mode, we'll apply this
2753 logic to each thread individually. We consume all pending events
2754 before considering starting a step-over (in all-stop). */
2755 any_pending = 0;
2756 if (!non_stop)
2757 find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
2758
2759 /* If there is a thread which would otherwise be resumed, which is
2760 stopped at a breakpoint that needs stepping over, then don't
2761 resume any threads - have it step over the breakpoint with all
2762 other threads stopped, then resume all threads again. Make sure
2763 to queue any signals that would otherwise be delivered or
2764 queued. */
2765 if (!any_pending && supports_breakpoints ())
2766 need_step_over
2767 = (struct lwp_info *) find_inferior (&all_lwps,
2768 need_step_over_p, NULL);
2769
2770 leave_all_stopped = (need_step_over != NULL || any_pending);
2771
2772 if (debug_threads)
2773 {
2774 if (need_step_over != NULL)
2775 fprintf (stderr, "Not resuming all, need step over\n");
2776 else if (any_pending)
2777 fprintf (stderr,
2778 "Not resuming, all-stop and found "
2779 "an LWP with pending status\n");
2780 else
2781 fprintf (stderr, "Resuming, no pending status or step over needed\n");
2782 }
2783
2784 /* Even if we're leaving threads stopped, queue all signals we'd
2785 otherwise deliver. */
2786 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
2787
2788 if (need_step_over)
2789 start_step_over (need_step_over);
2790 }
2791
2792 /* This function is called once per thread. We check the thread's
2793 last resume request, which will tell us whether to resume, step, or
2794 leave the thread stopped. Any signal the client requested to be
2795 delivered has already been enqueued at this point.
2796
2797 If any thread that GDB wants running is stopped at an internal
2798 breakpoint that needs stepping over, we start a step-over operation
2799 on that particular thread, and leave all others stopped. */
2800
2801 static void
2802 proceed_one_lwp (struct inferior_list_entry *entry)
2803 {
2804 struct lwp_info *lwp;
2805 int step;
2806
2807 lwp = (struct lwp_info *) entry;
2808
2809 if (debug_threads)
2810 fprintf (stderr,
2811 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
2812
2813 if (!lwp->stopped)
2814 {
2815 if (debug_threads)
2816 fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
2817 return;
2818 }
2819
2820 if (lwp->last_resume_kind == resume_stop)
2821 {
2822 if (debug_threads)
2823 fprintf (stderr, " client wants LWP %ld stopped\n", lwpid_of (lwp));
2824 return;
2825 }
2826
2827 if (lwp->status_pending_p)
2828 {
2829 if (debug_threads)
2830 fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
2831 lwpid_of (lwp));
2832 return;
2833 }
2834
2835 if (lwp->suspended)
2836 {
2837 if (debug_threads)
2838 fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
2839 return;
2840 }
2841
2842 step = lwp->last_resume_kind == resume_step;
2843 linux_resume_one_lwp (lwp, step, 0, NULL);
2844 }
2845
2846 /* When we finish a step-over, set threads running again. If there's
2847 another thread that may need a step-over, now's the time to start
2848 it. Eventually, we'll move all threads past their breakpoints. */
2849
2850 static void
2851 proceed_all_lwps (void)
2852 {
2853 struct lwp_info *need_step_over;
2854
2855 /* If there is a thread which would otherwise be resumed, which is
2856 stopped at a breakpoint that needs stepping over, then don't
2857 resume any threads - have it step over the breakpoint with all
2858 other threads stopped, then resume all threads again. */
2859
2860 if (supports_breakpoints ())
2861 {
2862 need_step_over
2863 = (struct lwp_info *) find_inferior (&all_lwps,
2864 need_step_over_p, NULL);
2865
2866 if (need_step_over != NULL)
2867 {
2868 if (debug_threads)
2869 fprintf (stderr, "proceed_all_lwps: found "
2870 "thread %ld needing a step-over\n",
2871 lwpid_of (need_step_over));
2872
2873 start_step_over (need_step_over);
2874 return;
2875 }
2876 }
2877
2878 if (debug_threads)
2879 fprintf (stderr, "Proceeding, no step-over needed\n");
2880
2881 for_each_inferior (&all_lwps, proceed_one_lwp);
2882 }
2883
2884 /* Stopped LWPs that the client wanted to be running, that don't have
2885 pending statuses, are set to run again, except for EXCEPT, if not
2886 NULL. This undoes a stop_all_lwps call. */
2887
2888 static void
2889 unstop_all_lwps (struct lwp_info *except)
2890 {
2891 if (debug_threads)
2892 {
2893 if (except)
2894 fprintf (stderr,
2895 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
2896 else
2897 fprintf (stderr,
2898 "unstopping all lwps\n");
2899 }
2900
2901 /* Make sure proceed_one_lwp doesn't try to resume this thread. */
2902 if (except != NULL)
2903 ++except->suspended;
2904
2905 for_each_inferior (&all_lwps, proceed_one_lwp);
2906
2907 if (except != NULL)
2908 --except->suspended;
2909 }
2910
2911 #ifdef HAVE_LINUX_USRREGS
2912
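/* Return the offset of register REGNUM within the inferior's USER
area, as described by the low target's regmap.  */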
2913 int
2914 register_addr (int regnum)
2915 {
2916 int addr;
2917
2918 if (regnum < 0 || regnum >= the_low_target.num_regs)
2919 error ("Invalid register number %d.", regnum);
2920
2921 addr = the_low_target.regmap[regnum];
2922
2923 return addr;
2924 }
2925
2926 /* Fetch one register. */
2927 static void
2928 fetch_register (struct regcache *regcache, int regno)
2929 {
2930 CORE_ADDR regaddr;
2931 int i, size;
2932 char *buf;
2933 int pid;
2934
2935 if (regno >= the_low_target.num_regs)
2936 return;
2937 if ((*the_low_target.cannot_fetch_register) (regno))
2938 return;
2939
2940 regaddr = register_addr (regno);
2941 if (regaddr == -1)
2942 return;
2943
2944 pid = lwpid_of (get_thread_lwp (current_inferior));
2945 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
2946 & - sizeof (PTRACE_XFER_TYPE));
2947 buf = alloca (size);
2948 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
2949 {
2950 errno = 0;
2951 *(PTRACE_XFER_TYPE *) (buf + i) =
2952 ptrace (PTRACE_PEEKUSER, pid,
2953 /* Coerce to a uintptr_t first to avoid potential gcc warning
2954 of coercing an 8 byte integer to a 4 byte pointer. */
2955 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0);
2956 regaddr += sizeof (PTRACE_XFER_TYPE);
2957 if (errno != 0)
2958 error ("reading register %d: %s", regno, strerror (errno));
2959 }
2960
2961 if (the_low_target.supply_ptrace_register)
2962 the_low_target.supply_ptrace_register (regcache, regno, buf);
2963 else
2964 supply_register (regcache, regno, buf);
2965 }
2966
2967 /* Fetch all registers, or just one, from the child process. */
2968 static void
2969 usr_fetch_inferior_registers (struct regcache *regcache, int regno)
2970 {
2971 if (regno == -1)
2972 for (regno = 0; regno < the_low_target.num_regs; regno++)
2973 fetch_register (regcache, regno);
2974 else
2975 fetch_register (regcache, regno);
2976 }
2977
2978 /* Store our register values back into the inferior.
2979 If REGNO is -1, do this for all registers.
2980 Otherwise, REGNO specifies which register (so we can save time). */
2981 static void
2982 usr_store_inferior_registers (struct regcache *regcache, int regno)
2983 {
2984 CORE_ADDR regaddr;
2985 int i, size;
2986 char *buf;
2987 int pid;
2988
2989 if (regno >= 0)
2990 {
2991 if (regno >= the_low_target.num_regs)
2992 return;
2993
2994 if ((*the_low_target.cannot_store_register) (regno) == 1)
2995 return;
2996
2997 regaddr = register_addr (regno);
2998 if (regaddr == -1)
2999 return;
3000 errno = 0;
3001 size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
3002 & - sizeof (PTRACE_XFER_TYPE);
3003 buf = alloca (size);
3004 memset (buf, 0, size);
3005
3006 if (the_low_target.collect_ptrace_register)
3007 the_low_target.collect_ptrace_register (regcache, regno, buf);
3008 else
3009 collect_register (regcache, regno, buf);
3010
3011 pid = lwpid_of (get_thread_lwp (current_inferior));
3012 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
3013 {
3014 errno = 0;
3015 ptrace (PTRACE_POKEUSER, pid,
3016 /* Coerce to a uintptr_t first to avoid potential gcc warning
3017 about coercing an 8 byte integer to a 4 byte pointer. */
3018 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
3019 (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
3020 if (errno != 0)
3021 {
3022 /* At this point, ESRCH should mean the process is
3023 already gone, in which case we simply ignore attempts
3024 to change its registers. See also the related
3025 comment in linux_resume_one_lwp. */
3026 if (errno == ESRCH)
3027 return;
3028
3029 if ((*the_low_target.cannot_store_register) (regno) == 0)
3030 error ("writing register %d: %s", regno, strerror (errno));
3031 }
3032 regaddr += sizeof (PTRACE_XFER_TYPE);
3033 }
3034 }
3035 else
3036 for (regno = 0; regno < the_low_target.num_regs; regno++)
3037 usr_store_inferior_registers (regcache, regno);
3038 }
3039 #endif /* HAVE_LINUX_USRREGS */
3040
3041
3042
3043 #ifdef HAVE_LINUX_REGSETS
3044
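/* Fetch registers from the inferior using every regset the target
supports.  Return 0 if the general registers were among them, and
nonzero if the caller should fall back to the USER-area method.  */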
3045 static int
3046 regsets_fetch_inferior_registers (struct regcache *regcache)
3047 {
3048 struct regset_info *regset;
3049 int saw_general_regs = 0;
3050 int pid;
3051 struct iovec iov;
3052
3053 regset = target_regsets;
3054
3055 pid = lwpid_of (get_thread_lwp (current_inferior));
3056 while (regset->size >= 0)
3057 {
3058 void *buf, *data;
3059 int nt_type, res;
3060
3061 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3062 {
3063 regset ++;
3064 continue;
3065 }
3066
3067 buf = xmalloc (regset->size);
3068
3069 nt_type = regset->nt_type;
3070 if (nt_type)
3071 {
3072 iov.iov_base = buf;
3073 iov.iov_len = regset->size;
3074 data = (void *) &iov;
3075 }
3076 else
3077 data = buf;
3078
3079 #ifndef __sparc__
3080 res = ptrace (regset->get_request, pid, nt_type, data);
3081 #else
3082 res = ptrace (regset->get_request, pid, data, nt_type);
3083 #endif
3084 if (res < 0)
3085 {
3086 if (errno == EIO)
3087 {
3088 /* If we get EIO on a regset, do not try it again for
3089 this process. */
3090 disabled_regsets[regset - target_regsets] = 1;
3091 free (buf);
3092 continue;
3093 }
3094 else
3095 {
3096 char s[256];
3097 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
3098 pid);
3099 perror (s);
3100 }
3101 }
3102 else if (regset->type == GENERAL_REGS)
3103 saw_general_regs = 1;
3104 regset->store_function (regcache, buf);
3105 regset ++;
3106 free (buf);
3107 }
3108 if (saw_general_regs)
3109 return 0;
3110 else
3111 return 1;
3112 }
3113
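/* Store registers back into the inferior through the kernel regset
interface.  Each regset is read back first, so that any fields the
regcache does not track are preserved.  Same return convention as
regsets_fetch_inferior_registers.  */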
3114 static int
3115 regsets_store_inferior_registers (struct regcache *regcache)
3116 {
3117 struct regset_info *regset;
3118 int saw_general_regs = 0;
3119 int pid;
3120 struct iovec iov;
3121
3122 regset = target_regsets;
3123
3124 pid = lwpid_of (get_thread_lwp (current_inferior));
3125 while (regset->size >= 0)
3126 {
3127 void *buf, *data;
3128 int nt_type, res;
3129
3130 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3131 {
3132 regset ++;
3133 continue;
3134 }
3135
3136 buf = xmalloc (regset->size);
3137
3138 /* First fill the buffer with the current register set contents,
3139 in case there are any items in the kernel's regset that are
3140 not in gdbserver's regcache. */
3141
3142 nt_type = regset->nt_type;
3143 if (nt_type)
3144 {
3145 iov.iov_base = buf;
3146 iov.iov_len = regset->size;
3147 data = (void *) &iov;
3148 }
3149 else
3150 data = buf;
3151
3152 #ifndef __sparc__
3153 res = ptrace (regset->get_request, pid, nt_type, data);
3154 #else
3155 res = ptrace (regset->get_request, pid, data, nt_type);
3156 #endif
3157
3158 if (res == 0)
3159 {
3160 /* Then overlay our cached registers on that. */
3161 regset->fill_function (regcache, buf);
3162
3163 /* Only now do we write the register set. */
3164 #ifndef __sparc__
3165 res = ptrace (regset->set_request, pid, nt_type, data);
3166 #else
3167 res = ptrace (regset->set_request, pid, data, nt_type);
3168 #endif
3169 }
3170
3171 if (res < 0)
3172 {
3173 if (errno == EIO)
3174 {
3175 /* If we get EIO on a regset, do not try it again for
3176 this process. */
3177 disabled_regsets[regset - target_regsets] = 1;
3178 free (buf);
3179 continue;
3180 }
3181 else if (errno == ESRCH)
3182 {
3183 /* At this point, ESRCH should mean the process is
3184 already gone, in which case we simply ignore attempts
3185 to change its registers. See also the related
3186 comment in linux_resume_one_lwp. */
3187 free (buf);
3188 return 0;
3189 }
3190 else
3191 {
3192 perror ("Warning: ptrace(regsets_store_inferior_registers)");
3193 }
3194 }
3195 else if (regset->type == GENERAL_REGS)
3196 saw_general_regs = 1;
3197 regset ++;
3198 free (buf);
3199 }
3200 if (saw_general_regs)
3201 return 0;
3202 else
3203 return 1;
3205 }
3206
3207 #endif /* HAVE_LINUX_REGSETS */
3208
3209
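/* Fetch registers into REGCACHE, preferring the regset interface and
falling back to USER-area reads where available.  */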
3210 void
3211 linux_fetch_registers (struct regcache *regcache, int regno)
3212 {
3213 #ifdef HAVE_LINUX_REGSETS
3214 if (regsets_fetch_inferior_registers (regcache) == 0)
3215 return;
3216 #endif
3217 #ifdef HAVE_LINUX_USRREGS
3218 usr_fetch_inferior_registers (regcache, regno);
3219 #endif
3220 }
3221
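/* Store registers from REGCACHE back into the inferior; the
counterpart of linux_fetch_registers.  */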
3222 void
3223 linux_store_registers (struct regcache *regcache, int regno)
3224 {
3225 #ifdef HAVE_LINUX_REGSETS
3226 if (regsets_store_inferior_registers (regcache) == 0)
3227 return;
3228 #endif
3229 #ifdef HAVE_LINUX_USRREGS
3230 usr_store_inferior_registers (regcache, regno);
3231 #endif
3232 }
3233
3234
3235 /* Copy LEN bytes from inferior's memory starting at MEMADDR
3236 to debugger memory starting at MYADDR. */
3237
3238 static int
3239 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
3240 {
3241 register int i;
3242 /* Round starting address down to longword boundary. */
3243 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
3244 /* Round ending address up; get number of longwords that makes. */
3245 register int count
3246 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
3247 / sizeof (PTRACE_XFER_TYPE);
3248 /* Allocate buffer of that many longwords. */
3249 register PTRACE_XFER_TYPE *buffer
3250 = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
3251 int fd;
3252 char filename[64];
3253 int pid = lwpid_of (get_thread_lwp (current_inferior));
3254
3255 /* Try using /proc. Don't bother for one word. */
3256 if (len >= 3 * sizeof (long))
3257 {
3258 /* We could keep this file open and cache it - possibly one per
3259 thread. That requires some juggling, but is even faster. */
3260 sprintf (filename, "/proc/%d/mem", pid);
3261 fd = open (filename, O_RDONLY | O_LARGEFILE);
3262 if (fd == -1)
3263 goto no_proc;
3264
3265 /* If pread64 is available, use it. It's faster if the kernel
3266 supports it (only one syscall), and it's 64-bit safe even on
3267 32-bit platforms (for instance, SPARC debugging a SPARC64
3268 application). */
3269 #ifdef HAVE_PREAD64
3270 if (pread64 (fd, myaddr, len, memaddr) != len)
3271 #else
3272 if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
3273 #endif
3274 {
3275 close (fd);
3276 goto no_proc;
3277 }
3278
3279 close (fd);
3280 return 0;
3281 }
3282
3283 no_proc:
3284 /* Read all the longwords */
3285 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
3286 {
3287 errno = 0;
3288 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
3289 about coercing an 8 byte integer to a 4 byte pointer. */
3290 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
3291 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
3292 if (errno)
3293 return errno;
3294 }
3295
3296 /* Copy appropriate bytes out of the buffer. */
3297 memcpy (myaddr,
3298 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
3299 len);
3300
3301 return 0;
3302 }
3303
3304 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
3305 memory at MEMADDR. On failure (cannot write to the inferior)
3306 returns the value of errno. */
3307
3308 static int
3309 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
3310 {
3311 register int i;
3312 /* Round starting address down to longword boundary. */
3313 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
3314 /* Round ending address up; get number of longwords that makes. */
3315 register int count
3316 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1) / sizeof (PTRACE_XFER_TYPE);
3317 /* Allocate buffer of that many longwords. */
3318 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
3319 int pid = lwpid_of (get_thread_lwp (current_inferior));
3320
3321 if (debug_threads)
3322 {
3323 /* Dump up to four bytes. */
3324 unsigned int val = * (unsigned int *) myaddr;
3325 if (len == 1)
3326 val = val & 0xff;
3327 else if (len == 2)
3328 val = val & 0xffff;
3329 else if (len == 3)
3330 val = val & 0xffffff;
3331 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
3332 val, (long)memaddr);
3333 }
3334
3335 /* Fill start and end extra bytes of buffer with existing memory data. */
3336
3337 errno = 0;
3338 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
3339 about coercing an 8 byte integer to a 4 byte pointer. */
3340 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
3341 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
3342 if (errno)
3343 return errno;
3344
3345 if (count > 1)
3346 {
3347 errno = 0;
3348 buffer[count - 1]
3349 = ptrace (PTRACE_PEEKTEXT, pid,
3350 /* Coerce to a uintptr_t first to avoid potential gcc warning
3351 about coercing an 8 byte integer to a 4 byte pointer. */
3352 (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
3353 * sizeof (PTRACE_XFER_TYPE)),
3354 0);
3355 if (errno)
3356 return errno;
3357 }
3358
3359 /* Copy data to be written over corresponding part of buffer. */
3360
3361 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), myaddr, len);
3362
3363 /* Write the entire buffer. */
3364
3365 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
3366 {
3367 errno = 0;
3368 ptrace (PTRACE_POKETEXT, pid,
3369 /* Coerce to a uintptr_t first to avoid potential gcc warning
3370 about coercing an 8 byte integer to a 4 byte pointer. */
3371 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
3372 (PTRACE_ARG4_TYPE) buffer[i]);
3373 if (errno)
3374 return errno;
3375 }
3376
3377 return 0;
3378 }
3379
3380 /* Non-zero if the kernel supports PTRACE_O_TRACEFORK. */
3381 static int linux_supports_tracefork_flag;
3382
3383 /* Helper functions for linux_test_for_tracefork, called via clone (). */
3384
3385 static int
3386 linux_tracefork_grandchild (void *arg)
3387 {
3388 _exit (0);
3389 }
3390
3391 #define STACK_SIZE 4096
3392
3393 static int
3394 linux_tracefork_child (void *arg)
3395 {
3396 ptrace (PTRACE_TRACEME, 0, 0, 0);
3397 kill (getpid (), SIGSTOP);
3398
3399 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
3400
3401 if (fork () == 0)
3402 linux_tracefork_grandchild (NULL);
3403
3404 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3405
3406 #ifdef __ia64__
3407 __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
3408 CLONE_VM | SIGCHLD, NULL);
3409 #else
3410 clone (linux_tracefork_grandchild, arg + STACK_SIZE,
3411 CLONE_VM | SIGCHLD, NULL);
3412 #endif
3413
3414 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3415
3416 _exit (0);
3417 }
3418
3419 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
3420 sure that we can enable the option, and that it has the desired
3421 effect. */
3422
3423 static void
3424 linux_test_for_tracefork (void)
3425 {
3426 int child_pid, ret, status;
3427 long second_pid;
3428 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
3429 char *stack = xmalloc (STACK_SIZE * 4);
3430 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3431
3432 linux_supports_tracefork_flag = 0;
3433
3434 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
3435
3436 child_pid = fork ();
3437 if (child_pid == 0)
3438 linux_tracefork_child (NULL);
3439
3440 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3441
3442 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
3443 #ifdef __ia64__
3444 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
3445 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
3446 #else /* !__ia64__ */
3447 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
3448 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
3449 #endif /* !__ia64__ */
3450
3451 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3452
3453 if (child_pid == -1)
3454 perror_with_name ("clone");
3455
3456 ret = my_waitpid (child_pid, &status, 0);
3457 if (ret == -1)
3458 perror_with_name ("waitpid");
3459 else if (ret != child_pid)
3460 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
3461 if (! WIFSTOPPED (status))
3462 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
3463
3464 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
3465 (PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
3466 if (ret != 0)
3467 {
3468 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
3469 if (ret != 0)
3470 {
3471 warning ("linux_test_for_tracefork: failed to kill child");
3472 return;
3473 }
3474
3475 ret = my_waitpid (child_pid, &status, 0);
3476 if (ret != child_pid)
3477 warning ("linux_test_for_tracefork: failed to wait for killed child");
3478 else if (!WIFSIGNALED (status))
3479 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
3480 "killed child", status);
3481
3482 return;
3483 }
3484
3485 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
3486 if (ret != 0)
3487 warning ("linux_test_for_tracefork: failed to resume child");
3488
3489 ret = my_waitpid (child_pid, &status, 0);
3490
3491 if (ret == child_pid && WIFSTOPPED (status)
3492 && status >> 16 == PTRACE_EVENT_FORK)
3493 {
3494 second_pid = 0;
3495 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
3496 if (ret == 0 && second_pid != 0)
3497 {
3498 int second_status;
3499
3500 linux_supports_tracefork_flag = 1;
3501 my_waitpid (second_pid, &second_status, 0);
3502 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
3503 if (ret != 0)
3504 warning ("linux_test_for_tracefork: failed to kill second child");
3505 my_waitpid (second_pid, &status, 0);
3506 }
3507 }
3508 else
3509 warning ("linux_test_for_tracefork: unexpected result from waitpid "
3510 "(%d, status 0x%x)", ret, status);
3511
3512 do
3513 {
3514 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
3515 if (ret != 0)
3516 warning ("linux_test_for_tracefork: failed to kill child");
3517 my_waitpid (child_pid, &status, 0);
3518 }
3519 while (WIFSTOPPED (status));
3520
3521 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
3522 free (stack);
3523 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3524 }
3525
3526
3527 static void
3528 linux_look_up_symbols (void)
3529 {
3530 #ifdef USE_THREAD_DB
3531 struct process_info *proc = current_process ();
3532
3533 if (proc->private->thread_db != NULL)
3534 return;
3535
3536 /* If the kernel supports tracing forks then it also supports tracing
3537 clones, and then we don't need to use the magic thread event breakpoint
3538 to learn about threads. */
3539 thread_db_init (!linux_supports_tracefork_flag);
3540 #endif
3541 }
3542
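/* Pass an interrupt request on to the inferior: send SIGINT to the
thread being continued if a specific one was selected, otherwise to
the main process.  */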
3543 static void
3544 linux_request_interrupt (void)
3545 {
3546 extern unsigned long signal_pid;
3547
3548 if (!ptid_equal (cont_thread, null_ptid)
3549 && !ptid_equal (cont_thread, minus_one_ptid))
3550 {
3551 struct lwp_info *lwp;
3552 int lwpid;
3553
3554 lwp = get_thread_lwp (current_inferior);
3555 lwpid = lwpid_of (lwp);
3556 kill_lwp (lwpid, SIGINT);
3557 }
3558 else
3559 kill_lwp (signal_pid, SIGINT);
3560 }
3561
3562 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
3563 to debugger memory starting at MYADDR. */
3564
3565 static int
3566 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
3567 {
3568 char filename[PATH_MAX];
3569 int fd, n;
3570 int pid = lwpid_of (get_thread_lwp (current_inferior));
3571
3572 snprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
3573
3574 fd = open (filename, O_RDONLY);
3575 if (fd < 0)
3576 return -1;
3577
3578 if (offset != (CORE_ADDR) 0
3579 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
3580 n = -1;
3581 else
3582 n = read (fd, myaddr, len);
3583
3584 close (fd);
3585
3586 return n;
3587 }
3588
3589 /* These breakpoint and watchpoint related wrapper functions simply
3590 pass on the function call if the target has registered a
3591 corresponding function. */
3592
3593 static int
3594 linux_insert_point (char type, CORE_ADDR addr, int len)
3595 {
3596 if (the_low_target.insert_point != NULL)
3597 return the_low_target.insert_point (type, addr, len);
3598 else
3599 /* Unsupported (see target.h). */
3600 return 1;
3601 }
3602
3603 static int
3604 linux_remove_point (char type, CORE_ADDR addr, int len)
3605 {
3606 if (the_low_target.remove_point != NULL)
3607 return the_low_target.remove_point (type, addr, len);
3608 else
3609 /* Unsupported (see target.h). */
3610 return 1;
3611 }
3612
3613 static int
3614 linux_stopped_by_watchpoint (void)
3615 {
3616 struct lwp_info *lwp = get_thread_lwp (current_inferior);
3617
3618 return lwp->stopped_by_watchpoint;
3619 }
3620
3621 static CORE_ADDR
3622 linux_stopped_data_address (void)
3623 {
3624 struct lwp_info *lwp = get_thread_lwp (current_inferior);
3625
3626 return lwp->stopped_data_address;
3627 }
3628
3629 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
3630 #if defined(__mcoldfire__)
3631 /* These should really be defined in the kernel's ptrace.h header. */
3632 #define PT_TEXT_ADDR 49*4
3633 #define PT_DATA_ADDR 50*4
3634 #define PT_TEXT_END_ADDR 51*4
3635 #endif
3636
3637 /* Under uClinux, programs are loaded at non-zero offsets, which we need
3638 to tell gdb about. */
3639
3640 static int
3641 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
3642 {
3643 #if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
3644 unsigned long text, text_end, data;
3645 int pid = lwpid_of (get_thread_lwp (current_inferior));
3646
3647 errno = 0;
3648
3649 text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
3650 text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
3651 data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);
3652
3653 if (errno == 0)
3654 {
3655 /* Both text and data offsets produced at compile-time (and so
3656 used by gdb) are relative to the beginning of the program,
3657 with the data segment immediately following the text segment.
3658 However, the actual runtime layout in memory may put the data
3659 somewhere else, so when we send gdb a data base-address, we
3660 use the real data base address and subtract the compile-time
3661 data base-address from it (which is just the length of the
3662 text segment). BSS immediately follows data in both
3663 cases. */
3664 *text_p = text;
3665 *data_p = data - (text_end - text);
3666
3667 return 1;
3668 }
3669 #endif
3670 return 0;
3671 }
3672 #endif
3673
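/* qsort comparison function: sort ints in ascending order.  */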
3674 static int
3675 compare_ints (const void *xa, const void *xb)
3676 {
3677 int a = *(const int *)xa;
3678 int b = *(const int *)xb;
3679
3680 return a - b;
3681 }
3682
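/* Collapse runs of equal values in the sorted range [B, E).
Return one past the last unique element.  */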
3683 static int *
3684 unique (int *b, int *e)
3685 {
3686 int *d = b;
3687 while (++b != e)
3688 if (*d != *b)
3689 *++d = *b;
3690 return ++d;
3691 }
3692
3693 /* Given PID, iterates over all threads in that process.
3694
3695 Information about each thread, in a format suitable for qXfer:osdata:thread
3696 is printed to BUFFER, if it's not NULL. BUFFER is assumed to be already
3697 initialized, and the caller is responsible for finishing and appending '\0'
3698 to it.
3699
3700 The list of cores that threads are running on is assigned to *CORES, if it
3701 is not NULL. If no cores are found, *CORES will be set to NULL. Caller
3702 should free *CORES. */
3703
3704 static void
3705 list_threads (int pid, struct buffer *buffer, char **cores)
3706 {
3707 int count = 0;
3708 int allocated = 10;
3709 int *core_numbers = xmalloc (sizeof (int) * allocated);
3710 char pathname[128];
3711 DIR *dir;
3712 struct dirent *dp;
3713 struct stat statbuf;
3714
3715 sprintf (pathname, "/proc/%d/task", pid);
3716 if (stat (pathname, &statbuf) == 0 && S_ISDIR (statbuf.st_mode))
3717 {
3718 dir = opendir (pathname);
3719 if (!dir)
3720 {
3721 free (core_numbers);
3722 return;
3723 }
3724
3725 while ((dp = readdir (dir)) != NULL)
3726 {
3727 unsigned long lwp = strtoul (dp->d_name, NULL, 10);
3728
3729 if (lwp != 0)
3730 {
3731 unsigned core = linux_core_of_thread (ptid_build (pid, lwp, 0));
3732
3733 if (core != -1)
3734 {
3735 char s[sizeof ("4294967295")];
3736 sprintf (s, "%u", core);
3737
3738 if (count == allocated)
3739 {
3740 allocated *= 2;
3741 core_numbers = xrealloc (core_numbers,
3742 sizeof (int) * allocated);
3743 }
3744 core_numbers[count++] = core;
3745 if (buffer)
3746 buffer_xml_printf (buffer,
3747 "<item>"
3748 "<column name=\"pid\">%d</column>"
3749 "<column name=\"tid\">%s</column>"
3750 "<column name=\"core\">%s</column>"
3751 "</item>", pid, dp->d_name, s);
3752 }
3753 else
3754 {
3755 if (buffer)
3756 buffer_xml_printf (buffer,
3757 "<item>"
3758 "<column name=\"pid\">%d</column>"
3759 "<column name=\"tid\">%s</column>"
3760 "</item>", pid, dp->d_name);
3761 }
3762 }
3763 }
3764 }
3765
3766 if (cores)
3767 {
3768 *cores = NULL;
3769 if (count > 0)
3770 {
3771 struct buffer buffer2;
3772 int *b;
3773 int *e;
3774 qsort (core_numbers, count, sizeof (int), compare_ints);
3775
3776 /* Remove duplicates. */
3777 b = core_numbers;
3778 e = unique (b, core_numbers + count);
3779
3780 buffer_init (&buffer2);
3781
3782 for (b = core_numbers; b != e; ++b)
3783 {
3784 char number[sizeof ("4294967295")];
3785 sprintf (number, "%u", *b);
3786 buffer_xml_printf (&buffer2, "%s%s",
3787 (b == core_numbers) ? "" : ",", number);
3788 }
3789 buffer_grow_str0 (&buffer2, "");
3790
3791 *cores = buffer_finish (&buffer2);
3792 }
3793 }
3794 free (core_numbers);
3795 }
3796
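/* Append to BUFFER a qXfer:osdata <item> describing process PID: its
owner USERNAME, its command line, and the cores its threads are
running on.  */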
3797 static void
3798 show_process (int pid, const char *username, struct buffer *buffer)
3799 {
3800 char pathname[128];
3801 FILE *f;
3802 char cmd[MAXPATHLEN + 1];
3803
3804 sprintf (pathname, "/proc/%d/cmdline", pid);
3805
3806 if ((f = fopen (pathname, "r")) != NULL)
3807 {
3808 size_t len = fread (cmd, 1, sizeof (cmd) - 1, f);
3809 if (len > 0)
3810 {
char *cores = NULL;
3812 int i;
3813 for (i = 0; i < len; i++)
3814 if (cmd[i] == '\0')
3815 cmd[i] = ' ';
3816 cmd[len] = '\0';
3817
3818 buffer_xml_printf (buffer,
3819 "<item>"
3820 "<column name=\"pid\">%d</column>"
3821 "<column name=\"user\">%s</column>"
3822 "<column name=\"command\">%s</column>",
3823 pid,
3824 username,
3825 cmd);
3826
3827 /* This only collects core numbers, and does not print threads. */
3828 list_threads (pid, NULL, &cores);
3829
3830 if (cores)
3831 {
3832 buffer_xml_printf (buffer,
3833 "<column name=\"cores\">%s</column>", cores);
3834 free (cores);
3835 }
3836
3837 buffer_xml_printf (buffer, "</item>");
3838 }
3839 fclose (f);
3840 }
3841 }
3842
3843 static int
3844 linux_qxfer_osdata (const char *annex,
3845 unsigned char *readbuf, unsigned const char *writebuf,
3846 CORE_ADDR offset, int len)
3847 {
/* We take the process list snapshot when the object starts to be
read, i.e. when OFFSET is zero; subsequent reads are served from
that snapshot, and it is freed once the requester reads past the
end.  */
3850 static const char *buf;
3851 static long len_avail = -1;
3852 static struct buffer buffer;
3853 int processes = 0;
3854 int threads = 0;
3855
3856 DIR *dirp;
3857
3858 if (strcmp (annex, "processes") == 0)
3859 processes = 1;
3860 else if (strcmp (annex, "threads") == 0)
3861 threads = 1;
3862 else
3863 return 0;
3864
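/* This object only supports reads.  */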
3865 if (!readbuf || writebuf)
3866 return 0;
3867
3868 if (offset == 0)
3869 {
3870 if (len_avail != -1 && len_avail != 0)
3871 buffer_free (&buffer);
3872 len_avail = 0;
3873 buf = NULL;
3874 buffer_init (&buffer);
3875 if (processes)
3876 buffer_grow_str (&buffer, "<osdata type=\"processes\">");
3877 else if (threads)
3878 buffer_grow_str (&buffer, "<osdata type=\"threads\">");
3879
3880 dirp = opendir ("/proc");
3881 if (dirp)
3882 {
3883 struct dirent *dp;
3884 while ((dp = readdir (dirp)) != NULL)
3885 {
3886 struct stat statbuf;
3887 char procentry[sizeof ("/proc/4294967295")];
3888
3889 if (!isdigit (dp->d_name[0])
3890 || strlen (dp->d_name) > sizeof ("4294967295") - 1)
3891 continue;
3892
3893 sprintf (procentry, "/proc/%s", dp->d_name);
3894 if (stat (procentry, &statbuf) == 0
3895 && S_ISDIR (statbuf.st_mode))
3896 {
3897 int pid = (int) strtoul (dp->d_name, NULL, 10);
3898
3899 if (processes)
3900 {
3901 struct passwd *entry = getpwuid (statbuf.st_uid);
3902 show_process (pid, entry ? entry->pw_name : "?", &buffer);
3903 }
3904 else if (threads)
3905 {
3906 list_threads (pid, &buffer, NULL);
3907 }
3908 }
3909 }
3910
3911 closedir (dirp);
3912 }
3913 buffer_grow_str0 (&buffer, "</osdata>\n");
3914 buf = buffer_finish (&buffer);
3915 len_avail = strlen (buf);
3916 }
3917
3918 if (offset >= len_avail)
3919 {
3920 /* Done. Get rid of the data. */
3921 buffer_free (&buffer);
3922 buf = NULL;
3923 len_avail = 0;
3924 return 0;
3925 }
3926
3927 if (len > len_avail - offset)
3928 len = len_avail - offset;
3929 memcpy (readbuf, buf + offset, len);
3930
3931 return len;
3932 }
3933
/* Convert a native/host siginfo object into/from the siginfo in the
layout of the inferior's architecture.  DIRECTION is 0 to convert
from the native to the inferior layout, and 1 for the reverse.  */
3936
3937 static void
3938 siginfo_fixup (struct siginfo *siginfo, void *inf_siginfo, int direction)
3939 {
3940 int done = 0;
3941
3942 if (the_low_target.siginfo_fixup != NULL)
3943 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
3944
3945 /* If there was no callback, or the callback didn't do anything,
3946 then just do a straight memcpy. */
3947 if (!done)
3948 {
3949 if (direction == 1)
3950 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
3951 else
3952 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
3953 }
3954 }
3955
3956 static int
3957 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
3958 unsigned const char *writebuf, CORE_ADDR offset, int len)
3959 {
3960 int pid;
3961 struct siginfo siginfo;
3962 char inf_siginfo[sizeof (struct siginfo)];
3963
3964 if (current_inferior == NULL)
3965 return -1;
3966
3967 pid = lwpid_of (get_thread_lwp (current_inferior));
3968
3969 if (debug_threads)
3970 fprintf (stderr, "%s siginfo for lwp %d.\n",
3971 readbuf != NULL ? "Reading" : "Writing",
3972 pid);
3973
3974 if (offset > sizeof (siginfo))
3975 return -1;
3976
3977 if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
3978 return -1;
3979
3980 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
3981 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
3982 inferior with a 64-bit GDBSERVER should look the same as debugging it
3983 with a 32-bit GDBSERVER, we need to convert it. */
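/* (Illustration: on x86 both layouts happen to be 128 bytes, but in
the 64-bit layout the union of per-signal data starts at offset 16
rather than 12, and its pointer and long members are twice as
wide.)  */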
3984 siginfo_fixup (&siginfo, inf_siginfo, 0);
3985
3986 if (offset + len > sizeof (siginfo))
3987 len = sizeof (siginfo) - offset;
3988
3989 if (readbuf != NULL)
3990 memcpy (readbuf, inf_siginfo + offset, len);
3991 else
3992 {
3993 memcpy (inf_siginfo + offset, writebuf, len);
3994
3995 /* Convert back to ptrace layout before flushing it out. */
3996 siginfo_fixup (&siginfo, inf_siginfo, 1);
3997
3998 if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
3999 return -1;
4000 }
4001
4002 return len;
4003 }
4004
/* SIGCHLD handler that serves two purposes: in non-stop/async mode,
it lets us notice when children change state; in all modes, it is
the handler for the sigsuspend in my_waitpid.  */
4008
4009 static void
4010 sigchld_handler (int signo)
4011 {
4012 int old_errno = errno;
4013
4014 if (debug_threads)
4015 /* fprintf is not async-signal-safe, so call write directly. */
4016 write (2, "sigchld_handler\n", sizeof ("sigchld_handler\n") - 1);
4017
4018 if (target_is_async_p ())
4019 async_file_mark (); /* trigger a linux_wait */
4020
4021 errno = old_errno;
4022 }
4023
4024 static int
4025 linux_supports_non_stop (void)
4026 {
4027 return 1;
4028 }
4029
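/* Enable or disable async event reporting, registering or
unregistering the event pipe with the event loop accordingly.
Returns the previous setting.  */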
4030 static int
4031 linux_async (int enable)
4032 {
4033 int previous = (linux_event_pipe[0] != -1);
4034
4035 if (previous != enable)
4036 {
4037 sigset_t mask;
4038 sigemptyset (&mask);
4039 sigaddset (&mask, SIGCHLD);
4040
4041 sigprocmask (SIG_BLOCK, &mask, NULL);
4042
4043 if (enable)
4044 {
4045 if (pipe (linux_event_pipe) == -1)
4046 fatal ("creating event pipe failed.");
4047
4048 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
4049 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
4050
4051 /* Register the event loop handler. */
4052 add_file_handler (linux_event_pipe[0],
4053 handle_target_event, NULL);
4054
4055 /* Always trigger a linux_wait. */
4056 async_file_mark ();
4057 }
4058 else
4059 {
4060 delete_file_handler (linux_event_pipe[0]);
4061
4062 close (linux_event_pipe[0]);
4063 close (linux_event_pipe[1]);
4064 linux_event_pipe[0] = -1;
4065 linux_event_pipe[1] = -1;
4066 }
4067
4068 sigprocmask (SIG_UNBLOCK, &mask, NULL);
4069 }
4070
4071 return previous;
4072 }
4073
4074 static int
4075 linux_start_non_stop (int nonstop)
4076 {
4077 /* Register or unregister from event-loop accordingly. */
4078 linux_async (nonstop);
4079 return 0;
4080 }
4081
4082 static int
4083 linux_supports_multi_process (void)
4084 {
4085 return 1;
4086 }
4087
4088
/* Enumerate spufs context IDs for process PID.  Each ID is the number
of an open file descriptor in that process referring to a spufs
context directory.  The IDs are stored into BUF as consecutive
4-byte values in host byte order, restricted to the
[OFFSET, OFFSET + LEN) window; the number of bytes actually written
is returned, or -1 on error.  */
4090 static int
4091 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
4092 {
4093 int pos = 0;
4094 int written = 0;
4095 char path[128];
4096 DIR *dir;
4097 struct dirent *entry;
4098
4099 sprintf (path, "/proc/%ld/fd", pid);
4100 dir = opendir (path);
4101 if (!dir)
4102 return -1;
4103
4104 rewinddir (dir);
4105 while ((entry = readdir (dir)) != NULL)
4106 {
4107 struct stat st;
4108 struct statfs stfs;
4109 int fd;
4110
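/* atoi returns 0 for ".", ".." and other non-numeric entries; note
this also skips descriptor 0.  */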
4111 fd = atoi (entry->d_name);
4112 if (!fd)
4113 continue;
4114
4115 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
4116 if (stat (path, &st) != 0)
4117 continue;
4118 if (!S_ISDIR (st.st_mode))
4119 continue;
4120
4121 if (statfs (path, &stfs) != 0)
4122 continue;
4123 if (stfs.f_type != SPUFS_MAGIC)
4124 continue;
4125
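/* Store this ID only if it lies entirely within the requested
[OFFSET, OFFSET + LEN) window of the ID array.  */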
4126 if (pos >= offset && pos + 4 <= offset + len)
4127 {
4128 *(unsigned int *)(buf + pos - offset) = fd;
4129 written += 4;
4130 }
4131 pos += 4;
4132 }
4133
4134 closedir (dir);
4135 return written;
4136 }
4137
/* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
object type, using the /proc file system.  An empty annex requests
the list of context IDs; otherwise the annex names a file relative
to /proc/PID/fd, e.g. (hypothetical ID) "7/regs" for
/proc/PID/fd/7/regs.  */
4140 static int
4141 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
4142 unsigned const char *writebuf,
4143 CORE_ADDR offset, int len)
4144 {
4145 long pid = lwpid_of (get_thread_lwp (current_inferior));
4146 char buf[128];
4147 int fd = 0;
4148 int ret = 0;
4149
4150 if (!writebuf && !readbuf)
4151 return -1;
4152
4153 if (!*annex)
4154 {
4155 if (!readbuf)
4156 return -1;
4157 else
4158 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4159 }
4160
4161 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
/* open returns -1 on failure; 0 would be a valid descriptor.  */
if (fd < 0)
4164 return -1;
4165
4166 if (offset != 0
4167 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4168 {
4169 close (fd);
4170 return 0;
4171 }
4172
4173 if (writebuf)
4174 ret = write (fd, writebuf, (size_t) len);
4175 else
4176 ret = read (fd, readbuf, (size_t) len);
4177
4178 close (fd);
4179 return ret;
4180 }
4181
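/* Return the number of the core that thread PTID was last seen
running on, or -1 if it cannot be determined.  We parse
/proc/PID/task/LWP/stat, which per proc(5) looks roughly like

1235 (comm) S 1234 1234 ...

where the "processor" entry is field 39.  COMM may itself contain
spaces, so we skip past the ')' that closes it; the first token
after that is field 3, and 36 further tokens bring us to field
39.  */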
4182 static int
4183 linux_core_of_thread (ptid_t ptid)
4184 {
char filename[sizeof ("/proc//task//stat")
+ 2 * 20 /* 20 decimal digits for each of two 64-bit numbers */
+ 1];
4188 FILE *f;
4189 char *content = NULL;
4190 char *p;
4191 char *ts = 0;
4192 int content_read = 0;
4193 int i;
4194 int core;
4195
4196 sprintf (filename, "/proc/%d/task/%ld/stat",
4197 ptid_get_pid (ptid), ptid_get_lwp (ptid));
4198 f = fopen (filename, "r");
4199 if (!f)
4200 return -1;
4201
4202 for (;;)
4203 {
int n;
char *new_content = realloc (content, content_read + 1024);

if (new_content == NULL)
fatal ("out of memory");
content = new_content;
n = fread (content + content_read, 1, 1024, f);
content_read += n;
4208 if (n < 1024)
4209 {
4210 content[content_read] = '\0';
4211 break;
4212 }
4213 }
4214
4215 p = strchr (content, '(');
p = strchr (p, ')') + 2; /* Skip ")" and the space after it.  */
4217
4218 p = strtok_r (p, " ", &ts);
4219 for (i = 0; i != 36; ++i)
4220 p = strtok_r (NULL, " ", &ts);
4221
if (p == NULL || sscanf (p, "%d", &core) != 1)
core = -1;
4224
4225 free (content);
4226 fclose (f);
4227
4228 return core;
4229 }
4230
4231 static void
4232 linux_process_qsupported (const char *query)
4233 {
4234 if (the_low_target.process_qsupported != NULL)
4235 the_low_target.process_qsupported (query);
4236 }
4237
4238 static int
4239 linux_supports_tracepoints (void)
4240 {
if (the_low_target.supports_tracepoints == NULL)
4242 return 0;
4243
4244 return (*the_low_target.supports_tracepoints) ();
4245 }
4246
4247 static CORE_ADDR
4248 linux_read_pc (struct regcache *regcache)
4249 {
4250 if (the_low_target.get_pc == NULL)
4251 return 0;
4252
4253 return (*the_low_target.get_pc) (regcache);
4254 }
4255
4256 static void
4257 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
4258 {
4259 gdb_assert (the_low_target.set_pc != NULL);
4260
4261 (*the_low_target.set_pc) (regcache, pc);
4262 }
4263
4264 static struct target_ops linux_target_ops = {
4265 linux_create_inferior,
4266 linux_attach,
4267 linux_kill,
4268 linux_detach,
4269 linux_join,
4270 linux_thread_alive,
4271 linux_resume,
4272 linux_wait,
4273 linux_fetch_registers,
4274 linux_store_registers,
4275 linux_read_memory,
4276 linux_write_memory,
4277 linux_look_up_symbols,
4278 linux_request_interrupt,
4279 linux_read_auxv,
4280 linux_insert_point,
4281 linux_remove_point,
4282 linux_stopped_by_watchpoint,
4283 linux_stopped_data_address,
4284 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4285 linux_read_offsets,
4286 #else
4287 NULL,
4288 #endif
4289 #ifdef USE_THREAD_DB
4290 thread_db_get_tls_address,
4291 #else
4292 NULL,
4293 #endif
4294 linux_qxfer_spu,
4295 hostio_last_error_from_errno,
4296 linux_qxfer_osdata,
4297 linux_xfer_siginfo,
4298 linux_supports_non_stop,
4299 linux_async,
4300 linux_start_non_stop,
4301 linux_supports_multi_process,
4302 #ifdef USE_THREAD_DB
4303 thread_db_handle_monitor_command,
4304 #else
4305 NULL,
4306 #endif
4307 linux_core_of_thread,
4308 linux_process_qsupported,
4309 linux_supports_tracepoints,
4310 linux_read_pc,
4311 linux_write_pc
4312 };
4313
4314 static void
linux_init_signals (void)
4316 {
4317 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
4318 to find what the cancel signal actually is. */
4319 #ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does. */
4320 signal (__SIGRTMIN+1, SIG_IGN);
4321 #endif
4322 }
4323
4324 void
4325 initialize_low (void)
4326 {
4327 struct sigaction sigchld_action;
4328 memset (&sigchld_action, 0, sizeof (sigchld_action));
4329 set_target_ops (&linux_target_ops);
4330 set_breakpoint_data (the_low_target.breakpoint,
4331 the_low_target.breakpoint_len);
4332 linux_init_signals ();
4333 linux_test_for_tracefork ();
4334 #ifdef HAVE_LINUX_REGSETS
4335 for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
4336 ;
4337 disabled_regsets = xmalloc (num_regsets);
4338 #endif
4339
4340 sigchld_action.sa_handler = sigchld_handler;
4341 sigemptyset (&sigchld_action.sa_mask);
4342 sigchld_action.sa_flags = SA_RESTART;
4343 sigaction (SIGCHLD, &sigchld_action, NULL);
4344 }