* linux-low.c (linux_create_inferior): Wrap use of __SIGRTMIN in
[binutils-gdb.git] / gdb / gdbserver / linux-low.c
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
3 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include "linux-low.h"
22
23 #include <sys/wait.h>
24 #include <stdio.h>
25 #include <sys/param.h>
26 #include <sys/ptrace.h>
27 #include <signal.h>
28 #include <sys/ioctl.h>
29 #include <fcntl.h>
30 #include <string.h>
31 #include <stdlib.h>
32 #include <unistd.h>
33 #include <errno.h>
34 #include <sys/syscall.h>
35 #include <sched.h>
36 #include <ctype.h>
37 #include <pwd.h>
38 #include <sys/types.h>
39 #include <dirent.h>
40 #include <sys/stat.h>
41 #include <sys/vfs.h>
42 #ifndef ELFMAG0
43 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
44 then ELFMAG0 will have been defined. If it didn't get included by
45 gdb_proc_service.h then including it will likely introduce a duplicate
46 definition of elf_fpregset_t. */
47 #include <elf.h>
48 #endif
49
50 #ifndef SPUFS_MAGIC
51 #define SPUFS_MAGIC 0x23c9b64e
52 #endif
53
54 #ifndef PTRACE_GETSIGINFO
55 # define PTRACE_GETSIGINFO 0x4202
56 # define PTRACE_SETSIGINFO 0x4203
57 #endif
58
59 #ifndef O_LARGEFILE
60 #define O_LARGEFILE 0
61 #endif
62
63 /* If the system headers did not provide the constants, hard-code the normal
64 values. */
65 #ifndef PTRACE_EVENT_FORK
66
67 #define PTRACE_SETOPTIONS 0x4200
68 #define PTRACE_GETEVENTMSG 0x4201
69
70 /* options set using PTRACE_SETOPTIONS */
71 #define PTRACE_O_TRACESYSGOOD 0x00000001
72 #define PTRACE_O_TRACEFORK 0x00000002
73 #define PTRACE_O_TRACEVFORK 0x00000004
74 #define PTRACE_O_TRACECLONE 0x00000008
75 #define PTRACE_O_TRACEEXEC 0x00000010
76 #define PTRACE_O_TRACEVFORKDONE 0x00000020
77 #define PTRACE_O_TRACEEXIT 0x00000040
78
79 /* Wait extended result codes for the above trace options. */
80 #define PTRACE_EVENT_FORK 1
81 #define PTRACE_EVENT_VFORK 2
82 #define PTRACE_EVENT_CLONE 3
83 #define PTRACE_EVENT_EXEC 4
84 #define PTRACE_EVENT_VFORK_DONE 5
85 #define PTRACE_EVENT_EXIT 6
86
87 #endif /* PTRACE_EVENT_FORK */
88
89 /* We can't always assume that this flag is available, but all systems
90 with the ptrace event handlers also have __WALL, so it's safe to use
91 in some contexts. */
92 #ifndef __WALL
93 #define __WALL 0x40000000 /* Wait for any child. */
94 #endif
95
96 #ifdef __UCLIBC__
97 #if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
98 #define HAS_NOMMU
99 #endif
100 #endif
101
102 /* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
103 representation of the thread ID.
104
105 ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
106 the same as the LWP ID.
107
108 ``all_processes'' is keyed by the "overall process ID", which
109 GNU/Linux calls tgid, "thread group ID". */
110
111 struct inferior_list all_lwps;
112
113 /* A list of all unknown processes which receive stop signals. Some other
114 process will presumably claim each of these as forked children
115 momentarily. */
116
117 struct inferior_list stopped_pids;
118
119 /* FIXME this is a bit of a hack, and could be removed. */
120 int stopping_threads;
121
122 /* FIXME make into a target method? */
123 int using_threads = 1;
124
125 /* This flag is true iff we've just created or attached to our first
126 inferior but it has not stopped yet. As soon as it does, we need
127 to call the low target's arch_setup callback. Doing this only on
128 the first inferior avoids reinializing the architecture on every
129 inferior, and avoids messing with the register caches of the
130 already running inferiors. NOTE: this assumes all inferiors under
131 control of gdbserver have the same architecture. */
132 static int new_inferior;
133
134 static void linux_resume_one_lwp (struct lwp_info *lwp,
135 int step, int signal, siginfo_t *info);
136 static void linux_resume (struct thread_resume *resume_info, size_t n);
137 static void stop_all_lwps (void);
138 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
139 static int check_removed_breakpoint (struct lwp_info *event_child);
140 static void *add_lwp (ptid_t ptid);
141 static int linux_stopped_by_watchpoint (void);
142 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
143 static int linux_core_of_thread (ptid_t ptid);
144
145 struct pending_signals
146 {
147 int signal;
148 siginfo_t info;
149 struct pending_signals *prev;
150 };
151
152 #define PTRACE_ARG3_TYPE long
153 #define PTRACE_XFER_TYPE long
154
155 #ifdef HAVE_LINUX_REGSETS
156 static char *disabled_regsets;
157 static int num_regsets;
158 #endif
159
160 /* The read/write ends of the pipe registered as waitable file in the
161 event loop. */
162 static int linux_event_pipe[2] = { -1, -1 };
163
164 /* True if we're currently in async mode. */
165 #define target_is_async_p() (linux_event_pipe[0] != -1)
166
167 static void send_sigstop (struct inferior_list_entry *entry);
168 static void wait_for_sigstop (struct inferior_list_entry *entry);
169
/* Accepts an integer PID; Returns a string representing a file that
   can be opened to get info for the child process.
   Space for the result is malloc'd, caller must free.  */

char *
linux_child_pid_to_exec_file (int pid)
{
  char *name1, *name2;

  name1 = xmalloc (MAXPATHLEN);
  name2 = xmalloc (MAXPATHLEN);
  memset (name2, 0, MAXPATHLEN);

  sprintf (name1, "/proc/%d/exe", pid);

  /* Read at most MAXPATHLEN - 1 bytes so NAME2 is guaranteed to stay
     NUL-terminated (the buffer was zeroed above); readlink does not
     terminate its output, so reading a full MAXPATHLEN bytes could
     previously return an unterminated string.  */
  if (readlink (name1, name2, MAXPATHLEN - 1) > 0)
    {
      /* The symlink resolved; return its target, the executable
	 path.  */
      free (name1);
      return name2;
    }
  else
    {
      /* Fall back to the /proc path itself.  */
      free (name2);
      return name1;
    }
}
195
/* Return non-zero if HEADER is a 64-bit ELF file: the four magic
   bytes must match and the class byte must say ELFCLASS64.  */

static int
elf_64_header_p (const Elf64_Ehdr *header)
{
  const unsigned char *ident = header->e_ident;

  if (ident[EI_MAG0] != ELFMAG0 || ident[EI_MAG1] != ELFMAG1)
    return 0;
  if (ident[EI_MAG2] != ELFMAG2 || ident[EI_MAG3] != ELFMAG3)
    return 0;

  return ident[EI_CLASS] == ELFCLASS64;
}
207
/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

int
elf_64_file_p (const char *file)
{
  Elf64_Ehdr header;
  ssize_t nread;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  /* A short read (EOF, error, or a tiny file) means this cannot be a
     valid ELF header; report "not 64-bit".  */
  nread = read (fd, &header, sizeof (header));
  close (fd);

  if (nread != (ssize_t) sizeof (header))
    return 0;

  return elf_64_header_p (&header);
}
231
232 static void
233 delete_lwp (struct lwp_info *lwp)
234 {
235 remove_thread (get_lwp_thread (lwp));
236 remove_inferior (&all_lwps, &lwp->head);
237 free (lwp->arch_private);
238 free (lwp);
239 }
240
241 /* Add a process to the common process list, and set its private
242 data. */
243
244 static struct process_info *
245 linux_add_process (int pid, int attached)
246 {
247 struct process_info *proc;
248
249 /* Is this the first process? If so, then set the arch. */
250 if (all_processes.head == NULL)
251 new_inferior = 1;
252
253 proc = add_process (pid, attached);
254 proc->private = xcalloc (1, sizeof (*proc->private));
255
256 if (the_low_target.new_process != NULL)
257 proc->private->arch_private = the_low_target.new_process ();
258
259 return proc;
260 }
261
262 /* Remove a process from the common process list,
263 also freeing all private data. */
264
265 static void
266 linux_remove_process (struct process_info *process)
267 {
268 struct process_info_private *priv = process->private;
269
270 free (priv->arch_private);
271 free (priv);
272 remove_process (process);
273 }
274
/* Wrapper function for waitpid which handles EINTR, and emulates
   __WALL for systems where that is not available.  Returns the
   waitpid result; on return, errno reflects the failing call (it is
   saved across the debug fprintf below).  */

static int
my_waitpid (int pid, int *status, int flags)
{
  int ret, out_errno;

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);

  if (flags & __WALL)
    {
      sigset_t block_mask, org_mask, wake_mask;
      int wnohang;

      /* Emulate __WALL by polling (WNOHANG) alternately without and
	 with __WCLONE, so both regular children and clone children
	 are covered.  Remember whether the caller really wanted a
	 non-blocking wait.  */
      wnohang = (flags & WNOHANG) != 0;
      flags &= ~(__WALL | __WCLONE);
      flags |= WNOHANG;

      /* Block all signals while here.  This avoids knowing about
	 LinuxThread's signals.  */
      sigfillset (&block_mask);
      sigprocmask (SIG_BLOCK, &block_mask, &org_mask);

      /* ... except during the sigsuspend below.  */
      sigemptyset (&wake_mask);

      while (1)
	{
	  /* Since all signals are blocked, there's no need to check
	     for EINTR here.  */
	  ret = waitpid (pid, status, flags);
	  out_errno = errno;

	  /* A real error (other than "no such child") or an actual
	     event ends the loop.  ECHILD may just mean we asked for
	     the wrong flavor (clone vs non-clone) this iteration.  */
	  if (ret == -1 && out_errno != ECHILD)
	    break;
	  else if (ret > 0)
	    break;

	  if (flags & __WCLONE)
	    {
	      /* We've tried both flavors now.  If WNOHANG is set,
		 there's nothing else to do, just bail out.  */
	      if (wnohang)
		break;

	      if (debug_threads)
		fprintf (stderr, "blocking\n");

	      /* Block waiting for signals.  */
	      sigsuspend (&wake_mask);
	    }

	  /* Alternate between checking for clone and non-clone
	     children on each pass.  */
	  flags ^= __WCLONE;
	}

      sigprocmask (SIG_SETMASK, &org_mask, NULL);
    }
  else
    {
      /* Plain waitpid; just retry on EINTR.  */
      do
	ret = waitpid (pid, status, flags);
      while (ret == -1 && errno == EINTR);
      out_errno = errno;
    }

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
	     pid, flags, status ? *status : -1, ret);

  /* Restore errno from the waitpid call; the fprintf above may have
     clobbered it.  */
  errno = out_errno;
  return ret;
}
349
/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  EVENT_CHILD is the LWP that reported the
   ptrace event; WSTAT is its raw wait status (the extended event code
   lives in bits 16 and up).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = wstat >> 16;
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status = W_STOPCODE (SIGSTOP);

      /* Fetch the new thread's LWP id from the event message.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (! pull_pid_from_list (&stopped_pids, new_pid))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      /* Have the new thread report clone events too.  */
      ptrace (PTRACE_SETOPTIONS, new_pid, 0, PTRACE_O_TRACECLONE);

      ptid = ptid_build (pid_of (event_child), new_pid, 0);
      new_lwp = (struct lwp_info *) add_lwp (ptid);
      add_thread (ptid, new_lwp);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
	{
	  if (! stopping_threads)
	    linux_resume_one_lwp (new_lwp, 0, 0, NULL);
	}
      else
	{
	  /* Not the expected SIGSTOP; it will arrive later, so flag
	     that we should swallow it when it does.  */
	  new_lwp->stop_expected = 1;
	  if (stopping_threads)
	    {
	      /* Queue the signal so the stop machinery sees it.  */
	      new_lwp->status_pending_p = 1;
	      new_lwp->status_pending = status;
	    }
	  else
	    /* Pass the signal on.  This is what GDB does - except
	       shouldn't we really report it instead?  */
	    linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
	}

      /* Always resume the current thread.  If we are stopping
	 threads, it will have a pending SIGSTOP; we may as well
	 collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}
424
425 /* This function should only be called if the process got a SIGTRAP.
426 The SIGTRAP could mean several things.
427
428 On i386, where decr_pc_after_break is non-zero:
429 If we were single-stepping this process using PTRACE_SINGLESTEP,
430 we will get only the one SIGTRAP (even if the instruction we
431 stepped over was a breakpoint). The value of $eip will be the
432 next instruction.
433 If we continue the process using PTRACE_CONT, we will get a
434 SIGTRAP when we hit a breakpoint. The value of $eip will be
435 the instruction after the breakpoint (i.e. needs to be
436 decremented). If we report the SIGTRAP to GDB, we must also
437 report the undecremented PC. If we cancel the SIGTRAP, we
438 must resume at the decremented PC.
439
440 (Presumably, not yet tested) On a non-decr_pc_after_break machine
441 with hardware or kernel single-step:
442 If we single-step over a breakpoint instruction, our PC will
443 point at the following instruction. If we continue and hit a
444 breakpoint instruction, our PC will point at the breakpoint
445 instruction. */
446
447 static CORE_ADDR
448 get_stop_pc (void)
449 {
450 CORE_ADDR stop_pc = (*the_low_target.get_pc) ();
451
452 if (! get_thread_lwp (current_inferior)->stepping)
453 stop_pc -= the_low_target.decr_pc_after_break;
454
455 if (debug_threads)
456 fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);
457
458 return stop_pc;
459 }
460
461 static void *
462 add_lwp (ptid_t ptid)
463 {
464 struct lwp_info *lwp;
465
466 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
467 memset (lwp, 0, sizeof (*lwp));
468
469 lwp->head.id = ptid;
470
471 if (the_low_target.new_thread != NULL)
472 lwp->arch_private = the_low_target.new_thread ();
473
474 add_inferior_to_list (&all_lwps, &lwp->head);
475
476 return lwp;
477 }
478
/* Start an inferior process and returns its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  /* MMU-less uClibc targets cannot fork; vfork shares the address
     space until the exec below.  */
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      /* In the child: request tracing by our parent, so the exec
	 below stops with a SIGTRAP that the parent can collect.  */
      ptrace (PTRACE_TRACEME, 0, 0, 0);

#ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      /* Reset this realtime signal's disposition to its default
	 before exec.  NOTE(review): presumably undoes a handler the
	 hosting C library installed -- confirm.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      /* Start a fresh process group for the inferior.  */
      setpgid (0, 0);

      /* Try an exact path first; on ENOENT, retry with a PATH
	 search.  */
      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      /* Use _exit so the child does not run the parent's atexit
	 handlers or flush shared stdio state.  */
      _exit (0177);
    }

  /* In the parent: register the new process and its initial LWP and
     thread, and arrange for ptrace options to be set on first stop.  */
  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  add_thread (ptid, new_lwp);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}
526
/* Attach to an inferior process.  */

/* Attach to the LWP with id LWPID.  If INITIAL is non-zero this is
   the first LWP of a process (a whole-process attach) and a ptrace
   failure raises an error; otherwise a failure only warns, and the
   function returns without adding the LWP.  */

static void
linux_attach_lwp_1 (unsigned long lwpid, int initial)
{
  ptid_t ptid;
  struct lwp_info *new_lwp;

  if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
    {
      if (!initial)
	{
	  /* If we fail to attach to an LWP, just warn.  */
	  fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
		   strerror (errno), errno);
	  fflush (stderr);
	  return;
	}
      else
	/* If we fail to attach to a process, report an error.  */
	error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid,
	       strerror (errno), errno);
    }

  if (initial)
    /* NOTE/FIXME: This lwp might have not been the tgid.  */
    ptid = ptid_build (lwpid, lwpid, 0);
  else
    {
      /* Note that extracting the pid from the current inferior is
	 safe, since we're always called in the context of the same
	 process as this new thread.  */
      int pid = pid_of (get_thread_lwp (current_inferior));
      ptid = ptid_build (pid, lwpid, 0);
    }

  new_lwp = (struct lwp_info *) add_lwp (ptid);
  add_thread (ptid, new_lwp);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the process.
	This is handled below by setting stop_expected = 1.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach clear stop_expected after
	we return.
	??? If the process already has several threads we leave the other
	threads running.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  if (! stopping_threads)
    new_lwp->stop_expected = 1;
}
604
/* Public entry point to attach to a single LWP.  Non-initial variant
   of linux_attach_lwp_1: a failure to attach only produces a warning
   (see linux_attach_lwp_1).  */

void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}
610
611 int
612 linux_attach (unsigned long pid)
613 {
614 struct lwp_info *lwp;
615
616 linux_attach_lwp_1 (pid, 1);
617
618 linux_add_process (pid, 1);
619
620 if (!non_stop)
621 {
622 /* Don't ignore the initial SIGSTOP if we just attached to this
623 process. It will be collected by wait shortly. */
624 lwp = (struct lwp_info *) find_inferior_id (&all_lwps,
625 ptid_build (pid, pid, 0));
626 lwp->stop_expected = 0;
627 }
628
629 return 0;
630 }
631
/* Accumulator used when counting the threads of one process; see
   second_thread_of_pid_p and last_thread_of_process_p.  */

struct counter
{
  int pid;    /* Process id whose threads are being counted.  */
  int count;  /* Number of matching threads seen so far.  */
};
637
638 static int
639 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
640 {
641 struct counter *counter = args;
642
643 if (ptid_get_pid (entry->id) == counter->pid)
644 {
645 if (++counter->count > 1)
646 return 1;
647 }
648
649 return 0;
650 }
651
652 static int
653 last_thread_of_process_p (struct thread_info *thread)
654 {
655 ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
656 int pid = ptid_get_pid (ptid);
657 struct counter counter = { pid , 0 };
658
659 return (find_inferior (&all_threads,
660 second_thread_of_pid_p, &counter) == NULL);
661 }
662
/* Kill the inferior lwp.  */

/* Callback for find_inferior: kill the LWP in ENTRY if it belongs to
   the process id pointed to by ARGS, except for the process's main
   thread, which is deliberately skipped (see comment below).  Always
   returns 0 so the traversal continues.  */

static int
linux_kill_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int wstat;
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (lwp) == pid)
    {
      if (debug_threads)
	fprintf (stderr, "lkop: is last of process %s\n",
		 target_pid_to_str (entry->id));
      return 0;
    }

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  if (!lwp->stopped)
    send_sigstop (&lwp->head);

  /* Note: PID is reused below to hold the wait result.  */
  do
    {
      ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (pid > 0 && WIFSTOPPED (wstat));

  return 0;
}
704
/* Kill process PID and all its LWPs.  Returns 0 on success, -1 if
   the process is not known.  */

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;
  struct thread_info *thread;
  int wstat;
  int lwpid;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Kill every LWP except the leader (see linux_kill_one_lwp).  */
  find_inferior (&all_threads, linux_kill_one_lwp, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));
  /* NOTE(review): LWP is assumed non-NULL here; confirm the leader
     cannot already have been reaped at this point.  */
  thread = get_lwp_thread (lwp);

  if (debug_threads)
    fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
	     lwpid_of (lwp), pid);

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  if (!lwp->stopped)
    send_sigstop (&lwp->head);

  do
    {
      ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (lwpid > 0 && WIFSTOPPED (wstat));

#ifdef USE_THREAD_DB
  thread_db_free (process, 0);
#endif
  delete_lwp (lwp);
  linux_remove_process (process);
  return 0;
}
749
/* Callback for find_inferior: detach from the LWP in ENTRY if it
   belongs to the process id pointed to by ARGS.  Stops the LWP if
   needed, collects any pending SIGSTOP, flushes cached registers,
   PTRACE_DETACHes, and deletes our bookkeeping for the LWP.  Always
   returns 0 so the traversal continues.  */

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If we're detaching from a running inferior, make sure it is
     stopped first, as PTRACE_DETACH will not work otherwise.  */
  if (!lwp->stopped)
    {
      int lwpid = lwpid_of (lwp);

      stopping_threads = 1;
      send_sigstop (&lwp->head);

      /* If this detects a new thread through a clone event, the new
	 thread is appended to the end of the lwp list, so we'll
	 eventually detach from it.  */
      wait_for_sigstop (&lwp->head);
      stopping_threads = 0;

      /* If LWP exits while we're trying to stop it, there's nothing
	 left to do.  */
      lwp = find_lwp_pid (pid_to_ptid (lwpid));
      if (lwp == NULL)
	return 0;
    }

  /* Make sure the process isn't stopped at a breakpoint that's
     no longer there.  */
  check_removed_breakpoint (lwp);

  /* If this process is stopped but is expecting a SIGSTOP, then make
     sure we take care of that now.  This isn't absolutely guaranteed
     to collect the SIGSTOP, but is fairly likely to.  */
  if (lwp->stop_expected)
    {
      int wstat;
      /* Clear stop_expected, so that the SIGSTOP will be reported.  */
      lwp->stop_expected = 0;
      if (lwp->stopped)
	linux_resume_one_lwp (lwp, 0, 0, NULL);
      linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
			   get_lwp_thread (lwp));

  /* Finally, let it resume.  */
  ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0);

  delete_lwp (lwp);
  return 0;
}
809
810 static int
811 any_thread_of (struct inferior_list_entry *entry, void *args)
812 {
813 int *pid_p = args;
814
815 if (ptid_get_pid (entry->id) == *pid_p)
816 return 1;
817
818 return 0;
819 }
820
821 static int
822 linux_detach (int pid)
823 {
824 struct process_info *process;
825
826 process = find_process_pid (pid);
827 if (process == NULL)
828 return -1;
829
830 #ifdef USE_THREAD_DB
831 thread_db_free (process, 1);
832 #endif
833
834 current_inferior =
835 (struct thread_info *) find_inferior (&all_threads, any_thread_of, &pid);
836
837 delete_all_breakpoints ();
838 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
839 linux_remove_process (process);
840 return 0;
841 }
842
843 static void
844 linux_join (int pid)
845 {
846 int status, ret;
847 struct process_info *process;
848
849 process = find_process_pid (pid);
850 if (process == NULL)
851 return;
852
853 do {
854 ret = my_waitpid (pid, &status, 0);
855 if (WIFEXITED (status) || WIFSIGNALED (status))
856 break;
857 } while (ret != -1 || errno != ECHILD);
858 }
859
860 /* Return nonzero if the given thread is still alive. */
861 static int
862 linux_thread_alive (ptid_t ptid)
863 {
864 struct lwp_info *lwp = find_lwp_pid (ptid);
865
866 /* We assume we always know if a thread exits. If a whole process
867 exited but we still haven't been able to report it to GDB, we'll
868 hold on to the last lwp of the dead process. */
869 if (lwp != NULL)
870 return !lwp->dead;
871 else
872 return 0;
873 }
874
/* Return nonzero if this process stopped at a breakpoint which
   no longer appears to be inserted.  Also adjust the PC
   appropriately to resume where the breakpoint used to be.  */
static int
check_removed_breakpoint (struct lwp_info *event_child)
{
  CORE_ADDR stop_pc;
  struct thread_info *saved_inferior;

  /* Nothing to do unless the pending stop was a breakpoint hit.  */
  if (event_child->pending_is_breakpoint == 0)
    return 0;

  if (debug_threads)
    fprintf (stderr, "Checking for breakpoint in lwp %ld.\n",
	     lwpid_of (event_child));

  /* get_stop_pc reads registers of the current inferior, so switch
     to EVENT_CHILD temporarily.  */
  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (event_child);

  stop_pc = get_stop_pc ();

  /* If the PC has changed since we stopped, then we shouldn't do
     anything.  This happens if, for instance, GDB handled the
     decr_pc_after_break subtraction itself.  */
  if (stop_pc != event_child->pending_stop_pc)
    {
      if (debug_threads)
	/* NOTE(review): "%08llx" assumes pending_stop_pc is unsigned
	   long long wide; verify against CORE_ADDR's definition.  */
	fprintf (stderr, "Ignoring, PC was changed.  Old PC was 0x%08llx\n",
		 event_child->pending_stop_pc);

      event_child->pending_is_breakpoint = 0;
      current_inferior = saved_inferior;
      return 0;
    }

  /* If the breakpoint is still there, we will report hitting it.  */
  if ((*the_low_target.breakpoint_at) (stop_pc))
    {
      if (debug_threads)
	fprintf (stderr, "Ignoring, breakpoint is still present.\n");
      current_inferior = saved_inferior;
      return 0;
    }

  if (debug_threads)
    fprintf (stderr, "Removed breakpoint.\n");

  /* For decr_pc_after_break targets, here is where we perform the
     decrement.  We go immediately from this function to resuming,
     and can not safely call get_stop_pc () again.  */
  if (the_low_target.set_pc != NULL)
    {
      if (debug_threads)
	fprintf (stderr, "Set pc to 0x%lx\n", (long) stop_pc);
      (*the_low_target.set_pc) (stop_pc);
    }

  /* We consumed the pending SIGTRAP.  */
  event_child->pending_is_breakpoint = 0;
  event_child->status_pending_p = 0;
  event_child->status_pending = 0;

  current_inferior = saved_inferior;
  return 1;
}
940
/* Return 1 if this lwp has an interesting status pending.  This
   function may silently resume an inferior lwp.  ARG is a ptid_t:
   minus_one_ptid matches any process; otherwise only LWPs of that
   ptid's process are considered.  */
static int
status_pending_p (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  ptid_t ptid = * (ptid_t *) arg;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
    return 0;

  /* Suspended LWPs are deliberately skipped.  */
  if (lwp->status_pending_p && !lwp->suspended)
    if (check_removed_breakpoint (lwp))
      {
	/* This thread was stopped at a breakpoint, and the breakpoint
	   is now gone.  We were told to continue (or step...) all threads,
	   so GDB isn't trying to single-step past this breakpoint.
	   So instead of reporting the old SIGTRAP, pretend we got to
	   the breakpoint just after it was removed instead of just
	   before; resume the process.  */
	linux_resume_one_lwp (lwp, 0, 0, NULL);
	return 0;
      }

  return (lwp->status_pending_p && !lwp->suspended);
}
970
971 static int
972 same_lwp (struct inferior_list_entry *entry, void *data)
973 {
974 ptid_t ptid = *(ptid_t *) data;
975 int lwp;
976
977 if (ptid_get_lwp (ptid) != 0)
978 lwp = ptid_get_lwp (ptid);
979 else
980 lwp = ptid_get_pid (ptid);
981
982 if (ptid_get_lwp (entry->id) == lwp)
983 return 1;
984
985 return 0;
986 }
987
/* Find the struct lwp_info whose lwp id matches PTID, falling back
   to PTID's pid when it has no lwp component (see same_lwp).
   Returns NULL if no such LWP is known.  */

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
}
993
/* Wait for an event from the LWP selected by PTID (minus_one_ptid
   means any child), storing the raw wait status in *WSTATP.  OPTIONS
   is passed to waitpid (with __WALL forced on).  Returns the
   struct lwp_info of the LWP that reported an event, or NULL if
   OPTIONS contained WNOHANG and nothing was pending.  Events from
   unknown stopped processes are recorded in STOPPED_PIDS and the
   wait is retried.  */

static struct lwp_info *
linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
{
  int ret;
  int to_wait_for = -1;
  struct lwp_info *child = NULL;

  if (debug_threads)
    fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));

  if (ptid_equal (ptid, minus_one_ptid))
    to_wait_for = -1;			/* any child */
  else
    to_wait_for = ptid_get_lwp (ptid);	/* this lwp only */

  options |= __WALL;

retry:

  ret = my_waitpid (to_wait_for, wstatp, options);
  if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
    return NULL;
  else if (ret == -1)
    perror_with_name ("waitpid");

  /* NOTE(review): signals 32 and 33 are presumably the LinuxThreads
     implementation signals being filtered from the debug output --
     confirm.  */
  if (debug_threads
      && (!WIFSTOPPED (*wstatp)
	  || (WSTOPSIG (*wstatp) != 32
	      && WSTOPSIG (*wstatp) != 33)))
    fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);

  child = find_lwp_pid (pid_to_ptid (ret));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
     was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (*wstatp))
    {
      add_pid_to_list (&stopped_pids, ret);
      goto retry;
    }
  else if (child == NULL)
    goto retry;

  /* Record the new stop in our bookkeeping.  */
  child->stopped = 1;
  child->pending_is_breakpoint = 0;

  child->last_status = *wstatp;

  /* Architecture-specific setup after inferior is running.
     This needs to happen after we have attached to the inferior
     and it is stopped for the first time, but before we access
     any inferior registers.  */
  if (new_inferior)
    {
      the_low_target.arch_setup ();
#ifdef HAVE_LINUX_REGSETS
      memset (disabled_regsets, 0, num_regsets);
#endif
      new_inferior = 0;
    }

  if (debug_threads
      && WIFSTOPPED (*wstatp)
      && the_low_target.get_pc != NULL)
    {
      /* get_pc reads from the current inferior; switch to CHILD's
	 thread temporarily for the debug printout.  */
      struct thread_info *saved_inferior = current_inferior;
      CORE_ADDR pc;

      current_inferior = (struct thread_info *)
	find_inferior_id (&all_threads, child->head.id);
      pc = (*the_low_target.get_pc) ();
      fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
      current_inferior = saved_inferior;
    }

  return child;
}
1073
1074 /* Wait for an event from child PID. If PID is -1, wait for any
1075 child. Store the stop status through the status pointer WSTAT.
1076 OPTIONS is passed to the waitpid call. Return 0 if no child stop
1077 event was found and OPTIONS contains WNOHANG. Return the PID of
1078 the stopped child otherwise. */
1079
static int
linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options)
{
  CORE_ADDR stop_pc;
  struct lwp_info *event_child = NULL;
  int bp_status;
  struct lwp_info *requested_child = NULL;

  /* Check for a lwp with a pending status.  */
  /* It is possible that the user changed the pending task's registers since
     it stopped.  We correctly handle the change of PC if we hit a breakpoint
     (in check_removed_breakpoint); signals should be reported anyway.  */

  if (ptid_equal (ptid, minus_one_ptid)
      || ptid_equal (pid_to_ptid (ptid_get_pid (ptid)), ptid))
    {
      /* Wildcard (or whole-process) wait: any LWP with a pending
	 status will do.  */
      event_child = (struct lwp_info *)
	find_inferior (&all_lwps, status_pending_p, &ptid);
      if (debug_threads && event_child)
	fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
    }
  else
    {
      requested_child = find_lwp_pid (ptid);
      if (requested_child->status_pending_p
	  && !check_removed_breakpoint (requested_child))
	event_child = requested_child;
    }

  if (event_child != NULL)
    {
      /* Report the stashed status without calling waitpid again.  */
      if (debug_threads)
	fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
		 lwpid_of (event_child), event_child->status_pending);
      *wstat = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      current_inferior = get_lwp_thread (event_child);
      return lwpid_of (event_child);
    }

  /* We only enter this loop if no process has a pending wait status.  Thus
     any action taken in response to a wait status inside this loop is
     responding as soon as we detect the status, not after any pending
     events.  */
  while (1)
    {
      event_child = linux_wait_for_lwp (ptid, wstat, options);

      if ((options & WNOHANG) && event_child == NULL)
	return 0;

      if (event_child == NULL)
	error ("event from unknown child");

      current_inferior = get_lwp_thread (event_child);

      /* Check for thread exit.  */
      if (! WIFSTOPPED (*wstat))
	{
	  if (debug_threads)
	    fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));

	  /* If the last thread is exiting, just return.  */
	  if (last_thread_of_process_p (current_inferior))
	    {
	      if (debug_threads)
		fprintf (stderr, "LWP %ld is last lwp of process\n",
			 lwpid_of (event_child));
	      return lwpid_of (event_child);
	    }

	  delete_lwp (event_child);

	  if (!non_stop)
	    {
	      current_inferior = (struct thread_info *) all_threads.head;
	      if (debug_threads)
		fprintf (stderr, "Current inferior is now %ld\n",
			 lwpid_of (get_thread_lwp (current_inferior)));
	    }
	  else
	    {
	      current_inferior = NULL;
	      if (debug_threads)
		fprintf (stderr, "Current inferior is now <NULL>\n");
	    }

	  /* If we were waiting for this particular child to do something...
	     well, it did something.  */
	  if (requested_child != NULL)
	    return lwpid_of (event_child);

	  /* Wait for a more interesting event.  */
	  continue;
	}

      /* Enable extended clone-event reporting on an LWP we have just
	 started tracing; this must be done while it is stopped.  */
      if (event_child->must_set_ptrace_flags)
	{
	  ptrace (PTRACE_SETOPTIONS, lwpid_of (event_child),
		  0, PTRACE_O_TRACECLONE);
	  event_child->must_set_ptrace_flags = 0;
	}

      /* A SIGSTOP we sent ourselves (see send_sigstop): swallow it and
	 resume the LWP instead of reporting it to GDB.  */
      if (WIFSTOPPED (*wstat)
	  && WSTOPSIG (*wstat) == SIGSTOP
	  && event_child->stop_expected)
	{
	  if (debug_threads)
	    fprintf (stderr, "Expected stop.\n");
	  event_child->stop_expected = 0;
	  linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
	  continue;
	}

      /* A nonzero high half of the wait status indicates an extended
	 ptrace event (e.g. a PTRACE_EVENT_CLONE stop).  */
      if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
	  && *wstat >> 16 != 0)
	{
	  handle_extended_wait (event_child, *wstat);
	  continue;
	}

      /* If GDB is not interested in this signal, don't stop other
	 threads, and don't report it to GDB.  Just resume the
	 inferior right away.  We do this for threading-related
	 signals as well as any that GDB specifically requested we
	 ignore.  But never ignore SIGSTOP if we sent it ourselves,
	 and do not ignore signals when stepping - they may require
	 special handling to skip the signal handler.  */
      /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
	 thread library?  */
      if (WIFSTOPPED (*wstat)
	  && !event_child->stepping
	  && (
#if defined (USE_THREAD_DB) && defined (__SIGRTMIN)
	      (current_process ()->private->thread_db != NULL
	       && (WSTOPSIG (*wstat) == __SIGRTMIN
		   || WSTOPSIG (*wstat) == __SIGRTMIN + 1))
	      ||
#endif
	      (pass_signals[target_signal_from_host (WSTOPSIG (*wstat))]
	       && (WSTOPSIG (*wstat) != SIGSTOP || !stopping_threads))))
	{
	  siginfo_t info, *info_p;

	  if (debug_threads)
	    fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
		     WSTOPSIG (*wstat), lwpid_of (event_child));

	  /* Preserve the original siginfo when re-delivering the signal,
	     if the kernel lets us read it.  */
	  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
	    info_p = &info;
	  else
	    info_p = NULL;
	  linux_resume_one_lwp (event_child,
				event_child->stepping,
				WSTOPSIG (*wstat), info_p);
	  continue;
	}

      /* If this event was not handled above, and is not a SIGTRAP, report
	 it.  */
      if (!WIFSTOPPED (*wstat) || WSTOPSIG (*wstat) != SIGTRAP)
	return lwpid_of (event_child);

      /* If this target does not support breakpoints, we simply report the
	 SIGTRAP; it's of no concern to us.  */
      if (the_low_target.get_pc == NULL)
	return lwpid_of (event_child);

      stop_pc = get_stop_pc ();

      /* bp_reinsert will only be set if we were single-stepping.
	 Notice that we will resume the process after hitting
	 a gdbserver breakpoint; single-stepping to/over one
	 is not supported (yet).  */
      if (event_child->bp_reinsert != 0)
	{
	  if (debug_threads)
	    fprintf (stderr, "Reinserted breakpoint.\n");
	  reinsert_breakpoint (event_child->bp_reinsert);
	  event_child->bp_reinsert = 0;

	  /* Clear the single-stepping flag and SIGTRAP as we resume.  */
	  linux_resume_one_lwp (event_child, 0, 0, NULL);
	  continue;
	}

      bp_status = check_breakpoints (stop_pc);

      if (bp_status != 0)
	{
	  if (debug_threads)
	    fprintf (stderr, "Hit a gdbserver breakpoint.\n");

	  /* We hit one of our own breakpoints.  We mark it as a pending
	     breakpoint, so that check_removed_breakpoint () will do the PC
	     adjustment for us at the appropriate time.  */
	  event_child->pending_is_breakpoint = 1;
	  event_child->pending_stop_pc = stop_pc;

	  /* We may need to put the breakpoint back.  We continue in the event
	     loop instead of simply replacing the breakpoint right away,
	     in order to not lose signals sent to the thread that hit the
	     breakpoint.  Unfortunately this increases the window where another
	     thread could sneak past the removed breakpoint.  For the current
	     use of server-side breakpoints (thread creation) this is
	     acceptable; but it needs to be considered before this breakpoint
	     mechanism can be used in more general ways.  For some breakpoints
	     it may be necessary to stop all other threads, but that should
	     be avoided where possible.

	     If breakpoint_reinsert_addr is NULL, that means that we can
	     use PTRACE_SINGLESTEP on this platform.  Uninsert the breakpoint,
	     mark it for reinsertion, and single-step.

	     Otherwise, call the target function to figure out where we need
	     our temporary breakpoint, create it, and continue executing this
	     process.  */

	  /* NOTE: we're lifting breakpoints in non-stop mode.  This
	     is currently only used for thread event breakpoints, so
	     it isn't that bad as long as we have PTRACE_EVENT_CLONE
	     events.  */
	  if (bp_status == 2)
	    /* No need to reinsert.  */
	    linux_resume_one_lwp (event_child, 0, 0, NULL);
	  else if (the_low_target.breakpoint_reinsert_addr == NULL)
	    {
	      event_child->bp_reinsert = stop_pc;
	      uninsert_breakpoint (stop_pc);
	      linux_resume_one_lwp (event_child, 1, 0, NULL);
	    }
	  else
	    {
	      reinsert_breakpoint_by_bp
		(stop_pc, (*the_low_target.breakpoint_reinsert_addr) ());
	      linux_resume_one_lwp (event_child, 0, 0, NULL);
	    }

	  continue;
	}

      if (debug_threads)
	fprintf (stderr, "Hit a non-gdbserver breakpoint.\n");

      /* If we were single-stepping, we definitely want to report the
	 SIGTRAP.  Although the single-step operation has completed,
	 do not clear the stepping flag yet; we need to check it
	 in wait_for_sigstop.  */
      if (event_child->stepping)
	return lwpid_of (event_child);

      /* A SIGTRAP that we can't explain.  It may have been a breakpoint.
	 Check if it is a breakpoint, and if so mark the process information
	 accordingly.  This will handle both the necessary fiddling with the
	 PC on decr_pc_after_break targets and suppressing extra threads
	 hitting a breakpoint if two hit it at once and then GDB removes it
	 after the first is reported.  Arguably it would be better to report
	 multiple threads hitting breakpoints simultaneously, but the current
	 remote protocol does not allow this.  */
      if ((*the_low_target.breakpoint_at) (stop_pc))
	{
	  event_child->pending_is_breakpoint = 1;
	  event_child->pending_stop_pc = stop_pc;
	}

      return lwpid_of (event_child);
    }

  /* NOTREACHED */
  return 0;
}
1352
1353 static int
1354 linux_wait_for_event (ptid_t ptid, int *wstat, int options)
1355 {
1356 ptid_t wait_ptid;
1357
1358 if (ptid_is_pid (ptid))
1359 {
1360 /* A request to wait for a specific tgid. This is not possible
1361 with waitpid, so instead, we wait for any child, and leave
1362 children we're not interested in right now with a pending
1363 status to report later. */
1364 wait_ptid = minus_one_ptid;
1365 }
1366 else
1367 wait_ptid = ptid;
1368
1369 while (1)
1370 {
1371 int event_pid;
1372
1373 event_pid = linux_wait_for_event_1 (wait_ptid, wstat, options);
1374
1375 if (event_pid > 0
1376 && ptid_is_pid (ptid) && ptid_get_pid (ptid) != event_pid)
1377 {
1378 struct lwp_info *event_child = find_lwp_pid (pid_to_ptid (event_pid));
1379
1380 if (! WIFSTOPPED (*wstat))
1381 mark_lwp_dead (event_child, *wstat);
1382 else
1383 {
1384 event_child->status_pending_p = 1;
1385 event_child->status_pending = *wstat;
1386 }
1387 }
1388 else
1389 return event_pid;
1390 }
1391 }
1392
/* Wait for process, returns status.

   Core of the target's wait method: waits for one reportable event,
   translates the raw wait status into *OURSTATUS, and returns the
   ptid of the thread the event belongs to (null_ptid if WNOHANG was
   requested and nothing happened).  In all-stop mode, stops every
   other LWP before reporting.  */

static ptid_t
linux_wait_1 (ptid_t ptid,
	      struct target_waitstatus *ourstatus, int target_options)
{
  int w;
  struct thread_info *thread = NULL;
  struct lwp_info *lwp = NULL;
  int options;
  int pid;

  /* Translate generic target options into linux options.  */
  options = __WALL;
  if (target_options & TARGET_WNOHANG)
    options |= WNOHANG;

retry:
  ourstatus->kind = TARGET_WAITKIND_IGNORE;

  /* If we were only supposed to resume one thread, only wait for
     that thread - if it's still alive.  If it died, however - which
     can happen if we're coming from the thread death case below -
     then we need to make sure we restart the other threads.  We could
     pick a thread at random or restart all; restarting all is less
     arbitrary.  */
  if (!non_stop
      && !ptid_equal (cont_thread, null_ptid)
      && !ptid_equal (cont_thread, minus_one_ptid))
    {
      thread = (struct thread_info *) find_inferior_id (&all_threads,
							cont_thread);

      /* No stepping, no signal - unless one is pending already, of course.  */
      if (thread == NULL)
	{
	  struct thread_resume resume_info;
	  resume_info.thread = minus_one_ptid;
	  resume_info.kind = resume_continue;
	  resume_info.sig = 0;
	  linux_resume (&resume_info, 1);
	}
      else
	ptid = cont_thread;
    }

  pid = linux_wait_for_event (ptid, &w, options);
  if (pid == 0) /* only if TARGET_WNOHANG */
    return null_ptid;

  lwp = get_thread_lwp (current_inferior);

  /* If we are waiting for a particular child, and it exited,
     linux_wait_for_event will return its exit status.  Similarly if
     the last child exited.  If this is not the last child, however,
     do not report it as exited until there is a 'thread exited' response
     available in the remote protocol.  Instead, just wait for another event.
     This should be safe, because if the thread crashed we will already
     have reported the termination signal to GDB; that should stop any
     in-progress stepping operations, etc.

     Report the exit status of the last thread to exit.  This matches
     LinuxThreads' behavior.  */

  if (last_thread_of_process_p (current_inferior))
    {
      if (WIFEXITED (w) || WIFSIGNALED (w))
	{
	  /* NOTE(review): this PID shadows the function-scope `pid`;
	     the outer value is not used past this point.  */
	  int pid = pid_of (lwp);
	  struct process_info *process = find_process_pid (pid);

#ifdef USE_THREAD_DB
	  thread_db_free (process, 0);
#endif
	  delete_lwp (lwp);
	  linux_remove_process (process);

	  current_inferior = NULL;

	  if (WIFEXITED (w))
	    {
	      ourstatus->kind = TARGET_WAITKIND_EXITED;
	      ourstatus->value.integer = WEXITSTATUS (w);

	      if (debug_threads)
		fprintf (stderr, "\nChild exited with retcode = %x \n", WEXITSTATUS (w));
	    }
	  else
	    {
	      ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
	      ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));

	      if (debug_threads)
		fprintf (stderr, "\nChild terminated with signal = %x \n", WTERMSIG (w));

	    }

	  return pid_to_ptid (pid);
	}
    }
  else
    {
      /* A non-final thread exited; nothing reportable yet, so wait for
	 another event.  */
      if (!WIFSTOPPED (w))
	goto retry;
    }

  /* In all-stop, stop all threads.  Be careful to only do this if
     we're about to report an event to GDB.  */
  if (!non_stop)
    stop_all_lwps ();

  ourstatus->kind = TARGET_WAITKIND_STOPPED;

  if (lwp->suspended && WSTOPSIG (w) == SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 and it stopped cleanly, so report as SIG0.  The use of
	 SIGSTOP is an implementation detail.  */
      ourstatus->value.sig = TARGET_SIGNAL_0;
    }
  else if (lwp->suspended && WSTOPSIG (w) != SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 but, it stopped for other reasons.  Set stop_expected so the
	 pending SIGSTOP is ignored and the LWP is resumed.  */
      lwp->stop_expected = 1;
      ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
    }
  else
    {
      ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
    }

  if (debug_threads)
    fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
	     target_pid_to_str (lwp->head.id),
	     ourstatus->kind,
	     ourstatus->value.sig);

  return lwp->head.id;
}
1534
1535 /* Get rid of any pending event in the pipe. */
1536 static void
1537 async_file_flush (void)
1538 {
1539 int ret;
1540 char buf;
1541
1542 do
1543 ret = read (linux_event_pipe[0], &buf, 1);
1544 while (ret >= 0 || (ret == -1 && errno == EINTR));
1545 }
1546
1547 /* Put something in the pipe, so the event loop wakes up. */
1548 static void
1549 async_file_mark (void)
1550 {
1551 int ret;
1552
1553 async_file_flush ();
1554
1555 do
1556 ret = write (linux_event_pipe[1], "+", 1);
1557 while (ret == 0 || (ret == -1 && errno == EINTR));
1558
1559 /* Ignore EAGAIN. If the pipe is full, the event loop will already
1560 be awakened anyway. */
1561 }
1562
1563 static ptid_t
1564 linux_wait (ptid_t ptid,
1565 struct target_waitstatus *ourstatus, int target_options)
1566 {
1567 ptid_t event_ptid;
1568
1569 if (debug_threads)
1570 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
1571
1572 /* Flush the async file first. */
1573 if (target_is_async_p ())
1574 async_file_flush ();
1575
1576 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
1577
1578 /* If at least one stop was reported, there may be more. A single
1579 SIGCHLD can signal more than one child stop. */
1580 if (target_is_async_p ()
1581 && (target_options & TARGET_WNOHANG) != 0
1582 && !ptid_equal (event_ptid, null_ptid))
1583 async_file_mark ();
1584
1585 return event_ptid;
1586 }
1587
/* Send a signal to an LWP.

   Prefer the tkill syscall so the signal reaches exactly the named
   thread under NPTL; if the kernel reports ENOSYS once, remember that
   and fall back to kill() permanently.  */

static int
kill_lwp (unsigned long lwpid, int signo)
{
#ifdef __NR_tkill
  {
    /* Latched on the first ENOSYS so we only probe the kernel once.  */
    static int tkill_failed;

    if (!tkill_failed)
      {
	int result;

	errno = 0;
	result = syscall (__NR_tkill, lwpid, signo);
	if (errno != ENOSYS)
	  return result;
	tkill_failed = 1;
      }
  }
#endif

  return kill (lwpid, signo);
}
1615
1616 static void
1617 send_sigstop (struct inferior_list_entry *entry)
1618 {
1619 struct lwp_info *lwp = (struct lwp_info *) entry;
1620 int pid;
1621
1622 if (lwp->stopped)
1623 return;
1624
1625 pid = lwpid_of (lwp);
1626
1627 /* If we already have a pending stop signal for this process, don't
1628 send another. */
1629 if (lwp->stop_expected)
1630 {
1631 if (debug_threads)
1632 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
1633
1634 /* We clear the stop_expected flag so that wait_for_sigstop
1635 will receive the SIGSTOP event (instead of silently resuming and
1636 waiting again). It'll be reset below. */
1637 lwp->stop_expected = 0;
1638 return;
1639 }
1640
1641 if (debug_threads)
1642 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
1643
1644 kill_lwp (pid, SIGSTOP);
1645 }
1646
1647 static void
1648 mark_lwp_dead (struct lwp_info *lwp, int wstat)
1649 {
1650 /* It's dead, really. */
1651 lwp->dead = 1;
1652
1653 /* Store the exit status for later. */
1654 lwp->status_pending_p = 1;
1655 lwp->status_pending = wstat;
1656
1657 /* So that check_removed_breakpoint doesn't try to figure out if
1658 this is stopped at a breakpoint. */
1659 lwp->pending_is_breakpoint = 0;
1660
1661 /* Prevent trying to stop it. */
1662 lwp->stopped = 1;
1663
1664 /* No further stops are expected from a dead lwp. */
1665 lwp->stop_expected = 0;
1666 }
1667
/* Wait for the LWP in ENTRY to report a stop (normally the SIGSTOP
   sent by send_sigstop).  If it stops with some other signal first,
   stash that status so it can be reported later; if the whole process
   exits, mark the LWP dead instead.  Preserves current_inferior
   across the wait, falling back sensibly if that thread died.  */
static void
wait_for_sigstop (struct inferior_list_entry *entry)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *saved_inferior;
  int wstat;
  ptid_t saved_tid;
  ptid_t ptid;

  if (lwp->stopped)
    return;

  saved_inferior = current_inferior;
  if (saved_inferior != NULL)
    saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
  else
    saved_tid = null_ptid; /* avoid bogus unused warning */

  ptid = lwp->head.id;

  linux_wait_for_event (ptid, &wstat, __WALL);

  /* If we stopped with a non-SIGSTOP signal, save it for later
     and record the pending SIGSTOP.  If the process exited, just
     return.  */
  if (WIFSTOPPED (wstat)
      && WSTOPSIG (wstat) != SIGSTOP)
    {
      if (debug_threads)
	fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
		 lwpid_of (lwp), wstat);

      /* Do not leave a pending single-step finish to be reported to
	 the client.  The client will give us a new action for this
	 thread, possibly a continue request --- otherwise, the client
	 would consider this pending SIGTRAP reported later a spurious
	 signal.  */
      if (WSTOPSIG (wstat) == SIGTRAP
	  && lwp->stepping
	  && !linux_stopped_by_watchpoint ())
	{
	  if (debug_threads)
	    fprintf (stderr, " single-step SIGTRAP ignored\n");
	}
      else
	{
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}
      /* The SIGSTOP we sent is still queued; expect (and swallow) it
	 on the next resume.  */
      lwp->stop_expected = 1;
    }
  else if (!WIFSTOPPED (wstat))
    {
      if (debug_threads)
	fprintf (stderr, "Process %ld exited while stopping LWPs\n",
		 lwpid_of (lwp));

      /* Leave this status pending for the next time we're able to
	 report it.  In the mean time, we'll report this lwp as dead
	 to GDB, so GDB doesn't try to read registers and memory from
	 it.  */
      mark_lwp_dead (lwp, wstat);
    }

  if (saved_inferior == NULL || linux_thread_alive (saved_tid))
    current_inferior = saved_inferior;
  else
    {
      if (debug_threads)
	fprintf (stderr, "Previously current thread died.\n");

      if (non_stop)
	{
	  /* We can't change the current inferior behind GDB's back,
	     otherwise, a subsequent command may apply to the wrong
	     process.  */
	  current_inferior = NULL;
	}
      else
	{
	  /* Set a valid thread as current.  */
	  set_desired_inferior (0);
	}
    }
}
1753
/* Stop every LWP: send each a SIGSTOP, then wait for each to report
   a stop.  stopping_threads is set around the operation so other code
   (e.g. the pass-signal filter) knows a global stop is in progress.  */
static void
stop_all_lwps (void)
{
  stopping_threads = 1;
  for_each_inferior (&all_lwps, send_sigstop);
  for_each_inferior (&all_lwps, wait_for_sigstop);
  stopping_threads = 0;
}
1762
/* Resume execution of the inferior process.
   If STEP is nonzero, single-step it.
   If SIGNAL is nonzero, give it that signal.
   INFO, when non-NULL, is the siginfo to pair with SIGNAL (queued via
   PTRACE_SETSIGINFO when the signal is eventually delivered).  */

static void
linux_resume_one_lwp (struct lwp_info *lwp,
		      int step, int signal, siginfo_t *info)
{
  struct thread_info *saved_inferior;

  /* Already running - nothing to do.  */
  if (lwp->stopped == 0)
    return;

  /* If we have pending signals or status, and a new signal, enqueue the
     signal.  Also enqueue the signal if we are waiting to reinsert a
     breakpoint; it will be picked up again below.  */
  if (signal != 0
      && (lwp->status_pending_p || lwp->pending_signals != NULL
	  || lwp->bp_reinsert != 0))
    {
      struct pending_signals *p_sig;
      p_sig = xmalloc (sizeof (*p_sig));
      p_sig->prev = lwp->pending_signals;
      p_sig->signal = signal;
      if (info == NULL)
	memset (&p_sig->info, 0, sizeof (siginfo_t));
      else
	memcpy (&p_sig->info, info, sizeof (siginfo_t));
      lwp->pending_signals = p_sig;
    }

  /* A still-pending (non-breakpoint) status must be reported before
     the LWP may run again; leave it stopped.  */
  if (lwp->status_pending_p && !check_removed_breakpoint (lwp))
    return;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if (debug_threads)
    fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
	     lwpid_of (lwp), step ? "step" : "continue", signal,
	     lwp->stop_expected ? "expected" : "not expected");

  /* This bit needs some thinking about.  If we get a signal that
     we must report while a single-step reinsert is still pending,
     we often end up resuming the thread.  It might be better to
     (ew) allow a stack of pending events; then we could be sure that
     the reinsert happened right away and not lose any signals.

     Making this stack would also shrink the window in which breakpoints are
     uninserted (see comment in linux_wait_for_lwp) but not enough for
     complete correctness, so it won't solve that problem.  It may be
     worthwhile just to solve this one, however.  */
  if (lwp->bp_reinsert != 0)
    {
      if (debug_threads)
	fprintf (stderr, " pending reinsert at %08lx", (long)lwp->bp_reinsert);
      if (step == 0)
	fprintf (stderr, "BAD - reinserting but not stepping.\n");
      step = 1;

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  check_removed_breakpoint (lwp);

  if (debug_threads && the_low_target.get_pc != NULL)
    {
      CORE_ADDR pc = (*the_low_target.get_pc) ();
      fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
    }

  /* If we have pending signals, consume one unless we are trying to reinsert
     a breakpoint.  */
  if (lwp->pending_signals != NULL && lwp->bp_reinsert == 0)
    {
      struct pending_signals **p_sig;

      /* The list is newest-first; walk to the tail so the oldest
	 queued signal is delivered first.  */
      p_sig = &lwp->pending_signals;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      signal = (*p_sig)->signal;
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);

      free (*p_sig);
      *p_sig = NULL;
    }

  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);

  /* Registers may change while the LWP runs; drop our cached copy.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
			   get_lwp_thread (lwp));
  errno = 0;
  lwp->stopped = 0;
  lwp->stepping = step;
  ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0, signal);

  current_inferior = saved_inferior;
  if (errno)
    {
      /* ESRCH from ptrace either means that the thread was already
	 running (an error) or that it is gone (a race condition).  If
	 it's gone, we will get a notification the next time we wait,
	 so we can ignore the error.  We could differentiate these
	 two, but it's tricky without waiting; the thread still exists
	 as a zombie, so sending it signal 0 would succeed.  So just
	 ignore ESRCH.  */
      if (errno == ESRCH)
	return;

      perror_with_name ("ptrace");
    }
}
1879
/* Argument bundle for linux_set_resume_request: the array of resume
   requests received from GDB, and its element count.  */
struct thread_resume_array
{
  struct thread_resume *resume;
  size_t n;
};
1885
1886 /* This function is called once per thread. We look up the thread
1887 in RESUME_PTR, and mark the thread with a pointer to the appropriate
1888 resume request.
1889
1890 This algorithm is O(threads * resume elements), but resume elements
1891 is small (and will remain small at least until GDB supports thread
1892 suspension). */
1893 static int
1894 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
1895 {
1896 struct lwp_info *lwp;
1897 struct thread_info *thread;
1898 int ndx;
1899 struct thread_resume_array *r;
1900
1901 thread = (struct thread_info *) entry;
1902 lwp = get_thread_lwp (thread);
1903 r = arg;
1904
1905 for (ndx = 0; ndx < r->n; ndx++)
1906 {
1907 ptid_t ptid = r->resume[ndx].thread;
1908 if (ptid_equal (ptid, minus_one_ptid)
1909 || ptid_equal (ptid, entry->id)
1910 || (ptid_is_pid (ptid)
1911 && (ptid_get_pid (ptid) == pid_of (lwp)))
1912 || (ptid_get_lwp (ptid) == -1
1913 && (ptid_get_pid (ptid) == pid_of (lwp))))
1914 {
1915 lwp->resume = &r->resume[ndx];
1916 return 0;
1917 }
1918 }
1919
1920 /* No resume action for this thread. */
1921 lwp->resume = NULL;
1922
1923 return 0;
1924 }
1925
1926
1927 /* Set *FLAG_P if this lwp has an interesting status pending. */
1928 static int
1929 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
1930 {
1931 struct lwp_info *lwp = (struct lwp_info *) entry;
1932
1933 /* LWPs which will not be resumed are not interesting, because
1934 we might not wait for them next time through linux_wait. */
1935 if (lwp->resume == NULL)
1936 return 0;
1937
1938 /* If this thread has a removed breakpoint, we won't have any
1939 events to report later, so check now. check_removed_breakpoint
1940 may clear status_pending_p. We avoid calling check_removed_breakpoint
1941 for any thread that we are not otherwise going to resume - this
1942 lets us preserve stopped status when two threads hit a breakpoint.
1943 GDB removes the breakpoint to single-step a particular thread
1944 past it, then re-inserts it and resumes all threads. We want
1945 to report the second thread without resuming it in the interim. */
1946 if (lwp->status_pending_p)
1947 check_removed_breakpoint (lwp);
1948
1949 if (lwp->status_pending_p)
1950 * (int *) flag_p = 1;
1951
1952 return 0;
1953 }
1954
/* This function is called once per thread.  We check the thread's resume
   request, which will tell us whether to resume, step, or leave the thread
   stopped; and what signal, if any, it should be sent.

   For threads which we aren't explicitly told otherwise, we preserve
   the stepping flag; this is used for stepping over gdbserver-placed
   breakpoints.

   If pending_flags was set in any thread, we queue any needed
   signals, since we won't actually resume.  We already have a pending
   event to report, so we don't need to preserve any step requests;
   they should be re-issued if necessary.  */

static int
linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp;
  struct thread_info *thread;
  int step;
  int pending_flag = * (int *) arg;

  thread = (struct thread_info *) entry;
  lwp = get_thread_lwp (thread);

  /* Threads with no resume request are left as they are.  */
  if (lwp->resume == NULL)
    return 0;

  if (lwp->resume->kind == resume_stop)
    {
      if (debug_threads)
	fprintf (stderr, "suspending LWP %ld\n", lwpid_of (lwp));

      if (!lwp->stopped)
	{
	  if (debug_threads)
	    fprintf (stderr, "running -> suspending LWP %ld\n", lwpid_of (lwp));

	  lwp->suspended = 1;
	  send_sigstop (&lwp->head);
	}
      else
	{
	  if (debug_threads)
	    {
	      if (lwp->suspended)
		fprintf (stderr, "already stopped/suspended LWP %ld\n",
			 lwpid_of (lwp));
	      else
		fprintf (stderr, "already stopped/not suspended LWP %ld\n",
			 lwpid_of (lwp));
	    }

	  /* Make sure we leave the LWP suspended, so we don't try to
	     resume it without GDB telling us to.  FIXME: The LWP may
	     have been stopped in an internal event that was not meant
	     to be notified back to GDB (e.g., gdbserver breakpoint),
	     so we should be reporting a stop event in that case
	     too.  */
	  lwp->suspended = 1;
	}

      /* For stop requests, we're done.  */
      lwp->resume = NULL;
      return 0;
    }
  else
    lwp->suspended = 0;

  /* If this thread which is about to be resumed has a pending status,
     then don't resume any threads - we can just report the pending
     status.  Make sure to queue any signals that would otherwise be
     sent.  In all-stop mode, we do this decision based on if *any*
     thread has a pending status.  */
  if (non_stop)
    resume_status_pending_p (&lwp->head, &pending_flag);

  if (!pending_flag)
    {
      if (debug_threads)
	fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));

      /* On a wildcard resume, keep stepping a thread that stopped at a
	 breakpoint, so gdbserver breakpoints can be stepped over.  */
      if (ptid_equal (lwp->resume->thread, minus_one_ptid)
	  && lwp->stepping
	  && lwp->pending_is_breakpoint)
	step = 1;
      else
	step = (lwp->resume->kind == resume_step);

      linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
    }
  else
    {
      if (debug_threads)
	fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));

      /* If we have a new signal, enqueue the signal.  */
      if (lwp->resume->sig != 0)
	{
	  struct pending_signals *p_sig;
	  p_sig = xmalloc (sizeof (*p_sig));
	  p_sig->prev = lwp->pending_signals;
	  p_sig->signal = lwp->resume->sig;
	  memset (&p_sig->info, 0, sizeof (siginfo_t));

	  /* If this is the same signal we were previously stopped by,
	     make sure to queue its siginfo.  We can ignore the return
	     value of ptrace; if it fails, we'll skip
	     PTRACE_SETSIGINFO.  */
	  if (WIFSTOPPED (lwp->last_status)
	      && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
	    ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);

	  lwp->pending_signals = p_sig;
	}
    }

  lwp->resume = NULL;
  return 0;
}
2074
2075 static void
2076 linux_resume (struct thread_resume *resume_info, size_t n)
2077 {
2078 int pending_flag;
2079 struct thread_resume_array array = { resume_info, n };
2080
2081 find_inferior (&all_threads, linux_set_resume_request, &array);
2082
2083 /* If there is a thread which would otherwise be resumed, which
2084 has a pending status, then don't resume any threads - we can just
2085 report the pending status. Make sure to queue any signals
2086 that would otherwise be sent. In non-stop mode, we'll apply this
2087 logic to each thread individually. */
2088 pending_flag = 0;
2089 if (!non_stop)
2090 find_inferior (&all_lwps, resume_status_pending_p, &pending_flag);
2091
2092 if (debug_threads)
2093 {
2094 if (pending_flag)
2095 fprintf (stderr, "Not resuming, pending status\n");
2096 else
2097 fprintf (stderr, "Resuming, no pending status\n");
2098 }
2099
2100 find_inferior (&all_threads, linux_resume_one_thread, &pending_flag);
2101 }
2102
2103 #ifdef HAVE_LINUX_USRREGS
2104
2105 int
2106 register_addr (int regnum)
2107 {
2108 int addr;
2109
2110 if (regnum < 0 || regnum >= the_low_target.num_regs)
2111 error ("Invalid register number %d.", regnum);
2112
2113 addr = the_low_target.regmap[regnum];
2114
2115 return addr;
2116 }
2117
2118 /* Fetch one register. */
2119 static void
2120 fetch_register (int regno)
2121 {
2122 CORE_ADDR regaddr;
2123 int i, size;
2124 char *buf;
2125 int pid;
2126
2127 if (regno >= the_low_target.num_regs)
2128 return;
2129 if ((*the_low_target.cannot_fetch_register) (regno))
2130 return;
2131
2132 regaddr = register_addr (regno);
2133 if (regaddr == -1)
2134 return;
2135
2136 pid = lwpid_of (get_thread_lwp (current_inferior));
2137 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
2138 & - sizeof (PTRACE_XFER_TYPE));
2139 buf = alloca (size);
2140 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
2141 {
2142 errno = 0;
2143 *(PTRACE_XFER_TYPE *) (buf + i) =
2144 ptrace (PTRACE_PEEKUSER, pid, (PTRACE_ARG3_TYPE) regaddr, 0);
2145 regaddr += sizeof (PTRACE_XFER_TYPE);
2146 if (errno != 0)
2147 {
2148 /* Warning, not error, in case we are attached; sometimes the
2149 kernel doesn't let us at the registers. */
2150 char *err = strerror (errno);
2151 char *msg = alloca (strlen (err) + 128);
2152 sprintf (msg, "reading register %d: %s", regno, err);
2153 error (msg);
2154 goto error_exit;
2155 }
2156 }
2157
2158 if (the_low_target.supply_ptrace_register)
2159 the_low_target.supply_ptrace_register (regno, buf);
2160 else
2161 supply_register (regno, buf);
2162
2163 error_exit:;
2164 }
2165
2166 /* Fetch all registers, or just one, from the child process. */
2167 static void
2168 usr_fetch_inferior_registers (int regno)
2169 {
2170 if (regno == -1)
2171 for (regno = 0; regno < the_low_target.num_regs; regno++)
2172 fetch_register (regno);
2173 else
2174 fetch_register (regno);
2175 }
2176
2177 /* Store our register values back into the inferior.
2178 If REGNO is -1, do this for all registers.
2179 Otherwise, REGNO specifies which register (so we can save time). */
2180 static void
2181 usr_store_inferior_registers (int regno)
2182 {
2183 CORE_ADDR regaddr;
2184 int i, size;
2185 char *buf;
2186 int pid;
2187
2188 if (regno >= 0)
2189 {
2190 if (regno >= the_low_target.num_regs)
2191 return;
2192
2193 if ((*the_low_target.cannot_store_register) (regno) == 1)
2194 return;
2195
2196 regaddr = register_addr (regno);
2197 if (regaddr == -1)
2198 return;
2199 errno = 0;
2200 size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
2201 & - sizeof (PTRACE_XFER_TYPE);
2202 buf = alloca (size);
2203 memset (buf, 0, size);
2204
2205 if (the_low_target.collect_ptrace_register)
2206 the_low_target.collect_ptrace_register (regno, buf);
2207 else
2208 collect_register (regno, buf);
2209
2210 pid = lwpid_of (get_thread_lwp (current_inferior));
2211 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
2212 {
2213 errno = 0;
2214 ptrace (PTRACE_POKEUSER, pid, (PTRACE_ARG3_TYPE) regaddr,
2215 *(PTRACE_XFER_TYPE *) (buf + i));
2216 if (errno != 0)
2217 {
2218 /* At this point, ESRCH should mean the process is
2219 already gone, in which case we simply ignore attempts
2220 to change its registers. See also the related
2221 comment in linux_resume_one_lwp. */
2222 if (errno == ESRCH)
2223 return;
2224
2225 if ((*the_low_target.cannot_store_register) (regno) == 0)
2226 {
2227 char *err = strerror (errno);
2228 char *msg = alloca (strlen (err) + 128);
2229 sprintf (msg, "writing register %d: %s",
2230 regno, err);
2231 error (msg);
2232 return;
2233 }
2234 }
2235 regaddr += sizeof (PTRACE_XFER_TYPE);
2236 }
2237 }
2238 else
2239 for (regno = 0; regno < the_low_target.num_regs; regno++)
2240 usr_store_inferior_registers (regno);
2241 }
2242 #endif /* HAVE_LINUX_USRREGS */
2243
2244
2245
2246 #ifdef HAVE_LINUX_REGSETS
2247
2248 static int
2249 regsets_fetch_inferior_registers ()
2250 {
2251 struct regset_info *regset;
2252 int saw_general_regs = 0;
2253 int pid;
2254
2255 regset = target_regsets;
2256
2257 pid = lwpid_of (get_thread_lwp (current_inferior));
2258 while (regset->size >= 0)
2259 {
2260 void *buf;
2261 int res;
2262
2263 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
2264 {
2265 regset ++;
2266 continue;
2267 }
2268
2269 buf = xmalloc (regset->size);
2270 #ifndef __sparc__
2271 res = ptrace (regset->get_request, pid, 0, buf);
2272 #else
2273 res = ptrace (regset->get_request, pid, buf, 0);
2274 #endif
2275 if (res < 0)
2276 {
2277 if (errno == EIO)
2278 {
2279 /* If we get EIO on a regset, do not try it again for
2280 this process. */
2281 disabled_regsets[regset - target_regsets] = 1;
2282 free (buf);
2283 continue;
2284 }
2285 else
2286 {
2287 char s[256];
2288 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
2289 pid);
2290 perror (s);
2291 }
2292 }
2293 else if (regset->type == GENERAL_REGS)
2294 saw_general_regs = 1;
2295 regset->store_function (buf);
2296 regset ++;
2297 free (buf);
2298 }
2299 if (saw_general_regs)
2300 return 0;
2301 else
2302 return 1;
2303 }
2304
2305 static int
2306 regsets_store_inferior_registers ()
2307 {
2308 struct regset_info *regset;
2309 int saw_general_regs = 0;
2310 int pid;
2311
2312 regset = target_regsets;
2313
2314 pid = lwpid_of (get_thread_lwp (current_inferior));
2315 while (regset->size >= 0)
2316 {
2317 void *buf;
2318 int res;
2319
2320 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
2321 {
2322 regset ++;
2323 continue;
2324 }
2325
2326 buf = xmalloc (regset->size);
2327
2328 /* First fill the buffer with the current register set contents,
2329 in case there are any items in the kernel's regset that are
2330 not in gdbserver's regcache. */
2331 #ifndef __sparc__
2332 res = ptrace (regset->get_request, pid, 0, buf);
2333 #else
2334 res = ptrace (regset->get_request, pid, buf, 0);
2335 #endif
2336
2337 if (res == 0)
2338 {
2339 /* Then overlay our cached registers on that. */
2340 regset->fill_function (buf);
2341
2342 /* Only now do we write the register set. */
2343 #ifndef __sparc__
2344 res = ptrace (regset->set_request, pid, 0, buf);
2345 #else
2346 res = ptrace (regset->set_request, pid, buf, 0);
2347 #endif
2348 }
2349
2350 if (res < 0)
2351 {
2352 if (errno == EIO)
2353 {
2354 /* If we get EIO on a regset, do not try it again for
2355 this process. */
2356 disabled_regsets[regset - target_regsets] = 1;
2357 free (buf);
2358 continue;
2359 }
2360 else if (errno == ESRCH)
2361 {
2362 /* At this point, ESRCH should mean the process is
2363 already gone, in which case we simply ignore attempts
2364 to change its registers. See also the related
2365 comment in linux_resume_one_lwp. */
2366 free (buf);
2367 return 0;
2368 }
2369 else
2370 {
2371 perror ("Warning: ptrace(regsets_store_inferior_registers)");
2372 }
2373 }
2374 else if (regset->type == GENERAL_REGS)
2375 saw_general_regs = 1;
2376 regset ++;
2377 free (buf);
2378 }
2379 if (saw_general_regs)
2380 return 0;
2381 else
2382 return 1;
2383 return 0;
2384 }
2385
2386 #endif /* HAVE_LINUX_REGSETS */
2387
2388
/* Fetch registers from the inferior into the regcache.  REGNO is the
   register to fetch, or -1 for all.  Try the regset interface first;
   fall back on the USER area if the regsets did not cover the
   general registers (or regset support is not compiled in).  */
void
linux_fetch_registers (int regno)
{
#ifdef HAVE_LINUX_REGSETS
  if (regsets_fetch_inferior_registers () == 0)
    return;
#endif
#ifdef HAVE_LINUX_USRREGS
  usr_fetch_inferior_registers (regno);
#endif
}
2400
/* Write registers from the regcache back to the inferior.  REGNO is
   the register to store, or -1 for all.  Mirrors
   linux_fetch_registers: regsets first, USER area as fallback.  */
void
linux_store_registers (int regno)
{
#ifdef HAVE_LINUX_REGSETS
  if (regsets_store_inferior_registers () == 0)
    return;
#endif
#ifdef HAVE_LINUX_USRREGS
  usr_store_inferior_registers (regno);
#endif
}
2412
2413
/* Copy LEN bytes from inferior's memory starting at MEMADDR
   to debugger memory starting at MYADDR.  Returns 0 on success, or
   an errno value if the word-by-word ptrace fallback fails.  Large
   transfers go through /proc/PID/mem when possible; on any /proc
   failure the code falls back on PTRACE_PEEKTEXT.  */

static int
linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
  register int i;
  /* Round starting address down to longword boundary.  */
  register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  register int count
    = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
      / sizeof (PTRACE_XFER_TYPE);
  /* Allocate buffer of that many longwords.  */
  register PTRACE_XFER_TYPE *buffer
    = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
  int fd;
  char filename[64];
  int pid = lwpid_of (get_thread_lwp (current_inferior));

  /* Try using /proc.  Don't bother for one word.  */
  if (len >= 3 * sizeof (long))
    {
      /* We could keep this file open and cache it - possibly one per
	 thread.  That requires some juggling, but is even faster.  */
      sprintf (filename, "/proc/%d/mem", pid);
      fd = open (filename, O_RDONLY | O_LARGEFILE);
      if (fd == -1)
	goto no_proc;

      /* If pread64 is available, use it.  It's faster if the kernel
	 supports it (only one syscall), and it's 64-bit safe even on
	 32-bit platforms (for instance, SPARC debugging a SPARC64
	 application).  */
#ifdef HAVE_PREAD64
      if (pread64 (fd, myaddr, len, memaddr) != len)
#else
      if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
#endif
	{
	  close (fd);
	  goto no_proc;
	}

      close (fd);
      return 0;
    }

 no_proc:
  /* Read all the longwords.  (Short transfers reach this label
     directly, having skipped the /proc attempt above.)  */
  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      buffer[i] = ptrace (PTRACE_PEEKTEXT, pid, (PTRACE_ARG3_TYPE) addr, 0);
      if (errno)
	return errno;
    }

  /* Copy appropriate bytes out of the buffer.  */
  memcpy (myaddr,
	  (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	  len);

  return 0;
}
2479
2480 /* Copy LEN bytes of data from debugger memory at MYADDR
2481 to inferior's memory at MEMADDR.
2482 On failure (cannot write the inferior)
2483 returns the value of errno. */
2484
2485 static int
2486 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
2487 {
2488 register int i;
2489 /* Round starting address down to longword boundary. */
2490 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
2491 /* Round ending address up; get number of longwords that makes. */
2492 register int count
2493 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1) / sizeof (PTRACE_XFER_TYPE);
2494 /* Allocate buffer of that many longwords. */
2495 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
2496 int pid = lwpid_of (get_thread_lwp (current_inferior));
2497
2498 if (debug_threads)
2499 {
2500 /* Dump up to four bytes. */
2501 unsigned int val = * (unsigned int *) myaddr;
2502 if (len == 1)
2503 val = val & 0xff;
2504 else if (len == 2)
2505 val = val & 0xffff;
2506 else if (len == 3)
2507 val = val & 0xffffff;
2508 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
2509 val, (long)memaddr);
2510 }
2511
2512 /* Fill start and end extra bytes of buffer with existing memory data. */
2513
2514 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid, (PTRACE_ARG3_TYPE) addr, 0);
2515
2516 if (count > 1)
2517 {
2518 buffer[count - 1]
2519 = ptrace (PTRACE_PEEKTEXT, pid,
2520 (PTRACE_ARG3_TYPE) (addr + (count - 1)
2521 * sizeof (PTRACE_XFER_TYPE)),
2522 0);
2523 }
2524
2525 /* Copy data to be written over corresponding part of buffer */
2526
2527 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), myaddr, len);
2528
2529 /* Write the entire buffer. */
2530
2531 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
2532 {
2533 errno = 0;
2534 ptrace (PTRACE_POKETEXT, pid, (PTRACE_ARG3_TYPE) addr, buffer[i]);
2535 if (errno)
2536 return errno;
2537 }
2538
2539 return 0;
2540 }
2541
2542 static int linux_supports_tracefork_flag;
2543
/* Helper functions for linux_test_for_tracefork, called via clone ().  */

/* Grandchild body: exit immediately.  Its only purpose is to give
   the test child a fork to be reported.  ARG is unused.  */
static int
linux_tracefork_grandchild (void *arg)
{
  _exit (0);
}
2551
#define STACK_SIZE 4096

/* Test-child body: arrange to be traced, stop with SIGSTOP so the
   parent can set ptrace options, then clone a grandchild (clone
   rather than fork, to support MMU-less targets) and exit.  ARG is
   the base of the stack area to give the grandchild.  */
static int
linux_tracefork_child (void *arg)
{
  ptrace (PTRACE_TRACEME, 0, 0, 0);
  kill (getpid (), SIGSTOP);
#ifdef __ia64__
  __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
	    CLONE_VM | SIGCHLD, NULL);
#else
  /* On most targets clone takes the initial (highest) stack address.  */
  clone (linux_tracefork_grandchild, arg + STACK_SIZE,
	 CLONE_VM | SIGCHLD, NULL);
#endif
  _exit (0);
}
2568
/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.  Make
   sure that we can enable the option, and that it had the desired
   effect.  Sets linux_supports_tracefork_flag (1 if usable,
   0 otherwise).  */

static void
linux_test_for_tracefork (void)
{
  int child_pid, ret, status;
  long second_pid;
  /* Room for the child's and the grandchild's stacks, with slack.  */
  char *stack = xmalloc (STACK_SIZE * 4);

  linux_supports_tracefork_flag = 0;

  /* Use CLONE_VM instead of fork, to support uClinux (no MMU).  */
#ifdef __ia64__
  child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
			CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
#else
  child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
		     CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
#endif
  if (child_pid == -1)
    perror_with_name ("clone");

  /* The child stops itself with SIGSTOP once it is being traced.  */
  ret = my_waitpid (child_pid, &status, 0);
  if (ret == -1)
    perror_with_name ("waitpid");
  else if (ret != child_pid)
    error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
  if (! WIFSTOPPED (status))
    error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);

  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      /* The option was rejected; kill and reap the child, leaving
	 the flag clear.  */
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
	{
	  warning ("linux_test_for_tracefork: failed to kill child");
	  return;
	}

      ret = my_waitpid (child_pid, &status, 0);
      if (ret != child_pid)
	warning ("linux_test_for_tracefork: failed to wait for killed child");
      else if (!WIFSIGNALED (status))
	warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
		 "killed child", status);

      return;
    }

  /* Resume the child; if TRACEFORK works, its clone will produce a
     PTRACE_EVENT_FORK stop.  */
  ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
  if (ret != 0)
    warning ("linux_test_for_tracefork: failed to resume child");

  ret = my_waitpid (child_pid, &status, 0);

  if (ret == child_pid && WIFSTOPPED (status)
      && status >> 16 == PTRACE_EVENT_FORK)
    {
      second_pid = 0;
      ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
      if (ret == 0 && second_pid != 0)
	{
	  int second_status;

	  linux_supports_tracefork_flag = 1;
	  /* The grandchild is traced as well; kill and reap it.  */
	  my_waitpid (second_pid, &second_status, 0);
	  ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
	  if (ret != 0)
	    warning ("linux_test_for_tracefork: failed to kill second child");
	  my_waitpid (second_pid, &status, 0);
	}
    }
  else
    warning ("linux_test_for_tracefork: unexpected result from waitpid "
	     "(%d, status 0x%x)", ret, status);

  /* Kill the child and drain any remaining stop events.  */
  do
    {
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
	warning ("linux_test_for_tracefork: failed to kill child");
      my_waitpid (child_pid, &status, 0);
    }
  while (WIFSTOPPED (status));

  free (stack);
}
2659
2660
/* Called once symbols are available.  Set up libthread_db-based
   thread debugging for the current process, unless it has already
   been initialized.  */
static void
linux_look_up_symbols (void)
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  if (proc->private->thread_db != NULL)
    return;

  /* Pass whether PTRACE_O_TRACEFORK support is missing; presumably
     thread_db_init falls back on a different thread-event mechanism
     in that case — see thread_db_init.  */
  thread_db_init (!linux_supports_tracefork_flag);
#endif
}
2673
2674 static void
2675 linux_request_interrupt (void)
2676 {
2677 extern unsigned long signal_pid;
2678
2679 if (!ptid_equal (cont_thread, null_ptid)
2680 && !ptid_equal (cont_thread, minus_one_ptid))
2681 {
2682 struct lwp_info *lwp;
2683 int lwpid;
2684
2685 lwp = get_thread_lwp (current_inferior);
2686 lwpid = lwpid_of (lwp);
2687 kill_lwp (lwpid, SIGINT);
2688 }
2689 else
2690 kill_lwp (signal_pid, SIGINT);
2691 }
2692
2693 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
2694 to debugger memory starting at MYADDR. */
2695
2696 static int
2697 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
2698 {
2699 char filename[PATH_MAX];
2700 int fd, n;
2701 int pid = lwpid_of (get_thread_lwp (current_inferior));
2702
2703 snprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
2704
2705 fd = open (filename, O_RDONLY);
2706 if (fd < 0)
2707 return -1;
2708
2709 if (offset != (CORE_ADDR) 0
2710 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
2711 n = -1;
2712 else
2713 n = read (fd, myaddr, len);
2714
2715 close (fd);
2716
2717 return n;
2718 }
2719
2720 /* These breakpoint and watchpoint related wrapper functions simply
2721 pass on the function call if the target has registered a
2722 corresponding function. */
2723
2724 static int
2725 linux_insert_point (char type, CORE_ADDR addr, int len)
2726 {
2727 if (the_low_target.insert_point != NULL)
2728 return the_low_target.insert_point (type, addr, len);
2729 else
2730 /* Unsupported (see target.h). */
2731 return 1;
2732 }
2733
2734 static int
2735 linux_remove_point (char type, CORE_ADDR addr, int len)
2736 {
2737 if (the_low_target.remove_point != NULL)
2738 return the_low_target.remove_point (type, addr, len);
2739 else
2740 /* Unsupported (see target.h). */
2741 return 1;
2742 }
2743
2744 static int
2745 linux_stopped_by_watchpoint (void)
2746 {
2747 if (the_low_target.stopped_by_watchpoint != NULL)
2748 return the_low_target.stopped_by_watchpoint ();
2749 else
2750 return 0;
2751 }
2752
2753 static CORE_ADDR
2754 linux_stopped_data_address (void)
2755 {
2756 if (the_low_target.stopped_data_address != NULL)
2757 return the_low_target.stopped_data_address ();
2758 else
2759 return 0;
2760 }
2761
#if defined(__UCLIBC__) && defined(HAS_NOMMU)
#if defined(__mcoldfire__)
/* These should really be defined in the kernel's ptrace.h header.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
#endif

/* Under uClinux, programs are loaded at non-zero offsets, which we need
   to tell gdb about.  Reads the runtime text/data addresses from the
   inferior's USER area.  Returns 1 on success, filling in *TEXT_P
   and *DATA_P; 0 if the PT_* offsets are unknown or ptrace fails.  */

static int
linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
#if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
  unsigned long text, text_end, data;
  int pid = lwpid_of (get_thread_lwp (current_inferior));

  errno = 0;

  text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
  text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
  data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);

  if (errno == 0)
    {
      /* Both text and data offsets produced at compile-time (and so
	 used by gdb) are relative to the beginning of the program,
	 with the data segment immediately following the text segment.
	 However, the actual runtime layout in memory may put the data
	 somewhere else, so when we send gdb a data base-address, we
	 use the real data base address and subtract the compile-time
	 data base-address from it (which is just the length of the
	 text segment).  BSS immediately follows data in both
	 cases.  */
      *text_p = text;
      *data_p = data - (text_end - text);

      return 1;
    }
#endif
 return 0;
}
#endif
2806
/* qsort comparison callback for ints.  Uses the (a > b) - (a < b)
   idiom instead of "a - b": the subtraction is undefined behavior
   when the difference overflows int, e.g. comparing INT_MIN against
   a positive value.  */
static int
compare_ints (const void *xa, const void *xb)
{
  int a = *(const int *) xa;
  int b = *(const int *) xb;

  return (a > b) - (a < b);
}
2815
/* Compact the sorted range [B, E) in place, removing consecutive
   duplicates.  Returns one past the last unique element.  An empty
   range is handled explicitly: the original pre-incremented B before
   comparing, dereferencing past the end when B == E (undefined
   behavior).  */
static int *
unique (int *b, int *e)
{
  int *d = b;

  if (b == e)
    return e;

  while (++b != e)
    if (*d != *b)
      *++d = *b;
  return ++d;
}
2825
2826 /* Given PID, iterates over all threads in that process.
2827
2828 Information about each thread, in a format suitable for qXfer:osdata:thread
2829 is printed to BUFFER, if it's not NULL. BUFFER is assumed to be already
2830 initialized, and the caller is responsible for finishing and appending '\0'
2831 to it.
2832
2833 The list of cores that threads are running on is assigned to *CORES, if it
2834 is not NULL. If no cores are found, *CORES will be set to NULL. Caller
2835 should free *CORES. */
2836
2837 static void
2838 list_threads (int pid, struct buffer *buffer, char **cores)
2839 {
2840 int count = 0;
2841 int allocated = 10;
2842 int *core_numbers = xmalloc (sizeof (int) * allocated);
2843 char pathname[128];
2844 DIR *dir;
2845 struct dirent *dp;
2846 struct stat statbuf;
2847
2848 sprintf (pathname, "/proc/%d/task", pid);
2849 if (stat (pathname, &statbuf) == 0 && S_ISDIR (statbuf.st_mode))
2850 {
2851 dir = opendir (pathname);
2852 if (!dir)
2853 {
2854 free (core_numbers);
2855 return;
2856 }
2857
2858 while ((dp = readdir (dir)) != NULL)
2859 {
2860 unsigned long lwp = strtoul (dp->d_name, NULL, 10);
2861
2862 if (lwp != 0)
2863 {
2864 unsigned core = linux_core_of_thread (ptid_build (pid, lwp, 0));
2865
2866 if (core != -1)
2867 {
2868 char s[sizeof ("4294967295")];
2869 sprintf (s, "%u", core);
2870
2871 if (count == allocated)
2872 {
2873 allocated *= 2;
2874 core_numbers = realloc (core_numbers,
2875 sizeof (int) * allocated);
2876 }
2877 core_numbers[count++] = core;
2878 if (buffer)
2879 buffer_xml_printf (buffer,
2880 "<item>"
2881 "<column name=\"pid\">%d</column>"
2882 "<column name=\"tid\">%s</column>"
2883 "<column name=\"core\">%s</column>"
2884 "</item>", pid, dp->d_name, s);
2885 }
2886 else
2887 {
2888 if (buffer)
2889 buffer_xml_printf (buffer,
2890 "<item>"
2891 "<column name=\"pid\">%d</column>"
2892 "<column name=\"tid\">%s</column>"
2893 "</item>", pid, dp->d_name);
2894 }
2895 }
2896 }
2897 }
2898
2899 if (cores)
2900 {
2901 *cores = NULL;
2902 if (count > 0)
2903 {
2904 struct buffer buffer2;
2905 int *b;
2906 int *e;
2907 qsort (core_numbers, count, sizeof (int), compare_ints);
2908
2909 /* Remove duplicates. */
2910 b = core_numbers;
2911 e = unique (b, core_numbers + count);
2912
2913 buffer_init (&buffer2);
2914
2915 for (b = core_numbers; b != e; ++b)
2916 {
2917 char number[sizeof ("4294967295")];
2918 sprintf (number, "%u", *b);
2919 buffer_xml_printf (&buffer2, "%s%s",
2920 (b == core_numbers) ? "" : ",", number);
2921 }
2922 buffer_grow_str0 (&buffer2, "");
2923
2924 *cores = buffer_finish (&buffer2);
2925 }
2926 }
2927 free (core_numbers);
2928 }
2929
/* Emit a qXfer:osdata:processes <item> for process PID to BUFFER.
   USERNAME is the owner's name.  The command line is read from
   /proc/PID/cmdline; processes whose cmdline cannot be read or is
   empty are omitted.  */
static void
show_process (int pid, const char *username, struct buffer *buffer)
{
  char pathname[128];
  FILE *f;
  char cmd[MAXPATHLEN + 1];

  sprintf (pathname, "/proc/%d/cmdline", pid);

  if ((f = fopen (pathname, "r")) != NULL)
    {
      size_t len = fread (cmd, 1, sizeof (cmd) - 1, f);
      if (len > 0)
	{
	  char *cores = 0;
	  int i;
	  /* cmdline's argv elements are NUL-separated; join them
	     with spaces.  */
	  for (i = 0; i < len; i++)
	    if (cmd[i] == '\0')
	      cmd[i] = ' ';
	  cmd[len] = '\0';

	  buffer_xml_printf (buffer,
			     "<item>"
			     "<column name=\"pid\">%d</column>"
			     "<column name=\"user\">%s</column>"
			     "<column name=\"command\">%s</column>",
			     pid,
			     username,
			     cmd);

	  /* This only collects core numbers, and does not print threads.  */
	  list_threads (pid, NULL, &cores);

	  if (cores)
	    {
	      buffer_xml_printf (buffer,
				 "<column name=\"cores\">%s</column>", cores);
	      free (cores);
	    }

	  buffer_xml_printf (buffer, "</item>");
	}
      fclose (f);
    }
}
2975
/* Handle qXfer:osdata reads for the "processes" and "threads"
   annexes.  A read at OFFSET 0 (re)builds a snapshot of /proc into
   the static BUFFER; subsequent reads page through it.  A read at or
   past the end frees the snapshot.  Writes are rejected.  Returns
   the number of bytes copied into READBUF, or 0.  */
static int
linux_qxfer_osdata (const char *annex,
		    unsigned char *readbuf, unsigned const char *writebuf,
		    CORE_ADDR offset, int len)
{
  /* We make the process list snapshot when the object starts to be
     read.  */
  static const char *buf;
  static long len_avail = -1;
  static struct buffer buffer;
  int processes = 0;
  int threads = 0;

  DIR *dirp;

  if (strcmp (annex, "processes") == 0)
    processes = 1;
  else if (strcmp (annex, "threads") == 0)
    threads = 1;
  else
    return 0;

  /* This object is read-only.  */
  if (!readbuf || writebuf)
    return 0;

  if (offset == 0)
    {
      /* Discard any previous snapshot and build a fresh one.  */
      if (len_avail != -1 && len_avail != 0)
	buffer_free (&buffer);
      len_avail = 0;
      buf = NULL;
      buffer_init (&buffer);
      if (processes)
	buffer_grow_str (&buffer, "<osdata type=\"processes\">");
      else if (threads)
	buffer_grow_str (&buffer, "<osdata type=\"threads\">");

      dirp = opendir ("/proc");
      if (dirp)
	{
	  struct dirent *dp;
	  while ((dp = readdir (dirp)) != NULL)
	    {
	      struct stat statbuf;
	      char procentry[sizeof ("/proc/4294967295")];

	      /* Only all-digit directory names are PIDs.  */
	      if (!isdigit (dp->d_name[0])
		  || strlen (dp->d_name) > sizeof ("4294967295") - 1)
		continue;

	      sprintf (procentry, "/proc/%s", dp->d_name);
	      if (stat (procentry, &statbuf) == 0
		  && S_ISDIR (statbuf.st_mode))
		{
		  int pid = (int) strtoul (dp->d_name, NULL, 10);

		  if (processes)
		    {
		      struct passwd *entry = getpwuid (statbuf.st_uid);
		      show_process (pid, entry ? entry->pw_name : "?", &buffer);
		    }
		  else if (threads)
		    {
		      list_threads (pid, &buffer, NULL);
		    }
		}
	    }

	  closedir (dirp);
	}
      buffer_grow_str0 (&buffer, "</osdata>\n");
      buf = buffer_finish (&buffer);
      len_avail = strlen (buf);
    }

  if (offset >= len_avail)
    {
      /* Done.  Get rid of the data.  */
      buffer_free (&buffer);
      buf = NULL;
      len_avail = 0;
      return 0;
    }

  if (len > len_avail - offset)
    len = len_avail - offset;
  memcpy (readbuf, buf + offset, len);

  return len;
}
3066
3067 /* Convert a native/host siginfo object, into/from the siginfo in the
3068 layout of the inferiors' architecture. */
3069
3070 static void
3071 siginfo_fixup (struct siginfo *siginfo, void *inf_siginfo, int direction)
3072 {
3073 int done = 0;
3074
3075 if (the_low_target.siginfo_fixup != NULL)
3076 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
3077
3078 /* If there was no callback, or the callback didn't do anything,
3079 then just do a straight memcpy. */
3080 if (!done)
3081 {
3082 if (direction == 1)
3083 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
3084 else
3085 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
3086 }
3087 }
3088
/* Read or write the current inferior's siginfo for the qXfer:siginfo
   object.  Exactly one of READBUF/WRITEBUF is non-NULL; OFFSET/LEN
   select a window into the architecture-layout siginfo.  Returns the
   number of bytes transferred, or -1 on error.  */
static int
linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
		    unsigned const char *writebuf, CORE_ADDR offset, int len)
{
  int pid;
  struct siginfo siginfo;
  char inf_siginfo[sizeof (struct siginfo)];

  if (current_inferior == NULL)
    return -1;

  pid = lwpid_of (get_thread_lwp (current_inferior));

  if (debug_threads)
    fprintf (stderr, "%s siginfo for lwp %d.\n",
	     readbuf != NULL ? "Reading" : "Writing",
	     pid);

  if (offset > sizeof (siginfo))
    return -1;

  /* Fetch the current siginfo even for writes, so that a partial
     write only modifies the selected bytes.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp the transfer to the object's size.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
	return -1;
    }

  return len;
}
3137
/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
   so we notice when children change state; as the handler for the
   sigsuspend in my_waitpid.  Runs in signal context, so it must stay
   async-signal-safe and must preserve errno for the interrupted
   code.  */

static void
sigchld_handler (int signo)
{
  int old_errno = errno;

  if (debug_threads)
    /* fprintf is not async-signal-safe, so call write directly.
       (Best-effort debug output; the return value is ignored.)  */
    write (2, "sigchld_handler\n", sizeof ("sigchld_handler\n") - 1);

  if (target_is_async_p ())
    async_file_mark (); /* trigger a linux_wait */

  errno = old_errno;
}
3156
/* Non-stop mode is always available on GNU/Linux.  */
static int
linux_supports_non_stop (void)
{
  const int supported = 1;

  return supported;
}
3162
/* Enable or disable async (event-loop driven) mode.  Creates or
   tears down the SIGCHLD event pipe and its event-loop handler.
   Returns the previously enabled state.  */
static int
linux_async (int enable)
{
  /* A live pipe read end means async mode is currently on.  */
  int previous = (linux_event_pipe[0] != -1);

  if (previous != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      /* Block SIGCHLD while switching, so sigchld_handler never sees
	 the pipe in a half-initialized state.  */
      sigprocmask (SIG_BLOCK, &mask, NULL);

      if (enable)
	{
	  if (pipe (linux_event_pipe) == -1)
	    fatal ("creating event pipe failed.");

	  fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
	  fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);

	  /* Register the event loop handler.  */
	  add_file_handler (linux_event_pipe[0],
			    handle_target_event, NULL);

	  /* Always trigger a linux_wait.  */
	  async_file_mark ();
	}
      else
	{
	  delete_file_handler (linux_event_pipe[0]);

	  close (linux_event_pipe[0]);
	  close (linux_event_pipe[1]);
	  linux_event_pipe[0] = -1;
	  linux_event_pipe[1] = -1;
	}

      sigprocmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}
3206
/* Turn non-stop mode on or off by registering with, or
   unregistering from, the event loop.  Always succeeds (returns
   0).  */
static int
linux_start_non_stop (int nonstop)
{
  /* Register or unregister from event-loop accordingly.  */
  linux_async (nonstop);

  return 0;
}
3214
/* Debugging multiple processes at once is always supported on
   GNU/Linux.  */
static int
linux_supports_multi_process (void)
{
  const int supported = 1;

  return supported;
}
3220
3221
/* Enumerate spufs IDs for process PID.  Scans /proc/PID/fd for open
   descriptors that are directories on an spufs mount; each such fd
   number is an SPU context ID.  IDs are emitted as 4-byte values;
   only those falling entirely inside the [OFFSET, OFFSET+LEN) window
   are written to BUF.  Returns the number of bytes written, or -1 if
   /proc/PID/fd cannot be opened.  */
static int
spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
{
  int pos = 0;
  int written = 0;
  char path[128];
  DIR *dir;
  struct dirent *entry;

  sprintf (path, "/proc/%ld/fd", pid);
  dir = opendir (path);
  if (!dir)
    return -1;

  rewinddir (dir);
  while ((entry = readdir (dir)) != NULL)
    {
      struct stat st;
      struct statfs stfs;
      int fd;

      /* Skip entries atoi maps to 0: ".", "..", non-numeric names,
	 and fd 0 itself.  */
      fd = atoi (entry->d_name);
      if (!fd)
	continue;

      sprintf (path, "/proc/%ld/fd/%d", pid, fd);
      if (stat (path, &st) != 0)
	continue;
      if (!S_ISDIR (st.st_mode))
	continue;

      /* Only directories on an spufs filesystem are SPU contexts.  */
      if (statfs (path, &stfs) != 0)
	continue;
      if (stfs.f_type != SPUFS_MAGIC)
	continue;

      if (pos >= offset && pos + 4 <= offset + len)
	{
	  *(unsigned int *)(buf + pos - offset) = fd;
	  written += 4;
	}
      pos += 4;
    }

  closedir (dir);
  return written;
}
3270
/* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
   object type, using the /proc file system.  An empty ANNEX requests
   the list of SPU context IDs (read-only); otherwise ANNEX names an
   entry under /proc/PID/fd to read or write.  Returns bytes
   transferred, 0 when seeking to OFFSET fails, or -1 on error.  */
static int
linux_qxfer_spu (const char *annex, unsigned char *readbuf,
		 unsigned const char *writebuf,
		 CORE_ADDR offset, int len)
{
  long pid = lwpid_of (get_thread_lwp (current_inferior));
  char buf[128];
  int fd = 0;
  int ret = 0;

  if (!writebuf && !readbuf)
    return -1;

  /* An empty annex means "enumerate the SPU context IDs".  */
  if (!*annex)
    {
      if (!readbuf)
	return -1;
      else
	return spu_enumerate_spu_ids (pid, readbuf, offset, len);
    }

  sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
  fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
  if (fd <= 0)
    return -1;

  if (offset != 0
      && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
    {
      close (fd);
      return 0;
    }

  if (writebuf)
    ret = write (fd, writebuf, (size_t) len);
  else
    ret = read (fd, readbuf, (size_t) len);

  close (fd);
  return ret;
}
3314
/* Return the core that thread PTID was last seen running on, by
   parsing the "processor" field (field 39) of
   /proc/PID/task/LWP/stat.  Returns -1 if the stat file cannot be
   opened or the field does not parse.  */
static int
linux_core_of_thread (ptid_t ptid)
{
  char filename[sizeof ("/proc//task//stat")
		 + 2 * 20 /* decimal digits for 2 numbers, max 2^64 bit each */
		 + 1];
  FILE *f;
  char *content = NULL;
  char *p;
  char *ts = 0;
  int content_read = 0;
  int i;
  int core;

  sprintf (filename, "/proc/%d/task/%ld/stat",
	   ptid_get_pid (ptid), ptid_get_lwp (ptid));
  f = fopen (filename, "r");
  if (!f)
    return -1;

  /* Slurp the whole file into CONTENT, growing in 1K chunks.
     NOTE(review): realloc's result is not checked for NULL.  */
  for (;;)
    {
      int n;
      content = realloc (content, content_read + 1024);
      n = fread (content + content_read, 1, 1024, f);
      content_read += n;
      if (n < 1024)
	{
	  content[content_read] = '\0';
	  break;
	}
    }

  /* The command name (second field) is parenthesized and may contain
     spaces, so scan past its closing parenthesis before tokenizing.
     NOTE(review): assumes the parentheses are present; strchr
     returning NULL on a malformed file would crash.  */
  p = strchr (content, '(');
  p = strchr (p, ')') + 2; /* skip ")" and a whitespace. */

  /* Skip ahead to the 39th field overall (the processor number).  */
  p = strtok_r (p, " ", &ts);
  for (i = 0; i != 36; ++i)
    p = strtok_r (NULL, " ", &ts);

  if (sscanf (p, "%d", &core) == 0)
    core = -1;

  free (content);
  fclose (f);

  return core;
}
3363
/* The Linux implementation of the target operations vector.  The
   entries are positional; a NULL slot marks an operation this
   configuration does not provide.  */
static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_insert_point,
  linux_remove_point,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
  /* read_offsets: only meaningful on uClinux (no-MMU) targets.  */
#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  linux_read_offsets,
#else
  NULL,
#endif
  /* get_tls_address: requires libthread_db support.  */
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,
#endif
  linux_qxfer_spu,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
  /* handle_monitor_command: requires libthread_db support.  */
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,
#endif
  linux_core_of_thread
};
3409
/* Ignore the realtime signal that glibc's LinuxThreads uses for thread
   cancellation, so that a stray delivery does not kill gdbserver.  */
static void
linux_init_signals (void)
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does. */
  signal (__SIGRTMIN+1, SIG_IGN);
#endif
}
3419
/* Module entry point: install the Linux target vector, register the
   architecture's breakpoint instruction, set up signal handling, probe
   the kernel's ptrace fork-tracing support, and (when regsets are in
   use) size the per-regset disable table.  */
void
initialize_low (void)
{
  struct sigaction sigchld_action;
  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (&linux_target_ops);
  set_breakpoint_data (the_low_target.breakpoint,
		       the_low_target.breakpoint_len);
  linux_init_signals ();
  linux_test_for_tracefork ();
#ifdef HAVE_LINUX_REGSETS
  /* Count the regsets (the target_regsets table is terminated by a
     negative size) and allocate one enable/disable flag per regset.  */
  for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
    ;
  disabled_regsets = xmalloc (num_regsets);
#endif

  /* SA_RESTART keeps system calls interrupted by SIGCHLD from failing
     with EINTR each time a child changes state.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);
}