/* linux-low.c (W_STOPCODE): Provide definition if missing.
   [binutils-gdb.git] / gdb / gdbserver / linux-low.c  */
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
3 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include "linux-low.h"
22
23 #include <sys/wait.h>
24 #include <stdio.h>
25 #include <sys/param.h>
26 #include <sys/ptrace.h>
27 #include <signal.h>
28 #include <sys/ioctl.h>
29 #include <fcntl.h>
30 #include <string.h>
31 #include <stdlib.h>
32 #include <unistd.h>
33 #include <errno.h>
34 #include <sys/syscall.h>
35 #include <sched.h>
36 #include <ctype.h>
37 #include <pwd.h>
38 #include <sys/types.h>
39 #include <dirent.h>
40 #include <sys/stat.h>
41 #include <sys/vfs.h>
42 #ifndef ELFMAG0
43 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
44 then ELFMAG0 will have been defined. If it didn't get included by
45 gdb_proc_service.h then including it will likely introduce a duplicate
46 definition of elf_fpregset_t. */
47 #include <elf.h>
48 #endif
49
50 #ifndef SPUFS_MAGIC
51 #define SPUFS_MAGIC 0x23c9b64e
52 #endif
53
54 #ifndef PTRACE_GETSIGINFO
55 # define PTRACE_GETSIGINFO 0x4202
56 # define PTRACE_SETSIGINFO 0x4203
57 #endif
58
59 #ifndef O_LARGEFILE
60 #define O_LARGEFILE 0
61 #endif
62
63 /* If the system headers did not provide the constants, hard-code the normal
64 values. */
65 #ifndef PTRACE_EVENT_FORK
66
67 #define PTRACE_SETOPTIONS 0x4200
68 #define PTRACE_GETEVENTMSG 0x4201
69
70 /* options set using PTRACE_SETOPTIONS */
71 #define PTRACE_O_TRACESYSGOOD 0x00000001
72 #define PTRACE_O_TRACEFORK 0x00000002
73 #define PTRACE_O_TRACEVFORK 0x00000004
74 #define PTRACE_O_TRACECLONE 0x00000008
75 #define PTRACE_O_TRACEEXEC 0x00000010
76 #define PTRACE_O_TRACEVFORKDONE 0x00000020
77 #define PTRACE_O_TRACEEXIT 0x00000040
78
79 /* Wait extended result codes for the above trace options. */
80 #define PTRACE_EVENT_FORK 1
81 #define PTRACE_EVENT_VFORK 2
82 #define PTRACE_EVENT_CLONE 3
83 #define PTRACE_EVENT_EXEC 4
84 #define PTRACE_EVENT_VFORK_DONE 5
85 #define PTRACE_EVENT_EXIT 6
86
87 #endif /* PTRACE_EVENT_FORK */
88
89 /* We can't always assume that this flag is available, but all systems
90 with the ptrace event handlers also have __WALL, so it's safe to use
91 in some contexts. */
92 #ifndef __WALL
93 #define __WALL 0x40000000 /* Wait for any child. */
94 #endif
95
96 #ifndef W_STOPCODE
97 #define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
98 #endif
99
100 #ifdef __UCLIBC__
101 #if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
102 #define HAS_NOMMU
103 #endif
104 #endif
105
/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
   representation of the thread ID.

   ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
   the same as the LWP ID.

   ``all_processes'' is keyed by the "overall process ID", which
   GNU/Linux calls tgid, "thread group ID".  */

struct inferior_list all_lwps;

/* A list of all unknown processes which receive stop signals.  Some other
   process will presumably claim each of these as forked children
   momentarily.  */

struct inferior_list stopped_pids;

/* Non-zero while an all-stop sequence is in progress; several code
   paths below behave differently while threads are being stopped.
   FIXME this is a bit of a hack, and could be removed.  */
int stopping_threads;

/* FIXME make into a target method? */
int using_threads = 1;

/* This flag is true iff we've just created or attached to our first
   inferior but it has not stopped yet.  As soon as it does, we need
   to call the low target's arch_setup callback.  Doing this only on
   the first inferior avoids reinializing the architecture on every
   inferior, and avoids messing with the register caches of the
   already running inferiors.  NOTE: this assumes all inferiors under
   control of gdbserver have the same architecture.  */
static int new_inferior;

/* Forward declarations for functions defined later in this file.  */
static void linux_resume_one_lwp (struct lwp_info *lwp,
				  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (void);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static int check_removed_breakpoint (struct lwp_info *event_child);
static void *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int linux_core_of_thread (ptid_t ptid);

/* A signal queued for later delivery to an LWP, kept as a singly
   linked list through PREV.  */
struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* Types of the third and data arguments passed to ptrace.  */
#define PTRACE_ARG3_TYPE long
#define PTRACE_XFER_TYPE long

#ifdef HAVE_LINUX_REGSETS
/* Per-regset flags recording which regsets the kernel rejected; sized
   and reset when the first inferior stops (see new_inferior).  */
static char *disabled_regsets;
static int num_regsets;
#endif

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct inferior_list_entry *entry);
static void wait_for_sigstop (struct inferior_list_entry *entry);
173
174 /* Accepts an integer PID; Returns a string representing a file that
175 can be opened to get info for the child process.
176 Space for the result is malloc'd, caller must free. */
177
/* Accepts an integer PID; Returns a string representing a file that
   can be opened to get info for the child process.
   Space for the result is malloc'd, caller must free.

   On success this is the target of the /proc/PID/exe symlink; if the
   link cannot be read, the /proc path itself is returned instead.  */

char *
linux_child_pid_to_exec_file (int pid)
{
  char *name1, *name2;

  name1 = xmalloc (MAXPATHLEN);
  name2 = xmalloc (MAXPATHLEN);
  memset (name2, 0, MAXPATHLEN);

  snprintf (name1, MAXPATHLEN, "/proc/%d/exe", pid);

  /* readlink does not NUL-terminate its output; read at most
     MAXPATHLEN - 1 bytes so the zero left by the memset above always
     terminates the result.  (Reading MAXPATHLEN bytes could fill the
     whole buffer and return an unterminated string.)  */
  if (readlink (name1, name2, MAXPATHLEN - 1) > 0)
    {
      free (name1);
      return name2;
    }
  else
    {
      free (name2);
      return name1;
    }
}
199
/* Return non-zero if HEADER describes a 64-bit ELF object: the four
   magic bytes must all match and the class byte must be ELFCLASS64.  */

static int
elf_64_header_p (const Elf64_Ehdr *header)
{
  if (memcmp (header->e_ident, ELFMAG, SELFMAG) != 0)
    return 0;

  return header->e_ident[EI_CLASS] == ELFCLASS64;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

int
elf_64_file_p (const char *file)
{
  Elf64_Ehdr header;
  int fd, result;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  /* A short read means the file cannot be a complete ELF header, so
     treat it as "not 64-bit ELF".  */
  if (read (fd, &header, sizeof (header)) == sizeof (header))
    result = elf_64_header_p (&header);
  else
    result = 0;

  close (fd);
  return result;
}
235
236 static void
237 delete_lwp (struct lwp_info *lwp)
238 {
239 remove_thread (get_lwp_thread (lwp));
240 remove_inferior (&all_lwps, &lwp->head);
241 free (lwp->arch_private);
242 free (lwp);
243 }
244
245 /* Add a process to the common process list, and set its private
246 data. */
247
248 static struct process_info *
249 linux_add_process (int pid, int attached)
250 {
251 struct process_info *proc;
252
253 /* Is this the first process? If so, then set the arch. */
254 if (all_processes.head == NULL)
255 new_inferior = 1;
256
257 proc = add_process (pid, attached);
258 proc->private = xcalloc (1, sizeof (*proc->private));
259
260 if (the_low_target.new_process != NULL)
261 proc->private->arch_private = the_low_target.new_process ();
262
263 return proc;
264 }
265
266 /* Remove a process from the common process list,
267 also freeing all private data. */
268
269 static void
270 linux_remove_process (struct process_info *process)
271 {
272 struct process_info_private *priv = process->private;
273
274 free (priv->arch_private);
275 free (priv);
276 remove_process (process);
277 }
278
/* Wrapper function for waitpid which handles EINTR, and emulates
   __WALL for systems where that is not available.

   PID, STATUS and FLAGS are as for waitpid.  Returns the PID of the
   child that changed state, 0 when WNOHANG was requested and nothing
   was ready, or -1 with errno set on error.  */

static int
my_waitpid (int pid, int *status, int flags)
{
  int ret, out_errno;

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);

  if (flags & __WALL)
    {
      sigset_t block_mask, org_mask, wake_mask;
      int wnohang;

      /* Emulate __WALL by alternating between clone (__WCLONE) and
	 non-clone waits, each done with WNOHANG so neither flavor can
	 block while the other has a pending child.  */
      wnohang = (flags & WNOHANG) != 0;
      flags &= ~(__WALL | __WCLONE);
      flags |= WNOHANG;

      /* Block all signals while here.  This avoids knowing about
	 LinuxThread's signals.  */
      sigfillset (&block_mask);
      sigprocmask (SIG_BLOCK, &block_mask, &org_mask);

      /* ... except during the sigsuspend below.  */
      sigemptyset (&wake_mask);

      while (1)
	{
	  /* Since all signals are blocked, there's no need to check
	     for EINTR here.  */
	  ret = waitpid (pid, status, flags);
	  out_errno = errno;

	  if (ret == -1 && out_errno != ECHILD)
	    break;
	  else if (ret > 0)
	    break;

	  if (flags & __WCLONE)
	    {
	      /* We've tried both flavors now.  If WNOHANG is set,
		 there's nothing else to do, just bail out.  */
	      if (wnohang)
		break;

	      if (debug_threads)
		fprintf (stderr, "blocking\n");

	      /* Block waiting for signals.  */
	      sigsuspend (&wake_mask);
	    }

	  /* Try the other flavor on the next iteration.  */
	  flags ^= __WCLONE;
	}

      /* Restore the caller's signal mask.  */
      sigprocmask (SIG_SETMASK, &org_mask, NULL);
    }
  else
    {
      /* No __WALL emulation needed; just retry on EINTR.  */
      do
	ret = waitpid (pid, status, flags);
      while (ret == -1 && errno == EINTR);
      out_errno = errno;
    }

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
	     pid, flags, status ? *status : -1, ret);

  /* The fprintf calls above may have clobbered errno; hand the caller
     the errno from the final waitpid.  */
  errno = out_errno;
  return ret;
}
353
/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).

   EVENT_CHILD is the LWP that reported the event; WSTAT is its raw
   wait status, whose upper bits carry the PTRACE_EVENT_* code.  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  /* The PTRACE_EVENT_* code is stored in bits 16 and up.  */
  int event = wstat >> 16;
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status = W_STOPCODE (SIGSTOP);

      /* Fetch the new thread's LWP ID from the event message.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (! pull_pid_from_list (&stopped_pids, new_pid))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      /* Request clone notifications from the new thread too.  */
      ptrace (PTRACE_SETOPTIONS, new_pid, 0, PTRACE_O_TRACECLONE);

      ptid = ptid_build (pid_of (event_child), new_pid, 0);
      new_lwp = (struct lwp_info *) add_lwp (ptid);
      add_thread (ptid, new_lwp);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
	{
	  if (! stopping_threads)
	    linux_resume_one_lwp (new_lwp, 0, 0, NULL);
	}
      else
	{
	  new_lwp->stop_expected = 1;
	  if (stopping_threads)
	    {
	      /* Stash the signal so it isn't lost while all threads
		 are being stopped.  */
	      new_lwp->status_pending_p = 1;
	      new_lwp->status_pending = status;
	    }
	  else
	    /* Pass the signal on.  This is what GDB does - except
	       shouldn't we really report it instead?  */
	    linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
	}

      /* Always resume the current thread.  If we are stopping
	 threads, it will have a pending SIGSTOP; we may as well
	 collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}
428
429 /* This function should only be called if the process got a SIGTRAP.
430 The SIGTRAP could mean several things.
431
432 On i386, where decr_pc_after_break is non-zero:
433 If we were single-stepping this process using PTRACE_SINGLESTEP,
434 we will get only the one SIGTRAP (even if the instruction we
435 stepped over was a breakpoint). The value of $eip will be the
436 next instruction.
437 If we continue the process using PTRACE_CONT, we will get a
438 SIGTRAP when we hit a breakpoint. The value of $eip will be
439 the instruction after the breakpoint (i.e. needs to be
440 decremented). If we report the SIGTRAP to GDB, we must also
441 report the undecremented PC. If we cancel the SIGTRAP, we
442 must resume at the decremented PC.
443
444 (Presumably, not yet tested) On a non-decr_pc_after_break machine
445 with hardware or kernel single-step:
446 If we single-step over a breakpoint instruction, our PC will
447 point at the following instruction. If we continue and hit a
448 breakpoint instruction, our PC will point at the breakpoint
449 instruction. */
450
451 static CORE_ADDR
452 get_stop_pc (void)
453 {
454 CORE_ADDR stop_pc = (*the_low_target.get_pc) ();
455
456 if (! get_thread_lwp (current_inferior)->stepping)
457 stop_pc -= the_low_target.decr_pc_after_break;
458
459 if (debug_threads)
460 fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);
461
462 return stop_pc;
463 }
464
465 static void *
466 add_lwp (ptid_t ptid)
467 {
468 struct lwp_info *lwp;
469
470 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
471 memset (lwp, 0, sizeof (*lwp));
472
473 lwp->head.id = ptid;
474
475 if (the_low_target.new_thread != NULL)
476 lwp->arch_private = the_low_target.new_thread ();
477
478 add_inferior_to_list (&all_lwps, &lwp->head);
479
480 return lwp;
481 }
482
/* Start an inferior process and returns its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  /* MMU-less systems cannot fork; vfork shares the address space
     until the exec below.  */
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      /* Child: ask to be traced by the parent, then exec PROGRAM.  */
      ptrace (PTRACE_TRACEME, 0, 0, 0);

#ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      /* Put the inferior in its own process group.  */
      setpgid (0, 0);

      execv (program, allargs);
      /* If PROGRAM has no slash, retry with a PATH search.  */
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  linux_add_process (pid, 0);

  /* The initial thread's LWP ID equals the process ID.  */
  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  add_thread (ptid, new_lwp);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}
530
/* Attach to an inferior process.

   LWPID is the LWP to attach to.  INITIAL is non-zero when this is
   the first LWP of a new inferior: attach failure is then a hard
   error and the ptid is built from LWPID alone.  When INITIAL is
   zero, LWPID is another thread of the current inferior and failure
   only produces a warning.  */

static void
linux_attach_lwp_1 (unsigned long lwpid, int initial)
{
  ptid_t ptid;
  struct lwp_info *new_lwp;

  if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
    {
      if (!initial)
	{
	  /* If we fail to attach to an LWP, just warn.  */
	  fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
		   strerror (errno), errno);
	  fflush (stderr);
	  return;
	}
      else
	/* If we fail to attach to a process, report an error.  */
	error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid,
	       strerror (errno), errno);
    }

  if (initial)
    /* NOTE/FIXME: This lwp might have not been the tgid.  */
    ptid = ptid_build (lwpid, lwpid, 0);
  else
    {
      /* Note that extracting the pid from the current inferior is
	 safe, since we're always called in the context of the same
	 process as this new thread.  */
      int pid = pid_of (get_thread_lwp (current_inferior));
      ptid = ptid_build (pid, lwpid, 0);
    }

  new_lwp = (struct lwp_info *) add_lwp (ptid);
  add_thread (ptid, new_lwp);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the process.
	This is handled below by setting stop_expected = 1.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach clear stop_expected after
	we return.
	??? If the process already has several threads we leave the other
	threads running.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  if (! stopping_threads)
    new_lwp->stop_expected = 1;
}
608
/* Attach to LWPID as another thread of the current inferior's
   process; public wrapper around linux_attach_lwp_1 with INITIAL == 0
   (attach failure only warns).  */

void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}
614
615 int
616 linux_attach (unsigned long pid)
617 {
618 struct lwp_info *lwp;
619
620 linux_attach_lwp_1 (pid, 1);
621
622 linux_add_process (pid, 1);
623
624 if (!non_stop)
625 {
626 /* Don't ignore the initial SIGSTOP if we just attached to this
627 process. It will be collected by wait shortly. */
628 lwp = (struct lwp_info *) find_inferior_id (&all_lwps,
629 ptid_build (pid, pid, 0));
630 lwp->stop_expected = 0;
631 }
632
633 return 0;
634 }
635
636 struct counter
637 {
638 int pid;
639 int count;
640 };
641
642 static int
643 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
644 {
645 struct counter *counter = args;
646
647 if (ptid_get_pid (entry->id) == counter->pid)
648 {
649 if (++counter->count > 1)
650 return 1;
651 }
652
653 return 0;
654 }
655
656 static int
657 last_thread_of_process_p (struct thread_info *thread)
658 {
659 ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
660 int pid = ptid_get_pid (ptid);
661 struct counter counter = { pid , 0 };
662
663 return (find_inferior (&all_threads,
664 second_thread_of_pid_p, &counter) == NULL);
665 }
666
/* Kill the inferior lwp.

   find_inferior callback.  ENTRY is a thread; ARGS points to the pid
   of the process being killed.  Kills each LWP of that process except
   the thread-group leader (see below).  Always returns 0 so the walk
   visits every thread.  */

static int
linux_kill_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int wstat;
  int pid = * (int *) args;

  /* Skip threads of other processes.  */
  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (lwp) == pid)
    {
      if (debug_threads)
	fprintf (stderr, "lkop: is last of process %s\n",
		 target_pid_to_str (entry->id));
      return 0;
    }

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  if (!lwp->stopped)
    send_sigstop (&lwp->head);

  do
    {
      ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (pid > 0 && WIFSTOPPED (wstat));

  return 0;
}
708
/* Kill process PID and all of its LWPs, then remove it from our
   bookkeeping.  Returns 0 on success, -1 if PID is unknown.  */

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;
  struct thread_info *thread;
  int wstat;
  int lwpid;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Kill every LWP except the thread-group leader.  */
  find_inferior (&all_threads, linux_kill_one_lwp, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));
  thread = get_lwp_thread (lwp);

  if (debug_threads)
    fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
	     lwpid_of (lwp), pid);

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  if (!lwp->stopped)
    send_sigstop (&lwp->head);

  do
    {
      ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (lwpid > 0 && WIFSTOPPED (wstat));

#ifdef USE_THREAD_DB
  thread_db_free (process, 0);
#endif
  delete_lwp (lwp);
  linux_remove_process (process);
  return 0;
}
753
/* find_inferior callback: detach from one LWP of the process whose
   pid ARGS points to.  Stops the LWP if needed, collects any pending
   SIGSTOP, flushes register changes, and PTRACE_DETACHes.  Always
   returns 0 so every thread is visited.  */

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  /* Skip threads of other processes.  */
  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If we're detaching from a running inferior, make sure it is
     stopped first, as PTRACE_DETACH will not work otherwise.  */
  if (!lwp->stopped)
    {
      int lwpid = lwpid_of (lwp);

      stopping_threads = 1;
      send_sigstop (&lwp->head);

      /* If this detects a new thread through a clone event, the new
	 thread is appended to the end of the lwp list, so we'll
	 eventually detach from it.  */
      wait_for_sigstop (&lwp->head);
      stopping_threads = 0;

      /* If LWP exits while we're trying to stop it, there's nothing
	 left to do.  */
      lwp = find_lwp_pid (pid_to_ptid (lwpid));
      if (lwp == NULL)
	return 0;
    }

  /* Make sure the process isn't stopped at a breakpoint that's
     no longer there.  */
  check_removed_breakpoint (lwp);

  /* If this process is stopped but is expecting a SIGSTOP, then make
     sure we take care of that now.  This isn't absolutely guaranteed
     to collect the SIGSTOP, but is fairly likely to.  */
  if (lwp->stop_expected)
    {
      int wstat;
      /* Clear stop_expected, so that the SIGSTOP will be reported.  */
      lwp->stop_expected = 0;
      if (lwp->stopped)
	linux_resume_one_lwp (lwp, 0, 0, NULL);
      linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
			   get_lwp_thread (lwp));

  /* Finally, let it resume.  */
  ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0);

  delete_lwp (lwp);
  return 0;
}
813
814 static int
815 any_thread_of (struct inferior_list_entry *entry, void *args)
816 {
817 int *pid_p = args;
818
819 if (ptid_get_pid (entry->id) == *pid_p)
820 return 1;
821
822 return 0;
823 }
824
/* Detach from process PID, releasing all of its LWPs and removing it
   from our bookkeeping.  Returns 0 on success, -1 if PID is
   unknown.  */

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

#ifdef USE_THREAD_DB
  thread_db_free (process, 1);
#endif

  /* The per-LWP detach callbacks below need a current inferior from
     this process.  */
  current_inferior =
    (struct thread_info *) find_inferior (&all_threads, any_thread_of, &pid);

  /* Remove our breakpoints before the inferior runs free.  */
  delete_all_breakpoints ();
  find_inferior (&all_threads, linux_detach_one_lwp, &pid);
  linux_remove_process (process);
  return 0;
}
846
847 static void
848 linux_join (int pid)
849 {
850 int status, ret;
851 struct process_info *process;
852
853 process = find_process_pid (pid);
854 if (process == NULL)
855 return;
856
857 do {
858 ret = my_waitpid (pid, &status, 0);
859 if (WIFEXITED (status) || WIFSIGNALED (status))
860 break;
861 } while (ret != -1 || errno != ECHILD);
862 }
863
864 /* Return nonzero if the given thread is still alive. */
865 static int
866 linux_thread_alive (ptid_t ptid)
867 {
868 struct lwp_info *lwp = find_lwp_pid (ptid);
869
870 /* We assume we always know if a thread exits. If a whole process
871 exited but we still haven't been able to report it to GDB, we'll
872 hold on to the last lwp of the dead process. */
873 if (lwp != NULL)
874 return !lwp->dead;
875 else
876 return 0;
877 }
878
/* Return nonzero if this process stopped at a breakpoint which
   no longer appears to be inserted.  Also adjust the PC
   appropriately to resume where the breakpoint used to be.

   EVENT_CHILD must be an LWP with a pending stop; if that stop was
   not a breakpoint hit (pending_is_breakpoint clear), do nothing and
   return 0.  On return 1, the pending status has been consumed.  */

static int
check_removed_breakpoint (struct lwp_info *event_child)
{
  CORE_ADDR stop_pc;
  struct thread_info *saved_inferior;

  if (event_child->pending_is_breakpoint == 0)
    return 0;

  if (debug_threads)
    fprintf (stderr, "Checking for breakpoint in lwp %ld.\n",
	     lwpid_of (event_child));

  /* get_stop_pc and breakpoint_at operate on the current inferior;
     temporarily switch to EVENT_CHILD's thread.  */
  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (event_child);

  stop_pc = get_stop_pc ();

  /* If the PC has changed since we stopped, then we shouldn't do
     anything.  This happens if, for instance, GDB handled the
     decr_pc_after_break subtraction itself.  */
  if (stop_pc != event_child->pending_stop_pc)
    {
      if (debug_threads)
	fprintf (stderr, "Ignoring, PC was changed.  Old PC was 0x%08llx\n",
		 event_child->pending_stop_pc);

      event_child->pending_is_breakpoint = 0;
      current_inferior = saved_inferior;
      return 0;
    }

  /* If the breakpoint is still there, we will report hitting it.  */
  if ((*the_low_target.breakpoint_at) (stop_pc))
    {
      if (debug_threads)
	fprintf (stderr, "Ignoring, breakpoint is still present.\n");
      current_inferior = saved_inferior;
      return 0;
    }

  if (debug_threads)
    fprintf (stderr, "Removed breakpoint.\n");

  /* For decr_pc_after_break targets, here is where we perform the
     decrement.  We go immediately from this function to resuming,
     and can not safely call get_stop_pc () again.  */
  if (the_low_target.set_pc != NULL)
    {
      if (debug_threads)
	fprintf (stderr, "Set pc to 0x%lx\n", (long) stop_pc);
      (*the_low_target.set_pc) (stop_pc);
    }

  /* We consumed the pending SIGTRAP.  */
  event_child->pending_is_breakpoint = 0;
  event_child->status_pending_p = 0;
  event_child->status_pending = 0;

  current_inferior = saved_inferior;
  return 1;
}
944
/* Return 1 if this lwp has an interesting status pending.  This
   function may silently resume an inferior lwp.

   find_inferior callback.  ENTRY is the LWP; ARG points to the ptid
   filter: minus_one_ptid matches everything, otherwise only LWPs of
   the same process are considered.  */

static int
status_pending_p (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  ptid_t ptid = * (ptid_t *) arg;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
    return 0;

  if (lwp->status_pending_p && !lwp->suspended)
    if (check_removed_breakpoint (lwp))
      {
	/* This thread was stopped at a breakpoint, and the breakpoint
	   is now gone.  We were told to continue (or step...) all threads,
	   so GDB isn't trying to single-step past this breakpoint.
	   So instead of reporting the old SIGTRAP, pretend we got to
	   the breakpoint just after it was removed instead of just
	   before; resume the process.  */
	linux_resume_one_lwp (lwp, 0, 0, NULL);
	return 0;
      }

  /* Re-test: check_removed_breakpoint may have consumed the pending
     status above.  */
  return (lwp->status_pending_p && !lwp->suspended);
}
974
975 static int
976 same_lwp (struct inferior_list_entry *entry, void *data)
977 {
978 ptid_t ptid = *(ptid_t *) data;
979 int lwp;
980
981 if (ptid_get_lwp (ptid) != 0)
982 lwp = ptid_get_lwp (ptid);
983 else
984 lwp = ptid_get_pid (ptid);
985
986 if (ptid_get_lwp (entry->id) == lwp)
987 return 1;
988
989 return 0;
990 }
991
/* Return the lwp_info whose LWP ID matches PTID (or PTID's pid field
   when the lwp field is zero — see same_lwp), or NULL if no such LWP
   is in all_lwps.  */

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
}
997
/* Wait for a status change on an LWP matching PTID (any LWP when PTID
   is minus_one_ptid).  The raw wait status is stored in *WSTATP;
   OPTIONS is passed to waitpid with __WALL added.  Returns the
   stopped child's lwp_info, or NULL when WNOHANG was requested and no
   child was ready.  Stops from PIDs we don't know are recorded in
   stopped_pids (for handle_extended_wait) and the wait retried.  */

static struct lwp_info *
linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
{
  int ret;
  int to_wait_for = -1;
  struct lwp_info *child = NULL;

  if (debug_threads)
    fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));

  if (ptid_equal (ptid, minus_one_ptid))
    to_wait_for = -1;			/* any child */
  else
    to_wait_for = ptid_get_lwp (ptid);	/* this lwp only */

  options |= __WALL;

retry:

  ret = my_waitpid (to_wait_for, wstatp, options);
  if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
    return NULL;
  else if (ret == -1)
    perror_with_name ("waitpid");

  if (debug_threads
      && (!WIFSTOPPED (*wstatp)
	  || (WSTOPSIG (*wstatp) != 32
	      && WSTOPSIG (*wstatp) != 33)))
    fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);

  child = find_lwp_pid (pid_to_ptid (ret));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
     was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (*wstatp))
    {
      add_pid_to_list (&stopped_pids, ret);
      goto retry;
    }
  else if (child == NULL)
    goto retry;

  child->stopped = 1;
  child->pending_is_breakpoint = 0;

  child->last_status = *wstatp;

  /* Architecture-specific setup after inferior is running.
     This needs to happen after we have attached to the inferior
     and it is stopped for the first time, but before we access
     any inferior registers.  */
  if (new_inferior)
    {
      the_low_target.arch_setup ();
#ifdef HAVE_LINUX_REGSETS
      memset (disabled_regsets, 0, num_regsets);
#endif
      new_inferior = 0;
    }

  if (debug_threads
      && WIFSTOPPED (*wstatp)
      && the_low_target.get_pc != NULL)
    {
      struct thread_info *saved_inferior = current_inferior;
      CORE_ADDR pc;

      /* get_pc reads the current inferior; switch temporarily.  */
      current_inferior = (struct thread_info *)
	find_inferior_id (&all_threads, child->head.id);
      pc = (*the_low_target.get_pc) ();
      fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
      current_inferior = saved_inferior;
    }

  return child;
}
1077
1078 /* Wait for an event from child PID. If PID is -1, wait for any
1079 child. Store the stop status through the status pointer WSTAT.
1080 OPTIONS is passed to the waitpid call. Return 0 if no child stop
1081 event was found and OPTIONS contains WNOHANG. Return the PID of
1082 the stopped child otherwise. */
1083
static int
linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options)
{
  CORE_ADDR stop_pc;
  struct lwp_info *event_child = NULL;
  int bp_status;
  struct lwp_info *requested_child = NULL;

  /* Check for a lwp with a pending status. */
  /* It is possible that the user changed the pending task's registers since
     it stopped. We correctly handle the change of PC if we hit a breakpoint
     (in check_removed_breakpoint); signals should be reported anyway. */

  if (ptid_equal (ptid, minus_one_ptid)
      || ptid_equal (pid_to_ptid (ptid_get_pid (ptid)), ptid))
    {
      /* Waiting for any LWP (or any LWP of a given process): pick the
	 first one with a pending status, if any.  */
      event_child = (struct lwp_info *)
	find_inferior (&all_lwps, status_pending_p, &ptid);
      if (debug_threads && event_child)
	fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
    }
  else
    {
      /* Waiting for a specific LWP: only its own pending status counts.
	 check_removed_breakpoint may discard a pending breakpoint event
	 if GDB has since removed the breakpoint.  */
      requested_child = find_lwp_pid (ptid);
      if (requested_child->status_pending_p
	  && !check_removed_breakpoint (requested_child))
	event_child = requested_child;
    }

  if (event_child != NULL)
    {
      if (debug_threads)
	fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
		 lwpid_of (event_child), event_child->status_pending);
      /* Consume the stored status and report it without waiting.  */
      *wstat = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      current_inferior = get_lwp_thread (event_child);
      return lwpid_of (event_child);
    }

  /* We only enter this loop if no process has a pending wait status. Thus
     any action taken in response to a wait status inside this loop is
     responding as soon as we detect the status, not after any pending
     events. */
  while (1)
    {
      event_child = linux_wait_for_lwp (ptid, wstat, options);

      if ((options & WNOHANG) && event_child == NULL)
	return 0;

      if (event_child == NULL)
	error ("event from unknown child");

      /* NOTE: this sets the global current thread as a side effect.  */
      current_inferior = get_lwp_thread (event_child);

      /* Check for thread exit. */
      if (! WIFSTOPPED (*wstat))
	{
	  if (debug_threads)
	    fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));

	  /* If the last thread is exiting, just return. */
	  if (last_thread_of_process_p (current_inferior))
	    {
	      if (debug_threads)
		fprintf (stderr, "LWP %ld is last lwp of process\n",
			 lwpid_of (event_child));
	      return lwpid_of (event_child);
	    }

	  delete_lwp (event_child);

	  if (!non_stop)
	    {
	      current_inferior = (struct thread_info *) all_threads.head;
	      if (debug_threads)
		fprintf (stderr, "Current inferior is now %ld\n",
			 lwpid_of (get_thread_lwp (current_inferior)));
	    }
	  else
	    {
	      current_inferior = NULL;
	      if (debug_threads)
		fprintf (stderr, "Current inferior is now <NULL>\n");
	    }

	  /* If we were waiting for this particular child to do something...
	     well, it did something. */
	  if (requested_child != NULL)
	    return lwpid_of (event_child);

	  /* Wait for a more interesting event. */
	  continue;
	}

      /* Lazily enable extended ptrace events (PTRACE_O_TRACECLONE)
	 for this LWP, now that it is known to be stopped.  */
      if (event_child->must_set_ptrace_flags)
	{
	  ptrace (PTRACE_SETOPTIONS, lwpid_of (event_child),
		  0, PTRACE_O_TRACECLONE);
	  event_child->must_set_ptrace_flags = 0;
	}

      /* A SIGSTOP we queued ourselves (see send_sigstop): swallow it
	 and resume the LWP as it was.  */
      if (WIFSTOPPED (*wstat)
	  && WSTOPSIG (*wstat) == SIGSTOP
	  && event_child->stop_expected)
	{
	  if (debug_threads)
	    fprintf (stderr, "Expected stop.\n");
	  event_child->stop_expected = 0;
	  linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
	  continue;
	}

      /* A SIGTRAP with a nonzero high word is a ptrace extended event
	 (e.g. a clone notification).  */
      if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
	  && *wstat >> 16 != 0)
	{
	  handle_extended_wait (event_child, *wstat);
	  continue;
	}

      /* If GDB is not interested in this signal, don't stop other
	 threads, and don't report it to GDB. Just resume the
	 inferior right away. We do this for threading-related
	 signals as well as any that GDB specifically requested we
	 ignore. But never ignore SIGSTOP if we sent it ourselves,
	 and do not ignore signals when stepping - they may require
	 special handling to skip the signal handler. */
      /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
	 thread library? */
      if (WIFSTOPPED (*wstat)
	  && !event_child->stepping
	  && (
#if defined (USE_THREAD_DB) && defined (__SIGRTMIN)
	      (current_process ()->private->thread_db != NULL
	       && (WSTOPSIG (*wstat) == __SIGRTMIN
		   || WSTOPSIG (*wstat) == __SIGRTMIN + 1))
	      ||
#endif
	      (pass_signals[target_signal_from_host (WSTOPSIG (*wstat))]
	       && (WSTOPSIG (*wstat) != SIGSTOP || !stopping_threads))))
	{
	  siginfo_t info, *info_p;

	  if (debug_threads)
	    fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
		     WSTOPSIG (*wstat), lwpid_of (event_child));

	  /* Re-deliver the original siginfo along with the signal, if
	     the kernel lets us read it.  */
	  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
	    info_p = &info;
	  else
	    info_p = NULL;
	  linux_resume_one_lwp (event_child,
				event_child->stepping,
				WSTOPSIG (*wstat), info_p);
	  continue;
	}

      /* If this event was not handled above, and is not a SIGTRAP, report
	 it. */
      if (!WIFSTOPPED (*wstat) || WSTOPSIG (*wstat) != SIGTRAP)
	return lwpid_of (event_child);

      /* If this target does not support breakpoints, we simply report the
	 SIGTRAP; it's of no concern to us. */
      if (the_low_target.get_pc == NULL)
	return lwpid_of (event_child);

      stop_pc = get_stop_pc ();

      /* bp_reinsert will only be set if we were single-stepping.
	 Notice that we will resume the process after hitting
	 a gdbserver breakpoint; single-stepping to/over one
	 is not supported (yet). */
      if (event_child->bp_reinsert != 0)
	{
	  if (debug_threads)
	    fprintf (stderr, "Reinserted breakpoint.\n");
	  reinsert_breakpoint (event_child->bp_reinsert);
	  event_child->bp_reinsert = 0;

	  /* Clear the single-stepping flag and SIGTRAP as we resume. */
	  linux_resume_one_lwp (event_child, 0, 0, NULL);
	  continue;
	}

      bp_status = check_breakpoints (stop_pc);

      if (bp_status != 0)
	{
	  if (debug_threads)
	    fprintf (stderr, "Hit a gdbserver breakpoint.\n");

	  /* We hit one of our own breakpoints. We mark it as a pending
	     breakpoint, so that check_removed_breakpoint () will do the PC
	     adjustment for us at the appropriate time. */
	  event_child->pending_is_breakpoint = 1;
	  event_child->pending_stop_pc = stop_pc;

	  /* We may need to put the breakpoint back. We continue in the event
	     loop instead of simply replacing the breakpoint right away,
	     in order to not lose signals sent to the thread that hit the
	     breakpoint. Unfortunately this increases the window where another
	     thread could sneak past the removed breakpoint. For the current
	     use of server-side breakpoints (thread creation) this is
	     acceptable; but it needs to be considered before this breakpoint
	     mechanism can be used in more general ways. For some breakpoints
	     it may be necessary to stop all other threads, but that should
	     be avoided where possible.

	     If breakpoint_reinsert_addr is NULL, that means that we can
	     use PTRACE_SINGLESTEP on this platform. Uninsert the breakpoint,
	     mark it for reinsertion, and single-step.

	     Otherwise, call the target function to figure out where we need
	     our temporary breakpoint, create it, and continue executing this
	     process. */

	  /* NOTE: we're lifting breakpoints in non-stop mode. This
	     is currently only used for thread event breakpoints, so
	     it isn't that bad as long as we have PTRACE_EVENT_CLONE
	     events. */
	  if (bp_status == 2)
	    /* No need to reinsert. */
	    linux_resume_one_lwp (event_child, 0, 0, NULL);
	  else if (the_low_target.breakpoint_reinsert_addr == NULL)
	    {
	      event_child->bp_reinsert = stop_pc;
	      uninsert_breakpoint (stop_pc);
	      linux_resume_one_lwp (event_child, 1, 0, NULL);
	    }
	  else
	    {
	      reinsert_breakpoint_by_bp
		(stop_pc, (*the_low_target.breakpoint_reinsert_addr) ());
	      linux_resume_one_lwp (event_child, 0, 0, NULL);
	    }

	  continue;
	}

      if (debug_threads)
	fprintf (stderr, "Hit a non-gdbserver breakpoint.\n");

      /* If we were single-stepping, we definitely want to report the
	 SIGTRAP. Although the single-step operation has completed,
	 do not clear the stepping flag yet; we need to check it
	 in wait_for_sigstop. */
      if (event_child->stepping)
	return lwpid_of (event_child);

      /* A SIGTRAP that we can't explain. It may have been a breakpoint.
	 Check if it is a breakpoint, and if so mark the process information
	 accordingly. This will handle both the necessary fiddling with the
	 PC on decr_pc_after_break targets and suppressing extra threads
	 hitting a breakpoint if two hit it at once and then GDB removes it
	 after the first is reported. Arguably it would be better to report
	 multiple threads hitting breakpoints simultaneously, but the current
	 remote protocol does not allow this. */
      if ((*the_low_target.breakpoint_at) (stop_pc))
	{
	  event_child->pending_is_breakpoint = 1;
	  event_child->pending_stop_pc = stop_pc;
	}

      return lwpid_of (event_child);
    }

  /* NOTREACHED */
  return 0;
}
1356
1357 static int
1358 linux_wait_for_event (ptid_t ptid, int *wstat, int options)
1359 {
1360 ptid_t wait_ptid;
1361
1362 if (ptid_is_pid (ptid))
1363 {
1364 /* A request to wait for a specific tgid. This is not possible
1365 with waitpid, so instead, we wait for any child, and leave
1366 children we're not interested in right now with a pending
1367 status to report later. */
1368 wait_ptid = minus_one_ptid;
1369 }
1370 else
1371 wait_ptid = ptid;
1372
1373 while (1)
1374 {
1375 int event_pid;
1376
1377 event_pid = linux_wait_for_event_1 (wait_ptid, wstat, options);
1378
1379 if (event_pid > 0
1380 && ptid_is_pid (ptid) && ptid_get_pid (ptid) != event_pid)
1381 {
1382 struct lwp_info *event_child = find_lwp_pid (pid_to_ptid (event_pid));
1383
1384 if (! WIFSTOPPED (*wstat))
1385 mark_lwp_dead (event_child, *wstat);
1386 else
1387 {
1388 event_child->status_pending_p = 1;
1389 event_child->status_pending = *wstat;
1390 }
1391 }
1392 else
1393 return event_pid;
1394 }
1395 }
1396
1397 /* Wait for process, returns status. */
1398
static ptid_t
linux_wait_1 (ptid_t ptid,
	      struct target_waitstatus *ourstatus, int target_options)
{
  int w;
  struct thread_info *thread = NULL;
  struct lwp_info *lwp = NULL;
  int options;
  int pid;

  /* Translate generic target options into linux options. */
  options = __WALL;
  if (target_options & TARGET_WNOHANG)
    options |= WNOHANG;

retry:
  ourstatus->kind = TARGET_WAITKIND_IGNORE;

  /* If we were only supposed to resume one thread, only wait for
     that thread - if it's still alive. If it died, however - which
     can happen if we're coming from the thread death case below -
     then we need to make sure we restart the other threads. We could
     pick a thread at random or restart all; restarting all is less
     arbitrary. */
  if (!non_stop
      && !ptid_equal (cont_thread, null_ptid)
      && !ptid_equal (cont_thread, minus_one_ptid))
    {
      thread = (struct thread_info *) find_inferior_id (&all_threads,
							cont_thread);

      /* No stepping, no signal - unless one is pending already, of course. */
      if (thread == NULL)
	{
	  struct thread_resume resume_info;
	  resume_info.thread = minus_one_ptid;
	  resume_info.kind = resume_continue;
	  resume_info.sig = 0;
	  linux_resume (&resume_info, 1);
	}
      else
	ptid = cont_thread;
    }

  pid = linux_wait_for_event (ptid, &w, options);
  if (pid == 0) /* only if TARGET_WNOHANG */
    return null_ptid;

  /* linux_wait_for_event selected the event thread as a side effect.  */
  lwp = get_thread_lwp (current_inferior);

  /* If we are waiting for a particular child, and it exited,
     linux_wait_for_event will return its exit status. Similarly if
     the last child exited. If this is not the last child, however,
     do not report it as exited until there is a 'thread exited' response
     available in the remote protocol. Instead, just wait for another event.
     This should be safe, because if the thread crashed we will already
     have reported the termination signal to GDB; that should stop any
     in-progress stepping operations, etc.

     Report the exit status of the last thread to exit. This matches
     LinuxThreads' behavior. */

  if (last_thread_of_process_p (current_inferior))
    {
      if (WIFEXITED (w) || WIFSIGNALED (w))
	{
	  /* NOTE: this inner PID (the process id) deliberately shadows
	     the outer event-lwp PID.  */
	  int pid = pid_of (lwp);
	  struct process_info *process = find_process_pid (pid);

#ifdef USE_THREAD_DB
	  thread_db_free (process, 0);
#endif
	  /* The whole process is gone: tear down its bookkeeping before
	     reporting the exit to GDB.  */
	  delete_lwp (lwp);
	  linux_remove_process (process);

	  current_inferior = NULL;

	  if (WIFEXITED (w))
	    {
	      ourstatus->kind = TARGET_WAITKIND_EXITED;
	      ourstatus->value.integer = WEXITSTATUS (w);

	      if (debug_threads)
		fprintf (stderr, "\nChild exited with retcode = %x \n", WEXITSTATUS (w));
	    }
	  else
	    {
	      ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
	      ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));

	      if (debug_threads)
		fprintf (stderr, "\nChild terminated with signal = %x \n", WTERMSIG (w));

	    }

	  return pid_to_ptid (pid);
	}
    }
  else
    {
      /* A non-last thread exited; not reportable over the protocol, so
	 go back and wait for a more interesting event.  */
      if (!WIFSTOPPED (w))
	goto retry;
    }

  /* In all-stop, stop all threads. Be careful to only do this if
     we're about to report an event to GDB. */
  if (!non_stop)
    stop_all_lwps ();

  ourstatus->kind = TARGET_WAITKIND_STOPPED;

  if (lwp->suspended && WSTOPSIG (w) == SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 and it stopped cleanly, so report as SIG0. The use of
	 SIGSTOP is an implementation detail. */
      ourstatus->value.sig = TARGET_SIGNAL_0;
    }
  else if (lwp->suspended && WSTOPSIG (w) != SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 but, it stopped for other reasons. Set stop_expected so the
	 pending SIGSTOP is ignored and the LWP is resumed. */
      lwp->stop_expected = 1;
      ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
    }
  else
    {
      ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
    }

  if (debug_threads)
    fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
	     target_pid_to_str (lwp->head.id),
	     ourstatus->kind,
	     ourstatus->value.sig);

  return lwp->head.id;
}
1538
1539 /* Get rid of any pending event in the pipe. */
1540 static void
1541 async_file_flush (void)
1542 {
1543 int ret;
1544 char buf;
1545
1546 do
1547 ret = read (linux_event_pipe[0], &buf, 1);
1548 while (ret >= 0 || (ret == -1 && errno == EINTR));
1549 }
1550
1551 /* Put something in the pipe, so the event loop wakes up. */
1552 static void
1553 async_file_mark (void)
1554 {
1555 int ret;
1556
1557 async_file_flush ();
1558
1559 do
1560 ret = write (linux_event_pipe[1], "+", 1);
1561 while (ret == 0 || (ret == -1 && errno == EINTR));
1562
1563 /* Ignore EAGAIN. If the pipe is full, the event loop will already
1564 be awakened anyway. */
1565 }
1566
1567 static ptid_t
1568 linux_wait (ptid_t ptid,
1569 struct target_waitstatus *ourstatus, int target_options)
1570 {
1571 ptid_t event_ptid;
1572
1573 if (debug_threads)
1574 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
1575
1576 /* Flush the async file first. */
1577 if (target_is_async_p ())
1578 async_file_flush ();
1579
1580 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
1581
1582 /* If at least one stop was reported, there may be more. A single
1583 SIGCHLD can signal more than one child stop. */
1584 if (target_is_async_p ()
1585 && (target_options & TARGET_WNOHANG) != 0
1586 && !ptid_equal (event_ptid, null_ptid))
1587 async_file_mark ();
1588
1589 return event_ptid;
1590 }
1591
1592 /* Send a signal to an LWP. */
1593
/* Send signal SIGNO to LWPID.  Prefer the tkill syscall, which targets
   the exact thread under NPTL; if the kernel lacks tkill (ENOSYS),
   remember that and fall back to kill from then on.  */
static int
kill_lwp (unsigned long lwpid, int signo)
{
#ifdef __NR_tkill
  {
    static int no_tkill;

    if (!no_tkill)
      {
	int result;

	errno = 0;
	result = syscall (__NR_tkill, lwpid, signo);
	if (errno == ENOSYS)
	  no_tkill = 1;	/* Old kernel; don't try tkill again.  */
	else
	  return result;
      }
  }
#endif

  return kill (lwpid, signo);
}
1619
1620 static void
1621 send_sigstop (struct inferior_list_entry *entry)
1622 {
1623 struct lwp_info *lwp = (struct lwp_info *) entry;
1624 int pid;
1625
1626 if (lwp->stopped)
1627 return;
1628
1629 pid = lwpid_of (lwp);
1630
1631 /* If we already have a pending stop signal for this process, don't
1632 send another. */
1633 if (lwp->stop_expected)
1634 {
1635 if (debug_threads)
1636 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
1637
1638 /* We clear the stop_expected flag so that wait_for_sigstop
1639 will receive the SIGSTOP event (instead of silently resuming and
1640 waiting again). It'll be reset below. */
1641 lwp->stop_expected = 0;
1642 return;
1643 }
1644
1645 if (debug_threads)
1646 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
1647
1648 kill_lwp (pid, SIGSTOP);
1649 }
1650
1651 static void
1652 mark_lwp_dead (struct lwp_info *lwp, int wstat)
1653 {
1654 /* It's dead, really. */
1655 lwp->dead = 1;
1656
1657 /* Store the exit status for later. */
1658 lwp->status_pending_p = 1;
1659 lwp->status_pending = wstat;
1660
1661 /* So that check_removed_breakpoint doesn't try to figure out if
1662 this is stopped at a breakpoint. */
1663 lwp->pending_is_breakpoint = 0;
1664
1665 /* Prevent trying to stop it. */
1666 lwp->stopped = 1;
1667
1668 /* No further stops are expected from a dead lwp. */
1669 lwp->stop_expected = 0;
1670 }
1671
/* Callback for for_each_inferior: wait until the LWP in ENTRY has
   actually stopped for the SIGSTOP queued by send_sigstop.  A stop
   with some other signal is stashed as a pending status; an exit marks
   the LWP dead.  */
static void
wait_for_sigstop (struct inferior_list_entry *entry)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *saved_inferior;
  int wstat;
  ptid_t saved_tid;
  ptid_t ptid;

  if (lwp->stopped)
    return;

  /* linux_wait_for_event changes current_inferior as a side effect;
     save it (by id, since the thread itself may die) and restore it
     below.  */
  saved_inferior = current_inferior;
  if (saved_inferior != NULL)
    saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
  else
    saved_tid = null_ptid; /* avoid bogus unused warning */

  ptid = lwp->head.id;

  linux_wait_for_event (ptid, &wstat, __WALL);

  /* If we stopped with a non-SIGSTOP signal, save it for later
     and record the pending SIGSTOP. If the process exited, just
     return. */
  if (WIFSTOPPED (wstat)
      && WSTOPSIG (wstat) != SIGSTOP)
    {
      if (debug_threads)
	fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
		 lwpid_of (lwp), wstat);

      /* Do not leave a pending single-step finish to be reported to
	 the client. The client will give us a new action for this
	 thread, possibly a continue request --- otherwise, the client
	 would consider this pending SIGTRAP reported later a spurious
	 signal. */
      if (WSTOPSIG (wstat) == SIGTRAP
	  && lwp->stepping
	  && !linux_stopped_by_watchpoint ())
	{
	  if (debug_threads)
	    fprintf (stderr, " single-step SIGTRAP ignored\n");
	}
      else
	{
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}
      /* Our SIGSTOP has not been collected yet; mark it expected so
	 it is silently discarded when this LWP next reports it.  */
      lwp->stop_expected = 1;
    }
  else if (!WIFSTOPPED (wstat))
    {
      if (debug_threads)
	fprintf (stderr, "Process %ld exited while stopping LWPs\n",
		 lwpid_of (lwp));

      /* Leave this status pending for the next time we're able to
	 report it. In the mean time, we'll report this lwp as dead
	 to GDB, so GDB doesn't try to read registers and memory from
	 it. */
      mark_lwp_dead (lwp, wstat);
    }

  if (saved_inferior == NULL || linux_thread_alive (saved_tid))
    current_inferior = saved_inferior;
  else
    {
      if (debug_threads)
	fprintf (stderr, "Previously current thread died.\n");

      if (non_stop)
	{
	  /* We can't change the current inferior behind GDB's back,
	     otherwise, a subsequent command may apply to the wrong
	     process. */
	  current_inferior = NULL;
	}
      else
	{
	  /* Set a valid thread as current. */
	  set_desired_inferior (0);
	}
    }
}
1757
/* Stop every LWP: queue a SIGSTOP for each, then wait until each has
   actually stopped.  stopping_threads tells the event code not to
   treat these SIGSTOPs as reportable stops (see
   linux_wait_for_event_1).  The two passes must run in this order.  */
static void
stop_all_lwps (void)
{
  stopping_threads = 1;
  for_each_inferior (&all_lwps, send_sigstop);
  for_each_inferior (&all_lwps, wait_for_sigstop);
  stopping_threads = 0;
}
1766
1767 /* Resume execution of the inferior process.
1768 If STEP is nonzero, single-step it.
1769 If SIGNAL is nonzero, give it that signal. */
1770
static void
linux_resume_one_lwp (struct lwp_info *lwp,
		      int step, int signal, siginfo_t *info)
{
  struct thread_info *saved_inferior;

  /* Only a stopped LWP can be resumed.  */
  if (lwp->stopped == 0)
    return;

  /* If we have pending signals or status, and a new signal, enqueue the
     signal. Also enqueue the signal if we are waiting to reinsert a
     breakpoint; it will be picked up again below. */
  if (signal != 0
      && (lwp->status_pending_p || lwp->pending_signals != NULL
	  || lwp->bp_reinsert != 0))
    {
      struct pending_signals *p_sig;
      p_sig = xmalloc (sizeof (*p_sig));
      p_sig->prev = lwp->pending_signals;
      p_sig->signal = signal;
      if (info == NULL)
	memset (&p_sig->info, 0, sizeof (siginfo_t));
      else
	memcpy (&p_sig->info, info, sizeof (siginfo_t));
      lwp->pending_signals = p_sig;
    }

  /* A still-pending status means there is an event to report first;
     leave the LWP stopped.  */
  if (lwp->status_pending_p && !check_removed_breakpoint (lwp))
    return;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if (debug_threads)
    fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
	     lwpid_of (lwp), step ? "step" : "continue", signal,
	     lwp->stop_expected ? "expected" : "not expected");

  /* This bit needs some thinking about. If we get a signal that
     we must report while a single-step reinsert is still pending,
     we often end up resuming the thread. It might be better to
     (ew) allow a stack of pending events; then we could be sure that
     the reinsert happened right away and not lose any signals.

     Making this stack would also shrink the window in which breakpoints are
     uninserted (see comment in linux_wait_for_lwp) but not enough for
     complete correctness, so it won't solve that problem. It may be
     worthwhile just to solve this one, however. */
  if (lwp->bp_reinsert != 0)
    {
      if (debug_threads)
	fprintf (stderr, " pending reinsert at %08lx", (long)lwp->bp_reinsert);
      if (step == 0)
	fprintf (stderr, "BAD - reinserting but not stepping.\n");
      step = 1;

      /* Postpone any pending signal. It was enqueued above. */
      signal = 0;
    }

  check_removed_breakpoint (lwp);

  if (debug_threads && the_low_target.get_pc != NULL)
    {
      CORE_ADDR pc = (*the_low_target.get_pc) ();
      fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
    }

  /* If we have pending signals, consume one unless we are trying to reinsert
     a breakpoint. */
  if (lwp->pending_signals != NULL && lwp->bp_reinsert == 0)
    {
      struct pending_signals **p_sig;

      /* The list is newest-first; walk to the tail so the oldest
	 queued signal is delivered first.  */
      p_sig = &lwp->pending_signals;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      signal = (*p_sig)->signal;
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);

      free (*p_sig);
      *p_sig = NULL;
    }

  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);

  /* Cached register values are stale once the LWP runs again.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
			   get_lwp_thread (lwp));
  /* errno is cleared here and inspected after the ptrace call below.  */
  errno = 0;
  lwp->stopped = 0;
  lwp->stepping = step;
  ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0, signal);

  current_inferior = saved_inferior;
  if (errno)
    {
      /* ESRCH from ptrace either means that the thread was already
	 running (an error) or that it is gone (a race condition). If
	 it's gone, we will get a notification the next time we wait,
	 so we can ignore the error. We could differentiate these
	 two, but it's tricky without waiting; the thread still exists
	 as a zombie, so sending it signal 0 would succeed. So just
	 ignore ESRCH. */
      if (errno == ESRCH)
	return;

      perror_with_name ("ptrace");
    }
}
1883
/* Bundle of resume requests, passed through find_inferior's void *
   argument to linux_set_resume_request.  */
struct thread_resume_array
{
  struct thread_resume *resume;	/* Array of resume requests from GDB.  */
  size_t n;			/* Number of elements in RESUME.  */
};
1889
1890 /* This function is called once per thread. We look up the thread
1891 in RESUME_PTR, and mark the thread with a pointer to the appropriate
1892 resume request.
1893
1894 This algorithm is O(threads * resume elements), but resume elements
1895 is small (and will remain small at least until GDB supports thread
1896 suspension). */
1897 static int
1898 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
1899 {
1900 struct lwp_info *lwp;
1901 struct thread_info *thread;
1902 int ndx;
1903 struct thread_resume_array *r;
1904
1905 thread = (struct thread_info *) entry;
1906 lwp = get_thread_lwp (thread);
1907 r = arg;
1908
1909 for (ndx = 0; ndx < r->n; ndx++)
1910 {
1911 ptid_t ptid = r->resume[ndx].thread;
1912 if (ptid_equal (ptid, minus_one_ptid)
1913 || ptid_equal (ptid, entry->id)
1914 || (ptid_is_pid (ptid)
1915 && (ptid_get_pid (ptid) == pid_of (lwp)))
1916 || (ptid_get_lwp (ptid) == -1
1917 && (ptid_get_pid (ptid) == pid_of (lwp))))
1918 {
1919 lwp->resume = &r->resume[ndx];
1920 return 0;
1921 }
1922 }
1923
1924 /* No resume action for this thread. */
1925 lwp->resume = NULL;
1926
1927 return 0;
1928 }
1929
1930
1931 /* Set *FLAG_P if this lwp has an interesting status pending. */
1932 static int
1933 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
1934 {
1935 struct lwp_info *lwp = (struct lwp_info *) entry;
1936
1937 /* LWPs which will not be resumed are not interesting, because
1938 we might not wait for them next time through linux_wait. */
1939 if (lwp->resume == NULL)
1940 return 0;
1941
1942 /* If this thread has a removed breakpoint, we won't have any
1943 events to report later, so check now. check_removed_breakpoint
1944 may clear status_pending_p. We avoid calling check_removed_breakpoint
1945 for any thread that we are not otherwise going to resume - this
1946 lets us preserve stopped status when two threads hit a breakpoint.
1947 GDB removes the breakpoint to single-step a particular thread
1948 past it, then re-inserts it and resumes all threads. We want
1949 to report the second thread without resuming it in the interim. */
1950 if (lwp->status_pending_p)
1951 check_removed_breakpoint (lwp);
1952
1953 if (lwp->status_pending_p)
1954 * (int *) flag_p = 1;
1955
1956 return 0;
1957 }
1958
1959 /* This function is called once per thread. We check the thread's resume
1960 request, which will tell us whether to resume, step, or leave the thread
1961 stopped; and what signal, if any, it should be sent.
1962
1963 For threads which we aren't explicitly told otherwise, we preserve
1964 the stepping flag; this is used for stepping over gdbserver-placed
1965 breakpoints.
1966
1967 If pending_flags was set in any thread, we queue any needed
1968 signals, since we won't actually resume. We already have a pending
1969 event to report, so we don't need to preserve any step requests;
1970 they should be re-issued if necessary. */
1971
static int
linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp;
  struct thread_info *thread;
  int step;
  /* ARG points to the all-stop "some thread has a pending status"
     flag computed in linux_resume.  */
  int pending_flag = * (int *) arg;

  thread = (struct thread_info *) entry;
  lwp = get_thread_lwp (thread);

  /* Threads with no matching resume request are left as they are.  */
  if (lwp->resume == NULL)
    return 0;

  if (lwp->resume->kind == resume_stop)
    {
      if (debug_threads)
	fprintf (stderr, "suspending LWP %ld\n", lwpid_of (lwp));

      if (!lwp->stopped)
	{
	  if (debug_threads)
	    fprintf (stderr, "running -> suspending LWP %ld\n", lwpid_of (lwp));

	  lwp->suspended = 1;
	  send_sigstop (&lwp->head);
	}
      else
	{
	  if (debug_threads)
	    {
	      if (lwp->suspended)
		fprintf (stderr, "already stopped/suspended LWP %ld\n",
			 lwpid_of (lwp));
	      else
		fprintf (stderr, "already stopped/not suspended LWP %ld\n",
			 lwpid_of (lwp));
	    }

	  /* Make sure we leave the LWP suspended, so we don't try to
	     resume it without GDB telling us to. FIXME: The LWP may
	     have been stopped in an internal event that was not meant
	     to be notified back to GDB (e.g., gdbserver breakpoint),
	     so we should be reporting a stop event in that case
	     too. */
	  lwp->suspended = 1;
	}

      /* For stop requests, we're done. */
      lwp->resume = NULL;
      return 0;
    }
  else
    lwp->suspended = 0;

  /* If this thread which is about to be resumed has a pending status,
     then don't resume any threads - we can just report the pending
     status. Make sure to queue any signals that would otherwise be
     sent. In all-stop mode, we do this decision based on if *any*
     thread has a pending status. */
  if (non_stop)
    resume_status_pending_p (&lwp->head, &pending_flag);

  if (!pending_flag)
    {
      if (debug_threads)
	fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));

      /* Under a wildcard resume, keep stepping a thread that was
	 stepping and had a pending breakpoint event, so it finishes
	 stepping over the gdbserver breakpoint (see the comment above
	 this function about preserving the stepping flag).  */
      if (ptid_equal (lwp->resume->thread, minus_one_ptid)
	  && lwp->stepping
	  && lwp->pending_is_breakpoint)
	step = 1;
      else
	step = (lwp->resume->kind == resume_step);

      linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
    }
  else
    {
      if (debug_threads)
	fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));

      /* If we have a new signal, enqueue the signal. */
      if (lwp->resume->sig != 0)
	{
	  struct pending_signals *p_sig;
	  p_sig = xmalloc (sizeof (*p_sig));
	  p_sig->prev = lwp->pending_signals;
	  p_sig->signal = lwp->resume->sig;
	  memset (&p_sig->info, 0, sizeof (siginfo_t));

	  /* If this is the same signal we were previously stopped by,
	     make sure to queue its siginfo. We can ignore the return
	     value of ptrace; if it fails, we'll skip
	     PTRACE_SETSIGINFO. */
	  if (WIFSTOPPED (lwp->last_status)
	      && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
	    ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);

	  lwp->pending_signals = p_sig;
	}
    }

  lwp->resume = NULL;
  return 0;
}
2078
2079 static void
2080 linux_resume (struct thread_resume *resume_info, size_t n)
2081 {
2082 int pending_flag;
2083 struct thread_resume_array array = { resume_info, n };
2084
2085 find_inferior (&all_threads, linux_set_resume_request, &array);
2086
2087 /* If there is a thread which would otherwise be resumed, which
2088 has a pending status, then don't resume any threads - we can just
2089 report the pending status. Make sure to queue any signals
2090 that would otherwise be sent. In non-stop mode, we'll apply this
2091 logic to each thread individually. */
2092 pending_flag = 0;
2093 if (!non_stop)
2094 find_inferior (&all_lwps, resume_status_pending_p, &pending_flag);
2095
2096 if (debug_threads)
2097 {
2098 if (pending_flag)
2099 fprintf (stderr, "Not resuming, pending status\n");
2100 else
2101 fprintf (stderr, "Resuming, no pending status\n");
2102 }
2103
2104 find_inferior (&all_threads, linux_resume_one_thread, &pending_flag);
2105 }
2106
2107 #ifdef HAVE_LINUX_USRREGS
2108
2109 int
2110 register_addr (int regnum)
2111 {
2112 int addr;
2113
2114 if (regnum < 0 || regnum >= the_low_target.num_regs)
2115 error ("Invalid register number %d.", regnum);
2116
2117 addr = the_low_target.regmap[regnum];
2118
2119 return addr;
2120 }
2121
2122 /* Fetch one register. */
2123 static void
2124 fetch_register (int regno)
2125 {
2126 CORE_ADDR regaddr;
2127 int i, size;
2128 char *buf;
2129 int pid;
2130
2131 if (regno >= the_low_target.num_regs)
2132 return;
2133 if ((*the_low_target.cannot_fetch_register) (regno))
2134 return;
2135
2136 regaddr = register_addr (regno);
2137 if (regaddr == -1)
2138 return;
2139
2140 pid = lwpid_of (get_thread_lwp (current_inferior));
2141 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
2142 & - sizeof (PTRACE_XFER_TYPE));
2143 buf = alloca (size);
2144 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
2145 {
2146 errno = 0;
2147 *(PTRACE_XFER_TYPE *) (buf + i) =
2148 ptrace (PTRACE_PEEKUSER, pid, (PTRACE_ARG3_TYPE) regaddr, 0);
2149 regaddr += sizeof (PTRACE_XFER_TYPE);
2150 if (errno != 0)
2151 {
2152 /* Warning, not error, in case we are attached; sometimes the
2153 kernel doesn't let us at the registers. */
2154 char *err = strerror (errno);
2155 char *msg = alloca (strlen (err) + 128);
2156 sprintf (msg, "reading register %d: %s", regno, err);
2157 error (msg);
2158 goto error_exit;
2159 }
2160 }
2161
2162 if (the_low_target.supply_ptrace_register)
2163 the_low_target.supply_ptrace_register (regno, buf);
2164 else
2165 supply_register (regno, buf);
2166
2167 error_exit:;
2168 }
2169
2170 /* Fetch all registers, or just one, from the child process. */
2171 static void
2172 usr_fetch_inferior_registers (int regno)
2173 {
2174 if (regno == -1)
2175 for (regno = 0; regno < the_low_target.num_regs; regno++)
2176 fetch_register (regno);
2177 else
2178 fetch_register (regno);
2179 }
2180
2181 /* Store our register values back into the inferior.
2182 If REGNO is -1, do this for all registers.
2183 Otherwise, REGNO specifies which register (so we can save time). */
2184 static void
2185 usr_store_inferior_registers (int regno)
2186 {
2187 CORE_ADDR regaddr;
2188 int i, size;
2189 char *buf;
2190 int pid;
2191
2192 if (regno >= 0)
2193 {
2194 if (regno >= the_low_target.num_regs)
2195 return;
2196
2197 if ((*the_low_target.cannot_store_register) (regno) == 1)
2198 return;
2199
2200 regaddr = register_addr (regno);
2201 if (regaddr == -1)
2202 return;
2203 errno = 0;
2204 size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
2205 & - sizeof (PTRACE_XFER_TYPE);
2206 buf = alloca (size);
2207 memset (buf, 0, size);
2208
2209 if (the_low_target.collect_ptrace_register)
2210 the_low_target.collect_ptrace_register (regno, buf);
2211 else
2212 collect_register (regno, buf);
2213
2214 pid = lwpid_of (get_thread_lwp (current_inferior));
2215 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
2216 {
2217 errno = 0;
2218 ptrace (PTRACE_POKEUSER, pid, (PTRACE_ARG3_TYPE) regaddr,
2219 *(PTRACE_XFER_TYPE *) (buf + i));
2220 if (errno != 0)
2221 {
2222 /* At this point, ESRCH should mean the process is
2223 already gone, in which case we simply ignore attempts
2224 to change its registers. See also the related
2225 comment in linux_resume_one_lwp. */
2226 if (errno == ESRCH)
2227 return;
2228
2229 if ((*the_low_target.cannot_store_register) (regno) == 0)
2230 {
2231 char *err = strerror (errno);
2232 char *msg = alloca (strlen (err) + 128);
2233 sprintf (msg, "writing register %d: %s",
2234 regno, err);
2235 error (msg);
2236 return;
2237 }
2238 }
2239 regaddr += sizeof (PTRACE_XFER_TYPE);
2240 }
2241 }
2242 else
2243 for (regno = 0; regno < the_low_target.num_regs; regno++)
2244 usr_store_inferior_registers (regno);
2245 }
2246 #endif /* HAVE_LINUX_USRREGS */
2247
2248
2249
2250 #ifdef HAVE_LINUX_REGSETS
2251
2252 static int
2253 regsets_fetch_inferior_registers ()
2254 {
2255 struct regset_info *regset;
2256 int saw_general_regs = 0;
2257 int pid;
2258
2259 regset = target_regsets;
2260
2261 pid = lwpid_of (get_thread_lwp (current_inferior));
2262 while (regset->size >= 0)
2263 {
2264 void *buf;
2265 int res;
2266
2267 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
2268 {
2269 regset ++;
2270 continue;
2271 }
2272
2273 buf = xmalloc (regset->size);
2274 #ifndef __sparc__
2275 res = ptrace (regset->get_request, pid, 0, buf);
2276 #else
2277 res = ptrace (regset->get_request, pid, buf, 0);
2278 #endif
2279 if (res < 0)
2280 {
2281 if (errno == EIO)
2282 {
2283 /* If we get EIO on a regset, do not try it again for
2284 this process. */
2285 disabled_regsets[regset - target_regsets] = 1;
2286 free (buf);
2287 continue;
2288 }
2289 else
2290 {
2291 char s[256];
2292 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
2293 pid);
2294 perror (s);
2295 }
2296 }
2297 else if (regset->type == GENERAL_REGS)
2298 saw_general_regs = 1;
2299 regset->store_function (buf);
2300 regset ++;
2301 free (buf);
2302 }
2303 if (saw_general_regs)
2304 return 0;
2305 else
2306 return 1;
2307 }
2308
2309 static int
2310 regsets_store_inferior_registers ()
2311 {
2312 struct regset_info *regset;
2313 int saw_general_regs = 0;
2314 int pid;
2315
2316 regset = target_regsets;
2317
2318 pid = lwpid_of (get_thread_lwp (current_inferior));
2319 while (regset->size >= 0)
2320 {
2321 void *buf;
2322 int res;
2323
2324 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
2325 {
2326 regset ++;
2327 continue;
2328 }
2329
2330 buf = xmalloc (regset->size);
2331
2332 /* First fill the buffer with the current register set contents,
2333 in case there are any items in the kernel's regset that are
2334 not in gdbserver's regcache. */
2335 #ifndef __sparc__
2336 res = ptrace (regset->get_request, pid, 0, buf);
2337 #else
2338 res = ptrace (regset->get_request, pid, buf, 0);
2339 #endif
2340
2341 if (res == 0)
2342 {
2343 /* Then overlay our cached registers on that. */
2344 regset->fill_function (buf);
2345
2346 /* Only now do we write the register set. */
2347 #ifndef __sparc__
2348 res = ptrace (regset->set_request, pid, 0, buf);
2349 #else
2350 res = ptrace (regset->set_request, pid, buf, 0);
2351 #endif
2352 }
2353
2354 if (res < 0)
2355 {
2356 if (errno == EIO)
2357 {
2358 /* If we get EIO on a regset, do not try it again for
2359 this process. */
2360 disabled_regsets[regset - target_regsets] = 1;
2361 free (buf);
2362 continue;
2363 }
2364 else if (errno == ESRCH)
2365 {
2366 /* At this point, ESRCH should mean the process is
2367 already gone, in which case we simply ignore attempts
2368 to change its registers. See also the related
2369 comment in linux_resume_one_lwp. */
2370 free (buf);
2371 return 0;
2372 }
2373 else
2374 {
2375 perror ("Warning: ptrace(regsets_store_inferior_registers)");
2376 }
2377 }
2378 else if (regset->type == GENERAL_REGS)
2379 saw_general_regs = 1;
2380 regset ++;
2381 free (buf);
2382 }
2383 if (saw_general_regs)
2384 return 0;
2385 else
2386 return 1;
2387 return 0;
2388 }
2389
2390 #endif /* HAVE_LINUX_REGSETS */
2391
2392
/* Fetch register REGNO (all registers if -1) for the current inferior
   thread.  Prefer the regset interface when built in; it returns 0
   once a general-register set has been read, in which case we are
   done.  Otherwise fall back to one-word-at-a-time PTRACE_PEEKUSER.  */
void
linux_fetch_registers (int regno)
{
#ifdef HAVE_LINUX_REGSETS
  if (regsets_fetch_inferior_registers () == 0)
    return;
#endif
#ifdef HAVE_LINUX_USRREGS
  usr_fetch_inferior_registers (regno);
#endif
}
2404
/* Store register REGNO (all registers if -1) back into the current
   inferior thread.  Mirrors linux_fetch_registers: try the regset
   interface first, and only fall back to PTRACE_POKEUSER when no
   general-register set could be written.  */
void
linux_store_registers (int regno)
{
#ifdef HAVE_LINUX_REGSETS
  if (regsets_store_inferior_registers () == 0)
    return;
#endif
#ifdef HAVE_LINUX_USRREGS
  usr_store_inferior_registers (regno);
#endif
}
2416
2417
/* Copy LEN bytes from inferior's memory starting at MEMADDR
   to debugger memory starting at MYADDR.  Returns 0 on success,
   or the value of errno if a ptrace transfer fails.  */

static int
linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
  register int i;
  /* Round starting address down to longword boundary.  */
  register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  register int count
    = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
      / sizeof (PTRACE_XFER_TYPE);
  /* Allocate buffer of that many longwords.  */
  register PTRACE_XFER_TYPE *buffer
    = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
  int fd;
  char filename[64];
  int pid = lwpid_of (get_thread_lwp (current_inferior));

  /* Try using /proc.  Don't bother for one word.  */
  if (len >= 3 * sizeof (long))
    {
      /* We could keep this file open and cache it - possibly one per
	 thread.  That requires some juggling, but is even faster.  */
      sprintf (filename, "/proc/%d/mem", pid);
      fd = open (filename, O_RDONLY | O_LARGEFILE);
      if (fd == -1)
	goto no_proc;

      /* If pread64 is available, use it.  It's faster if the kernel
	 supports it (only one syscall), and it's 64-bit safe even on
	 32-bit platforms (for instance, SPARC debugging a SPARC64
	 application).  */
#ifdef HAVE_PREAD64
      if (pread64 (fd, myaddr, len, memaddr) != len)
#else
      if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
#endif
	{
	  close (fd);
	  goto no_proc;
	}

      close (fd);
      return 0;
    }

 no_proc:
  /* /proc was unusable (or the request is too small to bother):
     fall back to transferring one ptrace word at a time.  */
  /* Read all the longwords */
  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      buffer[i] = ptrace (PTRACE_PEEKTEXT, pid, (PTRACE_ARG3_TYPE) addr, 0);
      if (errno)
	return errno;
    }

  /* Copy appropriate bytes out of the buffer.  The word buffer is
     aligned, so skip the leading bytes before MEMADDR proper.  */
  memcpy (myaddr,
	  (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	  len);

  return 0;
}
2483
2484 /* Copy LEN bytes of data from debugger memory at MYADDR
2485 to inferior's memory at MEMADDR.
2486 On failure (cannot write the inferior)
2487 returns the value of errno. */
2488
2489 static int
2490 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
2491 {
2492 register int i;
2493 /* Round starting address down to longword boundary. */
2494 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
2495 /* Round ending address up; get number of longwords that makes. */
2496 register int count
2497 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1) / sizeof (PTRACE_XFER_TYPE);
2498 /* Allocate buffer of that many longwords. */
2499 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
2500 int pid = lwpid_of (get_thread_lwp (current_inferior));
2501
2502 if (debug_threads)
2503 {
2504 /* Dump up to four bytes. */
2505 unsigned int val = * (unsigned int *) myaddr;
2506 if (len == 1)
2507 val = val & 0xff;
2508 else if (len == 2)
2509 val = val & 0xffff;
2510 else if (len == 3)
2511 val = val & 0xffffff;
2512 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
2513 val, (long)memaddr);
2514 }
2515
2516 /* Fill start and end extra bytes of buffer with existing memory data. */
2517
2518 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid, (PTRACE_ARG3_TYPE) addr, 0);
2519
2520 if (count > 1)
2521 {
2522 buffer[count - 1]
2523 = ptrace (PTRACE_PEEKTEXT, pid,
2524 (PTRACE_ARG3_TYPE) (addr + (count - 1)
2525 * sizeof (PTRACE_XFER_TYPE)),
2526 0);
2527 }
2528
2529 /* Copy data to be written over corresponding part of buffer */
2530
2531 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), myaddr, len);
2532
2533 /* Write the entire buffer. */
2534
2535 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
2536 {
2537 errno = 0;
2538 ptrace (PTRACE_POKETEXT, pid, (PTRACE_ARG3_TYPE) addr, buffer[i]);
2539 if (errno)
2540 return errno;
2541 }
2542
2543 return 0;
2544 }
2545
2546 static int linux_supports_tracefork_flag;
2547
2548 /* Helper functions for linux_test_for_tracefork, called via clone (). */
2549
/* Grandchild for linux_test_for_tracefork: exits immediately; its only
   role is to exist, so the parent can observe the clone/fork event.  */
static int
linux_tracefork_grandchild (void *arg)
{
  _exit (0);
}
2555
2556 #define STACK_SIZE 4096
2557
/* Child for linux_test_for_tracefork: become traceable, stop so the
   parent can set ptrace options, then clone a grandchild to trigger
   the fork event being probed for.  */
static int
linux_tracefork_child (void *arg)
{
  ptrace (PTRACE_TRACEME, 0, 0, 0);
  /* Stop here until the parent resumes us (it waits for the SIGSTOP
     and then sets PTRACE_O_TRACEFORK).  */
  kill (getpid (), SIGSTOP);
#ifdef __ia64__
  __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
	    CLONE_VM | SIGCHLD, NULL);
#else
  /* ARG is the base of a stack area set up by the caller; the clone
     child's stack grows down from ARG + STACK_SIZE.  (Arithmetic on
     void * is a GCC extension.)  */
  clone (linux_tracefork_grandchild, arg + STACK_SIZE,
	 CLONE_VM | SIGCHLD, NULL);
#endif
  _exit (0);
}
2572
/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.  Make
   sure that we can enable the option, and that it had the desired
   effect.  Sets linux_supports_tracefork_flag as a side effect.  */

static void
linux_test_for_tracefork (void)
{
  int child_pid, ret, status;
  long second_pid;
  char *stack = xmalloc (STACK_SIZE * 4);

  linux_supports_tracefork_flag = 0;

  /* Use CLONE_VM instead of fork, to support uClinux (no MMU).  */
#ifdef __ia64__
  child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
			CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
#else
  child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
		     CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
#endif
  if (child_pid == -1)
    perror_with_name ("clone");

  /* The child raises SIGSTOP after PTRACE_TRACEME; wait for it.  */
  ret = my_waitpid (child_pid, &status, 0);
  if (ret == -1)
    perror_with_name ("waitpid");
  else if (ret != child_pid)
    error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
  if (! WIFSTOPPED (status))
    error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);

  /* Try to enable fork tracing; if the kernel rejects it, kill the
     child and report no support.  */
  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
	{
	  warning ("linux_test_for_tracefork: failed to kill child");
	  return;
	}

      ret = my_waitpid (child_pid, &status, 0);
      if (ret != child_pid)
	warning ("linux_test_for_tracefork: failed to wait for killed child");
      else if (!WIFSIGNALED (status))
	warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
		 "killed child", status);

      return;
    }

  ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
  if (ret != 0)
    warning ("linux_test_for_tracefork: failed to resume child");

  ret = my_waitpid (child_pid, &status, 0);

  /* A PTRACE_EVENT_FORK stop (event in the high bits of the status)
     proves the option actually works; reap the grandchild too.  */
  if (ret == child_pid && WIFSTOPPED (status)
      && status >> 16 == PTRACE_EVENT_FORK)
    {
      second_pid = 0;
      ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
      if (ret == 0 && second_pid != 0)
	{
	  int second_status;

	  linux_supports_tracefork_flag = 1;
	  my_waitpid (second_pid, &second_status, 0);
	  ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
	  if (ret != 0)
	    warning ("linux_test_for_tracefork: failed to kill second child");
	  my_waitpid (second_pid, &status, 0);
	}
    }
  else
    warning ("linux_test_for_tracefork: unexpected result from waitpid "
	     "(%d, status 0x%x)", ret, status);

  /* Kill the child and drain any remaining stop events.  */
  do
    {
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
	warning ("linux_test_for_tracefork: failed to kill child");
      my_waitpid (child_pid, &status, 0);
    }
  while (WIFSTOPPED (status));

  free (stack);
}
2663
2664
/* Initialize libthread_db-based thread debugging for the current
   process, if built with USE_THREAD_DB and not already done.  */
static void
linux_look_up_symbols (void)
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  /* Already initialized for this process?  */
  if (proc->private->thread_db != NULL)
    return;

  thread_db_init (!linux_supports_tracefork_flag);
#endif
}
2677
2678 static void
2679 linux_request_interrupt (void)
2680 {
2681 extern unsigned long signal_pid;
2682
2683 if (!ptid_equal (cont_thread, null_ptid)
2684 && !ptid_equal (cont_thread, minus_one_ptid))
2685 {
2686 struct lwp_info *lwp;
2687 int lwpid;
2688
2689 lwp = get_thread_lwp (current_inferior);
2690 lwpid = lwpid_of (lwp);
2691 kill_lwp (lwpid, SIGINT);
2692 }
2693 else
2694 kill_lwp (signal_pid, SIGINT);
2695 }
2696
2697 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
2698 to debugger memory starting at MYADDR. */
2699
2700 static int
2701 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
2702 {
2703 char filename[PATH_MAX];
2704 int fd, n;
2705 int pid = lwpid_of (get_thread_lwp (current_inferior));
2706
2707 snprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
2708
2709 fd = open (filename, O_RDONLY);
2710 if (fd < 0)
2711 return -1;
2712
2713 if (offset != (CORE_ADDR) 0
2714 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
2715 n = -1;
2716 else
2717 n = read (fd, myaddr, len);
2718
2719 close (fd);
2720
2721 return n;
2722 }
2723
2724 /* These breakpoint and watchpoint related wrapper functions simply
2725 pass on the function call if the target has registered a
2726 corresponding function. */
2727
2728 static int
2729 linux_insert_point (char type, CORE_ADDR addr, int len)
2730 {
2731 if (the_low_target.insert_point != NULL)
2732 return the_low_target.insert_point (type, addr, len);
2733 else
2734 /* Unsupported (see target.h). */
2735 return 1;
2736 }
2737
2738 static int
2739 linux_remove_point (char type, CORE_ADDR addr, int len)
2740 {
2741 if (the_low_target.remove_point != NULL)
2742 return the_low_target.remove_point (type, addr, len);
2743 else
2744 /* Unsupported (see target.h). */
2745 return 1;
2746 }
2747
2748 static int
2749 linux_stopped_by_watchpoint (void)
2750 {
2751 if (the_low_target.stopped_by_watchpoint != NULL)
2752 return the_low_target.stopped_by_watchpoint ();
2753 else
2754 return 0;
2755 }
2756
2757 static CORE_ADDR
2758 linux_stopped_data_address (void)
2759 {
2760 if (the_low_target.stopped_data_address != NULL)
2761 return the_low_target.stopped_data_address ();
2762 else
2763 return 0;
2764 }
2765
2766 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
2767 #if defined(__mcoldfire__)
2768 /* These should really be defined in the kernel's ptrace.h header. */
2769 #define PT_TEXT_ADDR 49*4
2770 #define PT_DATA_ADDR 50*4
2771 #define PT_TEXT_END_ADDR 51*4
2772 #endif
2773
/* Under uClinux, programs are loaded at non-zero offsets, which we need
   to tell gdb about.  Returns 1 and sets *TEXT_P/*DATA_P on success,
   0 otherwise (including when the PT_* offsets are not defined for
   this configuration).  */

static int
linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
#if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
  unsigned long text, text_end, data;
  int pid = lwpid_of (get_thread_lwp (current_inferior));

  /* PTRACE_PEEKUSER returns the value in-band, so errors can only be
     detected via errno; clear it first.  */
  errno = 0;

  text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
  text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
  data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);

  if (errno == 0)
    {
      /* Both text and data offsets produced at compile-time (and so
	 used by gdb) are relative to the beginning of the program,
	 with the data segment immediately following the text segment.
	 However, the actual runtime layout in memory may put the data
	 somewhere else, so when we send gdb a data base-address, we
	 use the real data base address and subtract the compile-time
	 data base-address from it (which is just the length of the
	 text segment).  BSS immediately follows data in both
	 cases.  */
      *text_p = text;
      *data_p = data - (text_end - text);

      return 1;
    }
#endif
 return 0;
}
2809 #endif
2810
/* qsort comparison callback for ints.  Returns a negative, zero, or
   positive value as *XA is less than, equal to, or greater than *XB.
   Written as two comparisons rather than the usual subtraction so
   that extreme values (e.g. INT_MIN vs. INT_MAX) cannot overflow,
   which would be undefined behavior.  */
static int
compare_ints (const void *xa, const void *xb)
{
  int a = *(const int *)xa;
  int b = *(const int *)xb;

  return (a > b) - (a < b);
}
2819
/* Remove adjacent duplicates from the sorted range [START, END),
   compacting survivors to the front.  Returns one past the last kept
   element.  The range must be non-empty.  */
static int *
unique (int *start, int *end)
{
  int *last = start;
  int *cur;

  for (cur = start + 1; cur != end; cur++)
    if (*cur != *last)
      {
	++last;
	*last = *cur;
      }
  return last + 1;
}
2829
2830 /* Given PID, iterates over all threads in that process.
2831
2832 Information about each thread, in a format suitable for qXfer:osdata:thread
2833 is printed to BUFFER, if it's not NULL. BUFFER is assumed to be already
2834 initialized, and the caller is responsible for finishing and appending '\0'
2835 to it.
2836
2837 The list of cores that threads are running on is assigned to *CORES, if it
2838 is not NULL. If no cores are found, *CORES will be set to NULL. Caller
2839 should free *CORES. */
2840
2841 static void
2842 list_threads (int pid, struct buffer *buffer, char **cores)
2843 {
2844 int count = 0;
2845 int allocated = 10;
2846 int *core_numbers = xmalloc (sizeof (int) * allocated);
2847 char pathname[128];
2848 DIR *dir;
2849 struct dirent *dp;
2850 struct stat statbuf;
2851
2852 sprintf (pathname, "/proc/%d/task", pid);
2853 if (stat (pathname, &statbuf) == 0 && S_ISDIR (statbuf.st_mode))
2854 {
2855 dir = opendir (pathname);
2856 if (!dir)
2857 {
2858 free (core_numbers);
2859 return;
2860 }
2861
2862 while ((dp = readdir (dir)) != NULL)
2863 {
2864 unsigned long lwp = strtoul (dp->d_name, NULL, 10);
2865
2866 if (lwp != 0)
2867 {
2868 unsigned core = linux_core_of_thread (ptid_build (pid, lwp, 0));
2869
2870 if (core != -1)
2871 {
2872 char s[sizeof ("4294967295")];
2873 sprintf (s, "%u", core);
2874
2875 if (count == allocated)
2876 {
2877 allocated *= 2;
2878 core_numbers = realloc (core_numbers,
2879 sizeof (int) * allocated);
2880 }
2881 core_numbers[count++] = core;
2882 if (buffer)
2883 buffer_xml_printf (buffer,
2884 "<item>"
2885 "<column name=\"pid\">%d</column>"
2886 "<column name=\"tid\">%s</column>"
2887 "<column name=\"core\">%s</column>"
2888 "</item>", pid, dp->d_name, s);
2889 }
2890 else
2891 {
2892 if (buffer)
2893 buffer_xml_printf (buffer,
2894 "<item>"
2895 "<column name=\"pid\">%d</column>"
2896 "<column name=\"tid\">%s</column>"
2897 "</item>", pid, dp->d_name);
2898 }
2899 }
2900 }
2901 }
2902
2903 if (cores)
2904 {
2905 *cores = NULL;
2906 if (count > 0)
2907 {
2908 struct buffer buffer2;
2909 int *b;
2910 int *e;
2911 qsort (core_numbers, count, sizeof (int), compare_ints);
2912
2913 /* Remove duplicates. */
2914 b = core_numbers;
2915 e = unique (b, core_numbers + count);
2916
2917 buffer_init (&buffer2);
2918
2919 for (b = core_numbers; b != e; ++b)
2920 {
2921 char number[sizeof ("4294967295")];
2922 sprintf (number, "%u", *b);
2923 buffer_xml_printf (&buffer2, "%s%s",
2924 (b == core_numbers) ? "" : ",", number);
2925 }
2926 buffer_grow_str0 (&buffer2, "");
2927
2928 *cores = buffer_finish (&buffer2);
2929 }
2930 }
2931 free (core_numbers);
2932 }
2933
/* Append one qXfer:osdata:processes <item> for process PID to BUFFER,
   labelled with USERNAME, by reading /proc/PID/cmdline.  Processes
   whose cmdline cannot be read (or is empty) are skipped.  */
static void
show_process (int pid, const char *username, struct buffer *buffer)
{
  char pathname[128];
  FILE *f;
  char cmd[MAXPATHLEN + 1];
  size_t nread;

  sprintf (pathname, "/proc/%d/cmdline", pid);

  f = fopen (pathname, "r");
  if (f == NULL)
    return;

  nread = fread (cmd, 1, sizeof (cmd) - 1, f);
  if (nread > 0)
    {
      char *cores = 0;
      size_t i;

      /* cmdline separates arguments with NUL bytes; rewrite them as
	 spaces so the whole command line reads as one string.  */
      for (i = 0; i < nread; i++)
	if (cmd[i] == '\0')
	  cmd[i] = ' ';
      cmd[nread] = '\0';

      buffer_xml_printf (buffer,
			 "<item>"
			 "<column name=\"pid\">%d</column>"
			 "<column name=\"user\">%s</column>"
			 "<column name=\"command\">%s</column>",
			 pid,
			 username,
			 cmd);

      /* This only collects core numbers, and does not print threads.  */
      list_threads (pid, NULL, &cores);

      if (cores)
	{
	  buffer_xml_printf (buffer,
			     "<column name=\"cores\">%s</column>", cores);
	  free (cores);
	}

      buffer_xml_printf (buffer, "</item>");
    }
  fclose (f);
}
2979
/* Handle qXfer:osdata reads for the "processes" and "threads"
   annexes.  A snapshot of /proc is built into a static buffer when
   OFFSET is 0; subsequent calls page through it, and reading past the
   end frees it.  Because the snapshot state is static, only one
   transfer can be in progress at a time.  Writes are rejected.  */
static int
linux_qxfer_osdata (const char *annex,
		    unsigned char *readbuf, unsigned const char *writebuf,
		    CORE_ADDR offset, int len)
{
  /* We make the process list snapshot when the object starts to be
     read.  */
  static const char *buf;
  static long len_avail = -1;
  static struct buffer buffer;
  int processes = 0;
  int threads = 0;

  DIR *dirp;

  if (strcmp (annex, "processes") == 0)
    processes = 1;
  else if (strcmp (annex, "threads") == 0)
    threads = 1;
  else
    return 0;

  if (!readbuf || writebuf)
    return 0;

  if (offset == 0)
    {
      /* Starting a new transfer: drop any previous snapshot and
	 rebuild it from /proc.  */
      if (len_avail != -1 && len_avail != 0)
       buffer_free (&buffer);
      len_avail = 0;
      buf = NULL;
      buffer_init (&buffer);
      if (processes)
	buffer_grow_str (&buffer, "<osdata type=\"processes\">");
      else if (threads)
	buffer_grow_str (&buffer, "<osdata type=\"threads\">");

      dirp = opendir ("/proc");
      if (dirp)
	{
	   struct dirent *dp;
	   while ((dp = readdir (dirp)) != NULL)
	     {
	       struct stat statbuf;
	       char procentry[sizeof ("/proc/4294967295")];

	       /* Only look at numeric entries (process directories).
		  NOTE(review): isdigit is given a plain char; strictly
		  it should be cast to unsigned char — confirm.  */
	       if (!isdigit (dp->d_name[0])
		   || strlen (dp->d_name) > sizeof ("4294967295") - 1)
		 continue;

	       sprintf (procentry, "/proc/%s", dp->d_name);
	       if (stat (procentry, &statbuf) == 0
		   && S_ISDIR (statbuf.st_mode))
		 {
		   int pid = (int) strtoul (dp->d_name, NULL, 10);

		   if (processes)
		     {
		       struct passwd *entry = getpwuid (statbuf.st_uid);
		       show_process (pid, entry ? entry->pw_name : "?", &buffer);
		     }
		   else if (threads)
		     {
		       list_threads (pid, &buffer, NULL);
		     }
		 }
	     }

	   closedir (dirp);
	}
      buffer_grow_str0 (&buffer, "</osdata>\n");
      buf = buffer_finish (&buffer);
      len_avail = strlen (buf);
    }

  if (offset >= len_avail)
    {
      /* Done.  Get rid of the data.  */
      buffer_free (&buffer);
      buf = NULL;
      len_avail = 0;
      return 0;
    }

  if (len > len_avail - offset)
    len = len_avail - offset;
  memcpy (readbuf, buf + offset, len);

  return len;
}
3070
3071 /* Convert a native/host siginfo object, into/from the siginfo in the
3072 layout of the inferiors' architecture. */
3073
3074 static void
3075 siginfo_fixup (struct siginfo *siginfo, void *inf_siginfo, int direction)
3076 {
3077 int done = 0;
3078
3079 if (the_low_target.siginfo_fixup != NULL)
3080 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
3081
3082 /* If there was no callback, or the callback didn't do anything,
3083 then just do a straight memcpy. */
3084 if (!done)
3085 {
3086 if (direction == 1)
3087 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
3088 else
3089 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
3090 }
3091 }
3092
/* Handle qXfer:siginfo transfers: read or write the pending signal
   information of the current LWP via PTRACE_GETSIGINFO /
   PTRACE_SETSIGINFO.  Returns the number of bytes transferred, or -1
   on error.  */
static int
linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
		    unsigned const char *writebuf, CORE_ADDR offset, int len)
{
  int pid;
  struct siginfo siginfo;
  char inf_siginfo[sizeof (struct siginfo)];

  if (current_inferior == NULL)
    return -1;

  pid = lwpid_of (get_thread_lwp (current_inferior));

  if (debug_threads)
    fprintf (stderr, "%s siginfo for lwp %d.\n",
	     readbuf != NULL ? "Reading" : "Writing",
	     pid);

  /* NOTE(review): OFFSET == sizeof (siginfo) passes this check and
     yields a zero-length transfer; confirm whether ">=" was meant.  */
  if (offset > sizeof (siginfo))
    return -1;

  if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp the transfer to the end of the structure.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
	return -1;
    }

  return len;
}
3141
/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
   so we notice when children change state; as the handler for the
   sigsuspend in my_waitpid.  */

static void
sigchld_handler (int signo)
{
  int old_errno = errno;

  if (debug_threads)
    /* fprintf is not async-signal-safe, so call write directly.  The
       return value is deliberately ignored; this is best-effort debug
       output.  */
    write (2, "sigchld_handler\n", sizeof ("sigchld_handler\n") - 1);

  if (target_is_async_p ())
    async_file_mark (); /* trigger a linux_wait */

  /* A handler must not clobber the errno value seen by the code it
     interrupted.  */
  errno = old_errno;
}
3160
/* Non-stop mode is always supported by this GNU/Linux backend.  */
static int
linux_supports_non_stop (void)
{
  return 1;
}
3166
/* Enable (ENABLE non-zero) or disable async I/O mode by installing or
   tearing down the self-pipe that wakes the event loop on SIGCHLD.
   Returns the previous state.  SIGCHLD is blocked around the switch so
   the handler can never observe a half-configured pipe.  */
static int
linux_async (int enable)
{
  /* linux_event_pipe[0] == -1 encodes "async currently disabled".  */
  int previous = (linux_event_pipe[0] != -1);

  if (previous != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      sigprocmask (SIG_BLOCK, &mask, NULL);

      if (enable)
	{
	  if (pipe (linux_event_pipe) == -1)
	    fatal ("creating event pipe failed.");

	  /* Both ends non-blocking: the handler writes and the event
	     loop reads without ever stalling.  */
	  fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
	  fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);

	  /* Register the event loop handler.  */
	  add_file_handler (linux_event_pipe[0],
			    handle_target_event, NULL);

	  /* Always trigger a linux_wait.  */
	  async_file_mark ();
	}
      else
	{
	  delete_file_handler (linux_event_pipe[0]);

	  close (linux_event_pipe[0]);
	  close (linux_event_pipe[1]);
	  linux_event_pipe[0] = -1;
	  linux_event_pipe[1] = -1;
	}

      sigprocmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}
3210
/* Switch non-stop mode on or off.  Always succeeds (returns 0).  */
static int
linux_start_non_stop (int nonstop)
{
  /* Register or unregister from event-loop accordingly.  */
  linux_async (nonstop);
  return 0;
}
3218
/* Multi-process debugging is always supported by this backend.  */
static int
linux_supports_multi_process (void)
{
  return 1;
}
3224
3225
/* Enumerate spufs IDs for process PID.  Scans /proc/PID/fd for
   descriptors that are directories on a spufs filesystem, and writes
   each such fd as a 4-byte (host byte order) ID into BUF, for the
   portion of the ID stream that falls inside [OFFSET, OFFSET+LEN).
   Returns the number of bytes written, or -1 if the fd directory
   cannot be opened.  */
static int
spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
{
  int pos = 0;       /* Byte position in the full (unwindowed) stream.  */
  int written = 0;   /* Bytes actually stored into BUF.  */
  char path[128];
  DIR *dir;
  struct dirent *entry;

  sprintf (path, "/proc/%ld/fd", pid);
  dir = opendir (path);
  if (!dir)
    return -1;

  rewinddir (dir);
  while ((entry = readdir (dir)) != NULL)
    {
      struct stat st;
      struct statfs stfs;
      int fd;

      /* atoi yields 0 for ".", ".." and fd 0 alike; all are skipped.  */
      fd = atoi (entry->d_name);
      if (!fd)
	continue;

      sprintf (path, "/proc/%ld/fd/%d", pid, fd);
      if (stat (path, &st) != 0)
	continue;
      if (!S_ISDIR (st.st_mode))
	continue;

      if (statfs (path, &stfs) != 0)
	continue;
      if (stfs.f_type != SPUFS_MAGIC)
	continue;

      /* Emit this ID only if its 4 bytes lie wholly in the window.  */
      if (pos >= offset && pos + 4 <= offset + len)
	{
	  *(unsigned int *)(buf + pos - offset) = fd;
	  written += 4;
	}
      pos += 4;
    }

  closedir (dir);
  return written;
}
3274
3275 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
3276 object type, using the /proc file system. */
3277 static int
3278 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
3279 unsigned const char *writebuf,
3280 CORE_ADDR offset, int len)
3281 {
3282 long pid = lwpid_of (get_thread_lwp (current_inferior));
3283 char buf[128];
3284 int fd = 0;
3285 int ret = 0;
3286
3287 if (!writebuf && !readbuf)
3288 return -1;
3289
3290 if (!*annex)
3291 {
3292 if (!readbuf)
3293 return -1;
3294 else
3295 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
3296 }
3297
3298 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
3299 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
3300 if (fd <= 0)
3301 return -1;
3302
3303 if (offset != 0
3304 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
3305 {
3306 close (fd);
3307 return 0;
3308 }
3309
3310 if (writebuf)
3311 ret = write (fd, writebuf, (size_t) len);
3312 else
3313 ret = read (fd, readbuf, (size_t) len);
3314
3315 close (fd);
3316 return ret;
3317 }
3318
3319 static int
3320 linux_core_of_thread (ptid_t ptid)
3321 {
3322 char filename[sizeof ("/proc//task//stat")
3323 + 2 * 20 /* decimal digits for 2 numbers, max 2^64 bit each */
3324 + 1];
3325 FILE *f;
3326 char *content = NULL;
3327 char *p;
3328 char *ts = 0;
3329 int content_read = 0;
3330 int i;
3331 int core;
3332
3333 sprintf (filename, "/proc/%d/task/%ld/stat",
3334 ptid_get_pid (ptid), ptid_get_lwp (ptid));
3335 f = fopen (filename, "r");
3336 if (!f)
3337 return -1;
3338
3339 for (;;)
3340 {
3341 int n;
3342 content = realloc (content, content_read + 1024);
3343 n = fread (content + content_read, 1, 1024, f);
3344 content_read += n;
3345 if (n < 1024)
3346 {
3347 content[content_read] = '\0';
3348 break;
3349 }
3350 }
3351
3352 p = strchr (content, '(');
3353 p = strchr (p, ')') + 2; /* skip ")" and a whitespace. */
3354
3355 p = strtok_r (p, " ", &ts);
3356 for (i = 0; i != 36; ++i)
3357 p = strtok_r (NULL, " ", &ts);
3358
3359 if (sscanf (p, "%d", &core) == 0)
3360 core = -1;
3361
3362 free (content);
3363 fclose (f);
3364
3365 return core;
3366 }
3367
/* The Linux implementation of the gdbserver target vector.  The
   entries are positional (the field names live in the target_ops
   declaration); NULL entries — including the #ifdef fallbacks —
   mark operations this build does not provide.  */
static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_insert_point,
  linux_remove_point,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
  /* read_offsets: only meaningful on uClibc no-MMU configurations.  */
#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  linux_read_offsets,
#else
  NULL,
#endif
  /* TLS address lookup requires libthread_db support.  */
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,
#endif
  linux_qxfer_spu,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
  /* "monitor" command handling is routed through libthread_db when
     available.  */
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,
#endif
  linux_core_of_thread
};
3413
/* Ignore the thread-cancellation signal used internally by the
   threading library, so gdbserver itself is not disturbed by it.  */
static void
linux_init_signals (void)
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does.  */
  signal (__SIGRTMIN+1, SIG_IGN);
#endif
}
3423
/* One-time initialization of the Linux low-level target: install the
   target vector, the architecture's breakpoint instruction, signal
   handling, ptrace-event detection, and (when regsets are in use) the
   per-regset "disabled" bookkeeping.  Called once at startup.  */
void
initialize_low (void)
{
  struct sigaction sigchld_action;
  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (&linux_target_ops);
  /* Hand the breakpoint layer the architecture-specific software
     breakpoint instruction bytes.  */
  set_breakpoint_data (the_low_target.breakpoint,
		       the_low_target.breakpoint_len);
  linux_init_signals ();
  /* Probe which ptrace fork-tracing events the running kernel
     supports (defined elsewhere in this file).  */
  linux_test_for_tracefork ();
#ifdef HAVE_LINUX_REGSETS
  /* The regset table is terminated by a negative size; count the
     entries and allocate one flag byte per regset to record regsets
     the kernel rejects at runtime.  */
  for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
    ;
  disabled_regsets = xmalloc (num_regsets);
#endif

  /* Install sigchld_handler (defined elsewhere in this file) for
     SIGCHLD; SA_RESTART keeps interrupted system calls transparent
     to the rest of the server.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);
}