/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
   2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"

#include <sys/wait.h>
#include <stdio.h>
#include <sys/param.h>
#include <sys/ptrace.h>
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifndef PTRACE_GETSIGINFO
# define PTRACE_GETSIGINFO 0x4202
# define PTRACE_SETSIGINFO 0x4203
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* If the system headers did not provide the constants, hard-code the normal
   values.  */
#ifndef PTRACE_EVENT_FORK

#define PTRACE_SETOPTIONS 0x4200
#define PTRACE_GETEVENTMSG 0x4201

/* Options set using PTRACE_SETOPTIONS.  */
#define PTRACE_O_TRACESYSGOOD 0x00000001
#define PTRACE_O_TRACEFORK 0x00000002
#define PTRACE_O_TRACEVFORK 0x00000004
#define PTRACE_O_TRACECLONE 0x00000008
#define PTRACE_O_TRACEEXEC 0x00000010
#define PTRACE_O_TRACEVFORKDONE 0x00000020
#define PTRACE_O_TRACEEXIT 0x00000040

/* Wait extended result codes for the above trace options.  */
#define PTRACE_EVENT_FORK 1
#define PTRACE_EVENT_VFORK 2
#define PTRACE_EVENT_CLONE 3
#define PTRACE_EVENT_EXEC 4
#define PTRACE_EVENT_VFORK_DONE 5
#define PTRACE_EVENT_EXIT 6

#endif /* PTRACE_EVENT_FORK */
/* We can't always assume that this flag is available, but all systems
   with the ptrace event handlers also have __WALL, so it's safe to use
   here.  */
#ifndef __WALL
#define __WALL 0x40000000 /* Wait for any child.  */
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif
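
/* For example, on targets where SIGSTOP is 19 (true of most Linux
   ports), W_STOPCODE (SIGSTOP) == (19 << 8 | 0x7f) == 0x137f, a
   status for which WIFSTOPPED is true and WSTOPSIG returns SIGSTOP.  */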

#ifdef __UCLIBC__
#if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
#define HAS_NOMMU
#endif
#endif

/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
   representation of the thread ID.

   ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
   the same as the LWP ID.

   ``all_processes'' is keyed by the "overall process ID", which
   GNU/Linux calls tgid, "thread group ID".  */

struct inferior_list all_lwps;

/* A list of all unknown processes which receive stop signals.  Some other
   process will presumably claim each of these as forked children
   momentarily.  */

struct inferior_list stopped_pids;

/* FIXME this is a bit of a hack, and could be removed.  */
int stopping_threads;

/* FIXME make into a target method?  */
int using_threads = 1;

/* This flag is true iff we've just created or attached to our first
   inferior but it has not stopped yet.  As soon as it does, we need
   to call the low target's arch_setup callback.  Doing this only on
   the first inferior avoids reinitializing the architecture on every
   inferior, and avoids messing with the register caches of the
   already running inferiors.  NOTE: this assumes all inferiors under
   control of gdbserver have the same architecture.  */
static int new_inferior;

static void linux_resume_one_lwp (struct lwp_info *lwp,
                                  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (void);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static void *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int linux_core_of_thread (ptid_t ptid);
static void proceed_all_lwps (void);
static void unstop_all_lwps (struct lwp_info *except);
static int finish_step_over (struct lwp_info *lwp);
static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

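/* The types used for ptrace's address (3rd) and data (4th)
   arguments, and for the words transferred by the PTRACE_PEEK and
   PTRACE_POKE family of requests.  */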
#define PTRACE_ARG3_TYPE void *
#define PTRACE_ARG4_TYPE void *
#define PTRACE_XFER_TYPE long

#ifdef HAVE_LINUX_REGSETS
static char *disabled_regsets;
static int num_regsets;
#endif

/* The read/write ends of the pipe registered as a waitable file in
   the event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (struct inferior_list_entry *entry);

/* Accepts an integer PID; returns a string representing a file that
   can be opened to get info for the child process.
   Space for the result is malloc'd; the caller must free it.  */

char *
linux_child_pid_to_exec_file (int pid)
{
  char *name1, *name2;

  name1 = xmalloc (MAXPATHLEN);
  name2 = xmalloc (MAXPATHLEN);
  memset (name2, 0, MAXPATHLEN);

  sprintf (name1, "/proc/%d/exe", pid);
  if (readlink (name1, name2, MAXPATHLEN) > 0)
    {
      free (name1);
      return name2;
    }
  else
    {
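      /* The readlink failed (e.g. the executable was removed or the
         process is gone); fall back to returning the "/proc/PID/exe"
         path itself.  */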
      free (name2);
      return name1;
    }
}

/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header)
{
  return (header->e_ident[EI_MAG0] == ELFMAG0
          && header->e_ident[EI_MAG1] == ELFMAG1
          && header->e_ident[EI_MAG2] == ELFMAG2
          && header->e_ident[EI_MAG3] == ELFMAG3
          && header->e_ident[EI_CLASS] == ELFCLASS64);
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

int
elf_64_file_p (const char *file)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header);
}

static void
delete_lwp (struct lwp_info *lwp)
{
  remove_thread (get_lwp_thread (lwp));
  remove_inferior (&all_lwps, &lwp->head);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  /* Is this the first process?  If so, then set the arch.  */
  if (all_processes.head == NULL)
    new_inferior = 1;

  proc = add_process (pid, attached);
  proc->private = xcalloc (1, sizeof (*proc->private));

  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}

/* Wrapper function for waitpid which handles EINTR, and emulates
   __WALL for systems where that is not available.  */

static int
my_waitpid (int pid, int *status, int flags)
{
  int ret, out_errno;

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);

  if (flags & __WALL)
    {
      sigset_t block_mask, org_mask, wake_mask;
      int wnohang;

      wnohang = (flags & WNOHANG) != 0;
      flags &= ~(__WALL | __WCLONE);
      flags |= WNOHANG;

      /* Block all signals while here.  This avoids knowing about
         LinuxThread's signals.  */
      sigfillset (&block_mask);
      sigprocmask (SIG_BLOCK, &block_mask, &org_mask);

      /* ... except during the sigsuspend below.  */
      sigemptyset (&wake_mask);

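      /* Emulate __WALL by polling with WNOHANG, alternating between
         the "plain" children and __WCLONE flavors of waitpid; once
         both flavors have come up empty, sigsuspend until a signal
         (e.g. SIGCHLD) arrives and try again.  */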
      while (1)
        {
          /* Since all signals are blocked, there's no need to check
             for EINTR here.  */
          ret = waitpid (pid, status, flags);
          out_errno = errno;

          if (ret == -1 && out_errno != ECHILD)
            break;
          else if (ret > 0)
            break;

          if (flags & __WCLONE)
            {
              /* We've tried both flavors now.  If WNOHANG is set,
                 there's nothing else to do, just bail out.  */
              if (wnohang)
                break;

              if (debug_threads)
                fprintf (stderr, "blocking\n");

              /* Block waiting for signals.  */
              sigsuspend (&wake_mask);
            }

          flags ^= __WCLONE;
        }

      sigprocmask (SIG_SETMASK, &org_mask, NULL);
    }
  else
    {
      do
        ret = waitpid (pid, status, flags);
      while (ret == -1 && errno == EINTR);
      out_errno = errno;
    }

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
             pid, flags, status ? *status : -1, ret);

  errno = out_errno;
  return ret;
}

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = wstat >> 16;
  struct lwp_info *new_lwp;

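  /* An extended ptrace event (PTRACE_EVENT_*) is reported in bits
     16..23 of the wait status; a plain SIGTRAP stop leaves those
     bits zero.  */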
  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status = W_STOPCODE (SIGSTOP);

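      /* For a clone event, PTRACE_GETEVENTMSG supplies the LWP id of
         the newly created thread.  */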
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (! pull_pid_from_list (&stopped_pids, new_pid))
        {
          /* The new child has a pending SIGSTOP.  We can't affect it until it
             hits the SIGSTOP, but we're already attached.  */

          ret = my_waitpid (new_pid, &status, __WALL);

          if (ret == -1)
            perror_with_name ("waiting for new child");
          else if (ret != new_pid)
            warning ("wait returned unexpected PID %d", ret);
          else if (!WIFSTOPPED (status))
            warning ("wait returned unexpected status 0x%x", status);
        }

      ptrace (PTRACE_SETOPTIONS, new_pid, 0,
              (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);

      ptid = ptid_build (pid_of (event_child), new_pid, 0);
      new_lwp = (struct lwp_info *) add_lwp (ptid);
      add_thread (ptid, new_lwp);

      /* Either we're going to immediately resume the new thread
         or leave it stopped.  linux_resume_one_lwp is a nop if it
         thinks the thread is currently running, so set this first
         before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
         we might get another signal delivered to the group first.
         If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
        {
          if (stopping_threads)
            new_lwp->stop_pc = get_stop_pc (new_lwp);
          else
            linux_resume_one_lwp (new_lwp, 0, 0, NULL);
        }
      else
        {
          new_lwp->stop_expected = 1;

          if (stopping_threads)
            {
              new_lwp->stop_pc = get_stop_pc (new_lwp);
              new_lwp->status_pending_p = 1;
              new_lwp->status_pending = status;
            }
          else
            /* Pass the signal on.  This is what GDB does - except
               shouldn't we really report it instead?  */
            linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
        }

      /* Always resume the current thread.  If we are stopping
         threads, it will have a pending SIGSTOP; we may as well
         collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_inferior, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    fprintf (stderr, "pc is 0x%lx\n", (long) pc);

  current_inferior = saved_inferior;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:
   If we were single-stepping this process using PTRACE_SINGLESTEP,
   we will get only the one SIGTRAP (even if the instruction we
   stepped over was a breakpoint).  The value of $eip will be the
   next instruction.
   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If we cancel the SIGTRAP, we
   must resume at the decremented PC.

   (Presumably, not yet tested) On a non-decr_pc_after_break machine
   with hardware or kernel single-step:
   If we single-step over a breakpoint instruction, our PC will
   point at the following instruction.  If we continue and hit a
   breakpoint instruction, our PC will point at the breakpoint
   instruction.  */
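
/* Concretely, on i386 (decr_pc_after_break == 1): with an "int3"
   planted at ADDR and the inferior resumed with PTRACE_CONT, the
   SIGTRAP stop reports $eip == ADDR + 1, so get_stop_pc below yields
   ADDR, the breakpoint address.  */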

static CORE_ADDR
get_stop_pc (struct lwp_info *lwp)
{
  CORE_ADDR stop_pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  stop_pc = get_pc (lwp);

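  /* Adjust for the breakpoint only on a plain breakpoint SIGTRAP:
     not a single-step, not a watchpoint hit, and not an extended
     ptrace event (upper 16 bits of the status clear).  */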
  if (WSTOPSIG (lwp->last_status) == SIGTRAP
      && !lwp->stepping
      && !lwp->stopped_by_watchpoint
      && lwp->last_status >> 16 == 0)
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}

static void *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  lwp->head.id = ptid;

  if (the_low_target.new_thread != NULL)
    lwp->arch_private = the_low_target.new_thread ();

  add_inferior_to_list (&all_lwps, &lwp->head);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      ptrace (PTRACE_TRACEME, 0, 0, 0);

#ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      execv (program, allargs);
      if (errno == ENOENT)
        execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
               strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  add_thread (ptid, new_lwp);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

/* Attach to an inferior process.  */

static void
linux_attach_lwp_1 (unsigned long lwpid, int initial)
{
  ptid_t ptid;
  struct lwp_info *new_lwp;

  if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
    {
      if (!initial)
        {
          /* If we fail to attach to an LWP, just warn.  */
          fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
                   strerror (errno), errno);
          fflush (stderr);
          return;
        }
      else
        /* If we fail to attach to a process, report an error.  */
        error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid,
               strerror (errno), errno);
    }

  if (initial)
    /* NOTE/FIXME: This lwp might not have been the tgid.  */
    ptid = ptid_build (lwpid, lwpid, 0);
  else
    {
      /* Note that extracting the pid from the current inferior is
         safe, since we're always called in the context of the same
         process as this new thread.  */
      int pid = pid_of (get_thread_lwp (current_inferior));
      ptid = ptid_build (pid, lwpid, 0);
    }

  new_lwp = (struct lwp_info *) add_lwp (ptid);
  add_thread (ptid, new_lwp);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
        of a new thread that is being created.
        In this case we should ignore that SIGSTOP and resume the
        process.  This is handled below by setting stop_expected = 1,
        and the fact that add_thread sets last_resume_kind ==
        resume_continue.

     2) This is the first thread (the process thread), and we're attaching
        to it via attach_inferior.
        In this case we want the process thread to stop.
        This is handled by having linux_attach set last_resume_kind ==
        resume_stop after we return.
        ??? If the process already has several threads we leave the other
        threads running.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
        existing threads.
        In this case we want the thread to stop.
        FIXME: This case is currently not properly handled.
        We should wait for the SIGSTOP but don't.  Things work apparently
        because enough time passes between when we ptrace (ATTACH) and when
        gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;
}

void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}

int
linux_attach (unsigned long pid)
{
  linux_attach_lwp_1 (pid, 1);
  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
         process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  return 0;
}

struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
        return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (struct thread_info *thread)
{
  ptid_t ptid = ((struct inferior_list_entry *) thread)->id;
  int pid = ptid_get_pid (ptid);
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
                         second_thread_of_pid_p, &counter) == NULL);
}

/* Kill the inferior lwp.  */

static int
linux_kill_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int wstat;
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (lwp) == pid)
    {
      if (debug_threads)
        fprintf (stderr, "lkop: is last of process %s\n",
                 target_pid_to_str (entry->id));
      return 0;
    }

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  if (!lwp->stopped)
    send_sigstop (lwp);

  do
    {
      ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (pid > 0 && WIFSTOPPED (wstat));

  return 0;
}

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;
  struct thread_info *thread;
  int wstat;
  int lwpid;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  find_inferior (&all_threads, linux_kill_one_lwp, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));
  thread = get_lwp_thread (lwp);

  if (debug_threads)
    fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
             lwpid_of (lwp), pid);

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  if (!lwp->stopped)
    send_sigstop (lwp);

  do
    {
      ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (lwpid > 0 && WIFSTOPPED (wstat));

  delete_lwp (lwp);

  the_target->mourn (process);
  return 0;
}

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If we're detaching from a running inferior, make sure it is
     stopped first, as PTRACE_DETACH will not work otherwise.  */
  if (!lwp->stopped)
    {
      int lwpid = lwpid_of (lwp);

      stopping_threads = 1;
      send_sigstop (lwp);

      /* If this detects a new thread through a clone event, the new
         thread is appended to the end of the lwp list, so we'll
         eventually detach from it.  */
      wait_for_sigstop (&lwp->head);
      stopping_threads = 0;

      /* If LWP exits while we're trying to stop it, there's nothing
         left to do.  */
      lwp = find_lwp_pid (pid_to_ptid (lwpid));
      if (lwp == NULL)
        return 0;
    }

  /* If this process is stopped but is expecting a SIGSTOP, then make
     sure we take care of that now.  This isn't absolutely guaranteed
     to collect the SIGSTOP, but is fairly likely to.  */
  if (lwp->stop_expected)
    {
      int wstat;
      /* Clear stop_expected, so that the SIGSTOP will be reported.  */
      lwp->stop_expected = 0;
      if (lwp->stopped)
        linux_resume_one_lwp (lwp, 0, 0, NULL);
      linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
                           get_lwp_thread (lwp));

  /* Finally, let it resume.  */
  ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0);

  delete_lwp (lwp);
  return 0;
}

static int
any_thread_of (struct inferior_list_entry *entry, void *args)
{
  int *pid_p = args;

  if (ptid_get_pid (entry->id) == *pid_p)
    return 1;

  return 0;
}

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  current_inferior =
    (struct thread_info *) find_inferior (&all_threads, any_thread_of, &pid);

  delete_all_breakpoints ();
  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);
  return 0;
}

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  /* Free all private data.  */
  priv = process->private;
  free (priv->arch_private);
  free (priv);
  process->private = NULL;

  remove_process (process);
}

static void
linux_join (int pid)
{
  int status, ret;
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return;

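  /* Reap the process: loop until waitpid either reports an exit or
     termination status, or fails with ECHILD because there is no
     child left to wait for.  */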
  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  ptid_t ptid = * (ptid_t *) arg;
  struct thread_info *thread = get_lwp_thread (lwp);

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
    return 0;

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_STOPPED)
    return 0;

  return lwp->status_pending_p;
}

static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  return (struct lwp_info *) find_inferior (&all_lwps, same_lwp, &ptid);
}

static struct lwp_info *
linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
{
  int ret;
  int to_wait_for = -1;
  struct lwp_info *child = NULL;

  if (debug_threads)
    fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));

  if (ptid_equal (ptid, minus_one_ptid))
    to_wait_for = -1;                   /* any child */
  else
    to_wait_for = ptid_get_lwp (ptid);  /* this lwp only */

  options |= __WALL;

 retry:

  ret = my_waitpid (to_wait_for, wstatp, options);
  if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
    return NULL;
  else if (ret == -1)
    perror_with_name ("waitpid");

  if (debug_threads
      && (!WIFSTOPPED (*wstatp)
          || (WSTOPSIG (*wstatp) != 32
              && WSTOPSIG (*wstatp) != 33)))
    fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);

  child = find_lwp_pid (pid_to_ptid (ret));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
       was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (*wstatp))
    {
      add_pid_to_list (&stopped_pids, ret);
      goto retry;
    }
  else if (child == NULL)
    goto retry;

  child->stopped = 1;

  child->last_status = *wstatp;

  /* Architecture-specific setup after inferior is running.
     This needs to happen after we have attached to the inferior
     and it is stopped for the first time, but before we access
     any inferior registers.  */
  if (new_inferior)
    {
      the_low_target.arch_setup ();
#ifdef HAVE_LINUX_REGSETS
      memset (disabled_regsets, 0, num_regsets);
#endif
      new_inferior = 0;
    }

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, it's possible that the way to know which watched
     address trapped, is to check the register that is used to select
     which address to watch.  Problem is, between setting the
     watchpoint and reading back which data address trapped, the user
     may change the set of watchpoints, and, as a consequence, GDB
     changes the debug registers in the inferior.  To avoid reading
     back a stale stopped-data-address when that happens, we cache in
     LP the fact that a watchpoint trapped, and the corresponding data
     address, as soon as we see CHILD stop with a SIGTRAP.  If GDB
     changes the debug registers meanwhile, we have the cached data we
     can rely on.  */

  if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
    {
      if (the_low_target.stopped_by_watchpoint == NULL)
        {
          child->stopped_by_watchpoint = 0;
        }
      else
        {
          struct thread_info *saved_inferior;

          saved_inferior = current_inferior;
          current_inferior = get_lwp_thread (child);

          child->stopped_by_watchpoint
            = the_low_target.stopped_by_watchpoint ();

          if (child->stopped_by_watchpoint)
            {
              if (the_low_target.stopped_data_address != NULL)
                child->stopped_data_address
                  = the_low_target.stopped_data_address ();
              else
                child->stopped_data_address = 0;
            }

          current_inferior = saved_inferior;
        }
    }

  /* Store the STOP_PC, with adjustment applied.  This depends on the
     architecture being defined already (so that CHILD has a valid
     regcache), and on LAST_STATUS being set (to check for SIGTRAP or
     not).  */
  if (WIFSTOPPED (*wstatp))
    child->stop_pc = get_stop_pc (child);

  if (debug_threads
      && WIFSTOPPED (*wstatp)
      && the_low_target.get_pc != NULL)
    {
      struct thread_info *saved_inferior = current_inferior;
      struct regcache *regcache;
      CORE_ADDR pc;

      current_inferior = get_lwp_thread (child);
      regcache = get_thread_regcache (current_inferior, 1);
      pc = (*the_low_target.get_pc) (regcache);
      fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
      current_inferior = saved_inferior;
    }

  return child;
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  /* We also need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (the LWP could be in the jump pad, maybe even holding
     the lock).  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  if (tpoint_related_event)
    {
      if (debug_threads)
        fprintf (stderr, "got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Arrange for a breakpoint to be hit again later.  We don't keep the
   SIGTRAP status and don't forward the SIGTRAP signal to the LWP.  We
   will handle the current event, eventually we will resume this LWP,
   and this breakpoint will trap again.  */

static int
cancel_breakpoint (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;

  /* There's nothing to do if we don't support breakpoints.  */
  if (!supports_breakpoints ())
    return 0;

  /* breakpoint_at reads from current inferior.  */
  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
    {
      if (debug_threads)
        fprintf (stderr,
                 "CB: Push back breakpoint for %s\n",
                 target_pid_to_str (ptid_of (lwp)));

      /* Back up the PC if necessary.  */
      if (the_low_target.decr_pc_after_break)
        {
          struct regcache *regcache
            = get_thread_regcache (current_inferior, 1);
          (*the_low_target.set_pc) (regcache, lwp->stop_pc);
        }

      current_inferior = saved_inferior;
      return 1;
    }
  else
    {
      if (debug_threads)
        fprintf (stderr,
                 "CB: No breakpoint found at %s for [%s]\n",
                 paddress (lwp->stop_pc),
                 target_pid_to_str (ptid_of (lwp)));
    }

  current_inferior = saved_inferior;
  return 0;
}

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* Wait for an event from child PID.  If PID is -1, wait for any
   child.  Store the stop status through the status pointer WSTAT.
   OPTIONS is passed to the waitpid call.  Return 0 if no child stop
   event was found and OPTIONS contains WNOHANG.  Return the PID of
   the stopped child otherwise.  */

static int
linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options)
{
  struct lwp_info *event_child, *requested_child;

  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (ptid_equal (ptid, minus_one_ptid)
      || ptid_equal (pid_to_ptid (ptid_get_pid (ptid)), ptid))
    {
      event_child = (struct lwp_info *)
        find_inferior (&all_lwps, status_pending_p_callback, &ptid);
      if (debug_threads && event_child)
        fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
    }
  else
    {
      requested_child = find_lwp_pid (ptid);

      if (requested_child->status_pending_p)
        event_child = requested_child;
    }

  if (event_child != NULL)
    {
      if (debug_threads)
        fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
                 lwpid_of (event_child), event_child->status_pending);
      *wstat = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      current_inferior = get_lwp_thread (event_child);
      return lwpid_of (event_child);
    }

  /* We only enter this loop if no process has a pending wait status.  Thus
     any action taken in response to a wait status inside this loop is
     responding as soon as we detect the status, not after any pending
     events.  */
  while (1)
    {
      event_child = linux_wait_for_lwp (ptid, wstat, options);

      if ((options & WNOHANG) && event_child == NULL)
        {
          if (debug_threads)
            fprintf (stderr, "WNOHANG set, no event found\n");
          return 0;
        }

      if (event_child == NULL)
        error ("event from unknown child");

      current_inferior = get_lwp_thread (event_child);

      /* Check for thread exit.  */
      if (! WIFSTOPPED (*wstat))
        {
          if (debug_threads)
            fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));

          /* If the last thread is exiting, just return.  */
          if (last_thread_of_process_p (current_inferior))
            {
              if (debug_threads)
                fprintf (stderr, "LWP %ld is last lwp of process\n",
                         lwpid_of (event_child));
              return lwpid_of (event_child);
            }

          if (!non_stop)
            {
              current_inferior = (struct thread_info *) all_threads.head;
              if (debug_threads)
                fprintf (stderr, "Current inferior is now %ld\n",
                         lwpid_of (get_thread_lwp (current_inferior)));
            }
          else
            {
              current_inferior = NULL;
              if (debug_threads)
                fprintf (stderr, "Current inferior is now <NULL>\n");
            }

          /* If we were waiting for this particular child to do something...
             well, it did something.  */
          if (requested_child != NULL)
            {
              int lwpid = lwpid_of (event_child);

              /* Cancel the step-over operation --- the thread that
                 started it is gone.  */
              if (finish_step_over (event_child))
                unstop_all_lwps (event_child);
              delete_lwp (event_child);
              return lwpid;
            }

          delete_lwp (event_child);

          /* Wait for a more interesting event.  */
          continue;
        }

      if (event_child->must_set_ptrace_flags)
        {
          ptrace (PTRACE_SETOPTIONS, lwpid_of (event_child),
                  0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
          event_child->must_set_ptrace_flags = 0;
        }

      if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
          && *wstat >> 16 != 0)
        {
          handle_extended_wait (event_child, *wstat);
          continue;
        }

      /* If GDB is not interested in this signal, don't stop other
         threads, and don't report it to GDB.  Just resume the
         inferior right away.  We do this for threading-related
         signals as well as any that GDB specifically requested we
         ignore.  But never ignore SIGSTOP if we sent it ourselves,
         and do not ignore signals when stepping - they may require
         special handling to skip the signal handler.  */
      /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
         thread library?  */
      if (WIFSTOPPED (*wstat)
          && !event_child->stepping
          && (
#if defined (USE_THREAD_DB) && defined (__SIGRTMIN)
              (current_process ()->private->thread_db != NULL
               && (WSTOPSIG (*wstat) == __SIGRTMIN
                   || WSTOPSIG (*wstat) == __SIGRTMIN + 1))
              ||
#endif
              (pass_signals[target_signal_from_host (WSTOPSIG (*wstat))]
               && !(WSTOPSIG (*wstat) == SIGSTOP
                    && event_child->stop_expected))))
        {
          siginfo_t info, *info_p;

          if (debug_threads)
            fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
                     WSTOPSIG (*wstat), lwpid_of (event_child));

          if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
            info_p = &info;
          else
            info_p = NULL;
          linux_resume_one_lwp (event_child, event_child->stepping,
                                WSTOPSIG (*wstat), info_p);
          continue;
        }

      if (WIFSTOPPED (*wstat)
          && WSTOPSIG (*wstat) == SIGSTOP
          && event_child->stop_expected)
        {
          int should_stop;

          if (debug_threads)
            fprintf (stderr, "Expected stop.\n");
          event_child->stop_expected = 0;

          should_stop = (current_inferior->last_resume_kind == resume_stop
                         || stopping_threads);

          if (!should_stop)
            {
              linux_resume_one_lwp (event_child,
                                    event_child->stepping, 0, NULL);
              continue;
            }
        }

      return lwpid_of (event_child);
    }

  /* NOTREACHED */
  return 0;
}

static int
linux_wait_for_event (ptid_t ptid, int *wstat, int options)
{
  ptid_t wait_ptid;

  if (ptid_is_pid (ptid))
    {
      /* A request to wait for a specific tgid.  This is not possible
         with waitpid, so instead, we wait for any child, and leave
         children we're not interested in right now with a pending
         status to report later.  */
      wait_ptid = minus_one_ptid;
    }
  else
    wait_ptid = ptid;

  while (1)
    {
      int event_pid;

      event_pid = linux_wait_for_event_1 (wait_ptid, wstat, options);

      if (event_pid > 0
          && ptid_is_pid (ptid) && ptid_get_pid (ptid) != event_pid)
        {
          struct lwp_info *event_child = find_lwp_pid (pid_to_ptid (event_pid));

          if (! WIFSTOPPED (*wstat))
            mark_lwp_dead (event_child, *wstat);
          else
            {
              event_child->status_pending_p = 1;
              event_child->status_pending = *wstat;
            }
        }
      else
        return event_pid;
    }
}


/* Count the LWPs that have had events.  */

static int
count_events_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);
  int *count = data;

  gdb_assert (count != NULL);

  /* Count only resumed LWPs that have a SIGTRAP event pending that
     should be reported to GDB.  */
  if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && thread->last_resume_kind != resume_stop
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !breakpoint_inserted_here (lp->stop_pc))
    (*count)++;

  return 0;
}

/* Select the LWP (if any) that is currently being single-stepped.  */

static int
select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);

  if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && thread->last_resume_kind == resume_step
      && lp->status_pending_p)
    return 1;
  else
    return 0;
}

/* Select the Nth LWP that has had a SIGTRAP event that should be
   reported to GDB.  */

static int
select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);
  int *selector = data;

  gdb_assert (selector != NULL);

  /* Select only resumed LWPs that have a SIGTRAP event pending.  */
  if (thread->last_resume_kind != resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !breakpoint_inserted_here (lp->stop_pc))
    if ((*selector)-- == 0)
      return 1;

  return 0;
}

static int
cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);
  struct lwp_info *event_lp = data;

  /* Leave the LWP that has been elected to receive a SIGTRAP alone.  */
  if (lp == event_lp)
    return 0;

  /* If a LWP other than the LWP that we're reporting an event for has
     hit a GDB breakpoint (as opposed to some random trap signal),
     then just arrange for it to hit it again later.  We don't keep
     the SIGTRAP status and don't forward the SIGTRAP signal to the
     LWP.  We will handle the current event, eventually we will resume
     all LWPs, and this one will get its breakpoint trap again.

     If we do not do this, then we run the risk that the user will
     delete or disable the breakpoint, but the LWP will have already
     tripped on it.  */

  if (thread->last_resume_kind != resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !lp->stepping
      && !lp->stopped_by_watchpoint
      && cancel_breakpoint (lp))
    /* Throw away the SIGTRAP.  */
    lp->status_pending_p = 0;

  return 0;
}

/* Select one LWP out of those that have events pending.  */

static void
select_event_lwp (struct lwp_info **orig_lp)
{
  int num_events = 0;
  int random_selector;
  struct lwp_info *event_lp;

  /* Give preference to any LWP that is being single-stepped.  */
  event_lp
    = (struct lwp_info *) find_inferior (&all_lwps,
                                         select_singlestep_lwp_callback, NULL);
  if (event_lp != NULL)
    {
      if (debug_threads)
        fprintf (stderr,
                 "SEL: Select single-step %s\n",
                 target_pid_to_str (ptid_of (event_lp)));
    }
  else
    {
      /* No single-stepping LWP.  Select one at random, out of those
         which have had SIGTRAP events.  */

      /* First see how many SIGTRAP events we have.  */
      find_inferior (&all_lwps, count_events_callback, &num_events);

      /* Now randomly pick a LWP out of those that have had a SIGTRAP.  */
      random_selector = (int)
        ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
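      /* Multiplying before dividing by RAND_MAX + 1.0 yields an
         integer in 0 .. num_events - 1, with each value
         approximately equally likely; num_events itself is never
         produced.  */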

      if (debug_threads && num_events > 1)
        fprintf (stderr,
                 "SEL: Found %d SIGTRAP events, selecting #%d\n",
                 num_events, random_selector);

      event_lp = (struct lwp_info *) find_inferior (&all_lwps,
                                                    select_event_lwp_callback,
                                                    &random_selector);
    }

  if (event_lp != NULL)
    {
      /* Switch the event LWP.  */
      *orig_lp = event_lp;
    }
}

/* Set this inferior LWP's state as "want-stopped".  We won't resume
   this LWP until the client gives us another action for it.  */

static void
gdb_wants_lwp_stopped (struct inferior_list_entry *entry)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lwp);

  /* Most threads are stopped implicitly (all-stop); tag that with
     signal 0.  The thread that is explicitly reported stopped to the
     client gets its status fixed up afterwards.  */
  thread->last_status.kind = TARGET_WAITKIND_STOPPED;
  thread->last_status.value.sig = TARGET_SIGNAL_0;

  thread->last_resume_kind = resume_stop;
}

/* Set all LWPs' states as "want-stopped".  */

static void
gdb_wants_all_stopped (void)
{
  for_each_inferior (&all_lwps, gdb_wants_lwp_stopped);
}

/* Wait for the process and return its stop status.  */

static ptid_t
linux_wait_1 (ptid_t ptid,
              struct target_waitstatus *ourstatus, int target_options)
{
  int w;
  struct lwp_info *event_child;
  int options;
  int pid;
  int step_over_finished;
  int bp_explains_trap;
  int maybe_internal_trap;
  int report_to_gdb;
  int trace_event;

  /* Translate generic target options into linux options.  */
  options = __WALL;
  if (target_options & TARGET_WNOHANG)
    options |= WNOHANG;

 retry:
  ourstatus->kind = TARGET_WAITKIND_IGNORE;

  /* If we were only supposed to resume one thread, only wait for
     that thread - if it's still alive.  If it died, however - which
     can happen if we're coming from the thread death case below -
     then we need to make sure we restart the other threads.  We could
     pick a thread at random or restart all; restarting all is less
     arbitrary.  */
  if (!non_stop
      && !ptid_equal (cont_thread, null_ptid)
      && !ptid_equal (cont_thread, minus_one_ptid))
    {
      struct thread_info *thread;

      thread = (struct thread_info *) find_inferior_id (&all_threads,
                                                        cont_thread);

      /* No stepping, no signal - unless one is pending already, of course.  */
      if (thread == NULL)
        {
          struct thread_resume resume_info;
          resume_info.thread = minus_one_ptid;
          resume_info.kind = resume_continue;
          resume_info.sig = 0;
          linux_resume (&resume_info, 1);
        }
      else
        ptid = cont_thread;
    }

  if (ptid_equal (step_over_bkpt, null_ptid))
    pid = linux_wait_for_event (ptid, &w, options);
  else
    {
      if (debug_threads)
        fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
                 target_pid_to_str (step_over_bkpt));
      pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
    }

  if (pid == 0) /* only if TARGET_WNOHANG */
    return null_ptid;

  event_child = get_thread_lwp (current_inferior);

  /* If we are waiting for a particular child, and it exited,
     linux_wait_for_event will return its exit status.  Similarly if
     the last child exited.  If this is not the last child, however,
     do not report it as exited until there is a 'thread exited' response
     available in the remote protocol.  Instead, just wait for another event.
     This should be safe, because if the thread crashed we will already
     have reported the termination signal to GDB; that should stop any
     in-progress stepping operations, etc.

     Report the exit status of the last thread to exit.  This matches
     LinuxThreads' behavior.  */

  if (last_thread_of_process_p (current_inferior))
    {
      if (WIFEXITED (w) || WIFSIGNALED (w))
        {
          delete_lwp (event_child);

          current_inferior = NULL;

          if (WIFEXITED (w))
            {
              ourstatus->kind = TARGET_WAITKIND_EXITED;
              ourstatus->value.integer = WEXITSTATUS (w);

              if (debug_threads)
                fprintf (stderr, "\nChild exited with retcode = %x \n",
                         WEXITSTATUS (w));
            }
          else
            {
              ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
              ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));

              if (debug_threads)
                fprintf (stderr, "\nChild terminated with signal = %x \n",
                         WTERMSIG (w));
            }

          return pid_to_ptid (pid);
        }
    }
  else
    {
      if (!WIFSTOPPED (w))
        goto retry;
    }

  /* If this event was not handled before, and is not a SIGTRAP, we
     report it.  SIGILL and SIGSEGV are also treated as traps in case
     a breakpoint is inserted at the current PC.  If this target does
     not support internal breakpoints at all, we also report the
     SIGTRAP without further processing; it's of no concern to us.  */
  maybe_internal_trap
    = (supports_breakpoints ()
       && (WSTOPSIG (w) == SIGTRAP
           || ((WSTOPSIG (w) == SIGILL
                || WSTOPSIG (w) == SIGSEGV)
               && (*the_low_target.breakpoint_at) (event_child->stop_pc))));

  if (maybe_internal_trap)
    {
      /* Handle anything that requires bookkeeping before deciding to
         report the event or continue waiting.  */

      /* First check if we can explain the SIGTRAP with an internal
         breakpoint, or if we should possibly report the event to GDB.
         Do this before anything that may remove or insert a
         breakpoint.  */
      bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);

      /* We have a SIGTRAP, possibly a step-over dance has just
         finished.  If so, tweak the state machine accordingly,
         reinsert breakpoints and delete any reinsert (software
         single-step) breakpoints.  */
      step_over_finished = finish_step_over (event_child);

      /* Now invoke the callbacks of any internal breakpoints there.  */
      check_breakpoints (event_child->stop_pc);

      /* Handle tracepoint data collecting.  This may overflow the
         trace buffer, and cause a tracing stop, removing
         breakpoints.  */
      trace_event = handle_tracepoints (event_child);

      if (bp_explains_trap)
        {
          /* If we stepped or ran into an internal breakpoint, we've
             already handled it.  So next time we resume (from this
             PC), we should step over it.  */
          if (debug_threads)
            fprintf (stderr, "Hit a gdbserver breakpoint.\n");

          if (breakpoint_here (event_child->stop_pc))
            event_child->need_step_over = 1;
        }
    }
  else
    {
      /* We have some other signal, possibly a step-over dance was in
         progress, and it should be cancelled too.  */
      step_over_finished = finish_step_over (event_child);

      trace_event = 0;
    }

  /* We have all the data we need.  Either report the event to GDB, or
     resume threads and keep waiting for more.  */

  /* Check if GDB would be interested in this event.  If GDB wanted
     this thread to single step, we always want to report the SIGTRAP,
     and let GDB handle it.  Watchpoints should always be reported.
     So should signals we can't explain.  A SIGTRAP we can't explain
     could be a GDB breakpoint --- we may or may not support Z0
     breakpoints.  If we do, we'll be able to handle GDB breakpoints
     on top of internal breakpoints, by handling the internal
     breakpoint and still reporting the event to GDB.  If we don't,
     we're out of luck, GDB won't see the breakpoint hit.  */
1799 report_to_gdb = (!maybe_internal_trap
1800 || current_inferior->last_resume_kind == resume_step
1801 || event_child->stopped_by_watchpoint
1802 || (!step_over_finished && !bp_explains_trap && !trace_event)
1803 || gdb_breakpoint_here (event_child->stop_pc));
1804
1805 /* We found no reason GDB would want us to stop. We either hit one
1806 of our own breakpoints, or finished an internal step GDB
1807 shouldn't know about. */
1808 if (!report_to_gdb)
1809 {
1810 if (debug_threads)
1811 {
1812 if (bp_explains_trap)
1813 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
1814 if (step_over_finished)
1815 fprintf (stderr, "Step-over finished.\n");
1816 if (trace_event)
1817 fprintf (stderr, "Tracepoint event.\n");
1818 }
1819
1820 /* We're not reporting this breakpoint to GDB, so apply the
1821 decr_pc_after_break adjustment to the inferior's regcache
1822 ourselves. */
1823
1824 if (the_low_target.set_pc != NULL)
1825 {
1826 struct regcache *regcache
1827 = get_thread_regcache (get_lwp_thread (event_child), 1);
1828 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
1829 }
1830
1831 /* We've finished stepping over a breakpoint. We've stopped all
1832 LWPs momentarily except the stepping one. This is where we
1833 resume them all again. We're going to keep waiting, so use
1834 proceed, which handles stepping over the next breakpoint. */
1835 if (debug_threads)
1836 fprintf (stderr, "proceeding all threads.\n");
1837 proceed_all_lwps ();
1838 goto retry;
1839 }
1840
1841 if (debug_threads)
1842 {
1843 if (current_inferior->last_resume_kind == resume_step)
1844 fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
1845 if (event_child->stopped_by_watchpoint)
1846 fprintf (stderr, "Stopped by watchpoint.\n");
1847 if (gdb_breakpoint_here (event_child->stop_pc))
1848 fprintf (stderr, "Stopped by GDB breakpoint.\n");
1849 if (debug_threads)
1850 fprintf (stderr, "Hit a non-gdbserver trap event.\n");
1851 }
1852
1853 /* Alright, we're going to report a stop. */
1854
1855 if (!non_stop)
1856 {
1857 /* In all-stop, stop all threads. */
1858 stop_all_lwps ();
1859
1860 /* If we're not waiting for a specific LWP, choose an event LWP
1861 from among those that have had events. Giving equal priority
1862 to all LWPs that have had events helps prevent
1863 starvation. */
1864 if (ptid_equal (ptid, minus_one_ptid))
1865 {
1866 event_child->status_pending_p = 1;
1867 event_child->status_pending = w;
1868
1869 select_event_lwp (&event_child);
1870
1871 event_child->status_pending_p = 0;
1872 w = event_child->status_pending;
1873 }
1874
1875 /* Now that we've selected our final event LWP, cancel any
1876 breakpoints in other LWPs that have hit a GDB breakpoint.
1877 See the comment in cancel_breakpoints_callback to find out
1878 why. */
1879 find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
1880 }
1881 else
1882 {
1883 /* If we just finished a step-over, then all threads had been
1884 momentarily paused. In all-stop, that's fine, we want
1885 threads stopped by now anyway. In non-stop, we need to
1886 re-resume threads that GDB wanted to be running. */
1887 if (step_over_finished)
1888 unstop_all_lwps (event_child);
1889 }
1890
1891 ourstatus->kind = TARGET_WAITKIND_STOPPED;
1892
1893 /* Do this before the gdb_wants_all_stopped calls below, since they
1894 always set last_resume_kind to resume_stop. */
1895 if (current_inferior->last_resume_kind == resume_stop
1896 && WSTOPSIG (w) == SIGSTOP)
1897 {
1898 /* This thread was requested to stop by GDB with vCont;t, and it
1899 stopped cleanly, so report it as stopped with SIG0. The use of
1900 SIGSTOP is an implementation detail. */
1901 ourstatus->value.sig = TARGET_SIGNAL_0;
1902 }
1903 else if (current_inferior->last_resume_kind == resume_stop
1904 && WSTOPSIG (w) != SIGSTOP)
1905 {
1906 /* This thread was requested to stop by GDB with vCont;t, but it
1907 stopped for some other reason. */
1908 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
1909 }
1910 else
1911 {
1912 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
1913 }
1914
1915 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
1916
1917 if (!non_stop)
1918 {
1919 /* From GDB's perspective, all-stop mode always stops all
1920 threads implicitly. Tag all threads as "want-stopped". */
1921 gdb_wants_all_stopped ();
1922 }
1923 else
1924 {
1925 /* We're reporting this LWP as stopped. Update its
1926 "want-stopped" state to what the client wants, until it gets
1927 a new resume action. */
1928 gdb_wants_lwp_stopped (&event_child->head);
1929 }
1930
1931 if (debug_threads)
1932 fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
1933 target_pid_to_str (ptid_of (event_child)),
1934 ourstatus->kind,
1935 ourstatus->value.sig);
1936
1937 get_lwp_thread (event_child)->last_status = *ourstatus;
1938 return ptid_of (event_child);
1939 }
1940
1941 /* Get rid of any pending event in the pipe. */
1942 static void
1943 async_file_flush (void)
1944 {
1945 int ret;
1946 char buf;
1947
1948 do
1949 ret = read (linux_event_pipe[0], &buf, 1);
1950 while (ret >= 0 || (ret == -1 && errno == EINTR));
1951 }
1952
1953 /* Put something in the pipe, so the event loop wakes up. */
1954 static void
1955 async_file_mark (void)
1956 {
1957 int ret;
1958
1959 async_file_flush ();
1960
1961 do
1962 ret = write (linux_event_pipe[1], "+", 1);
1963 while (ret == 0 || (ret == -1 && errno == EINTR));
1964
1965 /* Ignore EAGAIN. If the pipe is full, the event loop will already
1966 be awakened anyway. */
1967 }
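/* An illustrative, standalone sketch of the self-pipe pattern the two
   helpers above implement; it is not part of gdbserver.  It assumes
   both pipe ends are made non-blocking (as linux_event_pipe is), so
   that flushing and marking can never block the event loop.  */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/select.h>
#include <unistd.h>

int
main (void)
{
  int fds[2];
  fd_set rset;
  char buf;

  if (pipe (fds) != 0)
    return 1;
  fcntl (fds[0], F_SETFL, O_NONBLOCK);
  fcntl (fds[1], F_SETFL, O_NONBLOCK);

  /* The async_file_mark equivalent: one byte wakes up select.  */
  write (fds[1], "+", 1);

  FD_ZERO (&rset);
  FD_SET (fds[0], &rset);
  if (select (fds[0] + 1, &rset, NULL, NULL, NULL) == 1)
    printf ("event loop woken\n");

  /* The async_file_flush equivalent: drain stale wakeups.  */
  while (read (fds[0], &buf, 1) > 0)
    ;
  return 0;
}
#endif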
1968
1969 static ptid_t
1970 linux_wait (ptid_t ptid,
1971 struct target_waitstatus *ourstatus, int target_options)
1972 {
1973 ptid_t event_ptid;
1974
1975 if (debug_threads)
1976 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
1977
1978 /* Flush the async file first. */
1979 if (target_is_async_p ())
1980 async_file_flush ();
1981
1982 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
1983
1984 /* If at least one stop was reported, there may be more. A single
1985 SIGCHLD can signal more than one child stop. */
1986 if (target_is_async_p ()
1987 && (target_options & TARGET_WNOHANG) != 0
1988 && !ptid_equal (event_ptid, null_ptid))
1989 async_file_mark ();
1990
1991 return event_ptid;
1992 }
1993
1994 /* Send a signal to an LWP. */
1995
1996 static int
1997 kill_lwp (unsigned long lwpid, int signo)
1998 {
1999 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2000 fails, then we are not using nptl threads and we should be using kill. */
2001
2002 #ifdef __NR_tkill
2003 {
2004 static int tkill_failed;
2005
2006 if (!tkill_failed)
2007 {
2008 int ret;
2009
2010 errno = 0;
2011 ret = syscall (__NR_tkill, lwpid, signo);
2012 if (errno != ENOSYS)
2013 return ret;
2014 tkill_failed = 1;
2015 }
2016 }
2017 #endif
2018
2019 return kill (lwpid, signo);
2020 }
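/* An illustrative, standalone sketch (not part of gdbserver) of why
   kill_lwp prefers tkill: tkill(2) targets a single LWP, while
   kill(2) targets a whole thread group.  Signal 0 is used here only
   as an existence probe.  */
#if 0
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int
main (void)
{
  long lwpid = syscall (__NR_gettid);	/* Our own kernel thread id.  */

  if (syscall (__NR_tkill, lwpid, 0) == 0)
    printf ("tkill available; LWP %ld exists\n", lwpid);
  return 0;
}
#endif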
2021
2022 static void
2023 send_sigstop (struct lwp_info *lwp)
2024 {
2025 int pid;
2026
2027 pid = lwpid_of (lwp);
2028
2029 /* If we already have a pending stop signal for this LWP, don't
2030 send another. */
2031 if (lwp->stop_expected)
2032 {
2033 if (debug_threads)
2034 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
2035
2036 return;
2037 }
2038
2039 if (debug_threads)
2040 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
2041
2042 lwp->stop_expected = 1;
2043 kill_lwp (pid, SIGSTOP);
2044 }
2045
2046 static void
2047 send_sigstop_callback (struct inferior_list_entry *entry)
2048 {
2049 struct lwp_info *lwp = (struct lwp_info *) entry;
2050
2051 if (lwp->stopped)
2052 return;
2053
2054 send_sigstop (lwp);
2055 }
2056
2057 static void
2058 mark_lwp_dead (struct lwp_info *lwp, int wstat)
2059 {
2060 /* It's dead, really. */
2061 lwp->dead = 1;
2062
2063 /* Store the exit status for later. */
2064 lwp->status_pending_p = 1;
2065 lwp->status_pending = wstat;
2066
2067 /* Prevent trying to stop it. */
2068 lwp->stopped = 1;
2069
2070 /* No further stops are expected from a dead lwp. */
2071 lwp->stop_expected = 0;
2072 }
2073
2074 static void
2075 wait_for_sigstop (struct inferior_list_entry *entry)
2076 {
2077 struct lwp_info *lwp = (struct lwp_info *) entry;
2078 struct thread_info *saved_inferior;
2079 int wstat;
2080 ptid_t saved_tid;
2081 ptid_t ptid;
2082 int pid;
2083
2084 if (lwp->stopped)
2085 {
2086 if (debug_threads)
2087 fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
2088 lwpid_of (lwp));
2089 return;
2090 }
2091
2092 saved_inferior = current_inferior;
2093 if (saved_inferior != NULL)
2094 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
2095 else
2096 saved_tid = null_ptid; /* avoid bogus unused warning */
2097
2098 ptid = lwp->head.id;
2099
2100 if (debug_threads)
2101 fprintf (stderr, "wait_for_sigstop: pulling one event\n");
2102
2103 pid = linux_wait_for_event (ptid, &wstat, __WALL);
2104
2105 /* If we stopped with a non-SIGSTOP signal, save it for later
2106 and record the pending SIGSTOP. If the process exited, just
2107 return. */
2108 if (WIFSTOPPED (wstat))
2109 {
2110 if (debug_threads)
2111 fprintf (stderr, "LWP %ld stopped with signal %d\n",
2112 lwpid_of (lwp), WSTOPSIG (wstat));
2113
2114 if (WSTOPSIG (wstat) != SIGSTOP)
2115 {
2116 if (debug_threads)
2117 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
2118 lwpid_of (lwp), wstat);
2119
2120 lwp->status_pending_p = 1;
2121 lwp->status_pending = wstat;
2122 }
2123 }
2124 else
2125 {
2126 if (debug_threads)
2127 fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);
2128
2129 lwp = find_lwp_pid (pid_to_ptid (pid));
2130 if (lwp)
2131 {
2132 /* Leave this status pending for the next time we're able to
2133 report it. In the meantime, we'll report this lwp as
2134 dead to GDB, so GDB doesn't try to read registers and
2135 memory from it. This can only happen if this was the
2136 last thread of the process; otherwise, PID is removed
2137 from the thread tables before linux_wait_for_event
2138 returns. */
2139 mark_lwp_dead (lwp, wstat);
2140 }
2141 }
2142
2143 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
2144 current_inferior = saved_inferior;
2145 else
2146 {
2147 if (debug_threads)
2148 fprintf (stderr, "Previously current thread died.\n");
2149
2150 if (non_stop)
2151 {
2152 /* We can't change the current inferior behind GDB's back;
2153 otherwise, a subsequent command may apply to the wrong
2154 process. */
2155 current_inferior = NULL;
2156 }
2157 else
2158 {
2159 /* Set a valid thread as current. */
2160 set_desired_inferior (0);
2161 }
2162 }
2163 }
2164
2165 static void
2166 stop_all_lwps (void)
2167 {
2168 stopping_threads = 1;
2169 for_each_inferior (&all_lwps, send_sigstop_callback);
2170 for_each_inferior (&all_lwps, wait_for_sigstop);
2171 stopping_threads = 0;
2172 }
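/* An illustrative, standalone sketch (not part of gdbserver) of the
   send-SIGSTOP-then-wait sequence above, reduced to a single,
   non-ptraced child; WUNTRACED stands in for the tracer's view of
   the stop.  */
#if 0
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int
main (void)
{
  pid_t child = fork ();
  int status;

  if (child < 0)
    return 1;
  if (child == 0)
    for (;;)
      pause ();

  kill (child, SIGSTOP);	/* The send_sigstop equivalent.  */
  waitpid (child, &status, WUNTRACED);	/* The wait_for_sigstop equivalent.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
    printf ("child %d stopped cleanly\n", (int) child);

  kill (child, SIGKILL);
  waitpid (child, &status, 0);
  return 0;
}
#endif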
2173
2174 /* Resume execution of the inferior process.
2175 If STEP is nonzero, single-step it.
2176 If SIGNAL is nonzero, give it that signal. */
2177
2178 static void
2179 linux_resume_one_lwp (struct lwp_info *lwp,
2180 int step, int signal, siginfo_t *info)
2181 {
2182 struct thread_info *saved_inferior;
2183
2184 if (lwp->stopped == 0)
2185 return;
2186
2187 /* Cancel actions that rely on GDB not changing the PC (e.g., the
2188 user used the "jump" command, or "set $pc = foo"). */
2189 if (lwp->stop_pc != get_pc (lwp))
2190 {
2191 /* Collecting 'while-stepping' actions doesn't make sense
2192 anymore. */
2193 release_while_stepping_state_list (get_lwp_thread (lwp));
2194 }
2195
2196 /* If we have pending signals or status, and a new signal, enqueue the
2197 signal. Also enqueue the signal if we are waiting to reinsert a
2198 breakpoint; it will be picked up again below. */
2199 if (signal != 0
2200 && (lwp->status_pending_p || lwp->pending_signals != NULL
2201 || lwp->bp_reinsert != 0))
2202 {
2203 struct pending_signals *p_sig;
2204 p_sig = xmalloc (sizeof (*p_sig));
2205 p_sig->prev = lwp->pending_signals;
2206 p_sig->signal = signal;
2207 if (info == NULL)
2208 memset (&p_sig->info, 0, sizeof (siginfo_t));
2209 else
2210 memcpy (&p_sig->info, info, sizeof (siginfo_t));
2211 lwp->pending_signals = p_sig;
2212 }
2213
2214 if (lwp->status_pending_p)
2215 {
2216 if (debug_threads)
2217 fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
2218 " has pending status\n",
2219 lwpid_of (lwp), step ? "step" : "continue", signal,
2220 lwp->stop_expected ? "expected" : "not expected");
2221 return;
2222 }
2223
2224 saved_inferior = current_inferior;
2225 current_inferior = get_lwp_thread (lwp);
2226
2227 if (debug_threads)
2228 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
2229 lwpid_of (lwp), step ? "step" : "continue", signal,
2230 lwp->stop_expected ? "expected" : "not expected");
2231
2232 /* This bit needs some thinking about. If we get a signal that
2233 we must report while a single-step reinsert is still pending,
2234 we often end up resuming the thread. It might be better to
2235 (ew) allow a stack of pending events; then we could be sure that
2236 the reinsert happened right away and not lose any signals.
2237
2238 Making this stack would also shrink the window in which breakpoints are
2239 uninserted (see comment in linux_wait_for_lwp) but not enough for
2240 complete correctness, so it won't solve that problem. It may be
2241 worthwhile just to solve this one, however. */
2242 if (lwp->bp_reinsert != 0)
2243 {
2244 if (debug_threads)
2245 fprintf (stderr, " pending reinsert at 0x%s\n",
2246 paddress (lwp->bp_reinsert));
2247
2248 if (can_hardware_single_step ())
2249 {
2250 if (step == 0)
2251 fprintf (stderr, "BAD - reinserting but not stepping.\n");
2252
2253 step = 1;
2254 }
2255
2256 /* Postpone any pending signal. It was enqueued above. */
2257 signal = 0;
2258 }
2259
2260 /* If we have while-stepping actions in this thread, set it stepping.
2261 If we have a signal to deliver, it may or may not be set to
2262 SIG_IGN; we don't know. Assume so, and allow collecting
2263 while-stepping into a signal handler. A possible smart thing to
2264 do would be to set an internal breakpoint at the signal return
2265 address, continue, and carry on catching this while-stepping
2266 action only when that breakpoint is hit. A future
2267 enhancement. */
2268 if (get_lwp_thread (lwp)->while_stepping != NULL
2269 && can_hardware_single_step ())
2270 {
2271 if (debug_threads)
2272 fprintf (stderr,
2273 "lwp %ld has a while-stepping action -> forcing step.\n",
2274 lwpid_of (lwp));
2275 step = 1;
2276 }
2277
2278 if (debug_threads && the_low_target.get_pc != NULL)
2279 {
2280 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
2281 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
2282 fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
2283 }
2284
2285 /* If we have pending signals, consume one unless we are trying to reinsert
2286 a breakpoint. */
2287 if (lwp->pending_signals != NULL && lwp->bp_reinsert == 0)
2288 {
2289 struct pending_signals **p_sig;
2290
2291 p_sig = &lwp->pending_signals;
2292 while ((*p_sig)->prev != NULL)
2293 p_sig = &(*p_sig)->prev;
2294
2295 signal = (*p_sig)->signal;
2296 if ((*p_sig)->info.si_signo != 0)
2297 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
2298
2299 free (*p_sig);
2300 *p_sig = NULL;
2301 }
2302
2303 if (the_low_target.prepare_to_resume != NULL)
2304 the_low_target.prepare_to_resume (lwp);
2305
2306 regcache_invalidate_one ((struct inferior_list_entry *)
2307 get_lwp_thread (lwp));
2308 errno = 0;
2309 lwp->stopped = 0;
2310 lwp->stopped_by_watchpoint = 0;
2311 lwp->stepping = step;
2312 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0,
2313 /* Coerce to a uintptr_t first to avoid potential gcc warning
2314 of coercing an 8 byte integer to a 4 byte pointer. */
2315 (PTRACE_ARG4_TYPE) (uintptr_t) signal);
2316
2317 current_inferior = saved_inferior;
2318 if (errno)
2319 {
2320 /* ESRCH from ptrace either means that the thread was already
2321 running (an error) or that it is gone (a race condition). If
2322 it's gone, we will get a notification the next time we wait,
2323 so we can ignore the error. We could differentiate these
2324 two, but it's tricky without waiting; the thread still exists
2325 as a zombie, so sending it signal 0 would succeed. So just
2326 ignore ESRCH. */
2327 if (errno == ESRCH)
2328 return;
2329
2330 perror_with_name ("ptrace");
2331 }
2332 }
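/* An illustrative, standalone sketch (not part of gdbserver) of the
   resume primitive underneath linux_resume_one_lwp: the fourth
   argument of PTRACE_CONT delivers a signal to the tracee, while
   passing 0 instead would suppress it.  */
#if 0
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

int
main (void)
{
  pid_t child = fork ();
  int status;

  if (child == 0)
    {
      ptrace (PTRACE_TRACEME, 0, 0, 0);
      raise (SIGUSR1);		/* Stops; the tracer decides its fate.  */
      _exit (0);
    }

  waitpid (child, &status, 0);	/* The SIGUSR1 stop.  */
  printf ("child stopped with signal %d\n", WSTOPSIG (status));

  /* Re-deliver the signal on resume; the child has no handler, so
     the default action (termination) applies.  */
  ptrace (PTRACE_CONT, child, 0, (void *) (long) WSTOPSIG (status));
  waitpid (child, &status, 0);
  if (WIFSIGNALED (status))
    printf ("child killed by signal %d\n", WTERMSIG (status));
  return 0;
}
#endif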
2333
2334 struct thread_resume_array
2335 {
2336 struct thread_resume *resume;
2337 size_t n;
2338 };
2339
2340 /* This function is called once per thread. We look up the thread
2341 in RESUME_PTR, and mark the thread with a pointer to the appropriate
2342 resume request.
2343
2344 This algorithm is O(threads * resume elements), but the number of
2345 resume elements is small (and will remain small at least until
2346 GDB supports thread suspension). */
2347 static int
2348 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
2349 {
2350 struct lwp_info *lwp;
2351 struct thread_info *thread;
2352 int ndx;
2353 struct thread_resume_array *r;
2354
2355 thread = (struct thread_info *) entry;
2356 lwp = get_thread_lwp (thread);
2357 r = arg;
2358
2359 for (ndx = 0; ndx < r->n; ndx++)
2360 {
2361 ptid_t ptid = r->resume[ndx].thread;
2362 if (ptid_equal (ptid, minus_one_ptid)
2363 || ptid_equal (ptid, entry->id)
2364 || (ptid_is_pid (ptid)
2365 && (ptid_get_pid (ptid) == pid_of (lwp)))
2366 || (ptid_get_lwp (ptid) == -1
2367 && (ptid_get_pid (ptid) == pid_of (lwp))))
2368 {
2369 if (r->resume[ndx].kind == resume_stop
2370 && thread->last_resume_kind == resume_stop)
2371 {
2372 if (debug_threads)
2373 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
2374 thread->last_status.kind == TARGET_WAITKIND_STOPPED
2375 ? "stopped"
2376 : "stopping",
2377 lwpid_of (lwp));
2378
2379 continue;
2380 }
2381
2382 lwp->resume = &r->resume[ndx];
2383 thread->last_resume_kind = lwp->resume->kind;
2384 return 0;
2385 }
2386 }
2387
2388 /* No resume action for this thread. */
2389 lwp->resume = NULL;
2390
2391 return 0;
2392 }
2393
2394
2395 /* Set *FLAG_P if this lwp has an interesting status pending. */
2396 static int
2397 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
2398 {
2399 struct lwp_info *lwp = (struct lwp_info *) entry;
2400
2401 /* LWPs which will not be resumed are not interesting, because
2402 we might not wait for them next time through linux_wait. */
2403 if (lwp->resume == NULL)
2404 return 0;
2405
2406 if (lwp->status_pending_p)
2407 * (int *) flag_p = 1;
2408
2409 return 0;
2410 }
2411
2412 /* Return 1 if this lwp that GDB wants running is stopped at an
2413 internal breakpoint that we need to step over. It assumes that any
2414 required STOP_PC adjustment has already been propagated to the
2415 inferior's regcache. */
2416
2417 static int
2418 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
2419 {
2420 struct lwp_info *lwp = (struct lwp_info *) entry;
2421 struct thread_info *thread;
2422 struct thread_info *saved_inferior;
2423 CORE_ADDR pc;
2424
2425 /* LWPs which will not be resumed are not interesting, because we
2426 might not wait for them next time through linux_wait. */
2427
2428 if (!lwp->stopped)
2429 {
2430 if (debug_threads)
2431 fprintf (stderr,
2432 "Need step over [LWP %ld]? Ignoring, not stopped\n",
2433 lwpid_of (lwp));
2434 return 0;
2435 }
2436
2437 thread = get_lwp_thread (lwp);
2438
2439 if (thread->last_resume_kind == resume_stop)
2440 {
2441 if (debug_threads)
2442 fprintf (stderr,
2443 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
2444 lwpid_of (lwp));
2445 return 0;
2446 }
2447
2448 if (!lwp->need_step_over)
2449 {
2450 if (debug_threads)
2451 fprintf (stderr,
2452 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
2453 }
2454
2455 if (lwp->status_pending_p)
2456 {
2457 if (debug_threads)
2458 fprintf (stderr,
2459 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
2460 lwpid_of (lwp));
2461 return 0;
2462 }
2463
2464 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
2465 or we have. */
2466 pc = get_pc (lwp);
2467
2468 /* If the PC has changed since we stopped, then don't do anything,
2469 and let the breakpoint/tracepoint be hit. This happens if, for
2470 instance, GDB handled the decr_pc_after_break subtraction itself,
2471 GDB is OOL stepping this thread, or the user has issued a "jump"
2472 command, or poked the thread's registers herself. */
2473 if (pc != lwp->stop_pc)
2474 {
2475 if (debug_threads)
2476 fprintf (stderr,
2477 "Need step over [LWP %ld]? Cancelling, PC was changed. "
2478 "Old stop_pc was 0x%s, PC is now 0x%s\n",
2479 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
2480
2481 lwp->need_step_over = 0;
2482 return 0;
2483 }
2484
2485 saved_inferior = current_inferior;
2486 current_inferior = thread;
2487
2488 /* We can only step over breakpoints we know about. */
2489 if (breakpoint_here (pc))
2490 {
2491 /* Don't step over a breakpoint that GDB expects to hit
2492 though. */
2493 if (gdb_breakpoint_here (pc))
2494 {
2495 if (debug_threads)
2496 fprintf (stderr,
2497 "Need step over [LWP %ld]? yes, but found"
2498 " GDB breakpoint at 0x%s; skipping step over\n",
2499 lwpid_of (lwp), paddress (pc));
2500
2501 current_inferior = saved_inferior;
2502 return 0;
2503 }
2504 else
2505 {
2506 if (debug_threads)
2507 fprintf (stderr,
2508 "Need step over [LWP %ld]? yes, found breakpoint at 0x%s\n",
2509 lwpid_of (lwp), paddress (pc));
2510
2511 /* We've found an lwp that needs stepping over --- return 1 so
2512 that find_inferior stops looking. */
2513 current_inferior = saved_inferior;
2514
2515 /* If the step over is cancelled, this is set again. */
2516 lwp->need_step_over = 0;
2517 return 1;
2518 }
2519 }
2520
2521 current_inferior = saved_inferior;
2522
2523 if (debug_threads)
2524 fprintf (stderr,
2525 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
2526 lwpid_of (lwp), paddress (pc));
2527
2528 return 0;
2529 }
2530
2531 /* Start a step-over operation on LWP. When LWP stops at a
2532 breakpoint, we need to move the breakpoint out of the way to
2533 make progress. If we let other threads run while we do that,
2534 they may pass by the breakpoint location and miss hitting it.
2535 To avoid that, a step-over momentarily stops all threads, then
2536 single-steps LWP with the breakpoint temporarily uninserted
2537 from the inferior. When the single-step finishes, we reinsert
2538 the breakpoint, and let all threads that are supposed to be
2539 running run again.
2540
2541 On targets that don't support hardware single-step, we don't
2542 currently support full software single-stepping. Instead, we only
2543 support stepping over the thread event breakpoint, by asking the
2544 low target where to place a reinsert breakpoint. Since this
2545 routine assumes the breakpoint being stepped over is a thread event
2546 breakpoint, the return address of the current function usually
2547 serves as a good enough place to set the reinsert breakpoint. */
2548
2549 static int
2550 start_step_over (struct lwp_info *lwp)
2551 {
2552 struct thread_info *saved_inferior;
2553 CORE_ADDR pc;
2554 int step;
2555
2556 if (debug_threads)
2557 fprintf (stderr,
2558 "Starting step-over on LWP %ld. Stopping all threads\n",
2559 lwpid_of (lwp));
2560
2561 stop_all_lwps ();
2562
2563 if (debug_threads)
2564 fprintf (stderr, "Done stopping all threads for step-over.\n");
2565
2566 /* Note, we should always reach here with an already adjusted PC,
2567 either by GDB (if we're resuming due to GDB's request), or by our
2568 caller, if we just finished handling an internal breakpoint GDB
2569 shouldn't care about. */
2570 pc = get_pc (lwp);
2571
2572 saved_inferior = current_inferior;
2573 current_inferior = get_lwp_thread (lwp);
2574
2575 lwp->bp_reinsert = pc;
2576 uninsert_breakpoints_at (pc);
2577
2578 if (can_hardware_single_step ())
2579 {
2580 step = 1;
2581 }
2582 else
2583 {
2584 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
2585 set_reinsert_breakpoint (raddr);
2586 step = 0;
2587 }
2588
2589 current_inferior = saved_inferior;
2590
2591 linux_resume_one_lwp (lwp, step, 0, NULL);
2592
2593 /* Require next event from this LWP. */
2594 step_over_bkpt = lwp->head.id;
2595 return 1;
2596 }
2597
2598 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
2599 start_step_over, if still there, and delete any reinsert
2600 breakpoints we've set, on non-hardware single-step targets. */
2601
2602 static int
2603 finish_step_over (struct lwp_info *lwp)
2604 {
2605 if (lwp->bp_reinsert != 0)
2606 {
2607 if (debug_threads)
2608 fprintf (stderr, "Finished step over.\n");
2609
2610 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
2611 may be no breakpoint to reinsert there by now. */
2612 reinsert_breakpoints_at (lwp->bp_reinsert);
2613
2614 lwp->bp_reinsert = 0;
2615
2616 /* Delete any software-single-step reinsert breakpoints. No
2617 longer needed. We don't have to worry about other threads
2618 hitting this trap, and later not being able to explain it,
2619 because we were stepping over a breakpoint, and we hold all
2620 threads but LWP stopped while doing that. */
2621 if (!can_hardware_single_step ())
2622 delete_reinsert_breakpoints ();
2623
2624 step_over_bkpt = null_ptid;
2625 return 1;
2626 }
2627 else
2628 return 0;
2629 }
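/* An illustrative fragment (not part of gdbserver, and not a full
   program) of the uninsert/step/reinsert cycle that start_step_over
   and finish_step_over perform, assuming x86/x86-64 and software
   breakpoints encoded as the 0xcc "int3" byte.  ADDR and ORIG_WORD
   are hypothetical: a breakpoint address already planted in a
   ptrace-stopped child, and the text word the breakpoint overwrote.  */
#if 0
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static void
step_over_sw_breakpoint (pid_t pid, unsigned long addr, long orig_word)
{
  long bp_word;
  int status;

  /* Uninsert: restore the original instruction bytes.  */
  ptrace (PTRACE_POKETEXT, pid, (void *) addr, (void *) orig_word);

  /* Single-step this one thread past the breakpoint address.  */
  ptrace (PTRACE_SINGLESTEP, pid, 0, 0);
  waitpid (pid, &status, 0);	/* SIGTRAP after one instruction.  */

  /* Reinsert: plant the int3 byte in the low byte again.  */
  bp_word = (orig_word & ~0xffL) | 0xcc;
  ptrace (PTRACE_POKETEXT, pid, (void *) addr, (void *) bp_word);
}
#endif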
2630
2631 /* This function is called once per thread. We check the thread's resume
2632 request, which will tell us whether to resume, step, or leave the thread
2633 stopped; and what signal, if any, it should be sent.
2634
2635 For threads which we aren't explicitly told otherwise, we preserve
2636 the stepping flag; this is used for stepping over gdbserver-placed
2637 breakpoints.
2638
2639 If pending_flags was set in any thread, we queue any needed
2640 signals, since we won't actually resume. We already have a pending
2641 event to report, so we don't need to preserve any step requests;
2642 they should be re-issued if necessary. */
2643
2644 static int
2645 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
2646 {
2647 struct lwp_info *lwp;
2648 struct thread_info *thread;
2649 int step;
2650 int leave_all_stopped = * (int *) arg;
2651 int leave_pending;
2652
2653 thread = (struct thread_info *) entry;
2654 lwp = get_thread_lwp (thread);
2655
2656 if (lwp->resume == NULL)
2657 return 0;
2658
2659 if (lwp->resume->kind == resume_stop)
2660 {
2661 if (debug_threads)
2662 fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));
2663
2664 if (!lwp->stopped)
2665 {
2666 if (debug_threads)
2667 fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));
2668
2669 /* Stop the thread, and wait for the event asynchronously,
2670 through the event loop. */
2671 send_sigstop (lwp);
2672 }
2673 else
2674 {
2675 if (debug_threads)
2676 fprintf (stderr, "already stopped LWP %ld\n",
2677 lwpid_of (lwp));
2678
2679 /* The LWP may have been stopped in an internal event that
2680 was not meant to be notified back to GDB (e.g., gdbserver
2681 breakpoint), so we should be reporting a stop event in
2682 this case too. */
2683
2684 /* If the thread already has a pending SIGSTOP, this is a
2685 no-op. Otherwise, something later will presumably resume
2686 the thread and this will cause it to cancel any pending
2687 operation, due to last_resume_kind == resume_stop. If
2688 the thread already has a pending status to report, we
2689 will still report it the next time we wait - see
2690 status_pending_p_callback. */
2691 send_sigstop (lwp);
2692 }
2693
2694 /* For stop requests, we're done. */
2695 lwp->resume = NULL;
2696 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
2697 return 0;
2698 }
2699
2700 /* If this thread which is about to be resumed has a pending status,
2701 then don't resume any threads - we can just report the pending
2702 status. Make sure to queue any signals that would otherwise be
2703 sent. In all-stop mode, we base this decision on whether *any*
2704 thread has a pending status. If there's a thread that needs the
2705 step-over-breakpoint dance, then don't resume any other thread
2706 but that particular one. */
2707 leave_pending = (lwp->status_pending_p || leave_all_stopped);
2708
2709 if (!leave_pending)
2710 {
2711 if (debug_threads)
2712 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
2713
2714 step = (lwp->resume->kind == resume_step);
2715 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
2716 }
2717 else
2718 {
2719 if (debug_threads)
2720 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
2721
2722 /* If we have a new signal, enqueue the signal. */
2723 if (lwp->resume->sig != 0)
2724 {
2725 struct pending_signals *p_sig;
2726 p_sig = xmalloc (sizeof (*p_sig));
2727 p_sig->prev = lwp->pending_signals;
2728 p_sig->signal = lwp->resume->sig;
2729 memset (&p_sig->info, 0, sizeof (siginfo_t));
2730
2731 /* If this is the same signal we were previously stopped by,
2732 make sure to queue its siginfo. We can ignore the return
2733 value of ptrace; if it fails, we'll skip
2734 PTRACE_SETSIGINFO. */
2735 if (WIFSTOPPED (lwp->last_status)
2736 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
2737 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
2738
2739 lwp->pending_signals = p_sig;
2740 }
2741 }
2742
2743 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
2744 lwp->resume = NULL;
2745 return 0;
2746 }
2747
2748 static void
2749 linux_resume (struct thread_resume *resume_info, size_t n)
2750 {
2751 struct thread_resume_array array = { resume_info, n };
2752 struct lwp_info *need_step_over = NULL;
2753 int any_pending;
2754 int leave_all_stopped;
2755
2756 find_inferior (&all_threads, linux_set_resume_request, &array);
2757
2758 /* If there is a thread which would otherwise be resumed, which has
2759 a pending status, then don't resume any threads - we can just
2760 report the pending status. Make sure to queue any signals that
2761 would otherwise be sent. In non-stop mode, we'll apply this
2762 logic to each thread individually. We consume all pending events
2763 before considering whether to start a step-over (in all-stop). */
2764 any_pending = 0;
2765 if (!non_stop)
2766 find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
2767
2768 /* If there is a thread which would otherwise be resumed, which is
2769 stopped at a breakpoint that needs stepping over, then don't
2770 resume any threads - have it step over the breakpoint with all
2771 other threads stopped, then resume all threads again. Make sure
2772 to queue any signals that would otherwise be delivered or
2773 queued. */
2774 if (!any_pending && supports_breakpoints ())
2775 need_step_over
2776 = (struct lwp_info *) find_inferior (&all_lwps,
2777 need_step_over_p, NULL);
2778
2779 leave_all_stopped = (need_step_over != NULL || any_pending);
2780
2781 if (debug_threads)
2782 {
2783 if (need_step_over != NULL)
2784 fprintf (stderr, "Not resuming all, need step over\n");
2785 else if (any_pending)
2786 fprintf (stderr,
2787 "Not resuming, all-stop and found "
2788 "an LWP with pending status\n");
2789 else
2790 fprintf (stderr, "Resuming, no pending status or step over needed\n");
2791 }
2792
2793 /* Even if we're leaving threads stopped, queue all signals we'd
2794 otherwise deliver. */
2795 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
2796
2797 if (need_step_over)
2798 start_step_over (need_step_over);
2799 }
2800
2801 /* This function is called once per thread. We check the thread's
2802 last resume request, which will tell us whether to resume, step, or
2803 leave the thread stopped. Any signal the client requested to be
2804 delivered has already been enqueued at this point.
2805
2806 If any thread that GDB wants running is stopped at an internal
2807 breakpoint that needs stepping over, we start a step-over operation
2808 on that particular thread, and leave all others stopped. */
2809
2810 static void
2811 proceed_one_lwp (struct inferior_list_entry *entry)
2812 {
2813 struct lwp_info *lwp;
2814 struct thread_info *thread;
2815 int step;
2816
2817 lwp = (struct lwp_info *) entry;
2818
2819 if (debug_threads)
2820 fprintf (stderr,
2821 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
2822
2823 if (!lwp->stopped)
2824 {
2825 if (debug_threads)
2826 fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
2827 return;
2828 }
2829
2830 thread = get_lwp_thread (lwp);
2831
2832 if (thread->last_resume_kind == resume_stop
2833 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
2834 {
2835 if (debug_threads)
2836 fprintf (stderr, " client wants LWP %ld to remain stopped\n",
2837 lwpid_of (lwp));
2838 return;
2839 }
2840
2841 if (lwp->status_pending_p)
2842 {
2843 if (debug_threads)
2844 fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
2845 lwpid_of (lwp));
2846 return;
2847 }
2848
2849 if (lwp->suspended)
2850 {
2851 if (debug_threads)
2852 fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
2853 return;
2854 }
2855
2856 if (thread->last_resume_kind == resume_stop)
2857 {
2858 /* We haven't reported this LWP as stopped yet (otherwise, the
2859 last_status.kind check above would catch it, and we wouldn't
2860 reach here). This LWP may have been momentarily paused by a
2861 stop_all_lwps call while handling, for example, another LWP's
2862 step-over. In that case, the pending expected SIGSTOP signal
2863 that was queued at vCont;t handling time will have already
2864 been consumed by wait_for_sigstop, and so we need to requeue
2865 another one here. Note that if the LWP already has a SIGSTOP
2866 pending, this is a no-op. */
2867
2868 if (debug_threads)
2869 fprintf (stderr,
2870 "Client wants LWP %ld to stop. "
2871 "Making sure it has a SIGSTOP pending\n",
2872 lwpid_of (lwp));
2873
2874 send_sigstop (lwp);
2875 }
2876
2877 step = thread->last_resume_kind == resume_step;
2878 linux_resume_one_lwp (lwp, step, 0, NULL);
2879 }
2880
2881 /* When we finish a step-over, set threads running again. If there's
2882 another thread that may need a step-over, now's the time to start
2883 it. Eventually, we'll move all threads past their breakpoints. */
2884
2885 static void
2886 proceed_all_lwps (void)
2887 {
2888 struct lwp_info *need_step_over;
2889
2890 /* If there is a thread which would otherwise be resumed, which is
2891 stopped at a breakpoint that needs stepping over, then don't
2892 resume any threads - have it step over the breakpoint with all
2893 other threads stopped, then resume all threads again. */
2894
2895 if (supports_breakpoints ())
2896 {
2897 need_step_over
2898 = (struct lwp_info *) find_inferior (&all_lwps,
2899 need_step_over_p, NULL);
2900
2901 if (need_step_over != NULL)
2902 {
2903 if (debug_threads)
2904 fprintf (stderr, "proceed_all_lwps: found "
2905 "thread %ld needing a step-over\n",
2906 lwpid_of (need_step_over));
2907
2908 start_step_over (need_step_over);
2909 return;
2910 }
2911 }
2912
2913 if (debug_threads)
2914 fprintf (stderr, "Proceeding, no step-over needed\n");
2915
2916 for_each_inferior (&all_lwps, proceed_one_lwp);
2917 }
2918
2919 /* Stopped LWPs that the client wanted to be running, that don't have
2920 pending statuses, are set to run again, except for EXCEPT, if not
2921 NULL. This undoes a stop_all_lwps call. */
2922
2923 static void
2924 unstop_all_lwps (struct lwp_info *except)
2925 {
2926 if (debug_threads)
2927 {
2928 if (except)
2929 fprintf (stderr,
2930 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
2931 else
2932 fprintf (stderr,
2933 "unstopping all lwps\n");
2934 }
2935
2936 /* Make sure proceed_one_lwp doesn't try to resume this thread. */
2937 if (except != NULL)
2938 ++except->suspended;
2939
2940 for_each_inferior (&all_lwps, proceed_one_lwp);
2941
2942 if (except != NULL)
2943 --except->suspended;
2944 }
2945
2946 #ifdef HAVE_LINUX_USRREGS
2947
2948 int
2949 register_addr (int regnum)
2950 {
2951 int addr;
2952
2953 if (regnum < 0 || regnum >= the_low_target.num_regs)
2954 error ("Invalid register number %d.", regnum);
2955
2956 addr = the_low_target.regmap[regnum];
2957
2958 return addr;
2959 }
2960
2961 /* Fetch one register. */
2962 static void
2963 fetch_register (struct regcache *regcache, int regno)
2964 {
2965 CORE_ADDR regaddr;
2966 int i, size;
2967 char *buf;
2968 int pid;
2969
2970 if (regno >= the_low_target.num_regs)
2971 return;
2972 if ((*the_low_target.cannot_fetch_register) (regno))
2973 return;
2974
2975 regaddr = register_addr (regno);
2976 if (regaddr == -1)
2977 return;
2978
2979 pid = lwpid_of (get_thread_lwp (current_inferior));
2980 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
2981 & - sizeof (PTRACE_XFER_TYPE));
2982 buf = alloca (size);
2983 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
2984 {
2985 errno = 0;
2986 *(PTRACE_XFER_TYPE *) (buf + i) =
2987 ptrace (PTRACE_PEEKUSER, pid,
2988 /* Coerce to a uintptr_t first to avoid potential gcc warning
2989 of coercing an 8 byte integer to a 4 byte pointer. */
2990 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0);
2991 regaddr += sizeof (PTRACE_XFER_TYPE);
2992 if (errno != 0)
2993 error ("reading register %d: %s", regno, strerror (errno));
2994 }
2995
2996 if (the_low_target.supply_ptrace_register)
2997 the_low_target.supply_ptrace_register (regcache, regno, buf);
2998 else
2999 supply_register (regcache, regno, buf);
3000 }
3001
3002 /* Fetch all registers, or just one, from the child process. */
3003 static void
3004 usr_fetch_inferior_registers (struct regcache *regcache, int regno)
3005 {
3006 if (regno == -1)
3007 for (regno = 0; regno < the_low_target.num_regs; regno++)
3008 fetch_register (regcache, regno);
3009 else
3010 fetch_register (regcache, regno);
3011 }
3012
3013 /* Store our register values back into the inferior.
3014 If REGNO is -1, do this for all registers.
3015 Otherwise, REGNO specifies which register (so we can save time). */
3016 static void
3017 usr_store_inferior_registers (struct regcache *regcache, int regno)
3018 {
3019 CORE_ADDR regaddr;
3020 int i, size;
3021 char *buf;
3022 int pid;
3023
3024 if (regno >= 0)
3025 {
3026 if (regno >= the_low_target.num_regs)
3027 return;
3028
3029 if ((*the_low_target.cannot_store_register) (regno) == 1)
3030 return;
3031
3032 regaddr = register_addr (regno);
3033 if (regaddr == -1)
3034 return;
3035 errno = 0;
3036 size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
3037 & - sizeof (PTRACE_XFER_TYPE);
3038 buf = alloca (size);
3039 memset (buf, 0, size);
3040
3041 if (the_low_target.collect_ptrace_register)
3042 the_low_target.collect_ptrace_register (regcache, regno, buf);
3043 else
3044 collect_register (regcache, regno, buf);
3045
3046 pid = lwpid_of (get_thread_lwp (current_inferior));
3047 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
3048 {
3049 errno = 0;
3050 ptrace (PTRACE_POKEUSER, pid,
3051 /* Coerce to a uintptr_t first to avoid potential gcc warning
3052 about coercing an 8 byte integer to a 4 byte pointer. */
3053 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
3054 (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
3055 if (errno != 0)
3056 {
3057 /* At this point, ESRCH should mean the process is
3058 already gone, in which case we simply ignore attempts
3059 to change its registers. See also the related
3060 comment in linux_resume_one_lwp. */
3061 if (errno == ESRCH)
3062 return;
3063
3064 if ((*the_low_target.cannot_store_register) (regno) == 0)
3065 error ("writing register %d: %s", regno, strerror (errno));
3066 }
3067 regaddr += sizeof (PTRACE_XFER_TYPE);
3068 }
3069 }
3070 else
3071 for (regno = 0; regno < the_low_target.num_regs; regno++)
3072 usr_store_inferior_registers (regcache, regno);
3073 }
3074 #endif /* HAVE_LINUX_USRREGS */
3075
3076
3077
3078 #ifdef HAVE_LINUX_REGSETS
3079
3080 static int
3081 regsets_fetch_inferior_registers (struct regcache *regcache)
3082 {
3083 struct regset_info *regset;
3084 int saw_general_regs = 0;
3085 int pid;
3086 struct iovec iov;
3087
3088 regset = target_regsets;
3089
3090 pid = lwpid_of (get_thread_lwp (current_inferior));
3091 while (regset->size >= 0)
3092 {
3093 void *buf, *data;
3094 int nt_type, res;
3095
3096 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3097 {
3098 regset ++;
3099 continue;
3100 }
3101
3102 buf = xmalloc (regset->size);
3103
3104 nt_type = regset->nt_type;
3105 if (nt_type)
3106 {
3107 iov.iov_base = buf;
3108 iov.iov_len = regset->size;
3109 data = (void *) &iov;
3110 }
3111 else
3112 data = buf;
3113
3114 #ifndef __sparc__
3115 res = ptrace (regset->get_request, pid, nt_type, data);
3116 #else
3117 res = ptrace (regset->get_request, pid, data, nt_type);
3118 #endif
3119 if (res < 0)
3120 {
3121 if (errno == EIO)
3122 {
3123 /* If we get EIO on a regset, do not try it again for
3124 this process. */
3125 disabled_regsets[regset - target_regsets] = 1;
3126 free (buf);
3127 continue;
3128 }
3129 else
3130 {
3131 char s[256];
3132 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
3133 pid);
3134 perror (s);
3135 }
3136 }
3137 else if (regset->type == GENERAL_REGS)
3138 saw_general_regs = 1;
3139 regset->store_function (regcache, buf);
3140 regset ++;
3141 free (buf);
3142 }
3143 if (saw_general_regs)
3144 return 0;
3145 else
3146 return 1;
3147 }
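/* An illustrative, standalone sketch (not part of gdbserver) of the
   iovec-based regset read used above, assuming a kernel and libc
   recent enough to define PTRACE_GETREGSET.  NT_PRSTATUS names the
   general register set; PID must already be ptrace-stopped.  */
#if 0
#include <elf.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/user.h>

static int
read_gregs (pid_t pid, struct user_regs_struct *regs)
{
  struct iovec iov;

  iov.iov_base = regs;
  iov.iov_len = sizeof (*regs);
  /* On success, the kernel shrinks iov_len to the size it filled.  */
  return ptrace (PTRACE_GETREGSET, pid, (void *) NT_PRSTATUS, &iov);
}
#endif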
3148
3149 static int
3150 regsets_store_inferior_registers (struct regcache *regcache)
3151 {
3152 struct regset_info *regset;
3153 int saw_general_regs = 0;
3154 int pid;
3155 struct iovec iov;
3156
3157 regset = target_regsets;
3158
3159 pid = lwpid_of (get_thread_lwp (current_inferior));
3160 while (regset->size >= 0)
3161 {
3162 void *buf, *data;
3163 int nt_type, res;
3164
3165 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3166 {
3167 regset ++;
3168 continue;
3169 }
3170
3171 buf = xmalloc (regset->size);
3172
3173 /* First fill the buffer with the current register set contents,
3174 in case there are any items in the kernel's regset that are
3175 not in gdbserver's regcache. */
3176
3177 nt_type = regset->nt_type;
3178 if (nt_type)
3179 {
3180 iov.iov_base = buf;
3181 iov.iov_len = regset->size;
3182 data = (void *) &iov;
3183 }
3184 else
3185 data = buf;
3186
3187 #ifndef __sparc__
3188 res = ptrace (regset->get_request, pid, nt_type, data);
3189 #else
3190 res = ptrace (regset->get_request, pid, data, nt_type);
3191 #endif
3192
3193 if (res == 0)
3194 {
3195 /* Then overlay our cached registers on that. */
3196 regset->fill_function (regcache, buf);
3197
3198 /* Only now do we write the register set. */
3199 #ifndef __sparc__
3200 res = ptrace (regset->set_request, pid, nt_type, data);
3201 #else
3202 res = ptrace (regset->set_request, pid, data, nt_type);
3203 #endif
3204 }
3205
3206 if (res < 0)
3207 {
3208 if (errno == EIO)
3209 {
3210 /* If we get EIO on a regset, do not try it again for
3211 this process. */
3212 disabled_regsets[regset - target_regsets] = 1;
3213 free (buf);
3214 continue;
3215 }
3216 else if (errno == ESRCH)
3217 {
3218 /* At this point, ESRCH should mean the process is
3219 already gone, in which case we simply ignore attempts
3220 to change its registers. See also the related
3221 comment in linux_resume_one_lwp. */
3222 free (buf);
3223 return 0;
3224 }
3225 else
3226 {
3227 perror ("Warning: ptrace(regsets_store_inferior_registers)");
3228 }
3229 }
3230 else if (regset->type == GENERAL_REGS)
3231 saw_general_regs = 1;
3232 regset ++;
3233 free (buf);
3234 }
3235 if (saw_general_regs)
3236 return 0;
3237 else
3238 return 1;
3240 }
3241
3242 #endif /* HAVE_LINUX_REGSETS */
3243
3244
3245 void
3246 linux_fetch_registers (struct regcache *regcache, int regno)
3247 {
3248 #ifdef HAVE_LINUX_REGSETS
3249 if (regsets_fetch_inferior_registers (regcache) == 0)
3250 return;
3251 #endif
3252 #ifdef HAVE_LINUX_USRREGS
3253 usr_fetch_inferior_registers (regcache, regno);
3254 #endif
3255 }
3256
3257 void
3258 linux_store_registers (struct regcache *regcache, int regno)
3259 {
3260 #ifdef HAVE_LINUX_REGSETS
3261 if (regsets_store_inferior_registers (regcache) == 0)
3262 return;
3263 #endif
3264 #ifdef HAVE_LINUX_USRREGS
3265 usr_store_inferior_registers (regcache, regno);
3266 #endif
3267 }
3268
3269
3270 /* Copy LEN bytes from inferior's memory starting at MEMADDR
3271 to debugger memory starting at MYADDR. */
3272
3273 static int
3274 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
3275 {
3276 register int i;
3277 /* Round starting address down to longword boundary. */
3278 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
3279 /* Round ending address up; get number of longwords that makes. */
3280 register int count
3281 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
3282 / sizeof (PTRACE_XFER_TYPE);
3283 /* Allocate buffer of that many longwords. */
3284 register PTRACE_XFER_TYPE *buffer
3285 = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
3286 int fd;
3287 char filename[64];
3288 int pid = lwpid_of (get_thread_lwp (current_inferior));
3289
3290 /* Try using /proc. Don't bother for one word. */
3291 if (len >= 3 * sizeof (long))
3292 {
3293 /* We could keep this file open and cache it - possibly one per
3294 thread. That requires some juggling, but is even faster. */
3295 sprintf (filename, "/proc/%d/mem", pid);
3296 fd = open (filename, O_RDONLY | O_LARGEFILE);
3297 if (fd == -1)
3298 goto no_proc;
3299
3300 /* If pread64 is available, use it. It's faster if the kernel
3301 supports it (only one syscall), and it's 64-bit safe even on
3302 32-bit platforms (for instance, SPARC debugging a SPARC64
3303 application). */
3304 #ifdef HAVE_PREAD64
3305 if (pread64 (fd, myaddr, len, memaddr) != len)
3306 #else
3307 if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
3308 #endif
3309 {
3310 close (fd);
3311 goto no_proc;
3312 }
3313
3314 close (fd);
3315 return 0;
3316 }
3317
3318 no_proc:
3319 /* Read all the longwords. */
3320 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
3321 {
3322 errno = 0;
3323 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
3324 about coercing an 8 byte integer to a 4 byte pointer. */
3325 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
3326 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
3327 if (errno)
3328 return errno;
3329 }
3330
3331 /* Copy appropriate bytes out of the buffer. */
3332 memcpy (myaddr,
3333 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
3334 len);
3335
3336 return 0;
3337 }
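/* An illustrative, standalone sketch (not part of gdbserver) of the
   /proc/<pid>/mem fast path above, reading our own memory: one pread
   per transfer instead of one PTRACE_PEEKTEXT per word.  */
#if 0
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main (void)
{
  static const char secret[] = "hello";
  char buf[sizeof secret];
  char filename[64];
  int fd;

  sprintf (filename, "/proc/%d/mem", (int) getpid ());
  fd = open (filename, O_RDONLY);
  if (fd == -1)
    return 1;

  /* The file offset is the virtual address to read from.  */
  if (pread (fd, buf, sizeof buf, (off_t) (uintptr_t) secret)
      == (ssize_t) sizeof buf)
    printf ("read back: %s\n", buf);
  close (fd);
  return 0;
}
#endif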
3338
3339 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
3340 memory at MEMADDR. On failure (cannot write to the inferior)
3341 returns the value of errno. */
3342
3343 static int
3344 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
3345 {
3346 register int i;
3347 /* Round starting address down to longword boundary. */
3348 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
3349 /* Round ending address up; get number of longwords that makes. */
3350 register int count
3351 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1) / sizeof (PTRACE_XFER_TYPE);
3352 /* Allocate buffer of that many longwords. */
3353 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
3354 int pid = lwpid_of (get_thread_lwp (current_inferior));
3355
3356 if (debug_threads)
3357 {
3358 /* Dump up to four bytes. */
3359 unsigned int val = * (unsigned int *) myaddr;
3360 if (len == 1)
3361 val = val & 0xff;
3362 else if (len == 2)
3363 val = val & 0xffff;
3364 else if (len == 3)
3365 val = val & 0xffffff;
3366 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
3367 val, (long)memaddr);
3368 }
3369
3370 /* Fill start and end extra bytes of buffer with existing memory data. */
3371
3372 errno = 0;
3373 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
3374 about coercing an 8 byte integer to a 4 byte pointer. */
3375 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
3376 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
3377 if (errno)
3378 return errno;
3379
3380 if (count > 1)
3381 {
3382 errno = 0;
3383 buffer[count - 1]
3384 = ptrace (PTRACE_PEEKTEXT, pid,
3385 /* Coerce to a uintptr_t first to avoid potential gcc warning
3386 about coercing an 8 byte integer to a 4 byte pointer. */
3387 (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
3388 * sizeof (PTRACE_XFER_TYPE)),
3389 0);
3390 if (errno)
3391 return errno;
3392 }
3393
3394 /* Copy data to be written over corresponding part of buffer. */
3395
3396 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), myaddr, len);
3397
3398 /* Write the entire buffer. */
3399
3400 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
3401 {
3402 errno = 0;
3403 ptrace (PTRACE_POKETEXT, pid,
3404 /* Coerce to a uintptr_t first to avoid potential gcc warning
3405 about coercing an 8 byte integer to a 4 byte pointer. */
3406 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
3407 (PTRACE_ARG4_TYPE) buffer[i]);
3408 if (errno)
3409 return errno;
3410 }
3411
3412 return 0;
3413 }
3414
3415 /* Non-zero if the kernel supports PTRACE_O_TRACEFORK. */
3416 static int linux_supports_tracefork_flag;
3417
3418 /* Helper functions for linux_test_for_tracefork, called via clone (). */
3419
3420 static int
3421 linux_tracefork_grandchild (void *arg)
3422 {
3423 _exit (0);
3424 }
3425
3426 #define STACK_SIZE 4096
3427
3428 static int
3429 linux_tracefork_child (void *arg)
3430 {
3431 ptrace (PTRACE_TRACEME, 0, 0, 0);
3432 kill (getpid (), SIGSTOP);
3433
3434 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
3435
3436 if (fork () == 0)
3437 linux_tracefork_grandchild (NULL);
3438
3439 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3440
3441 #ifdef __ia64__
3442 __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
3443 CLONE_VM | SIGCHLD, NULL);
3444 #else
3445 clone (linux_tracefork_grandchild, arg + STACK_SIZE,
3446 CLONE_VM | SIGCHLD, NULL);
3447 #endif
3448
3449 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3450
3451 _exit (0);
3452 }
3453
3454 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
3455 sure that we can enable the option, and that it had the desired
3456 effect. */
3457
3458 static void
3459 linux_test_for_tracefork (void)
3460 {
3461 int child_pid, ret, status;
3462 long second_pid;
3463 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
3464 char *stack = xmalloc (STACK_SIZE * 4);
3465 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3466
3467 linux_supports_tracefork_flag = 0;
3468
3469 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
3470
3471 child_pid = fork ();
3472 if (child_pid == 0)
3473 linux_tracefork_child (NULL);
3474
3475 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3476
3477 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
3478 #ifdef __ia64__
3479 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
3480 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
3481 #else /* !__ia64__ */
3482 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
3483 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
3484 #endif /* !__ia64__ */
3485
3486 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3487
3488 if (child_pid == -1)
3489 perror_with_name ("clone");
3490
3491 ret = my_waitpid (child_pid, &status, 0);
3492 if (ret == -1)
3493 perror_with_name ("waitpid");
3494 else if (ret != child_pid)
3495 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
3496 if (! WIFSTOPPED (status))
3497 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
3498
3499 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
3500 (PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
3501 if (ret != 0)
3502 {
3503 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
3504 if (ret != 0)
3505 {
3506 warning ("linux_test_for_tracefork: failed to kill child");
3507 return;
3508 }
3509
3510 ret = my_waitpid (child_pid, &status, 0);
3511 if (ret != child_pid)
3512 warning ("linux_test_for_tracefork: failed to wait for killed child");
3513 else if (!WIFSIGNALED (status))
3514 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
3515 "killed child", status);
3516
3517 return;
3518 }
3519
3520 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
3521 if (ret != 0)
3522 warning ("linux_test_for_tracefork: failed to resume child");
3523
3524 ret = my_waitpid (child_pid, &status, 0);
3525
3526 if (ret == child_pid && WIFSTOPPED (status)
3527 && status >> 16 == PTRACE_EVENT_FORK)
3528 {
3529 second_pid = 0;
3530 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
3531 if (ret == 0 && second_pid != 0)
3532 {
3533 int second_status;
3534
3535 linux_supports_tracefork_flag = 1;
3536 my_waitpid (second_pid, &second_status, 0);
3537 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
3538 if (ret != 0)
3539 warning ("linux_test_for_tracefork: failed to kill second child");
3540 my_waitpid (second_pid, &status, 0);
3541 }
3542 }
3543 else
3544 warning ("linux_test_for_tracefork: unexpected result from waitpid "
3545 "(%d, status 0x%x)", ret, status);
3546
3547 do
3548 {
3549 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
3550 if (ret != 0)
3551 warning ("linux_test_for_tracefork: failed to kill child");
3552 my_waitpid (child_pid, &status, 0);
3553 }
3554 while (WIFSTOPPED (status));
3555
3556 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
3557 free (stack);
3558 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3559 }
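/* An illustrative, standalone sketch (not part of gdbserver) of the
   probe above: enable PTRACE_O_TRACEFORK on a stopped child and
   check that its fork shows up as a PTRACE_EVENT_FORK stop carrying
   the new PID in the event message.  Assumes the system headers (or
   fallback definitions such as those earlier in this file) provide
   the PTRACE_SETOPTIONS constants.  */
#if 0
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

int
main (void)
{
  pid_t child = fork ();
  int status;
  long grandchild = 0;

  if (child == 0)
    {
      ptrace (PTRACE_TRACEME, 0, 0, 0);
      raise (SIGSTOP);
      if (fork () == 0)
	_exit (0);
      _exit (0);
    }

  waitpid (child, &status, 0);	/* The initial SIGSTOP.  */
  ptrace (PTRACE_SETOPTIONS, child, 0, (void *) PTRACE_O_TRACEFORK);
  ptrace (PTRACE_CONT, child, 0, 0);

  waitpid (child, &status, 0);
  if (WIFSTOPPED (status) && status >> 16 == PTRACE_EVENT_FORK)
    {
      ptrace (PTRACE_GETEVENTMSG, child, 0, &grandchild);
      printf ("fork traced; new LWP %ld\n", grandchild);
      kill ((pid_t) grandchild, SIGKILL);
    }

  kill (child, SIGKILL);
  waitpid (child, &status, 0);
  return 0;
}
#endif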
3560
3561
3562 static void
3563 linux_look_up_symbols (void)
3564 {
3565 #ifdef USE_THREAD_DB
3566 struct process_info *proc = current_process ();
3567
3568 if (proc->private->thread_db != NULL)
3569 return;
3570
3571 /* If the kernel supports tracing forks then it also supports tracing
3572 clones, and then we don't need to use the magic thread event breakpoint
3573 to learn about threads. */
3574 thread_db_init (!linux_supports_tracefork_flag);
3575 #endif
3576 }
3577
3578 static void
3579 linux_request_interrupt (void)
3580 {
3581 extern unsigned long signal_pid;
3582
3583 if (!ptid_equal (cont_thread, null_ptid)
3584 && !ptid_equal (cont_thread, minus_one_ptid))
3585 {
3586 struct lwp_info *lwp;
3587 int lwpid;
3588
3589 lwp = get_thread_lwp (current_inferior);
3590 lwpid = lwpid_of (lwp);
3591 kill_lwp (lwpid, SIGINT);
3592 }
3593 else
3594 kill_lwp (signal_pid, SIGINT);
3595 }
3596
3597 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
3598 to debugger memory starting at MYADDR. */
3599
3600 static int
3601 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
3602 {
3603 char filename[PATH_MAX];
3604 int fd, n;
3605 int pid = lwpid_of (get_thread_lwp (current_inferior));
3606
3607 snprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
3608
3609 fd = open (filename, O_RDONLY);
3610 if (fd < 0)
3611 return -1;
3612
3613 if (offset != (CORE_ADDR) 0
3614 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
3615 n = -1;
3616 else
3617 n = read (fd, myaddr, len);
3618
3619 close (fd);
3620
3621 return n;
3622 }
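/* An illustrative, standalone sketch (not part of gdbserver) of what
   the transferred auxv bytes contain: ElfW(auxv_t) pairs terminated
   by AT_NULL, read here from our own /proc/self/auxv.  */
#if 0
#include <elf.h>
#include <fcntl.h>
#include <link.h>
#include <stdio.h>
#include <unistd.h>

int
main (void)
{
  ElfW(auxv_t) av;
  int fd = open ("/proc/self/auxv", O_RDONLY);

  if (fd < 0)
    return 1;
  while (read (fd, &av, sizeof av) == sizeof av && av.a_type != AT_NULL)
    if (av.a_type == AT_PAGESZ)
      printf ("page size: %ld\n", (long) av.a_un.a_val);
  close (fd);
  return 0;
}
#endif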
3623
3624 /* These breakpoint and watchpoint related wrapper functions simply
3625 pass on the function call if the target has registered a
3626 corresponding function. */
3627
3628 static int
3629 linux_insert_point (char type, CORE_ADDR addr, int len)
3630 {
3631 if (the_low_target.insert_point != NULL)
3632 return the_low_target.insert_point (type, addr, len);
3633 else
3634 /* Unsupported (see target.h). */
3635 return 1;
3636 }
3637
3638 static int
3639 linux_remove_point (char type, CORE_ADDR addr, int len)
3640 {
3641 if (the_low_target.remove_point != NULL)
3642 return the_low_target.remove_point (type, addr, len);
3643 else
3644 /* Unsupported (see target.h). */
3645 return 1;
3646 }
3647
3648 static int
3649 linux_stopped_by_watchpoint (void)
3650 {
3651 struct lwp_info *lwp = get_thread_lwp (current_inferior);
3652
3653 return lwp->stopped_by_watchpoint;
3654 }
3655
3656 static CORE_ADDR
3657 linux_stopped_data_address (void)
3658 {
3659 struct lwp_info *lwp = get_thread_lwp (current_inferior);
3660
3661 return lwp->stopped_data_address;
3662 }
3663
3664 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
3665 #if defined(__mcoldfire__)
3666 /* These should really be defined in the kernel's ptrace.h header. */
3667 #define PT_TEXT_ADDR 49*4
3668 #define PT_DATA_ADDR 50*4
3669 #define PT_TEXT_END_ADDR 51*4
3670 #endif
3671
3672 /* Under uClinux, programs are loaded at non-zero offsets, which we need
3673 to tell gdb about. */
3674
3675 static int
3676 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
3677 {
3678 #if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
3679 unsigned long text, text_end, data;
3680 int pid = lwpid_of (get_thread_lwp (current_inferior));
3681
3682 errno = 0;
3683
3684 text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
3685 text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
3686 data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);
3687
3688 if (errno == 0)
3689 {
3690 /* Both text and data offsets produced at compile-time (and so
3691 used by gdb) are relative to the beginning of the program,
3692 with the data segment immediately following the text segment.
3693 However, the actual runtime layout in memory may put the data
3694 somewhere else, so when we send gdb a data base-address, we
3695 use the real data base address and subtract the compile-time
3696 data base-address from it (which is just the length of the
3697 text segment). BSS immediately follows data in both
3698 cases. */
3699 *text_p = text;
3700 *data_p = data - (text_end - text);
3701
3702 return 1;
3703 }
3704 #endif
3705 return 0;
3706 }
3707 #endif
3708
3709 static int
3710 compare_ints (const void *xa, const void *xb)
3711 {
3712 int a = *(const int *)xa;
3713 int b = *(const int *)xb;
3714
3715 return a - b;
3716 }
3717
3718 static int *
3719 unique (int *b, int *e)
3720 {
3721 int *d = b;
3722 while (++b != e)
3723 if (*d != *b)
3724 *++d = *b;
3725 return ++d;
3726 }
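/* An illustrative, standalone usage sketch (not part of gdbserver)
   of the qsort + unique idiom that list_threads below applies to
   core numbers; it reuses compare_ints and unique from above.  */
#if 0
#include <stdio.h>
#include <stdlib.h>

int
main (void)
{
  int cores[] = { 3, 1, 3, 0, 1 };
  int n = sizeof cores / sizeof cores[0];
  int *end, *p;

  qsort (cores, n, sizeof (int), compare_ints);
  end = unique (cores, cores + n);	/* Leaves 0 1 3.  */
  for (p = cores; p != end; p++)
    printf ("%d ", *p);
  printf ("\n");
  return 0;
}
#endif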
3727
3728 /* Given PID, iterates over all threads in that process.
3729
3730 Information about each thread, in a format suitable for qXfer:osdata:thread
3731 is printed to BUFFER, if it's not NULL. BUFFER is assumed to be already
3732 initialized, and the caller is responsible for finishing and appending '\0'
3733 to it.
3734
3735 The list of cores that threads are running on is assigned to *CORES, if it
3736 is not NULL. If no cores are found, *CORES will be set to NULL. Caller
3737 should free *CORES. */
3738
3739 static void
3740 list_threads (int pid, struct buffer *buffer, char **cores)
3741 {
3742 int count = 0;
3743 int allocated = 10;
3744 int *core_numbers = xmalloc (sizeof (int) * allocated);
3745 char pathname[128];
3746 DIR *dir;
3747 struct dirent *dp;
3748 struct stat statbuf;
3749
3750 sprintf (pathname, "/proc/%d/task", pid);
3751 if (stat (pathname, &statbuf) == 0 && S_ISDIR (statbuf.st_mode))
3752 {
3753 dir = opendir (pathname);
3754 if (!dir)
3755 {
3756 free (core_numbers);
3757 return;
3758 }
3759
3760 while ((dp = readdir (dir)) != NULL)
3761 {
3762 unsigned long lwp = strtoul (dp->d_name, NULL, 10);
3763
3764 if (lwp != 0)
3765 {
3766 unsigned core = linux_core_of_thread (ptid_build (pid, lwp, 0));
3767
3768 if (core != -1)
3769 {
3770 char s[sizeof ("4294967295")];
3771 sprintf (s, "%u", core);
3772
3773 if (count == allocated)
3774 {
3775 allocated *= 2;
3776 core_numbers = realloc (core_numbers,
3777 sizeof (int) * allocated);
3778 }
3779 core_numbers[count++] = core;
3780 if (buffer)
3781 buffer_xml_printf (buffer,
3782 "<item>"
3783 "<column name=\"pid\">%d</column>"
3784 "<column name=\"tid\">%s</column>"
3785 "<column name=\"core\">%s</column>"
3786 "</item>", pid, dp->d_name, s);
3787 }
3788 else
3789 {
3790 if (buffer)
3791 buffer_xml_printf (buffer,
3792 "<item>"
3793 "<column name=\"pid\">%d</column>"
3794 "<column name=\"tid\">%s</column>"
3795 "</item>", pid, dp->d_name);
3796 }
3797 }
3798 }
3799 }
3800
3801 if (cores)
3802 {
3803 *cores = NULL;
3804 if (count > 0)
3805 {
3806 struct buffer buffer2;
3807 int *b;
3808 int *e;
3809 qsort (core_numbers, count, sizeof (int), compare_ints);
3810
3811 /* Remove duplicates. */
3812 b = core_numbers;
3813 e = unique (b, core_numbers + count);
3814
3815 buffer_init (&buffer2);
3816
3817 for (b = core_numbers; b != e; ++b)
3818 {
3819 char number[sizeof ("4294967295")];
3820 sprintf (number, "%u", *b);
3821 buffer_xml_printf (&buffer2, "%s%s",
3822 (b == core_numbers) ? "" : ",", number);
3823 }
3824 buffer_grow_str0 (&buffer2, "");
3825
3826 *cores = buffer_finish (&buffer2);
3827 }
3828 }
3829 free (core_numbers);
3830 }
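/* For illustration, with a two-thread process 1234 whose threads run
   on cores 0 and 1, list_threads appends items of this shape to
   BUFFER (reformatted here for readability; values hypothetical):

     <item>
       <column name="pid">1234</column>
       <column name="tid">1234</column>
       <column name="core">0</column>
     </item>
     <item>
       <column name="pid">1234</column>
       <column name="tid">1235</column>
       <column name="core">1</column>
     </item>

   and sets *CORES to the string "0,1".  */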
3831
3832 static void
3833 show_process (int pid, const char *username, struct buffer *buffer)
3834 {
3835 char pathname[128];
3836 FILE *f;
3837 char cmd[MAXPATHLEN + 1];
3838
3839 sprintf (pathname, "/proc/%d/cmdline", pid);
3840
3841 if ((f = fopen (pathname, "r")) != NULL)
3842 {
3843 size_t len = fread (cmd, 1, sizeof (cmd) - 1, f);
3844 if (len > 0)
3845 {
	  char *cores = NULL;
3847 int i;
3848 for (i = 0; i < len; i++)
3849 if (cmd[i] == '\0')
3850 cmd[i] = ' ';
3851 cmd[len] = '\0';
3852
3853 buffer_xml_printf (buffer,
3854 "<item>"
3855 "<column name=\"pid\">%d</column>"
3856 "<column name=\"user\">%s</column>"
3857 "<column name=\"command\">%s</column>",
3858 pid,
3859 username,
3860 cmd);
3861
3862 /* This only collects core numbers, and does not print threads. */
3863 list_threads (pid, NULL, &cores);
3864
3865 if (cores)
3866 {
3867 buffer_xml_printf (buffer,
3868 "<column name=\"cores\">%s</column>", cores);
3869 free (cores);
3870 }
3871
3872 buffer_xml_printf (buffer, "</item>");
3873 }
3874 fclose (f);
3875 }
3876 }
3877
3878 static int
3879 linux_qxfer_osdata (const char *annex,
3880 unsigned char *readbuf, unsigned const char *writebuf,
3881 CORE_ADDR offset, int len)
3882 {
  /* We take a snapshot of the process list when the object starts
     to be read.  */
3885 static const char *buf;
3886 static long len_avail = -1;
3887 static struct buffer buffer;
3888 int processes = 0;
3889 int threads = 0;
3890
3891 DIR *dirp;
3892
3893 if (strcmp (annex, "processes") == 0)
3894 processes = 1;
3895 else if (strcmp (annex, "threads") == 0)
3896 threads = 1;
3897 else
3898 return 0;
3899
3900 if (!readbuf || writebuf)
3901 return 0;
3902
3903 if (offset == 0)
3904 {
3905 if (len_avail != -1 && len_avail != 0)
3906 buffer_free (&buffer);
3907 len_avail = 0;
3908 buf = NULL;
3909 buffer_init (&buffer);
3910 if (processes)
3911 buffer_grow_str (&buffer, "<osdata type=\"processes\">");
3912 else if (threads)
3913 buffer_grow_str (&buffer, "<osdata type=\"threads\">");
3914
3915 dirp = opendir ("/proc");
3916 if (dirp)
3917 {
3918 struct dirent *dp;
3919 while ((dp = readdir (dirp)) != NULL)
3920 {
3921 struct stat statbuf;
3922 char procentry[sizeof ("/proc/4294967295")];
3923
3924 if (!isdigit (dp->d_name[0])
3925 || strlen (dp->d_name) > sizeof ("4294967295") - 1)
3926 continue;
3927
3928 sprintf (procentry, "/proc/%s", dp->d_name);
3929 if (stat (procentry, &statbuf) == 0
3930 && S_ISDIR (statbuf.st_mode))
3931 {
3932 int pid = (int) strtoul (dp->d_name, NULL, 10);
3933
3934 if (processes)
3935 {
3936 struct passwd *entry = getpwuid (statbuf.st_uid);
3937 show_process (pid, entry ? entry->pw_name : "?", &buffer);
3938 }
3939 else if (threads)
3940 {
3941 list_threads (pid, &buffer, NULL);
3942 }
3943 }
3944 }
3945
3946 closedir (dirp);
3947 }
3948 buffer_grow_str0 (&buffer, "</osdata>\n");
3949 buf = buffer_finish (&buffer);
3950 len_avail = strlen (buf);
3951 }
3952
3953 if (offset >= len_avail)
3954 {
3955 /* Done. Get rid of the data. */
3956 buffer_free (&buffer);
3957 buf = NULL;
3958 len_avail = 0;
3959 return 0;
3960 }
3961
3962 if (len > len_avail - offset)
3963 len = len_avail - offset;
3964 memcpy (readbuf, buf + offset, len);
3965
3966 return len;
3967 }
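/* GDB fetches this object in chunks, with an increasing OFFSET, until
   a reply with no data marks the end.  A sketch of the packet
   exchange (lengths are hypothetical):

     qXfer:osdata:read:processes:0,1000     -> first 0x1000 bytes
     qXfer:osdata:read:processes:1000,1000  -> next chunk, and so on

   The first (offset 0) request builds the snapshot above; the request
   that reads past the end frees it.  */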
3968
/* Convert a native/host siginfo object, into/from the siginfo in the
   layout of the inferior's architecture.  A DIRECTION of 1 converts
   from the inferior layout to the native layout; any other value
   converts from the native layout to the inferior layout.  */
3971
3972 static void
3973 siginfo_fixup (struct siginfo *siginfo, void *inf_siginfo, int direction)
3974 {
3975 int done = 0;
3976
3977 if (the_low_target.siginfo_fixup != NULL)
3978 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
3979
3980 /* If there was no callback, or the callback didn't do anything,
3981 then just do a straight memcpy. */
3982 if (!done)
3983 {
3984 if (direction == 1)
3985 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
3986 else
3987 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
3988 }
3989 }
3990
3991 static int
3992 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
3993 unsigned const char *writebuf, CORE_ADDR offset, int len)
3994 {
3995 int pid;
3996 struct siginfo siginfo;
3997 char inf_siginfo[sizeof (struct siginfo)];
3998
3999 if (current_inferior == NULL)
4000 return -1;
4001
4002 pid = lwpid_of (get_thread_lwp (current_inferior));
4003
4004 if (debug_threads)
4005 fprintf (stderr, "%s siginfo for lwp %d.\n",
4006 readbuf != NULL ? "Reading" : "Writing",
4007 pid);
4008
4009 if (offset > sizeof (siginfo))
4010 return -1;
4011
4012 if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
4013 return -1;
4014
4015 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
4016 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4017 inferior with a 64-bit GDBSERVER should look the same as debugging it
4018 with a 32-bit GDBSERVER, we need to convert it. */
4019 siginfo_fixup (&siginfo, inf_siginfo, 0);
4020
4021 if (offset + len > sizeof (siginfo))
4022 len = sizeof (siginfo) - offset;
4023
4024 if (readbuf != NULL)
4025 memcpy (readbuf, inf_siginfo + offset, len);
4026 else
4027 {
4028 memcpy (inf_siginfo + offset, writebuf, len);
4029
4030 /* Convert back to ptrace layout before flushing it out. */
4031 siginfo_fixup (&siginfo, inf_siginfo, 1);
4032
4033 if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
4034 return -1;
4035 }
4036
4037 return len;
4038 }
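/* GDB accesses this object with the qXfer:siginfo:read and
   qXfer:siginfo:write packets, e.g. to let the user inspect the
   faulting address of a SIGSEGV in the inferior.  */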
4039
/* SIGCHLD handler that serves two purposes: in non-stop/async mode,
   it notifies us when a child changes state; it also serves as the
   handler for the sigsuspend in my_waitpid.  */
4043
4044 static void
4045 sigchld_handler (int signo)
4046 {
4047 int old_errno = errno;
4048
4049 if (debug_threads)
4050 /* fprintf is not async-signal-safe, so call write directly. */
4051 write (2, "sigchld_handler\n", sizeof ("sigchld_handler\n") - 1);
4052
4053 if (target_is_async_p ())
4054 async_file_mark (); /* trigger a linux_wait */
4055
4056 errno = old_errno;
4057 }
4058
4059 static int
4060 linux_supports_non_stop (void)
4061 {
4062 return 1;
4063 }
4064
4065 static int
4066 linux_async (int enable)
4067 {
4068 int previous = (linux_event_pipe[0] != -1);
4069
4070 if (debug_threads)
4071 fprintf (stderr, "linux_async (%d), previous=%d\n",
4072 enable, previous);
4073
4074 if (previous != enable)
4075 {
4076 sigset_t mask;
4077 sigemptyset (&mask);
4078 sigaddset (&mask, SIGCHLD);
4079
4080 sigprocmask (SIG_BLOCK, &mask, NULL);
4081
4082 if (enable)
4083 {
4084 if (pipe (linux_event_pipe) == -1)
4085 fatal ("creating event pipe failed.");
4086
4087 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
4088 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
4089
4090 /* Register the event loop handler. */
4091 add_file_handler (linux_event_pipe[0],
4092 handle_target_event, NULL);
4093
4094 /* Always trigger a linux_wait. */
4095 async_file_mark ();
4096 }
4097 else
4098 {
4099 delete_file_handler (linux_event_pipe[0]);
4100
4101 close (linux_event_pipe[0]);
4102 close (linux_event_pipe[1]);
4103 linux_event_pipe[0] = -1;
4104 linux_event_pipe[1] = -1;
4105 }
4106
4107 sigprocmask (SIG_UNBLOCK, &mask, NULL);
4108 }
4109
4110 return previous;
4111 }
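/* The pipe above implements the usual self-pipe trick:
   async_file_mark (called from sigchld_handler) marks the event pipe
   by writing to linux_event_pipe[1], which wakes up the event loop
   waiting on linux_event_pipe[0] and causes handle_target_event to
   run, leading to a linux_wait.  Both ends are non-blocking, so the
   signal handler can never block writing to a full pipe.  */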
4112
4113 static int
4114 linux_start_non_stop (int nonstop)
4115 {
4116 /* Register or unregister from event-loop accordingly. */
4117 linux_async (nonstop);
4118 return 0;
4119 }
4120
4121 static int
4122 linux_supports_multi_process (void)
4123 {
4124 return 1;
4125 }
4126
4127
4128 /* Enumerate spufs IDs for process PID. */
4129 static int
4130 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
4131 {
4132 int pos = 0;
4133 int written = 0;
4134 char path[128];
4135 DIR *dir;
4136 struct dirent *entry;
4137
4138 sprintf (path, "/proc/%ld/fd", pid);
4139 dir = opendir (path);
4140 if (!dir)
4141 return -1;
4142
4143 rewinddir (dir);
4144 while ((entry = readdir (dir)) != NULL)
4145 {
4146 struct stat st;
4147 struct statfs stfs;
4148 int fd;
4149
4150 fd = atoi (entry->d_name);
4151 if (!fd)
4152 continue;
4153
4154 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
4155 if (stat (path, &st) != 0)
4156 continue;
4157 if (!S_ISDIR (st.st_mode))
4158 continue;
4159
4160 if (statfs (path, &stfs) != 0)
4161 continue;
4162 if (stfs.f_type != SPUFS_MAGIC)
4163 continue;
4164
4165 if (pos >= offset && pos + 4 <= offset + len)
4166 {
4167 *(unsigned int *)(buf + pos - offset) = fd;
4168 written += 4;
4169 }
4170 pos += 4;
4171 }
4172
4173 closedir (dir);
4174 return written;
4175 }
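/* The result is a packed array of 32-bit context IDs.  As a worked
   example with hypothetical fds: if the process has SPU contexts on
   fds 5, 9 and 12, a read with OFFSET == 4 and LEN == 8 skips the
   first ID and stores { 9, 12 } (8 bytes written).  A read starting
   past the last ID returns 0.  */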
4176
4177 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
4178 object type, using the /proc file system. */
4179 static int
4180 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
4181 unsigned const char *writebuf,
4182 CORE_ADDR offset, int len)
4183 {
4184 long pid = lwpid_of (get_thread_lwp (current_inferior));
4185 char buf[128];
4186 int fd = 0;
4187 int ret = 0;
4188
4189 if (!writebuf && !readbuf)
4190 return -1;
4191
4192 if (!*annex)
4193 {
4194 if (!readbuf)
4195 return -1;
4196 else
4197 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4198 }
4199
4200 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
  fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
4202 if (fd <= 0)
4203 return -1;
4204
4205 if (offset != 0
4206 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4207 {
4208 close (fd);
4209 return 0;
4210 }
4211
4212 if (writebuf)
4213 ret = write (fd, writebuf, (size_t) len);
4214 else
4215 ret = read (fd, readbuf, (size_t) len);
4216
4217 close (fd);
4218 return ret;
4219 }
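/* The ANNEX is either empty (enumerate context IDs, as above) or has
   the form "<id>/<file>", e.g. "7/regs", which maps to the spufs file
   /proc/PID/fd/7/regs of the corresponding SPU context.  */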
4220
4221 static int
4222 linux_core_of_thread (ptid_t ptid)
4223 {
  char filename[sizeof ("/proc//task//stat")
		+ 2 * 20 /* decimal digits for two numbers, 64 bits each.  */
		+ 1];
4227 FILE *f;
4228 char *content = NULL;
4229 char *p;
4230 char *ts = 0;
4231 int content_read = 0;
4232 int i;
4233 int core;
4234
4235 sprintf (filename, "/proc/%d/task/%ld/stat",
4236 ptid_get_pid (ptid), ptid_get_lwp (ptid));
4237 f = fopen (filename, "r");
4238 if (!f)
4239 return -1;
4240
4241 for (;;)
4242 {
4243 int n;
      content = realloc (content, content_read + 1024);
      if (content == NULL)
	fatal ("failed to grow buffer for stat file contents");
4245 n = fread (content + content_read, 1, 1024, f);
4246 content_read += n;
4247 if (n < 1024)
4248 {
4249 content[content_read] = '\0';
4250 break;
4251 }
4252 }
4253
4254 p = strchr (content, '(');
  p = strchr (p, ')') + 2;	/* Skip ")" and the following space.  */
4256
4257 p = strtok_r (p, " ", &ts);
4258 for (i = 0; i != 36; ++i)
4259 p = strtok_r (NULL, " ", &ts);
4260
  /* sscanf returns EOF, not 0, on an empty input, and the strtok_r
     loop above can yield NULL; in either case report no core.  */
  if (p == NULL || sscanf (p, "%d", &core) != 1)
    core = -1;
4263
4264 free (content);
4265 fclose (f);
4266
4267 return core;
4268 }
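/* A sketch of what is being parsed: /proc/PID/task/LWP/stat is a
   single line such as

     1234 (foo bar) S 1 1234 1234 0 -1 ... 2

   The command name in parentheses may itself contain spaces, which is
   why we skip past the closing ')' before tokenizing.  The field we
   want, the processor a task last ran on, is the 39th field counting
   from the leading pid, i.e. the 37th token after the command
   name.  */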
4269
4270 static void
4271 linux_process_qsupported (const char *query)
4272 {
4273 if (the_low_target.process_qsupported != NULL)
4274 the_low_target.process_qsupported (query);
4275 }
4276
4277 static int
4278 linux_supports_tracepoints (void)
4279 {
  if (the_low_target.supports_tracepoints == NULL)
4281 return 0;
4282
4283 return (*the_low_target.supports_tracepoints) ();
4284 }
4285
4286 static CORE_ADDR
4287 linux_read_pc (struct regcache *regcache)
4288 {
4289 if (the_low_target.get_pc == NULL)
4290 return 0;
4291
4292 return (*the_low_target.get_pc) (regcache);
4293 }
4294
4295 static void
4296 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
4297 {
4298 gdb_assert (the_low_target.set_pc != NULL);
4299
4300 (*the_low_target.set_pc) (regcache, pc);
4301 }
4302
4303 static int
4304 linux_thread_stopped (struct thread_info *thread)
4305 {
4306 return get_thread_lwp (thread)->stopped;
4307 }
4308
4309 /* This exposes stop-all-threads functionality to other modules. */
4310
4311 static void
4312 linux_pause_all (void)
4313 {
4314 stop_all_lwps ();
4315 }
4316
4317 static struct target_ops linux_target_ops = {
4318 linux_create_inferior,
4319 linux_attach,
4320 linux_kill,
4321 linux_detach,
4322 linux_mourn,
4323 linux_join,
4324 linux_thread_alive,
4325 linux_resume,
4326 linux_wait,
4327 linux_fetch_registers,
4328 linux_store_registers,
4329 linux_read_memory,
4330 linux_write_memory,
4331 linux_look_up_symbols,
4332 linux_request_interrupt,
4333 linux_read_auxv,
4334 linux_insert_point,
4335 linux_remove_point,
4336 linux_stopped_by_watchpoint,
4337 linux_stopped_data_address,
4338 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4339 linux_read_offsets,
4340 #else
4341 NULL,
4342 #endif
4343 #ifdef USE_THREAD_DB
4344 thread_db_get_tls_address,
4345 #else
4346 NULL,
4347 #endif
4348 linux_qxfer_spu,
4349 hostio_last_error_from_errno,
4350 linux_qxfer_osdata,
4351 linux_xfer_siginfo,
4352 linux_supports_non_stop,
4353 linux_async,
4354 linux_start_non_stop,
4355 linux_supports_multi_process,
4356 #ifdef USE_THREAD_DB
4357 thread_db_handle_monitor_command,
4358 #else
4359 NULL,
4360 #endif
4361 linux_core_of_thread,
4362 linux_process_qsupported,
4363 linux_supports_tracepoints,
4364 linux_read_pc,
4365 linux_write_pc,
4366 linux_thread_stopped,
4367 linux_pause_all,
4368 NULL, /* get_tib_address (Windows OS specific). */
4369 };
4370
static void
linux_init_signals (void)
4373 {
4374 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
4375 to find what the cancel signal actually is. */
4376 #ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does. */
4377 signal (__SIGRTMIN+1, SIG_IGN);
4378 #endif
4379 }
4380
4381 void
4382 initialize_low (void)
4383 {
4384 struct sigaction sigchld_action;
4385 memset (&sigchld_action, 0, sizeof (sigchld_action));
4386 set_target_ops (&linux_target_ops);
4387 set_breakpoint_data (the_low_target.breakpoint,
4388 the_low_target.breakpoint_len);
4389 linux_init_signals ();
4390 linux_test_for_tracefork ();
4391 #ifdef HAVE_LINUX_REGSETS
4392 for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
4393 ;
4394 disabled_regsets = xmalloc (num_regsets);
4395 #endif
4396
4397 sigchld_action.sa_handler = sigchld_handler;
4398 sigemptyset (&sigchld_action.sa_mask);
4399 sigchld_action.sa_flags = SA_RESTART;
4400 sigaction (SIGCHLD, &sigchld_action, NULL);
4401 }