* linux-low.c (linux_wait_for_event_1): Move passing the signal to
[binutils-gdb.git] / gdb / gdbserver / linux-low.c
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
3 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include "linux-low.h"
22
23 #include <sys/wait.h>
24 #include <stdio.h>
25 #include <sys/param.h>
26 #include <sys/ptrace.h>
27 #include <signal.h>
28 #include <sys/ioctl.h>
29 #include <fcntl.h>
30 #include <string.h>
31 #include <stdlib.h>
32 #include <unistd.h>
33 #include <errno.h>
34 #include <sys/syscall.h>
35 #include <sched.h>
36 #include <ctype.h>
37 #include <pwd.h>
38 #include <sys/types.h>
39 #include <dirent.h>
40 #include <sys/stat.h>
41 #include <sys/vfs.h>
42 #include <sys/uio.h>
43 #ifndef ELFMAG0
44 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
45 then ELFMAG0 will have been defined. If it didn't get included by
46 gdb_proc_service.h then including it will likely introduce a duplicate
47 definition of elf_fpregset_t. */
48 #include <elf.h>
49 #endif
50
51 #ifndef SPUFS_MAGIC
52 #define SPUFS_MAGIC 0x23c9b64e
53 #endif
54
55 #ifndef PTRACE_GETSIGINFO
56 # define PTRACE_GETSIGINFO 0x4202
57 # define PTRACE_SETSIGINFO 0x4203
58 #endif
59
60 #ifndef O_LARGEFILE
61 #define O_LARGEFILE 0
62 #endif
63
64 /* If the system headers did not provide the constants, hard-code the normal
65 values. */
66 #ifndef PTRACE_EVENT_FORK
67
68 #define PTRACE_SETOPTIONS 0x4200
69 #define PTRACE_GETEVENTMSG 0x4201
70
71 /* options set using PTRACE_SETOPTIONS */
72 #define PTRACE_O_TRACESYSGOOD 0x00000001
73 #define PTRACE_O_TRACEFORK 0x00000002
74 #define PTRACE_O_TRACEVFORK 0x00000004
75 #define PTRACE_O_TRACECLONE 0x00000008
76 #define PTRACE_O_TRACEEXEC 0x00000010
77 #define PTRACE_O_TRACEVFORKDONE 0x00000020
78 #define PTRACE_O_TRACEEXIT 0x00000040
79
80 /* Wait extended result codes for the above trace options. */
81 #define PTRACE_EVENT_FORK 1
82 #define PTRACE_EVENT_VFORK 2
83 #define PTRACE_EVENT_CLONE 3
84 #define PTRACE_EVENT_EXEC 4
85 #define PTRACE_EVENT_VFORK_DONE 5
86 #define PTRACE_EVENT_EXIT 6
87
88 #endif /* PTRACE_EVENT_FORK */
89
90 /* We can't always assume that this flag is available, but all systems
91 with the ptrace event handlers also have __WALL, so it's safe to use
92 in some contexts. */
93 #ifndef __WALL
94 #define __WALL 0x40000000 /* Wait for any child. */
95 #endif
96
97 #ifndef W_STOPCODE
98 #define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
99 #endif
100
101 #ifdef __UCLIBC__
102 #if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
103 #define HAS_NOMMU
104 #endif
105 #endif
106
107 /* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
108 representation of the thread ID.
109
110 ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
111 the same as the LWP ID.
112
113 ``all_processes'' is keyed by the "overall process ID", which
114 GNU/Linux calls tgid, "thread group ID". */
115
116 struct inferior_list all_lwps;
117
118 /* A list of all unknown processes which receive stop signals. Some other
119 process will presumably claim each of these as forked children
120 momentarily. */
121
122 struct inferior_list stopped_pids;
123
124 /* FIXME this is a bit of a hack, and could be removed. */
125 int stopping_threads;
126
127 /* FIXME make into a target method? */
128 int using_threads = 1;
129
130 /* This flag is true iff we've just created or attached to our first
131 inferior but it has not stopped yet. As soon as it does, we need
132 to call the low target's arch_setup callback. Doing this only on
133 the first inferior avoids reinializing the architecture on every
134 inferior, and avoids messing with the register caches of the
135 already running inferiors. NOTE: this assumes all inferiors under
136 control of gdbserver have the same architecture. */
137 static int new_inferior;
138
139 static void linux_resume_one_lwp (struct lwp_info *lwp,
140 int step, int signal, siginfo_t *info);
141 static void linux_resume (struct thread_resume *resume_info, size_t n);
142 static void stop_all_lwps (void);
143 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
144 static void *add_lwp (ptid_t ptid);
145 static int linux_stopped_by_watchpoint (void);
146 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
147 static int linux_core_of_thread (ptid_t ptid);
148 static void proceed_all_lwps (void);
149 static void unstop_all_lwps (struct lwp_info *except);
150 static int finish_step_over (struct lwp_info *lwp);
151 static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
152 static int kill_lwp (unsigned long lwpid, int signo);
153 static void linux_enable_event_reporting (int pid);
154
155 /* True if the low target can hardware single-step. Such targets
156 don't need a BREAKPOINT_REINSERT_ADDR callback. */
157
158 static int
159 can_hardware_single_step (void)
160 {
161 return (the_low_target.breakpoint_reinsert_addr == NULL);
162 }
163
164 /* True if the low target supports memory breakpoints. If so, we'll
165 have a GET_PC implementation. */
166
167 static int
168 supports_breakpoints (void)
169 {
170 return (the_low_target.get_pc != NULL);
171 }
172
/* A signal recorded for later delivery to an LWP: the signal number
   together with its full siginfo.  Entries form a singly-linked list
   chained through PREV.  */
struct pending_signals
{
  int signal;			/* Signal number.  */
  siginfo_t info;		/* Full siginfo captured for the signal.  */
  struct pending_signals *prev;	/* Previously queued entry, or NULL.  */
};
179
180 #define PTRACE_ARG3_TYPE void *
181 #define PTRACE_ARG4_TYPE void *
182 #define PTRACE_XFER_TYPE long
183
184 #ifdef HAVE_LINUX_REGSETS
185 static char *disabled_regsets;
186 static int num_regsets;
187 #endif
188
189 /* The read/write ends of the pipe registered as waitable file in the
190 event loop. */
191 static int linux_event_pipe[2] = { -1, -1 };
192
193 /* True if we're currently in async mode. */
194 #define target_is_async_p() (linux_event_pipe[0] != -1)
195
196 static void send_sigstop (struct lwp_info *lwp);
197 static void wait_for_sigstop (struct inferior_list_entry *entry);
198
/* Accepts an integer PID; Returns a string representing a file that
   can be opened to get info for the child process.
   Space for the result is malloc'd, caller must free.  */

char *
linux_child_pid_to_exec_file (int pid)
{
  char *name1, *name2;
  ssize_t len;

  name1 = xmalloc (MAXPATHLEN);
  name2 = xmalloc (MAXPATHLEN);
  memset (name2, 0, MAXPATHLEN);

  snprintf (name1, MAXPATHLEN, "/proc/%d/exe", pid);

  /* readlink does not NUL-terminate its output.  Reserve one byte so
     that even a maximally long link target leaves room for the
     terminator we add below (the old code could return an
     unterminated buffer when the link filled all of MAXPATHLEN).  */
  len = readlink (name1, name2, MAXPATHLEN - 1);
  if (len > 0)
    {
      name2[len] = '\0';
      free (name1);
      return name2;
    }
  else
    {
      /* Link unreadable (e.g. process gone); hand back the /proc
	 path itself, as before.  */
      free (name2);
      return name1;
    }
}
224
/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header)
{
  const unsigned char *ident = header->e_ident;

  /* First validate the ELF magic bytes, then check the class.  */
  if (ident[EI_MAG0] != ELFMAG0
      || ident[EI_MAG1] != ELFMAG1
      || ident[EI_MAG2] != ELFMAG2
      || ident[EI_MAG3] != ELFMAG3)
    return 0;

  return ident[EI_CLASS] == ELFCLASS64;
}
236
/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

int
elf_64_file_p (const char *file)
{
  Elf64_Ehdr header;
  int result;
  int fd = open (file, O_RDONLY);

  if (fd < 0)
    return -1;

  /* Anything shorter than a full ELF header cannot be a 64-bit ELF
     file.  */
  if (read (fd, &header, sizeof (header)) == sizeof (header))
    result = elf_64_header_p (&header);
  else
    result = 0;

  close (fd);
  return result;
}
260
261 static void
262 delete_lwp (struct lwp_info *lwp)
263 {
264 remove_thread (get_lwp_thread (lwp));
265 remove_inferior (&all_lwps, &lwp->head);
266 free (lwp->arch_private);
267 free (lwp);
268 }
269
/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  /* Is this the first process?  If so, then set the arch.  The
     actual arch_setup call happens once the inferior first stops.  */
  if (all_processes.head == NULL)
    new_inferior = 1;

  proc = add_process (pid, attached);
  /* Zero-initialized private data; arch-specific state is attached
     below if the low target provides a hook.  */
  proc->private = xcalloc (1, sizeof (*proc->private));

  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}
290
/* Wrapper function for waitpid which handles EINTR, and emulates
   __WALL for systems where that is not available.  The emulation
   alternates non-blocking waits between the plain and __WCLONE
   flavors, sleeping in sigsuspend between full rounds.  Returns the
   waitpid result with errno preserved from the wait itself.  */

static int
my_waitpid (int pid, int *status, int flags)
{
  int ret, out_errno;

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);

  if (flags & __WALL)
    {
      sigset_t block_mask, org_mask, wake_mask;
      int wnohang;

      /* Remember whether the caller wanted a non-blocking wait; we
	 force WNOHANG below so each flavor can be polled in turn.  */
      wnohang = (flags & WNOHANG) != 0;
      flags &= ~(__WALL | __WCLONE);
      flags |= WNOHANG;

      /* Block all signals while here.  This avoids knowing about
	 LinuxThread's signals.  */
      sigfillset (&block_mask);
      sigprocmask (SIG_BLOCK, &block_mask, &org_mask);

      /* ... except during the sigsuspend below.  */
      sigemptyset (&wake_mask);

      while (1)
	{
	  /* Since all signals are blocked, there's no need to check
	     for EINTR here.  */
	  ret = waitpid (pid, status, flags);
	  out_errno = errno;

	  /* A real error (other than "no such children for this
	     flavor") or an actual event ends the loop.  */
	  if (ret == -1 && out_errno != ECHILD)
	    break;
	  else if (ret > 0)
	    break;

	  if (flags & __WCLONE)
	    {
	      /* We've tried both flavors now.  If WNOHANG is set,
		 there's nothing else to do, just bail out.  */
	      if (wnohang)
		break;

	      if (debug_threads)
		fprintf (stderr, "blocking\n");

	      /* Block waiting for signals.  */
	      sigsuspend (&wake_mask);
	    }

	  /* Switch to the other wait flavor for the next poll.  */
	  flags ^= __WCLONE;
	}

      /* Restore the caller's signal mask.  */
      sigprocmask (SIG_SETMASK, &org_mask, NULL);
    }
  else
    {
      do
	ret = waitpid (pid, status, flags);
      while (ret == -1 && errno == EINTR);
      /* Capture errno before the debug fprintf below can clobber
	 it.  */
      out_errno = errno;
    }

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
	     pid, flags, status ? *status : -1, ret);

  errno = out_errno;
  return ret;
}
365
/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  /* Extended ptrace event codes are carried in the upper bits of the
     wait status.  */
  int event = wstat >> 16;
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status = W_STOPCODE (SIGSTOP);

      /* Fetch the new thread's LWP id from the event message.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (! pull_pid_from_list (&stopped_pids, new_pid))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      linux_enable_event_reporting (new_pid);

      /* The clone belongs to the same process as EVENT_CHILD; register
	 it as a new LWP and thread of that process.  */
      ptid = ptid_build (pid_of (event_child), new_pid, 0);
      new_lwp = (struct lwp_info *) add_lwp (ptid);
      add_thread (ptid, new_lwp);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
	{
	  if (stopping_threads)
	    new_lwp->stop_pc = get_stop_pc (new_lwp);
	  else
	    linux_resume_one_lwp (new_lwp, 0, 0, NULL);
	}
      else
	{
	  new_lwp->stop_expected = 1;

	  if (stopping_threads)
	    {
	      new_lwp->stop_pc = get_stop_pc (new_lwp);
	      new_lwp->status_pending_p = 1;
	      new_lwp->status_pending = status;
	    }
	  else
	    /* Pass the signal on.  This is what GDB does - except
	       shouldn't we really report it instead?  */
	    linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
	}

      /* Always resume the current thread.  If we are stopping
	 threads, it will have a pending SIGSTOP; we may as well
	 collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}
444
445 /* Return the PC as read from the regcache of LWP, without any
446 adjustment. */
447
448 static CORE_ADDR
449 get_pc (struct lwp_info *lwp)
450 {
451 struct thread_info *saved_inferior;
452 struct regcache *regcache;
453 CORE_ADDR pc;
454
455 if (the_low_target.get_pc == NULL)
456 return 0;
457
458 saved_inferior = current_inferior;
459 current_inferior = get_lwp_thread (lwp);
460
461 regcache = get_thread_regcache (current_inferior, 1);
462 pc = (*the_low_target.get_pc) (regcache);
463
464 if (debug_threads)
465 fprintf (stderr, "pc is 0x%lx\n", (long) pc);
466
467 current_inferior = saved_inferior;
468 return pc;
469 }
470
471 /* This function should only be called if LWP got a SIGTRAP.
472 The SIGTRAP could mean several things.
473
474 On i386, where decr_pc_after_break is non-zero:
475 If we were single-stepping this process using PTRACE_SINGLESTEP,
476 we will get only the one SIGTRAP (even if the instruction we
477 stepped over was a breakpoint). The value of $eip will be the
478 next instruction.
479 If we continue the process using PTRACE_CONT, we will get a
480 SIGTRAP when we hit a breakpoint. The value of $eip will be
481 the instruction after the breakpoint (i.e. needs to be
482 decremented). If we report the SIGTRAP to GDB, we must also
483 report the undecremented PC. If we cancel the SIGTRAP, we
484 must resume at the decremented PC.
485
486 (Presumably, not yet tested) On a non-decr_pc_after_break machine
487 with hardware or kernel single-step:
488 If we single-step over a breakpoint instruction, our PC will
489 point at the following instruction. If we continue and hit a
490 breakpoint instruction, our PC will point at the breakpoint
491 instruction. */
492
493 static CORE_ADDR
494 get_stop_pc (struct lwp_info *lwp)
495 {
496 CORE_ADDR stop_pc;
497
498 if (the_low_target.get_pc == NULL)
499 return 0;
500
501 stop_pc = get_pc (lwp);
502
503 if (WSTOPSIG (lwp->last_status) == SIGTRAP
504 && !lwp->stepping
505 && !lwp->stopped_by_watchpoint
506 && lwp->last_status >> 16 == 0)
507 stop_pc -= the_low_target.decr_pc_after_break;
508
509 if (debug_threads)
510 fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);
511
512 return stop_pc;
513 }
514
515 static void *
516 add_lwp (ptid_t ptid)
517 {
518 struct lwp_info *lwp;
519
520 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
521 memset (lwp, 0, sizeof (*lwp));
522
523 lwp->head.id = ptid;
524
525 if (the_low_target.new_thread != NULL)
526 lwp->arch_private = the_low_target.new_thread ();
527
528 add_inferior_to_list (&all_lwps, &lwp->head);
529
530 return lwp;
531 }
532
/* Start an inferior process and returns its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  /* No-MMU targets cannot fork; use vfork instead.  */
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      /* Child: request tracing by the parent before exec'ing the
	 debuggee.  */
      ptrace (PTRACE_TRACEME, 0, 0, 0);

#ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      /* Give the inferior its own process group.  */
      setpgid (0, 0);

      /* Try the program path as-is first, then fall back to a PATH
	 search if it wasn't found.  */
      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      /* exec failed; report why and exit with a distinctive code.  */
      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  linux_add_process (pid, 0);

  /* The initial thread's LWP id equals the process id.  */
  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  add_thread (ptid, new_lwp);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}
580
/* Attach to an inferior process.  */

/* Attach to the LWP specified by LWPID.  If INITIAL is nonzero, this
   is the first LWP of a process being attached via linux_attach and
   an attach failure is a fatal error; otherwise LWPID is an
   additional thread of an already-attached process and failure only
   produces a warning.  */

static void
linux_attach_lwp_1 (unsigned long lwpid, int initial)
{
  ptid_t ptid;
  struct lwp_info *new_lwp;

  if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
    {
      if (!initial)
	{
	  /* If we fail to attach to an LWP, just warn.  */
	  fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
		   strerror (errno), errno);
	  fflush (stderr);
	  return;
	}
      else
	/* If we fail to attach to a process, report an error.  */
	error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid,
	       strerror (errno), errno);
    }

  if (initial)
    /* NOTE/FIXME: This lwp might have not been the tgid.  */
    ptid = ptid_build (lwpid, lwpid, 0);
  else
    {
      /* Note that extracting the pid from the current inferior is
	 safe, since we're always called in the context of the same
	 process as this new thread.  */
      int pid = pid_of (get_thread_lwp (current_inferior));
      ptid = ptid_build (pid, lwpid, 0);
    }

  new_lwp = (struct lwp_info *) add_lwp (ptid);
  add_thread (ptid, new_lwp);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.
	??? If the process already has several threads we leave the other
	threads running.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;
}
659
/* Attach to LWPID as a non-initial thread of an already-attached
   process; a failure to attach is reported as a warning, not a fatal
   error (see linux_attach_lwp_1).  */
void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}
665
666 int
667 linux_attach (unsigned long pid)
668 {
669 linux_attach_lwp_1 (pid, 1);
670 linux_add_process (pid, 1);
671
672 if (!non_stop)
673 {
674 struct thread_info *thread;
675
676 /* Don't ignore the initial SIGSTOP if we just attached to this
677 process. It will be collected by wait shortly. */
678 thread = find_thread_ptid (ptid_build (pid, pid, 0));
679 thread->last_resume_kind = resume_stop;
680 }
681
682 return 0;
683 }
684
/* Accumulator passed to second_thread_of_pid_p: counts the threads
   seen so far that belong to process PID.  */
struct counter
{
  int pid;	/* Process whose threads are being counted.  */
  int count;	/* Matching threads found so far.  */
};
690
691 static int
692 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
693 {
694 struct counter *counter = args;
695
696 if (ptid_get_pid (entry->id) == counter->pid)
697 {
698 if (++counter->count > 1)
699 return 1;
700 }
701
702 return 0;
703 }
704
705 static int
706 last_thread_of_process_p (struct thread_info *thread)
707 {
708 ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
709 int pid = ptid_get_pid (ptid);
710 struct counter counter = { pid , 0 };
711
712 return (find_inferior (&all_threads,
713 second_thread_of_pid_p, &counter) == NULL);
714 }
715
716 /* Kill the inferior lwp. */
717
718 static int
719 linux_kill_one_lwp (struct inferior_list_entry *entry, void *args)
720 {
721 struct thread_info *thread = (struct thread_info *) entry;
722 struct lwp_info *lwp = get_thread_lwp (thread);
723 int wstat;
724 int pid = * (int *) args;
725
726 if (ptid_get_pid (entry->id) != pid)
727 return 0;
728
729 /* We avoid killing the first thread here, because of a Linux kernel (at
730 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
731 the children get a chance to be reaped, it will remain a zombie
732 forever. */
733
734 if (lwpid_of (lwp) == pid)
735 {
736 if (debug_threads)
737 fprintf (stderr, "lkop: is last of process %s\n",
738 target_pid_to_str (entry->id));
739 return 0;
740 }
741
742 do
743 {
744 ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);
745
746 /* Make sure it died. The loop is most likely unnecessary. */
747 pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
748 } while (pid > 0 && WIFSTOPPED (wstat));
749
750 return 0;
751 }
752
753 static int
754 linux_kill (int pid)
755 {
756 struct process_info *process;
757 struct lwp_info *lwp;
758 struct thread_info *thread;
759 int wstat;
760 int lwpid;
761
762 process = find_process_pid (pid);
763 if (process == NULL)
764 return -1;
765
766 /* If we're killing a running inferior, make sure it is stopped
767 first, as PTRACE_KILL will not work otherwise. */
768 stop_all_lwps ();
769
770 find_inferior (&all_threads, linux_kill_one_lwp, &pid);
771
772 /* See the comment in linux_kill_one_lwp. We did not kill the first
773 thread in the list, so do so now. */
774 lwp = find_lwp_pid (pid_to_ptid (pid));
775 thread = get_lwp_thread (lwp);
776
777 if (debug_threads)
778 fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
779 lwpid_of (lwp), pid);
780
781 do
782 {
783 ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);
784
785 /* Make sure it died. The loop is most likely unnecessary. */
786 lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
787 } while (lwpid > 0 && WIFSTOPPED (wstat));
788
789 the_target->mourn (process);
790
791 /* Since we presently can only stop all lwps of all processes, we
792 need to unstop lwps of other processes. */
793 unstop_all_lwps (NULL);
794 return 0;
795 }
796
/* find_inferior callback to detach from one LWP.  ARGS points at the
   pid of the process being detached; LWPs of other processes are
   skipped.  */

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If this process is stopped but is expecting a SIGSTOP, then make
     sure we take care of that now.  This isn't absolutely guaranteed
     to collect the SIGSTOP, but is fairly likely to.  */
  if (lwp->stop_expected)
    {
      int wstat;
      /* Clear stop_expected, so that the SIGSTOP will be reported.  */
      lwp->stop_expected = 0;
      linux_resume_one_lwp (lwp, 0, 0, NULL);
      linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
			   get_lwp_thread (lwp));

  /* Finally, let it resume.  */
  ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0);

  delete_lwp (lwp);
  return 0;
}
829
830 static int
831 linux_detach (int pid)
832 {
833 struct process_info *process;
834
835 process = find_process_pid (pid);
836 if (process == NULL)
837 return -1;
838
839 /* Stop all threads before detaching. First, ptrace requires that
840 the thread is stopped to sucessfully detach. Second, thread_db
841 may need to uninstall thread event breakpoints from memory, which
842 only works with a stopped process anyway. */
843 stop_all_lwps ();
844
845 #ifdef USE_THREAD_DB
846 thread_db_detach (process);
847 #endif
848
849 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
850
851 the_target->mourn (process);
852
853 /* Since we presently can only stop all lwps of all processes, we
854 need to unstop lwps of other processes. */
855 unstop_all_lwps (NULL);
856 return 0;
857 }
858
/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct process_info *target_proc = proc;

  /* Delete matching LWPs but keep walking the whole list.  */
  if (pid_of (lp) == pid_of (target_proc))
    delete_lwp (lp);

  return 0;
}
872
873 static void
874 linux_mourn (struct process_info *process)
875 {
876 struct process_info_private *priv;
877
878 #ifdef USE_THREAD_DB
879 thread_db_mourn (process);
880 #endif
881
882 find_inferior (&all_lwps, delete_lwp_callback, process);
883
884 /* Freeing all private data. */
885 priv = process->private;
886 free (priv->arch_private);
887 free (priv);
888 process->private = NULL;
889
890 remove_process (process);
891 }
892
893 static void
894 linux_join (int pid)
895 {
896 int status, ret;
897 struct process_info *process;
898
899 process = find_process_pid (pid);
900 if (process == NULL)
901 return;
902
903 do {
904 ret = my_waitpid (pid, &status, 0);
905 if (WIFEXITED (status) || WIFSIGNALED (status))
906 break;
907 } while (ret != -1 || errno != ECHILD);
908 }
909
910 /* Return nonzero if the given thread is still alive. */
911 static int
912 linux_thread_alive (ptid_t ptid)
913 {
914 struct lwp_info *lwp = find_lwp_pid (ptid);
915
916 /* We assume we always know if a thread exits. If a whole process
917 exited but we still haven't been able to report it to GDB, we'll
918 hold on to the last lwp of the dead process. */
919 if (lwp != NULL)
920 return !lwp->dead;
921 else
922 return 0;
923 }
924
925 /* Return 1 if this lwp has an interesting status pending. */
926 static int
927 status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
928 {
929 struct lwp_info *lwp = (struct lwp_info *) entry;
930 ptid_t ptid = * (ptid_t *) arg;
931 struct thread_info *thread = get_lwp_thread (lwp);
932
933 /* Check if we're only interested in events from a specific process
934 or its lwps. */
935 if (!ptid_equal (minus_one_ptid, ptid)
936 && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
937 return 0;
938
939 thread = get_lwp_thread (lwp);
940
941 /* If we got a `vCont;t', but we haven't reported a stop yet, do
942 report any status pending the LWP may have. */
943 if (thread->last_resume_kind == resume_stop
944 && thread->last_status.kind == TARGET_WAITKIND_STOPPED)
945 return 0;
946
947 return lwp->status_pending_p;
948 }
949
950 static int
951 same_lwp (struct inferior_list_entry *entry, void *data)
952 {
953 ptid_t ptid = *(ptid_t *) data;
954 int lwp;
955
956 if (ptid_get_lwp (ptid) != 0)
957 lwp = ptid_get_lwp (ptid);
958 else
959 lwp = ptid_get_pid (ptid);
960
961 if (ptid_get_lwp (entry->id) == lwp)
962 return 1;
963
964 return 0;
965 }
966
/* Find the LWP matching PTID on the global LWP list, comparing the
   LWP component (or the pid, when PTID has no LWP component — see
   same_lwp).  Returns NULL if no match is found.  */
struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
}
972
/* Wait (via my_waitpid with __WALL) for an event from the LWP named
   by PTID, or from any LWP when PTID is minus_one_ptid.  The raw wait
   status is stored in *WSTATP.  Returns the lwp_info the event came
   from, or NULL when WNOHANG was requested and nothing was pending.
   Also performs first-stop arch setup and caches watchpoint/PC state
   on the stopped LWP.  */

static struct lwp_info *
linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
{
  int ret;
  int to_wait_for = -1;
  struct lwp_info *child = NULL;

  if (debug_threads)
    fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));

  if (ptid_equal (ptid, minus_one_ptid))
    to_wait_for = -1;			/* any child */
  else
    to_wait_for = ptid_get_lwp (ptid);	/* this lwp only */

  options |= __WALL;

 retry:

  ret = my_waitpid (to_wait_for, wstatp, options);
  /* ret == 0 (or ECHILD under WNOHANG) means nothing is pending.  */
  if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
    return NULL;
  else if (ret == -1)
    perror_with_name ("waitpid");

  /* Signals 32/33 are LinuxThreads' internal signals; don't log
     those.  */
  if (debug_threads
      && (!WIFSTOPPED (*wstatp)
	  || (WSTOPSIG (*wstatp) != 32
	      && WSTOPSIG (*wstatp) != 33)))
    fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);

  child = find_lwp_pid (pid_to_ptid (ret));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
     was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (*wstatp))
    {
      add_pid_to_list (&stopped_pids, ret);
      goto retry;
    }
  else if (child == NULL)
    goto retry;

  child->stopped = 1;

  child->last_status = *wstatp;

  /* Architecture-specific setup after inferior is running.
     This needs to happen after we have attached to the inferior
     and it is stopped for the first time, but before we access
     any inferior registers.  */
  if (new_inferior)
    {
      the_low_target.arch_setup ();
#ifdef HAVE_LINUX_REGSETS
      memset (disabled_regsets, 0, num_regsets);
#endif
      new_inferior = 0;
    }

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, it's possible that the way to know which watched
     address trapped, is to check the register that is used to select
     which address to watch.  Problem is, between setting the
     watchpoint and reading back which data address trapped, the user
     may change the set of watchpoints, and, as a consequence, GDB
     changes the debug registers in the inferior.  To avoid reading
     back a stale stopped-data-address when that happens, we cache in
     LP the fact that a watchpoint trapped, and the corresponding data
     address, as soon as we see CHILD stop with a SIGTRAP.  If GDB
     changes the debug registers meanwhile, we have the cached data we
     can rely on.  */

  if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
    {
      if (the_low_target.stopped_by_watchpoint == NULL)
	{
	  child->stopped_by_watchpoint = 0;
	}
      else
	{
	  struct thread_info *saved_inferior;

	  /* The low target's callbacks read state relative to
	     current_inferior; switch to CHILD's thread temporarily.  */
	  saved_inferior = current_inferior;
	  current_inferior = get_lwp_thread (child);

	  child->stopped_by_watchpoint
	    = the_low_target.stopped_by_watchpoint ();

	  if (child->stopped_by_watchpoint)
	    {
	      if (the_low_target.stopped_data_address != NULL)
		child->stopped_data_address
		  = the_low_target.stopped_data_address ();
	      else
		child->stopped_data_address = 0;
	    }

	  current_inferior = saved_inferior;
	}
    }

  /* Store the STOP_PC, with adjustment applied.  This depends on the
     architecture being defined already (so that CHILD has a valid
     regcache), and on LAST_STATUS being set (to check for SIGTRAP or
     not).  */
  if (WIFSTOPPED (*wstatp))
    child->stop_pc = get_stop_pc (child);

  if (debug_threads
      && WIFSTOPPED (*wstatp)
      && the_low_target.get_pc != NULL)
    {
      struct thread_info *saved_inferior = current_inferior;
      struct regcache *regcache;
      CORE_ADDR pc;

      current_inferior = get_lwp_thread (child);
      regcache = get_thread_regcache (current_inferior, 1);
      pc = (*the_low_target.get_pc) (regcache);
      fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
      current_inferior = saved_inferior;
    }

  return child;
}
1104
1105 /* This function should only be called if the LWP got a SIGTRAP.
1106
1107 Handle any tracepoint steps or hits. Return true if a tracepoint
1108 event was handled, 0 otherwise. */
1109
1110 static int
1111 handle_tracepoints (struct lwp_info *lwp)
1112 {
1113 struct thread_info *tinfo = get_lwp_thread (lwp);
1114 int tpoint_related_event = 0;
1115
1116 /* And we need to be sure that any all-threads-stopping doesn't try
1117 to move threads out of the jump pads, as it could deadlock the
1118 inferior (LWP could be in the jump pad, maybe even holding the
1119 lock.) */
1120
1121 /* Do any necessary step collect actions. */
1122 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1123
1124 /* See if we just hit a tracepoint and do its main collect
1125 actions. */
1126 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1127
1128 if (tpoint_related_event)
1129 {
1130 if (debug_threads)
1131 fprintf (stderr, "got a tracepoint event\n");
1132 return 1;
1133 }
1134
1135 return 0;
1136 }
1137
1138 /* Arrange for a breakpoint to be hit again later. We don't keep the
1139 SIGTRAP status and don't forward the SIGTRAP signal to the LWP. We
1140 will handle the current event, eventually we will resume this LWP,
1141 and this breakpoint will trap again. */
1142
1143 static int
1144 cancel_breakpoint (struct lwp_info *lwp)
1145 {
1146 struct thread_info *saved_inferior;
1147
1148 /* There's nothing to do if we don't support breakpoints. */
1149 if (!supports_breakpoints ())
1150 return 0;
1151
1152 /* breakpoint_at reads from current inferior. */
1153 saved_inferior = current_inferior;
1154 current_inferior = get_lwp_thread (lwp);
1155
1156 if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
1157 {
1158 if (debug_threads)
1159 fprintf (stderr,
1160 "CB: Push back breakpoint for %s\n",
1161 target_pid_to_str (ptid_of (lwp)));
1162
1163 /* Back up the PC if necessary. */
1164 if (the_low_target.decr_pc_after_break)
1165 {
1166 struct regcache *regcache
1167 = get_thread_regcache (current_inferior, 1);
1168 (*the_low_target.set_pc) (regcache, lwp->stop_pc);
1169 }
1170
1171 current_inferior = saved_inferior;
1172 return 1;
1173 }
1174 else
1175 {
1176 if (debug_threads)
1177 fprintf (stderr,
1178 "CB: No breakpoint found at %s for [%s]\n",
1179 paddress (lwp->stop_pc),
1180 target_pid_to_str (ptid_of (lwp)));
1181 }
1182
1183 current_inferior = saved_inferior;
1184 return 0;
1185 }
1186
/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  Otherwise it is null_ptid (linux_wait_1 compares it
   against null_ptid to decide whether a step-over is in progress).  */
ptid_t step_over_bkpt;
1190
1191 /* Wait for an event from child PID. If PID is -1, wait for any
1192 child. Store the stop status through the status pointer WSTAT.
1193 OPTIONS is passed to the waitpid call. Return 0 if no child stop
1194 event was found and OPTIONS contains WNOHANG. Return the PID of
1195 the stopped child otherwise. */
1196
1197 static int
1198 linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options)
1199 {
1200 struct lwp_info *event_child, *requested_child;
1201
1202 event_child = NULL;
1203 requested_child = NULL;
1204
1205 /* Check for a lwp with a pending status. */
1206
1207 if (ptid_equal (ptid, minus_one_ptid)
1208 || ptid_equal (pid_to_ptid (ptid_get_pid (ptid)), ptid))
1209 {
1210 event_child = (struct lwp_info *)
1211 find_inferior (&all_lwps, status_pending_p_callback, &ptid);
1212 if (debug_threads && event_child)
1213 fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
1214 }
1215 else
1216 {
1217 requested_child = find_lwp_pid (ptid);
1218
1219 if (requested_child->status_pending_p)
1220 event_child = requested_child;
1221 }
1222
1223 if (event_child != NULL)
1224 {
1225 if (debug_threads)
1226 fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
1227 lwpid_of (event_child), event_child->status_pending);
1228 *wstat = event_child->status_pending;
1229 event_child->status_pending_p = 0;
1230 event_child->status_pending = 0;
1231 current_inferior = get_lwp_thread (event_child);
1232 return lwpid_of (event_child);
1233 }
1234
1235 /* We only enter this loop if no process has a pending wait status. Thus
1236 any action taken in response to a wait status inside this loop is
1237 responding as soon as we detect the status, not after any pending
1238 events. */
1239 while (1)
1240 {
1241 event_child = linux_wait_for_lwp (ptid, wstat, options);
1242
1243 if ((options & WNOHANG) && event_child == NULL)
1244 {
1245 if (debug_threads)
1246 fprintf (stderr, "WNOHANG set, no event found\n");
1247 return 0;
1248 }
1249
1250 if (event_child == NULL)
1251 error ("event from unknown child");
1252
1253 current_inferior = get_lwp_thread (event_child);
1254
1255 /* Check for thread exit. */
1256 if (! WIFSTOPPED (*wstat))
1257 {
1258 if (debug_threads)
1259 fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));
1260
1261 /* If the last thread is exiting, just return. */
1262 if (last_thread_of_process_p (current_inferior))
1263 {
1264 if (debug_threads)
1265 fprintf (stderr, "LWP %ld is last lwp of process\n",
1266 lwpid_of (event_child));
1267 return lwpid_of (event_child);
1268 }
1269
1270 if (!non_stop)
1271 {
1272 current_inferior = (struct thread_info *) all_threads.head;
1273 if (debug_threads)
1274 fprintf (stderr, "Current inferior is now %ld\n",
1275 lwpid_of (get_thread_lwp (current_inferior)));
1276 }
1277 else
1278 {
1279 current_inferior = NULL;
1280 if (debug_threads)
1281 fprintf (stderr, "Current inferior is now <NULL>\n");
1282 }
1283
1284 /* If we were waiting for this particular child to do something...
1285 well, it did something. */
1286 if (requested_child != NULL)
1287 {
1288 int lwpid = lwpid_of (event_child);
1289
1290 /* Cancel the step-over operation --- the thread that
1291 started it is gone. */
1292 if (finish_step_over (event_child))
1293 unstop_all_lwps (event_child);
1294 delete_lwp (event_child);
1295 return lwpid;
1296 }
1297
1298 delete_lwp (event_child);
1299
1300 /* Wait for a more interesting event. */
1301 continue;
1302 }
1303
1304 if (event_child->must_set_ptrace_flags)
1305 {
1306 linux_enable_event_reporting (lwpid_of (event_child));
1307 event_child->must_set_ptrace_flags = 0;
1308 }
1309
1310 if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
1311 && *wstat >> 16 != 0)
1312 {
1313 handle_extended_wait (event_child, *wstat);
1314 continue;
1315 }
1316
1317 if (WIFSTOPPED (*wstat)
1318 && WSTOPSIG (*wstat) == SIGSTOP
1319 && event_child->stop_expected)
1320 {
1321 int should_stop;
1322
1323 if (debug_threads)
1324 fprintf (stderr, "Expected stop.\n");
1325 event_child->stop_expected = 0;
1326
1327 should_stop = (current_inferior->last_resume_kind == resume_stop
1328 || stopping_threads);
1329
1330 if (!should_stop)
1331 {
1332 linux_resume_one_lwp (event_child,
1333 event_child->stepping, 0, NULL);
1334 continue;
1335 }
1336 }
1337
1338 return lwpid_of (event_child);
1339 }
1340
1341 /* NOTREACHED */
1342 return 0;
1343 }
1344
1345 static int
1346 linux_wait_for_event (ptid_t ptid, int *wstat, int options)
1347 {
1348 ptid_t wait_ptid;
1349
1350 if (ptid_is_pid (ptid))
1351 {
1352 /* A request to wait for a specific tgid. This is not possible
1353 with waitpid, so instead, we wait for any child, and leave
1354 children we're not interested in right now with a pending
1355 status to report later. */
1356 wait_ptid = minus_one_ptid;
1357 }
1358 else
1359 wait_ptid = ptid;
1360
1361 while (1)
1362 {
1363 int event_pid;
1364
1365 event_pid = linux_wait_for_event_1 (wait_ptid, wstat, options);
1366
1367 if (event_pid > 0
1368 && ptid_is_pid (ptid) && ptid_get_pid (ptid) != event_pid)
1369 {
1370 struct lwp_info *event_child = find_lwp_pid (pid_to_ptid (event_pid));
1371
1372 if (! WIFSTOPPED (*wstat))
1373 mark_lwp_dead (event_child, *wstat);
1374 else
1375 {
1376 event_child->status_pending_p = 1;
1377 event_child->status_pending = *wstat;
1378 }
1379 }
1380 else
1381 return event_pid;
1382 }
1383 }
1384
1385
1386 /* Count the LWP's that have had events. */
1387
1388 static int
1389 count_events_callback (struct inferior_list_entry *entry, void *data)
1390 {
1391 struct lwp_info *lp = (struct lwp_info *) entry;
1392 struct thread_info *thread = get_lwp_thread (lp);
1393 int *count = data;
1394
1395 gdb_assert (count != NULL);
1396
1397 /* Count only resumed LWPs that have a SIGTRAP event pending that
1398 should be reported to GDB. */
1399 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1400 && thread->last_resume_kind != resume_stop
1401 && lp->status_pending_p
1402 && WIFSTOPPED (lp->status_pending)
1403 && WSTOPSIG (lp->status_pending) == SIGTRAP
1404 && !breakpoint_inserted_here (lp->stop_pc))
1405 (*count)++;
1406
1407 return 0;
1408 }
1409
1410 /* Select the LWP (if any) that is currently being single-stepped. */
1411
1412 static int
1413 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
1414 {
1415 struct lwp_info *lp = (struct lwp_info *) entry;
1416 struct thread_info *thread = get_lwp_thread (lp);
1417
1418 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1419 && thread->last_resume_kind == resume_step
1420 && lp->status_pending_p)
1421 return 1;
1422 else
1423 return 0;
1424 }
1425
1426 /* Select the Nth LWP that has had a SIGTRAP event that should be
1427 reported to GDB. */
1428
1429 static int
1430 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
1431 {
1432 struct lwp_info *lp = (struct lwp_info *) entry;
1433 struct thread_info *thread = get_lwp_thread (lp);
1434 int *selector = data;
1435
1436 gdb_assert (selector != NULL);
1437
1438 /* Select only resumed LWPs that have a SIGTRAP event pending. */
1439 if (thread->last_resume_kind != resume_stop
1440 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
1441 && lp->status_pending_p
1442 && WIFSTOPPED (lp->status_pending)
1443 && WSTOPSIG (lp->status_pending) == SIGTRAP
1444 && !breakpoint_inserted_here (lp->stop_pc))
1445 if ((*selector)-- == 0)
1446 return 1;
1447
1448 return 0;
1449 }
1450
1451 static int
1452 cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
1453 {
1454 struct lwp_info *lp = (struct lwp_info *) entry;
1455 struct thread_info *thread = get_lwp_thread (lp);
1456 struct lwp_info *event_lp = data;
1457
1458 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
1459 if (lp == event_lp)
1460 return 0;
1461
1462 /* If a LWP other than the LWP that we're reporting an event for has
1463 hit a GDB breakpoint (as opposed to some random trap signal),
1464 then just arrange for it to hit it again later. We don't keep
1465 the SIGTRAP status and don't forward the SIGTRAP signal to the
1466 LWP. We will handle the current event, eventually we will resume
1467 all LWPs, and this one will get its breakpoint trap again.
1468
1469 If we do not do this, then we run the risk that the user will
1470 delete or disable the breakpoint, but the LWP will have already
1471 tripped on it. */
1472
1473 if (thread->last_resume_kind != resume_stop
1474 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
1475 && lp->status_pending_p
1476 && WIFSTOPPED (lp->status_pending)
1477 && WSTOPSIG (lp->status_pending) == SIGTRAP
1478 && !lp->stepping
1479 && !lp->stopped_by_watchpoint
1480 && cancel_breakpoint (lp))
1481 /* Throw away the SIGTRAP. */
1482 lp->status_pending_p = 0;
1483
1484 return 0;
1485 }
1486
1487 /* Select one LWP out of those that have events pending. */
1488
1489 static void
1490 select_event_lwp (struct lwp_info **orig_lp)
1491 {
1492 int num_events = 0;
1493 int random_selector;
1494 struct lwp_info *event_lp;
1495
1496 /* Give preference to any LWP that is being single-stepped. */
1497 event_lp
1498 = (struct lwp_info *) find_inferior (&all_lwps,
1499 select_singlestep_lwp_callback, NULL);
1500 if (event_lp != NULL)
1501 {
1502 if (debug_threads)
1503 fprintf (stderr,
1504 "SEL: Select single-step %s\n",
1505 target_pid_to_str (ptid_of (event_lp)));
1506 }
1507 else
1508 {
1509 /* No single-stepping LWP. Select one at random, out of those
1510 which have had SIGTRAP events. */
1511
1512 /* First see how many SIGTRAP events we have. */
1513 find_inferior (&all_lwps, count_events_callback, &num_events);
1514
1515 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
1516 random_selector = (int)
1517 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
1518
1519 if (debug_threads && num_events > 1)
1520 fprintf (stderr,
1521 "SEL: Found %d SIGTRAP events, selecting #%d\n",
1522 num_events, random_selector);
1523
1524 event_lp = (struct lwp_info *) find_inferior (&all_lwps,
1525 select_event_lwp_callback,
1526 &random_selector);
1527 }
1528
1529 if (event_lp != NULL)
1530 {
1531 /* Switch the event LWP. */
1532 *orig_lp = event_lp;
1533 }
1534 }
1535
1536 /* Set this inferior LWP's state as "want-stopped". We won't resume
1537 this LWP until the client gives us another action for it. */
1538
1539 static void
1540 gdb_wants_lwp_stopped (struct inferior_list_entry *entry)
1541 {
1542 struct lwp_info *lwp = (struct lwp_info *) entry;
1543 struct thread_info *thread = get_lwp_thread (lwp);
1544
1545 /* Most threads are stopped implicitly (all-stop); tag that with
1546 signal 0. The thread being explicitly reported stopped to the
1547 client, gets it's status fixed up afterwards. */
1548 thread->last_status.kind = TARGET_WAITKIND_STOPPED;
1549 thread->last_status.value.sig = TARGET_SIGNAL_0;
1550
1551 thread->last_resume_kind = resume_stop;
1552 }
1553
/* Set all LWP's states as "want-stopped" (see gdb_wants_lwp_stopped).
   Called when reporting an all-stop event, since from GDB's
   perspective all threads then stop together.  */

static void
gdb_wants_all_stopped (void)
{
  for_each_inferior (&all_lwps, gdb_wants_lwp_stopped);
}
1561
/* Wait for process, returns status.  The core of the target's wait
   method: pulls events from linux_wait_for_event, filters out events
   gdbserver handles internally (expected SIGSTOPs, internal
   breakpoints, finished step-overs, ignored signals, tracepoint
   collections), and fills in *OURSTATUS for events that should be
   reported to GDB.  */

static ptid_t
linux_wait_1 (ptid_t ptid,
	      struct target_waitstatus *ourstatus, int target_options)
{
  int w;			/* Raw wait status of the event.  */
  struct lwp_info *event_child;
  int options;
  int pid;
  int step_over_finished;
  int bp_explains_trap;
  int maybe_internal_trap;
  int report_to_gdb;
  int trace_event;

  /* Translate generic target options into linux options.  */
  options = __WALL;
  if (target_options & TARGET_WNOHANG)
    options |= WNOHANG;

retry:
  /* Events handled internally below loop back here to pull another
     event without returning to the caller.  */
  ourstatus->kind = TARGET_WAITKIND_IGNORE;

  /* If we were only supposed to resume one thread, only wait for
     that thread - if it's still alive.  If it died, however - which
     can happen if we're coming from the thread death case below -
     then we need to make sure we restart the other threads.  We could
     pick a thread at random or restart all; restarting all is less
     arbitrary.  */
  if (!non_stop
      && !ptid_equal (cont_thread, null_ptid)
      && !ptid_equal (cont_thread, minus_one_ptid))
    {
      struct thread_info *thread;

      thread = (struct thread_info *) find_inferior_id (&all_threads,
							cont_thread);

      /* No stepping, no signal - unless one is pending already, of course.  */
      if (thread == NULL)
	{
	  struct thread_resume resume_info;
	  resume_info.thread = minus_one_ptid;
	  resume_info.kind = resume_continue;
	  resume_info.sig = 0;
	  linux_resume (&resume_info, 1);
	}
      else
	ptid = cont_thread;
    }

  if (ptid_equal (step_over_bkpt, null_ptid))
    pid = linux_wait_for_event (ptid, &w, options);
  else
    {
      /* A step-over is in progress: only listen for events from the
	 stepping thread, and wait for them synchronously.  */
      if (debug_threads)
	fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
		 target_pid_to_str (step_over_bkpt));
      pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
    }

  if (pid == 0) /* only if TARGET_WNOHANG */
    return null_ptid;

  /* linux_wait_for_event made the event thread current.  */
  event_child = get_thread_lwp (current_inferior);

  /* If we are waiting for a particular child, and it exited,
     linux_wait_for_event will return its exit status.  Similarly if
     the last child exited.  If this is not the last child, however,
     do not report it as exited until there is a 'thread exited' response
     available in the remote protocol.  Instead, just wait for another event.
     This should be safe, because if the thread crashed we will already
     have reported the termination signal to GDB; that should stop any
     in-progress stepping operations, etc.

     Report the exit status of the last thread to exit.  This matches
     LinuxThreads' behavior.  */

  if (last_thread_of_process_p (current_inferior))
    {
      if (WIFEXITED (w) || WIFSIGNALED (w))
	{
	  if (WIFEXITED (w))
	    {
	      ourstatus->kind = TARGET_WAITKIND_EXITED;
	      ourstatus->value.integer = WEXITSTATUS (w);

	      if (debug_threads)
		fprintf (stderr, "\nChild exited with retcode = %x \n", WEXITSTATUS (w));
	    }
	  else
	    {
	      ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
	      ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));

	      if (debug_threads)
		fprintf (stderr, "\nChild terminated with signal = %x \n", WTERMSIG (w));

	    }

	  return pid_to_ptid (pid);
	}
    }
  else
    {
      if (!WIFSTOPPED (w))
	goto retry;
    }

  /* If this event was not handled before, and is not a SIGTRAP, we
     report it.  SIGILL and SIGSEGV are also treated as traps in case
     a breakpoint is inserted at the current PC.  If this target does
     not support internal breakpoints at all, we also report the
     SIGTRAP without further processing; it's of no concern to us.  */
  maybe_internal_trap
    = (supports_breakpoints ()
       && (WSTOPSIG (w) == SIGTRAP
	   || ((WSTOPSIG (w) == SIGILL
		|| WSTOPSIG (w) == SIGSEGV)
	       && (*the_low_target.breakpoint_at) (event_child->stop_pc))));

  if (maybe_internal_trap)
    {
      /* Handle anything that requires bookkeeping before deciding to
	 report the event or continue waiting.  */

      /* First check if we can explain the SIGTRAP with an internal
	 breakpoint, or if we should possibly report the event to GDB.
	 Do this before anything that may remove or insert a
	 breakpoint.  */
      bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);

      /* We have a SIGTRAP, possibly a step-over dance has just
	 finished.  If so, tweak the state machine accordingly,
	 reinsert breakpoints and delete any reinsert (software
	 single-step) breakpoints.  */
      step_over_finished = finish_step_over (event_child);

      /* Now invoke the callbacks of any internal breakpoints there.  */
      check_breakpoints (event_child->stop_pc);

      /* Handle tracepoint data collecting.  This may overflow the
	 trace buffer, and cause a tracing stop, removing
	 breakpoints.  */
      trace_event = handle_tracepoints (event_child);

      if (bp_explains_trap)
	{
	  /* If we stepped or ran into an internal breakpoint, we've
	     already handled it.  So next time we resume (from this
	     PC), we should step over it.  */
	  if (debug_threads)
	    fprintf (stderr, "Hit a gdbserver breakpoint.\n");

	  if (breakpoint_here (event_child->stop_pc))
	    event_child->need_step_over = 1;
	}
    }
  else
    {
      /* We have some other signal, possibly a step-over dance was in
	 progress, and it should be cancelled too.  */
      step_over_finished = finish_step_over (event_child);

      trace_event = 0;
    }

  /* Check whether GDB would be interested in this event.  */

  /* If GDB is not interested in this signal, don't stop other
     threads, and don't report it to GDB.  Just resume the inferior
     right away.  We do this for threading-related signals as well as
     any that GDB specifically requested we ignore.  But never ignore
     SIGSTOP if we sent it ourselves, and do not ignore signals when
     stepping - they may require special handling to skip the signal
     handler.  */
  /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
     thread library?  */
  if (WIFSTOPPED (w)
      && current_inferior->last_resume_kind != resume_step
      && (
#if defined (USE_THREAD_DB) && defined (__SIGRTMIN)
	  (current_process ()->private->thread_db != NULL
	   && (WSTOPSIG (w) == __SIGRTMIN
	       || WSTOPSIG (w) == __SIGRTMIN + 1))
	  ||
#endif
	  (pass_signals[target_signal_from_host (WSTOPSIG (w))]
	   && !(WSTOPSIG (w) == SIGSTOP
		&& current_inferior->last_resume_kind == resume_stop))))
    {
      siginfo_t info, *info_p;

      if (debug_threads)
	fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
		 WSTOPSIG (w), lwpid_of (event_child));

      /* Pass the signal's siginfo along with the resume, if we
	 managed to read it.  */
      if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
	info_p = &info;
      else
	info_p = NULL;
      linux_resume_one_lwp (event_child, event_child->stepping,
			    WSTOPSIG (w), info_p);
      goto retry;
    }

  /* If GDB wanted this thread to single step, we always want to
     report the SIGTRAP, and let GDB handle it.  Watchpoints should
     always be reported.  So should signals we can't explain.  A
     SIGTRAP we can't explain could be a GDB breakpoint --- we may or
     may not support Z0 breakpoints.  If we do, we're able to handle
     GDB breakpoints on top of internal breakpoints, by handling the
     internal breakpoint and still reporting the event to GDB.  If we
     don't, we're out of luck, GDB won't see the breakpoint hit.  */
  report_to_gdb = (!maybe_internal_trap
		   || current_inferior->last_resume_kind == resume_step
		   || event_child->stopped_by_watchpoint
		   || (!step_over_finished && !bp_explains_trap && !trace_event)
		   || gdb_breakpoint_here (event_child->stop_pc));

  /* We found no reason GDB would want us to stop.  We either hit one
     of our own breakpoints, or finished an internal step GDB
     shouldn't know about.  */
  if (!report_to_gdb)
    {
      if (debug_threads)
	{
	  if (bp_explains_trap)
	    fprintf (stderr, "Hit a gdbserver breakpoint.\n");
	  if (step_over_finished)
	    fprintf (stderr, "Step-over finished.\n");
	  if (trace_event)
	    fprintf (stderr, "Tracepoint event.\n");
	}

      /* We're not reporting this breakpoint to GDB, so apply the
	 decr_pc_after_break adjustment to the inferior's regcache
	 ourselves.  */

      if (the_low_target.set_pc != NULL)
	{
	  struct regcache *regcache
	    = get_thread_regcache (get_lwp_thread (event_child), 1);
	  (*the_low_target.set_pc) (regcache, event_child->stop_pc);
	}

      /* We've finished stepping over a breakpoint.  We've stopped all
	 LWPs momentarily except the stepping one.  This is where we
	 resume them all again.  We're going to keep waiting, so use
	 proceed, which handles stepping over the next breakpoint.  */
      if (debug_threads)
	fprintf (stderr, "proceeding all threads.\n");
      proceed_all_lwps ();
      goto retry;
    }

  if (debug_threads)
    {
      if (current_inferior->last_resume_kind == resume_step)
	fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
      if (event_child->stopped_by_watchpoint)
	fprintf (stderr, "Stopped by watchpoint.\n");
      if (gdb_breakpoint_here (event_child->stop_pc))
	fprintf (stderr, "Stopped by GDB breakpoint.\n");
      if (debug_threads)
	fprintf (stderr, "Hit a non-gdbserver trap event.\n");
    }

  /* Alright, we're going to report a stop.  */

  if (!non_stop)
    {
      /* In all-stop, stop all threads.  */
      stop_all_lwps ();

      /* If we're not waiting for a specific LWP, choose an event LWP
	 from among those that have had events.  Giving equal priority
	 to all LWPs that have had events helps prevent
	 starvation.  */
      if (ptid_equal (ptid, minus_one_ptid))
	{
	  event_child->status_pending_p = 1;
	  event_child->status_pending = w;

	  select_event_lwp (&event_child);

	  event_child->status_pending_p = 0;
	  w = event_child->status_pending;
	}

      /* Now that we've selected our final event LWP, cancel any
	 breakpoints in other LWPs that have hit a GDB breakpoint.
	 See the comment in cancel_breakpoints_callback to find out
	 why.  */
      find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
    }
  else
    {
      /* If we just finished a step-over, then all threads had been
	 momentarily paused.  In all-stop, that's fine, we want
	 threads stopped by now anyway.  In non-stop, we need to
	 re-resume threads that GDB wanted to be running.  */
      if (step_over_finished)
	unstop_all_lwps (event_child);
    }

  ourstatus->kind = TARGET_WAITKIND_STOPPED;

  /* Do this before the gdb_wants_all_stopped calls below, since they
     always set last_resume_kind to resume_stop.  */
  if (current_inferior->last_resume_kind == resume_stop
      && WSTOPSIG (w) == SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 and it stopped cleanly, so report as SIG0.  The use of
	 SIGSTOP is an implementation detail.  */
      ourstatus->value.sig = TARGET_SIGNAL_0;
    }
  else if (current_inferior->last_resume_kind == resume_stop
	   && WSTOPSIG (w) != SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 but, it stopped for other reasons.  */
      ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
    }
  else
    {
      ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
    }

  /* Any step-over in progress was finished by finish_step_over above
     before we got here.  */
  gdb_assert (ptid_equal (step_over_bkpt, null_ptid));

  if (!non_stop)
    {
      /* From GDB's perspective, all-stop mode always stops all
	 threads implicitly.  Tag all threads as "want-stopped".  */
      gdb_wants_all_stopped ();
    }
  else
    {
      /* We're reporting this LWP as stopped.  Update its
	 "want-stopped" state to what the client wants, until it gets
	 a new resume action.  */
      gdb_wants_lwp_stopped (&event_child->head);
    }

  if (debug_threads)
    fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
	     target_pid_to_str (ptid_of (event_child)),
	     ourstatus->kind,
	     ourstatus->value.sig);

  get_lwp_thread (event_child)->last_status = *ourstatus;
  return ptid_of (event_child);
}
1918
1919 /* Get rid of any pending event in the pipe. */
1920 static void
1921 async_file_flush (void)
1922 {
1923 int ret;
1924 char buf;
1925
1926 do
1927 ret = read (linux_event_pipe[0], &buf, 1);
1928 while (ret >= 0 || (ret == -1 && errno == EINTR));
1929 }
1930
1931 /* Put something in the pipe, so the event loop wakes up. */
1932 static void
1933 async_file_mark (void)
1934 {
1935 int ret;
1936
1937 async_file_flush ();
1938
1939 do
1940 ret = write (linux_event_pipe[1], "+", 1);
1941 while (ret == 0 || (ret == -1 && errno == EINTR));
1942
1943 /* Ignore EAGAIN. If the pipe is full, the event loop will already
1944 be awakened anyway. */
1945 }
1946
/* The target's wait entry point: wrapper around linux_wait_1 that
   also keeps the async event pipe (linux_event_pipe) in sync when
   running asynchronously.  */

static ptid_t
linux_wait (ptid_t ptid,
	    struct target_waitstatus *ourstatus, int target_options)
{
  ptid_t event_ptid;

  if (debug_threads)
    fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));

  /* Flush the async file first.  */
  if (target_is_async_p ())
    async_file_flush ();

  event_ptid = linux_wait_1 (ptid, ourstatus, target_options);

  /* If at least one stop was reported, there may be more.  A single
     SIGCHLD can signal more than one child stop.  */
  if (target_is_async_p ()
      && (target_options & TARGET_WNOHANG) != 0
      && !ptid_equal (event_ptid, null_ptid))
    async_file_mark ();

  return event_ptid;
}
1971
/* Send signal SIGNO to the LWP with id LWPID.  Returns the result of
   the underlying syscall (0 on success, -1 with errno set on
   failure).

   Prefer the tkill syscall, in case we are running nptl threads, so
   the signal reaches the exact thread.  If the kernel reports that
   tkill does not exist (ENOSYS), remember that and use plain kill
   from then on.  */

static int
kill_lwp (unsigned long lwpid, int signo)
{
#ifdef __NR_tkill
  static int tkill_unsupported;

  if (!tkill_unsupported)
    {
      int ret;

      errno = 0;
      ret = syscall (__NR_tkill, lwpid, signo);
      if (errno != ENOSYS)
	return ret;

      /* tkill is not implemented here; don't try it again.  */
      tkill_unsupported = 1;
    }
#endif

  return kill (lwpid, signo);
}
1999
2000 static void
2001 send_sigstop (struct lwp_info *lwp)
2002 {
2003 int pid;
2004
2005 pid = lwpid_of (lwp);
2006
2007 /* If we already have a pending stop signal for this process, don't
2008 send another. */
2009 if (lwp->stop_expected)
2010 {
2011 if (debug_threads)
2012 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
2013
2014 return;
2015 }
2016
2017 if (debug_threads)
2018 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
2019
2020 lwp->stop_expected = 1;
2021 kill_lwp (pid, SIGSTOP);
2022 }
2023
2024 static void
2025 send_sigstop_callback (struct inferior_list_entry *entry)
2026 {
2027 struct lwp_info *lwp = (struct lwp_info *) entry;
2028
2029 if (lwp->stopped)
2030 return;
2031
2032 send_sigstop (lwp);
2033 }
2034
/* Record in LWP that the thread died with wait status WSTAT.  The
   status is left pending so it can be reported later, and the lwp is
   flagged so we neither try to stop it again nor expect further
   stops from it.  */

static void
mark_lwp_dead (struct lwp_info *lwp, int wstat)
{
  /* It's dead, really.  */
  lwp->dead = 1;

  /* Store the exit status for later.  */
  lwp->status_pending_p = 1;
  lwp->status_pending = wstat;

  /* Prevent trying to stop it.  */
  lwp->stopped = 1;

  /* No further stops are expected from a dead lwp.  */
  lwp->stop_expected = 0;
}
2051
/* Wait until the LWP described by ENTRY reports a stop (helper run on
   each lwp by stop_all_lwps, after send_sigstop_callback).  If the
   lwp stops with a signal other than SIGSTOP, that status is left
   pending to be reported later; if the process exited instead, the
   lwp is marked dead.  */

static void
wait_for_sigstop (struct inferior_list_entry *entry)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *saved_inferior;
  int wstat;
  ptid_t saved_tid;
  ptid_t ptid;
  int pid;

  /* Nothing to wait for if it's already stopped.  */
  if (lwp->stopped)
    {
      if (debug_threads)
	fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
		 lwpid_of (lwp));
      return;
    }

  /* linux_wait_for_event changes current_inferior; remember the
     current thread (by id, in case the thread itself dies) so we can
     restore it afterwards.  */
  saved_inferior = current_inferior;
  if (saved_inferior != NULL)
    saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
  else
    saved_tid = null_ptid; /* avoid bogus unused warning */

  ptid = lwp->head.id;

  if (debug_threads)
    fprintf (stderr, "wait_for_sigstop: pulling one event\n");

  pid = linux_wait_for_event (ptid, &wstat, __WALL);

  /* If we stopped with a non-SIGSTOP signal, save it for later
     and record the pending SIGSTOP.  If the process exited, just
     return.  */
  if (WIFSTOPPED (wstat))
    {
      if (debug_threads)
	fprintf (stderr, "LWP %ld stopped with signal %d\n",
		 lwpid_of (lwp), WSTOPSIG (wstat));

      if (WSTOPSIG (wstat) != SIGSTOP)
	{
	  if (debug_threads)
	    fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
		     lwpid_of (lwp), wstat);

	  /* Remember this status so it is reported to GDB once the
	     lwp's events are next consumed.  */
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}
    }
  else
    {
      if (debug_threads)
	fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);

      lwp = find_lwp_pid (pid_to_ptid (pid));
      if (lwp)
	{
	  /* Leave this status pending for the next time we're able to
	     report it.  In the mean time, we'll report this lwp as
	     dead to GDB, so GDB doesn't try to read registers and
	     memory from it.  This can only happen if this was the
	     last thread of the process; otherwise, PID is removed
	     from the thread tables before linux_wait_for_event
	     returns.  */
	  mark_lwp_dead (lwp, wstat);
	}
    }

  if (saved_inferior == NULL || linux_thread_alive (saved_tid))
    current_inferior = saved_inferior;
  else
    {
      if (debug_threads)
	fprintf (stderr, "Previously current thread died.\n");

      if (non_stop)
	{
	  /* We can't change the current inferior behind GDB's back,
	     otherwise, a subsequent command may apply to the wrong
	     process.  */
	  current_inferior = NULL;
	}
      else
	{
	  /* Set a valid thread as current.  */
	  set_desired_inferior (0);
	}
    }
}
2142
/* Stop every LWP in the process list.  Sends a SIGSTOP to each LWP
   and then waits until each one has reported its stop.  While this is
   in progress, STOPPING_THREADS is set so the event-handling code
   knows these stops are internal and not to be reported to GDB.  */

static void
stop_all_lwps (void)
{
  stopping_threads = 1;
  for_each_inferior (&all_lwps, send_sigstop_callback);
  for_each_inferior (&all_lwps, wait_for_sigstop);
  stopping_threads = 0;
}
2151
/* Resume execution of LWP.
   If STEP is nonzero, single-step it.
   If SIGNAL is nonzero, give it that signal, with INFO as its siginfo
   (INFO may be NULL, in which case a zeroed siginfo is recorded).

   If the LWP cannot actually be resumed right now (pending status to
   report, or a breakpoint reinsert in progress), the signal is queued
   on LWP->PENDING_SIGNALS for delivery on a later resume.  */

static void
linux_resume_one_lwp (struct lwp_info *lwp,
		      int step, int signal, siginfo_t *info)
{
  struct thread_info *saved_inferior;

  /* Nothing to do if the LWP is already running.  */
  if (lwp->stopped == 0)
    return;

  /* Cancel actions that rely on GDB not changing the PC (e.g., the
     user used the "jump" command, or "set $pc = foo").  */
  if (lwp->stop_pc != get_pc (lwp))
    {
      /* Collecting 'while-stepping' actions doesn't make sense
	 anymore.  */
      release_while_stepping_state_list (get_lwp_thread (lwp));
    }

  /* If we have pending signals or status, and a new signal, enqueue the
     signal.  Also enqueue the signal if we are waiting to reinsert a
     breakpoint; it will be picked up again below.  */
  if (signal != 0
      && (lwp->status_pending_p || lwp->pending_signals != NULL
	  || lwp->bp_reinsert != 0))
    {
      struct pending_signals *p_sig;
      p_sig = xmalloc (sizeof (*p_sig));
      p_sig->prev = lwp->pending_signals;
      p_sig->signal = signal;
      if (info == NULL)
	memset (&p_sig->info, 0, sizeof (siginfo_t));
      else
	memcpy (&p_sig->info, info, sizeof (siginfo_t));
      lwp->pending_signals = p_sig;
    }

  /* A pending status must be reported before the LWP may run again;
     leave it stopped (the signal, if any, was queued above).  */
  if (lwp->status_pending_p)
    {
      if (debug_threads)
	fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
		 " has pending status\n",
		 lwpid_of (lwp), step ? "step" : "continue", signal,
		 lwp->stop_expected ? "expected" : "not expected");
      return;
    }

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if (debug_threads)
    fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
	     lwpid_of (lwp), step ? "step" : "continue", signal,
	     lwp->stop_expected ? "expected" : "not expected");

  /* This bit needs some thinking about.  If we get a signal that
     we must report while a single-step reinsert is still pending,
     we often end up resuming the thread.  It might be better to
     (ew) allow a stack of pending events; then we could be sure that
     the reinsert happened right away and not lose any signals.

     Making this stack would also shrink the window in which breakpoints are
     uninserted (see comment in linux_wait_for_lwp) but not enough for
     complete correctness, so it won't solve that problem.  It may be
     worthwhile just to solve this one, however.  */
  if (lwp->bp_reinsert != 0)
    {
      if (debug_threads)
	fprintf (stderr, " pending reinsert at 0x%s\n",
		 paddress (lwp->bp_reinsert));

      if (lwp->bp_reinsert != 0 && can_hardware_single_step ())
	{
	  /* A reinsert implies we asked for a single-step; anything
	     else is a caller bug worth shouting about.  */
	  if (step == 0)
	    fprintf (stderr, "BAD - reinserting but not stepping.\n");

	  step = 1;
	}

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  /* If we have while-stepping actions in this thread set it stepping.
     If we have a signal to deliver, it may or may not be set to
     SIG_IGN, we don't know.  Assume so, and allow collecting
     while-stepping into a signal handler.  A possible smart thing to
     do would be to set an internal breakpoint at the signal return
     address, continue, and carry on catching this while-stepping
     action only when that breakpoint is hit.  A future
     enhancement.  */
  if (get_lwp_thread (lwp)->while_stepping != NULL
      && can_hardware_single_step ())
    {
      if (debug_threads)
	fprintf (stderr,
		 "lwp %ld has a while-stepping action -> forcing step.\n",
		 lwpid_of (lwp));
      step = 1;
    }

  if (debug_threads && the_low_target.get_pc != NULL)
    {
      struct regcache *regcache = get_thread_regcache (current_inferior, 1);
      CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
      fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
    }

  /* If we have pending signals, consume one unless we are trying to reinsert
     a breakpoint.  */
  if (lwp->pending_signals != NULL && lwp->bp_reinsert == 0)
    {
      struct pending_signals **p_sig;

      /* Walk to the oldest queued entry (the list is newest-first) so
	 signals are delivered in FIFO order.  */
      p_sig = &lwp->pending_signals;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      signal = (*p_sig)->signal;
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);

      free (*p_sig);
      *p_sig = NULL;
    }

  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);

  /* The inferior is about to run and may change its registers; drop
     our cached copy.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
			   get_lwp_thread (lwp));
  errno = 0;
  lwp->stopped = 0;
  lwp->stopped_by_watchpoint = 0;
  lwp->stepping = step;
  ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0,
	  /* Coerce to a uintptr_t first to avoid potential gcc warning
	     of coercing an 8 byte integer to a 4 byte pointer.  */
	  (PTRACE_ARG4_TYPE) (uintptr_t) signal);

  current_inferior = saved_inferior;
  if (errno)
    {
      /* ESRCH from ptrace either means that the thread was already
	 running (an error) or that it is gone (a race condition).  If
	 it's gone, we will get a notification the next time we wait,
	 so we can ignore the error.  We could differentiate these
	 two, but it's tricky without waiting; the thread still exists
	 as a zombie, so sending it signal 0 would succeed.  So just
	 ignore ESRCH.  */
      if (errno == ESRCH)
	return;

      perror_with_name ("ptrace");
    }
}
2311
/* Bundle of GDB's resume requests, passed through find_inferior to
   linux_set_resume_request: RESUME points at N request entries.  */

struct thread_resume_array
{
  struct thread_resume *resume;
  size_t n;
};
2317
2318 /* This function is called once per thread. We look up the thread
2319 in RESUME_PTR, and mark the thread with a pointer to the appropriate
2320 resume request.
2321
2322 This algorithm is O(threads * resume elements), but resume elements
2323 is small (and will remain small at least until GDB supports thread
2324 suspension). */
2325 static int
2326 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
2327 {
2328 struct lwp_info *lwp;
2329 struct thread_info *thread;
2330 int ndx;
2331 struct thread_resume_array *r;
2332
2333 thread = (struct thread_info *) entry;
2334 lwp = get_thread_lwp (thread);
2335 r = arg;
2336
2337 for (ndx = 0; ndx < r->n; ndx++)
2338 {
2339 ptid_t ptid = r->resume[ndx].thread;
2340 if (ptid_equal (ptid, minus_one_ptid)
2341 || ptid_equal (ptid, entry->id)
2342 || (ptid_is_pid (ptid)
2343 && (ptid_get_pid (ptid) == pid_of (lwp)))
2344 || (ptid_get_lwp (ptid) == -1
2345 && (ptid_get_pid (ptid) == pid_of (lwp))))
2346 {
2347 if (r->resume[ndx].kind == resume_stop
2348 && thread->last_resume_kind == resume_stop)
2349 {
2350 if (debug_threads)
2351 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
2352 thread->last_status.kind == TARGET_WAITKIND_STOPPED
2353 ? "stopped"
2354 : "stopping",
2355 lwpid_of (lwp));
2356
2357 continue;
2358 }
2359
2360 lwp->resume = &r->resume[ndx];
2361 thread->last_resume_kind = lwp->resume->kind;
2362 return 0;
2363 }
2364 }
2365
2366 /* No resume action for this thread. */
2367 lwp->resume = NULL;
2368
2369 return 0;
2370 }
2371
2372
2373 /* Set *FLAG_P if this lwp has an interesting status pending. */
2374 static int
2375 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
2376 {
2377 struct lwp_info *lwp = (struct lwp_info *) entry;
2378
2379 /* LWPs which will not be resumed are not interesting, because
2380 we might not wait for them next time through linux_wait. */
2381 if (lwp->resume == NULL)
2382 return 0;
2383
2384 if (lwp->status_pending_p)
2385 * (int *) flag_p = 1;
2386
2387 return 0;
2388 }
2389
/* Return 1 if this lwp that GDB wants running is stopped at an
   internal breakpoint that we need to step over.  It assumes that any
   required STOP_PC adjustment has already been propagated to the
   inferior's regcache.  Used as a find_inferior callback; DUMMY is
   unused.  */

static int
need_step_over_p (struct inferior_list_entry *entry, void *dummy)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *thread;
  struct thread_info *saved_inferior;
  CORE_ADDR pc;

  /* LWPs which will not be resumed are not interesting, because we
     might not wait for them next time through linux_wait.  */

  if (!lwp->stopped)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Need step over [LWP %ld]? Ignoring, not stopped\n",
		 lwpid_of (lwp));
      return 0;
    }

  thread = get_lwp_thread (lwp);

  /* A thread GDB wants stopped should stay stopped; no stepping.  */
  if (thread->last_resume_kind == resume_stop)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
		 lwpid_of (lwp));
      return 0;
    }

  /* NOTE(review): there is no "return 0" in this branch, so even when
     NEED_STEP_OVER is clear we fall through and may still decide to
     step over a breakpoint found at PC below — confirm this
     fall-through is intentional.  */
  if (!lwp->need_step_over)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
    }

  /* An LWP with a pending status will be reported before it runs
     again; no step-over is needed now.  */
  if (lwp->status_pending_p)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
		 lwpid_of (lwp));
      return 0;
    }

  /* Note: PC, not STOP_PC.  Either GDB has adjusted the PC already,
     or we have.  */
  pc = get_pc (lwp);

  /* If the PC has changed since we stopped, then don't do anything,
     and let the breakpoint/tracepoint be hit.  This happens if, for
     instance, GDB handled the decr_pc_after_break subtraction itself,
     GDB is OOL stepping this thread, or the user has issued a "jump"
     command, or poked thread's registers herself.  */
  if (pc != lwp->stop_pc)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Need step over [LWP %ld]? Cancelling, PC was changed. "
		 "Old stop_pc was 0x%s, PC is now 0x%s\n",
		 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));

      lwp->need_step_over = 0;
      return 0;
    }

  /* Breakpoint lookups below consult the current thread's state, so
     temporarily switch to this LWP's thread.  */
  saved_inferior = current_inferior;
  current_inferior = thread;

  /* We can only step over breakpoints we know about.  */
  if (breakpoint_here (pc))
    {
      /* Don't step over a breakpoint that GDB expects to hit
	 though.  */
      if (gdb_breakpoint_here (pc))
	{
	  if (debug_threads)
	    fprintf (stderr,
		     "Need step over [LWP %ld]? yes, but found"
		     " GDB breakpoint at 0x%s; skipping step over\n",
		     lwpid_of (lwp), paddress (pc));

	  current_inferior = saved_inferior;
	  return 0;
	}
      else
	{
	  if (debug_threads)
	    fprintf (stderr,
		     "Need step over [LWP %ld]? yes, found breakpoint at 0x%s\n",
		     lwpid_of (lwp), paddress (pc));

	  /* We've found an lwp that needs stepping over --- return 1 so
	     that find_inferior stops looking.  */
	  current_inferior = saved_inferior;

	  /* If the step over is cancelled, this is set again.  */
	  lwp->need_step_over = 0;
	  return 1;
	}
    }

  current_inferior = saved_inferior;

  if (debug_threads)
    fprintf (stderr,
	     "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
	     lwpid_of (lwp), paddress (pc));

  return 0;
}
2508
/* Start a step-over operation on LWP.  When LWP stopped at a
   breakpoint, to make progress, we need to remove the breakpoint out
   of the way.  If we let other threads run while we do that, they may
   pass by the breakpoint location and miss hitting it.  To avoid
   that, a step-over momentarily stops all threads while LWP is
   single-stepped while the breakpoint is temporarily uninserted from
   the inferior.  When the single-step finishes, we reinsert the
   breakpoint, and let all threads that are supposed to be running,
   run again.

   On targets that don't support hardware single-step, we don't
   currently support full software single-stepping.  Instead, we only
   support stepping over the thread event breakpoint, by asking the
   low target where to place a reinsert breakpoint.  Since this
   routine assumes the breakpoint being stepped over is a thread event
   breakpoint, it usually assumes the return address of the current
   function is a good enough place to set the reinsert breakpoint.

   Always returns 1 (a step-over was started).  */

static int
start_step_over (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  CORE_ADDR pc;
  int step;

  if (debug_threads)
    fprintf (stderr,
	     "Starting step-over on LWP %ld. Stopping all threads\n",
	     lwpid_of (lwp));

  stop_all_lwps ();

  if (debug_threads)
    fprintf (stderr, "Done stopping all threads for step-over.\n");

  /* Note, we should always reach here with an already adjusted PC,
     either by GDB (if we're resuming due to GDB's request), or by our
     caller, if we just finished handling an internal breakpoint GDB
     shouldn't care about.  */
  pc = get_pc (lwp);

  /* Breakpoint manipulation below applies to the current thread, so
     temporarily switch to LWP's thread.  */
  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  /* Record where the breakpoint must be reinserted when the step
     finishes (see finish_step_over), and pull it out of the way.  */
  lwp->bp_reinsert = pc;
  uninsert_breakpoints_at (pc);

  if (can_hardware_single_step ())
    {
      step = 1;
    }
  else
    {
      /* No hardware single-step: plant a reinsert breakpoint at the
	 address the low target suggests, and continue instead.  */
      CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
      set_reinsert_breakpoint (raddr);
      step = 0;
    }

  current_inferior = saved_inferior;

  linux_resume_one_lwp (lwp, step, 0, NULL);

  /* Require next event from this LWP.  */
  step_over_bkpt = lwp->head.id;
  return 1;
}
2575
2576 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
2577 start_step_over, if still there, and delete any reinsert
2578 breakpoints we've set, on non hardware single-step targets. */
2579
2580 static int
2581 finish_step_over (struct lwp_info *lwp)
2582 {
2583 if (lwp->bp_reinsert != 0)
2584 {
2585 if (debug_threads)
2586 fprintf (stderr, "Finished step over.\n");
2587
2588 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
2589 may be no breakpoint to reinsert there by now. */
2590 reinsert_breakpoints_at (lwp->bp_reinsert);
2591
2592 lwp->bp_reinsert = 0;
2593
2594 /* Delete any software-single-step reinsert breakpoints. No
2595 longer needed. We don't have to worry about other threads
2596 hitting this trap, and later not being able to explain it,
2597 because we were stepping over a breakpoint, and we hold all
2598 threads but LWP stopped while doing that. */
2599 if (!can_hardware_single_step ())
2600 delete_reinsert_breakpoints ();
2601
2602 step_over_bkpt = null_ptid;
2603 return 1;
2604 }
2605 else
2606 return 0;
2607 }
2608
/* This function is called once per thread.  We check the thread's resume
   request, which will tell us whether to resume, step, or leave the thread
   stopped; and what signal, if any, it should be sent.

   For threads which we aren't explicitly told otherwise, we preserve
   the stepping flag; this is used for stepping over gdbserver-placed
   breakpoints.

   If pending_flags was set in any thread, we queue any needed
   signals, since we won't actually resume.  We already have a pending
   event to report, so we don't need to preserve any step requests;
   they should be re-issued if necessary.

   ARG points at an int: nonzero when all threads are to be left
   stopped (some thread has a pending status or needs a step-over).
   Always returns 0 so find_inferior visits every thread.  */

static int
linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp;
  struct thread_info *thread;
  int step;
  int leave_all_stopped = * (int *) arg;
  int leave_pending;

  thread = (struct thread_info *) entry;
  lwp = get_thread_lwp (thread);

  /* No request applied to this thread.  */
  if (lwp->resume == NULL)
    return 0;

  if (lwp->resume->kind == resume_stop)
    {
      if (debug_threads)
	fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));

      if (!lwp->stopped)
	{
	  if (debug_threads)
	    fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));

	  /* Stop the thread, and wait for the event asynchronously,
	     through the event loop.  */
	  send_sigstop (lwp);
	}
      else
	{
	  if (debug_threads)
	    fprintf (stderr, "already stopped LWP %ld\n",
		     lwpid_of (lwp));

	  /* The LWP may have been stopped in an internal event that
	     was not meant to be notified back to GDB (e.g., gdbserver
	     breakpoint), so we should be reporting a stop event in
	     this case too.  */

	  /* If the thread already has a pending SIGSTOP, this is a
	     no-op.  Otherwise, something later will presumably resume
	     the thread and this will cause it to cancel any pending
	     operation, due to last_resume_kind == resume_stop.  If
	     the thread already has a pending status to report, we
	     will still report it the next time we wait - see
	     status_pending_p_callback.  */
	  send_sigstop (lwp);
	}

      /* For stop requests, we're done.  */
      lwp->resume = NULL;
      thread->last_status.kind = TARGET_WAITKIND_IGNORE;
      return 0;
    }

  /* If this thread which is about to be resumed has a pending status,
     then don't resume any threads - we can just report the pending
     status.  Make sure to queue any signals that would otherwise be
     sent.  In all-stop mode, we do this decision based on if *any*
     thread has a pending status.  If there's a thread that needs the
     step-over-breakpoint dance, then don't resume any other thread
     but that particular one.  */
  leave_pending = (lwp->status_pending_p || leave_all_stopped);

  if (!leave_pending)
    {
      if (debug_threads)
	fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));

      step = (lwp->resume->kind == resume_step);
      linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
    }
  else
    {
      if (debug_threads)
	fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));

      /* If we have a new signal, enqueue the signal.  */
      if (lwp->resume->sig != 0)
	{
	  struct pending_signals *p_sig;
	  p_sig = xmalloc (sizeof (*p_sig));
	  p_sig->prev = lwp->pending_signals;
	  p_sig->signal = lwp->resume->sig;
	  memset (&p_sig->info, 0, sizeof (siginfo_t));

	  /* If this is the same signal we were previously stopped by,
	     make sure to queue its siginfo.  We can ignore the return
	     value of ptrace; if it fails, we'll skip
	     PTRACE_SETSIGINFO.  */
	  if (WIFSTOPPED (lwp->last_status)
	      && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
	    ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);

	  lwp->pending_signals = p_sig;
	}
    }

  thread->last_status.kind = TARGET_WAITKIND_IGNORE;
  lwp->resume = NULL;
  return 0;
}
2725
/* Target "resume" entry point.  RESUME_INFO holds N resume requests
   from GDB.  Assign each request to its thread, decide whether all
   threads must be left stopped (a pending status to report, or a
   step-over needed), then resume or queue signals for each thread
   accordingly, and finally kick off the step-over if one is due.  */
static void
linux_resume (struct thread_resume *resume_info, size_t n)
{
  struct thread_resume_array array = { resume_info, n };
  struct lwp_info *need_step_over = NULL;
  int any_pending;
  int leave_all_stopped;

  /* Mark each thread with the resume request that applies to it.  */
  find_inferior (&all_threads, linux_set_resume_request, &array);

  /* If there is a thread which would otherwise be resumed, which has
     a pending status, then don't resume any threads - we can just
     report the pending status.  Make sure to queue any signals that
     would otherwise be sent.  In non-stop mode, we'll apply this
     logic to each thread individually.  We consume all pending events
     before considering to start a step-over (in all-stop).  */
  any_pending = 0;
  if (!non_stop)
    find_inferior (&all_lwps, resume_status_pending_p, &any_pending);

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  Make sure
     to queue any signals that would otherwise be delivered or
     queued.  */
  if (!any_pending && supports_breakpoints ())
    need_step_over
      = (struct lwp_info *) find_inferior (&all_lwps,
					   need_step_over_p, NULL);

  leave_all_stopped = (need_step_over != NULL || any_pending);

  if (debug_threads)
    {
      if (need_step_over != NULL)
	fprintf (stderr, "Not resuming all, need step over\n");
      else if (any_pending)
	fprintf (stderr,
		 "Not resuming, all-stop and found "
		 "an LWP with pending status\n");
      else
	fprintf (stderr, "Resuming, no pending status or step over needed\n");
    }

  /* Even if we're leaving threads stopped, queue all signals we'd
     otherwise deliver.  */
  find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);

  if (need_step_over)
    start_step_over (need_step_over);
}
2778
/* This function is called once per thread.  We check the thread's
   last resume request, which will tell us whether to resume, step, or
   leave the thread stopped.  Any signal the client requested to be
   delivered has already been enqueued at this point.

   If any thread that GDB wants running is stopped at an internal
   breakpoint that needs stepping over, we start a step-over operation
   on that particular thread, and leave all others stopped.  */

static void
proceed_one_lwp (struct inferior_list_entry *entry)
{
  struct lwp_info *lwp;
  struct thread_info *thread;
  int step;

  lwp = (struct lwp_info *) entry;

  if (debug_threads)
    fprintf (stderr,
	     "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));

  /* Already running; nothing to do.  */
  if (!lwp->stopped)
    {
      if (debug_threads)
	fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
      return;
    }

  thread = get_lwp_thread (lwp);

  /* GDB asked this thread to stop, and the stop was already reported;
     keep it stopped.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    {
      if (debug_threads)
	fprintf (stderr, " client wants LWP to remain %ld stopped\n",
		 lwpid_of (lwp));
      return;
    }

  /* A pending status must be reported before the LWP may run.  */
  if (lwp->status_pending_p)
    {
      if (debug_threads)
	fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
		 lwpid_of (lwp));
      return;
    }

  /* Explicitly suspended (see unstop_all_lwps); don't touch it.  */
  if (lwp->suspended)
    {
      if (debug_threads)
	fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
      return;
    }

  if (thread->last_resume_kind == resume_stop)
    {
      /* We haven't reported this LWP as stopped yet (otherwise, the
	 last_status.kind check above would catch it, and we wouldn't
	 reach here.  This LWP may have been momentarily paused by a
	 stop_all_lwps call while handling for example, another LWP's
	 step-over.  In that case, the pending expected SIGSTOP signal
	 that was queued at vCont;t handling time will have already
	 been consumed by wait_for_sigstop, and so we need to requeue
	 another one here.  Note that if the LWP already has a SIGSTOP
	 pending, this is a no-op.  */

      if (debug_threads)
	fprintf (stderr,
		 "Client wants LWP %ld to stop. "
		 "Making sure it has a SIGSTOP pending\n",
		 lwpid_of (lwp));

      send_sigstop (lwp);
    }

  /* Preserve the stepping flag from GDB's last request.  */
  step = thread->last_resume_kind == resume_step;
  linux_resume_one_lwp (lwp, step, 0, NULL);
}
2858
/* When we finish a step-over, set threads running again.  If there's
   another thread that may need a step-over, now's the time to start
   it.  Eventually, we'll move all threads past their breakpoints.  */

static void
proceed_all_lwps (void)
{
  struct lwp_info *need_step_over;

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  */

  if (supports_breakpoints ())
    {
      need_step_over
	= (struct lwp_info *) find_inferior (&all_lwps,
					     need_step_over_p, NULL);

      if (need_step_over != NULL)
	{
	  if (debug_threads)
	    fprintf (stderr, "proceed_all_lwps: found "
		     "thread %ld needing a step-over\n",
		     lwpid_of (need_step_over));

	  /* start_step_over resumes only that one LWP; the rest stay
	     stopped until its step finishes.  */
	  start_step_over (need_step_over);
	  return;
	}
    }

  if (debug_threads)
    fprintf (stderr, "Proceeding, no step-over needed\n");

  for_each_inferior (&all_lwps, proceed_one_lwp);
}
2896
/* Stopped LWPs that the client wanted to be running, that don't have
   pending statuses, are set to run again, except for EXCEPT, if not
   NULL.  This undoes a stop_all_lwps call.  */

static void
unstop_all_lwps (struct lwp_info *except)
{
  if (debug_threads)
    {
      if (except)
	fprintf (stderr,
		 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
      else
	fprintf (stderr,
		 "unstopping all lwps\n");
    }

  /* Make sure proceed_one_lwp doesn't try to resume this thread.  */
  if (except != NULL)
    ++except->suspended;

  for_each_inferior (&all_lwps, proceed_one_lwp);

  /* Restore EXCEPT's suspend count now that the walk is done.  */
  if (except != NULL)
    --except->suspended;
}
2923
2924 #ifdef HAVE_LINUX_USRREGS
2925
2926 int
2927 register_addr (int regnum)
2928 {
2929 int addr;
2930
2931 if (regnum < 0 || regnum >= the_low_target.num_regs)
2932 error ("Invalid register number %d.", regnum);
2933
2934 addr = the_low_target.regmap[regnum];
2935
2936 return addr;
2937 }
2938
/* Fetch one register, REGNO, from the inferior with PTRACE_PEEKUSER
   and supply it to REGCACHE.  Registers that are out of range, marked
   unfetchable by the low target, or that have no USER-area offset are
   silently skipped.  */
static void
fetch_register (struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= the_low_target.num_regs)
    return;
  if ((*the_low_target.cannot_fetch_register) (regno))
    return;

  regaddr = register_addr (regno);
  if (regaddr == -1)
    return;

  pid = lwpid_of (get_thread_lwp (current_inferior));
  /* Round the register size up to a whole number of ptrace transfer
     units.  */
  size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
	  & - sizeof (PTRACE_XFER_TYPE));
  buf = alloca (size);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      /* PEEKUSER returns the data itself, so failures are only
	 visible through errno; clear it before each call.  */
      errno = 0;
      *(PTRACE_XFER_TYPE *) (buf + i) =
	ptrace (PTRACE_PEEKUSER, pid,
		/* Coerce to a uintptr_t first to avoid potential gcc warning
		   of coercing an 8 byte integer to a 4 byte pointer.  */
		(PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0);
      regaddr += sizeof (PTRACE_XFER_TYPE);
      if (errno != 0)
	error ("reading register %d: %s", regno, strerror (errno));
    }

  /* Let the low target post-process the raw bytes if it wants to.  */
  if (the_low_target.supply_ptrace_register)
    the_low_target.supply_ptrace_register (regcache, regno, buf);
  else
    supply_register (regcache, regno, buf);
}
2979
2980 /* Fetch all registers, or just one, from the child process. */
2981 static void
2982 usr_fetch_inferior_registers (struct regcache *regcache, int regno)
2983 {
2984 if (regno == -1)
2985 for (regno = 0; regno < the_low_target.num_regs; regno++)
2986 fetch_register (regcache, regno);
2987 else
2988 fetch_register (regcache, regno);
2989 }
2990
/* Store our register values back into the inferior.
   If REGNO is -1, do this for all registers (by recursing once per
   register).  Otherwise, REGNO specifies which register (so we can
   save time).  Uses PTRACE_POKEUSER, one transfer unit at a time.  */
static void
usr_store_inferior_registers (struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= 0)
    {
      if (regno >= the_low_target.num_regs)
	return;

      if ((*the_low_target.cannot_store_register) (regno) == 1)
	return;

      regaddr = register_addr (regno);
      if (regaddr == -1)
	return;
      errno = 0;
      /* Round the register size up to a whole number of ptrace
	 transfer units; the padding is zero-filled below.  */
      size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
	     & - sizeof (PTRACE_XFER_TYPE);
      buf = alloca (size);
      memset (buf, 0, size);

      /* Let the low target pre-process the raw bytes if it wants
	 to.  */
      if (the_low_target.collect_ptrace_register)
	the_low_target.collect_ptrace_register (regcache, regno, buf);
      else
	collect_register (regcache, regno, buf);

      pid = lwpid_of (get_thread_lwp (current_inferior));
      for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
	{
	  errno = 0;
	  ptrace (PTRACE_POKEUSER, pid,
		  /* Coerce to a uintptr_t first to avoid potential gcc warning
		     about coercing an 8 byte integer to a 4 byte pointer.  */
		  (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
		  (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
	  if (errno != 0)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to change its registers.  See also the related
		 comment in linux_resume_one_lwp.  */
	      if (errno == ESRCH)
		return;

	      /* Only complain about registers the low target claims
		 are storable.  */
	      if ((*the_low_target.cannot_store_register) (regno) == 0)
		error ("writing register %d: %s", regno, strerror (errno));
	    }
	  regaddr += sizeof (PTRACE_XFER_TYPE);
	}
    }
  else
    for (regno = 0; regno < the_low_target.num_regs; regno++)
      usr_store_inferior_registers (regcache, regno);
}
3052 #endif /* HAVE_LINUX_USRREGS */
3053
3054
3055
3056 #ifdef HAVE_LINUX_REGSETS
3057
3058 static int
3059 regsets_fetch_inferior_registers (struct regcache *regcache)
3060 {
3061 struct regset_info *regset;
3062 int saw_general_regs = 0;
3063 int pid;
3064 struct iovec iov;
3065
3066 regset = target_regsets;
3067
3068 pid = lwpid_of (get_thread_lwp (current_inferior));
3069 while (regset->size >= 0)
3070 {
3071 void *buf, *data;
3072 int nt_type, res;
3073
3074 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3075 {
3076 regset ++;
3077 continue;
3078 }
3079
3080 buf = xmalloc (regset->size);
3081
3082 nt_type = regset->nt_type;
3083 if (nt_type)
3084 {
3085 iov.iov_base = buf;
3086 iov.iov_len = regset->size;
3087 data = (void *) &iov;
3088 }
3089 else
3090 data = buf;
3091
3092 #ifndef __sparc__
3093 res = ptrace (regset->get_request, pid, nt_type, data);
3094 #else
3095 res = ptrace (regset->get_request, pid, data, nt_type);
3096 #endif
3097 if (res < 0)
3098 {
3099 if (errno == EIO)
3100 {
3101 /* If we get EIO on a regset, do not try it again for
3102 this process. */
3103 disabled_regsets[regset - target_regsets] = 1;
3104 free (buf);
3105 continue;
3106 }
3107 else
3108 {
3109 char s[256];
3110 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
3111 pid);
3112 perror (s);
3113 }
3114 }
3115 else if (regset->type == GENERAL_REGS)
3116 saw_general_regs = 1;
3117 regset->store_function (regcache, buf);
3118 regset ++;
3119 free (buf);
3120 }
3121 if (saw_general_regs)
3122 return 0;
3123 else
3124 return 1;
3125 }
3126
3127 static int
3128 regsets_store_inferior_registers (struct regcache *regcache)
3129 {
3130 struct regset_info *regset;
3131 int saw_general_regs = 0;
3132 int pid;
3133 struct iovec iov;
3134
3135 regset = target_regsets;
3136
3137 pid = lwpid_of (get_thread_lwp (current_inferior));
3138 while (regset->size >= 0)
3139 {
3140 void *buf, *data;
3141 int nt_type, res;
3142
3143 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3144 {
3145 regset ++;
3146 continue;
3147 }
3148
3149 buf = xmalloc (regset->size);
3150
3151 /* First fill the buffer with the current register set contents,
3152 in case there are any items in the kernel's regset that are
3153 not in gdbserver's regcache. */
3154
3155 nt_type = regset->nt_type;
3156 if (nt_type)
3157 {
3158 iov.iov_base = buf;
3159 iov.iov_len = regset->size;
3160 data = (void *) &iov;
3161 }
3162 else
3163 data = buf;
3164
3165 #ifndef __sparc__
3166 res = ptrace (regset->get_request, pid, nt_type, data);
3167 #else
3168 res = ptrace (regset->get_request, pid, &iov, data);
3169 #endif
3170
3171 if (res == 0)
3172 {
3173 /* Then overlay our cached registers on that. */
3174 regset->fill_function (regcache, buf);
3175
3176 /* Only now do we write the register set. */
3177 #ifndef __sparc__
3178 res = ptrace (regset->set_request, pid, nt_type, data);
3179 #else
3180 res = ptrace (regset->set_request, pid, data, nt_type);
3181 #endif
3182 }
3183
3184 if (res < 0)
3185 {
3186 if (errno == EIO)
3187 {
3188 /* If we get EIO on a regset, do not try it again for
3189 this process. */
3190 disabled_regsets[regset - target_regsets] = 1;
3191 free (buf);
3192 continue;
3193 }
3194 else if (errno == ESRCH)
3195 {
3196 /* At this point, ESRCH should mean the process is
3197 already gone, in which case we simply ignore attempts
3198 to change its registers. See also the related
3199 comment in linux_resume_one_lwp. */
3200 free (buf);
3201 return 0;
3202 }
3203 else
3204 {
3205 perror ("Warning: ptrace(regsets_store_inferior_registers)");
3206 }
3207 }
3208 else if (regset->type == GENERAL_REGS)
3209 saw_general_regs = 1;
3210 regset ++;
3211 free (buf);
3212 }
3213 if (saw_general_regs)
3214 return 0;
3215 else
3216 return 1;
3217 return 0;
3218 }
3219
3220 #endif /* HAVE_LINUX_REGSETS */
3221
3222
/* Fetch register REGNO (or, with the regset method, all registers)
   from the inferior into REGCACHE.  Prefer regset-based access; fall
   back to the usr (one register at a time) method if regsets did not
   supply the general-purpose registers.  */

void
linux_fetch_registers (struct regcache *regcache, int regno)
{
#ifdef HAVE_LINUX_REGSETS
  /* A zero return means a general-purpose regset was read, so the
     regcache is fully populated and we are done.  */
  if (regsets_fetch_inferior_registers (regcache) == 0)
    return;
#endif
#ifdef HAVE_LINUX_USRREGS
  usr_fetch_inferior_registers (regcache, regno);
#endif
}
3234
/* Write register REGNO (or, with the regset method, all registers)
   from REGCACHE back to the inferior.  Mirrors linux_fetch_registers:
   regsets first, usr access as the fallback.  */

void
linux_store_registers (struct regcache *regcache, int regno)
{
#ifdef HAVE_LINUX_REGSETS
  /* A zero return means a general-purpose regset was written.  */
  if (regsets_store_inferior_registers (regcache) == 0)
    return;
#endif
#ifdef HAVE_LINUX_USRREGS
  usr_store_inferior_registers (regcache, regno);
#endif
}
3246
3247
/* Copy LEN bytes from inferior's memory starting at MEMADDR
   to debugger memory starting at MYADDR.  Returns 0 on success, or
   the errno value of the failing PTRACE_PEEKTEXT otherwise.  Tries
   the (faster) /proc/PID/mem route first for reads of three or more
   words, falling back to word-at-a-time ptrace.  */

static int
linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
  register int i;
  /* Round starting address down to longword boundary. */
  register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes. */
  register int count
    = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
      / sizeof (PTRACE_XFER_TYPE);
  /* Allocate buffer of that many longwords. */
  register PTRACE_XFER_TYPE *buffer
    = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
  int fd;
  char filename[64];
  int pid = lwpid_of (get_thread_lwp (current_inferior));

  /* Try using /proc.  Don't bother for one word.  */
  if (len >= 3 * sizeof (long))
    {
      /* We could keep this file open and cache it - possibly one per
	 thread.  That requires some juggling, but is even faster.  */
      sprintf (filename, "/proc/%d/mem", pid);
      fd = open (filename, O_RDONLY | O_LARGEFILE);
      if (fd == -1)
	goto no_proc;

      /* If pread64 is available, use it.  It's faster if the kernel
	 supports it (only one syscall), and it's 64-bit safe even on
	 32-bit platforms (for instance, SPARC debugging a SPARC64
	 application).  */
#ifdef HAVE_PREAD64
      if (pread64 (fd, myaddr, len, memaddr) != len)
#else
      if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
#endif
	{
	  /* Partial or failed read: fall back to ptrace below.  */
	  close (fd);
	  goto no_proc;
	}

      close (fd);
      return 0;
    }

 no_proc:
  /* Read all the longwords */
  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
	 about coercing an 8 byte integer to a 4 byte pointer.  */
      buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
			  (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
      if (errno)
	return errno;
    }

  /* Copy appropriate bytes out of the buffer.  The buffer holds whole
     aligned words; skip the leading bytes before MEMADDR.  */
  memcpy (myaddr,
	  (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	  len);

  return 0;
}
3316
3317 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
3318 memory at MEMADDR. On failure (cannot write to the inferior)
3319 returns the value of errno. */
3320
3321 static int
3322 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
3323 {
3324 register int i;
3325 /* Round starting address down to longword boundary. */
3326 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
3327 /* Round ending address up; get number of longwords that makes. */
3328 register int count
3329 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1) / sizeof (PTRACE_XFER_TYPE);
3330 /* Allocate buffer of that many longwords. */
3331 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
3332 int pid = lwpid_of (get_thread_lwp (current_inferior));
3333
3334 if (debug_threads)
3335 {
3336 /* Dump up to four bytes. */
3337 unsigned int val = * (unsigned int *) myaddr;
3338 if (len == 1)
3339 val = val & 0xff;
3340 else if (len == 2)
3341 val = val & 0xffff;
3342 else if (len == 3)
3343 val = val & 0xffffff;
3344 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
3345 val, (long)memaddr);
3346 }
3347
3348 /* Fill start and end extra bytes of buffer with existing memory data. */
3349
3350 errno = 0;
3351 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
3352 about coercing an 8 byte integer to a 4 byte pointer. */
3353 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
3354 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
3355 if (errno)
3356 return errno;
3357
3358 if (count > 1)
3359 {
3360 errno = 0;
3361 buffer[count - 1]
3362 = ptrace (PTRACE_PEEKTEXT, pid,
3363 /* Coerce to a uintptr_t first to avoid potential gcc warning
3364 about coercing an 8 byte integer to a 4 byte pointer. */
3365 (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
3366 * sizeof (PTRACE_XFER_TYPE)),
3367 0);
3368 if (errno)
3369 return errno;
3370 }
3371
3372 /* Copy data to be written over corresponding part of buffer. */
3373
3374 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), myaddr, len);
3375
3376 /* Write the entire buffer. */
3377
3378 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
3379 {
3380 errno = 0;
3381 ptrace (PTRACE_POKETEXT, pid,
3382 /* Coerce to a uintptr_t first to avoid potential gcc warning
3383 about coercing an 8 byte integer to a 4 byte pointer. */
3384 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
3385 (PTRACE_ARG4_TYPE) buffer[i]);
3386 if (errno)
3387 return errno;
3388 }
3389
3390 return 0;
3391 }
3392
/* Non-zero if the kernel supports PTRACE_O_TRACEFORK.  Set (only) by
   linux_test_for_tracefork at startup.  */
static int linux_supports_tracefork_flag;

/* Ask the kernel to report clone events for PID.  A no-op when the
   PTRACE_O_* options were found to be unsupported.  */

static void
linux_enable_event_reporting (int pid)
{
  if (!linux_supports_tracefork_flag)
    return;

  ptrace (PTRACE_SETOPTIONS, pid, 0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
}
3404
/* Helper functions for linux_test_for_tracefork, called via clone ().  */

/* Grandchild body: exit immediately.  Its only purpose is to make the
   test child generate a fork event.  */

static int
linux_tracefork_grandchild (void *arg)
{
  _exit (0);
}
3412
#define STACK_SIZE 4096

/* Test child body: become traced, stop so the parent can set ptrace
   options, then fork once (clone with CLONE_VM on no-MMU systems,
   where ARG is the base of the pre-allocated stack area) and exit.  */

static int
linux_tracefork_child (void *arg)
{
  ptrace (PTRACE_TRACEME, 0, 0, 0);
  kill (getpid (), SIGSTOP);

#if !(defined(__UCLIBC__) && defined(HAS_NOMMU))

  if (fork () == 0)
    linux_tracefork_grandchild (NULL);

#else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  /* No fork on no-MMU uClinux; clone the grandchild instead, giving
     it the second STACK_SIZE chunk of ARG as its stack.  */
#ifdef __ia64__
  __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
	    CLONE_VM | SIGCHLD, NULL);
#else
  clone (linux_tracefork_grandchild, arg + STACK_SIZE,
	 CLONE_VM | SIGCHLD, NULL);
#endif

#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  _exit (0);
}
3440
3441 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
3442 sure that we can enable the option, and that it had the desired
3443 effect. */
3444
3445 static void
3446 linux_test_for_tracefork (void)
3447 {
3448 int child_pid, ret, status;
3449 long second_pid;
3450 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
3451 char *stack = xmalloc (STACK_SIZE * 4);
3452 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3453
3454 linux_supports_tracefork_flag = 0;
3455
3456 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
3457
3458 child_pid = fork ();
3459 if (child_pid == 0)
3460 linux_tracefork_child (NULL);
3461
3462 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3463
3464 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
3465 #ifdef __ia64__
3466 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
3467 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
3468 #else /* !__ia64__ */
3469 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
3470 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
3471 #endif /* !__ia64__ */
3472
3473 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3474
3475 if (child_pid == -1)
3476 perror_with_name ("clone");
3477
3478 ret = my_waitpid (child_pid, &status, 0);
3479 if (ret == -1)
3480 perror_with_name ("waitpid");
3481 else if (ret != child_pid)
3482 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
3483 if (! WIFSTOPPED (status))
3484 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
3485
3486 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
3487 (PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
3488 if (ret != 0)
3489 {
3490 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
3491 if (ret != 0)
3492 {
3493 warning ("linux_test_for_tracefork: failed to kill child");
3494 return;
3495 }
3496
3497 ret = my_waitpid (child_pid, &status, 0);
3498 if (ret != child_pid)
3499 warning ("linux_test_for_tracefork: failed to wait for killed child");
3500 else if (!WIFSIGNALED (status))
3501 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
3502 "killed child", status);
3503
3504 return;
3505 }
3506
3507 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
3508 if (ret != 0)
3509 warning ("linux_test_for_tracefork: failed to resume child");
3510
3511 ret = my_waitpid (child_pid, &status, 0);
3512
3513 if (ret == child_pid && WIFSTOPPED (status)
3514 && status >> 16 == PTRACE_EVENT_FORK)
3515 {
3516 second_pid = 0;
3517 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
3518 if (ret == 0 && second_pid != 0)
3519 {
3520 int second_status;
3521
3522 linux_supports_tracefork_flag = 1;
3523 my_waitpid (second_pid, &second_status, 0);
3524 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
3525 if (ret != 0)
3526 warning ("linux_test_for_tracefork: failed to kill second child");
3527 my_waitpid (second_pid, &status, 0);
3528 }
3529 }
3530 else
3531 warning ("linux_test_for_tracefork: unexpected result from waitpid "
3532 "(%d, status 0x%x)", ret, status);
3533
3534 do
3535 {
3536 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
3537 if (ret != 0)
3538 warning ("linux_test_for_tracefork: failed to kill child");
3539 my_waitpid (child_pid, &status, 0);
3540 }
3541 while (WIFSTOPPED (status));
3542
3543 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
3544 free (stack);
3545 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3546 }
3547
3548
/* Hook up libthread_db support for the current process, if it has not
   been initialized yet.  A no-op when gdbserver is built without
   thread_db support.  */

static void
linux_look_up_symbols (void)
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  /* Already initialized for this process?  */
  if (proc->private->thread_db != NULL)
    return;

  /* If the kernel supports tracing forks then it also supports tracing
     clones, and then we don't need to use the magic thread event breakpoint
     to learn about threads.  */
  thread_db_init (!linux_supports_tracefork_flag);
#endif
}
3564
3565 static void
3566 linux_request_interrupt (void)
3567 {
3568 extern unsigned long signal_pid;
3569
3570 if (!ptid_equal (cont_thread, null_ptid)
3571 && !ptid_equal (cont_thread, minus_one_ptid))
3572 {
3573 struct lwp_info *lwp;
3574 int lwpid;
3575
3576 lwp = get_thread_lwp (current_inferior);
3577 lwpid = lwpid_of (lwp);
3578 kill_lwp (lwpid, SIGINT);
3579 }
3580 else
3581 kill_lwp (signal_pid, SIGINT);
3582 }
3583
3584 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
3585 to debugger memory starting at MYADDR. */
3586
3587 static int
3588 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
3589 {
3590 char filename[PATH_MAX];
3591 int fd, n;
3592 int pid = lwpid_of (get_thread_lwp (current_inferior));
3593
3594 snprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
3595
3596 fd = open (filename, O_RDONLY);
3597 if (fd < 0)
3598 return -1;
3599
3600 if (offset != (CORE_ADDR) 0
3601 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
3602 n = -1;
3603 else
3604 n = read (fd, myaddr, len);
3605
3606 close (fd);
3607
3608 return n;
3609 }
3610
3611 /* These breakpoint and watchpoint related wrapper functions simply
3612 pass on the function call if the target has registered a
3613 corresponding function. */
3614
3615 static int
3616 linux_insert_point (char type, CORE_ADDR addr, int len)
3617 {
3618 if (the_low_target.insert_point != NULL)
3619 return the_low_target.insert_point (type, addr, len);
3620 else
3621 /* Unsupported (see target.h). */
3622 return 1;
3623 }
3624
3625 static int
3626 linux_remove_point (char type, CORE_ADDR addr, int len)
3627 {
3628 if (the_low_target.remove_point != NULL)
3629 return the_low_target.remove_point (type, addr, len);
3630 else
3631 /* Unsupported (see target.h). */
3632 return 1;
3633 }
3634
3635 static int
3636 linux_stopped_by_watchpoint (void)
3637 {
3638 struct lwp_info *lwp = get_thread_lwp (current_inferior);
3639
3640 return lwp->stopped_by_watchpoint;
3641 }
3642
3643 static CORE_ADDR
3644 linux_stopped_data_address (void)
3645 {
3646 struct lwp_info *lwp = get_thread_lwp (current_inferior);
3647
3648 return lwp->stopped_data_address;
3649 }
3650
#if defined(__UCLIBC__) && defined(HAS_NOMMU)
#if defined(__mcoldfire__)
/* These should really be defined in the kernel's ptrace.h header.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
#endif

/* Under uClinux, programs are loaded at non-zero offsets, which we need
   to tell gdb about.  Returns 1 and fills *TEXT_P and *DATA_P on
   success, 0 when the PT_* user-area offsets are unavailable or any
   of the ptrace reads failed.  */

static int
linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
#if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
  unsigned long text, text_end, data;
  int pid = lwpid_of (get_thread_lwp (current_inferior));

  /* errno is the only failure indication for PTRACE_PEEKUSER; clear
     it once and check after all three reads.  */
  errno = 0;

  text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
  text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
  data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);

  if (errno == 0)
    {
      /* Both text and data offsets produced at compile-time (and so
	 used by gdb) are relative to the beginning of the program,
	 with the data segment immediately following the text segment.
	 However, the actual runtime layout in memory may put the data
	 somewhere else, so when we send gdb a data base-address, we
	 use the real data base address and subtract the compile-time
	 data base-address from it (which is just the length of the
	 text segment).  BSS immediately follows data in both
	 cases.  */
      *text_p = text;
      *data_p = data - (text_end - text);

      return 1;
    }
#endif
 return 0;
}
#endif
3695
/* qsort comparison function for ints.  Uses a sign comparison rather
   than "a - b": the subtraction overflows (undefined behavior) when
   the operands are far enough apart, e.g. INT_MAX vs. a negative.  */

static int
compare_ints (const void *xa, const void *xb)
{
  int a = *(const int *)xa;
  int b = *(const int *)xb;

  return (a > b) - (a < b);
}
3704
/* Remove adjacent duplicates from the sorted range [B, E) in place,
   returning one past the last unique element.  An empty range is
   returned unchanged; the previous implementation incremented B past
   E before comparing, walking off the end for B == E.  */

static int *
unique (int *b, int *e)
{
  int *d = b;

  if (b == e)
    return e;

  while (++b != e)
    if (*d != *b)
      *++d = *b;
  return ++d;
}
3714
3715 /* Given PID, iterates over all threads in that process.
3716
3717 Information about each thread, in a format suitable for qXfer:osdata:thread
3718 is printed to BUFFER, if it's not NULL. BUFFER is assumed to be already
3719 initialized, and the caller is responsible for finishing and appending '\0'
3720 to it.
3721
3722 The list of cores that threads are running on is assigned to *CORES, if it
3723 is not NULL. If no cores are found, *CORES will be set to NULL. Caller
3724 should free *CORES. */
3725
3726 static void
3727 list_threads (int pid, struct buffer *buffer, char **cores)
3728 {
3729 int count = 0;
3730 int allocated = 10;
3731 int *core_numbers = xmalloc (sizeof (int) * allocated);
3732 char pathname[128];
3733 DIR *dir;
3734 struct dirent *dp;
3735 struct stat statbuf;
3736
3737 sprintf (pathname, "/proc/%d/task", pid);
3738 if (stat (pathname, &statbuf) == 0 && S_ISDIR (statbuf.st_mode))
3739 {
3740 dir = opendir (pathname);
3741 if (!dir)
3742 {
3743 free (core_numbers);
3744 return;
3745 }
3746
3747 while ((dp = readdir (dir)) != NULL)
3748 {
3749 unsigned long lwp = strtoul (dp->d_name, NULL, 10);
3750
3751 if (lwp != 0)
3752 {
3753 unsigned core = linux_core_of_thread (ptid_build (pid, lwp, 0));
3754
3755 if (core != -1)
3756 {
3757 char s[sizeof ("4294967295")];
3758 sprintf (s, "%u", core);
3759
3760 if (count == allocated)
3761 {
3762 allocated *= 2;
3763 core_numbers = realloc (core_numbers,
3764 sizeof (int) * allocated);
3765 }
3766 core_numbers[count++] = core;
3767 if (buffer)
3768 buffer_xml_printf (buffer,
3769 "<item>"
3770 "<column name=\"pid\">%d</column>"
3771 "<column name=\"tid\">%s</column>"
3772 "<column name=\"core\">%s</column>"
3773 "</item>", pid, dp->d_name, s);
3774 }
3775 else
3776 {
3777 if (buffer)
3778 buffer_xml_printf (buffer,
3779 "<item>"
3780 "<column name=\"pid\">%d</column>"
3781 "<column name=\"tid\">%s</column>"
3782 "</item>", pid, dp->d_name);
3783 }
3784 }
3785 }
3786 }
3787
3788 if (cores)
3789 {
3790 *cores = NULL;
3791 if (count > 0)
3792 {
3793 struct buffer buffer2;
3794 int *b;
3795 int *e;
3796 qsort (core_numbers, count, sizeof (int), compare_ints);
3797
3798 /* Remove duplicates. */
3799 b = core_numbers;
3800 e = unique (b, core_numbers + count);
3801
3802 buffer_init (&buffer2);
3803
3804 for (b = core_numbers; b != e; ++b)
3805 {
3806 char number[sizeof ("4294967295")];
3807 sprintf (number, "%u", *b);
3808 buffer_xml_printf (&buffer2, "%s%s",
3809 (b == core_numbers) ? "" : ",", number);
3810 }
3811 buffer_grow_str0 (&buffer2, "");
3812
3813 *cores = buffer_finish (&buffer2);
3814 }
3815 }
3816 free (core_numbers);
3817 }
3818
/* Append one qXfer:osdata <item> for process PID (owned by USERNAME)
   to BUFFER: its command line from /proc/PID/cmdline plus the set of
   cores its threads run on.  Emits nothing when the cmdline file is
   unreadable or empty.  */

static void
show_process (int pid, const char *username, struct buffer *buffer)
{
  char pathname[128];
  FILE *f;
  char cmd[MAXPATHLEN + 1];
  size_t len;

  sprintf (pathname, "/proc/%d/cmdline", pid);

  f = fopen (pathname, "r");
  if (f == NULL)
    return;

  len = fread (cmd, 1, sizeof (cmd) - 1, f);
  fclose (f);

  if (len > 0)
    {
      char *cores = 0;
      size_t i;

      /* The cmdline file separates arguments with NUL bytes; turn
	 them into spaces so the command prints as one string.  */
      for (i = 0; i < len; i++)
	if (cmd[i] == '\0')
	  cmd[i] = ' ';
      cmd[len] = '\0';

      buffer_xml_printf (buffer,
			 "<item>"
			 "<column name=\"pid\">%d</column>"
			 "<column name=\"user\">%s</column>"
			 "<column name=\"command\">%s</column>",
			 pid,
			 username,
			 cmd);

      /* This only collects core numbers, and does not print threads.  */
      list_threads (pid, NULL, &cores);

      if (cores)
	{
	  buffer_xml_printf (buffer,
			     "<column name=\"cores\">%s</column>", cores);
	  free (cores);
	}

      buffer_xml_printf (buffer, "</item>");
    }
}
3864
/* Serve qXfer:osdata reads for ANNEX "processes" or "threads".  An XML
   snapshot of the whole list is built when OFFSET is zero, held in
   static state, and handed back in LEN-sized chunks on subsequent
   calls; it is discarded once the caller reads past the end.  Writes
   and unknown annexes return 0.  */

static int
linux_qxfer_osdata (const char *annex,
		    unsigned char *readbuf, unsigned const char *writebuf,
		    CORE_ADDR offset, int len)
{
  /* We make the process list snapshot when the object starts to be
     read.  */
  static const char *buf;
  static long len_avail = -1;
  static struct buffer buffer;
  int processes = 0;
  int threads = 0;

  DIR *dirp;

  if (strcmp (annex, "processes") == 0)
    processes = 1;
  else if (strcmp (annex, "threads") == 0)
    threads = 1;
  else
    return 0;

  if (!readbuf || writebuf)
    return 0;

  if (offset == 0)
    {
      /* Starting a new read: drop any previous snapshot and rebuild.  */
      if (len_avail != -1 && len_avail != 0)
	buffer_free (&buffer);
      len_avail = 0;
      buf = NULL;
      buffer_init (&buffer);
      if (processes)
	buffer_grow_str (&buffer, "<osdata type=\"processes\">");
      else if (threads)
	buffer_grow_str (&buffer, "<osdata type=\"threads\">");

      dirp = opendir ("/proc");
      if (dirp)
	{
	  struct dirent *dp;
	  while ((dp = readdir (dirp)) != NULL)
	    {
	      struct stat statbuf;
	      char procentry[sizeof ("/proc/4294967295")];

	      /* Only all-numeric /proc entries name processes.  */
	      if (!isdigit (dp->d_name[0])
		  || strlen (dp->d_name) > sizeof ("4294967295") - 1)
		continue;

	      sprintf (procentry, "/proc/%s", dp->d_name);
	      if (stat (procentry, &statbuf) == 0
		  && S_ISDIR (statbuf.st_mode))
		{
		  int pid = (int) strtoul (dp->d_name, NULL, 10);

		  if (processes)
		    {
		      /* The directory's owner is the process's user.  */
		      struct passwd *entry = getpwuid (statbuf.st_uid);
		      show_process (pid, entry ? entry->pw_name : "?", &buffer);
		    }
		  else if (threads)
		    {
		      list_threads (pid, &buffer, NULL);
		    }
		}
	    }

	  closedir (dirp);
	}
      buffer_grow_str0 (&buffer, "</osdata>\n");
      buf = buffer_finish (&buffer);
      len_avail = strlen (buf);
    }

  if (offset >= len_avail)
    {
      /* Done.  Get rid of the data.  */
      buffer_free (&buffer);
      buf = NULL;
      len_avail = 0;
      return 0;
    }

  if (len > len_avail - offset)
    len = len_avail - offset;
  memcpy (readbuf, buf + offset, len);

  return len;
}
3955
3956 /* Convert a native/host siginfo object, into/from the siginfo in the
3957 layout of the inferiors' architecture. */
3958
3959 static void
3960 siginfo_fixup (struct siginfo *siginfo, void *inf_siginfo, int direction)
3961 {
3962 int done = 0;
3963
3964 if (the_low_target.siginfo_fixup != NULL)
3965 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
3966
3967 /* If there was no callback, or the callback didn't do anything,
3968 then just do a straight memcpy. */
3969 if (!done)
3970 {
3971 if (direction == 1)
3972 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
3973 else
3974 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
3975 }
3976 }
3977
/* Implement qXfer:siginfo: transfer up to LEN bytes at OFFSET of the
   current thread's siginfo between the inferior and READBUF (read) or
   WRITEBUF (write).  Returns the number of bytes transferred, or -1
   on error.  */

static int
linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
		    unsigned const char *writebuf, CORE_ADDR offset, int len)
{
  int pid;
  struct siginfo siginfo;
  char inf_siginfo[sizeof (struct siginfo)];

  if (current_inferior == NULL)
    return -1;

  pid = lwpid_of (get_thread_lwp (current_inferior));

  if (debug_threads)
    fprintf (stderr, "%s siginfo for lwp %d.\n",
	     readbuf != NULL ? "Reading" : "Writing",
	     pid);

  /* NOTE(review): OFFSET == sizeof (siginfo) slips through this check
     and yields a zero-length transfer; arguably the test should be
     ">=".  */
  if (offset > sizeof (siginfo))
    return -1;

  if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp the transfer to the remaining bytes.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
	return -1;
    }

  return len;
}
4026
/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
   so we notice when children change state; as the handler for the
   sigsuspend in my_waitpid.  */

static void
sigchld_handler (int signo)
{
  /* Preserve errno: the interrupted code may be in the middle of
     inspecting it.  */
  int old_errno = errno;

  if (debug_threads)
    /* fprintf is not async-signal-safe, so call write directly.  */
    write (2, "sigchld_handler\n", sizeof ("sigchld_handler\n") - 1);

  if (target_is_async_p ())
    async_file_mark (); /* trigger a linux_wait */

  errno = old_errno;
}
4045
/* The Linux target supports non-stop mode unconditionally.  */

static int
linux_supports_non_stop (void)
{
  return 1;
}
4051
/* Enable (ENABLE != 0) or disable async mode, creating or tearing
   down the self-pipe that sigchld_handler uses (via async_file_mark)
   to wake the event loop.  Returns the previous setting.  */

static int
linux_async (int enable)
{
  /* The pipe's existence is the "async enabled" indicator.  */
  int previous = (linux_event_pipe[0] != -1);

  if (debug_threads)
    fprintf (stderr, "linux_async (%d), previous=%d\n",
	     enable, previous);

  if (previous != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      /* Hold off SIGCHLD while the pipe and event-loop handler are in
	 a half-initialized state; the handler writes to this pipe.  */
      sigprocmask (SIG_BLOCK, &mask, NULL);

      if (enable)
	{
	  if (pipe (linux_event_pipe) == -1)
	    fatal ("creating event pipe failed.");

	  /* Non-blocking at both ends, so neither marking nor
	     draining the pipe can stall.  */
	  fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
	  fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);

	  /* Register the event loop handler.  */
	  add_file_handler (linux_event_pipe[0],
			    handle_target_event, NULL);

	  /* Always trigger a linux_wait.  */
	  async_file_mark ();
	}
      else
	{
	  delete_file_handler (linux_event_pipe[0]);

	  close (linux_event_pipe[0]);
	  close (linux_event_pipe[1]);
	  linux_event_pipe[0] = -1;
	  linux_event_pipe[1] = -1;
	}

      sigprocmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}
4099
/* Switch non-stop mode on or off per NONSTOP.  Always succeeds
   (returns 0).  */

static int
linux_start_non_stop (int nonstop)
{
  /* Register or unregister from event-loop accordingly.  */
  linux_async (nonstop);
  return 0;
}
4107
/* The Linux target supports debugging multiple processes at once.  */

static int
linux_supports_multi_process (void)
{
  return 1;
}
4113
4114
/* Enumerate spufs IDs for process PID.  Walks /proc/PID/fd looking
   for open descriptors that are directories on an spufs filesystem,
   and packs each such fd number as a 4-byte value into BUF, honouring
   the OFFSET/LEN window.  Returns the number of bytes stored in BUF,
   or -1 if /proc/PID/fd cannot be opened.  */

static int
spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
{
  int pos = 0;
  int written = 0;
  char path[128];
  DIR *dir;
  struct dirent *entry;

  sprintf (path, "/proc/%ld/fd", pid);
  dir = opendir (path);
  if (!dir)
    return -1;

  rewinddir (dir);
  while ((entry = readdir (dir)) != NULL)
    {
      struct stat st;
      struct statfs stfs;
      int fd;

      /* atoi is 0 for "." and ".." (and, incidentally, for fd 0,
	 which is therefore never reported).  */
      fd = atoi (entry->d_name);
      if (!fd)
	continue;

      /* SPU contexts appear as directories on an spufs mount.  */
      sprintf (path, "/proc/%ld/fd/%d", pid, fd);
      if (stat (path, &st) != 0)
	continue;
      if (!S_ISDIR (st.st_mode))
	continue;

      if (statfs (path, &stfs) != 0)
	continue;
      if (stfs.f_type != SPUFS_MAGIC)
	continue;

      /* Store only the IDs that fall inside the requested window;
	 POS tracks the position in the full (unwindowed) list.  */
      if (pos >= offset && pos + 4 <= offset + len)
	{
	  *(unsigned int *)(buf + pos - offset) = fd;
	  written += 4;
	}
      pos += 4;
    }

  closedir (dir);
  return written;
}
4163
/* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
   object type, using the /proc file system.  An empty ANNEX means
   "enumerate the SPU context IDs" (read-only); otherwise ANNEX names
   an entry under /proc/PID/fd to read or write at OFFSET.  Returns
   the number of bytes transferred, or -1 on error.  */

static int
linux_qxfer_spu (const char *annex, unsigned char *readbuf,
		 unsigned const char *writebuf,
		 CORE_ADDR offset, int len)
{
  long pid = lwpid_of (get_thread_lwp (current_inferior));
  char buf[128];
  int fd = 0;
  int ret = 0;

  if (!writebuf && !readbuf)
    return -1;

  if (!*annex)
    {
      if (!readbuf)
	return -1;
      else
	return spu_enumerate_spu_ids (pid, readbuf, offset, len);
    }

  sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
  fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
  if (fd <= 0)
    return -1;

  if (offset != 0
      && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
    {
      close (fd);
      return 0;
    }

  if (writebuf)
    ret = write (fd, writebuf, (size_t) len);
  else
    ret = read (fd, readbuf, (size_t) len);

  close (fd);
  return ret;
}
4207
4208 static int
4209 linux_core_of_thread (ptid_t ptid)
4210 {
4211 char filename[sizeof ("/proc//task//stat")
4212 + 2 * 20 /* decimal digits for 2 numbers, max 2^64 bit each */
4213 + 1];
4214 FILE *f;
4215 char *content = NULL;
4216 char *p;
4217 char *ts = 0;
4218 int content_read = 0;
4219 int i;
4220 int core;
4221
4222 sprintf (filename, "/proc/%d/task/%ld/stat",
4223 ptid_get_pid (ptid), ptid_get_lwp (ptid));
4224 f = fopen (filename, "r");
4225 if (!f)
4226 return -1;
4227
4228 for (;;)
4229 {
4230 int n;
4231 content = realloc (content, content_read + 1024);
4232 n = fread (content + content_read, 1, 1024, f);
4233 content_read += n;
4234 if (n < 1024)
4235 {
4236 content[content_read] = '\0';
4237 break;
4238 }
4239 }
4240
4241 p = strchr (content, '(');
4242 p = strchr (p, ')') + 2; /* skip ")" and a whitespace. */
4243
4244 p = strtok_r (p, " ", &ts);
4245 for (i = 0; i != 36; ++i)
4246 p = strtok_r (NULL, " ", &ts);
4247
4248 if (sscanf (p, "%d", &core) == 0)
4249 core = -1;
4250
4251 free (content);
4252 fclose (f);
4253
4254 return core;
4255 }
4256
/* Forward a qSupported QUERY to the low target's hook, when one is
   registered.  */

static void
linux_process_qsupported (const char *query)
{
  if (the_low_target.process_qsupported != NULL)
    the_low_target.process_qsupported (query);
}
4263
4264 static int
4265 linux_supports_tracepoints (void)
4266 {
4267 if (*the_low_target.supports_tracepoints == NULL)
4268 return 0;
4269
4270 return (*the_low_target.supports_tracepoints) ();
4271 }
4272
4273 static CORE_ADDR
4274 linux_read_pc (struct regcache *regcache)
4275 {
4276 if (the_low_target.get_pc == NULL)
4277 return 0;
4278
4279 return (*the_low_target.get_pc) (regcache);
4280 }
4281
/* Write PC into REGCACHE via the low target's set_pc hook, which must
   be provided.  */

static void
linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
{
  gdb_assert (the_low_target.set_pc != NULL);

  (*the_low_target.set_pc) (regcache, pc);
}
4289
/* Report whether THREAD's LWP is currently stopped.  */

static int
linux_thread_stopped (struct thread_info *thread)
{
  return get_thread_lwp (thread)->stopped;
}
4295
/* This exposes stop-all-threads functionality to other modules.  */

static void
linux_pause_all (void)
{
  stop_all_lwps ();
}
4303
/* The Linux target_ops vector.  Entries are positional and must stay
   in the order declared by struct target_ops (see target.h); NULL
   marks an operation this target does not implement.  */

static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_mourn,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_insert_point,
  linux_remove_point,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  /* read_offsets: only meaningful on no-MMU targets.  */
  linux_read_offsets,
#else
  NULL,
#endif
#ifdef USE_THREAD_DB
  /* get_tls_address: requires libthread_db.  */
  thread_db_get_tls_address,
#else
  NULL,
#endif
  linux_qxfer_spu,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,
#endif
  linux_core_of_thread,
  linux_process_qsupported,
  linux_supports_tracepoints,
  linux_read_pc,
  linux_write_pc,
  linux_thread_stopped,
  linux_pause_all,
  NULL, /* get_tib_address (Windows OS specific). */
};
4357
/* Ignore the thread-library cancel signal so it is delivered straight
   to the inferior's threads rather than stopping gdbserver.  Declared
   with (void): the old empty parameter list was a pre-C23 "unspecified
   arguments" declaration, unlike every other definition in this
   file.  */

static void
linux_init_signals (void)
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does. */
  signal (__SIGRTMIN+1, SIG_IGN);
#endif
}
4367
/* One-time initialization of the Linux low-level target: register the
   target vector and breakpoint data, set up signal handling, and
   probe the kernel's ptrace fork-tracing support.  */

void
initialize_low (void)
{
  struct sigaction sigchld_action;
  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (&linux_target_ops);
  set_breakpoint_data (the_low_target.breakpoint,
		       the_low_target.breakpoint_len);
  linux_init_signals ();
  linux_test_for_tracefork ();
#ifdef HAVE_LINUX_REGSETS
  /* Count the target's regsets and allocate one "disabled" flag byte
     per regset (see the disabled_regsets checks above).  */
  for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
    ;
  disabled_regsets = xmalloc (num_regsets);
#endif

  /* Install the SIGCHLD handler; SA_RESTART so interrupted slow
     syscalls are transparently restarted.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);
}
4388 }