Add x86 AVX support to gdbserver.
gdb/gdbserver/linux-low.c

/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
   2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"

#include <sys/wait.h>
#include <stdio.h>
#include <sys/param.h>
#include <sys/ptrace.h>
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifndef PTRACE_GETSIGINFO
# define PTRACE_GETSIGINFO 0x4202
# define PTRACE_SETSIGINFO 0x4203
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* If the system headers did not provide the constants, hard-code the normal
   values.  */
#ifndef PTRACE_EVENT_FORK

#define PTRACE_SETOPTIONS 0x4200
#define PTRACE_GETEVENTMSG 0x4201

/* Options set using PTRACE_SETOPTIONS.  */
#define PTRACE_O_TRACESYSGOOD 0x00000001
#define PTRACE_O_TRACEFORK 0x00000002
#define PTRACE_O_TRACEVFORK 0x00000004
#define PTRACE_O_TRACECLONE 0x00000008
#define PTRACE_O_TRACEEXEC 0x00000010
#define PTRACE_O_TRACEVFORKDONE 0x00000020
#define PTRACE_O_TRACEEXIT 0x00000040

/* Wait extended result codes for the above trace options.  */
#define PTRACE_EVENT_FORK 1
#define PTRACE_EVENT_VFORK 2
#define PTRACE_EVENT_CLONE 3
#define PTRACE_EVENT_EXEC 4
#define PTRACE_EVENT_VFORK_DONE 5
#define PTRACE_EVENT_EXIT 6

#endif /* PTRACE_EVENT_FORK */
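
/* A note on how these events arrive (a sketch of the ptrace
   semantics this file relies on): with PTRACE_O_TRACECLONE set, a
   clone in the tracee reports a SIGTRAP stop whose wait status
   carries the event code in the high bits, so

     WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
       && (status >> 16) == PTRACE_EVENT_CLONE

   identifies a clone event; handle_extended_wait below decodes the
   status exactly this way.  */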

/* We can't always assume that this flag is available, but all systems
   with the ptrace event handlers also have __WALL, so it's safe to use
   in some contexts.  */
#ifndef __WALL
#define __WALL 0x40000000 /* Wait for any child.  */
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif
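
/* Worked example of the W_STOPCODE fallback: for SIGSTOP (19 on
   Linux), W_STOPCODE (SIGSTOP) == (19 << 8) | 0x7f == 0x137f, which
   WIFSTOPPED recognizes as a stop and WSTOPSIG decodes back to 19.
   handle_extended_wait uses it to synthesize the status of a new
   clone child that has not been waited for yet.  */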

#ifdef __UCLIBC__
#if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
#define HAS_NOMMU
#endif
#endif

/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
   representation of the thread ID.

   ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
   the same as the LWP ID.

   ``all_processes'' is keyed by the "overall process ID", which
   GNU/Linux calls tgid, "thread group ID".  */

struct inferior_list all_lwps;

/* A list of all unknown processes which receive stop signals.  Some other
   process will presumably claim each of these as forked children
   momentarily.  */

struct inferior_list stopped_pids;

/* FIXME this is a bit of a hack, and could be removed.  */
int stopping_threads;

/* FIXME make into a target method?  */
int using_threads = 1;

/* This flag is true iff we've just created or attached to our first
   inferior but it has not stopped yet.  As soon as it does, we need
   to call the low target's arch_setup callback.  Doing this only on
   the first inferior avoids reinitializing the architecture on every
   inferior, and avoids messing with the register caches of the
   already running inferiors.  NOTE: this assumes all inferiors under
   control of gdbserver have the same architecture.  */
static int new_inferior;

static void linux_resume_one_lwp (struct lwp_info *lwp,
                                  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (void);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static void *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int linux_core_of_thread (ptid_t ptid);
static void proceed_all_lwps (void);
static void unstop_all_lwps (struct lwp_info *except);
static int finish_step_over (struct lwp_info *lwp);
static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}
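
/* A design note, inferred from the callback's use elsewhere in this
   file: targets that cannot hardware single-step supply
   breakpoint_reinsert_addr instead, which tells gdbserver where
   execution will resume so it can plant a temporary (reinsert)
   breakpoint there - software single-step.  Hence the two
   capabilities are treated as mutually exclusive in the test above.  */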

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

#define PTRACE_ARG3_TYPE void *
#define PTRACE_ARG4_TYPE void *
#define PTRACE_XFER_TYPE long

#ifdef HAVE_LINUX_REGSETS
static char *disabled_regsets;
static int num_regsets;
#endif

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)
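
/* A note on the pattern here (a sketch of the intent, not new
   behavior): this is the classic self-pipe trick.  async_file_mark
   below writes a byte into linux_event_pipe[1] so the event loop,
   which has registered linux_event_pipe[0] as a waitable file,
   wakes up and calls back into linux_wait; async_file_flush drains
   the read end so stale wakeups are discarded first.  */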

static void send_sigstop (struct inferior_list_entry *entry);
static void wait_for_sigstop (struct inferior_list_entry *entry);

/* Accepts an integer PID; Returns a string representing a file that
   can be opened to get info for the child process.
   Space for the result is malloc'd, caller must free.  */

char *
linux_child_pid_to_exec_file (int pid)
{
  char *name1, *name2;

  name1 = xmalloc (MAXPATHLEN);
  name2 = xmalloc (MAXPATHLEN);
  memset (name2, 0, MAXPATHLEN);

  sprintf (name1, "/proc/%d/exe", pid);
  if (readlink (name1, name2, MAXPATHLEN) > 0)
    {
      free (name1);
      return name2;
    }
  else
    {
      free (name2);
      return name1;
    }
}
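
/* A note on the memset above: readlink does not NUL-terminate its
   result, so NAME2 must be zeroed beforehand for the returned string
   to be properly terminated.  If the link cannot be read, the
   "/proc/PID/exe" path itself is returned as a fallback.  */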

/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header)
{
  return (header->e_ident[EI_MAG0] == ELFMAG0
          && header->e_ident[EI_MAG1] == ELFMAG1
          && header->e_ident[EI_MAG2] == ELFMAG2
          && header->e_ident[EI_MAG3] == ELFMAG3
          && header->e_ident[EI_CLASS] == ELFCLASS64);
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

int
elf_64_file_p (const char *file)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header);
}

static void
delete_lwp (struct lwp_info *lwp)
{
  remove_thread (get_lwp_thread (lwp));
  remove_inferior (&all_lwps, &lwp->head);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  /* Is this the first process?  If so, then set the arch.  */
  if (all_processes.head == NULL)
    new_inferior = 1;

  proc = add_process (pid, attached);
  proc->private = xcalloc (1, sizeof (*proc->private));

  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}

/* Remove a process from the common process list,
   also freeing all private data.  */

static void
linux_remove_process (struct process_info *process)
{
  struct process_info_private *priv = process->private;

  free (priv->arch_private);
  free (priv);
  remove_process (process);
}

/* Wrapper function for waitpid which handles EINTR, and emulates
   __WALL for systems where that is not available.  */

static int
my_waitpid (int pid, int *status, int flags)
{
  int ret, out_errno;

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);

  if (flags & __WALL)
    {
      sigset_t block_mask, org_mask, wake_mask;
      int wnohang;

      wnohang = (flags & WNOHANG) != 0;
      flags &= ~(__WALL | __WCLONE);
      flags |= WNOHANG;

      /* Block all signals while here.  This avoids knowing about
         LinuxThread's signals.  */
      sigfillset (&block_mask);
      sigprocmask (SIG_BLOCK, &block_mask, &org_mask);

      /* ... except during the sigsuspend below.  */
      sigemptyset (&wake_mask);

      while (1)
        {
          /* Since all signals are blocked, there's no need to check
             for EINTR here.  */
          ret = waitpid (pid, status, flags);
          out_errno = errno;

          if (ret == -1 && out_errno != ECHILD)
            break;
          else if (ret > 0)
            break;

          if (flags & __WCLONE)
            {
              /* We've tried both flavors now.  If WNOHANG is set,
                 there's nothing else to do, just bail out.  */
              if (wnohang)
                break;

              if (debug_threads)
                fprintf (stderr, "blocking\n");

              /* Block waiting for signals.  */
              sigsuspend (&wake_mask);
            }

          flags ^= __WCLONE;
        }

      sigprocmask (SIG_SETMASK, &org_mask, NULL);
    }
  else
    {
      do
        ret = waitpid (pid, status, flags);
      while (ret == -1 && errno == EINTR);
      out_errno = errno;
    }

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
             pid, flags, status ? *status : -1, ret);

  errno = out_errno;
  return ret;
}
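
/* Sketch of the __WALL emulation above: clone children are only
   reported by waitpid when __WCLONE is given, and fork children only
   when it is not, so the loop polls both flavors with WNOHANG,
   toggling __WCLONE on each pass, and blocks in sigsuspend after an
   empty round so that any signal (e.g. SIGCHLD) wakes it up to poll
   again.  */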

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = wstat >> 16;
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status = W_STOPCODE (SIGSTOP);

      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (! pull_pid_from_list (&stopped_pids, new_pid))
        {
          /* The new child has a pending SIGSTOP.  We can't affect it until it
             hits the SIGSTOP, but we're already attached.  */

          ret = my_waitpid (new_pid, &status, __WALL);

          if (ret == -1)
            perror_with_name ("waiting for new child");
          else if (ret != new_pid)
            warning ("wait returned unexpected PID %d", ret);
          else if (!WIFSTOPPED (status))
            warning ("wait returned unexpected status 0x%x", status);
        }

      ptrace (PTRACE_SETOPTIONS, new_pid, 0,
              (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);

      ptid = ptid_build (pid_of (event_child), new_pid, 0);
      new_lwp = (struct lwp_info *) add_lwp (ptid);
      add_thread (ptid, new_lwp);

      /* Either we're going to immediately resume the new thread
         or leave it stopped.  linux_resume_one_lwp is a nop if it
         thinks the thread is currently running, so set this first
         before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
         we might get another signal delivered to the group first.
         If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
        {
          if (stopping_threads)
            new_lwp->stop_pc = get_stop_pc (new_lwp);
          else
            linux_resume_one_lwp (new_lwp, 0, 0, NULL);
        }
      else
        {
          new_lwp->stop_expected = 1;

          if (stopping_threads)
            {
              new_lwp->stop_pc = get_stop_pc (new_lwp);
              new_lwp->status_pending_p = 1;
              new_lwp->status_pending = status;
            }
          else
            /* Pass the signal on.  This is what GDB does - except
               shouldn't we really report it instead?  */
            linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
        }

      /* Always resume the current thread.  If we are stopping
         threads, it will have a pending SIGSTOP; we may as well
         collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}
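
/* A note on the dance above: the initial stop of a new clone child
   can reach waitpid either before or after the parent's
   PTRACE_EVENT_CLONE stop.  If it arrived first, linux_wait_for_lwp
   below has already stashed the unknown pid in STOPPED_PIDS and
   pull_pid_from_list finds it; otherwise my_waitpid blocks until the
   child's initial SIGSTOP shows up.  */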

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_inferior, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    fprintf (stderr, "pc is 0x%lx\n", (long) pc);

  current_inferior = saved_inferior;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:
   If we were single-stepping this process using PTRACE_SINGLESTEP,
   we will get only the one SIGTRAP (even if the instruction we
   stepped over was a breakpoint).  The value of $eip will be the
   next instruction.
   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If we cancel the SIGTRAP, we
   must resume at the decremented PC.

   (Presumably, not yet tested) On a non-decr_pc_after_break machine
   with hardware or kernel single-step:
   If we single-step over a breakpoint instruction, our PC will
   point at the following instruction.  If we continue and hit a
   breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static CORE_ADDR
get_stop_pc (struct lwp_info *lwp)
{
  CORE_ADDR stop_pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  stop_pc = get_pc (lwp);

  if (WSTOPSIG (lwp->last_status) == SIGTRAP
      && !lwp->stepping
      && !lwp->stopped_by_watchpoint
      && lwp->last_status >> 16 == 0)
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}
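
/* Worked example of the adjustment above, assuming an i386-style
   target where the breakpoint instruction is the one-byte int3 and
   decr_pc_after_break is 1: if a breakpoint is planted at 0x8048000
   and the inferior runs into it under PTRACE_CONT, the kernel
   reports a SIGTRAP with $eip at 0x8048001; get_stop_pc then yields
   0x8048000, the address GDB expects.  No adjustment is made after
   single-stepping, for watchpoint traps, or for extended events
   (status >> 16 != 0), which is what the three extra conditions
   check.  */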

static void *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  lwp->head.id = ptid;

  lwp->last_resume_kind = resume_continue;

  if (the_low_target.new_thread != NULL)
    lwp->arch_private = the_low_target.new_thread ();

  add_inferior_to_list (&all_lwps, &lwp->head);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      ptrace (PTRACE_TRACEME, 0, 0, 0);

#ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      execv (program, allargs);
      if (errno == ENOENT)
        execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
               strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  add_thread (ptid, new_lwp);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

/* Attach to an inferior process.  */

static void
linux_attach_lwp_1 (unsigned long lwpid, int initial)
{
  ptid_t ptid;
  struct lwp_info *new_lwp;

  if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
    {
      if (!initial)
        {
          /* If we fail to attach to an LWP, just warn.  */
          fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
                   strerror (errno), errno);
          fflush (stderr);
          return;
        }
      else
        /* If we fail to attach to a process, report an error.  */
        error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid,
               strerror (errno), errno);
    }

  if (initial)
    /* NOTE/FIXME: This lwp might have not been the tgid.  */
    ptid = ptid_build (lwpid, lwpid, 0);
  else
    {
      /* Note that extracting the pid from the current inferior is
         safe, since we're always called in the context of the same
         process as this new thread.  */
      int pid = pid_of (get_thread_lwp (current_inferior));
      ptid = ptid_build (pid, lwpid, 0);
    }

  new_lwp = (struct lwp_info *) add_lwp (ptid);
  add_thread (ptid, new_lwp);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
        of a new thread that is being created.
        In this case we should ignore that SIGSTOP and resume the
        process.  This is handled below by setting stop_expected = 1,
        and the fact that add_lwp sets last_resume_kind ==
        resume_continue.

     2) This is the first thread (the process thread), and we're attaching
        to it via attach_inferior.
        In this case we want the process thread to stop.
        This is handled by having linux_attach set last_resume_kind ==
        resume_stop after we return.
        ??? If the process already has several threads we leave the other
        threads running.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
        existing threads.
        In this case we want the thread to stop.
        FIXME: This case is currently not properly handled.
        We should wait for the SIGSTOP but don't.  Things work apparently
        because enough time passes between when we ptrace (ATTACH) and when
        gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;
}

void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}

int
linux_attach (unsigned long pid)
{
  struct lwp_info *lwp;

  linux_attach_lwp_1 (pid, 1);

  linux_add_process (pid, 1);

  if (!non_stop)
    {
      /* Don't ignore the initial SIGSTOP if we just attached to this
         process.  It will be collected by wait shortly.  */
      lwp = (struct lwp_info *) find_inferior_id (&all_lwps,
                                                  ptid_build (pid, pid, 0));
      lwp->last_resume_kind = resume_stop;
    }

  return 0;
}

struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
        return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (struct thread_info *thread)
{
  ptid_t ptid = ((struct inferior_list_entry *) thread)->id;
  int pid = ptid_get_pid (ptid);
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
                         second_thread_of_pid_p, &counter) == NULL);
}

/* Kill the inferior lwp.  */

static int
linux_kill_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int wstat;
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (lwp) == pid)
    {
      if (debug_threads)
        fprintf (stderr, "lkop: is last of process %s\n",
                 target_pid_to_str (entry->id));
      return 0;
    }

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  if (!lwp->stopped)
    send_sigstop (&lwp->head);

  do
    {
      ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (pid > 0 && WIFSTOPPED (wstat));

  return 0;
}

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;
  struct thread_info *thread;
  int wstat;
  int lwpid;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  find_inferior (&all_threads, linux_kill_one_lwp, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));
  thread = get_lwp_thread (lwp);

  if (debug_threads)
    fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
             lwpid_of (lwp), pid);

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  if (!lwp->stopped)
    send_sigstop (&lwp->head);

  do
    {
      ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (lwpid > 0 && WIFSTOPPED (wstat));

#ifdef USE_THREAD_DB
  thread_db_free (process, 0);
#endif
  delete_lwp (lwp);
  linux_remove_process (process);
  return 0;
}

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If we're detaching from a running inferior, make sure it is
     stopped first, as PTRACE_DETACH will not work otherwise.  */
  if (!lwp->stopped)
    {
      int lwpid = lwpid_of (lwp);

      stopping_threads = 1;
      send_sigstop (&lwp->head);

      /* If this detects a new thread through a clone event, the new
         thread is appended to the end of the lwp list, so we'll
         eventually detach from it.  */
      wait_for_sigstop (&lwp->head);
      stopping_threads = 0;

      /* If LWP exits while we're trying to stop it, there's nothing
         left to do.  */
      lwp = find_lwp_pid (pid_to_ptid (lwpid));
      if (lwp == NULL)
        return 0;
    }

  /* If this process is stopped but is expecting a SIGSTOP, then make
     sure we take care of that now.  This isn't absolutely guaranteed
     to collect the SIGSTOP, but is fairly likely to.  */
  if (lwp->stop_expected)
    {
      int wstat;
      /* Clear stop_expected, so that the SIGSTOP will be reported.  */
      lwp->stop_expected = 0;
      if (lwp->stopped)
        linux_resume_one_lwp (lwp, 0, 0, NULL);
      linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
                           get_lwp_thread (lwp));

  /* Finally, let it resume.  */
  ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0);

  delete_lwp (lwp);
  return 0;
}

static int
any_thread_of (struct inferior_list_entry *entry, void *args)
{
  int *pid_p = args;

  if (ptid_get_pid (entry->id) == *pid_p)
    return 1;

  return 0;
}

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

#ifdef USE_THREAD_DB
  thread_db_free (process, 1);
#endif

  current_inferior =
    (struct thread_info *) find_inferior (&all_threads, any_thread_of, &pid);

  delete_all_breakpoints ();
  find_inferior (&all_threads, linux_detach_one_lwp, &pid);
  linux_remove_process (process);
  return 0;
}

static void
linux_join (int pid)
{
  int status, ret;
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return;

  do
    {
      ret = my_waitpid (pid, &status, 0);
      if (WIFEXITED (status) || WIFSIGNALED (status))
        break;
    } while (ret != -1 || errno != ECHILD);
}
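
/* A note on linux_join: it keeps calling waitpid until the process
   reports an exit (normal or by signal), or until ECHILD says there
   is no such child left, so the caller knows the process has really
   been reaped and will not linger as a zombie.  */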

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  ptid_t ptid = * (ptid_t *) arg;
  struct thread_info *thread = get_lwp_thread (lwp);

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
    return 0;

  thread = get_lwp_thread (lwp);

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (lwp->last_resume_kind == resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_STOPPED)
    return 0;

  return lwp->status_pending_p;
}

static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  return (struct lwp_info *) find_inferior (&all_lwps, same_lwp, &ptid);
}

static struct lwp_info *
linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
{
  int ret;
  int to_wait_for = -1;
  struct lwp_info *child = NULL;

  if (debug_threads)
    fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));

  if (ptid_equal (ptid, minus_one_ptid))
    to_wait_for = -1;                   /* any child */
  else
    to_wait_for = ptid_get_lwp (ptid);  /* this lwp only */

  options |= __WALL;

retry:

  ret = my_waitpid (to_wait_for, wstatp, options);
  if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
    return NULL;
  else if (ret == -1)
    perror_with_name ("waitpid");

  if (debug_threads
      && (!WIFSTOPPED (*wstatp)
          || (WSTOPSIG (*wstatp) != 32
              && WSTOPSIG (*wstatp) != 33)))
    fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);

  child = find_lwp_pid (pid_to_ptid (ret));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
       was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (*wstatp))
    {
      add_pid_to_list (&stopped_pids, ret);
      goto retry;
    }
  else if (child == NULL)
    goto retry;

  child->stopped = 1;

  child->last_status = *wstatp;

  /* Architecture-specific setup after inferior is running.
     This needs to happen after we have attached to the inferior
     and it is stopped for the first time, but before we access
     any inferior registers.  */
  if (new_inferior)
    {
      the_low_target.arch_setup ();
#ifdef HAVE_LINUX_REGSETS
      memset (disabled_regsets, 0, num_regsets);
#endif
      new_inferior = 0;
    }

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, it's possible that the way to know which watched
     address trapped, is to check the register that is used to select
     which address to watch.  Problem is, between setting the
     watchpoint and reading back which data address trapped, the user
     may change the set of watchpoints, and, as a consequence, GDB
     changes the debug registers in the inferior.  To avoid reading
     back a stale stopped-data-address when that happens, we cache in
     LP the fact that a watchpoint trapped, and the corresponding data
     address, as soon as we see CHILD stop with a SIGTRAP.  If GDB
     changes the debug registers meanwhile, we have the cached data we
     can rely on.  */

  if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
    {
      if (the_low_target.stopped_by_watchpoint == NULL)
        {
          child->stopped_by_watchpoint = 0;
        }
      else
        {
          struct thread_info *saved_inferior;

          saved_inferior = current_inferior;
          current_inferior = get_lwp_thread (child);

          child->stopped_by_watchpoint
            = the_low_target.stopped_by_watchpoint ();

          if (child->stopped_by_watchpoint)
            {
              if (the_low_target.stopped_data_address != NULL)
                child->stopped_data_address
                  = the_low_target.stopped_data_address ();
              else
                child->stopped_data_address = 0;
            }

          current_inferior = saved_inferior;
        }
    }

  /* Store the STOP_PC, with adjustment applied.  This depends on the
     architecture being defined already (so that CHILD has a valid
     regcache), and on LAST_STATUS being set (to check for SIGTRAP or
     not).  */
  if (WIFSTOPPED (*wstatp))
    child->stop_pc = get_stop_pc (child);

  if (debug_threads
      && WIFSTOPPED (*wstatp)
      && the_low_target.get_pc != NULL)
    {
      struct thread_info *saved_inferior = current_inferior;
      struct regcache *regcache;
      CORE_ADDR pc;

      current_inferior = get_lwp_thread (child);
      regcache = get_thread_regcache (current_inferior, 1);
      pc = (*the_low_target.get_pc) (regcache);
      fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
      current_inferior = saved_inferior;
    }

  return child;
}

/* Arrange for a breakpoint to be hit again later.  We don't keep the
   SIGTRAP status and don't forward the SIGTRAP signal to the LWP.  We
   will handle the current event, eventually we will resume this LWP,
   and this breakpoint will trap again.  */

static int
cancel_breakpoint (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;

  /* There's nothing to do if we don't support breakpoints.  */
  if (!supports_breakpoints ())
    return 0;

  /* breakpoint_at reads from current inferior.  */
  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
    {
      if (debug_threads)
        fprintf (stderr,
                 "CB: Push back breakpoint for %s\n",
                 target_pid_to_str (ptid_of (lwp)));

      /* Back up the PC if necessary.  */
      if (the_low_target.decr_pc_after_break)
        {
          struct regcache *regcache
            = get_thread_regcache (current_inferior, 1);
          (*the_low_target.set_pc) (regcache, lwp->stop_pc);
        }

      current_inferior = saved_inferior;
      return 1;
    }
  else
    {
      if (debug_threads)
        fprintf (stderr,
                 "CB: No breakpoint found at %s for [%s]\n",
                 paddress (lwp->stop_pc),
                 target_pid_to_str (ptid_of (lwp)));
    }

  current_inferior = saved_inferior;
  return 0;
}

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* Wait for an event from child PID.  If PID is -1, wait for any
   child.  Store the stop status through the status pointer WSTAT.
   OPTIONS is passed to the waitpid call.  Return 0 if no child stop
   event was found and OPTIONS contains WNOHANG.  Return the PID of
   the stopped child otherwise.  */

static int
linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options)
{
  struct lwp_info *event_child, *requested_child;

  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (ptid_equal (ptid, minus_one_ptid)
      || ptid_equal (pid_to_ptid (ptid_get_pid (ptid)), ptid))
    {
      event_child = (struct lwp_info *)
        find_inferior (&all_lwps, status_pending_p_callback, &ptid);
      if (debug_threads && event_child)
        fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
    }
  else
    {
      requested_child = find_lwp_pid (ptid);

      if (requested_child->status_pending_p)
        event_child = requested_child;
    }

  if (event_child != NULL)
    {
      if (debug_threads)
        fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
                 lwpid_of (event_child), event_child->status_pending);
      *wstat = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      current_inferior = get_lwp_thread (event_child);
      return lwpid_of (event_child);
    }

  /* We only enter this loop if no process has a pending wait status.  Thus
     any action taken in response to a wait status inside this loop is
     responding as soon as we detect the status, not after any pending
     events.  */
  while (1)
    {
      event_child = linux_wait_for_lwp (ptid, wstat, options);

      if ((options & WNOHANG) && event_child == NULL)
        {
          if (debug_threads)
            fprintf (stderr, "WNOHANG set, no event found\n");
          return 0;
        }

      if (event_child == NULL)
        error ("event from unknown child");

      current_inferior = get_lwp_thread (event_child);

      /* Check for thread exit.  */
      if (! WIFSTOPPED (*wstat))
        {
          if (debug_threads)
            fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));

          /* If the last thread is exiting, just return.  */
          if (last_thread_of_process_p (current_inferior))
            {
              if (debug_threads)
                fprintf (stderr, "LWP %ld is last lwp of process\n",
                         lwpid_of (event_child));
              return lwpid_of (event_child);
            }

          if (!non_stop)
            {
              current_inferior = (struct thread_info *) all_threads.head;
              if (debug_threads)
                fprintf (stderr, "Current inferior is now %ld\n",
                         lwpid_of (get_thread_lwp (current_inferior)));
            }
          else
            {
              current_inferior = NULL;
              if (debug_threads)
                fprintf (stderr, "Current inferior is now <NULL>\n");
            }

          /* If we were waiting for this particular child to do something...
             well, it did something.  */
          if (requested_child != NULL)
            {
              int lwpid = lwpid_of (event_child);

              /* Cancel the step-over operation --- the thread that
                 started it is gone.  */
              if (finish_step_over (event_child))
                unstop_all_lwps (event_child);
              delete_lwp (event_child);
              return lwpid;
            }

          delete_lwp (event_child);

          /* Wait for a more interesting event.  */
          continue;
        }

      if (event_child->must_set_ptrace_flags)
        {
          ptrace (PTRACE_SETOPTIONS, lwpid_of (event_child),
                  0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
          event_child->must_set_ptrace_flags = 0;
        }

      if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
          && *wstat >> 16 != 0)
        {
          handle_extended_wait (event_child, *wstat);
          continue;
        }

      /* If GDB is not interested in this signal, don't stop other
         threads, and don't report it to GDB.  Just resume the
         inferior right away.  We do this for threading-related
         signals as well as any that GDB specifically requested we
         ignore.  But never ignore SIGSTOP if we sent it ourselves,
         and do not ignore signals when stepping - they may require
         special handling to skip the signal handler.  */
      /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
         thread library?  */
      if (WIFSTOPPED (*wstat)
          && !event_child->stepping
          && (
#if defined (USE_THREAD_DB) && defined (__SIGRTMIN)
              (current_process ()->private->thread_db != NULL
               && (WSTOPSIG (*wstat) == __SIGRTMIN
                   || WSTOPSIG (*wstat) == __SIGRTMIN + 1))
              ||
#endif
              (pass_signals[target_signal_from_host (WSTOPSIG (*wstat))]
               && !(WSTOPSIG (*wstat) == SIGSTOP
                    && event_child->stop_expected))))
        {
          siginfo_t info, *info_p;

          if (debug_threads)
            fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
                     WSTOPSIG (*wstat), lwpid_of (event_child));

          if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
            info_p = &info;
          else
            info_p = NULL;
          linux_resume_one_lwp (event_child, event_child->stepping,
                                WSTOPSIG (*wstat), info_p);
          continue;
        }

      if (WIFSTOPPED (*wstat)
          && WSTOPSIG (*wstat) == SIGSTOP
          && event_child->stop_expected)
        {
          int should_stop;

          if (debug_threads)
            fprintf (stderr, "Expected stop.\n");
          event_child->stop_expected = 0;

          should_stop = (event_child->last_resume_kind == resume_stop
                         || stopping_threads);

          if (!should_stop)
            {
              linux_resume_one_lwp (event_child,
                                    event_child->stepping, 0, NULL);
              continue;
            }
        }

      return lwpid_of (event_child);
    }

  /* NOTREACHED */
  return 0;
}

static int
linux_wait_for_event (ptid_t ptid, int *wstat, int options)
{
  ptid_t wait_ptid;

  if (ptid_is_pid (ptid))
    {
      /* A request to wait for a specific tgid.  This is not possible
         with waitpid, so instead, we wait for any child, and leave
         children we're not interested in right now with a pending
         status to report later.  */
      wait_ptid = minus_one_ptid;
    }
  else
    wait_ptid = ptid;

  while (1)
    {
      int event_pid;

      event_pid = linux_wait_for_event_1 (wait_ptid, wstat, options);

      if (event_pid > 0
          && ptid_is_pid (ptid) && ptid_get_pid (ptid) != event_pid)
        {
          struct lwp_info *event_child = find_lwp_pid (pid_to_ptid (event_pid));

          if (! WIFSTOPPED (*wstat))
            mark_lwp_dead (event_child, *wstat);
          else
            {
              event_child->status_pending_p = 1;
              event_child->status_pending = *wstat;
            }
        }
      else
        return event_pid;
    }
}


/* Count the LWP's that have had events.  */

static int
count_events_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  int *count = data;

  gdb_assert (count != NULL);

  /* Count only resumed LWPs that have a SIGTRAP event pending that
     should be reported to GDB.  */
  if (get_lwp_thread (lp)->last_status.kind == TARGET_WAITKIND_IGNORE
      && lp->last_resume_kind != resume_stop
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !breakpoint_inserted_here (lp->stop_pc))
    (*count)++;

  return 0;
}

/* Select the LWP (if any) that is currently being single-stepped.  */

static int
select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;

  if (get_lwp_thread (lp)->last_status.kind == TARGET_WAITKIND_IGNORE
      && lp->last_resume_kind == resume_step
      && lp->status_pending_p)
    return 1;
  else
    return 0;
}

/* Select the Nth LWP that has had a SIGTRAP event that should be
   reported to GDB.  */

static int
select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  int *selector = data;

  gdb_assert (selector != NULL);

  /* Select only resumed LWPs that have a SIGTRAP event pending.  */
  if (lp->last_resume_kind != resume_stop
      && get_lwp_thread (lp)->last_status.kind == TARGET_WAITKIND_IGNORE
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !breakpoint_inserted_here (lp->stop_pc))
    if ((*selector)-- == 0)
      return 1;

  return 0;
}

static int
cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct lwp_info *event_lp = data;

  /* Leave the LWP that has been elected to receive a SIGTRAP alone.  */
  if (lp == event_lp)
    return 0;

  /* If a LWP other than the LWP that we're reporting an event for has
     hit a GDB breakpoint (as opposed to some random trap signal),
     then just arrange for it to hit it again later.  We don't keep
     the SIGTRAP status and don't forward the SIGTRAP signal to the
     LWP.  We will handle the current event, eventually we will resume
     all LWPs, and this one will get its breakpoint trap again.

     If we do not do this, then we run the risk that the user will
     delete or disable the breakpoint, but the LWP will have already
     tripped on it.  */

  if (lp->last_resume_kind != resume_stop
      && get_lwp_thread (lp)->last_status.kind == TARGET_WAITKIND_IGNORE
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !lp->stepping
      && !lp->stopped_by_watchpoint
      && cancel_breakpoint (lp))
    /* Throw away the SIGTRAP.  */
    lp->status_pending_p = 0;

  return 0;
}

/* Select one LWP out of those that have events pending.  */

static void
select_event_lwp (struct lwp_info **orig_lp)
{
  int num_events = 0;
  int random_selector;
  struct lwp_info *event_lp;

  /* Give preference to any LWP that is being single-stepped.  */
  event_lp
    = (struct lwp_info *) find_inferior (&all_lwps,
                                         select_singlestep_lwp_callback,
                                         NULL);
  if (event_lp != NULL)
    {
      if (debug_threads)
        fprintf (stderr,
                 "SEL: Select single-step %s\n",
                 target_pid_to_str (ptid_of (event_lp)));
    }
  else
    {
      /* No single-stepping LWP.  Select one at random, out of those
         which have had SIGTRAP events.  */

      /* First see how many SIGTRAP events we have.  */
      find_inferior (&all_lwps, count_events_callback, &num_events);

      /* Now randomly pick a LWP out of those that have had a SIGTRAP.  */
      random_selector = (int)
        ((num_events * (double) rand ()) / (RAND_MAX + 1.0));

      if (debug_threads && num_events > 1)
        fprintf (stderr,
                 "SEL: Found %d SIGTRAP events, selecting #%d\n",
                 num_events, random_selector);

      event_lp = (struct lwp_info *) find_inferior (&all_lwps,
                                                    select_event_lwp_callback,
                                                    &random_selector);
    }

  if (event_lp != NULL)
    {
      /* Switch the event LWP.  */
      *orig_lp = event_lp;
    }
}
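
/* Worked example of the random pick above: with num_events == 3,
   (3 * (double) rand ()) / (RAND_MAX + 1.0) is uniform over
   [0.0, 3.0), so the int cast yields 0, 1 or 2 with (nearly) equal
   probability, and select_event_lwp_callback then counts down to
   that index among the LWPs with a reportable SIGTRAP.  */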

/* Set this inferior LWP's state as "want-stopped".  We won't resume
   this LWP until the client gives us another action for it.  */

static void
gdb_wants_lwp_stopped (struct inferior_list_entry *entry)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lwp);

  /* Most threads are stopped implicitly (all-stop); tag that with
     signal 0.  The thread being explicitly reported stopped to the
     client gets its status fixed up afterwards.  */
  thread->last_status.kind = TARGET_WAITKIND_STOPPED;
  thread->last_status.value.sig = TARGET_SIGNAL_0;

  lwp->last_resume_kind = resume_stop;
}

/* Set all LWP's states as "want-stopped".  */

static void
gdb_wants_all_stopped (void)
{
  for_each_inferior (&all_lwps, gdb_wants_lwp_stopped);
}

/* Wait for process, returns status.  */

static ptid_t
linux_wait_1 (ptid_t ptid,
              struct target_waitstatus *ourstatus, int target_options)
{
  int w;
  struct lwp_info *event_child;
  int options;
  int pid;
  int step_over_finished;
  int bp_explains_trap;
  int maybe_internal_trap;
  int report_to_gdb;

  /* Translate generic target options into linux options.  */
  options = __WALL;
  if (target_options & TARGET_WNOHANG)
    options |= WNOHANG;

retry:
  ourstatus->kind = TARGET_WAITKIND_IGNORE;

  /* If we were only supposed to resume one thread, only wait for
     that thread - if it's still alive.  If it died, however - which
     can happen if we're coming from the thread death case below -
     then we need to make sure we restart the other threads.  We could
     pick a thread at random or restart all; restarting all is less
     arbitrary.  */
  if (!non_stop
      && !ptid_equal (cont_thread, null_ptid)
      && !ptid_equal (cont_thread, minus_one_ptid))
    {
      struct thread_info *thread;

      thread = (struct thread_info *) find_inferior_id (&all_threads,
                                                        cont_thread);

      /* No stepping, no signal - unless one is pending already, of course.  */
      if (thread == NULL)
        {
          struct thread_resume resume_info;
          resume_info.thread = minus_one_ptid;
          resume_info.kind = resume_continue;
          resume_info.sig = 0;
          linux_resume (&resume_info, 1);
        }
      else
        ptid = cont_thread;
    }

  if (ptid_equal (step_over_bkpt, null_ptid))
    pid = linux_wait_for_event (ptid, &w, options);
  else
    {
      if (debug_threads)
        fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
                 target_pid_to_str (step_over_bkpt));
      pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
    }

  if (pid == 0) /* only if TARGET_WNOHANG */
    return null_ptid;

  event_child = get_thread_lwp (current_inferior);

  /* If we are waiting for a particular child, and it exited,
     linux_wait_for_event will return its exit status.  Similarly if
     the last child exited.  If this is not the last child, however,
     do not report it as exited until there is a 'thread exited' response
     available in the remote protocol.  Instead, just wait for another event.
     This should be safe, because if the thread crashed we will already
     have reported the termination signal to GDB; that should stop any
     in-progress stepping operations, etc.

     Report the exit status of the last thread to exit.  This matches
     LinuxThreads' behavior.  */

  if (last_thread_of_process_p (current_inferior))
    {
      if (WIFEXITED (w) || WIFSIGNALED (w))
        {
          int pid = pid_of (event_child);
          struct process_info *process = find_process_pid (pid);

#ifdef USE_THREAD_DB
          thread_db_free (process, 0);
#endif
          delete_lwp (event_child);
          linux_remove_process (process);

          current_inferior = NULL;

          if (WIFEXITED (w))
            {
              ourstatus->kind = TARGET_WAITKIND_EXITED;
              ourstatus->value.integer = WEXITSTATUS (w);

              if (debug_threads)
                fprintf (stderr, "\nChild exited with retcode = %x \n",
                         WEXITSTATUS (w));
            }
          else
            {
              ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
              ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));

              if (debug_threads)
                fprintf (stderr, "\nChild terminated with signal = %x \n",
                         WTERMSIG (w));
            }

          return pid_to_ptid (pid);
        }
    }
  else
    {
      if (!WIFSTOPPED (w))
        goto retry;
    }

  /* If this event was not handled before, and is not a SIGTRAP, we
     report it.  SIGILL and SIGSEGV are also treated as traps in case
     a breakpoint is inserted at the current PC.  If this target does
     not support internal breakpoints at all, we also report the
     SIGTRAP without further processing; it's of no concern to us.  */
  maybe_internal_trap
    = (supports_breakpoints ()
       && (WSTOPSIG (w) == SIGTRAP
           || ((WSTOPSIG (w) == SIGILL
                || WSTOPSIG (w) == SIGSEGV)
               && (*the_low_target.breakpoint_at) (event_child->stop_pc))));

  if (maybe_internal_trap)
    {
      /* Handle anything that requires bookkeeping before deciding to
         report the event or continue waiting.  */

      /* First check if we can explain the SIGTRAP with an internal
         breakpoint, or if we should possibly report the event to GDB.
         Do this before anything that may remove or insert a
         breakpoint.  */
      bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);

      /* We have a SIGTRAP, possibly a step-over dance has just
         finished.  If so, tweak the state machine accordingly,
         reinsert breakpoints and delete any reinsert (software
         single-step) breakpoints.  */
      step_over_finished = finish_step_over (event_child);

      /* Now invoke the callbacks of any internal breakpoints there.  */
      check_breakpoints (event_child->stop_pc);

      if (bp_explains_trap)
        {
          /* If we stepped or ran into an internal breakpoint, we've
             already handled it.  So next time we resume (from this
             PC), we should step over it.  */
          if (debug_threads)
            fprintf (stderr, "Hit a gdbserver breakpoint.\n");

          if (breakpoint_here (event_child->stop_pc))
            event_child->need_step_over = 1;
        }
    }
  else
    {
      /* We have some other signal, possibly a step-over dance was in
         progress, and it should be cancelled too.  */
      step_over_finished = finish_step_over (event_child);
    }

  /* We have all the data we need.  Either report the event to GDB, or
     resume threads and keep waiting for more.  */

  /* Check if GDB would be interested in this event.  If GDB wanted
     this thread to single step, we always want to report the SIGTRAP,
     and let GDB handle it.  Watchpoints should always be reported.
     So should signals we can't explain.  A SIGTRAP we can't explain
     could be a GDB breakpoint --- we may or may not support Z0
     breakpoints.  If we do, we'll be able to handle GDB breakpoints
     on top of internal breakpoints, by handling the internal
     breakpoint and still reporting the event to GDB.  If we don't,
     we're out of luck, GDB won't see the breakpoint hit.  */
  report_to_gdb = (!maybe_internal_trap
                   || event_child->last_resume_kind == resume_step
                   || event_child->stopped_by_watchpoint
                   || (!step_over_finished && !bp_explains_trap)
                   || gdb_breakpoint_here (event_child->stop_pc));

  /* We found no reason GDB would want us to stop.  We either hit one
     of our own breakpoints, or finished an internal step GDB
     shouldn't know about.  */
  if (!report_to_gdb)
    {
      if (debug_threads)
        {
          if (bp_explains_trap)
            fprintf (stderr, "Hit a gdbserver breakpoint.\n");
          if (step_over_finished)
            fprintf (stderr, "Step-over finished.\n");
        }

      /* We're not reporting this breakpoint to GDB, so apply the
         decr_pc_after_break adjustment to the inferior's regcache
         ourselves.  */

      if (the_low_target.set_pc != NULL)
        {
          struct regcache *regcache
            = get_thread_regcache (get_lwp_thread (event_child), 1);
          (*the_low_target.set_pc) (regcache, event_child->stop_pc);
        }

      /* We've finished stepping over a breakpoint.  We've stopped all
         LWPs momentarily except the stepping one.  This is where we
         resume them all again.  We're going to keep waiting, so use
         proceed, which handles stepping over the next breakpoint.  */
      if (debug_threads)
        fprintf (stderr, "proceeding all threads.\n");
      proceed_all_lwps ();
      goto retry;
    }

  if (debug_threads)
    {
      if (event_child->last_resume_kind == resume_step)
        fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
      if (event_child->stopped_by_watchpoint)
        fprintf (stderr, "Stopped by watchpoint.\n");
      if (gdb_breakpoint_here (event_child->stop_pc))
        fprintf (stderr, "Stopped by GDB breakpoint.\n");
      if (debug_threads)
        fprintf (stderr, "Hit a non-gdbserver trap event.\n");
    }

  /* Alright, we're going to report a stop.  */

  if (!non_stop)
    {
      /* In all-stop, stop all threads.  */
      stop_all_lwps ();

      /* If we're not waiting for a specific LWP, choose an event LWP
         from among those that have had events.  Giving equal priority
         to all LWPs that have had events helps prevent
         starvation.  */
      if (ptid_equal (ptid, minus_one_ptid))
        {
          event_child->status_pending_p = 1;
          event_child->status_pending = w;

          select_event_lwp (&event_child);

          event_child->status_pending_p = 0;
          w = event_child->status_pending;
        }

      /* Now that we've selected our final event LWP, cancel any
         breakpoints in other LWPs that have hit a GDB breakpoint.
         See the comment in cancel_breakpoints_callback to find out
         why.  */
      find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
    }
  else
    {
      /* If we just finished a step-over, then all threads had been
         momentarily paused.  In all-stop, that's fine, we want
         threads stopped by now anyway.  In non-stop, we need to
         re-resume threads that GDB wanted to be running.  */
      if (step_over_finished)
        unstop_all_lwps (event_child);
    }

  ourstatus->kind = TARGET_WAITKIND_STOPPED;

  /* Do this before the gdb_wants_all_stopped calls below, since they
     always set last_resume_kind to resume_stop.  */
  if (event_child->last_resume_kind == resume_stop && WSTOPSIG (w) == SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
         and it stopped cleanly, so report as SIG0.  The use of
         SIGSTOP is an implementation detail.  */
      ourstatus->value.sig = TARGET_SIGNAL_0;
    }
  else if (event_child->last_resume_kind == resume_stop
           && WSTOPSIG (w) != SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
         but it stopped for other reasons.  */
      ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
    }
  else
    {
      ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
    }

  gdb_assert (ptid_equal (step_over_bkpt, null_ptid));

  if (!non_stop)
    {
      /* From GDB's perspective, all-stop mode always stops all
         threads implicitly.  Tag all threads as "want-stopped".  */
      gdb_wants_all_stopped ();
    }
  else
    {
1883 /* We're reporting this LWP as stopped. Update its
1884 "want-stopped" state to what the client wants, until it gets
1885 a new resume action. */
1886 gdb_wants_lwp_stopped (&event_child->head);
1887 }
1888
1889 if (debug_threads)
1890 fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
1891 target_pid_to_str (ptid_of (event_child)),
1892 ourstatus->kind,
1893 ourstatus->value.sig);
1894
1895 get_lwp_thread (event_child)->last_status = *ourstatus;
1896 return ptid_of (event_child);
1897 }
1898
1899 /* Get rid of any pending event in the pipe. */
1900 static void
1901 async_file_flush (void)
1902 {
1903 int ret;
1904 char buf;
1905
1906 do
1907 ret = read (linux_event_pipe[0], &buf, 1);
1908 while (ret >= 0 || (ret == -1 && errno == EINTR));
1909 }
1910
1911 /* Put something in the pipe, so the event loop wakes up. */
1912 static void
1913 async_file_mark (void)
1914 {
1915 int ret;
1916
1917 async_file_flush ();
1918
1919 do
1920 ret = write (linux_event_pipe[1], "+", 1);
1921 while (ret == 0 || (ret == -1 && errno == EINTR));
1922
1923 /* Ignore EAGAIN. If the pipe is full, the event loop will already
1924 be awakened anyway. */
1925 }
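
/* For illustration, a standalone sketch of the self-pipe pattern the
   two helpers above implement: a non-blocking pipe whose read end the
   event loop selects on, and whose write end is poked to force a
   wakeup.  The names (make_wakeup_pipe, wakeup_fds) are hypothetical,
   not gdbserver APIs.  */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int wakeup_fds[2];

static int
make_wakeup_pipe (void)
{
  int i;

  if (pipe (wakeup_fds) != 0)
    return -1;

  /* Both ends non-blocking: a writer never stalls when the pipe is
     full (a wakeup is already pending), and the drain loop ends as
     soon as the pipe is empty.  */
  for (i = 0; i < 2; i++)
    if (fcntl (wakeup_fds[i], F_SETFL,
	       fcntl (wakeup_fds[i], F_GETFL) | O_NONBLOCK) == -1)
      return -1;

  return 0;
}
#endif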
1926
1927 static ptid_t
1928 linux_wait (ptid_t ptid,
1929 struct target_waitstatus *ourstatus, int target_options)
1930 {
1931 ptid_t event_ptid;
1932
1933 if (debug_threads)
1934 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
1935
1936 /* Flush the async file first. */
1937 if (target_is_async_p ())
1938 async_file_flush ();
1939
1940 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
1941
1942 /* If at least one stop was reported, there may be more. A single
1943 SIGCHLD can signal more than one child stop. */
1944 if (target_is_async_p ()
1945 && (target_options & TARGET_WNOHANG) != 0
1946 && !ptid_equal (event_ptid, null_ptid))
1947 async_file_mark ();
1948
1949 return event_ptid;
1950 }
1951
1952 /* Send a signal to an LWP. */
1953
1954 static int
1955 kill_lwp (unsigned long lwpid, int signo)
1956 {
1957 /* Use tkill, if possible, in case we are using nptl threads. If tkill
1958 fails, then we are not using nptl threads and we should be using kill. */
1959
1960 #ifdef __NR_tkill
1961 {
1962 static int tkill_failed;
1963
1964 if (!tkill_failed)
1965 {
1966 int ret;
1967
1968 errno = 0;
1969 ret = syscall (__NR_tkill, lwpid, signo);
1970 if (errno != ENOSYS)
1971 return ret;
1972 tkill_failed = 1;
1973 }
1974 }
1975 #endif
1976
1977 return kill (lwpid, signo);
1978 }
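
/* For illustration: with NPTL, every thread shares the containing
   process's PID, so kill (pid, sig) may be delivered to an arbitrary
   thread of the group, while tkill targets one specific kernel
   thread, which is what the fallback logic above relies on.  A
   minimal sketch, assuming the kernel headers provide __NR_tkill.  */
#if 0
#include <sys/syscall.h>
#include <unistd.h>

static int
signal_one_thread (int tid, int sig)
{
  /* glibc shipped no tkill wrapper at the time, hence the raw
     syscall; returns -1 with errno == ENOSYS on pre-tkill kernels.  */
  return syscall (__NR_tkill, tid, sig);
}
#endif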
1979
1980 static void
1981 send_sigstop (struct inferior_list_entry *entry)
1982 {
1983 struct lwp_info *lwp = (struct lwp_info *) entry;
1984 int pid;
1985
1986 if (lwp->stopped)
1987 return;
1988
1989 pid = lwpid_of (lwp);
1990
1991 /* If we already have a pending stop signal for this process, don't
1992 send another. */
1993 if (lwp->stop_expected)
1994 {
1995 if (debug_threads)
1996 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
1997
1998 return;
1999 }
2000
2001 if (debug_threads)
2002 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
2003
2004 lwp->stop_expected = 1;
2005 kill_lwp (pid, SIGSTOP);
2006 }
2007
2008 static void
2009 mark_lwp_dead (struct lwp_info *lwp, int wstat)
2010 {
2011 /* It's dead, really. */
2012 lwp->dead = 1;
2013
2014 /* Store the exit status for later. */
2015 lwp->status_pending_p = 1;
2016 lwp->status_pending = wstat;
2017
2018 /* Prevent trying to stop it. */
2019 lwp->stopped = 1;
2020
2021 /* No further stops are expected from a dead lwp. */
2022 lwp->stop_expected = 0;
2023 }
2024
2025 static void
2026 wait_for_sigstop (struct inferior_list_entry *entry)
2027 {
2028 struct lwp_info *lwp = (struct lwp_info *) entry;
2029 struct thread_info *saved_inferior;
2030 int wstat;
2031 ptid_t saved_tid;
2032 ptid_t ptid;
2033 int pid;
2034
2035 if (lwp->stopped)
2036 {
2037 if (debug_threads)
2038 fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
2039 lwpid_of (lwp));
2040 return;
2041 }
2042
2043 saved_inferior = current_inferior;
2044 if (saved_inferior != NULL)
2045 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
2046 else
2047 saved_tid = null_ptid; /* avoid bogus unused warning */
2048
2049 ptid = lwp->head.id;
2050
2051 if (debug_threads)
2052 fprintf (stderr, "wait_for_sigstop: pulling one event\n");
2053
2054 pid = linux_wait_for_event (ptid, &wstat, __WALL);
2055
2056 /* If we stopped with a non-SIGSTOP signal, save it for later
2057 and record the pending SIGSTOP. If the process exited, just
2058 return. */
2059 if (WIFSTOPPED (wstat))
2060 {
2061 if (debug_threads)
2062 fprintf (stderr, "LWP %ld stopped with signal %d\n",
2063 lwpid_of (lwp), WSTOPSIG (wstat));
2064
2065 if (WSTOPSIG (wstat) != SIGSTOP)
2066 {
2067 if (debug_threads)
2068 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
2069 lwpid_of (lwp), wstat);
2070
2071 lwp->status_pending_p = 1;
2072 lwp->status_pending = wstat;
2073 }
2074 }
2075 else
2076 {
2077 if (debug_threads)
2078 fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);
2079
2080 lwp = find_lwp_pid (pid_to_ptid (pid));
2081 if (lwp)
2082 {
2083 /* Leave this status pending for the next time we're able to
2084 report it. In the meantime, we'll report this lwp as
2085 dead to GDB, so GDB doesn't try to read registers and
2086 memory from it. This can only happen if this was the
2087 last thread of the process; otherwise, PID is removed
2088 from the thread tables before linux_wait_for_event
2089 returns. */
2090 mark_lwp_dead (lwp, wstat);
2091 }
2092 }
2093
2094 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
2095 current_inferior = saved_inferior;
2096 else
2097 {
2098 if (debug_threads)
2099 fprintf (stderr, "Previously current thread died.\n");
2100
2101 if (non_stop)
2102 {
2103 /* We can't change the current inferior behind GDB's back,
2104 otherwise, a subsequent command may apply to the wrong
2105 process. */
2106 current_inferior = NULL;
2107 }
2108 else
2109 {
2110 /* Set a valid thread as current. */
2111 set_desired_inferior (0);
2112 }
2113 }
2114 }
2115
2116 static void
2117 stop_all_lwps (void)
2118 {
2119 stopping_threads = 1;
2120 for_each_inferior (&all_lwps, send_sigstop);
2121 for_each_inferior (&all_lwps, wait_for_sigstop);
2122 stopping_threads = 0;
2123 }
2124
2125 /* Resume execution of LWP.
2126 If STEP is nonzero, single-step it.
2127 If SIGNAL is nonzero, give it that signal. */
2128
2129 static void
2130 linux_resume_one_lwp (struct lwp_info *lwp,
2131 int step, int signal, siginfo_t *info)
2132 {
2133 struct thread_info *saved_inferior;
2134
2135 if (lwp->stopped == 0)
2136 return;
2137
2138 /* If we have pending signals or status, and a new signal, enqueue the
2139 signal. Also enqueue the signal if we are waiting to reinsert a
2140 breakpoint; it will be picked up again below. */
2141 if (signal != 0
2142 && (lwp->status_pending_p || lwp->pending_signals != NULL
2143 || lwp->bp_reinsert != 0))
2144 {
2145 struct pending_signals *p_sig;
2146 p_sig = xmalloc (sizeof (*p_sig));
2147 p_sig->prev = lwp->pending_signals;
2148 p_sig->signal = signal;
2149 if (info == NULL)
2150 memset (&p_sig->info, 0, sizeof (siginfo_t));
2151 else
2152 memcpy (&p_sig->info, info, sizeof (siginfo_t));
2153 lwp->pending_signals = p_sig;
2154 }
2155
2156 if (lwp->status_pending_p)
2157 {
2158 if (debug_threads)
2159 fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
2160 " has pending status\n",
2161 lwpid_of (lwp), step ? "step" : "continue", signal,
2162 lwp->stop_expected ? "expected" : "not expected");
2163 return;
2164 }
2165
2166 saved_inferior = current_inferior;
2167 current_inferior = get_lwp_thread (lwp);
2168
2169 if (debug_threads)
2170 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
2171 lwpid_of (lwp), step ? "step" : "continue", signal,
2172 lwp->stop_expected ? "expected" : "not expected");
2173
2174 /* This bit needs some thinking about. If we get a signal that
2175 we must report while a single-step reinsert is still pending,
2176 we often end up resuming the thread. It might be better to
2177 (ew) allow a stack of pending events; then we could be sure that
2178 the reinsert happened right away and not lose any signals.
2179
2180 Making this stack would also shrink the window in which breakpoints are
2181 uninserted (see comment in linux_wait_for_lwp) but not enough for
2182 complete correctness, so it won't solve that problem. It may be
2183 worthwhile just to solve this one, however. */
2184 if (lwp->bp_reinsert != 0)
2185 {
2186 if (debug_threads)
2187 fprintf (stderr, " pending reinsert at 0x%s\n",
2188 paddress (lwp->bp_reinsert));
2189
2190 if (can_hardware_single_step ())
2191 {
2192 if (step == 0)
2193 fprintf (stderr, "BAD - reinserting but not stepping.\n");
2194
2195 step = 1;
2196 }
2197
2198 /* Postpone any pending signal. It was enqueued above. */
2199 signal = 0;
2200 }
2201
2202 if (debug_threads && the_low_target.get_pc != NULL)
2203 {
2204 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
2205 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
2206 fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
2207 }
2208
2209 /* If we have pending signals, consume one unless we are trying to reinsert
2210 a breakpoint. */
2211 if (lwp->pending_signals != NULL && lwp->bp_reinsert == 0)
2212 {
2213 struct pending_signals **p_sig;
2214
2215 p_sig = &lwp->pending_signals;
2216 while ((*p_sig)->prev != NULL)
2217 p_sig = &(*p_sig)->prev;
2218
2219 signal = (*p_sig)->signal;
2220 if ((*p_sig)->info.si_signo != 0)
2221 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
2222
2223 free (*p_sig);
2224 *p_sig = NULL;
2225 }
2226
2227 if (the_low_target.prepare_to_resume != NULL)
2228 the_low_target.prepare_to_resume (lwp);
2229
2230 regcache_invalidate_one ((struct inferior_list_entry *)
2231 get_lwp_thread (lwp));
2232 errno = 0;
2233 lwp->stopped = 0;
2234 lwp->stopped_by_watchpoint = 0;
2235 lwp->stepping = step;
2236 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0,
2237 /* Coerce to a uintptr_t first to avoid potential gcc warning
2238 of coercing an 8 byte integer to a 4 byte pointer. */
2239 (PTRACE_ARG4_TYPE) (uintptr_t) signal);
2240
2241 current_inferior = saved_inferior;
2242 if (errno)
2243 {
2244 /* ESRCH from ptrace either means that the thread was already
2245 running (an error) or that it is gone (a race condition). If
2246 it's gone, we will get a notification the next time we wait,
2247 so we can ignore the error. We could differentiate these
2248 two, but it's tricky without waiting; the thread still exists
2249 as a zombie, so sending it signal 0 would succeed. So just
2250 ignore ESRCH. */
2251 if (errno == ESRCH)
2252 return;
2253
2254 perror_with_name ("ptrace");
2255 }
2256 }
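
/* For illustration, the pending-signal queue discipline used above in
   a standalone form: new signals are pushed at the head via PREV, so
   delivering in arrival order means walking to the tail (the oldest
   entry) before unlinking it.  The struct name is hypothetical.  */
#if 0
#include <stdlib.h>

struct pending_sig_eg
{
  int signal;
  struct pending_sig_eg *prev;
};

static int
dequeue_oldest (struct pending_sig_eg **head)
{
  struct pending_sig_eg **p = head;
  int sig;

  /* Walk to the oldest entry.  */
  while ((*p)->prev != NULL)
    p = &(*p)->prev;

  sig = (*p)->signal;
  free (*p);
  *p = NULL;
  return sig;
}
#endif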
2257
2258 struct thread_resume_array
2259 {
2260 struct thread_resume *resume;
2261 size_t n;
2262 };
2263
2264 /* This function is called once per thread. We look the thread up
2265 in ARG (a struct thread_resume_array), and mark it with a pointer
2266 to the appropriate resume request.
2267
2268 This algorithm is O(threads * resume elements), but resume elements
2269 is small (and will remain small at least until GDB supports thread
2270 suspension). */
2271 static int
2272 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
2273 {
2274 struct lwp_info *lwp;
2275 struct thread_info *thread;
2276 int ndx;
2277 struct thread_resume_array *r;
2278
2279 thread = (struct thread_info *) entry;
2280 lwp = get_thread_lwp (thread);
2281 r = arg;
2282
2283 for (ndx = 0; ndx < r->n; ndx++)
2284 {
2285 ptid_t ptid = r->resume[ndx].thread;
2286 if (ptid_equal (ptid, minus_one_ptid)
2287 || ptid_equal (ptid, entry->id)
2288 || (ptid_is_pid (ptid)
2289 && (ptid_get_pid (ptid) == pid_of (lwp)))
2290 || (ptid_get_lwp (ptid) == -1
2291 && (ptid_get_pid (ptid) == pid_of (lwp))))
2292 {
2293 if (r->resume[ndx].kind == resume_stop
2294 && lwp->last_resume_kind == resume_stop)
2295 {
2296 if (debug_threads)
2297 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
2298 thread->last_status.kind == TARGET_WAITKIND_STOPPED
2299 ? "stopped"
2300 : "stopping",
2301 lwpid_of (lwp));
2302
2303 continue;
2304 }
2305
2306 lwp->resume = &r->resume[ndx];
2307 lwp->last_resume_kind = lwp->resume->kind;
2308 return 0;
2309 }
2310 }
2311
2312 /* No resume action for this thread. */
2313 lwp->resume = NULL;
2314
2315 return 0;
2316 }
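
/* For illustration, the matching rules above distilled into a single
   hypothetical predicate: a resume request applies to an LWP if it is
   the minus_one_ptid wildcard, names exactly that thread, or names
   the whole process, either as a bare PID or as (PID, lwp -1).  */
#if 0
static int
resume_request_matches (ptid_t request, struct lwp_info *lwp)
{
  return (ptid_equal (request, minus_one_ptid)
	  || ptid_equal (request, lwp->head.id)
	  || ((ptid_is_pid (request) || ptid_get_lwp (request) == -1)
	      && ptid_get_pid (request) == pid_of (lwp)));
}
#endif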
2317
2318
2319 /* Set *FLAG_P if this lwp has an interesting status pending. */
2320 static int
2321 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
2322 {
2323 struct lwp_info *lwp = (struct lwp_info *) entry;
2324
2325 /* LWPs which will not be resumed are not interesting, because
2326 we might not wait for them next time through linux_wait. */
2327 if (lwp->resume == NULL)
2328 return 0;
2329
2330 if (lwp->status_pending_p)
2331 * (int *) flag_p = 1;
2332
2333 return 0;
2334 }
2335
2336 /* Return 1 if this lwp that GDB wants running is stopped at an
2337 internal breakpoint that we need to step over. It assumes that any
2338 required STOP_PC adjustment has already been propagated to the
2339 inferior's regcache. */
2340
2341 static int
2342 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
2343 {
2344 struct lwp_info *lwp = (struct lwp_info *) entry;
2345 struct thread_info *saved_inferior;
2346 CORE_ADDR pc;
2347
2348 /* LWPs which will not be resumed are not interesting, because we
2349 might not wait for them next time through linux_wait. */
2350
2351 if (!lwp->stopped)
2352 {
2353 if (debug_threads)
2354 fprintf (stderr,
2355 "Need step over [LWP %ld]? Ignoring, not stopped\n",
2356 lwpid_of (lwp));
2357 return 0;
2358 }
2359
2360 if (lwp->last_resume_kind == resume_stop)
2361 {
2362 if (debug_threads)
2363 fprintf (stderr,
2364 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
2365 lwpid_of (lwp));
2366 return 0;
2367 }
2368
2369 if (!lwp->need_step_over)
2370 {
2371 if (debug_threads)
2372 fprintf (stderr,
2373 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
2374 }
2375
2376 if (lwp->status_pending_p)
2377 {
2378 if (debug_threads)
2379 fprintf (stderr,
2380 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
2381 lwpid_of (lwp));
2382 return 0;
2383 }
2384
2385 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
2386 or we have. */
2387 pc = get_pc (lwp);
2388
2389 /* If the PC has changed since we stopped, then don't do anything,
2390 and let the breakpoint/tracepoint be hit. This happens if, for
2391 instance, GDB handled the decr_pc_after_break subtraction itself,
2392 GDB is OOL stepping this thread, or the user has issued a "jump"
2393 command, or poked the thread's registers herself. */
2394 if (pc != lwp->stop_pc)
2395 {
2396 if (debug_threads)
2397 fprintf (stderr,
2398 "Need step over [LWP %ld]? Cancelling, PC was changed. "
2399 "Old stop_pc was 0x%s, PC is now 0x%s\n",
2400 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
2401
2402 lwp->need_step_over = 0;
2403 return 0;
2404 }
2405
2406 saved_inferior = current_inferior;
2407 current_inferior = get_lwp_thread (lwp);
2408
2409 /* We can only step over breakpoints we know about. */
2410 if (breakpoint_here (pc))
2411 {
2412 /* Don't step over a breakpoint that GDB expects to hit
2413 though. */
2414 if (gdb_breakpoint_here (pc))
2415 {
2416 if (debug_threads)
2417 fprintf (stderr,
2418 "Need step over [LWP %ld]? yes, but found"
2419 " GDB breakpoint at 0x%s; skipping step over\n",
2420 lwpid_of (lwp), paddress (pc));
2421
2422 current_inferior = saved_inferior;
2423 return 0;
2424 }
2425 else
2426 {
2427 if (debug_threads)
2428 fprintf (stderr,
2429 "Need step over [LWP %ld]? yes, found breakpoint at 0x%s\n",
2430 lwpid_of (lwp), paddress (pc));
2431
2432 /* We've found an lwp that needs stepping over --- return 1 so
2433 that find_inferior stops looking. */
2434 current_inferior = saved_inferior;
2435
2436 /* If the step over is cancelled, this is set again. */
2437 lwp->need_step_over = 0;
2438 return 1;
2439 }
2440 }
2441
2442 current_inferior = saved_inferior;
2443
2444 if (debug_threads)
2445 fprintf (stderr,
2446 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
2447 lwpid_of (lwp), paddress (pc));
2448
2449 return 0;
2450 }
2451
2452 /* Start a step-over operation on LWP. When LWP stopped at a
2453 breakpoint, to make progress, we need to move the breakpoint out
2454 of the way. If we let other threads run while we do that, they may
2455 pass by the breakpoint location and miss hitting it. To avoid
2456 that, a step-over momentarily stops all threads while LWP is
2457 single-stepped while the breakpoint is temporarily uninserted from
2458 the inferior. When the single-step finishes, we reinsert the
2459 breakpoint, and let all threads that are supposed to be running,
2460 run again.
2461
2462 On targets that don't support hardware single-step, we don't
2463 currently support full software single-stepping. Instead, we only
2464 support stepping over the thread event breakpoint, by asking the
2465 low target where to place a reinsert breakpoint. Since this
2466 routine assumes the breakpoint being stepped over is a thread event
2467 breakpoint, the return address of the current function is usually a
2468 good enough place to set the reinsert breakpoint. */
2469
2470 static int
2471 start_step_over (struct lwp_info *lwp)
2472 {
2473 struct thread_info *saved_inferior;
2474 CORE_ADDR pc;
2475 int step;
2476
2477 if (debug_threads)
2478 fprintf (stderr,
2479 "Starting step-over on LWP %ld. Stopping all threads\n",
2480 lwpid_of (lwp));
2481
2482 stop_all_lwps ();
2483
2484 if (debug_threads)
2485 fprintf (stderr, "Done stopping all threads for step-over.\n");
2486
2487 /* Note, we should always reach here with an already adjusted PC,
2488 either by GDB (if we're resuming due to GDB's request), or by our
2489 caller, if we just finished handling an internal breakpoint GDB
2490 shouldn't care about. */
2491 pc = get_pc (lwp);
2492
2493 saved_inferior = current_inferior;
2494 current_inferior = get_lwp_thread (lwp);
2495
2496 lwp->bp_reinsert = pc;
2497 uninsert_breakpoints_at (pc);
2498
2499 if (can_hardware_single_step ())
2500 {
2501 step = 1;
2502 }
2503 else
2504 {
2505 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
2506 set_reinsert_breakpoint (raddr);
2507 step = 0;
2508 }
2509
2510 current_inferior = saved_inferior;
2511
2512 linux_resume_one_lwp (lwp, step, 0, NULL);
2513
2514 /* Require next event from this LWP. */
2515 step_over_bkpt = lwp->head.id;
2516 return 1;
2517 }
2518
2519 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
2520 start_step_over, if still there, and delete any reinsert
2521 breakpoints we've set, on targets without hardware single-step. */
2522
2523 static int
2524 finish_step_over (struct lwp_info *lwp)
2525 {
2526 if (lwp->bp_reinsert != 0)
2527 {
2528 if (debug_threads)
2529 fprintf (stderr, "Finished step over.\n");
2530
2531 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
2532 may be no breakpoint to reinsert there by now. */
2533 reinsert_breakpoints_at (lwp->bp_reinsert);
2534
2535 lwp->bp_reinsert = 0;
2536
2537 /* Delete any software-single-step reinsert breakpoints. No
2538 longer needed. We don't have to worry about other threads
2539 hitting this trap, and later not being able to explain it,
2540 because we were stepping over a breakpoint, and we hold all
2541 threads but LWP stopped while doing that. */
2542 if (!can_hardware_single_step ())
2543 delete_reinsert_breakpoints ();
2544
2545 step_over_bkpt = null_ptid;
2546 return 1;
2547 }
2548 else
2549 return 0;
2550 }
2551
2552 /* This function is called once per thread. We check the thread's resume
2553 request, which will tell us whether to resume, step, or leave the thread
2554 stopped; and what signal, if any, it should be sent.
2555
2556 For threads which we aren't explicitly told otherwise, we preserve
2557 the stepping flag; this is used for stepping over gdbserver-placed
2558 breakpoints.
2559
2560 If the pending status flag was set in any thread, we queue any needed
2561 signals, since we won't actually resume. We already have a pending
2562 event to report, so we don't need to preserve any step requests;
2563 they should be re-issued if necessary. */
2564
2565 static int
2566 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
2567 {
2568 struct lwp_info *lwp;
2569 struct thread_info *thread;
2570 int step;
2571 int leave_all_stopped = * (int *) arg;
2572 int leave_pending;
2573
2574 thread = (struct thread_info *) entry;
2575 lwp = get_thread_lwp (thread);
2576
2577 if (lwp->resume == NULL)
2578 return 0;
2579
2580 if (lwp->resume->kind == resume_stop)
2581 {
2582 if (debug_threads)
2583 fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));
2584
2585 if (!lwp->stopped)
2586 {
2587 if (debug_threads)
2588 fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));
2589
2590 /* Stop the thread, and wait for the event asynchronously,
2591 through the event loop. */
2592 send_sigstop (&lwp->head);
2593 }
2594 else
2595 {
2596 if (debug_threads)
2597 fprintf (stderr, "already stopped LWP %ld\n",
2598 lwpid_of (lwp));
2599
2600 /* The LWP may have been stopped in an internal event that
2601 was not meant to be notified back to GDB (e.g., gdbserver
2602 breakpoint), so we should be reporting a stop event in
2603 this case too. */
2604
2605 /* If the thread already has a pending SIGSTOP, this is a
2606 no-op. Otherwise, something later will presumably resume
2607 the thread and this will cause it to cancel any pending
2608 operation, due to last_resume_kind == resume_stop. If
2609 the thread already has a pending status to report, we
2610 will still report it the next time we wait - see
2611 status_pending_p_callback. */
2612 send_sigstop (&lwp->head);
2613 }
2614
2615 /* For stop requests, we're done. */
2616 lwp->resume = NULL;
2617 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
2618 return 0;
2619 }
2620
2621 /* If this thread which is about to be resumed has a pending status,
2622 then don't resume any threads - we can just report the pending
2623 status. Make sure to queue any signals that would otherwise be
2624 sent. In all-stop mode, we make this decision based on whether *any*
2625 thread has a pending status. If there's a thread that needs the
2626 step-over-breakpoint dance, then don't resume any other thread
2627 but that particular one. */
2628 leave_pending = (lwp->status_pending_p || leave_all_stopped);
2629
2630 if (!leave_pending)
2631 {
2632 if (debug_threads)
2633 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
2634
2635 step = (lwp->resume->kind == resume_step);
2636 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
2637 }
2638 else
2639 {
2640 if (debug_threads)
2641 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
2642
2643 /* If we have a new signal, enqueue the signal. */
2644 if (lwp->resume->sig != 0)
2645 {
2646 struct pending_signals *p_sig;
2647 p_sig = xmalloc (sizeof (*p_sig));
2648 p_sig->prev = lwp->pending_signals;
2649 p_sig->signal = lwp->resume->sig;
2650 memset (&p_sig->info, 0, sizeof (siginfo_t));
2651
2652 /* If this is the same signal we were previously stopped by,
2653 make sure to queue its siginfo. We can ignore the return
2654 value of ptrace; if it fails, we'll skip
2655 PTRACE_SETSIGINFO. */
2656 if (WIFSTOPPED (lwp->last_status)
2657 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
2658 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
2659
2660 lwp->pending_signals = p_sig;
2661 }
2662 }
2663
2664 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
2665 lwp->resume = NULL;
2666 return 0;
2667 }
2668
2669 static void
2670 linux_resume (struct thread_resume *resume_info, size_t n)
2671 {
2672 struct thread_resume_array array = { resume_info, n };
2673 struct lwp_info *need_step_over = NULL;
2674 int any_pending;
2675 int leave_all_stopped;
2676
2677 find_inferior (&all_threads, linux_set_resume_request, &array);
2678
2679 /* If there is a thread which would otherwise be resumed, which has
2680 a pending status, then don't resume any threads - we can just
2681 report the pending status. Make sure to queue any signals that
2682 would otherwise be sent. In non-stop mode, we'll apply this
2683 logic to each thread individually. We consume all pending events
2684 before considering to start a step-over (in all-stop). */
2685 any_pending = 0;
2686 if (!non_stop)
2687 find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
2688
2689 /* If there is a thread which would otherwise be resumed, which is
2690 stopped at a breakpoint that needs stepping over, then don't
2691 resume any threads - have it step over the breakpoint with all
2692 other threads stopped, then resume all threads again. Make sure
2693 to queue any signals that would otherwise be delivered or
2694 queued. */
2695 if (!any_pending && supports_breakpoints ())
2696 need_step_over
2697 = (struct lwp_info *) find_inferior (&all_lwps,
2698 need_step_over_p, NULL);
2699
2700 leave_all_stopped = (need_step_over != NULL || any_pending);
2701
2702 if (debug_threads)
2703 {
2704 if (need_step_over != NULL)
2705 fprintf (stderr, "Not resuming all, need step over\n");
2706 else if (any_pending)
2707 fprintf (stderr,
2708 "Not resuming, all-stop and found "
2709 "an LWP with pending status\n");
2710 else
2711 fprintf (stderr, "Resuming, no pending status or step over needed\n");
2712 }
2713
2714 /* Even if we're leaving threads stopped, queue all signals we'd
2715 otherwise deliver. */
2716 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
2717
2718 if (need_step_over)
2719 start_step_over (need_step_over);
2720 }
2721
2722 /* This function is called once per thread. We check the thread's
2723 last resume request, which will tell us whether to resume, step, or
2724 leave the thread stopped. Any signal the client requested to be
2725 delivered has already been enqueued at this point.
2726
2727 If any thread that GDB wants running is stopped at an internal
2728 breakpoint that needs stepping over, we start a step-over operation
2729 on that particular thread, and leave all others stopped. */
2730
2731 static void
2732 proceed_one_lwp (struct inferior_list_entry *entry)
2733 {
2734 struct lwp_info *lwp;
2735 int step;
2736
2737 lwp = (struct lwp_info *) entry;
2738
2739 if (debug_threads)
2740 fprintf (stderr,
2741 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
2742
2743 if (!lwp->stopped)
2744 {
2745 if (debug_threads)
2746 fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
2747 return;
2748 }
2749
2750 if (lwp->last_resume_kind == resume_stop)
2751 {
2752 if (debug_threads)
2753 fprintf (stderr, " client wants LWP %ld stopped\n", lwpid_of (lwp));
2754 return;
2755 }
2756
2757 if (lwp->status_pending_p)
2758 {
2759 if (debug_threads)
2760 fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
2761 lwpid_of (lwp));
2762 return;
2763 }
2764
2765 if (lwp->suspended)
2766 {
2767 if (debug_threads)
2768 fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
2769 return;
2770 }
2771
2772 step = lwp->last_resume_kind == resume_step;
2773 linux_resume_one_lwp (lwp, step, 0, NULL);
2774 }
2775
2776 /* When we finish a step-over, set threads running again. If there's
2777 another thread that may need a step-over, now's the time to start
2778 it. Eventually, we'll move all threads past their breakpoints. */
2779
2780 static void
2781 proceed_all_lwps (void)
2782 {
2783 struct lwp_info *need_step_over;
2784
2785 /* If there is a thread which would otherwise be resumed, which is
2786 stopped at a breakpoint that needs stepping over, then don't
2787 resume any threads - have it step over the breakpoint with all
2788 other threads stopped, then resume all threads again. */
2789
2790 if (supports_breakpoints ())
2791 {
2792 need_step_over
2793 = (struct lwp_info *) find_inferior (&all_lwps,
2794 need_step_over_p, NULL);
2795
2796 if (need_step_over != NULL)
2797 {
2798 if (debug_threads)
2799 fprintf (stderr, "proceed_all_lwps: found "
2800 "thread %ld needing a step-over\n",
2801 lwpid_of (need_step_over));
2802
2803 start_step_over (need_step_over);
2804 return;
2805 }
2806 }
2807
2808 if (debug_threads)
2809 fprintf (stderr, "Proceeding, no step-over needed\n");
2810
2811 for_each_inferior (&all_lwps, proceed_one_lwp);
2812 }
2813
2814 /* Stopped LWPs that the client wanted to be running and that have
2815 no pending status are set running again, except for EXCEPT, if not
2816 NULL. This undoes a stop_all_lwps call. */
2817
2818 static void
2819 unstop_all_lwps (struct lwp_info *except)
2820 {
2821 if (debug_threads)
2822 {
2823 if (except)
2824 fprintf (stderr,
2825 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
2826 else
2827 fprintf (stderr,
2828 "unstopping all lwps\n");
2829 }
2830
2831 /* Make sure proceed_one_lwp doesn't try to resume this thread. */
2832 if (except != NULL)
2833 ++except->suspended;
2834
2835 for_each_inferior (&all_lwps, proceed_one_lwp);
2836
2837 if (except != NULL)
2838 --except->suspended;
2839 }
2840
2841 #ifdef HAVE_LINUX_USRREGS
2842
2843 int
2844 register_addr (int regnum)
2845 {
2846 int addr;
2847
2848 if (regnum < 0 || regnum >= the_low_target.num_regs)
2849 error ("Invalid register number %d.", regnum);
2850
2851 addr = the_low_target.regmap[regnum];
2852
2853 return addr;
2854 }
2855
2856 /* Fetch one register. */
2857 static void
2858 fetch_register (struct regcache *regcache, int regno)
2859 {
2860 CORE_ADDR regaddr;
2861 int i, size;
2862 char *buf;
2863 int pid;
2864
2865 if (regno >= the_low_target.num_regs)
2866 return;
2867 if ((*the_low_target.cannot_fetch_register) (regno))
2868 return;
2869
2870 regaddr = register_addr (regno);
2871 if (regaddr == -1)
2872 return;
2873
2874 pid = lwpid_of (get_thread_lwp (current_inferior));
2875 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
2876 & - sizeof (PTRACE_XFER_TYPE));
2877 buf = alloca (size);
2878 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
2879 {
2880 errno = 0;
2881 *(PTRACE_XFER_TYPE *) (buf + i) =
2882 ptrace (PTRACE_PEEKUSER, pid,
2883 /* Coerce to a uintptr_t first to avoid potential gcc warning
2884 of coercing an 8 byte integer to a 4 byte pointer. */
2885 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0);
2886 regaddr += sizeof (PTRACE_XFER_TYPE);
2887 if (errno != 0)
2888 error ("reading register %d: %s", regno, strerror (errno));
2889 }
2890
2891 if (the_low_target.supply_ptrace_register)
2892 the_low_target.supply_ptrace_register (regcache, regno, buf);
2893 else
2894 supply_register (regcache, regno, buf);
2895 }
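
/* For illustration: the size computation above rounds a register's
   size up to a whole number of ptrace transfer words with the
   identity (n + w - 1) & -w, which is valid whenever w is a power of
   two; e.g. with 8-byte words, a 10-byte register becomes
   (10 + 7) & ~7 = 16.  A minimal standalone form: */
#if 0
static unsigned int
round_up_pow2 (unsigned int n, unsigned int w)
{
  /* W must be a power of two; -w is then a mask clearing the low
     log2 (w) bits.  */
  return (n + w - 1) & -w;
}
#endif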
2896
2897 /* Fetch all registers, or just one, from the child process. */
2898 static void
2899 usr_fetch_inferior_registers (struct regcache *regcache, int regno)
2900 {
2901 if (regno == -1)
2902 for (regno = 0; regno < the_low_target.num_regs; regno++)
2903 fetch_register (regcache, regno);
2904 else
2905 fetch_register (regcache, regno);
2906 }
2907
2908 /* Store our register values back into the inferior.
2909 If REGNO is -1, do this for all registers.
2910 Otherwise, REGNO specifies which register (so we can save time). */
2911 static void
2912 usr_store_inferior_registers (struct regcache *regcache, int regno)
2913 {
2914 CORE_ADDR regaddr;
2915 int i, size;
2916 char *buf;
2917 int pid;
2918
2919 if (regno >= 0)
2920 {
2921 if (regno >= the_low_target.num_regs)
2922 return;
2923
2924 if ((*the_low_target.cannot_store_register) (regno) == 1)
2925 return;
2926
2927 regaddr = register_addr (regno);
2928 if (regaddr == -1)
2929 return;
2930 errno = 0;
2931 size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
2932 & - sizeof (PTRACE_XFER_TYPE);
2933 buf = alloca (size);
2934 memset (buf, 0, size);
2935
2936 if (the_low_target.collect_ptrace_register)
2937 the_low_target.collect_ptrace_register (regcache, regno, buf);
2938 else
2939 collect_register (regcache, regno, buf);
2940
2941 pid = lwpid_of (get_thread_lwp (current_inferior));
2942 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
2943 {
2944 errno = 0;
2945 ptrace (PTRACE_POKEUSER, pid,
2946 /* Coerce to a uintptr_t first to avoid potential gcc warning
2947 about coercing an 8 byte integer to a 4 byte pointer. */
2948 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
2949 (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
2950 if (errno != 0)
2951 {
2952 /* At this point, ESRCH should mean the process is
2953 already gone, in which case we simply ignore attempts
2954 to change its registers. See also the related
2955 comment in linux_resume_one_lwp. */
2956 if (errno == ESRCH)
2957 return;
2958
2959 if ((*the_low_target.cannot_store_register) (regno) == 0)
2960 error ("writing register %d: %s", regno, strerror (errno));
2961 }
2962 regaddr += sizeof (PTRACE_XFER_TYPE);
2963 }
2964 }
2965 else
2966 for (regno = 0; regno < the_low_target.num_regs; regno++)
2967 usr_store_inferior_registers (regcache, regno);
2968 }
2969 #endif /* HAVE_LINUX_USRREGS */
2970
2971
2972
2973 #ifdef HAVE_LINUX_REGSETS
2974
2975 static int
2976 regsets_fetch_inferior_registers (struct regcache *regcache)
2977 {
2978 struct regset_info *regset;
2979 int saw_general_regs = 0;
2980 int pid;
2981 struct iovec iov;
2982
2983 regset = target_regsets;
2984
2985 pid = lwpid_of (get_thread_lwp (current_inferior));
2986 while (regset->size >= 0)
2987 {
2988 void *buf, *data;
2989 int nt_type, res;
2990
2991 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
2992 {
2993 regset ++;
2994 continue;
2995 }
2996
2997 buf = xmalloc (regset->size);
2998
2999 nt_type = regset->nt_type;
3000 if (nt_type)
3001 {
3002 iov.iov_base = buf;
3003 iov.iov_len = regset->size;
3004 data = (void *) &iov;
3005 }
3006 else
3007 data = buf;
3008
3009 #ifndef __sparc__
3010 res = ptrace (regset->get_request, pid, nt_type, data);
3011 #else
3012 res = ptrace (regset->get_request, pid, data, nt_type);
3013 #endif
3014 if (res < 0)
3015 {
3016 if (errno == EIO)
3017 {
3018 /* If we get EIO on a regset, do not try it again for
3019 this process. */
3020 disabled_regsets[regset - target_regsets] = 1;
3021 free (buf);
3022 continue;
3023 }
3024 else
3025 {
3026 char s[256];
3027 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
3028 pid);
3029 perror (s);
3030 }
3031 }
3032 else if (regset->type == GENERAL_REGS)
3033 saw_general_regs = 1;
3034 regset->store_function (regcache, buf);
3035 regset ++;
3036 free (buf);
3037 }
3038 if (saw_general_regs)
3039 return 0;
3040 else
3041 return 1;
3042 }
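
/* For illustration, how the nt_type/iovec path above is driven on
   x86: extended state regsets (SSE/AVX) are fetched with
   PTRACE_GETREGSET, whose third argument is an ELF note type and
   whose fourth is a struct iovec describing the buffer; the kernel
   updates iov_len with the size it actually filled in.  The constants
   below are the Linux values of the time, hard-coded here only for
   the sketch.  */
#if 0
#include <sys/ptrace.h>
#include <sys/uio.h>

#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET 0x4204
#endif
#ifndef NT_X86_XSTATE
#define NT_X86_XSTATE 0x202
#endif

static long
fetch_xstate (int pid, void *buf, unsigned long size)
{
  struct iovec iov;

  iov.iov_base = buf;
  iov.iov_len = size;
  return ptrace (PTRACE_GETREGSET, pid, NT_X86_XSTATE, &iov);
}
#endif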
3043
3044 static int
3045 regsets_store_inferior_registers (struct regcache *regcache)
3046 {
3047 struct regset_info *regset;
3048 int saw_general_regs = 0;
3049 int pid;
3050 struct iovec iov;
3051
3052 regset = target_regsets;
3053
3054 pid = lwpid_of (get_thread_lwp (current_inferior));
3055 while (regset->size >= 0)
3056 {
3057 void *buf, *data;
3058 int nt_type, res;
3059
3060 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3061 {
3062 regset ++;
3063 continue;
3064 }
3065
3066 buf = xmalloc (regset->size);
3067
3068 /* First fill the buffer with the current register set contents,
3069 in case there are any items in the kernel's regset that are
3070 not in gdbserver's regcache. */
3071
3072 nt_type = regset->nt_type;
3073 if (nt_type)
3074 {
3075 iov.iov_base = buf;
3076 iov.iov_len = regset->size;
3077 data = (void *) &iov;
3078 }
3079 else
3080 data = buf;
3081
3082 #ifndef __sparc__
3083 res = ptrace (regset->get_request, pid, nt_type, data);
3084 #else
3085 res = ptrace (regset->get_request, pid, data, nt_type);
3086 #endif
3087
3088 if (res == 0)
3089 {
3090 /* Then overlay our cached registers on that. */
3091 regset->fill_function (regcache, buf);
3092
3093 /* Only now do we write the register set. */
3094 #ifndef __sparc__
3095 res = ptrace (regset->set_request, pid, nt_type, data);
3096 #else
3097 res = ptrace (regset->set_request, pid, data, nt_type);
3098 #endif
3099 }
3100
3101 if (res < 0)
3102 {
3103 if (errno == EIO)
3104 {
3105 /* If we get EIO on a regset, do not try it again for
3106 this process. */
3107 disabled_regsets[regset - target_regsets] = 1;
3108 free (buf);
3109 continue;
3110 }
3111 else if (errno == ESRCH)
3112 {
3113 /* At this point, ESRCH should mean the process is
3114 already gone, in which case we simply ignore attempts
3115 to change its registers. See also the related
3116 comment in linux_resume_one_lwp. */
3117 free (buf);
3118 return 0;
3119 }
3120 else
3121 {
3122 perror ("Warning: ptrace(regsets_store_inferior_registers)");
3123 }
3124 }
3125 else if (regset->type == GENERAL_REGS)
3126 saw_general_regs = 1;
3127 regset ++;
3128 free (buf);
3129 }
3130 if (saw_general_regs)
3131 return 0;
3132 else
3133 return 1;
3135 }
3136
3137 #endif /* HAVE_LINUX_REGSETS */
3138
3139
3140 void
3141 linux_fetch_registers (struct regcache *regcache, int regno)
3142 {
3143 #ifdef HAVE_LINUX_REGSETS
3144 if (regsets_fetch_inferior_registers (regcache) == 0)
3145 return;
3146 #endif
3147 #ifdef HAVE_LINUX_USRREGS
3148 usr_fetch_inferior_registers (regcache, regno);
3149 #endif
3150 }
3151
3152 void
3153 linux_store_registers (struct regcache *regcache, int regno)
3154 {
3155 #ifdef HAVE_LINUX_REGSETS
3156 if (regsets_store_inferior_registers (regcache) == 0)
3157 return;
3158 #endif
3159 #ifdef HAVE_LINUX_USRREGS
3160 usr_store_inferior_registers (regcache, regno);
3161 #endif
3162 }
3163
3164
3165 /* Copy LEN bytes from inferior's memory starting at MEMADDR
3166 to debugger memory starting at MYADDR. */
3167
3168 static int
3169 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
3170 {
3171 register int i;
3172 /* Round starting address down to longword boundary. */
3173 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
3174 /* Round ending address up; get number of longwords that makes. */
3175 register int count
3176 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
3177 / sizeof (PTRACE_XFER_TYPE);
3178 /* Allocate buffer of that many longwords. */
3179 register PTRACE_XFER_TYPE *buffer
3180 = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
3181 int fd;
3182 char filename[64];
3183 int pid = lwpid_of (get_thread_lwp (current_inferior));
3184
3185 /* Try using /proc. Don't bother for one word. */
3186 if (len >= 3 * sizeof (long))
3187 {
3188 /* We could keep this file open and cache it - possibly one per
3189 thread. That requires some juggling, but is even faster. */
3190 sprintf (filename, "/proc/%d/mem", pid);
3191 fd = open (filename, O_RDONLY | O_LARGEFILE);
3192 if (fd == -1)
3193 goto no_proc;
3194
3195 /* If pread64 is available, use it. It's faster if the kernel
3196 supports it (only one syscall), and it's 64-bit safe even on
3197 32-bit platforms (for instance, SPARC debugging a SPARC64
3198 application). */
3199 #ifdef HAVE_PREAD64
3200 if (pread64 (fd, myaddr, len, memaddr) != len)
3201 #else
3202 if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
3203 #endif
3204 {
3205 close (fd);
3206 goto no_proc;
3207 }
3208
3209 close (fd);
3210 return 0;
3211 }
3212
3213 no_proc:
3214 /* Read all the longwords */
3215 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
3216 {
3217 errno = 0;
3218 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
3219 about coercing an 8 byte integer to a 4 byte pointer. */
3220 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
3221 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
3222 if (errno)
3223 return errno;
3224 }
3225
3226 /* Copy appropriate bytes out of the buffer. */
3227 memcpy (myaddr,
3228 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
3229 len);
3230
3231 return 0;
3232 }
3233
3234 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
3235 memory at MEMADDR. On failure (cannot write to the inferior)
3236 returns the value of errno. */
3237
3238 static int
3239 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
3240 {
3241 register int i;
3242 /* Round starting address down to longword boundary. */
3243 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
3244 /* Round ending address up; get number of longwords that makes. */
3245 register int count
3246 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1) / sizeof (PTRACE_XFER_TYPE);
3247 /* Allocate buffer of that many longwords. */
3248 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
3249 int pid = lwpid_of (get_thread_lwp (current_inferior));
3250
3251 if (debug_threads)
3252 {
3253 /* Dump up to four bytes. */
3254 unsigned int val = * (unsigned int *) myaddr;
3255 if (len == 1)
3256 val = val & 0xff;
3257 else if (len == 2)
3258 val = val & 0xffff;
3259 else if (len == 3)
3260 val = val & 0xffffff;
3261 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
3262 val, (long)memaddr);
3263 }
3264
3265 /* Fill start and end extra bytes of buffer with existing memory data. */
3266
3267 errno = 0;
3268 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
3269 about coercing an 8 byte integer to a 4 byte pointer. */
3270 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
3271 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
3272 if (errno)
3273 return errno;
3274
3275 if (count > 1)
3276 {
3277 errno = 0;
3278 buffer[count - 1]
3279 = ptrace (PTRACE_PEEKTEXT, pid,
3280 /* Coerce to a uintptr_t first to avoid potential gcc warning
3281 about coercing an 8 byte integer to a 4 byte pointer. */
3282 (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
3283 * sizeof (PTRACE_XFER_TYPE)),
3284 0);
3285 if (errno)
3286 return errno;
3287 }
3288
3289 /* Copy data to be written over corresponding part of buffer. */
3290
3291 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), myaddr, len);
3292
3293 /* Write the entire buffer. */
3294
3295 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
3296 {
3297 errno = 0;
3298 ptrace (PTRACE_POKETEXT, pid,
3299 /* Coerce to a uintptr_t first to avoid potential gcc warning
3300 about coercing an 8 byte integer to a 4 byte pointer. */
3301 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
3302 (PTRACE_ARG4_TYPE) buffer[i]);
3303 if (errno)
3304 return errno;
3305 }
3306
3307 return 0;
3308 }
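
/* For illustration, a worked example of the boundary arithmetic
   above: writing LEN = 5 bytes at MEMADDR = 0x1005 with 4-byte
   transfer words touches the aligned words at 0x1004 and 0x1008;
   both are peeked first so the bytes outside the payload survive
   the poke.  */
#if 0
#include <assert.h>

static void
rmw_bounds_example (void)
{
  typedef unsigned int xfer_t;	/* Stand-in for PTRACE_XFER_TYPE.  */
  unsigned long memaddr = 0x1005, len = 5;
  unsigned long addr = memaddr & -(unsigned long) sizeof (xfer_t);
  unsigned long count = ((memaddr + len) - addr + sizeof (xfer_t) - 1)
    / sizeof (xfer_t);

  assert (addr == 0x1004 && count == 2);
}
#endif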
3309
3310 /* Non-zero if the kernel supports PTRACE_O_TRACEFORK. */
3311 static int linux_supports_tracefork_flag;
3312
3313 /* Helper functions for linux_test_for_tracefork, called via clone (). */
3314
3315 static int
3316 linux_tracefork_grandchild (void *arg)
3317 {
3318 _exit (0);
3319 }
3320
3321 #define STACK_SIZE 4096
3322
3323 static int
3324 linux_tracefork_child (void *arg)
3325 {
3326 ptrace (PTRACE_TRACEME, 0, 0, 0);
3327 kill (getpid (), SIGSTOP);
3328
3329 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
3330
3331 if (fork () == 0)
3332 linux_tracefork_grandchild (NULL);
3333
3334 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3335
3336 #ifdef __ia64__
3337 __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
3338 CLONE_VM | SIGCHLD, NULL);
3339 #else
3340 clone (linux_tracefork_grandchild, arg + STACK_SIZE,
3341 CLONE_VM | SIGCHLD, NULL);
3342 #endif
3343
3344 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3345
3346 _exit (0);
3347 }
3348
3349 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
3350 sure that we can enable the option, and that it had the desired
3351 effect. */
3352
3353 static void
3354 linux_test_for_tracefork (void)
3355 {
3356 int child_pid, ret, status;
3357 long second_pid;
3358 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
3359 char *stack = xmalloc (STACK_SIZE * 4);
3360 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3361
3362 linux_supports_tracefork_flag = 0;
3363
3364 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
3365
3366 child_pid = fork ();
3367 if (child_pid == 0)
3368 linux_tracefork_child (NULL);
3369
3370 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3371
3372 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
3373 #ifdef __ia64__
3374 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
3375 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
3376 #else /* !__ia64__ */
3377 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
3378 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
3379 #endif /* !__ia64__ */
3380
3381 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3382
3383 if (child_pid == -1)
3384 perror_with_name ("clone");
3385
3386 ret = my_waitpid (child_pid, &status, 0);
3387 if (ret == -1)
3388 perror_with_name ("waitpid");
3389 else if (ret != child_pid)
3390 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
3391 if (! WIFSTOPPED (status))
3392 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
3393
3394 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
3395 (PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
3396 if (ret != 0)
3397 {
3398 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
3399 if (ret != 0)
3400 {
3401 warning ("linux_test_for_tracefork: failed to kill child");
3402 return;
3403 }
3404
3405 ret = my_waitpid (child_pid, &status, 0);
3406 if (ret != child_pid)
3407 warning ("linux_test_for_tracefork: failed to wait for killed child");
3408 else if (!WIFSIGNALED (status))
3409 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
3410 "killed child", status);
3411
3412 return;
3413 }
3414
3415 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
3416 if (ret != 0)
3417 warning ("linux_test_for_tracefork: failed to resume child");
3418
3419 ret = my_waitpid (child_pid, &status, 0);
3420
3421 if (ret == child_pid && WIFSTOPPED (status)
3422 && status >> 16 == PTRACE_EVENT_FORK)
3423 {
3424 second_pid = 0;
3425 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
3426 if (ret == 0 && second_pid != 0)
3427 {
3428 int second_status;
3429
3430 linux_supports_tracefork_flag = 1;
3431 my_waitpid (second_pid, &second_status, 0);
3432 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
3433 if (ret != 0)
3434 warning ("linux_test_for_tracefork: failed to kill second child");
3435 my_waitpid (second_pid, &status, 0);
3436 }
3437 }
3438 else
3439 warning ("linux_test_for_tracefork: unexpected result from waitpid "
3440 "(%d, status 0x%x)", ret, status);
3441
3442 do
3443 {
3444 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
3445 if (ret != 0)
3446 warning ("linux_test_for_tracefork: failed to kill child");
3447 my_waitpid (child_pid, &status, 0);
3448 }
3449 while (WIFSTOPPED (status));
3450
3451 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
3452 free (stack);
3453 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3454 }
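
/* For illustration: the extended events probed for above arrive as a
   SIGTRAP stop whose event code sits in bits 16 and up of the waitpid
   status, with PTRACE_GETEVENTMSG then supplying the new child's PID.
   A minimal decoder sketch: */
#if 0
static int
wait_status_to_ptrace_event (int status)
{
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
    return status >> 16;	/* 0 when not an extended event.  */
  return 0;
}
#endif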
3455
3456
3457 static void
3458 linux_look_up_symbols (void)
3459 {
3460 #ifdef USE_THREAD_DB
3461 struct process_info *proc = current_process ();
3462
3463 if (proc->private->thread_db != NULL)
3464 return;
3465
3466 /* If the kernel supports tracing forks then it also supports tracing
3467 clones, and then we don't need to use the magic thread event breakpoint
3468 to learn about threads. */
3469 thread_db_init (!linux_supports_tracefork_flag);
3470 #endif
3471 }
3472
3473 static void
3474 linux_request_interrupt (void)
3475 {
3476 extern unsigned long signal_pid;
3477
3478 if (!ptid_equal (cont_thread, null_ptid)
3479 && !ptid_equal (cont_thread, minus_one_ptid))
3480 {
3481 struct lwp_info *lwp;
3482 int lwpid;
3483
3484 lwp = get_thread_lwp (current_inferior);
3485 lwpid = lwpid_of (lwp);
3486 kill_lwp (lwpid, SIGINT);
3487 }
3488 else
3489 kill_lwp (signal_pid, SIGINT);
3490 }
3491
3492 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
3493 to debugger memory starting at MYADDR. */
3494
3495 static int
3496 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
3497 {
3498 char filename[PATH_MAX];
3499 int fd, n;
3500 int pid = lwpid_of (get_thread_lwp (current_inferior));
3501
3502 snprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
3503
3504 fd = open (filename, O_RDONLY);
3505 if (fd < 0)
3506 return -1;
3507
3508 if (offset != (CORE_ADDR) 0
3509 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
3510 n = -1;
3511 else
3512 n = read (fd, myaddr, len);
3513
3514 close (fd);
3515
3516 return n;
3517 }
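
/* For illustration: the bytes read above form an array of auxv
   entries, each an (a_type, a_val) pair in the inferior's word size.
   A sketch of scanning them for one tag, assuming for simplicity that
   host and inferior share a 64-bit layout: */
#if 0
#include <elf.h>

static unsigned long
auxv_lookup (const unsigned char *buf, unsigned int len, unsigned long tag)
{
  unsigned int i;

  for (i = 0; i + sizeof (Elf64_auxv_t) <= len; i += sizeof (Elf64_auxv_t))
    {
      const Elf64_auxv_t *e = (const Elf64_auxv_t *) (buf + i);

      if (e->a_type == AT_NULL)
	break;
      if (e->a_type == tag)
	return e->a_un.a_val;
    }
  return 0;
}
#endif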
3518
3519 /* These breakpoint and watchpoint related wrapper functions simply
3520 pass on the function call if the target has registered a
3521 corresponding function. */
3522
3523 static int
3524 linux_insert_point (char type, CORE_ADDR addr, int len)
3525 {
3526 if (the_low_target.insert_point != NULL)
3527 return the_low_target.insert_point (type, addr, len);
3528 else
3529 /* Unsupported (see target.h). */
3530 return 1;
3531 }
3532
3533 static int
3534 linux_remove_point (char type, CORE_ADDR addr, int len)
3535 {
3536 if (the_low_target.remove_point != NULL)
3537 return the_low_target.remove_point (type, addr, len);
3538 else
3539 /* Unsupported (see target.h). */
3540 return 1;
3541 }
3542
3543 static int
3544 linux_stopped_by_watchpoint (void)
3545 {
3546 struct lwp_info *lwp = get_thread_lwp (current_inferior);
3547
3548 return lwp->stopped_by_watchpoint;
3549 }
3550
3551 static CORE_ADDR
3552 linux_stopped_data_address (void)
3553 {
3554 struct lwp_info *lwp = get_thread_lwp (current_inferior);
3555
3556 return lwp->stopped_data_address;
3557 }
3558
3559 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
3560 #if defined(__mcoldfire__)
3561 /* These should really be defined in the kernel's ptrace.h header. */
3562 #define PT_TEXT_ADDR 49*4
3563 #define PT_DATA_ADDR 50*4
3564 #define PT_TEXT_END_ADDR 51*4
3565 #endif
3566
3567 /* Under uClinux, programs are loaded at non-zero offsets, which we need
3568 to tell gdb about. */
3569
3570 static int
3571 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
3572 {
3573 #if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
3574 unsigned long text, text_end, data;
3575 int pid = lwpid_of (get_thread_lwp (current_inferior));
3576
3577 errno = 0;
3578
3579 text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
3580 text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
3581 data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);
3582
3583 if (errno == 0)
3584 {
3585 /* Both text and data offsets produced at compile-time (and so
3586 used by gdb) are relative to the beginning of the program,
3587 with the data segment immediately following the text segment.
3588 However, the actual runtime layout in memory may put the data
3589 somewhere else, so when we send gdb a data base-address, we
3590 use the real data base address and subtract the compile-time
3591 data base-address from it (which is just the length of the
3592 text segment). BSS immediately follows data in both
3593 cases. */
3594 *text_p = text;
3595 *data_p = data - (text_end - text);
3596
3597 return 1;
3598 }
3599 #endif
3600 return 0;
3601 }
3602 #endif
3603
3604 static int
3605 compare_ints (const void *xa, const void *xb)
3606 {
3607 int a = *(const int *)xa;
3608 int b = *(const int *)xb;
3609
3610 return a - b;
3611 }
3612
3613 static int *
3614 unique (int *b, int *e)
3615 {
3616 int *d = b;
3617 while (++b != e)
3618 if (*d != *b)
3619 *++d = *b;
3620 return ++d;
3621 }
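
/* For illustration: unique mirrors the C++ std::unique idiom,
   compacting consecutive duplicates in place and returning one past
   the new end, so sort-then-unique leaves the distinct values at the
   front.  Note that, as written, it requires a non-empty range, which
   the caller below guarantees by checking count > 0.  */
#if 0
static int
count_distinct (int *v, int n)
{
  qsort (v, n, sizeof (int), compare_ints);	/* n > 0 assumed.  */
  return unique (v, v + n) - v;
}
#endif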
3622
3623 /* Given PID, iterates over all threads in that process.
3624
3625 Information about each thread, in a format suitable for qXfer:osdata:threads,
3626 is printed to BUFFER, if it's not NULL. BUFFER is assumed to be already
3627 initialized, and the caller is responsible for finishing and appending '\0'
3628 to it.
3629
3630 The list of cores that threads are running on is assigned to *CORES, if it
3631 is not NULL. If no cores are found, *CORES will be set to NULL. Caller
3632 should free *CORES. */
3633
3634 static void
3635 list_threads (int pid, struct buffer *buffer, char **cores)
3636 {
3637 int count = 0;
3638 int allocated = 10;
3639 int *core_numbers = xmalloc (sizeof (int) * allocated);
3640 char pathname[128];
3641 DIR *dir;
3642 struct dirent *dp;
3643 struct stat statbuf;
3644
3645 sprintf (pathname, "/proc/%d/task", pid);
3646 if (stat (pathname, &statbuf) == 0 && S_ISDIR (statbuf.st_mode))
3647 {
3648 dir = opendir (pathname);
3649 if (!dir)
3650 {
3651 free (core_numbers);
3652 return;
3653 }
3654
3655 while ((dp = readdir (dir)) != NULL)
3656 {
3657 unsigned long lwp = strtoul (dp->d_name, NULL, 10);
3658
3659 if (lwp != 0)
3660 {
3661 unsigned core = linux_core_of_thread (ptid_build (pid, lwp, 0));
3662
3663 if (core != -1)
3664 {
3665 char s[sizeof ("4294967295")];
3666 sprintf (s, "%u", core);
3667
3668 if (count == allocated)
3669 {
3670 allocated *= 2;
3671 core_numbers = realloc (core_numbers,
3672 sizeof (int) * allocated);
3673 }
3674 core_numbers[count++] = core;
3675 if (buffer)
3676 buffer_xml_printf (buffer,
3677 "<item>"
3678 "<column name=\"pid\">%d</column>"
3679 "<column name=\"tid\">%s</column>"
3680 "<column name=\"core\">%s</column>"
3681 "</item>", pid, dp->d_name, s);
3682 }
3683 else
3684 {
3685 if (buffer)
3686 buffer_xml_printf (buffer,
3687 "<item>"
3688 "<column name=\"pid\">%d</column>"
3689 "<column name=\"tid\">%s</column>"
3690 "</item>", pid, dp->d_name);
3691 }
3692 }
3693 }
3694 }
3695
3696 if (cores)
3697 {
3698 *cores = NULL;
3699 if (count > 0)
3700 {
3701 struct buffer buffer2;
3702 int *b;
3703 int *e;
3704 qsort (core_numbers, count, sizeof (int), compare_ints);
3705
3706 /* Remove duplicates. */
3707 b = core_numbers;
3708 e = unique (b, core_numbers + count);
3709
3710 buffer_init (&buffer2);
3711
3712 for (b = core_numbers; b != e; ++b)
3713 {
3714 char number[sizeof ("4294967295")];
3715 sprintf (number, "%u", *b);
3716 buffer_xml_printf (&buffer2, "%s%s",
3717 (b == core_numbers) ? "" : ",", number);
3718 }
3719 buffer_grow_str0 (&buffer2, "");
3720
3721 *cores = buffer_finish (&buffer2);
3722 }
3723 }
3724 free (core_numbers);
3725 }
3726
3727 static void
3728 show_process (int pid, const char *username, struct buffer *buffer)
3729 {
3730 char pathname[128];
3731 FILE *f;
3732 char cmd[MAXPATHLEN + 1];
3733
3734 sprintf (pathname, "/proc/%d/cmdline", pid);
3735
3736 if ((f = fopen (pathname, "r")) != NULL)
3737 {
3738 size_t len = fread (cmd, 1, sizeof (cmd) - 1, f);
3739 if (len > 0)
3740 {
3741 char *cores = 0;
3742 int i;
3743 for (i = 0; i < len; i++)
3744 if (cmd[i] == '\0')
3745 cmd[i] = ' ';
3746 cmd[len] = '\0';
3747
3748 buffer_xml_printf (buffer,
3749 "<item>"
3750 "<column name=\"pid\">%d</column>"
3751 "<column name=\"user\">%s</column>"
3752 "<column name=\"command\">%s</column>",
3753 pid,
3754 username,
3755 cmd);
3756
3757 /* This only collects core numbers, and does not print threads. */
3758 list_threads (pid, NULL, &cores);
3759
3760 if (cores)
3761 {
3762 buffer_xml_printf (buffer,
3763 "<column name=\"cores\">%s</column>", cores);
3764 free (cores);
3765 }
3766
3767 buffer_xml_printf (buffer, "</item>");
3768 }
3769 fclose (f);
3770 }
3771 }
3772
3773 static int
3774 linux_qxfer_osdata (const char *annex,
3775 unsigned char *readbuf, unsigned const char *writebuf,
3776 CORE_ADDR offset, int len)
3777 {
3778 /* We make the process list snapshot when the object starts to be
3779 read. */
3780 static const char *buf;
3781 static long len_avail = -1;
3782 static struct buffer buffer;
3783 int processes = 0;
3784 int threads = 0;
3785
3786 DIR *dirp;
3787
3788 if (strcmp (annex, "processes") == 0)
3789 processes = 1;
3790 else if (strcmp (annex, "threads") == 0)
3791 threads = 1;
3792 else
3793 return 0;
3794
3795 if (!readbuf || writebuf)
3796 return 0;
3797
3798 if (offset == 0)
3799 {
3800 if (len_avail != -1 && len_avail != 0)
3801 buffer_free (&buffer);
3802 len_avail = 0;
3803 buf = NULL;
3804 buffer_init (&buffer);
3805 if (processes)
3806 buffer_grow_str (&buffer, "<osdata type=\"processes\">");
3807 else if (threads)
3808 buffer_grow_str (&buffer, "<osdata type=\"threads\">");
3809
3810 dirp = opendir ("/proc");
3811 if (dirp)
3812 {
3813 struct dirent *dp;
3814 while ((dp = readdir (dirp)) != NULL)
3815 {
3816 struct stat statbuf;
3817 char procentry[sizeof ("/proc/4294967295")];
3818
3819 if (!isdigit (dp->d_name[0])
3820 || strlen (dp->d_name) > sizeof ("4294967295") - 1)
3821 continue;
3822
3823 sprintf (procentry, "/proc/%s", dp->d_name);
3824 if (stat (procentry, &statbuf) == 0
3825 && S_ISDIR (statbuf.st_mode))
3826 {
3827 int pid = (int) strtoul (dp->d_name, NULL, 10);
3828
3829 if (processes)
3830 {
3831 struct passwd *entry = getpwuid (statbuf.st_uid);
3832 show_process (pid, entry ? entry->pw_name : "?", &buffer);
3833 }
3834 else if (threads)
3835 {
3836 list_threads (pid, &buffer, NULL);
3837 }
3838 }
3839 }
3840
3841 closedir (dirp);
3842 }
3843 buffer_grow_str0 (&buffer, "</osdata>\n");
3844 buf = buffer_finish (&buffer);
3845 len_avail = strlen (buf);
3846 }
3847
3848 if (offset >= len_avail)
3849 {
3850 /* Done. Get rid of the data. */
3851 buffer_free (&buffer);
3852 buf = NULL;
3853 len_avail = 0;
3854 return 0;
3855 }
3856
3857 if (len > len_avail - offset)
3858 len = len_avail - offset;
3859 memcpy (readbuf, buf + offset, len);
3860
3861 return len;
3862 }
3863
3864 /* Convert a native/host siginfo object into/from the siginfo in the
3865    layout of the inferior's architecture.  */
3866
3867 static void
3868 siginfo_fixup (struct siginfo *siginfo, void *inf_siginfo, int direction)
3869 {
3870 int done = 0;
3871
3872 if (the_low_target.siginfo_fixup != NULL)
3873 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
3874
3875 /* If there was no callback, or the callback didn't do anything,
3876 then just do a straight memcpy. */
3877 if (!done)
3878 {
3879 if (direction == 1)
3880 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
3881 else
3882 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
3883 }
3884 }
3885
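/* Handle qXfer:siginfo: read or write the signal information of the
   current LWP, converting between the ptrace (host) layout and the
   inferior's layout with siginfo_fixup.  Returns the number of bytes
   transferred, or -1 on error.  */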
3886 static int
3887 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
3888 unsigned const char *writebuf, CORE_ADDR offset, int len)
3889 {
3890 int pid;
3891 struct siginfo siginfo;
3892 char inf_siginfo[sizeof (struct siginfo)];
3893
3894 if (current_inferior == NULL)
3895 return -1;
3896
3897 pid = lwpid_of (get_thread_lwp (current_inferior));
3898
3899 if (debug_threads)
3900 fprintf (stderr, "%s siginfo for lwp %d.\n",
3901 readbuf != NULL ? "Reading" : "Writing",
3902 pid);
3903
3904 if (offset > sizeof (siginfo))
3905 return -1;
3906
3907 if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
3908 return -1;
3909
3910 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
3911 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
3912 inferior with a 64-bit GDBSERVER should look the same as debugging it
3913 with a 32-bit GDBSERVER, we need to convert it. */
3914 siginfo_fixup (&siginfo, inf_siginfo, 0);
3915
3916 if (offset + len > sizeof (siginfo))
3917 len = sizeof (siginfo) - offset;
3918
3919 if (readbuf != NULL)
3920 memcpy (readbuf, inf_siginfo + offset, len);
3921 else
3922 {
3923 memcpy (inf_siginfo + offset, writebuf, len);
3924
3925 /* Convert back to ptrace layout before flushing it out. */
3926 siginfo_fixup (&siginfo, inf_siginfo, 1);
3927
3928 if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
3929 return -1;
3930 }
3931
3932 return len;
3933 }
3934
3935 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
3936    it marks the event pipe so we notice when children change state;
3937    it also acts as the handler for the sigsuspend in my_waitpid.  */
3938
3939 static void
3940 sigchld_handler (int signo)
3941 {
3942 int old_errno = errno;
3943
3944 if (debug_threads)
3945 /* fprintf is not async-signal-safe, so call write directly. */
3946 write (2, "sigchld_handler\n", sizeof ("sigchld_handler\n") - 1);
3947
3948 if (target_is_async_p ())
3949 async_file_mark (); /* trigger a linux_wait */
3950
3951 errno = old_errno;
3952 }
3953
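/* Non-stop mode is always supported on Linux.  */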
3954 static int
3955 linux_supports_non_stop (void)
3956 {
3957 return 1;
3958 }
3959
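/* Enable or disable async (event-loop driven) mode; returns the
   previous setting.  SIGCHLD is blocked across the transition so that
   sigchld_handler cannot run while the event pipe is being created or
   torn down.  */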
3960 static int
3961 linux_async (int enable)
3962 {
3963 int previous = (linux_event_pipe[0] != -1);
3964
3965 if (previous != enable)
3966 {
3967 sigset_t mask;
3968 sigemptyset (&mask);
3969 sigaddset (&mask, SIGCHLD);
3970
3971 sigprocmask (SIG_BLOCK, &mask, NULL);
3972
3973 if (enable)
3974 {
3975 if (pipe (linux_event_pipe) == -1)
3976 fatal ("creating event pipe failed.");
3977
3978 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
3979 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
3980
3981 /* Register the event loop handler. */
3982 add_file_handler (linux_event_pipe[0],
3983 handle_target_event, NULL);
3984
3985 /* Always trigger a linux_wait. */
3986 async_file_mark ();
3987 }
3988 else
3989 {
3990 delete_file_handler (linux_event_pipe[0]);
3991
3992 close (linux_event_pipe[0]);
3993 close (linux_event_pipe[1]);
3994 linux_event_pipe[0] = -1;
3995 linux_event_pipe[1] = -1;
3996 }
3997
3998 sigprocmask (SIG_UNBLOCK, &mask, NULL);
3999 }
4000
4001 return previous;
4002 }
4003
4004 static int
4005 linux_start_non_stop (int nonstop)
4006 {
4007 /* Register or unregister from event-loop accordingly. */
4008 linux_async (nonstop);
4009 return 0;
4010 }
4011
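/* Debugging multiple processes simultaneously is supported on Linux.  */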
4012 static int
4013 linux_supports_multi_process (void)
4014 {
4015 return 1;
4016 }
4017
4018
4019 /* Enumerate spufs IDs for process PID: store each spufs context fd as a 32-bit integer into BUF, honoring OFFSET and LEN; return the number of bytes written, or -1 on error.  */
4020 static int
4021 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
4022 {
4023 int pos = 0;
4024 int written = 0;
4025 char path[128];
4026 DIR *dir;
4027 struct dirent *entry;
4028
4029 sprintf (path, "/proc/%ld/fd", pid);
4030 dir = opendir (path);
4031 if (!dir)
4032 return -1;
4033
4034 rewinddir (dir);
4035 while ((entry = readdir (dir)) != NULL)
4036 {
4037 struct stat st;
4038 struct statfs stfs;
4039 int fd;
4040
4041 fd = atoi (entry->d_name);
4042 if (!fd)
4043 continue;
4044
4045 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
4046 if (stat (path, &st) != 0)
4047 continue;
4048 if (!S_ISDIR (st.st_mode))
4049 continue;
4050
4051 if (statfs (path, &stfs) != 0)
4052 continue;
4053 if (stfs.f_type != SPUFS_MAGIC)
4054 continue;
4055
4056 if (pos >= offset && pos + 4 <= offset + len)
4057 {
4058 *(unsigned int *)(buf + pos - offset) = fd;
4059 written += 4;
4060 }
4061 pos += 4;
4062 }
4063
4064 closedir (dir);
4065 return written;
4066 }
4067
4068 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
4069 object type, using the /proc file system. */
4070 static int
4071 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
4072 unsigned const char *writebuf,
4073 CORE_ADDR offset, int len)
4074 {
4075 long pid = lwpid_of (get_thread_lwp (current_inferior));
4076 char buf[128];
4077 int fd = 0;
4078 int ret = 0;
4079
4080 if (!writebuf && !readbuf)
4081 return -1;
4082
4083 if (!*annex)
4084 {
4085 if (!readbuf)
4086 return -1;
4087 else
4088 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4089 }
4090
4091 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
4092 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
4093 if (fd < 0)
4094 return -1;
4095
4096 if (offset != 0
4097 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4098 {
4099 close (fd);
4100 return 0;
4101 }
4102
4103 if (writebuf)
4104 ret = write (fd, writebuf, (size_t) len);
4105 else
4106 ret = read (fd, readbuf, (size_t) len);
4107
4108 close (fd);
4109 return ret;
4110 }
4111
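/* Return the number of the core that thread PTID last ran on, parsed
   from the 39th field of /proc/PID/task/LWP/stat, or -1 on failure.  */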
4112 static int
4113 linux_core_of_thread (ptid_t ptid)
4114 {
4115 char filename[sizeof ("/proc//task//stat")
4116 + 2 * 20 /* Room for two decimal numbers of up to 64 bits (20 digits each). */
4117 + 1];
4118 FILE *f;
4119 char *content = NULL;
4120 char *p;
4121 char *ts = NULL;
4122 int content_read = 0;
4123 int i;
4124 int core;
4125
4126 sprintf (filename, "/proc/%d/task/%ld/stat",
4127 ptid_get_pid (ptid), ptid_get_lwp (ptid));
4128 f = fopen (filename, "r");
4129 if (!f)
4130 return -1;
4131
4132 for (;;)
4133 {
4134 int n;
4135 content = realloc (content, content_read + 1024);
4136 n = fread (content + content_read, 1, 1024, f);
4137 content_read += n;
4138 if (n < 1024)
4139 {
4140 content[content_read] = '\0';
4141 break;
4142 }
4143 }
4144
4145 p = strchr (content, '(');
4146 p = strchr (p, ')') + 2; /* Skip ")" and the whitespace after it.  */
4147
4148 p = strtok_r (p, " ", &ts);
4149 for (i = 0; i != 36; ++i)
4150 p = strtok_r (NULL, " ", &ts);
4151
4152 if (sscanf (p, "%d", &core) != 1)
4153 core = -1;
4154
4155 free (content);
4156 fclose (f);
4157
4158 return core;
4159 }
4160
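/* Give the architecture backend a chance to react to GDB's qSupported
   features, if it registered a handler (e.g. so the x86 backend can
   choose a target description matching what GDB announced).  */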
4161 static void
4162 linux_process_qsupported (const char *query)
4163 {
4164 if (the_low_target.process_qsupported != NULL)
4165 the_low_target.process_qsupported (query);
4166 }
4167
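/* The Linux instance of the target operations vector; the initializers
   must appear in the same order as the callback slots declared in
   struct target_ops.  */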
4168 static struct target_ops linux_target_ops = {
4169 linux_create_inferior,
4170 linux_attach,
4171 linux_kill,
4172 linux_detach,
4173 linux_join,
4174 linux_thread_alive,
4175 linux_resume,
4176 linux_wait,
4177 linux_fetch_registers,
4178 linux_store_registers,
4179 linux_read_memory,
4180 linux_write_memory,
4181 linux_look_up_symbols,
4182 linux_request_interrupt,
4183 linux_read_auxv,
4184 linux_insert_point,
4185 linux_remove_point,
4186 linux_stopped_by_watchpoint,
4187 linux_stopped_data_address,
4188 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4189 linux_read_offsets,
4190 #else
4191 NULL,
4192 #endif
4193 #ifdef USE_THREAD_DB
4194 thread_db_get_tls_address,
4195 #else
4196 NULL,
4197 #endif
4198 linux_qxfer_spu,
4199 hostio_last_error_from_errno,
4200 linux_qxfer_osdata,
4201 linux_xfer_siginfo,
4202 linux_supports_non_stop,
4203 linux_async,
4204 linux_start_non_stop,
4205 linux_supports_multi_process,
4206 #ifdef USE_THREAD_DB
4207 thread_db_handle_monitor_command,
4208 #else
4209 NULL,
4210 #endif
4211 linux_core_of_thread,
4212 linux_process_qsupported
4213 };
4214
4215 static void
4216 linux_init_signals (void)
4217 {
4218 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
4219 to find what the cancel signal actually is. */
4220 #ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does. */
4221 signal (__SIGRTMIN+1, SIG_IGN);
4222 #endif
4223 }
4224
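/* One-time initialization of the Linux low target: install the target
   vector and breakpoint data, set up signal handling, probe for ptrace
   fork-event support (linux_test_for_tracefork), count the supported
   regsets, and install the SIGCHLD handler.  */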
4225 void
4226 initialize_low (void)
4227 {
4228 struct sigaction sigchld_action;
4229 memset (&sigchld_action, 0, sizeof (sigchld_action));
4230 set_target_ops (&linux_target_ops);
4231 set_breakpoint_data (the_low_target.breakpoint,
4232 the_low_target.breakpoint_len);
4233 linux_init_signals ();
4234 linux_test_for_tracefork ();
4235 #ifdef HAVE_LINUX_REGSETS
4236 for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
4237 ;
4238 disabled_regsets = xmalloc (num_regsets);
4239 #endif
4240
4241 sigchld_action.sa_handler = sigchld_handler;
4242 sigemptyset (&sigchld_action.sa_mask);
4243 sigchld_action.sa_flags = SA_RESTART;
4244 sigaction (SIGCHLD, &sigchld_action, NULL);
4245 }