*** empty log message ***
[binutils-gdb.git] / gdb / gdbserver / linux-low.c
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
3 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include "linux-low.h"
22
23 #include <sys/wait.h>
24 #include <stdio.h>
25 #include <sys/param.h>
26 #include <sys/ptrace.h>
27 #include <signal.h>
28 #include <sys/ioctl.h>
29 #include <fcntl.h>
30 #include <string.h>
31 #include <stdlib.h>
32 #include <unistd.h>
33 #include <errno.h>
34 #include <sys/syscall.h>
35 #include <sched.h>
36 #include <ctype.h>
37 #include <pwd.h>
38 #include <sys/types.h>
39 #include <dirent.h>
40 #include <sys/stat.h>
41 #include <sys/vfs.h>
42 #include <sys/uio.h>
43 #ifndef ELFMAG0
44 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
45 then ELFMAG0 will have been defined. If it didn't get included by
46 gdb_proc_service.h then including it will likely introduce a duplicate
47 definition of elf_fpregset_t. */
48 #include <elf.h>
49 #endif
50
51 #ifndef SPUFS_MAGIC
52 #define SPUFS_MAGIC 0x23c9b64e
53 #endif
54
55 #ifndef PTRACE_GETSIGINFO
56 # define PTRACE_GETSIGINFO 0x4202
57 # define PTRACE_SETSIGINFO 0x4203
58 #endif
59
60 #ifndef O_LARGEFILE
61 #define O_LARGEFILE 0
62 #endif
63
64 /* If the system headers did not provide the constants, hard-code the normal
65 values. */
66 #ifndef PTRACE_EVENT_FORK
67
68 #define PTRACE_SETOPTIONS 0x4200
69 #define PTRACE_GETEVENTMSG 0x4201
70
71 /* options set using PTRACE_SETOPTIONS */
72 #define PTRACE_O_TRACESYSGOOD 0x00000001
73 #define PTRACE_O_TRACEFORK 0x00000002
74 #define PTRACE_O_TRACEVFORK 0x00000004
75 #define PTRACE_O_TRACECLONE 0x00000008
76 #define PTRACE_O_TRACEEXEC 0x00000010
77 #define PTRACE_O_TRACEVFORKDONE 0x00000020
78 #define PTRACE_O_TRACEEXIT 0x00000040
79
80 /* Wait extended result codes for the above trace options. */
81 #define PTRACE_EVENT_FORK 1
82 #define PTRACE_EVENT_VFORK 2
83 #define PTRACE_EVENT_CLONE 3
84 #define PTRACE_EVENT_EXEC 4
85 #define PTRACE_EVENT_VFORK_DONE 5
86 #define PTRACE_EVENT_EXIT 6
87
88 #endif /* PTRACE_EVENT_FORK */
89
90 /* We can't always assume that this flag is available, but all systems
91 with the ptrace event handlers also have __WALL, so it's safe to use
92 in some contexts. */
93 #ifndef __WALL
94 #define __WALL 0x40000000 /* Wait for any child. */
95 #endif
96
97 #ifndef W_STOPCODE
98 #define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
99 #endif
100
101 #ifdef __UCLIBC__
102 #if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
103 #define HAS_NOMMU
104 #endif
105 #endif
106
107 /* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
108 representation of the thread ID.
109
110 ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
111 the same as the LWP ID.
112
113 ``all_processes'' is keyed by the "overall process ID", which
114 GNU/Linux calls tgid, "thread group ID". */
115
116 struct inferior_list all_lwps;
117
118 /* A list of all unknown processes which receive stop signals. Some other
119 process will presumably claim each of these as forked children
120 momentarily. */
121
122 struct inferior_list stopped_pids;
123
124 /* FIXME this is a bit of a hack, and could be removed. */
125 int stopping_threads;
126
127 /* FIXME make into a target method? */
128 int using_threads = 1;
129
130 /* This flag is true iff we've just created or attached to our first
131 inferior but it has not stopped yet. As soon as it does, we need
132 to call the low target's arch_setup callback. Doing this only on
133 the first inferior avoids reinializing the architecture on every
134 inferior, and avoids messing with the register caches of the
135 already running inferiors. NOTE: this assumes all inferiors under
136 control of gdbserver have the same architecture. */
137 static int new_inferior;
138
139 static void linux_resume_one_lwp (struct lwp_info *lwp,
140 int step, int signal, siginfo_t *info);
141 static void linux_resume (struct thread_resume *resume_info, size_t n);
142 static void stop_all_lwps (int suspend, struct lwp_info *except);
143 static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
144 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
145 static void *add_lwp (ptid_t ptid);
146 static int linux_stopped_by_watchpoint (void);
147 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
148 static int linux_core_of_thread (ptid_t ptid);
149 static void proceed_all_lwps (void);
150 static int finish_step_over (struct lwp_info *lwp);
151 static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
152 static int kill_lwp (unsigned long lwpid, int signo);
153 static void linux_enable_event_reporting (int pid);
154
155 /* True if the low target can hardware single-step. Such targets
156 don't need a BREAKPOINT_REINSERT_ADDR callback. */
157
158 static int
159 can_hardware_single_step (void)
160 {
161 return (the_low_target.breakpoint_reinsert_addr == NULL);
162 }
163
164 /* True if the low target supports memory breakpoints. If so, we'll
165 have a GET_PC implementation. */
166
167 static int
168 supports_breakpoints (void)
169 {
170 return (the_low_target.get_pc != NULL);
171 }
172
/* A signal received by an LWP that gdbserver must hold on to and
   re-deliver later; entries are chained through PREV.  */
struct pending_signals
{
  int signal;			/* Signal number.  */
  siginfo_t info;		/* Full siginfo captured when the signal arrived.  */
  struct pending_signals *prev;	/* Previously queued pending signal, if any.  */
};
179
180 #define PTRACE_ARG3_TYPE void *
181 #define PTRACE_ARG4_TYPE void *
182 #define PTRACE_XFER_TYPE long
183
184 #ifdef HAVE_LINUX_REGSETS
185 static char *disabled_regsets;
186 static int num_regsets;
187 #endif
188
189 /* The read/write ends of the pipe registered as waitable file in the
190 event loop. */
191 static int linux_event_pipe[2] = { -1, -1 };
192
193 /* True if we're currently in async mode. */
194 #define target_is_async_p() (linux_event_pipe[0] != -1)
195
196 static void send_sigstop (struct lwp_info *lwp);
197 static void wait_for_sigstop (struct inferior_list_entry *entry);
198
/* Accepts an integer PID; Returns a string representing a file that
   can be opened to get info for the child process.
   Space for the result is malloc'd, caller must free.

   On success the result is the target of the /proc/PID/exe symlink;
   if the link cannot be read, the "/proc/PID/exe" path itself is
   returned instead.  */

char *
linux_child_pid_to_exec_file (int pid)
{
  char *name1, *name2;

  name1 = xmalloc (MAXPATHLEN);
  name2 = xmalloc (MAXPATHLEN);
  memset (name2, 0, MAXPATHLEN);

  snprintf (name1, MAXPATHLEN, "/proc/%d/exe", pid);

  /* readlink does not NUL-terminate its output; read at most
     MAXPATHLEN - 1 bytes so the terminator written by the memset
     above survives even when the link target is truncated.  */
  if (readlink (name1, name2, MAXPATHLEN - 1) > 0)
    {
      free (name1);
      return name2;
    }
  else
    {
      free (name2);
      return name1;
    }
}
224
/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header)
{
  const unsigned char *ident = header->e_ident;

  /* Reject anything that doesn't carry the ELF magic bytes.  */
  if (ident[EI_MAG0] != ELFMAG0
      || ident[EI_MAG1] != ELFMAG1
      || ident[EI_MAG2] != ELFMAG2
      || ident[EI_MAG3] != ELFMAG3)
    return 0;

  /* It's ELF; it's 64-bit iff the class byte says so.  */
  return ident[EI_CLASS] == ELFCLASS64;
}
236
/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

int
elf_64_file_p (const char *file)
{
  Elf64_Ehdr header;
  ssize_t nread;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  nread = read (fd, &header, sizeof (header));
  close (fd);

  /* Anything too short to contain a full ELF header (or unreadable)
     cannot be a 64-bit ELF file.  */
  if (nread != sizeof (header))
    return 0;

  return elf_64_header_p (&header);
}
260
/* Remove LWP from the thread list and the LWP list, and release its
   storage, including any architecture-specific private data.  */

static void
delete_lwp (struct lwp_info *lwp)
{
  remove_thread (get_lwp_thread (lwp));
  remove_inferior (&all_lwps, &lwp->head);
  free (lwp->arch_private);
  free (lwp);
}
269
/* Add a process to the common process list, and set its private
   data.  Returns the new process_info.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  /* Is this the first process?  If so, then set the arch.  */
  if (all_processes.head == NULL)
    new_inferior = 1;

  proc = add_process (pid, attached);
  /* Zero-initialized private data; the low target may attach its own
     architecture-specific state below.  */
  proc->private = xcalloc (1, sizeof (*proc->private));

  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}
290
/* Wrapper function for waitpid which handles EINTR, and emulates
   __WALL for systems where that is not available.  When __WALL is
   requested, alternates between waiting for plain children and
   __WCLONE children (blocking in sigsuspend between full rounds
   unless the caller passed WNOHANG), so events from both kinds of
   children are collected.  On return, errno reflects the last
   underlying waitpid call.  */

static int
my_waitpid (int pid, int *status, int flags)
{
  int ret, out_errno;

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);

  if (flags & __WALL)
    {
      sigset_t block_mask, org_mask, wake_mask;
      int wnohang;

      wnohang = (flags & WNOHANG) != 0;
      /* We'll emulate __WALL by polling each flavor with WNOHANG.  */
      flags &= ~(__WALL | __WCLONE);
      flags |= WNOHANG;

      /* Block all signals while here.  This avoids knowing about
	 LinuxThread's signals.  */
      sigfillset (&block_mask);
      sigprocmask (SIG_BLOCK, &block_mask, &org_mask);

      /* ... except during the sigsuspend below.  */
      sigemptyset (&wake_mask);

      while (1)
	{
	  /* Since all signals are blocked, there's no need to check
	     for EINTR here.  */
	  ret = waitpid (pid, status, flags);
	  out_errno = errno;

	  if (ret == -1 && out_errno != ECHILD)
	    break;
	  else if (ret > 0)
	    break;

	  if (flags & __WCLONE)
	    {
	      /* We've tried both flavors now.  If WNOHANG is set,
		 there's nothing else to do, just bail out.  */
	      if (wnohang)
		break;

	      if (debug_threads)
		fprintf (stderr, "blocking\n");

	      /* Block waiting for signals.  */
	      sigsuspend (&wake_mask);
	    }

	  /* Alternate between checking cloned and uncloned children.  */
	  flags ^= __WCLONE;
	}

      sigprocmask (SIG_SETMASK, &org_mask, NULL);
    }
  else
    {
      do
	ret = waitpid (pid, status, flags);
      while (ret == -1 && errno == EINTR);
      out_errno = errno;
    }

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
	     pid, flags, status ? *status : -1, ret);

  /* Report the errno of the decisive waitpid call, not of any later
     library call made above.  */
  errno = out_errno;
  return ret;
}
365
/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).

   EVENT_CHILD is the LWP that reported the extended event; WSTAT is
   its raw wait status, whose bits above the low 16 carry the
   PTRACE_EVENT_* code.  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  /* The ptrace event code lives above the ordinary status bits.  */
  int event = wstat >> 16;
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status = W_STOPCODE (SIGSTOP);

      /* Retrieve the LWP id of the new clone child.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (! pull_pid_from_list (&stopped_pids, new_pid))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      linux_enable_event_reporting (new_pid);

      ptid = ptid_build (pid_of (event_child), new_pid, 0);
      new_lwp = (struct lwp_info *) add_lwp (ptid);
      add_thread (ptid, new_lwp);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
	{
	  if (stopping_threads)
	    new_lwp->stop_pc = get_stop_pc (new_lwp);
	  else
	    linux_resume_one_lwp (new_lwp, 0, 0, NULL);
	}
      else
	{
	  new_lwp->stop_expected = 1;

	  if (stopping_threads)
	    {
	      new_lwp->stop_pc = get_stop_pc (new_lwp);
	      new_lwp->status_pending_p = 1;
	      new_lwp->status_pending = status;
	    }
	  else
	    /* Pass the signal on.  This is what GDB does - except
	       shouldn't we really report it instead?  */
	    linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
	}

      /* Always resume the current thread.  If we are stopping
	 threads, it will have a pending SIGSTOP; we may as well
	 collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}
444
/* Return the PC as read from the regcache of LWP, without any
   adjustment.  Returns 0 if the low target cannot report the PC.
   Temporarily switches CURRENT_INFERIOR to LWP's thread so the right
   regcache is fetched, and restores it before returning.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_inferior, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    fprintf (stderr, "pc is 0x%lx\n", (long) pc);

  current_inferior = saved_inferior;
  return pc;
}
470
/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:
   If we were single-stepping this process using PTRACE_SINGLESTEP,
   we will get only the one SIGTRAP (even if the instruction we
   stepped over was a breakpoint).  The value of $eip will be the
   next instruction.
   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If we cancel the SIGTRAP, we
   must resume at the decremented PC.

   (Presumably, not yet tested) On a non-decr_pc_after_break machine
   with hardware or kernel single-step:
   If we single-step over a breakpoint instruction, our PC will
   point at the following instruction.  If we continue and hit a
   breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static CORE_ADDR
get_stop_pc (struct lwp_info *lwp)
{
  CORE_ADDR stop_pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  stop_pc = get_pc (lwp);

  /* Only back up the PC when the trap was a breakpoint hit: a plain
     SIGTRAP that is neither a single-step, nor a watchpoint, nor an
     extended ptrace event (upper status bits clear).  */
  if (WSTOPSIG (lwp->last_status) == SIGTRAP
      && !lwp->stepping
      && !lwp->stopped_by_watchpoint
      && lwp->last_status >> 16 == 0)
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}
514
515 static void *
516 add_lwp (ptid_t ptid)
517 {
518 struct lwp_info *lwp;
519
520 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
521 memset (lwp, 0, sizeof (*lwp));
522
523 lwp->head.id = ptid;
524
525 if (the_low_target.new_thread != NULL)
526 lwp->arch_private = the_low_target.new_thread ();
527
528 add_inferior_to_list (&all_lwps, &lwp->head);
529
530 return lwp;
531 }
532
/* Start an inferior process and returns its pid.
   ALLARGS is a vector of program-name and args.

   Forks (vfork when uClibc has no MMU), has the child request
   tracing via PTRACE_TRACEME, and execs PROGRAM.  The parent
   registers the new process and its initial LWP/thread before
   returning.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      ptrace (PTRACE_TRACEME, 0, 0, 0);

#ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      /* Put the inferior in its own process group.  */
      setpgid (0, 0);

      /* Try execv first; fall back to a PATH search only when the
	 program was not found as given.  */
      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  add_thread (ptid, new_lwp);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}
580
/* Attach to an inferior process.  */

/* Attach to LWPID.  INITIAL is nonzero when this is the first LWP of
   a process we are attaching to (called from linux_attach), and zero
   when it is an additional thread of an already-attached process.
   On ptrace failure, non-initial LWPs only get a warning, while the
   initial LWP raises a fatal error.  */

static void
linux_attach_lwp_1 (unsigned long lwpid, int initial)
{
  ptid_t ptid;
  struct lwp_info *new_lwp;

  if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
    {
      if (!initial)
	{
	  /* If we fail to attach to an LWP, just warn.  */
	  fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
		   strerror (errno), errno);
	  fflush (stderr);
	  return;
	}
      else
	/* If we fail to attach to a process, report an error.  */
	error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid,
	       strerror (errno), errno);
    }

  if (initial)
    /* NOTE/FIXME: This lwp might have not been the tgid.  */
    ptid = ptid_build (lwpid, lwpid, 0);
  else
    {
      /* Note that extracting the pid from the current inferior is
	 safe, since we're always called in the context of the same
	 process as this new thread.  */
      int pid = pid_of (get_thread_lwp (current_inferior));
      ptid = ptid_build (pid, lwpid, 0);
    }

  new_lwp = (struct lwp_info *) add_lwp (ptid);
  add_thread (ptid, new_lwp);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.
	??? If the process already has several threads we leave the other
	threads running.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;
}
659
/* Public entry point for attaching to an additional LWP of a process
   we are already attached to.  */

void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}
665
/* Attach to process PID: attach its initial LWP and register the
   process.  In all-stop mode, arrange for the initial SIGSTOP to be
   reported to GDB instead of being silently swallowed.  Returns 0.  */

int
linux_attach (unsigned long pid)
{
  linux_attach_lwp_1 (pid, 1);
  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
	 process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  return 0;
}
684
/* Callback state for second_thread_of_pid_p: the process id to match
   and how many of its threads have been seen so far.  */
struct counter
{
  int pid;
  int count;
};
690
691 static int
692 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
693 {
694 struct counter *counter = args;
695
696 if (ptid_get_pid (entry->id) == counter->pid)
697 {
698 if (++counter->count > 1)
699 return 1;
700 }
701
702 return 0;
703 }
704
/* Return nonzero if THREAD is the only thread of its process: walk
   ALL_THREADS counting same-pid threads and check no second one was
   found.  */

static int
last_thread_of_process_p (struct thread_info *thread)
{
  ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
  int pid = ptid_get_pid (ptid);
  struct counter counter = { pid , 0 };

  return (find_inferior (&all_threads,
			 second_thread_of_pid_p, &counter) == NULL);
}
715
/* Kill the inferior lwp.  */

/* find_inferior callback: kill the LWP behind ENTRY if it belongs to
   the process whose pid is pointed to by ARGS — except the main LWP,
   which must be killed last (see the kernel-bug comment below).
   Always returns 0 so the traversal visits every thread.  */

static int
linux_kill_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int wstat;
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (lwp) == pid)
    {
      if (debug_threads)
	fprintf (stderr, "lkop: is last of process %s\n",
		 target_pid_to_str (entry->id));
      return 0;
    }

  do
    {
      ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (pid > 0 && WIFSTOPPED (wstat));

  return 0;
}
752
/* Kill process PID and all of its LWPs.  Returns -1 if the process
   is unknown, 0 otherwise.  All threads are stopped first because
   PTRACE_KILL only works on stopped threads, and the main LWP is
   killed last to avoid the kernel zombie bug described in
   linux_kill_one_lwp.  */

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;
  struct thread_info *thread;
  int wstat;
  int lwpid;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, linux_kill_one_lwp, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));
  thread = get_lwp_thread (lwp);

  if (debug_threads)
    fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
	     lwpid_of (lwp), pid);

  do
    {
      ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (lwpid > 0 && WIFSTOPPED (wstat));

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
796
/* find_inferior callback: detach from the LWP behind ENTRY if it
   belongs to the process whose pid is pointed to by ARGS.  Collects
   a pending SIGSTOP first (so it isn't delivered after detach),
   flushes the register cache, detaches with ptrace, and deletes the
   LWP from our lists.  Always returns 0 to keep traversing.  */

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If this process is stopped but is expecting a SIGSTOP, then make
     sure we take care of that now.  This isn't absolutely guaranteed
     to collect the SIGSTOP, but is fairly likely to.  */
  if (lwp->stop_expected)
    {
      int wstat;
      /* Clear stop_expected, so that the SIGSTOP will be reported.  */
      lwp->stop_expected = 0;
      linux_resume_one_lwp (lwp, 0, 0, NULL);
      linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
			   get_lwp_thread (lwp));

  /* Finally, let it resume.  */
  ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0);

  delete_lwp (lwp);
  return 0;
}
829
/* Detach from process PID and all of its LWPs.  Returns -1 if the
   process is unknown, 0 otherwise.  */

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to sucessfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
858
/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct process_info *process = proc;
  struct lwp_info *lwp = (struct lwp_info *) entry;

  /* Skip LWPs that belong to some other process.  */
  if (pid_of (lwp) != pid_of (process))
    return 0;

  delete_lwp (lwp);
  return 0;
}
872
/* Forget everything about process PROCESS: delete its LWPs, free its
   private data (including architecture-specific state), and remove
   it from the process list.  */

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_lwps, delete_lwp_callback, process);

  /* Freeing all private data.  */
  priv = process->private;
  free (priv->arch_private);
  free (priv);
  process->private = NULL;

  remove_process (process);
}
892
893 static void
894 linux_join (int pid)
895 {
896 int status, ret;
897 struct process_info *process;
898
899 process = find_process_pid (pid);
900 if (process == NULL)
901 return;
902
903 do {
904 ret = my_waitpid (pid, &status, 0);
905 if (WIFEXITED (status) || WIFSIGNALED (status))
906 break;
907 } while (ret != -1 || errno != ECHILD);
908 }
909
910 /* Return nonzero if the given thread is still alive. */
911 static int
912 linux_thread_alive (ptid_t ptid)
913 {
914 struct lwp_info *lwp = find_lwp_pid (ptid);
915
916 /* We assume we always know if a thread exits. If a whole process
917 exited but we still haven't been able to report it to GDB, we'll
918 hold on to the last lwp of the dead process. */
919 if (lwp != NULL)
920 return !lwp->dead;
921 else
922 return 0;
923 }
924
/* Return 1 if this lwp has an interesting status pending.  */
/* find_inferior callback; ARG points at the ptid filter: only LWPs
   of that process (or all LWPs when the filter is minus_one_ptid)
   are considered.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  ptid_t ptid = * (ptid_t *) arg;
  struct thread_info *thread;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
    return 0;

  thread = get_lwp_thread (lwp);

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  return lwp->status_pending_p;
}
949
950 static int
951 same_lwp (struct inferior_list_entry *entry, void *data)
952 {
953 ptid_t ptid = *(ptid_t *) data;
954 int lwp;
955
956 if (ptid_get_lwp (ptid) != 0)
957 lwp = ptid_get_lwp (ptid);
958 else
959 lwp = ptid_get_pid (ptid);
960
961 if (ptid_get_lwp (entry->id) == lwp)
962 return 1;
963
964 return 0;
965 }
966
/* Return the lwp_info matching PTID (see same_lwp for the matching
   rule), or NULL if no such LWP is on the list.  */

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
}
972
/* Wait for an event from the LWP specified by PTID (or from any LWP
   if PTID is minus_one_ptid), store the raw wait status in *WSTATP,
   and return the lwp_info of the LWP that reported it.  OPTIONS are
   extra waitpid flags (__WALL is always added).  Returns NULL if
   WNOHANG was requested and no event was pending.  Also performs
   first-stop bookkeeping: one-time architecture setup, caching of
   watchpoint-hit data, and recording the adjusted stop PC.  */

static struct lwp_info *
linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
{
  int ret;
  int to_wait_for = -1;
  struct lwp_info *child = NULL;

  if (debug_threads)
    fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));

  if (ptid_equal (ptid, minus_one_ptid))
    to_wait_for = -1;			/* any child */
  else
    to_wait_for = ptid_get_lwp (ptid);	/* this lwp only */

  options |= __WALL;

retry:

  ret = my_waitpid (to_wait_for, wstatp, options);
  if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
    return NULL;
  else if (ret == -1)
    perror_with_name ("waitpid");

  if (debug_threads
      && (!WIFSTOPPED (*wstatp)
	  || (WSTOPSIG (*wstatp) != 32
	      && WSTOPSIG (*wstatp) != 33)))
    fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);

  child = find_lwp_pid (pid_to_ptid (ret));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
     was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (*wstatp))
    {
      add_pid_to_list (&stopped_pids, ret);
      goto retry;
    }
  else if (child == NULL)
    goto retry;

  child->stopped = 1;

  child->last_status = *wstatp;

  /* Architecture-specific setup after inferior is running.
     This needs to happen after we have attached to the inferior
     and it is stopped for the first time, but before we access
     any inferior registers.  */
  if (new_inferior)
    {
      the_low_target.arch_setup ();
#ifdef HAVE_LINUX_REGSETS
      memset (disabled_regsets, 0, num_regsets);
#endif
      new_inferior = 0;
    }

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, it's possible that the way to know which watched
     address trapped, is to check the register that is used to select
     which address to watch.  Problem is, between setting the
     watchpoint and reading back which data address trapped, the user
     may change the set of watchpoints, and, as a consequence, GDB
     changes the debug registers in the inferior.  To avoid reading
     back a stale stopped-data-address when that happens, we cache in
     LP the fact that a watchpoint trapped, and the corresponding data
     address, as soon as we see CHILD stop with a SIGTRAP.  If GDB
     changes the debug registers meanwhile, we have the cached data we
     can rely on.  */

  if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
    {
      if (the_low_target.stopped_by_watchpoint == NULL)
	{
	  child->stopped_by_watchpoint = 0;
	}
      else
	{
	  struct thread_info *saved_inferior;

	  /* Temporarily switch threads so the low target queries the
	     right LWP's debug state.  */
	  saved_inferior = current_inferior;
	  current_inferior = get_lwp_thread (child);

	  child->stopped_by_watchpoint
	    = the_low_target.stopped_by_watchpoint ();

	  if (child->stopped_by_watchpoint)
	    {
	      if (the_low_target.stopped_data_address != NULL)
		child->stopped_data_address
		  = the_low_target.stopped_data_address ();
	      else
		child->stopped_data_address = 0;
	    }

	  current_inferior = saved_inferior;
	}
    }

  /* Store the STOP_PC, with adjustment applied.  This depends on the
     architecture being defined already (so that CHILD has a valid
     regcache), and on LAST_STATUS being set (to check for SIGTRAP or
     not).  */
  if (WIFSTOPPED (*wstatp))
    child->stop_pc = get_stop_pc (child);

  if (debug_threads
      && WIFSTOPPED (*wstatp)
      && the_low_target.get_pc != NULL)
    {
      struct thread_info *saved_inferior = current_inferior;
      struct regcache *regcache;
      CORE_ADDR pc;

      current_inferior = get_lwp_thread (child);
      regcache = get_thread_regcache (current_inferior, 1);
      pc = (*the_low_target.get_pc) (regcache);
      fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
      current_inferior = saved_inferior;
    }

  return child;
}
1104
1105 /* This function should only be called if the LWP got a SIGTRAP.
1106
1107 Handle any tracepoint steps or hits. Return true if a tracepoint
1108 event was handled, 0 otherwise. */
1109
1110 static int
1111 handle_tracepoints (struct lwp_info *lwp)
1112 {
1113 struct thread_info *tinfo = get_lwp_thread (lwp);
1114 int tpoint_related_event = 0;
1115
1116 /* If this tracepoint hit causes a tracing stop, we'll immediately
1117 uninsert tracepoints. To do this, we temporarily pause all
1118 threads, unpatch away, and then unpause threads. We need to make
1119 sure the unpausing doesn't resume LWP too. */
1120 lwp->suspended++;
1121
1122 /* And we need to be sure that any all-threads-stopping doesn't try
1123 to move threads out of the jump pads, as it could deadlock the
1124 inferior (LWP could be in the jump pad, maybe even holding the
1125 lock.) */
1126
1127 /* Do any necessary step collect actions. */
1128 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1129
1130 /* See if we just hit a tracepoint and do its main collect
1131 actions. */
1132 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1133
1134 lwp->suspended--;
1135
1136 gdb_assert (lwp->suspended == 0);
1137
1138 if (tpoint_related_event)
1139 {
1140 if (debug_threads)
1141 fprintf (stderr, "got a tracepoint event\n");
1142 return 1;
1143 }
1144
1145 return 0;
1146 }
1147
1148 /* Arrange for a breakpoint to be hit again later. We don't keep the
1149 SIGTRAP status and don't forward the SIGTRAP signal to the LWP. We
1150 will handle the current event, eventually we will resume this LWP,
1151 and this breakpoint will trap again. */
1152
1153 static int
1154 cancel_breakpoint (struct lwp_info *lwp)
1155 {
1156 struct thread_info *saved_inferior;
1157
1158 /* There's nothing to do if we don't support breakpoints. */
1159 if (!supports_breakpoints ())
1160 return 0;
1161
1162 /* breakpoint_at reads from current inferior. */
1163 saved_inferior = current_inferior;
1164 current_inferior = get_lwp_thread (lwp);
1165
1166 if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
1167 {
1168 if (debug_threads)
1169 fprintf (stderr,
1170 "CB: Push back breakpoint for %s\n",
1171 target_pid_to_str (ptid_of (lwp)));
1172
1173 /* Back up the PC if necessary. */
1174 if (the_low_target.decr_pc_after_break)
1175 {
1176 struct regcache *regcache
1177 = get_thread_regcache (current_inferior, 1);
1178 (*the_low_target.set_pc) (regcache, lwp->stop_pc);
1179 }
1180
1181 current_inferior = saved_inferior;
1182 return 1;
1183 }
1184 else
1185 {
1186 if (debug_threads)
1187 fprintf (stderr,
1188 "CB: No breakpoint found at %s for [%s]\n",
1189 paddress (lwp->stop_pc),
1190 target_pid_to_str (ptid_of (lwp)));
1191 }
1192
1193 current_inferior = saved_inferior;
1194 return 0;
1195 }
1196
/* When the event-loop is doing a step-over (stepping one thread past
   a breakpoint while the others stay parked), this is the ptid of the
   thread being stepped; it is compared against null_ptid to detect
   that no step-over is in progress.  */
ptid_t step_over_bkpt;
1200
1201 /* Wait for an event from child PID. If PID is -1, wait for any
1202 child. Store the stop status through the status pointer WSTAT.
1203 OPTIONS is passed to the waitpid call. Return 0 if no child stop
1204 event was found and OPTIONS contains WNOHANG. Return the PID of
1205 the stopped child otherwise. */
1206
1207 static int
1208 linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options)
1209 {
1210 struct lwp_info *event_child, *requested_child;
1211
1212 event_child = NULL;
1213 requested_child = NULL;
1214
1215 /* Check for a lwp with a pending status. */
1216
1217 if (ptid_equal (ptid, minus_one_ptid)
1218 || ptid_equal (pid_to_ptid (ptid_get_pid (ptid)), ptid))
1219 {
1220 event_child = (struct lwp_info *)
1221 find_inferior (&all_lwps, status_pending_p_callback, &ptid);
1222 if (debug_threads && event_child)
1223 fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
1224 }
1225 else
1226 {
1227 requested_child = find_lwp_pid (ptid);
1228
1229 if (requested_child->status_pending_p)
1230 event_child = requested_child;
1231 }
1232
1233 if (event_child != NULL)
1234 {
1235 if (debug_threads)
1236 fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
1237 lwpid_of (event_child), event_child->status_pending);
1238 *wstat = event_child->status_pending;
1239 event_child->status_pending_p = 0;
1240 event_child->status_pending = 0;
1241 current_inferior = get_lwp_thread (event_child);
1242 return lwpid_of (event_child);
1243 }
1244
1245 /* We only enter this loop if no process has a pending wait status. Thus
1246 any action taken in response to a wait status inside this loop is
1247 responding as soon as we detect the status, not after any pending
1248 events. */
1249 while (1)
1250 {
1251 event_child = linux_wait_for_lwp (ptid, wstat, options);
1252
1253 if ((options & WNOHANG) && event_child == NULL)
1254 {
1255 if (debug_threads)
1256 fprintf (stderr, "WNOHANG set, no event found\n");
1257 return 0;
1258 }
1259
1260 if (event_child == NULL)
1261 error ("event from unknown child");
1262
1263 current_inferior = get_lwp_thread (event_child);
1264
1265 /* Check for thread exit. */
1266 if (! WIFSTOPPED (*wstat))
1267 {
1268 if (debug_threads)
1269 fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));
1270
1271 /* If the last thread is exiting, just return. */
1272 if (last_thread_of_process_p (current_inferior))
1273 {
1274 if (debug_threads)
1275 fprintf (stderr, "LWP %ld is last lwp of process\n",
1276 lwpid_of (event_child));
1277 return lwpid_of (event_child);
1278 }
1279
1280 if (!non_stop)
1281 {
1282 current_inferior = (struct thread_info *) all_threads.head;
1283 if (debug_threads)
1284 fprintf (stderr, "Current inferior is now %ld\n",
1285 lwpid_of (get_thread_lwp (current_inferior)));
1286 }
1287 else
1288 {
1289 current_inferior = NULL;
1290 if (debug_threads)
1291 fprintf (stderr, "Current inferior is now <NULL>\n");
1292 }
1293
1294 /* If we were waiting for this particular child to do something...
1295 well, it did something. */
1296 if (requested_child != NULL)
1297 {
1298 int lwpid = lwpid_of (event_child);
1299
1300 /* Cancel the step-over operation --- the thread that
1301 started it is gone. */
1302 if (finish_step_over (event_child))
1303 unstop_all_lwps (1, event_child);
1304 delete_lwp (event_child);
1305 return lwpid;
1306 }
1307
1308 delete_lwp (event_child);
1309
1310 /* Wait for a more interesting event. */
1311 continue;
1312 }
1313
1314 if (event_child->must_set_ptrace_flags)
1315 {
1316 linux_enable_event_reporting (lwpid_of (event_child));
1317 event_child->must_set_ptrace_flags = 0;
1318 }
1319
1320 if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
1321 && *wstat >> 16 != 0)
1322 {
1323 handle_extended_wait (event_child, *wstat);
1324 continue;
1325 }
1326
1327 if (WIFSTOPPED (*wstat)
1328 && WSTOPSIG (*wstat) == SIGSTOP
1329 && event_child->stop_expected)
1330 {
1331 int should_stop;
1332
1333 if (debug_threads)
1334 fprintf (stderr, "Expected stop.\n");
1335 event_child->stop_expected = 0;
1336
1337 should_stop = (current_inferior->last_resume_kind == resume_stop
1338 || stopping_threads);
1339
1340 if (!should_stop)
1341 {
1342 linux_resume_one_lwp (event_child,
1343 event_child->stepping, 0, NULL);
1344 continue;
1345 }
1346 }
1347
1348 return lwpid_of (event_child);
1349 }
1350
1351 /* NOTREACHED */
1352 return 0;
1353 }
1354
1355 static int
1356 linux_wait_for_event (ptid_t ptid, int *wstat, int options)
1357 {
1358 ptid_t wait_ptid;
1359
1360 if (ptid_is_pid (ptid))
1361 {
1362 /* A request to wait for a specific tgid. This is not possible
1363 with waitpid, so instead, we wait for any child, and leave
1364 children we're not interested in right now with a pending
1365 status to report later. */
1366 wait_ptid = minus_one_ptid;
1367 }
1368 else
1369 wait_ptid = ptid;
1370
1371 while (1)
1372 {
1373 int event_pid;
1374
1375 event_pid = linux_wait_for_event_1 (wait_ptid, wstat, options);
1376
1377 if (event_pid > 0
1378 && ptid_is_pid (ptid) && ptid_get_pid (ptid) != event_pid)
1379 {
1380 struct lwp_info *event_child = find_lwp_pid (pid_to_ptid (event_pid));
1381
1382 if (! WIFSTOPPED (*wstat))
1383 mark_lwp_dead (event_child, *wstat);
1384 else
1385 {
1386 event_child->status_pending_p = 1;
1387 event_child->status_pending = *wstat;
1388 }
1389 }
1390 else
1391 return event_pid;
1392 }
1393 }
1394
1395
1396 /* Count the LWP's that have had events. */
1397
1398 static int
1399 count_events_callback (struct inferior_list_entry *entry, void *data)
1400 {
1401 struct lwp_info *lp = (struct lwp_info *) entry;
1402 struct thread_info *thread = get_lwp_thread (lp);
1403 int *count = data;
1404
1405 gdb_assert (count != NULL);
1406
1407 /* Count only resumed LWPs that have a SIGTRAP event pending that
1408 should be reported to GDB. */
1409 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1410 && thread->last_resume_kind != resume_stop
1411 && lp->status_pending_p
1412 && WIFSTOPPED (lp->status_pending)
1413 && WSTOPSIG (lp->status_pending) == SIGTRAP
1414 && !breakpoint_inserted_here (lp->stop_pc))
1415 (*count)++;
1416
1417 return 0;
1418 }
1419
1420 /* Select the LWP (if any) that is currently being single-stepped. */
1421
1422 static int
1423 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
1424 {
1425 struct lwp_info *lp = (struct lwp_info *) entry;
1426 struct thread_info *thread = get_lwp_thread (lp);
1427
1428 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1429 && thread->last_resume_kind == resume_step
1430 && lp->status_pending_p)
1431 return 1;
1432 else
1433 return 0;
1434 }
1435
1436 /* Select the Nth LWP that has had a SIGTRAP event that should be
1437 reported to GDB. */
1438
1439 static int
1440 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
1441 {
1442 struct lwp_info *lp = (struct lwp_info *) entry;
1443 struct thread_info *thread = get_lwp_thread (lp);
1444 int *selector = data;
1445
1446 gdb_assert (selector != NULL);
1447
1448 /* Select only resumed LWPs that have a SIGTRAP event pending. */
1449 if (thread->last_resume_kind != resume_stop
1450 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
1451 && lp->status_pending_p
1452 && WIFSTOPPED (lp->status_pending)
1453 && WSTOPSIG (lp->status_pending) == SIGTRAP
1454 && !breakpoint_inserted_here (lp->stop_pc))
1455 if ((*selector)-- == 0)
1456 return 1;
1457
1458 return 0;
1459 }
1460
1461 static int
1462 cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
1463 {
1464 struct lwp_info *lp = (struct lwp_info *) entry;
1465 struct thread_info *thread = get_lwp_thread (lp);
1466 struct lwp_info *event_lp = data;
1467
1468 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
1469 if (lp == event_lp)
1470 return 0;
1471
1472 /* If a LWP other than the LWP that we're reporting an event for has
1473 hit a GDB breakpoint (as opposed to some random trap signal),
1474 then just arrange for it to hit it again later. We don't keep
1475 the SIGTRAP status and don't forward the SIGTRAP signal to the
1476 LWP. We will handle the current event, eventually we will resume
1477 all LWPs, and this one will get its breakpoint trap again.
1478
1479 If we do not do this, then we run the risk that the user will
1480 delete or disable the breakpoint, but the LWP will have already
1481 tripped on it. */
1482
1483 if (thread->last_resume_kind != resume_stop
1484 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
1485 && lp->status_pending_p
1486 && WIFSTOPPED (lp->status_pending)
1487 && WSTOPSIG (lp->status_pending) == SIGTRAP
1488 && !lp->stepping
1489 && !lp->stopped_by_watchpoint
1490 && cancel_breakpoint (lp))
1491 /* Throw away the SIGTRAP. */
1492 lp->status_pending_p = 0;
1493
1494 return 0;
1495 }
1496
/* Cancel, in every LWP, pending breakpoint hits that GDB hasn't been
   told about yet.  See cancel_breakpoints_callback for the
   rationale.  */
static void
linux_cancel_breakpoints (void)
{
  find_inferior (&all_lwps, cancel_breakpoints_callback, NULL);
}
1502
1503 /* Select one LWP out of those that have events pending. */
1504
1505 static void
1506 select_event_lwp (struct lwp_info **orig_lp)
1507 {
1508 int num_events = 0;
1509 int random_selector;
1510 struct lwp_info *event_lp;
1511
1512 /* Give preference to any LWP that is being single-stepped. */
1513 event_lp
1514 = (struct lwp_info *) find_inferior (&all_lwps,
1515 select_singlestep_lwp_callback, NULL);
1516 if (event_lp != NULL)
1517 {
1518 if (debug_threads)
1519 fprintf (stderr,
1520 "SEL: Select single-step %s\n",
1521 target_pid_to_str (ptid_of (event_lp)));
1522 }
1523 else
1524 {
1525 /* No single-stepping LWP. Select one at random, out of those
1526 which have had SIGTRAP events. */
1527
1528 /* First see how many SIGTRAP events we have. */
1529 find_inferior (&all_lwps, count_events_callback, &num_events);
1530
1531 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
1532 random_selector = (int)
1533 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
1534
1535 if (debug_threads && num_events > 1)
1536 fprintf (stderr,
1537 "SEL: Found %d SIGTRAP events, selecting #%d\n",
1538 num_events, random_selector);
1539
1540 event_lp = (struct lwp_info *) find_inferior (&all_lwps,
1541 select_event_lwp_callback,
1542 &random_selector);
1543 }
1544
1545 if (event_lp != NULL)
1546 {
1547 /* Switch the event LWP. */
1548 *orig_lp = event_lp;
1549 }
1550 }
1551
1552 /* Set this inferior LWP's state as "want-stopped". We won't resume
1553 this LWP until the client gives us another action for it. */
1554
1555 static void
1556 gdb_wants_lwp_stopped (struct inferior_list_entry *entry)
1557 {
1558 struct lwp_info *lwp = (struct lwp_info *) entry;
1559 struct thread_info *thread = get_lwp_thread (lwp);
1560
1561 /* Most threads are stopped implicitly (all-stop); tag that with
1562 signal 0. The thread being explicitly reported stopped to the
1563 client, gets it's status fixed up afterwards. */
1564 thread->last_status.kind = TARGET_WAITKIND_STOPPED;
1565 thread->last_status.value.sig = TARGET_SIGNAL_0;
1566
1567 thread->last_resume_kind = resume_stop;
1568 }
1569
1570 /* Decrement the suspend count of an LWP. */
1571
1572 static int
1573 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
1574 {
1575 struct lwp_info *lwp = (struct lwp_info *) entry;
1576
1577 /* Ignore EXCEPT. */
1578 if (lwp == except)
1579 return 0;
1580
1581 lwp->suspended--;
1582
1583 gdb_assert (lwp->suspended >= 0);
1584 return 0;
1585 }
1586
/* Decrement the suspend count of all LWPs, except EXCEPT, if non
   NULL.  Balances an earlier suspend of every other LWP (e.g. via
   suspend_and_send_sigstop_callback).  */

static void
unsuspend_all_lwps (struct lwp_info *except)
{
  find_inferior (&all_lwps, unsuspend_one_lwp, except);
}
1595
/* Set all LWP's states as "want-stopped".  Used when reporting a
   stop in all-stop mode, where every thread is implicitly stopped.  */

static void
gdb_wants_all_stopped (void)
{
  for_each_inferior (&all_lwps, gdb_wants_lwp_stopped);
}
1603
/* Wait for process, returns status.  This is the heart of the target
   wait: pull one event out of linux_wait_for_event, decide whether
   GDB needs to hear about it, and either fill in *OURSTATUS and
   return the event thread's ptid, or quietly handle the event and
   jump back to `retry' to wait again.  */

static ptid_t
linux_wait_1 (ptid_t ptid,
	      struct target_waitstatus *ourstatus, int target_options)
{
  int w;
  struct lwp_info *event_child;
  int options;
  int pid;
  int step_over_finished;
  int bp_explains_trap;
  int maybe_internal_trap;
  int report_to_gdb;
  int trace_event;

  /* Translate generic target options into linux options.  */
  options = __WALL;
  if (target_options & TARGET_WNOHANG)
    options |= WNOHANG;

 /* Jumped back to whenever the event just consumed should not be
    reported to GDB (ignored signal, internal breakpoint, ...).  */
retry:
  ourstatus->kind = TARGET_WAITKIND_IGNORE;

  /* If we were only supposed to resume one thread, only wait for
     that thread - if it's still alive.  If it died, however - which
     can happen if we're coming from the thread death case below -
     then we need to make sure we restart the other threads.  We could
     pick a thread at random or restart all; restarting all is less
     arbitrary.  */
  if (!non_stop
      && !ptid_equal (cont_thread, null_ptid)
      && !ptid_equal (cont_thread, minus_one_ptid))
    {
      struct thread_info *thread;

      thread = (struct thread_info *) find_inferior_id (&all_threads,
							cont_thread);

      /* No stepping, no signal - unless one is pending already, of course.  */
      if (thread == NULL)
	{
	  struct thread_resume resume_info;
	  resume_info.thread = minus_one_ptid;
	  resume_info.kind = resume_continue;
	  resume_info.sig = 0;
	  linux_resume (&resume_info, 1);
	}
      else
	ptid = cont_thread;
    }

  /* While a step-over is in progress, only events from the stepping
     thread are interesting; wait for it without WNOHANG.  */
  if (ptid_equal (step_over_bkpt, null_ptid))
    pid = linux_wait_for_event (ptid, &w, options);
  else
    {
      if (debug_threads)
	fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
		 target_pid_to_str (step_over_bkpt));
      pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
    }

  if (pid == 0) /* only if TARGET_WNOHANG */
    return null_ptid;

  event_child = get_thread_lwp (current_inferior);

  /* If we are waiting for a particular child, and it exited,
     linux_wait_for_event will return its exit status.  Similarly if
     the last child exited.  If this is not the last child, however,
     do not report it as exited until there is a 'thread exited' response
     available in the remote protocol.  Instead, just wait for another event.
     This should be safe, because if the thread crashed we will already
     have reported the termination signal to GDB; that should stop any
     in-progress stepping operations, etc.

     Report the exit status of the last thread to exit.  This matches
     LinuxThreads' behavior.  */

  if (last_thread_of_process_p (current_inferior))
    {
      if (WIFEXITED (w) || WIFSIGNALED (w))
	{
	  if (WIFEXITED (w))
	    {
	      ourstatus->kind = TARGET_WAITKIND_EXITED;
	      ourstatus->value.integer = WEXITSTATUS (w);

	      if (debug_threads)
		fprintf (stderr, "\nChild exited with retcode = %x \n", WEXITSTATUS (w));
	    }
	  else
	    {
	      ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
	      ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));

	      if (debug_threads)
		fprintf (stderr, "\nChild terminated with signal = %x \n", WTERMSIG (w));

	    }

	  return pid_to_ptid (pid);
	}
    }
  else
    {
      /* Non-last thread exited; not reportable, wait again.  */
      if (!WIFSTOPPED (w))
	goto retry;
    }

  /* If this event was not handled before, and is not a SIGTRAP, we
     report it.  SIGILL and SIGSEGV are also treated as traps in case
     a breakpoint is inserted at the current PC.  If this target does
     not support internal breakpoints at all, we also report the
     SIGTRAP without further processing; it's of no concern to us.  */
  maybe_internal_trap
    = (supports_breakpoints ()
       && (WSTOPSIG (w) == SIGTRAP
	   || ((WSTOPSIG (w) == SIGILL
		|| WSTOPSIG (w) == SIGSEGV)
	       && (*the_low_target.breakpoint_at) (event_child->stop_pc))));

  if (maybe_internal_trap)
    {
      /* Handle anything that requires bookkeeping before deciding to
	 report the event or continue waiting.  */

      /* First check if we can explain the SIGTRAP with an internal
	 breakpoint, or if we should possibly report the event to GDB.
	 Do this before anything that may remove or insert a
	 breakpoint.  */
      bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);

      /* We have a SIGTRAP, possibly a step-over dance has just
	 finished.  If so, tweak the state machine accordingly,
	 reinsert breakpoints and delete any reinsert (software
	 single-step) breakpoints.  */
      step_over_finished = finish_step_over (event_child);

      /* Now invoke the callbacks of any internal breakpoints there.  */
      check_breakpoints (event_child->stop_pc);

      /* Handle tracepoint data collecting.  This may overflow the
	 trace buffer, and cause a tracing stop, removing
	 breakpoints.  */
      trace_event = handle_tracepoints (event_child);

      if (bp_explains_trap)
	{
	  /* If we stepped or ran into an internal breakpoint, we've
	     already handled it.  So next time we resume (from this
	     PC), we should step over it.  */
	  if (debug_threads)
	    fprintf (stderr, "Hit a gdbserver breakpoint.\n");

	  if (breakpoint_here (event_child->stop_pc))
	    event_child->need_step_over = 1;
	}
    }
  else
    {
      /* We have some other signal, possibly a step-over dance was in
	 progress, and it should be cancelled too.  */
      step_over_finished = finish_step_over (event_child);

      trace_event = 0;
    }

  /* Check whether GDB would be interested in this event.  */

  /* If GDB is not interested in this signal, don't stop other
     threads, and don't report it to GDB.  Just resume the inferior
     right away.  We do this for threading-related signals as well as
     any that GDB specifically requested we ignore.  But never ignore
     SIGSTOP if we sent it ourselves, and do not ignore signals when
     stepping - they may require special handling to skip the signal
     handler.  */
  /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
     thread library?  */
  if (WIFSTOPPED (w)
      && current_inferior->last_resume_kind != resume_step
      && (
#if defined (USE_THREAD_DB) && defined (__SIGRTMIN)
	  (current_process ()->private->thread_db != NULL
	   && (WSTOPSIG (w) == __SIGRTMIN
	       || WSTOPSIG (w) == __SIGRTMIN + 1))
	  ||
#endif
	  (pass_signals[target_signal_from_host (WSTOPSIG (w))]
	   && !(WSTOPSIG (w) == SIGSTOP
		&& current_inferior->last_resume_kind == resume_stop))))
    {
      siginfo_t info, *info_p;

      if (debug_threads)
	fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
		 WSTOPSIG (w), lwpid_of (event_child));

      /* Re-deliver the signal with its original siginfo, if we can
	 still fetch it.  */
      if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
	info_p = &info;
      else
	info_p = NULL;
      linux_resume_one_lwp (event_child, event_child->stepping,
			    WSTOPSIG (w), info_p);
      goto retry;
    }

  /* If GDB wanted this thread to single step, we always want to
     report the SIGTRAP, and let GDB handle it.  Watchpoints should
     always be reported.  So should signals we can't explain.  A
     SIGTRAP we can't explain could be a GDB breakpoint --- we may or
     not support Z0 breakpoints.  If we do, we're be able to handle
     GDB breakpoints on top of internal breakpoints, by handling the
     internal breakpoint and still reporting the event to GDB.  If we
     don't, we're out of luck, GDB won't see the breakpoint hit.  */
  report_to_gdb = (!maybe_internal_trap
		   || current_inferior->last_resume_kind == resume_step
		   || event_child->stopped_by_watchpoint
		   || (!step_over_finished && !bp_explains_trap && !trace_event)
		   || gdb_breakpoint_here (event_child->stop_pc));

  /* We found no reason GDB would want us to stop.  We either hit one
     of our own breakpoints, or finished an internal step GDB
     shouldn't know about.  */
  if (!report_to_gdb)
    {
      if (debug_threads)
	{
	  if (bp_explains_trap)
	    fprintf (stderr, "Hit a gdbserver breakpoint.\n");
	  if (step_over_finished)
	    fprintf (stderr, "Step-over finished.\n");
	  if (trace_event)
	    fprintf (stderr, "Tracepoint event.\n");
	}

      /* We're not reporting this breakpoint to GDB, so apply the
	 decr_pc_after_break adjustment to the inferior's regcache
	 ourselves.  */

      if (the_low_target.set_pc != NULL)
	{
	  struct regcache *regcache
	    = get_thread_regcache (get_lwp_thread (event_child), 1);
	  (*the_low_target.set_pc) (regcache, event_child->stop_pc);
	}

      /* We may have finished stepping over a breakpoint.  If so,
	 we've stopped and suspended all LWPs momentarily except the
	 stepping one.  This is where we resume them all again.  We're
	 going to keep waiting, so use proceed, which handles stepping
	 over the next breakpoint.  */
      if (debug_threads)
	fprintf (stderr, "proceeding all threads.\n");

      if (step_over_finished)
	unsuspend_all_lwps (event_child);

      proceed_all_lwps ();
      goto retry;
    }

  if (debug_threads)
    {
      if (current_inferior->last_resume_kind == resume_step)
	fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
      if (event_child->stopped_by_watchpoint)
	fprintf (stderr, "Stopped by watchpoint.\n");
      if (gdb_breakpoint_here (event_child->stop_pc))
	fprintf (stderr, "Stopped by GDB breakpoint.\n");
      /* NOTE(review): this inner debug_threads check is redundant —
	 we are already inside an `if (debug_threads)' block.  */
      if (debug_threads)
	fprintf (stderr, "Hit a non-gdbserver trap event.\n");
    }

  /* Alright, we're going to report a stop.  */

  if (!non_stop)
    {
      /* In all-stop, stop all threads.  */
      stop_all_lwps (0, NULL);

      /* If we're not waiting for a specific LWP, choose an event LWP
	 from among those that have had events.  Giving equal priority
	 to all LWPs that have had events helps prevent
	 starvation.  */
      if (ptid_equal (ptid, minus_one_ptid))
	{
	  event_child->status_pending_p = 1;
	  event_child->status_pending = w;

	  select_event_lwp (&event_child);

	  event_child->status_pending_p = 0;
	  w = event_child->status_pending;
	}

      /* Now that we've selected our final event LWP, cancel any
	 breakpoints in other LWPs that have hit a GDB breakpoint.
	 See the comment in cancel_breakpoints_callback to find out
	 why.  */
      find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
    }
  else
    {
      /* If we just finished a step-over, then all threads had been
	 momentarily paused.  In all-stop, that's fine, we want
	 threads stopped by now anyway.  In non-stop, we need to
	 re-resume threads that GDB wanted to be running.  */
      if (step_over_finished)
	unstop_all_lwps (1, event_child);
    }

  ourstatus->kind = TARGET_WAITKIND_STOPPED;

  /* Do this before the gdb_wants_all_stopped calls below, since they
     always set last_resume_kind to resume_stop.  */
  if (current_inferior->last_resume_kind == resume_stop
      && WSTOPSIG (w) == SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 and it stopped cleanly, so report as SIG0.  The use of
	 SIGSTOP is an implementation detail.  */
      ourstatus->value.sig = TARGET_SIGNAL_0;
    }
  else if (current_inferior->last_resume_kind == resume_stop
	   && WSTOPSIG (w) != SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 but, it stopped for other reasons.  */
      /* NOTE(review): this branch and the final else assign the same
	 value; they are kept separate only for the comments.  */
      ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
    }
  else
    {
      ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
    }

  /* Any step-over must have been finished (or cancelled) by now.  */
  gdb_assert (ptid_equal (step_over_bkpt, null_ptid));

  if (!non_stop)
    {
      /* From GDB's perspective, all-stop mode always stops all
	 threads implicitly.  Tag all threads as "want-stopped".  */
      gdb_wants_all_stopped ();
    }
  else
    {
      /* We're reporting this LWP as stopped.  Update it's
	 "want-stopped" state to what the client wants, until it gets
	 a new resume action.  */
      gdb_wants_lwp_stopped (&event_child->head);
    }

  if (debug_threads)
    fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
	     target_pid_to_str (ptid_of (event_child)),
	     ourstatus->kind,
	     ourstatus->value.sig);

  current_inferior->last_status = *ourstatus;

  return ptid_of (event_child);
}
1966
1967 /* Get rid of any pending event in the pipe. */
1968 static void
1969 async_file_flush (void)
1970 {
1971 int ret;
1972 char buf;
1973
1974 do
1975 ret = read (linux_event_pipe[0], &buf, 1);
1976 while (ret >= 0 || (ret == -1 && errno == EINTR));
1977 }
1978
1979 /* Put something in the pipe, so the event loop wakes up. */
1980 static void
1981 async_file_mark (void)
1982 {
1983 int ret;
1984
1985 async_file_flush ();
1986
1987 do
1988 ret = write (linux_event_pipe[1], "+", 1);
1989 while (ret == 0 || (ret == -1 && errno == EINTR));
1990
1991 /* Ignore EAGAIN. If the pipe is full, the event loop will already
1992 be awakened anyway. */
1993 }
1994
1995 static ptid_t
1996 linux_wait (ptid_t ptid,
1997 struct target_waitstatus *ourstatus, int target_options)
1998 {
1999 ptid_t event_ptid;
2000
2001 if (debug_threads)
2002 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
2003
2004 /* Flush the async file first. */
2005 if (target_is_async_p ())
2006 async_file_flush ();
2007
2008 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
2009
2010 /* If at least one stop was reported, there may be more. A single
2011 SIGCHLD can signal more than one child stop. */
2012 if (target_is_async_p ()
2013 && (target_options & TARGET_WNOHANG) != 0
2014 && !ptid_equal (event_ptid, null_ptid))
2015 async_file_mark ();
2016
2017 return event_ptid;
2018 }
2019
/* Send signal SIGNO to the LWP with id LWPID.  Prefer the tkill
   syscall, which targets a single thread under NPTL; once tkill has
   reported ENOSYS we remember that and fall back to kill from then
   on.  */

static int
kill_lwp (unsigned long lwpid, int signo)
{
#ifdef __NR_tkill
  {
    /* Set the first time tkill comes back with ENOSYS, so we never
       retry a syscall this kernel doesn't have.  */
    static int tkill_unavailable;

    if (!tkill_unavailable)
      {
	int ret;

	errno = 0;
	ret = syscall (__NR_tkill, lwpid, signo);
	if (errno != ENOSYS)
	  return ret;

	tkill_unavailable = 1;
      }
  }
#endif

  return kill (lwpid, signo);
}
2047
2048 static void
2049 send_sigstop (struct lwp_info *lwp)
2050 {
2051 int pid;
2052
2053 pid = lwpid_of (lwp);
2054
2055 /* If we already have a pending stop signal for this process, don't
2056 send another. */
2057 if (lwp->stop_expected)
2058 {
2059 if (debug_threads)
2060 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
2061
2062 return;
2063 }
2064
2065 if (debug_threads)
2066 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
2067
2068 lwp->stop_expected = 1;
2069 kill_lwp (pid, SIGSTOP);
2070 }
2071
2072 static int
2073 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
2074 {
2075 struct lwp_info *lwp = (struct lwp_info *) entry;
2076
2077 /* Ignore EXCEPT. */
2078 if (lwp == except)
2079 return 0;
2080
2081 if (lwp->stopped)
2082 return 0;
2083
2084 send_sigstop (lwp);
2085 return 0;
2086 }
2087
2088 /* Increment the suspend count of an LWP, and stop it, if not stopped
2089 yet. */
2090 static int
2091 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
2092 void *except)
2093 {
2094 struct lwp_info *lwp = (struct lwp_info *) entry;
2095
2096 /* Ignore EXCEPT. */
2097 if (lwp == except)
2098 return 0;
2099
2100 lwp->suspended++;
2101
2102 return send_sigstop_callback (entry, except);
2103 }
2104
2105 static void
2106 mark_lwp_dead (struct lwp_info *lwp, int wstat)
2107 {
2108 /* It's dead, really. */
2109 lwp->dead = 1;
2110
2111 /* Store the exit status for later. */
2112 lwp->status_pending_p = 1;
2113 lwp->status_pending = wstat;
2114
2115 /* Prevent trying to stop it. */
2116 lwp->stopped = 1;
2117
2118 /* No further stops are expected from a dead lwp. */
2119 lwp->stop_expected = 0;
2120 }
2121
/* Wait until LWP has actually stopped, after a SIGSTOP was sent to it
   (see send_sigstop).  If the LWP stops with some other signal, that
   status is left pending so it can be reported later; if the whole
   process exits instead, the LWP is marked dead with the exit status
   pending.  Takes care to leave CURRENT_INFERIOR valid even if the
   previously current thread died while we waited.  */
static void
wait_for_sigstop (struct inferior_list_entry *entry)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *saved_inferior;
  int wstat;
  ptid_t saved_tid;
  ptid_t ptid;
  int pid;

  if (lwp->stopped)
    {
      if (debug_threads)
	fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
		 lwpid_of (lwp));
      return;
    }

  /* Remember the current thread by id, not just pointer: the wait
     below can delete threads whose process exits.  */
  saved_inferior = current_inferior;
  if (saved_inferior != NULL)
    saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
  else
    saved_tid = null_ptid; /* avoid bogus unused warning */

  ptid = lwp->head.id;

  if (debug_threads)
    fprintf (stderr, "wait_for_sigstop: pulling one event\n");

  pid = linux_wait_for_event (ptid, &wstat, __WALL);

  /* If we stopped with a non-SIGSTOP signal, save it for later
     and record the pending SIGSTOP.  If the process exited, just
     return.  */
  if (WIFSTOPPED (wstat))
    {
      if (debug_threads)
	fprintf (stderr, "LWP %ld stopped with signal %d\n",
		 lwpid_of (lwp), WSTOPSIG (wstat));

      if (WSTOPSIG (wstat) != SIGSTOP)
	{
	  if (debug_threads)
	    fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
		     lwpid_of (lwp), wstat);

	  /* Stash the signal so it isn't lost; it can be reported or
	     re-delivered when the LWP is next resumed.  */
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}
    }
  else
    {
      if (debug_threads)
	fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);

      /* Re-look the LWP up; linux_wait_for_event may have deleted
	 other LWPs of the exiting process from the tables.  */
      lwp = find_lwp_pid (pid_to_ptid (pid));
      if (lwp)
	{
	  /* Leave this status pending for the next time we're able to
	     report it.  In the mean time, we'll report this lwp as
	     dead to GDB, so GDB doesn't try to read registers and
	     memory from it.  This can only happen if this was the
	     last thread of the process; otherwise, PID is removed
	     from the thread tables before linux_wait_for_event
	     returns.  */
	  mark_lwp_dead (lwp, wstat);
	}
    }

  /* Restore the previously current thread — unless it died while we
     waited, in which case pick a sensible replacement.  */
  if (saved_inferior == NULL || linux_thread_alive (saved_tid))
    current_inferior = saved_inferior;
  else
    {
      if (debug_threads)
	fprintf (stderr, "Previously current thread died.\n");

      if (non_stop)
	{
	  /* We can't change the current inferior behind GDB's back,
	     otherwise, a subsequent command may apply to the wrong
	     process.  */
	  current_inferior = NULL;
	}
      else
	{
	  /* Set a valid thread as current.  */
	  set_desired_inferior (0);
	}
    }
}
2212
2213 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
2214 If SUSPEND, then also increase the suspend count of every LWP,
2215 except EXCEPT. */
2216
2217 static void
2218 stop_all_lwps (int suspend, struct lwp_info *except)
2219 {
2220 stopping_threads = 1;
2221
2222 if (suspend)
2223 find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except);
2224 else
2225 find_inferior (&all_lwps, send_sigstop_callback, except);
2226 for_each_inferior (&all_lwps, wait_for_sigstop);
2227 stopping_threads = 0;
2228 }
2229
/* Resume execution of the inferior process.
   If STEP is nonzero, single-step it.
   If SIGNAL is nonzero, give it that signal; INFO, when non-NULL, is
   the siginfo to deliver alongside SIGNAL.  The resume is skipped
   (and SIGNAL queued) if the LWP already has a status to report, and
   signal delivery is postponed while a breakpoint reinsertion
   (step-over) is in progress.  */

static void
linux_resume_one_lwp (struct lwp_info *lwp,
		      int step, int signal, siginfo_t *info)
{
  struct thread_info *saved_inferior;

  /* Nothing to do if the LWP is already running.  */
  if (lwp->stopped == 0)
    return;

  /* Cancel actions that rely on GDB not changing the PC (e.g., the
     user used the "jump" command, or "set $pc = foo").  */
  if (lwp->stop_pc != get_pc (lwp))
    {
      /* Collecting 'while-stepping' actions doesn't make sense
	 anymore.  */
      release_while_stepping_state_list (get_lwp_thread (lwp));
    }

  /* If we have pending signals or status, and a new signal, enqueue the
     signal.  Also enqueue the signal if we are waiting to reinsert a
     breakpoint; it will be picked up again below.  */
  if (signal != 0
      && (lwp->status_pending_p || lwp->pending_signals != NULL
	  || lwp->bp_reinsert != 0))
    {
      struct pending_signals *p_sig;
      p_sig = xmalloc (sizeof (*p_sig));
      p_sig->prev = lwp->pending_signals;
      p_sig->signal = signal;
      if (info == NULL)
	memset (&p_sig->info, 0, sizeof (siginfo_t));
      else
	memcpy (&p_sig->info, info, sizeof (siginfo_t));
      lwp->pending_signals = p_sig;
    }

  /* Don't resume at all if there's something to report first.  */
  if (lwp->status_pending_p)
    {
      if (debug_threads)
	fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
		 " has pending status\n",
		 lwpid_of (lwp), step ? "step" : "continue", signal,
		 lwp->stop_expected ? "expected" : "not expected");
      return;
    }

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if (debug_threads)
    fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
	     lwpid_of (lwp), step ? "step" : "continue", signal,
	     lwp->stop_expected ? "expected" : "not expected");

  /* This bit needs some thinking about.  If we get a signal that
     we must report while a single-step reinsert is still pending,
     we often end up resuming the thread.  It might be better to
     (ew) allow a stack of pending events; then we could be sure that
     the reinsert happened right away and not lose any signals.

     Making this stack would also shrink the window in which breakpoints are
     uninserted (see comment in linux_wait_for_lwp) but not enough for
     complete correctness, so it won't solve that problem.  It may be
     worthwhile just to solve this one, however.  */
  if (lwp->bp_reinsert != 0)
    {
      if (debug_threads)
	fprintf (stderr, " pending reinsert at 0x%s\n",
		 paddress (lwp->bp_reinsert));

      if (lwp->bp_reinsert != 0 && can_hardware_single_step ())
	{
	  /* A step-over in progress forces a single-step; a continue
	     here would run past the uninserted breakpoint.  */
	  if (step == 0)
	    fprintf (stderr, "BAD - reinserting but not stepping.\n");
	  if (lwp->suspended)
	    fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
		     lwp->suspended);

	  step = 1;
	}

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  /* If we have while-stepping actions in this thread set it stepping.
     If we have a signal to deliver, it may or may not be set to
     SIG_IGN, we don't know.  Assume so, and allow collecting
     while-stepping into a signal handler.  A possible smart thing to
     do would be to set an internal breakpoint at the signal return
     address, continue, and carry on catching this while-stepping
     action only when that breakpoint is hit.  A future
     enhancement.  */
  if (get_lwp_thread (lwp)->while_stepping != NULL
      && can_hardware_single_step ())
    {
      if (debug_threads)
	fprintf (stderr,
		 "lwp %ld has a while-stepping action -> forcing step.\n",
		 lwpid_of (lwp));
      step = 1;
    }

  if (debug_threads && the_low_target.get_pc != NULL)
    {
      struct regcache *regcache = get_thread_regcache (current_inferior, 1);
      CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
      fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
    }

  /* If we have pending signals, consume one unless we are trying to reinsert
     a breakpoint.  */
  if (lwp->pending_signals != NULL && lwp->bp_reinsert == 0)
    {
      struct pending_signals **p_sig;

      /* Walk to the tail of the list: signals are pushed at the head,
	 so the tail is the oldest and is delivered first.  */
      p_sig = &lwp->pending_signals;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      signal = (*p_sig)->signal;
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);

      free (*p_sig);
      *p_sig = NULL;
    }

  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);

  /* The inferior is about to run, so any cached register values for
     it are about to go stale.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
			   get_lwp_thread (lwp));
  errno = 0;
  lwp->stopped = 0;
  lwp->stopped_by_watchpoint = 0;
  lwp->stepping = step;
  ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0,
	  /* Coerce to a uintptr_t first to avoid potential gcc warning
	     of coercing an 8 byte integer to a 4 byte pointer.  */
	  (PTRACE_ARG4_TYPE) (uintptr_t) signal);

  current_inferior = saved_inferior;
  if (errno)
    {
      /* ESRCH from ptrace either means that the thread was already
	 running (an error) or that it is gone (a race condition).  If
	 it's gone, we will get a notification the next time we wait,
	 so we can ignore the error.  We could differentiate these
	 two, but it's tricky without waiting; the thread still exists
	 as a zombie, so sending it signal 0 would succeed.  So just
	 ignore ESRCH.  */
      if (errno == ESRCH)
	return;

      perror_with_name ("ptrace");
    }
}
2392
/* Argument bundle for linux_set_resume_request: the array of resume
   requests received from GDB, and its length.  */
struct thread_resume_array
{
  struct thread_resume *resume;	/* Resume requests, in request order.  */
  size_t n;			/* Number of elements in RESUME.  */
};
2398
2399 /* This function is called once per thread. We look up the thread
2400 in RESUME_PTR, and mark the thread with a pointer to the appropriate
2401 resume request.
2402
2403 This algorithm is O(threads * resume elements), but resume elements
2404 is small (and will remain small at least until GDB supports thread
2405 suspension). */
2406 static int
2407 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
2408 {
2409 struct lwp_info *lwp;
2410 struct thread_info *thread;
2411 int ndx;
2412 struct thread_resume_array *r;
2413
2414 thread = (struct thread_info *) entry;
2415 lwp = get_thread_lwp (thread);
2416 r = arg;
2417
2418 for (ndx = 0; ndx < r->n; ndx++)
2419 {
2420 ptid_t ptid = r->resume[ndx].thread;
2421 if (ptid_equal (ptid, minus_one_ptid)
2422 || ptid_equal (ptid, entry->id)
2423 || (ptid_is_pid (ptid)
2424 && (ptid_get_pid (ptid) == pid_of (lwp)))
2425 || (ptid_get_lwp (ptid) == -1
2426 && (ptid_get_pid (ptid) == pid_of (lwp))))
2427 {
2428 if (r->resume[ndx].kind == resume_stop
2429 && thread->last_resume_kind == resume_stop)
2430 {
2431 if (debug_threads)
2432 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
2433 thread->last_status.kind == TARGET_WAITKIND_STOPPED
2434 ? "stopped"
2435 : "stopping",
2436 lwpid_of (lwp));
2437
2438 continue;
2439 }
2440
2441 lwp->resume = &r->resume[ndx];
2442 thread->last_resume_kind = lwp->resume->kind;
2443 return 0;
2444 }
2445 }
2446
2447 /* No resume action for this thread. */
2448 lwp->resume = NULL;
2449
2450 return 0;
2451 }
2452
2453
2454 /* Set *FLAG_P if this lwp has an interesting status pending. */
2455 static int
2456 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
2457 {
2458 struct lwp_info *lwp = (struct lwp_info *) entry;
2459
2460 /* LWPs which will not be resumed are not interesting, because
2461 we might not wait for them next time through linux_wait. */
2462 if (lwp->resume == NULL)
2463 return 0;
2464
2465 if (lwp->status_pending_p)
2466 * (int *) flag_p = 1;
2467
2468 return 0;
2469 }
2470
/* Return 1 if this lwp that GDB wants running is stopped at an
   internal breakpoint that we need to step over.  It assumes that any
   required STOP_PC adjustment has already been propagated to the
   inferior's regcache.  Used as a find_inferior callback, so a return
   of 1 also stops the iteration at this LWP.  */

static int
need_step_over_p (struct inferior_list_entry *entry, void *dummy)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *thread;
  struct thread_info *saved_inferior;
  CORE_ADDR pc;

  /* LWPs which will not be resumed are not interesting, because we
     might not wait for them next time through linux_wait.  */

  if (!lwp->stopped)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Need step over [LWP %ld]? Ignoring, not stopped\n",
		 lwpid_of (lwp));
      return 0;
    }

  thread = get_lwp_thread (lwp);

  /* A thread GDB wants stopped should stay where it is.  */
  if (thread->last_resume_kind == resume_stop)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
		 lwpid_of (lwp));
      return 0;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Need step over [LWP %ld]? Ignoring, suspended\n",
		 lwpid_of (lwp));
      return 0;
    }

  /* NOTE(review): this branch only logs and then falls through to the
     PC/breakpoint checks below, rather than returning 0 -- the
     breakpoint check appears to be the authoritative test, with
     NEED_STEP_OVER just a hint; confirm that the fall-through is
     intended.  */
  if (!lwp->need_step_over)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
		 lwpid_of (lwp));
      return 0;
    }

  /* Note: PC, not STOP_PC.  Either GDB has adjusted the PC already,
     or we have.  */
  pc = get_pc (lwp);

  /* If the PC has changed since we stopped, then don't do anything,
     and let the breakpoint/tracepoint be hit.  This happens if, for
     instance, GDB handled the decr_pc_after_break subtraction itself,
     GDB is OOL stepping this thread, or the user has issued a "jump"
     command, or poked thread's registers herself.  */
  if (pc != lwp->stop_pc)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Need step over [LWP %ld]? Cancelling, PC was changed. "
		 "Old stop_pc was 0x%s, PC is now 0x%s\n",
		 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));

      lwp->need_step_over = 0;
      return 0;
    }

  /* Breakpoint queries below operate on the current inferior, so
     temporarily switch to this LWP's thread.  */
  saved_inferior = current_inferior;
  current_inferior = thread;

  /* We can only step over breakpoints we know about.  */
  if (breakpoint_here (pc))
    {
      /* Don't step over a breakpoint that GDB expects to hit
	 though.  */
      if (gdb_breakpoint_here (pc))
	{
	  if (debug_threads)
	    fprintf (stderr,
		     "Need step over [LWP %ld]? yes, but found"
		     " GDB breakpoint at 0x%s; skipping step over\n",
		     lwpid_of (lwp), paddress (pc));

	  current_inferior = saved_inferior;
	  return 0;
	}
      else
	{
	  if (debug_threads)
	    fprintf (stderr,
		     "Need step over [LWP %ld]? yes, found breakpoint at 0x%s\n",
		     lwpid_of (lwp), paddress (pc));

	  /* We've found an lwp that needs stepping over --- return 1 so
	     that find_inferior stops looking.  */
	  current_inferior = saved_inferior;

	  /* If the step over is cancelled, this is set again.  */
	  lwp->need_step_over = 0;
	  return 1;
	}
    }

  current_inferior = saved_inferior;

  if (debug_threads)
    fprintf (stderr,
	     "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
	     lwpid_of (lwp), paddress (pc));

  return 0;
}
2600
2601 /* Start a step-over operation on LWP. When LWP stopped at a
2602 breakpoint, to make progress, we need to remove the breakpoint out
2603 of the way. If we let other threads run while we do that, they may
2604 pass by the breakpoint location and miss hitting it. To avoid
2605 that, a step-over momentarily stops all threads while LWP is
2606 single-stepped while the breakpoint is temporarily uninserted from
2607 the inferior. When the single-step finishes, we reinsert the
2608 breakpoint, and let all threads that are supposed to be running,
2609 run again.
2610
2611 On targets that don't support hardware single-step, we don't
2612 currently support full software single-stepping. Instead, we only
2613 support stepping over the thread event breakpoint, by asking the
2614 low target where to place a reinsert breakpoint. Since this
2615 routine assumes the breakpoint being stepped over is a thread event
2616 breakpoint, it usually assumes the return address of the current
2617 function is a good enough place to set the reinsert breakpoint. */
2618
/* Begin a step-over for LWP: stop and suspend every other LWP, lift
   the breakpoint at LWP's PC, and set LWP running (single-stepping
   when the hardware allows it, otherwise with a reinsert breakpoint
   planted at the address the low target supplies).  Returns 1.  */
static int
start_step_over (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  CORE_ADDR pc;
  int step;

  if (debug_threads)
    fprintf (stderr,
	     "Starting step-over on LWP %ld. Stopping all threads\n",
	     lwpid_of (lwp));

  /* Suspend everyone else so nothing slips past the uninserted
     breakpoint while LWP steps over it.  */
  stop_all_lwps (1, lwp);
  gdb_assert (lwp->suspended == 0);

  if (debug_threads)
    fprintf (stderr, "Done stopping all threads for step-over.\n");

  /* Note, we should always reach here with an already adjusted PC,
     either by GDB (if we're resuming due to GDB's request), or by our
     caller, if we just finished handling an internal breakpoint GDB
     shouldn't care about.  */
  pc = get_pc (lwp);

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  /* Remember where to put the breakpoint back; finish_step_over uses
     BP_REINSERT both as the address and as the in-progress flag.  */
  lwp->bp_reinsert = pc;
  uninsert_breakpoints_at (pc);

  if (can_hardware_single_step ())
    {
      step = 1;
    }
  else
    {
      /* No hardware single-step: continue to a reinsert breakpoint
	 placed where the low target says the thread will land.  */
      CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
      set_reinsert_breakpoint (raddr);
      step = 0;
    }

  current_inferior = saved_inferior;

  linux_resume_one_lwp (lwp, step, 0, NULL);

  /* Require next event from this LWP.  */
  step_over_bkpt = lwp->head.id;
  return 1;
}
2668
2669 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
2670 start_step_over, if still there, and delete any reinsert
2671 breakpoints we've set, on non hardware single-step targets. */
2672
2673 static int
2674 finish_step_over (struct lwp_info *lwp)
2675 {
2676 if (lwp->bp_reinsert != 0)
2677 {
2678 if (debug_threads)
2679 fprintf (stderr, "Finished step over.\n");
2680
2681 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
2682 may be no breakpoint to reinsert there by now. */
2683 reinsert_breakpoints_at (lwp->bp_reinsert);
2684
2685 lwp->bp_reinsert = 0;
2686
2687 /* Delete any software-single-step reinsert breakpoints. No
2688 longer needed. We don't have to worry about other threads
2689 hitting this trap, and later not being able to explain it,
2690 because we were stepping over a breakpoint, and we hold all
2691 threads but LWP stopped while doing that. */
2692 if (!can_hardware_single_step ())
2693 delete_reinsert_breakpoints ();
2694
2695 step_over_bkpt = null_ptid;
2696 return 1;
2697 }
2698 else
2699 return 0;
2700 }
2701
2702 /* This function is called once per thread. We check the thread's resume
2703 request, which will tell us whether to resume, step, or leave the thread
2704 stopped; and what signal, if any, it should be sent.
2705
2706 For threads which we aren't explicitly told otherwise, we preserve
2707 the stepping flag; this is used for stepping over gdbserver-placed
2708 breakpoints.
2709
2710 If pending_flags was set in any thread, we queue any needed
2711 signals, since we won't actually resume. We already have a pending
2712 event to report, so we don't need to preserve any step requests;
2713 they should be re-issued if necessary. */
2714
/* find_inferior callback implementing one thread's share of a vCont:
   honor the resume request recorded on the thread by
   linux_set_resume_request.  ARG points to the LEAVE_ALL_STOPPED
   flag computed by linux_resume.  */
static int
linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp;
  struct thread_info *thread;
  int step;
  int leave_all_stopped = * (int *) arg;
  int leave_pending;

  thread = (struct thread_info *) entry;
  lwp = get_thread_lwp (thread);

  /* No request recorded for this thread: leave it as it is.  */
  if (lwp->resume == NULL)
    return 0;

  if (lwp->resume->kind == resume_stop)
    {
      if (debug_threads)
	fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));

      if (!lwp->stopped)
	{
	  if (debug_threads)
	    fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));

	  /* Stop the thread, and wait for the event asynchronously,
	     through the event loop.  */
	  send_sigstop (lwp);
	}
      else
	{
	  if (debug_threads)
	    fprintf (stderr, "already stopped LWP %ld\n",
		     lwpid_of (lwp));

	  /* The LWP may have been stopped in an internal event that
	     was not meant to be notified back to GDB (e.g., gdbserver
	     breakpoint), so we should be reporting a stop event in
	     this case too.  */

	  /* If the thread already has a pending SIGSTOP, this is a
	     no-op.  Otherwise, something later will presumably resume
	     the thread and this will cause it to cancel any pending
	     operation, due to last_resume_kind == resume_stop.  If
	     the thread already has a pending status to report, we
	     will still report it the next time we wait - see
	     status_pending_p_callback.  */
	  send_sigstop (lwp);
	}

      /* For stop requests, we're done.  */
      lwp->resume = NULL;
      thread->last_status.kind = TARGET_WAITKIND_IGNORE;
      return 0;
    }

  /* If this thread which is about to be resumed has a pending status,
     then don't resume any threads - we can just report the pending
     status.  Make sure to queue any signals that would otherwise be
     sent.  In all-stop mode, we do this decision based on if *any*
     thread has a pending status.  If there's a thread that needs the
     step-over-breakpoint dance, then don't resume any other thread
     but that particular one.  */
  leave_pending = (lwp->status_pending_p || leave_all_stopped);

  if (!leave_pending)
    {
      if (debug_threads)
	fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));

      step = (lwp->resume->kind == resume_step);
      linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
    }
  else
    {
      if (debug_threads)
	fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));

      /* If we have a new signal, enqueue the signal.  */
      if (lwp->resume->sig != 0)
	{
	  struct pending_signals *p_sig;
	  p_sig = xmalloc (sizeof (*p_sig));
	  p_sig->prev = lwp->pending_signals;
	  p_sig->signal = lwp->resume->sig;
	  memset (&p_sig->info, 0, sizeof (siginfo_t));

	  /* If this is the same signal we were previously stopped by,
	     make sure to queue its siginfo.  We can ignore the return
	     value of ptrace; if it fails, we'll skip
	     PTRACE_SETSIGINFO.  */
	  if (WIFSTOPPED (lwp->last_status)
	      && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
	    ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);

	  lwp->pending_signals = p_sig;
	}
    }

  thread->last_status.kind = TARGET_WAITKIND_IGNORE;
  lwp->resume = NULL;
  return 0;
}
2818
/* Target resume method: act on the N resume requests in RESUME_INFO
   received from GDB.  Records each thread's request, then either
   resumes threads, or leaves everything stopped when a pending status
   can be reported instead or a step-over must run first.  */
static void
linux_resume (struct thread_resume *resume_info, size_t n)
{
  struct thread_resume_array array = { resume_info, n };
  struct lwp_info *need_step_over = NULL;
  int any_pending;
  int leave_all_stopped;

  /* Attach each thread's matching request (or NULL) to it.  */
  find_inferior (&all_threads, linux_set_resume_request, &array);

  /* If there is a thread which would otherwise be resumed, which has
     a pending status, then don't resume any threads - we can just
     report the pending status.  Make sure to queue any signals that
     would otherwise be sent.  In non-stop mode, we'll apply this
     logic to each thread individually.  We consume all pending events
     before considering to start a step-over (in all-stop).  */
  any_pending = 0;
  if (!non_stop)
    find_inferior (&all_lwps, resume_status_pending_p, &any_pending);

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  Make sure
     to queue any signals that would otherwise be delivered or
     queued.  */
  if (!any_pending && supports_breakpoints ())
    need_step_over
      = (struct lwp_info *) find_inferior (&all_lwps,
					   need_step_over_p, NULL);

  leave_all_stopped = (need_step_over != NULL || any_pending);

  if (debug_threads)
    {
      if (need_step_over != NULL)
	fprintf (stderr, "Not resuming all, need step over\n");
      else if (any_pending)
	fprintf (stderr,
		 "Not resuming, all-stop and found "
		 "an LWP with pending status\n");
      else
	fprintf (stderr, "Resuming, no pending status or step over needed\n");
    }

  /* Even if we're leaving threads stopped, queue all signals we'd
     otherwise deliver.  */
  find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);

  if (need_step_over)
    start_step_over (need_step_over);
}
2871
2872 /* This function is called once per thread. We check the thread's
2873 last resume request, which will tell us whether to resume, step, or
2874 leave the thread stopped. Any signal the client requested to be
2875 delivered has already been enqueued at this point.
2876
2877 If any thread that GDB wants running is stopped at an internal
2878 breakpoint that needs stepping over, we start a step-over operation
2879 on that particular thread, and leave all others stopped. */
2880
/* find_inferior callback: set the LWP behind ENTRY running again
   according to its thread's last resume request, unless it is EXCEPT,
   already running, meant to stay stopped, suspended, or holding a
   pending status.  Always returns 0 so the whole list is visited.  */
static int
proceed_one_lwp (struct inferior_list_entry *entry, void *except)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *thread;
  int step;

  if (lwp == except)
    return 0;

  if (debug_threads)
    fprintf (stderr,
	     "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));

  if (!lwp->stopped)
    {
      if (debug_threads)
	fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
      return 0;
    }

  thread = get_lwp_thread (lwp);

  /* A stop already reported to GDB: leave the thread stopped.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    {
      if (debug_threads)
	fprintf (stderr, " client wants LWP to remain %ld stopped\n",
		 lwpid_of (lwp));
      return 0;
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
		 lwpid_of (lwp));
      return 0;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      if (debug_threads)
	fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
      return 0;
    }

  if (thread->last_resume_kind == resume_stop)
    {
      /* We haven't reported this LWP as stopped yet (otherwise, the
	 last_status.kind check above would catch it, and we wouldn't
	 reach here.  This LWP may have been momentarily paused by a
	 stop_all_lwps call while handling for example, another LWP's
	 step-over.  In that case, the pending expected SIGSTOP signal
	 that was queued at vCont;t handling time will have already
	 been consumed by wait_for_sigstop, and so we need to requeue
	 another one here.  Note that if the LWP already has a SIGSTOP
	 pending, this is a no-op.  */

      if (debug_threads)
	fprintf (stderr,
		 "Client wants LWP %ld to stop. "
		 "Making sure it has a SIGSTOP pending\n",
		 lwpid_of (lwp));

      send_sigstop (lwp);
    }

  /* Preserve the stepping flag from the last request, so gdbserver's
     own step-overs keep stepping.  */
  step = thread->last_resume_kind == resume_step;
  linux_resume_one_lwp (lwp, step, 0, NULL);
  return 0;
}
2955
2956 static int
2957 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
2958 {
2959 struct lwp_info *lwp = (struct lwp_info *) entry;
2960
2961 if (lwp == except)
2962 return 0;
2963
2964 lwp->suspended--;
2965 gdb_assert (lwp->suspended >= 0);
2966
2967 return proceed_one_lwp (entry, except);
2968 }
2969
2970 /* When we finish a step-over, set threads running again. If there's
2971 another thread that may need a step-over, now's the time to start
2972 it. Eventually, we'll move all threads past their breakpoints. */
2973
2974 static void
2975 proceed_all_lwps (void)
2976 {
2977 struct lwp_info *need_step_over;
2978
2979 /* If there is a thread which would otherwise be resumed, which is
2980 stopped at a breakpoint that needs stepping over, then don't
2981 resume any threads - have it step over the breakpoint with all
2982 other threads stopped, then resume all threads again. */
2983
2984 if (supports_breakpoints ())
2985 {
2986 need_step_over
2987 = (struct lwp_info *) find_inferior (&all_lwps,
2988 need_step_over_p, NULL);
2989
2990 if (need_step_over != NULL)
2991 {
2992 if (debug_threads)
2993 fprintf (stderr, "proceed_all_lwps: found "
2994 "thread %ld needing a step-over\n",
2995 lwpid_of (need_step_over));
2996
2997 start_step_over (need_step_over);
2998 return;
2999 }
3000 }
3001
3002 if (debug_threads)
3003 fprintf (stderr, "Proceeding, no step-over needed\n");
3004
3005 find_inferior (&all_lwps, proceed_one_lwp, NULL);
3006 }
3007
3008 /* Stopped LWPs that the client wanted to be running, that don't have
3009 pending statuses, are set to run again, except for EXCEPT, if not
3010 NULL. This undoes a stop_all_lwps call. */
3011
3012 static void
3013 unstop_all_lwps (int unsuspend, struct lwp_info *except)
3014 {
3015 if (debug_threads)
3016 {
3017 if (except)
3018 fprintf (stderr,
3019 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
3020 else
3021 fprintf (stderr,
3022 "unstopping all lwps\n");
3023 }
3024
3025 if (unsuspend)
3026 find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except);
3027 else
3028 find_inferior (&all_lwps, proceed_one_lwp, except);
3029 }
3030
3031 #ifdef HAVE_LINUX_USRREGS
3032
3033 int
3034 register_addr (int regnum)
3035 {
3036 int addr;
3037
3038 if (regnum < 0 || regnum >= the_low_target.num_regs)
3039 error ("Invalid register number %d.", regnum);
3040
3041 addr = the_low_target.regmap[regnum];
3042
3043 return addr;
3044 }
3045
/* Fetch one register, REGNO, from the inferior with PTRACE_PEEKUSER
   and supply it to REGCACHE.  Silently does nothing for registers the
   low target can't fetch or that have no ptrace offset.  */
static void
fetch_register (struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= the_low_target.num_regs)
    return;
  if ((*the_low_target.cannot_fetch_register) (regno))
    return;

  /* -1 in the regmap means the register isn't ptrace-accessible.  */
  regaddr = register_addr (regno);
  if (regaddr == -1)
    return;

  pid = lwpid_of (get_thread_lwp (current_inferior));
  /* Round the register size up to a whole number of ptrace transfer
     words, since PEEKUSER works one word at a time.  */
  size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
	  & - sizeof (PTRACE_XFER_TYPE));
  buf = alloca (size);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      *(PTRACE_XFER_TYPE *) (buf + i) =
	ptrace (PTRACE_PEEKUSER, pid,
		/* Coerce to a uintptr_t first to avoid potential gcc warning
		   of coercing an 8 byte integer to a 4 byte pointer.  */
		(PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0);
      regaddr += sizeof (PTRACE_XFER_TYPE);
      /* PEEKUSER returns the data in the return value, so errno is
	 the only way to detect failure.  */
      if (errno != 0)
	error ("reading register %d: %s", regno, strerror (errno));
    }

  if (the_low_target.supply_ptrace_register)
    the_low_target.supply_ptrace_register (regcache, regno, buf);
  else
    supply_register (regcache, regno, buf);
}
3086
3087 /* Fetch all registers, or just one, from the child process. */
3088 static void
3089 usr_fetch_inferior_registers (struct regcache *regcache, int regno)
3090 {
3091 if (regno == -1)
3092 for (regno = 0; regno < the_low_target.num_regs; regno++)
3093 fetch_register (regcache, regno);
3094 else
3095 fetch_register (regcache, regno);
3096 }
3097
/* Store our register values back into the inferior with
   PTRACE_POKEUSER.
   If REGNO is -1, do this for all registers (by recursing once per
   register).  Otherwise, REGNO specifies which register (so we can
   save time).  */
static void
usr_store_inferior_registers (struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= 0)
    {
      if (regno >= the_low_target.num_regs)
	return;

      if ((*the_low_target.cannot_store_register) (regno) == 1)
	return;

      /* -1 in the regmap means the register isn't ptrace-accessible.  */
      regaddr = register_addr (regno);
      if (regaddr == -1)
	return;
      errno = 0;
      /* Round up to a whole number of ptrace transfer words; the
	 buffer is zeroed so the padding bytes are well-defined.  */
      size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
	     & - sizeof (PTRACE_XFER_TYPE);
      buf = alloca (size);
      memset (buf, 0, size);

      if (the_low_target.collect_ptrace_register)
	the_low_target.collect_ptrace_register (regcache, regno, buf);
      else
	collect_register (regcache, regno, buf);

      pid = lwpid_of (get_thread_lwp (current_inferior));
      for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
	{
	  errno = 0;
	  ptrace (PTRACE_POKEUSER, pid,
		  /* Coerce to a uintptr_t first to avoid potential gcc warning
		     about coercing an 8 byte integer to a 4 byte pointer.  */
		  (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
		  (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
	  if (errno != 0)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to change its registers.  See also the related
		 comment in linux_resume_one_lwp.  */
	      if (errno == ESRCH)
		return;

	      if ((*the_low_target.cannot_store_register) (regno) == 0)
		error ("writing register %d: %s", regno, strerror (errno));
	    }
	  regaddr += sizeof (PTRACE_XFER_TYPE);
	}
    }
  else
    for (regno = 0; regno < the_low_target.num_regs; regno++)
      usr_store_inferior_registers (regcache, regno);
}
3159 #endif /* HAVE_LINUX_USRREGS */
3160
3161
3162
3163 #ifdef HAVE_LINUX_REGSETS
3164
3165 static int
3166 regsets_fetch_inferior_registers (struct regcache *regcache)
3167 {
3168 struct regset_info *regset;
3169 int saw_general_regs = 0;
3170 int pid;
3171 struct iovec iov;
3172
3173 regset = target_regsets;
3174
3175 pid = lwpid_of (get_thread_lwp (current_inferior));
3176 while (regset->size >= 0)
3177 {
3178 void *buf, *data;
3179 int nt_type, res;
3180
3181 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3182 {
3183 regset ++;
3184 continue;
3185 }
3186
3187 buf = xmalloc (regset->size);
3188
3189 nt_type = regset->nt_type;
3190 if (nt_type)
3191 {
3192 iov.iov_base = buf;
3193 iov.iov_len = regset->size;
3194 data = (void *) &iov;
3195 }
3196 else
3197 data = buf;
3198
3199 #ifndef __sparc__
3200 res = ptrace (regset->get_request, pid, nt_type, data);
3201 #else
3202 res = ptrace (regset->get_request, pid, data, nt_type);
3203 #endif
3204 if (res < 0)
3205 {
3206 if (errno == EIO)
3207 {
3208 /* If we get EIO on a regset, do not try it again for
3209 this process. */
3210 disabled_regsets[regset - target_regsets] = 1;
3211 free (buf);
3212 continue;
3213 }
3214 else
3215 {
3216 char s[256];
3217 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
3218 pid);
3219 perror (s);
3220 }
3221 }
3222 else if (regset->type == GENERAL_REGS)
3223 saw_general_regs = 1;
3224 regset->store_function (regcache, buf);
3225 regset ++;
3226 free (buf);
3227 }
3228 if (saw_general_regs)
3229 return 0;
3230 else
3231 return 1;
3232 }
3233
3234 static int
3235 regsets_store_inferior_registers (struct regcache *regcache)
3236 {
3237 struct regset_info *regset;
3238 int saw_general_regs = 0;
3239 int pid;
3240 struct iovec iov;
3241
3242 regset = target_regsets;
3243
3244 pid = lwpid_of (get_thread_lwp (current_inferior));
3245 while (regset->size >= 0)
3246 {
3247 void *buf, *data;
3248 int nt_type, res;
3249
3250 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3251 {
3252 regset ++;
3253 continue;
3254 }
3255
3256 buf = xmalloc (regset->size);
3257
3258 /* First fill the buffer with the current register set contents,
3259 in case there are any items in the kernel's regset that are
3260 not in gdbserver's regcache. */
3261
3262 nt_type = regset->nt_type;
3263 if (nt_type)
3264 {
3265 iov.iov_base = buf;
3266 iov.iov_len = regset->size;
3267 data = (void *) &iov;
3268 }
3269 else
3270 data = buf;
3271
3272 #ifndef __sparc__
3273 res = ptrace (regset->get_request, pid, nt_type, data);
3274 #else
3275 res = ptrace (regset->get_request, pid, &iov, data);
3276 #endif
3277
3278 if (res == 0)
3279 {
3280 /* Then overlay our cached registers on that. */
3281 regset->fill_function (regcache, buf);
3282
3283 /* Only now do we write the register set. */
3284 #ifndef __sparc__
3285 res = ptrace (regset->set_request, pid, nt_type, data);
3286 #else
3287 res = ptrace (regset->set_request, pid, data, nt_type);
3288 #endif
3289 }
3290
3291 if (res < 0)
3292 {
3293 if (errno == EIO)
3294 {
3295 /* If we get EIO on a regset, do not try it again for
3296 this process. */
3297 disabled_regsets[regset - target_regsets] = 1;
3298 free (buf);
3299 continue;
3300 }
3301 else if (errno == ESRCH)
3302 {
3303 /* At this point, ESRCH should mean the process is
3304 already gone, in which case we simply ignore attempts
3305 to change its registers. See also the related
3306 comment in linux_resume_one_lwp. */
3307 free (buf);
3308 return 0;
3309 }
3310 else
3311 {
3312 perror ("Warning: ptrace(regsets_store_inferior_registers)");
3313 }
3314 }
3315 else if (regset->type == GENERAL_REGS)
3316 saw_general_regs = 1;
3317 regset ++;
3318 free (buf);
3319 }
3320 if (saw_general_regs)
3321 return 0;
3322 else
3323 return 1;
3324 return 0;
3325 }
3326
3327 #endif /* HAVE_LINUX_REGSETS */
3328
3329
/* Fetch registers (all of them if REGNO is -1) into REGCACHE.  Prefer
   the regset interface when available; fall back to PTRACE_PEEKUSER
   for anything the regsets did not cover.  */
void
linux_fetch_registers (struct regcache *regcache, int regno)
{
#ifdef HAVE_LINUX_REGSETS
  /* A zero return means a general-registers regset was read, so the
     USRREGS fallback is unnecessary.  */
  if (regsets_fetch_inferior_registers (regcache) == 0)
    return;
#endif
#ifdef HAVE_LINUX_USRREGS
  usr_fetch_inferior_registers (regcache, regno);
#endif
}
3341
/* Store registers (all of them if REGNO is -1) from REGCACHE back to
   the inferior.  Prefer the regset interface when available; fall
   back to PTRACE_POKEUSER otherwise.  */
void
linux_store_registers (struct regcache *regcache, int regno)
{
#ifdef HAVE_LINUX_REGSETS
  /* A zero return means a general-registers regset was written, so the
     USRREGS fallback is unnecessary.  */
  if (regsets_store_inferior_registers (regcache) == 0)
    return;
#endif
#ifdef HAVE_LINUX_USRREGS
  usr_store_inferior_registers (regcache, regno);
#endif
}
3353
3354
/* Copy LEN bytes from inferior's memory starting at MEMADDR
   to debugger memory starting at MYADDR.  Returns 0 on success, or
   the errno value from a failed ptrace transfer.  Tries /proc first
   (one syscall for the whole range), then falls back to
   word-at-a-time PTRACE_PEEKTEXT.  */

static int
linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
  register int i;
  /* Round starting address down to longword boundary.  */
  register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  register int count
    = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
      / sizeof (PTRACE_XFER_TYPE);
  /* Allocate buffer of that many longwords.  */
  register PTRACE_XFER_TYPE *buffer
    = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
  int fd;
  char filename[64];
  int pid = lwpid_of (get_thread_lwp (current_inferior));

  /* Try using /proc.  Don't bother for one word.  */
  if (len >= 3 * sizeof (long))
    {
      /* We could keep this file open and cache it - possibly one per
	 thread.  That requires some juggling, but is even faster.  */
      sprintf (filename, "/proc/%d/mem", pid);
      fd = open (filename, O_RDONLY | O_LARGEFILE);
      if (fd == -1)
	goto no_proc;

      /* If pread64 is available, use it.  It's faster if the kernel
	 supports it (only one syscall), and it's 64-bit safe even on
	 32-bit platforms (for instance, SPARC debugging a SPARC64
	 application).  */
#ifdef HAVE_PREAD64
      if (pread64 (fd, myaddr, len, memaddr) != len)
#else
      if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
#endif
	{
	  /* Short read or seek failure: fall back to ptrace.  */
	  close (fd);
	  goto no_proc;
	}

      close (fd);
      return 0;
    }

 no_proc:
  /* Read all the longwords covering [MEMADDR, MEMADDR + LEN).  */
  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
	 about coercing an 8 byte integer to a 4 byte pointer.  */
      buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
			  (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
      if (errno)
	return errno;
    }

  /* Copy appropriate bytes out of the buffer.  */
  memcpy (myaddr,
	  (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	  len);

  return 0;
}
3423
3424 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
3425 memory at MEMADDR. On failure (cannot write to the inferior)
3426 returns the value of errno. */
3427
3428 static int
3429 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
3430 {
3431 register int i;
3432 /* Round starting address down to longword boundary. */
3433 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
3434 /* Round ending address up; get number of longwords that makes. */
3435 register int count
3436 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1) / sizeof (PTRACE_XFER_TYPE);
3437 /* Allocate buffer of that many longwords. */
3438 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
3439 int pid = lwpid_of (get_thread_lwp (current_inferior));
3440
3441 if (debug_threads)
3442 {
3443 /* Dump up to four bytes. */
3444 unsigned int val = * (unsigned int *) myaddr;
3445 if (len == 1)
3446 val = val & 0xff;
3447 else if (len == 2)
3448 val = val & 0xffff;
3449 else if (len == 3)
3450 val = val & 0xffffff;
3451 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
3452 val, (long)memaddr);
3453 }
3454
3455 /* Fill start and end extra bytes of buffer with existing memory data. */
3456
3457 errno = 0;
3458 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
3459 about coercing an 8 byte integer to a 4 byte pointer. */
3460 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
3461 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
3462 if (errno)
3463 return errno;
3464
3465 if (count > 1)
3466 {
3467 errno = 0;
3468 buffer[count - 1]
3469 = ptrace (PTRACE_PEEKTEXT, pid,
3470 /* Coerce to a uintptr_t first to avoid potential gcc warning
3471 about coercing an 8 byte integer to a 4 byte pointer. */
3472 (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
3473 * sizeof (PTRACE_XFER_TYPE)),
3474 0);
3475 if (errno)
3476 return errno;
3477 }
3478
3479 /* Copy data to be written over corresponding part of buffer. */
3480
3481 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), myaddr, len);
3482
3483 /* Write the entire buffer. */
3484
3485 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
3486 {
3487 errno = 0;
3488 ptrace (PTRACE_POKETEXT, pid,
3489 /* Coerce to a uintptr_t first to avoid potential gcc warning
3490 about coercing an 8 byte integer to a 4 byte pointer. */
3491 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
3492 (PTRACE_ARG4_TYPE) buffer[i]);
3493 if (errno)
3494 return errno;
3495 }
3496
3497 return 0;
3498 }
3499
/* Non-zero if the kernel supports PTRACE_O_TRACEFORK.  Set once at
   startup by linux_test_for_tracefork.  */
static int linux_supports_tracefork_flag;

/* Ask the kernel to report clone events for PID, if
   PTRACE_O_TRACEFORK support was detected.  Otherwise do nothing.  */
static void
linux_enable_event_reporting (int pid)
{
  if (!linux_supports_tracefork_flag)
    return;

  ptrace (PTRACE_SETOPTIONS, pid, 0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
}
3511
/* Helper functions for linux_test_for_tracefork, called via clone ().  */

/* The grandchild exits immediately; its only purpose is to make the
   child's fork () generate an event for the parent to observe.  */
static int
linux_tracefork_grandchild (void *arg)
{
  _exit (0);
}
3519
/* Stack size for the short-lived clone children used by the
   tracefork probe below.  */
#define STACK_SIZE 4096

/* The probe child: stop so the parent can set ptrace options, then
   fork (or clone, on no-MMU systems, using the top half of ARG's
   stack block) so the parent can look for a PTRACE_EVENT_FORK.  */
static int
linux_tracefork_child (void *arg)
{
  ptrace (PTRACE_TRACEME, 0, 0, 0);
  kill (getpid (), SIGSTOP);

#if !(defined(__UCLIBC__) && defined(HAS_NOMMU))

  if (fork () == 0)
    linux_tracefork_grandchild (NULL);

#else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

#ifdef __ia64__
  __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
	    CLONE_VM | SIGCHLD, NULL);
#else
  clone (linux_tracefork_grandchild, arg + STACK_SIZE,
	 CLONE_VM | SIGCHLD, NULL);
#endif

#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  _exit (0);
}
3547
3548 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
3549 sure that we can enable the option, and that it had the desired
3550 effect. */
3551
3552 static void
3553 linux_test_for_tracefork (void)
3554 {
3555 int child_pid, ret, status;
3556 long second_pid;
3557 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
3558 char *stack = xmalloc (STACK_SIZE * 4);
3559 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3560
3561 linux_supports_tracefork_flag = 0;
3562
3563 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
3564
3565 child_pid = fork ();
3566 if (child_pid == 0)
3567 linux_tracefork_child (NULL);
3568
3569 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3570
3571 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
3572 #ifdef __ia64__
3573 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
3574 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
3575 #else /* !__ia64__ */
3576 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
3577 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
3578 #endif /* !__ia64__ */
3579
3580 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3581
3582 if (child_pid == -1)
3583 perror_with_name ("clone");
3584
3585 ret = my_waitpid (child_pid, &status, 0);
3586 if (ret == -1)
3587 perror_with_name ("waitpid");
3588 else if (ret != child_pid)
3589 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
3590 if (! WIFSTOPPED (status))
3591 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
3592
3593 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
3594 (PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
3595 if (ret != 0)
3596 {
3597 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
3598 if (ret != 0)
3599 {
3600 warning ("linux_test_for_tracefork: failed to kill child");
3601 return;
3602 }
3603
3604 ret = my_waitpid (child_pid, &status, 0);
3605 if (ret != child_pid)
3606 warning ("linux_test_for_tracefork: failed to wait for killed child");
3607 else if (!WIFSIGNALED (status))
3608 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
3609 "killed child", status);
3610
3611 return;
3612 }
3613
3614 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
3615 if (ret != 0)
3616 warning ("linux_test_for_tracefork: failed to resume child");
3617
3618 ret = my_waitpid (child_pid, &status, 0);
3619
3620 if (ret == child_pid && WIFSTOPPED (status)
3621 && status >> 16 == PTRACE_EVENT_FORK)
3622 {
3623 second_pid = 0;
3624 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
3625 if (ret == 0 && second_pid != 0)
3626 {
3627 int second_status;
3628
3629 linux_supports_tracefork_flag = 1;
3630 my_waitpid (second_pid, &second_status, 0);
3631 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
3632 if (ret != 0)
3633 warning ("linux_test_for_tracefork: failed to kill second child");
3634 my_waitpid (second_pid, &status, 0);
3635 }
3636 }
3637 else
3638 warning ("linux_test_for_tracefork: unexpected result from waitpid "
3639 "(%d, status 0x%x)", ret, status);
3640
3641 do
3642 {
3643 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
3644 if (ret != 0)
3645 warning ("linux_test_for_tracefork: failed to kill child");
3646 my_waitpid (child_pid, &status, 0);
3647 }
3648 while (WIFSTOPPED (status));
3649
3650 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
3651 free (stack);
3652 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3653 }
3654
3655
/* Hook up to libthread_db for thread debugging, once per process.  */
static void
linux_look_up_symbols (void)
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  /* Already initialized for this process; nothing to do.  */
  if (proc->private->thread_db != NULL)
    return;

  /* If the kernel supports tracing forks then it also supports tracing
     clones, and then we don't need to use the magic thread event breakpoint
     to learn about threads.  */
  thread_db_init (!linux_supports_tracefork_flag);
#endif
}
3671
3672 static void
3673 linux_request_interrupt (void)
3674 {
3675 extern unsigned long signal_pid;
3676
3677 if (!ptid_equal (cont_thread, null_ptid)
3678 && !ptid_equal (cont_thread, minus_one_ptid))
3679 {
3680 struct lwp_info *lwp;
3681 int lwpid;
3682
3683 lwp = get_thread_lwp (current_inferior);
3684 lwpid = lwpid_of (lwp);
3685 kill_lwp (lwpid, SIGINT);
3686 }
3687 else
3688 kill_lwp (signal_pid, SIGINT);
3689 }
3690
3691 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
3692 to debugger memory starting at MYADDR. */
3693
3694 static int
3695 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
3696 {
3697 char filename[PATH_MAX];
3698 int fd, n;
3699 int pid = lwpid_of (get_thread_lwp (current_inferior));
3700
3701 snprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
3702
3703 fd = open (filename, O_RDONLY);
3704 if (fd < 0)
3705 return -1;
3706
3707 if (offset != (CORE_ADDR) 0
3708 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
3709 n = -1;
3710 else
3711 n = read (fd, myaddr, len);
3712
3713 close (fd);
3714
3715 return n;
3716 }
3717
3718 /* These breakpoint and watchpoint related wrapper functions simply
3719 pass on the function call if the target has registered a
3720 corresponding function. */
3721
3722 static int
3723 linux_insert_point (char type, CORE_ADDR addr, int len)
3724 {
3725 if (the_low_target.insert_point != NULL)
3726 return the_low_target.insert_point (type, addr, len);
3727 else
3728 /* Unsupported (see target.h). */
3729 return 1;
3730 }
3731
3732 static int
3733 linux_remove_point (char type, CORE_ADDR addr, int len)
3734 {
3735 if (the_low_target.remove_point != NULL)
3736 return the_low_target.remove_point (type, addr, len);
3737 else
3738 /* Unsupported (see target.h). */
3739 return 1;
3740 }
3741
/* Report whether the current thread last stopped because a watchpoint
   triggered (the flag is cached on its LWP record).  */
static int
linux_stopped_by_watchpoint (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);

  return lwp->stopped_by_watchpoint;
}
3749
/* Return the data address of the most recent watchpoint hit for the
   current thread (cached on its LWP record).  */
static CORE_ADDR
linux_stopped_data_address (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);

  return lwp->stopped_data_address;
}
3757
#if defined(__UCLIBC__) && defined(HAS_NOMMU)
#if defined(__mcoldfire__)
/* These should really be defined in the kernel's ptrace.h header.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
#endif

/* Under uClinux, programs are loaded at non-zero offsets, which we need
   to tell gdb about.  Returns 1 and fills in *TEXT_P and *DATA_P on
   success, 0 if the offsets could not be read.  */

static int
linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
#if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
  unsigned long text, text_end, data;
  int pid = lwpid_of (get_thread_lwp (current_inferior));

  /* PTRACE_PEEKUSER returns its result in-band; errno is the only way
     to distinguish failure from a legitimate -1 value.  */
  errno = 0;

  text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
  text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
  data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);

  if (errno == 0)
    {
      /* Both text and data offsets produced at compile-time (and so
	 used by gdb) are relative to the beginning of the program,
	 with the data segment immediately following the text segment.
	 However, the actual runtime layout in memory may put the data
	 somewhere else, so when we send gdb a data base-address, we
	 use the real data base address and subtract the compile-time
	 data base-address from it (which is just the length of the
	 text segment).  BSS immediately follows data in both
	 cases.  */
      *text_p = text;
      *data_p = data - (text_end - text);

      return 1;
    }
#endif
  return 0;
}
#endif
3802
/* qsort comparison function for ints, ascending order.  Uses explicit
   comparisons rather than subtraction: A - B overflows (undefined
   behavior) when the operands have opposite signs and are far apart,
   e.g. INT_MIN vs INT_MAX, and then reports the wrong order.  */
static int
compare_ints (const void *xa, const void *xb)
{
  int a = *(const int *)xa;
  int b = *(const int *)xb;

  return (a > b) - (a < b);
}
3811
/* Remove adjacent duplicates from the sorted range [B, E) in place and
   return one past the end of the deduplicated prefix (like
   std::unique).  An empty range is returned unchanged; advancing B
   before checking against E would walk past the end of the array
   (undefined behavior) in that case.  */
static int *
unique (int *b, int *e)
{
  int *d = b;

  if (b == e)
    return b;

  while (++b != e)
    if (*d != *b)
      *++d = *b;
  return ++d;
}
3821
3822 /* Given PID, iterates over all threads in that process.
3823
3824 Information about each thread, in a format suitable for qXfer:osdata:thread
3825 is printed to BUFFER, if it's not NULL. BUFFER is assumed to be already
3826 initialized, and the caller is responsible for finishing and appending '\0'
3827 to it.
3828
3829 The list of cores that threads are running on is assigned to *CORES, if it
3830 is not NULL. If no cores are found, *CORES will be set to NULL. Caller
3831 should free *CORES. */
3832
3833 static void
3834 list_threads (int pid, struct buffer *buffer, char **cores)
3835 {
3836 int count = 0;
3837 int allocated = 10;
3838 int *core_numbers = xmalloc (sizeof (int) * allocated);
3839 char pathname[128];
3840 DIR *dir;
3841 struct dirent *dp;
3842 struct stat statbuf;
3843
3844 sprintf (pathname, "/proc/%d/task", pid);
3845 if (stat (pathname, &statbuf) == 0 && S_ISDIR (statbuf.st_mode))
3846 {
3847 dir = opendir (pathname);
3848 if (!dir)
3849 {
3850 free (core_numbers);
3851 return;
3852 }
3853
3854 while ((dp = readdir (dir)) != NULL)
3855 {
3856 unsigned long lwp = strtoul (dp->d_name, NULL, 10);
3857
3858 if (lwp != 0)
3859 {
3860 unsigned core = linux_core_of_thread (ptid_build (pid, lwp, 0));
3861
3862 if (core != -1)
3863 {
3864 char s[sizeof ("4294967295")];
3865 sprintf (s, "%u", core);
3866
3867 if (count == allocated)
3868 {
3869 allocated *= 2;
3870 core_numbers = realloc (core_numbers,
3871 sizeof (int) * allocated);
3872 }
3873 core_numbers[count++] = core;
3874 if (buffer)
3875 buffer_xml_printf (buffer,
3876 "<item>"
3877 "<column name=\"pid\">%d</column>"
3878 "<column name=\"tid\">%s</column>"
3879 "<column name=\"core\">%s</column>"
3880 "</item>", pid, dp->d_name, s);
3881 }
3882 else
3883 {
3884 if (buffer)
3885 buffer_xml_printf (buffer,
3886 "<item>"
3887 "<column name=\"pid\">%d</column>"
3888 "<column name=\"tid\">%s</column>"
3889 "</item>", pid, dp->d_name);
3890 }
3891 }
3892 }
3893 }
3894
3895 if (cores)
3896 {
3897 *cores = NULL;
3898 if (count > 0)
3899 {
3900 struct buffer buffer2;
3901 int *b;
3902 int *e;
3903 qsort (core_numbers, count, sizeof (int), compare_ints);
3904
3905 /* Remove duplicates. */
3906 b = core_numbers;
3907 e = unique (b, core_numbers + count);
3908
3909 buffer_init (&buffer2);
3910
3911 for (b = core_numbers; b != e; ++b)
3912 {
3913 char number[sizeof ("4294967295")];
3914 sprintf (number, "%u", *b);
3915 buffer_xml_printf (&buffer2, "%s%s",
3916 (b == core_numbers) ? "" : ",", number);
3917 }
3918 buffer_grow_str0 (&buffer2, "");
3919
3920 *cores = buffer_finish (&buffer2);
3921 }
3922 }
3923 free (core_numbers);
3924 }
3925
/* Append a qXfer:osdata:processes <item> describing process PID (owned
   by USERNAME) to BUFFER.  Does nothing if the process's command line
   cannot be read or is empty.  */
static void
show_process (int pid, const char *username, struct buffer *buffer)
{
  char procfile[128];
  char cmd[MAXPATHLEN + 1];
  FILE *fp;
  size_t nread;

  sprintf (procfile, "/proc/%d/cmdline", pid);
  fp = fopen (procfile, "r");
  if (fp == NULL)
    return;

  nread = fread (cmd, 1, sizeof (cmd) - 1, fp);
  fclose (fp);

  if (nread > 0)
    {
      char *cores = NULL;
      size_t i;

      /* /proc/PID/cmdline separates arguments with NUL bytes; rejoin
	 them with spaces for display.  */
      for (i = 0; i < nread; i++)
	if (cmd[i] == '\0')
	  cmd[i] = ' ';
      cmd[nread] = '\0';

      buffer_xml_printf (buffer,
			 "<item>"
			 "<column name=\"pid\">%d</column>"
			 "<column name=\"user\">%s</column>"
			 "<column name=\"command\">%s</column>",
			 pid,
			 username,
			 cmd);

      /* This only collects core numbers, and does not print threads.  */
      list_threads (pid, NULL, &cores);

      if (cores != NULL)
	{
	  buffer_xml_printf (buffer,
			     "<column name=\"cores\">%s</column>", cores);
	  free (cores);
	}

      buffer_xml_printf (buffer, "</item>");
    }
}
3971
/* Handle qXfer:osdata reads for ANNEX "processes" or "threads".
   Returns the number of bytes copied into READBUF; 0 at end-of-data
   or for unsupported requests.  Writes are not supported.  */
static int
linux_qxfer_osdata (const char *annex,
		    unsigned char *readbuf, unsigned const char *writebuf,
		    CORE_ADDR offset, int len)
{
  /* We make the process list snapshot when the object starts to be
     read.  The snapshot persists across calls so that successive
     chunked reads see consistent data.  */
  static const char *buf;
  static long len_avail = -1;
  static struct buffer buffer;
  int processes = 0;
  int threads = 0;

  DIR *dirp;

  if (strcmp (annex, "processes") == 0)
    processes = 1;
  else if (strcmp (annex, "threads") == 0)
    threads = 1;
  else
    return 0;

  if (!readbuf || writebuf)
    return 0;

  /* OFFSET == 0 starts a new request: rebuild the snapshot by walking
     /proc for all-numeric directory entries (PIDs).  */
  if (offset == 0)
    {
      if (len_avail != -1 && len_avail != 0)
	buffer_free (&buffer);
      len_avail = 0;
      buf = NULL;
      buffer_init (&buffer);
      if (processes)
	buffer_grow_str (&buffer, "<osdata type=\"processes\">");
      else if (threads)
	buffer_grow_str (&buffer, "<osdata type=\"threads\">");

      dirp = opendir ("/proc");
      if (dirp)
	{
	  struct dirent *dp;
	  while ((dp = readdir (dirp)) != NULL)
	    {
	      struct stat statbuf;
	      char procentry[sizeof ("/proc/4294967295")];

	      /* Only digit-leading names of plausible PID length.  */
	      if (!isdigit (dp->d_name[0])
		  || strlen (dp->d_name) > sizeof ("4294967295") - 1)
		continue;

	      sprintf (procentry, "/proc/%s", dp->d_name);
	      if (stat (procentry, &statbuf) == 0
		  && S_ISDIR (statbuf.st_mode))
		{
		  int pid = (int) strtoul (dp->d_name, NULL, 10);

		  if (processes)
		    {
		      struct passwd *entry = getpwuid (statbuf.st_uid);
		      show_process (pid, entry ? entry->pw_name : "?", &buffer);
		    }
		  else if (threads)
		    {
		      list_threads (pid, &buffer, NULL);
		    }
		}
	    }

	  closedir (dirp);
	}
      buffer_grow_str0 (&buffer, "</osdata>\n");
      buf = buffer_finish (&buffer);
      len_avail = strlen (buf);
    }

  if (offset >= len_avail)
    {
      /* Done.  Get rid of the data.  */
      buffer_free (&buffer);
      buf = NULL;
      len_avail = 0;
      return 0;
    }

  /* Clamp the request to the remaining snapshot data.  */
  if (len > len_avail - offset)
    len = len_avail - offset;
  memcpy (readbuf, buf + offset, len);

  return len;
}
4062
4063 /* Convert a native/host siginfo object, into/from the siginfo in the
4064 layout of the inferiors' architecture. */
4065
4066 static void
4067 siginfo_fixup (struct siginfo *siginfo, void *inf_siginfo, int direction)
4068 {
4069 int done = 0;
4070
4071 if (the_low_target.siginfo_fixup != NULL)
4072 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4073
4074 /* If there was no callback, or the callback didn't do anything,
4075 then just do a straight memcpy. */
4076 if (!done)
4077 {
4078 if (direction == 1)
4079 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
4080 else
4081 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
4082 }
4083 }
4084
/* Read or write the siginfo of the current thread via PTRACE_GETSIGINFO
   / PTRACE_SETSIGINFO, honoring OFFSET/LEN windowing.  Returns the
   number of bytes transferred, or -1 on failure.  */
static int
linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
		    unsigned const char *writebuf, CORE_ADDR offset, int len)
{
  int pid;
  struct siginfo siginfo;
  char inf_siginfo[sizeof (struct siginfo)];

  if (current_inferior == NULL)
    return -1;

  pid = lwpid_of (get_thread_lwp (current_inferior));

  if (debug_threads)
    fprintf (stderr, "%s siginfo for lwp %d.\n",
	     readbuf != NULL ? "Reading" : "Writing",
	     pid);

  if (offset > sizeof (siginfo))
    return -1;

  /* Even a write starts with a read: a partial write must merge into
     the current contents.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp the transfer to the object's size.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
	return -1;
    }

  return len;
}
4133
/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
   so we notice when children change state; as the handler for the
   sigsuspend in my_waitpid.  */

static void
sigchld_handler (int signo)
{
  int old_errno = errno;

  if (debug_threads)
    /* fprintf is not async-signal-safe, so call write directly.  */
    write (2, "sigchld_handler\n", sizeof ("sigchld_handler\n") - 1);

  if (target_is_async_p ())
    async_file_mark (); /* trigger a linux_wait */

  /* Restore errno so the interrupted code is unaffected.  */
  errno = old_errno;
}
4152
/* Non-stop mode is always supported by this target.  */
static int
linux_supports_non_stop (void)
{
  return 1;
}
4158
/* Enable or disable async (non-blocking) target operation by
   registering or unregistering the SIGCHLD-driven event pipe with the
   event loop.  Returns the previous enable state.  */
static int
linux_async (int enable)
{
  /* The pipe's read end being open is what marks async mode as on.  */
  int previous = (linux_event_pipe[0] != -1);

  if (debug_threads)
    fprintf (stderr, "linux_async (%d), previous=%d\n",
	     enable, previous);

  if (previous != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      /* Block SIGCHLD while switching modes so the handler never sees
	 a half-initialized (or half-torn-down) pipe.  */
      sigprocmask (SIG_BLOCK, &mask, NULL);

      if (enable)
	{
	  if (pipe (linux_event_pipe) == -1)
	    fatal ("creating event pipe failed.");

	  fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
	  fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);

	  /* Register the event loop handler.  */
	  add_file_handler (linux_event_pipe[0],
			    handle_target_event, NULL);

	  /* Always trigger a linux_wait.  */
	  async_file_mark ();
	}
      else
	{
	  delete_file_handler (linux_event_pipe[0]);

	  close (linux_event_pipe[0]);
	  close (linux_event_pipe[1]);
	  linux_event_pipe[0] = -1;
	  linux_event_pipe[1] = -1;
	}

      sigprocmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}
4206
/* Switch async mode on or off to match NONSTOP.  Always succeeds.  */
static int
linux_start_non_stop (int nonstop)
{
  /* Register or unregister from event-loop accordingly.  */
  linux_async (nonstop);
  return 0;
}
4214
/* Debugging multiple processes at once is always supported here.  */
static int
linux_supports_multi_process (void)
{
  return 1;
}
4220
4221
4222 /* Enumerate spufs IDs for process PID. */
4223 static int
4224 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
4225 {
4226 int pos = 0;
4227 int written = 0;
4228 char path[128];
4229 DIR *dir;
4230 struct dirent *entry;
4231
4232 sprintf (path, "/proc/%ld/fd", pid);
4233 dir = opendir (path);
4234 if (!dir)
4235 return -1;
4236
4237 rewinddir (dir);
4238 while ((entry = readdir (dir)) != NULL)
4239 {
4240 struct stat st;
4241 struct statfs stfs;
4242 int fd;
4243
4244 fd = atoi (entry->d_name);
4245 if (!fd)
4246 continue;
4247
4248 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
4249 if (stat (path, &st) != 0)
4250 continue;
4251 if (!S_ISDIR (st.st_mode))
4252 continue;
4253
4254 if (statfs (path, &stfs) != 0)
4255 continue;
4256 if (stfs.f_type != SPUFS_MAGIC)
4257 continue;
4258
4259 if (pos >= offset && pos + 4 <= offset + len)
4260 {
4261 *(unsigned int *)(buf + pos - offset) = fd;
4262 written += 4;
4263 }
4264 pos += 4;
4265 }
4266
4267 closedir (dir);
4268 return written;
4269 }
4270
4271 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
4272 object type, using the /proc file system. */
4273 static int
4274 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
4275 unsigned const char *writebuf,
4276 CORE_ADDR offset, int len)
4277 {
4278 long pid = lwpid_of (get_thread_lwp (current_inferior));
4279 char buf[128];
4280 int fd = 0;
4281 int ret = 0;
4282
4283 if (!writebuf && !readbuf)
4284 return -1;
4285
4286 if (!*annex)
4287 {
4288 if (!readbuf)
4289 return -1;
4290 else
4291 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4292 }
4293
4294 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
4295 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
4296 if (fd <= 0)
4297 return -1;
4298
4299 if (offset != 0
4300 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4301 {
4302 close (fd);
4303 return 0;
4304 }
4305
4306 if (writebuf)
4307 ret = write (fd, writebuf, (size_t) len);
4308 else
4309 ret = read (fd, readbuf, (size_t) len);
4310
4311 close (fd);
4312 return ret;
4313 }
4314
4315 static int
4316 linux_core_of_thread (ptid_t ptid)
4317 {
4318 char filename[sizeof ("/proc//task//stat")
4319 + 2 * 20 /* decimal digits for 2 numbers, max 2^64 bit each */
4320 + 1];
4321 FILE *f;
4322 char *content = NULL;
4323 char *p;
4324 char *ts = 0;
4325 int content_read = 0;
4326 int i;
4327 int core;
4328
4329 sprintf (filename, "/proc/%d/task/%ld/stat",
4330 ptid_get_pid (ptid), ptid_get_lwp (ptid));
4331 f = fopen (filename, "r");
4332 if (!f)
4333 return -1;
4334
4335 for (;;)
4336 {
4337 int n;
4338 content = realloc (content, content_read + 1024);
4339 n = fread (content + content_read, 1, 1024, f);
4340 content_read += n;
4341 if (n < 1024)
4342 {
4343 content[content_read] = '\0';
4344 break;
4345 }
4346 }
4347
4348 p = strchr (content, '(');
4349
4350 /* Skip ")". */
4351 if (p != NULL)
4352 p = strchr (p, ')');
4353 if (p != NULL)
4354 p++;
4355
4356 /* If the first field after program name has index 0, then core number is
4357 the field with index 36. There's no constant for that anywhere. */
4358 if (p != NULL)
4359 p = strtok_r (p, " ", &ts);
4360 for (i = 0; p != NULL && i != 36; ++i)
4361 p = strtok_r (NULL, " ", &ts);
4362
4363 if (p == NULL || sscanf (p, "%d", &core) == 0)
4364 core = -1;
4365
4366 free (content);
4367 fclose (f);
4368
4369 return core;
4370 }
4371
4372 static void
4373 linux_process_qsupported (const char *query)
4374 {
4375 if (the_low_target.process_qsupported != NULL)
4376 the_low_target.process_qsupported (query);
4377 }
4378
4379 static int
4380 linux_supports_tracepoints (void)
4381 {
4382 if (*the_low_target.supports_tracepoints == NULL)
4383 return 0;
4384
4385 return (*the_low_target.supports_tracepoints) ();
4386 }
4387
4388 static CORE_ADDR
4389 linux_read_pc (struct regcache *regcache)
4390 {
4391 if (the_low_target.get_pc == NULL)
4392 return 0;
4393
4394 return (*the_low_target.get_pc) (regcache);
4395 }
4396
4397 static void
4398 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
4399 {
4400 gdb_assert (the_low_target.set_pc != NULL);
4401
4402 (*the_low_target.set_pc) (regcache, pc);
4403 }
4404
4405 static int
4406 linux_thread_stopped (struct thread_info *thread)
4407 {
4408 return get_thread_lwp (thread)->stopped;
4409 }
4410
4411 /* This exposes stop-all-threads functionality to other modules. */
4412
4413 static void
4414 linux_pause_all (int freeze)
4415 {
4416 stop_all_lwps (freeze, NULL);
4417 }
4418
4419 /* This exposes unstop-all-threads functionality to other gdbserver
4420 modules. */
4421
4422 static void
4423 linux_unpause_all (int unfreeze)
4424 {
4425 unstop_all_lwps (unfreeze, NULL);
4426 }
4427
/* The Linux target vector.  This is a positional initializer of
   struct target_ops (see target.h for the slot order); a NULL entry
   means the operation is unsupported in this configuration.  */
static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_mourn,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_insert_point,
  linux_remove_point,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  linux_read_offsets,
#else
  NULL,		/* read_offsets only applies to uClinux no-MMU targets.  */
#endif
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,		/* Without thread_db, TLS address lookup is unavailable.  */
#endif
  linux_qxfer_spu,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,		/* Monitor commands from thread_db are unavailable.  */
#endif
  linux_core_of_thread,
  linux_process_qsupported,
  linux_supports_tracepoints,
  linux_read_pc,
  linux_write_pc,
  linux_thread_stopped,
  NULL,		/* NOTE(review): unimplemented slot -- check target.h for
		   which callback this corresponds to.  */
  linux_pause_all,
  linux_unpause_all,
  linux_cancel_breakpoints
};
4483
4484 static void
4485 linux_init_signals ()
4486 {
4487 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
4488 to find what the cancel signal actually is. */
4489 #ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does. */
4490 signal (__SIGRTMIN+1, SIG_IGN);
4491 #endif
4492 }
4493
4494 void
4495 initialize_low (void)
4496 {
4497 struct sigaction sigchld_action;
4498 memset (&sigchld_action, 0, sizeof (sigchld_action));
4499 set_target_ops (&linux_target_ops);
4500 set_breakpoint_data (the_low_target.breakpoint,
4501 the_low_target.breakpoint_len);
4502 linux_init_signals ();
4503 linux_test_for_tracefork ();
4504 #ifdef HAVE_LINUX_REGSETS
4505 for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
4506 ;
4507 disabled_regsets = xmalloc (num_regsets);
4508 #endif
4509
4510 sigchld_action.sa_handler = sigchld_handler;
4511 sigemptyset (&sigchld_action.sa_mask);
4512 sigchld_action.sa_flags = SA_RESTART;
4513 sigaction (SIGCHLD, &sigchld_action, NULL);
4514 }