/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2014 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "agent.h"

#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#include <sys/ptrace.h>
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "filestuff.h"
#include "tracepoint.h"
#include "hostio.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif

/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR     (0x10000*4)
#define PT_DATA_ADDR     (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
				  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
					  int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

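/* A signal, together with its siginfo, queued on an LWP for later
   delivery or re-reporting.  Linked through PREV to the signal queued
   before it.  */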
struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (void);

/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}

static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->private = xcalloc (1, sizeof (*proc->private));

  /* Set the arch when the first LWP stops.  */
  proc->private->new_inferior = 1;

  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = wstat >> 16;
  struct thread_info *event_thr = get_lwp_thread (event_child);
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
	{
	  if (stopping_threads != NOT_STOPPING_THREADS)
	    new_lwp->stop_pc = get_stop_pc (new_lwp);
	  else
	    linux_resume_one_lwp (new_lwp, 0, 0, NULL);
	}
      else
	{
	  new_lwp->stop_expected = 1;

	  if (stopping_threads != NOT_STOPPING_THREADS)
	    {
	      new_lwp->stop_pc = get_stop_pc (new_lwp);
	      new_lwp->status_pending_p = 1;
	      new_lwp->status_pending = status;
	    }
	  else
	    /* Pass the signal on.  This is what GDB does - except
	       shouldn't we really report it instead?  */
	    linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
	}

      /* Always resume the current thread.  If we are stopping
	 threads, it will have a pending SIGSTOP; we may as well
	 collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_inferior, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_inferior = saved_inferior;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:
   If we were single-stepping this process using PTRACE_SINGLESTEP,
   we will get only the one SIGTRAP (even if the instruction we
   stepped over was a breakpoint).  The value of $eip will be the
   next instruction.
   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If we cancel the SIGTRAP, we
   must resume at the decremented PC.

   (Presumably, not yet tested) On a non-decr_pc_after_break machine
   with hardware or kernel single-step:
   If we single-step over a breakpoint instruction, our PC will
   point at the following instruction.  If we continue and hit a
   breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static CORE_ADDR
get_stop_pc (struct lwp_info *lwp)
{
  CORE_ADDR stop_pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  stop_pc = get_pc (lwp);

  if (WSTOPSIG (lwp->last_status) == SIGTRAP
      && !lwp->stepping
      && !lwp->stopped_by_watchpoint
      && lwp->last_status >> 16 == 0)
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    debug_printf ("stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}

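/* Allocate and zero a new lwp_info for PTID, let the low target
   attach its per-thread data, and register the LWP as a new thread in
   the inferior list.  */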
static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  if (the_low_target.new_thread != NULL)
    lwp->arch_private = the_low_target.new_thread ();

  lwp->thread = add_thread (ptid, lwp);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
	{
	  personality_set = 1;
	  personality (personality_orig | ADDR_NO_RANDOMIZE);
	}
      if (errno != 0 || (personality_set
			 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
	warning ("Error disabling address space randomization: %s",
		 strerror (errno));
    }
#endif

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
	 stdout to stderr so that inferior i/o doesn't corrupt the connection.
	 Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
	{
	  close (0);
	  open ("/dev/null", O_RDONLY);
	  dup2 (2, 1);
	  if (write (2, "stdin/stdout redirected\n",
		     sizeof ("stdin/stdout redirected\n") - 1) < 0)
	    {
	      /* Errors ignored.  */;
	    }
	}

      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
	warning ("Error restoring address space randomization: %s",
		 strerror (errno));
    }
#endif

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

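/* Build a human-readable string explaining why attaching to the LWP
   given by PTID failed with errno value ERR, appending any extra
   diagnostics collected by linux_ptrace_attach_fail_reason.  The
   result lives in a static buffer that is freed on the next call.  */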
char *
linux_attach_fail_reason_string (ptid_t ptid, int err)
{
  static char *reason_string;
  struct buffer buffer;
  char *warnings;
  long lwpid = ptid_get_lwp (ptid);

  xfree (reason_string);

  buffer_init (&buffer);
  linux_ptrace_attach_fail_reason (lwpid, &buffer);
  buffer_grow_str0 (&buffer, "");
  warnings = buffer_finish (&buffer);
  if (warnings[0] != '\0')
    reason_string = xstrprintf ("%s (%d), %s",
				strerror (err), err, warnings);
  else
    reason_string = xstrprintf ("%s (%d)",
				strerror (err), err);
  xfree (warnings);
  return reason_string;
}

/* Attach to an inferior process.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    error ("Cannot attach to process %ld: %s",
	   pid, linux_attach_fail_reason_string (ptid, err));

  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
	 process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  if (linux_proc_get_tgid (pid) == pid)
    {
      DIR *dir;
      char pathname[128];

      sprintf (pathname, "/proc/%ld/task", pid);

      dir = opendir (pathname);

      if (!dir)
	{
	  fprintf (stderr, "Could not open /proc/%ld/task.\n", pid);
	  fflush (stderr);
	}
      else
	{
	  /* At this point we attached to the tgid.  Scan the task for
	     existing threads.  */
	  int new_threads_found;
	  int iterations = 0;

	  while (iterations < 2)
	    {
	      struct dirent *dp;

	      new_threads_found = 0;
	      /* Add all the other threads.  While we go through the
		 threads, new threads may be spawned.  Cycle through
		 the list of threads until we have done two iterations without
		 finding new threads.  */
	      while ((dp = readdir (dir)) != NULL)
		{
		  unsigned long lwp;
		  ptid_t ptid;

		  /* Fetch one lwp.  */
		  lwp = strtoul (dp->d_name, NULL, 10);

		  ptid = ptid_build (pid, lwp, 0);

		  /* Is this a new thread?  */
		  if (lwp != 0 && find_thread_ptid (ptid) == NULL)
		    {
		      int err;

		      if (debug_threads)
			debug_printf ("Found new lwp %ld\n", lwp);

		      err = linux_attach_lwp (ptid);
		      if (err != 0)
			warning ("Cannot attach to lwp %ld: %s",
				 lwp,
				 linux_attach_fail_reason_string (ptid, err));

		      new_threads_found++;
		    }
		}

	      if (!new_threads_found)
		iterations++;
	      else
		iterations = 0;

	      rewinddir (dir);
	    }
	  closedir (dir);
	}
    }

  return 0;
}

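/* Bookkeeping for last_thread_of_process_p: the pid being searched
   for, and how many of its threads have been seen so far.  */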
struct counter
{
  int pid;
  int count;
};

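/* Callback for find_inferior.  Increments the count of threads
   matching COUNTER->pid, and returns 1 (ending the walk) as soon as a
   second one is seen.  */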
static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
	return 1;
    }

  return 0;
}

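/* Return non-zero if PID has exactly one thread left in the inferior
   list, i.e. no second thread of that process can be found.  */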
static int
last_thread_of_process_p (int pid)
{
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
			 second_thread_of_pid_p, &counter) == NULL);
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use linux_wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.
      */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  gdb_assert (res > 0);
}

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
	debug_printf ("lkop: is last of process %s\n",
		      target_pid_to_str (entry->id));
      return 0;
    }

  kill_wait_lwp (lwp);
  return 0;
}

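/* Kill process PID: stop its LWPs, kill them all (the leader last;
   see the comment in kill_one_lwp_callback), and mourn the process.
   Returns -1 if PID is not a known process, 0 otherwise.  */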
static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in kill_one_lwp_callback.  We did not kill the
     first thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it.  "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

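/* Callback for find_inferior.  Detaches from one LWP of process PID:
   clears any pending SIGSTOP, flushes pending register changes, and
   detaches with whatever signal get_detach_signal decided should be
   delivered.  */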
static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_thread (thread);

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    error (_("Can't detach %s: %s"),
	   target_pid_to_str (ptid_of (thread)),
	   strerror (errno));

  delete_lwp (lwp);
  return 0;
}

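/* Detach from process PID: stop and stabilize all its threads, detach
   each LWP, and mourn the process.  Returns -1 if PID is not a known
   process, 0 otherwise.  */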
static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct process_info *process = proc;

  if (pid_of (thread) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

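/* Forget everything about PROCESS: delete its LWPs, free its private
   data, and remove it from the process list.  */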
static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_threads, delete_lwp_callback, process);

  /* Free all private data.  */
  priv = process->private;
  free (priv->arch_private);
  free (priv);
  process->private = NULL;

  remove_process (process);
}

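/* Wait until process PID has exited or been killed, looping until
   waitpid reports an exit/termination status or runs out of children
   to wait for.  */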
static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  ptid_t ptid = * (ptid_t *) arg;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (thread->entry.id))
    return 0;

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  return lwp->status_pending_p;
}

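/* Callback for find_inferior.  Returns 1 if ENTRY's lwp id matches
   the lwp of the ptid in DATA, falling back to DATA's pid when it has
   no lwp component.  */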
static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

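/* Find the lwp_info for the LWP identified by PTID; return NULL if
   there is none.  */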
struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct inferior_list_entry *thread
    = find_inferior (&all_threads, same_lwp, &ptid);

  if (thread == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) thread);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  struct inferior_list_entry *inf, *tmp;
  int count = 0;

  ALL_INFERIORS (&all_threads, inf, tmp)
    {
      if (ptid_get_pid (inf->id) == pid)
	count++;
    }

  return count;
}

/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  struct process_info *proc, *tmp;

  ALL_PROCESSES (proc, tmp)
    {
      pid_t leader_pid = pid_of (proc);
      struct lwp_info *leader_lp;

      leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));

      if (debug_threads)
	debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
		      "num_lwps=%d, zombie=%d\n",
		      leader_pid, leader_lp != NULL, num_lwps (leader_pid),
		      linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL
	  /* Check if there are other threads in the group, as we may
	     have raced with the inferior simply exiting.  */
	  && !last_thread_of_process_p (leader_pid)
	  && linux_proc_pid_is_zombie (leader_pid))
	{
	  /* A leader zombie can mean one of two things:

	     - It exited, and there's an exit status pending
	       available, or only the leader exited (not the whole
	       program).  In the latter case, we can't waitpid the
	       leader's exit status until all other threads are gone.

	     - There are 3 or more threads in the group, and a thread
	       other than the leader exec'd.  On an exec, the Linux
	       kernel destroys all other threads (except the execing
	       one) in the thread group, and resets the execing thread's
	       tid to the tgid.  No exit notification is sent for the
	       execing thread -- from the ptracer's perspective, it
	       appears as though the execing thread just vanishes.
	       Until we reap all other threads except the leader and the
	       execing thread, the leader will be zombie, and the
	       execing thread will be in `D (disc sleep)'.  As soon as
	       all other threads are reaped, the execing thread changes
	       its tid to the tgid, and the previous (zombie) leader
	       vanishes, giving place to the "new" leader.  We could try
	       distinguishing the exit and exec cases, by waiting once
	       more, and seeing if something comes out, but it doesn't
	       sound useful.  The previous leader _does_ go away, and
	       we'll re-add the new one once we see the exec event
	       (which is just the same as what would happen if the
	       previous leader did exit voluntarily before some other
	       thread execs).  */

	  if (debug_threads)
	    fprintf (stderr,
		     "CZL: Thread group leader %d zombie "
		     "(it exited, or another thread execd).\n",
		     leader_pid);

	  delete_lwp (leader_lp);
	}
    }
}

/* Callback for `find_inferior'.  Returns the first LWP that is not
   stopped.  ARG is a PTID filter.  */

static int
not_stopped_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct lwp_info *lwp;
  ptid_t filter = *(ptid_t *) arg;

  if (!ptid_match (ptid_of (thr), filter))
    return 0;

  lwp = get_thread_lwp (thr);
  if (!lwp->stopped)
    return 1;

  return 0;
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
	debug_printf ("got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
				  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

/* The reason we resume in the caller is that we want to be able to
   pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming; otherwise, linux_resume_one_lwp
   refuses to resume.  */
1467
1468 static int
1469 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
1470 {
1471 struct thread_info *saved_inferior;
1472
1473 saved_inferior = current_inferior;
1474 current_inferior = get_lwp_thread (lwp);
1475
1476 if ((wstat == NULL
1477 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1478 && supports_fast_tracepoints ()
1479 && agent_loaded_p ())
1480 {
1481 struct fast_tpoint_collect_status status;
1482 int r;
1483
1484 if (debug_threads)
1485 debug_printf ("Checking whether LWP %ld needs to move out of the "
1486 "jump pad.\n",
1487 lwpid_of (current_inferior));
1488
1489 r = linux_fast_tracepoint_collecting (lwp, &status);
1490
1491 if (wstat == NULL
1492 || (WSTOPSIG (*wstat) != SIGILL
1493 && WSTOPSIG (*wstat) != SIGFPE
1494 && WSTOPSIG (*wstat) != SIGSEGV
1495 && WSTOPSIG (*wstat) != SIGBUS))
1496 {
1497 lwp->collecting_fast_tracepoint = r;
1498
1499 if (r != 0)
1500 {
1501 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
1502 {
1503 /* Haven't executed the original instruction yet.
1504 Set breakpoint there, and wait till it's hit,
1505 then single-step until exiting the jump pad. */
1506 lwp->exit_jump_pad_bkpt
1507 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1508 }
1509
1510 if (debug_threads)
1511 debug_printf ("Checking whether LWP %ld needs to move out of "
1512 "the jump pad...it does\n",
1513 lwpid_of (current_inferior));
1514 current_inferior = saved_inferior;
1515
1516 return 1;
1517 }
1518 }
1519 else
1520 {
1521 /* If we get a synchronous signal while collecting, *and*
1522 while executing the (relocated) original instruction,
1523 reset the PC to point at the tpoint address, before
1524 reporting to GDB. Otherwise, it's an IPA lib bug: just
1525 report the signal to GDB, and pray for the best. */
1526
1527 lwp->collecting_fast_tracepoint = 0;
1528
1529 if (r != 0
1530 && (status.adjusted_insn_addr <= lwp->stop_pc
1531 && lwp->stop_pc < status.adjusted_insn_addr_end))
1532 {
1533 siginfo_t info;
1534 struct regcache *regcache;
1535
1536 /* The si_addr on a few signals references the address
1537 of the faulting instruction. Adjust that as
1538 well. */
1539 if ((WSTOPSIG (*wstat) == SIGILL
1540 || WSTOPSIG (*wstat) == SIGFPE
1541 || WSTOPSIG (*wstat) == SIGBUS
1542 || WSTOPSIG (*wstat) == SIGSEGV)
1543 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_inferior),
1544 (PTRACE_TYPE_ARG3) 0, &info) == 0
1545 /* Final check just to make sure we don't clobber
1546 the siginfo of non-kernel-sent signals. */
1547 && (uintptr_t) info.si_addr == lwp->stop_pc)
1548 {
1549 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
1550 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_inferior),
1551 (PTRACE_TYPE_ARG3) 0, &info);
1552 }
1553
1554 regcache = get_thread_regcache (current_inferior, 1);
1555 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
1556 lwp->stop_pc = status.tpoint_addr;
1557
1558 /* Cancel any fast tracepoint lock this thread was
1559 holding. */
1560 force_unlock_trace_buffer ();
1561 }
1562
1563 if (lwp->exit_jump_pad_bkpt != NULL)
1564 {
1565 if (debug_threads)
1566 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
1567 "stopping all threads momentarily.\n");
1568
1569 stop_all_lwps (1, lwp);
1570 cancel_breakpoints ();
1571
1572 delete_breakpoint (lwp->exit_jump_pad_bkpt);
1573 lwp->exit_jump_pad_bkpt = NULL;
1574
1575 unstop_all_lwps (1, lwp);
1576
1577 gdb_assert (lwp->suspended >= 0);
1578 }
1579 }
1580 }
1581
1582 if (debug_threads)
1583 debug_printf ("Checking whether LWP %ld needs to move out of the "
1584 "jump pad...no\n",
1585 lwpid_of (current_inferior));
1586
1587 current_inferior = saved_inferior;
1588 return 0;
1589 }
1590
1591 /* Enqueue one signal in the "signals to report later when out of the
1592 jump pad" list. */
1593
1594 static void
1595 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1596 {
1597 struct pending_signals *p_sig;
1598 struct thread_info *thread = get_lwp_thread (lwp);
1599
1600 if (debug_threads)
1601 debug_printf ("Deferring signal %d for LWP %ld.\n",
1602 WSTOPSIG (*wstat), lwpid_of (thread));
1603
1604 if (debug_threads)
1605 {
1606 struct pending_signals *sig;
1607
1608 for (sig = lwp->pending_signals_to_report;
1609 sig != NULL;
1610 sig = sig->prev)
1611 debug_printf (" Already queued %d\n",
1612 sig->signal);
1613
1614 debug_printf (" (no more currently queued signals)\n");
1615 }
1616
1617 /* Don't enqueue non-RT signals if they are already in the deferred
1618 queue. (SIGSTOP being the easiest signal to see ending up here
1619 twice) */
1620 if (WSTOPSIG (*wstat) < __SIGRTMIN)
1621 {
1622 struct pending_signals *sig;
1623
1624 for (sig = lwp->pending_signals_to_report;
1625 sig != NULL;
1626 sig = sig->prev)
1627 {
1628 if (sig->signal == WSTOPSIG (*wstat))
1629 {
1630 if (debug_threads)
1631 debug_printf ("Not requeuing already queued non-RT signal %d"
1632 " for LWP %ld\n",
1633 sig->signal,
1634 lwpid_of (thread));
1635 return;
1636 }
1637 }
1638 }
1639
1640 p_sig = xmalloc (sizeof (*p_sig));
1641 p_sig->prev = lwp->pending_signals_to_report;
1642 p_sig->signal = WSTOPSIG (*wstat);
1643 memset (&p_sig->info, 0, sizeof (siginfo_t));
1644 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1645 &p_sig->info);
1646
1647 lwp->pending_signals_to_report = p_sig;
1648 }
1649
1650 /* Dequeue one signal from the "signals to report later when out of
1651 the jump pad" list. */
1652
1653 static int
1654 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1655 {
1656 struct thread_info *thread = get_lwp_thread (lwp);
1657
1658 if (lwp->pending_signals_to_report != NULL)
1659 {
1660 struct pending_signals **p_sig;
1661
1662 p_sig = &lwp->pending_signals_to_report;
1663 while ((*p_sig)->prev != NULL)
1664 p_sig = &(*p_sig)->prev;
1665
1666 *wstat = W_STOPCODE ((*p_sig)->signal);
1667 if ((*p_sig)->info.si_signo != 0)
1668 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1669 &(*p_sig)->info);
1670 free (*p_sig);
1671 *p_sig = NULL;
1672
1673 if (debug_threads)
1674 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
1675 WSTOPSIG (*wstat), lwpid_of (thread));
1676
1677 if (debug_threads)
1678 {
1679 struct pending_signals *sig;
1680
1681 for (sig = lwp->pending_signals_to_report;
1682 sig != NULL;
1683 sig = sig->prev)
1684 debug_printf (" Still queued %d\n",
1685 sig->signal);
1686
1687 debug_printf (" (no more queued signals)\n");
1688 }
1689
1690 return 1;
1691 }
1692
1693 return 0;
1694 }
1695
1696 /* Arrange for a breakpoint to be hit again later. We don't keep the
1697 SIGTRAP status and don't forward the SIGTRAP signal to the LWP. We
1698 will handle the current event, eventually we will resume this LWP,
1699 and this breakpoint will trap again. */
1700
1701 static int
1702 cancel_breakpoint (struct lwp_info *lwp)
1703 {
1704 struct thread_info *saved_inferior;
1705
1706 /* There's nothing to do if we don't support breakpoints. */
1707 if (!supports_breakpoints ())
1708 return 0;
1709
1710 /* breakpoint_at reads from current inferior. */
1711 saved_inferior = current_inferior;
1712 current_inferior = get_lwp_thread (lwp);
1713
1714 if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
1715 {
1716 if (debug_threads)
1717 debug_printf ("CB: Push back breakpoint for %s\n",
1718 target_pid_to_str (ptid_of (current_inferior)));
1719
1720 /* Back up the PC if necessary. */
1721 if (the_low_target.decr_pc_after_break)
1722 {
1723 struct regcache *regcache
1724 = get_thread_regcache (current_inferior, 1);
1725 (*the_low_target.set_pc) (regcache, lwp->stop_pc);
1726 }
1727
1728 current_inferior = saved_inferior;
1729 return 1;
1730 }
1731 else
1732 {
1733 if (debug_threads)
1734 debug_printf ("CB: No breakpoint found at %s for [%s]\n",
1735 paddress (lwp->stop_pc),
1736 target_pid_to_str (ptid_of (current_inferior)));
1737 }
1738
1739 current_inferior = saved_inferior;
1740 return 0;
1741 }
1742
1743 /* Do low-level handling of the event, and check if we should go on
1744 and pass it to caller code. Return the affected lwp if we are, or
1745 NULL otherwise. */
1746
1747 static struct lwp_info *
1748 linux_low_filter_event (ptid_t filter_ptid, int lwpid, int wstat)
1749 {
1750 struct lwp_info *child;
1751 struct thread_info *thread;
1752
1753 child = find_lwp_pid (pid_to_ptid (lwpid));
1754
1755 /* If we didn't find a process, one of two things presumably happened:
1756 - A process we started and then detached from has exited. Ignore it.
1757 - A process we are controlling has forked and the new child's stop
1758 was reported to us by the kernel. Save its PID. */
1759 if (child == NULL && WIFSTOPPED (wstat))
1760 {
1761 add_to_pid_list (&stopped_pids, lwpid, wstat);
1762 return NULL;
1763 }
1764 else if (child == NULL)
1765 return NULL;
1766
1767 thread = get_lwp_thread (child);
1768
1769 child->stopped = 1;
1770
1771 child->last_status = wstat;
1772
1773 if (WIFSTOPPED (wstat))
1774 {
1775 struct process_info *proc;
1776
1777 /* Architecture-specific setup after inferior is running. This
1778 needs to happen after we have attached to the inferior and it
1779 is stopped for the first time, but before we access any
1780 inferior registers. */
1781 proc = find_process_pid (pid_of (thread));
1782 if (proc->private->new_inferior)
1783 {
1784 struct thread_info *saved_inferior;
1785
1786 saved_inferior = current_inferior;
1787 current_inferior = thread;
1788
1789 the_low_target.arch_setup ();
1790
1791 current_inferior = saved_inferior;
1792
1793 proc->private->new_inferior = 0;
1794 }
1795 }
1796
1797 /* Store the STOP_PC, with adjustment applied. This depends on the
1798 architecture being defined already (so that CHILD has a valid
1799 regcache), and on LAST_STATUS being set (to check for SIGTRAP or
1800 not). */
1801 if (WIFSTOPPED (wstat))
1802 {
1803 if (debug_threads
1804 && the_low_target.get_pc != NULL)
1805 {
1806 struct thread_info *saved_inferior;
1807 struct regcache *regcache;
1808 CORE_ADDR pc;
1809
1810 saved_inferior = current_inferior;
1811 current_inferior = thread;
1812 regcache = get_thread_regcache (current_inferior, 1);
1813 pc = (*the_low_target.get_pc) (regcache);
1814 debug_printf ("linux_low_filter_event: pc is 0x%lx\n", (long) pc);
1815 current_inferior = saved_inferior;
1816 }
1817
1818 child->stop_pc = get_stop_pc (child);
1819 }
1820
1821 /* Fetch the possibly triggered data watchpoint info and store it in
1822 CHILD.
1823
1824 On some archs, like x86, that use debug registers to set
1825 watchpoints, it's possible that the way to know which watched
1826 address trapped, is to check the register that is used to select
1827 which address to watch. Problem is, between setting the
1828 watchpoint and reading back which data address trapped, the user
1829 may change the set of watchpoints, and, as a consequence, GDB
1830 changes the debug registers in the inferior. To avoid reading
1831 back a stale stopped-data-address when that happens, we cache in
1832 LP the fact that a watchpoint trapped, and the corresponding data
1833 address, as soon as we see CHILD stop with a SIGTRAP. If GDB
1834 changes the debug registers meanwhile, we have the cached data we
1835 can rely on. */
1836
1837 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP)
1838 {
1839 if (the_low_target.stopped_by_watchpoint == NULL)
1840 {
1841 child->stopped_by_watchpoint = 0;
1842 }
1843 else
1844 {
1845 struct thread_info *saved_inferior;
1846
1847 saved_inferior = current_inferior;
1848 current_inferior = thread;
1849
1850 child->stopped_by_watchpoint
1851 = the_low_target.stopped_by_watchpoint ();
1852
1853 if (child->stopped_by_watchpoint)
1854 {
1855 if (the_low_target.stopped_data_address != NULL)
1856 child->stopped_data_address
1857 = the_low_target.stopped_data_address ();
1858 else
1859 child->stopped_data_address = 0;
1860 }
1861
1862 current_inferior = saved_inferior;
1863 }
1864 }
1865
1866 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
1867 {
1868 linux_enable_event_reporting (lwpid);
1869 child->must_set_ptrace_flags = 0;
1870 }
1871
1872 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
1873 && wstat >> 16 != 0)
1874 {
1875 handle_extended_wait (child, wstat);
1876 return NULL;
1877 }
1878
1879 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
1880 && child->stop_expected)
1881 {
1882 if (debug_threads)
1883 debug_printf ("Expected stop.\n");
1884 child->stop_expected = 0;
1885
1886 if (thread->last_resume_kind == resume_stop)
1887 {
1888 /* We want to report the stop to the core. Treat the
1889 SIGSTOP as a normal event. */
1890 }
1891 else if (stopping_threads != NOT_STOPPING_THREADS)
1892 {
1893 /* Stopping threads. We don't want this SIGSTOP to end up
1894 pending in the FILTER_PTID handling below. */
1895 return NULL;
1896 }
1897 else
1898 {
1899 /* Filter out the event. */
1900 linux_resume_one_lwp (child, child->stepping, 0, NULL);
1901 return NULL;
1902 }
1903 }
1904
1905 /* Check if the thread has exited. */
1906 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat))
1907 && num_lwps (pid_of (thread)) > 1)
1908 {
1909 if (debug_threads)
1910 debug_printf ("LLW: %d exited.\n", lwpid);
1911
1912 /* If there is at least one more LWP, then the exit signal
1913 was not the end of the debugged application and should be
1914 ignored. */
1915 delete_lwp (child);
1916 return NULL;
1917 }
1918
1919 if (!ptid_match (ptid_of (thread), filter_ptid))
1920 {
1921 if (debug_threads)
1922 debug_printf ("LWP %d got an event %06x, leaving pending.\n",
1923 lwpid, wstat);
1924
1925 if (WIFSTOPPED (wstat))
1926 {
1927 child->status_pending_p = 1;
1928 child->status_pending = wstat;
1929
1930 if (WSTOPSIG (wstat) != SIGSTOP)
1931 {
1932 /* Cancel breakpoint hits. The breakpoint may be
1933 removed before we fetch events from this process to
1934 report to the core. It is best not to assume the
1935 moribund breakpoints heuristic always handles these
1936 cases --- it could be that too many events go through to
1937 the core before this one is handled. All-stop always
1938 cancels breakpoint hits in all threads. */
1939 if (non_stop
1940 && WSTOPSIG (wstat) == SIGTRAP
1941 && cancel_breakpoint (child))
1942 {
1943 /* Throw away the SIGTRAP. */
1944 child->status_pending_p = 0;
1945
1946 if (debug_threads)
1947 debug_printf ("LLW: LWP %d hit a breakpoint while"
1948 " waiting for another process;"
1949 " cancelled it\n", lwpid);
1950 }
1951 }
1952 }
1953 else if (WIFEXITED (wstat) || WIFSIGNALED (wstat))
1954 {
1955 if (debug_threads)
1956 debug_printf ("LLWE: process %d exited while fetching "
1957 "event from another LWP\n", lwpid);
1958
1959 /* This was the last lwp in the process. Events are
1960 serialized to the GDB core, and we can't report this one
1961 right now, but the GDB core and the other target layers
1962 will want to be notified about the exit code/signal, so
1963 leave the status pending for the next time we're able to
1964 report it. */
1965 mark_lwp_dead (child, wstat);
1966 }
1967
1968 return NULL;
1969 }
1970
1971 return child;
1972 }
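
/* A minimal, standalone sketch (not gdbserver code) of the caching
   scheme described above: read the watchpoint hit state from the
   low-target hooks once, as soon as the SIGTRAP is seen, so that
   later debug-register changes cannot yield a stale answer.  All
   names here are hypothetical stand-ins for the_low_target and
   struct lwp_info.  */
#include <stdio.h>

struct sketch_lwp
{
  int stopped_by_watchpoint;
  unsigned long stopped_data_address;
};

/* Stand-ins for the arch hooks; a real backend would read the debug
   registers (e.g. DR6/DR7 on x86).  */
static int hook_stopped_by_watchpoint (void) { return 1; }
static unsigned long hook_stopped_data_address (void) { return 0x601040; }

static void
cache_watchpoint_state (struct sketch_lwp *lwp)
{
  /* Cache now; the cached answer stays valid even if the debug
     registers are rewritten before the event is reported.  */
  lwp->stopped_by_watchpoint = hook_stopped_by_watchpoint ();
  lwp->stopped_data_address
    = lwp->stopped_by_watchpoint ? hook_stopped_data_address () : 0;
}

int
main (void)
{
  struct sketch_lwp lwp = { 0, 0 };

  cache_watchpoint_state (&lwp);
  printf ("watchpoint=%d data=0x%lx\n",
          lwp.stopped_by_watchpoint, lwp.stopped_data_address);
  return 0;
}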
1973
1974 /* When the event-loop is doing a step-over, this points at the thread
1975 being stepped. */
1976 ptid_t step_over_bkpt;
1977
1978 /* Wait for an event from child(ren) WAIT_PTID, and return any that
1979 match FILTER_PTID (leaving others pending). The PTIDs can be:
1980 minus_one_ptid, to specify any child; a pid PTID, specifying all
1981 lwps of a thread group; or a PTID representing a single lwp. Store
1982 the stop status through the status pointer WSTAT. OPTIONS is
1983 passed to the waitpid call. Return 0 if no event was found and
1984 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
1985 were found. Return the PID of the stopped child otherwise. */
1986
1987 static int
1988 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
1989 int *wstatp, int options)
1990 {
1991 struct thread_info *event_thread;
1992 struct lwp_info *event_child, *requested_child;
1993 sigset_t block_mask, prev_mask;
1994
1995 retry:
1996 /* N.B. event_thread points to the thread_info struct that contains
1997 event_child. Keep them in sync. */
1998 event_thread = NULL;
1999 event_child = NULL;
2000 requested_child = NULL;
2001
2002 /* Check for a lwp with a pending status. */
2003
2004 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2005 {
2006 event_thread = (struct thread_info *)
2007 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2008 if (event_thread != NULL)
2009 event_child = get_thread_lwp (event_thread);
2010 if (debug_threads && event_thread)
2011 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2012 }
2013 else if (!ptid_equal (filter_ptid, null_ptid))
2014 {
2015 requested_child = find_lwp_pid (filter_ptid);
2016
2017 if (stopping_threads == NOT_STOPPING_THREADS
2018 && requested_child->status_pending_p
2019 && requested_child->collecting_fast_tracepoint)
2020 {
2021 enqueue_one_deferred_signal (requested_child,
2022 &requested_child->status_pending);
2023 requested_child->status_pending_p = 0;
2024 requested_child->status_pending = 0;
2025 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2026 }
2027
2028 if (requested_child->suspended
2029 && requested_child->status_pending_p)
2030 fatal ("requesting an event out of a suspended child?");
2031
2032 if (requested_child->status_pending_p)
2033 {
2034 event_child = requested_child;
2035 event_thread = get_lwp_thread (event_child);
2036 }
2037 }
2038
2039 if (event_child != NULL)
2040 {
2041 if (debug_threads)
2042 debug_printf ("Got an event from pending child %ld (%04x)\n",
2043 lwpid_of (event_thread), event_child->status_pending);
2044 *wstatp = event_child->status_pending;
2045 event_child->status_pending_p = 0;
2046 event_child->status_pending = 0;
2047 current_inferior = event_thread;
2048 return lwpid_of (event_thread);
2049 }
2050
2051 /* But if we don't find a pending event, we'll have to wait.
2052
2053 We only enter this loop if no process has a pending wait status.
2054 Thus any action taken in response to a wait status inside this
2055 loop is responding as soon as we detect the status, not after any
2056 pending events. */
2057
2058 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2059 all signals while here. */
2060 sigfillset (&block_mask);
2061 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2062
2063 while (event_child == NULL)
2064 {
2065 pid_t ret = 0;
2066
2067 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2068 quirks:
2069
2070 - If the thread group leader exits while other threads in the
2071 thread group still exist, waitpid(TGID, ...) hangs. That
2072 waitpid won't return an exit status until the other threads
2073 in the group are reaped.
2074
2075 - When a non-leader thread execs, that thread just vanishes
2076 without reporting an exit (so we'd hang if we waited for it
2077 explicitly in that case). The exec event is reported to
2078 the TGID pid (although we don't currently enable exec
2079 events). */
2080 errno = 0;
2081 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2082
2083 if (debug_threads)
2084 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2085 ret, errno ? strerror (errno) : "ERRNO-OK");
2086
2087 if (ret > 0)
2088 {
2089 if (debug_threads)
2090 {
2091 debug_printf ("LLW: waitpid %ld received %s\n",
2092 (long) ret, status_to_str (*wstatp));
2093 }
2094
2095 event_child = linux_low_filter_event (filter_ptid,
2096 ret, *wstatp);
2097 if (event_child != NULL)
2098 {
2099 /* We got an event to report to the core. */
2100 event_thread = get_lwp_thread (event_child);
2101 break;
2102 }
2103
2104 /* Retry until nothing comes out of waitpid. A single
2105 SIGCHLD can indicate more than one child stopped. */
2106 continue;
2107 }
2108
2109 /* Check for zombie thread group leaders. Those can't be reaped
2110 until all other threads in the thread group are. */
2111 check_zombie_leaders ();
2112
2113 /* If there are no resumed children left in the set of LWPs we
2114 want to wait for, bail. We can't just block in
2115 waitpid/sigsuspend, because lwps might have been left stopped
2116 in trace-stop state, and we'd be stuck forever waiting for
2117 their status to change (which would only happen if we resumed
2118 them). Even if WNOHANG is set, this return code is preferred
2119 over 0 (below), as it is more detailed. */
2120 if ((find_inferior (&all_threads,
2121 not_stopped_callback,
2122 &wait_ptid) == NULL))
2123 {
2124 if (debug_threads)
2125 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2126 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2127 return -1;
2128 }
2129
2130 /* No interesting event to report to the caller. */
2131 if ((options & WNOHANG))
2132 {
2133 if (debug_threads)
2134 debug_printf ("WNOHANG set, no event found\n");
2135
2136 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2137 return 0;
2138 }
2139
2140 /* Block until we get an event reported with SIGCHLD. */
2141 if (debug_threads)
2142 debug_printf ("sigsuspend'ing\n");
2143
2144 sigsuspend (&prev_mask);
2145 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2146 goto retry;
2147 }
2148
2149 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2150
2151 current_inferior = event_thread;
2152
2153 /* Check for thread exit. */
2154 if (! WIFSTOPPED (*wstatp))
2155 {
2156 gdb_assert (last_thread_of_process_p (pid_of (event_thread)));
2157
2158 if (debug_threads)
2159 debug_printf ("LWP %ld is the last lwp of process. "
2160 "Process %d exiting.\n",
2161 lwpid_of (event_thread), pid_of (event_thread));
2162 return lwpid_of (event_thread);
2163 }
2164
2165 return lwpid_of (event_thread);
2166 }
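
/* Standalone sketch (not gdbserver code) of the wait pattern used in
   linux_wait_for_event_filtered above: block signals, drain
   waitpid (-1, ..., WNOHANG) until nothing more is reported, and
   sleep in sigsuspend only when nothing was pending, so no SIGCHLD
   can be lost between the poll and the sleep.  */
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

static void
sigchld_handler (int sig)
{
  /* Empty: its only job is to make sigsuspend return.  */
  (void) sig;
}

int
main (void)
{
  sigset_t block_mask, prev_mask;

  signal (SIGCHLD, sigchld_handler);

  /* Block everything so no SIGCHLD slips in between the WNOHANG
     poll and the sigsuspend.  */
  sigfillset (&block_mask);
  sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);

  if (fork () == 0)
    _exit (42);                 /* Child: exit immediately.  */

  for (;;)
    {
      int wstat;
      pid_t ret = waitpid (-1, &wstat, WNOHANG);

      if (ret > 0)
        {
          printf ("pid %d exited with code %d\n",
                  (int) ret, WEXITSTATUS (wstat));
          break;
        }

      /* Nothing reaped yet: sleep with the original mask until a
         SIGCHLD is delivered, then poll again.  */
      sigsuspend (&prev_mask);
    }

  sigprocmask (SIG_SETMASK, &prev_mask, NULL);
  return 0;
}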
2167
2168 /* Wait for an event from child(ren) PTID. PTIDs can be:
2169 minus_one_ptid, to specify any child; a pid PTID, specifying all
2170 lwps of a thread group; or a PTID representing a single lwp. Store
2171 the stop status through the status pointer WSTAT. OPTIONS is
2172 passed to the waitpid call. Return 0 if no event was found and
2173 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2174 were found. Return the PID of the stopped child otherwise. */
2175
2176 static int
2177 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2178 {
2179 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2180 }
2181
2182 /* Count the LWPs that have had events. */
2183
2184 static int
2185 count_events_callback (struct inferior_list_entry *entry, void *data)
2186 {
2187 struct thread_info *thread = (struct thread_info *) entry;
2188 struct lwp_info *lp = get_thread_lwp (thread);
2189 int *count = data;
2190
2191 gdb_assert (count != NULL);
2192
2193 /* Count only resumed LWPs that have a SIGTRAP event pending that
2194 should be reported to GDB. */
2195 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2196 && thread->last_resume_kind != resume_stop
2197 && lp->status_pending_p
2198 && WIFSTOPPED (lp->status_pending)
2199 && WSTOPSIG (lp->status_pending) == SIGTRAP
2200 && !breakpoint_inserted_here (lp->stop_pc))
2201 (*count)++;
2202
2203 return 0;
2204 }
2205
2206 /* Select the LWP (if any) that is currently being single-stepped. */
2207
2208 static int
2209 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2210 {
2211 struct thread_info *thread = (struct thread_info *) entry;
2212 struct lwp_info *lp = get_thread_lwp (thread);
2213
2214 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2215 && thread->last_resume_kind == resume_step
2216 && lp->status_pending_p)
2217 return 1;
2218 else
2219 return 0;
2220 }
2221
2222 /* Select the Nth LWP that has had a SIGTRAP event that should be
2223 reported to GDB. */
2224
2225 static int
2226 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2227 {
2228 struct thread_info *thread = (struct thread_info *) entry;
2229 struct lwp_info *lp = get_thread_lwp (thread);
2230 int *selector = data;
2231
2232 gdb_assert (selector != NULL);
2233
2234 /* Select only resumed LWPs that have a SIGTRAP event pending. */
2235 if (thread->last_resume_kind != resume_stop
2236 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2237 && lp->status_pending_p
2238 && WIFSTOPPED (lp->status_pending)
2239 && WSTOPSIG (lp->status_pending) == SIGTRAP
2240 && !breakpoint_inserted_here (lp->stop_pc))
2241 if ((*selector)-- == 0)
2242 return 1;
2243
2244 return 0;
2245 }
2246
2247 static int
2248 cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
2249 {
2250 struct thread_info *thread = (struct thread_info *) entry;
2251 struct lwp_info *lp = get_thread_lwp (thread);
2252 struct lwp_info *event_lp = data;
2253
2254 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2255 if (lp == event_lp)
2256 return 0;
2257
2258 /* If a LWP other than the LWP that we're reporting an event for has
2259 hit a GDB breakpoint (as opposed to some random trap signal),
2260 then just arrange for it to hit it again later. We don't keep
2261 the SIGTRAP status and don't forward the SIGTRAP signal to the
2262 LWP. We will handle the current event, eventually we will resume
2263 all LWPs, and this one will get its breakpoint trap again.
2264
2265 If we do not do this, then we run the risk that the user will
2266 delete or disable the breakpoint, but the LWP will have already
2267 tripped on it. */
2268
2269 if (thread->last_resume_kind != resume_stop
2270 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2271 && lp->status_pending_p
2272 && WIFSTOPPED (lp->status_pending)
2273 && WSTOPSIG (lp->status_pending) == SIGTRAP
2274 && !lp->stepping
2275 && !lp->stopped_by_watchpoint
2276 && cancel_breakpoint (lp))
2277 /* Throw away the SIGTRAP. */
2278 lp->status_pending_p = 0;
2279
2280 return 0;
2281 }
2282
2283 static void
2284 linux_cancel_breakpoints (void)
2285 {
2286 find_inferior (&all_threads, cancel_breakpoints_callback, NULL);
2287 }
2288
2289 /* Select one LWP out of those that have events pending. */
2290
2291 static void
2292 select_event_lwp (struct lwp_info **orig_lp)
2293 {
2294 int num_events = 0;
2295 int random_selector;
2296 struct thread_info *event_thread;
2297
2298 /* Give preference to any LWP that is being single-stepped. */
2299 event_thread
2300 = (struct thread_info *) find_inferior (&all_threads,
2301 select_singlestep_lwp_callback,
2302 NULL);
2303 if (event_thread != NULL)
2304 {
2305 if (debug_threads)
2306 debug_printf ("SEL: Select single-step %s\n",
2307 target_pid_to_str (ptid_of (event_thread)));
2308 }
2309 else
2310 {
2311 /* No single-stepping LWP. Select one at random, out of those
2312 which have had SIGTRAP events. */
2313
2314 /* First see how many SIGTRAP events we have. */
2315 find_inferior (&all_threads, count_events_callback, &num_events);
2316
2317 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2318 random_selector = (int)
2319 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2320
2321 if (debug_threads && num_events > 1)
2322 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2323 num_events, random_selector);
2324
2325 event_thread
2326 = (struct thread_info *) find_inferior (&all_threads,
2327 select_event_lwp_callback,
2328 &random_selector);
2329 }
2330
2331 if (event_thread != NULL)
2332 {
2333 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2334
2335 /* Switch the event LWP. */
2336 *orig_lp = event_lp;
2337 }
2338 }
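
/* A tiny sketch of the selector arithmetic used above: rand () /
   (RAND_MAX + 1.0) lies in [0, 1), so the product lies in
   [0, num_events) and truncation can never yield num_events itself.
   Standalone, not gdbserver code.  */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int
main (void)
{
  int num_events = 5;
  int i;

  srand ((unsigned) time (NULL));
  for (i = 0; i < 3; i++)
    {
      int selector = (int) ((num_events * (double) rand ())
                            / (RAND_MAX + 1.0));

      printf ("selected event #%d of %d\n", selector, num_events);
    }
  return 0;
}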
2339
2340 /* Decrement the suspend count of an LWP. */
2341
2342 static int
2343 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2344 {
2345 struct thread_info *thread = (struct thread_info *) entry;
2346 struct lwp_info *lwp = get_thread_lwp (thread);
2347
2348 /* Ignore EXCEPT. */
2349 if (lwp == except)
2350 return 0;
2351
2352 lwp->suspended--;
2353
2354 gdb_assert (lwp->suspended >= 0);
2355 return 0;
2356 }
2357
2358 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2359 non-NULL. */
2360
2361 static void
2362 unsuspend_all_lwps (struct lwp_info *except)
2363 {
2364 find_inferior (&all_threads, unsuspend_one_lwp, except);
2365 }
2366
2367 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2368 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2369 void *data);
2370 static int lwp_running (struct inferior_list_entry *entry, void *data);
2371 static ptid_t linux_wait_1 (ptid_t ptid,
2372 struct target_waitstatus *ourstatus,
2373 int target_options);
2374
2375 /* Stabilize threads (move out of jump pads).
2376
2377 If a thread is midway collecting a fast tracepoint, we need to
2378 finish the collection and move it out of the jump pad before
2379 reporting the signal.
2380
2381 This avoids recursion while collecting (when a signal arrives
2382 midway, and the signal handler itself collects), which would trash
2383 the trace buffer. In case the user set a breakpoint in a signal
2384 handler, this avoids the backtrace showing the jump pad, etc..
2385 Most importantly, there are certain things we can't do safely if
2386 threads are stopped in a jump pad (or in its callees). For
2387 example:
2388
2389 - starting a new trace run. A thread still collecting the
2390 previous run could trash the trace buffer when resumed. The trace
2391 buffer control structures would have been reset but the thread had
2392 no way to tell. The thread could even be midway through memcpy'ing
2393 to the buffer, which would mean that when resumed, it would clobber
2394 the trace buffer that had been set up for a new run.
2395
2396 - we can't rewrite/reuse the jump pads for new tracepoints
2397 safely. Say you do tstart while a thread is stopped midway through
2398 collecting. When the thread is later resumed, it finishes the
2399 collection, and returns to the jump pad, to execute the original
2400 instruction that was under the tracepoint jump at the time the
2401 older run had been started. If the jump pad had since been
2402 rewritten for something else in the new run, the thread would now
2403 execute the wrong/random instructions. */
2404
2405 static void
2406 linux_stabilize_threads (void)
2407 {
2408 struct thread_info *save_inferior;
2409 struct thread_info *thread_stuck;
2410
2411 thread_stuck
2412 = (struct thread_info *) find_inferior (&all_threads,
2413 stuck_in_jump_pad_callback,
2414 NULL);
2415 if (thread_stuck != NULL)
2416 {
2417 if (debug_threads)
2418 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2419 lwpid_of (thread_stuck));
2420 return;
2421 }
2422
2423 save_inferior = current_inferior;
2424
2425 stabilizing_threads = 1;
2426
2427 /* Kick 'em all. */
2428 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2429
2430 /* Loop until all are stopped out of the jump pads. */
2431 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2432 {
2433 struct target_waitstatus ourstatus;
2434 struct lwp_info *lwp;
2435 int wstat;
2436
2437 /* Note that we go through the full wait event loop. While
2438 moving threads out of the jump pad, we need to be able to step
2439 over internal breakpoints and such. */
2440 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2441
2442 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2443 {
2444 lwp = get_thread_lwp (current_inferior);
2445
2446 /* Lock it. */
2447 lwp->suspended++;
2448
2449 if (ourstatus.value.sig != GDB_SIGNAL_0
2450 || current_inferior->last_resume_kind == resume_stop)
2451 {
2452 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2453 enqueue_one_deferred_signal (lwp, &wstat);
2454 }
2455 }
2456 }
2457
2458 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
2459
2460 stabilizing_threads = 0;
2461
2462 current_inferior = save_inferior;
2463
2464 if (debug_threads)
2465 {
2466 thread_stuck
2467 = (struct thread_info *) find_inferior (&all_threads,
2468 stuck_in_jump_pad_callback,
2469 NULL);
2470 if (thread_stuck != NULL)
2471 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2472 lwpid_of (thread_stuck));
2473 }
2474 }
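
/* Sketch showing that W_STOPCODE (defined near the top of this file
   for systems that lack it) builds a wait status which the standard
   W* macros decode as "stopped by signal" --- exactly how the
   deferred signal is re-packaged above.  Standalone, not gdbserver
   code.  */
#include <assert.h>
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif

int
main (void)
{
  int wstat = W_STOPCODE (SIGUSR1);

  assert (WIFSTOPPED (wstat));
  assert (WSTOPSIG (wstat) == SIGUSR1);
  printf ("synthesized stop status 0x%04x decodes to signal %d\n",
          wstat, WSTOPSIG (wstat));
  return 0;
}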
2475
2476 /* Wait for process, returns status. */
2477
2478 static ptid_t
2479 linux_wait_1 (ptid_t ptid,
2480 struct target_waitstatus *ourstatus, int target_options)
2481 {
2482 int w;
2483 struct lwp_info *event_child;
2484 int options;
2485 int pid;
2486 int step_over_finished;
2487 int bp_explains_trap;
2488 int maybe_internal_trap;
2489 int report_to_gdb;
2490 int trace_event;
2491 int in_step_range;
2492
2493 if (debug_threads)
2494 {
2495 debug_enter ();
2496 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2497 }
2498
2499 /* Translate generic target options into linux options. */
2500 options = __WALL;
2501 if (target_options & TARGET_WNOHANG)
2502 options |= WNOHANG;
2503
2504 retry:
2505 bp_explains_trap = 0;
2506 trace_event = 0;
2507 in_step_range = 0;
2508 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2509
2510 /* If we were only supposed to resume one thread, only wait for
2511 that thread - if it's still alive. If it died, however - which
2512 can happen if we're coming from the thread death case below -
2513 then we need to make sure we restart the other threads. We could
2514 pick a thread at random or restart all; restarting all is less
2515 arbitrary. */
2516 if (!non_stop
2517 && !ptid_equal (cont_thread, null_ptid)
2518 && !ptid_equal (cont_thread, minus_one_ptid))
2519 {
2520 struct thread_info *thread;
2521
2522 thread = (struct thread_info *) find_inferior_id (&all_threads,
2523 cont_thread);
2524
2525 /* No stepping, no signal - unless one is pending already, of course. */
2526 if (thread == NULL)
2527 {
2528 struct thread_resume resume_info;
2529 resume_info.thread = minus_one_ptid;
2530 resume_info.kind = resume_continue;
2531 resume_info.sig = 0;
2532 linux_resume (&resume_info, 1);
2533 }
2534 else
2535 ptid = cont_thread;
2536 }
2537
2538 if (ptid_equal (step_over_bkpt, null_ptid))
2539 pid = linux_wait_for_event (ptid, &w, options);
2540 else
2541 {
2542 if (debug_threads)
2543 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2544 target_pid_to_str (step_over_bkpt));
2545 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2546 }
2547
2548 if (pid == 0)
2549 {
2550 gdb_assert (target_options & TARGET_WNOHANG);
2551
2552 if (debug_threads)
2553 {
2554 debug_printf ("linux_wait_1 ret = null_ptid, "
2555 "TARGET_WAITKIND_IGNORE\n");
2556 debug_exit ();
2557 }
2558
2559 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2560 return null_ptid;
2561 }
2562 else if (pid == -1)
2563 {
2564 if (debug_threads)
2565 {
2566 debug_printf ("linux_wait_1 ret = null_ptid, "
2567 "TARGET_WAITKIND_NO_RESUMED\n");
2568 debug_exit ();
2569 }
2570
2571 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
2572 return null_ptid;
2573 }
2574
2575 event_child = get_thread_lwp (current_inferior);
2576
2577 /* linux_wait_for_event only returns an exit status for the last
2578 child of a process. Report it. */
2579 if (WIFEXITED (w) || WIFSIGNALED (w))
2580 {
2581 if (WIFEXITED (w))
2582 {
2583 ourstatus->kind = TARGET_WAITKIND_EXITED;
2584 ourstatus->value.integer = WEXITSTATUS (w);
2585
2586 if (debug_threads)
2587 {
2588 debug_printf ("linux_wait_1 ret = %s, exited with "
2589 "retcode %d\n",
2590 target_pid_to_str (ptid_of (current_inferior)),
2591 WEXITSTATUS (w));
2592 debug_exit ();
2593 }
2594 }
2595 else
2596 {
2597 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2598 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2599
2600 if (debug_threads)
2601 {
2602 debug_printf ("linux_wait_1 ret = %s, terminated with "
2603 "signal %d\n",
2604 target_pid_to_str (ptid_of (current_inferior)),
2605 WTERMSIG (w));
2606 debug_exit ();
2607 }
2608 }
2609
2610 return ptid_of (current_inferior);
2611 }
2612
2613 /* If this event was not handled before, and is not a SIGTRAP, we
2614 report it. SIGILL and SIGSEGV are also treated as traps in case
2615 a breakpoint is inserted at the current PC. If this target does
2616 not support internal breakpoints at all, we also report the
2617 SIGTRAP without further processing; it's of no concern to us. */
2618 maybe_internal_trap
2619 = (supports_breakpoints ()
2620 && (WSTOPSIG (w) == SIGTRAP
2621 || ((WSTOPSIG (w) == SIGILL
2622 || WSTOPSIG (w) == SIGSEGV)
2623 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2624
2625 if (maybe_internal_trap)
2626 {
2627 /* Handle anything that requires bookkeeping before deciding to
2628 report the event or continue waiting. */
2629
2630 /* First check if we can explain the SIGTRAP with an internal
2631 breakpoint, or if we should possibly report the event to GDB.
2632 Do this before anything that may remove or insert a
2633 breakpoint. */
2634 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2635
2636 /* We have a SIGTRAP, possibly a step-over dance has just
2637 finished. If so, tweak the state machine accordingly,
2638 reinsert breakpoints and delete any reinsert (software
2639 single-step) breakpoints. */
2640 step_over_finished = finish_step_over (event_child);
2641
2642 /* Now invoke the callbacks of any internal breakpoints there. */
2643 check_breakpoints (event_child->stop_pc);
2644
2645 /* Handle tracepoint data collecting. This may overflow the
2646 trace buffer, and cause a tracing stop, removing
2647 breakpoints. */
2648 trace_event = handle_tracepoints (event_child);
2649
2650 if (bp_explains_trap)
2651 {
2652 /* If we stepped or ran into an internal breakpoint, we've
2653 already handled it. So next time we resume (from this
2654 PC), we should step over it. */
2655 if (debug_threads)
2656 debug_printf ("Hit a gdbserver breakpoint.\n");
2657
2658 if (breakpoint_here (event_child->stop_pc))
2659 event_child->need_step_over = 1;
2660 }
2661 }
2662 else
2663 {
2664 /* We have some other signal, possibly a step-over dance was in
2665 progress, and it should be cancelled too. */
2666 step_over_finished = finish_step_over (event_child);
2667 }
2668
2669 /* We have all the data we need. Either report the event to GDB, or
2670 resume threads and keep waiting for more. */
2671
2672 /* If we're collecting a fast tracepoint, finish the collection and
2673 move out of the jump pad before delivering a signal. See
2674 linux_stabilize_threads. */
2675
2676 if (WIFSTOPPED (w)
2677 && WSTOPSIG (w) != SIGTRAP
2678 && supports_fast_tracepoints ()
2679 && agent_loaded_p ())
2680 {
2681 if (debug_threads)
2682 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2683 "to defer or adjust it.\n",
2684 WSTOPSIG (w), lwpid_of (current_inferior));
2685
2686 /* Allow debugging the jump pad itself. */
2687 if (current_inferior->last_resume_kind != resume_step
2688 && maybe_move_out_of_jump_pad (event_child, &w))
2689 {
2690 enqueue_one_deferred_signal (event_child, &w);
2691
2692 if (debug_threads)
2693 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
2694 WSTOPSIG (w), lwpid_of (current_inferior));
2695
2696 linux_resume_one_lwp (event_child, 0, 0, NULL);
2697 goto retry;
2698 }
2699 }
2700
2701 if (event_child->collecting_fast_tracepoint)
2702 {
2703 if (debug_threads)
2704 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2705 "Check if we're already there.\n",
2706 lwpid_of (current_inferior),
2707 event_child->collecting_fast_tracepoint);
2708
2709 trace_event = 1;
2710
2711 event_child->collecting_fast_tracepoint
2712 = linux_fast_tracepoint_collecting (event_child, NULL);
2713
2714 if (event_child->collecting_fast_tracepoint != 1)
2715 {
2716 /* No longer need this breakpoint. */
2717 if (event_child->exit_jump_pad_bkpt != NULL)
2718 {
2719 if (debug_threads)
2720 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
2721 " Stopping all threads momentarily.\n");
2722
2723 /* Other running threads could hit this breakpoint.
2724 We don't handle moribund locations like GDB does,
2725 instead we always pause all threads when removing
2726 breakpoints, so that any step-over or
2727 decr_pc_after_break adjustment is always taken
2728 care of while the breakpoint is still
2729 inserted. */
2730 stop_all_lwps (1, event_child);
2731 cancel_breakpoints ();
2732
2733 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2734 event_child->exit_jump_pad_bkpt = NULL;
2735
2736 unstop_all_lwps (1, event_child);
2737
2738 gdb_assert (event_child->suspended >= 0);
2739 }
2740 }
2741
2742 if (event_child->collecting_fast_tracepoint == 0)
2743 {
2744 if (debug_threads)
2745 debug_printf ("fast tracepoint finished "
2746 "collecting successfully.\n");
2747
2748 /* We may have a deferred signal to report. */
2749 if (dequeue_one_deferred_signal (event_child, &w))
2750 {
2751 if (debug_threads)
2752 debug_printf ("dequeued one signal.\n");
2753 }
2754 else
2755 {
2756 if (debug_threads)
2757 debug_printf ("no deferred signals.\n");
2758
2759 if (stabilizing_threads)
2760 {
2761 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2762 ourstatus->value.sig = GDB_SIGNAL_0;
2763
2764 if (debug_threads)
2765 {
2766 debug_printf ("linux_wait_1 ret = %s, stopped "
2767 "while stabilizing threads\n",
2768 target_pid_to_str (ptid_of (current_inferior)));
2769 debug_exit ();
2770 }
2771
2772 return ptid_of (current_inferior);
2773 }
2774 }
2775 }
2776 }
2777
2778 /* Check whether GDB would be interested in this event. */
2779
2780 /* If GDB is not interested in this signal, don't stop other
2781 threads, and don't report it to GDB. Just resume the inferior
2782 right away. We do this for threading-related signals as well as
2783 any that GDB specifically requested we ignore. But never ignore
2784 SIGSTOP if we sent it ourselves, and do not ignore signals when
2785 stepping - they may require special handling to skip the signal
2786 handler. */
2787 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2788 thread library? */
2789 if (WIFSTOPPED (w)
2790 && current_inferior->last_resume_kind != resume_step
2791 && (
2792 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2793 (current_process ()->private->thread_db != NULL
2794 && (WSTOPSIG (w) == __SIGRTMIN
2795 || WSTOPSIG (w) == __SIGRTMIN + 1))
2796 ||
2797 #endif
2798 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
2799 && !(WSTOPSIG (w) == SIGSTOP
2800 && current_inferior->last_resume_kind == resume_stop))))
2801 {
2802 siginfo_t info, *info_p;
2803
2804 if (debug_threads)
2805 debug_printf ("Ignored signal %d for LWP %ld.\n",
2806 WSTOPSIG (w), lwpid_of (current_inferior));
2807
2808 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_inferior),
2809 (PTRACE_TYPE_ARG3) 0, &info) == 0)
2810 info_p = &info;
2811 else
2812 info_p = NULL;
2813 linux_resume_one_lwp (event_child, event_child->stepping,
2814 WSTOPSIG (w), info_p);
2815 goto retry;
2816 }
2817
2818 /* Note that all addresses are always "out of the step range" when
2819 there's no range to begin with. */
2820 in_step_range = lwp_in_step_range (event_child);
2821
2822 /* If GDB wanted this thread to single step, and the thread is out
2823 of the step range, we always want to report the SIGTRAP, and let
2824 GDB handle it. Watchpoints should always be reported. So should
2825 signals we can't explain. A SIGTRAP we can't explain could be a
2826 GDB breakpoint --- we may or may not support Z0 breakpoints. If
2827 we do, we'll be able to handle GDB breakpoints on top of internal
2828 breakpoints, by handling the internal breakpoint and still
2829 reporting the event to GDB. If we don't, we're out of luck; GDB
2830 won't see the breakpoint hit. */
2831 report_to_gdb = (!maybe_internal_trap
2832 || (current_inferior->last_resume_kind == resume_step
2833 && !in_step_range)
2834 || event_child->stopped_by_watchpoint
2835 || (!step_over_finished && !in_step_range
2836 && !bp_explains_trap && !trace_event)
2837 || (gdb_breakpoint_here (event_child->stop_pc)
2838 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
2839 && gdb_no_commands_at_breakpoint (event_child->stop_pc)));
2840
2841 run_breakpoint_commands (event_child->stop_pc);
2842
2843 /* We found no reason GDB would want us to stop. We either hit one
2844 of our own breakpoints, or finished an internal step GDB
2845 shouldn't know about. */
2846 if (!report_to_gdb)
2847 {
2848 if (debug_threads)
2849 {
2850 if (bp_explains_trap)
2851 debug_printf ("Hit a gdbserver breakpoint.\n");
2852 if (step_over_finished)
2853 debug_printf ("Step-over finished.\n");
2854 if (trace_event)
2855 debug_printf ("Tracepoint event.\n");
2856 if (lwp_in_step_range (event_child))
2857 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
2858 paddress (event_child->stop_pc),
2859 paddress (event_child->step_range_start),
2860 paddress (event_child->step_range_end));
2861 }
2862
2863 /* We're not reporting this breakpoint to GDB, so apply the
2864 decr_pc_after_break adjustment to the inferior's regcache
2865 ourselves. */
2866
2867 if (the_low_target.set_pc != NULL)
2868 {
2869 struct regcache *regcache
2870 = get_thread_regcache (current_inferior, 1);
2871 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2872 }
2873
2874 /* We may have finished stepping over a breakpoint. If so,
2875 we've stopped and suspended all LWPs momentarily except the
2876 stepping one. This is where we resume them all again. We're
2877 going to keep waiting, so use proceed, which handles stepping
2878 over the next breakpoint. */
2879 if (debug_threads)
2880 debug_printf ("proceeding all threads.\n");
2881
2882 if (step_over_finished)
2883 unsuspend_all_lwps (event_child);
2884
2885 proceed_all_lwps ();
2886 goto retry;
2887 }
2888
2889 if (debug_threads)
2890 {
2891 if (current_inferior->last_resume_kind == resume_step)
2892 {
2893 if (event_child->step_range_start == event_child->step_range_end)
2894 debug_printf ("GDB wanted to single-step, reporting event.\n");
2895 else if (!lwp_in_step_range (event_child))
2896 debug_printf ("Out of step range, reporting event.\n");
2897 }
2898 if (event_child->stopped_by_watchpoint)
2899 debug_printf ("Stopped by watchpoint.\n");
2900 if (gdb_breakpoint_here (event_child->stop_pc))
2901 debug_printf ("Stopped by GDB breakpoint.\n");
2902 debug_printf ("Hit a non-gdbserver trap event.\n");
2904 }
2905
2906 /* Alright, we're going to report a stop. */
2907
2908 if (!non_stop && !stabilizing_threads)
2909 {
2910 /* In all-stop, stop all threads. */
2911 stop_all_lwps (0, NULL);
2912
2913 /* If we're not waiting for a specific LWP, choose an event LWP
2914 from among those that have had events. Giving equal priority
2915 to all LWPs that have had events helps prevent
2916 starvation. */
2917 if (ptid_equal (ptid, minus_one_ptid))
2918 {
2919 event_child->status_pending_p = 1;
2920 event_child->status_pending = w;
2921
2922 select_event_lwp (&event_child);
2923
2924 /* current_inferior and event_child must stay in sync. */
2925 current_inferior = get_lwp_thread (event_child);
2926
2927 event_child->status_pending_p = 0;
2928 w = event_child->status_pending;
2929 }
2930
2931 /* Now that we've selected our final event LWP, cancel any
2932 breakpoints in other LWPs that have hit a GDB breakpoint.
2933 See the comment in cancel_breakpoints_callback to find out
2934 why. */
2935 find_inferior (&all_threads, cancel_breakpoints_callback, event_child);
2936
2937 /* If we were doing a step-over, all other threads but the stepping one
2938 had been paused in start_step_over, with their suspend counts
2939 incremented. We don't want to do a full unstop/unpause, because we're
2940 in all-stop mode (so we want threads stopped), but we still need to
2941 unsuspend the other threads, to decrement their `suspended' count
2942 back. */
2943 if (step_over_finished)
2944 unsuspend_all_lwps (event_child);
2945
2946 /* Stabilize threads (move out of jump pads). */
2947 stabilize_threads ();
2948 }
2949 else
2950 {
2951 /* If we just finished a step-over, then all threads had been
2952 momentarily paused. In all-stop, that's fine, we want
2953 threads stopped by now anyway. In non-stop, we need to
2954 re-resume threads that GDB wanted to be running. */
2955 if (step_over_finished)
2956 unstop_all_lwps (1, event_child);
2957 }
2958
2959 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2960
2961 if (current_inferior->last_resume_kind == resume_stop
2962 && WSTOPSIG (w) == SIGSTOP)
2963 {
2964 /* A thread that has been requested to stop by GDB with vCont;t,
2965 and it stopped cleanly, so report it as SIG0. The use of
2966 SIGSTOP is an implementation detail. */
2967 ourstatus->value.sig = GDB_SIGNAL_0;
2968 }
2969 else if (current_inferior->last_resume_kind == resume_stop
2970 && WSTOPSIG (w) != SIGSTOP)
2971 {
2972 /* A thread that has been requested to stop by GDB with vCont;t,
2973 but it stopped for some other reason. */
2974 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2975 }
2976 else
2977 {
2978 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2979 }
2980
2981 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2982
2983 if (debug_threads)
2984 {
2985 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
2986 target_pid_to_str (ptid_of (current_inferior)),
2987 ourstatus->kind, ourstatus->value.sig);
2988 debug_exit ();
2989 }
2990
2991 return ptid_of (current_inferior);
2992 }
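
/* Standalone sketch of the three-way wait-status dispatch performed
   by linux_wait_1 above (exited / killed by signal / stopped).  Not
   gdbserver code.  */
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

static void
describe_wstat (int w)
{
  if (WIFEXITED (w))
    printf ("exited, code %d\n", WEXITSTATUS (w));
  else if (WIFSIGNALED (w))
    printf ("terminated by signal %d\n", WTERMSIG (w));
  else if (WIFSTOPPED (w))
    printf ("stopped by signal %d\n", WSTOPSIG (w));
}

int
main (void)
{
  int w;

  if (fork () == 0)
    _exit (3);                  /* Child: exit with code 3.  */

  wait (&w);
  describe_wstat (w);           /* Prints "exited, code 3".  */
  return 0;
}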
2993
2994 /* Get rid of any pending event in the pipe. */
2995 static void
2996 async_file_flush (void)
2997 {
2998 int ret;
2999 char buf;
3000
3001 do
3002 ret = read (linux_event_pipe[0], &buf, 1);
3003 while (ret >= 0 || (ret == -1 && errno == EINTR));
3004 }
3005
3006 /* Put something in the pipe, so the event loop wakes up. */
3007 static void
3008 async_file_mark (void)
3009 {
3010 int ret;
3011
3012 async_file_flush ();
3013
3014 do
3015 ret = write (linux_event_pipe[1], "+", 1);
3016 while (ret == 0 || (ret == -1 && errno == EINTR));
3017
3018 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3019 be awakened anyway. */
3020 }
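
/* Sketch of the self-pipe pattern behind async_file_flush and
   async_file_mark: a nonblocking pipe whose read end the event loop
   watches; writing one byte wakes the loop, draining the pipe makes
   it quiet again.  Standalone, not gdbserver code; the local
   event_pipe stands in for linux_event_pipe.  */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int event_pipe[2];

static void
mark (void)
{
  int ret;

  do
    ret = write (event_pipe[1], "+", 1);
  while (ret == 0 || (ret == -1 && errno == EINTR));
  /* EAGAIN (pipe full) is fine: the event loop is already awake.  */
}

static void
flush (void)
{
  int ret;
  char buf;

  /* Keep reading until the nonblocking read fails with EAGAIN.  */
  do
    ret = read (event_pipe[0], &buf, 1);
  while (ret >= 0 || (ret == -1 && errno == EINTR));
}

int
main (void)
{
  if (pipe (event_pipe) != 0)
    return 1;
  fcntl (event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (event_pipe[1], F_SETFL, O_NONBLOCK);

  mark ();
  flush ();
  puts ("pipe drained");
  return 0;
}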
3021
3022 static ptid_t
3023 linux_wait (ptid_t ptid,
3024 struct target_waitstatus *ourstatus, int target_options)
3025 {
3026 ptid_t event_ptid;
3027
3028 /* Flush the async file first. */
3029 if (target_is_async_p ())
3030 async_file_flush ();
3031
3032 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3033
3034 /* If at least one stop was reported, there may be more. A single
3035 SIGCHLD can signal more than one child stop. */
3036 if (target_is_async_p ()
3037 && (target_options & TARGET_WNOHANG) != 0
3038 && !ptid_equal (event_ptid, null_ptid))
3039 async_file_mark ();
3040
3041 return event_ptid;
3042 }
3043
3044 /* Send a signal to an LWP. */
3045
3046 static int
3047 kill_lwp (unsigned long lwpid, int signo)
3048 {
3049 /* Use tkill, if possible, in case we are using nptl threads. If tkill
3050 fails, then we are not using nptl threads and we should be using kill. */
3051
3052 #ifdef __NR_tkill
3053 {
3054 static int tkill_failed;
3055
3056 if (!tkill_failed)
3057 {
3058 int ret;
3059
3060 errno = 0;
3061 ret = syscall (__NR_tkill, lwpid, signo);
3062 if (errno != ENOSYS)
3063 return ret;
3064 tkill_failed = 1;
3065 }
3066 }
3067 #endif
3068
3069 return kill (lwpid, signo);
3070 }
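
/* Sketch of why kill_lwp prefers a thread-directed syscall: kill()
   addresses a whole thread group, while tkill/tgkill address one
   specific thread, which is what per-LWP control needs.  tgkill is
   shown because it also checks the thread-group id, avoiding tkill's
   tid-reuse race.  Illustrative, not gdbserver code.  */
#include <signal.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static int
signal_one_thread (pid_t tgid, pid_t tid, int signo)
{
#ifdef __NR_tgkill
  return syscall (__NR_tgkill, tgid, tid, signo);
#else
  return kill (tid, signo);
#endif
}

int
main (void)
{
  /* Probe ourselves with signal 0 (existence/permission check only);
     the main thread's tid equals the pid.  */
  if (signal_one_thread (getpid (), getpid (), 0) == 0)
    puts ("thread exists");
  return 0;
}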
3071
3072 void
3073 linux_stop_lwp (struct lwp_info *lwp)
3074 {
3075 send_sigstop (lwp);
3076 }
3077
3078 static void
3079 send_sigstop (struct lwp_info *lwp)
3080 {
3081 int pid;
3082
3083 pid = lwpid_of (get_lwp_thread (lwp));
3084
3085 /* If we already have a pending stop signal for this process, don't
3086 send another. */
3087 if (lwp->stop_expected)
3088 {
3089 if (debug_threads)
3090 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3091
3092 return;
3093 }
3094
3095 if (debug_threads)
3096 debug_printf ("Sending sigstop to lwp %d\n", pid);
3097
3098 lwp->stop_expected = 1;
3099 kill_lwp (pid, SIGSTOP);
3100 }
3101
3102 static int
3103 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3104 {
3105 struct thread_info *thread = (struct thread_info *) entry;
3106 struct lwp_info *lwp = get_thread_lwp (thread);
3107
3108 /* Ignore EXCEPT. */
3109 if (lwp == except)
3110 return 0;
3111
3112 if (lwp->stopped)
3113 return 0;
3114
3115 send_sigstop (lwp);
3116 return 0;
3117 }
3118
3119 /* Increment the suspend count of an LWP, and stop it, if not stopped
3120 yet. */
3121 static int
3122 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3123 void *except)
3124 {
3125 struct thread_info *thread = (struct thread_info *) entry;
3126 struct lwp_info *lwp = get_thread_lwp (thread);
3127
3128 /* Ignore EXCEPT. */
3129 if (lwp == except)
3130 return 0;
3131
3132 lwp->suspended++;
3133
3134 return send_sigstop_callback (entry, except);
3135 }
3136
3137 static void
3138 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3139 {
3140 /* It's dead, really. */
3141 lwp->dead = 1;
3142
3143 /* Store the exit status for later. */
3144 lwp->status_pending_p = 1;
3145 lwp->status_pending = wstat;
3146
3147 /* Prevent trying to stop it. */
3148 lwp->stopped = 1;
3149
3150 /* No further stops are expected from a dead lwp. */
3151 lwp->stop_expected = 0;
3152 }
3153
3154 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3155
3156 static void
3157 wait_for_sigstop (void)
3158 {
3159 struct thread_info *saved_inferior;
3160 ptid_t saved_tid;
3161 int wstat;
3162 int ret;
3163
3164 saved_inferior = current_inferior;
3165 if (saved_inferior != NULL)
3166 saved_tid = saved_inferior->entry.id;
3167 else
3168 saved_tid = null_ptid; /* avoid bogus unused warning */
3169
3170 if (debug_threads)
3171 debug_printf ("wait_for_sigstop: pulling events\n");
3172
3173 /* Passing NULL_PTID as filter indicates we want all events to be
3174 left pending. Eventually this returns when there are no
3175 unwaited-for children left. */
3176 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3177 &wstat, __WALL);
3178 gdb_assert (ret == -1);
3179
3180 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
3181 current_inferior = saved_inferior;
3182 else
3183 {
3184 if (debug_threads)
3185 debug_printf ("Previously current thread died.\n");
3186
3187 if (non_stop)
3188 {
3189 /* We can't change the current inferior behind GDB's back,
3190 otherwise, a subsequent command may apply to the wrong
3191 process. */
3192 current_inferior = NULL;
3193 }
3194 else
3195 {
3196 /* Set a valid thread as current. */
3197 set_desired_inferior (0);
3198 }
3199 }
3200 }
3201
3202 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3203 move it out, because we need to report the stop event to GDB. For
3204 example, if the user puts a breakpoint in the jump pad, it's
3205 because she wants to debug it. */
3206
3207 static int
3208 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3209 {
3210 struct thread_info *thread = (struct thread_info *) entry;
3211 struct lwp_info *lwp = get_thread_lwp (thread);
3212
3213 gdb_assert (lwp->suspended == 0);
3214 gdb_assert (lwp->stopped);
3215
3216 /* Allow debugging the jump pad, gdb_collect, etc.. */
3217 return (supports_fast_tracepoints ()
3218 && agent_loaded_p ()
3219 && (gdb_breakpoint_here (lwp->stop_pc)
3220 || lwp->stopped_by_watchpoint
3221 || thread->last_resume_kind == resume_step)
3222 && linux_fast_tracepoint_collecting (lwp, NULL));
3223 }
3224
3225 static void
3226 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3227 {
3228 struct thread_info *thread = (struct thread_info *) entry;
3229 struct lwp_info *lwp = get_thread_lwp (thread);
3230 int *wstat;
3231
3232 gdb_assert (lwp->suspended == 0);
3233 gdb_assert (lwp->stopped);
3234
3235 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3236
3237 /* Allow debugging the jump pad, gdb_collect, etc. */
3238 if (!gdb_breakpoint_here (lwp->stop_pc)
3239 && !lwp->stopped_by_watchpoint
3240 && thread->last_resume_kind != resume_step
3241 && maybe_move_out_of_jump_pad (lwp, wstat))
3242 {
3243 if (debug_threads)
3244 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3245 lwpid_of (thread));
3246
3247 if (wstat)
3248 {
3249 lwp->status_pending_p = 0;
3250 enqueue_one_deferred_signal (lwp, wstat);
3251
3252 if (debug_threads)
3253 debug_printf ("Signal %d for LWP %ld deferred "
3254 "(in jump pad)\n",
3255 WSTOPSIG (*wstat), lwpid_of (thread));
3256 }
3257
3258 linux_resume_one_lwp (lwp, 0, 0, NULL);
3259 }
3260 else
3261 lwp->suspended++;
3262 }
3263
3264 static int
3265 lwp_running (struct inferior_list_entry *entry, void *data)
3266 {
3267 struct thread_info *thread = (struct thread_info *) entry;
3268 struct lwp_info *lwp = get_thread_lwp (thread);
3269
3270 if (lwp->dead)
3271 return 0;
3272 if (lwp->stopped)
3273 return 0;
3274 return 1;
3275 }
3276
3277 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3278 If SUSPEND, then also increase the suspend count of every LWP,
3279 except EXCEPT. */
3280
3281 static void
3282 stop_all_lwps (int suspend, struct lwp_info *except)
3283 {
3284 /* Should not be called recursively. */
3285 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3286
3287 if (debug_threads)
3288 {
3289 debug_enter ();
3290 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3291 suspend ? "stop-and-suspend" : "stop",
3292 except != NULL
3293 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
3294 : "none");
3295 }
3296
3297 stopping_threads = (suspend
3298 ? STOPPING_AND_SUSPENDING_THREADS
3299 : STOPPING_THREADS);
3300
3301 if (suspend)
3302 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
3303 else
3304 find_inferior (&all_threads, send_sigstop_callback, except);
3305 wait_for_sigstop ();
3306 stopping_threads = NOT_STOPPING_THREADS;
3307
3308 if (debug_threads)
3309 {
3310 debug_printf ("stop_all_lwps done, setting stopping_threads "
3311 "back to !stopping\n");
3312 debug_exit ();
3313 }
3314 }
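
/* Runnable sketch of the SIGSTOP round performed by stop_all_lwps
   and wait_for_sigstop above: send SIGSTOP, then wait until the stop
   is actually reported before acting on the process.  For simplicity
   this uses a plain (non-ptraced) child and WUNTRACED instead of
   real LWPs.  */
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int
main (void)
{
  int wstat;
  pid_t pid = fork ();

  if (pid == 0)
    for (;;)
      pause ();                         /* Child: idle until signalled.  */

  kill (pid, SIGSTOP);                  /* send_sigstop */
  waitpid (pid, &wstat, WUNTRACED);     /* wait_for_sigstop */
  if (WIFSTOPPED (wstat))
    printf ("child stopped by signal %d\n", WSTOPSIG (wstat));

  kill (pid, SIGKILL);                  /* Clean up the sketch.  */
  waitpid (pid, &wstat, 0);
  return 0;
}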
3315
3316 /* Resume execution of the inferior process.
3317 If STEP is nonzero, single-step it.
3318 If SIGNAL is nonzero, give it that signal. */
3319
3320 static void
3321 linux_resume_one_lwp (struct lwp_info *lwp,
3322 int step, int signal, siginfo_t *info)
3323 {
3324 struct thread_info *thread = get_lwp_thread (lwp);
3325 struct thread_info *saved_inferior;
3326 int fast_tp_collecting;
3327
3328 if (lwp->stopped == 0)
3329 return;
3330
3331 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3332
3333 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3334
3335 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3336 user used the "jump" command, or "set $pc = foo"). */
3337 if (lwp->stop_pc != get_pc (lwp))
3338 {
3339 /* Collecting 'while-stepping' actions doesn't make sense
3340 anymore. */
3341 release_while_stepping_state_list (thread);
3342 }
3343
3344 /* If we have pending signals or status, and a new signal, enqueue the
3345 signal. Also enqueue the signal if we are waiting to reinsert a
3346 breakpoint; it will be picked up again below. */
3347 if (signal != 0
3348 && (lwp->status_pending_p
3349 || lwp->pending_signals != NULL
3350 || lwp->bp_reinsert != 0
3351 || fast_tp_collecting))
3352 {
3353 struct pending_signals *p_sig;
3354 p_sig = xmalloc (sizeof (*p_sig));
3355 p_sig->prev = lwp->pending_signals;
3356 p_sig->signal = signal;
3357 if (info == NULL)
3358 memset (&p_sig->info, 0, sizeof (siginfo_t));
3359 else
3360 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3361 lwp->pending_signals = p_sig;
3362 }
3363
3364 if (lwp->status_pending_p)
3365 {
3366 if (debug_threads)
3367 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3368 " has pending status\n",
3369 lwpid_of (thread), step ? "step" : "continue", signal,
3370 lwp->stop_expected ? "expected" : "not expected");
3371 return;
3372 }
3373
3374 saved_inferior = current_inferior;
3375 current_inferior = thread;
3376
3377 if (debug_threads)
3378 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
3379 lwpid_of (thread), step ? "step" : "continue", signal,
3380 lwp->stop_expected ? "expected" : "not expected");
3381
3382 /* This bit needs some thinking about. If we get a signal that
3383 we must report while a single-step reinsert is still pending,
3384 we often end up resuming the thread. It might be better to
3385 (ew) allow a stack of pending events; then we could be sure that
3386 the reinsert happened right away and not lose any signals.
3387
3388 Making this stack would also shrink the window in which breakpoints are
3389 uninserted (see comment in linux_wait_for_lwp) but not enough for
3390 complete correctness, so it won't solve that problem. It may be
3391 worthwhile just to solve this one, however. */
3392 if (lwp->bp_reinsert != 0)
3393 {
3394 if (debug_threads)
3395 debug_printf (" pending reinsert at 0x%s\n",
3396 paddress (lwp->bp_reinsert));
3397
3398 if (can_hardware_single_step ())
3399 {
3400 if (fast_tp_collecting == 0)
3401 {
3402 if (step == 0)
3403 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3404 if (lwp->suspended)
3405 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3406 lwp->suspended);
3407 }
3408
3409 step = 1;
3410 }
3411
3412 /* Postpone any pending signal. It was enqueued above. */
3413 signal = 0;
3414 }
3415
3416 if (fast_tp_collecting == 1)
3417 {
3418 if (debug_threads)
3419 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3420 " (exit-jump-pad-bkpt)\n",
3421 lwpid_of (thread));
3422
3423 /* Postpone any pending signal. It was enqueued above. */
3424 signal = 0;
3425 }
3426 else if (fast_tp_collecting == 2)
3427 {
3428 if (debug_threads)
3429 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3430 " single-stepping\n",
3431 lwpid_of (thread));
3432
3433 if (can_hardware_single_step ())
3434 step = 1;
3435 else
3436 fatal ("moving out of jump pad single-stepping"
3437 " not implemented on this target");
3438
3439 /* Postpone any pending signal. It was enqueued above. */
3440 signal = 0;
3441 }
3442
3443 /* If we have while-stepping actions in this thread, set it stepping.
3444 If we have a signal to deliver, its handler may or may not be set
3445 to SIG_IGN; we don't know. Assume so, and allow collecting
3446 while-stepping into a signal handler. A possible smart thing to
3447 do would be to set an internal breakpoint at the signal return
3448 address, continue, and carry on catching this while-stepping
3449 action only when that breakpoint is hit. A future
3450 enhancement. */
3451 if (thread->while_stepping != NULL
3452 && can_hardware_single_step ())
3453 {
3454 if (debug_threads)
3455 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
3456 lwpid_of (thread));
3457 step = 1;
3458 }
3459
3460 if (debug_threads && the_low_target.get_pc != NULL)
3461 {
3462 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
3463 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
3464 debug_printf (" resuming from pc 0x%lx\n", (long) pc);
3465 }
3466
3467 /* If we have pending signals, consume one unless we are trying to
3468 reinsert a breakpoint or we're trying to finish a fast tracepoint
3469 collect. */
3470 if (lwp->pending_signals != NULL
3471 && lwp->bp_reinsert == 0
3472 && fast_tp_collecting == 0)
3473 {
3474 struct pending_signals **p_sig;
3475
3476 p_sig = &lwp->pending_signals;
3477 while ((*p_sig)->prev != NULL)
3478 p_sig = &(*p_sig)->prev;
3479
3480 signal = (*p_sig)->signal;
3481 if ((*p_sig)->info.si_signo != 0)
3482 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3483 &(*p_sig)->info);
3484
3485 free (*p_sig);
3486 *p_sig = NULL;
3487 }
3488
3489 if (the_low_target.prepare_to_resume != NULL)
3490 the_low_target.prepare_to_resume (lwp);
3491
3492 regcache_invalidate_thread (thread);
3493 errno = 0;
3494 lwp->stopped = 0;
3495 lwp->stopped_by_watchpoint = 0;
3496 lwp->stepping = step;
3497 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
3498 (PTRACE_TYPE_ARG3) 0,
3499 /* Coerce to a uintptr_t first to avoid potential gcc warning
3500 of coercing an 8 byte integer to a 4 byte pointer. */
3501 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
3502
3503 current_inferior = saved_inferior;
3504 if (errno)
3505 {
3506 /* ESRCH from ptrace either means that the thread was already
3507 running (an error) or that it is gone (a race condition). If
3508 it's gone, we will get a notification the next time we wait,
3509 so we can ignore the error. We could differentiate these
3510 two, but it's tricky without waiting; the thread still exists
3511 as a zombie, so sending it signal 0 would succeed. So just
3512 ignore ESRCH. */
3513 if (errno == ESRCH)
3514 return;
3515
3516 perror_with_name ("ptrace");
3517 }
3518 }
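
/* Standalone sketch of the pending-signal bookkeeping used by
   linux_resume_one_lwp above: new signals are pushed at the list
   head via 'prev', and the consumer walks to the tail before
   dequeuing, so delivery stays first-in first-out.  The struct is a
   simplified stand-in for gdbserver's struct pending_signals.  */
#include <stdio.h>
#include <stdlib.h>

struct pending_sig
{
  struct pending_sig *prev;
  int signal;
};

static void
push_signal (struct pending_sig **head, int sig)
{
  struct pending_sig *p = malloc (sizeof *p);

  p->prev = *head;              /* Newest entry becomes the head.  */
  p->signal = sig;
  *head = p;
}

static int
pop_oldest (struct pending_sig **head)
{
  struct pending_sig **p = head;
  int sig;

  /* Walk to the oldest entry, i.e. the tail of the list.  */
  while ((*p)->prev != NULL)
    p = &(*p)->prev;

  sig = (*p)->signal;
  free (*p);
  *p = NULL;
  return sig;
}

int
main (void)
{
  struct pending_sig *queue = NULL;

  push_signal (&queue, 10);
  push_signal (&queue, 12);
  printf ("%d\n", pop_oldest (&queue));   /* 10: oldest first.  */
  printf ("%d\n", pop_oldest (&queue));   /* 12.  */
  return 0;
}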
3519
3520 struct thread_resume_array
3521 {
3522 struct thread_resume *resume;
3523 size_t n;
3524 };
3525
3526 /* This function is called once per thread via find_inferior.
3527 ARG is a pointer to a thread_resume_array struct.
3528 We look up the thread specified by ENTRY in ARG, and mark the thread
3529 with a pointer to the appropriate resume request.
3530
3531 This algorithm is O(threads * resume elements), but the number of
3532 resume elements is small (and will remain small at least until
3533 GDB supports thread suspension). */
3534
3535 static int
3536 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3537 {
3538 struct thread_info *thread = (struct thread_info *) entry;
3539 struct lwp_info *lwp = get_thread_lwp (thread);
3540 int ndx;
3541 struct thread_resume_array *r;
3542
3543 r = arg;
3544
3545 for (ndx = 0; ndx < r->n; ndx++)
3546 {
3547 ptid_t ptid = r->resume[ndx].thread;
3548 if (ptid_equal (ptid, minus_one_ptid)
3549 || ptid_equal (ptid, entry->id)
3550 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3551 of PID'. */
3552 || (ptid_get_pid (ptid) == pid_of (thread)
3553 && (ptid_is_pid (ptid)
3554 || ptid_get_lwp (ptid) == -1)))
3555 {
3556 if (r->resume[ndx].kind == resume_stop
3557 && thread->last_resume_kind == resume_stop)
3558 {
3559 if (debug_threads)
3560 debug_printf ("already %s LWP %ld at GDB's request\n",
3561 (thread->last_status.kind
3562 == TARGET_WAITKIND_STOPPED)
3563 ? "stopped"
3564 : "stopping",
3565 lwpid_of (thread));
3566
3567 continue;
3568 }
3569
3570 lwp->resume = &r->resume[ndx];
3571 thread->last_resume_kind = lwp->resume->kind;
3572
3573 lwp->step_range_start = lwp->resume->step_range_start;
3574 lwp->step_range_end = lwp->resume->step_range_end;
3575
3576 /* If we had a deferred signal to report, dequeue one now.
3577 This can happen if LWP gets more than one signal while
3578 trying to get out of a jump pad. */
3579 if (lwp->stopped
3580 && !lwp->status_pending_p
3581 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3582 {
3583 lwp->status_pending_p = 1;
3584
3585 if (debug_threads)
3586 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
3587 "leaving status pending.\n",
3588 WSTOPSIG (lwp->status_pending),
3589 lwpid_of (thread));
3590 }
3591
3592 return 0;
3593 }
3594 }
3595
3596 /* No resume action for this thread. */
3597 lwp->resume = NULL;
3598
3599 return 0;
3600 }
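
/* Simplified model (not gdbserver code) of the resume-request
   matching above: a request pid of -1 matches any thread, 'pPID' and
   'pPID.-1' match every LWP of process PID, and an exact pair
   matches one LWP.  ptid_is_pid is modelled here as lwp == 0.  */
#include <stdio.h>

struct sketch_ptid
{
  int pid;
  long lwp;
};

static int
ptid_matches (struct sketch_ptid request, struct sketch_ptid thread)
{
  if (request.pid == -1)
    return 1;                   /* minus_one_ptid: any thread.  */
  if (request.pid == thread.pid
      && (request.lwp == 0 || request.lwp == -1))
    return 1;                   /* pPID or pPID.-1: whole process.  */
  return request.pid == thread.pid && request.lwp == thread.lwp;
}

int
main (void)
{
  struct sketch_ptid t = { 1234, 5678 };
  struct sketch_ptid any = { -1, 0 };
  struct sketch_ptid proc = { 1234, -1 };
  struct sketch_ptid exact = { 1234, 5678 };
  struct sketch_ptid other = { 1234, 9999 };

  printf ("%d %d %d %d\n",
          ptid_matches (any, t), ptid_matches (proc, t),
          ptid_matches (exact, t), ptid_matches (other, t));
  /* Prints "1 1 1 0".  */
  return 0;
}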
3601
3602 /* find_inferior callback for linux_resume.
3603 Set *FLAG_P if this lwp has an interesting status pending. */
3604
3605 static int
3606 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3607 {
3608 struct thread_info *thread = (struct thread_info *) entry;
3609 struct lwp_info *lwp = get_thread_lwp (thread);
3610
3611 /* LWPs which will not be resumed are not interesting, because
3612 we might not wait for them next time through linux_wait. */
3613 if (lwp->resume == NULL)
3614 return 0;
3615
3616 if (lwp->status_pending_p)
3617 * (int *) flag_p = 1;
3618
3619 return 0;
3620 }
3621
3622 /* Return 1 if this lwp that GDB wants running is stopped at an
3623 internal breakpoint that we need to step over. It assumes that any
3624 required STOP_PC adjustment has already been propagated to the
3625 inferior's regcache. */
3626
3627 static int
3628 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3629 {
3630 struct thread_info *thread = (struct thread_info *) entry;
3631 struct lwp_info *lwp = get_thread_lwp (thread);
3632 struct thread_info *saved_inferior;
3633 CORE_ADDR pc;
3634
3635 /* LWPs which will not be resumed are not interesting, because we
3636 might not wait for them next time through linux_wait. */
3637
3638 if (!lwp->stopped)
3639 {
3640 if (debug_threads)
3641 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
3642 lwpid_of (thread));
3643 return 0;
3644 }
3645
3646 if (thread->last_resume_kind == resume_stop)
3647 {
3648 if (debug_threads)
3649 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
3650 " stopped\n",
3651 lwpid_of (thread));
3652 return 0;
3653 }
3654
3655 gdb_assert (lwp->suspended >= 0);
3656
3657 if (lwp->suspended)
3658 {
3659 if (debug_threads)
3660 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
3661 lwpid_of (thread));
3662 return 0;
3663 }
3664
3665 if (!lwp->need_step_over)
3666 {
3667 if (debug_threads)
3668 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
3669 }
3670
3671 if (lwp->status_pending_p)
3672 {
3673 if (debug_threads)
3674 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
3675 " status.\n",
3676 lwpid_of (thread));
3677 return 0;
3678 }
3679
3680 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3681 or we have. */
3682 pc = get_pc (lwp);
3683
3684 /* If the PC has changed since we stopped, then don't do anything,
3685 and let the breakpoint/tracepoint be hit. This happens if, for
3686 instance, GDB handled the decr_pc_after_break subtraction itself,
3687 GDB is OOL stepping this thread, or the user has issued a "jump"
3688 command, or poked thread's registers herself. */
3689 if (pc != lwp->stop_pc)
3690 {
3691 if (debug_threads)
3692 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
3693 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3694 lwpid_of (thread),
3695 paddress (lwp->stop_pc), paddress (pc));
3696
3697 lwp->need_step_over = 0;
3698 return 0;
3699 }
3700
3701 saved_inferior = current_inferior;
3702 current_inferior = thread;
3703
3704 /* We can only step over breakpoints we know about. */
3705 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3706 {
3707 /* Don't step over a breakpoint that GDB expects to hit
3708 though. If the condition is being evaluated on the target's side
3709 and it evaluates to false, step over this breakpoint as well.
3710 if (gdb_breakpoint_here (pc)
3711 && gdb_condition_true_at_breakpoint (pc)
3712 && gdb_no_commands_at_breakpoint (pc))
3713 {
3714 if (debug_threads)
3715 debug_printf ("Need step over [LWP %ld]? yes, but found"
3716 " GDB breakpoint at 0x%s; skipping step over\n",
3717 lwpid_of (thread), paddress (pc));
3718
3719 current_inferior = saved_inferior;
3720 return 0;
3721 }
3722 else
3723 {
3724 if (debug_threads)
3725 debug_printf ("Need step over [LWP %ld]? yes, "
3726 "found breakpoint at 0x%s\n",
3727 lwpid_of (thread), paddress (pc));
3728
3729 /* We've found an lwp that needs stepping over --- return 1 so
3730 that find_inferior stops looking. */
3731 current_inferior = saved_inferior;
3732
3733 /* If the step over is cancelled, this is set again. */
3734 lwp->need_step_over = 0;
3735 return 1;
3736 }
3737 }
3738
3739 current_inferior = saved_inferior;
3740
3741 if (debug_threads)
3742 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
3743 " at 0x%s\n",
3744 lwpid_of (thread), paddress (pc));
3745
3746 return 0;
3747 }
3748
3749 /* Start a step-over operation on LWP.  When LWP is stopped at a
3750    breakpoint, we need to move the breakpoint out of the way to
3751    make progress.  If we let other threads run while we do that,
3752    they may pass by the breakpoint location and miss hitting it.
3753    To avoid that, a step-over momentarily stops all threads, then
3754    single-steps LWP with the breakpoint temporarily removed from
3755    the inferior.  When the single-step finishes, we reinsert the
3756    breakpoint, and let all threads that are supposed to be
3757    running run again.
3758 
3759    On targets that don't support hardware single-step, we don't
3760    currently support full software single-stepping.  Instead, we
3761    only support stepping over the thread event breakpoint, by
3762    asking the low target where to place a reinsert breakpoint.
3763    Since this routine assumes the breakpoint being stepped over is
3764    a thread event breakpoint, the return address of the current
3765    function is usually a good enough place to set it.  */
3766
3767 static int
3768 start_step_over (struct lwp_info *lwp)
3769 {
3770 struct thread_info *thread = get_lwp_thread (lwp);
3771 struct thread_info *saved_inferior;
3772 CORE_ADDR pc;
3773 int step;
3774
3775 if (debug_threads)
3776 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
3777 lwpid_of (thread));
3778
3779 stop_all_lwps (1, lwp);
3780 gdb_assert (lwp->suspended == 0);
3781
3782 if (debug_threads)
3783 debug_printf ("Done stopping all threads for step-over.\n");
3784
3785 /* Note, we should always reach here with an already adjusted PC,
3786 either by GDB (if we're resuming due to GDB's request), or by our
3787 caller, if we just finished handling an internal breakpoint GDB
3788 shouldn't care about. */
3789 pc = get_pc (lwp);
3790
3791 saved_inferior = current_inferior;
3792 current_inferior = thread;
3793
3794 lwp->bp_reinsert = pc;
3795 uninsert_breakpoints_at (pc);
3796 uninsert_fast_tracepoint_jumps_at (pc);
3797
3798 if (can_hardware_single_step ())
3799 {
3800 step = 1;
3801 }
3802 else
3803 {
3804 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3805 set_reinsert_breakpoint (raddr);
3806 step = 0;
3807 }
3808
3809 current_inferior = saved_inferior;
3810
3811 linux_resume_one_lwp (lwp, step, 0, NULL);
3812
3813 /* Require next event from this LWP. */
3814 step_over_bkpt = thread->entry.id;
3815 return 1;
3816 }
3817
3818 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
3819 start_step_over, if still there, and delete any reinsert
3820    breakpoints we've set, on non-hardware single-step targets.  */
3821
3822 static int
3823 finish_step_over (struct lwp_info *lwp)
3824 {
3825 if (lwp->bp_reinsert != 0)
3826 {
3827 if (debug_threads)
3828 debug_printf ("Finished step over.\n");
3829
3830 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3831 may be no breakpoint to reinsert there by now. */
3832 reinsert_breakpoints_at (lwp->bp_reinsert);
3833 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3834
3835 lwp->bp_reinsert = 0;
3836
3837 /* Delete any software-single-step reinsert breakpoints. No
3838 longer needed. We don't have to worry about other threads
3839 hitting this trap, and later not being able to explain it,
3840 because we were stepping over a breakpoint, and we hold all
3841 threads but LWP stopped while doing that. */
3842 if (!can_hardware_single_step ())
3843 delete_reinsert_breakpoints ();
3844
3845 step_over_bkpt = null_ptid;
3846 return 1;
3847 }
3848 else
3849 return 0;
3850 }
3851
3852 /* This function is called once per thread. We check the thread's resume
3853 request, which will tell us whether to resume, step, or leave the thread
3854 stopped; and what signal, if any, it should be sent.
3855
3856 For threads which we aren't explicitly told otherwise, we preserve
3857 the stepping flag; this is used for stepping over gdbserver-placed
3858 breakpoints.
3859
3860 If pending_flags was set in any thread, we queue any needed
3861 signals, since we won't actually resume. We already have a pending
3862 event to report, so we don't need to preserve any step requests;
3863 they should be re-issued if necessary. */
3864
3865 static int
3866 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
3867 {
3868 struct thread_info *thread = (struct thread_info *) entry;
3869 struct lwp_info *lwp = get_thread_lwp (thread);
3870 int step;
3871 int leave_all_stopped = * (int *) arg;
3872 int leave_pending;
3873
3874 if (lwp->resume == NULL)
3875 return 0;
3876
3877 if (lwp->resume->kind == resume_stop)
3878 {
3879 if (debug_threads)
3880 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
3881
3882 if (!lwp->stopped)
3883 {
3884 if (debug_threads)
3885 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
3886
3887 /* Stop the thread, and wait for the event asynchronously,
3888 through the event loop. */
3889 send_sigstop (lwp);
3890 }
3891 else
3892 {
3893 if (debug_threads)
3894 debug_printf ("already stopped LWP %ld\n",
3895 lwpid_of (thread));
3896
3897 /* The LWP may have been stopped in an internal event that
3898 was not meant to be notified back to GDB (e.g., gdbserver
3899 breakpoint), so we should be reporting a stop event in
3900 this case too. */
3901
3902 /* If the thread already has a pending SIGSTOP, this is a
3903 no-op. Otherwise, something later will presumably resume
3904 the thread and this will cause it to cancel any pending
3905 operation, due to last_resume_kind == resume_stop. If
3906 the thread already has a pending status to report, we
3907 will still report it the next time we wait - see
3908 status_pending_p_callback. */
3909
3910 /* If we already have a pending signal to report, then
3911 there's no need to queue a SIGSTOP, as this means we're
3912 midway through moving the LWP out of the jumppad, and we
3913 will report the pending signal as soon as that is
3914 finished. */
3915 if (lwp->pending_signals_to_report == NULL)
3916 send_sigstop (lwp);
3917 }
3918
3919 /* For stop requests, we're done. */
3920 lwp->resume = NULL;
3921 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3922 return 0;
3923 }
3924
3925   /* If this thread, which is about to be resumed, has a pending
3926      status, then don't resume any threads - we can just report the
3927      pending status.  Make sure to queue any signals that would
3928      otherwise be sent.  In all-stop mode, we make this decision based
3929      on whether *any* thread has a pending status.  If there's a
3930      thread that needs the step-over-breakpoint dance, then don't
3931      resume any other thread but that particular one.  */
3932 leave_pending = (lwp->status_pending_p || leave_all_stopped);
3933
3934 if (!leave_pending)
3935 {
3936 if (debug_threads)
3937 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
3938
3939 step = (lwp->resume->kind == resume_step);
3940 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
3941 }
3942 else
3943 {
3944 if (debug_threads)
3945 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
3946
3947 /* If we have a new signal, enqueue the signal. */
3948 if (lwp->resume->sig != 0)
3949 {
3950 struct pending_signals *p_sig;
3951 p_sig = xmalloc (sizeof (*p_sig));
3952 p_sig->prev = lwp->pending_signals;
3953 p_sig->signal = lwp->resume->sig;
3954 memset (&p_sig->info, 0, sizeof (siginfo_t));
3955
3956 /* If this is the same signal we were previously stopped by,
3957 make sure to queue its siginfo. We can ignore the return
3958 value of ptrace; if it fails, we'll skip
3959 PTRACE_SETSIGINFO. */
3960 if (WIFSTOPPED (lwp->last_status)
3961 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3962 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3963 &p_sig->info);
3964
3965 lwp->pending_signals = p_sig;
3966 }
3967 }
3968
3969 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3970 lwp->resume = NULL;
3971 return 0;
3972 }
3973
3974 static void
3975 linux_resume (struct thread_resume *resume_info, size_t n)
3976 {
3977 struct thread_resume_array array = { resume_info, n };
3978 struct thread_info *need_step_over = NULL;
3979 int any_pending;
3980 int leave_all_stopped;
3981
3982 if (debug_threads)
3983 {
3984 debug_enter ();
3985 debug_printf ("linux_resume:\n");
3986 }
3987
3988 find_inferior (&all_threads, linux_set_resume_request, &array);
3989
3990 /* If there is a thread which would otherwise be resumed, which has
3991 a pending status, then don't resume any threads - we can just
3992 report the pending status. Make sure to queue any signals that
3993 would otherwise be sent. In non-stop mode, we'll apply this
3994 logic to each thread individually. We consume all pending events
3995 before considering to start a step-over (in all-stop). */
3996 any_pending = 0;
3997 if (!non_stop)
3998 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
3999
4000 /* If there is a thread which would otherwise be resumed, which is
4001 stopped at a breakpoint that needs stepping over, then don't
4002 resume any threads - have it step over the breakpoint with all
4003 other threads stopped, then resume all threads again. Make sure
4004 to queue any signals that would otherwise be delivered or
4005 queued. */
4006 if (!any_pending && supports_breakpoints ())
4007 need_step_over
4008 = (struct thread_info *) find_inferior (&all_threads,
4009 need_step_over_p, NULL);
4010
4011 leave_all_stopped = (need_step_over != NULL || any_pending);
4012
4013 if (debug_threads)
4014 {
4015 if (need_step_over != NULL)
4016 debug_printf ("Not resuming all, need step over\n");
4017 else if (any_pending)
4018 debug_printf ("Not resuming, all-stop and found "
4019 "an LWP with pending status\n");
4020 else
4021 debug_printf ("Resuming, no pending status or step over needed\n");
4022 }
4023
4024 /* Even if we're leaving threads stopped, queue all signals we'd
4025 otherwise deliver. */
4026 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4027
4028 if (need_step_over)
4029 start_step_over (get_thread_lwp (need_step_over));
4030
4031 if (debug_threads)
4032 {
4033 debug_printf ("linux_resume done\n");
4034 debug_exit ();
4035 }
4036 }
4037
4038 /* This function is called once per thread. We check the thread's
4039 last resume request, which will tell us whether to resume, step, or
4040 leave the thread stopped. Any signal the client requested to be
4041 delivered has already been enqueued at this point.
4042
4043 If any thread that GDB wants running is stopped at an internal
4044 breakpoint that needs stepping over, we start a step-over operation
4045 on that particular thread, and leave all others stopped. */
4046
4047 static int
4048 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4049 {
4050 struct thread_info *thread = (struct thread_info *) entry;
4051 struct lwp_info *lwp = get_thread_lwp (thread);
4052 int step;
4053
4054 if (lwp == except)
4055 return 0;
4056
4057 if (debug_threads)
4058 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4059
4060 if (!lwp->stopped)
4061 {
4062 if (debug_threads)
4063 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4064 return 0;
4065 }
4066
4067 if (thread->last_resume_kind == resume_stop
4068 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4069 {
4070 if (debug_threads)
4071 	debug_printf ("   client wants LWP %ld to remain stopped\n",
4072 		      lwpid_of (thread));
4073 return 0;
4074 }
4075
4076 if (lwp->status_pending_p)
4077 {
4078 if (debug_threads)
4079 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4080 lwpid_of (thread));
4081 return 0;
4082 }
4083
4084 gdb_assert (lwp->suspended >= 0);
4085
4086 if (lwp->suspended)
4087 {
4088 if (debug_threads)
4089 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4090 return 0;
4091 }
4092
4093 if (thread->last_resume_kind == resume_stop
4094 && lwp->pending_signals_to_report == NULL
4095 && lwp->collecting_fast_tracepoint == 0)
4096 {
4097 /* We haven't reported this LWP as stopped yet (otherwise, the
4098 last_status.kind check above would catch it, and we wouldn't
4099 	 reach here).  This LWP may have been momentarily paused by a
4100 	 stop_all_lwps call while handling, for example, another LWP's
4101 step-over. In that case, the pending expected SIGSTOP signal
4102 that was queued at vCont;t handling time will have already
4103 been consumed by wait_for_sigstop, and so we need to requeue
4104 another one here. Note that if the LWP already has a SIGSTOP
4105 pending, this is a no-op. */
4106
4107 if (debug_threads)
4108 debug_printf ("Client wants LWP %ld to stop. "
4109 "Making sure it has a SIGSTOP pending\n",
4110 lwpid_of (thread));
4111
4112 send_sigstop (lwp);
4113 }
4114
4115 step = thread->last_resume_kind == resume_step;
4116 linux_resume_one_lwp (lwp, step, 0, NULL);
4117 return 0;
4118 }
4119
4120 static int
4121 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4122 {
4123 struct thread_info *thread = (struct thread_info *) entry;
4124 struct lwp_info *lwp = get_thread_lwp (thread);
4125
4126 if (lwp == except)
4127 return 0;
4128
4129 lwp->suspended--;
4130 gdb_assert (lwp->suspended >= 0);
4131
4132 return proceed_one_lwp (entry, except);
4133 }
4134
4135 /* When we finish a step-over, set threads running again. If there's
4136 another thread that may need a step-over, now's the time to start
4137 it. Eventually, we'll move all threads past their breakpoints. */
4138
4139 static void
4140 proceed_all_lwps (void)
4141 {
4142 struct thread_info *need_step_over;
4143
4144 /* If there is a thread which would otherwise be resumed, which is
4145 stopped at a breakpoint that needs stepping over, then don't
4146 resume any threads - have it step over the breakpoint with all
4147 other threads stopped, then resume all threads again. */
4148
4149 if (supports_breakpoints ())
4150 {
4151 need_step_over
4152 = (struct thread_info *) find_inferior (&all_threads,
4153 need_step_over_p, NULL);
4154
4155 if (need_step_over != NULL)
4156 {
4157 if (debug_threads)
4158 debug_printf ("proceed_all_lwps: found "
4159 "thread %ld needing a step-over\n",
4160 lwpid_of (need_step_over));
4161
4162 start_step_over (get_thread_lwp (need_step_over));
4163 return;
4164 }
4165 }
4166
4167 if (debug_threads)
4168 debug_printf ("Proceeding, no step-over needed\n");
4169
4170 find_inferior (&all_threads, proceed_one_lwp, NULL);
4171 }
4172
4173 /* Stopped LWPs that the client wanted to be running, that don't have
4174 pending statuses, are set to run again, except for EXCEPT, if not
4175 NULL. This undoes a stop_all_lwps call. */
4176
4177 static void
4178 unstop_all_lwps (int unsuspend, struct lwp_info *except)
4179 {
4180 if (debug_threads)
4181 {
4182 debug_enter ();
4183 if (except)
4184 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
4185 lwpid_of (get_lwp_thread (except)));
4186 else
4187 debug_printf ("unstopping all lwps\n");
4188 }
4189
4190 if (unsuspend)
4191 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
4192 else
4193 find_inferior (&all_threads, proceed_one_lwp, except);
4194
4195 if (debug_threads)
4196 {
4197 debug_printf ("unstop_all_lwps done\n");
4198 debug_exit ();
4199 }
4200 }
4201
4202
4203 #ifdef HAVE_LINUX_REGSETS
4204
4205 #define use_linux_regsets 1
4206
4207 /* Returns true if REGSET has been disabled. */
4208
4209 static int
4210 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4211 {
4212 return (info->disabled_regsets != NULL
4213 && info->disabled_regsets[regset - info->regsets]);
4214 }
4215
4216 /* Disable REGSET. */
4217
4218 static void
4219 disable_regset (struct regsets_info *info, struct regset_info *regset)
4220 {
4221 int dr_offset;
4222
4223 dr_offset = regset - info->regsets;
4224 if (info->disabled_regsets == NULL)
4225 info->disabled_regsets = xcalloc (1, info->num_regsets);
4226 info->disabled_regsets[dr_offset] = 1;
4227 }
4228
4229 static int
4230 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4231 struct regcache *regcache)
4232 {
4233 struct regset_info *regset;
4234 int saw_general_regs = 0;
4235 int pid;
4236 struct iovec iov;
4237
4238 regset = regsets_info->regsets;
4239
4240 pid = lwpid_of (current_inferior);
4241 while (regset->size >= 0)
4242 {
4243 void *buf, *data;
4244 int nt_type, res;
4245
4246 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4247 {
4248 	  regset++;
4249 continue;
4250 }
4251
4252 buf = xmalloc (regset->size);
4253
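      /* Regsets tagged with an NT_* type are transferred via the
	 iovec-based ptrace requests (PTRACE_GETREGSET-style), which
	 take a struct iovec describing the buffer; legacy per-regset
	 requests take the buffer pointer directly.  */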
4254 nt_type = regset->nt_type;
4255 if (nt_type)
4256 {
4257 iov.iov_base = buf;
4258 iov.iov_len = regset->size;
4259 data = (void *) &iov;
4260 }
4261 else
4262 data = buf;
4263
4264 #ifndef __sparc__
4265 res = ptrace (regset->get_request, pid,
4266 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4267 #else
4268 res = ptrace (regset->get_request, pid, data, nt_type);
4269 #endif
4270 if (res < 0)
4271 {
4272 if (errno == EIO)
4273 {
4274 /* If we get EIO on a regset, do not try it again for
4275 this process mode. */
4276 disable_regset (regsets_info, regset);
4277 free (buf);
4278 continue;
4279 }
4280 else
4281 {
4282 char s[256];
4283 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4284 pid);
4285 perror (s);
4286 }
4287 }
4288 else if (regset->type == GENERAL_REGS)
4289 saw_general_regs = 1;
4290 regset->store_function (regcache, buf);
4291       regset++;
4292 free (buf);
4293 }
4294 if (saw_general_regs)
4295 return 0;
4296 else
4297 return 1;
4298 }
4299
4300 static int
4301 regsets_store_inferior_registers (struct regsets_info *regsets_info,
4302 struct regcache *regcache)
4303 {
4304 struct regset_info *regset;
4305 int saw_general_regs = 0;
4306 int pid;
4307 struct iovec iov;
4308
4309 regset = regsets_info->regsets;
4310
4311 pid = lwpid_of (current_inferior);
4312 while (regset->size >= 0)
4313 {
4314 void *buf, *data;
4315 int nt_type, res;
4316
4317 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4318 {
4319 	  regset++;
4320 continue;
4321 }
4322
4323 buf = xmalloc (regset->size);
4324
4325 /* First fill the buffer with the current register set contents,
4326 in case there are any items in the kernel's regset that are
4327 not in gdbserver's regcache. */
4328
4329 nt_type = regset->nt_type;
4330 if (nt_type)
4331 {
4332 iov.iov_base = buf;
4333 iov.iov_len = regset->size;
4334 data = (void *) &iov;
4335 }
4336 else
4337 data = buf;
4338
4339 #ifndef __sparc__
4340 res = ptrace (regset->get_request, pid,
4341 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4342 #else
4343 res = ptrace (regset->get_request, pid, data, nt_type);
4344 #endif
4345
4346 if (res == 0)
4347 {
4348 /* Then overlay our cached registers on that. */
4349 regset->fill_function (regcache, buf);
4350
4351 /* Only now do we write the register set. */
4352 #ifndef __sparc__
4353 res = ptrace (regset->set_request, pid,
4354 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4355 #else
4356 res = ptrace (regset->set_request, pid, data, nt_type);
4357 #endif
4358 }
4359
4360 if (res < 0)
4361 {
4362 if (errno == EIO)
4363 {
4364 /* If we get EIO on a regset, do not try it again for
4365 this process mode. */
4366 disable_regset (regsets_info, regset);
4367 free (buf);
4368 continue;
4369 }
4370 else if (errno == ESRCH)
4371 {
4372 /* At this point, ESRCH should mean the process is
4373 already gone, in which case we simply ignore attempts
4374 to change its registers. See also the related
4375 comment in linux_resume_one_lwp. */
4376 free (buf);
4377 return 0;
4378 }
4379 else
4380 {
4381 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4382 }
4383 }
4384 else if (regset->type == GENERAL_REGS)
4385 saw_general_regs = 1;
4386       regset++;
4387 free (buf);
4388 }
4389 if (saw_general_regs)
4390 return 0;
4391 else
4392 return 1;
4393 }
4394
4395 #else /* !HAVE_LINUX_REGSETS */
4396
4397 #define use_linux_regsets 0
4398 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4399 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4400
4401 #endif
4402
4403 /* Return 1 if register REGNO is supported by one of the regset ptrace
4404 calls or 0 if it has to be transferred individually. */
4405
4406 static int
4407 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
4408 {
4409 unsigned char mask = 1 << (regno % 8);
4410 size_t index = regno / 8;
4411
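  /* A null REGSET_BITMAP means every register is covered by some
     regset; otherwise, bit REGNO of the bitmap says whether it is.  */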
4412 return (use_linux_regsets
4413 && (regs_info->regset_bitmap == NULL
4414 || (regs_info->regset_bitmap[index] & mask) != 0));
4415 }
4416
4417 #ifdef HAVE_LINUX_USRREGS
4418
4419 int
4420 register_addr (const struct usrregs_info *usrregs, int regnum)
4421 {
4422 int addr;
4423
4424 if (regnum < 0 || regnum >= usrregs->num_regs)
4425 error ("Invalid register number %d.", regnum);
4426
4427 addr = usrregs->regmap[regnum];
4428
4429 return addr;
4430 }
4431
4432 /* Fetch one register. */
4433 static void
4434 fetch_register (const struct usrregs_info *usrregs,
4435 struct regcache *regcache, int regno)
4436 {
4437 CORE_ADDR regaddr;
4438 int i, size;
4439 char *buf;
4440 int pid;
4441
4442 if (regno >= usrregs->num_regs)
4443 return;
4444 if ((*the_low_target.cannot_fetch_register) (regno))
4445 return;
4446
4447 regaddr = register_addr (usrregs, regno);
4448 if (regaddr == -1)
4449 return;
4450
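  /* Round the transfer size up to a whole number of PTRACE_XFER_TYPE
     words, since PTRACE_PEEKUSER moves one word at a time.  */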
4451 size = ((register_size (regcache->tdesc, regno)
4452 + sizeof (PTRACE_XFER_TYPE) - 1)
4453 & -sizeof (PTRACE_XFER_TYPE));
4454 buf = alloca (size);
4455
4456 pid = lwpid_of (current_inferior);
4457 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4458 {
4459 errno = 0;
4460 *(PTRACE_XFER_TYPE *) (buf + i) =
4461 ptrace (PTRACE_PEEKUSER, pid,
4462 /* Coerce to a uintptr_t first to avoid potential gcc warning
4463 		 about coercing an 8 byte integer to a 4 byte pointer.  */
4464 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
4465 regaddr += sizeof (PTRACE_XFER_TYPE);
4466 if (errno != 0)
4467 error ("reading register %d: %s", regno, strerror (errno));
4468 }
4469
4470 if (the_low_target.supply_ptrace_register)
4471 the_low_target.supply_ptrace_register (regcache, regno, buf);
4472 else
4473 supply_register (regcache, regno, buf);
4474 }
4475
4476 /* Store one register. */
4477 static void
4478 store_register (const struct usrregs_info *usrregs,
4479 struct regcache *regcache, int regno)
4480 {
4481 CORE_ADDR regaddr;
4482 int i, size;
4483 char *buf;
4484 int pid;
4485
4486 if (regno >= usrregs->num_regs)
4487 return;
4488 if ((*the_low_target.cannot_store_register) (regno))
4489 return;
4490
4491 regaddr = register_addr (usrregs, regno);
4492 if (regaddr == -1)
4493 return;
4494
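  /* As in fetch_register, round the transfer size up to a whole
     number of PTRACE_XFER_TYPE words.  */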
4495 size = ((register_size (regcache->tdesc, regno)
4496 + sizeof (PTRACE_XFER_TYPE) - 1)
4497 & -sizeof (PTRACE_XFER_TYPE));
4498 buf = alloca (size);
4499 memset (buf, 0, size);
4500
4501 if (the_low_target.collect_ptrace_register)
4502 the_low_target.collect_ptrace_register (regcache, regno, buf);
4503 else
4504 collect_register (regcache, regno, buf);
4505
4506 pid = lwpid_of (current_inferior);
4507 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4508 {
4509 errno = 0;
4510 ptrace (PTRACE_POKEUSER, pid,
4511 /* Coerce to a uintptr_t first to avoid potential gcc warning
4512 about coercing an 8 byte integer to a 4 byte pointer. */
4513 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
4514 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
4515 if (errno != 0)
4516 {
4517 /* At this point, ESRCH should mean the process is
4518 already gone, in which case we simply ignore attempts
4519 to change its registers. See also the related
4520 comment in linux_resume_one_lwp. */
4521 if (errno == ESRCH)
4522 return;
4523
4524 if ((*the_low_target.cannot_store_register) (regno) == 0)
4525 error ("writing register %d: %s", regno, strerror (errno));
4526 }
4527 regaddr += sizeof (PTRACE_XFER_TYPE);
4528 }
4529 }
4530
4531 /* Fetch all registers, or just one, from the child process.
4532 If REGNO is -1, do this for all registers, skipping any that are
4533 assumed to have been retrieved by regsets_fetch_inferior_registers,
4534 unless ALL is non-zero.
4535 Otherwise, REGNO specifies which register (so we can save time). */
4536 static void
4537 usr_fetch_inferior_registers (const struct regs_info *regs_info,
4538 struct regcache *regcache, int regno, int all)
4539 {
4540 struct usrregs_info *usr = regs_info->usrregs;
4541
4542 if (regno == -1)
4543 {
4544 for (regno = 0; regno < usr->num_regs; regno++)
4545 if (all || !linux_register_in_regsets (regs_info, regno))
4546 fetch_register (usr, regcache, regno);
4547 }
4548 else
4549 fetch_register (usr, regcache, regno);
4550 }
4551
4552 /* Store our register values back into the inferior.
4553 If REGNO is -1, do this for all registers, skipping any that are
4554 assumed to have been saved by regsets_store_inferior_registers,
4555 unless ALL is non-zero.
4556 Otherwise, REGNO specifies which register (so we can save time). */
4557 static void
4558 usr_store_inferior_registers (const struct regs_info *regs_info,
4559 struct regcache *regcache, int regno, int all)
4560 {
4561 struct usrregs_info *usr = regs_info->usrregs;
4562
4563 if (regno == -1)
4564 {
4565 for (regno = 0; regno < usr->num_regs; regno++)
4566 if (all || !linux_register_in_regsets (regs_info, regno))
4567 store_register (usr, regcache, regno);
4568 }
4569 else
4570 store_register (usr, regcache, regno);
4571 }
4572
4573 #else /* !HAVE_LINUX_USRREGS */
4574
4575 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4576 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4577
4578 #endif
4579
4580
4581 void
4582 linux_fetch_registers (struct regcache *regcache, int regno)
4583 {
4584 int use_regsets;
4585 int all = 0;
4586 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4587
4588 if (regno == -1)
4589 {
4590 if (the_low_target.fetch_register != NULL
4591 && regs_info->usrregs != NULL)
4592 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
4593 (*the_low_target.fetch_register) (regcache, regno);
4594
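      /* regsets_fetch_inferior_registers returns non-zero when the
	 general registers were not among the fetched regsets; ALL then
	 tells the usrregs fallback below to fetch every register.  */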
4595 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
4596 if (regs_info->usrregs != NULL)
4597 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
4598 }
4599 else
4600 {
4601 if (the_low_target.fetch_register != NULL
4602 && (*the_low_target.fetch_register) (regcache, regno))
4603 return;
4604
4605 use_regsets = linux_register_in_regsets (regs_info, regno);
4606 if (use_regsets)
4607 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
4608 regcache);
4609 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4610 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
4611 }
4612 }
4613
4614 void
4615 linux_store_registers (struct regcache *regcache, int regno)
4616 {
4617 int use_regsets;
4618 int all = 0;
4619 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4620
4621 if (regno == -1)
4622 {
4623 all = regsets_store_inferior_registers (regs_info->regsets_info,
4624 regcache);
4625 if (regs_info->usrregs != NULL)
4626 usr_store_inferior_registers (regs_info, regcache, regno, all);
4627 }
4628 else
4629 {
4630 use_regsets = linux_register_in_regsets (regs_info, regno);
4631 if (use_regsets)
4632 all = regsets_store_inferior_registers (regs_info->regsets_info,
4633 regcache);
4634 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4635 usr_store_inferior_registers (regs_info, regcache, regno, 1);
4636 }
4637 }
4638
4639
4640 /* Copy LEN bytes from inferior's memory starting at MEMADDR to debugger
4641    memory starting at MYADDR.  Return 0 on success, or errno on failure.  */
4642
4643 static int
4644 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4645 {
4646 int pid = lwpid_of (current_inferior);
4647 register PTRACE_XFER_TYPE *buffer;
4648 register CORE_ADDR addr;
4649 register int count;
4650 char filename[64];
4651 register int i;
4652 int ret;
4653 int fd;
4654
4655 /* Try using /proc. Don't bother for one word. */
4656 if (len >= 3 * sizeof (long))
4657 {
4658 int bytes;
4659
4660 /* We could keep this file open and cache it - possibly one per
4661 thread. That requires some juggling, but is even faster. */
4662 sprintf (filename, "/proc/%d/mem", pid);
4663 fd = open (filename, O_RDONLY | O_LARGEFILE);
4664 if (fd == -1)
4665 goto no_proc;
4666
4667 /* If pread64 is available, use it. It's faster if the kernel
4668 supports it (only one syscall), and it's 64-bit safe even on
4669 32-bit platforms (for instance, SPARC debugging a SPARC64
4670 application). */
4671 #ifdef HAVE_PREAD64
4672 bytes = pread64 (fd, myaddr, len, memaddr);
4673 #else
4674 bytes = -1;
4675 if (lseek (fd, memaddr, SEEK_SET) != -1)
4676 bytes = read (fd, myaddr, len);
4677 #endif
4678
4679 close (fd);
4680 if (bytes == len)
4681 return 0;
4682
4683 /* Some data was read, we'll try to get the rest with ptrace. */
4684 if (bytes > 0)
4685 {
4686 memaddr += bytes;
4687 myaddr += bytes;
4688 len -= bytes;
4689 }
4690 }
4691
4692 no_proc:
4693 /* Round starting address down to longword boundary. */
4694 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4695 /* Round ending address up; get number of longwords that makes. */
4696 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4697 / sizeof (PTRACE_XFER_TYPE));
4698 /* Allocate buffer of that many longwords. */
4699 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4700
4701 /* Read all the longwords */
4702 errno = 0;
4703 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4704 {
4705 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4706 about coercing an 8 byte integer to a 4 byte pointer. */
4707 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4708 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4709 (PTRACE_TYPE_ARG4) 0);
4710 if (errno)
4711 break;
4712 }
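  /* ERRNO is zero here if and only if every word was read
     successfully.  */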
4713 ret = errno;
4714
4715 /* Copy appropriate bytes out of the buffer. */
4716 if (i > 0)
4717 {
4718 i *= sizeof (PTRACE_XFER_TYPE);
4719 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
4720 memcpy (myaddr,
4721 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4722 i < len ? i : len);
4723 }
4724
4725 return ret;
4726 }
4727
4728 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4729 memory at MEMADDR. On failure (cannot write to the inferior)
4730 returns the value of errno. Always succeeds if LEN is zero. */
4731
4732 static int
4733 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4734 {
4735 register int i;
4736 /* Round starting address down to longword boundary. */
4737 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4738 /* Round ending address up; get number of longwords that makes. */
4739 register int count
4740 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4741 / sizeof (PTRACE_XFER_TYPE);
4742
4743 /* Allocate buffer of that many longwords. */
4744 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4745 alloca (count * sizeof (PTRACE_XFER_TYPE));
4746
4747 int pid = lwpid_of (current_inferior);
4748
4749 if (len == 0)
4750 {
4751 /* Zero length write always succeeds. */
4752 return 0;
4753 }
4754
4755 if (debug_threads)
4756 {
4757 /* Dump up to four bytes. */
4758 unsigned int val = * (unsigned int *) myaddr;
4759 if (len == 1)
4760 val = val & 0xff;
4761 else if (len == 2)
4762 val = val & 0xffff;
4763 else if (len == 3)
4764 val = val & 0xffffff;
4765 debug_printf ("Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4766 val, (long)memaddr);
4767 }
4768
4769 /* Fill start and end extra bytes of buffer with existing memory data. */
4770
4771 errno = 0;
4772 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4773 about coercing an 8 byte integer to a 4 byte pointer. */
4774 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4775 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4776 (PTRACE_TYPE_ARG4) 0);
4777 if (errno)
4778 return errno;
4779
4780 if (count > 1)
4781 {
4782 errno = 0;
4783 buffer[count - 1]
4784 = ptrace (PTRACE_PEEKTEXT, pid,
4785 /* Coerce to a uintptr_t first to avoid potential gcc warning
4786 about coercing an 8 byte integer to a 4 byte pointer. */
4787 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
4788 * sizeof (PTRACE_XFER_TYPE)),
4789 (PTRACE_TYPE_ARG4) 0);
4790 if (errno)
4791 return errno;
4792 }
4793
4794 /* Copy data to be written over corresponding part of buffer. */
4795
4796 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4797 myaddr, len);
4798
4799 /* Write the entire buffer. */
4800
4801 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4802 {
4803 errno = 0;
4804 ptrace (PTRACE_POKETEXT, pid,
4805 /* Coerce to a uintptr_t first to avoid potential gcc warning
4806 about coercing an 8 byte integer to a 4 byte pointer. */
4807 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4808 (PTRACE_TYPE_ARG4) buffer[i]);
4809 if (errno)
4810 return errno;
4811 }
4812
4813 return 0;
4814 }
4815
4816 static void
4817 linux_look_up_symbols (void)
4818 {
4819 #ifdef USE_THREAD_DB
4820 struct process_info *proc = current_process ();
4821
4822 if (proc->private->thread_db != NULL)
4823 return;
4824
4825 /* If the kernel supports tracing clones, then we don't need to
4826 use the magic thread event breakpoint to learn about
4827 threads. */
4828 thread_db_init (!linux_supports_traceclone ());
4829 #endif
4830 }
4831
4832 static void
4833 linux_request_interrupt (void)
4834 {
4835 extern unsigned long signal_pid;
4836
4837 if (!ptid_equal (cont_thread, null_ptid)
4838 && !ptid_equal (cont_thread, minus_one_ptid))
4839 {
4840 int lwpid;
4841
4842 lwpid = lwpid_of (current_inferior);
4843 kill_lwp (lwpid, SIGINT);
4844 }
4845 else
4846 kill_lwp (signal_pid, SIGINT);
4847 }
4848
4849 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4850 to debugger memory starting at MYADDR. */
4851
4852 static int
4853 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
4854 {
4855 char filename[PATH_MAX];
4856 int fd, n;
4857 int pid = lwpid_of (current_inferior);
4858
4859 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4860
4861 fd = open (filename, O_RDONLY);
4862 if (fd < 0)
4863 return -1;
4864
4865 if (offset != (CORE_ADDR) 0
4866 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4867 n = -1;
4868 else
4869 n = read (fd, myaddr, len);
4870
4871 close (fd);
4872
4873 return n;
4874 }
4875
4876 /* These breakpoint and watchpoint related wrapper functions simply
4877 pass on the function call if the target has registered a
4878 corresponding function. */
4879
4880 static int
4881 linux_supports_z_point_type (char z_type)
4882 {
4883 return (the_low_target.supports_z_point_type != NULL
4884 && the_low_target.supports_z_point_type (z_type));
4885 }
4886
4887 static int
4888 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
4889 int size, struct raw_breakpoint *bp)
4890 {
4891 if (the_low_target.insert_point != NULL)
4892 return the_low_target.insert_point (type, addr, size, bp);
4893 else
4894 /* Unsupported (see target.h). */
4895 return 1;
4896 }
4897
4898 static int
4899 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
4900 int size, struct raw_breakpoint *bp)
4901 {
4902 if (the_low_target.remove_point != NULL)
4903 return the_low_target.remove_point (type, addr, size, bp);
4904 else
4905 /* Unsupported (see target.h). */
4906 return 1;
4907 }
4908
4909 static int
4910 linux_stopped_by_watchpoint (void)
4911 {
4912 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4913
4914 return lwp->stopped_by_watchpoint;
4915 }
4916
4917 static CORE_ADDR
4918 linux_stopped_data_address (void)
4919 {
4920 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4921
4922 return lwp->stopped_data_address;
4923 }
4924
4925 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
4926 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
4927 && defined(PT_TEXT_END_ADDR)
4928
4929 /* This is only used for targets that define PT_TEXT_ADDR,
4930    PT_DATA_ADDR and PT_TEXT_END_ADDR.  If those are not defined, the
4931    target presumably has other ways of acquiring this information,
4932    such as loadmaps.  */
4933
4934 /* Under uClinux, programs are loaded at non-zero offsets, which we need
4935 to tell gdb about. */
4936
4937 static int
4938 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4939 {
4940 unsigned long text, text_end, data;
4941 int pid = lwpid_of (get_thread_lwp (current_inferior));
4942
4943 errno = 0;
4944
4945 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
4946 (PTRACE_TYPE_ARG4) 0);
4947 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
4948 (PTRACE_TYPE_ARG4) 0);
4949 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
4950 (PTRACE_TYPE_ARG4) 0);
4951
4952 if (errno == 0)
4953 {
4954 /* Both text and data offsets produced at compile-time (and so
4955 used by gdb) are relative to the beginning of the program,
4956 with the data segment immediately following the text segment.
4957 However, the actual runtime layout in memory may put the data
4958 somewhere else, so when we send gdb a data base-address, we
4959 use the real data base address and subtract the compile-time
4960 data base-address from it (which is just the length of the
4961 text segment). BSS immediately follows data in both
4962 cases. */
4963 *text_p = text;
4964 *data_p = data - (text_end - text);
4965
4966 return 1;
4967 }
4968 return 0;
4969 }
4970 #endif
4971
4972 static int
4973 linux_qxfer_osdata (const char *annex,
4974 unsigned char *readbuf, unsigned const char *writebuf,
4975 CORE_ADDR offset, int len)
4976 {
4977 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4978 }
4979
4980 /* Convert a native/host siginfo object into/from the siginfo layout
4981    of the inferior's architecture.  */
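/* A DIRECTION of 0 converts from the native layout in SIGINFO into
   INF_SIGINFO; a DIRECTION of 1 converts from INF_SIGINFO back into
   the native layout (see the calls in linux_xfer_siginfo below).  */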
4982
4983 static void
4984 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
4985 {
4986 int done = 0;
4987
4988 if (the_low_target.siginfo_fixup != NULL)
4989 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4990
4991 /* If there was no callback, or the callback didn't do anything,
4992 then just do a straight memcpy. */
4993 if (!done)
4994 {
4995 if (direction == 1)
4996 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
4997 else
4998 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
4999 }
5000 }
5001
5002 static int
5003 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
5004 unsigned const char *writebuf, CORE_ADDR offset, int len)
5005 {
5006 int pid;
5007 siginfo_t siginfo;
5008 char inf_siginfo[sizeof (siginfo_t)];
5009
5010 if (current_inferior == NULL)
5011 return -1;
5012
5013 pid = lwpid_of (current_inferior);
5014
5015 if (debug_threads)
5016 debug_printf ("%s siginfo for lwp %d.\n",
5017 readbuf != NULL ? "Reading" : "Writing",
5018 pid);
5019
5020 if (offset >= sizeof (siginfo))
5021 return -1;
5022
5023 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5024 return -1;
5025
5026 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5027 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5028 inferior with a 64-bit GDBSERVER should look the same as debugging it
5029 with a 32-bit GDBSERVER, we need to convert it. */
5030 siginfo_fixup (&siginfo, inf_siginfo, 0);
5031
5032 if (offset + len > sizeof (siginfo))
5033 len = sizeof (siginfo) - offset;
5034
5035 if (readbuf != NULL)
5036 memcpy (readbuf, inf_siginfo + offset, len);
5037 else
5038 {
5039 memcpy (inf_siginfo + offset, writebuf, len);
5040
5041 /* Convert back to ptrace layout before flushing it out. */
5042 siginfo_fixup (&siginfo, inf_siginfo, 1);
5043
5044 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5045 return -1;
5046 }
5047
5048 return len;
5049 }
5050
5051 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
5052    it notifies us when children change state; and it acts as the
5053    handler for the sigsuspend in my_waitpid.  */
5054
5055 static void
5056 sigchld_handler (int signo)
5057 {
5058 int old_errno = errno;
5059
5060 if (debug_threads)
5061 {
5062 do
5063 {
5064 /* fprintf is not async-signal-safe, so call write
5065 directly. */
5066 if (write (2, "sigchld_handler\n",
5067 sizeof ("sigchld_handler\n") - 1) < 0)
5068 break; /* just ignore */
5069 } while (0);
5070 }
5071
5072 if (target_is_async_p ())
5073 async_file_mark (); /* trigger a linux_wait */
5074
5075 errno = old_errno;
5076 }
5077
5078 static int
5079 linux_supports_non_stop (void)
5080 {
5081 return 1;
5082 }
5083
5084 static int
5085 linux_async (int enable)
5086 {
5087 int previous = target_is_async_p ();
5088
5089 if (debug_threads)
5090 debug_printf ("linux_async (%d), previous=%d\n",
5091 enable, previous);
5092
5093 if (previous != enable)
5094 {
5095 sigset_t mask;
5096 sigemptyset (&mask);
5097 sigaddset (&mask, SIGCHLD);
5098
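      /* Block SIGCHLD while we switch modes, so sigchld_handler never
	 observes the event pipe in a half-set-up state.  */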
5099 sigprocmask (SIG_BLOCK, &mask, NULL);
5100
5101 if (enable)
5102 {
5103 if (pipe (linux_event_pipe) == -1)
5104 fatal ("creating event pipe failed.");
5105
5106 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5107 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5108
5109 /* Register the event loop handler. */
5110 add_file_handler (linux_event_pipe[0],
5111 handle_target_event, NULL);
5112
5113 /* Always trigger a linux_wait. */
5114 async_file_mark ();
5115 }
5116 else
5117 {
5118 delete_file_handler (linux_event_pipe[0]);
5119
5120 close (linux_event_pipe[0]);
5121 close (linux_event_pipe[1]);
5122 linux_event_pipe[0] = -1;
5123 linux_event_pipe[1] = -1;
5124 }
5125
5126 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5127 }
5128
5129 return previous;
5130 }
5131
5132 static int
5133 linux_start_non_stop (int nonstop)
5134 {
5135 /* Register or unregister from event-loop accordingly. */
5136 linux_async (nonstop);
5137 return 0;
5138 }
5139
5140 static int
5141 linux_supports_multi_process (void)
5142 {
5143 return 1;
5144 }
5145
5146 static int
5147 linux_supports_disable_randomization (void)
5148 {
5149 #ifdef HAVE_PERSONALITY
5150 return 1;
5151 #else
5152 return 0;
5153 #endif
5154 }
5155
5156 static int
5157 linux_supports_agent (void)
5158 {
5159 return 1;
5160 }
5161
5162 static int
5163 linux_supports_range_stepping (void)
5164 {
5165   if (the_low_target.supports_range_stepping == NULL)
5166 return 0;
5167
5168 return (*the_low_target.supports_range_stepping) ();
5169 }
5170
5171 /* Enumerate spufs IDs for process PID. */
5172 static int
5173 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5174 {
5175 int pos = 0;
5176 int written = 0;
5177 char path[128];
5178 DIR *dir;
5179 struct dirent *entry;
5180
5181 sprintf (path, "/proc/%ld/fd", pid);
5182 dir = opendir (path);
5183 if (!dir)
5184 return -1;
5185
5186 rewinddir (dir);
5187 while ((entry = readdir (dir)) != NULL)
5188 {
5189 struct stat st;
5190 struct statfs stfs;
5191 int fd;
5192
5193 fd = atoi (entry->d_name);
5194 if (!fd)
5195 continue;
5196
5197 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5198 if (stat (path, &st) != 0)
5199 continue;
5200 if (!S_ISDIR (st.st_mode))
5201 continue;
5202
5203 if (statfs (path, &stfs) != 0)
5204 continue;
5205 if (stfs.f_type != SPUFS_MAGIC)
5206 continue;
5207
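      /* Each SPU context contributes one 4-byte ID.  POS tracks the
	 offset the ID would have in the complete list; only IDs that
	 fall entirely within [OFFSET, OFFSET+LEN) are copied out.  */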
5208 if (pos >= offset && pos + 4 <= offset + len)
5209 {
5210 *(unsigned int *)(buf + pos - offset) = fd;
5211 written += 4;
5212 }
5213 pos += 4;
5214 }
5215
5216 closedir (dir);
5217 return written;
5218 }
5219
5220 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5221 object type, using the /proc file system. */
5222 static int
5223 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5224 unsigned const char *writebuf,
5225 CORE_ADDR offset, int len)
5226 {
5227 long pid = lwpid_of (current_inferior);
5228 char buf[128];
5229 int fd = 0;
5230 int ret = 0;
5231
5232 if (!writebuf && !readbuf)
5233 return -1;
5234
5235 if (!*annex)
5236 {
5237 if (!readbuf)
5238 return -1;
5239 else
5240 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5241 }
5242
5243 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5244   fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
5245 if (fd <= 0)
5246 return -1;
5247
5248 if (offset != 0
5249 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5250 {
5251 close (fd);
5252 return 0;
5253 }
5254
5255 if (writebuf)
5256 ret = write (fd, writebuf, (size_t) len);
5257 else
5258 ret = read (fd, readbuf, (size_t) len);
5259
5260 close (fd);
5261 return ret;
5262 }
5263
5264 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5265 struct target_loadseg
5266 {
5267 /* Core address to which the segment is mapped. */
5268 Elf32_Addr addr;
5269 /* VMA recorded in the program header. */
5270 Elf32_Addr p_vaddr;
5271 /* Size of this segment in memory. */
5272 Elf32_Word p_memsz;
5273 };
5274
5275 # if defined PT_GETDSBT
5276 struct target_loadmap
5277 {
5278 /* Protocol version number, must be zero. */
5279 Elf32_Word version;
5280 /* Pointer to the DSBT table, its size, and the DSBT index. */
5281 unsigned *dsbt_table;
5282 unsigned dsbt_size, dsbt_index;
5283 /* Number of segments in this map. */
5284 Elf32_Word nsegs;
5285 /* The actual memory map. */
5286 struct target_loadseg segs[/*nsegs*/];
5287 };
5288 # define LINUX_LOADMAP PT_GETDSBT
5289 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5290 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5291 # else
5292 struct target_loadmap
5293 {
5294 /* Protocol version number, must be zero. */
5295 Elf32_Half version;
5296 /* Number of segments in this map. */
5297 Elf32_Half nsegs;
5298 /* The actual memory map. */
5299 struct target_loadseg segs[/*nsegs*/];
5300 };
5301 # define LINUX_LOADMAP PTRACE_GETFDPIC
5302 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5303 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5304 # endif
5305
5306 static int
5307 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5308 unsigned char *myaddr, unsigned int len)
5309 {
5310 int pid = lwpid_of (current_inferior);
5311 int addr = -1;
5312 struct target_loadmap *data = NULL;
5313 unsigned int actual_length, copy_length;
5314
5315 if (strcmp (annex, "exec") == 0)
5316 addr = (int) LINUX_LOADMAP_EXEC;
5317 else if (strcmp (annex, "interp") == 0)
5318 addr = (int) LINUX_LOADMAP_INTERP;
5319 else
5320 return -1;
5321
5322 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5323 return -1;
5324
5325 if (data == NULL)
5326 return -1;
5327
5328 actual_length = sizeof (struct target_loadmap)
5329 + sizeof (struct target_loadseg) * data->nsegs;
5330
5331 if (offset < 0 || offset > actual_length)
5332 return -1;
5333
5334 copy_length = actual_length - offset < len ? actual_length - offset : len;
5335 memcpy (myaddr, (char *) data + offset, copy_length);
5336 return copy_length;
5337 }
5338 #else
5339 # define linux_read_loadmap NULL
5340 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5341
5342 static void
5343 linux_process_qsupported (const char *query)
5344 {
5345 if (the_low_target.process_qsupported != NULL)
5346 the_low_target.process_qsupported (query);
5347 }
5348
5349 static int
5350 linux_supports_tracepoints (void)
5351 {
5352   if (the_low_target.supports_tracepoints == NULL)
5353 return 0;
5354
5355 return (*the_low_target.supports_tracepoints) ();
5356 }
5357
5358 static CORE_ADDR
5359 linux_read_pc (struct regcache *regcache)
5360 {
5361 if (the_low_target.get_pc == NULL)
5362 return 0;
5363
5364 return (*the_low_target.get_pc) (regcache);
5365 }
5366
5367 static void
5368 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5369 {
5370 gdb_assert (the_low_target.set_pc != NULL);
5371
5372 (*the_low_target.set_pc) (regcache, pc);
5373 }
5374
5375 static int
5376 linux_thread_stopped (struct thread_info *thread)
5377 {
5378 return get_thread_lwp (thread)->stopped;
5379 }
5380
5381 /* This exposes stop-all-threads functionality to other modules. */
5382
5383 static void
5384 linux_pause_all (int freeze)
5385 {
5386 stop_all_lwps (freeze, NULL);
5387 }
5388
5389 /* This exposes unstop-all-threads functionality to other gdbserver
5390 modules. */
5391
5392 static void
5393 linux_unpause_all (int unfreeze)
5394 {
5395 unstop_all_lwps (unfreeze, NULL);
5396 }
5397
5398 static int
5399 linux_prepare_to_access_memory (void)
5400 {
5401   /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5402 running LWP. */
5403 if (non_stop)
5404 linux_pause_all (1);
5405 return 0;
5406 }
5407
5408 static void
5409 linux_done_accessing_memory (void)
5410 {
5411   /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5412 running LWP. */
5413 if (non_stop)
5414 linux_unpause_all (1);
5415 }
5416
5417 static int
5418 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5419 CORE_ADDR collector,
5420 CORE_ADDR lockaddr,
5421 ULONGEST orig_size,
5422 CORE_ADDR *jump_entry,
5423 CORE_ADDR *trampoline,
5424 ULONGEST *trampoline_size,
5425 unsigned char *jjump_pad_insn,
5426 ULONGEST *jjump_pad_insn_size,
5427 CORE_ADDR *adjusted_insn_addr,
5428 CORE_ADDR *adjusted_insn_addr_end,
5429 char *err)
5430 {
5431 return (*the_low_target.install_fast_tracepoint_jump_pad)
5432 (tpoint, tpaddr, collector, lockaddr, orig_size,
5433 jump_entry, trampoline, trampoline_size,
5434 jjump_pad_insn, jjump_pad_insn_size,
5435 adjusted_insn_addr, adjusted_insn_addr_end,
5436 err);
5437 }
5438
5439 static struct emit_ops *
5440 linux_emit_ops (void)
5441 {
5442 if (the_low_target.emit_ops != NULL)
5443 return (*the_low_target.emit_ops) ();
5444 else
5445 return NULL;
5446 }
5447
5448 static int
5449 linux_get_min_fast_tracepoint_insn_len (void)
5450 {
5451 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5452 }
5453
5454 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5455
5456 static int
5457 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5458 CORE_ADDR *phdr_memaddr, int *num_phdr)
5459 {
5460 char filename[PATH_MAX];
5461 int fd;
5462 const int auxv_size = is_elf64
5463 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5464 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5465
5466 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5467
5468 fd = open (filename, O_RDONLY);
5469 if (fd < 0)
5470 return 1;
5471
5472 *phdr_memaddr = 0;
5473 *num_phdr = 0;
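  /* Read auxv entries one at a time until both AT_PHDR and AT_PHNUM
     have been found, or end-of-file is reached.  */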
5474 while (read (fd, buf, auxv_size) == auxv_size
5475 && (*phdr_memaddr == 0 || *num_phdr == 0))
5476 {
5477 if (is_elf64)
5478 {
5479 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5480
5481 switch (aux->a_type)
5482 {
5483 case AT_PHDR:
5484 *phdr_memaddr = aux->a_un.a_val;
5485 break;
5486 case AT_PHNUM:
5487 *num_phdr = aux->a_un.a_val;
5488 break;
5489 }
5490 }
5491 else
5492 {
5493 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5494
5495 switch (aux->a_type)
5496 {
5497 case AT_PHDR:
5498 *phdr_memaddr = aux->a_un.a_val;
5499 break;
5500 case AT_PHNUM:
5501 *num_phdr = aux->a_un.a_val;
5502 break;
5503 }
5504 }
5505 }
5506
5507 close (fd);
5508
5509 if (*phdr_memaddr == 0 || *num_phdr == 0)
5510 {
5511 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5512 "phdr_memaddr = %ld, phdr_num = %d",
5513 (long) *phdr_memaddr, *num_phdr);
5514 return 2;
5515 }
5516
5517 return 0;
5518 }
5519
5520 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5521
5522 static CORE_ADDR
5523 get_dynamic (const int pid, const int is_elf64)
5524 {
5525 CORE_ADDR phdr_memaddr, relocation;
5526 int num_phdr, i;
5527 unsigned char *phdr_buf;
5528 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5529
5530 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5531 return 0;
5532
5533 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5534 phdr_buf = alloca (num_phdr * phdr_size);
5535
5536 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5537 return 0;
5538
5539 /* Compute relocation: it is expected to be 0 for "regular" executables,
5540 non-zero for PIE ones. */
5541 relocation = -1;
5542 for (i = 0; relocation == -1 && i < num_phdr; i++)
5543 if (is_elf64)
5544 {
5545 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5546
5547 if (p->p_type == PT_PHDR)
5548 relocation = phdr_memaddr - p->p_vaddr;
5549 }
5550 else
5551 {
5552 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5553
5554 if (p->p_type == PT_PHDR)
5555 relocation = phdr_memaddr - p->p_vaddr;
5556 }
5557
5558 if (relocation == -1)
5559 {
5560       /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately,
5561 	 real world executables, including PIE executables, always have
5562 	 PT_PHDR present.  PT_PHDR is absent from some shared libraries and
5563 	 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
5564 	 provides DT_DEBUG anyway (fpc binaries are statically linked).
5565 
5566 	 Therefore, if DT_DEBUG exists, PT_PHDR always exists as well.
5567 
5568 	 GDB could also find RELOCATION from AT_ENTRY - e_entry.  */
5569
5570 return 0;
5571 }
5572
5573 for (i = 0; i < num_phdr; i++)
5574 {
5575 if (is_elf64)
5576 {
5577 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5578
5579 if (p->p_type == PT_DYNAMIC)
5580 return p->p_vaddr + relocation;
5581 }
5582 else
5583 {
5584 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5585
5586 if (p->p_type == PT_DYNAMIC)
5587 return p->p_vaddr + relocation;
5588 }
5589 }
5590
5591 return 0;
5592 }
5593
5594 /* Return &_r_debug in the inferior, or -1 if not present. Return value
5595 can be 0 if the inferior does not yet have the library list initialized.
5596 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
5597 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
5598
5599 static CORE_ADDR
5600 get_r_debug (const int pid, const int is_elf64)
5601 {
5602 CORE_ADDR dynamic_memaddr;
5603 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
5604 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
5605 CORE_ADDR map = -1;
5606
5607 dynamic_memaddr = get_dynamic (pid, is_elf64);
5608 if (dynamic_memaddr == 0)
5609 return map;
5610
5611 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
5612 {
5613 if (is_elf64)
5614 {
5615 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
5616 #ifdef DT_MIPS_RLD_MAP
5617 union
5618 {
5619 Elf64_Xword map;
5620 unsigned char buf[sizeof (Elf64_Xword)];
5621 }
5622 rld_map;
5623
5624 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5625 {
5626 if (linux_read_memory (dyn->d_un.d_val,
5627 rld_map.buf, sizeof (rld_map.buf)) == 0)
5628 return rld_map.map;
5629 else
5630 break;
5631 }
5632 #endif /* DT_MIPS_RLD_MAP */
5633
5634 if (dyn->d_tag == DT_DEBUG && map == -1)
5635 map = dyn->d_un.d_val;
5636
5637 if (dyn->d_tag == DT_NULL)
5638 break;
5639 }
5640 else
5641 {
5642 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
5643 #ifdef DT_MIPS_RLD_MAP
5644 union
5645 {
5646 Elf32_Word map;
5647 unsigned char buf[sizeof (Elf32_Word)];
5648 }
5649 rld_map;
5650
5651 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5652 {
5653 if (linux_read_memory (dyn->d_un.d_val,
5654 rld_map.buf, sizeof (rld_map.buf)) == 0)
5655 return rld_map.map;
5656 else
5657 break;
5658 }
5659 #endif /* DT_MIPS_RLD_MAP */
5660
5661 if (dyn->d_tag == DT_DEBUG && map == -1)
5662 map = dyn->d_un.d_val;
5663
5664 if (dyn->d_tag == DT_NULL)
5665 break;
5666 }
5667
5668 dynamic_memaddr += dyn_size;
5669 }
5670
5671 return map;
5672 }
5673
5674 /* Read one pointer from MEMADDR in the inferior. */
5675
5676 static int
5677 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
5678 {
5679 int ret;
5680
5681   /* Go through a union so this works on either big- or little-endian
5682      hosts, when the inferior's pointer size is smaller than the size
5683      of CORE_ADDR.  It is assumed the inferior's endianness is the
5684      same as the superior's.  */
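  /* For instance (a hypothetical big-endian case): with an 8-byte
     CORE_ADDR and a 4-byte inferior pointer, the 4 bytes read below
     fill the start of the union, which is exactly ADDR.UI; reading
     ADDR.CORE_ADDR instead would interpret those bytes as the
     high-order half of an 8-byte value.  */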
5685 union
5686 {
5687 CORE_ADDR core_addr;
5688 unsigned int ui;
5689 unsigned char uc;
5690 } addr;
5691
5692 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
5693 if (ret == 0)
5694 {
5695 if (ptr_size == sizeof (CORE_ADDR))
5696 *ptr = addr.core_addr;
5697 else if (ptr_size == sizeof (unsigned int))
5698 *ptr = addr.ui;
5699 else
5700 gdb_assert_not_reached ("unhandled pointer size");
5701 }
5702 return ret;
5703 }
5704
5705 struct link_map_offsets
5706 {
5707 /* Offset and size of r_debug.r_version. */
5708 int r_version_offset;
5709
5710 /* Offset and size of r_debug.r_map. */
5711 int r_map_offset;
5712
5713 /* Offset to l_addr field in struct link_map. */
5714 int l_addr_offset;
5715
5716 /* Offset to l_name field in struct link_map. */
5717 int l_name_offset;
5718
5719 /* Offset to l_ld field in struct link_map. */
5720 int l_ld_offset;
5721
5722 /* Offset to l_next field in struct link_map. */
5723 int l_next_offset;
5724
5725 /* Offset to l_prev field in struct link_map. */
5726 int l_prev_offset;
5727 };
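/* These offsets mirror the layout of the runtime linker's `struct
   r_debug' and `struct link_map' (cf. glibc's <link.h>); the 32-bit
   and 64-bit instances below spell out the usual SVR4 layouts.  */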
5728
5729 /* Construct qXfer:libraries-svr4:read reply. */
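/* For illustration, a non-empty reply document has this shape (the
   addresses and library name are hypothetical, and the real document
   is emitted without the line breaks shown here):

     <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
       <library name="/lib64/libc.so.6" lm="0x7ffff7fc3700"
		l_addr="0x7ffff7a00000" l_ld="0x7ffff7dcdba0"/>
     </library-list-svr4>

   An empty list collapses to `<library-list-svr4 version="1.0"/>'.  */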
5730
5731 static int
5732 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
5733 unsigned const char *writebuf,
5734 CORE_ADDR offset, int len)
5735 {
5736 char *document;
5737 unsigned document_len;
5738 struct process_info_private *const priv = current_process ()->private;
5739 char filename[PATH_MAX];
5740 int pid, is_elf64;
5741
5742 static const struct link_map_offsets lmo_32bit_offsets =
5743 {
5744 0, /* r_version offset. */
5745 4, /* r_debug.r_map offset. */
5746 0, /* l_addr offset in link_map. */
5747 4, /* l_name offset in link_map. */
5748 8, /* l_ld offset in link_map. */
5749 12, /* l_next offset in link_map. */
5750 16 /* l_prev offset in link_map. */
5751 };
5752
5753 static const struct link_map_offsets lmo_64bit_offsets =
5754 {
5755 0, /* r_version offset. */
5756 8, /* r_debug.r_map offset. */
5757 0, /* l_addr offset in link_map. */
5758 8, /* l_name offset in link_map. */
5759 16, /* l_ld offset in link_map. */
5760 24, /* l_next offset in link_map. */
5761 32 /* l_prev offset in link_map. */
5762 };
5763 const struct link_map_offsets *lmo;
5764 unsigned int machine;
5765 int ptr_size;
5766 CORE_ADDR lm_addr = 0, lm_prev = 0;
5767 int allocated = 1024;
5768 char *p;
5769 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
5770 int header_done = 0;
5771
5772 if (writebuf != NULL)
5773 return -2;
5774 if (readbuf == NULL)
5775 return -1;
5776
5777 pid = lwpid_of (current_inferior);
5778 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
5779 is_elf64 = elf_64_file_p (filename, &machine);
5780 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
5781 ptr_size = is_elf64 ? 8 : 4;
5782
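  /* The annex is a sequence of `name=hexvalue;' pairs; we recognize
     `start' and `prev', e.g. (hypothetically)
     "start=7ffff7fc3700;prev=0;".  Unrecognized pairs are skipped.  */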
5783 while (annex[0] != '\0')
5784 {
5785 const char *sep;
5786 CORE_ADDR *addrp;
5787       int keyword_len;
5788
5789 sep = strchr (annex, '=');
5790 if (sep == NULL)
5791 break;
5792
5793       keyword_len = sep - annex;
5794       if (keyword_len == 5 && strncmp (annex, "start", 5) == 0)
5795 	addrp = &lm_addr;
5796       else if (keyword_len == 4 && strncmp (annex, "prev", 4) == 0)
5797 addrp = &lm_prev;
5798 else
5799 {
5800 annex = strchr (sep, ';');
5801 if (annex == NULL)
5802 break;
5803 annex++;
5804 continue;
5805 }
5806
5807 annex = decode_address_to_semicolon (addrp, sep + 1);
5808 }
5809
5810 if (lm_addr == 0)
5811 {
5812 int r_version = 0;
5813
5814 if (priv->r_debug == 0)
5815 priv->r_debug = get_r_debug (pid, is_elf64);
5816
5817       /* We failed to find DT_DEBUG.  This situation will not change
5818 	 for this inferior, so do not retry it.  Report it to GDB as
5819 	 E01; see the GDB side in solib-svr4.c for the reasons.  */
5820 if (priv->r_debug == (CORE_ADDR) -1)
5821 return -1;
5822
5823 if (priv->r_debug != 0)
5824 {
5825 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
5826 (unsigned char *) &r_version,
5827 sizeof (r_version)) != 0
5828 || r_version != 1)
5829 {
5830 warning ("unexpected r_debug version %d", r_version);
5831 }
5832 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
5833 &lm_addr, ptr_size) != 0)
5834 {
5835 warning ("unable to read r_map from 0x%lx",
5836 		       (long) (priv->r_debug + lmo->r_map_offset));
5837 }
5838 }
5839 }
5840
5841 document = xmalloc (allocated);
5842 strcpy (document, "<library-list-svr4 version=\"1.0\"");
5843 p = document + strlen (document);
5844
5845 while (lm_addr
5846 && read_one_ptr (lm_addr + lmo->l_name_offset,
5847 &l_name, ptr_size) == 0
5848 && read_one_ptr (lm_addr + lmo->l_addr_offset,
5849 &l_addr, ptr_size) == 0
5850 && read_one_ptr (lm_addr + lmo->l_ld_offset,
5851 &l_ld, ptr_size) == 0
5852 && read_one_ptr (lm_addr + lmo->l_prev_offset,
5853 &l_prev, ptr_size) == 0
5854 && read_one_ptr (lm_addr + lmo->l_next_offset,
5855 &l_next, ptr_size) == 0)
5856 {
5857 unsigned char libname[PATH_MAX];
5858
5859 if (lm_prev != l_prev)
5860 {
5861 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
5862 (long) lm_prev, (long) l_prev);
5863 break;
5864 }
5865
5866 	  /* Ignore the first entry even if it has a valid name, as it
5867 	     corresponds to the main executable.  The first entry should not be
5868 	     skipped if the dynamic loader was loaded late by a static executable
5869 	     (see the solib-svr4.c parameter ignore_first).  But in that case the
5870 	     main executable has no PT_DYNAMIC, and this function has already
5871 	     returned above because get_r_debug failed.  */
5872 if (lm_prev == 0)
5873 {
5874 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
5875 p = p + strlen (p);
5876 }
5877 else
5878 {
5879 /* Not checking for error because reading may stop before
5880 we've got PATH_MAX worth of characters. */
5881 libname[0] = '\0';
5882 linux_read_memory (l_name, libname, sizeof (libname) - 1);
5883 libname[sizeof (libname) - 1] = '\0';
5884 if (libname[0] != '\0')
5885 {
5886 /* 6x the size for xml_escape_text below. */
5887 	      size_t name_len = 6 * strlen ((char *) libname);
5888 char *name;
5889
5890 if (!header_done)
5891 {
5892 /* Terminate `<library-list-svr4'. */
5893 *p++ = '>';
5894 header_done = 1;
5895 }
5896
5897 	      while (allocated < p - document + name_len + 200)
5898 {
5899 /* Expand to guarantee sufficient storage. */
5900 		  uintptr_t document_offset = p - document;
5901
5902 document = xrealloc (document, 2 * allocated);
5903 allocated *= 2;
5904 		  p = document + document_offset;
5905 }
5906
5907 name = xml_escape_text ((char *) libname);
5908 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
5909 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
5910 name, (unsigned long) lm_addr,
5911 (unsigned long) l_addr, (unsigned long) l_ld);
5912 free (name);
5913 }
5914 }
5915
5916 lm_prev = lm_addr;
5917 lm_addr = l_next;
5918 }
5919
5920 if (!header_done)
5921 {
5922 /* Empty list; terminate `<library-list-svr4'. */
5923 strcpy (p, "/>");
5924 }
5925 else
5926 strcpy (p, "</library-list-svr4>");
5927
5928 document_len = strlen (document);
5929 if (offset < document_len)
5930 document_len -= offset;
5931 else
5932 document_len = 0;
5933 if (len > document_len)
5934 len = document_len;
5935
5936 memcpy (readbuf, document + offset, len);
5937 xfree (document);
5938
5939 return len;
5940 }
5941
5942 #ifdef HAVE_LINUX_BTRACE
5943
5944 /* See to_enable_btrace target method. */
5945
5946 static struct btrace_target_info *
5947 linux_low_enable_btrace (ptid_t ptid)
5948 {
5949 struct btrace_target_info *tinfo;
5950
5951 tinfo = linux_enable_btrace (ptid);
5952
5953 if (tinfo != NULL)
5954 {
5955 struct thread_info *thread = find_thread_ptid (ptid);
5956 struct regcache *regcache = get_thread_regcache (thread, 0);
5957
5958 tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
5959 }
5960
5961 return tinfo;
5962 }
5963
5964 /* See to_disable_btrace target method. */
5965
5966 static int
5967 linux_low_disable_btrace (struct btrace_target_info *tinfo)
5968 {
5969 enum btrace_error err;
5970
5971 err = linux_disable_btrace (tinfo);
5972 return (err == BTRACE_ERR_NONE ? 0 : -1);
5973 }
5974
5975 /* See to_read_btrace target method. */
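/* For reference, the document built below has this shape (the block
   addresses are hypothetical):

     <!DOCTYPE btrace SYSTEM "btrace.dtd">
     <btrace version="1.0">
     <block begin="0x400400" end="0x400430"/>
     </btrace>  */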
5976
5977 static int
5978 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
5979 int type)
5980 {
5981 VEC (btrace_block_s) *btrace;
5982 struct btrace_block *block;
5983 enum btrace_error err;
5984 int i;
5985
5986 btrace = NULL;
5987 err = linux_read_btrace (&btrace, tinfo, type);
5988 if (err != BTRACE_ERR_NONE)
5989 {
5990 if (err == BTRACE_ERR_OVERFLOW)
5991 buffer_grow_str0 (buffer, "E.Overflow.");
5992 else
5993 buffer_grow_str0 (buffer, "E.Generic Error.");
5994
5995 return -1;
5996 }
5997
5998 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
5999 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6000
6001 for (i = 0; VEC_iterate (btrace_block_s, btrace, i, block); i++)
6002 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6003 paddress (block->begin), paddress (block->end));
6004
6005 buffer_grow_str0 (buffer, "</btrace>\n");
6006
6007 VEC_free (btrace_block_s, btrace);
6008
6009 return 0;
6010 }
6011 #endif /* HAVE_LINUX_BTRACE */
6012
6013 static struct target_ops linux_target_ops = {
6014 linux_create_inferior,
6015 linux_attach,
6016 linux_kill,
6017 linux_detach,
6018 linux_mourn,
6019 linux_join,
6020 linux_thread_alive,
6021 linux_resume,
6022 linux_wait,
6023 linux_fetch_registers,
6024 linux_store_registers,
6025 linux_prepare_to_access_memory,
6026 linux_done_accessing_memory,
6027 linux_read_memory,
6028 linux_write_memory,
6029 linux_look_up_symbols,
6030 linux_request_interrupt,
6031 linux_read_auxv,
6032 linux_supports_z_point_type,
6033 linux_insert_point,
6034 linux_remove_point,
6035 linux_stopped_by_watchpoint,
6036 linux_stopped_data_address,
6037 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6038 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6039 && defined(PT_TEXT_END_ADDR)
6040 linux_read_offsets,
6041 #else
6042 NULL,
6043 #endif
6044 #ifdef USE_THREAD_DB
6045 thread_db_get_tls_address,
6046 #else
6047 NULL,
6048 #endif
6049 linux_qxfer_spu,
6050 hostio_last_error_from_errno,
6051 linux_qxfer_osdata,
6052 linux_xfer_siginfo,
6053 linux_supports_non_stop,
6054 linux_async,
6055 linux_start_non_stop,
6056 linux_supports_multi_process,
6057 #ifdef USE_THREAD_DB
6058 thread_db_handle_monitor_command,
6059 #else
6060 NULL,
6061 #endif
6062 linux_common_core_of_thread,
6063 linux_read_loadmap,
6064 linux_process_qsupported,
6065 linux_supports_tracepoints,
6066 linux_read_pc,
6067 linux_write_pc,
6068 linux_thread_stopped,
6069 NULL,
6070 linux_pause_all,
6071 linux_unpause_all,
6072 linux_cancel_breakpoints,
6073 linux_stabilize_threads,
6074 linux_install_fast_tracepoint_jump_pad,
6075 linux_emit_ops,
6076 linux_supports_disable_randomization,
6077 linux_get_min_fast_tracepoint_insn_len,
6078 linux_qxfer_libraries_svr4,
6079 linux_supports_agent,
6080 #ifdef HAVE_LINUX_BTRACE
6081 linux_supports_btrace,
6082 linux_low_enable_btrace,
6083 linux_low_disable_btrace,
6084 linux_low_read_btrace,
6085 #else
6086 NULL,
6087 NULL,
6088 NULL,
6089 NULL,
6090 #endif
6091 linux_supports_range_stepping,
6092 };
6093
6094 static void
6095 linux_init_signals (void)
6096 {
6097 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
6098 to find what the cancel signal actually is. */
6099 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
6100   signal (__SIGRTMIN + 1, SIG_IGN);
6101 #endif
6102 }
6103
6104 #ifdef HAVE_LINUX_REGSETS
6105 void
6106 initialize_regsets_info (struct regsets_info *info)
6107 {
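  /* Count the regsets: the array in INFO is expected to be terminated
     by a sentinel entry whose size field is negative.  */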
6108 for (info->num_regsets = 0;
6109 info->regsets[info->num_regsets].size >= 0;
6110 info->num_regsets++)
6111 ;
6112 }
6113 #endif
6114
6115 void
6116 initialize_low (void)
6117 {
6118 struct sigaction sigchld_action;
6119 memset (&sigchld_action, 0, sizeof (sigchld_action));
6120 set_target_ops (&linux_target_ops);
6121 set_breakpoint_data (the_low_target.breakpoint,
6122 the_low_target.breakpoint_len);
6123 linux_init_signals ();
6124 linux_ptrace_init_warnings ();
6125
6126 sigchld_action.sa_handler = sigchld_handler;
6127 sigemptyset (&sigchld_action.sa_mask);
6128 sigchld_action.sa_flags = SA_RESTART;
6129 sigaction (SIGCHLD, &sigchld_action, NULL);
6130
6131 initialize_low_arch ();
6132 }