[GDBserver] Avoid stale errno
binutils-gdb.git: gdb/gdbserver/linux-low.c
/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2014 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "agent.h"

#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#include <stdio.h>
#include <sys/ptrace.h>
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "filestuff.h"
#include "tracepoint.h"
#include "hostio.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif

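/* Illustrative sketch (hypothetical, compiled out): W_STOPCODE
   composes a raw wait status reporting a stop by signal SIG, i.e. it
   is the inverse of the standard WIFSTOPPED/WSTOPSIG macros.  */
#if 0
static void
w_stopcode_example (void)
{
  int wstat = W_STOPCODE (SIGTRAP);  /* Low byte 0x7f marks a stop.  */

  gdb_assert (WIFSTOPPED (wstat));
  gdb_assert (WSTOPSIG (wstat) == SIGTRAP);  /* Signal is in bits 8-15.  */
}
#endif
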
/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR (0x10000*4)
#define PT_DATA_ADDR (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;  /* Entry type */
  union
    {
      uint32_t a_val;  /* Integer value */
      /* We used to have pointer elements added here.  We cannot do
	 that, though, since it does not work when using 32-bit
	 definitions on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;  /* Entry type */
  union
    {
      uint64_t a_val;  /* Integer value */
      /* We used to have pointer elements added here.  We cannot do
	 that, though, since it does not work when using 32-bit
	 definitions on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}

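/* Illustrative sketch (hypothetical, compiled out) of the race that
   STOPPED_PIDS exists to handle: when a traced thread clones, the
   new child's initial stop may be reported by waitpid before the
   parent's clone event names it.  A status arriving for an unknown
   lwpid is parked with add_to_pid_list; handle_extended_wait later
   claims it with pull_pid_from_list instead of calling waitpid a
   second time.  */
#if 0
static void
stopped_pids_example (int unknown_lwpid, int wstat)
{
  int status;

  /* A stop was reported for an lwp we don't know about yet: park it.  */
  add_to_pid_list (&stopped_pids, unknown_lwpid, wstat);

  /* Later, once a clone event names that lwpid, claim the parked
     status instead of waiting again.  */
  if (pull_pid_from_list (&stopped_pids, unknown_lwpid, &status))
    gdb_assert (status == wstat);
}
#endif
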
enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
				  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
					  int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (void);

/* Return 1 if HEADER is a 64-bit ELF file, 0 if it is a 32-bit ELF
   file, and -1 if it is not an ELF file at all.  On success, store
   the machine in *MACHINE; otherwise set it to EM_NONE.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accept an integer PID; return true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}

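/* Illustrative sketch (hypothetical, compiled out): a caller could
   use linux_pid_exe_is_elf_64_file to pick a word size.  The result
   is tri-state, mirroring elf_64_file_p: 1 for ELF64, 0 for ELF32,
   and -1 if /proc/PID/exe cannot be read.  */
#if 0
static int
inferior_word_size_example (int pid)
{
  unsigned int machine;
  int is_64 = linux_pid_exe_is_elf_64_file (pid, &machine);

  if (is_64 < 0)
    return -1;		/* Process gone or /proc unreadable.  */
  return is_64 ? 8 : 4;	/* Bytes per word, from EI_CLASS.  */
}
#endif
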
static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->private = xcalloc (1, sizeof (*proc->private));

  /* Set the arch when the first LWP stops.  */
  proc->private->new_inferior = 1;

  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = wstat >> 16;
  struct thread_info *event_thr = get_lwp_thread (event_child);
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
	{
	  if (stopping_threads != NOT_STOPPING_THREADS)
	    new_lwp->stop_pc = get_stop_pc (new_lwp);
	  else
	    linux_resume_one_lwp (new_lwp, 0, 0, NULL);
	}
      else
	{
	  new_lwp->stop_expected = 1;

	  if (stopping_threads != NOT_STOPPING_THREADS)
	    {
	      new_lwp->stop_pc = get_stop_pc (new_lwp);
	      new_lwp->status_pending_p = 1;
	      new_lwp->status_pending = status;
	    }
	  else
	    /* Pass the signal on.  This is what GDB does - except
	       shouldn't we really report it instead?  */
	    linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
	}

      /* Always resume the current thread.  If we are stopping
	 threads, it will have a pending SIGSTOP; we may as well
	 collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}

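/* Illustrative sketch (hypothetical, compiled out): the raw ptrace
   protocol behind the clone handshake above.  With the
   PTRACE_O_TRACECLONE option set on a tracee, a clone stops the
   parent with SIGTRAP and PTRACE_EVENT_CLONE in bits 16-23 of the
   wait status, and PTRACE_GETEVENTMSG yields the new lwpid.  This
   omits all of gdbserver's bookkeeping.  */
#if 0
static void
clone_event_example (int parent_lwpid)
{
  int wstat;
  unsigned long new_lwpid;

  my_waitpid (parent_lwpid, &wstat, __WALL);
  if (WIFSTOPPED (wstat)
      && WSTOPSIG (wstat) == SIGTRAP
      && (wstat >> 16) == PTRACE_EVENT_CLONE)
    {
      /* Fetch the lwpid of the just-created thread.  */
      ptrace (PTRACE_GETEVENTMSG, parent_lwpid, (PTRACE_TYPE_ARG3) 0,
	      &new_lwpid);

      /* The new thread starts with a pending SIGSTOP; collect it
	 before issuing further ptrace requests on it.  */
      my_waitpid (new_lwpid, &wstat, __WALL);
    }
}
#endif
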
/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_inferior, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_inferior = saved_inferior;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:
   If we were single-stepping this process using PTRACE_SINGLESTEP,
   we will get only the one SIGTRAP (even if the instruction we
   stepped over was a breakpoint).  The value of $eip will be the
   next instruction.
   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If we cancel the SIGTRAP, we
   must resume at the decremented PC.

   (Presumably, not yet tested) On a non-decr_pc_after_break machine
   with hardware or kernel single-step:
   If we single-step over a breakpoint instruction, our PC will
   point at the following instruction.  If we continue and hit a
   breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static CORE_ADDR
get_stop_pc (struct lwp_info *lwp)
{
  CORE_ADDR stop_pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  stop_pc = get_pc (lwp);

  if (WSTOPSIG (lwp->last_status) == SIGTRAP
      && !lwp->stepping
      && !lwp->stopped_by_watchpoint
      && lwp->last_status >> 16 == 0)
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    debug_printf ("stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}

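/* Illustrative sketch (hypothetical, compiled out): the
   decr_pc_after_break adjustment above, with concrete x86 numbers
   (the addresses are made up).  */
#if 0
static void
decr_pc_example (void)
{
  /* Suppose a breakpoint byte (0xcc) is planted at 0x1000.  When the
     inferior executes it under PTRACE_CONT, the SIGTRAP is reported
     with the PC already past the trap instruction.  */
  CORE_ADDR reported_pc = 0x1001;

  /* On x86, decr_pc_after_break is 1, so the stop PC we record (and
     resume at, if the SIGTRAP is cancelled) is the breakpoint
     address itself.  */
  CORE_ADDR stop_pc = reported_pc - 1;

  /* Hardware single-step does not advance the PC this way, which is
     why get_stop_pc skips the adjustment when lwp->stepping is
     set.  */
  gdb_assert (stop_pc == 0x1000);
}
#endif
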
static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  if (the_low_target.new_thread != NULL)
    lwp->arch_private = the_low_target.new_thread ();

  lwp->thread = add_thread (ptid, lwp);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
	{
	  personality_set = 1;
	  personality (personality_orig | ADDR_NO_RANDOMIZE);
	}
      if (errno != 0 || (personality_set
			 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
	warning ("Error disabling address space randomization: %s",
		 strerror (errno));
    }
#endif

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
	 stdout to stderr so that inferior i/o doesn't corrupt the connection.
	 Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
	{
	  close (0);
	  open ("/dev/null", O_RDONLY);
	  dup2 (2, 1);
	  if (write (2, "stdin/stdout redirected\n",
		     sizeof ("stdin/stdout redirected\n") - 1) < 0)
	    {
	      /* Errors ignored.  */;
	    }
	}

      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
	warning ("Error restoring address space randomization: %s",
		 strerror (errno));
    }
#endif

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

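/* Illustrative sketch (hypothetical, compiled out): the
   personality(2) dance used above, in isolation.  Querying with
   0xffffffff is the documented way to read the current persona
   without changing it; the persona is inherited across fork/exec, so
   setting ADDR_NO_RANDOMIZE before the fork disables ASLR in the
   child.  */
#if 0
static void
disable_aslr_example (void)
{
  int orig;

  errno = 0;
  orig = personality (0xffffffff);	/* Query only.  */
  if (errno == 0 && !(orig & ADDR_NO_RANDOMIZE))
    {
      personality (orig | ADDR_NO_RANDOMIZE);
      /* ... fork and exec the inferior here; it inherits the
	 persona ...  */
      personality (orig);		/* Restore for ourselves.  */
    }
}
#endif
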
char *
linux_attach_fail_reason_string (ptid_t ptid, int err)
{
  static char *reason_string;
  struct buffer buffer;
  char *warnings;
  long lwpid = ptid_get_lwp (ptid);

  xfree (reason_string);

  buffer_init (&buffer);
  linux_ptrace_attach_fail_reason (lwpid, &buffer);
  buffer_grow_str0 (&buffer, "");
  warnings = buffer_finish (&buffer);
  if (warnings[0] != '\0')
    reason_string = xstrprintf ("%s (%d), %s",
				strerror (err), err, warnings);
  else
    reason_string = xstrprintf ("%s (%d)",
				strerror (err), err);
  xfree (warnings);
  return reason_string;
}

/* Attach to an inferior process.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}

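/* Illustrative sketch (hypothetical, compiled out): the minimal
   PTRACE_ATTACH handshake that linux_attach_lwp builds on.
   Attaching queues a SIGSTOP; the tracee is not usable for further
   ptrace requests until that stop has been collected.  */
#if 0
static int
attach_example (int lwpid)
{
  int wstat;

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) != 0)
    return errno;		/* EPERM, ESRCH, ...  */

  /* Wait for the attach-induced stop before touching the lwp.  */
  if (my_waitpid (lwpid, &wstat, __WALL) != lwpid || !WIFSTOPPED (wstat))
    return -1;

  return 0;
}
#endif
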
/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    error ("Cannot attach to process %ld: %s",
	   pid, linux_attach_fail_reason_string (ptid, err));

  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
	 process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  if (linux_proc_get_tgid (pid) == pid)
    {
      DIR *dir;
      char pathname[128];

      sprintf (pathname, "/proc/%ld/task", pid);

      dir = opendir (pathname);

      if (!dir)
	{
	  fprintf (stderr, "Could not open /proc/%ld/task.\n", pid);
	  fflush (stderr);
	}
      else
	{
	  /* At this point we attached to the tgid.  Scan the task for
	     existing threads.  */
	  int new_threads_found;
	  int iterations = 0;

	  while (iterations < 2)
	    {
	      struct dirent *dp;

	      new_threads_found = 0;
	      /* Add all the other threads.  While we go through the
		 threads, new threads may be spawned.  Cycle through
		 the list of threads until we have done two iterations without
		 finding new threads.  */
	      while ((dp = readdir (dir)) != NULL)
		{
		  unsigned long lwp;
		  ptid_t ptid;

		  /* Fetch one lwp.  */
		  lwp = strtoul (dp->d_name, NULL, 10);

		  ptid = ptid_build (pid, lwp, 0);

		  /* Is this a new thread?  */
		  if (lwp != 0 && find_thread_ptid (ptid) == NULL)
		    {
		      int err;

		      if (debug_threads)
			debug_printf ("Found new lwp %ld\n", lwp);

		      err = linux_attach_lwp (ptid);
		      if (err != 0)
			warning ("Cannot attach to lwp %ld: %s",
				 lwp,
				 linux_attach_fail_reason_string (ptid, err));

		      new_threads_found++;
		    }
		}

	      if (!new_threads_found)
		iterations++;
	      else
		iterations = 0;

	      rewinddir (dir);
	    }
	  closedir (dir);
	}
    }

  return 0;
}

struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
	return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (int pid)
{
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
			 second_thread_of_pid_p, &counter) == NULL);
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }
}

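/* Illustrative sketch (hypothetical, compiled out): the save-errno
   pattern used in linux_kill_one_lwp above, which is also the point
   of this commit's title.  errno is cleared before the syscall so
   that a stale value left by an earlier call can't be mistaken for a
   failure, and it is captured immediately afterwards so that later
   calls (here, a print) can't clobber the value we report.  */
#if 0
static void
save_errno_example (int pid)
{
  errno = 0;			/* Don't inherit a stale errno.  */
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  {
    int save_errno = errno;	/* Capture before anything else runs.  */

    fprintf (stderr, "PTRACE_KILL %d: %s\n", pid,
	     save_errno ? strerror (save_errno) : "OK");
    /* Reading errno here instead of save_errno could report whatever
       fprintf itself last set.  */
  }
}
#endif
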
/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use linux_wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  gdb_assert (res > 0);
}

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
	debug_printf ("lkop: is last of process %s\n",
		      target_pid_to_str (entry->id));
      return 0;
    }

  kill_wait_lwp (lwp);
  return 0;
}

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get the pending signal of THREAD, for detaching purposes.  This is
   the signal the thread last stopped for, which we need to deliver to
   the thread when detaching; otherwise, it would be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it.  "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_thread (thread);

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    error (_("Can't detach %s: %s"),
	   target_pid_to_str (ptid_of (thread)),
	   strerror (errno));

  delete_lwp (lwp);
  return 0;
}

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct process_info *process = proc;

  if (pid_of (thread) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_threads, delete_lwp_callback, process);

  /* Free all private data.  */
  priv = process->private;
  free (priv->arch_private);
  free (priv);
  process->private = NULL;

  remove_process (process);
}

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  ptid_t ptid = * (ptid_t *) arg;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (thread->entry.id))
    return 0;

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  return lwp->status_pending_p;
}

static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct inferior_list_entry *thread
    = find_inferior (&all_threads, same_lwp, &ptid);

  if (thread == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) thread);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  struct inferior_list_entry *inf, *tmp;
  int count = 0;

  ALL_INFERIORS (&all_threads, inf, tmp)
    {
      if (ptid_get_pid (inf->id) == pid)
	count++;
    }

  return count;
}

/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  struct process_info *proc, *tmp;

  ALL_PROCESSES (proc, tmp)
    {
      pid_t leader_pid = pid_of (proc);
      struct lwp_info *leader_lp;

      leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));

      if (debug_threads)
	debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
		      "num_lwps=%d, zombie=%d\n",
		      leader_pid, leader_lp != NULL, num_lwps (leader_pid),
		      linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL
	  /* Check if there are other threads in the group, as we may
	     have raced with the inferior simply exiting.  */
	  && !last_thread_of_process_p (leader_pid)
	  && linux_proc_pid_is_zombie (leader_pid))
	{
	  /* A leader zombie can mean one of two things:

	     - It exited, and there's an exit status pending
	       available, or only the leader exited (not the whole
	       program).  In the latter case, we can't waitpid the
	       leader's exit status until all other threads are gone.

	     - There are 3 or more threads in the group, and a thread
	       other than the leader exec'd.  On an exec, the Linux
	       kernel destroys all other threads (except the execing
	       one) in the thread group, and resets the execing thread's
	       tid to the tgid.  No exit notification is sent for the
	       execing thread -- from the ptracer's perspective, it
	       appears as though the execing thread just vanishes.
	       Until we reap all other threads except the leader and the
	       execing thread, the leader will be zombie, and the
	       execing thread will be in `D (disc sleep)'.  As soon as
	       all other threads are reaped, the execing thread changes
1355 all other threads are reaped, the execing thread changes
1356 it's tid to the tgid, and the previous (zombie) leader
1357 vanishes, giving place to the "new" leader. We could try
1358 distinguishing the exit and exec cases, by waiting once
1359 more, and seeing if something comes out, but it doesn't
1360 sound useful. The previous leader _does_ go away, and
1361 we'll re-add the new one once we see the exec event
1362 (which is just the same as what would happen if the
1363 previous leader did exit voluntarily before some other
1364 thread execs). */
1365
1366 if (debug_threads)
1367 fprintf (stderr,
1368 "CZL: Thread group leader %d zombie "
1369 "(it exited, or another thread execd).\n",
1370 leader_pid);
1371
1372 delete_lwp (leader_lp);
1373 }
1374 }
1375 }
1376
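/* Illustrative sketch (hypothetical, compiled out): one way a
   predicate like linux_proc_pid_is_zombie can work, by scanning the
   "State:" line of /proc/PID/status for 'Z'.  The real helper lives
   in nat/linux-procfs.c; this is only an assumed-equivalent
   demonstration.  */
#if 0
static int
pid_is_zombie_example (int pid)
{
  char path[64], line[256];
  FILE *f;
  int zombie = 0;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;
  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
	/* E.g. "State:\tZ (zombie)".  */
	zombie = (strchr (line, 'Z') != NULL);
	break;
      }
  fclose (f);
  return zombie;
}
#endif
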
/* Callback for `find_inferior'.  Returns the first LWP that is not
   stopped.  ARG is a PTID filter.  */

static int
not_stopped_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct lwp_info *lwp;
  ptid_t filter = *(ptid_t *) arg;

  if (!ptid_match (ptid_of (thr), filter))
    return 0;

  lwp = get_thread_lwp (thr);
  if (!lwp->stopped)
    return 1;

  return 0;
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
	debug_printf ("got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
				  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

/* The reason we resume in the caller is that we want to be able to
   pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming; otherwise, linux_resume_one_lwp
   refuses to resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_inferior;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
	debug_printf ("Checking whether LWP %ld needs to move out of the "
		      "jump pad.\n",
		      lwpid_of (current_inferior));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != 0)
	    {
	      if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      if (debug_threads)
		debug_printf ("Checking whether LWP %ld needs to move out of "
			      "the jump pad...it does\n",
			      lwpid_of (current_inferior));
	      current_inferior = saved_inferior;

	      return 1;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint = 0;

	  if (r != 0
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_inferior),
			     (PTRACE_TYPE_ARG3) 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (current_inferior),
			  (PTRACE_TYPE_ARG3) 0, &info);
		}

	      regcache = get_thread_regcache (current_inferior, 1);
	      (*the_low_target.set_pc) (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
			      "stopping all threads momentarily.\n");

	      stop_all_lwps (1, lwp);
	      cancel_breakpoints ();

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  if (debug_threads)
    debug_printf ("Checking whether LWP %ld needs to move out of the "
		  "jump pad...no\n",
		  lwpid_of (current_inferior));

  current_inferior = saved_inferior;
  return 0;
}

/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("Deferring signal %d for LWP %ld.\n",
		  WSTOPSIG (*wstat), lwpid_of (thread));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	debug_printf ("   Already queued %d\n",
		      sig->signal);

      debug_printf ("   (no more currently queued signals)\n");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	{
	  if (sig->signal == WSTOPSIG (*wstat))
	    {
	      if (debug_threads)
		debug_printf ("Not requeuing already queued non-RT signal %d"
			      " for LWP %ld\n",
			      sig->signal,
			      lwpid_of (thread));
	      return;
	    }
	}
    }

  p_sig = xmalloc (sizeof (*p_sig));
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);
  memset (&p_sig->info, 0, sizeof (siginfo_t));
  ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	  &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}

/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (lwp->pending_signals_to_report != NULL)
    {
      struct pending_signals **p_sig;

      p_sig = &lwp->pending_signals_to_report;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      *wstat = W_STOPCODE ((*p_sig)->signal);
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&(*p_sig)->info);
      free (*p_sig);
      *p_sig = NULL;

      if (debug_threads)
	debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
		      WSTOPSIG (*wstat), lwpid_of (thread));

      if (debug_threads)
	{
	  struct pending_signals *sig;

	  for (sig = lwp->pending_signals_to_report;
	       sig != NULL;
	       sig = sig->prev)
	    debug_printf ("   Still queued %d\n",
			  sig->signal);

	  debug_printf ("   (no more queued signals)\n");
	}

      return 1;
    }

  return 0;
}

/* Arrange for a breakpoint to be hit again later.  We don't keep the
   SIGTRAP status and don't forward the SIGTRAP signal to the LWP.  We
   will handle the current event, eventually we will resume this LWP,
   and this breakpoint will trap again.  */

static int
cancel_breakpoint (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;

  /* There's nothing to do if we don't support breakpoints.  */
  if (!supports_breakpoints ())
    return 0;

  /* breakpoint_at reads from current inferior.  */
  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
    {
      if (debug_threads)
	debug_printf ("CB: Push back breakpoint for %s\n",
		      target_pid_to_str (ptid_of (current_inferior)));

      /* Back up the PC if necessary.  */
      if (the_low_target.decr_pc_after_break)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_inferior, 1);
	  (*the_low_target.set_pc) (regcache, lwp->stop_pc);
	}

      current_inferior = saved_inferior;
      return 1;
    }
  else
    {
      if (debug_threads)
	debug_printf ("CB: No breakpoint found at %s for [%s]\n",
		      paddress (lwp->stop_pc),
		      target_pid_to_str (ptid_of (current_inferior)));
    }

  current_inferior = saved_inferior;
  return 0;
}

/* Do low-level handling of the event, and check if we should go on
   and pass it to caller code.  Return the affected lwp if we should
   pass the event along, or NULL otherwise.  */

static struct lwp_info *
linux_low_filter_event (ptid_t filter_ptid, int lwpid, int wstat)
{
  struct lwp_info *child;
  struct thread_info *thread;

  child = find_lwp_pid (pid_to_ptid (lwpid));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
       was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (wstat))
    {
      add_to_pid_list (&stopped_pids, lwpid, wstat);
      return NULL;
    }
  else if (child == NULL)
    return NULL;

  thread = get_lwp_thread (child);

  child->stopped = 1;

  child->last_status = wstat;

  if (WIFSTOPPED (wstat))
    {
      struct process_info *proc;

      /* Architecture-specific setup after inferior is running.  This
	 needs to happen after we have attached to the inferior and it
	 is stopped for the first time, but before we access any
	 inferior registers.  */
      proc = find_process_pid (pid_of (thread));
      if (proc->private->new_inferior)
	{
	  struct thread_info *saved_inferior;

	  saved_inferior = current_inferior;
	  current_inferior = thread;

	  the_low_target.arch_setup ();

	  current_inferior = saved_inferior;

	  proc->private->new_inferior = 0;
	}
    }

  /* Store the STOP_PC, with adjustment applied.  This depends on the
     architecture being defined already (so that CHILD has a valid
     regcache), and on LAST_STATUS being set (to check for SIGTRAP or
     not).  */
  if (WIFSTOPPED (wstat))
    {
      if (debug_threads
	  && the_low_target.get_pc != NULL)
	{
	  struct thread_info *saved_inferior;
	  struct regcache *regcache;
	  CORE_ADDR pc;

	  saved_inferior = current_inferior;
	  current_inferior = thread;
	  regcache = get_thread_regcache (current_inferior, 1);
	  pc = (*the_low_target.get_pc) (regcache);
	  debug_printf ("linux_low_filter_event: pc is 0x%lx\n", (long) pc);
	  current_inferior = saved_inferior;
	}

      child->stop_pc = get_stop_pc (child);
    }

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, it's possible that the way to know which watched
     address trapped, is to check the register that is used to select
     which address to watch.  Problem is, between setting the
     watchpoint and reading back which data address trapped, the user
     may change the set of watchpoints, and, as a consequence, GDB
     changes the debug registers in the inferior.  To avoid reading
     back a stale stopped-data-address when that happens, we cache in
     LP the fact that a watchpoint trapped, and the corresponding data
     address, as soon as we see CHILD stop with a SIGTRAP.  If GDB
     changes the debug registers meanwhile, we have the cached data we
     can rely on.  */

  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP)
    {
      if (the_low_target.stopped_by_watchpoint == NULL)
	{
	  child->stopped_by_watchpoint = 0;
	}
      else
	{
	  struct thread_info *saved_inferior;

	  saved_inferior = current_inferior;
	  current_inferior = thread;

	  child->stopped_by_watchpoint
	    = the_low_target.stopped_by_watchpoint ();

	  if (child->stopped_by_watchpoint)
	    {
	      if (the_low_target.stopped_data_address != NULL)
		child->stopped_data_address
		  = the_low_target.stopped_data_address ();
	      else
		child->stopped_data_address = 0;
	    }

	  current_inferior = saved_inferior;
	}
    }

  if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
    {
      linux_enable_event_reporting (lwpid);
      child->must_set_ptrace_flags = 0;
    }

  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
      && wstat >> 16 != 0)
    {
      handle_extended_wait (child, wstat);
      return NULL;
    }

  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
      && child->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Expected stop.\n");
      child->stop_expected = 0;

      if (thread->last_resume_kind == resume_stop)
	{
	  /* We want to report the stop to the core.  Treat the
	     SIGSTOP as a normal event.  */
	}
      else if (stopping_threads != NOT_STOPPING_THREADS)
	{
	  /* Stopping threads.  We don't want this SIGSTOP to end up
	     pending in the FILTER_PTID handling below.  */
	  return NULL;
	}
      else
	{
	  /* Filter out the event.  */
	  linux_resume_one_lwp (child, child->stepping, 0, NULL);
	  return NULL;
	}
    }

  /* Check if the thread has exited.  */
  if ((WIFEXITED (wstat) || WIFSIGNALED (wstat))
      && num_lwps (pid_of (thread)) > 1)
    {
      if (debug_threads)
	debug_printf ("LLW: %d exited.\n", lwpid);

      /* If there is at least one more LWP, then the exit signal
	 was not the end of the debugged application and should be
	 ignored.  */
      delete_lwp (child);
      return NULL;
    }

  if (!ptid_match (ptid_of (thread), filter_ptid))
    {
      if (debug_threads)
	debug_printf ("LWP %d got an event %06x, leaving pending.\n",
		      lwpid, wstat);

      if (WIFSTOPPED (wstat))
	{
	  child->status_pending_p = 1;
	  child->status_pending = wstat;

	  if (WSTOPSIG (wstat) != SIGSTOP)
	    {
	      /* Cancel breakpoint hits.  The breakpoint may be
		 removed before we fetch events from this process to
		 report to the core.  It is best not to assume the
		 moribund breakpoints heuristic always handles these
		 cases --- it could be too many events go through to
		 the core before this one is handled.  All-stop always
		 cancels breakpoint hits in all threads.  */
	      if (non_stop
		  && WSTOPSIG (wstat) == SIGTRAP
		  && cancel_breakpoint (child))
		{
		  /* Throw away the SIGTRAP.  */
		  child->status_pending_p = 0;

		  if (debug_threads)
		    debug_printf ("LLW: LWP %d hit a breakpoint while"
				  " waiting for another process;"
				  " cancelled it\n", lwpid);
		}
	    }
	}
      else if (WIFEXITED (wstat) || WIFSIGNALED (wstat))
	{
	  if (debug_threads)
	    debug_printf ("LLWE: process %d exited while fetching "
			  "event from another LWP\n", lwpid);

	  /* This was the last lwp in the process.  Since events are
	     serialized to GDB core, and we can't report this one
	     right now, but GDB core and the other target layers will
	     want to be notified about the exit code/signal, leave the
	     status pending for the next time we're able to report
	     it.  */
	  mark_lwp_dead (child, wstat);
	}

      return NULL;
    }

  return child;
}

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

1981 /* Wait for an event from child(ren) WAIT_PTID, and return any that
1982 match FILTER_PTID (leaving others pending). The PTIDs can be:
1983 minus_one_ptid, to specify any child; a pid PTID, specifying all
1984 lwps of a thread group; or a PTID representing a single lwp. Store
1985 the stop status through the status pointer WSTAT. OPTIONS is
1986 passed to the waitpid call. Return 0 if no event was found and
1987 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
1988 	 were found.  Return the PID of the stopped child otherwise.  */
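
/* For illustration only (a hypothetical caller, not in-tree code): to
   wait on all children but report only events from process 42, leaving
   other LWPs' events pending, one could write:

     int wstat;
     int pid = linux_wait_for_event_filtered (minus_one_ptid,
					      pid_to_ptid (42),
					      &wstat, 0);

   Passing null_ptid as FILTER_PTID instead leaves every event pending;
   wait_for_sigstop below uses that form to pull in stops without
   reporting any of them.  */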
1989
1990 static int
1991 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
1992 int *wstatp, int options)
1993 {
1994 struct thread_info *event_thread;
1995 struct lwp_info *event_child, *requested_child;
1996 sigset_t block_mask, prev_mask;
1997
1998 retry:
1999 /* N.B. event_thread points to the thread_info struct that contains
2000 event_child. Keep them in sync. */
2001 event_thread = NULL;
2002 event_child = NULL;
2003 requested_child = NULL;
2004
2005 /* Check for a lwp with a pending status. */
2006
2007 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2008 {
2009 event_thread = (struct thread_info *)
2010 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2011 if (event_thread != NULL)
2012 event_child = get_thread_lwp (event_thread);
2013 if (debug_threads && event_thread)
2014 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2015 }
2016 else if (!ptid_equal (filter_ptid, null_ptid))
2017 {
2018 requested_child = find_lwp_pid (filter_ptid);
2019
2020 if (stopping_threads == NOT_STOPPING_THREADS
2021 && requested_child->status_pending_p
2022 && requested_child->collecting_fast_tracepoint)
2023 {
2024 enqueue_one_deferred_signal (requested_child,
2025 &requested_child->status_pending);
2026 requested_child->status_pending_p = 0;
2027 requested_child->status_pending = 0;
2028 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2029 }
2030
2031 if (requested_child->suspended
2032 && requested_child->status_pending_p)
2033 fatal ("requesting an event out of a suspended child?");
2034
2035 if (requested_child->status_pending_p)
2036 {
2037 event_child = requested_child;
2038 event_thread = get_lwp_thread (event_child);
2039 }
2040 }
2041
2042 if (event_child != NULL)
2043 {
2044 if (debug_threads)
2045 debug_printf ("Got an event from pending child %ld (%04x)\n",
2046 lwpid_of (event_thread), event_child->status_pending);
2047 *wstatp = event_child->status_pending;
2048 event_child->status_pending_p = 0;
2049 event_child->status_pending = 0;
2050 current_inferior = event_thread;
2051 return lwpid_of (event_thread);
2052 }
2053
2054 /* But if we don't find a pending event, we'll have to wait.
2055
2056 We only enter this loop if no process has a pending wait status.
2057 Thus any action taken in response to a wait status inside this
2058 loop is responding as soon as we detect the status, not after any
2059 pending events. */
2060
2061 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2062 all signals while here. */
2063 sigfillset (&block_mask);
2064 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2065
2066 while (event_child == NULL)
2067 {
2068 pid_t ret = 0;
2069
2070 	  /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2071 quirks:
2072
2073 - If the thread group leader exits while other threads in the
2074 thread group still exist, waitpid(TGID, ...) hangs. That
2075 waitpid won't return an exit status until the other threads
2076 in the group are reaped.
2077
2078 - When a non-leader thread execs, that thread just vanishes
2079 without reporting an exit (so we'd hang if we waited for it
2080 explicitly in that case). The exec event is reported to
2081 the TGID pid (although we don't currently enable exec
2082 events). */
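	  /* Clear errno: my_waitpid returning 0 (nothing to report)
	     leaves errno untouched, so the debug output below would
	     otherwise show a stale error from some earlier syscall.  */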
2083 errno = 0;
2084 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2085
2086 if (debug_threads)
2087 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2088 ret, errno ? strerror (errno) : "ERRNO-OK");
2089
2090 if (ret > 0)
2091 {
2092 if (debug_threads)
2093 {
2094 debug_printf ("LLW: waitpid %ld received %s\n",
2095 (long) ret, status_to_str (*wstatp));
2096 }
2097
2098 event_child = linux_low_filter_event (filter_ptid,
2099 ret, *wstatp);
2100 if (event_child != NULL)
2101 {
2102 /* We got an event to report to the core. */
2103 event_thread = get_lwp_thread (event_child);
2104 break;
2105 }
2106
2107 /* Retry until nothing comes out of waitpid. A single
2108 SIGCHLD can indicate more than one child stopped. */
2109 continue;
2110 }
2111
2112 /* Check for zombie thread group leaders. Those can't be reaped
2113 until all other threads in the thread group are. */
2114 check_zombie_leaders ();
2115
2116 /* If there are no resumed children left in the set of LWPs we
2117 want to wait for, bail. We can't just block in
2118 waitpid/sigsuspend, because lwps might have been left stopped
2119 in trace-stop state, and we'd be stuck forever waiting for
2120 their status to change (which would only happen if we resumed
2121 them). Even if WNOHANG is set, this return code is preferred
2122 over 0 (below), as it is more detailed. */
2123 if ((find_inferior (&all_threads,
2124 not_stopped_callback,
2125 &wait_ptid) == NULL))
2126 {
2127 if (debug_threads)
2128 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2129 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2130 return -1;
2131 }
2132
2133 /* No interesting event to report to the caller. */
2134 if ((options & WNOHANG))
2135 {
2136 if (debug_threads)
2137 debug_printf ("WNOHANG set, no event found\n");
2138
2139 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2140 return 0;
2141 }
2142
2143 /* Block until we get an event reported with SIGCHLD. */
2144 if (debug_threads)
2145 debug_printf ("sigsuspend'ing\n");
2146
2147 sigsuspend (&prev_mask);
2148 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2149 goto retry;
2150 }
2151
2152 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2153
2154 current_inferior = event_thread;
2155
2156 /* Check for thread exit. */
2157 if (! WIFSTOPPED (*wstatp))
2158 {
2159 gdb_assert (last_thread_of_process_p (pid_of (event_thread)));
2160
2161 if (debug_threads)
2162 	debug_printf ("LWP %ld is the last lwp of process.  "
2163 		      "Process %d exiting.\n",
2164 		      lwpid_of (event_thread), pid_of (event_thread));
2165 return lwpid_of (event_thread);
2166 }
2167
2168 return lwpid_of (event_thread);
2169 }
2170
2171 /* Wait for an event from child(ren) PTID. PTIDs can be:
2172 minus_one_ptid, to specify any child; a pid PTID, specifying all
2173 lwps of a thread group; or a PTID representing a single lwp. Store
2174 the stop status through the status pointer WSTAT. OPTIONS is
2175 passed to the waitpid call. Return 0 if no event was found and
2176 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2177 	 were found.  Return the PID of the stopped child otherwise.  */
2178
2179 static int
2180 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2181 {
2182 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2183 }
2184
2185 /* Count the LWP's that have had events. */
2186
2187 static int
2188 count_events_callback (struct inferior_list_entry *entry, void *data)
2189 {
2190 struct thread_info *thread = (struct thread_info *) entry;
2191 struct lwp_info *lp = get_thread_lwp (thread);
2192 int *count = data;
2193
2194 gdb_assert (count != NULL);
2195
2196 /* Count only resumed LWPs that have a SIGTRAP event pending that
2197 should be reported to GDB. */
2198 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2199 && thread->last_resume_kind != resume_stop
2200 && lp->status_pending_p
2201 && WIFSTOPPED (lp->status_pending)
2202 && WSTOPSIG (lp->status_pending) == SIGTRAP
2203 && !breakpoint_inserted_here (lp->stop_pc))
2204 (*count)++;
2205
2206 return 0;
2207 }
2208
2209 /* Select the LWP (if any) that is currently being single-stepped. */
2210
2211 static int
2212 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2213 {
2214 struct thread_info *thread = (struct thread_info *) entry;
2215 struct lwp_info *lp = get_thread_lwp (thread);
2216
2217 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2218 && thread->last_resume_kind == resume_step
2219 && lp->status_pending_p)
2220 return 1;
2221 else
2222 return 0;
2223 }
2224
2225 /* Select the Nth LWP that has had a SIGTRAP event that should be
2226 reported to GDB. */
2227
2228 static int
2229 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2230 {
2231 struct thread_info *thread = (struct thread_info *) entry;
2232 struct lwp_info *lp = get_thread_lwp (thread);
2233 int *selector = data;
2234
2235 gdb_assert (selector != NULL);
2236
2237 /* Select only resumed LWPs that have a SIGTRAP event pending. */
2238 if (thread->last_resume_kind != resume_stop
2239 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2240 && lp->status_pending_p
2241 && WIFSTOPPED (lp->status_pending)
2242 && WSTOPSIG (lp->status_pending) == SIGTRAP
2243 && !breakpoint_inserted_here (lp->stop_pc))
2244 if ((*selector)-- == 0)
2245 return 1;
2246
2247 return 0;
2248 }
2249
2250 static int
2251 cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
2252 {
2253 struct thread_info *thread = (struct thread_info *) entry;
2254 struct lwp_info *lp = get_thread_lwp (thread);
2255 struct lwp_info *event_lp = data;
2256
2257 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2258 if (lp == event_lp)
2259 return 0;
2260
2261 /* If a LWP other than the LWP that we're reporting an event for has
2262 hit a GDB breakpoint (as opposed to some random trap signal),
2263 then just arrange for it to hit it again later. We don't keep
2264 the SIGTRAP status and don't forward the SIGTRAP signal to the
2265 LWP. We will handle the current event, eventually we will resume
2266 all LWPs, and this one will get its breakpoint trap again.
2267
2268 If we do not do this, then we run the risk that the user will
2269 delete or disable the breakpoint, but the LWP will have already
2270 tripped on it. */
2271
2272 if (thread->last_resume_kind != resume_stop
2273 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2274 && lp->status_pending_p
2275 && WIFSTOPPED (lp->status_pending)
2276 && WSTOPSIG (lp->status_pending) == SIGTRAP
2277 && !lp->stepping
2278 && !lp->stopped_by_watchpoint
2279 && cancel_breakpoint (lp))
2280 /* Throw away the SIGTRAP. */
2281 lp->status_pending_p = 0;
2282
2283 return 0;
2284 }
2285
2286 static void
2287 linux_cancel_breakpoints (void)
2288 {
2289 find_inferior (&all_threads, cancel_breakpoints_callback, NULL);
2290 }
2291
2292 /* Select one LWP out of those that have events pending. */
2293
2294 static void
2295 select_event_lwp (struct lwp_info **orig_lp)
2296 {
2297 int num_events = 0;
2298 int random_selector;
2299 struct thread_info *event_thread;
2300
2301 /* Give preference to any LWP that is being single-stepped. */
2302 event_thread
2303 = (struct thread_info *) find_inferior (&all_threads,
2304 select_singlestep_lwp_callback,
2305 NULL);
2306 if (event_thread != NULL)
2307 {
2308 if (debug_threads)
2309 debug_printf ("SEL: Select single-step %s\n",
2310 target_pid_to_str (ptid_of (event_thread)));
2311 }
2312 else
2313 {
2314 /* No single-stepping LWP. Select one at random, out of those
2315 which have had SIGTRAP events. */
2316
2317 /* First see how many SIGTRAP events we have. */
2318 find_inferior (&all_threads, count_events_callback, &num_events);
2319
2320 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
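      /* rand () / (RAND_MAX + 1.0) is uniform over [0,1), so the
	 expression below yields an index uniformly distributed over
	 0 .. NUM_EVENTS - 1, without the modulo bias that
	 rand () % num_events would have.  */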
2321 random_selector = (int)
2322 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2323
2324 if (debug_threads && num_events > 1)
2325 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2326 num_events, random_selector);
2327
2328 event_thread
2329 = (struct thread_info *) find_inferior (&all_threads,
2330 select_event_lwp_callback,
2331 &random_selector);
2332 }
2333
2334 if (event_thread != NULL)
2335 {
2336 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2337
2338 /* Switch the event LWP. */
2339 *orig_lp = event_lp;
2340 }
2341 }
2342
2343 /* Decrement the suspend count of an LWP. */
2344
2345 static int
2346 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2347 {
2348 struct thread_info *thread = (struct thread_info *) entry;
2349 struct lwp_info *lwp = get_thread_lwp (thread);
2350
2351 /* Ignore EXCEPT. */
2352 if (lwp == except)
2353 return 0;
2354
2355 lwp->suspended--;
2356
2357 gdb_assert (lwp->suspended >= 0);
2358 return 0;
2359 }
2360
2361 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2362    non-NULL.  */
2363
2364 static void
2365 unsuspend_all_lwps (struct lwp_info *except)
2366 {
2367 find_inferior (&all_threads, unsuspend_one_lwp, except);
2368 }
2369
2370 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2371 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2372 void *data);
2373 static int lwp_running (struct inferior_list_entry *entry, void *data);
2374 static ptid_t linux_wait_1 (ptid_t ptid,
2375 struct target_waitstatus *ourstatus,
2376 int target_options);
2377
2378 /* Stabilize threads (move out of jump pads).
2379
2380 If a thread is midway collecting a fast tracepoint, we need to
2381 finish the collection and move it out of the jump pad before
2382 reporting the signal.
2383
2384 This avoids recursion while collecting (when a signal arrives
2385 midway, and the signal handler itself collects), which would trash
2386 the trace buffer. In case the user set a breakpoint in a signal
2387 	 handler, this avoids the backtrace showing the jump pad, etc.
2388 	 Most importantly, there are certain things we can't do safely if
2389 	 threads are stopped in a jump pad (or in its callees).  For
2390 example:
2391
2392 	 - starting a new trace run.  A thread still collecting the
2393 	 previous run could trash the trace buffer when resumed.  The trace
2394 	 buffer control structures would have been reset but the thread had
2395 	 no way to tell.  The thread could even be midway through
2396 	 memcpy'ing to the buffer, which would mean that when resumed, it
2397 	 would clobber the trace buffer that had been set up for the new run.
2398
2399 - we can't rewrite/reuse the jump pads for new tracepoints
2400 	 safely.  Say you do tstart while a thread is stopped midway
2401 	 through collecting.  When the thread is later resumed, it finishes the
2402 collection, and returns to the jump pad, to execute the original
2403 instruction that was under the tracepoint jump at the time the
2404 older run had been started. If the jump pad had been rewritten
2405 since for something else in the new run, the thread would now
2406 execute the wrong / random instructions. */
2407
2408 static void
2409 linux_stabilize_threads (void)
2410 {
2411 struct thread_info *save_inferior;
2412 struct thread_info *thread_stuck;
2413
2414 thread_stuck
2415 = (struct thread_info *) find_inferior (&all_threads,
2416 stuck_in_jump_pad_callback,
2417 NULL);
2418 if (thread_stuck != NULL)
2419 {
2420 if (debug_threads)
2421 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2422 lwpid_of (thread_stuck));
2423 return;
2424 }
2425
2426 save_inferior = current_inferior;
2427
2428 stabilizing_threads = 1;
2429
2430 /* Kick 'em all. */
2431 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2432
2433 /* Loop until all are stopped out of the jump pads. */
2434 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2435 {
2436 struct target_waitstatus ourstatus;
2437 struct lwp_info *lwp;
2438 int wstat;
2439
2440 	      /* Note that we go through the full wait/event loop.  While
2441 	 moving threads out of the jump pad, we need to be able to step
2442 over internal breakpoints and such. */
2443 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2444
2445 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2446 {
2447 lwp = get_thread_lwp (current_inferior);
2448
2449 /* Lock it. */
2450 lwp->suspended++;
2451
2452 if (ourstatus.value.sig != GDB_SIGNAL_0
2453 || current_inferior->last_resume_kind == resume_stop)
2454 {
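	      /* Synthesize a wait status that WIFSTOPPED/WSTOPSIG will
		 decode as a stop by this signal; W_STOPCODE (given a
		 fallback definition near the top of this file) is their
		 inverse.  */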
2455 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2456 enqueue_one_deferred_signal (lwp, &wstat);
2457 }
2458 }
2459 }
2460
2461 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
2462
2463 stabilizing_threads = 0;
2464
2465 current_inferior = save_inferior;
2466
2467 if (debug_threads)
2468 {
2469 thread_stuck
2470 = (struct thread_info *) find_inferior (&all_threads,
2471 stuck_in_jump_pad_callback,
2472 NULL);
2473 if (thread_stuck != NULL)
2474 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2475 lwpid_of (thread_stuck));
2476 }
2477 }
2478
2479 /* Wait for process, returns status. */
2480
2481 static ptid_t
2482 linux_wait_1 (ptid_t ptid,
2483 struct target_waitstatus *ourstatus, int target_options)
2484 {
2485 int w;
2486 struct lwp_info *event_child;
2487 int options;
2488 int pid;
2489 int step_over_finished;
2490 int bp_explains_trap;
2491 int maybe_internal_trap;
2492 int report_to_gdb;
2493 int trace_event;
2494 int in_step_range;
2495
2496 if (debug_threads)
2497 {
2498 debug_enter ();
2499 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2500 }
2501
2502 /* Translate generic target options into linux options. */
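  /* __WALL: wait for "clone" children (threads, which would otherwise
     only be reported with __WCLONE) as well as regular children.  */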
2503 options = __WALL;
2504 if (target_options & TARGET_WNOHANG)
2505 options |= WNOHANG;
2506
2507 retry:
2508 bp_explains_trap = 0;
2509 trace_event = 0;
2510 in_step_range = 0;
2511 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2512
2513 /* If we were only supposed to resume one thread, only wait for
2514 that thread - if it's still alive. If it died, however - which
2515 can happen if we're coming from the thread death case below -
2516 then we need to make sure we restart the other threads. We could
2517 pick a thread at random or restart all; restarting all is less
2518 arbitrary. */
2519 if (!non_stop
2520 && !ptid_equal (cont_thread, null_ptid)
2521 && !ptid_equal (cont_thread, minus_one_ptid))
2522 {
2523 struct thread_info *thread;
2524
2525 thread = (struct thread_info *) find_inferior_id (&all_threads,
2526 cont_thread);
2527
2528 /* No stepping, no signal - unless one is pending already, of course. */
2529 if (thread == NULL)
2530 {
2531 struct thread_resume resume_info;
2532 resume_info.thread = minus_one_ptid;
2533 resume_info.kind = resume_continue;
2534 resume_info.sig = 0;
2535 linux_resume (&resume_info, 1);
2536 }
2537 else
2538 ptid = cont_thread;
2539 }
2540
2541 if (ptid_equal (step_over_bkpt, null_ptid))
2542 pid = linux_wait_for_event (ptid, &w, options);
2543 else
2544 {
2545 if (debug_threads)
2546 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2547 target_pid_to_str (step_over_bkpt));
2548 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2549 }
2550
2551 if (pid == 0)
2552 {
2553 gdb_assert (target_options & TARGET_WNOHANG);
2554
2555 if (debug_threads)
2556 {
2557 debug_printf ("linux_wait_1 ret = null_ptid, "
2558 "TARGET_WAITKIND_IGNORE\n");
2559 debug_exit ();
2560 }
2561
2562 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2563 return null_ptid;
2564 }
2565 else if (pid == -1)
2566 {
2567 if (debug_threads)
2568 {
2569 debug_printf ("linux_wait_1 ret = null_ptid, "
2570 "TARGET_WAITKIND_NO_RESUMED\n");
2571 debug_exit ();
2572 }
2573
2574 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
2575 return null_ptid;
2576 }
2577
2578 event_child = get_thread_lwp (current_inferior);
2579
2580 /* linux_wait_for_event only returns an exit status for the last
2581 child of a process. Report it. */
2582 if (WIFEXITED (w) || WIFSIGNALED (w))
2583 {
2584 if (WIFEXITED (w))
2585 {
2586 ourstatus->kind = TARGET_WAITKIND_EXITED;
2587 ourstatus->value.integer = WEXITSTATUS (w);
2588
2589 if (debug_threads)
2590 {
2591 debug_printf ("linux_wait_1 ret = %s, exited with "
2592 "retcode %d\n",
2593 target_pid_to_str (ptid_of (current_inferior)),
2594 WEXITSTATUS (w));
2595 debug_exit ();
2596 }
2597 }
2598 else
2599 {
2600 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2601 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2602
2603 if (debug_threads)
2604 {
2605 debug_printf ("linux_wait_1 ret = %s, terminated with "
2606 "signal %d\n",
2607 target_pid_to_str (ptid_of (current_inferior)),
2608 WTERMSIG (w));
2609 debug_exit ();
2610 }
2611 }
2612
2613 return ptid_of (current_inferior);
2614 }
2615
2616 /* If this event was not handled before, and is not a SIGTRAP, we
2617 report it. SIGILL and SIGSEGV are also treated as traps in case
2618 a breakpoint is inserted at the current PC. If this target does
2619 not support internal breakpoints at all, we also report the
2620 SIGTRAP without further processing; it's of no concern to us. */
2621 maybe_internal_trap
2622 = (supports_breakpoints ()
2623 && (WSTOPSIG (w) == SIGTRAP
2624 || ((WSTOPSIG (w) == SIGILL
2625 || WSTOPSIG (w) == SIGSEGV)
2626 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2627
2628 if (maybe_internal_trap)
2629 {
2630 /* Handle anything that requires bookkeeping before deciding to
2631 report the event or continue waiting. */
2632
2633 /* First check if we can explain the SIGTRAP with an internal
2634 breakpoint, or if we should possibly report the event to GDB.
2635 Do this before anything that may remove or insert a
2636 breakpoint. */
2637 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2638
2639 /* We have a SIGTRAP, possibly a step-over dance has just
2640 finished. If so, tweak the state machine accordingly,
2641 reinsert breakpoints and delete any reinsert (software
2642 single-step) breakpoints. */
2643 step_over_finished = finish_step_over (event_child);
2644
2645 /* Now invoke the callbacks of any internal breakpoints there. */
2646 check_breakpoints (event_child->stop_pc);
2647
2648 /* Handle tracepoint data collecting. This may overflow the
2649 trace buffer, and cause a tracing stop, removing
2650 breakpoints. */
2651 trace_event = handle_tracepoints (event_child);
2652
2653 if (bp_explains_trap)
2654 {
2655 /* If we stepped or ran into an internal breakpoint, we've
2656 already handled it. So next time we resume (from this
2657 PC), we should step over it. */
2658 if (debug_threads)
2659 debug_printf ("Hit a gdbserver breakpoint.\n");
2660
2661 if (breakpoint_here (event_child->stop_pc))
2662 event_child->need_step_over = 1;
2663 }
2664 }
2665 else
2666 {
2667 /* We have some other signal, possibly a step-over dance was in
2668 progress, and it should be cancelled too. */
2669 step_over_finished = finish_step_over (event_child);
2670 }
2671
2672 /* We have all the data we need. Either report the event to GDB, or
2673 resume threads and keep waiting for more. */
2674
2675 /* If we're collecting a fast tracepoint, finish the collection and
2676 move out of the jump pad before delivering a signal. See
2677 linux_stabilize_threads. */
2678
2679 if (WIFSTOPPED (w)
2680 && WSTOPSIG (w) != SIGTRAP
2681 && supports_fast_tracepoints ()
2682 && agent_loaded_p ())
2683 {
2684 if (debug_threads)
2685 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2686 "to defer or adjust it.\n",
2687 WSTOPSIG (w), lwpid_of (current_inferior));
2688
2689 /* Allow debugging the jump pad itself. */
2690 if (current_inferior->last_resume_kind != resume_step
2691 && maybe_move_out_of_jump_pad (event_child, &w))
2692 {
2693 enqueue_one_deferred_signal (event_child, &w);
2694
2695 if (debug_threads)
2696 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
2697 WSTOPSIG (w), lwpid_of (current_inferior));
2698
2699 linux_resume_one_lwp (event_child, 0, 0, NULL);
2700 goto retry;
2701 }
2702 }
2703
2704 if (event_child->collecting_fast_tracepoint)
2705 {
2706 if (debug_threads)
2707 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2708 "Check if we're already there.\n",
2709 lwpid_of (current_inferior),
2710 event_child->collecting_fast_tracepoint);
2711
2712 trace_event = 1;
2713
2714 event_child->collecting_fast_tracepoint
2715 = linux_fast_tracepoint_collecting (event_child, NULL);
2716
2717 if (event_child->collecting_fast_tracepoint != 1)
2718 {
2719 /* No longer need this breakpoint. */
2720 if (event_child->exit_jump_pad_bkpt != NULL)
2721 {
2722 if (debug_threads)
2723 		    debug_printf ("No longer need exit-jump-pad bkpt; removing it; "
2724 				  "stopping all threads momentarily.\n");
2725
2726 /* Other running threads could hit this breakpoint.
2727 We don't handle moribund locations like GDB does,
2728 instead we always pause all threads when removing
2729 breakpoints, so that any step-over or
2730 decr_pc_after_break adjustment is always taken
2731 care of while the breakpoint is still
2732 inserted. */
2733 stop_all_lwps (1, event_child);
2734 cancel_breakpoints ();
2735
2736 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2737 event_child->exit_jump_pad_bkpt = NULL;
2738
2739 unstop_all_lwps (1, event_child);
2740
2741 gdb_assert (event_child->suspended >= 0);
2742 }
2743 }
2744
2745 if (event_child->collecting_fast_tracepoint == 0)
2746 {
2747 if (debug_threads)
2748 debug_printf ("fast tracepoint finished "
2749 "collecting successfully.\n");
2750
2751 /* We may have a deferred signal to report. */
2752 if (dequeue_one_deferred_signal (event_child, &w))
2753 {
2754 if (debug_threads)
2755 debug_printf ("dequeued one signal.\n");
2756 }
2757 else
2758 {
2759 if (debug_threads)
2760 debug_printf ("no deferred signals.\n");
2761
2762 if (stabilizing_threads)
2763 {
2764 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2765 ourstatus->value.sig = GDB_SIGNAL_0;
2766
2767 if (debug_threads)
2768 {
2769 debug_printf ("linux_wait_1 ret = %s, stopped "
2770 "while stabilizing threads\n",
2771 target_pid_to_str (ptid_of (current_inferior)));
2772 debug_exit ();
2773 }
2774
2775 return ptid_of (current_inferior);
2776 }
2777 }
2778 }
2779 }
2780
2781 /* Check whether GDB would be interested in this event. */
2782
2783 /* If GDB is not interested in this signal, don't stop other
2784 threads, and don't report it to GDB. Just resume the inferior
2785 right away. We do this for threading-related signals as well as
2786 any that GDB specifically requested we ignore. But never ignore
2787 SIGSTOP if we sent it ourselves, and do not ignore signals when
2788 stepping - they may require special handling to skip the signal
2789 handler. */
2790 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2791 thread library? */
2792 if (WIFSTOPPED (w)
2793 && current_inferior->last_resume_kind != resume_step
2794 && (
2795 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2796 (current_process ()->private->thread_db != NULL
2797 && (WSTOPSIG (w) == __SIGRTMIN
2798 || WSTOPSIG (w) == __SIGRTMIN + 1))
2799 ||
2800 #endif
2801 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
2802 && !(WSTOPSIG (w) == SIGSTOP
2803 && current_inferior->last_resume_kind == resume_stop))))
2804 {
2805 siginfo_t info, *info_p;
2806
2807 if (debug_threads)
2808 debug_printf ("Ignored signal %d for LWP %ld.\n",
2809 WSTOPSIG (w), lwpid_of (current_inferior));
2810
2811 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_inferior),
2812 (PTRACE_TYPE_ARG3) 0, &info) == 0)
2813 info_p = &info;
2814 else
2815 info_p = NULL;
2816 linux_resume_one_lwp (event_child, event_child->stepping,
2817 WSTOPSIG (w), info_p);
2818 goto retry;
2819 }
2820
2821 /* Note that all addresses are always "out of the step range" when
2822 there's no range to begin with. */
2823 in_step_range = lwp_in_step_range (event_child);
2824
2825 /* If GDB wanted this thread to single step, and the thread is out
2826 of the step range, we always want to report the SIGTRAP, and let
2827 GDB handle it. Watchpoints should always be reported. So should
2828 signals we can't explain. A SIGTRAP we can't explain could be a
2829 	 GDB breakpoint --- we may or may not support Z0 breakpoints.  If
2830 	 we do, we'll be able to handle GDB breakpoints on top of internal
2831 breakpoints, by handling the internal breakpoint and still
2832 reporting the event to GDB. If we don't, we're out of luck, GDB
2833 won't see the breakpoint hit. */
2834 report_to_gdb = (!maybe_internal_trap
2835 || (current_inferior->last_resume_kind == resume_step
2836 && !in_step_range)
2837 || event_child->stopped_by_watchpoint
2838 || (!step_over_finished && !in_step_range
2839 && !bp_explains_trap && !trace_event)
2840 || (gdb_breakpoint_here (event_child->stop_pc)
2841 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
2842 && gdb_no_commands_at_breakpoint (event_child->stop_pc)));
2843
2844 run_breakpoint_commands (event_child->stop_pc);
2845
2846 /* We found no reason GDB would want us to stop. We either hit one
2847 of our own breakpoints, or finished an internal step GDB
2848 shouldn't know about. */
2849 if (!report_to_gdb)
2850 {
2851 if (debug_threads)
2852 {
2853 if (bp_explains_trap)
2854 debug_printf ("Hit a gdbserver breakpoint.\n");
2855 if (step_over_finished)
2856 debug_printf ("Step-over finished.\n");
2857 if (trace_event)
2858 debug_printf ("Tracepoint event.\n");
2859 if (lwp_in_step_range (event_child))
2860 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
2861 paddress (event_child->stop_pc),
2862 paddress (event_child->step_range_start),
2863 paddress (event_child->step_range_end));
2864 }
2865
2866 /* We're not reporting this breakpoint to GDB, so apply the
2867 decr_pc_after_break adjustment to the inferior's regcache
2868 ourselves. */
2869
2870 if (the_low_target.set_pc != NULL)
2871 {
2872 struct regcache *regcache
2873 = get_thread_regcache (current_inferior, 1);
2874 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2875 }
2876
2877 /* We may have finished stepping over a breakpoint. If so,
2878 we've stopped and suspended all LWPs momentarily except the
2879 stepping one. This is where we resume them all again. We're
2880 going to keep waiting, so use proceed, which handles stepping
2881 over the next breakpoint. */
2882 if (debug_threads)
2883 debug_printf ("proceeding all threads.\n");
2884
2885 if (step_over_finished)
2886 unsuspend_all_lwps (event_child);
2887
2888 proceed_all_lwps ();
2889 goto retry;
2890 }
2891
2892 if (debug_threads)
2893 {
2894 if (current_inferior->last_resume_kind == resume_step)
2895 {
2896 if (event_child->step_range_start == event_child->step_range_end)
2897 debug_printf ("GDB wanted to single-step, reporting event.\n");
2898 else if (!lwp_in_step_range (event_child))
2899 debug_printf ("Out of step range, reporting event.\n");
2900 }
2901 if (event_child->stopped_by_watchpoint)
2902 debug_printf ("Stopped by watchpoint.\n");
2903 if (gdb_breakpoint_here (event_child->stop_pc))
2904 debug_printf ("Stopped by GDB breakpoint.\n");
2905       /* The enclosing block already checked debug_threads.  */
2906       debug_printf ("Hit a non-gdbserver trap event.\n");
2907 }
2908
2909 /* Alright, we're going to report a stop. */
2910
2911 if (!non_stop && !stabilizing_threads)
2912 {
2913 /* In all-stop, stop all threads. */
2914 stop_all_lwps (0, NULL);
2915
2916 /* If we're not waiting for a specific LWP, choose an event LWP
2917 from among those that have had events. Giving equal priority
2918 to all LWPs that have had events helps prevent
2919 starvation. */
2920 if (ptid_equal (ptid, minus_one_ptid))
2921 {
2922 event_child->status_pending_p = 1;
2923 event_child->status_pending = w;
2924
2925 select_event_lwp (&event_child);
2926
2927 /* current_inferior and event_child must stay in sync. */
2928 current_inferior = get_lwp_thread (event_child);
2929
2930 event_child->status_pending_p = 0;
2931 w = event_child->status_pending;
2932 }
2933
2934 /* Now that we've selected our final event LWP, cancel any
2935 breakpoints in other LWPs that have hit a GDB breakpoint.
2936 See the comment in cancel_breakpoints_callback to find out
2937 why. */
2938 find_inferior (&all_threads, cancel_breakpoints_callback, event_child);
2939
2940 	 /* If we were doing a step-over, all other threads but the stepping one
2941 had been paused in start_step_over, with their suspend counts
2942 incremented. We don't want to do a full unstop/unpause, because we're
2943 in all-stop mode (so we want threads stopped), but we still need to
2944 unsuspend the other threads, to decrement their `suspended' count
2945 back. */
2946 if (step_over_finished)
2947 unsuspend_all_lwps (event_child);
2948
2949 /* Stabilize threads (move out of jump pads). */
2950 stabilize_threads ();
2951 }
2952 else
2953 {
2954 /* If we just finished a step-over, then all threads had been
2955 momentarily paused. In all-stop, that's fine, we want
2956 threads stopped by now anyway. In non-stop, we need to
2957 re-resume threads that GDB wanted to be running. */
2958 if (step_over_finished)
2959 unstop_all_lwps (1, event_child);
2960 }
2961
2962 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2963
2964 if (current_inferior->last_resume_kind == resume_stop
2965 && WSTOPSIG (w) == SIGSTOP)
2966 {
2967       /* A thread that has been requested to stop by GDB with vCont;t
2968 	 stopped cleanly, so report it as SIG0.  The use of
2969 SIGSTOP is an implementation detail. */
2970 ourstatus->value.sig = GDB_SIGNAL_0;
2971 }
2972 else if (current_inferior->last_resume_kind == resume_stop
2973 && WSTOPSIG (w) != SIGSTOP)
2974 {
2975       /* A thread that has been requested to stop by GDB with vCont;t,
2976 	 but it stopped for some other reason.  */
2977 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2978 }
2979 else
2980 {
2981 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2982 }
2983
2984 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2985
2986 if (debug_threads)
2987 {
2988 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
2989 target_pid_to_str (ptid_of (current_inferior)),
2990 ourstatus->kind, ourstatus->value.sig);
2991 debug_exit ();
2992 }
2993
2994 return ptid_of (current_inferior);
2995 }
2996
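/* The event pipe below implements the classic self-pipe trick: the
   SIGCHLD handler (elsewhere in this file) writes a byte into
   linux_event_pipe[1] to wake up the event loop, which polls
   linux_event_pipe[0].  Both helpers retry on EINTR, since a signal
   can interrupt the read or write.  */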
2997 /* Get rid of any pending event in the pipe. */
2998 static void
2999 async_file_flush (void)
3000 {
3001 int ret;
3002 char buf;
3003
3004 do
3005 ret = read (linux_event_pipe[0], &buf, 1);
3006 while (ret >= 0 || (ret == -1 && errno == EINTR));
3007 }
3008
3009 /* Put something in the pipe, so the event loop wakes up. */
3010 static void
3011 async_file_mark (void)
3012 {
3013 int ret;
3014
3015 async_file_flush ();
3016
3017 do
3018 ret = write (linux_event_pipe[1], "+", 1);
3019 while (ret == 0 || (ret == -1 && errno == EINTR));
3020
3021 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3022 be awakened anyway. */
3023 }
3024
3025 static ptid_t
3026 linux_wait (ptid_t ptid,
3027 struct target_waitstatus *ourstatus, int target_options)
3028 {
3029 ptid_t event_ptid;
3030
3031 /* Flush the async file first. */
3032 if (target_is_async_p ())
3033 async_file_flush ();
3034
3035 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3036
3037 /* If at least one stop was reported, there may be more. A single
3038 SIGCHLD can signal more than one child stop. */
3039 if (target_is_async_p ()
3040 && (target_options & TARGET_WNOHANG) != 0
3041 && !ptid_equal (event_ptid, null_ptid))
3042 async_file_mark ();
3043
3044 return event_ptid;
3045 }
3046
3047 /* Send a signal to an LWP. */
3048
3049 static int
3050 kill_lwp (unsigned long lwpid, int signo)
3051 {
3052 /* Use tkill, if possible, in case we are using nptl threads. If tkill
3053 fails, then we are not using nptl threads and we should be using kill. */
3054
3055 #ifdef __NR_tkill
3056 {
3057 static int tkill_failed;
3058
3059 if (!tkill_failed)
3060 {
3061 int ret;
3062
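	    /* Zero errno before the syscall: an unsupported tkill is
	       detected via errno == ENOSYS, and a stale value could
	       otherwise be mistaken for (or mask) that.  */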
3063 errno = 0;
3064 ret = syscall (__NR_tkill, lwpid, signo);
3065 if (errno != ENOSYS)
3066 return ret;
3067 tkill_failed = 1;
3068 }
3069 }
3070 #endif
3071
3072 return kill (lwpid, signo);
3073 }
3074
3075 void
3076 linux_stop_lwp (struct lwp_info *lwp)
3077 {
3078 send_sigstop (lwp);
3079 }
3080
3081 static void
3082 send_sigstop (struct lwp_info *lwp)
3083 {
3084 int pid;
3085
3086 pid = lwpid_of (get_lwp_thread (lwp));
3087
3088 /* If we already have a pending stop signal for this process, don't
3089 send another. */
3090 if (lwp->stop_expected)
3091 {
3092 if (debug_threads)
3093 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3094
3095 return;
3096 }
3097
3098 if (debug_threads)
3099 debug_printf ("Sending sigstop to lwp %d\n", pid);
3100
3101 lwp->stop_expected = 1;
3102 kill_lwp (pid, SIGSTOP);
3103 }
3104
3105 static int
3106 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3107 {
3108 struct thread_info *thread = (struct thread_info *) entry;
3109 struct lwp_info *lwp = get_thread_lwp (thread);
3110
3111 /* Ignore EXCEPT. */
3112 if (lwp == except)
3113 return 0;
3114
3115 if (lwp->stopped)
3116 return 0;
3117
3118 send_sigstop (lwp);
3119 return 0;
3120 }
3121
3122 /* Increment the suspend count of an LWP, and stop it, if not stopped
3123 yet. */
3124 static int
3125 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3126 void *except)
3127 {
3128 struct thread_info *thread = (struct thread_info *) entry;
3129 struct lwp_info *lwp = get_thread_lwp (thread);
3130
3131 /* Ignore EXCEPT. */
3132 if (lwp == except)
3133 return 0;
3134
3135 lwp->suspended++;
3136
3137 return send_sigstop_callback (entry, except);
3138 }
3139
3140 static void
3141 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3142 {
3143 /* It's dead, really. */
3144 lwp->dead = 1;
3145
3146 /* Store the exit status for later. */
3147 lwp->status_pending_p = 1;
3148 lwp->status_pending = wstat;
3149
3150 /* Prevent trying to stop it. */
3151 lwp->stopped = 1;
3152
3153 /* No further stops are expected from a dead lwp. */
3154 lwp->stop_expected = 0;
3155 }
3156
3157 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3158
3159 static void
3160 wait_for_sigstop (void)
3161 {
3162 struct thread_info *saved_inferior;
3163 ptid_t saved_tid;
3164 int wstat;
3165 int ret;
3166
3167 saved_inferior = current_inferior;
3168 if (saved_inferior != NULL)
3169 saved_tid = saved_inferior->entry.id;
3170 else
3171 saved_tid = null_ptid; /* avoid bogus unused warning */
3172
3173 if (debug_threads)
3174 debug_printf ("wait_for_sigstop: pulling events\n");
3175
3176 /* Passing NULL_PTID as filter indicates we want all events to be
3177 left pending. Eventually this returns when there are no
3178 unwaited-for children left. */
3179 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3180 &wstat, __WALL);
3181 gdb_assert (ret == -1);
3182
3183 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
3184 current_inferior = saved_inferior;
3185 else
3186 {
3187 if (debug_threads)
3188 debug_printf ("Previously current thread died.\n");
3189
3190 if (non_stop)
3191 {
3192 /* We can't change the current inferior behind GDB's back,
3193 otherwise, a subsequent command may apply to the wrong
3194 process. */
3195 current_inferior = NULL;
3196 }
3197 else
3198 {
3199 /* Set a valid thread as current. */
3200 set_desired_inferior (0);
3201 }
3202 }
3203 }
3204
3205 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3206 move it out, because we need to report the stop event to GDB. For
3207 example, if the user puts a breakpoint in the jump pad, it's
3208 because she wants to debug it. */
3209
3210 static int
3211 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3212 {
3213 struct thread_info *thread = (struct thread_info *) entry;
3214 struct lwp_info *lwp = get_thread_lwp (thread);
3215
3216 gdb_assert (lwp->suspended == 0);
3217 gdb_assert (lwp->stopped);
3218
3219   /* Allow debugging the jump pad, gdb_collect, etc.  */
3220 return (supports_fast_tracepoints ()
3221 && agent_loaded_p ()
3222 && (gdb_breakpoint_here (lwp->stop_pc)
3223 || lwp->stopped_by_watchpoint
3224 || thread->last_resume_kind == resume_step)
3225 && linux_fast_tracepoint_collecting (lwp, NULL));
3226 }
3227
3228 static void
3229 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3230 {
3231 struct thread_info *thread = (struct thread_info *) entry;
3232 struct lwp_info *lwp = get_thread_lwp (thread);
3233 int *wstat;
3234
3235 gdb_assert (lwp->suspended == 0);
3236 gdb_assert (lwp->stopped);
3237
3238 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3239
3240 /* Allow debugging the jump pad, gdb_collect, etc. */
3241 if (!gdb_breakpoint_here (lwp->stop_pc)
3242 && !lwp->stopped_by_watchpoint
3243 && thread->last_resume_kind != resume_step
3244 && maybe_move_out_of_jump_pad (lwp, wstat))
3245 {
3246 if (debug_threads)
3247 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3248 lwpid_of (thread));
3249
3250 if (wstat)
3251 {
3252 lwp->status_pending_p = 0;
3253 enqueue_one_deferred_signal (lwp, wstat);
3254
3255 if (debug_threads)
3256 debug_printf ("Signal %d for LWP %ld deferred "
3257 "(in jump pad)\n",
3258 WSTOPSIG (*wstat), lwpid_of (thread));
3259 }
3260
3261 linux_resume_one_lwp (lwp, 0, 0, NULL);
3262 }
3263 else
3264 lwp->suspended++;
3265 }
3266
3267 static int
3268 lwp_running (struct inferior_list_entry *entry, void *data)
3269 {
3270 struct thread_info *thread = (struct thread_info *) entry;
3271 struct lwp_info *lwp = get_thread_lwp (thread);
3272
3273 if (lwp->dead)
3274 return 0;
3275 if (lwp->stopped)
3276 return 0;
3277 return 1;
3278 }
3279
3280 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3281 If SUSPEND, then also increase the suspend count of every LWP,
3282 except EXCEPT. */
3283
3284 static void
3285 stop_all_lwps (int suspend, struct lwp_info *except)
3286 {
3287 /* Should not be called recursively. */
3288 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3289
3290 if (debug_threads)
3291 {
3292 debug_enter ();
3293 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3294 suspend ? "stop-and-suspend" : "stop",
3295 except != NULL
3296 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
3297 : "none");
3298 }
3299
3300 stopping_threads = (suspend
3301 ? STOPPING_AND_SUSPENDING_THREADS
3302 : STOPPING_THREADS);
3303
3304 if (suspend)
3305 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
3306 else
3307 find_inferior (&all_threads, send_sigstop_callback, except);
3308 wait_for_sigstop ();
3309 stopping_threads = NOT_STOPPING_THREADS;
3310
3311 if (debug_threads)
3312 {
3313 debug_printf ("stop_all_lwps done, setting stopping_threads "
3314 "back to !stopping\n");
3315 debug_exit ();
3316 }
3317 }
3318
3319 /* Resume execution of the inferior process.
3320 If STEP is nonzero, single-step it.
3321 If SIGNAL is nonzero, give it that signal. */
3322
3323 static void
3324 linux_resume_one_lwp (struct lwp_info *lwp,
3325 int step, int signal, siginfo_t *info)
3326 {
3327 struct thread_info *thread = get_lwp_thread (lwp);
3328 struct thread_info *saved_inferior;
3329 int fast_tp_collecting;
3330
3331 if (lwp->stopped == 0)
3332 return;
3333
3334 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3335
3336 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3337
3338 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3339 user used the "jump" command, or "set $pc = foo"). */
3340 if (lwp->stop_pc != get_pc (lwp))
3341 {
3342 /* Collecting 'while-stepping' actions doesn't make sense
3343 anymore. */
3344 release_while_stepping_state_list (thread);
3345 }
3346
3347 /* If we have pending signals or status, and a new signal, enqueue the
3348 signal. Also enqueue the signal if we are waiting to reinsert a
3349 breakpoint; it will be picked up again below. */
3350 if (signal != 0
3351 && (lwp->status_pending_p
3352 || lwp->pending_signals != NULL
3353 || lwp->bp_reinsert != 0
3354 || fast_tp_collecting))
3355 {
3356 struct pending_signals *p_sig;
3357 p_sig = xmalloc (sizeof (*p_sig));
3358 p_sig->prev = lwp->pending_signals;
3359 p_sig->signal = signal;
3360 if (info == NULL)
3361 memset (&p_sig->info, 0, sizeof (siginfo_t));
3362 else
3363 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3364 lwp->pending_signals = p_sig;
3365 }
3366
3367 if (lwp->status_pending_p)
3368 {
3369 if (debug_threads)
3370 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3371 " has pending status\n",
3372 lwpid_of (thread), step ? "step" : "continue", signal,
3373 lwp->stop_expected ? "expected" : "not expected");
3374 return;
3375 }
3376
3377 saved_inferior = current_inferior;
3378 current_inferior = thread;
3379
3380 if (debug_threads)
3381 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
3382 lwpid_of (thread), step ? "step" : "continue", signal,
3383 lwp->stop_expected ? "expected" : "not expected");
3384
3385 /* This bit needs some thinking about. If we get a signal that
3386 we must report while a single-step reinsert is still pending,
3387 we often end up resuming the thread. It might be better to
3388 (ew) allow a stack of pending events; then we could be sure that
3389 the reinsert happened right away and not lose any signals.
3390
3391 Making this stack would also shrink the window in which breakpoints are
3392 uninserted (see comment in linux_wait_for_lwp) but not enough for
3393 complete correctness, so it won't solve that problem. It may be
3394 worthwhile just to solve this one, however. */
3395 if (lwp->bp_reinsert != 0)
3396 {
3397 if (debug_threads)
3398 debug_printf (" pending reinsert at 0x%s\n",
3399 paddress (lwp->bp_reinsert));
3400
3401 if (can_hardware_single_step ())
3402 {
3403 if (fast_tp_collecting == 0)
3404 {
3405 if (step == 0)
3406 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3407 if (lwp->suspended)
3408 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3409 lwp->suspended);
3410 }
3411
3412 step = 1;
3413 }
3414
3415 /* Postpone any pending signal. It was enqueued above. */
3416 signal = 0;
3417 }
3418
3419 if (fast_tp_collecting == 1)
3420 {
3421 if (debug_threads)
3422 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3423 " (exit-jump-pad-bkpt)\n",
3424 lwpid_of (thread));
3425
3426 /* Postpone any pending signal. It was enqueued above. */
3427 signal = 0;
3428 }
3429 else if (fast_tp_collecting == 2)
3430 {
3431 if (debug_threads)
3432 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3433 " single-stepping\n",
3434 lwpid_of (thread));
3435
3436 if (can_hardware_single_step ())
3437 step = 1;
3438 else
3439 fatal ("moving out of jump pad single-stepping"
3440 " not implemented on this target");
3441
3442 /* Postpone any pending signal. It was enqueued above. */
3443 signal = 0;
3444 }
3445
3446   /* If we have while-stepping actions in this thread, set it stepping.
3447 If we have a signal to deliver, it may or may not be set to
3448 SIG_IGN, we don't know. Assume so, and allow collecting
3449 while-stepping into a signal handler. A possible smart thing to
3450 do would be to set an internal breakpoint at the signal return
3451 address, continue, and carry on catching this while-stepping
3452 action only when that breakpoint is hit. A future
3453 enhancement. */
3454 if (thread->while_stepping != NULL
3455 && can_hardware_single_step ())
3456 {
3457 if (debug_threads)
3458 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
3459 lwpid_of (thread));
3460 step = 1;
3461 }
3462
3463 if (debug_threads && the_low_target.get_pc != NULL)
3464 {
3465 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
3466 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
3467 debug_printf (" resuming from pc 0x%lx\n", (long) pc);
3468 }
3469
3470 /* If we have pending signals, consume one unless we are trying to
3471 reinsert a breakpoint or we're trying to finish a fast tracepoint
3472 collect. */
3473 if (lwp->pending_signals != NULL
3474 && lwp->bp_reinsert == 0
3475 && fast_tp_collecting == 0)
3476 {
3477 struct pending_signals **p_sig;
3478
3479 p_sig = &lwp->pending_signals;
3480 while ((*p_sig)->prev != NULL)
3481 p_sig = &(*p_sig)->prev;
3482
3483 signal = (*p_sig)->signal;
3484 if ((*p_sig)->info.si_signo != 0)
3485 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3486 &(*p_sig)->info);
3487
3488 free (*p_sig);
3489 *p_sig = NULL;
3490 }
3491
3492 if (the_low_target.prepare_to_resume != NULL)
3493 the_low_target.prepare_to_resume (lwp);
3494
3495 regcache_invalidate_thread (thread);
3496 errno = 0;
3497 lwp->stopped = 0;
3498 lwp->stopped_by_watchpoint = 0;
3499 lwp->stepping = step;
3500 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
3501 (PTRACE_TYPE_ARG3) 0,
3502 /* Coerce to a uintptr_t first to avoid potential gcc warning
3503 of coercing an 8 byte integer to a 4 byte pointer. */
3504 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
3505
3506 current_inferior = saved_inferior;
3507 if (errno)
3508 {
3509 /* ESRCH from ptrace either means that the thread was already
3510 running (an error) or that it is gone (a race condition). If
3511 it's gone, we will get a notification the next time we wait,
3512 so we can ignore the error. We could differentiate these
3513 two, but it's tricky without waiting; the thread still exists
3514 as a zombie, so sending it signal 0 would succeed. So just
3515 ignore ESRCH. */
3516 if (errno == ESRCH)
3517 return;
3518
3519 perror_with_name ("ptrace");
3520 }
3521 }
3522
3523 struct thread_resume_array
3524 {
3525 struct thread_resume *resume;
3526 size_t n;
3527 };
3528
3529 /* This function is called once per thread via find_inferior.
3530 ARG is a pointer to a thread_resume_array struct.
3531 We look up the thread specified by ENTRY in ARG, and mark the thread
3532 with a pointer to the appropriate resume request.
3533
3534    This algorithm is O(threads * resume elements), but the number of
3535    resume elements is small (and will remain small at least until GDB
3536    supports thread suspension).  */
3537
3538 static int
3539 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3540 {
3541 struct thread_info *thread = (struct thread_info *) entry;
3542 struct lwp_info *lwp = get_thread_lwp (thread);
3543 int ndx;
3544 struct thread_resume_array *r;
3545
3546 r = arg;
3547
3548 for (ndx = 0; ndx < r->n; ndx++)
3549 {
3550 ptid_t ptid = r->resume[ndx].thread;
3551 if (ptid_equal (ptid, minus_one_ptid)
3552 || ptid_equal (ptid, entry->id)
3553 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3554 of PID'. */
3555 || (ptid_get_pid (ptid) == pid_of (thread)
3556 && (ptid_is_pid (ptid)
3557 || ptid_get_lwp (ptid) == -1)))
3558 {
3559 if (r->resume[ndx].kind == resume_stop
3560 && thread->last_resume_kind == resume_stop)
3561 {
3562 if (debug_threads)
3563 debug_printf ("already %s LWP %ld at GDB's request\n",
3564 (thread->last_status.kind
3565 == TARGET_WAITKIND_STOPPED)
3566 ? "stopped"
3567 : "stopping",
3568 lwpid_of (thread));
3569
3570 continue;
3571 }
3572
3573 lwp->resume = &r->resume[ndx];
3574 thread->last_resume_kind = lwp->resume->kind;
3575
3576 lwp->step_range_start = lwp->resume->step_range_start;
3577 lwp->step_range_end = lwp->resume->step_range_end;
3578
3579 /* If we had a deferred signal to report, dequeue one now.
3580 This can happen if LWP gets more than one signal while
3581 trying to get out of a jump pad. */
3582 if (lwp->stopped
3583 && !lwp->status_pending_p
3584 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3585 {
3586 lwp->status_pending_p = 1;
3587
3588 if (debug_threads)
3589 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
3590 "leaving status pending.\n",
3591 WSTOPSIG (lwp->status_pending),
3592 lwpid_of (thread));
3593 }
3594
3595 return 0;
3596 }
3597 }
3598
3599 /* No resume action for this thread. */
3600 lwp->resume = NULL;
3601
3602 return 0;
3603 }
3604
3605 /* find_inferior callback for linux_resume.
3606 Set *FLAG_P if this lwp has an interesting status pending. */
3607
3608 static int
3609 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3610 {
3611 struct thread_info *thread = (struct thread_info *) entry;
3612 struct lwp_info *lwp = get_thread_lwp (thread);
3613
3614 /* LWPs which will not be resumed are not interesting, because
3615 we might not wait for them next time through linux_wait. */
3616 if (lwp->resume == NULL)
3617 return 0;
3618
3619 if (lwp->status_pending_p)
3620 * (int *) flag_p = 1;
3621
3622 return 0;
3623 }
3624
3625 /* Return 1 if this lwp that GDB wants running is stopped at an
3626 internal breakpoint that we need to step over. It assumes that any
3627 required STOP_PC adjustment has already been propagated to the
3628 inferior's regcache. */
3629
3630 static int
3631 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3632 {
3633 struct thread_info *thread = (struct thread_info *) entry;
3634 struct lwp_info *lwp = get_thread_lwp (thread);
3635 struct thread_info *saved_inferior;
3636 CORE_ADDR pc;
3637
3638 /* LWPs which will not be resumed are not interesting, because we
3639 might not wait for them next time through linux_wait. */
3640
3641 if (!lwp->stopped)
3642 {
3643 if (debug_threads)
3644 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
3645 lwpid_of (thread));
3646 return 0;
3647 }
3648
3649 if (thread->last_resume_kind == resume_stop)
3650 {
3651 if (debug_threads)
3652 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
3653 " stopped\n",
3654 lwpid_of (thread));
3655 return 0;
3656 }
3657
3658 gdb_assert (lwp->suspended >= 0);
3659
3660 if (lwp->suspended)
3661 {
3662 if (debug_threads)
3663 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
3664 lwpid_of (thread));
3665 return 0;
3666 }
3667
3668 if (!lwp->need_step_over)
3669 {
3670 if (debug_threads)
3671 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
3672 }
3673
3674 if (lwp->status_pending_p)
3675 {
3676 if (debug_threads)
3677 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
3678 " status.\n",
3679 lwpid_of (thread));
3680 return 0;
3681 }
3682
3683 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3684 or we have. */
3685 pc = get_pc (lwp);
3686
3687 /* If the PC has changed since we stopped, then don't do anything,
3688 and let the breakpoint/tracepoint be hit. This happens if, for
3689 instance, GDB handled the decr_pc_after_break subtraction itself,
3690 GDB is OOL stepping this thread, or the user has issued a "jump"
3691 	 command, or poked the thread's registers herself.  */
3692 if (pc != lwp->stop_pc)
3693 {
3694 if (debug_threads)
3695 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
3696 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3697 lwpid_of (thread),
3698 paddress (lwp->stop_pc), paddress (pc));
3699
3700 lwp->need_step_over = 0;
3701 return 0;
3702 }
3703
3704 saved_inferior = current_inferior;
3705 current_inferior = thread;
3706
3707 /* We can only step over breakpoints we know about. */
3708 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3709 {
3710 /* Don't step over a breakpoint that GDB expects to hit
3711 	 though.  If the condition is being evaluated on the target's side
3712 	 and it evaluates to false, step over this breakpoint as well.  */
3713 if (gdb_breakpoint_here (pc)
3714 && gdb_condition_true_at_breakpoint (pc)
3715 && gdb_no_commands_at_breakpoint (pc))
3716 {
3717 if (debug_threads)
3718 debug_printf ("Need step over [LWP %ld]? yes, but found"
3719 " GDB breakpoint at 0x%s; skipping step over\n",
3720 lwpid_of (thread), paddress (pc));
3721
3722 current_inferior = saved_inferior;
3723 return 0;
3724 }
3725 else
3726 {
3727 if (debug_threads)
3728 debug_printf ("Need step over [LWP %ld]? yes, "
3729 "found breakpoint at 0x%s\n",
3730 lwpid_of (thread), paddress (pc));
3731
3732 /* We've found an lwp that needs stepping over --- return 1 so
3733 that find_inferior stops looking. */
3734 current_inferior = saved_inferior;
3735
3736 /* If the step over is cancelled, this is set again. */
3737 lwp->need_step_over = 0;
3738 return 1;
3739 }
3740 }
3741
3742 current_inferior = saved_inferior;
3743
3744 if (debug_threads)
3745 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
3746 " at 0x%s\n",
3747 lwpid_of (thread), paddress (pc));
3748
3749 return 0;
3750 }
3751
3752 /* Start a step-over operation on LWP. When LWP is stopped at a
3753 breakpoint, we need to move the breakpoint out of the way to make
3754 progress. If we let other threads run while we do that, they may
3755 pass by the breakpoint location and miss hitting it. To avoid
3756 that, a step-over momentarily stops all threads while LWP is
3757 single-stepped with the breakpoint temporarily uninserted from
3758 the inferior. When the single-step finishes, we reinsert the
3759 breakpoint and let all threads that are supposed to be running
3760 run again.
3761
3762 On targets that don't support hardware single-step, we don't
3763 currently support full software single-stepping. Instead, we only
3764 support stepping over the thread event breakpoint, by asking the
3765 low target where to place a reinsert breakpoint. Since this
3766 routine assumes the breakpoint being stepped over is a thread event
3767 breakpoint, the return address of the current function is usually
3768 a good enough place to set the reinsert breakpoint. */
3769
3770 static int
3771 start_step_over (struct lwp_info *lwp)
3772 {
3773 struct thread_info *thread = get_lwp_thread (lwp);
3774 struct thread_info *saved_inferior;
3775 CORE_ADDR pc;
3776 int step;
3777
3778 if (debug_threads)
3779 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
3780 lwpid_of (thread));
3781
3782 stop_all_lwps (1, lwp);
3783 gdb_assert (lwp->suspended == 0);
3784
3785 if (debug_threads)
3786 debug_printf ("Done stopping all threads for step-over.\n");
3787
3788 /* Note, we should always reach here with an already adjusted PC,
3789 either by GDB (if we're resuming due to GDB's request), or by our
3790 caller, if we just finished handling an internal breakpoint GDB
3791 shouldn't care about. */
3792 pc = get_pc (lwp);
3793
3794 saved_inferior = current_inferior;
3795 current_inferior = thread;
3796
3797 lwp->bp_reinsert = pc;
3798 uninsert_breakpoints_at (pc);
3799 uninsert_fast_tracepoint_jumps_at (pc);
3800
3801 if (can_hardware_single_step ())
3802 {
3803 step = 1;
3804 }
3805 else
3806 {
3807 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3808 set_reinsert_breakpoint (raddr);
3809 step = 0;
3810 }
3811
3812 current_inferior = saved_inferior;
3813
3814 linux_resume_one_lwp (lwp, step, 0, NULL);
3815
3816 /* Require next event from this LWP. */
3817 step_over_bkpt = thread->entry.id;
3818 return 1;
3819 }
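/* For orientation, start_step_over above plus finish_step_over below
   amount to this sequence (an illustrative sketch only; error paths
   and fast tracepoint jumps omitted):

	stop_all_lwps (1, lwp);			/* freeze all other threads */
	pc = get_pc (lwp);
	uninsert_breakpoints_at (pc);		/* take the trap out of the way */
	linux_resume_one_lwp (lwp, 1, 0, NULL);	/* hardware single-step */
	... wait until step_over_bkpt's LWP reports its SIGTRAP ...
	reinsert_breakpoints_at (pc);		/* finish_step_over */
	unstop_all_lwps (0, lwp);		/* let the others run again */

   On targets without hardware single-step, the single-step is
   replaced by a reinsert breakpoint placed at the address returned
   by the_low_target.breakpoint_reinsert_addr. */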
3820
3821 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
3822 start_step_over, if still there, and delete any reinsert
3823 breakpoints we've set, on targets without hardware single-step. */
3824
3825 static int
3826 finish_step_over (struct lwp_info *lwp)
3827 {
3828 if (lwp->bp_reinsert != 0)
3829 {
3830 if (debug_threads)
3831 debug_printf ("Finished step over.\n");
3832
3833 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3834 may be no breakpoint to reinsert there by now. */
3835 reinsert_breakpoints_at (lwp->bp_reinsert);
3836 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3837
3838 lwp->bp_reinsert = 0;
3839
3840 /* Delete any software-single-step reinsert breakpoints. No
3841 longer needed. We don't have to worry about other threads
3842 hitting this trap, and later not being able to explain it,
3843 because we were stepping over a breakpoint, and we hold all
3844 threads but LWP stopped while doing that. */
3845 if (!can_hardware_single_step ())
3846 delete_reinsert_breakpoints ();
3847
3848 step_over_bkpt = null_ptid;
3849 return 1;
3850 }
3851 else
3852 return 0;
3853 }
3854
3855 /* This function is called once per thread. We check the thread's resume
3856 request, which will tell us whether to resume, step, or leave the thread
3857 stopped; and what signal, if any, it should be sent.
3858
3859 For threads which we aren't explicitly told otherwise, we preserve
3860 the stepping flag; this is used for stepping over gdbserver-placed
3861 breakpoints.
3862
3863 If a pending status was flagged in any thread, we queue any needed
3864 signals, since we won't actually resume. We already have a pending
3865 event to report, so we don't need to preserve any step requests;
3866 they should be re-issued if necessary. */
3867
3868 static int
3869 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
3870 {
3871 struct thread_info *thread = (struct thread_info *) entry;
3872 struct lwp_info *lwp = get_thread_lwp (thread);
3873 int step;
3874 int leave_all_stopped = * (int *) arg;
3875 int leave_pending;
3876
3877 if (lwp->resume == NULL)
3878 return 0;
3879
3880 if (lwp->resume->kind == resume_stop)
3881 {
3882 if (debug_threads)
3883 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
3884
3885 if (!lwp->stopped)
3886 {
3887 if (debug_threads)
3888 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
3889
3890 /* Stop the thread, and wait for the event asynchronously,
3891 through the event loop. */
3892 send_sigstop (lwp);
3893 }
3894 else
3895 {
3896 if (debug_threads)
3897 debug_printf ("already stopped LWP %ld\n",
3898 lwpid_of (thread));
3899
3900 /* The LWP may have been stopped in an internal event that
3901 was not meant to be notified back to GDB (e.g., gdbserver
3902 breakpoint), so we should be reporting a stop event in
3903 this case too. */
3904
3905 /* If the thread already has a pending SIGSTOP, this is a
3906 no-op. Otherwise, something later will presumably resume
3907 the thread and this will cause it to cancel any pending
3908 operation, due to last_resume_kind == resume_stop. If
3909 the thread already has a pending status to report, we
3910 will still report it the next time we wait - see
3911 status_pending_p_callback. */
3912
3913 /* If we already have a pending signal to report, then
3914 there's no need to queue a SIGSTOP, as this means we're
3915 midway through moving the LWP out of the jumppad, and we
3916 will report the pending signal as soon as that is
3917 finished. */
3918 if (lwp->pending_signals_to_report == NULL)
3919 send_sigstop (lwp);
3920 }
3921
3922 /* For stop requests, we're done. */
3923 lwp->resume = NULL;
3924 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3925 return 0;
3926 }
3927
3928 /* If this thread which is about to be resumed has a pending status,
3929 then don't resume any threads - we can just report the pending
3930 status. Make sure to queue any signals that would otherwise be
3931 sent. In all-stop mode, we base this decision on whether *any*
3932 thread has a pending status. If there's a thread that needs the
3933 step-over-breakpoint dance, then don't resume any other thread
3934 but that particular one. */
3935 leave_pending = (lwp->status_pending_p || leave_all_stopped);
3936
3937 if (!leave_pending)
3938 {
3939 if (debug_threads)
3940 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
3941
3942 step = (lwp->resume->kind == resume_step);
3943 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
3944 }
3945 else
3946 {
3947 if (debug_threads)
3948 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
3949
3950 /* If we have a new signal, enqueue the signal. */
3951 if (lwp->resume->sig != 0)
3952 {
3953 struct pending_signals *p_sig;
3954 p_sig = xmalloc (sizeof (*p_sig));
3955 p_sig->prev = lwp->pending_signals;
3956 p_sig->signal = lwp->resume->sig;
3957 memset (&p_sig->info, 0, sizeof (siginfo_t));
3958
3959 /* If this is the same signal we were previously stopped by,
3960 make sure to queue its siginfo. We can ignore the return
3961 value of ptrace; if it fails, we'll skip
3962 PTRACE_SETSIGINFO. */
3963 if (WIFSTOPPED (lwp->last_status)
3964 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3965 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3966 &p_sig->info);
3967
3968 lwp->pending_signals = p_sig;
3969 }
3970 }
3971
3972 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3973 lwp->resume = NULL;
3974 return 0;
3975 }
3976
3977 static void
3978 linux_resume (struct thread_resume *resume_info, size_t n)
3979 {
3980 struct thread_resume_array array = { resume_info, n };
3981 struct thread_info *need_step_over = NULL;
3982 int any_pending;
3983 int leave_all_stopped;
3984
3985 if (debug_threads)
3986 {
3987 debug_enter ();
3988 debug_printf ("linux_resume:\n");
3989 }
3990
3991 find_inferior (&all_threads, linux_set_resume_request, &array);
3992
3993 /* If there is a thread which would otherwise be resumed, which has
3994 a pending status, then don't resume any threads - we can just
3995 report the pending status. Make sure to queue any signals that
3996 would otherwise be sent. In non-stop mode, we'll apply this
3997 logic to each thread individually. We consume all pending events
3998 before considering starting a step-over (in all-stop). */
3999 any_pending = 0;
4000 if (!non_stop)
4001 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
4002
4003 /* If there is a thread which would otherwise be resumed, which is
4004 stopped at a breakpoint that needs stepping over, then don't
4005 resume any threads - have it step over the breakpoint with all
4006 other threads stopped, then resume all threads again. Make sure
4007 to queue any signals that would otherwise be delivered or
4008 queued. */
4009 if (!any_pending && supports_breakpoints ())
4010 need_step_over
4011 = (struct thread_info *) find_inferior (&all_threads,
4012 need_step_over_p, NULL);
4013
4014 leave_all_stopped = (need_step_over != NULL || any_pending);
4015
4016 if (debug_threads)
4017 {
4018 if (need_step_over != NULL)
4019 debug_printf ("Not resuming all, need step over\n");
4020 else if (any_pending)
4021 debug_printf ("Not resuming, all-stop and found "
4022 "an LWP with pending status\n");
4023 else
4024 debug_printf ("Resuming, no pending status or step over needed\n");
4025 }
4026
4027 /* Even if we're leaving threads stopped, queue all signals we'd
4028 otherwise deliver. */
4029 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4030
4031 if (need_step_over)
4032 start_step_over (get_thread_lwp (need_step_over));
4033
4034 if (debug_threads)
4035 {
4036 debug_printf ("linux_resume done\n");
4037 debug_exit ();
4038 }
4039 }
4040
4041 /* This function is called once per thread. We check the thread's
4042 last resume request, which will tell us whether to resume, step, or
4043 leave the thread stopped. Any signal the client requested to be
4044 delivered has already been enqueued at this point.
4045
4046 If any thread that GDB wants running is stopped at an internal
4047 breakpoint that needs stepping over, we start a step-over operation
4048 on that particular thread, and leave all others stopped. */
4049
4050 static int
4051 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4052 {
4053 struct thread_info *thread = (struct thread_info *) entry;
4054 struct lwp_info *lwp = get_thread_lwp (thread);
4055 int step;
4056
4057 if (lwp == except)
4058 return 0;
4059
4060 if (debug_threads)
4061 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4062
4063 if (!lwp->stopped)
4064 {
4065 if (debug_threads)
4066 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4067 return 0;
4068 }
4069
4070 if (thread->last_resume_kind == resume_stop
4071 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4072 {
4073 if (debug_threads)
4074 debug_printf (" client wants LWP to remain %ld stopped\n",
4075 lwpid_of (thread));
4076 return 0;
4077 }
4078
4079 if (lwp->status_pending_p)
4080 {
4081 if (debug_threads)
4082 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4083 lwpid_of (thread));
4084 return 0;
4085 }
4086
4087 gdb_assert (lwp->suspended >= 0);
4088
4089 if (lwp->suspended)
4090 {
4091 if (debug_threads)
4092 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4093 return 0;
4094 }
4095
4096 if (thread->last_resume_kind == resume_stop
4097 && lwp->pending_signals_to_report == NULL
4098 && lwp->collecting_fast_tracepoint == 0)
4099 {
4100 /* We haven't reported this LWP as stopped yet (otherwise, the
4101 last_status.kind check above would catch it, and we wouldn't
4102 reach here). This LWP may have been momentarily paused by a
4103 stop_all_lwps call while handling, for example, another LWP's
4104 step-over. In that case, the pending expected SIGSTOP signal
4105 that was queued at vCont;t handling time will have already
4106 been consumed by wait_for_sigstop, and so we need to requeue
4107 another one here. Note that if the LWP already has a SIGSTOP
4108 pending, this is a no-op. */
4109
4110 if (debug_threads)
4111 debug_printf ("Client wants LWP %ld to stop. "
4112 "Making sure it has a SIGSTOP pending\n",
4113 lwpid_of (thread));
4114
4115 send_sigstop (lwp);
4116 }
4117
4118 step = thread->last_resume_kind == resume_step;
4119 linux_resume_one_lwp (lwp, step, 0, NULL);
4120 return 0;
4121 }
4122
4123 static int
4124 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4125 {
4126 struct thread_info *thread = (struct thread_info *) entry;
4127 struct lwp_info *lwp = get_thread_lwp (thread);
4128
4129 if (lwp == except)
4130 return 0;
4131
4132 lwp->suspended--;
4133 gdb_assert (lwp->suspended >= 0);
4134
4135 return proceed_one_lwp (entry, except);
4136 }
4137
4138 /* When we finish a step-over, set threads running again. If there's
4139 another thread that may need a step-over, now's the time to start
4140 it. Eventually, we'll move all threads past their breakpoints. */
4141
4142 static void
4143 proceed_all_lwps (void)
4144 {
4145 struct thread_info *need_step_over;
4146
4147 /* If there is a thread which would otherwise be resumed, which is
4148 stopped at a breakpoint that needs stepping over, then don't
4149 resume any threads - have it step over the breakpoint with all
4150 other threads stopped, then resume all threads again. */
4151
4152 if (supports_breakpoints ())
4153 {
4154 need_step_over
4155 = (struct thread_info *) find_inferior (&all_threads,
4156 need_step_over_p, NULL);
4157
4158 if (need_step_over != NULL)
4159 {
4160 if (debug_threads)
4161 debug_printf ("proceed_all_lwps: found "
4162 "thread %ld needing a step-over\n",
4163 lwpid_of (need_step_over));
4164
4165 start_step_over (get_thread_lwp (need_step_over));
4166 return;
4167 }
4168 }
4169
4170 if (debug_threads)
4171 debug_printf ("Proceeding, no step-over needed\n");
4172
4173 find_inferior (&all_threads, proceed_one_lwp, NULL);
4174 }
4175
4176 /* Stopped LWPs that the client wanted to be running, that don't have
4177 pending statuses, are set to run again, except for EXCEPT, if not
4178 NULL. This undoes a stop_all_lwps call. */
4179
4180 static void
4181 unstop_all_lwps (int unsuspend, struct lwp_info *except)
4182 {
4183 if (debug_threads)
4184 {
4185 debug_enter ();
4186 if (except)
4187 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
4188 lwpid_of (get_lwp_thread (except)));
4189 else
4190 debug_printf ("unstopping all lwps\n");
4191 }
4192
4193 if (unsuspend)
4194 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
4195 else
4196 find_inferior (&all_threads, proceed_one_lwp, except);
4197
4198 if (debug_threads)
4199 {
4200 debug_printf ("unstop_all_lwps done\n");
4201 debug_exit ();
4202 }
4203 }
4204
4205
4206 #ifdef HAVE_LINUX_REGSETS
4207
4208 #define use_linux_regsets 1
4209
4210 /* Returns true if REGSET has been disabled. */
4211
4212 static int
4213 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4214 {
4215 return (info->disabled_regsets != NULL
4216 && info->disabled_regsets[regset - info->regsets]);
4217 }
4218
4219 /* Disable REGSET. */
4220
4221 static void
4222 disable_regset (struct regsets_info *info, struct regset_info *regset)
4223 {
4224 int dr_offset;
4225
4226 dr_offset = regset - info->regsets;
4227 if (info->disabled_regsets == NULL)
4228 info->disabled_regsets = xcalloc (1, info->num_regsets);
4229 info->disabled_regsets[dr_offset] = 1;
4230 }
4231
4232 static int
4233 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4234 struct regcache *regcache)
4235 {
4236 struct regset_info *regset;
4237 int saw_general_regs = 0;
4238 int pid;
4239 struct iovec iov;
4240
4241 regset = regsets_info->regsets;
4242
4243 pid = lwpid_of (current_inferior);
4244 while (regset->size >= 0)
4245 {
4246 void *buf, *data;
4247 int nt_type, res;
4248
4249 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4250 {
4251 regset ++;
4252 continue;
4253 }
4254
4255 buf = xmalloc (regset->size);
4256
4257 nt_type = regset->nt_type;
4258 if (nt_type)
4259 {
4260 iov.iov_base = buf;
4261 iov.iov_len = regset->size;
4262 data = (void *) &iov;
4263 }
4264 else
4265 data = buf;
4266
4267 #ifndef __sparc__
4268 res = ptrace (regset->get_request, pid,
4269 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4270 #else
4271 res = ptrace (regset->get_request, pid, data, nt_type);
4272 #endif
4273 if (res < 0)
4274 {
4275 if (errno == EIO)
4276 {
4277 /* If we get EIO on a regset, do not try it again for
4278 this process mode. */
4279 disable_regset (regsets_info, regset);
4280 free (buf);
4281 continue;
4282 }
4283 else
4284 {
4285 char s[256];
4286 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4287 pid);
4288 perror (s);
4289 }
4290 }
4291 else
{
4292 if (regset->type == GENERAL_REGS)
4293 saw_general_regs = 1;
/* Only supply the regcache from BUF on success; after a failed
ptrace request above, BUF's contents are undefined. */
regset->store_function (regcache, buf);
}
4294 regset ++;
4295 free (buf);
4296 }
4297 if (saw_general_regs)
4298 return 0;
4299 else
4300 return 1;
4301 }
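/* The nt_type/iovec dance above maps onto the raw PTRACE_GETREGSET
   interface. A minimal standalone sketch of that call (assuming
   <sys/ptrace.h>, <sys/uio.h>, <elf.h>, and <sys/procfs.h> for
   elf_gregset_t, plus a ptrace-stopped tracee PID):

	elf_gregset_t regs;
	struct iovec iov = { &regs, sizeof (regs) };

	if (ptrace (PTRACE_GETREGSET, pid, (void *) NT_PRSTATUS, &iov) == 0)
	  ;  /* success: iov.iov_len holds how many bytes the kernel wrote */

   Regsets with nt_type == 0 instead use the legacy fixed requests
   (PTRACE_GETREGS and friends), which take the buffer directly
   rather than through an iovec. */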
4302
4303 static int
4304 regsets_store_inferior_registers (struct regsets_info *regsets_info,
4305 struct regcache *regcache)
4306 {
4307 struct regset_info *regset;
4308 int saw_general_regs = 0;
4309 int pid;
4310 struct iovec iov;
4311
4312 regset = regsets_info->regsets;
4313
4314 pid = lwpid_of (current_inferior);
4315 while (regset->size >= 0)
4316 {
4317 void *buf, *data;
4318 int nt_type, res;
4319
4320 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4321 {
4322 regset ++;
4323 continue;
4324 }
4325
4326 buf = xmalloc (regset->size);
4327
4328 /* First fill the buffer with the current register set contents,
4329 in case there are any items in the kernel's regset that are
4330 not in gdbserver's regcache. */
4331
4332 nt_type = regset->nt_type;
4333 if (nt_type)
4334 {
4335 iov.iov_base = buf;
4336 iov.iov_len = regset->size;
4337 data = (void *) &iov;
4338 }
4339 else
4340 data = buf;
4341
4342 #ifndef __sparc__
4343 res = ptrace (regset->get_request, pid,
4344 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4345 #else
4346 res = ptrace (regset->get_request, pid, data, nt_type);
4347 #endif
4348
4349 if (res == 0)
4350 {
4351 /* Then overlay our cached registers on that. */
4352 regset->fill_function (regcache, buf);
4353
4354 /* Only now do we write the register set. */
4355 #ifndef __sparc__
4356 res = ptrace (regset->set_request, pid,
4357 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4358 #else
4359 res = ptrace (regset->set_request, pid, data, nt_type);
4360 #endif
4361 }
4362
4363 if (res < 0)
4364 {
4365 if (errno == EIO)
4366 {
4367 /* If we get EIO on a regset, do not try it again for
4368 this process mode. */
4369 disable_regset (regsets_info, regset);
4370 free (buf);
4371 continue;
4372 }
4373 else if (errno == ESRCH)
4374 {
4375 /* At this point, ESRCH should mean the process is
4376 already gone, in which case we simply ignore attempts
4377 to change its registers. See also the related
4378 comment in linux_resume_one_lwp. */
4379 free (buf);
4380 return 0;
4381 }
4382 else
4383 {
4384 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4385 }
4386 }
4387 else if (regset->type == GENERAL_REGS)
4388 saw_general_regs = 1;
4389 regset ++;
4390 free (buf);
4391 }
4392 if (saw_general_regs)
4393 return 0;
4394 else
4395 return 1;
4396 }
4397
4398 #else /* !HAVE_LINUX_REGSETS */
4399
4400 #define use_linux_regsets 0
4401 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4402 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4403
4404 #endif
4405
4406 /* Return 1 if register REGNO is supported by one of the regset ptrace
4407 calls or 0 if it has to be transferred individually. */
4408
4409 static int
4410 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
4411 {
4412 unsigned char mask = 1 << (regno % 8);
4413 size_t index = regno / 8;
4414
4415 return (use_linux_regsets
4416 && (regs_info->regset_bitmap == NULL
4417 || (regs_info->regset_bitmap[index] & mask) != 0));
4418 }
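/* Example: for regno == 10, index is 1 and mask is 1 << 2, so the
   test reads bit 2 of regset_bitmap[1]. A NULL regset_bitmap means
   every register is covered by some regset. */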
4419
4420 #ifdef HAVE_LINUX_USRREGS
4421
4422 int
4423 register_addr (const struct usrregs_info *usrregs, int regnum)
4424 {
4425 int addr;
4426
4427 if (regnum < 0 || regnum >= usrregs->num_regs)
4428 error ("Invalid register number %d.", regnum);
4429
4430 addr = usrregs->regmap[regnum];
4431
4432 return addr;
4433 }
4434
4435 /* Fetch one register. */
4436 static void
4437 fetch_register (const struct usrregs_info *usrregs,
4438 struct regcache *regcache, int regno)
4439 {
4440 CORE_ADDR regaddr;
4441 int i, size;
4442 char *buf;
4443 int pid;
4444
4445 if (regno >= usrregs->num_regs)
4446 return;
4447 if ((*the_low_target.cannot_fetch_register) (regno))
4448 return;
4449
4450 regaddr = register_addr (usrregs, regno);
4451 if (regaddr == -1)
4452 return;
4453
4454 size = ((register_size (regcache->tdesc, regno)
4455 + sizeof (PTRACE_XFER_TYPE) - 1)
4456 & -sizeof (PTRACE_XFER_TYPE));
4457 buf = alloca (size);
4458
4459 pid = lwpid_of (current_inferior);
4460 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4461 {
4462 errno = 0;
4463 *(PTRACE_XFER_TYPE *) (buf + i) =
4464 ptrace (PTRACE_PEEKUSER, pid,
4465 /* Coerce to a uintptr_t first to avoid potential gcc warning
4466 about coercing an 8 byte integer to a 4 byte pointer. */
4467 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
4468 regaddr += sizeof (PTRACE_XFER_TYPE);
4469 if (errno != 0)
4470 error ("reading register %d: %s", regno, strerror (errno));
4471 }
4472
4473 if (the_low_target.supply_ptrace_register)
4474 the_low_target.supply_ptrace_register (regcache, regno, buf);
4475 else
4476 supply_register (regcache, regno, buf);
4477 }
4478
4479 /* Store one register. */
4480 static void
4481 store_register (const struct usrregs_info *usrregs,
4482 struct regcache *regcache, int regno)
4483 {
4484 CORE_ADDR regaddr;
4485 int i, size;
4486 char *buf;
4487 int pid;
4488
4489 if (regno >= usrregs->num_regs)
4490 return;
4491 if ((*the_low_target.cannot_store_register) (regno))
4492 return;
4493
4494 regaddr = register_addr (usrregs, regno);
4495 if (regaddr == -1)
4496 return;
4497
4498 size = ((register_size (regcache->tdesc, regno)
4499 + sizeof (PTRACE_XFER_TYPE) - 1)
4500 & -sizeof (PTRACE_XFER_TYPE));
4501 buf = alloca (size);
4502 memset (buf, 0, size);
4503
4504 if (the_low_target.collect_ptrace_register)
4505 the_low_target.collect_ptrace_register (regcache, regno, buf);
4506 else
4507 collect_register (regcache, regno, buf);
4508
4509 pid = lwpid_of (current_inferior);
4510 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4511 {
4512 errno = 0;
4513 ptrace (PTRACE_POKEUSER, pid,
4514 /* Coerce to a uintptr_t first to avoid potential gcc warning
4515 about coercing an 8 byte integer to a 4 byte pointer. */
4516 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
4517 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
4518 if (errno != 0)
4519 {
4520 /* At this point, ESRCH should mean the process is
4521 already gone, in which case we simply ignore attempts
4522 to change its registers. See also the related
4523 comment in linux_resume_one_lwp. */
4524 if (errno == ESRCH)
4525 return;
4526
4527 if ((*the_low_target.cannot_store_register) (regno) == 0)
4528 error ("writing register %d: %s", regno, strerror (errno));
4529 }
4530 regaddr += sizeof (PTRACE_XFER_TYPE);
4531 }
4532 }
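/* Both loops above follow the PTRACE_PEEK*/POKE* errno protocol: the
   return value alone cannot distinguish a stored value of -1 from a
   failure. A minimal sketch of the pattern:

	errno = 0;
	long word = ptrace (PTRACE_PEEKUSER, pid, (void *) offset, NULL);
	if (errno != 0)
	  ;  /* genuine failure; WORD is meaningless */

   Clearing errno beforehand matters because a successful ptrace call
   leaves errno untouched, so a stale value left over from an earlier
   failed call would otherwise be misread as an error here. */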
4533
4534 /* Fetch all registers, or just one, from the child process.
4535 If REGNO is -1, do this for all registers, skipping any that are
4536 assumed to have been retrieved by regsets_fetch_inferior_registers,
4537 unless ALL is non-zero.
4538 Otherwise, REGNO specifies which register (so we can save time). */
4539 static void
4540 usr_fetch_inferior_registers (const struct regs_info *regs_info,
4541 struct regcache *regcache, int regno, int all)
4542 {
4543 struct usrregs_info *usr = regs_info->usrregs;
4544
4545 if (regno == -1)
4546 {
4547 for (regno = 0; regno < usr->num_regs; regno++)
4548 if (all || !linux_register_in_regsets (regs_info, regno))
4549 fetch_register (usr, regcache, regno);
4550 }
4551 else
4552 fetch_register (usr, regcache, regno);
4553 }
4554
4555 /* Store our register values back into the inferior.
4556 If REGNO is -1, do this for all registers, skipping any that are
4557 assumed to have been saved by regsets_store_inferior_registers,
4558 unless ALL is non-zero.
4559 Otherwise, REGNO specifies which register (so we can save time). */
4560 static void
4561 usr_store_inferior_registers (const struct regs_info *regs_info,
4562 struct regcache *regcache, int regno, int all)
4563 {
4564 struct usrregs_info *usr = regs_info->usrregs;
4565
4566 if (regno == -1)
4567 {
4568 for (regno = 0; regno < usr->num_regs; regno++)
4569 if (all || !linux_register_in_regsets (regs_info, regno))
4570 store_register (usr, regcache, regno);
4571 }
4572 else
4573 store_register (usr, regcache, regno);
4574 }
4575
4576 #else /* !HAVE_LINUX_USRREGS */
4577
4578 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4579 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4580
4581 #endif
4582
4583
4584 void
4585 linux_fetch_registers (struct regcache *regcache, int regno)
4586 {
4587 int use_regsets;
4588 int all = 0;
4589 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4590
4591 if (regno == -1)
4592 {
4593 if (the_low_target.fetch_register != NULL
4594 && regs_info->usrregs != NULL)
4595 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
4596 (*the_low_target.fetch_register) (regcache, regno);
4597
4598 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
4599 if (regs_info->usrregs != NULL)
4600 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
4601 }
4602 else
4603 {
4604 if (the_low_target.fetch_register != NULL
4605 && (*the_low_target.fetch_register) (regcache, regno))
4606 return;
4607
4608 use_regsets = linux_register_in_regsets (regs_info, regno);
4609 if (use_regsets)
4610 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
4611 regcache);
4612 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4613 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
4614 }
4615 }
4616
4617 void
4618 linux_store_registers (struct regcache *regcache, int regno)
4619 {
4620 int use_regsets;
4621 int all = 0;
4622 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4623
4624 if (regno == -1)
4625 {
4626 all = regsets_store_inferior_registers (regs_info->regsets_info,
4627 regcache);
4628 if (regs_info->usrregs != NULL)
4629 usr_store_inferior_registers (regs_info, regcache, regno, all);
4630 }
4631 else
4632 {
4633 use_regsets = linux_register_in_regsets (regs_info, regno);
4634 if (use_regsets)
4635 all = regsets_store_inferior_registers (regs_info->regsets_info,
4636 regcache);
4637 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4638 usr_store_inferior_registers (regs_info, regcache, regno, 1);
4639 }
4640 }
4641
4642
4643 /* Copy LEN bytes from inferior's memory starting at MEMADDR
4644 to debugger memory starting at MYADDR. */
4645
4646 static int
4647 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4648 {
4649 int pid = lwpid_of (current_inferior);
4650 register PTRACE_XFER_TYPE *buffer;
4651 register CORE_ADDR addr;
4652 register int count;
4653 char filename[64];
4654 register int i;
4655 int ret;
4656 int fd;
4657
4658 /* Try using /proc. Don't bother for one word. */
4659 if (len >= 3 * sizeof (long))
4660 {
4661 int bytes;
4662
4663 /* We could keep this file open and cache it - possibly one per
4664 thread. That requires some juggling, but is even faster. */
4665 sprintf (filename, "/proc/%d/mem", pid);
4666 fd = open (filename, O_RDONLY | O_LARGEFILE);
4667 if (fd == -1)
4668 goto no_proc;
4669
4670 /* If pread64 is available, use it. It's faster if the kernel
4671 supports it (only one syscall), and it's 64-bit safe even on
4672 32-bit platforms (for instance, SPARC debugging a SPARC64
4673 application). */
4674 #ifdef HAVE_PREAD64
4675 bytes = pread64 (fd, myaddr, len, memaddr);
4676 #else
4677 bytes = -1;
4678 if (lseek (fd, memaddr, SEEK_SET) != -1)
4679 bytes = read (fd, myaddr, len);
4680 #endif
4681
4682 close (fd);
4683 if (bytes == len)
4684 return 0;
4685
4686 /* Some data was read, we'll try to get the rest with ptrace. */
4687 if (bytes > 0)
4688 {
4689 memaddr += bytes;
4690 myaddr += bytes;
4691 len -= bytes;
4692 }
4693 }
4694
4695 no_proc:
4696 /* Round starting address down to longword boundary. */
4697 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4698 /* Round ending address up; get number of longwords that makes. */
4699 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4700 / sizeof (PTRACE_XFER_TYPE));
4701 /* Allocate buffer of that many longwords. */
4702 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4703
4704 /* Read all the longwords. */
4705 errno = 0;
4706 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4707 {
4708 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4709 about coercing an 8 byte integer to a 4 byte pointer. */
4710 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4711 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4712 (PTRACE_TYPE_ARG4) 0);
4713 if (errno)
4714 break;
4715 }
4716 ret = errno;
4717
4718 /* Copy appropriate bytes out of the buffer. */
4719 if (i > 0)
4720 {
4721 i *= sizeof (PTRACE_XFER_TYPE);
4722 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
4723 memcpy (myaddr,
4724 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4725 i < len ? i : len);
4726 }
4727
4728 return ret;
4729 }
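/* Example use (illustrative only): read an 8-byte word from the
   inferior, transparently preferring /proc/PID/mem and falling back
   to PTRACE_PEEKTEXT:

	unsigned char word[8];

	if (linux_read_memory (memaddr, word, sizeof word) != 0)
	  ;  /* failure; the return value is the errno from the peek */
*/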
4730
4731 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4732 memory at MEMADDR. On failure (cannot write to the inferior)
4733 returns the value of errno. Always succeeds if LEN is zero. */
4734
4735 static int
4736 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4737 {
4738 register int i;
4739 /* Round starting address down to longword boundary. */
4740 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4741 /* Round ending address up; get number of longwords that makes. */
4742 register int count
4743 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4744 / sizeof (PTRACE_XFER_TYPE);
4745
4746 /* Allocate buffer of that many longwords. */
4747 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4748 alloca (count * sizeof (PTRACE_XFER_TYPE));
4749
4750 int pid = lwpid_of (current_inferior);
4751
4752 if (len == 0)
4753 {
4754 /* Zero length write always succeeds. */
4755 return 0;
4756 }
4757
4758 if (debug_threads)
4759 {
4760 /* Dump up to four bytes. */
4761 unsigned int val = * (unsigned int *) myaddr;
4762 if (len == 1)
4763 val = val & 0xff;
4764 else if (len == 2)
4765 val = val & 0xffff;
4766 else if (len == 3)
4767 val = val & 0xffffff;
4768 debug_printf ("Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4769 val, (long)memaddr);
4770 }
4771
4772 /* Fill start and end extra bytes of buffer with existing memory data. */
4773
4774 errno = 0;
4775 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4776 about coercing an 8 byte integer to a 4 byte pointer. */
4777 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4778 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4779 (PTRACE_TYPE_ARG4) 0);
4780 if (errno)
4781 return errno;
4782
4783 if (count > 1)
4784 {
4785 errno = 0;
4786 buffer[count - 1]
4787 = ptrace (PTRACE_PEEKTEXT, pid,
4788 /* Coerce to a uintptr_t first to avoid potential gcc warning
4789 about coercing an 8 byte integer to a 4 byte pointer. */
4790 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
4791 * sizeof (PTRACE_XFER_TYPE)),
4792 (PTRACE_TYPE_ARG4) 0);
4793 if (errno)
4794 return errno;
4795 }
4796
4797 /* Copy data to be written over corresponding part of buffer. */
4798
4799 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4800 myaddr, len);
4801
4802 /* Write the entire buffer. */
4803
4804 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4805 {
4806 errno = 0;
4807 ptrace (PTRACE_POKETEXT, pid,
4808 /* Coerce to a uintptr_t first to avoid potential gcc warning
4809 about coercing an 8 byte integer to a 4 byte pointer. */
4810 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4811 (PTRACE_TYPE_ARG4) buffer[i]);
4812 if (errno)
4813 return errno;
4814 }
4815
4816 return 0;
4817 }
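/* Worked example of the rounding above, with an 8-byte
   PTRACE_XFER_TYPE: writing LEN == 3 bytes at MEMADDR == 0x1006
   gives ADDR == 0x1000 and COUNT == 2. Words 0x1000 and 0x1008 are
   peeked into the 16-byte buffer, bytes 6..8 of the buffer are
   overwritten from MYADDR, and both words are poked back, leaving
   every surrounding byte intact. */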
4818
4819 static void
4820 linux_look_up_symbols (void)
4821 {
4822 #ifdef USE_THREAD_DB
4823 struct process_info *proc = current_process ();
4824
4825 if (proc->private->thread_db != NULL)
4826 return;
4827
4828 /* If the kernel supports tracing clones, then we don't need to
4829 use the magic thread event breakpoint to learn about
4830 threads. */
4831 thread_db_init (!linux_supports_traceclone ());
4832 #endif
4833 }
4834
4835 static void
4836 linux_request_interrupt (void)
4837 {
4838 extern unsigned long signal_pid;
4839
4840 if (!ptid_equal (cont_thread, null_ptid)
4841 && !ptid_equal (cont_thread, minus_one_ptid))
4842 {
4843 int lwpid;
4844
4845 lwpid = lwpid_of (current_inferior);
4846 kill_lwp (lwpid, SIGINT);
4847 }
4848 else
4849 kill_lwp (signal_pid, SIGINT);
4850 }
4851
4852 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4853 to debugger memory starting at MYADDR. */
4854
4855 static int
4856 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
4857 {
4858 char filename[PATH_MAX];
4859 int fd, n;
4860 int pid = lwpid_of (current_inferior);
4861
4862 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4863
4864 fd = open (filename, O_RDONLY);
4865 if (fd < 0)
4866 return -1;
4867
4868 if (offset != (CORE_ADDR) 0
4869 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4870 n = -1;
4871 else
4872 n = read (fd, myaddr, len);
4873
4874 close (fd);
4875
4876 return n;
4877 }
4878
4879 /* These breakpoint and watchpoint related wrapper functions simply
4880 pass on the function call if the target has registered a
4881 corresponding function. */
4882
4883 static int
4884 linux_supports_z_point_type (char z_type)
4885 {
4886 return (the_low_target.supports_z_point_type != NULL
4887 && the_low_target.supports_z_point_type (z_type));
4888 }
4889
4890 static int
4891 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
4892 int size, struct raw_breakpoint *bp)
4893 {
4894 if (the_low_target.insert_point != NULL)
4895 return the_low_target.insert_point (type, addr, size, bp);
4896 else
4897 /* Unsupported (see target.h). */
4898 return 1;
4899 }
4900
4901 static int
4902 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
4903 int size, struct raw_breakpoint *bp)
4904 {
4905 if (the_low_target.remove_point != NULL)
4906 return the_low_target.remove_point (type, addr, size, bp);
4907 else
4908 /* Unsupported (see target.h). */
4909 return 1;
4910 }
4911
4912 static int
4913 linux_stopped_by_watchpoint (void)
4914 {
4915 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4916
4917 return lwp->stopped_by_watchpoint;
4918 }
4919
4920 static CORE_ADDR
4921 linux_stopped_data_address (void)
4922 {
4923 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4924
4925 return lwp->stopped_data_address;
4926 }
4927
4928 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
4929 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
4930 && defined(PT_TEXT_END_ADDR)
4931
4932 /* This is only used for targets that define PT_TEXT_ADDR,
4933 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, presumably
4934 the target has other ways of acquiring this information, such as
4935 loadmaps. */
4936
4937 /* Under uClinux, programs are loaded at non-zero offsets, which we need
4938 to tell gdb about. */
4939
4940 static int
4941 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4942 {
4943 unsigned long text, text_end, data;
4944 int pid = lwpid_of (get_thread_lwp (current_inferior));
4945
4946 errno = 0;
4947
4948 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
4949 (PTRACE_TYPE_ARG4) 0);
4950 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
4951 (PTRACE_TYPE_ARG4) 0);
4952 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
4953 (PTRACE_TYPE_ARG4) 0);
4954
4955 if (errno == 0)
4956 {
4957 /* Both text and data offsets produced at compile-time (and so
4958 used by gdb) are relative to the beginning of the program,
4959 with the data segment immediately following the text segment.
4960 However, the actual runtime layout in memory may put the data
4961 somewhere else, so when we send gdb a data base-address, we
4962 use the real data base address and subtract the compile-time
4963 data base-address from it (which is just the length of the
4964 text segment). BSS immediately follows data in both
4965 cases. */
4966 *text_p = text;
4967 *data_p = data - (text_end - text);
4968
4969 return 1;
4970 }
4971 return 0;
4972 }
4973 #endif
4974
4975 static int
4976 linux_qxfer_osdata (const char *annex,
4977 unsigned char *readbuf, unsigned const char *writebuf,
4978 CORE_ADDR offset, int len)
4979 {
4980 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4981 }
4982
4983 /* Convert a native/host siginfo object, into/from the siginfo in the
4984 layout of the inferiors' architecture. */
4985
4986 static void
4987 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
4988 {
4989 int done = 0;
4990
4991 if (the_low_target.siginfo_fixup != NULL)
4992 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4993
4994 /* If there was no callback, or the callback didn't do anything,
4995 then just do a straight memcpy. */
4996 if (!done)
4997 {
4998 if (direction == 1)
4999 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5000 else
5001 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5002 }
5003 }
5004
5005 static int
5006 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
5007 unsigned const char *writebuf, CORE_ADDR offset, int len)
5008 {
5009 int pid;
5010 siginfo_t siginfo;
5011 char inf_siginfo[sizeof (siginfo_t)];
5012
5013 if (current_inferior == NULL)
5014 return -1;
5015
5016 pid = lwpid_of (current_inferior);
5017
5018 if (debug_threads)
5019 debug_printf ("%s siginfo for lwp %d.\n",
5020 readbuf != NULL ? "Reading" : "Writing",
5021 pid);
5022
5023 if (offset >= sizeof (siginfo))
5024 return -1;
5025
5026 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5027 return -1;
5028
5029 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5030 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5031 inferior with a 64-bit GDBSERVER should look the same as debugging it
5032 with a 32-bit GDBSERVER, we need to convert it. */
5033 siginfo_fixup (&siginfo, inf_siginfo, 0);
5034
5035 if (offset + len > sizeof (siginfo))
5036 len = sizeof (siginfo) - offset;
5037
5038 if (readbuf != NULL)
5039 memcpy (readbuf, inf_siginfo + offset, len);
5040 else
5041 {
5042 memcpy (inf_siginfo + offset, writebuf, len);
5043
5044 /* Convert back to ptrace layout before flushing it out. */
5045 siginfo_fixup (&siginfo, inf_siginfo, 1);
5046
5047 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5048 return -1;
5049 }
5050
5051 return len;
5052 }
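/* The fixup above exists because siginfo_t is not layout-compatible
   across ABIs: on x86-64 the union payload starts at byte 16, while
   on 32-bit x86 it starts at byte 12, so a field such as si_addr
   moves between the two layouts. One way to inspect the native
   offset (illustrative only; needs <stddef.h> and <stdio.h>):

	printf ("si_addr lives at offset %zu\n",
		offsetof (siginfo_t, si_addr));

   the_low_target.siginfo_fixup is the hook that knows both layouts. */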
5053
5054 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
5055 it lets us notice when children change state; and it acts as the
5056 handler for the sigsuspend in my_waitpid. */
5057
5058 static void
5059 sigchld_handler (int signo)
5060 {
5061 int old_errno = errno;
5062
5063 if (debug_threads)
5064 {
5065 do
5066 {
5067 /* fprintf is not async-signal-safe, so call write
5068 directly. */
5069 if (write (2, "sigchld_handler\n",
5070 sizeof ("sigchld_handler\n") - 1) < 0)
5071 break; /* just ignore */
5072 } while (0);
5073 }
5074
5075 if (target_is_async_p ())
5076 async_file_mark (); /* trigger a linux_wait */
5077
5078 errno = old_errno;
5079 }
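/* async_file_mark just writes a byte into linux_event_pipe[1] (see
   linux_async below) -- the classic self-pipe trick, since write is
   one of the few async-signal-safe ways to wake an event loop. A
   sketch of the core pattern, assuming the pipe already exists:

	static void
	handler (int signo)
	{
	  int saved_errno = errno;
	  (void) write (event_pipe[1], "+", 1);	/* wake the event loop */
	  errno = saved_errno;
	}
*/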
5080
5081 static int
5082 linux_supports_non_stop (void)
5083 {
5084 return 1;
5085 }
5086
5087 static int
5088 linux_async (int enable)
5089 {
5090 int previous = (linux_event_pipe[0] != -1);
5091
5092 if (debug_threads)
5093 debug_printf ("linux_async (%d), previous=%d\n",
5094 enable, previous);
5095
5096 if (previous != enable)
5097 {
5098 sigset_t mask;
5099 sigemptyset (&mask);
5100 sigaddset (&mask, SIGCHLD);
5101
5102 sigprocmask (SIG_BLOCK, &mask, NULL);
5103
5104 if (enable)
5105 {
5106 if (pipe (linux_event_pipe) == -1)
5107 fatal ("creating event pipe failed.");
5108
5109 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5110 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5111
5112 /* Register the event loop handler. */
5113 add_file_handler (linux_event_pipe[0],
5114 handle_target_event, NULL);
5115
5116 /* Always trigger a linux_wait. */
5117 async_file_mark ();
5118 }
5119 else
5120 {
5121 delete_file_handler (linux_event_pipe[0]);
5122
5123 close (linux_event_pipe[0]);
5124 close (linux_event_pipe[1]);
5125 linux_event_pipe[0] = -1;
5126 linux_event_pipe[1] = -1;
5127 }
5128
5129 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5130 }
5131
5132 return previous;
5133 }
5134
5135 static int
5136 linux_start_non_stop (int nonstop)
5137 {
5138 /* Register or unregister from event-loop accordingly. */
5139 linux_async (nonstop);
5140 return 0;
5141 }
5142
5143 static int
5144 linux_supports_multi_process (void)
5145 {
5146 return 1;
5147 }
5148
5149 static int
5150 linux_supports_disable_randomization (void)
5151 {
5152 #ifdef HAVE_PERSONALITY
5153 return 1;
5154 #else
5155 return 0;
5156 #endif
5157 }
5158
5159 static int
5160 linux_supports_agent (void)
5161 {
5162 return 1;
5163 }
5164
5165 static int
5166 linux_supports_range_stepping (void)
5167 {
5168 if (*the_low_target.supports_range_stepping == NULL)
5169 return 0;
5170
5171 return (*the_low_target.supports_range_stepping) ();
5172 }
5173
5174 /* Enumerate spufs IDs for process PID. */
5175 static int
5176 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5177 {
5178 int pos = 0;
5179 int written = 0;
5180 char path[128];
5181 DIR *dir;
5182 struct dirent *entry;
5183
5184 sprintf (path, "/proc/%ld/fd", pid);
5185 dir = opendir (path);
5186 if (!dir)
5187 return -1;
5188
5189 rewinddir (dir);
5190 while ((entry = readdir (dir)) != NULL)
5191 {
5192 struct stat st;
5193 struct statfs stfs;
5194 int fd;
5195
5196 fd = atoi (entry->d_name);
5197 if (!fd)
5198 continue;
5199
5200 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5201 if (stat (path, &st) != 0)
5202 continue;
5203 if (!S_ISDIR (st.st_mode))
5204 continue;
5205
5206 if (statfs (path, &stfs) != 0)
5207 continue;
5208 if (stfs.f_type != SPUFS_MAGIC)
5209 continue;
5210
5211 if (pos >= offset && pos + 4 <= offset + len)
5212 {
5213 *(unsigned int *)(buf + pos - offset) = fd;
5214 written += 4;
5215 }
5216 pos += 4;
5217 }
5218
5219 closedir (dir);
5220 return written;
5221 }
5222
5223 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5224 object type, using the /proc file system. */
5225 static int
5226 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5227 unsigned const char *writebuf,
5228 CORE_ADDR offset, int len)
5229 {
5230 long pid = lwpid_of (current_inferior);
5231 char buf[128];
5232 int fd = 0;
5233 int ret = 0;
5234
5235 if (!writebuf && !readbuf)
5236 return -1;
5237
5238 if (!*annex)
5239 {
5240 if (!readbuf)
5241 return -1;
5242 else
5243 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5244 }
5245
5246 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5247 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
5248 if (fd <= 0)
5249 return -1;
5250
5251 if (offset != 0
5252 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5253 {
5254 close (fd);
5255 return 0;
5256 }
5257
5258 if (writebuf)
5259 ret = write (fd, writebuf, (size_t) len);
5260 else
5261 ret = read (fd, readbuf, (size_t) len);
5262
5263 close (fd);
5264 return ret;
5265 }
5266
5267 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5268 struct target_loadseg
5269 {
5270 /* Core address to which the segment is mapped. */
5271 Elf32_Addr addr;
5272 /* VMA recorded in the program header. */
5273 Elf32_Addr p_vaddr;
5274 /* Size of this segment in memory. */
5275 Elf32_Word p_memsz;
5276 };
5277
5278 # if defined PT_GETDSBT
5279 struct target_loadmap
5280 {
5281 /* Protocol version number, must be zero. */
5282 Elf32_Word version;
5283 /* Pointer to the DSBT table, its size, and the DSBT index. */
5284 unsigned *dsbt_table;
5285 unsigned dsbt_size, dsbt_index;
5286 /* Number of segments in this map. */
5287 Elf32_Word nsegs;
5288 /* The actual memory map. */
5289 struct target_loadseg segs[/*nsegs*/];
5290 };
5291 # define LINUX_LOADMAP PT_GETDSBT
5292 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5293 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5294 # else
5295 struct target_loadmap
5296 {
5297 /* Protocol version number, must be zero. */
5298 Elf32_Half version;
5299 /* Number of segments in this map. */
5300 Elf32_Half nsegs;
5301 /* The actual memory map. */
5302 struct target_loadseg segs[/*nsegs*/];
5303 };
5304 # define LINUX_LOADMAP PTRACE_GETFDPIC
5305 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5306 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5307 # endif
5308
5309 static int
5310 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5311 unsigned char *myaddr, unsigned int len)
5312 {
5313 int pid = lwpid_of (current_inferior);
5314 int addr = -1;
5315 struct target_loadmap *data = NULL;
5316 unsigned int actual_length, copy_length;
5317
5318 if (strcmp (annex, "exec") == 0)
5319 addr = (int) LINUX_LOADMAP_EXEC;
5320 else if (strcmp (annex, "interp") == 0)
5321 addr = (int) LINUX_LOADMAP_INTERP;
5322 else
5323 return -1;
5324
5325 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5326 return -1;
5327
5328 if (data == NULL)
5329 return -1;
5330
5331 actual_length = sizeof (struct target_loadmap)
5332 + sizeof (struct target_loadseg) * data->nsegs;
5333
5334 if (offset < 0 || offset > actual_length)
5335 return -1;
5336
5337 copy_length = actual_length - offset < len ? actual_length - offset : len;
5338 memcpy (myaddr, (char *) data + offset, copy_length);
5339 return copy_length;
5340 }
5341 #else
5342 # define linux_read_loadmap NULL
5343 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5344
5345 static void
5346 linux_process_qsupported (const char *query)
5347 {
5348 if (the_low_target.process_qsupported != NULL)
5349 the_low_target.process_qsupported (query);
5350 }
5351
5352 static int
5353 linux_supports_tracepoints (void)
5354 {
5355 if (*the_low_target.supports_tracepoints == NULL)
5356 return 0;
5357
5358 return (*the_low_target.supports_tracepoints) ();
5359 }
5360
5361 static CORE_ADDR
5362 linux_read_pc (struct regcache *regcache)
5363 {
5364 if (the_low_target.get_pc == NULL)
5365 return 0;
5366
5367 return (*the_low_target.get_pc) (regcache);
5368 }
5369
5370 static void
5371 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5372 {
5373 gdb_assert (the_low_target.set_pc != NULL);
5374
5375 (*the_low_target.set_pc) (regcache, pc);
5376 }
5377
5378 static int
5379 linux_thread_stopped (struct thread_info *thread)
5380 {
5381 return get_thread_lwp (thread)->stopped;
5382 }
5383
5384 /* This exposes stop-all-threads functionality to other modules. */
5385
5386 static void
5387 linux_pause_all (int freeze)
5388 {
5389 stop_all_lwps (freeze, NULL);
5390 }
5391
5392 /* This exposes unstop-all-threads functionality to other gdbserver
5393 modules. */
5394
5395 static void
5396 linux_unpause_all (int unfreeze)
5397 {
5398 unstop_all_lwps (unfreeze, NULL);
5399 }
5400
5401 static int
5402 linux_prepare_to_access_memory (void)
5403 {
5404 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5405 running LWP. */
5406 if (non_stop)
5407 linux_pause_all (1);
5408 return 0;
5409 }
5410
5411 static void
5412 linux_done_accessing_memory (void)
5413 {
5414 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5415 running LWP. */
5416 if (non_stop)
5417 linux_unpause_all (1);
5418 }
5419
5420 static int
5421 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5422 CORE_ADDR collector,
5423 CORE_ADDR lockaddr,
5424 ULONGEST orig_size,
5425 CORE_ADDR *jump_entry,
5426 CORE_ADDR *trampoline,
5427 ULONGEST *trampoline_size,
5428 unsigned char *jjump_pad_insn,
5429 ULONGEST *jjump_pad_insn_size,
5430 CORE_ADDR *adjusted_insn_addr,
5431 CORE_ADDR *adjusted_insn_addr_end,
5432 char *err)
5433 {
5434 return (*the_low_target.install_fast_tracepoint_jump_pad)
5435 (tpoint, tpaddr, collector, lockaddr, orig_size,
5436 jump_entry, trampoline, trampoline_size,
5437 jjump_pad_insn, jjump_pad_insn_size,
5438 adjusted_insn_addr, adjusted_insn_addr_end,
5439 err);
5440 }
5441
5442 static struct emit_ops *
5443 linux_emit_ops (void)
5444 {
5445 if (the_low_target.emit_ops != NULL)
5446 return (*the_low_target.emit_ops) ();
5447 else
5448 return NULL;
5449 }
5450
5451 static int
5452 linux_get_min_fast_tracepoint_insn_len (void)
5453 {
5454 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5455 }
5456
5457 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5458
5459 static int
5460 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5461 CORE_ADDR *phdr_memaddr, int *num_phdr)
5462 {
5463 char filename[PATH_MAX];
5464 int fd;
5465 const int auxv_size = is_elf64
5466 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5467 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5468
5469 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5470
5471 fd = open (filename, O_RDONLY);
5472 if (fd < 0)
5473 return 1;
5474
5475 *phdr_memaddr = 0;
5476 *num_phdr = 0;
5477 while (read (fd, buf, auxv_size) == auxv_size
5478 && (*phdr_memaddr == 0 || *num_phdr == 0))
5479 {
5480 if (is_elf64)
5481 {
5482 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5483
5484 switch (aux->a_type)
5485 {
5486 case AT_PHDR:
5487 *phdr_memaddr = aux->a_un.a_val;
5488 break;
5489 case AT_PHNUM:
5490 *num_phdr = aux->a_un.a_val;
5491 break;
5492 }
5493 }
5494 else
5495 {
5496 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5497
5498 switch (aux->a_type)
5499 {
5500 case AT_PHDR:
5501 *phdr_memaddr = aux->a_un.a_val;
5502 break;
5503 case AT_PHNUM:
5504 *num_phdr = aux->a_un.a_val;
5505 break;
5506 }
5507 }
5508 }
5509
5510 close (fd);
5511
5512 if (*phdr_memaddr == 0 || *num_phdr == 0)
5513 {
5514 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5515 "phdr_memaddr = %ld, phdr_num = %d",
5516 (long) *phdr_memaddr, *num_phdr);
5517 return 2;
5518 }
5519
5520 return 0;
5521 }
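/* /proc/PID/auxv is a flat array of auxv records in the tracee's own
   word size, terminated by an AT_NULL entry. A process can read its
   own vector without this parsing via glibc (illustrative only;
   needs <sys/auxv.h>, glibc >= 2.16):

	unsigned long phdr = getauxval (AT_PHDR);
	unsigned long phnum = getauxval (AT_PHNUM);

   gdbserver has no such shortcut for the inferior, hence the manual
   read loop above. */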
5522
5523 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5524
5525 static CORE_ADDR
5526 get_dynamic (const int pid, const int is_elf64)
5527 {
5528 CORE_ADDR phdr_memaddr, relocation;
5529 int num_phdr, i;
5530 unsigned char *phdr_buf;
5531 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5532
5533 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5534 return 0;
5535
5536 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5537 phdr_buf = alloca (num_phdr * phdr_size);
5538
5539 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5540 return 0;
5541
5542 /* Compute relocation: it is expected to be 0 for "regular" executables,
5543 non-zero for PIE ones. */
5544 relocation = -1;
5545 for (i = 0; relocation == -1 && i < num_phdr; i++)
5546 if (is_elf64)
5547 {
5548 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5549
5550 if (p->p_type == PT_PHDR)
5551 relocation = phdr_memaddr - p->p_vaddr;
5552 }
5553 else
5554 {
5555 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5556
5557 if (p->p_type == PT_PHDR)
5558 relocation = phdr_memaddr - p->p_vaddr;
5559 }
5560
5561 if (relocation == -1)
5562 {
5563 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
5564 any real-world executable, including a PIE one, always has PT_PHDR
5565 present. PT_PHDR is absent from some shared libraries and from fpc
5566 (Free Pascal 2.4) binaries, but neither of those needs or provides
5567 DT_DEBUG anyway (fpc binaries are statically linked).
5568
5569 Therefore, if DT_DEBUG exists, PT_PHDR always exists as well.
5570
5571 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
5572
5573 return 0;
5574 }
5575
5576 for (i = 0; i < num_phdr; i++)
5577 {
5578 if (is_elf64)
5579 {
5580 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5581
5582 if (p->p_type == PT_DYNAMIC)
5583 return p->p_vaddr + relocation;
5584 }
5585 else
5586 {
5587 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5588
5589 if (p->p_type == PT_DYNAMIC)
5590 return p->p_vaddr + relocation;
5591 }
5592 }
5593
5594 return 0;
5595 }
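/* Worked example of the relocation computed above: for a PIE whose
   PT_PHDR records p_vaddr == 0x40 but whose program headers sit at
   0x7f0000000040 according to auxv, relocation == 0x7f0000000000 --
   the load bias that must be added to every p_vaddr, including
   PT_DYNAMIC's. For a fixed-address executable the two values
   coincide and relocation is 0. */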
5596
5597 /* Return &_r_debug in the inferior, or -1 if not present. Return value
5598 can be 0 if the inferior does not yet have the library list initialized.
5599 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
5600 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
5601
5602 static CORE_ADDR
5603 get_r_debug (const int pid, const int is_elf64)
5604 {
5605 CORE_ADDR dynamic_memaddr;
5606 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
5607 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
5608 CORE_ADDR map = -1;
5609
5610 dynamic_memaddr = get_dynamic (pid, is_elf64);
5611 if (dynamic_memaddr == 0)
5612 return map;
5613
5614 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
5615 {
5616 if (is_elf64)
5617 {
5618 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
5619 #ifdef DT_MIPS_RLD_MAP
5620 union
5621 {
5622 Elf64_Xword map;
5623 unsigned char buf[sizeof (Elf64_Xword)];
5624 }
5625 rld_map;
5626
5627 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5628 {
5629 if (linux_read_memory (dyn->d_un.d_val,
5630 rld_map.buf, sizeof (rld_map.buf)) == 0)
5631 return rld_map.map;
5632 else
5633 break;
5634 }
5635 #endif /* DT_MIPS_RLD_MAP */
5636
5637 if (dyn->d_tag == DT_DEBUG && map == -1)
5638 map = dyn->d_un.d_val;
5639
5640 if (dyn->d_tag == DT_NULL)
5641 break;
5642 }
5643 else
5644 {
5645 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
5646 #ifdef DT_MIPS_RLD_MAP
5647 union
5648 {
5649 Elf32_Word map;
5650 unsigned char buf[sizeof (Elf32_Word)];
5651 }
5652 rld_map;
5653
5654 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5655 {
5656 if (linux_read_memory (dyn->d_un.d_val,
5657 rld_map.buf, sizeof (rld_map.buf)) == 0)
5658 return rld_map.map;
5659 else
5660 break;
5661 }
5662 #endif /* DT_MIPS_RLD_MAP */
5663
5664 if (dyn->d_tag == DT_DEBUG && map == -1)
5665 map = dyn->d_un.d_val;
5666
5667 if (dyn->d_tag == DT_NULL)
5668 break;
5669 }
5670
5671 dynamic_memaddr += dyn_size;
5672 }
5673
5674 return map;
5675 }
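/* The DT_DEBUG value read above is filled in at startup by the
   dynamic linker with the address of its r_debug structure, which
   <link.h> declares roughly as (glibc's definition, abridged):

	struct r_debug
	{
	  int r_version;
	  struct link_map *r_map;	/* head of the loaded-object list */
	  ...
	};

   The qXfer code below walks r_map through explicit offset tables
   instead of these host types, since the inferior's ABI may not
   match gdbserver's. */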

/* Read one pointer from MEMADDR in the inferior.  */

static int
read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
{
  int ret;

  /* Go through a union so this works on either big or little endian
     hosts, when the inferior's pointer size is smaller than the size
     of CORE_ADDR.  It is assumed the inferior's endianness is the
     same as the superior's.  */
  union
  {
    CORE_ADDR core_addr;
    unsigned int ui;
    unsigned char uc;
  } addr;
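  /* All union members start at offset zero, so reading PTR_SIZE bytes
     into the union and then picking the member whose size matches
     PTR_SIZE extracts the value correctly on either endianness (given
     the matching-endianness assumption above).  */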

  ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
  if (ret == 0)
    {
      if (ptr_size == sizeof (CORE_ADDR))
	*ptr = addr.core_addr;
      else if (ptr_size == sizeof (unsigned int))
	*ptr = addr.ui;
      else
	gdb_assert_not_reached ("unhandled pointer size");
    }
  return ret;
}

struct link_map_offsets
  {
    /* Offset of r_debug.r_version.  */
    int r_version_offset;

    /* Offset of r_debug.r_map.  */
    int r_map_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };

/* Construct qXfer:libraries-svr4:read reply.  */

static int
linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
			    unsigned const char *writebuf,
			    CORE_ADDR offset, int len)
{
  char *document;
  unsigned document_len;
  struct process_info_private *const priv = current_process ()->private;
  char filename[PATH_MAX];
  int pid, is_elf64;

  static const struct link_map_offsets lmo_32bit_offsets =
    {
      0,     /* r_version offset.  */
      4,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      4,     /* l_name offset in link_map.  */
      8,     /* l_ld offset in link_map.  */
      12,    /* l_next offset in link_map.  */
      16     /* l_prev offset in link_map.  */
    };

  static const struct link_map_offsets lmo_64bit_offsets =
    {
      0,     /* r_version offset.  */
      8,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      8,     /* l_name offset in link_map.  */
      16,    /* l_ld offset in link_map.  */
      24,    /* l_next offset in link_map.  */
      32     /* l_prev offset in link_map.  */
    };
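
  /* For reference, these offsets mirror the glibc <link.h> layouts:
     struct r_debug { int r_version; struct link_map *r_map; ... } and
     struct link_map { ElfW(Addr) l_addr; char *l_name; ElfW(Dyn) *l_ld;
     struct link_map *l_next, *l_prev; ... }, which on LP64 targets
     places l_prev at offset 32.  */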
  const struct link_map_offsets *lmo;
  unsigned int machine;
  int ptr_size;
  CORE_ADDR lm_addr = 0, lm_prev = 0;
  int allocated = 1024;
  char *p;
  CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
  int header_done = 0;

  if (writebuf != NULL)
    return -2;
  if (readbuf == NULL)
    return -1;

  pid = lwpid_of (current_inferior);
  xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
  is_elf64 = elf_64_file_p (filename, &machine);
  lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
  ptr_size = is_elf64 ? 8 : 4;

  while (annex[0] != '\0')
    {
      const char *sep;
      CORE_ADDR *addrp;
      int name_len;

      sep = strchr (annex, '=');
      if (sep == NULL)
	break;

      name_len = sep - annex;
      if (name_len == 5 && strncmp (annex, "start", 5) == 0)
	addrp = &lm_addr;
      else if (name_len == 4 && strncmp (annex, "prev", 4) == 0)
	addrp = &lm_prev;
      else
	{
	  annex = strchr (sep, ';');
	  if (annex == NULL)
	    break;
	  annex++;
	  continue;
	}

      annex = decode_address_to_semicolon (addrp, sep + 1);
    }
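
  /* Illustrative annex (values made up): something like
     "start=7ffff7ffe190;prev=0;" resumes the walk at the given link_map
     entry with the expected previous pointer; an empty annex starts from
     r_debug.r_map instead.  */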

  if (lm_addr == 0)
    {
      int r_version = 0;

      if (priv->r_debug == 0)
	priv->r_debug = get_r_debug (pid, is_elf64);

      /* We failed to find DT_DEBUG.  That situation will not change
	 for this inferior, so do not retry.  Report it to GDB as E01;
	 see GDB's solib-svr4.c for the reasons.  */
      if (priv->r_debug == (CORE_ADDR) -1)
	return -1;

      if (priv->r_debug != 0)
	{
	  if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
				 (unsigned char *) &r_version,
				 sizeof (r_version)) != 0
	      || r_version != 1)
	    {
	      warning ("unexpected r_debug version %d", r_version);
	    }
	  else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
				 &lm_addr, ptr_size) != 0)
	    {
	      warning ("unable to read r_map from 0x%lx",
		       (long) priv->r_debug + lmo->r_map_offset);
	    }
	}
    }

  document = xmalloc (allocated);
  strcpy (document, "<library-list-svr4 version=\"1.0\"");
  p = document + strlen (document);

  while (lm_addr
	 && read_one_ptr (lm_addr + lmo->l_name_offset,
			  &l_name, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_addr_offset,
			  &l_addr, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_ld_offset,
			  &l_ld, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_prev_offset,
			  &l_prev, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_next_offset,
			  &l_next, ptr_size) == 0)
    {
      unsigned char libname[PATH_MAX];

      if (lm_prev != l_prev)
	{
	  warning ("Corrupted shared library list: 0x%lx != 0x%lx",
		   (long) lm_prev, (long) l_prev);
	  break;
	}

      /* Ignore the first entry even if it has a valid name, as it
	 corresponds to the main executable.  The first entry should not
	 be skipped if the dynamic loader was loaded late by a static
	 executable (see the solib-svr4.c parameter ignore_first), but in
	 that case the main executable has no PT_DYNAMIC present, and this
	 function has already returned above due to a failed get_r_debug.  */
      if (lm_prev == 0)
	{
	  sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
	  p = p + strlen (p);
	}
      else
	{
	  /* Not checking for error because reading may stop before
	     we've got PATH_MAX worth of characters.  */
	  libname[0] = '\0';
	  linux_read_memory (l_name, libname, sizeof (libname) - 1);
	  libname[sizeof (libname) - 1] = '\0';
	  if (libname[0] != '\0')
	    {
	      /* 6x the size for xml_escape_text below.  */
	      size_t name_len = 6 * strlen ((char *) libname);
	      char *name;

	      if (!header_done)
		{
		  /* Terminate `<library-list-svr4'.  */
		  *p++ = '>';
		  header_done = 1;
		}

	      while (allocated < p - document + name_len + 200)
		{
		  /* Expand to guarantee sufficient storage.  */
		  uintptr_t document_off = p - document;

		  document = xrealloc (document, 2 * allocated);
		  allocated *= 2;
		  p = document + document_off;
		}

	      name = xml_escape_text ((char *) libname);
	      p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
			       "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
			    name, (unsigned long) lm_addr,
			    (unsigned long) l_addr, (unsigned long) l_ld);
	      free (name);
	    }
	}

      lm_prev = lm_addr;
      lm_addr = l_next;
    }

  if (!header_done)
    {
      /* Empty list; terminate `<library-list-svr4'.  */
      strcpy (p, "/>");
    }
  else
    strcpy (p, "</library-list-svr4>");

  document_len = strlen (document);
  if (offset < document_len)
    document_len -= offset;
  else
    document_len = 0;
  if (len > document_len)
    len = document_len;

  memcpy (readbuf, document + offset, len);
  xfree (document);

  return len;
}
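
/* For illustration, a full reply document built above takes the form
   (addresses made up):

   <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
   <library name="/lib64/libc.so.6" lm="0x7ffff7ff7000"
   l_addr="0x7ffff7bc4000" l_ld="0x7ffff7f8cda0"/></library-list-svr4>

   GDB then reads it in chunks via the OFFSET/LEN arguments.  */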

#ifdef HAVE_LINUX_BTRACE

/* See to_enable_btrace target method.  */

static struct btrace_target_info *
linux_low_enable_btrace (ptid_t ptid)
{
  struct btrace_target_info *tinfo;

  tinfo = linux_enable_btrace (ptid);

  if (tinfo != NULL)
    {
      struct thread_info *thread = find_thread_ptid (ptid);
      struct regcache *regcache = get_thread_regcache (thread, 0);
      tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
    }

  return tinfo;
}

/* See to_disable_btrace target method.  */

static int
linux_low_disable_btrace (struct btrace_target_info *tinfo)
{
  enum btrace_error err;

  err = linux_disable_btrace (tinfo);
  return (err == BTRACE_ERR_NONE ? 0 : -1);
}

/* See to_read_btrace target method.  */

static int
linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
		       int type)
{
  VEC (btrace_block_s) *btrace;
  struct btrace_block *block;
  enum btrace_error err;
  int i;

  btrace = NULL;
  err = linux_read_btrace (&btrace, tinfo, type);
  if (err != BTRACE_ERR_NONE)
    {
      if (err == BTRACE_ERR_OVERFLOW)
	buffer_grow_str0 (buffer, "E.Overflow.");
      else
	buffer_grow_str0 (buffer, "E.Generic Error.");

      return -1;
    }

  buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
  buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");

  for (i = 0; VEC_iterate (btrace_block_s, btrace, i, block); i++)
    buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
		       paddress (block->begin), paddress (block->end));

  buffer_grow_str0 (buffer, "</btrace>\n");

  VEC_free (btrace_block_s, btrace);

  return 0;
}
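
/* For illustration, a successful read produces XML of the form
   (addresses made up):

   <!DOCTYPE btrace SYSTEM "btrace.dtd">
   <btrace version="1.0">
   <block begin="0x400540" end="0x400566"/>
   </btrace>  */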
#endif /* HAVE_LINUX_BTRACE */

static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_mourn,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_prepare_to_access_memory,
  linux_done_accessing_memory,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_supports_z_point_type,
  linux_insert_point,
  linux_remove_point,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
#if defined(__UCLIBC__) && defined(HAS_NOMMU)	      \
    && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
    && defined(PT_TEXT_END_ADDR)
  linux_read_offsets,
#else
  NULL,
#endif
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,
#endif
  linux_qxfer_spu,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,
#endif
  linux_common_core_of_thread,
  linux_read_loadmap,
  linux_process_qsupported,
  linux_supports_tracepoints,
  linux_read_pc,
  linux_write_pc,
  linux_thread_stopped,
  NULL,	/* get_tib_address (Windows OS specific).  */
  linux_pause_all,
  linux_unpause_all,
  linux_cancel_breakpoints,
  linux_stabilize_threads,
  linux_install_fast_tracepoint_jump_pad,
  linux_emit_ops,
  linux_supports_disable_randomization,
  linux_get_min_fast_tracepoint_insn_len,
  linux_qxfer_libraries_svr4,
  linux_supports_agent,
#ifdef HAVE_LINUX_BTRACE
  linux_supports_btrace,
  linux_low_enable_btrace,
  linux_low_disable_btrace,
  linux_low_read_btrace,
#else
  NULL,
  NULL,
  NULL,
  NULL,
#endif
  linux_supports_range_stepping,
};

static void
linux_init_signals (void)
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
  signal (__SIGRTMIN + 1, SIG_IGN);
#endif
}

#ifdef HAVE_LINUX_REGSETS
void
initialize_regsets_info (struct regsets_info *info)
{
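  /* Count the regsets; the array is terminated by a sentinel entry
     whose size is negative.  */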
  for (info->num_regsets = 0;
       info->regsets[info->num_regsets].size >= 0;
       info->num_regsets++)
    ;
}
#endif

void
initialize_low (void)
{
  struct sigaction sigchld_action;
  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (&linux_target_ops);
  set_breakpoint_data (the_low_target.breakpoint,
		       the_low_target.breakpoint_len);
  linux_init_signals ();
  linux_ptrace_init_warnings ();

  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);

  initialize_low_arch ();
}