Linux: don't resume new LWPs until we've pulled all events out of the kernel
[binutils-gdb.git] / gdb / gdbserver / linux-low.c
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2015 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
22 #include "agent.h"
23
24 #include "nat/linux-nat.h"
25 #include "nat/linux-waitpid.h"
26 #include "gdb_wait.h"
27 #include <sys/ptrace.h>
28 #include "nat/linux-ptrace.h"
29 #include "nat/linux-procfs.h"
30 #include "nat/linux-personality.h"
31 #include <signal.h>
32 #include <sys/ioctl.h>
33 #include <fcntl.h>
34 #include <unistd.h>
35 #include <sys/syscall.h>
36 #include <sched.h>
37 #include <ctype.h>
38 #include <pwd.h>
39 #include <sys/types.h>
40 #include <dirent.h>
41 #include <sys/stat.h>
42 #include <sys/vfs.h>
43 #include <sys/uio.h>
44 #include "filestuff.h"
45 #include "tracepoint.h"
46 #include "hostio.h"
47 #ifndef ELFMAG0
48 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
49 then ELFMAG0 will have been defined. If it didn't get included by
50 gdb_proc_service.h then including it will likely introduce a duplicate
51 definition of elf_fpregset_t. */
52 #include <elf.h>
53 #endif
54
55 #ifndef SPUFS_MAGIC
56 #define SPUFS_MAGIC 0x23c9b64e
57 #endif
58
59 #ifdef HAVE_PERSONALITY
60 # include <sys/personality.h>
61 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
62 # define ADDR_NO_RANDOMIZE 0x0040000
63 # endif
64 #endif
65
66 #ifndef O_LARGEFILE
67 #define O_LARGEFILE 0
68 #endif
69
70 #ifndef W_STOPCODE
71 #define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
72 #endif
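/* Illustrative sketch, not part of the original file: W_STOPCODE is the
   inverse of the WIFSTOPPED/WSTOPSIG accessors, packing a signal number
   into a waitpid-style "stopped" status (assuming the usual glibc
   status encoding).  */
#if 0
static void
w_stopcode_sketch (void)
{
  int wstat = W_STOPCODE (SIGTRAP);	/* (SIGTRAP << 8) | 0x7f.  */

  /* Round-trips through the standard accessors.  */
  gdb_assert (WIFSTOPPED (wstat));
  gdb_assert (WSTOPSIG (wstat) == SIGTRAP);
}
#endif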
73
74 /* This is the kernel's hard limit. Not to be confused with
75 SIGRTMIN. */
76 #ifndef __SIGRTMIN
77 #define __SIGRTMIN 32
78 #endif
79
80 /* Some targets did not define these ptrace constants from the start,
81 so gdbserver defines them locally here. In the future, these may
82 be removed after they are added to asm/ptrace.h. */
83 #if !(defined(PT_TEXT_ADDR) \
84 || defined(PT_DATA_ADDR) \
85 || defined(PT_TEXT_END_ADDR))
86 #if defined(__mcoldfire__)
87 /* These are still undefined in 3.10 kernels. */
88 #define PT_TEXT_ADDR 49*4
89 #define PT_DATA_ADDR 50*4
90 #define PT_TEXT_END_ADDR 51*4
91 /* BFIN already defines these since at least 2.6.32 kernels. */
92 #elif defined(BFIN)
93 #define PT_TEXT_ADDR 220
94 #define PT_TEXT_END_ADDR 224
95 #define PT_DATA_ADDR 228
96 /* These are still undefined in 3.10 kernels. */
97 #elif defined(__TMS320C6X__)
98 #define PT_TEXT_ADDR (0x10000*4)
99 #define PT_DATA_ADDR (0x10004*4)
100 #define PT_TEXT_END_ADDR (0x10008*4)
101 #endif
102 #endif
103
104 #ifdef HAVE_LINUX_BTRACE
105 # include "nat/linux-btrace.h"
106 #endif
107
108 #ifndef HAVE_ELF32_AUXV_T
109 /* Copied from glibc's elf.h. */
110 typedef struct
111 {
112 uint32_t a_type; /* Entry type */
113 union
114 {
115 uint32_t a_val; /* Integer value */
116       /* We used to have pointer elements added here.  We cannot do that,
117 though, since it does not work when using 32-bit definitions
118 on 64-bit platforms and vice versa. */
119 } a_un;
120 } Elf32_auxv_t;
121 #endif
122
123 #ifndef HAVE_ELF64_AUXV_T
124 /* Copied from glibc's elf.h. */
125 typedef struct
126 {
127 uint64_t a_type; /* Entry type */
128 union
129 {
130 uint64_t a_val; /* Integer value */
131       /* We used to have pointer elements added here.  We cannot do that,
132 though, since it does not work when using 32-bit definitions
133 on 64-bit platforms and vice versa. */
134 } a_un;
135 } Elf64_auxv_t;
136 #endif
137
138 /* A list of all unknown processes which receive stop signals. Some
139 other process will presumably claim each of these as forked
140 children momentarily. */
141
142 struct simple_pid_list
143 {
144 /* The process ID. */
145 int pid;
146
147 /* The status as reported by waitpid. */
148 int status;
149
150 /* Next in chain. */
151 struct simple_pid_list *next;
152 };
153 struct simple_pid_list *stopped_pids;
154
155 /* Trivial list manipulation functions to keep track of a list of new
156 stopped processes. */
157
158 static void
159 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
160 {
161 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
162
163 new_pid->pid = pid;
164 new_pid->status = status;
165 new_pid->next = *listp;
166 *listp = new_pid;
167 }
168
169 static int
170 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
171 {
172 struct simple_pid_list **p;
173
174 for (p = listp; *p != NULL; p = &(*p)->next)
175 if ((*p)->pid == pid)
176 {
177 struct simple_pid_list *next = (*p)->next;
178
179 *statusp = (*p)->status;
180 xfree (*p);
181 *p = next;
182 return 1;
183 }
184 return 0;
185 }
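/* Illustrative sketch, not part of the original file: how the two list
   helpers above cooperate.  A stop from a not-yet-known LWP is parked
   with add_to_pid_list; when the clone event that names it arrives,
   handle_extended_wait claims the parked status with
   pull_pid_from_list.  The LWP id below is hypothetical.  */
#if 0
static void
pid_list_sketch (void)
{
  int status;

  add_to_pid_list (&stopped_pids, 1234, W_STOPCODE (SIGSTOP));
  if (pull_pid_from_list (&stopped_pids, 1234, &status))
    {
      /* STATUS now holds the parked wait status of LWP 1234.  */
    }
}
#endif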
186
187 enum stopping_threads_kind
188 {
189 /* Not stopping threads presently. */
190 NOT_STOPPING_THREADS,
191
192 /* Stopping threads. */
193 STOPPING_THREADS,
194
195 /* Stopping and suspending threads. */
196 STOPPING_AND_SUSPENDING_THREADS
197 };
198
199 /* This is set while stop_all_lwps is in effect. */
200 enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
201
202 /* FIXME make into a target method? */
203 int using_threads = 1;
204
205 /* True if we're presently stabilizing threads (moving them out of
206 jump pads). */
207 static int stabilizing_threads;
208
209 static void linux_resume_one_lwp (struct lwp_info *lwp,
210 int step, int signal, siginfo_t *info);
211 static void linux_resume (struct thread_resume *resume_info, size_t n);
212 static void stop_all_lwps (int suspend, struct lwp_info *except);
213 static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
214 static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
215 int *wstat, int options);
216 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
217 static struct lwp_info *add_lwp (ptid_t ptid);
218 static int linux_stopped_by_watchpoint (void);
219 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
220 static void proceed_all_lwps (void);
221 static int finish_step_over (struct lwp_info *lwp);
222 static int kill_lwp (unsigned long lwpid, int signo);
223
224 /* When the event-loop is doing a step-over, this points at the thread
225 being stepped. */
226 ptid_t step_over_bkpt;
227
228 /* True if the low target can hardware single-step. Such targets
229 don't need a BREAKPOINT_REINSERT_ADDR callback. */
230
231 static int
232 can_hardware_single_step (void)
233 {
234 return (the_low_target.breakpoint_reinsert_addr == NULL);
235 }
236
237 /* True if the low target supports memory breakpoints. If so, we'll
238 have a GET_PC implementation. */
239
240 static int
241 supports_breakpoints (void)
242 {
243 return (the_low_target.get_pc != NULL);
244 }
245
246 /* Returns true if this target can support fast tracepoints. This
247 does not mean that the in-process agent has been loaded in the
248 inferior. */
249
250 static int
251 supports_fast_tracepoints (void)
252 {
253 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
254 }
255
256 /* True if LWP is stopped in its stepping range. */
257
258 static int
259 lwp_in_step_range (struct lwp_info *lwp)
260 {
261 CORE_ADDR pc = lwp->stop_pc;
262
263 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
264 }
265
266 struct pending_signals
267 {
268 int signal;
269 siginfo_t info;
270 struct pending_signals *prev;
271 };
272
273 /* The read/write ends of the pipe registered as waitable file in the
274 event loop. */
275 static int linux_event_pipe[2] = { -1, -1 };
276
277 /* True if we're currently in async mode. */
278 #define target_is_async_p() (linux_event_pipe[0] != -1)
279
280 static void send_sigstop (struct lwp_info *lwp);
281 static void wait_for_sigstop (void);
282
283 /* Return 1 if HEADER is a 64-bit ELF file, 0 if it is a 32-bit ELF file, and -1 if it is not an ELF file at all; store the machine in *MACHINE.  */
284
285 static int
286 elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
287 {
288 if (header->e_ident[EI_MAG0] == ELFMAG0
289 && header->e_ident[EI_MAG1] == ELFMAG1
290 && header->e_ident[EI_MAG2] == ELFMAG2
291 && header->e_ident[EI_MAG3] == ELFMAG3)
292 {
293 *machine = header->e_machine;
294 return header->e_ident[EI_CLASS] == ELFCLASS64;
295
296 }
297 *machine = EM_NONE;
298 return -1;
299 }
300
301 /* Return non-zero if FILE is a 64-bit ELF file,
302 zero if the file is not a 64-bit ELF file,
303 and -1 if the file is not accessible or doesn't exist. */
304
305 static int
306 elf_64_file_p (const char *file, unsigned int *machine)
307 {
308 Elf64_Ehdr header;
309 int fd;
310
311 fd = open (file, O_RDONLY);
312 if (fd < 0)
313 return -1;
314
315 if (read (fd, &header, sizeof (header)) != sizeof (header))
316 {
317 close (fd);
318 return 0;
319 }
320 close (fd);
321
322 return elf_64_header_p (&header, machine);
323 }
324
325 /* Accepts an integer PID; returns true if the executable that PID
326    is running is a 64-bit ELF file.  */
327
328 int
329 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
330 {
331 char file[PATH_MAX];
332
333 sprintf (file, "/proc/%d/exe", pid);
334 return elf_64_file_p (file, machine);
335 }
336
337 static void
338 delete_lwp (struct lwp_info *lwp)
339 {
340 struct thread_info *thr = get_lwp_thread (lwp);
341
342 if (debug_threads)
343 debug_printf ("deleting %ld\n", lwpid_of (thr));
344
345 remove_thread (thr);
346 free (lwp->arch_private);
347 free (lwp);
348 }
349
350 /* Add a process to the common process list, and set its private
351 data. */
352
353 static struct process_info *
354 linux_add_process (int pid, int attached)
355 {
356 struct process_info *proc;
357
358 proc = add_process (pid, attached);
359 proc->private = xcalloc (1, sizeof (*proc->private));
360
361 /* Set the arch when the first LWP stops. */
362 proc->private->new_inferior = 1;
363
364 if (the_low_target.new_process != NULL)
365 proc->private->arch_private = the_low_target.new_process ();
366
367 return proc;
368 }
369
370 static CORE_ADDR get_pc (struct lwp_info *lwp);
371
372 /* Handle a GNU/Linux extended wait response. If we see a clone
373 event, we need to add the new LWP to our list (and not report the
374 trap to higher layers). */
375
376 static void
377 handle_extended_wait (struct lwp_info *event_child, int wstat)
378 {
379 int event = linux_ptrace_get_extended_event (wstat);
380 struct thread_info *event_thr = get_lwp_thread (event_child);
381 struct lwp_info *new_lwp;
382
383 if (event == PTRACE_EVENT_CLONE)
384 {
385 ptid_t ptid;
386 unsigned long new_pid;
387 int ret, status;
388
389 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
390 &new_pid);
391
392 /* If we haven't already seen the new PID stop, wait for it now. */
393 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
394 {
395 /* The new child has a pending SIGSTOP. We can't affect it until it
396 hits the SIGSTOP, but we're already attached. */
397
398 ret = my_waitpid (new_pid, &status, __WALL);
399
400 if (ret == -1)
401 perror_with_name ("waiting for new child");
402 else if (ret != new_pid)
403 warning ("wait returned unexpected PID %d", ret);
404 else if (!WIFSTOPPED (status))
405 warning ("wait returned unexpected status 0x%x", status);
406 }
407
408 if (debug_threads)
409 debug_printf ("HEW: Got clone event "
410 "from LWP %ld, new child is LWP %ld\n",
411 lwpid_of (event_thr), new_pid);
412
413 ptid = ptid_build (pid_of (event_thr), new_pid, 0);
414 new_lwp = add_lwp (ptid);
415
416 /* Either we're going to immediately resume the new thread
417 or leave it stopped. linux_resume_one_lwp is a nop if it
418 thinks the thread is currently running, so set this first
419 before calling linux_resume_one_lwp. */
420 new_lwp->stopped = 1;
421
422 /* If we're suspending all threads, leave this one suspended
423 too. */
424 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
425 new_lwp->suspended = 1;
426
427 /* Normally we will get the pending SIGSTOP. But in some cases
428 we might get another signal delivered to the group first.
429 If we do get another signal, be sure not to lose it. */
430 if (WSTOPSIG (status) != SIGSTOP)
431 {
432 new_lwp->stop_expected = 1;
433 new_lwp->status_pending_p = 1;
434 new_lwp->status_pending = status;
435 }
436 }
437 }
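/* Illustrative sketch, not part of the original file: clone events such
   as the one handled above are only reported if PTRACE_O_TRACECLONE was
   set on the tracee (gdbserver does this via
   linux_enable_event_reporting, in the nat/ code).  With the option
   set, the parent stops with WSTOPSIG == SIGTRAP and PTRACE_EVENT_CLONE
   encoded in the high bits of the wait status.  */
#if 0
static void
trace_clone_sketch (pid_t lwpid)
{
  /* Ask the kernel to stop LWPID and report whenever it clones.  */
  ptrace (PTRACE_SETOPTIONS, lwpid, (PTRACE_TYPE_ARG3) 0,
	  (PTRACE_TYPE_ARG4) PTRACE_O_TRACECLONE);
}
#endif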
438
439 /* Return the PC as read from the regcache of LWP, without any
440 adjustment. */
441
442 static CORE_ADDR
443 get_pc (struct lwp_info *lwp)
444 {
445 struct thread_info *saved_thread;
446 struct regcache *regcache;
447 CORE_ADDR pc;
448
449 if (the_low_target.get_pc == NULL)
450 return 0;
451
452 saved_thread = current_thread;
453 current_thread = get_lwp_thread (lwp);
454
455 regcache = get_thread_regcache (current_thread, 1);
456 pc = (*the_low_target.get_pc) (regcache);
457
458 if (debug_threads)
459 debug_printf ("pc is 0x%lx\n", (long) pc);
460
461 current_thread = saved_thread;
462 return pc;
463 }
464
465 /* This function should only be called if LWP got a SIGTRAP.
466 The SIGTRAP could mean several things.
467
468 On i386, where decr_pc_after_break is non-zero:
469
470 If we were single-stepping this process using PTRACE_SINGLESTEP, we
471 will get only the one SIGTRAP. The value of $eip will be the next
472 instruction. If the instruction we stepped over was a breakpoint,
473 we need to decrement the PC.
474
475 If we continue the process using PTRACE_CONT, we will get a
476 SIGTRAP when we hit a breakpoint. The value of $eip will be
477 the instruction after the breakpoint (i.e. needs to be
478 decremented). If we report the SIGTRAP to GDB, we must also
479 report the undecremented PC. If the breakpoint is removed, we
480 must resume at the decremented PC.
481
482 On a non-decr_pc_after_break machine with hardware or kernel
483 single-step:
484
485 If we either single-step a breakpoint instruction, or continue and
486 hit a breakpoint instruction, our PC will point at the breakpoint
487 instruction. */
488
489 static int
490 check_stopped_by_breakpoint (struct lwp_info *lwp)
491 {
492 CORE_ADDR pc;
493 CORE_ADDR sw_breakpoint_pc;
494 struct thread_info *saved_thread;
495
496 if (the_low_target.get_pc == NULL)
497 return 0;
498
499 pc = get_pc (lwp);
500 sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;
501
502 /* breakpoint_at reads from the current thread. */
503 saved_thread = current_thread;
504 current_thread = get_lwp_thread (lwp);
505
506 /* We may have just stepped a breakpoint instruction. E.g., in
507 non-stop mode, GDB first tells the thread A to step a range, and
508 then the user inserts a breakpoint inside the range. In that
509 case, we need to report the breakpoint PC. But, when we're
510 trying to step past one of our own breakpoints, that happens to
511 have been placed on top of a permanent breakpoint instruction, we
512 shouldn't adjust the PC, otherwise the program would keep
513 trapping the permanent breakpoint forever. */
514 if ((!lwp->stepping
515 || (!ptid_equal (ptid_of (current_thread), step_over_bkpt)
516 && lwp->stop_pc == sw_breakpoint_pc))
517 && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
518 {
519 if (debug_threads)
520 {
521 struct thread_info *thr = get_lwp_thread (lwp);
522
523 debug_printf ("CSBB: %s stopped by software breakpoint\n",
524 target_pid_to_str (ptid_of (thr)));
525 }
526
527 /* Back up the PC if necessary. */
528 if (pc != sw_breakpoint_pc)
529 {
530 struct regcache *regcache
531 = get_thread_regcache (current_thread, 1);
532 (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
533 }
534
535 lwp->stop_pc = sw_breakpoint_pc;
536 lwp->stop_reason = LWP_STOPPED_BY_SW_BREAKPOINT;
537 current_thread = saved_thread;
538 return 1;
539 }
540
541 if (hardware_breakpoint_inserted_here (pc))
542 {
543 if (debug_threads)
544 {
545 struct thread_info *thr = get_lwp_thread (lwp);
546
547 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
548 target_pid_to_str (ptid_of (thr)));
549 }
550
551 lwp->stop_pc = pc;
552 lwp->stop_reason = LWP_STOPPED_BY_HW_BREAKPOINT;
553 current_thread = saved_thread;
554 return 1;
555 }
556
557 current_thread = saved_thread;
558 return 0;
559 }
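/* Illustrative sketch, not part of the original file: the
   decr_pc_after_break adjustment used above.  On x86-style targets the
   trap PC points just past the breakpoint instruction, so the address
   where the breakpoint actually lives is the PC minus the target's
   fixup (zero on targets that stop at the breakpoint itself).  */
#if 0
static CORE_ADDR
sw_breakpoint_pc_sketch (struct lwp_info *lwp)
{
  return get_pc (lwp) - the_low_target.decr_pc_after_break;
}
#endif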
560
561 static struct lwp_info *
562 add_lwp (ptid_t ptid)
563 {
564 struct lwp_info *lwp;
565
566 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
567 memset (lwp, 0, sizeof (*lwp));
568
569 if (the_low_target.new_thread != NULL)
570 lwp->arch_private = the_low_target.new_thread ();
571
572 lwp->thread = add_thread (ptid, lwp);
573
574 return lwp;
575 }
576
577 /* Start an inferior process and return its pid.
578 ALLARGS is a vector of program-name and args. */
579
580 static int
581 linux_create_inferior (char *program, char **allargs)
582 {
583 struct lwp_info *new_lwp;
584 int pid;
585 ptid_t ptid;
586 struct cleanup *restore_personality
587 = maybe_disable_address_space_randomization (disable_randomization);
588
589 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
590 pid = vfork ();
591 #else
592 pid = fork ();
593 #endif
594 if (pid < 0)
595 perror_with_name ("fork");
596
597 if (pid == 0)
598 {
599 close_most_fds ();
600 ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
601
602 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
603 signal (__SIGRTMIN + 1, SIG_DFL);
604 #endif
605
606 setpgid (0, 0);
607
608 /* If gdbserver is connected to gdb via stdio, redirect the inferior's
609 stdout to stderr so that inferior i/o doesn't corrupt the connection.
610 Also, redirect stdin to /dev/null. */
611 if (remote_connection_is_stdio ())
612 {
613 close (0);
614 open ("/dev/null", O_RDONLY);
615 dup2 (2, 1);
616 if (write (2, "stdin/stdout redirected\n",
617 sizeof ("stdin/stdout redirected\n") - 1) < 0)
618 {
619 /* Errors ignored. */;
620 }
621 }
622
623 execv (program, allargs);
624 if (errno == ENOENT)
625 execvp (program, allargs);
626
627 fprintf (stderr, "Cannot exec %s: %s.\n", program,
628 strerror (errno));
629 fflush (stderr);
630 _exit (0177);
631 }
632
633 do_cleanups (restore_personality);
634
635 linux_add_process (pid, 0);
636
637 ptid = ptid_build (pid, pid, 0);
638 new_lwp = add_lwp (ptid);
639 new_lwp->must_set_ptrace_flags = 1;
640
641 return pid;
642 }
643
644 /* Attach to an inferior process. Returns 0 on success, ERRNO on
645 error. */
646
647 int
648 linux_attach_lwp (ptid_t ptid)
649 {
650 struct lwp_info *new_lwp;
651 int lwpid = ptid_get_lwp (ptid);
652
653 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
654 != 0)
655 return errno;
656
657 new_lwp = add_lwp (ptid);
658
659 /* We need to wait for SIGSTOP before being able to make the next
660 ptrace call on this LWP. */
661 new_lwp->must_set_ptrace_flags = 1;
662
663 if (linux_proc_pid_is_stopped (lwpid))
664 {
665 if (debug_threads)
666 debug_printf ("Attached to a stopped process\n");
667
668 /* The process is definitely stopped. It is in a job control
669 stop, unless the kernel predates the TASK_STOPPED /
670 TASK_TRACED distinction, in which case it might be in a
671 ptrace stop. Make sure it is in a ptrace stop; from there we
672 can kill it, signal it, et cetera.
673
674 First make sure there is a pending SIGSTOP. Since we are
675 already attached, the process can not transition from stopped
676 to running without a PTRACE_CONT; so we know this signal will
677 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
678 probably already in the queue (unless this kernel is old
679 enough to use TASK_STOPPED for ptrace stops); but since
680 SIGSTOP is not an RT signal, it can only be queued once. */
681 kill_lwp (lwpid, SIGSTOP);
682
683 /* Finally, resume the stopped process. This will deliver the
684 SIGSTOP (or a higher priority signal, just like normal
685 PTRACE_ATTACH), which we'll catch later on. */
686 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
687 }
688
689 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
690 brings it to a halt.
691
692 There are several cases to consider here:
693
694 1) gdbserver has already attached to the process and is being notified
695 of a new thread that is being created.
696 In this case we should ignore that SIGSTOP and resume the
697 process. This is handled below by setting stop_expected = 1,
698 and the fact that add_thread sets last_resume_kind ==
699 resume_continue.
700
701 2) This is the first thread (the process thread), and we're attaching
702 to it via attach_inferior.
703 In this case we want the process thread to stop.
704 This is handled by having linux_attach set last_resume_kind ==
705 resume_stop after we return.
706
707 If the pid we are attaching to is also the tgid, we attach to and
708 stop all the existing threads. Otherwise, we attach to pid and
709 ignore any other threads in the same group as this pid.
710
711 3) GDB is connecting to gdbserver and is requesting an enumeration of all
712 existing threads.
713 In this case we want the thread to stop.
714 FIXME: This case is currently not properly handled.
715 We should wait for the SIGSTOP but don't. Things work apparently
716 because enough time passes between when we ptrace (ATTACH) and when
717 gdb makes the next ptrace call on the thread.
718
719 On the other hand, if we are currently trying to stop all threads, we
720 should treat the new thread as if we had sent it a SIGSTOP. This works
721 because we are guaranteed that the add_lwp call above added us to the
722 end of the list, and so the new thread has not yet reached
723 wait_for_sigstop (but will). */
724 new_lwp->stop_expected = 1;
725
726 return 0;
727 }
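/* Illustrative sketch, not part of the original file: the bare kernel
   sequence linux_attach_lwp builds on.  PTRACE_ATTACH queues a SIGSTOP
   for the tracee; a later wait observes the attach stop.  gdbserver
   defers that wait (see stop_expected above), but a minimal attach
   looks like this, with error handling omitted.  */
#if 0
static void
attach_sketch (pid_t lwpid)
{
  int wstat;

  ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  my_waitpid (lwpid, &wstat, __WALL);	/* Expect WIFSTOPPED/SIGSTOP.  */
}
#endif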
728
729 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
730 already attached. Returns true if a new LWP is found, false
731 otherwise. */
732
733 static int
734 attach_proc_task_lwp_callback (ptid_t ptid)
735 {
736 /* Is this a new thread? */
737 if (find_thread_ptid (ptid) == NULL)
738 {
739 int lwpid = ptid_get_lwp (ptid);
740 int err;
741
742 if (debug_threads)
743 debug_printf ("Found new lwp %d\n", lwpid);
744
745 err = linux_attach_lwp (ptid);
746
747 /* Be quiet if we simply raced with the thread exiting. EPERM
748 is returned if the thread's task still exists, and is marked
749 as exited or zombie, as well as other conditions, so in that
750 case, confirm the status in /proc/PID/status. */
751 if (err == ESRCH
752 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
753 {
754 if (debug_threads)
755 {
756 debug_printf ("Cannot attach to lwp %d: "
757 "thread is gone (%d: %s)\n",
758 lwpid, err, strerror (err));
759 }
760 }
761 else if (err != 0)
762 {
763 warning (_("Cannot attach to lwp %d: %s"),
764 lwpid,
765 linux_ptrace_attach_fail_reason_string (ptid, err));
766 }
767
768 return 1;
769 }
770 return 0;
771 }
772
773 /* Attach to PID. If PID is the tgid, attach to it and all
774 of its threads. */
775
776 static int
777 linux_attach (unsigned long pid)
778 {
779 ptid_t ptid = ptid_build (pid, pid, 0);
780 int err;
781
782 /* Attach to PID. We will check for other threads
783 soon. */
784 err = linux_attach_lwp (ptid);
785 if (err != 0)
786 error ("Cannot attach to process %ld: %s",
787 pid, linux_ptrace_attach_fail_reason_string (ptid, err));
788
789 linux_add_process (pid, 1);
790
791 if (!non_stop)
792 {
793 struct thread_info *thread;
794
795 /* Don't ignore the initial SIGSTOP if we just attached to this
796 process. It will be collected by wait shortly. */
797 thread = find_thread_ptid (ptid_build (pid, pid, 0));
798 thread->last_resume_kind = resume_stop;
799 }
800
801 /* We must attach to every LWP. If /proc is mounted, use that to
802 find them now. On the one hand, the inferior may be using raw
803 clone instead of using pthreads. On the other hand, even if it
804 is using pthreads, GDB may not be connected yet (thread_db needs
805 to do symbol lookups, through qSymbol). Also, thread_db walks
806 structures in the inferior's address space to find the list of
807 threads/LWPs, and those structures may well be corrupted. Note
808 that once thread_db is loaded, we'll still use it to list threads
809 and associate pthread info with each LWP. */
810 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
811 return 0;
812 }
813
814 struct counter
815 {
816 int pid;
817 int count;
818 };
819
820 static int
821 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
822 {
823 struct counter *counter = args;
824
825 if (ptid_get_pid (entry->id) == counter->pid)
826 {
827 if (++counter->count > 1)
828 return 1;
829 }
830
831 return 0;
832 }
833
834 static int
835 last_thread_of_process_p (int pid)
836 {
837 struct counter counter = { pid , 0 };
838
839 return (find_inferior (&all_threads,
840 second_thread_of_pid_p, &counter) == NULL);
841 }
842
843 /* Kill LWP. */
844
845 static void
846 linux_kill_one_lwp (struct lwp_info *lwp)
847 {
848 struct thread_info *thr = get_lwp_thread (lwp);
849 int pid = lwpid_of (thr);
850
851 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
852 there is no signal context, and ptrace(PTRACE_KILL) (or
853 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
854 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
855 alternative is to kill with SIGKILL. We only need one SIGKILL
856 per process, not one for each thread. But since we still support
857 linuxthreads, and we also support debugging programs using raw
858 clone without CLONE_THREAD, we send one for each thread. For
859 years, we used PTRACE_KILL only, so we're being a bit paranoid
860 about some old kernels where PTRACE_KILL might work better
861 (dubious if there are any such, but that's why it's paranoia), so
862 we try SIGKILL first, PTRACE_KILL second, and so we're fine
863 everywhere. */
864
865 errno = 0;
866 kill_lwp (pid, SIGKILL);
867 if (debug_threads)
868 {
869 int save_errno = errno;
870
871 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
872 target_pid_to_str (ptid_of (thr)),
873 save_errno ? strerror (save_errno) : "OK");
874 }
875
876 errno = 0;
877 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
878 if (debug_threads)
879 {
880 int save_errno = errno;
881
882 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
883 target_pid_to_str (ptid_of (thr)),
884 save_errno ? strerror (save_errno) : "OK");
885 }
886 }
887
888 /* Kill LWP and wait for it to die. */
889
890 static void
891 kill_wait_lwp (struct lwp_info *lwp)
892 {
893 struct thread_info *thr = get_lwp_thread (lwp);
894 int pid = ptid_get_pid (ptid_of (thr));
895 int lwpid = ptid_get_lwp (ptid_of (thr));
896 int wstat;
897 int res;
898
899 if (debug_threads)
900 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);
901
902 do
903 {
904 linux_kill_one_lwp (lwp);
905
906 /* Make sure it died. Notes:
907
908 - The loop is most likely unnecessary.
909
910 - We don't use linux_wait_for_event as that could delete lwps
911 while we're iterating over them. We're not interested in
912 any pending status at this point, only in making sure all
913 wait status on the kernel side are collected until the
914 process is reaped.
915
916 - We don't use __WALL here as the __WALL emulation relies on
917 SIGCHLD, and killing a stopped process doesn't generate
918 one, nor an exit status.
919 */
920 res = my_waitpid (lwpid, &wstat, 0);
921 if (res == -1 && errno == ECHILD)
922 res = my_waitpid (lwpid, &wstat, __WCLONE);
923 } while (res > 0 && WIFSTOPPED (wstat));
924
925 gdb_assert (res > 0);
926 }
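/* Illustrative sketch, not part of the original file: the __WCLONE
   retry in kill_wait_lwp exists because without __WALL, plain waitpid
   only reaps non-clone children; clone LWPs must be reaped with
   __WCLONE, so an ECHILD on the first call is retried with that
   flag.  */
#if 0
static int
reap_lwp_sketch (int lwpid, int *wstat)
{
  int res = my_waitpid (lwpid, wstat, 0);

  if (res == -1 && errno == ECHILD)
    res = my_waitpid (lwpid, wstat, __WCLONE);
  return res;
}
#endif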
927
928 /* Callback for `find_inferior'. Kills an lwp of a given process,
929 except the leader. */
930
931 static int
932 kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
933 {
934 struct thread_info *thread = (struct thread_info *) entry;
935 struct lwp_info *lwp = get_thread_lwp (thread);
936 int pid = * (int *) args;
937
938 if (ptid_get_pid (entry->id) != pid)
939 return 0;
940
941 /* We avoid killing the first thread here, because of a Linux kernel (at
942 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
943 the children get a chance to be reaped, it will remain a zombie
944 forever. */
945
946 if (lwpid_of (thread) == pid)
947 {
948 if (debug_threads)
949 debug_printf ("lkop: is last of process %s\n",
950 target_pid_to_str (entry->id));
951 return 0;
952 }
953
954 kill_wait_lwp (lwp);
955 return 0;
956 }
957
958 static int
959 linux_kill (int pid)
960 {
961 struct process_info *process;
962 struct lwp_info *lwp;
963
964 process = find_process_pid (pid);
965 if (process == NULL)
966 return -1;
967
968 /* If we're killing a running inferior, make sure it is stopped
969 first, as PTRACE_KILL will not work otherwise. */
970 stop_all_lwps (0, NULL);
971
972 find_inferior (&all_threads, kill_one_lwp_callback , &pid);
973
974 /* See the comment in linux_kill_one_lwp. We did not kill the first
975 thread in the list, so do so now. */
976 lwp = find_lwp_pid (pid_to_ptid (pid));
977
978 if (lwp == NULL)
979 {
980 if (debug_threads)
981 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
982 pid);
983 }
984 else
985 kill_wait_lwp (lwp);
986
987 the_target->mourn (process);
988
989 /* Since we presently can only stop all lwps of all processes, we
990 need to unstop lwps of other processes. */
991 unstop_all_lwps (0, NULL);
992 return 0;
993 }
994
995 /* Get pending signal of THREAD, for detaching purposes. This is the
996 signal the thread last stopped for, which we need to deliver to the
997 thread when detaching, otherwise, it'd be suppressed/lost. */
998
999 static int
1000 get_detach_signal (struct thread_info *thread)
1001 {
1002 enum gdb_signal signo = GDB_SIGNAL_0;
1003 int status;
1004 struct lwp_info *lp = get_thread_lwp (thread);
1005
1006 if (lp->status_pending_p)
1007 status = lp->status_pending;
1008 else
1009 {
1010 /* If the thread had been suspended by gdbserver, and it stopped
1011 cleanly, then it'll have stopped with SIGSTOP. But we don't
1012 want to deliver that SIGSTOP. */
1013 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
1014 || thread->last_status.value.sig == GDB_SIGNAL_0)
1015 return 0;
1016
1017 /* Otherwise, we may need to deliver the signal we
1018 intercepted. */
1019 status = lp->last_status;
1020 }
1021
1022 if (!WIFSTOPPED (status))
1023 {
1024 if (debug_threads)
1025 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1026 target_pid_to_str (ptid_of (thread)));
1027 return 0;
1028 }
1029
1030 /* Extended wait statuses aren't real SIGTRAPs. */
1031 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
1032 {
1033 if (debug_threads)
1034 debug_printf ("GPS: lwp %s had stopped with extended "
1035 "status: no pending signal\n",
1036 target_pid_to_str (ptid_of (thread)));
1037 return 0;
1038 }
1039
1040 signo = gdb_signal_from_host (WSTOPSIG (status));
1041
1042 if (program_signals_p && !program_signals[signo])
1043 {
1044 if (debug_threads)
1045 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1046 target_pid_to_str (ptid_of (thread)),
1047 gdb_signal_to_string (signo));
1048 return 0;
1049 }
1050 else if (!program_signals_p
1051 /* If we have no way to know which signals GDB does not
1052 want to have passed to the program, assume
1053 SIGTRAP/SIGINT, which is GDB's default. */
1054 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1055 {
1056 if (debug_threads)
1057 debug_printf ("GPS: lwp %s had signal %s, "
1058 "but we don't know if we should pass it. "
1059 "Default to not.\n",
1060 target_pid_to_str (ptid_of (thread)),
1061 gdb_signal_to_string (signo));
1062 return 0;
1063 }
1064 else
1065 {
1066 if (debug_threads)
1067 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1068 target_pid_to_str (ptid_of (thread)),
1069 gdb_signal_to_string (signo));
1070
1071 return WSTOPSIG (status);
1072 }
1073 }
1074
1075 static int
1076 linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
1077 {
1078 struct thread_info *thread = (struct thread_info *) entry;
1079 struct lwp_info *lwp = get_thread_lwp (thread);
1080 int pid = * (int *) args;
1081 int sig;
1082
1083 if (ptid_get_pid (entry->id) != pid)
1084 return 0;
1085
1086 /* If there is a pending SIGSTOP, get rid of it. */
1087 if (lwp->stop_expected)
1088 {
1089 if (debug_threads)
1090 debug_printf ("Sending SIGCONT to %s\n",
1091 target_pid_to_str (ptid_of (thread)));
1092
1093 kill_lwp (lwpid_of (thread), SIGCONT);
1094 lwp->stop_expected = 0;
1095 }
1096
1097 /* Flush any pending changes to the process's registers. */
1098 regcache_invalidate_thread (thread);
1099
1100 /* Pass on any pending signal for this thread. */
1101 sig = get_detach_signal (thread);
1102
1103 /* Finally, let it resume. */
1104 if (the_low_target.prepare_to_resume != NULL)
1105 the_low_target.prepare_to_resume (lwp);
1106 if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1107 (PTRACE_TYPE_ARG4) (long) sig) < 0)
1108 error (_("Can't detach %s: %s"),
1109 target_pid_to_str (ptid_of (thread)),
1110 strerror (errno));
1111
1112 delete_lwp (lwp);
1113 return 0;
1114 }
1115
1116 static int
1117 linux_detach (int pid)
1118 {
1119 struct process_info *process;
1120
1121 process = find_process_pid (pid);
1122 if (process == NULL)
1123 return -1;
1124
1125 /* Stop all threads before detaching. First, ptrace requires that
1126    the thread is stopped to successfully detach.  Second, thread_db
1127 may need to uninstall thread event breakpoints from memory, which
1128 only works with a stopped process anyway. */
1129 stop_all_lwps (0, NULL);
1130
1131 #ifdef USE_THREAD_DB
1132 thread_db_detach (process);
1133 #endif
1134
1135 /* Stabilize threads (move out of jump pads). */
1136 stabilize_threads ();
1137
1138 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
1139
1140 the_target->mourn (process);
1141
1142 /* Since we presently can only stop all lwps of all processes, we
1143 need to unstop lwps of other processes. */
1144 unstop_all_lwps (0, NULL);
1145 return 0;
1146 }
1147
1148 /* Remove all LWPs that belong to process PROC from the lwp list. */
1149
1150 static int
1151 delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
1152 {
1153 struct thread_info *thread = (struct thread_info *) entry;
1154 struct lwp_info *lwp = get_thread_lwp (thread);
1155 struct process_info *process = proc;
1156
1157 if (pid_of (thread) == pid_of (process))
1158 delete_lwp (lwp);
1159
1160 return 0;
1161 }
1162
1163 static void
1164 linux_mourn (struct process_info *process)
1165 {
1166 struct process_info_private *priv;
1167
1168 #ifdef USE_THREAD_DB
1169 thread_db_mourn (process);
1170 #endif
1171
1172 find_inferior (&all_threads, delete_lwp_callback, process);
1173
1174   /* Free all private data.  */
1175 priv = process->private;
1176 free (priv->arch_private);
1177 free (priv);
1178 process->private = NULL;
1179
1180 remove_process (process);
1181 }
1182
1183 static void
1184 linux_join (int pid)
1185 {
1186 int status, ret;
1187
1188 do {
1189 ret = my_waitpid (pid, &status, 0);
1190 if (WIFEXITED (status) || WIFSIGNALED (status))
1191 break;
1192 } while (ret != -1 || errno != ECHILD);
1193 }
1194
1195 /* Return nonzero if the given thread is still alive. */
1196 static int
1197 linux_thread_alive (ptid_t ptid)
1198 {
1199 struct lwp_info *lwp = find_lwp_pid (ptid);
1200
1201 /* We assume we always know if a thread exits. If a whole process
1202 exited but we still haven't been able to report it to GDB, we'll
1203 hold on to the last lwp of the dead process. */
1204 if (lwp != NULL)
1205 return !lwp->dead;
1206 else
1207 return 0;
1208 }
1209
1210 /* Return 1 if this lwp still has an interesting status pending. If
1211 not (e.g., it had stopped for a breakpoint that is gone), return
1212 false. */
1213
1214 static int
1215 thread_still_has_status_pending_p (struct thread_info *thread)
1216 {
1217 struct lwp_info *lp = get_thread_lwp (thread);
1218
1219 if (!lp->status_pending_p)
1220 return 0;
1221
1222 /* If we got a `vCont;t', but we haven't reported a stop yet, do
1223 report any status pending the LWP may have. */
1224 if (thread->last_resume_kind == resume_stop
1225 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
1226 return 0;
1227
1228 if (thread->last_resume_kind != resume_stop
1229 && (lp->stop_reason == LWP_STOPPED_BY_SW_BREAKPOINT
1230 || lp->stop_reason == LWP_STOPPED_BY_HW_BREAKPOINT))
1231 {
1232 struct thread_info *saved_thread;
1233 CORE_ADDR pc;
1234 int discard = 0;
1235
1236 gdb_assert (lp->last_status != 0);
1237
1238 pc = get_pc (lp);
1239
1240 saved_thread = current_thread;
1241 current_thread = thread;
1242
1243 if (pc != lp->stop_pc)
1244 {
1245 if (debug_threads)
1246 debug_printf ("PC of %ld changed\n",
1247 lwpid_of (thread));
1248 discard = 1;
1249 }
1250 else if (lp->stop_reason == LWP_STOPPED_BY_SW_BREAKPOINT
1251 && !(*the_low_target.breakpoint_at) (pc))
1252 {
1253 if (debug_threads)
1254 debug_printf ("previous SW breakpoint of %ld gone\n",
1255 lwpid_of (thread));
1256 discard = 1;
1257 }
1258 else if (lp->stop_reason == LWP_STOPPED_BY_HW_BREAKPOINT
1259 && !hardware_breakpoint_inserted_here (pc))
1260 {
1261 if (debug_threads)
1262 debug_printf ("previous HW breakpoint of %ld gone\n",
1263 lwpid_of (thread));
1264 discard = 1;
1265 }
1266
1267 current_thread = saved_thread;
1268
1269 if (discard)
1270 {
1271 if (debug_threads)
1272 debug_printf ("discarding pending breakpoint status\n");
1273 lp->status_pending_p = 0;
1274 return 0;
1275 }
1276 }
1277
1278 return 1;
1279 }
1280
1281 /* Return 1 if this lwp has an interesting status pending. */
1282 static int
1283 status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
1284 {
1285 struct thread_info *thread = (struct thread_info *) entry;
1286 struct lwp_info *lp = get_thread_lwp (thread);
1287 ptid_t ptid = * (ptid_t *) arg;
1288
1289 /* Check if we're only interested in events from a specific process
1290 or its lwps. */
1291 if (!ptid_equal (minus_one_ptid, ptid)
1292 && ptid_get_pid (ptid) != ptid_get_pid (thread->entry.id))
1293 return 0;
1294
1295 if (lp->status_pending_p
1296 && !thread_still_has_status_pending_p (thread))
1297 {
1298 linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1299 return 0;
1300 }
1301
1302 return lp->status_pending_p;
1303 }
1304
1305 static int
1306 same_lwp (struct inferior_list_entry *entry, void *data)
1307 {
1308 ptid_t ptid = *(ptid_t *) data;
1309 int lwp;
1310
1311 if (ptid_get_lwp (ptid) != 0)
1312 lwp = ptid_get_lwp (ptid);
1313 else
1314 lwp = ptid_get_pid (ptid);
1315
1316 if (ptid_get_lwp (entry->id) == lwp)
1317 return 1;
1318
1319 return 0;
1320 }
1321
1322 struct lwp_info *
1323 find_lwp_pid (ptid_t ptid)
1324 {
1325 struct inferior_list_entry *thread
1326 = find_inferior (&all_threads, same_lwp, &ptid);
1327
1328 if (thread == NULL)
1329 return NULL;
1330
1331 return get_thread_lwp ((struct thread_info *) thread);
1332 }
1333
1334 /* Return the number of known LWPs in the tgid given by PID. */
1335
1336 static int
1337 num_lwps (int pid)
1338 {
1339 struct inferior_list_entry *inf, *tmp;
1340 int count = 0;
1341
1342 ALL_INFERIORS (&all_threads, inf, tmp)
1343 {
1344 if (ptid_get_pid (inf->id) == pid)
1345 count++;
1346 }
1347
1348 return count;
1349 }
1350
1351 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1352 their exits until all other threads in the group have exited. */
1353
1354 static void
1355 check_zombie_leaders (void)
1356 {
1357 struct process_info *proc, *tmp;
1358
1359 ALL_PROCESSES (proc, tmp)
1360 {
1361 pid_t leader_pid = pid_of (proc);
1362 struct lwp_info *leader_lp;
1363
1364 leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
1365
1366 if (debug_threads)
1367 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1368 "num_lwps=%d, zombie=%d\n",
1369                   leader_pid, leader_lp != NULL, num_lwps (leader_pid),
1370 linux_proc_pid_is_zombie (leader_pid));
1371
1372 if (leader_lp != NULL
1373 /* Check if there are other threads in the group, as we may
1374 have raced with the inferior simply exiting. */
1375 && !last_thread_of_process_p (leader_pid)
1376 && linux_proc_pid_is_zombie (leader_pid))
1377 {
1378 /* A leader zombie can mean one of two things:
1379
1380 - It exited, and there's an exit status pending
1381 available, or only the leader exited (not the whole
1382 program). In the latter case, we can't waitpid the
1383 leader's exit status until all other threads are gone.
1384
1385 - There are 3 or more threads in the group, and a thread
1386 other than the leader exec'd. On an exec, the Linux
1387 kernel destroys all other threads (except the execing
1388 one) in the thread group, and resets the execing thread's
1389 tid to the tgid. No exit notification is sent for the
1390 execing thread -- from the ptracer's perspective, it
1391 appears as though the execing thread just vanishes.
1392 Until we reap all other threads except the leader and the
1393 execing thread, the leader will be zombie, and the
1394 execing thread will be in `D (disc sleep)'. As soon as
1395 all other threads are reaped, the execing thread changes
1396            its tid to the tgid, and the previous (zombie) leader
1397 vanishes, giving place to the "new" leader. We could try
1398 distinguishing the exit and exec cases, by waiting once
1399 more, and seeing if something comes out, but it doesn't
1400 sound useful. The previous leader _does_ go away, and
1401 we'll re-add the new one once we see the exec event
1402 (which is just the same as what would happen if the
1403 previous leader did exit voluntarily before some other
1404 thread execs). */
1405
1406 if (debug_threads)
1407 fprintf (stderr,
1408 "CZL: Thread group leader %d zombie "
1409 "(it exited, or another thread execd).\n",
1410 leader_pid);
1411
1412 delete_lwp (leader_lp);
1413 }
1414 }
1415 }
1416
1417 /* Callback for `find_inferior'. Returns the first LWP that is not
1418 stopped. ARG is a PTID filter. */
1419
1420 static int
1421 not_stopped_callback (struct inferior_list_entry *entry, void *arg)
1422 {
1423 struct thread_info *thr = (struct thread_info *) entry;
1424 struct lwp_info *lwp;
1425 ptid_t filter = *(ptid_t *) arg;
1426
1427 if (!ptid_match (ptid_of (thr), filter))
1428 return 0;
1429
1430 lwp = get_thread_lwp (thr);
1431 if (!lwp->stopped)
1432 return 1;
1433
1434 return 0;
1435 }
1436
1437 /* This function should only be called if the LWP got a SIGTRAP.
1438
1439    Handle any tracepoint steps or hits.  Return 1 if a tracepoint
1440 event was handled, 0 otherwise. */
1441
1442 static int
1443 handle_tracepoints (struct lwp_info *lwp)
1444 {
1445 struct thread_info *tinfo = get_lwp_thread (lwp);
1446 int tpoint_related_event = 0;
1447
1448 gdb_assert (lwp->suspended == 0);
1449
1450 /* If this tracepoint hit causes a tracing stop, we'll immediately
1451 uninsert tracepoints. To do this, we temporarily pause all
1452 threads, unpatch away, and then unpause threads. We need to make
1453 sure the unpausing doesn't resume LWP too. */
1454 lwp->suspended++;
1455
1456 /* And we need to be sure that any all-threads-stopping doesn't try
1457 to move threads out of the jump pads, as it could deadlock the
1458 inferior (LWP could be in the jump pad, maybe even holding the
1459 lock.) */
1460
1461 /* Do any necessary step collect actions. */
1462 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1463
1464 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1465
1466 /* See if we just hit a tracepoint and do its main collect
1467 actions. */
1468 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1469
1470 lwp->suspended--;
1471
1472 gdb_assert (lwp->suspended == 0);
1473 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
1474
1475 if (tpoint_related_event)
1476 {
1477 if (debug_threads)
1478 debug_printf ("got a tracepoint event\n");
1479 return 1;
1480 }
1481
1482 return 0;
1483 }
1484
1485 /* Convenience wrapper. Returns true if LWP is presently collecting a
1486 fast tracepoint. */
1487
1488 static int
1489 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1490 struct fast_tpoint_collect_status *status)
1491 {
1492 CORE_ADDR thread_area;
1493 struct thread_info *thread = get_lwp_thread (lwp);
1494
1495 if (the_low_target.get_thread_area == NULL)
1496 return 0;
1497
1498 /* Get the thread area address. This is used to recognize which
1499 thread is which when tracing with the in-process agent library.
1500 We don't read anything from the address, and treat it as opaque;
1501 it's the address itself that we assume is unique per-thread. */
1502 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
1503 return 0;
1504
1505 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1506 }
1507
1508 /* We resume in the caller because we want to be able to pass
1509    lwp->status_pending as WSTAT, and we need to clear
1510    status_pending_p before resuming; otherwise, linux_resume_one_lwp
1511    refuses to resume.  */
1512
1513 static int
1514 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
1515 {
1516 struct thread_info *saved_thread;
1517
1518 saved_thread = current_thread;
1519 current_thread = get_lwp_thread (lwp);
1520
1521 if ((wstat == NULL
1522 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1523 && supports_fast_tracepoints ()
1524 && agent_loaded_p ())
1525 {
1526 struct fast_tpoint_collect_status status;
1527 int r;
1528
1529 if (debug_threads)
1530 debug_printf ("Checking whether LWP %ld needs to move out of the "
1531 "jump pad.\n",
1532 lwpid_of (current_thread));
1533
1534 r = linux_fast_tracepoint_collecting (lwp, &status);
1535
1536 if (wstat == NULL
1537 || (WSTOPSIG (*wstat) != SIGILL
1538 && WSTOPSIG (*wstat) != SIGFPE
1539 && WSTOPSIG (*wstat) != SIGSEGV
1540 && WSTOPSIG (*wstat) != SIGBUS))
1541 {
1542 lwp->collecting_fast_tracepoint = r;
1543
1544 if (r != 0)
1545 {
1546 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
1547 {
1548 /* Haven't executed the original instruction yet.
1549 Set breakpoint there, and wait till it's hit,
1550 then single-step until exiting the jump pad. */
1551 lwp->exit_jump_pad_bkpt
1552 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1553 }
1554
1555 if (debug_threads)
1556 debug_printf ("Checking whether LWP %ld needs to move out of "
1557 "the jump pad...it does\n",
1558 lwpid_of (current_thread));
1559 current_thread = saved_thread;
1560
1561 return 1;
1562 }
1563 }
1564 else
1565 {
1566 /* If we get a synchronous signal while collecting, *and*
1567 while executing the (relocated) original instruction,
1568 reset the PC to point at the tpoint address, before
1569 reporting to GDB. Otherwise, it's an IPA lib bug: just
1570 report the signal to GDB, and pray for the best. */
1571
1572 lwp->collecting_fast_tracepoint = 0;
1573
1574 if (r != 0
1575 && (status.adjusted_insn_addr <= lwp->stop_pc
1576 && lwp->stop_pc < status.adjusted_insn_addr_end))
1577 {
1578 siginfo_t info;
1579 struct regcache *regcache;
1580
1581 /* The si_addr on a few signals references the address
1582 of the faulting instruction. Adjust that as
1583 well. */
1584 if ((WSTOPSIG (*wstat) == SIGILL
1585 || WSTOPSIG (*wstat) == SIGFPE
1586 || WSTOPSIG (*wstat) == SIGBUS
1587 || WSTOPSIG (*wstat) == SIGSEGV)
1588 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
1589 (PTRACE_TYPE_ARG3) 0, &info) == 0
1590 /* Final check just to make sure we don't clobber
1591 the siginfo of non-kernel-sent signals. */
1592 && (uintptr_t) info.si_addr == lwp->stop_pc)
1593 {
1594 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
1595 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
1596 (PTRACE_TYPE_ARG3) 0, &info);
1597 }
1598
1599 regcache = get_thread_regcache (current_thread, 1);
1600 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
1601 lwp->stop_pc = status.tpoint_addr;
1602
1603 /* Cancel any fast tracepoint lock this thread was
1604 holding. */
1605 force_unlock_trace_buffer ();
1606 }
1607
1608 if (lwp->exit_jump_pad_bkpt != NULL)
1609 {
1610 if (debug_threads)
1611 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
1612 "stopping all threads momentarily.\n");
1613
1614 stop_all_lwps (1, lwp);
1615
1616 delete_breakpoint (lwp->exit_jump_pad_bkpt);
1617 lwp->exit_jump_pad_bkpt = NULL;
1618
1619 unstop_all_lwps (1, lwp);
1620
1621 gdb_assert (lwp->suspended >= 0);
1622 }
1623 }
1624 }
1625
1626 if (debug_threads)
1627 debug_printf ("Checking whether LWP %ld needs to move out of the "
1628 "jump pad...no\n",
1629 lwpid_of (current_thread));
1630
1631 current_thread = saved_thread;
1632 return 0;
1633 }
1634
1635 /* Enqueue one signal in the "signals to report later when out of the
1636 jump pad" list. */
1637
1638 static void
1639 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1640 {
1641 struct pending_signals *p_sig;
1642 struct thread_info *thread = get_lwp_thread (lwp);
1643
1644 if (debug_threads)
1645 debug_printf ("Deferring signal %d for LWP %ld.\n",
1646 WSTOPSIG (*wstat), lwpid_of (thread));
1647
1648 if (debug_threads)
1649 {
1650 struct pending_signals *sig;
1651
1652 for (sig = lwp->pending_signals_to_report;
1653 sig != NULL;
1654 sig = sig->prev)
1655 debug_printf (" Already queued %d\n",
1656 sig->signal);
1657
1658 debug_printf (" (no more currently queued signals)\n");
1659 }
1660
1661 /* Don't enqueue non-RT signals if they are already in the deferred
1662 queue. (SIGSTOP being the easiest signal to see ending up here
1663 twice) */
1664 if (WSTOPSIG (*wstat) < __SIGRTMIN)
1665 {
1666 struct pending_signals *sig;
1667
1668 for (sig = lwp->pending_signals_to_report;
1669 sig != NULL;
1670 sig = sig->prev)
1671 {
1672 if (sig->signal == WSTOPSIG (*wstat))
1673 {
1674 if (debug_threads)
1675 debug_printf ("Not requeuing already queued non-RT signal %d"
1676 " for LWP %ld\n",
1677 sig->signal,
1678 lwpid_of (thread));
1679 return;
1680 }
1681 }
1682 }
1683
1684 p_sig = xmalloc (sizeof (*p_sig));
1685 p_sig->prev = lwp->pending_signals_to_report;
1686 p_sig->signal = WSTOPSIG (*wstat);
1687 memset (&p_sig->info, 0, sizeof (siginfo_t));
1688 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1689 &p_sig->info);
1690
1691 lwp->pending_signals_to_report = p_sig;
1692 }
1693
1694 /* Dequeue one signal from the "signals to report later when out of
1695 the jump pad" list. */
1696
1697 static int
1698 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1699 {
1700 struct thread_info *thread = get_lwp_thread (lwp);
1701
1702 if (lwp->pending_signals_to_report != NULL)
1703 {
1704 struct pending_signals **p_sig;
1705
1706 p_sig = &lwp->pending_signals_to_report;
1707 while ((*p_sig)->prev != NULL)
1708 p_sig = &(*p_sig)->prev;
1709
1710 *wstat = W_STOPCODE ((*p_sig)->signal);
1711 if ((*p_sig)->info.si_signo != 0)
1712 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1713 &(*p_sig)->info);
1714 free (*p_sig);
1715 *p_sig = NULL;
1716
1717 if (debug_threads)
1718 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
1719 WSTOPSIG (*wstat), lwpid_of (thread));
1720
1721 if (debug_threads)
1722 {
1723 struct pending_signals *sig;
1724
1725 for (sig = lwp->pending_signals_to_report;
1726 sig != NULL;
1727 sig = sig->prev)
1728 debug_printf (" Still queued %d\n",
1729 sig->signal);
1730
1731 debug_printf (" (no more queued signals)\n");
1732 }
1733
1734 return 1;
1735 }
1736
1737 return 0;
1738 }
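/* Illustrative note, not part of the original file: the deferred-signal
   queue is a prev-linked stack.  enqueue_one_deferred_signal pushes at
   the head, while dequeue_one_deferred_signal walks to the tail before
   removing, so deferred signals are re-reported in FIFO order, oldest
   first.  */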
1739
1740 /* Return true if the event described by WSTATUS may have been caused by a breakpoint.  */
1741
1742 static int
1743 wstatus_maybe_breakpoint (int wstatus)
1744 {
1745 return (WIFSTOPPED (wstatus)
1746 && (WSTOPSIG (wstatus) == SIGTRAP
1747 /* SIGILL and SIGSEGV are also treated as traps in case a
1748 breakpoint is inserted at the current PC. */
1749 || WSTOPSIG (wstatus) == SIGILL
1750 || WSTOPSIG (wstatus) == SIGSEGV));
1751 }
1752
1753 /* Fetch the possibly triggered data watchpoint info and store it in
1754 CHILD.
1755
1756 On some archs, like x86, that use debug registers to set
1757 watchpoints, it's possible that the way to know which watched
1758 address trapped, is to check the register that is used to select
1759 which address to watch. Problem is, between setting the watchpoint
1760 and reading back which data address trapped, the user may change
1761 the set of watchpoints, and, as a consequence, GDB changes the
1762 debug registers in the inferior. To avoid reading back a stale
1763 stopped-data-address when that happens, we cache in LP the fact
1764 that a watchpoint trapped, and the corresponding data address, as
1765 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
1766 registers meanwhile, we have the cached data we can rely on. */
1767
1768 static int
1769 check_stopped_by_watchpoint (struct lwp_info *child)
1770 {
1771 if (the_low_target.stopped_by_watchpoint != NULL)
1772 {
1773 struct thread_info *saved_thread;
1774
1775 saved_thread = current_thread;
1776 current_thread = get_lwp_thread (child);
1777
1778 if (the_low_target.stopped_by_watchpoint ())
1779 {
1780 child->stop_reason = LWP_STOPPED_BY_WATCHPOINT;
1781
1782 if (the_low_target.stopped_data_address != NULL)
1783 child->stopped_data_address
1784 = the_low_target.stopped_data_address ();
1785 else
1786 child->stopped_data_address = 0;
1787 }
1788
1789 current_thread = saved_thread;
1790 }
1791
1792 return child->stop_reason == LWP_STOPPED_BY_WATCHPOINT;
1793 }
1794
1795 /* Do low-level handling of the event, and check if we should go on
1796 and pass it to caller code. Return the affected lwp if we are, or
1797 NULL otherwise. */
1798
1799 static struct lwp_info *
1800 linux_low_filter_event (int lwpid, int wstat)
1801 {
1802 struct lwp_info *child;
1803 struct thread_info *thread;
1804 int have_stop_pc = 0;
1805
1806 child = find_lwp_pid (pid_to_ptid (lwpid));
1807
1808 /* If we didn't find a process, one of two things presumably happened:
1809 - A process we started and then detached from has exited. Ignore it.
1810 - A process we are controlling has forked and the new child's stop
1811 was reported to us by the kernel. Save its PID. */
1812 if (child == NULL && WIFSTOPPED (wstat))
1813 {
1814 add_to_pid_list (&stopped_pids, lwpid, wstat);
1815 return NULL;
1816 }
1817 else if (child == NULL)
1818 return NULL;
1819
1820 thread = get_lwp_thread (child);
1821
1822 child->stopped = 1;
1823
1824 child->last_status = wstat;
1825
1826 /* Check if the thread has exited. */
1827 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
1828 {
1829 if (debug_threads)
1830 debug_printf ("LLFE: %d exited.\n", lwpid);
1831 if (num_lwps (pid_of (thread)) > 1)
1832 {
1833
1834 /* If there is at least one more LWP, then the exit signal was
1835 not the end of the debugged application and should be
1836 ignored. */
1837 delete_lwp (child);
1838 return NULL;
1839 }
1840 else
1841 {
1842      /* This was the last lwp in the process.  Events are
1843         serialized to GDB core, so we can't report this one
1844         right now; but since GDB core and the other target layers
1845         will want to be notified about the exit code/signal, leave
1846         the status pending for the next time we're able to report
1847         it.  */
1848 mark_lwp_dead (child, wstat);
1849 return child;
1850 }
1851 }
1852
1853 gdb_assert (WIFSTOPPED (wstat));
1854
1855 if (WIFSTOPPED (wstat))
1856 {
1857 struct process_info *proc;
1858
1859 /* Architecture-specific setup after inferior is running. This
1860 needs to happen after we have attached to the inferior and it
1861 is stopped for the first time, but before we access any
1862 inferior registers. */
1863 proc = find_process_pid (pid_of (thread));
1864 if (proc->private->new_inferior)
1865 {
1866 struct thread_info *saved_thread;
1867
1868 saved_thread = current_thread;
1869 current_thread = thread;
1870
1871 the_low_target.arch_setup ();
1872
1873 current_thread = saved_thread;
1874
1875 proc->private->new_inferior = 0;
1876 }
1877 }
1878
1879 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
1880 {
1881 struct process_info *proc = find_process_pid (pid_of (thread));
1882
1883 linux_enable_event_reporting (lwpid, proc->attached);
1884 child->must_set_ptrace_flags = 0;
1885 }
1886
1887 /* Be careful to not overwrite stop_pc until
1888 check_stopped_by_breakpoint is called. */
1889 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
1890 && linux_is_extended_waitstatus (wstat))
1891 {
1892 child->stop_pc = get_pc (child);
1893 handle_extended_wait (child, wstat);
1894 return NULL;
1895 }
1896
1897 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
1898 && check_stopped_by_watchpoint (child))
1899 ;
1900 else if (WIFSTOPPED (wstat) && wstatus_maybe_breakpoint (wstat))
1901 {
1902 if (check_stopped_by_breakpoint (child))
1903 have_stop_pc = 1;
1904 }
1905
1906 if (!have_stop_pc)
1907 child->stop_pc = get_pc (child);
1908
1909 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
1910 && child->stop_expected)
1911 {
1912 if (debug_threads)
1913 debug_printf ("Expected stop.\n");
1914 child->stop_expected = 0;
1915
1916 if (thread->last_resume_kind == resume_stop)
1917 {
1918 /* We want to report the stop to the core. Treat the
1919 SIGSTOP as a normal event. */
1920 }
1921 else if (stopping_threads != NOT_STOPPING_THREADS)
1922 {
1923 /* Stopping threads. We don't want this SIGSTOP to end up
1924 pending. */
1925 return NULL;
1926 }
1927 else
1928 {
1929 /* Filter out the event. */
1930 linux_resume_one_lwp (child, child->stepping, 0, NULL);
1931 return NULL;
1932 }
1933 }
1934
1935 child->status_pending_p = 1;
1936 child->status_pending = wstat;
1937 return child;
1938 }
1939
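/* Editorial sketch, not gdbserver code: how the wait-status macros
   used in linux_low_filter_event partition a raw waitpid status.
   For any status waitpid reports here, exactly one of WIFEXITED,
   WIFSIGNALED and WIFSTOPPED is true (we never pass WCONTINUED).
   The function name is invented for the example.  */

static const char *
example_classify_wstatus (int wstat)
{
  if (WIFEXITED (wstat))
    return "exited";		/* Exit code in WEXITSTATUS.  */
  else if (WIFSIGNALED (wstat))
    return "killed by signal";	/* Signal number in WTERMSIG.  */
  else if (WIFSTOPPED (wstat))
    return "stopped";		/* Stop signal in WSTOPSIG.  */
  else
    return "unknown";
}
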
1940 /* Resume LWPs that are currently stopped without any pending status
1941 to report, but are resumed from the core's perspective. */
1942
1943 static void
1944 resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
1945 {
1946 struct thread_info *thread = (struct thread_info *) entry;
1947 struct lwp_info *lp = get_thread_lwp (thread);
1948
1949 if (lp->stopped
1950 && !lp->status_pending_p
1951 && thread->last_resume_kind != resume_stop
1952 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
1953 {
1954 int step = thread->last_resume_kind == resume_step;
1955
1956 if (debug_threads)
1957 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
1958 target_pid_to_str (ptid_of (thread)),
1959 paddress (lp->stop_pc),
1960 step);
1961
1962 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
1963 }
1964 }
1965
1966 /* Wait for an event from child(ren) WAIT_PTID, and return any that
1967 match FILTER_PTID (leaving others pending). The PTIDs can be:
1968 minus_one_ptid, to specify any child; a pid PTID, specifying all
1969 lwps of a thread group; or a PTID representing a single lwp. Store
1970 the stop status through the status pointer WSTAT. OPTIONS is
1971 passed to the waitpid call. Return 0 if no event was found and
1972 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
1973 were found. Return the PID of the stopped child otherwise. */
1974
1975 static int
1976 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
1977 int *wstatp, int options)
1978 {
1979 struct thread_info *event_thread;
1980 struct lwp_info *event_child, *requested_child;
1981 sigset_t block_mask, prev_mask;
1982
1983 retry:
1984 /* N.B. event_thread points to the thread_info struct that contains
1985 event_child. Keep them in sync. */
1986 event_thread = NULL;
1987 event_child = NULL;
1988 requested_child = NULL;
1989
1990 /* Check for a lwp with a pending status. */
1991
1992 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
1993 {
1994 event_thread = (struct thread_info *)
1995 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
1996 if (event_thread != NULL)
1997 event_child = get_thread_lwp (event_thread);
1998 if (debug_threads && event_thread)
1999 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2000 }
2001 else if (!ptid_equal (filter_ptid, null_ptid))
2002 {
2003 requested_child = find_lwp_pid (filter_ptid);
2004
2005 if (stopping_threads == NOT_STOPPING_THREADS
2006 && requested_child->status_pending_p
2007 && requested_child->collecting_fast_tracepoint)
2008 {
2009 enqueue_one_deferred_signal (requested_child,
2010 &requested_child->status_pending);
2011 requested_child->status_pending_p = 0;
2012 requested_child->status_pending = 0;
2013 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2014 }
2015
2016 if (requested_child->suspended
2017 && requested_child->status_pending_p)
2018 {
2019 internal_error (__FILE__, __LINE__,
2020 "requesting an event out of a"
2021 " suspended child?");
2022 }
2023
2024 if (requested_child->status_pending_p)
2025 {
2026 event_child = requested_child;
2027 event_thread = get_lwp_thread (event_child);
2028 }
2029 }
2030
2031 if (event_child != NULL)
2032 {
2033 if (debug_threads)
2034 debug_printf ("Got an event from pending child %ld (%04x)\n",
2035 lwpid_of (event_thread), event_child->status_pending);
2036 *wstatp = event_child->status_pending;
2037 event_child->status_pending_p = 0;
2038 event_child->status_pending = 0;
2039 current_thread = event_thread;
2040 return lwpid_of (event_thread);
2041 }
2042
2043 /* But if we don't find a pending event, we'll have to wait.
2044
2045 We only enter this loop if no process has a pending wait status.
2046 Thus any action taken in response to a wait status inside this
2047 loop is responding as soon as we detect the status, not after any
2048 pending events. */
2049
2050 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2051 all signals while here. */
2052 sigfillset (&block_mask);
2053 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2054
2055 /* Always pull all events out of the kernel. We'll randomly select
2056 an event LWP out of all that have events, to prevent
2057 starvation. */
2058 while (event_child == NULL)
2059 {
2060 pid_t ret = 0;
2061
2062 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2063 quirks:
2064
2065 - If the thread group leader exits while other threads in the
2066 thread group still exist, waitpid(TGID, ...) hangs. That
2067 waitpid won't return an exit status until the other threads
2068 in the group are reaped.
2069
2070 - When a non-leader thread execs, that thread just vanishes
2071 without reporting an exit (so we'd hang if we waited for it
2072 explicitly in that case). The exec event is reported to
2073 the TGID pid (although we don't currently enable exec
2074 events). */
2075 errno = 0;
2076 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2077
2078 if (debug_threads)
2079 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2080 ret, errno ? strerror (errno) : "ERRNO-OK");
2081
2082 if (ret > 0)
2083 {
2084 if (debug_threads)
2085 {
2086 debug_printf ("LLW: waitpid %ld received %s\n",
2087 (long) ret, status_to_str (*wstatp));
2088 }
2089
2090 /* Filter all events. IOW, leave all events pending. We'll
2091 randomly select an event LWP out of all that have events
2092 below. */
2093 linux_low_filter_event (ret, *wstatp);
2094 /* Retry until nothing comes out of waitpid. A single
2095 SIGCHLD can indicate more than one child stopped. */
2096 continue;
2097 }
2098
2099 /* Now that we've pulled all events out of the kernel, resume
2100 LWPs that don't have an interesting event to report. */
2101 if (stopping_threads == NOT_STOPPING_THREADS)
2102 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2103
2104 /* ... and find an LWP with a status to report to the core, if
2105 any. */
2106 event_thread = (struct thread_info *)
2107 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2108 if (event_thread != NULL)
2109 {
2110 event_child = get_thread_lwp (event_thread);
2111 *wstatp = event_child->status_pending;
2112 event_child->status_pending_p = 0;
2113 event_child->status_pending = 0;
2114 break;
2115 }
2116
2117 /* Check for zombie thread group leaders. Those can't be reaped
2118 until all other threads in the thread group are. */
2119 check_zombie_leaders ();
2120
2121 /* If there are no resumed children left in the set of LWPs we
2122 want to wait for, bail. We can't just block in
2123 waitpid/sigsuspend, because lwps might have been left stopped
2124 in trace-stop state, and we'd be stuck forever waiting for
2125 their status to change (which would only happen if we resumed
2126 them). Even if WNOHANG is set, this return code is preferred
2127 over 0 (below), as it is more detailed. */
2128 if ((find_inferior (&all_threads,
2129 not_stopped_callback,
2130 &wait_ptid) == NULL))
2131 {
2132 if (debug_threads)
2133 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2134 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2135 return -1;
2136 }
2137
2138 /* No interesting event to report to the caller. */
2139 if ((options & WNOHANG))
2140 {
2141 if (debug_threads)
2142 debug_printf ("WNOHANG set, no event found\n");
2143
2144 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2145 return 0;
2146 }
2147
2148 /* Block until we get an event reported with SIGCHLD. */
2149 if (debug_threads)
2150 debug_printf ("sigsuspend'ing\n");
2151
2152 sigsuspend (&prev_mask);
2153 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2154 goto retry;
2155 }
2156
2157 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2158
2159 current_thread = event_thread;
2160
2161 /* Check for thread exit. */
2162 if (! WIFSTOPPED (*wstatp))
2163 {
2164 gdb_assert (last_thread_of_process_p (pid_of (event_thread)));
2165
2166 if (debug_threads)
2167 debug_printf ("LWP %d is the last lwp of process. "
2168 "Process %ld exiting.\n",
2169 pid_of (event_thread), lwpid_of (event_thread));
2170 return lwpid_of (event_thread);
2171 }
2172
2173 return lwpid_of (event_thread);
2174 }
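
/* Editorial sketch, not gdbserver code: the core shape of the event
   loop above, reduced to a standalone example.  Block signals, drain
   every queued event with waitpid(-1, ..., WNOHANG) (a single
   SIGCHLD may stand for several stopped children), and only then
   sigsuspend with the caller's mask to sleep until the next SIGCHLD.
   The function name and the handle_event callback are invented; the
   real loop above additionally bails out before sleeping when no
   resumed children remain, so it cannot block forever on LWPs left
   in trace-stop.  */

static void
example_drain_then_sleep (void (*handle_event) (pid_t, int))
{
  sigset_t block_mask, prev_mask;
  int wstat;
  pid_t pid;

  sigfillset (&block_mask);
  sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);

  for (;;)
    {
      /* Pull everything the kernel has queued out, first.  */
      while ((pid = waitpid (-1, &wstat, __WALL | WNOHANG)) > 0)
	handle_event (pid, wstat);

      if (pid == -1 && errno == ECHILD)
	break;			/* No children left at all.  */

      /* Atomically unblock SIGCHLD and sleep until it arrives.  */
      sigsuspend (&prev_mask);
    }

  sigprocmask (SIG_SETMASK, &prev_mask, NULL);
}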
2175
2176 /* Wait for an event from child(ren) PTID. PTIDs can be:
2177 minus_one_ptid, to specify any child; a pid PTID, specifying all
2178 lwps of a thread group; or a PTID representing a single lwp. Store
2179 the stop status through the status pointer WSTAT. OPTIONS is
2180 passed to the waitpid call. Return 0 if no event was found and
2181 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2182 were found. Return the PID of the stopped child otherwise. */
2183
2184 static int
2185 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2186 {
2187 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2188 }
2189
2190 /* Count the LWPs that have had events. */
2191
2192 static int
2193 count_events_callback (struct inferior_list_entry *entry, void *data)
2194 {
2195 struct thread_info *thread = (struct thread_info *) entry;
2196 int *count = data;
2197
2198 gdb_assert (count != NULL);
2199
2200 /* Count only resumed LWPs that have an event pending. */
2201 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2202 && thread->last_resume_kind != resume_stop
2203 && thread->status_pending_p)
2204 (*count)++;
2205
2206 return 0;
2207 }
2208
2209 /* Select the LWP (if any) that is currently being single-stepped. */
2210
2211 static int
2212 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2213 {
2214 struct thread_info *thread = (struct thread_info *) entry;
2215 struct lwp_info *lp = get_thread_lwp (thread);
2216
2217 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2218 && thread->last_resume_kind == resume_step
2219 && lp->status_pending_p)
2220 return 1;
2221 else
2222 return 0;
2223 }
2224
2225 /* Select the Nth LWP that has had a SIGTRAP event that should be
2226 reported to GDB. */
2227
2228 static int
2229 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2230 {
2231 struct thread_info *thread = (struct thread_info *) entry;
2232 int *selector = data;
2233
2234 gdb_assert (selector != NULL);
2235
2236 /* Select only resumed LWPs that have an event pending. */
2237 if (thread->last_resume_kind != resume_stop
2238 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2239 && thread->status_pending_p)
2240 if ((*selector)-- == 0)
2241 return 1;
2242
2243 return 0;
2244 }
2245
2246 /* Select one LWP out of those that have events pending. */
2247
2248 static void
2249 select_event_lwp (struct lwp_info **orig_lp)
2250 {
2251 int num_events = 0;
2252 int random_selector;
2253 struct thread_info *event_thread = NULL;
2254
2255 /* In all-stop, give preference to the LWP that is being
2256 single-stepped. There will be at most one, and it's the LWP that
2257 the core is most interested in. If we didn't do this, then we'd
2258 have to handle pending step SIGTRAPs somehow in case the core
2259 later continues the previously-stepped thread, otherwise we'd
2260 report the pending SIGTRAP, and the core, not having stepped the
2261 thread, wouldn't understand what the trap was for, and therefore
2262 would report it to the user as a random signal. */
2263 if (!non_stop)
2264 {
2265 event_thread
2266 = (struct thread_info *) find_inferior (&all_threads,
2267 select_singlestep_lwp_callback,
2268 NULL);
2269 if (event_thread != NULL)
2270 {
2271 if (debug_threads)
2272 debug_printf ("SEL: Select single-step %s\n",
2273 target_pid_to_str (ptid_of (event_thread)));
2274 }
2275 }
2276 if (event_thread == NULL)
2277 {
2278 /* No single-stepping LWP. Select one at random, out of those
2279 which have had SIGTRAP events. */
2280
2281 /* First see how many SIGTRAP events we have. */
2282 find_inferior (&all_threads, count_events_callback, &num_events);
2283
2284 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2285 random_selector = (int)
2286 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2287
2288 if (debug_threads && num_events > 1)
2289 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2290 num_events, random_selector);
2291
2292 event_thread
2293 = (struct thread_info *) find_inferior (&all_threads,
2294 select_event_lwp_callback,
2295 &random_selector);
2296 }
2297
2298 if (event_thread != NULL)
2299 {
2300 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2301
2302 /* Switch the event LWP. */
2303 *orig_lp = event_lp;
2304 }
2305 }
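
/* Worked example, not gdbserver code: with num_events == 3, the
   expression (3 * (double) rand ()) / (RAND_MAX + 1.0) is uniformly
   distributed over 0, 1 and 2, and select_event_lwp_callback then
   walks the thread list decrementing that selector until it reaches
   the chosen LWP.  The helper below, with an invented name, shows
   the same decrement-until-zero walk over a plain array.  */

static int
example_pick_nth_pending (int *pending, int n, int selector)
{
  int i;

  for (i = 0; i < n; i++)
    if (pending[i] && selector-- == 0)
      return i;			/* Index of the chosen element.  */

  return -1;			/* Fewer than SELECTOR + 1 pending.  */
}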
2306
2307 /* Decrement the suspend count of an LWP. */
2308
2309 static int
2310 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2311 {
2312 struct thread_info *thread = (struct thread_info *) entry;
2313 struct lwp_info *lwp = get_thread_lwp (thread);
2314
2315 /* Ignore EXCEPT. */
2316 if (lwp == except)
2317 return 0;
2318
2319 lwp->suspended--;
2320
2321 gdb_assert (lwp->suspended >= 0);
2322 return 0;
2323 }
2324
2325 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2326 non-NULL. */
2327
2328 static void
2329 unsuspend_all_lwps (struct lwp_info *except)
2330 {
2331 find_inferior (&all_threads, unsuspend_one_lwp, except);
2332 }
2333
2334 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2335 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2336 void *data);
2337 static int lwp_running (struct inferior_list_entry *entry, void *data);
2338 static ptid_t linux_wait_1 (ptid_t ptid,
2339 struct target_waitstatus *ourstatus,
2340 int target_options);
2341
2342 /* Stabilize threads (move out of jump pads).
2343
2344 If a thread is midway collecting a fast tracepoint, we need to
2345 finish the collection and move it out of the jump pad before
2346 reporting the signal.
2347
2348 This avoids recursion while collecting (when a signal arrives
2349 midway, and the signal handler itself collects), which would trash
2350 the trace buffer. In case the user set a breakpoint in a signal
2351 handler, this avoids the backtrace showing the jump pad, etc.
2352 Most importantly, there are certain things we can't do safely if
2353 threads are stopped in a jump pad (or in its callees). For
2354 example:
2355
2356 - starting a new trace run. A thread still collecting from the
2357 previous run could trash the trace buffer when resumed. The trace
2358 buffer control structures would have been reset but the thread had
2359 no way to tell. The thread could even be midway through memcpy'ing
2360 to the buffer, which would mean that when resumed, it would clobber
2361 the trace buffer that had been set up for a new run.
2362
2363 - we can't rewrite/reuse the jump pads for new tracepoints
2364 safely. Say you do tstart while a thread is stopped midway through
2365 collecting. When the thread is later resumed, it finishes the
2366 collection, and returns to the jump pad, to execute the original
2367 instruction that was under the tracepoint jump at the time the
2368 older run had been started. If the jump pad had since been
2369 rewritten for something else in the new run, the thread would now
2370 execute wrong or random instructions. */
2371
2372 static void
2373 linux_stabilize_threads (void)
2374 {
2375 struct thread_info *saved_thread;
2376 struct thread_info *thread_stuck;
2377
2378 thread_stuck
2379 = (struct thread_info *) find_inferior (&all_threads,
2380 stuck_in_jump_pad_callback,
2381 NULL);
2382 if (thread_stuck != NULL)
2383 {
2384 if (debug_threads)
2385 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2386 lwpid_of (thread_stuck));
2387 return;
2388 }
2389
2390 saved_thread = current_thread;
2391
2392 stabilizing_threads = 1;
2393
2394 /* Kick 'em all. */
2395 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2396
2397 /* Loop until all are stopped out of the jump pads. */
2398 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2399 {
2400 struct target_waitstatus ourstatus;
2401 struct lwp_info *lwp;
2402 int wstat;
2403
2404 /* Note that we go through the full wait event loop. While
2405 moving threads out of the jump pad, we need to be able to step
2406 over internal breakpoints and such. */
2407 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2408
2409 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2410 {
2411 lwp = get_thread_lwp (current_thread);
2412
2413 /* Lock it. */
2414 lwp->suspended++;
2415
2416 if (ourstatus.value.sig != GDB_SIGNAL_0
2417 || current_thread->last_resume_kind == resume_stop)
2418 {
2419 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2420 enqueue_one_deferred_signal (lwp, &wstat);
2421 }
2422 }
2423 }
2424
2425 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
2426
2427 stabilizing_threads = 0;
2428
2429 current_thread = saved_thread;
2430
2431 if (debug_threads)
2432 {
2433 thread_stuck
2434 = (struct thread_info *) find_inferior (&all_threads,
2435 stuck_in_jump_pad_callback,
2436 NULL);
2437 if (thread_stuck != NULL)
2438 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2439 lwpid_of (thread_stuck));
2440 }
2441 }
2442
2443 static void async_file_mark (void);
2444
2445 /* Convenience function that is called when the kernel reports an
2446 event that is not passed out to GDB. */
2447
2448 static ptid_t
2449 ignore_event (struct target_waitstatus *ourstatus)
2450 {
2451 /* If we got an event, there may still be others, as a single
2452 SIGCHLD can indicate more than one child stopped. This forces
2453 another target_wait call. */
2454 async_file_mark ();
2455
2456 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2457 return null_ptid;
2458 }
2459
2460 /* Wait for the process and return its status. */
2461
2462 static ptid_t
2463 linux_wait_1 (ptid_t ptid,
2464 struct target_waitstatus *ourstatus, int target_options)
2465 {
2466 int w;
2467 struct lwp_info *event_child;
2468 int options;
2469 int pid;
2470 int step_over_finished;
2471 int bp_explains_trap;
2472 int maybe_internal_trap;
2473 int report_to_gdb;
2474 int trace_event;
2475 int in_step_range;
2476
2477 if (debug_threads)
2478 {
2479 debug_enter ();
2480 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2481 }
2482
2483 /* Translate generic target options into linux options. */
2484 options = __WALL;
2485 if (target_options & TARGET_WNOHANG)
2486 options |= WNOHANG;
2487
2488 bp_explains_trap = 0;
2489 trace_event = 0;
2490 in_step_range = 0;
2491 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2492
2493 if (ptid_equal (step_over_bkpt, null_ptid))
2494 pid = linux_wait_for_event (ptid, &w, options);
2495 else
2496 {
2497 if (debug_threads)
2498 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2499 target_pid_to_str (step_over_bkpt));
2500 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2501 }
2502
2503 if (pid == 0)
2504 {
2505 gdb_assert (target_options & TARGET_WNOHANG);
2506
2507 if (debug_threads)
2508 {
2509 debug_printf ("linux_wait_1 ret = null_ptid, "
2510 "TARGET_WAITKIND_IGNORE\n");
2511 debug_exit ();
2512 }
2513
2514 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2515 return null_ptid;
2516 }
2517 else if (pid == -1)
2518 {
2519 if (debug_threads)
2520 {
2521 debug_printf ("linux_wait_1 ret = null_ptid, "
2522 "TARGET_WAITKIND_NO_RESUMED\n");
2523 debug_exit ();
2524 }
2525
2526 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
2527 return null_ptid;
2528 }
2529
2530 event_child = get_thread_lwp (current_thread);
2531
2532 /* linux_wait_for_event only returns an exit status for the last
2533 child of a process. Report it. */
2534 if (WIFEXITED (w) || WIFSIGNALED (w))
2535 {
2536 if (WIFEXITED (w))
2537 {
2538 ourstatus->kind = TARGET_WAITKIND_EXITED;
2539 ourstatus->value.integer = WEXITSTATUS (w);
2540
2541 if (debug_threads)
2542 {
2543 debug_printf ("linux_wait_1 ret = %s, exited with "
2544 "retcode %d\n",
2545 target_pid_to_str (ptid_of (current_thread)),
2546 WEXITSTATUS (w));
2547 debug_exit ();
2548 }
2549 }
2550 else
2551 {
2552 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2553 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2554
2555 if (debug_threads)
2556 {
2557 debug_printf ("linux_wait_1 ret = %s, terminated with "
2558 "signal %d\n",
2559 target_pid_to_str (ptid_of (current_thread)),
2560 WTERMSIG (w));
2561 debug_exit ();
2562 }
2563 }
2564
2565 return ptid_of (current_thread);
2566 }
2567
2568 /* If this event was not handled before, and is not a SIGTRAP, we
2569 report it. SIGILL and SIGSEGV are also treated as traps in case
2570 a breakpoint is inserted at the current PC. If this target does
2571 not support internal breakpoints at all, we also report the
2572 SIGTRAP without further processing; it's of no concern to us. */
2573 maybe_internal_trap
2574 = (supports_breakpoints ()
2575 && (WSTOPSIG (w) == SIGTRAP
2576 || ((WSTOPSIG (w) == SIGILL
2577 || WSTOPSIG (w) == SIGSEGV)
2578 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2579
2580 if (maybe_internal_trap)
2581 {
2582 /* Handle anything that requires bookkeeping before deciding to
2583 report the event or continue waiting. */
2584
2585 /* First check if we can explain the SIGTRAP with an internal
2586 breakpoint, or if we should possibly report the event to GDB.
2587 Do this before anything that may remove or insert a
2588 breakpoint. */
2589 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2590
2591 /* We have a SIGTRAP, possibly a step-over dance has just
2592 finished. If so, tweak the state machine accordingly,
2593 reinsert breakpoints and delete any reinsert (software
2594 single-step) breakpoints. */
2595 step_over_finished = finish_step_over (event_child);
2596
2597 /* Now invoke the callbacks of any internal breakpoints there. */
2598 check_breakpoints (event_child->stop_pc);
2599
2600 /* Handle tracepoint data collecting. This may overflow the
2601 trace buffer, and cause a tracing stop, removing
2602 breakpoints. */
2603 trace_event = handle_tracepoints (event_child);
2604
2605 if (bp_explains_trap)
2606 {
2607 /* If we stepped or ran into an internal breakpoint, we've
2608 already handled it. So next time we resume (from this
2609 PC), we should step over it. */
2610 if (debug_threads)
2611 debug_printf ("Hit a gdbserver breakpoint.\n");
2612
2613 if (breakpoint_here (event_child->stop_pc))
2614 event_child->need_step_over = 1;
2615 }
2616 }
2617 else
2618 {
2619 /* We have some other signal, possibly a step-over dance was in
2620 progress, and it should be cancelled too. */
2621 step_over_finished = finish_step_over (event_child);
2622 }
2623
2624 /* We have all the data we need. Either report the event to GDB, or
2625 resume threads and keep waiting for more. */
2626
2627 /* If we're collecting a fast tracepoint, finish the collection and
2628 move out of the jump pad before delivering a signal. See
2629 linux_stabilize_threads. */
2630
2631 if (WIFSTOPPED (w)
2632 && WSTOPSIG (w) != SIGTRAP
2633 && supports_fast_tracepoints ()
2634 && agent_loaded_p ())
2635 {
2636 if (debug_threads)
2637 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2638 "to defer or adjust it.\n",
2639 WSTOPSIG (w), lwpid_of (current_thread));
2640
2641 /* Allow debugging the jump pad itself. */
2642 if (current_thread->last_resume_kind != resume_step
2643 && maybe_move_out_of_jump_pad (event_child, &w))
2644 {
2645 enqueue_one_deferred_signal (event_child, &w);
2646
2647 if (debug_threads)
2648 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
2649 WSTOPSIG (w), lwpid_of (current_thread));
2650
2651 linux_resume_one_lwp (event_child, 0, 0, NULL);
2652
2653 return ignore_event (ourstatus);
2654 }
2655 }
2656
2657 if (event_child->collecting_fast_tracepoint)
2658 {
2659 if (debug_threads)
2660 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2661 "Check if we're already there.\n",
2662 lwpid_of (current_thread),
2663 event_child->collecting_fast_tracepoint);
2664
2665 trace_event = 1;
2666
2667 event_child->collecting_fast_tracepoint
2668 = linux_fast_tracepoint_collecting (event_child, NULL);
2669
2670 if (event_child->collecting_fast_tracepoint != 1)
2671 {
2672 /* No longer need this breakpoint. */
2673 if (event_child->exit_jump_pad_bkpt != NULL)
2674 {
2675 if (debug_threads)
2676 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
2677 "stopping all threads momentarily.\n");
2678
2679 /* Other running threads could hit this breakpoint.
2680 We don't handle moribund locations like GDB does,
2681 instead we always pause all threads when removing
2682 breakpoints, so that any step-over or
2683 decr_pc_after_break adjustment is always taken
2684 care of while the breakpoint is still
2685 inserted. */
2686 stop_all_lwps (1, event_child);
2687
2688 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2689 event_child->exit_jump_pad_bkpt = NULL;
2690
2691 unstop_all_lwps (1, event_child);
2692
2693 gdb_assert (event_child->suspended >= 0);
2694 }
2695 }
2696
2697 if (event_child->collecting_fast_tracepoint == 0)
2698 {
2699 if (debug_threads)
2700 debug_printf ("fast tracepoint finished "
2701 "collecting successfully.\n");
2702
2703 /* We may have a deferred signal to report. */
2704 if (dequeue_one_deferred_signal (event_child, &w))
2705 {
2706 if (debug_threads)
2707 debug_printf ("dequeued one signal.\n");
2708 }
2709 else
2710 {
2711 if (debug_threads)
2712 debug_printf ("no deferred signals.\n");
2713
2714 if (stabilizing_threads)
2715 {
2716 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2717 ourstatus->value.sig = GDB_SIGNAL_0;
2718
2719 if (debug_threads)
2720 {
2721 debug_printf ("linux_wait_1 ret = %s, stopped "
2722 "while stabilizing threads\n",
2723 target_pid_to_str (ptid_of (current_thread)));
2724 debug_exit ();
2725 }
2726
2727 return ptid_of (current_thread);
2728 }
2729 }
2730 }
2731 }
2732
2733 /* Check whether GDB would be interested in this event. */
2734
2735 /* If GDB is not interested in this signal, don't stop other
2736 threads, and don't report it to GDB. Just resume the inferior
2737 right away. We do this for threading-related signals as well as
2738 any that GDB specifically requested we ignore. But never ignore
2739 SIGSTOP if we sent it ourselves, and do not ignore signals when
2740 stepping - they may require special handling to skip the signal
2741 handler. */
2742 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2743 thread library? */
2744 if (WIFSTOPPED (w)
2745 && current_thread->last_resume_kind != resume_step
2746 && (
2747 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2748 (current_process ()->private->thread_db != NULL
2749 && (WSTOPSIG (w) == __SIGRTMIN
2750 || WSTOPSIG (w) == __SIGRTMIN + 1))
2751 ||
2752 #endif
2753 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
2754 && !(WSTOPSIG (w) == SIGSTOP
2755 && current_thread->last_resume_kind == resume_stop))))
2756 {
2757 siginfo_t info, *info_p;
2758
2759 if (debug_threads)
2760 debug_printf ("Ignored signal %d for LWP %ld.\n",
2761 WSTOPSIG (w), lwpid_of (current_thread));
2762
2763 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2764 (PTRACE_TYPE_ARG3) 0, &info) == 0)
2765 info_p = &info;
2766 else
2767 info_p = NULL;
2768 linux_resume_one_lwp (event_child, event_child->stepping,
2769 WSTOPSIG (w), info_p);
2770 return ignore_event (ourstatus);
2771 }
2772
2773 /* Note that all addresses are always "out of the step range" when
2774 there's no range to begin with. */
2775 in_step_range = lwp_in_step_range (event_child);
2776
2777 /* If GDB wanted this thread to single step, and the thread is out
2778 of the step range, we always want to report the SIGTRAP, and let
2779 GDB handle it. Watchpoints should always be reported. So should
2780 signals we can't explain. A SIGTRAP we can't explain could be a
2781 GDB breakpoint --- we may or may not support Z0 breakpoints. If we
2782 do, we'd be able to handle GDB breakpoints on top of internal
2783 breakpoints, by handling the internal breakpoint and still
2784 reporting the event to GDB. If we don't, we're out of luck, GDB
2785 won't see the breakpoint hit. */
2786 report_to_gdb = (!maybe_internal_trap
2787 || (current_thread->last_resume_kind == resume_step
2788 && !in_step_range)
2789 || event_child->stop_reason == LWP_STOPPED_BY_WATCHPOINT
2790 || (!step_over_finished && !in_step_range
2791 && !bp_explains_trap && !trace_event)
2792 || (gdb_breakpoint_here (event_child->stop_pc)
2793 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
2794 && gdb_no_commands_at_breakpoint (event_child->stop_pc)));
2795
2796 run_breakpoint_commands (event_child->stop_pc);
2797
2798 /* We found no reason GDB would want us to stop. We either hit one
2799 of our own breakpoints, or finished an internal step GDB
2800 shouldn't know about. */
2801 if (!report_to_gdb)
2802 {
2803 if (debug_threads)
2804 {
2805 if (bp_explains_trap)
2806 debug_printf ("Hit a gdbserver breakpoint.\n");
2807 if (step_over_finished)
2808 debug_printf ("Step-over finished.\n");
2809 if (trace_event)
2810 debug_printf ("Tracepoint event.\n");
2811 if (lwp_in_step_range (event_child))
2812 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
2813 paddress (event_child->stop_pc),
2814 paddress (event_child->step_range_start),
2815 paddress (event_child->step_range_end));
2816 }
2817
2818 /* We're not reporting this breakpoint to GDB, so apply the
2819 decr_pc_after_break adjustment to the inferior's regcache
2820 ourselves. */
2821
2822 if (the_low_target.set_pc != NULL)
2823 {
2824 struct regcache *regcache
2825 = get_thread_regcache (current_thread, 1);
2826 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2827 }
2828
2829 /* We may have finished stepping over a breakpoint. If so,
2830 we've stopped and suspended all LWPs momentarily except the
2831 stepping one. This is where we resume them all again. We're
2832 going to keep waiting, so use proceed, which handles stepping
2833 over the next breakpoint. */
2834 if (debug_threads)
2835 debug_printf ("proceeding all threads.\n");
2836
2837 if (step_over_finished)
2838 unsuspend_all_lwps (event_child);
2839
2840 proceed_all_lwps ();
2841 return ignore_event (ourstatus);
2842 }
2843
2844 if (debug_threads)
2845 {
2846 if (current_thread->last_resume_kind == resume_step)
2847 {
2848 if (event_child->step_range_start == event_child->step_range_end)
2849 debug_printf ("GDB wanted to single-step, reporting event.\n");
2850 else if (!lwp_in_step_range (event_child))
2851 debug_printf ("Out of step range, reporting event.\n");
2852 }
2853 if (event_child->stop_reason == LWP_STOPPED_BY_WATCHPOINT)
2854 debug_printf ("Stopped by watchpoint.\n");
2855 else if (gdb_breakpoint_here (event_child->stop_pc))
2856 debug_printf ("Stopped by GDB breakpoint.\n");
2858 debug_printf ("Hit a non-gdbserver trap event.\n");
2859 }
2860
2861 /* Alright, we're going to report a stop. */
2862
2863 if (!stabilizing_threads)
2864 {
2865 /* In all-stop, stop all threads. */
2866 if (!non_stop)
2867 stop_all_lwps (0, NULL);
2868
2869 /* If we're not waiting for a specific LWP, choose an event LWP
2870 from among those that have had events. Giving equal priority
2871 to all LWPs that have had events helps prevent
2872 starvation. */
2873 if (ptid_equal (ptid, minus_one_ptid))
2874 {
2875 event_child->status_pending_p = 1;
2876 event_child->status_pending = w;
2877
2878 select_event_lwp (&event_child);
2879
2880 /* current_thread and event_child must stay in sync. */
2881 current_thread = get_lwp_thread (event_child);
2882
2883 event_child->status_pending_p = 0;
2884 w = event_child->status_pending;
2885 }
2886
2887 if (step_over_finished)
2888 {
2889 if (!non_stop)
2890 {
2891 /* If we were doing a step-over, all other threads but
2892 the stepping one had been paused in start_step_over,
2893 with their suspend counts incremented. We don't want
2894 to do a full unstop/unpause, because we're in
2895 all-stop mode (so we want threads stopped), but we
2896 still need to unsuspend the other threads, to
2897 decrement their `suspended' count back. */
2898 unsuspend_all_lwps (event_child);
2899 }
2900 else
2901 {
2902 /* If we just finished a step-over, then all threads had
2903 been momentarily paused. In all-stop, that's fine,
2904 we want threads stopped by now anyway. In non-stop,
2905 we need to re-resume threads that GDB wanted to be
2906 running. */
2907 unstop_all_lwps (1, event_child);
2908 }
2909 }
2910
2911 /* Stabilize threads (move out of jump pads). */
2912 if (!non_stop)
2913 stabilize_threads ();
2914 }
2915 else
2916 {
2917 /* If we just finished a step-over, then all threads had been
2918 momentarily paused. In all-stop, that's fine, we want
2919 threads stopped by now anyway. In non-stop, we need to
2920 re-resume threads that GDB wanted to be running. */
2921 if (step_over_finished)
2922 unstop_all_lwps (1, event_child);
2923 }
2924
2925 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2926
2927 /* Now that we've selected our final event LWP, un-adjust its PC if
2928 it was a software breakpoint. */
2929 if (event_child->stop_reason == LWP_STOPPED_BY_SW_BREAKPOINT)
2930 {
2931 int decr_pc = the_low_target.decr_pc_after_break;
2932
2933 if (decr_pc != 0)
2934 {
2935 struct regcache *regcache
2936 = get_thread_regcache (current_thread, 1);
2937 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
2938 }
2939 }
2940
2941 if (current_thread->last_resume_kind == resume_stop
2942 && WSTOPSIG (w) == SIGSTOP)
2943 {
2944 /* A thread that has been requested to stop by GDB with vCont;t
2945 stopped cleanly, so report it as SIG0. The use of
2946 SIGSTOP is an implementation detail. */
2947 ourstatus->value.sig = GDB_SIGNAL_0;
2948 }
2949 else if (current_thread->last_resume_kind == resume_stop
2950 && WSTOPSIG (w) != SIGSTOP)
2951 {
2952 /* A thread that has been requested to stop by GDB with vCont;t,
2953 but it stopped for other reasons. */
2954 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2955 }
2956 else
2957 {
2958 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2959 }
2960
2961 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2962
2963 if (debug_threads)
2964 {
2965 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
2966 target_pid_to_str (ptid_of (current_thread)),
2967 ourstatus->kind, ourstatus->value.sig);
2968 debug_exit ();
2969 }
2970
2971 return ptid_of (current_thread);
2972 }
2973
2974 /* Get rid of any pending event in the pipe. */
2975 static void
2976 async_file_flush (void)
2977 {
2978 int ret;
2979 char buf;
2980
2981 do
2982 ret = read (linux_event_pipe[0], &buf, 1);
2983 while (ret >= 0 || (ret == -1 && errno == EINTR));
2984 }
2985
2986 /* Put something in the pipe, so the event loop wakes up. */
2987 static void
2988 async_file_mark (void)
2989 {
2990 int ret;
2991
2992 async_file_flush ();
2993
2994 do
2995 ret = write (linux_event_pipe[1], "+", 1);
2996 while (ret == 0 || (ret == -1 && errno == EINTR));
2997
2998 /* Ignore EAGAIN. If the pipe is full, the event loop will already
2999 be awakened anyway. */
3000 }
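
/* Editorial sketch, not gdbserver code: setting up a pipe so the
   flush/mark pair above works.  Both ends must be non-blocking:
   async_file_flush spins on read until it fails with something other
   than EINTR (EAGAIN, once the pipe is empty), and async_file_mark
   must not block when the pipe is already full.  The function name
   is invented for the example.  */

static int
example_make_event_pipe (int fds[2])
{
  int i;

  if (pipe (fds) != 0)
    return -1;

  for (i = 0; i < 2; i++)
    {
      int flags = fcntl (fds[i], F_GETFL, 0);

      if (flags == -1
	  || fcntl (fds[i], F_SETFL, flags | O_NONBLOCK) == -1)
	{
	  close (fds[0]);
	  close (fds[1]);
	  return -1;
	}
    }

  return 0;
}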
3001
3002 static ptid_t
3003 linux_wait (ptid_t ptid,
3004 struct target_waitstatus *ourstatus, int target_options)
3005 {
3006 ptid_t event_ptid;
3007
3008 /* Flush the async file first. */
3009 if (target_is_async_p ())
3010 async_file_flush ();
3011
3012 do
3013 {
3014 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3015 }
3016 while ((target_options & TARGET_WNOHANG) == 0
3017 && ptid_equal (event_ptid, null_ptid)
3018 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3019
3020 /* If at least one stop was reported, there may be more. A single
3021 SIGCHLD can signal more than one child stop. */
3022 if (target_is_async_p ()
3023 && (target_options & TARGET_WNOHANG) != 0
3024 && !ptid_equal (event_ptid, null_ptid))
3025 async_file_mark ();
3026
3027 return event_ptid;
3028 }
3029
3030 /* Send a signal to an LWP. */
3031
3032 static int
3033 kill_lwp (unsigned long lwpid, int signo)
3034 {
3035 /* Use tkill, if possible, in case we are using nptl threads. If tkill
3036 fails, then we are not using nptl threads and we should be using kill. */
3037
3038 #ifdef __NR_tkill
3039 {
3040 static int tkill_failed;
3041
3042 if (!tkill_failed)
3043 {
3044 int ret;
3045
3046 errno = 0;
3047 ret = syscall (__NR_tkill, lwpid, signo);
3048 if (errno != ENOSYS)
3049 return ret;
3050 tkill_failed = 1;
3051 }
3052 }
3053 #endif
3054
3055 return kill (lwpid, signo);
3056 }
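
/* Usage sketch, not gdbserver code: under NPTL all threads of a
   process share one PID, so kill () addresses the whole thread group
   while tkill addresses exactly one kernel thread.  Stopping a
   single LWP therefore goes through kill_lwp; the trivial wrapper
   below (invented name) shows the intended call pattern.  */

static void
example_stop_single_lwp (unsigned long lwpid)
{
  /* Thread-directed: only this LWP gets the SIGSTOP, not its
     siblings in the thread group.  */
  kill_lwp (lwpid, SIGSTOP);
}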
3057
3058 void
3059 linux_stop_lwp (struct lwp_info *lwp)
3060 {
3061 send_sigstop (lwp);
3062 }
3063
3064 static void
3065 send_sigstop (struct lwp_info *lwp)
3066 {
3067 int pid;
3068
3069 pid = lwpid_of (get_lwp_thread (lwp));
3070
3071 /* If we already have a pending stop signal for this process, don't
3072 send another. */
3073 if (lwp->stop_expected)
3074 {
3075 if (debug_threads)
3076 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3077
3078 return;
3079 }
3080
3081 if (debug_threads)
3082 debug_printf ("Sending sigstop to lwp %d\n", pid);
3083
3084 lwp->stop_expected = 1;
3085 kill_lwp (pid, SIGSTOP);
3086 }
3087
3088 static int
3089 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3090 {
3091 struct thread_info *thread = (struct thread_info *) entry;
3092 struct lwp_info *lwp = get_thread_lwp (thread);
3093
3094 /* Ignore EXCEPT. */
3095 if (lwp == except)
3096 return 0;
3097
3098 if (lwp->stopped)
3099 return 0;
3100
3101 send_sigstop (lwp);
3102 return 0;
3103 }
3104
3105 /* Increment the suspend count of an LWP, and stop it, if not stopped
3106 yet. */
3107 static int
3108 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3109 void *except)
3110 {
3111 struct thread_info *thread = (struct thread_info *) entry;
3112 struct lwp_info *lwp = get_thread_lwp (thread);
3113
3114 /* Ignore EXCEPT. */
3115 if (lwp == except)
3116 return 0;
3117
3118 lwp->suspended++;
3119
3120 return send_sigstop_callback (entry, except);
3121 }
3122
3123 static void
3124 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3125 {
3126 /* It's dead, really. */
3127 lwp->dead = 1;
3128
3129 /* Store the exit status for later. */
3130 lwp->status_pending_p = 1;
3131 lwp->status_pending = wstat;
3132
3133 /* Prevent trying to stop it. */
3134 lwp->stopped = 1;
3135
3136 /* No further stops are expected from a dead lwp. */
3137 lwp->stop_expected = 0;
3138 }
3139
3140 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3141
3142 static void
3143 wait_for_sigstop (void)
3144 {
3145 struct thread_info *saved_thread;
3146 ptid_t saved_tid;
3147 int wstat;
3148 int ret;
3149
3150 saved_thread = current_thread;
3151 if (saved_thread != NULL)
3152 saved_tid = saved_thread->entry.id;
3153 else
3154 saved_tid = null_ptid; /* avoid bogus unused warning */
3155
3156 if (debug_threads)
3157 debug_printf ("wait_for_sigstop: pulling events\n");
3158
3159 /* Passing NULL_PTID as filter indicates we want all events to be
3160 left pending. Eventually this returns when there are no
3161 unwaited-for children left. */
3162 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3163 &wstat, __WALL);
3164 gdb_assert (ret == -1);
3165
3166 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3167 current_thread = saved_thread;
3168 else
3169 {
3170 if (debug_threads)
3171 debug_printf ("Previously current thread died.\n");
3172
3173 if (non_stop)
3174 {
3175 /* We can't change the current inferior behind GDB's back,
3176 otherwise, a subsequent command may apply to the wrong
3177 process. */
3178 current_thread = NULL;
3179 }
3180 else
3181 {
3182 /* Set a valid thread as current. */
3183 set_desired_thread (0);
3184 }
3185 }
3186 }
3187
3188 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3189 move it out, because we need to report the stop event to GDB. For
3190 example, if the user puts a breakpoint in the jump pad, it's
3191 because she wants to debug it. */
3192
3193 static int
3194 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3195 {
3196 struct thread_info *thread = (struct thread_info *) entry;
3197 struct lwp_info *lwp = get_thread_lwp (thread);
3198
3199 gdb_assert (lwp->suspended == 0);
3200 gdb_assert (lwp->stopped);
3201
3202 /* Allow debugging the jump pad, gdb_collect, etc. */
3203 return (supports_fast_tracepoints ()
3204 && agent_loaded_p ()
3205 && (gdb_breakpoint_here (lwp->stop_pc)
3206 || lwp->stop_reason == LWP_STOPPED_BY_WATCHPOINT
3207 || thread->last_resume_kind == resume_step)
3208 && linux_fast_tracepoint_collecting (lwp, NULL));
3209 }
3210
3211 static void
3212 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3213 {
3214 struct thread_info *thread = (struct thread_info *) entry;
3215 struct lwp_info *lwp = get_thread_lwp (thread);
3216 int *wstat;
3217
3218 gdb_assert (lwp->suspended == 0);
3219 gdb_assert (lwp->stopped);
3220
3221 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3222
3223 /* Allow debugging the jump pad, gdb_collect, etc. */
3224 if (!gdb_breakpoint_here (lwp->stop_pc)
3225 && lwp->stop_reason != LWP_STOPPED_BY_WATCHPOINT
3226 && thread->last_resume_kind != resume_step
3227 && maybe_move_out_of_jump_pad (lwp, wstat))
3228 {
3229 if (debug_threads)
3230 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3231 lwpid_of (thread));
3232
3233 if (wstat)
3234 {
3235 lwp->status_pending_p = 0;
3236 enqueue_one_deferred_signal (lwp, wstat);
3237
3238 if (debug_threads)
3239 debug_printf ("Signal %d for LWP %ld deferred "
3240 "(in jump pad)\n",
3241 WSTOPSIG (*wstat), lwpid_of (thread));
3242 }
3243
3244 linux_resume_one_lwp (lwp, 0, 0, NULL);
3245 }
3246 else
3247 lwp->suspended++;
3248 }
3249
3250 static int
3251 lwp_running (struct inferior_list_entry *entry, void *data)
3252 {
3253 struct thread_info *thread = (struct thread_info *) entry;
3254 struct lwp_info *lwp = get_thread_lwp (thread);
3255
3256 if (lwp->dead)
3257 return 0;
3258 if (lwp->stopped)
3259 return 0;
3260 return 1;
3261 }
3262
3263 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3264 If SUSPEND, then also increase the suspend count of every LWP,
3265 except EXCEPT. */
3266
3267 static void
3268 stop_all_lwps (int suspend, struct lwp_info *except)
3269 {
3270 /* Should not be called recursively. */
3271 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3272
3273 if (debug_threads)
3274 {
3275 debug_enter ();
3276 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3277 suspend ? "stop-and-suspend" : "stop",
3278 except != NULL
3279 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
3280 : "none");
3281 }
3282
3283 stopping_threads = (suspend
3284 ? STOPPING_AND_SUSPENDING_THREADS
3285 : STOPPING_THREADS);
3286
3287 if (suspend)
3288 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
3289 else
3290 find_inferior (&all_threads, send_sigstop_callback, except);
3291 wait_for_sigstop ();
3292 stopping_threads = NOT_STOPPING_THREADS;
3293
3294 if (debug_threads)
3295 {
3296 debug_printf ("stop_all_lwps done, setting stopping_threads "
3297 "back to !stopping\n");
3298 debug_exit ();
3299 }
3300 }
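
/* Editorial sketch, not gdbserver code: the stop-all protocol above
   in miniature.  Send SIGSTOP to every LWP that isn't already
   stopped, then reap notifications until each one has reported.
   All names are invented; the real code's second half is
   linux_wait_for_event_filtered with a null_ptid filter, which also
   copes with LWPs exiting or reporting other events meanwhile.  */

static void
example_stop_world (int nlwps, pid_t *lwpid, int *stopped)
{
  int i, remaining = 0, wstat;
  pid_t pid;

  for (i = 0; i < nlwps; i++)
    if (!stopped[i])
      {
	kill_lwp (lwpid[i], SIGSTOP);
	remaining++;
      }

  /* Reap stop notifications until every LWP has reported.  A real
     implementation would record WSTAT as a pending status instead
     of discarding it.  */
  while (remaining > 0
	 && (pid = waitpid (-1, &wstat, __WALL)) > 0)
    for (i = 0; i < nlwps; i++)
      if (lwpid[i] == pid && !stopped[i])
	{
	  stopped[i] = 1;
	  remaining--;
	}
}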
3301
3302 /* Resume execution of the inferior process.
3303 If STEP is nonzero, single-step it.
3304 If SIGNAL is nonzero, give it that signal. */
3305
3306 static void
3307 linux_resume_one_lwp (struct lwp_info *lwp,
3308 int step, int signal, siginfo_t *info)
3309 {
3310 struct thread_info *thread = get_lwp_thread (lwp);
3311 struct thread_info *saved_thread;
3312 int fast_tp_collecting;
3313
3314 if (lwp->stopped == 0)
3315 return;
3316
3317 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3318
3319 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3320
3321 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3322 user used the "jump" command, or "set $pc = foo"). */
3323 if (lwp->stop_pc != get_pc (lwp))
3324 {
3325 /* Collecting 'while-stepping' actions doesn't make sense
3326 anymore. */
3327 release_while_stepping_state_list (thread);
3328 }
3329
3330 /* If we have pending signals or status, and a new signal, enqueue the
3331 signal. Also enqueue the signal if we are waiting to reinsert a
3332 breakpoint; it will be picked up again below. */
3333 if (signal != 0
3334 && (lwp->status_pending_p
3335 || lwp->pending_signals != NULL
3336 || lwp->bp_reinsert != 0
3337 || fast_tp_collecting))
3338 {
3339 struct pending_signals *p_sig;
3340 p_sig = xmalloc (sizeof (*p_sig));
3341 p_sig->prev = lwp->pending_signals;
3342 p_sig->signal = signal;
3343 if (info == NULL)
3344 memset (&p_sig->info, 0, sizeof (siginfo_t));
3345 else
3346 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3347 lwp->pending_signals = p_sig;
3348 }
3349
3350 if (lwp->status_pending_p)
3351 {
3352 if (debug_threads)
3353 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3354 " has pending status\n",
3355 lwpid_of (thread), step ? "step" : "continue", signal,
3356 lwp->stop_expected ? "expected" : "not expected");
3357 return;
3358 }
3359
3360 saved_thread = current_thread;
3361 current_thread = thread;
3362
3363 if (debug_threads)
3364 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
3365 lwpid_of (thread), step ? "step" : "continue", signal,
3366 lwp->stop_expected ? "expected" : "not expected");
3367
3368 /* This bit needs some thinking about. If we get a signal that
3369 we must report while a single-step reinsert is still pending,
3370 we often end up resuming the thread. It might be better to
3371 (ew) allow a stack of pending events; then we could be sure that
3372 the reinsert happened right away and not lose any signals.
3373
3374 Making this stack would also shrink the window in which breakpoints are
3375 uninserted (see comment in linux_wait_for_lwp) but not enough for
3376 complete correctness, so it won't solve that problem. It may be
3377 worthwhile just to solve this one, however. */
3378 if (lwp->bp_reinsert != 0)
3379 {
3380 if (debug_threads)
3381 debug_printf (" pending reinsert at 0x%s\n",
3382 paddress (lwp->bp_reinsert));
3383
3384 if (can_hardware_single_step ())
3385 {
3386 if (fast_tp_collecting == 0)
3387 {
3388 if (step == 0)
3389 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3390 if (lwp->suspended)
3391 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3392 lwp->suspended);
3393 }
3394
3395 step = 1;
3396 }
3397
3398 /* Postpone any pending signal. It was enqueued above. */
3399 signal = 0;
3400 }
3401
3402 if (fast_tp_collecting == 1)
3403 {
3404 if (debug_threads)
3405 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3406 " (exit-jump-pad-bkpt)\n",
3407 lwpid_of (thread));
3408
3409 /* Postpone any pending signal. It was enqueued above. */
3410 signal = 0;
3411 }
3412 else if (fast_tp_collecting == 2)
3413 {
3414 if (debug_threads)
3415 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3416 " single-stepping\n",
3417 lwpid_of (thread));
3418
3419 if (can_hardware_single_step ())
3420 step = 1;
3421 else
3422 {
3423 internal_error (__FILE__, __LINE__,
3424 "moving out of jump pad single-stepping"
3425 " not implemented on this target");
3426 }
3427
3428 /* Postpone any pending signal. It was enqueued above. */
3429 signal = 0;
3430 }
3431
3432 /* If we have while-stepping actions in this thread, set it stepping.
3433 If we have a signal to deliver, it may or may not be set to
3434 SIG_IGN; we don't know. Assume so, and allow collecting
3435 while-stepping into a signal handler. A possible smart thing to
3436 do would be to set an internal breakpoint at the signal return
3437 address, continue, and carry on catching this while-stepping
3438 action only when that breakpoint is hit. A future
3439 enhancement. */
3440 if (thread->while_stepping != NULL
3441 && can_hardware_single_step ())
3442 {
3443 if (debug_threads)
3444 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
3445 lwpid_of (thread));
3446 step = 1;
3447 }
3448
3449 if (the_low_target.get_pc != NULL)
3450 {
3451 struct regcache *regcache = get_thread_regcache (current_thread, 1);
3452
3453 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
3454
3455 if (debug_threads)
3456 {
3457 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
3458 (long) lwp->stop_pc);
3459 }
3460 }
3461
3462 /* If we have pending signals, consume one unless we are trying to
3463 reinsert a breakpoint or we're trying to finish a fast tracepoint
3464 collect. */
3465 if (lwp->pending_signals != NULL
3466 && lwp->bp_reinsert == 0
3467 && fast_tp_collecting == 0)
3468 {
3469 struct pending_signals **p_sig;
3470
3471 p_sig = &lwp->pending_signals;
3472 while ((*p_sig)->prev != NULL)
3473 p_sig = &(*p_sig)->prev;
3474
3475 signal = (*p_sig)->signal;
3476 if ((*p_sig)->info.si_signo != 0)
3477 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3478 &(*p_sig)->info);
3479
3480 free (*p_sig);
3481 *p_sig = NULL;
3482 }
3483
3484 if (the_low_target.prepare_to_resume != NULL)
3485 the_low_target.prepare_to_resume (lwp);
3486
3487 regcache_invalidate_thread (thread);
3488 errno = 0;
3489 lwp->stopped = 0;
3490 lwp->stop_reason = LWP_STOPPED_BY_NO_REASON;
3491 lwp->stepping = step;
3492 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
3493 (PTRACE_TYPE_ARG3) 0,
3494 /* Coerce to a uintptr_t first to avoid potential gcc warning
3495 of coercing an 8 byte integer to a 4 byte pointer. */
3496 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
3497
3498 current_thread = saved_thread;
3499 if (errno)
3500 {
3501 /* ESRCH from ptrace either means that the thread was already
3502 running (an error) or that it is gone (a race condition). If
3503 it's gone, we will get a notification the next time we wait,
3504 so we can ignore the error. We could differentiate these
3505 two, but it's tricky without waiting; the thread still exists
3506 as a zombie, so sending it signal 0 would succeed. So just
3507 ignore ESRCH. */
3508 if (errno == ESRCH)
3509 return;
3510
3511 perror_with_name ("ptrace");
3512 }
3513 }
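
/* Editorial sketch, not gdbserver code: the pending_signals list
   above is pushed at the head (each new node's prev points at the
   old head) but consumed from the far end, which makes delivery
   first-in first-out.  The minimal form of that dequeue, over a
   simplified node type with invented names, assuming a non-empty
   list as in the caller above:  */

struct example_sig_node
{
  struct example_sig_node *prev;
  int signal;
};

static int
example_dequeue_oldest (struct example_sig_node **head)
{
  struct example_sig_node **p = head;
  int signal;

  /* Walk to the node that was enqueued first.  */
  while ((*p)->prev != NULL)
    p = &(*p)->prev;

  signal = (*p)->signal;
  free (*p);
  *p = NULL;
  return signal;
}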
3514
3515 struct thread_resume_array
3516 {
3517 struct thread_resume *resume;
3518 size_t n;
3519 };
3520
3521 /* This function is called once per thread via find_inferior.
3522 ARG is a pointer to a thread_resume_array struct.
3523 We look up the thread specified by ENTRY in ARG, and mark the thread
3524 with a pointer to the appropriate resume request.
3525
3526 This algorithm is O(threads * resume elements), but the number of
3527 resume elements is small (and will remain small at least until GDB
3528 supports thread suspension). */
3529
3530 static int
3531 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3532 {
3533 struct thread_info *thread = (struct thread_info *) entry;
3534 struct lwp_info *lwp = get_thread_lwp (thread);
3535 int ndx;
3536 struct thread_resume_array *r;
3537
3538 r = arg;
3539
3540 for (ndx = 0; ndx < r->n; ndx++)
3541 {
3542 ptid_t ptid = r->resume[ndx].thread;
3543 if (ptid_equal (ptid, minus_one_ptid)
3544 || ptid_equal (ptid, entry->id)
3545 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3546 of PID'. */
3547 || (ptid_get_pid (ptid) == pid_of (thread)
3548 && (ptid_is_pid (ptid)
3549 || ptid_get_lwp (ptid) == -1)))
3550 {
3551 if (r->resume[ndx].kind == resume_stop
3552 && thread->last_resume_kind == resume_stop)
3553 {
3554 if (debug_threads)
3555 debug_printf ("already %s LWP %ld at GDB's request\n",
3556 (thread->last_status.kind
3557 == TARGET_WAITKIND_STOPPED)
3558 ? "stopped"
3559 : "stopping",
3560 lwpid_of (thread));
3561
3562 continue;
3563 }
3564
3565 lwp->resume = &r->resume[ndx];
3566 thread->last_resume_kind = lwp->resume->kind;
3567
3568 lwp->step_range_start = lwp->resume->step_range_start;
3569 lwp->step_range_end = lwp->resume->step_range_end;
3570
3571 /* If we had a deferred signal to report, dequeue one now.
3572 This can happen if LWP gets more than one signal while
3573 trying to get out of a jump pad. */
3574 if (lwp->stopped
3575 && !lwp->status_pending_p
3576 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3577 {
3578 lwp->status_pending_p = 1;
3579
3580 if (debug_threads)
3581 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
3582 "leaving status pending.\n",
3583 WSTOPSIG (lwp->status_pending),
3584 lwpid_of (thread));
3585 }
3586
3587 return 0;
3588 }
3589 }
3590
3591 /* No resume action for this thread. */
3592 lwp->resume = NULL;
3593
3594 return 0;
3595 }
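
/* Editorial sketch, not gdbserver code: the three ways a resume
   request's ptid can match a thread in the loop above, collapsed
   into one predicate over plain pid/lwp pairs.  The encoding is
   simplified (req_pid == -1 stands for minus_one_ptid, req_lwp == 0
   for a bare 'pPID') and the function name is invented.  */

static int
example_resume_matches (int req_pid, long req_lwp,
			int thr_pid, long thr_lwp)
{
  if (req_pid == -1)
    return 1;			/* minus_one_ptid: any thread.  */

  if (req_pid == thr_pid && req_lwp == thr_lwp)
    return 1;			/* Exact thread match.  */

  if (req_pid == thr_pid && (req_lwp == 0 || req_lwp == -1))
    return 1;			/* 'pPID' or 'pPID.-1': all of PID.  */

  return 0;
}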
3596
3597 /* find_inferior callback for linux_resume.
3598 Set *FLAG_P if this lwp has an interesting status pending. */
3599
3600 static int
3601 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3602 {
3603 struct thread_info *thread = (struct thread_info *) entry;
3604 struct lwp_info *lwp = get_thread_lwp (thread);
3605
3606 /* LWPs which will not be resumed are not interesting, because
3607 we might not wait for them next time through linux_wait. */
3608 if (lwp->resume == NULL)
3609 return 0;
3610
3611 if (thread_still_has_status_pending_p (thread))
3612 * (int *) flag_p = 1;
3613
3614 return 0;
3615 }
3616
3617 /* Return 1 if this lwp that GDB wants running is stopped at an
3618 internal breakpoint that we need to step over. It assumes that any
3619 required STOP_PC adjustment has already been propagated to the
3620 inferior's regcache. */
3621
3622 static int
3623 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3624 {
3625 struct thread_info *thread = (struct thread_info *) entry;
3626 struct lwp_info *lwp = get_thread_lwp (thread);
3627 struct thread_info *saved_thread;
3628 CORE_ADDR pc;
3629
3630 /* LWPs which will not be resumed are not interesting, because we
3631 might not wait for them next time through linux_wait. */
3632
3633 if (!lwp->stopped)
3634 {
3635 if (debug_threads)
3636 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
3637 lwpid_of (thread));
3638 return 0;
3639 }
3640
3641 if (thread->last_resume_kind == resume_stop)
3642 {
3643 if (debug_threads)
3644 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
3645 " stopped\n",
3646 lwpid_of (thread));
3647 return 0;
3648 }
3649
3650 gdb_assert (lwp->suspended >= 0);
3651
3652 if (lwp->suspended)
3653 {
3654 if (debug_threads)
3655 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
3656 lwpid_of (thread));
3657 return 0;
3658 }
3659
3660 if (!lwp->need_step_over)
3661 {
3662 if (debug_threads)
3663 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
3664 }
3665
3666 if (lwp->status_pending_p)
3667 {
3668 if (debug_threads)
3669 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
3670 " status.\n",
3671 lwpid_of (thread));
3672 return 0;
3673 }
3674
3675 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3676 or we have. */
3677 pc = get_pc (lwp);
3678
3679 /* If the PC has changed since we stopped, then don't do anything,
3680 and let the breakpoint/tracepoint be hit. This happens if, for
3681 instance, GDB handled the decr_pc_after_break subtraction itself,
3682 GDB is OOL stepping this thread, or the user has issued a "jump"
3683 command, or poked the thread's registers herself. */
3684 if (pc != lwp->stop_pc)
3685 {
3686 if (debug_threads)
3687 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
3688 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3689 lwpid_of (thread),
3690 paddress (lwp->stop_pc), paddress (pc));
3691
3692 lwp->need_step_over = 0;
3693 return 0;
3694 }
3695
3696 saved_thread = current_thread;
3697 current_thread = thread;
3698
3699 /* We can only step over breakpoints we know about. */
3700 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3701 {
3702 	  /* Don't step over a breakpoint that GDB expects to hit,
3703 	     though. If the condition is being evaluated on the target's side
3704 	     and it evaluates to false, step over this breakpoint as well. */
3705 if (gdb_breakpoint_here (pc)
3706 && gdb_condition_true_at_breakpoint (pc)
3707 && gdb_no_commands_at_breakpoint (pc))
3708 {
3709 if (debug_threads)
3710 debug_printf ("Need step over [LWP %ld]? yes, but found"
3711 " GDB breakpoint at 0x%s; skipping step over\n",
3712 lwpid_of (thread), paddress (pc));
3713
3714 current_thread = saved_thread;
3715 return 0;
3716 }
3717 else
3718 {
3719 if (debug_threads)
3720 debug_printf ("Need step over [LWP %ld]? yes, "
3721 "found breakpoint at 0x%s\n",
3722 lwpid_of (thread), paddress (pc));
3723
3724 /* We've found an lwp that needs stepping over --- return 1 so
3725 that find_inferior stops looking. */
3726 current_thread = saved_thread;
3727
3728 /* If the step over is cancelled, this is set again. */
3729 lwp->need_step_over = 0;
3730 return 1;
3731 }
3732 }
3733
3734 current_thread = saved_thread;
3735
3736 if (debug_threads)
3737 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
3738 " at 0x%s\n",
3739 lwpid_of (thread), paddress (pc));
3740
3741 return 0;
3742 }
3743
3744 /* Start a step-over operation on LWP. When LWP is stopped at a
3745    breakpoint, to make progress, we need to move the breakpoint out
3746    of the way. If we let other threads run while we do that, they may
3747 pass by the breakpoint location and miss hitting it. To avoid
3748 that, a step-over momentarily stops all threads while LWP is
3749 single-stepped while the breakpoint is temporarily uninserted from
3750 the inferior. When the single-step finishes, we reinsert the
3751 breakpoint, and let all threads that are supposed to be running,
3752 run again.
3753
3754 On targets that don't support hardware single-step, we don't
3755 currently support full software single-stepping. Instead, we only
3756 support stepping over the thread event breakpoint, by asking the
3757 low target where to place a reinsert breakpoint. Since this
3758 routine assumes the breakpoint being stepped over is a thread event
3759 breakpoint, it usually assumes the return address of the current
3760 function is a good enough place to set the reinsert breakpoint. */
3761
3762 static int
3763 start_step_over (struct lwp_info *lwp)
3764 {
3765 struct thread_info *thread = get_lwp_thread (lwp);
3766 struct thread_info *saved_thread;
3767 CORE_ADDR pc;
3768 int step;
3769
3770 if (debug_threads)
3771 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
3772 lwpid_of (thread));
3773
3774 stop_all_lwps (1, lwp);
3775 gdb_assert (lwp->suspended == 0);
3776
3777 if (debug_threads)
3778 debug_printf ("Done stopping all threads for step-over.\n");
3779
3780 /* Note, we should always reach here with an already adjusted PC,
3781 either by GDB (if we're resuming due to GDB's request), or by our
3782 caller, if we just finished handling an internal breakpoint GDB
3783 shouldn't care about. */
3784 pc = get_pc (lwp);
3785
3786 saved_thread = current_thread;
3787 current_thread = thread;
3788
3789 lwp->bp_reinsert = pc;
3790 uninsert_breakpoints_at (pc);
3791 uninsert_fast_tracepoint_jumps_at (pc);
3792
3793 if (can_hardware_single_step ())
3794 {
3795 step = 1;
3796 }
3797 else
3798 {
3799 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3800 set_reinsert_breakpoint (raddr);
3801 step = 0;
3802 }
3803
3804 current_thread = saved_thread;
3805
3806 linux_resume_one_lwp (lwp, step, 0, NULL);
3807
3808 /* Require next event from this LWP. */
3809 step_over_bkpt = thread->entry.id;
3810 return 1;
3811 }
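
/* A minimal illustrative sketch (kept out of the build): the
   step-over dance above reduces to the sequence below. It reuses
   this file's helpers; the sketch function itself is hypothetical
   and assumes hardware single-step support. */
#if 0
static void
step_over_sketch (struct lwp_info *lwp)
{
  CORE_ADDR pc = get_pc (lwp);

  stop_all_lwps (1, lwp);		/* Freeze every other thread.  */
  uninsert_breakpoints_at (pc);		/* Lift the breakpoint at PC.  */
  linux_resume_one_lwp (lwp, 1, 0, NULL);  /* Hardware single-step.  */
  /* ... wait for the step to report back (see linux_wait_1) ...  */
  reinsert_breakpoints_at (pc);		/* Put the breakpoint back.  */
  unstop_all_lwps (0, lwp);		/* Let the other threads run.  */
}
#endif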
3812
3813 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
3814 start_step_over, if still there, and delete any reinsert
3815 breakpoints we've set, on non hardware single-step targets. */
3816
3817 static int
3818 finish_step_over (struct lwp_info *lwp)
3819 {
3820 if (lwp->bp_reinsert != 0)
3821 {
3822 if (debug_threads)
3823 debug_printf ("Finished step over.\n");
3824
3825 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3826 may be no breakpoint to reinsert there by now. */
3827 reinsert_breakpoints_at (lwp->bp_reinsert);
3828 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3829
3830 lwp->bp_reinsert = 0;
3831
3832 /* Delete any software-single-step reinsert breakpoints. No
3833 longer needed. We don't have to worry about other threads
3834 hitting this trap, and later not being able to explain it,
3835 because we were stepping over a breakpoint, and we hold all
3836 threads but LWP stopped while doing that. */
3837 if (!can_hardware_single_step ())
3838 delete_reinsert_breakpoints ();
3839
3840 step_over_bkpt = null_ptid;
3841 return 1;
3842 }
3843 else
3844 return 0;
3845 }
3846
3847 /* This function is called once per thread. We check the thread's resume
3848 request, which will tell us whether to resume, step, or leave the thread
3849 stopped; and what signal, if any, it should be sent.
3850
3851 For threads which we aren't explicitly told otherwise, we preserve
3852 the stepping flag; this is used for stepping over gdbserver-placed
3853 breakpoints.
3854
3855 If pending_flags was set in any thread, we queue any needed
3856 signals, since we won't actually resume. We already have a pending
3857 event to report, so we don't need to preserve any step requests;
3858 they should be re-issued if necessary. */
3859
3860 static int
3861 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
3862 {
3863 struct thread_info *thread = (struct thread_info *) entry;
3864 struct lwp_info *lwp = get_thread_lwp (thread);
3865 int step;
3866 int leave_all_stopped = * (int *) arg;
3867 int leave_pending;
3868
3869 if (lwp->resume == NULL)
3870 return 0;
3871
3872 if (lwp->resume->kind == resume_stop)
3873 {
3874 if (debug_threads)
3875 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
3876
3877 if (!lwp->stopped)
3878 {
3879 if (debug_threads)
3880 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
3881
3882 /* Stop the thread, and wait for the event asynchronously,
3883 through the event loop. */
3884 send_sigstop (lwp);
3885 }
3886 else
3887 {
3888 if (debug_threads)
3889 debug_printf ("already stopped LWP %ld\n",
3890 lwpid_of (thread));
3891
3892 /* The LWP may have been stopped in an internal event that
3893 was not meant to be notified back to GDB (e.g., gdbserver
3894 breakpoint), so we should be reporting a stop event in
3895 this case too. */
3896
3897 /* If the thread already has a pending SIGSTOP, this is a
3898 no-op. Otherwise, something later will presumably resume
3899 the thread and this will cause it to cancel any pending
3900 operation, due to last_resume_kind == resume_stop. If
3901 the thread already has a pending status to report, we
3902 will still report it the next time we wait - see
3903 status_pending_p_callback. */
3904
3905 /* If we already have a pending signal to report, then
3906 there's no need to queue a SIGSTOP, as this means we're
3907 midway through moving the LWP out of the jumppad, and we
3908 will report the pending signal as soon as that is
3909 finished. */
3910 if (lwp->pending_signals_to_report == NULL)
3911 send_sigstop (lwp);
3912 }
3913
3914 /* For stop requests, we're done. */
3915 lwp->resume = NULL;
3916 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3917 return 0;
3918 }
3919
3920   /* If this thread, which is about to be resumed, has a pending status,
3921      then don't resume any threads - we can just report the pending
3922      status. Make sure to queue any signals that would otherwise be
3923      sent. In all-stop mode, we base this decision on whether *any*
3924      thread has a pending status. If there's a thread that needs the
3925 step-over-breakpoint dance, then don't resume any other thread
3926 but that particular one. */
3927 leave_pending = (lwp->status_pending_p || leave_all_stopped);
3928
3929 if (!leave_pending)
3930 {
3931 if (debug_threads)
3932 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
3933
3934 step = (lwp->resume->kind == resume_step);
3935 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
3936 }
3937 else
3938 {
3939 if (debug_threads)
3940 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
3941
3942 /* If we have a new signal, enqueue the signal. */
3943 if (lwp->resume->sig != 0)
3944 {
3945 struct pending_signals *p_sig;
3946 p_sig = xmalloc (sizeof (*p_sig));
3947 p_sig->prev = lwp->pending_signals;
3948 p_sig->signal = lwp->resume->sig;
3949 memset (&p_sig->info, 0, sizeof (siginfo_t));
3950
3951 /* If this is the same signal we were previously stopped by,
3952 make sure to queue its siginfo. We can ignore the return
3953 value of ptrace; if it fails, we'll skip
3954 PTRACE_SETSIGINFO. */
3955 if (WIFSTOPPED (lwp->last_status)
3956 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3957 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3958 &p_sig->info);
3959
3960 lwp->pending_signals = p_sig;
3961 }
3962 }
3963
3964 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3965 lwp->resume = NULL;
3966 return 0;
3967 }
3968
3969 static void
3970 linux_resume (struct thread_resume *resume_info, size_t n)
3971 {
3972 struct thread_resume_array array = { resume_info, n };
3973 struct thread_info *need_step_over = NULL;
3974 int any_pending;
3975 int leave_all_stopped;
3976
3977 if (debug_threads)
3978 {
3979 debug_enter ();
3980 debug_printf ("linux_resume:\n");
3981 }
3982
3983 find_inferior (&all_threads, linux_set_resume_request, &array);
3984
3985 /* If there is a thread which would otherwise be resumed, which has
3986 a pending status, then don't resume any threads - we can just
3987 report the pending status. Make sure to queue any signals that
3988 would otherwise be sent. In non-stop mode, we'll apply this
3989      logic to each thread individually. We consume all pending events
3990      before considering whether to start a step-over (in all-stop). */
3991 any_pending = 0;
3992 if (!non_stop)
3993 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
3994
3995 /* If there is a thread which would otherwise be resumed, which is
3996 stopped at a breakpoint that needs stepping over, then don't
3997 resume any threads - have it step over the breakpoint with all
3998 other threads stopped, then resume all threads again. Make sure
3999 to queue any signals that would otherwise be delivered or
4000 queued. */
4001 if (!any_pending && supports_breakpoints ())
4002 need_step_over
4003 = (struct thread_info *) find_inferior (&all_threads,
4004 need_step_over_p, NULL);
4005
4006 leave_all_stopped = (need_step_over != NULL || any_pending);
4007
4008 if (debug_threads)
4009 {
4010 if (need_step_over != NULL)
4011 debug_printf ("Not resuming all, need step over\n");
4012 else if (any_pending)
4013 debug_printf ("Not resuming, all-stop and found "
4014 "an LWP with pending status\n");
4015 else
4016 debug_printf ("Resuming, no pending status or step over needed\n");
4017 }
4018
4019 /* Even if we're leaving threads stopped, queue all signals we'd
4020 otherwise deliver. */
4021 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4022
4023 if (need_step_over)
4024 start_step_over (get_thread_lwp (need_step_over));
4025
4026 if (debug_threads)
4027 {
4028 debug_printf ("linux_resume done\n");
4029 debug_exit ();
4030 }
4031 }
4032
4033 /* This function is called once per thread. We check the thread's
4034 last resume request, which will tell us whether to resume, step, or
4035 leave the thread stopped. Any signal the client requested to be
4036 delivered has already been enqueued at this point.
4037
4038 If any thread that GDB wants running is stopped at an internal
4039 breakpoint that needs stepping over, we start a step-over operation
4040 on that particular thread, and leave all others stopped. */
4041
4042 static int
4043 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4044 {
4045 struct thread_info *thread = (struct thread_info *) entry;
4046 struct lwp_info *lwp = get_thread_lwp (thread);
4047 int step;
4048
4049 if (lwp == except)
4050 return 0;
4051
4052 if (debug_threads)
4053 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4054
4055 if (!lwp->stopped)
4056 {
4057 if (debug_threads)
4058 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4059 return 0;
4060 }
4061
4062 if (thread->last_resume_kind == resume_stop
4063 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4064 {
4065 if (debug_threads)
4066 	debug_printf ("   client wants LWP %ld to remain stopped\n",
4067 lwpid_of (thread));
4068 return 0;
4069 }
4070
4071 if (lwp->status_pending_p)
4072 {
4073 if (debug_threads)
4074 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4075 lwpid_of (thread));
4076 return 0;
4077 }
4078
4079 gdb_assert (lwp->suspended >= 0);
4080
4081 if (lwp->suspended)
4082 {
4083 if (debug_threads)
4084 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4085 return 0;
4086 }
4087
4088 if (thread->last_resume_kind == resume_stop
4089 && lwp->pending_signals_to_report == NULL
4090 && lwp->collecting_fast_tracepoint == 0)
4091 {
4092       /* We haven't reported this LWP as stopped yet (otherwise, the
4093 	 last_status.kind check above would catch it, and we wouldn't
4094 	 reach here). This LWP may have been momentarily paused by a
4095 	 stop_all_lwps call while handling, for example, another LWP's
4096 	 step-over. In that case, the pending expected SIGSTOP signal
4097 that was queued at vCont;t handling time will have already
4098 been consumed by wait_for_sigstop, and so we need to requeue
4099 another one here. Note that if the LWP already has a SIGSTOP
4100 pending, this is a no-op. */
4101
4102 if (debug_threads)
4103 debug_printf ("Client wants LWP %ld to stop. "
4104 "Making sure it has a SIGSTOP pending\n",
4105 lwpid_of (thread));
4106
4107 send_sigstop (lwp);
4108 }
4109
4110 step = thread->last_resume_kind == resume_step;
4111 linux_resume_one_lwp (lwp, step, 0, NULL);
4112 return 0;
4113 }
4114
4115 static int
4116 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4117 {
4118 struct thread_info *thread = (struct thread_info *) entry;
4119 struct lwp_info *lwp = get_thread_lwp (thread);
4120
4121 if (lwp == except)
4122 return 0;
4123
4124 lwp->suspended--;
4125 gdb_assert (lwp->suspended >= 0);
4126
4127 return proceed_one_lwp (entry, except);
4128 }
4129
4130 /* When we finish a step-over, set threads running again. If there's
4131 another thread that may need a step-over, now's the time to start
4132 it. Eventually, we'll move all threads past their breakpoints. */
4133
4134 static void
4135 proceed_all_lwps (void)
4136 {
4137 struct thread_info *need_step_over;
4138
4139 /* If there is a thread which would otherwise be resumed, which is
4140 stopped at a breakpoint that needs stepping over, then don't
4141 resume any threads - have it step over the breakpoint with all
4142 other threads stopped, then resume all threads again. */
4143
4144 if (supports_breakpoints ())
4145 {
4146 need_step_over
4147 = (struct thread_info *) find_inferior (&all_threads,
4148 need_step_over_p, NULL);
4149
4150 if (need_step_over != NULL)
4151 {
4152 if (debug_threads)
4153 debug_printf ("proceed_all_lwps: found "
4154 "thread %ld needing a step-over\n",
4155 lwpid_of (need_step_over));
4156
4157 start_step_over (get_thread_lwp (need_step_over));
4158 return;
4159 }
4160 }
4161
4162 if (debug_threads)
4163 debug_printf ("Proceeding, no step-over needed\n");
4164
4165 find_inferior (&all_threads, proceed_one_lwp, NULL);
4166 }
4167
4168 /* Stopped LWPs that the client wanted to be running, that don't have
4169 pending statuses, are set to run again, except for EXCEPT, if not
4170 NULL. This undoes a stop_all_lwps call. */
4171
4172 static void
4173 unstop_all_lwps (int unsuspend, struct lwp_info *except)
4174 {
4175 if (debug_threads)
4176 {
4177 debug_enter ();
4178 if (except)
4179 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
4180 lwpid_of (get_lwp_thread (except)));
4181 else
4182 debug_printf ("unstopping all lwps\n");
4183 }
4184
4185 if (unsuspend)
4186 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
4187 else
4188 find_inferior (&all_threads, proceed_one_lwp, except);
4189
4190 if (debug_threads)
4191 {
4192 debug_printf ("unstop_all_lwps done\n");
4193 debug_exit ();
4194 }
4195 }
4196
4197
4198 #ifdef HAVE_LINUX_REGSETS
4199
4200 #define use_linux_regsets 1
4201
4202 /* Returns true if REGSET has been disabled. */
4203
4204 static int
4205 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4206 {
4207 return (info->disabled_regsets != NULL
4208 && info->disabled_regsets[regset - info->regsets]);
4209 }
4210
4211 /* Disable REGSET. */
4212
4213 static void
4214 disable_regset (struct regsets_info *info, struct regset_info *regset)
4215 {
4216 int dr_offset;
4217
4218 dr_offset = regset - info->regsets;
4219 if (info->disabled_regsets == NULL)
4220 info->disabled_regsets = xcalloc (1, info->num_regsets);
4221 info->disabled_regsets[dr_offset] = 1;
4222 }
4223
4224 static int
4225 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4226 struct regcache *regcache)
4227 {
4228 struct regset_info *regset;
4229 int saw_general_regs = 0;
4230 int pid;
4231 struct iovec iov;
4232
4233 pid = lwpid_of (current_thread);
4234 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4235 {
4236 void *buf, *data;
4237 int nt_type, res;
4238
4239 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4240 continue;
4241
4242 buf = xmalloc (regset->size);
4243
4244 nt_type = regset->nt_type;
4245 if (nt_type)
4246 {
4247 iov.iov_base = buf;
4248 iov.iov_len = regset->size;
4249 data = (void *) &iov;
4250 }
4251 else
4252 data = buf;
4253
4254 #ifndef __sparc__
4255 res = ptrace (regset->get_request, pid,
4256 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4257 #else
4258 res = ptrace (regset->get_request, pid, data, nt_type);
4259 #endif
4260 if (res < 0)
4261 {
4262 if (errno == EIO)
4263 {
4264 /* If we get EIO on a regset, do not try it again for
4265 this process mode. */
4266 disable_regset (regsets_info, regset);
4267 }
4268 else if (errno == ENODATA)
4269 {
4270 /* ENODATA may be returned if the regset is currently
4271 not "active". This can happen in normal operation,
4272 so suppress the warning in this case. */
4273 }
4274 else
4275 {
4276 char s[256];
4277 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4278 pid);
4279 perror (s);
4280 }
4281 }
4282 else
4283 {
4284 if (regset->type == GENERAL_REGS)
4285 saw_general_regs = 1;
4286 regset->store_function (regcache, buf);
4287 }
4288 free (buf);
4289 }
4290 if (saw_general_regs)
4291 return 0;
4292 else
4293 return 1;
4294 }
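
/* Standalone sketch of the nt_type/iovec convention used above. For
   regsets with a note type, PTRACE_GETREGSET takes the NT_* constant
   as the "address" argument and a struct iovec describing the
   buffer. A hedged example, assuming a Linux host that defines
   PTRACE_GETREGSET and NT_PRSTATUS: */
#if 0
#include <elf.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <sys/user.h>

static long
fetch_gregs_example (int pid, struct user_regs_struct *regs)
{
  struct iovec iov = { regs, sizeof (*regs) };

  /* The kernel trims iov.iov_len to what it actually copied.  */
  return ptrace (PTRACE_GETREGSET, pid, (void *) NT_PRSTATUS, &iov);
}
#endif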
4295
4296 static int
4297 regsets_store_inferior_registers (struct regsets_info *regsets_info,
4298 struct regcache *regcache)
4299 {
4300 struct regset_info *regset;
4301 int saw_general_regs = 0;
4302 int pid;
4303 struct iovec iov;
4304
4305 pid = lwpid_of (current_thread);
4306 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4307 {
4308 void *buf, *data;
4309 int nt_type, res;
4310
4311 if (regset->size == 0 || regset_disabled (regsets_info, regset)
4312 || regset->fill_function == NULL)
4313 continue;
4314
4315 buf = xmalloc (regset->size);
4316
4317 /* First fill the buffer with the current register set contents,
4318 in case there are any items in the kernel's regset that are
4319 not in gdbserver's regcache. */
4320
4321 nt_type = regset->nt_type;
4322 if (nt_type)
4323 {
4324 iov.iov_base = buf;
4325 iov.iov_len = regset->size;
4326 data = (void *) &iov;
4327 }
4328 else
4329 data = buf;
4330
4331 #ifndef __sparc__
4332 res = ptrace (regset->get_request, pid,
4333 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4334 #else
4335 res = ptrace (regset->get_request, pid, data, nt_type);
4336 #endif
4337
4338 if (res == 0)
4339 {
4340 /* Then overlay our cached registers on that. */
4341 regset->fill_function (regcache, buf);
4342
4343 /* Only now do we write the register set. */
4344 #ifndef __sparc__
4345 res = ptrace (regset->set_request, pid,
4346 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4347 #else
4348 res = ptrace (regset->set_request, pid, data, nt_type);
4349 #endif
4350 }
4351
4352 if (res < 0)
4353 {
4354 if (errno == EIO)
4355 {
4356 /* If we get EIO on a regset, do not try it again for
4357 this process mode. */
4358 disable_regset (regsets_info, regset);
4359 }
4360 else if (errno == ESRCH)
4361 {
4362 /* At this point, ESRCH should mean the process is
4363 already gone, in which case we simply ignore attempts
4364 to change its registers. See also the related
4365 comment in linux_resume_one_lwp. */
4366 free (buf);
4367 return 0;
4368 }
4369 else
4370 {
4371 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4372 }
4373 }
4374 else if (regset->type == GENERAL_REGS)
4375 saw_general_regs = 1;
4376 free (buf);
4377 }
4378 if (saw_general_regs)
4379 return 0;
4380 else
4381 return 1;
4382 }
4383
4384 #else /* !HAVE_LINUX_REGSETS */
4385
4386 #define use_linux_regsets 0
4387 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4388 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4389
4390 #endif
4391
4392 /* Return 1 if register REGNO is supported by one of the regset ptrace
4393 calls or 0 if it has to be transferred individually. */
4394
4395 static int
4396 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
4397 {
4398 unsigned char mask = 1 << (regno % 8);
4399 size_t index = regno / 8;
4400
4401 return (use_linux_regsets
4402 && (regs_info->regset_bitmap == NULL
4403 || (regs_info->regset_bitmap[index] & mask) != 0));
4404 }
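
/* Sketch of the bitmap test above with a hypothetical bitmap: bit
   REGNO is stored LSB-first within bytes, so register 11 lands in
   byte 1 under mask 1 << 3. A NULL bitmap means "every register is
   in some regset". */
#if 0
static int
in_regsets_example (const unsigned char *bitmap, int regno)
{
  return (bitmap == NULL
	  || (bitmap[regno / 8] & (1 << (regno % 8))) != 0);
}
#endif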
4405
4406 #ifdef HAVE_LINUX_USRREGS
4407
4408 int
4409 register_addr (const struct usrregs_info *usrregs, int regnum)
4410 {
4411 int addr;
4412
4413 if (regnum < 0 || regnum >= usrregs->num_regs)
4414 error ("Invalid register number %d.", regnum);
4415
4416 addr = usrregs->regmap[regnum];
4417
4418 return addr;
4419 }
4420
4421 /* Fetch one register. */
4422 static void
4423 fetch_register (const struct usrregs_info *usrregs,
4424 struct regcache *regcache, int regno)
4425 {
4426 CORE_ADDR regaddr;
4427 int i, size;
4428 char *buf;
4429 int pid;
4430
4431 if (regno >= usrregs->num_regs)
4432 return;
4433 if ((*the_low_target.cannot_fetch_register) (regno))
4434 return;
4435
4436 regaddr = register_addr (usrregs, regno);
4437 if (regaddr == -1)
4438 return;
4439
4440 size = ((register_size (regcache->tdesc, regno)
4441 + sizeof (PTRACE_XFER_TYPE) - 1)
4442 & -sizeof (PTRACE_XFER_TYPE));
4443 buf = alloca (size);
4444
4445 pid = lwpid_of (current_thread);
4446 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4447 {
4448 errno = 0;
4449 *(PTRACE_XFER_TYPE *) (buf + i) =
4450 ptrace (PTRACE_PEEKUSER, pid,
4451 /* Coerce to a uintptr_t first to avoid potential gcc warning
4452 of coercing an 8 byte integer to a 4 byte pointer. */
4453 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
4454 regaddr += sizeof (PTRACE_XFER_TYPE);
4455 if (errno != 0)
4456 error ("reading register %d: %s", regno, strerror (errno));
4457 }
4458
4459 if (the_low_target.supply_ptrace_register)
4460 the_low_target.supply_ptrace_register (regcache, regno, buf);
4461 else
4462 supply_register (regcache, regno, buf);
4463 }
4464
4465 /* Store one register. */
4466 static void
4467 store_register (const struct usrregs_info *usrregs,
4468 struct regcache *regcache, int regno)
4469 {
4470 CORE_ADDR regaddr;
4471 int i, size;
4472 char *buf;
4473 int pid;
4474
4475 if (regno >= usrregs->num_regs)
4476 return;
4477 if ((*the_low_target.cannot_store_register) (regno))
4478 return;
4479
4480 regaddr = register_addr (usrregs, regno);
4481 if (regaddr == -1)
4482 return;
4483
4484 size = ((register_size (regcache->tdesc, regno)
4485 + sizeof (PTRACE_XFER_TYPE) - 1)
4486 & -sizeof (PTRACE_XFER_TYPE));
4487 buf = alloca (size);
4488 memset (buf, 0, size);
4489
4490 if (the_low_target.collect_ptrace_register)
4491 the_low_target.collect_ptrace_register (regcache, regno, buf);
4492 else
4493 collect_register (regcache, regno, buf);
4494
4495 pid = lwpid_of (current_thread);
4496 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4497 {
4498 errno = 0;
4499 ptrace (PTRACE_POKEUSER, pid,
4500 /* Coerce to a uintptr_t first to avoid potential gcc warning
4501 about coercing an 8 byte integer to a 4 byte pointer. */
4502 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
4503 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
4504 if (errno != 0)
4505 {
4506 /* At this point, ESRCH should mean the process is
4507 already gone, in which case we simply ignore attempts
4508 to change its registers. See also the related
4509 comment in linux_resume_one_lwp. */
4510 if (errno == ESRCH)
4511 return;
4512
4513 if ((*the_low_target.cannot_store_register) (regno) == 0)
4514 error ("writing register %d: %s", regno, strerror (errno));
4515 }
4516 regaddr += sizeof (PTRACE_XFER_TYPE);
4517 }
4518 }
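
/* Standalone sketch of one PTRACE_POKEUSER transfer from the loop
   above: the third argument is an offset into the kernel's "user
   area" (a regmap entry), and data moves one word per syscall.
   USER_OFFSET and VALUE are whatever the caller computed. */
#if 0
#include <errno.h>
#include <sys/ptrace.h>

static int
poke_user_word_example (int pid, long user_offset, long value)
{
  errno = 0;
  ptrace (PTRACE_POKEUSER, pid, (void *) user_offset, (void *) value);
  return errno;		/* 0 on success, else the ptrace error.  */
}
#endif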
4519
4520 /* Fetch all registers, or just one, from the child process.
4521 If REGNO is -1, do this for all registers, skipping any that are
4522 assumed to have been retrieved by regsets_fetch_inferior_registers,
4523 unless ALL is non-zero.
4524 Otherwise, REGNO specifies which register (so we can save time). */
4525 static void
4526 usr_fetch_inferior_registers (const struct regs_info *regs_info,
4527 struct regcache *regcache, int regno, int all)
4528 {
4529 struct usrregs_info *usr = regs_info->usrregs;
4530
4531 if (regno == -1)
4532 {
4533 for (regno = 0; regno < usr->num_regs; regno++)
4534 if (all || !linux_register_in_regsets (regs_info, regno))
4535 fetch_register (usr, regcache, regno);
4536 }
4537 else
4538 fetch_register (usr, regcache, regno);
4539 }
4540
4541 /* Store our register values back into the inferior.
4542 If REGNO is -1, do this for all registers, skipping any that are
4543 assumed to have been saved by regsets_store_inferior_registers,
4544 unless ALL is non-zero.
4545 Otherwise, REGNO specifies which register (so we can save time). */
4546 static void
4547 usr_store_inferior_registers (const struct regs_info *regs_info,
4548 struct regcache *regcache, int regno, int all)
4549 {
4550 struct usrregs_info *usr = regs_info->usrregs;
4551
4552 if (regno == -1)
4553 {
4554 for (regno = 0; regno < usr->num_regs; regno++)
4555 if (all || !linux_register_in_regsets (regs_info, regno))
4556 store_register (usr, regcache, regno);
4557 }
4558 else
4559 store_register (usr, regcache, regno);
4560 }
4561
4562 #else /* !HAVE_LINUX_USRREGS */
4563
4564 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4565 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4566
4567 #endif
4568
4569
4570 void
4571 linux_fetch_registers (struct regcache *regcache, int regno)
4572 {
4573 int use_regsets;
4574 int all = 0;
4575 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4576
4577 if (regno == -1)
4578 {
4579 if (the_low_target.fetch_register != NULL
4580 && regs_info->usrregs != NULL)
4581 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
4582 (*the_low_target.fetch_register) (regcache, regno);
4583
4584 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
4585 if (regs_info->usrregs != NULL)
4586 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
4587 }
4588 else
4589 {
4590 if (the_low_target.fetch_register != NULL
4591 && (*the_low_target.fetch_register) (regcache, regno))
4592 return;
4593
4594 use_regsets = linux_register_in_regsets (regs_info, regno);
4595 if (use_regsets)
4596 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
4597 regcache);
4598 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4599 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
4600 }
4601 }
4602
4603 void
4604 linux_store_registers (struct regcache *regcache, int regno)
4605 {
4606 int use_regsets;
4607 int all = 0;
4608 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4609
4610 if (regno == -1)
4611 {
4612 all = regsets_store_inferior_registers (regs_info->regsets_info,
4613 regcache);
4614 if (regs_info->usrregs != NULL)
4615 usr_store_inferior_registers (regs_info, regcache, regno, all);
4616 }
4617 else
4618 {
4619 use_regsets = linux_register_in_regsets (regs_info, regno);
4620 if (use_regsets)
4621 all = regsets_store_inferior_registers (regs_info->regsets_info,
4622 regcache);
4623 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4624 usr_store_inferior_registers (regs_info, regcache, regno, 1);
4625 }
4626 }
4627
4628
4629 /* Copy LEN bytes from inferior's memory starting at MEMADDR
4630 to debugger memory starting at MYADDR. */
4631
4632 static int
4633 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4634 {
4635 int pid = lwpid_of (current_thread);
4636 register PTRACE_XFER_TYPE *buffer;
4637 register CORE_ADDR addr;
4638 register int count;
4639 char filename[64];
4640 register int i;
4641 int ret;
4642 int fd;
4643
4644 /* Try using /proc. Don't bother for one word. */
4645 if (len >= 3 * sizeof (long))
4646 {
4647 int bytes;
4648
4649 /* We could keep this file open and cache it - possibly one per
4650 thread. That requires some juggling, but is even faster. */
4651 sprintf (filename, "/proc/%d/mem", pid);
4652 fd = open (filename, O_RDONLY | O_LARGEFILE);
4653 if (fd == -1)
4654 goto no_proc;
4655
4656 /* If pread64 is available, use it. It's faster if the kernel
4657 supports it (only one syscall), and it's 64-bit safe even on
4658 32-bit platforms (for instance, SPARC debugging a SPARC64
4659 application). */
4660 #ifdef HAVE_PREAD64
4661 bytes = pread64 (fd, myaddr, len, memaddr);
4662 #else
4663 bytes = -1;
4664 if (lseek (fd, memaddr, SEEK_SET) != -1)
4665 bytes = read (fd, myaddr, len);
4666 #endif
4667
4668 close (fd);
4669 if (bytes == len)
4670 return 0;
4671
4672       /* Some data was read; we'll try to get the rest with ptrace. */
4673 if (bytes > 0)
4674 {
4675 memaddr += bytes;
4676 myaddr += bytes;
4677 len -= bytes;
4678 }
4679 }
4680
4681 no_proc:
4682 /* Round starting address down to longword boundary. */
4683 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4684 /* Round ending address up; get number of longwords that makes. */
4685 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4686 / sizeof (PTRACE_XFER_TYPE));
4687 /* Allocate buffer of that many longwords. */
4688 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4689
4690   /* Read all the longwords. */
4691 errno = 0;
4692 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4693 {
4694 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4695 about coercing an 8 byte integer to a 4 byte pointer. */
4696 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4697 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4698 (PTRACE_TYPE_ARG4) 0);
4699 if (errno)
4700 break;
4701 }
4702 ret = errno;
4703
4704 /* Copy appropriate bytes out of the buffer. */
4705 if (i > 0)
4706 {
4707 i *= sizeof (PTRACE_XFER_TYPE);
4708 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
4709 memcpy (myaddr,
4710 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4711 i < len ? i : len);
4712 }
4713
4714 return ret;
4715 }
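
/* Self-contained sketch of the fast path above: one pread64 from
   /proc/PID/mem instead of a PTRACE_PEEKTEXT per word. Returns the
   byte count read, or -1; the caller decides whether to fall back
   to ptrace. Assumes a glibc host with pread64. */
#if 0
#define _LARGEFILE64_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static ssize_t
proc_mem_read_example (int pid, unsigned long addr, void *buf, size_t len)
{
  char filename[64];
  int fd;
  ssize_t n;

  snprintf (filename, sizeof filename, "/proc/%d/mem", pid);
  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return -1;
  n = pread64 (fd, buf, len, (off64_t) addr);
  close (fd);
  return n;
}
#endif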
4716
4717 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4718 memory at MEMADDR. On failure (cannot write to the inferior)
4719 returns the value of errno. Always succeeds if LEN is zero. */
4720
4721 static int
4722 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4723 {
4724 register int i;
4725 /* Round starting address down to longword boundary. */
4726 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4727 /* Round ending address up; get number of longwords that makes. */
4728 register int count
4729 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4730 / sizeof (PTRACE_XFER_TYPE);
4731
4732 /* Allocate buffer of that many longwords. */
4733 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4734 alloca (count * sizeof (PTRACE_XFER_TYPE));
4735
4736 int pid = lwpid_of (current_thread);
4737
4738 if (len == 0)
4739 {
4740 /* Zero length write always succeeds. */
4741 return 0;
4742 }
4743
4744 if (debug_threads)
4745 {
4746 /* Dump up to four bytes. */
4747 unsigned int val = * (unsigned int *) myaddr;
4748 if (len == 1)
4749 val = val & 0xff;
4750 else if (len == 2)
4751 val = val & 0xffff;
4752 else if (len == 3)
4753 val = val & 0xffffff;
4754 debug_printf ("Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4755 val, (long)memaddr);
4756 }
4757
4758 /* Fill start and end extra bytes of buffer with existing memory data. */
4759
4760 errno = 0;
4761 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4762 about coercing an 8 byte integer to a 4 byte pointer. */
4763 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4764 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4765 (PTRACE_TYPE_ARG4) 0);
4766 if (errno)
4767 return errno;
4768
4769 if (count > 1)
4770 {
4771 errno = 0;
4772 buffer[count - 1]
4773 = ptrace (PTRACE_PEEKTEXT, pid,
4774 /* Coerce to a uintptr_t first to avoid potential gcc warning
4775 about coercing an 8 byte integer to a 4 byte pointer. */
4776 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
4777 * sizeof (PTRACE_XFER_TYPE)),
4778 (PTRACE_TYPE_ARG4) 0);
4779 if (errno)
4780 return errno;
4781 }
4782
4783 /* Copy data to be written over corresponding part of buffer. */
4784
4785 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4786 myaddr, len);
4787
4788 /* Write the entire buffer. */
4789
4790 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4791 {
4792 errno = 0;
4793 ptrace (PTRACE_POKETEXT, pid,
4794 /* Coerce to a uintptr_t first to avoid potential gcc warning
4795 about coercing an 8 byte integer to a 4 byte pointer. */
4796 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4797 (PTRACE_TYPE_ARG4) buffer[i]);
4798 if (errno)
4799 return errno;
4800 }
4801
4802 return 0;
4803 }
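
/* Sketch of the alignment arithmetic above, for a hypothetical
   4-byte PTRACE_XFER_TYPE: writing 3 bytes at 0x1002 touches the two
   words at 0x1000 and 0x1004, which is why both boundary words are
   peeked before the buffer is poked back. */
#if 0
#include <assert.h>

static void
alignment_example (void)
{
  unsigned long memaddr = 0x1002, len = 3, xfer = 4;
  unsigned long addr = memaddr & -xfer;	/* Rounds down to 0x1000.  */
  unsigned long count = (((memaddr + len) - addr) + xfer - 1) / xfer;

  assert (addr == 0x1000 && count == 2);
}
#endif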
4804
4805 static void
4806 linux_look_up_symbols (void)
4807 {
4808 #ifdef USE_THREAD_DB
4809 struct process_info *proc = current_process ();
4810
4811 if (proc->private->thread_db != NULL)
4812 return;
4813
4814 /* If the kernel supports tracing clones, then we don't need to
4815 use the magic thread event breakpoint to learn about
4816 threads. */
4817 thread_db_init (!linux_supports_traceclone ());
4818 #endif
4819 }
4820
4821 static void
4822 linux_request_interrupt (void)
4823 {
4824 extern unsigned long signal_pid;
4825
4826 /* Send a SIGINT to the process group. This acts just like the user
4827 typed a ^C on the controlling terminal. */
4828 kill (-signal_pid, SIGINT);
4829 }
4830
4831 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4832 to debugger memory starting at MYADDR. */
4833
4834 static int
4835 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
4836 {
4837 char filename[PATH_MAX];
4838 int fd, n;
4839 int pid = lwpid_of (current_thread);
4840
4841 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4842
4843 fd = open (filename, O_RDONLY);
4844 if (fd < 0)
4845 return -1;
4846
4847 if (offset != (CORE_ADDR) 0
4848 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4849 n = -1;
4850 else
4851 n = read (fd, myaddr, len);
4852
4853 close (fd);
4854
4855 return n;
4856 }
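
/* Sketch of consuming the auxv data read above: entries are
   (a_type, a_un.a_val) pairs terminated by AT_NULL. ElfW picks the
   host word size, so this only matches the inferior when both are
   the same ABI; the 32-vs-64-bit handling lives elsewhere in this
   file (see get_phdr_phnum_from_proc_auxv). */
#if 0
#include <elf.h>
#include <link.h>

static unsigned long
auxv_lookup_example (const ElfW(auxv_t) *av, unsigned long type)
{
  for (; av->a_type != AT_NULL; av++)
    if (av->a_type == type)
      return av->a_un.a_val;
  return 0;
}
#endif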
4857
4858 /* These breakpoint and watchpoint related wrapper functions simply
4859 pass on the function call if the target has registered a
4860 corresponding function. */
4861
4862 static int
4863 linux_supports_z_point_type (char z_type)
4864 {
4865 return (the_low_target.supports_z_point_type != NULL
4866 && the_low_target.supports_z_point_type (z_type));
4867 }
4868
4869 static int
4870 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
4871 int size, struct raw_breakpoint *bp)
4872 {
4873 if (the_low_target.insert_point != NULL)
4874 return the_low_target.insert_point (type, addr, size, bp);
4875 else
4876 /* Unsupported (see target.h). */
4877 return 1;
4878 }
4879
4880 static int
4881 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
4882 int size, struct raw_breakpoint *bp)
4883 {
4884 if (the_low_target.remove_point != NULL)
4885 return the_low_target.remove_point (type, addr, size, bp);
4886 else
4887 /* Unsupported (see target.h). */
4888 return 1;
4889 }
4890
4891 static int
4892 linux_stopped_by_watchpoint (void)
4893 {
4894 struct lwp_info *lwp = get_thread_lwp (current_thread);
4895
4896 return lwp->stop_reason == LWP_STOPPED_BY_WATCHPOINT;
4897 }
4898
4899 static CORE_ADDR
4900 linux_stopped_data_address (void)
4901 {
4902 struct lwp_info *lwp = get_thread_lwp (current_thread);
4903
4904 return lwp->stopped_data_address;
4905 }
4906
4907 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
4908 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
4909 && defined(PT_TEXT_END_ADDR)
4910
4911 /* This is only used for targets that define PT_TEXT_ADDR,
4912 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
4913 the target has different ways of acquiring this information, like
4914 loadmaps. */
4915
4916 /* Under uClinux, programs are loaded at non-zero offsets, which we need
4917 to tell gdb about. */
4918
4919 static int
4920 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4921 {
4922 unsigned long text, text_end, data;
4923 int pid = lwpid_of (get_thread_lwp (current_thread));
4924
4925 errno = 0;
4926
4927 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
4928 (PTRACE_TYPE_ARG4) 0);
4929 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
4930 (PTRACE_TYPE_ARG4) 0);
4931 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
4932 (PTRACE_TYPE_ARG4) 0);
4933
4934 if (errno == 0)
4935 {
4936 /* Both text and data offsets produced at compile-time (and so
4937 used by gdb) are relative to the beginning of the program,
4938 with the data segment immediately following the text segment.
4939 However, the actual runtime layout in memory may put the data
4940 somewhere else, so when we send gdb a data base-address, we
4941 use the real data base address and subtract the compile-time
4942 data base-address from it (which is just the length of the
4943 text segment). BSS immediately follows data in both
4944 cases. */
4945 *text_p = text;
4946 *data_p = data - (text_end - text);
4947
4948 return 1;
4949 }
4950 return 0;
4951 }
4952 #endif
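
/* Worked example of the offset math above, with made-up numbers:
   gdb's compile-time data base equals the text length, so the
   reported data base is the runtime data address minus
   (text_end - text). */
#if 0
#include <assert.h>

static void
uclinux_offsets_example (void)
{
  unsigned long text = 0x10000, text_end = 0x14000, data = 0x20000;

  /* 0x20000 - 0x4000: the base gdb should add to compile-time data
     addresses. */
  assert (data - (text_end - text) == 0x1c000);
}
#endif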
4953
4954 static int
4955 linux_qxfer_osdata (const char *annex,
4956 unsigned char *readbuf, unsigned const char *writebuf,
4957 CORE_ADDR offset, int len)
4958 {
4959 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4960 }
4961
4962 /* Convert a native/host siginfo object, into/from the siginfo in the
4963 layout of the inferiors' architecture. */
4964
4965 static void
4966 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
4967 {
4968 int done = 0;
4969
4970 if (the_low_target.siginfo_fixup != NULL)
4971 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4972
4973 /* If there was no callback, or the callback didn't do anything,
4974 then just do a straight memcpy. */
4975 if (!done)
4976 {
4977 if (direction == 1)
4978 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
4979 else
4980 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
4981 }
4982 }
4983
4984 static int
4985 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
4986 unsigned const char *writebuf, CORE_ADDR offset, int len)
4987 {
4988 int pid;
4989 siginfo_t siginfo;
4990 char inf_siginfo[sizeof (siginfo_t)];
4991
4992 if (current_thread == NULL)
4993 return -1;
4994
4995 pid = lwpid_of (current_thread);
4996
4997 if (debug_threads)
4998 debug_printf ("%s siginfo for lwp %d.\n",
4999 readbuf != NULL ? "Reading" : "Writing",
5000 pid);
5001
5002 if (offset >= sizeof (siginfo))
5003 return -1;
5004
5005 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5006 return -1;
5007
5008 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5009 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5010 inferior with a 64-bit GDBSERVER should look the same as debugging it
5011 with a 32-bit GDBSERVER, we need to convert it. */
5012 siginfo_fixup (&siginfo, inf_siginfo, 0);
5013
5014 if (offset + len > sizeof (siginfo))
5015 len = sizeof (siginfo) - offset;
5016
5017 if (readbuf != NULL)
5018 memcpy (readbuf, inf_siginfo + offset, len);
5019 else
5020 {
5021 memcpy (inf_siginfo + offset, writebuf, len);
5022
5023 /* Convert back to ptrace layout before flushing it out. */
5024 siginfo_fixup (&siginfo, inf_siginfo, 1);
5025
5026 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5027 return -1;
5028 }
5029
5030 return len;
5031 }
5032
5033 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
5034    it lets us notice when children change state; and it serves as the
5035    handler for the sigsuspend in my_waitpid. */
5036
5037 static void
5038 sigchld_handler (int signo)
5039 {
5040 int old_errno = errno;
5041
5042 if (debug_threads)
5043 {
5044 do
5045 {
5046 /* fprintf is not async-signal-safe, so call write
5047 directly. */
5048 if (write (2, "sigchld_handler\n",
5049 sizeof ("sigchld_handler\n") - 1) < 0)
5050 break; /* just ignore */
5051 } while (0);
5052 }
5053
5054 if (target_is_async_p ())
5055 async_file_mark (); /* trigger a linux_wait */
5056
5057 errno = old_errno;
5058 }
5059
5060 static int
5061 linux_supports_non_stop (void)
5062 {
5063 return 1;
5064 }
5065
5066 static int
5067 linux_async (int enable)
5068 {
5069 int previous = target_is_async_p ();
5070
5071 if (debug_threads)
5072 debug_printf ("linux_async (%d), previous=%d\n",
5073 enable, previous);
5074
5075 if (previous != enable)
5076 {
5077 sigset_t mask;
5078 sigemptyset (&mask);
5079 sigaddset (&mask, SIGCHLD);
5080
5081 sigprocmask (SIG_BLOCK, &mask, NULL);
5082
5083 if (enable)
5084 {
5085 if (pipe (linux_event_pipe) == -1)
5086 {
5087 linux_event_pipe[0] = -1;
5088 linux_event_pipe[1] = -1;
5089 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5090
5091 warning ("creating event pipe failed.");
5092 return previous;
5093 }
5094
5095 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5096 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5097
5098 /* Register the event loop handler. */
5099 add_file_handler (linux_event_pipe[0],
5100 handle_target_event, NULL);
5101
5102 /* Always trigger a linux_wait. */
5103 async_file_mark ();
5104 }
5105 else
5106 {
5107 delete_file_handler (linux_event_pipe[0]);
5108
5109 close (linux_event_pipe[0]);
5110 close (linux_event_pipe[1]);
5111 linux_event_pipe[0] = -1;
5112 linux_event_pipe[1] = -1;
5113 }
5114
5115 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5116 }
5117
5118 return previous;
5119 }
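
/* Sketch of the self-pipe pattern linux_async wires up: the SIGCHLD
   handler marks a non-blocking pipe and the event loop watches the
   read end, turning an async signal into an ordinary file-descriptor
   event. All names here are hypothetical. */
#if 0
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static int event_pipe_sketch[2];

static void
sigchld_mark_sketch (int signo)
{
  /* write is async-signal-safe; if the pipe is full a mark is
     already pending, so the failure can be ignored. */
  (void) write (event_pipe_sketch[1], "+", 1);
}

static int
event_pipe_init_sketch (void)
{
  if (pipe (event_pipe_sketch) != 0)
    return -1;
  fcntl (event_pipe_sketch[0], F_SETFL, O_NONBLOCK);
  fcntl (event_pipe_sketch[1], F_SETFL, O_NONBLOCK);
  signal (SIGCHLD, sigchld_mark_sketch);
  return event_pipe_sketch[0];	/* Hand this fd to the event loop.  */
}
#endif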
5120
5121 static int
5122 linux_start_non_stop (int nonstop)
5123 {
5124 /* Register or unregister from event-loop accordingly. */
5125 linux_async (nonstop);
5126
5127 if (target_is_async_p () != (nonstop != 0))
5128 return -1;
5129
5130 return 0;
5131 }
5132
5133 static int
5134 linux_supports_multi_process (void)
5135 {
5136 return 1;
5137 }
5138
5139 static int
5140 linux_supports_disable_randomization (void)
5141 {
5142 #ifdef HAVE_PERSONALITY
5143 return 1;
5144 #else
5145 return 0;
5146 #endif
5147 }
5148
5149 static int
5150 linux_supports_agent (void)
5151 {
5152 return 1;
5153 }
5154
5155 static int
5156 linux_supports_range_stepping (void)
5157 {
5158 if (*the_low_target.supports_range_stepping == NULL)
5159 return 0;
5160
5161 return (*the_low_target.supports_range_stepping) ();
5162 }
5163
5164 /* Enumerate spufs IDs for process PID. */
5165 static int
5166 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5167 {
5168 int pos = 0;
5169 int written = 0;
5170 char path[128];
5171 DIR *dir;
5172 struct dirent *entry;
5173
5174 sprintf (path, "/proc/%ld/fd", pid);
5175 dir = opendir (path);
5176 if (!dir)
5177 return -1;
5178
5179 rewinddir (dir);
5180 while ((entry = readdir (dir)) != NULL)
5181 {
5182 struct stat st;
5183 struct statfs stfs;
5184 int fd;
5185
5186 fd = atoi (entry->d_name);
5187 if (!fd)
5188 continue;
5189
5190 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5191 if (stat (path, &st) != 0)
5192 continue;
5193 if (!S_ISDIR (st.st_mode))
5194 continue;
5195
5196 if (statfs (path, &stfs) != 0)
5197 continue;
5198 if (stfs.f_type != SPUFS_MAGIC)
5199 continue;
5200
5201 if (pos >= offset && pos + 4 <= offset + len)
5202 {
5203 *(unsigned int *)(buf + pos - offset) = fd;
5204 written += 4;
5205 }
5206 pos += 4;
5207 }
5208
5209 closedir (dir);
5210 return written;
5211 }
5212
5213 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5214 object type, using the /proc file system. */
5215 static int
5216 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5217 unsigned const char *writebuf,
5218 CORE_ADDR offset, int len)
5219 {
5220 long pid = lwpid_of (current_thread);
5221 char buf[128];
5222 int fd = 0;
5223 int ret = 0;
5224
5225 if (!writebuf && !readbuf)
5226 return -1;
5227
5228 if (!*annex)
5229 {
5230 if (!readbuf)
5231 return -1;
5232 else
5233 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5234 }
5235
5236 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5237 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
5238 if (fd <= 0)
5239 return -1;
5240
5241 if (offset != 0
5242 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5243 {
5244 close (fd);
5245 return 0;
5246 }
5247
5248 if (writebuf)
5249 ret = write (fd, writebuf, (size_t) len);
5250 else
5251 ret = read (fd, readbuf, (size_t) len);
5252
5253 close (fd);
5254 return ret;
5255 }
5256
5257 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5258 struct target_loadseg
5259 {
5260 /* Core address to which the segment is mapped. */
5261 Elf32_Addr addr;
5262 /* VMA recorded in the program header. */
5263 Elf32_Addr p_vaddr;
5264 /* Size of this segment in memory. */
5265 Elf32_Word p_memsz;
5266 };
5267
5268 # if defined PT_GETDSBT
5269 struct target_loadmap
5270 {
5271 /* Protocol version number, must be zero. */
5272 Elf32_Word version;
5273 /* Pointer to the DSBT table, its size, and the DSBT index. */
5274 unsigned *dsbt_table;
5275 unsigned dsbt_size, dsbt_index;
5276 /* Number of segments in this map. */
5277 Elf32_Word nsegs;
5278 /* The actual memory map. */
5279 struct target_loadseg segs[/*nsegs*/];
5280 };
5281 # define LINUX_LOADMAP PT_GETDSBT
5282 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5283 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5284 # else
5285 struct target_loadmap
5286 {
5287 /* Protocol version number, must be zero. */
5288 Elf32_Half version;
5289 /* Number of segments in this map. */
5290 Elf32_Half nsegs;
5291 /* The actual memory map. */
5292 struct target_loadseg segs[/*nsegs*/];
5293 };
5294 # define LINUX_LOADMAP PTRACE_GETFDPIC
5295 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5296 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5297 # endif
5298
5299 static int
5300 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5301 unsigned char *myaddr, unsigned int len)
5302 {
5303 int pid = lwpid_of (current_thread);
5304 int addr = -1;
5305 struct target_loadmap *data = NULL;
5306 unsigned int actual_length, copy_length;
5307
5308 if (strcmp (annex, "exec") == 0)
5309 addr = (int) LINUX_LOADMAP_EXEC;
5310 else if (strcmp (annex, "interp") == 0)
5311 addr = (int) LINUX_LOADMAP_INTERP;
5312 else
5313 return -1;
5314
5315 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5316 return -1;
5317
5318 if (data == NULL)
5319 return -1;
5320
5321 actual_length = sizeof (struct target_loadmap)
5322 + sizeof (struct target_loadseg) * data->nsegs;
5323
5324 if (offset < 0 || offset > actual_length)
5325 return -1;
5326
5327 copy_length = actual_length - offset < len ? actual_length - offset : len;
5328 memcpy (myaddr, (char *) data + offset, copy_length);
5329 return copy_length;
5330 }
5331 #else
5332 # define linux_read_loadmap NULL
5333 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5334
5335 static void
5336 linux_process_qsupported (const char *query)
5337 {
5338 if (the_low_target.process_qsupported != NULL)
5339 the_low_target.process_qsupported (query);
5340 }
5341
5342 static int
5343 linux_supports_tracepoints (void)
5344 {
5345 if (*the_low_target.supports_tracepoints == NULL)
5346 return 0;
5347
5348 return (*the_low_target.supports_tracepoints) ();
5349 }
5350
5351 static CORE_ADDR
5352 linux_read_pc (struct regcache *regcache)
5353 {
5354 if (the_low_target.get_pc == NULL)
5355 return 0;
5356
5357 return (*the_low_target.get_pc) (regcache);
5358 }
5359
5360 static void
5361 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5362 {
5363 gdb_assert (the_low_target.set_pc != NULL);
5364
5365 (*the_low_target.set_pc) (regcache, pc);
5366 }
5367
5368 static int
5369 linux_thread_stopped (struct thread_info *thread)
5370 {
5371 return get_thread_lwp (thread)->stopped;
5372 }
5373
5374 /* This exposes stop-all-threads functionality to other modules. */
5375
5376 static void
5377 linux_pause_all (int freeze)
5378 {
5379 stop_all_lwps (freeze, NULL);
5380 }
5381
5382 /* This exposes unstop-all-threads functionality to other gdbserver
5383 modules. */
5384
5385 static void
5386 linux_unpause_all (int unfreeze)
5387 {
5388 unstop_all_lwps (unfreeze, NULL);
5389 }
5390
5391 static int
5392 linux_prepare_to_access_memory (void)
5393 {
5394   /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5395 running LWP. */
5396 if (non_stop)
5397 linux_pause_all (1);
5398 return 0;
5399 }
5400
5401 static void
5402 linux_done_accessing_memory (void)
5403 {
5404   /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5405 running LWP. */
5406 if (non_stop)
5407 linux_unpause_all (1);
5408 }
5409
5410 static int
5411 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5412 CORE_ADDR collector,
5413 CORE_ADDR lockaddr,
5414 ULONGEST orig_size,
5415 CORE_ADDR *jump_entry,
5416 CORE_ADDR *trampoline,
5417 ULONGEST *trampoline_size,
5418 unsigned char *jjump_pad_insn,
5419 ULONGEST *jjump_pad_insn_size,
5420 CORE_ADDR *adjusted_insn_addr,
5421 CORE_ADDR *adjusted_insn_addr_end,
5422 char *err)
5423 {
5424 return (*the_low_target.install_fast_tracepoint_jump_pad)
5425 (tpoint, tpaddr, collector, lockaddr, orig_size,
5426 jump_entry, trampoline, trampoline_size,
5427 jjump_pad_insn, jjump_pad_insn_size,
5428 adjusted_insn_addr, adjusted_insn_addr_end,
5429 err);
5430 }
5431
5432 static struct emit_ops *
5433 linux_emit_ops (void)
5434 {
5435 if (the_low_target.emit_ops != NULL)
5436 return (*the_low_target.emit_ops) ();
5437 else
5438 return NULL;
5439 }
5440
5441 static int
5442 linux_get_min_fast_tracepoint_insn_len (void)
5443 {
5444 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5445 }
5446
5447 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5448
5449 static int
5450 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5451 CORE_ADDR *phdr_memaddr, int *num_phdr)
5452 {
5453 char filename[PATH_MAX];
5454 int fd;
5455 const int auxv_size = is_elf64
5456 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5457 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5458
5459 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5460
5461 fd = open (filename, O_RDONLY);
5462 if (fd < 0)
5463 return 1;
5464
5465 *phdr_memaddr = 0;
5466 *num_phdr = 0;
5467 while (read (fd, buf, auxv_size) == auxv_size
5468 && (*phdr_memaddr == 0 || *num_phdr == 0))
5469 {
5470 if (is_elf64)
5471 {
5472 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5473
5474 switch (aux->a_type)
5475 {
5476 case AT_PHDR:
5477 *phdr_memaddr = aux->a_un.a_val;
5478 break;
5479 case AT_PHNUM:
5480 *num_phdr = aux->a_un.a_val;
5481 break;
5482 }
5483 }
5484 else
5485 {
5486 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5487
5488 switch (aux->a_type)
5489 {
5490 case AT_PHDR:
5491 *phdr_memaddr = aux->a_un.a_val;
5492 break;
5493 case AT_PHNUM:
5494 *num_phdr = aux->a_un.a_val;
5495 break;
5496 }
5497 }
5498 }
5499
5500 close (fd);
5501
5502 if (*phdr_memaddr == 0 || *num_phdr == 0)
5503 {
5504 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5505 "phdr_memaddr = %ld, phdr_num = %d",
5506 (long) *phdr_memaddr, *num_phdr);
5507 return 2;
5508 }
5509
5510 return 0;
5511 }
5512
5513 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5514
5515 static CORE_ADDR
5516 get_dynamic (const int pid, const int is_elf64)
5517 {
5518 CORE_ADDR phdr_memaddr, relocation;
5519 int num_phdr, i;
5520 unsigned char *phdr_buf;
5521 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5522
5523 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5524 return 0;
5525
5526 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5527 phdr_buf = alloca (num_phdr * phdr_size);
5528
5529 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5530 return 0;
5531
5532 /* Compute relocation: it is expected to be 0 for "regular" executables,
5533 non-zero for PIE ones. */
5534 relocation = -1;
5535 for (i = 0; relocation == -1 && i < num_phdr; i++)
5536 if (is_elf64)
5537 {
5538 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5539
5540 if (p->p_type == PT_PHDR)
5541 relocation = phdr_memaddr - p->p_vaddr;
5542 }
5543 else
5544 {
5545 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5546
5547 if (p->p_type == PT_PHDR)
5548 relocation = phdr_memaddr - p->p_vaddr;
5549 }
5550
5551 if (relocation == -1)
5552 {
5553       /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
5554 	 real world executables, including PIE executables, always have
5555 	 PT_PHDR present. PT_PHDR is not present in some shared libraries or
5556 	 in fpc (Free Pascal 2.4) binaries, but neither of those needs or
5557 	 provides DT_DEBUG anyway (fpc binaries are statically linked).
5558
5559 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
5560
5561 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
5562
5563 return 0;
5564 }
5565
5566 for (i = 0; i < num_phdr; i++)
5567 {
5568 if (is_elf64)
5569 {
5570 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5571
5572 if (p->p_type == PT_DYNAMIC)
5573 return p->p_vaddr + relocation;
5574 }
5575 else
5576 {
5577 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5578
5579 if (p->p_type == PT_DYNAMIC)
5580 return p->p_vaddr + relocation;
5581 }
5582 }
5583
5584 return 0;
5585 }
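
/* Worked example of the PIE relocation computed above, with made-up
   addresses: the load bias is AT_PHDR minus PT_PHDR's p_vaddr, and
   it shifts every p_vaddr in the file, including PT_DYNAMIC's. */
#if 0
#include <assert.h>

static void
relocation_example (void)
{
  unsigned long phdr_memaddr = 0x555555554040;	/* From AT_PHDR.  */
  unsigned long pt_phdr_vaddr = 0x40;		/* PT_PHDR p_vaddr.  */
  unsigned long relocation = phdr_memaddr - pt_phdr_vaddr;
  unsigned long pt_dynamic_vaddr = 0x2e00;	/* PT_DYNAMIC p_vaddr.  */

  assert (relocation + pt_dynamic_vaddr == 0x555555556e00);
}
#endif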
5586
5587 /* Return &_r_debug in the inferior, or -1 if not present. Return value
5588 can be 0 if the inferior does not yet have the library list initialized.
5589 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
5590 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
5591
5592 static CORE_ADDR
5593 get_r_debug (const int pid, const int is_elf64)
5594 {
5595 CORE_ADDR dynamic_memaddr;
5596 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
5597 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
5598 CORE_ADDR map = -1;
5599
5600 dynamic_memaddr = get_dynamic (pid, is_elf64);
5601 if (dynamic_memaddr == 0)
5602 return map;
5603
  while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
    {
      if (is_elf64)
        {
          Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
#ifdef DT_MIPS_RLD_MAP
          union
            {
              Elf64_Xword map;
              unsigned char buf[sizeof (Elf64_Xword)];
            }
          rld_map;

          if (dyn->d_tag == DT_MIPS_RLD_MAP)
            {
              if (linux_read_memory (dyn->d_un.d_val,
                                     rld_map.buf, sizeof (rld_map.buf)) == 0)
                return rld_map.map;
              else
                break;
            }
#endif  /* DT_MIPS_RLD_MAP */

          if (dyn->d_tag == DT_DEBUG && map == -1)
            map = dyn->d_un.d_val;

          if (dyn->d_tag == DT_NULL)
            break;
        }
      else
        {
          Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
#ifdef DT_MIPS_RLD_MAP
          union
            {
              Elf32_Word map;
              unsigned char buf[sizeof (Elf32_Word)];
            }
          rld_map;

          if (dyn->d_tag == DT_MIPS_RLD_MAP)
            {
              if (linux_read_memory (dyn->d_un.d_val,
                                     rld_map.buf, sizeof (rld_map.buf)) == 0)
                return rld_map.map;
              else
                break;
            }
#endif  /* DT_MIPS_RLD_MAP */

          if (dyn->d_tag == DT_DEBUG && map == -1)
            map = dyn->d_un.d_val;

          if (dyn->d_tag == DT_NULL)
            break;
        }

      dynamic_memaddr += dyn_size;
    }

  return map;
}

/* Read one pointer from MEMADDR in the inferior.  */

static int
read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
{
  int ret;

  /* Go through a union so this works on either big or little endian
     hosts, when the inferior's pointer size is smaller than the size
     of CORE_ADDR.  It is assumed the inferior's endianness is the
     same as the superior's.  */
  union
  {
    CORE_ADDR core_addr;
    unsigned int ui;
    unsigned char uc;
  } addr;

  ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
  if (ret == 0)
    {
      if (ptr_size == sizeof (CORE_ADDR))
        *ptr = addr.core_addr;
      else if (ptr_size == sizeof (unsigned int))
        *ptr = addr.ui;
      else
        gdb_assert_not_reached ("unhandled pointer size");
    }
  return ret;
}

struct link_map_offsets
  {
    /* Offset and size of r_debug.r_version.  */
    int r_version_offset;

    /* Offset and size of r_debug.r_map.  */
    int r_map_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };

/* Construct qXfer:libraries-svr4:read reply.  */

static int
linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
                            unsigned const char *writebuf,
                            CORE_ADDR offset, int len)
{
  char *document;
  unsigned document_len;
  struct process_info_private *const priv = current_process ()->private;
  char filename[PATH_MAX];
  int pid, is_elf64;

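  /* Hard-wired field offsets within the SVR4 r_debug and link_map
     structures, for 32-bit and 64-bit inferiors respectively.  */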
  static const struct link_map_offsets lmo_32bit_offsets =
    {
      0,     /* r_version offset.  */
      4,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      4,     /* l_name offset in link_map.  */
      8,     /* l_ld offset in link_map.  */
      12,    /* l_next offset in link_map.  */
      16     /* l_prev offset in link_map.  */
    };

  static const struct link_map_offsets lmo_64bit_offsets =
    {
      0,     /* r_version offset.  */
      8,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      8,     /* l_name offset in link_map.  */
      16,    /* l_ld offset in link_map.  */
      24,    /* l_next offset in link_map.  */
      32     /* l_prev offset in link_map.  */
    };
  const struct link_map_offsets *lmo;
  unsigned int machine;
  int ptr_size;
  CORE_ADDR lm_addr = 0, lm_prev = 0;
  int allocated = 1024;
  char *p;
  CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
  int header_done = 0;

  if (writebuf != NULL)
    return -2;
  if (readbuf == NULL)
    return -1;

  pid = lwpid_of (current_thread);
  xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
  is_elf64 = elf_64_file_p (filename, &machine);
  lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
  ptr_size = is_elf64 ? 8 : 4;

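  /* The annex is a semicolon-separated list of NAME=HEXADDR pairs.  We
     recognize "start" (the link_map entry to resume reading from) and
     "prev" (the expected l_prev value of that entry); unknown names are
     skipped.  */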
  while (annex[0] != '\0')
    {
      const char *sep;
      CORE_ADDR *addrp;
      int len;

      sep = strchr (annex, '=');
      if (sep == NULL)
        break;

      len = sep - annex;
      if (len == 5 && strncmp (annex, "start", 5) == 0)
        addrp = &lm_addr;
      else if (len == 4 && strncmp (annex, "prev", 4) == 0)
        addrp = &lm_prev;
      else
        {
          annex = strchr (sep, ';');
          if (annex == NULL)
            break;
          annex++;
          continue;
        }

      annex = decode_address_to_semicolon (addrp, sep + 1);
    }

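  /* If the annex did not give us a starting point, locate the head of
     the link map through r_debug.r_map.  */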
  if (lm_addr == 0)
    {
      int r_version = 0;

      if (priv->r_debug == 0)
        priv->r_debug = get_r_debug (pid, is_elf64);

      /* We failed to find DT_DEBUG.  This situation will not change
         for this inferior - do not retry it.  Report it to GDB as
         E01; see GDB's solib-svr4.c for the reasons.  */
      if (priv->r_debug == (CORE_ADDR) -1)
        return -1;

      if (priv->r_debug != 0)
        {
          if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
                                 (unsigned char *) &r_version,
                                 sizeof (r_version)) != 0
              || r_version != 1)
            {
              warning ("unexpected r_debug version %d", r_version);
            }
          else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
                                 &lm_addr, ptr_size) != 0)
            {
              warning ("unable to read r_map from 0x%lx",
                       (long) priv->r_debug + lmo->r_map_offset);
            }
        }
    }

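  /* Build the XML reply incrementally; P tracks the write position and
     ALLOCATED the buffer capacity, which is doubled whenever it runs
     short.  */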
  document = xmalloc (allocated);
  strcpy (document, "<library-list-svr4 version=\"1.0\"");
  p = document + strlen (document);

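  /* Walk the inferior's link map, stopping at the end of the list or on
     the first entry whose fields cannot be read.  */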
  while (lm_addr
         && read_one_ptr (lm_addr + lmo->l_name_offset,
                          &l_name, ptr_size) == 0
         && read_one_ptr (lm_addr + lmo->l_addr_offset,
                          &l_addr, ptr_size) == 0
         && read_one_ptr (lm_addr + lmo->l_ld_offset,
                          &l_ld, ptr_size) == 0
         && read_one_ptr (lm_addr + lmo->l_prev_offset,
                          &l_prev, ptr_size) == 0
         && read_one_ptr (lm_addr + lmo->l_next_offset,
                          &l_next, ptr_size) == 0)
    {
      unsigned char libname[PATH_MAX];

      if (lm_prev != l_prev)
        {
          warning ("Corrupted shared library list: 0x%lx != 0x%lx",
                   (long) lm_prev, (long) l_prev);
          break;
        }

      /* Ignore the first entry even if it has a valid name, as it
         corresponds to the main executable.  The first entry should not
         be skipped if the dynamic loader was loaded late by a static
         executable (see the solib-svr4.c parameter ignore_first), but in
         that case the main executable has no PT_DYNAMIC, and this
         function has already returned above due to a failed get_r_debug.  */
      if (lm_prev == 0)
        {
          sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
          p = p + strlen (p);
        }
      else
        {
          /* Not checking for error because reading may stop before
             we've got PATH_MAX worth of characters.  */
          libname[0] = '\0';
          linux_read_memory (l_name, libname, sizeof (libname) - 1);
          libname[sizeof (libname) - 1] = '\0';
          if (libname[0] != '\0')
            {
              /* 6x the size for xml_escape_text below.  */
              size_t len = 6 * strlen ((char *) libname);
              char *name;

              if (!header_done)
                {
                  /* Terminate `<library-list-svr4'.  */
                  *p++ = '>';
                  header_done = 1;
                }

              while (allocated < p - document + len + 200)
                {
                  /* Expand to guarantee sufficient storage.  */
                  uintptr_t document_len = p - document;

                  document = xrealloc (document, 2 * allocated);
                  allocated *= 2;
                  p = document + document_len;
                }

              name = xml_escape_text ((char *) libname);
              p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
                            "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
                            name, (unsigned long) lm_addr,
                            (unsigned long) l_addr, (unsigned long) l_ld);
              free (name);
            }
        }

      lm_prev = lm_addr;
      lm_addr = l_next;
    }

  if (!header_done)
    {
      /* Empty list; terminate `<library-list-svr4'.  */
      strcpy (p, "/>");
    }
  else
    strcpy (p, "</library-list-svr4>");

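  /* Return only the slice of the document selected by OFFSET and LEN.  */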
  document_len = strlen (document);
  if (offset < document_len)
    document_len -= offset;
  else
    document_len = 0;
  if (len > document_len)
    len = document_len;

  memcpy (readbuf, document + offset, len);
  xfree (document);

  return len;
}

#ifdef HAVE_LINUX_BTRACE

/* See to_enable_btrace target method.  */

static struct btrace_target_info *
linux_low_enable_btrace (ptid_t ptid)
{
  struct btrace_target_info *tinfo;

  tinfo = linux_enable_btrace (ptid);

  if (tinfo != NULL)
    {
      struct thread_info *thread = find_thread_ptid (ptid);
      struct regcache *regcache = get_thread_regcache (thread, 0);

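      /* Derive the inferior's pointer width from the size of register 0,
         which is assumed to have pointer width on all supported targets.  */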
      tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
    }

  return tinfo;
}

/* See to_disable_btrace target method.  */

static int
linux_low_disable_btrace (struct btrace_target_info *tinfo)
{
  enum btrace_error err;

  err = linux_disable_btrace (tinfo);
  return (err == BTRACE_ERR_NONE ? 0 : -1);
}

/* See to_read_btrace target method.  */

static int
linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
                       int type)
{
  VEC (btrace_block_s) *btrace;
  struct btrace_block *block;
  enum btrace_error err;
  int i;

  btrace = NULL;
  err = linux_read_btrace (&btrace, tinfo, type);
  if (err != BTRACE_ERR_NONE)
    {
      if (err == BTRACE_ERR_OVERFLOW)
        buffer_grow_str0 (buffer, "E.Overflow.");
      else
        buffer_grow_str0 (buffer, "E.Generic Error.");

      return -1;
    }

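  /* Emit the trace as a btrace XML document, one <block> element per
     branch trace block.  */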
  buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
  buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");

  for (i = 0; VEC_iterate (btrace_block_s, btrace, i, block); i++)
    buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
                       paddress (block->begin), paddress (block->end));

  buffer_grow_str0 (buffer, "</btrace>\n");

  VEC_free (btrace_block_s, btrace);

  return 0;
}
#endif /* HAVE_LINUX_BTRACE */

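/* The Linux target vector.  This is a positional initializer, so the
   entries below must stay in the same order as the members of struct
   target_ops; NULL marks an operation this target does not provide.  */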
static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_mourn,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_prepare_to_access_memory,
  linux_done_accessing_memory,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_supports_z_point_type,
  linux_insert_point,
  linux_remove_point,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
#if defined(__UCLIBC__) && defined(HAS_NOMMU)         \
    && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
    && defined(PT_TEXT_END_ADDR)
  linux_read_offsets,
#else
  NULL,
#endif
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,
#endif
  linux_qxfer_spu,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,
#endif
  linux_common_core_of_thread,
  linux_read_loadmap,
  linux_process_qsupported,
  linux_supports_tracepoints,
  linux_read_pc,
  linux_write_pc,
  linux_thread_stopped,
  NULL,
  linux_pause_all,
  linux_unpause_all,
  linux_stabilize_threads,
  linux_install_fast_tracepoint_jump_pad,
  linux_emit_ops,
  linux_supports_disable_randomization,
  linux_get_min_fast_tracepoint_insn_len,
  linux_qxfer_libraries_svr4,
  linux_supports_agent,
#ifdef HAVE_LINUX_BTRACE
  linux_supports_btrace,
  linux_low_enable_btrace,
  linux_low_disable_btrace,
  linux_low_read_btrace,
#else
  NULL,
  NULL,
  NULL,
  NULL,
#endif
  linux_supports_range_stepping,
};

static void
linux_init_signals (void)
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
  signal (__SIGRTMIN+1, SIG_IGN);
#endif
}

#ifdef HAVE_LINUX_REGSETS
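/* Count the regsets in INFO's table; the table is terminated by a
   sentinel entry whose size is negative.  */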
void
initialize_regsets_info (struct regsets_info *info)
{
  for (info->num_regsets = 0;
       info->regsets[info->num_regsets].size >= 0;
       info->num_regsets++)
    ;
}
#endif

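/* One-time initialization of the Linux low target, called at gdbserver
   startup.  */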
void
initialize_low (void)
{
  struct sigaction sigchld_action;
  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (&linux_target_ops);
  set_breakpoint_data (the_low_target.breakpoint,
                       the_low_target.breakpoint_len);
  linux_init_signals ();
  linux_ptrace_init_warnings ();

  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);

  initialize_low_arch ();
}