Add backlink from lwp_info to thread_info.
binutils-gdb.git: gdb/gdbserver/linux-low.c
/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2014 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "linux-osdata.h"
#include "agent.h"

#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#include <stdio.h>
#include <sys/ptrace.h>
#include "linux-ptrace.h"
#include "linux-procfs.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "filestuff.h"
#include "tracepoint.h"
#include "hostio.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif
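
/* Note: personality (0xffffffff) is the conventional way to query the
   current persona without changing it; the kernel treats the all-ones
   argument as a plain read.  linux_create_inferior below relies on
   that idiom when it toggles ADDR_NO_RANDOMIZE around the fork.  */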

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif
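
/* Illustration (not part of the build): W_STOPCODE synthesizes the
   wait status waitpid would report for a stop by signal SIG.  For
   example, W_STOPCODE (SIGTRAP) == (5 << 8 | 0x7f) == 0x57f, for
   which WIFSTOPPED () is true (low byte 0x7f) and WSTOPSIG () yields
   SIGTRAP again.  dequeue_one_deferred_signal below uses this to
   rebuild a wait status for a signal that was intercepted earlier.  */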

/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR     (0x10000*4)
#define PT_DATA_ADDR     (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif
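
/* Sketch of how these offsets are consumed (the actual reader,
   linux_read_offsets, appears further down in this file): they index
   into the kernel's "user area", so something like

     errno = 0;
     text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR, 0);

   retrieves the text segment's load address for relocated no-MMU
   (uClinux-style) binaries.  */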

#ifdef HAVE_LINUX_BTRACE
# include "linux-btrace.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
   representation of the thread ID.

   ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
   the same as the LWP ID.

   ``all_processes'' is keyed by the "overall process ID", which
   GNU/Linux calls tgid, "thread group ID".  */

struct inferior_list all_lwps;

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}
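
/* Usage sketch (hypothetical pid, mirroring linux_wait_for_lwp and
   handle_extended_wait below): when waitpid reports a stop for a pid
   we do not know yet, we stash it, and the clone-event handler later
   claims it instead of calling waitpid again:

     add_to_pid_list (&stopped_pids, 4711, W_STOPCODE (SIGSTOP));
     ...
     int status;
     if (pull_pid_from_list (&stopped_pids, 4711, &status))
       ...  status now holds the stashed wait status.  */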

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
				  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (struct inferior_list_entry *entry);

/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}
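
/* Note on the return convention above: a header with valid ELF magic
   yields 0 or 1 according to EI_CLASS, while anything else yields -1.
   elf_64_file_p propagates that -1, so its documented "not
   accessible" case and the "not ELF at all" case look the same to
   callers.  */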

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}

static void
delete_lwp (struct lwp_info *lwp)
{
  remove_thread (get_lwp_thread (lwp));
  remove_inferior (&all_lwps, &lwp->entry);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->private = xcalloc (1, sizeof (*proc->private));

  /* Set the arch when the first LWP stops.  */
  proc->private->new_inferior = 1;

  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = wstat >> 16;
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      ptid = ptid_build (pid_of (event_child), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
	{
	  if (stopping_threads != NOT_STOPPING_THREADS)
	    new_lwp->stop_pc = get_stop_pc (new_lwp);
	  else
	    linux_resume_one_lwp (new_lwp, 0, 0, NULL);
	}
      else
	{
	  new_lwp->stop_expected = 1;

	  if (stopping_threads != NOT_STOPPING_THREADS)
	    {
	      new_lwp->stop_pc = get_stop_pc (new_lwp);
	      new_lwp->status_pending_p = 1;
	      new_lwp->status_pending = status;
	    }
	  else
	    /* Pass the signal on.  This is what GDB does - except
	       shouldn't we really report it instead?  */
	    linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
	}

      /* Always resume the current thread.  If we are stopping
	 threads, it will have a pending SIGSTOP; we may as well
	 collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}
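
/* Worked example of the decoding above (constants per
   <linux/ptrace.h>): with PTRACE_O_TRACECLONE in effect, a clone stop
   arrives as wstat == 0x03057f.  WIFSTOPPED () is true (low byte
   0x7f), WSTOPSIG () == SIGTRAP (0x05), and wstat >> 16 ==
   PTRACE_EVENT_CLONE (3), which is how linux_wait_for_event decides
   to route the status here rather than report a plain trap.  */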

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_inferior, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_inferior = saved_inferior;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:
   If we were single-stepping this process using PTRACE_SINGLESTEP,
   we will get only the one SIGTRAP (even if the instruction we
   stepped over was a breakpoint).  The value of $eip will be the
   next instruction.
   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If we cancel the SIGTRAP, we
   must resume at the decremented PC.

   (Presumably, not yet tested) On a non-decr_pc_after_break machine
   with hardware or kernel single-step:
   If we single-step over a breakpoint instruction, our PC will
   point at the following instruction.  If we continue and hit a
   breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static CORE_ADDR
get_stop_pc (struct lwp_info *lwp)
{
  CORE_ADDR stop_pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  stop_pc = get_pc (lwp);

  if (WSTOPSIG (lwp->last_status) == SIGTRAP
      && !lwp->stepping
      && !lwp->stopped_by_watchpoint
      && lwp->last_status >> 16 == 0)
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    debug_printf ("stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}
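
/* Concrete instance of the adjustment above (illustrative
   addresses): on x86 decr_pc_after_break is 1 and the breakpoint
   instruction is the one-byte int3.  With a breakpoint planted at
   0x400123, running into it under PTRACE_CONT reports $eip ==
   0x400124; get_stop_pc rewinds that to 0x400123 so the stop matches
   the breakpoint's address.  Single-step traps, watchpoint traps and
   extended events (last_status >> 16 != 0) are deliberately left
   unadjusted.  */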

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  lwp->entry.id = ptid;

  if (the_low_target.new_thread != NULL)
    lwp->arch_private = the_low_target.new_thread ();

  add_inferior_to_list (&all_lwps, &lwp->entry);
  lwp->thread = add_thread (ptid, lwp);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
	{
	  personality_set = 1;
	  personality (personality_orig | ADDR_NO_RANDOMIZE);
	}
      if (errno != 0 || (personality_set
			 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
	warning ("Error disabling address space randomization: %s",
		 strerror (errno));
    }
#endif

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
	 stdout to stderr so that inferior i/o doesn't corrupt the connection.
	 Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
	{
	  close (0);
	  open ("/dev/null", O_RDONLY);
	  dup2 (2, 1);
	  if (write (2, "stdin/stdout redirected\n",
		     sizeof ("stdin/stdout redirected\n") - 1) < 0)
	    {
	      /* Errors ignored.  */;
	    }
	}

      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
	warning ("Error restoring address space randomization: %s",
		 strerror (errno));
    }
#endif

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

/* Attach to an inferior process.  */

static void
linux_attach_lwp_1 (unsigned long lwpid, int initial)
{
  ptid_t ptid;
  struct lwp_info *new_lwp;

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    {
      struct buffer buffer;

      if (!initial)
	{
	  /* If we fail to attach to an LWP, just warn.  */
	  fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
		   strerror (errno), errno);
	  fflush (stderr);
	  return;
	}

      /* If we fail to attach to a process, report an error.  */
      buffer_init (&buffer);
      linux_ptrace_attach_warnings (lwpid, &buffer);
      buffer_grow_str0 (&buffer, "");
      error ("%sCannot attach to lwp %ld: %s (%d)", buffer_finish (&buffer),
	     lwpid, strerror (errno), errno);
    }

  if (initial)
    /* If lwp is the tgid, we handle adding existing threads later.
       Otherwise we just add lwp without bothering about any other
       threads.  */
    ptid = ptid_build (lwpid, lwpid, 0);
  else
    {
      /* Note that extracting the pid from the current inferior is
	 safe, since we're always called in the context of the same
	 process as this new thread.  */
      int pid = pid_of (get_thread_lwp (current_inferior));
      ptid = ptid_build (pid, lwpid, 0);
    }

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;
}

void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  /* Attach to PID.  We will check for other threads
     soon.  */
  linux_attach_lwp_1 (pid, 1);
  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
	 process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  if (linux_proc_get_tgid (pid) == pid)
    {
      DIR *dir;
      char pathname[128];

      sprintf (pathname, "/proc/%ld/task", pid);

      dir = opendir (pathname);

      if (!dir)
	{
	  fprintf (stderr, "Could not open /proc/%ld/task.\n", pid);
	  fflush (stderr);
	}
      else
	{
	  /* At this point we attached to the tgid.  Scan the task for
	     existing threads.  */
	  unsigned long lwp;
	  int new_threads_found;
	  int iterations = 0;
	  struct dirent *dp;

	  while (iterations < 2)
	    {
	      new_threads_found = 0;
	      /* Add all the other threads.  While we go through the
		 threads, new threads may be spawned.  Cycle through
		 the list of threads until we have done two iterations without
		 finding new threads.  */
	      while ((dp = readdir (dir)) != NULL)
		{
		  /* Fetch one lwp.  */
		  lwp = strtoul (dp->d_name, NULL, 10);

		  /* Is this a new thread?  */
		  if (lwp
		      && find_thread_ptid (ptid_build (pid, lwp, 0)) == NULL)
		    {
		      linux_attach_lwp_1 (lwp, 0);
		      new_threads_found++;

		      if (debug_threads)
			debug_printf ("Found and attached to new lwp %ld\n",
				      lwp);
		    }
		}

	      if (!new_threads_found)
		iterations++;
	      else
		iterations = 0;

	      rewinddir (dir);
	    }
	  closedir (dir);
	}
    }

  return 0;
}

struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
	return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (struct thread_info *thread)
{
  ptid_t ptid = thread->entry.id;
  int pid = ptid_get_pid (ptid);
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
			 second_thread_of_pid_p, &counter) == NULL);
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  int pid = lwpid_of (lwp);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill (pid, SIGKILL);
  if (debug_threads)
    debug_printf ("LKL: kill (SIGKILL) %s, 0, 0 (%s)\n",
		  target_pid_to_str (ptid_of (lwp)),
		  errno ? strerror (errno) : "OK");

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
		  target_pid_to_str (ptid_of (lwp)),
		  errno ? strerror (errno) : "OK");
}

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int wstat;
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (lwp) == pid)
    {
      if (debug_threads)
	debug_printf ("lkop: is last of process %s\n",
		      target_pid_to_str (entry->id));
      return 0;
    }

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      pid = linux_wait_for_event (lwp->entry.id, &wstat, __WALL);
    } while (pid > 0 && WIFSTOPPED (wstat));

  return 0;
}

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;
  int wstat;
  int lwpid;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      /* LWP is NULL here, so don't dereference it in the debug output.  */
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    {
      if (debug_threads)
	debug_printf ("lk_1: killing lwp %ld, for pid: %d\n",
		      lwpid_of (lwp), pid);

      do
	{
	  linux_kill_one_lwp (lwp);

	  /* Make sure it died.  The loop is most likely unnecessary.  */
	  lwpid = linux_wait_for_event (lwp->entry.id, &wstat, __WALL);
	} while (lwpid > 0 && WIFSTOPPED (wstat));
    }

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching; otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (lp)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (lp)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (lp)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it.  "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (lp)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (lp)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (lwp)));

      kill_lwp (lwpid_of (lwp), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_thread (get_lwp_thread (lwp));

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (lwp), (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    error (_("Can't detach %s: %s"),
	   target_pid_to_str (ptid_of (lwp)),
	   strerror (errno));

  delete_lwp (lwp);
  return 0;
}

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct process_info *process = proc;

  if (pid_of (lwp) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_lwps, delete_lwp_callback, process);

  /* Freeing all private data.  */
  priv = process->private;
  free (priv->arch_private);
  free (priv);
  process->private = NULL;

  remove_process (process);
}

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  ptid_t ptid = * (ptid_t *) arg;
  struct thread_info *thread;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (lwp->entry.id))
    return 0;

  thread = get_lwp_thread (lwp);

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  return lwp->status_pending_p;
}

static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
}
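
/* Matching rule used above (worked example, values invented): both
   ptid_build (42, 42, 0) and pid_to_ptid (42) locate the LWP whose
   lwp field is 42, because same_lwp falls back to the pid field when
   the lookup ptid carries no lwp.  That is what lets linux_kill look
   up the thread-group leader with pid_to_ptid (pid).  */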

static struct lwp_info *
linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
{
  int ret;
  int to_wait_for = -1;
  struct lwp_info *child = NULL;

  if (debug_threads)
    debug_printf ("linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));

  if (ptid_equal (ptid, minus_one_ptid))
    to_wait_for = -1;			/* any child */
  else
    to_wait_for = ptid_get_lwp (ptid);	/* this lwp only */

  options |= __WALL;

 retry:

  ret = my_waitpid (to_wait_for, wstatp, options);
  if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
    return NULL;
  else if (ret == -1)
    perror_with_name ("waitpid");

  if (debug_threads
      && (!WIFSTOPPED (*wstatp)
	  || (WSTOPSIG (*wstatp) != 32
	      && WSTOPSIG (*wstatp) != 33)))
    debug_printf ("Got an event from %d (%x)\n", ret, *wstatp);

  child = find_lwp_pid (pid_to_ptid (ret));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
     was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (*wstatp))
    {
      add_to_pid_list (&stopped_pids, ret, *wstatp);
      goto retry;
    }
  else if (child == NULL)
    goto retry;

  child->stopped = 1;

  child->last_status = *wstatp;

  if (WIFSTOPPED (*wstatp))
    {
      struct process_info *proc;

      /* Architecture-specific setup after inferior is running.  This
	 needs to happen after we have attached to the inferior and it
	 is stopped for the first time, but before we access any
	 inferior registers.  */
      proc = find_process_pid (pid_of (child));
      if (proc->private->new_inferior)
	{
	  struct thread_info *saved_inferior;

	  saved_inferior = current_inferior;
	  current_inferior = get_lwp_thread (child);

	  the_low_target.arch_setup ();

	  current_inferior = saved_inferior;

	  proc->private->new_inferior = 0;
	}
    }

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, the way to know which watched address trapped is to
     check the register that is used to select which address to watch.
     Problem is, between setting the watchpoint and reading back which
     data address trapped, the user may change the set of watchpoints,
     and, as a consequence, GDB changes the debug registers in the
     inferior.  To avoid reading back a stale stopped-data-address
     when that happens, we cache in LP the fact that a watchpoint
     trapped, and the corresponding data address, as soon as we see
     CHILD stop with a SIGTRAP.  If GDB changes the debug registers
     meanwhile, we have the cached data we can rely on.  */

  if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
    {
      if (the_low_target.stopped_by_watchpoint == NULL)
	{
	  child->stopped_by_watchpoint = 0;
	}
      else
	{
	  struct thread_info *saved_inferior;

	  saved_inferior = current_inferior;
	  current_inferior = get_lwp_thread (child);

	  child->stopped_by_watchpoint
	    = the_low_target.stopped_by_watchpoint ();

	  if (child->stopped_by_watchpoint)
	    {
	      if (the_low_target.stopped_data_address != NULL)
		child->stopped_data_address
		  = the_low_target.stopped_data_address ();
	      else
		child->stopped_data_address = 0;
	    }

	  current_inferior = saved_inferior;
	}
    }

  /* Store the STOP_PC, with adjustment applied.  This depends on the
     architecture being defined already (so that CHILD has a valid
     regcache), and on LAST_STATUS being set (to check for SIGTRAP or
     not).  */
  if (WIFSTOPPED (*wstatp))
    child->stop_pc = get_stop_pc (child);

  if (debug_threads
      && WIFSTOPPED (*wstatp)
      && the_low_target.get_pc != NULL)
    {
      struct thread_info *saved_inferior = current_inferior;
      struct regcache *regcache;
      CORE_ADDR pc;

      current_inferior = get_lwp_thread (child);
      regcache = get_thread_regcache (current_inferior, 1);
      pc = (*the_low_target.get_pc) (regcache);
      debug_printf ("linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
      current_inferior = saved_inferior;
    }

  return child;
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return true if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
	debug_printf ("got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
				  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (lwp), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

/* The reason we resume in the caller is that we want to be able to
   pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming; otherwise, linux_resume_one_lwp
   refuses to resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_inferior;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
	debug_printf ("Checking whether LWP %ld needs to move out of the "
		      "jump pad.\n",
		      lwpid_of (lwp));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != 0)
	    {
	      if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      if (debug_threads)
		debug_printf ("Checking whether LWP %ld needs to move out of "
			      "the jump pad...it does\n",
			      lwpid_of (lwp));
	      current_inferior = saved_inferior;

	      return 1;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint = 0;

	  if (r != 0
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp),
			     (PTRACE_TYPE_ARG3) 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp),
			  (PTRACE_TYPE_ARG3) 0, &info);
		}

	      regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
	      (*the_low_target.set_pc) (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
			      "stopping all threads momentarily.\n");

	      stop_all_lwps (1, lwp);
	      cancel_breakpoints ();

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  if (debug_threads)
    debug_printf ("Checking whether LWP %ld needs to move out of the "
		  "jump pad...no\n",
		  lwpid_of (lwp));

  current_inferior = saved_inferior;
  return 0;
}

/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;

  if (debug_threads)
    debug_printf ("Deferring signal %d for LWP %ld.\n",
		  WSTOPSIG (*wstat), lwpid_of (lwp));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	debug_printf (" Already queued %d\n",
		      sig->signal);

      debug_printf (" (no more currently queued signals)\n");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	{
	  if (sig->signal == WSTOPSIG (*wstat))
	    {
	      if (debug_threads)
		debug_printf ("Not requeuing already queued non-RT signal %d"
			      " for LWP %ld\n",
			      sig->signal,
			      lwpid_of (lwp));
	      return;
	    }
	}
    }

  p_sig = xmalloc (sizeof (*p_sig));
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);
  memset (&p_sig->info, 0, sizeof (siginfo_t));
  ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), (PTRACE_TYPE_ARG3) 0,
	  &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}

/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  if (lwp->pending_signals_to_report != NULL)
    {
      struct pending_signals **p_sig;

      p_sig = &lwp->pending_signals_to_report;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      *wstat = W_STOPCODE ((*p_sig)->signal);
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), (PTRACE_TYPE_ARG3) 0,
		&(*p_sig)->info);
      free (*p_sig);
      *p_sig = NULL;

      if (debug_threads)
	debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
		      WSTOPSIG (*wstat), lwpid_of (lwp));

      if (debug_threads)
	{
	  struct pending_signals *sig;

	  for (sig = lwp->pending_signals_to_report;
	       sig != NULL;
	       sig = sig->prev)
	    debug_printf (" Still queued %d\n",
			  sig->signal);

	  debug_printf (" (no more queued signals)\n");
	}

      return 1;
    }

  return 0;
}
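
/* Note on the queue discipline: enqueue_one_deferred_signal pushes at
   the head and dequeue_one_deferred_signal walks to the oldest entry,
   so deferred signals are re-reported in FIFO order.  A caller drains
   one signal per resume, roughly (sketch, assuming a stopped LWP
   about to be resumed):

     int wstat;
     if (dequeue_one_deferred_signal (lwp, &wstat))
       linux_resume_one_lwp (lwp, 0, WSTOPSIG (wstat), NULL);
*/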

/* Arrange for a breakpoint to be hit again later.  We don't keep the
   SIGTRAP status and don't forward the SIGTRAP signal to the LWP.  We
   will handle the current event, eventually we will resume this LWP,
   and this breakpoint will trap again.  */

static int
cancel_breakpoint (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;

  /* There's nothing to do if we don't support breakpoints.  */
  if (!supports_breakpoints ())
    return 0;

  /* breakpoint_at reads from current inferior.  */
  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
    {
      if (debug_threads)
	debug_printf ("CB: Push back breakpoint for %s\n",
		      target_pid_to_str (ptid_of (lwp)));

      /* Back up the PC if necessary.  */
      if (the_low_target.decr_pc_after_break)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_inferior, 1);
	  (*the_low_target.set_pc) (regcache, lwp->stop_pc);
	}

      current_inferior = saved_inferior;
      return 1;
    }
  else
    {
      if (debug_threads)
	debug_printf ("CB: No breakpoint found at %s for [%s]\n",
		      paddress (lwp->stop_pc),
		      target_pid_to_str (ptid_of (lwp)));
    }

  current_inferior = saved_inferior;
  return 0;
}
1748
1749 /* When the event-loop is doing a step-over, this points at the thread
1750 being stepped. */
1751 ptid_t step_over_bkpt;
1752
1753 /* Wait for an event from child PID. If PID is -1, wait for any
1754 child. Store the stop status through the status pointer WSTAT.
1755 OPTIONS is passed to the waitpid call. Return 0 if no child stop
1756 event was found and OPTIONS contains WNOHANG. Return the PID of
1757 the stopped child and update current_inferior otherwise. */
1758
1759 static int
1760 linux_wait_for_event (ptid_t ptid, int *wstat, int options)
1761 {
1762 struct lwp_info *event_child, *requested_child;
1763 ptid_t wait_ptid;
1764
1765 event_child = NULL;
1766 requested_child = NULL;
1767
1768 /* Check for a lwp with a pending status. */
1769
1770 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
1771 {
1772 event_child = (struct lwp_info *)
1773 find_inferior (&all_lwps, status_pending_p_callback, &ptid);
1774 if (debug_threads && event_child)
1775 debug_printf ("Got a pending child %ld\n", lwpid_of (event_child));
1776 }
1777 else
1778 {
1779 requested_child = find_lwp_pid (ptid);
1780
1781 if (stopping_threads == NOT_STOPPING_THREADS
1782 && requested_child->status_pending_p
1783 && requested_child->collecting_fast_tracepoint)
1784 {
1785 enqueue_one_deferred_signal (requested_child,
1786 &requested_child->status_pending);
1787 requested_child->status_pending_p = 0;
1788 requested_child->status_pending = 0;
1789 linux_resume_one_lwp (requested_child, 0, 0, NULL);
1790 }
1791
1792 if (requested_child->suspended
1793 && requested_child->status_pending_p)
1794 fatal ("requesting an event out of a suspended child?");
1795
1796 if (requested_child->status_pending_p)
1797 event_child = requested_child;
1798 }
1799
1800 if (event_child != NULL)
1801 {
1802 if (debug_threads)
1803 debug_printf ("Got an event from pending child %ld (%04x)\n",
1804 lwpid_of (event_child), event_child->status_pending);
1805 *wstat = event_child->status_pending;
1806 event_child->status_pending_p = 0;
1807 event_child->status_pending = 0;
1808 current_inferior = get_lwp_thread (event_child);
1809 return lwpid_of (event_child);
1810 }
1811
1812 if (ptid_is_pid (ptid))
1813 {
1814 /* A request to wait for a specific tgid. This is not possible
1815 with waitpid, so instead, we wait for any child, and leave
1816 children we're not interested in right now with a pending
1817 status to report later. */
1818 wait_ptid = minus_one_ptid;
1819 }
1820 else
1821 wait_ptid = ptid;
1822
1823 /* We only enter this loop if no process has a pending wait status. Thus
1824 any action taken in response to a wait status inside this loop is
1825 responding as soon as we detect the status, not after any pending
1826 events. */
1827 while (1)
1828 {
1829 event_child = linux_wait_for_lwp (wait_ptid, wstat, options);
1830
1831 if ((options & WNOHANG) && event_child == NULL)
1832 {
1833 if (debug_threads)
1834 debug_printf ("WNOHANG set, no event found\n");
1835 return 0;
1836 }
1837
1838 if (event_child == NULL)
1839 error ("event from unknown child");
1840
1841 if (ptid_is_pid (ptid)
1842 && ptid_get_pid (ptid) != ptid_get_pid (ptid_of (event_child)))
1843 {
1844 if (! WIFSTOPPED (*wstat))
1845 mark_lwp_dead (event_child, *wstat);
1846 else
1847 {
1848 event_child->status_pending_p = 1;
1849 event_child->status_pending = *wstat;
1850 }
1851 continue;
1852 }
1853
1854 current_inferior = get_lwp_thread (event_child);
1855
1856 /* Check for thread exit. */
1857 if (! WIFSTOPPED (*wstat))
1858 {
1859 if (debug_threads)
1860 debug_printf ("LWP %ld exiting\n", lwpid_of (event_child));
1861
1862 /* If the last thread is exiting, just return. */
1863 if (last_thread_of_process_p (current_inferior))
1864 {
1865 if (debug_threads)
1866 debug_printf ("LWP %ld is last lwp of process\n",
1867 lwpid_of (event_child));
1868 return lwpid_of (event_child);
1869 }
1870
1871 if (!non_stop)
1872 {
1873 current_inferior = get_first_thread ();
1874 if (debug_threads)
1875 debug_printf ("Current inferior is now %ld\n",
1876 lwpid_of (get_thread_lwp (current_inferior)));
1877 }
1878 else
1879 {
1880 current_inferior = NULL;
1881 if (debug_threads)
1882 debug_printf ("Current inferior is now <NULL>\n");
1883 }
1884
1885 /* If we were waiting for this particular child to do something...
1886 well, it did something. */
1887 if (requested_child != NULL)
1888 {
1889 int lwpid = lwpid_of (event_child);
1890
1891 /* Cancel the step-over operation --- the thread that
1892 started it is gone. */
1893 if (finish_step_over (event_child))
1894 unstop_all_lwps (1, event_child);
1895 delete_lwp (event_child);
1896 return lwpid;
1897 }
1898
1899 delete_lwp (event_child);
1900
1901 /* Wait for a more interesting event. */
1902 continue;
1903 }
1904
1905 if (event_child->must_set_ptrace_flags)
1906 {
1907 linux_enable_event_reporting (lwpid_of (event_child));
1908 event_child->must_set_ptrace_flags = 0;
1909 }
1910
1911 if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
1912 && *wstat >> 16 != 0)
1913 {
1914 handle_extended_wait (event_child, *wstat);
1915 continue;
1916 }
1917
1918 if (WIFSTOPPED (*wstat)
1919 && WSTOPSIG (*wstat) == SIGSTOP
1920 && event_child->stop_expected)
1921 {
1922 int should_stop;
1923
1924 if (debug_threads)
1925 debug_printf ("Expected stop.\n");
1926 event_child->stop_expected = 0;
1927
1928 should_stop = (current_inferior->last_resume_kind == resume_stop
1929 || stopping_threads != NOT_STOPPING_THREADS);
1930
1931 if (!should_stop)
1932 {
1933 linux_resume_one_lwp (event_child,
1934 event_child->stepping, 0, NULL);
1935 continue;
1936 }
1937 }
1938
1939 return lwpid_of (event_child);
1940 }
1941
1942 /* NOTREACHED */
1943 return 0;
1944 }
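/* Illustrative sketch, not part of gdbserver: decoding the raw wait
   status that linux_wait_for_event stores in *WSTAT, using the same
   macros this file relies on.  W_STOPCODE (sig), defined near the
   top of this file, builds a status for which WIFSTOPPED is true and
   WSTOPSIG yields SIG.  */
#if 0
static void
decode_wait_status_example (int wstat)
{
  if (WIFEXITED (wstat))
    printf ("exited, status %d\n", WEXITSTATUS (wstat));
  else if (WIFSIGNALED (wstat))
    printf ("killed by signal %d\n", WTERMSIG (wstat));
  else if (WIFSTOPPED (wstat))
    /* For ptrace stops, bits above the low 16 flag extended events,
       as checked in linux_wait_for_event above.  */
    printf ("stopped by signal %d%s\n", WSTOPSIG (wstat),
	    (wstat >> 16) != 0 ? " (extended event)" : "");
}
#endif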
1945
1946 /* Count the LWPs that have had events. */
1947
1948 static int
1949 count_events_callback (struct inferior_list_entry *entry, void *data)
1950 {
1951 struct lwp_info *lp = (struct lwp_info *) entry;
1952 struct thread_info *thread = get_lwp_thread (lp);
1953 int *count = data;
1954
1955 gdb_assert (count != NULL);
1956
1957 /* Count only resumed LWPs that have a SIGTRAP event pending that
1958 should be reported to GDB. */
1959 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1960 && thread->last_resume_kind != resume_stop
1961 && lp->status_pending_p
1962 && WIFSTOPPED (lp->status_pending)
1963 && WSTOPSIG (lp->status_pending) == SIGTRAP
1964 && !breakpoint_inserted_here (lp->stop_pc))
1965 (*count)++;
1966
1967 return 0;
1968 }
1969
1970 /* Select the LWP (if any) that is currently being single-stepped. */
1971
1972 static int
1973 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
1974 {
1975 struct lwp_info *lp = (struct lwp_info *) entry;
1976 struct thread_info *thread = get_lwp_thread (lp);
1977
1978 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1979 && thread->last_resume_kind == resume_step
1980 && lp->status_pending_p)
1981 return 1;
1982 else
1983 return 0;
1984 }
1985
1986 /* Select the Nth LWP that has had a SIGTRAP event that should be
1987 reported to GDB. */
1988
1989 static int
1990 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
1991 {
1992 struct lwp_info *lp = (struct lwp_info *) entry;
1993 struct thread_info *thread = get_lwp_thread (lp);
1994 int *selector = data;
1995
1996 gdb_assert (selector != NULL);
1997
1998 /* Select only resumed LWPs that have a SIGTRAP event pending. */
1999 if (thread->last_resume_kind != resume_stop
2000 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2001 && lp->status_pending_p
2002 && WIFSTOPPED (lp->status_pending)
2003 && WSTOPSIG (lp->status_pending) == SIGTRAP
2004 && !breakpoint_inserted_here (lp->stop_pc))
2005 if ((*selector)-- == 0)
2006 return 1;
2007
2008 return 0;
2009 }
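/* The two callbacks above implement a count-then-pick-the-Nth idiom
   over the inferior list.  A standalone sketch of the same idiom on
   a plain array (hypothetical helper, not gdbserver code): count the
   matches first, then let a selector tick down to zero to pick one.  */
#if 0
static int
pick_nth_matching (int *vals, int n, int (*pred) (int), int nth)
{
  int i;

  for (i = 0; i < n; i++)
    if (pred (vals[i]) && nth-- == 0)
      return i;
  return -1;	/* Fewer than NTH + 1 matches.  */
}
#endif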
2010
2011 static int
2012 cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
2013 {
2014 struct lwp_info *lp = (struct lwp_info *) entry;
2015 struct thread_info *thread = get_lwp_thread (lp);
2016 struct lwp_info *event_lp = data;
2017
2018 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2019 if (lp == event_lp)
2020 return 0;
2021
2022 /* If a LWP other than the LWP that we're reporting an event for has
2023 hit a GDB breakpoint (as opposed to some random trap signal),
2024 then just arrange for it to hit it again later. We don't keep
2025 the SIGTRAP status and don't forward the SIGTRAP signal to the
2026 LWP. We will handle the current event; eventually we will resume
2027 all LWPs, and this one will get its breakpoint trap again.
2028
2029 If we do not do this, then we run the risk that the user will
2030 delete or disable the breakpoint, but the LWP will have already
2031 tripped on it. */
2032
2033 if (thread->last_resume_kind != resume_stop
2034 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2035 && lp->status_pending_p
2036 && WIFSTOPPED (lp->status_pending)
2037 && WSTOPSIG (lp->status_pending) == SIGTRAP
2038 && !lp->stepping
2039 && !lp->stopped_by_watchpoint
2040 && cancel_breakpoint (lp))
2041 /* Throw away the SIGTRAP. */
2042 lp->status_pending_p = 0;
2043
2044 return 0;
2045 }
2046
2047 static void
2048 linux_cancel_breakpoints (void)
2049 {
2050 find_inferior (&all_lwps, cancel_breakpoints_callback, NULL);
2051 }
2052
2053 /* Select one LWP out of those that have events pending. */
2054
2055 static void
2056 select_event_lwp (struct lwp_info **orig_lp)
2057 {
2058 int num_events = 0;
2059 int random_selector;
2060 struct lwp_info *event_lp;
2061
2062 /* Give preference to any LWP that is being single-stepped. */
2063 event_lp
2064 = (struct lwp_info *) find_inferior (&all_lwps,
2065 select_singlestep_lwp_callback, NULL);
2066 if (event_lp != NULL)
2067 {
2068 if (debug_threads)
2069 debug_printf ("SEL: Select single-step %s\n",
2070 target_pid_to_str (ptid_of (event_lp)));
2071 }
2072 else
2073 {
2074 /* No single-stepping LWP. Select one at random, out of those
2075 which have had SIGTRAP events. */
2076
2077 /* First see how many SIGTRAP events we have. */
2078 find_inferior (&all_lwps, count_events_callback, &num_events);
2079
2080 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2081 random_selector = (int)
2082 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2083
2084 if (debug_threads && num_events > 1)
2085 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2086 num_events, random_selector);
2087
2088 event_lp = (struct lwp_info *) find_inferior (&all_lwps,
2089 select_event_lwp_callback,
2090 &random_selector);
2091 }
2092
2093 if (event_lp != NULL)
2094 {
2095 /* Switch the event LWP. */
2096 *orig_lp = event_lp;
2097 }
2098 }
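/* Worked example of the selector formula above, assuming the usual
   glibc RAND_MAX of 2147483647 and num_events == 3:

     rand () == 0          -> (3 * 0.0)          / 2147483648.0 -> 0
     rand () == RAND_MAX/2 -> (3 * 1073741823.0) / 2147483648.0 -> 1
     rand () == RAND_MAX   -> (3 * 2147483647.0) / 2147483648.0 -> 2

   Dividing by RAND_MAX + 1.0 rather than RAND_MAX keeps the quotient
   strictly below num_events, so the cast can never produce an
   out-of-range index.  */
#if 0
static int
random_index_example (int num_events)
{
  return (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
}
#endif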
2099
2100 /* Decrement the suspend count of an LWP. */
2101
2102 static int
2103 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2104 {
2105 struct lwp_info *lwp = (struct lwp_info *) entry;
2106
2107 /* Ignore EXCEPT. */
2108 if (lwp == except)
2109 return 0;
2110
2111 lwp->suspended--;
2112
2113 gdb_assert (lwp->suspended >= 0);
2114 return 0;
2115 }
2116
2117 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2118 non-NULL. */
2119
2120 static void
2121 unsuspend_all_lwps (struct lwp_info *except)
2122 {
2123 find_inferior (&all_lwps, unsuspend_one_lwp, except);
2124 }
2125
2126 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2127 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2128 void *data);
2129 static int lwp_running (struct inferior_list_entry *entry, void *data);
2130 static ptid_t linux_wait_1 (ptid_t ptid,
2131 struct target_waitstatus *ourstatus,
2132 int target_options);
2133
2134 /* Stabilize threads (move out of jump pads).
2135
2136 If a thread is midway collecting a fast tracepoint, we need to
2137 finish the collection and move it out of the jump pad before
2138 reporting the signal.
2139
2140 This avoids recursion while collecting (when a signal arrives
2141 midway, and the signal handler itself collects), which would trash
2142 the trace buffer. In case the user set a breakpoint in a signal
2143 handler, this avoids the backtrace showing the jump pad, etc.
2144 Most importantly, there are certain things we can't do safely if
2145 threads are stopped in a jump pad (or in its callees). For
2146 example:
2147
2148 - starting a new trace run. A thread still collecting the
2149 previous run could trash the trace buffer when resumed. The trace
2150 buffer control structures would have been reset but the thread had
2151 no way to tell. The thread could even be midway through memcpy'ing
2152 to the buffer, which would mean that when resumed, it would clobber
2153 the trace buffer that had been set up for the new run.
2154
2155 - we can't rewrite/reuse the jump pads for new tracepoints
2156 safely. Say you do tstart while a thread is stopped midway through
2157 collecting. When the thread is later resumed, it finishes the
2158 collection, and returns to the jump pad, to execute the original
2159 instruction that was under the tracepoint jump at the time the
2160 older run had been started. If the jump pad had been rewritten
2161 since for something else in the new run, the thread would now
2162 execute wrong or random instructions. */
2163
2164 static void
2165 linux_stabilize_threads (void)
2166 {
2167 struct thread_info *save_inferior;
2168 struct lwp_info *lwp_stuck;
2169
2170 lwp_stuck
2171 = (struct lwp_info *) find_inferior (&all_lwps,
2172 stuck_in_jump_pad_callback, NULL);
2173 if (lwp_stuck != NULL)
2174 {
2175 if (debug_threads)
2176 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2177 lwpid_of (lwp_stuck));
2178 return;
2179 }
2180
2181 save_inferior = current_inferior;
2182
2183 stabilizing_threads = 1;
2184
2185 /* Kick 'em all. */
2186 for_each_inferior (&all_lwps, move_out_of_jump_pad_callback);
2187
2188 /* Loop until all are stopped out of the jump pads. */
2189 while (find_inferior (&all_lwps, lwp_running, NULL) != NULL)
2190 {
2191 struct target_waitstatus ourstatus;
2192 struct lwp_info *lwp;
2193 int wstat;
2194
2195 /* Note that we go through the full wait-event loop. While
2196 moving threads out of the jump pad, we need to be able to step
2197 over internal breakpoints and such. */
2198 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2199
2200 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2201 {
2202 lwp = get_thread_lwp (current_inferior);
2203
2204 /* Lock it. */
2205 lwp->suspended++;
2206
2207 if (ourstatus.value.sig != GDB_SIGNAL_0
2208 || current_inferior->last_resume_kind == resume_stop)
2209 {
2210 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2211 enqueue_one_deferred_signal (lwp, &wstat);
2212 }
2213 }
2214 }
2215
2216 find_inferior (&all_lwps, unsuspend_one_lwp, NULL);
2217
2218 stabilizing_threads = 0;
2219
2220 current_inferior = save_inferior;
2221
2222 if (debug_threads)
2223 {
2224 lwp_stuck
2225 = (struct lwp_info *) find_inferior (&all_lwps,
2226 stuck_in_jump_pad_callback, NULL);
2227 if (lwp_stuck != NULL)
2228 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2229 lwpid_of (lwp_stuck));
2230 }
2231 }
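/* A minimal sketch of the defer-and-replay pattern that
   enqueue_one_deferred_signal / dequeue_one_deferred_signal
   implement for stabilization: wait statuses that arrive while a
   thread must keep running are parked on a list and re-delivered,
   oldest first, once the thread is somewhere safe.  The struct below
   is a stand-in for illustration, not gdbserver's real layout.  */
#if 0
struct deferred_sig
{
  struct deferred_sig *next;
  int wstat;			/* Raw wait status to replay.  */
};

static void
defer_sig (struct deferred_sig **q, int wstat)
{
  struct deferred_sig *d = xmalloc (sizeof (*d));

  d->wstat = wstat;
  d->next = *q;			/* Push at the head.  */
  *q = d;
}

static int
replay_one_sig (struct deferred_sig **q, int *wstat)
{
  struct deferred_sig **p = q;

  if (*p == NULL)
    return 0;
  while ((*p)->next != NULL)
    p = &(*p)->next;		/* Oldest entry is at the tail.  */
  *wstat = (*p)->wstat;
  free (*p);
  *p = NULL;
  return 1;
}
#endif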
2232
2233 /* Wait for process, returns status. */
2234
2235 static ptid_t
2236 linux_wait_1 (ptid_t ptid,
2237 struct target_waitstatus *ourstatus, int target_options)
2238 {
2239 int w;
2240 struct lwp_info *event_child;
2241 int options;
2242 int pid;
2243 int step_over_finished;
2244 int bp_explains_trap;
2245 int maybe_internal_trap;
2246 int report_to_gdb;
2247 int trace_event;
2248 int in_step_range;
2249
2250 if (debug_threads)
2251 {
2252 debug_enter ();
2253 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2254 }
2255
2256 /* Translate generic target options into linux options. */
2257 options = __WALL;
2258 if (target_options & TARGET_WNOHANG)
2259 options |= WNOHANG;
2260
2261 retry:
2262 bp_explains_trap = 0;
2263 trace_event = 0;
2264 in_step_range = 0;
2265 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2266
2267 /* If we were only supposed to resume one thread, only wait for
2268 that thread - if it's still alive. If it died, however - which
2269 can happen if we're coming from the thread death case below -
2270 then we need to make sure we restart the other threads. We could
2271 pick a thread at random or restart all; restarting all is less
2272 arbitrary. */
2273 if (!non_stop
2274 && !ptid_equal (cont_thread, null_ptid)
2275 && !ptid_equal (cont_thread, minus_one_ptid))
2276 {
2277 struct thread_info *thread;
2278
2279 thread = (struct thread_info *) find_inferior_id (&all_threads,
2280 cont_thread);
2281
2282 /* No stepping, no signal - unless one is pending already, of course. */
2283 if (thread == NULL)
2284 {
2285 struct thread_resume resume_info;
2286 resume_info.thread = minus_one_ptid;
2287 resume_info.kind = resume_continue;
2288 resume_info.sig = 0;
2289 linux_resume (&resume_info, 1);
2290 }
2291 else
2292 ptid = cont_thread;
2293 }
2294
2295 if (ptid_equal (step_over_bkpt, null_ptid))
2296 pid = linux_wait_for_event (ptid, &w, options);
2297 else
2298 {
2299 if (debug_threads)
2300 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2301 target_pid_to_str (step_over_bkpt));
2302 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2303 }
2304
2305 if (pid == 0) /* only if TARGET_WNOHANG */
2306 {
2307 if (debug_threads)
2308 {
2309 debug_printf ("linux_wait_1 ret = null_ptid\n");
2310 debug_exit ();
2311 }
2312 return null_ptid;
2313 }
2314
2315 event_child = get_thread_lwp (current_inferior);
2316
2317 /* If we are waiting for a particular child, and it exited,
2318 linux_wait_for_event will return its exit status. Similarly if
2319 the last child exited. If this is not the last child, however,
2320 do not report it as exited until there is a 'thread exited' response
2321 available in the remote protocol. Instead, just wait for another event.
2322 This should be safe, because if the thread crashed we will already
2323 have reported the termination signal to GDB; that should stop any
2324 in-progress stepping operations, etc.
2325
2326 Report the exit status of the last thread to exit. This matches
2327 LinuxThreads' behavior. */
2328
2329 if (last_thread_of_process_p (current_inferior))
2330 {
2331 if (WIFEXITED (w) || WIFSIGNALED (w))
2332 {
2333 if (WIFEXITED (w))
2334 {
2335 ourstatus->kind = TARGET_WAITKIND_EXITED;
2336 ourstatus->value.integer = WEXITSTATUS (w);
2337
2338 if (debug_threads)
2339 {
2340 debug_printf ("linux_wait_1 ret = %s, exited with "
2341 "retcode %d\n",
2342 target_pid_to_str (ptid_of (event_child)),
2343 WEXITSTATUS (w));
2344 debug_exit ();
2345 }
2346 }
2347 else
2348 {
2349 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2350 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2351
2352 if (debug_threads)
2353 {
2354 debug_printf ("linux_wait_1 ret = %s, terminated with "
2355 "signal %d\n",
2356 target_pid_to_str (ptid_of (event_child)),
2357 WTERMSIG (w));
2358 debug_exit ();
2359 }
2360 }
2361
2362 return ptid_of (event_child);
2363 }
2364 }
2365 else
2366 {
2367 if (!WIFSTOPPED (w))
2368 goto retry;
2369 }
2370
2371 /* If this event was not handled before, and is not a SIGTRAP, we
2372 report it. SIGILL and SIGSEGV are also treated as traps in case
2373 a breakpoint is inserted at the current PC. If this target does
2374 not support internal breakpoints at all, we also report the
2375 SIGTRAP without further processing; it's of no concern to us. */
2376 maybe_internal_trap
2377 = (supports_breakpoints ()
2378 && (WSTOPSIG (w) == SIGTRAP
2379 || ((WSTOPSIG (w) == SIGILL
2380 || WSTOPSIG (w) == SIGSEGV)
2381 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2382
2383 if (maybe_internal_trap)
2384 {
2385 /* Handle anything that requires bookkeeping before deciding to
2386 report the event or continue waiting. */
2387
2388 /* First check if we can explain the SIGTRAP with an internal
2389 breakpoint, or if we should possibly report the event to GDB.
2390 Do this before anything that may remove or insert a
2391 breakpoint. */
2392 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2393
2394 /* We have a SIGTRAP, possibly a step-over dance has just
2395 finished. If so, tweak the state machine accordingly,
2396 reinsert breakpoints and delete any reinsert (software
2397 single-step) breakpoints. */
2398 step_over_finished = finish_step_over (event_child);
2399
2400 /* Now invoke the callbacks of any internal breakpoints there. */
2401 check_breakpoints (event_child->stop_pc);
2402
2403 /* Handle tracepoint data collecting. This may overflow the
2404 trace buffer, and cause a tracing stop, removing
2405 breakpoints. */
2406 trace_event = handle_tracepoints (event_child);
2407
2408 if (bp_explains_trap)
2409 {
2410 /* If we stepped or ran into an internal breakpoint, we've
2411 already handled it. So next time we resume (from this
2412 PC), we should step over it. */
2413 if (debug_threads)
2414 debug_printf ("Hit a gdbserver breakpoint.\n");
2415
2416 if (breakpoint_here (event_child->stop_pc))
2417 event_child->need_step_over = 1;
2418 }
2419 }
2420 else
2421 {
2422 /* We have some other signal, possibly a step-over dance was in
2423 progress, and it should be cancelled too. */
2424 step_over_finished = finish_step_over (event_child);
2425 }
2426
2427 /* We have all the data we need. Either report the event to GDB, or
2428 resume threads and keep waiting for more. */
2429
2430 /* If we're collecting a fast tracepoint, finish the collection and
2431 move out of the jump pad before delivering a signal. See
2432 linux_stabilize_threads. */
2433
2434 if (WIFSTOPPED (w)
2435 && WSTOPSIG (w) != SIGTRAP
2436 && supports_fast_tracepoints ()
2437 && agent_loaded_p ())
2438 {
2439 if (debug_threads)
2440 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2441 "to defer or adjust it.\n",
2442 WSTOPSIG (w), lwpid_of (event_child));
2443
2444 /* Allow debugging the jump pad itself. */
2445 if (current_inferior->last_resume_kind != resume_step
2446 && maybe_move_out_of_jump_pad (event_child, &w))
2447 {
2448 enqueue_one_deferred_signal (event_child, &w);
2449
2450 if (debug_threads)
2451 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
2452 WSTOPSIG (w), lwpid_of (event_child));
2453
2454 linux_resume_one_lwp (event_child, 0, 0, NULL);
2455 goto retry;
2456 }
2457 }
2458
2459 if (event_child->collecting_fast_tracepoint)
2460 {
2461 if (debug_threads)
2462 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2463 "Check if we're already there.\n",
2464 lwpid_of (event_child),
2465 event_child->collecting_fast_tracepoint);
2466
2467 trace_event = 1;
2468
2469 event_child->collecting_fast_tracepoint
2470 = linux_fast_tracepoint_collecting (event_child, NULL);
2471
2472 if (event_child->collecting_fast_tracepoint != 1)
2473 {
2474 /* No longer need this breakpoint. */
2475 if (event_child->exit_jump_pad_bkpt != NULL)
2476 {
2477 if (debug_threads)
2478 debug_printf ("No longer need exit-jump-pad bkpt; removing it; "
2479 "stopping all threads momentarily.\n");
2480
2481 /* Other running threads could hit this breakpoint.
2482 We don't handle moribund locations like GDB does,
2483 instead we always pause all threads when removing
2484 breakpoints, so that any step-over or
2485 decr_pc_after_break adjustment is always taken
2486 care of while the breakpoint is still
2487 inserted. */
2488 stop_all_lwps (1, event_child);
2489 cancel_breakpoints ();
2490
2491 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2492 event_child->exit_jump_pad_bkpt = NULL;
2493
2494 unstop_all_lwps (1, event_child);
2495
2496 gdb_assert (event_child->suspended >= 0);
2497 }
2498 }
2499
2500 if (event_child->collecting_fast_tracepoint == 0)
2501 {
2502 if (debug_threads)
2503 debug_printf ("fast tracepoint finished "
2504 "collecting successfully.\n");
2505
2506 /* We may have a deferred signal to report. */
2507 if (dequeue_one_deferred_signal (event_child, &w))
2508 {
2509 if (debug_threads)
2510 debug_printf ("dequeued one signal.\n");
2511 }
2512 else
2513 {
2514 if (debug_threads)
2515 debug_printf ("no deferred signals.\n");
2516
2517 if (stabilizing_threads)
2518 {
2519 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2520 ourstatus->value.sig = GDB_SIGNAL_0;
2521
2522 if (debug_threads)
2523 {
2524 debug_printf ("linux_wait_1 ret = %s, stopped "
2525 "while stabilizing threads\n",
2526 target_pid_to_str (ptid_of (event_child)));
2527 debug_exit ();
2528 }
2529
2530 return ptid_of (event_child);
2531 }
2532 }
2533 }
2534 }
2535
2536 /* Check whether GDB would be interested in this event. */
2537
2538 /* If GDB is not interested in this signal, don't stop other
2539 threads, and don't report it to GDB. Just resume the inferior
2540 right away. We do this for threading-related signals as well as
2541 any that GDB specifically requested we ignore. But never ignore
2542 SIGSTOP if we sent it ourselves, and do not ignore signals when
2543 stepping - they may require special handling to skip the signal
2544 handler. */
2545 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2546 thread library? */
2547 if (WIFSTOPPED (w)
2548 && current_inferior->last_resume_kind != resume_step
2549 && (
2550 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2551 (current_process ()->private->thread_db != NULL
2552 && (WSTOPSIG (w) == __SIGRTMIN
2553 || WSTOPSIG (w) == __SIGRTMIN + 1))
2554 ||
2555 #endif
2556 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
2557 && !(WSTOPSIG (w) == SIGSTOP
2558 && current_inferior->last_resume_kind == resume_stop))))
2559 {
2560 siginfo_t info, *info_p;
2561
2562 if (debug_threads)
2563 debug_printf ("Ignored signal %d for LWP %ld.\n",
2564 WSTOPSIG (w), lwpid_of (event_child));
2565
2566 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child),
2567 (PTRACE_TYPE_ARG3) 0, &info) == 0)
2568 info_p = &info;
2569 else
2570 info_p = NULL;
2571 linux_resume_one_lwp (event_child, event_child->stepping,
2572 WSTOPSIG (w), info_p);
2573 goto retry;
2574 }
2575
2576 /* Note that all addresses are always "out of the step range" when
2577 there's no range to begin with. */
2578 in_step_range = lwp_in_step_range (event_child);
2579
2580 /* If GDB wanted this thread to single step, and the thread is out
2581 of the step range, we always want to report the SIGTRAP, and let
2582 GDB handle it. Watchpoints should always be reported. So should
2583 signals we can't explain. A SIGTRAP we can't explain could be a
2584 GDB breakpoint --- we may or may not support Z0 breakpoints. If we
2585 do, we'd be able to handle GDB breakpoints on top of internal
2586 breakpoints, by handling the internal breakpoint and still
2587 reporting the event to GDB. If we don't, we're out of luck, GDB
2588 won't see the breakpoint hit. */
2589 report_to_gdb = (!maybe_internal_trap
2590 || (current_inferior->last_resume_kind == resume_step
2591 && !in_step_range)
2592 || event_child->stopped_by_watchpoint
2593 || (!step_over_finished && !in_step_range
2594 && !bp_explains_trap && !trace_event)
2595 || (gdb_breakpoint_here (event_child->stop_pc)
2596 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
2597 && gdb_no_commands_at_breakpoint (event_child->stop_pc)));
2598
2599 run_breakpoint_commands (event_child->stop_pc);
2600
2601 /* We found no reason GDB would want us to stop. We either hit one
2602 of our own breakpoints, or finished an internal step GDB
2603 shouldn't know about. */
2604 if (!report_to_gdb)
2605 {
2606 if (debug_threads)
2607 {
2608 if (bp_explains_trap)
2609 debug_printf ("Hit a gdbserver breakpoint.\n");
2610 if (step_over_finished)
2611 debug_printf ("Step-over finished.\n");
2612 if (trace_event)
2613 debug_printf ("Tracepoint event.\n");
2614 if (lwp_in_step_range (event_child))
2615 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
2616 paddress (event_child->stop_pc),
2617 paddress (event_child->step_range_start),
2618 paddress (event_child->step_range_end));
2619 }
2620
2621 /* We're not reporting this breakpoint to GDB, so apply the
2622 decr_pc_after_break adjustment to the inferior's regcache
2623 ourselves. */
2624
2625 if (the_low_target.set_pc != NULL)
2626 {
2627 struct regcache *regcache
2628 = get_thread_regcache (get_lwp_thread (event_child), 1);
2629 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2630 }
2631
2632 /* We may have finished stepping over a breakpoint. If so,
2633 we've stopped and suspended all LWPs momentarily except the
2634 stepping one. This is where we resume them all again. We're
2635 going to keep waiting, so use proceed, which handles stepping
2636 over the next breakpoint. */
2637 if (debug_threads)
2638 debug_printf ("proceeding all threads.\n");
2639
2640 if (step_over_finished)
2641 unsuspend_all_lwps (event_child);
2642
2643 proceed_all_lwps ();
2644 goto retry;
2645 }
2646
2647 if (debug_threads)
2648 {
2649 if (current_inferior->last_resume_kind == resume_step)
2650 {
2651 if (event_child->step_range_start == event_child->step_range_end)
2652 debug_printf ("GDB wanted to single-step, reporting event.\n");
2653 else if (!lwp_in_step_range (event_child))
2654 debug_printf ("Out of step range, reporting event.\n");
2655 }
2656 if (event_child->stopped_by_watchpoint)
2657 debug_printf ("Stopped by watchpoint.\n");
2658 if (gdb_breakpoint_here (event_child->stop_pc))
2659 debug_printf ("Stopped by GDB breakpoint.\n");
2660 debug_printf ("Hit a non-gdbserver trap event.\n");
2662 }
2663
2664 /* Alright, we're going to report a stop. */
2665
2666 if (!non_stop && !stabilizing_threads)
2667 {
2668 /* In all-stop, stop all threads. */
2669 stop_all_lwps (0, NULL);
2670
2671 /* If we're not waiting for a specific LWP, choose an event LWP
2672 from among those that have had events. Giving equal priority
2673 to all LWPs that have had events helps prevent
2674 starvation. */
2675 if (ptid_equal (ptid, minus_one_ptid))
2676 {
2677 event_child->status_pending_p = 1;
2678 event_child->status_pending = w;
2679
2680 select_event_lwp (&event_child);
2681
2682 /* current_inferior and event_child must stay in sync. */
2683 current_inferior = get_lwp_thread (event_child);
2684
2685 event_child->status_pending_p = 0;
2686 w = event_child->status_pending;
2687 }
2688
2689 /* Now that we've selected our final event LWP, cancel any
2690 breakpoints in other LWPs that have hit a GDB breakpoint.
2691 See the comment in cancel_breakpoints_callback to find out
2692 why. */
2693 find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
2694
2695 /* If we were doing a step-over, all other threads but the stepping one
2696 had been paused in start_step_over, with their suspend counts
2697 incremented. We don't want to do a full unstop/unpause, because we're
2698 in all-stop mode (so we want threads stopped), but we still need to
2699 unsuspend the other threads, to decrement their `suspended' count
2700 back. */
2701 if (step_over_finished)
2702 unsuspend_all_lwps (event_child);
2703
2704 /* Stabilize threads (move out of jump pads). */
2705 stabilize_threads ();
2706 }
2707 else
2708 {
2709 /* If we just finished a step-over, then all threads had been
2710 momentarily paused. In all-stop, that's fine, we want
2711 threads stopped by now anyway. In non-stop, we need to
2712 re-resume threads that GDB wanted to be running. */
2713 if (step_over_finished)
2714 unstop_all_lwps (1, event_child);
2715 }
2716
2717 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2718
2719 if (current_inferior->last_resume_kind == resume_stop
2720 && WSTOPSIG (w) == SIGSTOP)
2721 {
2722 /* A thread that GDB requested to stop with vCont;t, and it
2723 stopped cleanly, so report it as stopped with SIG0. The use
2724 of SIGSTOP is an implementation detail. */
2725 ourstatus->value.sig = GDB_SIGNAL_0;
2726 }
2727 else if (current_inferior->last_resume_kind == resume_stop
2728 && WSTOPSIG (w) != SIGSTOP)
2729 {
2730 /* A thread that GDB requested to stop with vCont;t, but that
2731 stopped for other reasons. */
2732 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2733 }
2734 else
2735 {
2736 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2737 }
2738
2739 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2740
2741 if (debug_threads)
2742 {
2743 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
2744 target_pid_to_str (ptid_of (event_child)),
2745 ourstatus->kind, ourstatus->value.sig);
2746 debug_exit ();
2747 }
2748
2749 return ptid_of (event_child);
2750 }
2751
2752 /* Get rid of any pending event in the pipe. */
2753 static void
2754 async_file_flush (void)
2755 {
2756 int ret;
2757 char buf;
2758
2759 do
2760 ret = read (linux_event_pipe[0], &buf, 1);
2761 while (ret >= 0 || (ret == -1 && errno == EINTR));
2762 }
2763
2764 /* Put something in the pipe, so the event loop wakes up. */
2765 static void
2766 async_file_mark (void)
2767 {
2768 int ret;
2769
2770 async_file_flush ();
2771
2772 do
2773 ret = write (linux_event_pipe[1], "+", 1);
2774 while (ret == 0 || (ret == -1 && errno == EINTR));
2775
2776 /* Ignore EAGAIN. If the pipe is full, the event loop will already
2777 be awakened anyway. */
2778 }
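/* Sketch of how such a wakeup pipe is typically created (the real
   setup lives in this file's async-toggling code, outside this
   excerpt): both ends are made non-blocking, so async_file_flush can
   drain until read fails and async_file_mark never blocks when the
   pipe is already full.  */
#if 0
static int
make_event_pipe_example (int fds[2])
{
  if (pipe (fds) != 0)
    return -1;
  if (fcntl (fds[0], F_SETFL, O_NONBLOCK) != 0
      || fcntl (fds[1], F_SETFL, O_NONBLOCK) != 0)
    {
      close (fds[0]);
      close (fds[1]);
      return -1;
    }
  return 0;
}
#endif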
2779
2780 static ptid_t
2781 linux_wait (ptid_t ptid,
2782 struct target_waitstatus *ourstatus, int target_options)
2783 {
2784 ptid_t event_ptid;
2785
2786 /* Flush the async file first. */
2787 if (target_is_async_p ())
2788 async_file_flush ();
2789
2790 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
2791
2792 /* If at least one stop was reported, there may be more. A single
2793 SIGCHLD can signal more than one child stop. */
2794 if (target_is_async_p ()
2795 && (target_options & TARGET_WNOHANG) != 0
2796 && !ptid_equal (event_ptid, null_ptid))
2797 async_file_mark ();
2798
2799 return event_ptid;
2800 }
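/* Hypothetical driver, not gdbserver's event loop: a minimal
   synchronous consumer of linux_wait, blocking (target_options == 0)
   until some thread reports an event, then dispatching on the
   waitstatus kind.  */
#if 0
static void
wait_loop_example (void)
{
  for (;;)
    {
      struct target_waitstatus ws;
      ptid_t ptid = linux_wait (minus_one_ptid, &ws, 0);

      if (ws.kind == TARGET_WAITKIND_EXITED)
	break;			/* ws.value.integer holds the exit code.  */
      if (ws.kind == TARGET_WAITKIND_STOPPED)
	{
	  /* PTID names the reporting thread; ws.value.sig is the
	     gdb-encoded stopping signal.  */
	}
    }
}
#endif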
2801
2802 /* Send a signal to an LWP. */
2803
2804 static int
2805 kill_lwp (unsigned long lwpid, int signo)
2806 {
2807 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2808 fails, then we are not using nptl threads and we should be using kill. */
2809
2810 #ifdef __NR_tkill
2811 {
2812 static int tkill_failed;
2813
2814 if (!tkill_failed)
2815 {
2816 int ret;
2817
2818 errno = 0;
2819 ret = syscall (__NR_tkill, lwpid, signo);
2820 if (errno != ENOSYS)
2821 return ret;
2822 tkill_failed = 1;
2823 }
2824 }
2825 #endif
2826
2827 return kill (lwpid, signo);
2828 }
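/* Why the tkill dance matters: kill () takes a process ID, so under
   NPTL, where all threads share one PID, it cannot target an
   individual thread.  tkill directs the signal at a single LWP.
   Newer kernels also offer tgkill, which takes the thread-group ID
   as well, guarding against LWP-ID reuse; a hedged sketch of that
   variant (hypothetical helper, not used by this file):  */
#if 0
static int
tgkill_example (int tgid, int lwpid, int signo)
{
#ifdef __NR_tgkill
  return syscall (__NR_tgkill, tgid, lwpid, signo);
#else
  return kill (lwpid, signo);
#endif
}
#endif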
2829
2830 void
2831 linux_stop_lwp (struct lwp_info *lwp)
2832 {
2833 send_sigstop (lwp);
2834 }
2835
2836 static void
2837 send_sigstop (struct lwp_info *lwp)
2838 {
2839 int pid;
2840
2841 pid = lwpid_of (lwp);
2842
2843 /* If we already have a pending stop signal for this LWP, don't
2844 send another. */
2845 if (lwp->stop_expected)
2846 {
2847 if (debug_threads)
2848 debug_printf ("Have pending sigstop for lwp %d\n", pid);
2849
2850 return;
2851 }
2852
2853 if (debug_threads)
2854 debug_printf ("Sending sigstop to lwp %d\n", pid);
2855
2856 lwp->stop_expected = 1;
2857 kill_lwp (pid, SIGSTOP);
2858 }
2859
2860 static int
2861 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
2862 {
2863 struct lwp_info *lwp = (struct lwp_info *) entry;
2864
2865 /* Ignore EXCEPT. */
2866 if (lwp == except)
2867 return 0;
2868
2869 if (lwp->stopped)
2870 return 0;
2871
2872 send_sigstop (lwp);
2873 return 0;
2874 }
2875
2876 /* Increment the suspend count of an LWP, and stop it, if not stopped
2877 yet. */
2878 static int
2879 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
2880 void *except)
2881 {
2882 struct lwp_info *lwp = (struct lwp_info *) entry;
2883
2884 /* Ignore EXCEPT. */
2885 if (lwp == except)
2886 return 0;
2887
2888 lwp->suspended++;
2889
2890 return send_sigstop_callback (entry, except);
2891 }
2892
2893 static void
2894 mark_lwp_dead (struct lwp_info *lwp, int wstat)
2895 {
2896 /* It's dead, really. */
2897 lwp->dead = 1;
2898
2899 /* Store the exit status for later. */
2900 lwp->status_pending_p = 1;
2901 lwp->status_pending = wstat;
2902
2903 /* Prevent trying to stop it. */
2904 lwp->stopped = 1;
2905
2906 /* No further stops are expected from a dead lwp. */
2907 lwp->stop_expected = 0;
2908 }
2909
2910 static void
2911 wait_for_sigstop (struct inferior_list_entry *entry)
2912 {
2913 struct lwp_info *lwp = (struct lwp_info *) entry;
2914 struct thread_info *saved_inferior;
2915 int wstat;
2916 ptid_t saved_tid;
2917 ptid_t ptid;
2918 int pid;
2919
2920 if (lwp->stopped)
2921 {
2922 if (debug_threads)
2923 debug_printf ("wait_for_sigstop: LWP %ld already stopped\n",
2924 lwpid_of (lwp));
2925 return;
2926 }
2927
2928 saved_inferior = current_inferior;
2929 if (saved_inferior != NULL)
2930 saved_tid = saved_inferior->entry.id;
2931 else
2932 saved_tid = null_ptid; /* avoid bogus unused warning */
2933
2934 ptid = lwp->entry.id;
2935
2936 if (debug_threads)
2937 debug_printf ("wait_for_sigstop: pulling one event\n");
2938
2939 pid = linux_wait_for_event (ptid, &wstat, __WALL);
2940
2941 /* If we stopped with a non-SIGSTOP signal, save it for later
2942 and record the pending SIGSTOP. If the process exited, just
2943 return. */
2944 if (WIFSTOPPED (wstat))
2945 {
2946 if (debug_threads)
2947 debug_printf ("LWP %ld stopped with signal %d\n",
2948 lwpid_of (lwp), WSTOPSIG (wstat));
2949
2950 if (WSTOPSIG (wstat) != SIGSTOP)
2951 {
2952 if (debug_threads)
2953 debug_printf ("LWP %ld stopped with non-sigstop status %06x\n",
2954 lwpid_of (lwp), wstat);
2955
2956 lwp->status_pending_p = 1;
2957 lwp->status_pending = wstat;
2958 }
2959 }
2960 else
2961 {
2962 if (debug_threads)
2963 debug_printf ("Process %d exited while stopping LWPs\n", pid);
2964
2965 lwp = find_lwp_pid (pid_to_ptid (pid));
2966 if (lwp)
2967 {
2968 /* Leave this status pending for the next time we're able to
2969 report it. In the meantime, we'll report this lwp as
2970 dead to GDB, so GDB doesn't try to read registers and
2971 memory from it. This can only happen if this was the
2972 last thread of the process; otherwise, PID is removed
2973 from the thread tables before linux_wait_for_event
2974 returns. */
2975 mark_lwp_dead (lwp, wstat);
2976 }
2977 }
2978
2979 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
2980 current_inferior = saved_inferior;
2981 else
2982 {
2983 if (debug_threads)
2984 debug_printf ("Previously current thread died.\n");
2985
2986 if (non_stop)
2987 {
2988 /* We can't change the current inferior behind GDB's back,
2989 otherwise, a subsequent command may apply to the wrong
2990 process. */
2991 current_inferior = NULL;
2992 }
2993 else
2994 {
2995 /* Set a valid thread as current. */
2996 set_desired_inferior (0);
2997 }
2998 }
2999 }
3000
3001 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3002 move it out, because we need to report the stop event to GDB. For
3003 example, if the user puts a breakpoint in the jump pad, it's
3004 because she wants to debug it. */
3005
3006 static int
3007 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3008 {
3009 struct lwp_info *lwp = (struct lwp_info *) entry;
3010 struct thread_info *thread = get_lwp_thread (lwp);
3011
3012 gdb_assert (lwp->suspended == 0);
3013 gdb_assert (lwp->stopped);
3014
3015 /* Allow debugging the jump pad, gdb_collect, etc. */
3016 return (supports_fast_tracepoints ()
3017 && agent_loaded_p ()
3018 && (gdb_breakpoint_here (lwp->stop_pc)
3019 || lwp->stopped_by_watchpoint
3020 || thread->last_resume_kind == resume_step)
3021 && linux_fast_tracepoint_collecting (lwp, NULL));
3022 }
3023
3024 static void
3025 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3026 {
3027 struct lwp_info *lwp = (struct lwp_info *) entry;
3028 struct thread_info *thread = get_lwp_thread (lwp);
3029 int *wstat;
3030
3031 gdb_assert (lwp->suspended == 0);
3032 gdb_assert (lwp->stopped);
3033
3034 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3035
3036 /* Allow debugging the jump pad, gdb_collect, etc. */
3037 if (!gdb_breakpoint_here (lwp->stop_pc)
3038 && !lwp->stopped_by_watchpoint
3039 && thread->last_resume_kind != resume_step
3040 && maybe_move_out_of_jump_pad (lwp, wstat))
3041 {
3042 if (debug_threads)
3043 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3044 lwpid_of (lwp));
3045
3046 if (wstat)
3047 {
3048 lwp->status_pending_p = 0;
3049 enqueue_one_deferred_signal (lwp, wstat);
3050
3051 if (debug_threads)
3052 debug_printf ("Signal %d for LWP %ld deferred "
3053 "(in jump pad)\n",
3054 WSTOPSIG (*wstat), lwpid_of (lwp));
3055 }
3056
3057 linux_resume_one_lwp (lwp, 0, 0, NULL);
3058 }
3059 else
3060 lwp->suspended++;
3061 }
3062
3063 static int
3064 lwp_running (struct inferior_list_entry *entry, void *data)
3065 {
3066 struct lwp_info *lwp = (struct lwp_info *) entry;
3067
3068 if (lwp->dead)
3069 return 0;
3070 if (lwp->stopped)
3071 return 0;
3072 return 1;
3073 }
3074
3075 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3076 If SUSPEND, then also increase the suspend count of every LWP,
3077 except EXCEPT. */
3078
3079 static void
3080 stop_all_lwps (int suspend, struct lwp_info *except)
3081 {
3082 /* Should not be called recursively. */
3083 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3084
3085 if (debug_threads)
3086 {
3087 debug_enter ();
3088 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3089 suspend ? "stop-and-suspend" : "stop",
3090 except != NULL
3091 ? target_pid_to_str (ptid_of (except))
3092 : "none");
3093 }
3094
3095 stopping_threads = (suspend
3096 ? STOPPING_AND_SUSPENDING_THREADS
3097 : STOPPING_THREADS);
3098
3099 if (suspend)
3100 find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except);
3101 else
3102 find_inferior (&all_lwps, send_sigstop_callback, except);
3103 for_each_inferior (&all_lwps, wait_for_sigstop);
3104 stopping_threads = NOT_STOPPING_THREADS;
3105
3106 if (debug_threads)
3107 {
3108 debug_printf ("stop_all_lwps done, setting stopping_threads "
3109 "back to !stopping\n");
3110 debug_exit ();
3111 }
3112 }
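/* The stop/suspend calls in this file pair up.  A sketch of the
   protocol around a critical section that must run with every other
   LWP parked (compare the exit-jump-pad breakpoint removal in
   linux_wait_1 above); hypothetical wrapper, not gdbserver code:  */
#if 0
static void
stop_all_protocol_example (struct lwp_info *me)
{
  stop_all_lwps (1, me);	/* Stop and suspend everyone but ME.  */

  /* ...safe to manipulate breakpoints and inferior memory here...  */

  unstop_all_lwps (1, me);	/* Unsuspend and re-resume them.  */
}
#endif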
3113
3114 /* Resume execution of the inferior process.
3115 If STEP is nonzero, single-step it.
3116 If SIGNAL is nonzero, give it that signal. */
3117
3118 static void
3119 linux_resume_one_lwp (struct lwp_info *lwp,
3120 int step, int signal, siginfo_t *info)
3121 {
3122 struct thread_info *saved_inferior;
3123 int fast_tp_collecting;
3124
3125 if (lwp->stopped == 0)
3126 return;
3127
3128 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3129
3130 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3131
3132 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3133 user used the "jump" command, or "set $pc = foo"). */
3134 if (lwp->stop_pc != get_pc (lwp))
3135 {
3136 /* Collecting 'while-stepping' actions doesn't make sense
3137 anymore. */
3138 release_while_stepping_state_list (get_lwp_thread (lwp));
3139 }
3140
3141 /* If we have pending signals or status, and a new signal, enqueue the
3142 signal. Also enqueue the signal if we are waiting to reinsert a
3143 breakpoint; it will be picked up again below. */
3144 if (signal != 0
3145 && (lwp->status_pending_p
3146 || lwp->pending_signals != NULL
3147 || lwp->bp_reinsert != 0
3148 || fast_tp_collecting))
3149 {
3150 struct pending_signals *p_sig;
3151 p_sig = xmalloc (sizeof (*p_sig));
3152 p_sig->prev = lwp->pending_signals;
3153 p_sig->signal = signal;
3154 if (info == NULL)
3155 memset (&p_sig->info, 0, sizeof (siginfo_t));
3156 else
3157 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3158 lwp->pending_signals = p_sig;
3159 }
3160
3161 if (lwp->status_pending_p)
3162 {
3163 if (debug_threads)
3164 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3165 " has pending status\n",
3166 lwpid_of (lwp), step ? "step" : "continue", signal,
3167 lwp->stop_expected ? "expected" : "not expected");
3168 return;
3169 }
3170
3171 saved_inferior = current_inferior;
3172 current_inferior = get_lwp_thread (lwp);
3173
3174 if (debug_threads)
3175 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
3176 lwpid_of (lwp), step ? "step" : "continue", signal,
3177 lwp->stop_expected ? "expected" : "not expected");
3178
3179 /* This bit needs some thinking about. If we get a signal that
3180 we must report while a single-step reinsert is still pending,
3181 we often end up resuming the thread. It might be better to
3182 (ew) allow a stack of pending events; then we could be sure that
3183 the reinsert happened right away and not lose any signals.
3184
3185 Making this stack would also shrink the window in which breakpoints are
3186 uninserted (see comment in linux_wait_for_lwp) but not enough for
3187 complete correctness, so it won't solve that problem. It may be
3188 worthwhile just to solve this one, however. */
3189 if (lwp->bp_reinsert != 0)
3190 {
3191 if (debug_threads)
3192 debug_printf (" pending reinsert at 0x%s\n",
3193 paddress (lwp->bp_reinsert));
3194
3195 if (can_hardware_single_step ())
3196 {
3197 if (fast_tp_collecting == 0)
3198 {
3199 if (step == 0)
3200 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3201 if (lwp->suspended)
3202 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3203 lwp->suspended);
3204 }
3205
3206 step = 1;
3207 }
3208
3209 /* Postpone any pending signal. It was enqueued above. */
3210 signal = 0;
3211 }
3212
3213 if (fast_tp_collecting == 1)
3214 {
3215 if (debug_threads)
3216 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3217 " (exit-jump-pad-bkpt)\n",
3218 lwpid_of (lwp));
3219
3220 /* Postpone any pending signal. It was enqueued above. */
3221 signal = 0;
3222 }
3223 else if (fast_tp_collecting == 2)
3224 {
3225 if (debug_threads)
3226 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3227 " single-stepping\n",
3228 lwpid_of (lwp));
3229
3230 if (can_hardware_single_step ())
3231 step = 1;
3232 else
3233 fatal ("moving out of jump pad single-stepping"
3234 " not implemented on this target");
3235
3236 /* Postpone any pending signal. It was enqueued above. */
3237 signal = 0;
3238 }
3239
3240 /* If we have while-stepping actions in this thread, set it stepping.
3241 If we have a signal to deliver, it may or may not be set to
3242 SIG_IGN, we don't know. Assume so, and allow collecting
3243 while-stepping into a signal handler. A possible smart thing to
3244 do would be to set an internal breakpoint at the signal return
3245 address, continue, and carry on catching this while-stepping
3246 action only when that breakpoint is hit. A future
3247 enhancement. */
3248 if (get_lwp_thread (lwp)->while_stepping != NULL
3249 && can_hardware_single_step ())
3250 {
3251 if (debug_threads)
3252 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
3253 lwpid_of (lwp));
3254 step = 1;
3255 }
3256
3257 if (debug_threads && the_low_target.get_pc != NULL)
3258 {
3259 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
3260 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
3261 debug_printf (" resuming from pc 0x%lx\n", (long) pc);
3262 }
3263
3264 /* If we have pending signals, consume one unless we are trying to
3265 reinsert a breakpoint or we're trying to finish a fast tracepoint
3266 collect. */
3267 if (lwp->pending_signals != NULL
3268 && lwp->bp_reinsert == 0
3269 && fast_tp_collecting == 0)
3270 {
3271 struct pending_signals **p_sig;
3272
3273 p_sig = &lwp->pending_signals;
3274 while ((*p_sig)->prev != NULL)
3275 p_sig = &(*p_sig)->prev;
3276
3277 signal = (*p_sig)->signal;
3278 if ((*p_sig)->info.si_signo != 0)
3279 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), (PTRACE_TYPE_ARG3) 0,
3280 &(*p_sig)->info);
3281
3282 free (*p_sig);
3283 *p_sig = NULL;
3284 }
3285
3286 if (the_low_target.prepare_to_resume != NULL)
3287 the_low_target.prepare_to_resume (lwp);
3288
3289 regcache_invalidate_thread (get_lwp_thread (lwp));
3290 errno = 0;
3291 lwp->stopped = 0;
3292 lwp->stopped_by_watchpoint = 0;
3293 lwp->stepping = step;
3294 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp),
3295 (PTRACE_TYPE_ARG3) 0,
3296 /* Coerce to a uintptr_t first to avoid potential gcc warning
3297 of coercing an 8 byte integer to a 4 byte pointer. */
3298 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
3299
3300 current_inferior = saved_inferior;
3301 if (errno)
3302 {
3303 /* ESRCH from ptrace either means that the thread was already
3304 running (an error) or that it is gone (a race condition). If
3305 it's gone, we will get a notification the next time we wait,
3306 so we can ignore the error. We could differentiate these
3307 two, but it's tricky without waiting; the thread still exists
3308 as a zombie, so sending it signal 0 would succeed. So just
3309 ignore ESRCH. */
3310 if (errno == ESRCH)
3311 return;
3312
3313 perror_with_name ("ptrace");
3314 }
3315 }
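/* Delivery order of the pending_signals list: linux_resume_one_lwp
   pushes new entries at the head (p_sig->prev = lwp->pending_signals)
   but consumes from the tail, so signals are re-delivered first-in
   first-out.  A sketch of the consumption step in isolation
   (hypothetical helper mirroring the loop above):  */
#if 0
static int
take_oldest_pending_example (struct pending_signals **head)
{
  struct pending_signals **p = head;
  int sig;

  if (*p == NULL)
    return 0;
  while ((*p)->prev != NULL)
    p = &(*p)->prev;		/* Walk to the oldest entry.  */
  sig = (*p)->signal;
  free (*p);
  *p = NULL;
  return sig;
}
#endif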
3316
3317 struct thread_resume_array
3318 {
3319 struct thread_resume *resume;
3320 size_t n;
3321 };
3322
3323 /* This function is called once per thread via find_inferior.
3324 ARG is a pointer to a thread_resume_array struct.
3325 We look up the thread specified by ENTRY in ARG, and mark the thread
3326 with a pointer to the appropriate resume request.
3327
3328 This algorithm is O(threads * resume elements), but resume elements
3329 is small (and will remain small at least until GDB supports thread
3330 suspension). */
3331
3332 static int
3333 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3334 {
3335 struct lwp_info *lwp;
3336 struct thread_info *thread;
3337 int ndx;
3338 struct thread_resume_array *r;
3339
3340 thread = (struct thread_info *) entry;
3341 lwp = get_thread_lwp (thread);
3342 r = arg;
3343
3344 for (ndx = 0; ndx < r->n; ndx++)
3345 {
3346 ptid_t ptid = r->resume[ndx].thread;
3347 if (ptid_equal (ptid, minus_one_ptid)
3348 || ptid_equal (ptid, entry->id)
3349 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3350 of PID'. */
3351 || (ptid_get_pid (ptid) == pid_of (lwp)
3352 && (ptid_is_pid (ptid)
3353 || ptid_get_lwp (ptid) == -1)))
3354 {
3355 if (r->resume[ndx].kind == resume_stop
3356 && thread->last_resume_kind == resume_stop)
3357 {
3358 if (debug_threads)
3359 debug_printf ("already %s LWP %ld at GDB's request\n",
3360 (thread->last_status.kind
3361 == TARGET_WAITKIND_STOPPED)
3362 ? "stopped"
3363 : "stopping",
3364 lwpid_of (lwp));
3365
3366 continue;
3367 }
3368
3369 lwp->resume = &r->resume[ndx];
3370 thread->last_resume_kind = lwp->resume->kind;
3371
3372 lwp->step_range_start = lwp->resume->step_range_start;
3373 lwp->step_range_end = lwp->resume->step_range_end;
3374
3375 /* If we had a deferred signal to report, dequeue one now.
3376 This can happen if LWP gets more than one signal while
3377 trying to get out of a jump pad. */
3378 if (lwp->stopped
3379 && !lwp->status_pending_p
3380 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3381 {
3382 lwp->status_pending_p = 1;
3383
3384 if (debug_threads)
3385 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
3386 "leaving status pending.\n",
3387 WSTOPSIG (lwp->status_pending), lwpid_of (lwp));
3388 }
3389
3390 return 0;
3391 }
3392 }
3393
3394 /* No resume action for this thread. */
3395 lwp->resume = NULL;
3396
3397 return 0;
3398 }
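/* The matching rules above, pulled out as a standalone predicate: a
   resume request applies to a thread when it names all threads
   (minus_one_ptid), that exact thread, or the thread's whole
   process, where both the 'pPID' and 'pPID.-1' forms mean 'all
   threads of PID'.  Hypothetical helper for illustration:  */
#if 0
static int
resume_request_matches_example (ptid_t request, ptid_t thread)
{
  return (ptid_equal (request, minus_one_ptid)
	  || ptid_equal (request, thread)
	  || (ptid_get_pid (request) == ptid_get_pid (thread)
	      && (ptid_is_pid (request)
		  || ptid_get_lwp (request) == -1)));
}
#endif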
3399
3400 /* find_inferior callback for linux_resume.
3401 Set *FLAG_P if this lwp has an interesting status pending. */
3402
3403 static int
3404 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3405 {
3406 struct lwp_info *lwp = (struct lwp_info *) entry;
3407
3408 /* LWPs which will not be resumed are not interesting, because
3409 we might not wait for them next time through linux_wait. */
3410 if (lwp->resume == NULL)
3411 return 0;
3412
3413 if (lwp->status_pending_p)
3414 * (int *) flag_p = 1;
3415
3416 return 0;
3417 }
3418
3419 /* Return 1 if this lwp that GDB wants running is stopped at an
3420 internal breakpoint that we need to step over. It assumes that any
3421 required STOP_PC adjustment has already been propagated to the
3422 inferior's regcache. */
3423
3424 static int
3425 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3426 {
3427 struct lwp_info *lwp = (struct lwp_info *) entry;
3428 struct thread_info *thread;
3429 struct thread_info *saved_inferior;
3430 CORE_ADDR pc;
3431
3432 /* LWPs which will not be resumed are not interesting, because we
3433 might not wait for them next time through linux_wait. */
3434
3435 if (!lwp->stopped)
3436 {
3437 if (debug_threads)
3438 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
3439 lwpid_of (lwp));
3440 return 0;
3441 }
3442
3443 thread = get_lwp_thread (lwp);
3444
3445 if (thread->last_resume_kind == resume_stop)
3446 {
3447 if (debug_threads)
3448 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
3449 " stopped\n",
3450 lwpid_of (lwp));
3451 return 0;
3452 }
3453
3454 gdb_assert (lwp->suspended >= 0);
3455
3456 if (lwp->suspended)
3457 {
3458 if (debug_threads)
3459 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
3460 lwpid_of (lwp));
3461 return 0;
3462 }
3463
3464 if (!lwp->need_step_over)
3465 {
3466 if (debug_threads)
3467 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (lwp));
3468 return 0;
3469 }
3469
3470 if (lwp->status_pending_p)
3471 {
3472 if (debug_threads)
3473 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
3474 " status.\n",
3475 lwpid_of (lwp));
3476 return 0;
3477 }
3478
3479 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3480 or we have. */
3481 pc = get_pc (lwp);
3482
3483 /* If the PC has changed since we stopped, then don't do anything,
3484 and let the breakpoint/tracepoint be hit. This happens if, for
3485 instance, GDB handled the decr_pc_after_break subtraction itself,
3486 GDB is OOL stepping this thread, or the user has issued a "jump"
3487 command, or poked the thread's registers herself. */
3488 if (pc != lwp->stop_pc)
3489 {
3490 if (debug_threads)
3491 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
3492 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3493 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
3494
3495 lwp->need_step_over = 0;
3496 return 0;
3497 }
3498
3499 saved_inferior = current_inferior;
3500 current_inferior = thread;
3501
3502 /* We can only step over breakpoints we know about. */
3503 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3504 {
3505 /* Don't step over a breakpoint that GDB expects to hit,
3506 though. If the condition is being evaluated on the target's side
3507 and it evaluates to false, step over this breakpoint as well. */
3508 if (gdb_breakpoint_here (pc)
3509 && gdb_condition_true_at_breakpoint (pc)
3510 && gdb_no_commands_at_breakpoint (pc))
3511 {
3512 if (debug_threads)
3513 debug_printf ("Need step over [LWP %ld]? yes, but found"
3514 " GDB breakpoint at 0x%s; skipping step over\n",
3515 lwpid_of (lwp), paddress (pc));
3516
3517 current_inferior = saved_inferior;
3518 return 0;
3519 }
3520 else
3521 {
3522 if (debug_threads)
3523 debug_printf ("Need step over [LWP %ld]? yes, "
3524 "found breakpoint at 0x%s\n",
3525 lwpid_of (lwp), paddress (pc));
3526
3527 /* We've found an lwp that needs stepping over --- return 1 so
3528 that find_inferior stops looking. */
3529 current_inferior = saved_inferior;
3530
3531 /* If the step over is cancelled, this is set again. */
3532 lwp->need_step_over = 0;
3533 return 1;
3534 }
3535 }
3536
3537 current_inferior = saved_inferior;
3538
3539 if (debug_threads)
3540 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
3541 " at 0x%s\n",
3542 lwpid_of (lwp), paddress (pc));
3543
3544 return 0;
3545 }
3546
3547 /* Start a step-over operation on LWP. When LWP stops at a
3548 breakpoint, to make progress, we need to move the breakpoint out
3549 of the way. If we let other threads run while we do that, they may
3550 pass by the breakpoint location and miss hitting it. To avoid
3551 that, a step-over momentarily stops all threads while LWP is
3552 single-stepped with the breakpoint temporarily uninserted from
3553 the inferior. When the single-step finishes, we reinsert the
3554 breakpoint and let all threads that are supposed to be running
3555 run again.
3556
3557 On targets that don't support hardware single-step, we don't
3558 currently support full software single-stepping. Instead, we only
3559 support stepping over the thread event breakpoint, by asking the
3560 low target where to place a reinsert breakpoint. Since this
3561 routine assumes the breakpoint being stepped over is a thread event
3562 breakpoint, it usually assumes the return address of the current
3563 function is a good enough place to set the reinsert breakpoint. */
3564
3565 static int
3566 start_step_over (struct lwp_info *lwp)
3567 {
3568 struct thread_info *saved_inferior;
3569 CORE_ADDR pc;
3570 int step;
3571
3572 if (debug_threads)
3573 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
3574 lwpid_of (lwp));
3575
3576 stop_all_lwps (1, lwp);
3577 gdb_assert (lwp->suspended == 0);
3578
3579 if (debug_threads)
3580 debug_printf ("Done stopping all threads for step-over.\n");
3581
3582 /* Note, we should always reach here with an already adjusted PC,
3583 either by GDB (if we're resuming due to GDB's request), or by our
3584 caller, if we just finished handling an internal breakpoint GDB
3585 shouldn't care about. */
3586 pc = get_pc (lwp);
3587
3588 saved_inferior = current_inferior;
3589 current_inferior = get_lwp_thread (lwp);
3590
3591 lwp->bp_reinsert = pc;
3592 uninsert_breakpoints_at (pc);
3593 uninsert_fast_tracepoint_jumps_at (pc);
3594
3595 if (can_hardware_single_step ())
3596 {
3597 step = 1;
3598 }
3599 else
3600 {
3601 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3602 set_reinsert_breakpoint (raddr);
3603 step = 0;
3604 }
3605
3606 current_inferior = saved_inferior;
3607
3608 linux_resume_one_lwp (lwp, step, 0, NULL);
3609
3610 /* Require next event from this LWP. */
3611 step_over_bkpt = lwp->entry.id;
3612 return 1;
3613 }
3614
3615 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
3616 start_step_over, if still there, and delete any reinsert
3617 breakpoints we've set, on non-hardware single-step targets. */
3618
3619 static int
3620 finish_step_over (struct lwp_info *lwp)
3621 {
3622 if (lwp->bp_reinsert != 0)
3623 {
3624 if (debug_threads)
3625 debug_printf ("Finished step over.\n");
3626
3627 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3628 may be no breakpoint to reinsert there by now. */
3629 reinsert_breakpoints_at (lwp->bp_reinsert);
3630 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3631
3632 lwp->bp_reinsert = 0;
3633
3634 /* Delete any software-single-step reinsert breakpoints. No
3635 longer needed. We don't have to worry about other threads
3636 hitting this trap, and later not being able to explain it,
3637 because we were stepping over a breakpoint, and we hold all
3638 threads but LWP stopped while doing that. */
3639 if (!can_hardware_single_step ())
3640 delete_reinsert_breakpoints ();
3641
3642 step_over_bkpt = null_ptid;
3643 return 1;
3644 }
3645 else
3646 return 0;
3647 }
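/* The step-over life cycle end to end, as a simplified driver; the
   real sequencing is spread across linux_resume and linux_wait_1,
   and also unsuspends via unsuspend_all_lwps on some paths.  Sketch
   only, not gdbserver's actual control flow:  */
#if 0
static void
step_over_lifecycle_example (struct lwp_info *lwp)
{
  int wstat;

  /* 1. Park all other LWPs and single-step LWP past the uninserted
     breakpoint; this sets step_over_bkpt.  */
  start_step_over (lwp);

  /* 2. Collect the next event from that LWP only.  */
  linux_wait_for_event (step_over_bkpt, &wstat, __WALL);

  /* 3. Reinsert the breakpoint and clear step_over_bkpt...  */
  if (finish_step_over (lwp))
    /* 4. ...then let the parked LWPs run again.  */
    unstop_all_lwps (1, lwp);
}
#endif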
3648
3649 /* This function is called once per thread. We check the thread's resume
3650 request, which will tell us whether to resume, step, or leave the thread
3651 stopped; and what signal, if any, it should be sent.
3652
3653 For threads which we aren't explicitly told otherwise, we preserve
3654 the stepping flag; this is used for stepping over gdbserver-placed
3655 breakpoints.
3656
3657 If the pending-status flag was set in any thread, we queue any needed
3658 signals, since we won't actually resume. We already have a pending
3659 event to report, so we don't need to preserve any step requests;
3660 they should be re-issued if necessary. */
3661
3662 static int
3663 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
3664 {
3665 struct lwp_info *lwp;
3666 struct thread_info *thread;
3667 int step;
3668 int leave_all_stopped = * (int *) arg;
3669 int leave_pending;
3670
3671 thread = (struct thread_info *) entry;
3672 lwp = get_thread_lwp (thread);
3673
3674 if (lwp->resume == NULL)
3675 return 0;
3676
3677 if (lwp->resume->kind == resume_stop)
3678 {
3679 if (debug_threads)
3680 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (lwp));
3681
3682 if (!lwp->stopped)
3683 {
3684 if (debug_threads)
3685 debug_printf ("stopping LWP %ld\n", lwpid_of (lwp));
3686
3687 /* Stop the thread, and wait for the event asynchronously,
3688 through the event loop. */
3689 send_sigstop (lwp);
3690 }
3691 else
3692 {
3693 if (debug_threads)
3694 debug_printf ("already stopped LWP %ld\n",
3695 lwpid_of (lwp));
3696
3697 /* The LWP may have been stopped in an internal event that
3698 was not meant to be notified back to GDB (e.g., gdbserver
3699 breakpoint), so we should be reporting a stop event in
3700 this case too. */
3701
3702 /* If the thread already has a pending SIGSTOP, this is a
3703 no-op. Otherwise, something later will presumably resume
3704 the thread and this will cause it to cancel any pending
3705 operation, due to last_resume_kind == resume_stop. If
3706 the thread already has a pending status to report, we
3707 will still report it the next time we wait - see
3708 status_pending_p_callback. */
3709
3710 /* If we already have a pending signal to report, then
3711 there's no need to queue a SIGSTOP, as this means we're
3712 midway through moving the LWP out of the jump pad, and we
3713 will report the pending signal as soon as that is
3714 finished. */
3715 if (lwp->pending_signals_to_report == NULL)
3716 send_sigstop (lwp);
3717 }
3718
3719 /* For stop requests, we're done. */
3720 lwp->resume = NULL;
3721 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3722 return 0;
3723 }
3724
3725 /* If this thread which is about to be resumed has a pending status,
3726 then don't resume any threads - we can just report the pending
3727 status. Make sure to queue any signals that would otherwise be
3728 sent. In all-stop mode, we make this decision based on whether *any*
3729 thread has a pending status. If there's a thread that needs the
3730 step-over-breakpoint dance, then don't resume any other thread
3731 but that particular one. */
3732 leave_pending = (lwp->status_pending_p || leave_all_stopped);
3733
3734 if (!leave_pending)
3735 {
3736 if (debug_threads)
3737 debug_printf ("resuming LWP %ld\n", lwpid_of (lwp));
3738
3739 step = (lwp->resume->kind == resume_step);
3740 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
3741 }
3742 else
3743 {
3744 if (debug_threads)
3745 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (lwp));
3746
3747 /* If we have a new signal, enqueue the signal. */
3748 if (lwp->resume->sig != 0)
3749 {
3750 struct pending_signals *p_sig;
3751 p_sig = xmalloc (sizeof (*p_sig));
3752 p_sig->prev = lwp->pending_signals;
3753 p_sig->signal = lwp->resume->sig;
3754 memset (&p_sig->info, 0, sizeof (siginfo_t));
3755
3756 /* If this is the same signal we were previously stopped by,
3757 make sure to queue its siginfo. We can ignore the return
3758 value of ptrace; if it fails, we'll skip
3759 PTRACE_SETSIGINFO. */
3760 if (WIFSTOPPED (lwp->last_status)
3761 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3762 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), (PTRACE_TYPE_ARG3) 0,
3763 &p_sig->info);
3764
3765 lwp->pending_signals = p_sig;
3766 }
3767 }
3768
3769 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3770 lwp->resume = NULL;
3771 return 0;
3772 }
3773
3774 static void
3775 linux_resume (struct thread_resume *resume_info, size_t n)
3776 {
3777 struct thread_resume_array array = { resume_info, n };
3778 struct lwp_info *need_step_over = NULL;
3779 int any_pending;
3780 int leave_all_stopped;
3781
3782 if (debug_threads)
3783 {
3784 debug_enter ();
3785 debug_printf ("linux_resume:\n");
3786 }
3787
3788 find_inferior (&all_threads, linux_set_resume_request, &array);
3789
3790 /* If there is a thread which would otherwise be resumed, which has
3791 a pending status, then don't resume any threads - we can just
3792 report the pending status. Make sure to queue any signals that
3793 would otherwise be sent. In non-stop mode, we'll apply this
3794 logic to each thread individually. We consume all pending events
3795 before considering starting a step-over (in all-stop). */
3796 any_pending = 0;
3797 if (!non_stop)
3798 find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
3799
3800 /* If there is a thread which would otherwise be resumed, which is
3801 stopped at a breakpoint that needs stepping over, then don't
3802 resume any threads - have it step over the breakpoint with all
3803 other threads stopped, then resume all threads again. Make sure
3804 to queue any signals that would otherwise be delivered or
3805 queued. */
3806 if (!any_pending && supports_breakpoints ())
3807 need_step_over
3808 = (struct lwp_info *) find_inferior (&all_lwps,
3809 need_step_over_p, NULL);
3810
3811 leave_all_stopped = (need_step_over != NULL || any_pending);
3812
3813 if (debug_threads)
3814 {
3815 if (need_step_over != NULL)
3816 debug_printf ("Not resuming all, need step over\n");
3817 else if (any_pending)
3818 debug_printf ("Not resuming, all-stop and found "
3819 "an LWP with pending status\n");
3820 else
3821 debug_printf ("Resuming, no pending status or step over needed\n");
3822 }
3823
3824 /* Even if we're leaving threads stopped, queue all signals we'd
3825 otherwise deliver. */
3826 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
3827
3828 if (need_step_over)
3829 start_step_over (need_step_over);
3830
3831 if (debug_threads)
3832 {
3833 debug_printf ("linux_resume done\n");
3834 debug_exit ();
3835 }
3836 }
3837
3838 /* This function is called once per thread. We check the thread's
3839 last resume request, which will tell us whether to resume, step, or
3840 leave the thread stopped. Any signal the client requested to be
3841 delivered has already been enqueued at this point.
3842
3843 If any thread that GDB wants running is stopped at an internal
3844 breakpoint that needs stepping over, we start a step-over operation
3845 on that particular thread, and leave all others stopped. */
3846
3847 static int
3848 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3849 {
3850 struct lwp_info *lwp = (struct lwp_info *) entry;
3851 struct thread_info *thread;
3852 int step;
3853
3854 if (lwp == except)
3855 return 0;
3856
3857 if (debug_threads)
3858 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
3859
3860 if (!lwp->stopped)
3861 {
3862 if (debug_threads)
3863 debug_printf (" LWP %ld already running\n", lwpid_of (lwp));
3864 return 0;
3865 }
3866
3867 thread = get_lwp_thread (lwp);
3868
3869 if (thread->last_resume_kind == resume_stop
3870 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
3871 {
3872 if (debug_threads)
3873 debug_printf (" client wants LWP %ld to remain stopped\n",
3874 lwpid_of (lwp));
3875 return 0;
3876 }
3877
3878 if (lwp->status_pending_p)
3879 {
3880 if (debug_threads)
3881 debug_printf (" LWP %ld has pending status, leaving stopped\n",
3882 lwpid_of (lwp));
3883 return 0;
3884 }
3885
3886 gdb_assert (lwp->suspended >= 0);
3887
3888 if (lwp->suspended)
3889 {
3890 if (debug_threads)
3891 debug_printf (" LWP %ld is suspended\n", lwpid_of (lwp));
3892 return 0;
3893 }
3894
3895 if (thread->last_resume_kind == resume_stop
3896 && lwp->pending_signals_to_report == NULL
3897 && lwp->collecting_fast_tracepoint == 0)
3898 {
3899 /* We haven't reported this LWP as stopped yet (otherwise, the
3900 last_status.kind check above would catch it, and we wouldn't
3901 reach here). This LWP may have been momentarily paused by a
3902 stop_all_lwps call while handling, for example, another LWP's
3903 step-over. In that case, the pending expected SIGSTOP signal
3904 that was queued at vCont;t handling time will have already
3905 been consumed by wait_for_sigstop, and so we need to requeue
3906 another one here. Note that if the LWP already has a SIGSTOP
3907 pending, this is a no-op. */
3908
3909 if (debug_threads)
3910 debug_printf ("Client wants LWP %ld to stop. "
3911 "Making sure it has a SIGSTOP pending\n",
3912 lwpid_of (lwp));
3913
3914 send_sigstop (lwp);
3915 }
3916
3917 step = thread->last_resume_kind == resume_step;
3918 linux_resume_one_lwp (lwp, step, 0, NULL);
3919 return 0;
3920 }
3921
3922 static int
3923 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3924 {
3925 struct lwp_info *lwp = (struct lwp_info *) entry;
3926
3927 if (lwp == except)
3928 return 0;
3929
3930 lwp->suspended--;
3931 gdb_assert (lwp->suspended >= 0);
3932
3933 return proceed_one_lwp (entry, except);
3934 }
3935
3936 /* When we finish a step-over, set threads running again. If there's
3937 another thread that may need a step-over, now's the time to start
3938 it. Eventually, we'll move all threads past their breakpoints. */
3939
3940 static void
3941 proceed_all_lwps (void)
3942 {
3943 struct lwp_info *need_step_over;
3944
3945 /* If there is a thread which would otherwise be resumed, which is
3946 stopped at a breakpoint that needs stepping over, then don't
3947 resume any threads - have it step over the breakpoint with all
3948 other threads stopped, then resume all threads again. */
3949
3950 if (supports_breakpoints ())
3951 {
3952 need_step_over
3953 = (struct lwp_info *) find_inferior (&all_lwps,
3954 need_step_over_p, NULL);
3955
3956 if (need_step_over != NULL)
3957 {
3958 if (debug_threads)
3959 debug_printf ("proceed_all_lwps: found "
3960 "thread %ld needing a step-over\n",
3961 lwpid_of (need_step_over));
3962
3963 start_step_over (need_step_over);
3964 return;
3965 }
3966 }
3967
3968 if (debug_threads)
3969 debug_printf ("Proceeding, no step-over needed\n");
3970
3971 find_inferior (&all_lwps, proceed_one_lwp, NULL);
3972 }
3973
3974 /* Stopped LWPs that the client wanted to be running and that don't
3975 have pending statuses are set to run again, except for EXCEPT, if
3976 not NULL. This undoes a stop_all_lwps call. */
3977
3978 static void
3979 unstop_all_lwps (int unsuspend, struct lwp_info *except)
3980 {
3981 if (debug_threads)
3982 {
3983 debug_enter ();
3984 if (except)
3985 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
3986 lwpid_of (except));
3987 else
3988 debug_printf ("unstopping all lwps\n");
3989 }
3990
3991 if (unsuspend)
3992 find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except);
3993 else
3994 find_inferior (&all_lwps, proceed_one_lwp, except);
3995
3996 if (debug_threads)
3997 {
3998 debug_printf ("unstop_all_lwps done\n");
3999 debug_exit ();
4000 }
4001 }
4002
4003
4004 #ifdef HAVE_LINUX_REGSETS
4005
4006 #define use_linux_regsets 1
4007
4008 /* Returns true if REGSET has been disabled. */
4009
4010 static int
4011 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4012 {
4013 return (info->disabled_regsets != NULL
4014 && info->disabled_regsets[regset - info->regsets]);
4015 }
4016
4017 /* Disable REGSET. */
4018
4019 static void
4020 disable_regset (struct regsets_info *info, struct regset_info *regset)
4021 {
4022 int dr_offset;
4023
4024 dr_offset = regset - info->regsets;
4025 if (info->disabled_regsets == NULL)
4026 info->disabled_regsets = xcalloc (1, info->num_regsets);
4027 info->disabled_regsets[dr_offset] = 1;
4028 }
4029
4030 static int
4031 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4032 struct regcache *regcache)
4033 {
4034 struct regset_info *regset;
4035 int saw_general_regs = 0;
4036 int pid;
4037 struct iovec iov;
4038
4039 regset = regsets_info->regsets;
4040
4041 pid = lwpid_of (get_thread_lwp (current_inferior));
4042 while (regset->size >= 0)
4043 {
4044 void *buf, *data;
4045 int nt_type, res;
4046
4047 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4048 {
4049 regset ++;
4050 continue;
4051 }
4052
4053 buf = xmalloc (regset->size);
4054
4055 nt_type = regset->nt_type;
4056 if (nt_type)
4057 {
4058 iov.iov_base = buf;
4059 iov.iov_len = regset->size;
4060 data = (void *) &iov;
4061 }
4062 else
4063 data = buf;
4064
4065 #ifndef __sparc__
4066 res = ptrace (regset->get_request, pid,
4067 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4068 #else
4069 res = ptrace (regset->get_request, pid, data, nt_type);
4070 #endif
4071 if (res < 0)
4072 {
4073 if (errno == EIO)
4074 {
4075 /* If we get EIO on a regset, do not try it again for
4076 this process mode. */
4077 disable_regset (regsets_info, regset);
4078 free (buf);
4079 continue;
4080 }
4081 else
4082 {
4083 char s[256];
4084 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4085 pid);
4086 perror (s);
4087 }
4088 }
4089 else if (regset->type == GENERAL_REGS)
4090 saw_general_regs = 1;
4091 regset->store_function (regcache, buf);
4092 regset ++;
4093 free (buf);
4094 }
4095 if (saw_general_regs)
4096 return 0;
4097 else
4098 return 1;
4099 }
4100
4101 static int
4102 regsets_store_inferior_registers (struct regsets_info *regsets_info,
4103 struct regcache *regcache)
4104 {
4105 struct regset_info *regset;
4106 int saw_general_regs = 0;
4107 int pid;
4108 struct iovec iov;
4109
4110 regset = regsets_info->regsets;
4111
4112 pid = lwpid_of (get_thread_lwp (current_inferior));
4113 while (regset->size >= 0)
4114 {
4115 void *buf, *data;
4116 int nt_type, res;
4117
4118 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4119 {
4120 regset ++;
4121 continue;
4122 }
4123
4124 buf = xmalloc (regset->size);
4125
4126 /* First fill the buffer with the current register set contents,
4127 in case there are any items in the kernel's regset that are
4128 not in gdbserver's regcache. */
4129
4130 nt_type = regset->nt_type;
4131 if (nt_type)
4132 {
4133 iov.iov_base = buf;
4134 iov.iov_len = regset->size;
4135 data = (void *) &iov;
4136 }
4137 else
4138 data = buf;
4139
4140 #ifndef __sparc__
4141 res = ptrace (regset->get_request, pid,
4142 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4143 #else
4144 res = ptrace (regset->get_request, pid, data, nt_type);
4145 #endif
4146
4147 if (res == 0)
4148 {
4149 /* Then overlay our cached registers on that. */
4150 regset->fill_function (regcache, buf);
4151
4152 /* Only now do we write the register set. */
4153 #ifndef __sparc__
4154 res = ptrace (regset->set_request, pid,
4155 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4156 #else
4157 res = ptrace (regset->set_request, pid, data, nt_type);
4158 #endif
4159 }
4160
4161 if (res < 0)
4162 {
4163 if (errno == EIO)
4164 {
4165 /* If we get EIO on a regset, do not try it again for
4166 this process mode. */
4167 disable_regset (regsets_info, regset);
4168 free (buf);
4169 continue;
4170 }
4171 else if (errno == ESRCH)
4172 {
4173 /* At this point, ESRCH should mean the process is
4174 already gone, in which case we simply ignore attempts
4175 to change its registers. See also the related
4176 comment in linux_resume_one_lwp. */
4177 free (buf);
4178 return 0;
4179 }
4180 else
4181 {
4182 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4183 }
4184 }
4185 else if (regset->type == GENERAL_REGS)
4186 saw_general_regs = 1;
4187 regset ++;
4188 free (buf);
4189 }
4190 if (saw_general_regs)
4191 return 0;
4192 else
4193 return 1;
4194 }
4195
4196 #else /* !HAVE_LINUX_REGSETS */
4197
4198 #define use_linux_regsets 0
4199 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4200 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4201
4202 #endif
4203
4204 /* Return 1 if register REGNO is supported by one of the regset ptrace
4205 calls or 0 if it has to be transferred individually. */
4206
4207 static int
4208 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
4209 {
4210 unsigned char mask = 1 << (regno % 8);
4211 size_t index = regno / 8;
4212
4213 return (use_linux_regsets
4214 && (regs_info->regset_bitmap == NULL
4215 || (regs_info->regset_bitmap[index] & mask) != 0));
4216 }
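/* Illustrative sketch, compiled out: the bitmap arithmetic above in
   isolation.  For example, REGNO 10 lands in byte 1 of the bitmap
   with mask 0x04 (1 << (10 % 8)).  The helper name is hypothetical
   and not part of gdbserver.  */
#if 0
static int
example_regset_bitmap_test (const unsigned char *bitmap, int regno)
{
  unsigned char mask = 1 << (regno % 8);  /* Bit within the byte.  */
  size_t index = regno / 8;               /* Byte within the bitmap.  */

  return (bitmap[index] & mask) != 0;
}
#endif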
4217
4218 #ifdef HAVE_LINUX_USRREGS
4219
4220 int
4221 register_addr (const struct usrregs_info *usrregs, int regnum)
4222 {
4223 int addr;
4224
4225 if (regnum < 0 || regnum >= usrregs->num_regs)
4226 error ("Invalid register number %d.", regnum);
4227
4228 addr = usrregs->regmap[regnum];
4229
4230 return addr;
4231 }
4232
4233 /* Fetch one register. */
4234 static void
4235 fetch_register (const struct usrregs_info *usrregs,
4236 struct regcache *regcache, int regno)
4237 {
4238 CORE_ADDR regaddr;
4239 int i, size;
4240 char *buf;
4241 int pid;
4242
4243 if (regno >= usrregs->num_regs)
4244 return;
4245 if ((*the_low_target.cannot_fetch_register) (regno))
4246 return;
4247
4248 regaddr = register_addr (usrregs, regno);
4249 if (regaddr == -1)
4250 return;
4251
4252 size = ((register_size (regcache->tdesc, regno)
4253 + sizeof (PTRACE_XFER_TYPE) - 1)
4254 & -sizeof (PTRACE_XFER_TYPE));
4255 buf = alloca (size);
4256
4257 pid = lwpid_of (get_thread_lwp (current_inferior));
4258 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4259 {
4260 errno = 0;
4261 *(PTRACE_XFER_TYPE *) (buf + i) =
4262 ptrace (PTRACE_PEEKUSER, pid,
4263 /* Coerce to a uintptr_t first to avoid potential gcc warning
4264 about coercing an 8 byte integer to a 4 byte pointer. */
4265 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
4266 regaddr += sizeof (PTRACE_XFER_TYPE);
4267 if (errno != 0)
4268 error ("reading register %d: %s", regno, strerror (errno));
4269 }
4270
4271 if (the_low_target.supply_ptrace_register)
4272 the_low_target.supply_ptrace_register (regcache, regno, buf);
4273 else
4274 supply_register (regcache, regno, buf);
4275 }
4276
4277 /* Store one register. */
4278 static void
4279 store_register (const struct usrregs_info *usrregs,
4280 struct regcache *regcache, int regno)
4281 {
4282 CORE_ADDR regaddr;
4283 int i, size;
4284 char *buf;
4285 int pid;
4286
4287 if (regno >= usrregs->num_regs)
4288 return;
4289 if ((*the_low_target.cannot_store_register) (regno))
4290 return;
4291
4292 regaddr = register_addr (usrregs, regno);
4293 if (regaddr == -1)
4294 return;
4295
4296 size = ((register_size (regcache->tdesc, regno)
4297 + sizeof (PTRACE_XFER_TYPE) - 1)
4298 & -sizeof (PTRACE_XFER_TYPE));
4299 buf = alloca (size);
4300 memset (buf, 0, size);
4301
4302 if (the_low_target.collect_ptrace_register)
4303 the_low_target.collect_ptrace_register (regcache, regno, buf);
4304 else
4305 collect_register (regcache, regno, buf);
4306
4307 pid = lwpid_of (get_thread_lwp (current_inferior));
4308 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4309 {
4310 errno = 0;
4311 ptrace (PTRACE_POKEUSER, pid,
4312 /* Coerce to a uintptr_t first to avoid potential gcc warning
4313 about coercing an 8 byte integer to a 4 byte pointer. */
4314 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
4315 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
4316 if (errno != 0)
4317 {
4318 /* At this point, ESRCH should mean the process is
4319 already gone, in which case we simply ignore attempts
4320 to change its registers. See also the related
4321 comment in linux_resume_one_lwp. */
4322 if (errno == ESRCH)
4323 return;
4324
4325 if ((*the_low_target.cannot_store_register) (regno) == 0)
4326 error ("writing register %d: %s", regno, strerror (errno));
4327 }
4328 regaddr += sizeof (PTRACE_XFER_TYPE);
4329 }
4330 }
4331
4332 /* Fetch all registers, or just one, from the child process.
4333 If REGNO is -1, do this for all registers, skipping any that are
4334 assumed to have been retrieved by regsets_fetch_inferior_registers,
4335 unless ALL is non-zero.
4336 Otherwise, REGNO specifies which register (so we can save time). */
4337 static void
4338 usr_fetch_inferior_registers (const struct regs_info *regs_info,
4339 struct regcache *regcache, int regno, int all)
4340 {
4341 struct usrregs_info *usr = regs_info->usrregs;
4342
4343 if (regno == -1)
4344 {
4345 for (regno = 0; regno < usr->num_regs; regno++)
4346 if (all || !linux_register_in_regsets (regs_info, regno))
4347 fetch_register (usr, regcache, regno);
4348 }
4349 else
4350 fetch_register (usr, regcache, regno);
4351 }
4352
4353 /* Store our register values back into the inferior.
4354 If REGNO is -1, do this for all registers, skipping any that are
4355 assumed to have been saved by regsets_store_inferior_registers,
4356 unless ALL is non-zero.
4357 Otherwise, REGNO specifies which register (so we can save time). */
4358 static void
4359 usr_store_inferior_registers (const struct regs_info *regs_info,
4360 struct regcache *regcache, int regno, int all)
4361 {
4362 struct usrregs_info *usr = regs_info->usrregs;
4363
4364 if (regno == -1)
4365 {
4366 for (regno = 0; regno < usr->num_regs; regno++)
4367 if (all || !linux_register_in_regsets (regs_info, regno))
4368 store_register (usr, regcache, regno);
4369 }
4370 else
4371 store_register (usr, regcache, regno);
4372 }
4373
4374 #else /* !HAVE_LINUX_USRREGS */
4375
4376 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4377 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4378
4379 #endif
4380
4381
4382 void
4383 linux_fetch_registers (struct regcache *regcache, int regno)
4384 {
4385 int use_regsets;
4386 int all = 0;
4387 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4388
4389 if (regno == -1)
4390 {
4391 if (the_low_target.fetch_register != NULL
4392 && regs_info->usrregs != NULL)
4393 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
4394 (*the_low_target.fetch_register) (regcache, regno);
4395
4396 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
4397 if (regs_info->usrregs != NULL)
4398 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
4399 }
4400 else
4401 {
4402 if (the_low_target.fetch_register != NULL
4403 && (*the_low_target.fetch_register) (regcache, regno))
4404 return;
4405
4406 use_regsets = linux_register_in_regsets (regs_info, regno);
4407 if (use_regsets)
4408 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
4409 regcache);
4410 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4411 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
4412 }
4413 }
4414
4415 void
4416 linux_store_registers (struct regcache *regcache, int regno)
4417 {
4418 int use_regsets;
4419 int all = 0;
4420 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4421
4422 if (regno == -1)
4423 {
4424 all = regsets_store_inferior_registers (regs_info->regsets_info,
4425 regcache);
4426 if (regs_info->usrregs != NULL)
4427 usr_store_inferior_registers (regs_info, regcache, regno, all);
4428 }
4429 else
4430 {
4431 use_regsets = linux_register_in_regsets (regs_info, regno);
4432 if (use_regsets)
4433 all = regsets_store_inferior_registers (regs_info->regsets_info,
4434 regcache);
4435 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4436 usr_store_inferior_registers (regs_info, regcache, regno, 1);
4437 }
4438 }
4439
4440
4441 /* Copy LEN bytes from inferior's memory starting at MEMADDR to
4442 debugger memory starting at MYADDR. Return 0 on success, else errno. */
4443
4444 static int
4445 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4446 {
4447 int pid = lwpid_of (get_thread_lwp (current_inferior));
4448 register PTRACE_XFER_TYPE *buffer;
4449 register CORE_ADDR addr;
4450 register int count;
4451 char filename[64];
4452 register int i;
4453 int ret;
4454 int fd;
4455
4456 /* Try using /proc. Don't bother for short transfers (under three longwords). */
4457 if (len >= 3 * sizeof (long))
4458 {
4459 int bytes;
4460
4461 /* We could keep this file open and cache it - possibly one per
4462 thread. That requires some juggling, but is even faster. */
4463 sprintf (filename, "/proc/%d/mem", pid);
4464 fd = open (filename, O_RDONLY | O_LARGEFILE);
4465 if (fd == -1)
4466 goto no_proc;
4467
4468 /* If pread64 is available, use it. It's faster if the kernel
4469 supports it (only one syscall), and it's 64-bit safe even on
4470 32-bit platforms (for instance, SPARC debugging a SPARC64
4471 application). */
4472 #ifdef HAVE_PREAD64
4473 bytes = pread64 (fd, myaddr, len, memaddr);
4474 #else
4475 bytes = -1;
4476 if (lseek (fd, memaddr, SEEK_SET) != -1)
4477 bytes = read (fd, myaddr, len);
4478 #endif
4479
4480 close (fd);
4481 if (bytes == len)
4482 return 0;
4483
4484 /* Some data was read, we'll try to get the rest with ptrace. */
4485 if (bytes > 0)
4486 {
4487 memaddr += bytes;
4488 myaddr += bytes;
4489 len -= bytes;
4490 }
4491 }
4492
4493 no_proc:
4494 /* Round starting address down to longword boundary. */
4495 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4496 /* Round ending address up; get number of longwords that makes. */
4497 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4498 / sizeof (PTRACE_XFER_TYPE));
4499 /* Allocate buffer of that many longwords. */
4500 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4501
4502 /* Read all the longwords. */
4503 errno = 0;
4504 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4505 {
4506 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4507 about coercing an 8 byte integer to a 4 byte pointer. */
4508 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4509 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4510 (PTRACE_TYPE_ARG4) 0);
4511 if (errno)
4512 break;
4513 }
4514 ret = errno;
4515
4516 /* Copy appropriate bytes out of the buffer. */
4517 if (i > 0)
4518 {
4519 i *= sizeof (PTRACE_XFER_TYPE);
4520 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
4521 memcpy (myaddr,
4522 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4523 i < len ? i : len);
4524 }
4525
4526 return ret;
4527 }
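/* Worked example of the rounding above, with hypothetical numbers:
   for a 4-byte PTRACE_XFER_TYPE, MEMADDR = 0x1003 and LEN = 6 round
   down to ADDR = 0x1000 and up to COUNT = 3 longwords, so the peeks
   cover 0x1000..0x100b and the final memcpy skips the 3 leading and
   3 trailing bytes that were not requested.  A compiled-out sketch of
   the same computation:  */
#if 0
static void
example_round_transfer (CORE_ADDR memaddr, int len,
                        CORE_ADDR *addr, int *count)
{
  /* Round the start down to a longword boundary.  */
  *addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round the end up and count the longwords in between.  */
  *count = ((((memaddr + len) - *addr) + sizeof (PTRACE_XFER_TYPE) - 1)
            / sizeof (PTRACE_XFER_TYPE));
}
#endif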
4528
4529 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4530 memory at MEMADDR. On failure (cannot write to the inferior)
4531 returns the value of errno. Always succeeds if LEN is zero. */
4532
4533 static int
4534 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4535 {
4536 register int i;
4537 /* Round starting address down to longword boundary. */
4538 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4539 /* Round ending address up; get number of longwords that makes. */
4540 register int count
4541 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4542 / sizeof (PTRACE_XFER_TYPE);
4543
4544 /* Allocate buffer of that many longwords. */
4545 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4546 alloca (count * sizeof (PTRACE_XFER_TYPE));
4547
4548 int pid = lwpid_of (get_thread_lwp (current_inferior));
4549
4550 if (len == 0)
4551 {
4552 /* Zero length write always succeeds. */
4553 return 0;
4554 }
4555
4556 if (debug_threads)
4557 {
4558 /* Dump up to four bytes. */
4559 unsigned int val = * (unsigned int *) myaddr;
4560 if (len == 1)
4561 val = val & 0xff;
4562 else if (len == 2)
4563 val = val & 0xffff;
4564 else if (len == 3)
4565 val = val & 0xffffff;
4566 debug_printf ("Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4567 val, (long)memaddr);
4568 }
4569
4570 /* Fill start and end extra bytes of buffer with existing memory data. */
4571
4572 errno = 0;
4573 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4574 about coercing an 8 byte integer to a 4 byte pointer. */
4575 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4576 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4577 (PTRACE_TYPE_ARG4) 0);
4578 if (errno)
4579 return errno;
4580
4581 if (count > 1)
4582 {
4583 errno = 0;
4584 buffer[count - 1]
4585 = ptrace (PTRACE_PEEKTEXT, pid,
4586 /* Coerce to a uintptr_t first to avoid potential gcc warning
4587 about coercing an 8 byte integer to a 4 byte pointer. */
4588 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
4589 * sizeof (PTRACE_XFER_TYPE)),
4590 (PTRACE_TYPE_ARG4) 0);
4591 if (errno)
4592 return errno;
4593 }
4594
4595 /* Copy data to be written over corresponding part of buffer. */
4596
4597 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4598 myaddr, len);
4599
4600 /* Write the entire buffer. */
4601
4602 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4603 {
4604 errno = 0;
4605 ptrace (PTRACE_POKETEXT, pid,
4606 /* Coerce to a uintptr_t first to avoid potential gcc warning
4607 about coercing an 8 byte integer to a 4 byte pointer. */
4608 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4609 (PTRACE_TYPE_ARG4) buffer[i]);
4610 if (errno)
4611 return errno;
4612 }
4613
4614 return 0;
4615 }
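/* Worked example of the read-modify-write above, with hypothetical
   numbers: for a 4-byte PTRACE_XFER_TYPE, writing LEN = 2 bytes at
   MEMADDR = 0x1001 peeks the single longword at 0x1000, overlays the
   bytes at offsets 1 and 2 with the new data, and pokes the word
   back, preserving the inferior's bytes at offsets 0 and 3.  */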
4616
4617 static void
4618 linux_look_up_symbols (void)
4619 {
4620 #ifdef USE_THREAD_DB
4621 struct process_info *proc = current_process ();
4622
4623 if (proc->private->thread_db != NULL)
4624 return;
4625
4626 /* If the kernel supports tracing clones, then we don't need to
4627 use the magic thread event breakpoint to learn about
4628 threads. */
4629 thread_db_init (!linux_supports_traceclone ());
4630 #endif
4631 }
4632
4633 static void
4634 linux_request_interrupt (void)
4635 {
4636 extern unsigned long signal_pid;
4637
4638 if (!ptid_equal (cont_thread, null_ptid)
4639 && !ptid_equal (cont_thread, minus_one_ptid))
4640 {
4641 struct lwp_info *lwp;
4642 int lwpid;
4643
4644 lwp = get_thread_lwp (current_inferior);
4645 lwpid = lwpid_of (lwp);
4646 kill_lwp (lwpid, SIGINT);
4647 }
4648 else
4649 kill_lwp (signal_pid, SIGINT);
4650 }
4651
4652 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4653 to debugger memory starting at MYADDR. */
4654
4655 static int
4656 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
4657 {
4658 char filename[PATH_MAX];
4659 int fd, n;
4660 int pid = lwpid_of (get_thread_lwp (current_inferior));
4661
4662 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4663
4664 fd = open (filename, O_RDONLY);
4665 if (fd < 0)
4666 return -1;
4667
4668 if (offset != (CORE_ADDR) 0
4669 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4670 n = -1;
4671 else
4672 n = read (fd, myaddr, len);
4673
4674 close (fd);
4675
4676 return n;
4677 }
4678
4679 /* These breakpoint- and watchpoint-related wrapper functions simply
4680 pass on the function call if the target has registered a
4681 corresponding function. */
4682
4683 static int
4684 linux_insert_point (char type, CORE_ADDR addr, int len)
4685 {
4686 if (the_low_target.insert_point != NULL)
4687 return the_low_target.insert_point (type, addr, len);
4688 else
4689 /* Unsupported (see target.h). */
4690 return 1;
4691 }
4692
4693 static int
4694 linux_remove_point (char type, CORE_ADDR addr, int len)
4695 {
4696 if (the_low_target.remove_point != NULL)
4697 return the_low_target.remove_point (type, addr, len);
4698 else
4699 /* Unsupported (see target.h). */
4700 return 1;
4701 }
4702
4703 static int
4704 linux_stopped_by_watchpoint (void)
4705 {
4706 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4707
4708 return lwp->stopped_by_watchpoint;
4709 }
4710
4711 static CORE_ADDR
4712 linux_stopped_data_address (void)
4713 {
4714 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4715
4716 return lwp->stopped_data_address;
4717 }
4718
4719 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
4720 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
4721 && defined(PT_TEXT_END_ADDR)
4722
4723 /* This is only used for targets that define PT_TEXT_ADDR,
4724 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
4725 the target has different ways of acquiring this information, like
4726 loadmaps. */
4727
4728 /* Under uClinux, programs are loaded at non-zero offsets, which we need
4729 to tell gdb about. */
4730
4731 static int
4732 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4733 {
4734 unsigned long text, text_end, data;
4735 int pid = lwpid_of (get_thread_lwp (current_inferior));
4736
4737 errno = 0;
4738
4739 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
4740 (PTRACE_TYPE_ARG4) 0);
4741 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
4742 (PTRACE_TYPE_ARG4) 0);
4743 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
4744 (PTRACE_TYPE_ARG4) 0);
4745
4746 if (errno == 0)
4747 {
4748 /* Both text and data offsets produced at compile-time (and so
4749 used by gdb) are relative to the beginning of the program,
4750 with the data segment immediately following the text segment.
4751 However, the actual runtime layout in memory may put the data
4752 somewhere else, so when we send gdb a data base-address, we
4753 use the real data base address and subtract the compile-time
4754 data base-address from it (which is just the length of the
4755 text segment). BSS immediately follows data in both
4756 cases. */
4757 *text_p = text;
4758 *data_p = data - (text_end - text);
4759
4760 return 1;
4761 }
4762 return 0;
4763 }
4764 #endif
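/* Worked example for the offset math above, with hypothetical values:
   if PT_TEXT_ADDR yields 0x10000, PT_TEXT_END_ADDR yields 0x18000 and
   PT_DATA_ADDR yields 0x20000, then *text_p = 0x10000 and
   *data_p = 0x20000 - (0x18000 - 0x10000) = 0x18000, i.e. the
   compile-time-style base in which data still follows text.  */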
4765
4766 static int
4767 linux_qxfer_osdata (const char *annex,
4768 unsigned char *readbuf, unsigned const char *writebuf,
4769 CORE_ADDR offset, int len)
4770 {
4771 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4772 }
4773
4774 /* Convert a native/host siginfo object into/from the siginfo in the
4775 layout of the inferior's architecture. */
4776
4777 static void
4778 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
4779 {
4780 int done = 0;
4781
4782 if (the_low_target.siginfo_fixup != NULL)
4783 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4784
4785 /* If there was no callback, or the callback didn't do anything,
4786 then just do a straight memcpy. */
4787 if (!done)
4788 {
4789 if (direction == 1)
4790 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
4791 else
4792 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
4793 }
4794 }
4795
4796 static int
4797 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
4798 unsigned const char *writebuf, CORE_ADDR offset, int len)
4799 {
4800 int pid;
4801 siginfo_t siginfo;
4802 char inf_siginfo[sizeof (siginfo_t)];
4803
4804 if (current_inferior == NULL)
4805 return -1;
4806
4807 pid = lwpid_of (get_thread_lwp (current_inferior));
4808
4809 if (debug_threads)
4810 debug_printf ("%s siginfo for lwp %d.\n",
4811 readbuf != NULL ? "Reading" : "Writing",
4812 pid);
4813
4814 if (offset >= sizeof (siginfo))
4815 return -1;
4816
4817 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
4818 return -1;
4819
4820 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
4821 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4822 inferior with a 64-bit GDBSERVER should look the same as debugging it
4823 with a 32-bit GDBSERVER, we need to convert it. */
4824 siginfo_fixup (&siginfo, inf_siginfo, 0);
4825
4826 if (offset + len > sizeof (siginfo))
4827 len = sizeof (siginfo) - offset;
4828
4829 if (readbuf != NULL)
4830 memcpy (readbuf, inf_siginfo + offset, len);
4831 else
4832 {
4833 memcpy (inf_siginfo + offset, writebuf, len);
4834
4835 /* Convert back to ptrace layout before flushing it out. */
4836 siginfo_fixup (&siginfo, inf_siginfo, 1);
4837
4838 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
4839 return -1;
4840 }
4841
4842 return len;
4843 }
4844
4845 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
4846 it notifies us when children change state; and it serves as the
4847 handler for the sigsuspend in my_waitpid. */
4848
4849 static void
4850 sigchld_handler (int signo)
4851 {
4852 int old_errno = errno;
4853
4854 if (debug_threads)
4855 {
4856 do
4857 {
4858 /* fprintf is not async-signal-safe, so call write
4859 directly. */
4860 if (write (2, "sigchld_handler\n",
4861 sizeof ("sigchld_handler\n") - 1) < 0)
4862 break; /* just ignore */
4863 } while (0);
4864 }
4865
4866 if (target_is_async_p ())
4867 async_file_mark (); /* trigger a linux_wait */
4868
4869 errno = old_errno;
4870 }
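/* Only async-signal-safe functions may be called from the handler
   above; POSIX lists write but not fprintf, hence the raw write.  A
   compiled-out sketch of the same pattern, with a hypothetical helper
   name:  */
#if 0
static void
example_signal_safe_note (const char *msg, size_t len)
{
  /* write is async-signal-safe.  Short writes and errors are simply
     ignored; nothing safe can be done about them in a handler.  */
  if (write (2, msg, len) < 0)
    ; /* Just ignore.  */
}
#endif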
4871
4872 static int
4873 linux_supports_non_stop (void)
4874 {
4875 return 1;
4876 }
4877
4878 static int
4879 linux_async (int enable)
4880 {
4881 int previous = (linux_event_pipe[0] != -1);
4882
4883 if (debug_threads)
4884 debug_printf ("linux_async (%d), previous=%d\n",
4885 enable, previous);
4886
4887 if (previous != enable)
4888 {
4889 sigset_t mask;
4890 sigemptyset (&mask);
4891 sigaddset (&mask, SIGCHLD);
4892
4893 sigprocmask (SIG_BLOCK, &mask, NULL);
4894
4895 if (enable)
4896 {
4897 if (pipe (linux_event_pipe) == -1)
4898 fatal ("creating event pipe failed.");
4899
4900 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
4901 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
4902
4903 /* Register the event loop handler. */
4904 add_file_handler (linux_event_pipe[0],
4905 handle_target_event, NULL);
4906
4907 /* Always trigger a linux_wait. */
4908 async_file_mark ();
4909 }
4910 else
4911 {
4912 delete_file_handler (linux_event_pipe[0]);
4913
4914 close (linux_event_pipe[0]);
4915 close (linux_event_pipe[1]);
4916 linux_event_pipe[0] = -1;
4917 linux_event_pipe[1] = -1;
4918 }
4919
4920 sigprocmask (SIG_UNBLOCK, &mask, NULL);
4921 }
4922
4923 return previous;
4924 }
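/* The pipe managed above is the classic self-pipe trick: the SIGCHLD
   handler (see sigchld_handler) marks linux_event_pipe[1] with a byte
   via async_file_mark, and the event loop watches linux_event_pipe[0]
   so the wakeup arrives through the normal file-handler path.  Both
   ends are O_NONBLOCK, so marking can never block inside the signal
   handler.  */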
4925
4926 static int
4927 linux_start_non_stop (int nonstop)
4928 {
4929 /* Register or unregister from event-loop accordingly. */
4930 linux_async (nonstop);
4931 return 0;
4932 }
4933
4934 static int
4935 linux_supports_multi_process (void)
4936 {
4937 return 1;
4938 }
4939
4940 static int
4941 linux_supports_disable_randomization (void)
4942 {
4943 #ifdef HAVE_PERSONALITY
4944 return 1;
4945 #else
4946 return 0;
4947 #endif
4948 }
4949
4950 static int
4951 linux_supports_agent (void)
4952 {
4953 return 1;
4954 }
4955
4956 static int
4957 linux_supports_range_stepping (void)
4958 {
4959 if (*the_low_target.supports_range_stepping == NULL)
4960 return 0;
4961
4962 return (*the_low_target.supports_range_stepping) ();
4963 }
4964
4965 /* Enumerate spufs IDs for process PID. */
4966 static int
4967 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
4968 {
4969 int pos = 0;
4970 int written = 0;
4971 char path[128];
4972 DIR *dir;
4973 struct dirent *entry;
4974
4975 sprintf (path, "/proc/%ld/fd", pid);
4976 dir = opendir (path);
4977 if (!dir)
4978 return -1;
4979
4980 rewinddir (dir);
4981 while ((entry = readdir (dir)) != NULL)
4982 {
4983 struct stat st;
4984 struct statfs stfs;
4985 int fd;
4986
4987 fd = atoi (entry->d_name);
4988 if (!fd)
4989 continue;
4990
4991 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
4992 if (stat (path, &st) != 0)
4993 continue;
4994 if (!S_ISDIR (st.st_mode))
4995 continue;
4996
4997 if (statfs (path, &stfs) != 0)
4998 continue;
4999 if (stfs.f_type != SPUFS_MAGIC)
5000 continue;
5001
5002 if (pos >= offset && pos + 4 <= offset + len)
5003 {
5004 *(unsigned int *)(buf + pos - offset) = fd;
5005 written += 4;
5006 }
5007 pos += 4;
5008 }
5009
5010 closedir (dir);
5011 return written;
5012 }
5013
5014 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5015 object type, using the /proc file system. */
5016 static int
5017 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5018 unsigned const char *writebuf,
5019 CORE_ADDR offset, int len)
5020 {
5021 long pid = lwpid_of (get_thread_lwp (current_inferior));
5022 char buf[128];
5023 int fd = 0;
5024 int ret = 0;
5025
5026 if (!writebuf && !readbuf)
5027 return -1;
5028
5029 if (!*annex)
5030 {
5031 if (!readbuf)
5032 return -1;
5033 else
5034 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5035 }
5036
5037 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5038 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
5039 if (fd <= 0)
5040 return -1;
5041
5042 if (offset != 0
5043 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5044 {
5045 close (fd);
5046 return 0;
5047 }
5048
5049 if (writebuf)
5050 ret = write (fd, writebuf, (size_t) len);
5051 else
5052 ret = read (fd, readbuf, (size_t) len);
5053
5054 close (fd);
5055 return ret;
5056 }
5057
5058 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5059 struct target_loadseg
5060 {
5061 /* Core address to which the segment is mapped. */
5062 Elf32_Addr addr;
5063 /* VMA recorded in the program header. */
5064 Elf32_Addr p_vaddr;
5065 /* Size of this segment in memory. */
5066 Elf32_Word p_memsz;
5067 };
5068
5069 # if defined PT_GETDSBT
5070 struct target_loadmap
5071 {
5072 /* Protocol version number, must be zero. */
5073 Elf32_Word version;
5074 /* Pointer to the DSBT table, its size, and the DSBT index. */
5075 unsigned *dsbt_table;
5076 unsigned dsbt_size, dsbt_index;
5077 /* Number of segments in this map. */
5078 Elf32_Word nsegs;
5079 /* The actual memory map. */
5080 struct target_loadseg segs[/*nsegs*/];
5081 };
5082 # define LINUX_LOADMAP PT_GETDSBT
5083 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5084 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5085 # else
5086 struct target_loadmap
5087 {
5088 /* Protocol version number, must be zero. */
5089 Elf32_Half version;
5090 /* Number of segments in this map. */
5091 Elf32_Half nsegs;
5092 /* The actual memory map. */
5093 struct target_loadseg segs[/*nsegs*/];
5094 };
5095 # define LINUX_LOADMAP PTRACE_GETFDPIC
5096 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5097 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5098 # endif
5099
5100 static int
5101 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5102 unsigned char *myaddr, unsigned int len)
5103 {
5104 int pid = lwpid_of (get_thread_lwp (current_inferior));
5105 int addr = -1;
5106 struct target_loadmap *data = NULL;
5107 unsigned int actual_length, copy_length;
5108
5109 if (strcmp (annex, "exec") == 0)
5110 addr = (int) LINUX_LOADMAP_EXEC;
5111 else if (strcmp (annex, "interp") == 0)
5112 addr = (int) LINUX_LOADMAP_INTERP;
5113 else
5114 return -1;
5115
5116 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5117 return -1;
5118
5119 if (data == NULL)
5120 return -1;
5121
5122 actual_length = sizeof (struct target_loadmap)
5123 + sizeof (struct target_loadseg) * data->nsegs;
5124
5125 if (offset < 0 || offset > actual_length)
5126 return -1;
5127
5128 copy_length = actual_length - offset < len ? actual_length - offset : len;
5129 memcpy (myaddr, (char *) data + offset, copy_length);
5130 return copy_length;
5131 }
5132 #else
5133 # define linux_read_loadmap NULL
5134 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5135
5136 static void
5137 linux_process_qsupported (const char *query)
5138 {
5139 if (the_low_target.process_qsupported != NULL)
5140 the_low_target.process_qsupported (query);
5141 }
5142
5143 static int
5144 linux_supports_tracepoints (void)
5145 {
5146 if (*the_low_target.supports_tracepoints == NULL)
5147 return 0;
5148
5149 return (*the_low_target.supports_tracepoints) ();
5150 }
5151
5152 static CORE_ADDR
5153 linux_read_pc (struct regcache *regcache)
5154 {
5155 if (the_low_target.get_pc == NULL)
5156 return 0;
5157
5158 return (*the_low_target.get_pc) (regcache);
5159 }
5160
5161 static void
5162 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5163 {
5164 gdb_assert (the_low_target.set_pc != NULL);
5165
5166 (*the_low_target.set_pc) (regcache, pc);
5167 }
5168
5169 static int
5170 linux_thread_stopped (struct thread_info *thread)
5171 {
5172 return get_thread_lwp (thread)->stopped;
5173 }
5174
5175 /* This exposes stop-all-threads functionality to other modules. */
5176
5177 static void
5178 linux_pause_all (int freeze)
5179 {
5180 stop_all_lwps (freeze, NULL);
5181 }
5182
5183 /* This exposes unstop-all-threads functionality to other gdbserver
5184 modules. */
5185
5186 static void
5187 linux_unpause_all (int unfreeze)
5188 {
5189 unstop_all_lwps (unfreeze, NULL);
5190 }
5191
5192 static int
5193 linux_prepare_to_access_memory (void)
5194 {
5195 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5196 running LWP. */
5197 if (non_stop)
5198 linux_pause_all (1);
5199 return 0;
5200 }
5201
5202 static void
5203 linux_done_accessing_memory (void)
5204 {
5205 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5206 running LWP. */
5207 if (non_stop)
5208 linux_unpause_all (1);
5209 }
5210
5211 static int
5212 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5213 CORE_ADDR collector,
5214 CORE_ADDR lockaddr,
5215 ULONGEST orig_size,
5216 CORE_ADDR *jump_entry,
5217 CORE_ADDR *trampoline,
5218 ULONGEST *trampoline_size,
5219 unsigned char *jjump_pad_insn,
5220 ULONGEST *jjump_pad_insn_size,
5221 CORE_ADDR *adjusted_insn_addr,
5222 CORE_ADDR *adjusted_insn_addr_end,
5223 char *err)
5224 {
5225 return (*the_low_target.install_fast_tracepoint_jump_pad)
5226 (tpoint, tpaddr, collector, lockaddr, orig_size,
5227 jump_entry, trampoline, trampoline_size,
5228 jjump_pad_insn, jjump_pad_insn_size,
5229 adjusted_insn_addr, adjusted_insn_addr_end,
5230 err);
5231 }
5232
5233 static struct emit_ops *
5234 linux_emit_ops (void)
5235 {
5236 if (the_low_target.emit_ops != NULL)
5237 return (*the_low_target.emit_ops) ();
5238 else
5239 return NULL;
5240 }
5241
5242 static int
5243 linux_get_min_fast_tracepoint_insn_len (void)
5244 {
5245 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5246 }
5247
5248 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5249
5250 static int
5251 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5252 CORE_ADDR *phdr_memaddr, int *num_phdr)
5253 {
5254 char filename[PATH_MAX];
5255 int fd;
5256 const int auxv_size = is_elf64
5257 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5258 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5259
5260 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5261
5262 fd = open (filename, O_RDONLY);
5263 if (fd < 0)
5264 return 1;
5265
5266 *phdr_memaddr = 0;
5267 *num_phdr = 0;
5268 while (read (fd, buf, auxv_size) == auxv_size
5269 && (*phdr_memaddr == 0 || *num_phdr == 0))
5270 {
5271 if (is_elf64)
5272 {
5273 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5274
5275 switch (aux->a_type)
5276 {
5277 case AT_PHDR:
5278 *phdr_memaddr = aux->a_un.a_val;
5279 break;
5280 case AT_PHNUM:
5281 *num_phdr = aux->a_un.a_val;
5282 break;
5283 }
5284 }
5285 else
5286 {
5287 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5288
5289 switch (aux->a_type)
5290 {
5291 case AT_PHDR:
5292 *phdr_memaddr = aux->a_un.a_val;
5293 break;
5294 case AT_PHNUM:
5295 *num_phdr = aux->a_un.a_val;
5296 break;
5297 }
5298 }
5299 }
5300
5301 close (fd);
5302
5303 if (*phdr_memaddr == 0 || *num_phdr == 0)
5304 {
5305 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5306 "phdr_memaddr = %ld, phdr_num = %d",
5307 (long) *phdr_memaddr, *num_phdr);
5308 return 2;
5309 }
5310
5311 return 0;
5312 }
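/* Each auxv record is a (type, value) pair, and the loop above stops
   once both AT_PHDR and AT_PHNUM have been seen.  Illustrative 64-bit
   records, with hypothetical values:
     { a_type = AT_PHDR  (3), a_un.a_val = 0x555555554040 }
     { a_type = AT_PHNUM (5), a_un.a_val = 9 }  */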
5313
5314 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5315
5316 static CORE_ADDR
5317 get_dynamic (const int pid, const int is_elf64)
5318 {
5319 CORE_ADDR phdr_memaddr, relocation;
5320 int num_phdr, i;
5321 unsigned char *phdr_buf;
5322 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5323
5324 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5325 return 0;
5326
5327 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5328 phdr_buf = alloca (num_phdr * phdr_size);
5329
5330 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5331 return 0;
5332
5333 /* Compute relocation: it is expected to be 0 for "regular" executables,
5334 non-zero for PIE ones. */
5335 relocation = -1;
5336 for (i = 0; relocation == -1 && i < num_phdr; i++)
5337 if (is_elf64)
5338 {
5339 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5340
5341 if (p->p_type == PT_PHDR)
5342 relocation = phdr_memaddr - p->p_vaddr;
5343 }
5344 else
5345 {
5346 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5347
5348 if (p->p_type == PT_PHDR)
5349 relocation = phdr_memaddr - p->p_vaddr;
5350 }
5351
5352 if (relocation == -1)
5353 {
5354 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
5355 real world executables, including PIE executables, always have
5356 PT_PHDR present. PT_PHDR is not present in some shared libraries or
5357 in fpc (Free Pascal 2.4) binaries, but neither of those needs or
5358 provides DT_DEBUG anyway (fpc binaries are statically linked).
5359
5360 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
5361
5362 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
5363
5364 return 0;
5365 }
5366
5367 for (i = 0; i < num_phdr; i++)
5368 {
5369 if (is_elf64)
5370 {
5371 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5372
5373 if (p->p_type == PT_DYNAMIC)
5374 return p->p_vaddr + relocation;
5375 }
5376 else
5377 {
5378 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5379
5380 if (p->p_type == PT_DYNAMIC)
5381 return p->p_vaddr + relocation;
5382 }
5383 }
5384
5385 return 0;
5386 }
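/* Worked example of the relocation above, with hypothetical
   addresses: if a PIE's PT_PHDR records p_vaddr = 0x40 but AT_PHDR
   reports 0x555555554040, then relocation = 0x555555554040 - 0x40
   = 0x555555554000 (the load base), and a PT_DYNAMIC with
   p_vaddr = 0x1db8 yields &_DYNAMIC at 0x555555555db8.  For a
   non-PIE executable the two addresses match and relocation is 0.  */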
5387
5388 /* Return &_r_debug in the inferior, or -1 if not present. Return value
5389 can be 0 if the inferior does not yet have the library list initialized.
5390 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
5391 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
5392
5393 static CORE_ADDR
5394 get_r_debug (const int pid, const int is_elf64)
5395 {
5396 CORE_ADDR dynamic_memaddr;
5397 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
5398 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
5399 CORE_ADDR map = -1;
5400
5401 dynamic_memaddr = get_dynamic (pid, is_elf64);
5402 if (dynamic_memaddr == 0)
5403 return map;
5404
5405 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
5406 {
5407 if (is_elf64)
5408 {
5409 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
5410 #ifdef DT_MIPS_RLD_MAP
5411 union
5412 {
5413 Elf64_Xword map;
5414 unsigned char buf[sizeof (Elf64_Xword)];
5415 }
5416 rld_map;
5417
5418 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5419 {
5420 if (linux_read_memory (dyn->d_un.d_val,
5421 rld_map.buf, sizeof (rld_map.buf)) == 0)
5422 return rld_map.map;
5423 else
5424 break;
5425 }
5426 #endif /* DT_MIPS_RLD_MAP */
5427
5428 if (dyn->d_tag == DT_DEBUG && map == -1)
5429 map = dyn->d_un.d_val;
5430
5431 if (dyn->d_tag == DT_NULL)
5432 break;
5433 }
5434 else
5435 {
5436 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
5437 #ifdef DT_MIPS_RLD_MAP
5438 union
5439 {
5440 Elf32_Word map;
5441 unsigned char buf[sizeof (Elf32_Word)];
5442 }
5443 rld_map;
5444
5445 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5446 {
5447 if (linux_read_memory (dyn->d_un.d_val,
5448 rld_map.buf, sizeof (rld_map.buf)) == 0)
5449 return rld_map.map;
5450 else
5451 break;
5452 }
5453 #endif /* DT_MIPS_RLD_MAP */
5454
5455 if (dyn->d_tag == DT_DEBUG && map == -1)
5456 map = dyn->d_un.d_val;
5457
5458 if (dyn->d_tag == DT_NULL)
5459 break;
5460 }
5461
5462 dynamic_memaddr += dyn_size;
5463 }
5464
5465 return map;
5466 }
5467
5468 /* Read one pointer from MEMADDR in the inferior. */
5469
5470 static int
5471 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
5472 {
5473 int ret;
5474
5475 /* Go through a union so this works on either big or little endian
5476 hosts, when the inferior's pointer size is smaller than the size
5477 of CORE_ADDR. It is assumed that the inferior's endianness is the
5478 same as the superior's. */
5479 union
5480 {
5481 CORE_ADDR core_addr;
5482 unsigned int ui;
5483 unsigned char uc;
5484 } addr;
5485
5486 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
5487 if (ret == 0)
5488 {
5489 if (ptr_size == sizeof (CORE_ADDR))
5490 *ptr = addr.core_addr;
5491 else if (ptr_size == sizeof (unsigned int))
5492 *ptr = addr.ui;
5493 else
5494 gdb_assert_not_reached ("unhandled pointer size");
5495 }
5496 return ret;
5497 }
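/* Sketch of why the union above works: on a 64-bit little-endian
   host reading a 4-byte inferior pointer, linux_read_memory fills
   addr.uc through addr.uc + 3, which alias the low-order bytes of
   addr.ui, so "*ptr = addr.ui" zero-extends the 32-bit value into a
   CORE_ADDR with no manual byte shuffling.  */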
5498
5499 struct link_map_offsets
5500 {
5501 /* Offset and size of r_debug.r_version. */
5502 int r_version_offset;
5503
5504 /* Offset and size of r_debug.r_map. */
5505 int r_map_offset;
5506
5507 /* Offset to l_addr field in struct link_map. */
5508 int l_addr_offset;
5509
5510 /* Offset to l_name field in struct link_map. */
5511 int l_name_offset;
5512
5513 /* Offset to l_ld field in struct link_map. */
5514 int l_ld_offset;
5515
5516 /* Offset to l_next field in struct link_map. */
5517 int l_next_offset;
5518
5519 /* Offset to l_prev field in struct link_map. */
5520 int l_prev_offset;
5521 };
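/* The offsets stored in the tables below mirror glibc's struct
   r_debug and struct link_map layouts for ILP32 and LP64 SVR4
   targets.  A compiled-out, hypothetical self-check for a host whose
   <link.h> matches the inferior's ABI:  */
#if 0
#include <stddef.h>
#include <link.h>

static void
example_check_link_map_offsets (void)
{
  /* r_version is an int, so r_map starts at the next pointer-aligned
     offset: 4 on ILP32, 8 on LP64.  */
  gdb_assert (offsetof (struct r_debug, r_map) == sizeof (void *));
  /* l_addr and l_name precede l_ld, giving 8 on ILP32 and 16 on LP64.  */
  gdb_assert (offsetof (struct link_map, l_ld) == 2 * sizeof (void *));
}
#endif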
5522
5523 /* Construct qXfer:libraries-svr4:read reply. */
5524
5525 static int
5526 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
5527 unsigned const char *writebuf,
5528 CORE_ADDR offset, int len)
5529 {
5530 char *document;
5531 unsigned document_len;
5532 struct process_info_private *const priv = current_process ()->private;
5533 char filename[PATH_MAX];
5534 int pid, is_elf64;
5535
5536 static const struct link_map_offsets lmo_32bit_offsets =
5537 {
5538 0, /* r_version offset. */
5539 4, /* r_debug.r_map offset. */
5540 0, /* l_addr offset in link_map. */
5541 4, /* l_name offset in link_map. */
5542 8, /* l_ld offset in link_map. */
5543 12, /* l_next offset in link_map. */
5544 16 /* l_prev offset in link_map. */
5545 };
5546
5547 static const struct link_map_offsets lmo_64bit_offsets =
5548 {
5549 0, /* r_version offset. */
5550 8, /* r_debug.r_map offset. */
5551 0, /* l_addr offset in link_map. */
5552 8, /* l_name offset in link_map. */
5553 16, /* l_ld offset in link_map. */
5554 24, /* l_next offset in link_map. */
5555 32 /* l_prev offset in link_map. */
5556 };
5557 const struct link_map_offsets *lmo;
5558 unsigned int machine;
5559 int ptr_size;
5560 CORE_ADDR lm_addr = 0, lm_prev = 0;
5561 int allocated = 1024;
5562 char *p;
5563 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
5564 int header_done = 0;
5565
5566 if (writebuf != NULL)
5567 return -2;
5568 if (readbuf == NULL)
5569 return -1;
5570
5571 pid = lwpid_of (get_thread_lwp (current_inferior));
5572 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
5573 is_elf64 = elf_64_file_p (filename, &machine);
5574 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
5575 ptr_size = is_elf64 ? 8 : 4;
5576
5577 while (annex[0] != '\0')
5578 {
5579 const char *sep;
5580 CORE_ADDR *addrp;
5581 int len;
5582
5583 sep = strchr (annex, '=');
5584 if (sep == NULL)
5585 break;
5586
5587 len = sep - annex;
5588 if (len == 5 && strncmp (annex, "start", 5) == 0)
5589 addrp = &lm_addr;
5590 else if (len == 4 && strncmp (annex, "prev", 4) == 0)
5591 addrp = &lm_prev;
5592 else
5593 {
5594 annex = strchr (sep, ';');
5595 if (annex == NULL)
5596 break;
5597 annex++;
5598 continue;
5599 }
5600
5601 annex = decode_address_to_semicolon (addrp, sep + 1);
5602 }
5603
5604 if (lm_addr == 0)
5605 {
5606 int r_version = 0;
5607
5608 if (priv->r_debug == 0)
5609 priv->r_debug = get_r_debug (pid, is_elf64);
5610
5611 /* We failed to find DT_DEBUG. This situation will not change
5612 for this inferior - do not retry it. Report it to GDB as
5613 E01; see the GDB solib-svr4.c side for the reasons. */
5614 if (priv->r_debug == (CORE_ADDR) -1)
5615 return -1;
5616
5617 if (priv->r_debug != 0)
5618 {
5619 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
5620 (unsigned char *) &r_version,
5621 sizeof (r_version)) != 0
5622 || r_version != 1)
5623 {
5624 warning ("unexpected r_debug version %d", r_version);
5625 }
5626 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
5627 &lm_addr, ptr_size) != 0)
5628 {
5629 warning ("unable to read r_map from 0x%lx",
5630 (long) priv->r_debug + lmo->r_map_offset);
5631 }
5632 }
5633 }
5634
5635 document = xmalloc (allocated);
5636 strcpy (document, "<library-list-svr4 version=\"1.0\"");
5637 p = document + strlen (document);
5638
5639 while (lm_addr
5640 && read_one_ptr (lm_addr + lmo->l_name_offset,
5641 &l_name, ptr_size) == 0
5642 && read_one_ptr (lm_addr + lmo->l_addr_offset,
5643 &l_addr, ptr_size) == 0
5644 && read_one_ptr (lm_addr + lmo->l_ld_offset,
5645 &l_ld, ptr_size) == 0
5646 && read_one_ptr (lm_addr + lmo->l_prev_offset,
5647 &l_prev, ptr_size) == 0
5648 && read_one_ptr (lm_addr + lmo->l_next_offset,
5649 &l_next, ptr_size) == 0)
5650 {
5651 unsigned char libname[PATH_MAX];
5652
5653 if (lm_prev != l_prev)
5654 {
5655 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
5656 (long) lm_prev, (long) l_prev);
5657 break;
5658 }
5659
5660 /* Ignore the first entry even if it has a valid name, as the first
5661 entry corresponds to the main executable. The first entry should
5662 not be skipped if the dynamic loader was loaded late by a static
5663 executable (see solib-svr4.c parameter ignore_first). But in that
5664 case the main executable does not have PT_DYNAMIC present and this
5665 function has already exited above due to a failed get_r_debug. */
5666 if (lm_prev == 0)
5667 {
5668 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
5669 p = p + strlen (p);
5670 }
5671 else
5672 {
5673 /* Not checking for error because reading may stop before
5674 we've got PATH_MAX worth of characters. */
5675 libname[0] = '\0';
5676 linux_read_memory (l_name, libname, sizeof (libname) - 1);
5677 libname[sizeof (libname) - 1] = '\0';
5678 if (libname[0] != '\0')
5679 {
5680 /* 6x the size for xml_escape_text below. */
5681 size_t len = 6 * strlen ((char *) libname);
5682 char *name;
5683
5684 if (!header_done)
5685 {
5686 /* Terminate `<library-list-svr4'. */
5687 *p++ = '>';
5688 header_done = 1;
5689 }
5690
5691 while (allocated < p - document + len + 200)
5692 {
5693 /* Expand to guarantee sufficient storage. */
5694 uintptr_t document_len = p - document;
5695
5696 document = xrealloc (document, 2 * allocated);
5697 allocated *= 2;
5698 p = document + document_len;
5699 }
5700
5701 name = xml_escape_text ((char *) libname);
5702 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
5703 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
5704 name, (unsigned long) lm_addr,
5705 (unsigned long) l_addr, (unsigned long) l_ld);
5706 free (name);
5707 }
5708 }
5709
5710 lm_prev = lm_addr;
5711 lm_addr = l_next;
5712 }
5713
5714 if (!header_done)
5715 {
5716 /* Empty list; terminate `<library-list-svr4'. */
5717 strcpy (p, "/>");
5718 }
5719 else
5720 strcpy (p, "</library-list-svr4>");
5721
5722 document_len = strlen (document);
5723 if (offset < document_len)
5724 document_len -= offset;
5725 else
5726 document_len = 0;
5727 if (len > document_len)
5728 len = document_len;
5729
5730 memcpy (readbuf, document + offset, len);
5731 xfree (document);
5732
5733 return len;
5734 }
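/* Example exchange, with hypothetical addresses: GDB may pass an
   annex such as "start=7ffff7ffe190;prev=0;" to resume a partial
   list, and the document built above then has the shape

   <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
     <library name="/lib64/libc.so.6" lm="0x7ffff7fc2000"
              l_addr="0x7ffff7a15000" l_ld="0x7ffff7db9ba0"/>
   </library-list-svr4>  */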
5735
5736 #ifdef HAVE_LINUX_BTRACE
5737
5738 /* See to_enable_btrace target method. */
5739
5740 static struct btrace_target_info *
5741 linux_low_enable_btrace (ptid_t ptid)
5742 {
5743 struct btrace_target_info *tinfo;
5744
5745 tinfo = linux_enable_btrace (ptid);
5746
5747 if (tinfo != NULL)
5748 {
5749 struct thread_info *thread = find_thread_ptid (ptid);
5750 struct regcache *regcache = get_thread_regcache (thread, 0);
5751
      tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
    }

  return tinfo;
}

/* See to_disable_btrace target method.  */

static int
linux_low_disable_btrace (struct btrace_target_info *tinfo)
{
  enum btrace_error err;

  err = linux_disable_btrace (tinfo);
  return (err == BTRACE_ERR_NONE ? 0 : -1);
}

/* See to_read_btrace target method.  */

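/* For illustration, a reply containing two trace blocks has the shape
   below (the addresses are made up):

     <!DOCTYPE btrace SYSTEM "btrace.dtd">
     <btrace version="1.0">
     <block begin="0x400400" end="0x400420"/>
     <block begin="0x4004f0" end="0x400510"/>
     </btrace>  */
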
static int
linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
                       int type)
{
  VEC (btrace_block_s) *btrace;
  struct btrace_block *block;
  enum btrace_error err;
  int i;

  btrace = NULL;
  err = linux_read_btrace (&btrace, tinfo, type);
  if (err != BTRACE_ERR_NONE)
    {
      if (err == BTRACE_ERR_OVERFLOW)
        buffer_grow_str0 (buffer, "E.Overflow.");
      else
        buffer_grow_str0 (buffer, "E.Generic Error.");

      return -1;
    }

  buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
  buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");

  for (i = 0; VEC_iterate (btrace_block_s, btrace, i, block); i++)
    buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
                       paddress (block->begin), paddress (block->end));

  buffer_grow_str0 (buffer, "</btrace>\n");

  VEC_free (btrace_block_s, btrace);

  return 0;
}
#endif /* HAVE_LINUX_BTRACE */

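/* The target_ops vector for Linux.  A NULL entry marks an operation
   that is not supported in this configuration.  */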
static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_mourn,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_prepare_to_access_memory,
  linux_done_accessing_memory,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_insert_point,
  linux_remove_point,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
#if defined(__UCLIBC__) && defined(HAS_NOMMU)         \
    && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
    && defined(PT_TEXT_END_ADDR)
  linux_read_offsets,
#else
  NULL,
#endif
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,
#endif
  linux_qxfer_spu,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,
#endif
  linux_common_core_of_thread,
  linux_read_loadmap,
  linux_process_qsupported,
  linux_supports_tracepoints,
  linux_read_pc,
  linux_write_pc,
  linux_thread_stopped,
  NULL,
  linux_pause_all,
  linux_unpause_all,
  linux_cancel_breakpoints,
  linux_stabilize_threads,
  linux_install_fast_tracepoint_jump_pad,
  linux_emit_ops,
  linux_supports_disable_randomization,
  linux_get_min_fast_tracepoint_insn_len,
  linux_qxfer_libraries_svr4,
  linux_supports_agent,
#ifdef HAVE_LINUX_BTRACE
  linux_supports_btrace,
  linux_low_enable_btrace,
  linux_low_disable_btrace,
  linux_low_read_btrace,
#else
  NULL,
  NULL,
  NULL,
  NULL,
#endif
  linux_supports_range_stepping,
};

static void
linux_init_signals (void)
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
  signal (__SIGRTMIN + 1, SIG_IGN);
#endif
}

#ifdef HAVE_LINUX_REGSETS
void
initialize_regsets_info (struct regsets_info *info)
{
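  /* Count the regsets in INFO's array; the array is terminated by an
     entry whose size field is negative.  */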
  for (info->num_regsets = 0;
       info->regsets[info->num_regsets].size >= 0;
       info->num_regsets++)
    ;
}
#endif

void
initialize_low (void)
{
  struct sigaction sigchld_action;

  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (&linux_target_ops);
  set_breakpoint_data (the_low_target.breakpoint,
                       the_low_target.breakpoint_len);
  linux_init_signals ();
  linux_ptrace_init_warnings ();

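  /* Arrange to be notified of child process state changes.  SA_RESTART
     lets most interrupted system calls be restarted rather than fail
     with EINTR.  */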
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);

  initialize_low_arch ();
}