Move have_ptrace_getregset to linux-low.c
[binutils-gdb.git] / gdb / gdbserver / linux-low.c
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2015 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
22 #include "agent.h"
23 #include "tdesc.h"
24 #include "rsp-low.h"
25
26 #include "nat/linux-nat.h"
27 #include "nat/linux-waitpid.h"
28 #include "gdb_wait.h"
29 #include "nat/gdb_ptrace.h"
30 #include "nat/linux-ptrace.h"
31 #include "nat/linux-procfs.h"
32 #include "nat/linux-personality.h"
33 #include <signal.h>
34 #include <sys/ioctl.h>
35 #include <fcntl.h>
36 #include <unistd.h>
37 #include <sys/syscall.h>
38 #include <sched.h>
39 #include <ctype.h>
40 #include <pwd.h>
41 #include <sys/types.h>
42 #include <dirent.h>
43 #include <sys/stat.h>
44 #include <sys/vfs.h>
45 #include <sys/uio.h>
46 #include "filestuff.h"
47 #include "tracepoint.h"
48 #include "hostio.h"
49 #ifndef ELFMAG0
50 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
51 then ELFMAG0 will have been defined. If it didn't get included by
52 gdb_proc_service.h then including it will likely introduce a duplicate
53 definition of elf_fpregset_t. */
54 #include <elf.h>
55 #endif
56 #include "nat/linux-namespaces.h"
57
58 #ifndef SPUFS_MAGIC
59 #define SPUFS_MAGIC 0x23c9b64e
60 #endif
61
62 #ifdef HAVE_PERSONALITY
63 # include <sys/personality.h>
64 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
65 # define ADDR_NO_RANDOMIZE 0x0040000
66 # endif
67 #endif
68
69 #ifndef O_LARGEFILE
70 #define O_LARGEFILE 0
71 #endif
72
73 #ifndef W_STOPCODE
74 #define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
75 #endif
76
77 /* This is the kernel's hard limit. Not to be confused with
78 SIGRTMIN. */
79 #ifndef __SIGRTMIN
80 #define __SIGRTMIN 32
81 #endif
82
83 /* Some targets did not define these ptrace constants from the start,
84 so gdbserver defines them locally here. In the future, these may
85 be removed after they are added to asm/ptrace.h. */
86 #if !(defined(PT_TEXT_ADDR) \
87 || defined(PT_DATA_ADDR) \
88 || defined(PT_TEXT_END_ADDR))
89 #if defined(__mcoldfire__)
90 /* These are still undefined in 3.10 kernels. */
91 #define PT_TEXT_ADDR 49*4
92 #define PT_DATA_ADDR 50*4
93 #define PT_TEXT_END_ADDR 51*4
94 /* BFIN already defines these since at least 2.6.32 kernels. */
95 #elif defined(BFIN)
96 #define PT_TEXT_ADDR 220
97 #define PT_TEXT_END_ADDR 224
98 #define PT_DATA_ADDR 228
99 /* These are still undefined in 3.10 kernels. */
100 #elif defined(__TMS320C6X__)
101 #define PT_TEXT_ADDR (0x10000*4)
102 #define PT_DATA_ADDR (0x10004*4)
103 #define PT_TEXT_END_ADDR (0x10008*4)
104 #endif
105 #endif
106
107 #ifdef HAVE_LINUX_BTRACE
108 # include "nat/linux-btrace.h"
109 # include "btrace-common.h"
110 #endif
111
112 #ifndef HAVE_ELF32_AUXV_T
113 /* Copied from glibc's elf.h. */
114 typedef struct
115 {
116 uint32_t a_type; /* Entry type */
117 union
118 {
119 uint32_t a_val; /* Integer value */
120 /* We use to have pointer elements added here. We cannot do that,
121 though, since it does not work when using 32-bit definitions
122 on 64-bit platforms and vice versa. */
123 } a_un;
124 } Elf32_auxv_t;
125 #endif
126
127 #ifndef HAVE_ELF64_AUXV_T
128 /* Copied from glibc's elf.h. */
129 typedef struct
130 {
131 uint64_t a_type; /* Entry type */
132 union
133 {
134 uint64_t a_val; /* Integer value */
135 /* We use to have pointer elements added here. We cannot do that,
136 though, since it does not work when using 32-bit definitions
137 on 64-bit platforms and vice versa. */
138 } a_un;
139 } Elf64_auxv_t;
140 #endif
141
142 /* Does the current host support PTRACE_GETREGSET? */
143 int have_ptrace_getregset = -1;
144
145 /* LWP accessors. */
146
147 /* See nat/linux-nat.h. */
148
149 ptid_t
150 ptid_of_lwp (struct lwp_info *lwp)
151 {
152 return ptid_of (get_lwp_thread (lwp));
153 }
154
155 /* See nat/linux-nat.h. */
156
157 void
158 lwp_set_arch_private_info (struct lwp_info *lwp,
159 struct arch_lwp_info *info)
160 {
161 lwp->arch_private = info;
162 }
163
164 /* See nat/linux-nat.h. */
165
166 struct arch_lwp_info *
167 lwp_arch_private_info (struct lwp_info *lwp)
168 {
169 return lwp->arch_private;
170 }
171
172 /* See nat/linux-nat.h. */
173
174 int
175 lwp_is_stopped (struct lwp_info *lwp)
176 {
177 return lwp->stopped;
178 }
179
180 /* See nat/linux-nat.h. */
181
182 enum target_stop_reason
183 lwp_stop_reason (struct lwp_info *lwp)
184 {
185 return lwp->stop_reason;
186 }
187
188 /* A list of all unknown processes which receive stop signals. Some
189 other process will presumably claim each of these as forked
190 children momentarily. */
191
192 struct simple_pid_list
193 {
194 /* The process ID. */
195 int pid;
196
197 /* The status as reported by waitpid. */
198 int status;
199
200 /* Next in chain. */
201 struct simple_pid_list *next;
202 };
203 struct simple_pid_list *stopped_pids;
204
205 /* Trivial list manipulation functions to keep track of a list of new
206 stopped processes. */
207
208 static void
209 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
210 {
211 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
212
213 new_pid->pid = pid;
214 new_pid->status = status;
215 new_pid->next = *listp;
216 *listp = new_pid;
217 }
218
219 static int
220 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
221 {
222 struct simple_pid_list **p;
223
224 for (p = listp; *p != NULL; p = &(*p)->next)
225 if ((*p)->pid == pid)
226 {
227 struct simple_pid_list *next = (*p)->next;
228
229 *statusp = (*p)->status;
230 xfree (*p);
231 *p = next;
232 return 1;
233 }
234 return 0;
235 }
236
237 enum stopping_threads_kind
238 {
239 /* Not stopping threads presently. */
240 NOT_STOPPING_THREADS,
241
242 /* Stopping threads. */
243 STOPPING_THREADS,
244
245 /* Stopping and suspending threads. */
246 STOPPING_AND_SUSPENDING_THREADS
247 };
248
249 /* This is set while stop_all_lwps is in effect. */
250 enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
251
252 /* FIXME make into a target method? */
253 int using_threads = 1;
254
255 /* True if we're presently stabilizing threads (moving them out of
256 jump pads). */
257 static int stabilizing_threads;
258
259 static void linux_resume_one_lwp (struct lwp_info *lwp,
260 int step, int signal, siginfo_t *info);
261 static void linux_resume (struct thread_resume *resume_info, size_t n);
262 static void stop_all_lwps (int suspend, struct lwp_info *except);
263 static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
264 static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
265 int *wstat, int options);
266 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
267 static struct lwp_info *add_lwp (ptid_t ptid);
268 static int linux_stopped_by_watchpoint (void);
269 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
270 static void proceed_all_lwps (void);
271 static int finish_step_over (struct lwp_info *lwp);
272 static int kill_lwp (unsigned long lwpid, int signo);
273
274 /* When the event-loop is doing a step-over, this points at the thread
275 being stepped. */
276 ptid_t step_over_bkpt;
277
278 /* True if the low target can hardware single-step. Such targets
279 don't need a BREAKPOINT_REINSERT_ADDR callback. */
280
281 static int
282 can_hardware_single_step (void)
283 {
284 return (the_low_target.breakpoint_reinsert_addr == NULL);
285 }
286
287 /* True if the low target supports memory breakpoints. If so, we'll
288 have a GET_PC implementation. */
289
290 static int
291 supports_breakpoints (void)
292 {
293 return (the_low_target.get_pc != NULL);
294 }
295
296 /* Returns true if this target can support fast tracepoints. This
297 does not mean that the in-process agent has been loaded in the
298 inferior. */
299
300 static int
301 supports_fast_tracepoints (void)
302 {
303 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
304 }
305
306 /* True if LWP is stopped in its stepping range. */
307
308 static int
309 lwp_in_step_range (struct lwp_info *lwp)
310 {
311 CORE_ADDR pc = lwp->stop_pc;
312
313 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
314 }
315
316 struct pending_signals
317 {
318 int signal;
319 siginfo_t info;
320 struct pending_signals *prev;
321 };
322
323 /* The read/write ends of the pipe registered as waitable file in the
324 event loop. */
325 static int linux_event_pipe[2] = { -1, -1 };
326
327 /* True if we're currently in async mode. */
328 #define target_is_async_p() (linux_event_pipe[0] != -1)
329
330 static void send_sigstop (struct lwp_info *lwp);
331 static void wait_for_sigstop (void);
332
333 /* Return non-zero if HEADER is a 64-bit ELF file. */
334
335 static int
336 elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
337 {
338 if (header->e_ident[EI_MAG0] == ELFMAG0
339 && header->e_ident[EI_MAG1] == ELFMAG1
340 && header->e_ident[EI_MAG2] == ELFMAG2
341 && header->e_ident[EI_MAG3] == ELFMAG3)
342 {
343 *machine = header->e_machine;
344 return header->e_ident[EI_CLASS] == ELFCLASS64;
345
346 }
347 *machine = EM_NONE;
348 return -1;
349 }
350
351 /* Return non-zero if FILE is a 64-bit ELF file,
352 zero if the file is not a 64-bit ELF file,
353 and -1 if the file is not accessible or doesn't exist. */
354
355 static int
356 elf_64_file_p (const char *file, unsigned int *machine)
357 {
358 Elf64_Ehdr header;
359 int fd;
360
361 fd = open (file, O_RDONLY);
362 if (fd < 0)
363 return -1;
364
365 if (read (fd, &header, sizeof (header)) != sizeof (header))
366 {
367 close (fd);
368 return 0;
369 }
370 close (fd);
371
372 return elf_64_header_p (&header, machine);
373 }
374
375 /* Accepts an integer PID; Returns true if the executable PID is
376 running is a 64-bit ELF file.. */
377
378 int
379 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
380 {
381 char file[PATH_MAX];
382
383 sprintf (file, "/proc/%d/exe", pid);
384 return elf_64_file_p (file, machine);
385 }
386
387 static void
388 delete_lwp (struct lwp_info *lwp)
389 {
390 struct thread_info *thr = get_lwp_thread (lwp);
391
392 if (debug_threads)
393 debug_printf ("deleting %ld\n", lwpid_of (thr));
394
395 remove_thread (thr);
396 free (lwp->arch_private);
397 free (lwp);
398 }
399
400 /* Add a process to the common process list, and set its private
401 data. */
402
403 static struct process_info *
404 linux_add_process (int pid, int attached)
405 {
406 struct process_info *proc;
407
408 proc = add_process (pid, attached);
409 proc->priv = xcalloc (1, sizeof (*proc->priv));
410
411 if (the_low_target.new_process != NULL)
412 proc->priv->arch_private = the_low_target.new_process ();
413
414 return proc;
415 }
416
417 static CORE_ADDR get_pc (struct lwp_info *lwp);
418
419 /* Handle a GNU/Linux extended wait response. If we see a clone
420 event, we need to add the new LWP to our list (and return 0 so as
421 not to report the trap to higher layers). */
422
423 static int
424 handle_extended_wait (struct lwp_info *event_lwp, int wstat)
425 {
426 int event = linux_ptrace_get_extended_event (wstat);
427 struct thread_info *event_thr = get_lwp_thread (event_lwp);
428 struct lwp_info *new_lwp;
429
430 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
431 || (event == PTRACE_EVENT_CLONE))
432 {
433 ptid_t ptid;
434 unsigned long new_pid;
435 int ret, status;
436
437 /* Get the pid of the new lwp. */
438 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
439 &new_pid);
440
441 /* If we haven't already seen the new PID stop, wait for it now. */
442 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
443 {
444 /* The new child has a pending SIGSTOP. We can't affect it until it
445 hits the SIGSTOP, but we're already attached. */
446
447 ret = my_waitpid (new_pid, &status, __WALL);
448
449 if (ret == -1)
450 perror_with_name ("waiting for new child");
451 else if (ret != new_pid)
452 warning ("wait returned unexpected PID %d", ret);
453 else if (!WIFSTOPPED (status))
454 warning ("wait returned unexpected status 0x%x", status);
455 }
456
457 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
458 {
459 struct process_info *parent_proc;
460 struct process_info *child_proc;
461 struct lwp_info *child_lwp;
462 struct thread_info *child_thr;
463 struct target_desc *tdesc;
464
465 ptid = ptid_build (new_pid, new_pid, 0);
466
467 if (debug_threads)
468 {
469 debug_printf ("HEW: Got fork event from LWP %ld, "
470 "new child is %d\n",
471 ptid_get_lwp (ptid_of (event_thr)),
472 ptid_get_pid (ptid));
473 }
474
475 /* Add the new process to the tables and clone the breakpoint
476 lists of the parent. We need to do this even if the new process
477 will be detached, since we will need the process object and the
478 breakpoints to remove any breakpoints from memory when we
479 detach, and the client side will access registers. */
480 child_proc = linux_add_process (new_pid, 0);
481 gdb_assert (child_proc != NULL);
482 child_lwp = add_lwp (ptid);
483 gdb_assert (child_lwp != NULL);
484 child_lwp->stopped = 1;
485 child_lwp->must_set_ptrace_flags = 1;
486 child_lwp->status_pending_p = 0;
487 child_thr = get_lwp_thread (child_lwp);
488 child_thr->last_resume_kind = resume_stop;
489 child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;
490
491 parent_proc = get_thread_process (event_thr);
492 child_proc->attached = parent_proc->attached;
493 clone_all_breakpoints (&child_proc->breakpoints,
494 &child_proc->raw_breakpoints,
495 parent_proc->breakpoints);
496
497 tdesc = xmalloc (sizeof (struct target_desc));
498 copy_target_description (tdesc, parent_proc->tdesc);
499 child_proc->tdesc = tdesc;
500
501 /* Clone arch-specific process data. */
502 if (the_low_target.new_fork != NULL)
503 the_low_target.new_fork (parent_proc, child_proc);
504
505 /* Save fork info in the parent thread. */
506 if (event == PTRACE_EVENT_FORK)
507 event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
508 else if (event == PTRACE_EVENT_VFORK)
509 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;
510
511 event_lwp->waitstatus.value.related_pid = ptid;
512
513 /* The status_pending field contains bits denoting the
514 extended event, so when the pending event is handled,
515 the handler will look at lwp->waitstatus. */
516 event_lwp->status_pending_p = 1;
517 event_lwp->status_pending = wstat;
518
519 /* Report the event. */
520 return 0;
521 }
522
523 if (debug_threads)
524 debug_printf ("HEW: Got clone event "
525 "from LWP %ld, new child is LWP %ld\n",
526 lwpid_of (event_thr), new_pid);
527
528 ptid = ptid_build (pid_of (event_thr), new_pid, 0);
529 new_lwp = add_lwp (ptid);
530
531 /* Either we're going to immediately resume the new thread
532 or leave it stopped. linux_resume_one_lwp is a nop if it
533 thinks the thread is currently running, so set this first
534 before calling linux_resume_one_lwp. */
535 new_lwp->stopped = 1;
536
537 /* If we're suspending all threads, leave this one suspended
538 too. */
539 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
540 new_lwp->suspended = 1;
541
542 /* Normally we will get the pending SIGSTOP. But in some cases
543 we might get another signal delivered to the group first.
544 If we do get another signal, be sure not to lose it. */
545 if (WSTOPSIG (status) != SIGSTOP)
546 {
547 new_lwp->stop_expected = 1;
548 new_lwp->status_pending_p = 1;
549 new_lwp->status_pending = status;
550 }
551
552 /* Don't report the event. */
553 return 1;
554 }
555 else if (event == PTRACE_EVENT_VFORK_DONE)
556 {
557 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
558
559 /* Report the event. */
560 return 0;
561 }
562
563 internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
564 }
565
566 /* Return the PC as read from the regcache of LWP, without any
567 adjustment. */
568
569 static CORE_ADDR
570 get_pc (struct lwp_info *lwp)
571 {
572 struct thread_info *saved_thread;
573 struct regcache *regcache;
574 CORE_ADDR pc;
575
576 if (the_low_target.get_pc == NULL)
577 return 0;
578
579 saved_thread = current_thread;
580 current_thread = get_lwp_thread (lwp);
581
582 regcache = get_thread_regcache (current_thread, 1);
583 pc = (*the_low_target.get_pc) (regcache);
584
585 if (debug_threads)
586 debug_printf ("pc is 0x%lx\n", (long) pc);
587
588 current_thread = saved_thread;
589 return pc;
590 }
591
592 /* This function should only be called if LWP got a SIGTRAP.
593 The SIGTRAP could mean several things.
594
595 On i386, where decr_pc_after_break is non-zero:
596
597 If we were single-stepping this process using PTRACE_SINGLESTEP, we
598 will get only the one SIGTRAP. The value of $eip will be the next
599 instruction. If the instruction we stepped over was a breakpoint,
600 we need to decrement the PC.
601
602 If we continue the process using PTRACE_CONT, we will get a
603 SIGTRAP when we hit a breakpoint. The value of $eip will be
604 the instruction after the breakpoint (i.e. needs to be
605 decremented). If we report the SIGTRAP to GDB, we must also
606 report the undecremented PC. If the breakpoint is removed, we
607 must resume at the decremented PC.
608
609 On a non-decr_pc_after_break machine with hardware or kernel
610 single-step:
611
612 If we either single-step a breakpoint instruction, or continue and
613 hit a breakpoint instruction, our PC will point at the breakpoint
614 instruction. */
615
616 static int
617 check_stopped_by_breakpoint (struct lwp_info *lwp)
618 {
619 CORE_ADDR pc;
620 CORE_ADDR sw_breakpoint_pc;
621 struct thread_info *saved_thread;
622 #if USE_SIGTRAP_SIGINFO
623 siginfo_t siginfo;
624 #endif
625
626 if (the_low_target.get_pc == NULL)
627 return 0;
628
629 pc = get_pc (lwp);
630 sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;
631
632 /* breakpoint_at reads from the current thread. */
633 saved_thread = current_thread;
634 current_thread = get_lwp_thread (lwp);
635
636 #if USE_SIGTRAP_SIGINFO
637 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
638 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
639 {
640 if (siginfo.si_signo == SIGTRAP)
641 {
642 if (siginfo.si_code == GDB_ARCH_TRAP_BRKPT)
643 {
644 if (debug_threads)
645 {
646 struct thread_info *thr = get_lwp_thread (lwp);
647
648 debug_printf ("CSBB: %s stopped by software breakpoint\n",
649 target_pid_to_str (ptid_of (thr)));
650 }
651
652 /* Back up the PC if necessary. */
653 if (pc != sw_breakpoint_pc)
654 {
655 struct regcache *regcache
656 = get_thread_regcache (current_thread, 1);
657 (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
658 }
659
660 lwp->stop_pc = sw_breakpoint_pc;
661 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
662 current_thread = saved_thread;
663 return 1;
664 }
665 else if (siginfo.si_code == TRAP_HWBKPT)
666 {
667 if (debug_threads)
668 {
669 struct thread_info *thr = get_lwp_thread (lwp);
670
671 debug_printf ("CSBB: %s stopped by hardware "
672 "breakpoint/watchpoint\n",
673 target_pid_to_str (ptid_of (thr)));
674 }
675
676 lwp->stop_pc = pc;
677 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
678 current_thread = saved_thread;
679 return 1;
680 }
681 else if (siginfo.si_code == TRAP_TRACE)
682 {
683 if (debug_threads)
684 {
685 struct thread_info *thr = get_lwp_thread (lwp);
686
687 debug_printf ("CSBB: %s stopped by trace\n",
688 target_pid_to_str (ptid_of (thr)));
689 }
690 }
691 }
692 }
693 #else
694 /* We may have just stepped a breakpoint instruction. E.g., in
695 non-stop mode, GDB first tells the thread A to step a range, and
696 then the user inserts a breakpoint inside the range. In that
697 case we need to report the breakpoint PC. */
698 if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
699 && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
700 {
701 if (debug_threads)
702 {
703 struct thread_info *thr = get_lwp_thread (lwp);
704
705 debug_printf ("CSBB: %s stopped by software breakpoint\n",
706 target_pid_to_str (ptid_of (thr)));
707 }
708
709 /* Back up the PC if necessary. */
710 if (pc != sw_breakpoint_pc)
711 {
712 struct regcache *regcache
713 = get_thread_regcache (current_thread, 1);
714 (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
715 }
716
717 lwp->stop_pc = sw_breakpoint_pc;
718 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
719 current_thread = saved_thread;
720 return 1;
721 }
722
723 if (hardware_breakpoint_inserted_here (pc))
724 {
725 if (debug_threads)
726 {
727 struct thread_info *thr = get_lwp_thread (lwp);
728
729 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
730 target_pid_to_str (ptid_of (thr)));
731 }
732
733 lwp->stop_pc = pc;
734 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
735 current_thread = saved_thread;
736 return 1;
737 }
738 #endif
739
740 current_thread = saved_thread;
741 return 0;
742 }
743
744 static struct lwp_info *
745 add_lwp (ptid_t ptid)
746 {
747 struct lwp_info *lwp;
748
749 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
750 memset (lwp, 0, sizeof (*lwp));
751
752 if (the_low_target.new_thread != NULL)
753 the_low_target.new_thread (lwp);
754
755 lwp->thread = add_thread (ptid, lwp);
756
757 return lwp;
758 }
759
760 /* Start an inferior process and returns its pid.
761 ALLARGS is a vector of program-name and args. */
762
763 static int
764 linux_create_inferior (char *program, char **allargs)
765 {
766 struct lwp_info *new_lwp;
767 int pid;
768 ptid_t ptid;
769 struct cleanup *restore_personality
770 = maybe_disable_address_space_randomization (disable_randomization);
771
772 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
773 pid = vfork ();
774 #else
775 pid = fork ();
776 #endif
777 if (pid < 0)
778 perror_with_name ("fork");
779
780 if (pid == 0)
781 {
782 close_most_fds ();
783 ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
784
785 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
786 signal (__SIGRTMIN + 1, SIG_DFL);
787 #endif
788
789 setpgid (0, 0);
790
791 /* If gdbserver is connected to gdb via stdio, redirect the inferior's
792 stdout to stderr so that inferior i/o doesn't corrupt the connection.
793 Also, redirect stdin to /dev/null. */
794 if (remote_connection_is_stdio ())
795 {
796 close (0);
797 open ("/dev/null", O_RDONLY);
798 dup2 (2, 1);
799 if (write (2, "stdin/stdout redirected\n",
800 sizeof ("stdin/stdout redirected\n") - 1) < 0)
801 {
802 /* Errors ignored. */;
803 }
804 }
805
806 execv (program, allargs);
807 if (errno == ENOENT)
808 execvp (program, allargs);
809
810 fprintf (stderr, "Cannot exec %s: %s.\n", program,
811 strerror (errno));
812 fflush (stderr);
813 _exit (0177);
814 }
815
816 do_cleanups (restore_personality);
817
818 linux_add_process (pid, 0);
819
820 ptid = ptid_build (pid, pid, 0);
821 new_lwp = add_lwp (ptid);
822 new_lwp->must_set_ptrace_flags = 1;
823
824 return pid;
825 }
826
827 /* Implement the arch_setup target_ops method. */
828
829 static void
830 linux_arch_setup (void)
831 {
832 the_low_target.arch_setup ();
833 }
834
835 /* Attach to an inferior process. Returns 0 on success, ERRNO on
836 error. */
837
838 int
839 linux_attach_lwp (ptid_t ptid)
840 {
841 struct lwp_info *new_lwp;
842 int lwpid = ptid_get_lwp (ptid);
843
844 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
845 != 0)
846 return errno;
847
848 new_lwp = add_lwp (ptid);
849
850 /* We need to wait for SIGSTOP before being able to make the next
851 ptrace call on this LWP. */
852 new_lwp->must_set_ptrace_flags = 1;
853
854 if (linux_proc_pid_is_stopped (lwpid))
855 {
856 if (debug_threads)
857 debug_printf ("Attached to a stopped process\n");
858
859 /* The process is definitely stopped. It is in a job control
860 stop, unless the kernel predates the TASK_STOPPED /
861 TASK_TRACED distinction, in which case it might be in a
862 ptrace stop. Make sure it is in a ptrace stop; from there we
863 can kill it, signal it, et cetera.
864
865 First make sure there is a pending SIGSTOP. Since we are
866 already attached, the process can not transition from stopped
867 to running without a PTRACE_CONT; so we know this signal will
868 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
869 probably already in the queue (unless this kernel is old
870 enough to use TASK_STOPPED for ptrace stops); but since
871 SIGSTOP is not an RT signal, it can only be queued once. */
872 kill_lwp (lwpid, SIGSTOP);
873
874 /* Finally, resume the stopped process. This will deliver the
875 SIGSTOP (or a higher priority signal, just like normal
876 PTRACE_ATTACH), which we'll catch later on. */
877 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
878 }
879
880 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
881 brings it to a halt.
882
883 There are several cases to consider here:
884
885 1) gdbserver has already attached to the process and is being notified
886 of a new thread that is being created.
887 In this case we should ignore that SIGSTOP and resume the
888 process. This is handled below by setting stop_expected = 1,
889 and the fact that add_thread sets last_resume_kind ==
890 resume_continue.
891
892 2) This is the first thread (the process thread), and we're attaching
893 to it via attach_inferior.
894 In this case we want the process thread to stop.
895 This is handled by having linux_attach set last_resume_kind ==
896 resume_stop after we return.
897
898 If the pid we are attaching to is also the tgid, we attach to and
899 stop all the existing threads. Otherwise, we attach to pid and
900 ignore any other threads in the same group as this pid.
901
902 3) GDB is connecting to gdbserver and is requesting an enumeration of all
903 existing threads.
904 In this case we want the thread to stop.
905 FIXME: This case is currently not properly handled.
906 We should wait for the SIGSTOP but don't. Things work apparently
907 because enough time passes between when we ptrace (ATTACH) and when
908 gdb makes the next ptrace call on the thread.
909
910 On the other hand, if we are currently trying to stop all threads, we
911 should treat the new thread as if we had sent it a SIGSTOP. This works
912 because we are guaranteed that the add_lwp call above added us to the
913 end of the list, and so the new thread has not yet reached
914 wait_for_sigstop (but will). */
915 new_lwp->stop_expected = 1;
916
917 return 0;
918 }
919
920 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
921 already attached. Returns true if a new LWP is found, false
922 otherwise. */
923
924 static int
925 attach_proc_task_lwp_callback (ptid_t ptid)
926 {
927 /* Is this a new thread? */
928 if (find_thread_ptid (ptid) == NULL)
929 {
930 int lwpid = ptid_get_lwp (ptid);
931 int err;
932
933 if (debug_threads)
934 debug_printf ("Found new lwp %d\n", lwpid);
935
936 err = linux_attach_lwp (ptid);
937
938 /* Be quiet if we simply raced with the thread exiting. EPERM
939 is returned if the thread's task still exists, and is marked
940 as exited or zombie, as well as other conditions, so in that
941 case, confirm the status in /proc/PID/status. */
942 if (err == ESRCH
943 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
944 {
945 if (debug_threads)
946 {
947 debug_printf ("Cannot attach to lwp %d: "
948 "thread is gone (%d: %s)\n",
949 lwpid, err, strerror (err));
950 }
951 }
952 else if (err != 0)
953 {
954 warning (_("Cannot attach to lwp %d: %s"),
955 lwpid,
956 linux_ptrace_attach_fail_reason_string (ptid, err));
957 }
958
959 return 1;
960 }
961 return 0;
962 }
963
964 /* Attach to PID. If PID is the tgid, attach to it and all
965 of its threads. */
966
967 static int
968 linux_attach (unsigned long pid)
969 {
970 ptid_t ptid = ptid_build (pid, pid, 0);
971 int err;
972
973 /* Attach to PID. We will check for other threads
974 soon. */
975 err = linux_attach_lwp (ptid);
976 if (err != 0)
977 error ("Cannot attach to process %ld: %s",
978 pid, linux_ptrace_attach_fail_reason_string (ptid, err));
979
980 linux_add_process (pid, 1);
981
982 if (!non_stop)
983 {
984 struct thread_info *thread;
985
986 /* Don't ignore the initial SIGSTOP if we just attached to this
987 process. It will be collected by wait shortly. */
988 thread = find_thread_ptid (ptid_build (pid, pid, 0));
989 thread->last_resume_kind = resume_stop;
990 }
991
992 /* We must attach to every LWP. If /proc is mounted, use that to
993 find them now. On the one hand, the inferior may be using raw
994 clone instead of using pthreads. On the other hand, even if it
995 is using pthreads, GDB may not be connected yet (thread_db needs
996 to do symbol lookups, through qSymbol). Also, thread_db walks
997 structures in the inferior's address space to find the list of
998 threads/LWPs, and those structures may well be corrupted. Note
999 that once thread_db is loaded, we'll still use it to list threads
1000 and associate pthread info with each LWP. */
1001 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
1002 return 0;
1003 }
1004
1005 struct counter
1006 {
1007 int pid;
1008 int count;
1009 };
1010
1011 static int
1012 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
1013 {
1014 struct counter *counter = args;
1015
1016 if (ptid_get_pid (entry->id) == counter->pid)
1017 {
1018 if (++counter->count > 1)
1019 return 1;
1020 }
1021
1022 return 0;
1023 }
1024
1025 static int
1026 last_thread_of_process_p (int pid)
1027 {
1028 struct counter counter = { pid , 0 };
1029
1030 return (find_inferior (&all_threads,
1031 second_thread_of_pid_p, &counter) == NULL);
1032 }
1033
1034 /* Kill LWP. */
1035
1036 static void
1037 linux_kill_one_lwp (struct lwp_info *lwp)
1038 {
1039 struct thread_info *thr = get_lwp_thread (lwp);
1040 int pid = lwpid_of (thr);
1041
1042 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1043 there is no signal context, and ptrace(PTRACE_KILL) (or
1044 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1045 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1046 alternative is to kill with SIGKILL. We only need one SIGKILL
1047 per process, not one for each thread. But since we still support
1048 linuxthreads, and we also support debugging programs using raw
1049 clone without CLONE_THREAD, we send one for each thread. For
1050 years, we used PTRACE_KILL only, so we're being a bit paranoid
1051 about some old kernels where PTRACE_KILL might work better
1052 (dubious if there are any such, but that's why it's paranoia), so
1053 we try SIGKILL first, PTRACE_KILL second, and so we're fine
1054 everywhere. */
1055
1056 errno = 0;
1057 kill_lwp (pid, SIGKILL);
1058 if (debug_threads)
1059 {
1060 int save_errno = errno;
1061
1062 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
1063 target_pid_to_str (ptid_of (thr)),
1064 save_errno ? strerror (save_errno) : "OK");
1065 }
1066
1067 errno = 0;
1068 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1069 if (debug_threads)
1070 {
1071 int save_errno = errno;
1072
1073 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
1074 target_pid_to_str (ptid_of (thr)),
1075 save_errno ? strerror (save_errno) : "OK");
1076 }
1077 }
1078
1079 /* Kill LWP and wait for it to die. */
1080
1081 static void
1082 kill_wait_lwp (struct lwp_info *lwp)
1083 {
1084 struct thread_info *thr = get_lwp_thread (lwp);
1085 int pid = ptid_get_pid (ptid_of (thr));
1086 int lwpid = ptid_get_lwp (ptid_of (thr));
1087 int wstat;
1088 int res;
1089
1090 if (debug_threads)
1091 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);
1092
1093 do
1094 {
1095 linux_kill_one_lwp (lwp);
1096
1097 /* Make sure it died. Notes:
1098
1099 - The loop is most likely unnecessary.
1100
1101 - We don't use linux_wait_for_event as that could delete lwps
1102 while we're iterating over them. We're not interested in
1103 any pending status at this point, only in making sure all
1104 wait status on the kernel side are collected until the
1105 process is reaped.
1106
1107 - We don't use __WALL here as the __WALL emulation relies on
1108 SIGCHLD, and killing a stopped process doesn't generate
1109 one, nor an exit status.
1110 */
1111 res = my_waitpid (lwpid, &wstat, 0);
1112 if (res == -1 && errno == ECHILD)
1113 res = my_waitpid (lwpid, &wstat, __WCLONE);
1114 } while (res > 0 && WIFSTOPPED (wstat));
1115
1116 /* Even if it was stopped, the child may have already disappeared.
1117 E.g., if it was killed by SIGKILL. */
1118 if (res < 0 && errno != ECHILD)
1119 perror_with_name ("kill_wait_lwp");
1120 }
1121
1122 /* Callback for `find_inferior'. Kills an lwp of a given process,
1123 except the leader. */
1124
1125 static int
1126 kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
1127 {
1128 struct thread_info *thread = (struct thread_info *) entry;
1129 struct lwp_info *lwp = get_thread_lwp (thread);
1130 int pid = * (int *) args;
1131
1132 if (ptid_get_pid (entry->id) != pid)
1133 return 0;
1134
1135 /* We avoid killing the first thread here, because of a Linux kernel (at
1136 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1137 the children get a chance to be reaped, it will remain a zombie
1138 forever. */
1139
1140 if (lwpid_of (thread) == pid)
1141 {
1142 if (debug_threads)
1143 debug_printf ("lkop: is last of process %s\n",
1144 target_pid_to_str (entry->id));
1145 return 0;
1146 }
1147
1148 kill_wait_lwp (lwp);
1149 return 0;
1150 }
1151
1152 static int
1153 linux_kill (int pid)
1154 {
1155 struct process_info *process;
1156 struct lwp_info *lwp;
1157
1158 process = find_process_pid (pid);
1159 if (process == NULL)
1160 return -1;
1161
1162 /* If we're killing a running inferior, make sure it is stopped
1163 first, as PTRACE_KILL will not work otherwise. */
1164 stop_all_lwps (0, NULL);
1165
1166 find_inferior (&all_threads, kill_one_lwp_callback , &pid);
1167
1168 /* See the comment in linux_kill_one_lwp. We did not kill the first
1169 thread in the list, so do so now. */
1170 lwp = find_lwp_pid (pid_to_ptid (pid));
1171
1172 if (lwp == NULL)
1173 {
1174 if (debug_threads)
1175 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1176 pid);
1177 }
1178 else
1179 kill_wait_lwp (lwp);
1180
1181 the_target->mourn (process);
1182
1183 /* Since we presently can only stop all lwps of all processes, we
1184 need to unstop lwps of other processes. */
1185 unstop_all_lwps (0, NULL);
1186 return 0;
1187 }
1188
1189 /* Get pending signal of THREAD, for detaching purposes. This is the
1190 signal the thread last stopped for, which we need to deliver to the
1191 thread when detaching, otherwise, it'd be suppressed/lost. */
1192
1193 static int
1194 get_detach_signal (struct thread_info *thread)
1195 {
1196 enum gdb_signal signo = GDB_SIGNAL_0;
1197 int status;
1198 struct lwp_info *lp = get_thread_lwp (thread);
1199
1200 if (lp->status_pending_p)
1201 status = lp->status_pending;
1202 else
1203 {
1204 /* If the thread had been suspended by gdbserver, and it stopped
1205 cleanly, then it'll have stopped with SIGSTOP. But we don't
1206 want to deliver that SIGSTOP. */
1207 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
1208 || thread->last_status.value.sig == GDB_SIGNAL_0)
1209 return 0;
1210
1211 /* Otherwise, we may need to deliver the signal we
1212 intercepted. */
1213 status = lp->last_status;
1214 }
1215
1216 if (!WIFSTOPPED (status))
1217 {
1218 if (debug_threads)
1219 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1220 target_pid_to_str (ptid_of (thread)));
1221 return 0;
1222 }
1223
1224 /* Extended wait statuses aren't real SIGTRAPs. */
1225 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
1226 {
1227 if (debug_threads)
1228 debug_printf ("GPS: lwp %s had stopped with extended "
1229 "status: no pending signal\n",
1230 target_pid_to_str (ptid_of (thread)));
1231 return 0;
1232 }
1233
1234 signo = gdb_signal_from_host (WSTOPSIG (status));
1235
1236 if (program_signals_p && !program_signals[signo])
1237 {
1238 if (debug_threads)
1239 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1240 target_pid_to_str (ptid_of (thread)),
1241 gdb_signal_to_string (signo));
1242 return 0;
1243 }
1244 else if (!program_signals_p
1245 /* If we have no way to know which signals GDB does not
1246 want to have passed to the program, assume
1247 SIGTRAP/SIGINT, which is GDB's default. */
1248 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1249 {
1250 if (debug_threads)
1251 debug_printf ("GPS: lwp %s had signal %s, "
1252 "but we don't know if we should pass it. "
1253 "Default to not.\n",
1254 target_pid_to_str (ptid_of (thread)),
1255 gdb_signal_to_string (signo));
1256 return 0;
1257 }
1258 else
1259 {
1260 if (debug_threads)
1261 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1262 target_pid_to_str (ptid_of (thread)),
1263 gdb_signal_to_string (signo));
1264
1265 return WSTOPSIG (status);
1266 }
1267 }
1268
1269 static int
1270 linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
1271 {
1272 struct thread_info *thread = (struct thread_info *) entry;
1273 struct lwp_info *lwp = get_thread_lwp (thread);
1274 int pid = * (int *) args;
1275 int sig;
1276
1277 if (ptid_get_pid (entry->id) != pid)
1278 return 0;
1279
1280 /* If there is a pending SIGSTOP, get rid of it. */
1281 if (lwp->stop_expected)
1282 {
1283 if (debug_threads)
1284 debug_printf ("Sending SIGCONT to %s\n",
1285 target_pid_to_str (ptid_of (thread)));
1286
1287 kill_lwp (lwpid_of (thread), SIGCONT);
1288 lwp->stop_expected = 0;
1289 }
1290
1291 /* Flush any pending changes to the process's registers. */
1292 regcache_invalidate_thread (thread);
1293
1294 /* Pass on any pending signal for this thread. */
1295 sig = get_detach_signal (thread);
1296
1297 /* Finally, let it resume. */
1298 if (the_low_target.prepare_to_resume != NULL)
1299 the_low_target.prepare_to_resume (lwp);
1300 if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1301 (PTRACE_TYPE_ARG4) (long) sig) < 0)
1302 error (_("Can't detach %s: %s"),
1303 target_pid_to_str (ptid_of (thread)),
1304 strerror (errno));
1305
1306 delete_lwp (lwp);
1307 return 0;
1308 }
1309
1310 static int
1311 linux_detach (int pid)
1312 {
1313 struct process_info *process;
1314
1315 process = find_process_pid (pid);
1316 if (process == NULL)
1317 return -1;
1318
1319 /* Stop all threads before detaching. First, ptrace requires that
1320 the thread is stopped to sucessfully detach. Second, thread_db
1321 may need to uninstall thread event breakpoints from memory, which
1322 only works with a stopped process anyway. */
1323 stop_all_lwps (0, NULL);
1324
1325 #ifdef USE_THREAD_DB
1326 thread_db_detach (process);
1327 #endif
1328
1329 /* Stabilize threads (move out of jump pads). */
1330 stabilize_threads ();
1331
1332 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
1333
1334 the_target->mourn (process);
1335
1336 /* Since we presently can only stop all lwps of all processes, we
1337 need to unstop lwps of other processes. */
1338 unstop_all_lwps (0, NULL);
1339 return 0;
1340 }
1341
1342 /* Remove all LWPs that belong to process PROC from the lwp list. */
1343
1344 static int
1345 delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
1346 {
1347 struct thread_info *thread = (struct thread_info *) entry;
1348 struct lwp_info *lwp = get_thread_lwp (thread);
1349 struct process_info *process = proc;
1350
1351 if (pid_of (thread) == pid_of (process))
1352 delete_lwp (lwp);
1353
1354 return 0;
1355 }
1356
1357 static void
1358 linux_mourn (struct process_info *process)
1359 {
1360 struct process_info_private *priv;
1361
1362 #ifdef USE_THREAD_DB
1363 thread_db_mourn (process);
1364 #endif
1365
1366 find_inferior (&all_threads, delete_lwp_callback, process);
1367
1368 /* Freeing all private data. */
1369 priv = process->priv;
1370 free (priv->arch_private);
1371 free (priv);
1372 process->priv = NULL;
1373
1374 remove_process (process);
1375 }
1376
1377 static void
1378 linux_join (int pid)
1379 {
1380 int status, ret;
1381
1382 do {
1383 ret = my_waitpid (pid, &status, 0);
1384 if (WIFEXITED (status) || WIFSIGNALED (status))
1385 break;
1386 } while (ret != -1 || errno != ECHILD);
1387 }
1388
1389 /* Return nonzero if the given thread is still alive. */
1390 static int
1391 linux_thread_alive (ptid_t ptid)
1392 {
1393 struct lwp_info *lwp = find_lwp_pid (ptid);
1394
1395 /* We assume we always know if a thread exits. If a whole process
1396 exited but we still haven't been able to report it to GDB, we'll
1397 hold on to the last lwp of the dead process. */
1398 if (lwp != NULL)
1399 return !lwp->dead;
1400 else
1401 return 0;
1402 }
1403
1404 /* Return 1 if this lwp still has an interesting status pending. If
1405 not (e.g., it had stopped for a breakpoint that is gone), return
1406 false. */
1407
1408 static int
1409 thread_still_has_status_pending_p (struct thread_info *thread)
1410 {
1411 struct lwp_info *lp = get_thread_lwp (thread);
1412
1413 if (!lp->status_pending_p)
1414 return 0;
1415
1416 /* If we got a `vCont;t', but we haven't reported a stop yet, do
1417 report any status pending the LWP may have. */
1418 if (thread->last_resume_kind == resume_stop
1419 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
1420 return 0;
1421
1422 if (thread->last_resume_kind != resume_stop
1423 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1424 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
1425 {
1426 struct thread_info *saved_thread;
1427 CORE_ADDR pc;
1428 int discard = 0;
1429
1430 gdb_assert (lp->last_status != 0);
1431
1432 pc = get_pc (lp);
1433
1434 saved_thread = current_thread;
1435 current_thread = thread;
1436
1437 if (pc != lp->stop_pc)
1438 {
1439 if (debug_threads)
1440 debug_printf ("PC of %ld changed\n",
1441 lwpid_of (thread));
1442 discard = 1;
1443 }
1444
1445 #if !USE_SIGTRAP_SIGINFO
1446 else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1447 && !(*the_low_target.breakpoint_at) (pc))
1448 {
1449 if (debug_threads)
1450 debug_printf ("previous SW breakpoint of %ld gone\n",
1451 lwpid_of (thread));
1452 discard = 1;
1453 }
1454 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
1455 && !hardware_breakpoint_inserted_here (pc))
1456 {
1457 if (debug_threads)
1458 debug_printf ("previous HW breakpoint of %ld gone\n",
1459 lwpid_of (thread));
1460 discard = 1;
1461 }
1462 #endif
1463
1464 current_thread = saved_thread;
1465
1466 if (discard)
1467 {
1468 if (debug_threads)
1469 debug_printf ("discarding pending breakpoint status\n");
1470 lp->status_pending_p = 0;
1471 return 0;
1472 }
1473 }
1474
1475 return 1;
1476 }
1477
1478 /* Return 1 if this lwp has an interesting status pending. */
1479 static int
1480 status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
1481 {
1482 struct thread_info *thread = (struct thread_info *) entry;
1483 struct lwp_info *lp = get_thread_lwp (thread);
1484 ptid_t ptid = * (ptid_t *) arg;
1485
1486 /* Check if we're only interested in events from a specific process
1487 or a specific LWP. */
1488 if (!ptid_match (ptid_of (thread), ptid))
1489 return 0;
1490
1491 if (lp->status_pending_p
1492 && !thread_still_has_status_pending_p (thread))
1493 {
1494 linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1495 return 0;
1496 }
1497
1498 return lp->status_pending_p;
1499 }
1500
1501 static int
1502 same_lwp (struct inferior_list_entry *entry, void *data)
1503 {
1504 ptid_t ptid = *(ptid_t *) data;
1505 int lwp;
1506
1507 if (ptid_get_lwp (ptid) != 0)
1508 lwp = ptid_get_lwp (ptid);
1509 else
1510 lwp = ptid_get_pid (ptid);
1511
1512 if (ptid_get_lwp (entry->id) == lwp)
1513 return 1;
1514
1515 return 0;
1516 }
1517
1518 struct lwp_info *
1519 find_lwp_pid (ptid_t ptid)
1520 {
1521 struct inferior_list_entry *thread
1522 = find_inferior (&all_threads, same_lwp, &ptid);
1523
1524 if (thread == NULL)
1525 return NULL;
1526
1527 return get_thread_lwp ((struct thread_info *) thread);
1528 }
1529
1530 /* Return the number of known LWPs in the tgid given by PID. */
1531
1532 static int
1533 num_lwps (int pid)
1534 {
1535 struct inferior_list_entry *inf, *tmp;
1536 int count = 0;
1537
1538 ALL_INFERIORS (&all_threads, inf, tmp)
1539 {
1540 if (ptid_get_pid (inf->id) == pid)
1541 count++;
1542 }
1543
1544 return count;
1545 }
1546
1547 /* The arguments passed to iterate_over_lwps. */
1548
1549 struct iterate_over_lwps_args
1550 {
1551 /* The FILTER argument passed to iterate_over_lwps. */
1552 ptid_t filter;
1553
1554 /* The CALLBACK argument passed to iterate_over_lwps. */
1555 iterate_over_lwps_ftype *callback;
1556
1557 /* The DATA argument passed to iterate_over_lwps. */
1558 void *data;
1559 };
1560
1561 /* Callback for find_inferior used by iterate_over_lwps to filter
1562 calls to the callback supplied to that function. Returning a
1563 nonzero value causes find_inferiors to stop iterating and return
1564 the current inferior_list_entry. Returning zero indicates that
1565 find_inferiors should continue iterating. */
1566
1567 static int
1568 iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
1569 {
1570 struct iterate_over_lwps_args *args
1571 = (struct iterate_over_lwps_args *) args_p;
1572
1573 if (ptid_match (entry->id, args->filter))
1574 {
1575 struct thread_info *thr = (struct thread_info *) entry;
1576 struct lwp_info *lwp = get_thread_lwp (thr);
1577
1578 return (*args->callback) (lwp, args->data);
1579 }
1580
1581 return 0;
1582 }
1583
1584 /* See nat/linux-nat.h. */
1585
1586 struct lwp_info *
1587 iterate_over_lwps (ptid_t filter,
1588 iterate_over_lwps_ftype callback,
1589 void *data)
1590 {
1591 struct iterate_over_lwps_args args = {filter, callback, data};
1592 struct inferior_list_entry *entry;
1593
1594 entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
1595 if (entry == NULL)
1596 return NULL;
1597
1598 return get_thread_lwp ((struct thread_info *) entry);
1599 }
1600
1601 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1602 their exits until all other threads in the group have exited. */
1603
1604 static void
1605 check_zombie_leaders (void)
1606 {
1607 struct process_info *proc, *tmp;
1608
1609 ALL_PROCESSES (proc, tmp)
1610 {
1611 pid_t leader_pid = pid_of (proc);
1612 struct lwp_info *leader_lp;
1613
1614 leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
1615
1616 if (debug_threads)
1617 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1618 "num_lwps=%d, zombie=%d\n",
1619 leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
1620 linux_proc_pid_is_zombie (leader_pid));
1621
1622 if (leader_lp != NULL
1623 /* Check if there are other threads in the group, as we may
1624 have raced with the inferior simply exiting. */
1625 && !last_thread_of_process_p (leader_pid)
1626 && linux_proc_pid_is_zombie (leader_pid))
1627 {
1628 /* A leader zombie can mean one of two things:
1629
1630 - It exited, and there's an exit status pending
1631 available, or only the leader exited (not the whole
1632 program). In the latter case, we can't waitpid the
1633 leader's exit status until all other threads are gone.
1634
1635 - There are 3 or more threads in the group, and a thread
1636 other than the leader exec'd. On an exec, the Linux
1637 kernel destroys all other threads (except the execing
1638 one) in the thread group, and resets the execing thread's
1639 tid to the tgid. No exit notification is sent for the
1640 execing thread -- from the ptracer's perspective, it
1641 appears as though the execing thread just vanishes.
1642 Until we reap all other threads except the leader and the
1643 execing thread, the leader will be zombie, and the
1644 execing thread will be in `D (disc sleep)'. As soon as
1645 all other threads are reaped, the execing thread changes
1646 it's tid to the tgid, and the previous (zombie) leader
1647 vanishes, giving place to the "new" leader. We could try
1648 distinguishing the exit and exec cases, by waiting once
1649 more, and seeing if something comes out, but it doesn't
1650 sound useful. The previous leader _does_ go away, and
1651 we'll re-add the new one once we see the exec event
1652 (which is just the same as what would happen if the
1653 previous leader did exit voluntarily before some other
1654 thread execs). */
1655
1656 if (debug_threads)
1657 fprintf (stderr,
1658 "CZL: Thread group leader %d zombie "
1659 "(it exited, or another thread execd).\n",
1660 leader_pid);
1661
1662 delete_lwp (leader_lp);
1663 }
1664 }
1665 }
1666
1667 /* Callback for `find_inferior'. Returns the first LWP that is not
1668 stopped. ARG is a PTID filter. */
1669
1670 static int
1671 not_stopped_callback (struct inferior_list_entry *entry, void *arg)
1672 {
1673 struct thread_info *thr = (struct thread_info *) entry;
1674 struct lwp_info *lwp;
1675 ptid_t filter = *(ptid_t *) arg;
1676
1677 if (!ptid_match (ptid_of (thr), filter))
1678 return 0;
1679
1680 lwp = get_thread_lwp (thr);
1681 if (!lwp->stopped)
1682 return 1;
1683
1684 return 0;
1685 }
1686
1687 /* This function should only be called if the LWP got a SIGTRAP.
1688
1689 Handle any tracepoint steps or hits. Return true if a tracepoint
1690 event was handled, 0 otherwise. */
1691
1692 static int
1693 handle_tracepoints (struct lwp_info *lwp)
1694 {
1695 struct thread_info *tinfo = get_lwp_thread (lwp);
1696 int tpoint_related_event = 0;
1697
1698 gdb_assert (lwp->suspended == 0);
1699
1700 /* If this tracepoint hit causes a tracing stop, we'll immediately
1701 uninsert tracepoints. To do this, we temporarily pause all
1702 threads, unpatch away, and then unpause threads. We need to make
1703 sure the unpausing doesn't resume LWP too. */
1704 lwp->suspended++;
1705
1706 /* And we need to be sure that any all-threads-stopping doesn't try
1707 to move threads out of the jump pads, as it could deadlock the
1708 inferior (LWP could be in the jump pad, maybe even holding the
1709 lock.) */
1710
1711 /* Do any necessary step collect actions. */
1712 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1713
1714 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1715
1716 /* See if we just hit a tracepoint and do its main collect
1717 actions. */
1718 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1719
1720 lwp->suspended--;
1721
1722 gdb_assert (lwp->suspended == 0);
1723 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
1724
1725 if (tpoint_related_event)
1726 {
1727 if (debug_threads)
1728 debug_printf ("got a tracepoint event\n");
1729 return 1;
1730 }
1731
1732 return 0;
1733 }
1734
1735 /* Convenience wrapper. Returns true if LWP is presently collecting a
1736 fast tracepoint. */
1737
1738 static int
1739 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1740 struct fast_tpoint_collect_status *status)
1741 {
1742 CORE_ADDR thread_area;
1743 struct thread_info *thread = get_lwp_thread (lwp);
1744
1745 if (the_low_target.get_thread_area == NULL)
1746 return 0;
1747
1748 /* Get the thread area address. This is used to recognize which
1749 thread is which when tracing with the in-process agent library.
1750 We don't read anything from the address, and treat it as opaque;
1751 it's the address itself that we assume is unique per-thread. */
1752 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
1753 return 0;
1754
1755 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1756 }
1757
1758 /* The reason we resume in the caller, is because we want to be able
1759 to pass lwp->status_pending as WSTAT, and we need to clear
1760 status_pending_p before resuming, otherwise, linux_resume_one_lwp
1761 refuses to resume. */
1762
1763 static int
1764 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
1765 {
1766 struct thread_info *saved_thread;
1767
1768 saved_thread = current_thread;
1769 current_thread = get_lwp_thread (lwp);
1770
1771 if ((wstat == NULL
1772 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1773 && supports_fast_tracepoints ()
1774 && agent_loaded_p ())
1775 {
1776 struct fast_tpoint_collect_status status;
1777 int r;
1778
1779 if (debug_threads)
1780 debug_printf ("Checking whether LWP %ld needs to move out of the "
1781 "jump pad.\n",
1782 lwpid_of (current_thread));
1783
1784 r = linux_fast_tracepoint_collecting (lwp, &status);
1785
1786 if (wstat == NULL
1787 || (WSTOPSIG (*wstat) != SIGILL
1788 && WSTOPSIG (*wstat) != SIGFPE
1789 && WSTOPSIG (*wstat) != SIGSEGV
1790 && WSTOPSIG (*wstat) != SIGBUS))
1791 {
1792 lwp->collecting_fast_tracepoint = r;
1793
1794 if (r != 0)
1795 {
1796 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
1797 {
1798 /* Haven't executed the original instruction yet.
1799 Set breakpoint there, and wait till it's hit,
1800 then single-step until exiting the jump pad. */
1801 lwp->exit_jump_pad_bkpt
1802 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1803 }
1804
1805 if (debug_threads)
1806 debug_printf ("Checking whether LWP %ld needs to move out of "
1807 "the jump pad...it does\n",
1808 lwpid_of (current_thread));
1809 current_thread = saved_thread;
1810
1811 return 1;
1812 }
1813 }
1814 else
1815 {
1816 /* If we get a synchronous signal while collecting, *and*
1817 while executing the (relocated) original instruction,
1818 reset the PC to point at the tpoint address, before
1819 reporting to GDB. Otherwise, it's an IPA lib bug: just
1820 report the signal to GDB, and pray for the best. */
1821
1822 lwp->collecting_fast_tracepoint = 0;
1823
1824 if (r != 0
1825 && (status.adjusted_insn_addr <= lwp->stop_pc
1826 && lwp->stop_pc < status.adjusted_insn_addr_end))
1827 {
1828 siginfo_t info;
1829 struct regcache *regcache;
1830
1831 /* The si_addr on a few signals references the address
1832 of the faulting instruction. Adjust that as
1833 well. */
1834 if ((WSTOPSIG (*wstat) == SIGILL
1835 || WSTOPSIG (*wstat) == SIGFPE
1836 || WSTOPSIG (*wstat) == SIGBUS
1837 || WSTOPSIG (*wstat) == SIGSEGV)
1838 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
1839 (PTRACE_TYPE_ARG3) 0, &info) == 0
1840 /* Final check just to make sure we don't clobber
1841 the siginfo of non-kernel-sent signals. */
1842 && (uintptr_t) info.si_addr == lwp->stop_pc)
1843 {
1844 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
1845 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
1846 (PTRACE_TYPE_ARG3) 0, &info);
1847 }
1848
1849 regcache = get_thread_regcache (current_thread, 1);
1850 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
1851 lwp->stop_pc = status.tpoint_addr;
1852
1853 /* Cancel any fast tracepoint lock this thread was
1854 holding. */
1855 force_unlock_trace_buffer ();
1856 }
1857
1858 if (lwp->exit_jump_pad_bkpt != NULL)
1859 {
1860 if (debug_threads)
1861 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
1862 "stopping all threads momentarily.\n");
1863
1864 stop_all_lwps (1, lwp);
1865
1866 delete_breakpoint (lwp->exit_jump_pad_bkpt);
1867 lwp->exit_jump_pad_bkpt = NULL;
1868
1869 unstop_all_lwps (1, lwp);
1870
1871 gdb_assert (lwp->suspended >= 0);
1872 }
1873 }
1874 }
1875
1876 if (debug_threads)
1877 debug_printf ("Checking whether LWP %ld needs to move out of the "
1878 "jump pad...no\n",
1879 lwpid_of (current_thread));
1880
1881 current_thread = saved_thread;
1882 return 0;
1883 }
1884
1885 /* Enqueue one signal in the "signals to report later when out of the
1886 jump pad" list. */
1887
1888 static void
1889 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1890 {
1891 struct pending_signals *p_sig;
1892 struct thread_info *thread = get_lwp_thread (lwp);
1893
1894 if (debug_threads)
1895 debug_printf ("Deferring signal %d for LWP %ld.\n",
1896 WSTOPSIG (*wstat), lwpid_of (thread));
1897
1898 if (debug_threads)
1899 {
1900 struct pending_signals *sig;
1901
1902 for (sig = lwp->pending_signals_to_report;
1903 sig != NULL;
1904 sig = sig->prev)
1905 debug_printf (" Already queued %d\n",
1906 sig->signal);
1907
1908 debug_printf (" (no more currently queued signals)\n");
1909 }
1910
1911 /* Don't enqueue non-RT signals if they are already in the deferred
1912 queue. (SIGSTOP being the easiest signal to see ending up here
1913 twice) */
1914 if (WSTOPSIG (*wstat) < __SIGRTMIN)
1915 {
1916 struct pending_signals *sig;
1917
1918 for (sig = lwp->pending_signals_to_report;
1919 sig != NULL;
1920 sig = sig->prev)
1921 {
1922 if (sig->signal == WSTOPSIG (*wstat))
1923 {
1924 if (debug_threads)
1925 debug_printf ("Not requeuing already queued non-RT signal %d"
1926 " for LWP %ld\n",
1927 sig->signal,
1928 lwpid_of (thread));
1929 return;
1930 }
1931 }
1932 }
1933
1934 p_sig = xmalloc (sizeof (*p_sig));
1935 p_sig->prev = lwp->pending_signals_to_report;
1936 p_sig->signal = WSTOPSIG (*wstat);
1937 memset (&p_sig->info, 0, sizeof (siginfo_t));
1938 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1939 &p_sig->info);
1940
1941 lwp->pending_signals_to_report = p_sig;
1942 }
1943
1944 /* Dequeue one signal from the "signals to report later when out of
1945 the jump pad" list. */
1946
1947 static int
1948 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1949 {
1950 struct thread_info *thread = get_lwp_thread (lwp);
1951
1952 if (lwp->pending_signals_to_report != NULL)
1953 {
1954 struct pending_signals **p_sig;
1955
1956 p_sig = &lwp->pending_signals_to_report;
1957 while ((*p_sig)->prev != NULL)
1958 p_sig = &(*p_sig)->prev;
1959
1960 *wstat = W_STOPCODE ((*p_sig)->signal);
1961 if ((*p_sig)->info.si_signo != 0)
1962 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1963 &(*p_sig)->info);
1964 free (*p_sig);
1965 *p_sig = NULL;
1966
1967 if (debug_threads)
1968 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
1969 WSTOPSIG (*wstat), lwpid_of (thread));
1970
1971 if (debug_threads)
1972 {
1973 struct pending_signals *sig;
1974
1975 for (sig = lwp->pending_signals_to_report;
1976 sig != NULL;
1977 sig = sig->prev)
1978 debug_printf (" Still queued %d\n",
1979 sig->signal);
1980
1981 debug_printf (" (no more queued signals)\n");
1982 }
1983
1984 return 1;
1985 }
1986
1987 return 0;
1988 }
1989
1990 /* Fetch the possibly triggered data watchpoint info and store it in
1991 CHILD.
1992
1993 On some archs, like x86, that use debug registers to set
1994 watchpoints, it's possible that the way to know which watched
1995 address trapped, is to check the register that is used to select
1996 which address to watch. Problem is, between setting the watchpoint
1997 and reading back which data address trapped, the user may change
1998 the set of watchpoints, and, as a consequence, GDB changes the
1999 debug registers in the inferior. To avoid reading back a stale
2000 stopped-data-address when that happens, we cache in LP the fact
2001 that a watchpoint trapped, and the corresponding data address, as
2002 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2003 registers meanwhile, we have the cached data we can rely on. */
2004
2005 static int
2006 check_stopped_by_watchpoint (struct lwp_info *child)
2007 {
2008 if (the_low_target.stopped_by_watchpoint != NULL)
2009 {
2010 struct thread_info *saved_thread;
2011
2012 saved_thread = current_thread;
2013 current_thread = get_lwp_thread (child);
2014
2015 if (the_low_target.stopped_by_watchpoint ())
2016 {
2017 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2018
2019 if (the_low_target.stopped_data_address != NULL)
2020 child->stopped_data_address
2021 = the_low_target.stopped_data_address ();
2022 else
2023 child->stopped_data_address = 0;
2024 }
2025
2026 current_thread = saved_thread;
2027 }
2028
2029 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2030 }
2031
2032 /* Return the ptrace options that we want to try to enable. */
2033
2034 static int
2035 linux_low_ptrace_options (int attached)
2036 {
2037 int options = 0;
2038
2039 if (!attached)
2040 options |= PTRACE_O_EXITKILL;
2041
2042 if (report_fork_events)
2043 options |= PTRACE_O_TRACEFORK;
2044
2045 if (report_vfork_events)
2046 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2047
2048 return options;
2049 }
2050
2051 /* Do low-level handling of the event, and check if we should go on
2052 and pass it to caller code. Return the affected lwp if we are, or
2053 NULL otherwise. */
2054
2055 static struct lwp_info *
2056 linux_low_filter_event (int lwpid, int wstat)
2057 {
2058 struct lwp_info *child;
2059 struct thread_info *thread;
2060 int have_stop_pc = 0;
2061
2062 child = find_lwp_pid (pid_to_ptid (lwpid));
2063
2064 /* If we didn't find a process, one of two things presumably happened:
2065 - A process we started and then detached from has exited. Ignore it.
2066 - A process we are controlling has forked and the new child's stop
2067 was reported to us by the kernel. Save its PID. */
2068 if (child == NULL && WIFSTOPPED (wstat))
2069 {
2070 add_to_pid_list (&stopped_pids, lwpid, wstat);
2071 return NULL;
2072 }
2073 else if (child == NULL)
2074 return NULL;
2075
2076 thread = get_lwp_thread (child);
2077
2078 child->stopped = 1;
2079
2080 child->last_status = wstat;
2081
2082 /* Check if the thread has exited. */
2083 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2084 {
2085 if (debug_threads)
2086 debug_printf ("LLFE: %d exited.\n", lwpid);
2087 if (num_lwps (pid_of (thread)) > 1)
2088 {
2089
2090 /* If there is at least one more LWP, then the exit signal was
2091 not the end of the debugged application and should be
2092 ignored. */
2093 delete_lwp (child);
2094 return NULL;
2095 }
2096 else
2097 {
2098 /* This was the last lwp in the process. Since events are
2099 serialized to GDB core, and we can't report this one
2100 right now, but GDB core and the other target layers will
2101 want to be notified about the exit code/signal, leave the
2102 status pending for the next time we're able to report
2103 it. */
2104 mark_lwp_dead (child, wstat);
2105 return child;
2106 }
2107 }
2108
2109 gdb_assert (WIFSTOPPED (wstat));
2110
2111 if (WIFSTOPPED (wstat))
2112 {
2113 struct process_info *proc;
2114
2115 /* Architecture-specific setup after inferior is running. */
2116 proc = find_process_pid (pid_of (thread));
2117 if (proc->tdesc == NULL)
2118 {
2119 if (proc->attached)
2120 {
2121 struct thread_info *saved_thread;
2122
2123 /* This needs to happen after we have attached to the
2124 inferior and it is stopped for the first time, but
2125 before we access any inferior registers. */
2126 saved_thread = current_thread;
2127 current_thread = thread;
2128
2129 the_low_target.arch_setup ();
2130
2131 current_thread = saved_thread;
2132 }
2133 else
2134 {
2135 /* The process is started, but GDBserver will do
2136 architecture-specific setup after the program stops at
2137 the first instruction. */
2138 child->status_pending_p = 1;
2139 child->status_pending = wstat;
2140 return child;
2141 }
2142 }
2143 }
2144
2145 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2146 {
2147 struct process_info *proc = find_process_pid (pid_of (thread));
2148 int options = linux_low_ptrace_options (proc->attached);
2149
2150 linux_enable_event_reporting (lwpid, options);
2151 child->must_set_ptrace_flags = 0;
2152 }
2153
2154 /* Be careful to not overwrite stop_pc until
2155 check_stopped_by_breakpoint is called. */
2156 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2157 && linux_is_extended_waitstatus (wstat))
2158 {
2159 child->stop_pc = get_pc (child);
2160 if (handle_extended_wait (child, wstat))
2161 {
2162 /* The event has been handled, so just return without
2163 reporting it. */
2164 return NULL;
2165 }
2166 }
2167
2168 /* Check first whether this was a SW/HW breakpoint before checking
2169 watchpoints, because at least s390 can't tell the data address of
2170 hardware watchpoint hits, and returns stopped-by-watchpoint as
2171 long as there's a watchpoint set. */
2172 if (WIFSTOPPED (wstat) && linux_wstatus_maybe_breakpoint (wstat))
2173 {
2174 if (check_stopped_by_breakpoint (child))
2175 have_stop_pc = 1;
2176 }
2177
2178 /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
2179 or hardware watchpoint. Check which is which if we got
2180 TARGET_STOPPED_BY_HW_BREAKPOINT. */
2181 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2182 && (child->stop_reason == TARGET_STOPPED_BY_NO_REASON
2183 || child->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
2184 check_stopped_by_watchpoint (child);
2185
2186 if (!have_stop_pc)
2187 child->stop_pc = get_pc (child);
2188
2189 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2190 && child->stop_expected)
2191 {
2192 if (debug_threads)
2193 debug_printf ("Expected stop.\n");
2194 child->stop_expected = 0;
2195
2196 if (thread->last_resume_kind == resume_stop)
2197 {
2198 /* We want to report the stop to the core. Treat the
2199 SIGSTOP as a normal event. */
2200 if (debug_threads)
2201 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2202 target_pid_to_str (ptid_of (thread)));
2203 }
2204 else if (stopping_threads != NOT_STOPPING_THREADS)
2205 {
2206 /* Stopping threads. We don't want this SIGSTOP to end up
2207 pending. */
2208 if (debug_threads)
2209 debug_printf ("LLW: SIGSTOP caught for %s "
2210 "while stopping threads.\n",
2211 target_pid_to_str (ptid_of (thread)));
2212 return NULL;
2213 }
2214 else
2215 {
2216 /* This is a delayed SIGSTOP. Filter out the event. */
2217 if (debug_threads)
2218 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2219 child->stepping ? "step" : "continue",
2220 target_pid_to_str (ptid_of (thread)));
2221
2222 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2223 return NULL;
2224 }
2225 }
2226
2227 child->status_pending_p = 1;
2228 child->status_pending = wstat;
2229 return child;
2230 }
2231
2232 /* Resume LWPs that are currently stopped without any pending status
2233 to report, but are resumed from the core's perspective. */
2234
2235 static void
2236 resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2237 {
2238 struct thread_info *thread = (struct thread_info *) entry;
2239 struct lwp_info *lp = get_thread_lwp (thread);
2240
2241 if (lp->stopped
2242 && !lp->status_pending_p
2243 && thread->last_resume_kind != resume_stop
2244 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2245 {
2246 int step = thread->last_resume_kind == resume_step;
2247
2248 if (debug_threads)
2249 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2250 target_pid_to_str (ptid_of (thread)),
2251 paddress (lp->stop_pc),
2252 step);
2253
2254 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2255 }
2256 }
2257
2258 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2259 match FILTER_PTID (leaving others pending). The PTIDs can be:
2260 minus_one_ptid, to specify any child; a pid PTID, specifying all
2261 lwps of a thread group; or a PTID representing a single lwp. Store
2262 the stop status through the status pointer WSTAT. OPTIONS is
2263 passed to the waitpid call. Return 0 if no event was found and
2264 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2265 was found. Return the PID of the stopped child otherwise. */
2266
2267 static int
2268 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2269 int *wstatp, int options)
2270 {
2271 struct thread_info *event_thread;
2272 struct lwp_info *event_child, *requested_child;
2273 sigset_t block_mask, prev_mask;
2274
2275 retry:
2276 /* N.B. event_thread points to the thread_info struct that contains
2277 event_child. Keep them in sync. */
2278 event_thread = NULL;
2279 event_child = NULL;
2280 requested_child = NULL;
2281
2282 /* Check for a lwp with a pending status. */
2283
2284 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2285 {
2286 event_thread = (struct thread_info *)
2287 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2288 if (event_thread != NULL)
2289 event_child = get_thread_lwp (event_thread);
2290 if (debug_threads && event_thread)
2291 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2292 }
2293 else if (!ptid_equal (filter_ptid, null_ptid))
2294 {
2295 requested_child = find_lwp_pid (filter_ptid);
2296
2297 if (stopping_threads == NOT_STOPPING_THREADS
2298 && requested_child->status_pending_p
2299 && requested_child->collecting_fast_tracepoint)
2300 {
2301 enqueue_one_deferred_signal (requested_child,
2302 &requested_child->status_pending);
2303 requested_child->status_pending_p = 0;
2304 requested_child->status_pending = 0;
2305 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2306 }
2307
2308 if (requested_child->suspended
2309 && requested_child->status_pending_p)
2310 {
2311 internal_error (__FILE__, __LINE__,
2312 "requesting an event out of a"
2313 " suspended child?");
2314 }
2315
2316 if (requested_child->status_pending_p)
2317 {
2318 event_child = requested_child;
2319 event_thread = get_lwp_thread (event_child);
2320 }
2321 }
2322
2323 if (event_child != NULL)
2324 {
2325 if (debug_threads)
2326 debug_printf ("Got an event from pending child %ld (%04x)\n",
2327 lwpid_of (event_thread), event_child->status_pending);
2328 *wstatp = event_child->status_pending;
2329 event_child->status_pending_p = 0;
2330 event_child->status_pending = 0;
2331 current_thread = event_thread;
2332 return lwpid_of (event_thread);
2333 }
2334
2335 /* But if we don't find a pending event, we'll have to wait.
2336
2337 We only enter this loop if no process has a pending wait status.
2338 Thus any action taken in response to a wait status inside this
2339 loop is responding as soon as we detect the status, not after any
2340 pending events. */
2341
2342 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2343 all signals while here. */
2344 sigfillset (&block_mask);
2345 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2346
2347 /* Always pull all events out of the kernel. We'll randomly select
2348 an event LWP out of all that have events, to prevent
2349 starvation. */
2350 while (event_child == NULL)
2351 {
2352 pid_t ret = 0;
2353
2354 /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
2355 quirks:
2356
2357 - If the thread group leader exits while other threads in the
2358 thread group still exist, waitpid(TGID, ...) hangs. That
2359 waitpid won't return an exit status until the other threads
2360 in the group are reaped.
2361
2362 - When a non-leader thread execs, that thread just vanishes
2363 without reporting an exit (so we'd hang if we waited for it
2364 explicitly in that case). The exec event is reported to
2365 the TGID pid (although we don't currently enable exec
2366 events). */
2367 errno = 0;
2368 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2369
2370 if (debug_threads)
2371 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2372 ret, errno ? strerror (errno) : "ERRNO-OK");
2373
2374 if (ret > 0)
2375 {
2376 if (debug_threads)
2377 {
2378 debug_printf ("LLW: waitpid %ld received %s\n",
2379 (long) ret, status_to_str (*wstatp));
2380 }
2381
2382 /* Filter all events. IOW, leave all events pending. We'll
2383 randomly select an event LWP out of all that have events
2384 below. */
2385 linux_low_filter_event (ret, *wstatp);
2386 /* Retry until nothing comes out of waitpid. A single
2387 SIGCHLD can indicate more than one child stopped. */
2388 continue;
2389 }
2390
2391 /* Now that we've pulled all events out of the kernel, resume
2392 LWPs that don't have an interesting event to report. */
2393 if (stopping_threads == NOT_STOPPING_THREADS)
2394 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2395
2396 /* ... and find an LWP with a status to report to the core, if
2397 any. */
2398 event_thread = (struct thread_info *)
2399 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2400 if (event_thread != NULL)
2401 {
2402 event_child = get_thread_lwp (event_thread);
2403 *wstatp = event_child->status_pending;
2404 event_child->status_pending_p = 0;
2405 event_child->status_pending = 0;
2406 break;
2407 }
2408
2409 /* Check for zombie thread group leaders. Those can't be reaped
2410 until all other threads in the thread group are. */
2411 check_zombie_leaders ();
2412
2413 /* If there are no resumed children left in the set of LWPs we
2414 want to wait for, bail. We can't just block in
2415 waitpid/sigsuspend, because lwps might have been left stopped
2416 in trace-stop state, and we'd be stuck forever waiting for
2417 their status to change (which would only happen if we resumed
2418 them). Even if WNOHANG is set, this return code is preferred
2419 over 0 (below), as it is more detailed. */
2420 if ((find_inferior (&all_threads,
2421 not_stopped_callback,
2422 &wait_ptid) == NULL))
2423 {
2424 if (debug_threads)
2425 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2426 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2427 return -1;
2428 }
2429
2430 /* No interesting event to report to the caller. */
2431 if ((options & WNOHANG))
2432 {
2433 if (debug_threads)
2434 debug_printf ("WNOHANG set, no event found\n");
2435
2436 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2437 return 0;
2438 }
2439
2440 /* Block until we get an event reported with SIGCHLD. */
2441 if (debug_threads)
2442 debug_printf ("sigsuspend'ing\n");
2443
2444 sigsuspend (&prev_mask);
2445 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2446 goto retry;
2447 }
2448
2449 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2450
2451 current_thread = event_thread;
2452
2453 /* Check for thread exit. */
2454 if (! WIFSTOPPED (*wstatp))
2455 {
2456 gdb_assert (last_thread_of_process_p (pid_of (event_thread)));
2457
2458 if (debug_threads)
2459 debug_printf ("LWP %d is the last lwp of process. "
2460 "Process %ld exiting.\n",
2461 pid_of (event_thread), lwpid_of (event_thread));
2462 return lwpid_of (event_thread);
2463 }
2464
2465 return lwpid_of (event_thread);
2466 }
2467
2468 /* Wait for an event from child(ren) PTID. PTIDs can be:
2469 minus_one_ptid, to specify any child; a pid PTID, specifying all
2470 lwps of a thread group; or a PTID representing a single lwp. Store
2471 the stop status through the status pointer WSTAT. OPTIONS is
2472 passed to the waitpid call. Return 0 if no event was found and
2473 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2474 was found. Return the PID of the stopped child otherwise. */
2475
2476 static int
2477 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2478 {
2479 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2480 }
2481
2482 /* Count the LWP's that have had events. */
2483
2484 static int
2485 count_events_callback (struct inferior_list_entry *entry, void *data)
2486 {
2487 struct thread_info *thread = (struct thread_info *) entry;
2488 struct lwp_info *lp = get_thread_lwp (thread);
2489 int *count = data;
2490
2491 gdb_assert (count != NULL);
2492
2493 /* Count only resumed LWPs that have an event pending. */
2494 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2495 && lp->status_pending_p)
2496 (*count)++;
2497
2498 return 0;
2499 }
2500
2501 /* Select the LWP (if any) that is currently being single-stepped. */
2502
2503 static int
2504 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2505 {
2506 struct thread_info *thread = (struct thread_info *) entry;
2507 struct lwp_info *lp = get_thread_lwp (thread);
2508
2509 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2510 && thread->last_resume_kind == resume_step
2511 && lp->status_pending_p)
2512 return 1;
2513 else
2514 return 0;
2515 }
2516
2517 /* Select the Nth LWP that has had an event. */
2518
2519 static int
2520 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2521 {
2522 struct thread_info *thread = (struct thread_info *) entry;
2523 struct lwp_info *lp = get_thread_lwp (thread);
2524 int *selector = data;
2525
2526 gdb_assert (selector != NULL);
2527
2528 /* Select only resumed LWPs that have an event pending. */
2529 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2530 && lp->status_pending_p)
2531 if ((*selector)-- == 0)
2532 return 1;
2533
2534 return 0;
2535 }
2536
2537 /* Select one LWP out of those that have events pending. */
2538
2539 static void
2540 select_event_lwp (struct lwp_info **orig_lp)
2541 {
2542 int num_events = 0;
2543 int random_selector;
2544 struct thread_info *event_thread = NULL;
2545
2546 /* In all-stop, give preference to the LWP that is being
2547 single-stepped. There will be at most one, and it's the LWP that
2548 the core is most interested in. If we didn't do this, then we'd
2549 have to handle pending step SIGTRAPs somehow in case the core
2550 later continues the previously-stepped thread, otherwise we'd
2551 report the pending SIGTRAP, and the core, not having stepped the
2552 thread, wouldn't understand what the trap was for, and therefore
2553 would report it to the user as a random signal. */
2554 if (!non_stop)
2555 {
2556 event_thread
2557 = (struct thread_info *) find_inferior (&all_threads,
2558 select_singlestep_lwp_callback,
2559 NULL);
2560 if (event_thread != NULL)
2561 {
2562 if (debug_threads)
2563 debug_printf ("SEL: Select single-step %s\n",
2564 target_pid_to_str (ptid_of (event_thread)));
2565 }
2566 }
2567 if (event_thread == NULL)
2568 {
2569 /* No single-stepping LWP. Select one at random, out of those
2570 which have had events. */
2571
2572 /* First see how many events we have. */
2573 find_inferior (&all_threads, count_events_callback, &num_events);
2574 gdb_assert (num_events > 0);
2575
2576 /* Now randomly pick a LWP out of those that have had
2577 events. */
2578 random_selector = (int)
2579 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2580
2581 if (debug_threads && num_events > 1)
2582 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2583 num_events, random_selector);
2584
2585 event_thread
2586 = (struct thread_info *) find_inferior (&all_threads,
2587 select_event_lwp_callback,
2588 &random_selector);
2589 }
2590
2591 if (event_thread != NULL)
2592 {
2593 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2594
2595 /* Switch the event LWP. */
2596 *orig_lp = event_lp;
2597 }
2598 }
2599
2600 /* Decrement the suspend count of an LWP. */
2601
2602 static int
2603 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2604 {
2605 struct thread_info *thread = (struct thread_info *) entry;
2606 struct lwp_info *lwp = get_thread_lwp (thread);
2607
2608 /* Ignore EXCEPT. */
2609 if (lwp == except)
2610 return 0;
2611
2612 lwp->suspended--;
2613
2614 gdb_assert (lwp->suspended >= 0);
2615 return 0;
2616 }
2617
2618 /* Decrement the suspend count of all LWPs, except EXCEPT, if non
2619 NULL. */
2620
2621 static void
2622 unsuspend_all_lwps (struct lwp_info *except)
2623 {
2624 find_inferior (&all_threads, unsuspend_one_lwp, except);
2625 }
2626
2627 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2628 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2629 void *data);
2630 static int lwp_running (struct inferior_list_entry *entry, void *data);
2631 static ptid_t linux_wait_1 (ptid_t ptid,
2632 struct target_waitstatus *ourstatus,
2633 int target_options);
2634
2635 /* Stabilize threads (move out of jump pads).
2636
2637 If a thread is midway collecting a fast tracepoint, we need to
2638 finish the collection and move it out of the jump pad before
2639 reporting the signal.
2640
2641 This avoids recursion while collecting (when a signal arrives
2642 midway, and the signal handler itself collects), which would trash
2643 the trace buffer. In case the user set a breakpoint in a signal
2644 handler, this avoids the backtrace showing the jump pad, etc..
2645 Most importantly, there are certain things we can't do safely if
2646 threads are stopped in a jump pad (or in its callee's). For
2647 example:
2648
2649 - starting a new trace run. A thread still collecting the
2650 previous run, could trash the trace buffer when resumed. The trace
2651 buffer control structures would have been reset but the thread had
2652 no way to tell. The thread could even midway memcpy'ing to the
2653 buffer, which would mean that when resumed, it would clobber the
2654 trace buffer that had been set for a new run.
2655
2656 - we can't rewrite/reuse the jump pads for new tracepoints
2657 safely. Say you do tstart while a thread is stopped midway while
2658 collecting. When the thread is later resumed, it finishes the
2659 collection, and returns to the jump pad, to execute the original
2660 instruction that was under the tracepoint jump at the time the
2661 older run had been started. If the jump pad had been rewritten
2662 since for something else in the new run, the thread would now
2663 execute the wrong / random instructions. */
2664
2665 static void
2666 linux_stabilize_threads (void)
2667 {
2668 struct thread_info *saved_thread;
2669 struct thread_info *thread_stuck;
2670
2671 thread_stuck
2672 = (struct thread_info *) find_inferior (&all_threads,
2673 stuck_in_jump_pad_callback,
2674 NULL);
2675 if (thread_stuck != NULL)
2676 {
2677 if (debug_threads)
2678 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2679 lwpid_of (thread_stuck));
2680 return;
2681 }
2682
2683 saved_thread = current_thread;
2684
2685 stabilizing_threads = 1;
2686
2687 /* Kick 'em all. */
2688 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2689
2690 /* Loop until all are stopped out of the jump pads. */
2691 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2692 {
2693 struct target_waitstatus ourstatus;
2694 struct lwp_info *lwp;
2695 int wstat;
2696
2697 /* Note that we go through the full wait even loop. While
2698 moving threads out of jump pad, we need to be able to step
2699 over internal breakpoints and such. */
2700 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2701
2702 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2703 {
2704 lwp = get_thread_lwp (current_thread);
2705
2706 /* Lock it. */
2707 lwp->suspended++;
2708
2709 if (ourstatus.value.sig != GDB_SIGNAL_0
2710 || current_thread->last_resume_kind == resume_stop)
2711 {
2712 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2713 enqueue_one_deferred_signal (lwp, &wstat);
2714 }
2715 }
2716 }
2717
2718 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
2719
2720 stabilizing_threads = 0;
2721
2722 current_thread = saved_thread;
2723
2724 if (debug_threads)
2725 {
2726 thread_stuck
2727 = (struct thread_info *) find_inferior (&all_threads,
2728 stuck_in_jump_pad_callback,
2729 NULL);
2730 if (thread_stuck != NULL)
2731 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2732 lwpid_of (thread_stuck));
2733 }
2734 }
2735
2736 static void async_file_mark (void);
2737
2738 /* Convenience function that is called when the kernel reports an
2739 event that is not passed out to GDB. */
2740
2741 static ptid_t
2742 ignore_event (struct target_waitstatus *ourstatus)
2743 {
2744 /* If we got an event, there may still be others, as a single
2745 SIGCHLD can indicate more than one child stopped. This forces
2746 another target_wait call. */
2747 async_file_mark ();
2748
2749 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2750 return null_ptid;
2751 }
2752
2753 /* Return non-zero if WAITSTATUS reflects an extended linux
2754 event. Otherwise, return zero. */
2755
2756 static int
2757 extended_event_reported (const struct target_waitstatus *waitstatus)
2758 {
2759 if (waitstatus == NULL)
2760 return 0;
2761
2762 return (waitstatus->kind == TARGET_WAITKIND_FORKED
2763 || waitstatus->kind == TARGET_WAITKIND_VFORKED
2764 || waitstatus->kind == TARGET_WAITKIND_VFORK_DONE);
2765 }
2766
2767 /* Wait for process, returns status. */
2768
2769 static ptid_t
2770 linux_wait_1 (ptid_t ptid,
2771 struct target_waitstatus *ourstatus, int target_options)
2772 {
2773 int w;
2774 struct lwp_info *event_child;
2775 int options;
2776 int pid;
2777 int step_over_finished;
2778 int bp_explains_trap;
2779 int maybe_internal_trap;
2780 int report_to_gdb;
2781 int trace_event;
2782 int in_step_range;
2783
2784 if (debug_threads)
2785 {
2786 debug_enter ();
2787 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2788 }
2789
2790 /* Translate generic target options into linux options. */
2791 options = __WALL;
2792 if (target_options & TARGET_WNOHANG)
2793 options |= WNOHANG;
2794
2795 bp_explains_trap = 0;
2796 trace_event = 0;
2797 in_step_range = 0;
2798 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2799
2800 if (ptid_equal (step_over_bkpt, null_ptid))
2801 pid = linux_wait_for_event (ptid, &w, options);
2802 else
2803 {
2804 if (debug_threads)
2805 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2806 target_pid_to_str (step_over_bkpt));
2807 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2808 }
2809
2810 if (pid == 0)
2811 {
2812 gdb_assert (target_options & TARGET_WNOHANG);
2813
2814 if (debug_threads)
2815 {
2816 debug_printf ("linux_wait_1 ret = null_ptid, "
2817 "TARGET_WAITKIND_IGNORE\n");
2818 debug_exit ();
2819 }
2820
2821 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2822 return null_ptid;
2823 }
2824 else if (pid == -1)
2825 {
2826 if (debug_threads)
2827 {
2828 debug_printf ("linux_wait_1 ret = null_ptid, "
2829 "TARGET_WAITKIND_NO_RESUMED\n");
2830 debug_exit ();
2831 }
2832
2833 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
2834 return null_ptid;
2835 }
2836
2837 event_child = get_thread_lwp (current_thread);
2838
2839 /* linux_wait_for_event only returns an exit status for the last
2840 child of a process. Report it. */
2841 if (WIFEXITED (w) || WIFSIGNALED (w))
2842 {
2843 if (WIFEXITED (w))
2844 {
2845 ourstatus->kind = TARGET_WAITKIND_EXITED;
2846 ourstatus->value.integer = WEXITSTATUS (w);
2847
2848 if (debug_threads)
2849 {
2850 debug_printf ("linux_wait_1 ret = %s, exited with "
2851 "retcode %d\n",
2852 target_pid_to_str (ptid_of (current_thread)),
2853 WEXITSTATUS (w));
2854 debug_exit ();
2855 }
2856 }
2857 else
2858 {
2859 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2860 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2861
2862 if (debug_threads)
2863 {
2864 debug_printf ("linux_wait_1 ret = %s, terminated with "
2865 "signal %d\n",
2866 target_pid_to_str (ptid_of (current_thread)),
2867 WTERMSIG (w));
2868 debug_exit ();
2869 }
2870 }
2871
2872 return ptid_of (current_thread);
2873 }
2874
2875 /* If step-over executes a breakpoint instruction, it means a
2876 gdb/gdbserver breakpoint had been planted on top of a permanent
2877 breakpoint. The PC has been adjusted by
2878 check_stopped_by_breakpoint to point at the breakpoint address.
2879 Advance the PC manually past the breakpoint, otherwise the
2880 program would keep trapping the permanent breakpoint forever. */
2881 if (!ptid_equal (step_over_bkpt, null_ptid)
2882 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
2883 {
2884 unsigned int increment_pc = the_low_target.breakpoint_len;
2885
2886 if (debug_threads)
2887 {
2888 debug_printf ("step-over for %s executed software breakpoint\n",
2889 target_pid_to_str (ptid_of (current_thread)));
2890 }
2891
2892 if (increment_pc != 0)
2893 {
2894 struct regcache *regcache
2895 = get_thread_regcache (current_thread, 1);
2896
2897 event_child->stop_pc += increment_pc;
2898 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2899
2900 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
2901 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
2902 }
2903 }
2904
2905 /* If this event was not handled before, and is not a SIGTRAP, we
2906 report it. SIGILL and SIGSEGV are also treated as traps in case
2907 a breakpoint is inserted at the current PC. If this target does
2908 not support internal breakpoints at all, we also report the
2909 SIGTRAP without further processing; it's of no concern to us. */
2910 maybe_internal_trap
2911 = (supports_breakpoints ()
2912 && (WSTOPSIG (w) == SIGTRAP
2913 || ((WSTOPSIG (w) == SIGILL
2914 || WSTOPSIG (w) == SIGSEGV)
2915 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2916
2917 if (maybe_internal_trap)
2918 {
2919 /* Handle anything that requires bookkeeping before deciding to
2920 report the event or continue waiting. */
2921
2922 /* First check if we can explain the SIGTRAP with an internal
2923 breakpoint, or if we should possibly report the event to GDB.
2924 Do this before anything that may remove or insert a
2925 breakpoint. */
2926 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2927
2928 /* We have a SIGTRAP, possibly a step-over dance has just
2929 finished. If so, tweak the state machine accordingly,
2930 reinsert breakpoints and delete any reinsert (software
2931 single-step) breakpoints. */
2932 step_over_finished = finish_step_over (event_child);
2933
2934 /* Now invoke the callbacks of any internal breakpoints there. */
2935 check_breakpoints (event_child->stop_pc);
2936
2937 /* Handle tracepoint data collecting. This may overflow the
2938 trace buffer, and cause a tracing stop, removing
2939 breakpoints. */
2940 trace_event = handle_tracepoints (event_child);
2941
2942 if (bp_explains_trap)
2943 {
2944 /* If we stepped or ran into an internal breakpoint, we've
2945 already handled it. So next time we resume (from this
2946 PC), we should step over it. */
2947 if (debug_threads)
2948 debug_printf ("Hit a gdbserver breakpoint.\n");
2949
2950 if (breakpoint_here (event_child->stop_pc))
2951 event_child->need_step_over = 1;
2952 }
2953 }
2954 else
2955 {
2956 /* We have some other signal, possibly a step-over dance was in
2957 progress, and it should be cancelled too. */
2958 step_over_finished = finish_step_over (event_child);
2959 }
2960
2961 /* We have all the data we need. Either report the event to GDB, or
2962 resume threads and keep waiting for more. */
2963
2964 /* If we're collecting a fast tracepoint, finish the collection and
2965 move out of the jump pad before delivering a signal. See
2966 linux_stabilize_threads. */
2967
2968 if (WIFSTOPPED (w)
2969 && WSTOPSIG (w) != SIGTRAP
2970 && supports_fast_tracepoints ()
2971 && agent_loaded_p ())
2972 {
2973 if (debug_threads)
2974 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2975 "to defer or adjust it.\n",
2976 WSTOPSIG (w), lwpid_of (current_thread));
2977
2978 /* Allow debugging the jump pad itself. */
2979 if (current_thread->last_resume_kind != resume_step
2980 && maybe_move_out_of_jump_pad (event_child, &w))
2981 {
2982 enqueue_one_deferred_signal (event_child, &w);
2983
2984 if (debug_threads)
2985 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
2986 WSTOPSIG (w), lwpid_of (current_thread));
2987
2988 linux_resume_one_lwp (event_child, 0, 0, NULL);
2989
2990 return ignore_event (ourstatus);
2991 }
2992 }
2993
2994 if (event_child->collecting_fast_tracepoint)
2995 {
2996 if (debug_threads)
2997 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2998 "Check if we're already there.\n",
2999 lwpid_of (current_thread),
3000 event_child->collecting_fast_tracepoint);
3001
3002 trace_event = 1;
3003
3004 event_child->collecting_fast_tracepoint
3005 = linux_fast_tracepoint_collecting (event_child, NULL);
3006
3007 if (event_child->collecting_fast_tracepoint != 1)
3008 {
3009 /* No longer need this breakpoint. */
3010 if (event_child->exit_jump_pad_bkpt != NULL)
3011 {
3012 if (debug_threads)
3013 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
3014 "stopping all threads momentarily.\n");
3015
3016 /* Other running threads could hit this breakpoint.
3017 We don't handle moribund locations like GDB does,
3018 instead we always pause all threads when removing
3019 breakpoints, so that any step-over or
3020 decr_pc_after_break adjustment is always taken
3021 care of while the breakpoint is still
3022 inserted. */
3023 stop_all_lwps (1, event_child);
3024
3025 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3026 event_child->exit_jump_pad_bkpt = NULL;
3027
3028 unstop_all_lwps (1, event_child);
3029
3030 gdb_assert (event_child->suspended >= 0);
3031 }
3032 }
3033
3034 if (event_child->collecting_fast_tracepoint == 0)
3035 {
3036 if (debug_threads)
3037 debug_printf ("fast tracepoint finished "
3038 "collecting successfully.\n");
3039
3040 /* We may have a deferred signal to report. */
3041 if (dequeue_one_deferred_signal (event_child, &w))
3042 {
3043 if (debug_threads)
3044 debug_printf ("dequeued one signal.\n");
3045 }
3046 else
3047 {
3048 if (debug_threads)
3049 debug_printf ("no deferred signals.\n");
3050
3051 if (stabilizing_threads)
3052 {
3053 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3054 ourstatus->value.sig = GDB_SIGNAL_0;
3055
3056 if (debug_threads)
3057 {
3058 debug_printf ("linux_wait_1 ret = %s, stopped "
3059 "while stabilizing threads\n",
3060 target_pid_to_str (ptid_of (current_thread)));
3061 debug_exit ();
3062 }
3063
3064 return ptid_of (current_thread);
3065 }
3066 }
3067 }
3068 }
3069
3070 /* Check whether GDB would be interested in this event. */
3071
3072 /* If GDB is not interested in this signal, don't stop other
3073 threads, and don't report it to GDB. Just resume the inferior
3074 right away. We do this for threading-related signals as well as
3075 any that GDB specifically requested we ignore. But never ignore
3076 SIGSTOP if we sent it ourselves, and do not ignore signals when
3077 stepping - they may require special handling to skip the signal
3078 handler. Also never ignore signals that could be caused by a
3079 breakpoint. */
3080 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
3081 thread library? */
3082 if (WIFSTOPPED (w)
3083 && current_thread->last_resume_kind != resume_step
3084 && (
3085 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3086 (current_process ()->priv->thread_db != NULL
3087 && (WSTOPSIG (w) == __SIGRTMIN
3088 || WSTOPSIG (w) == __SIGRTMIN + 1))
3089 ||
3090 #endif
3091 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3092 && !(WSTOPSIG (w) == SIGSTOP
3093 && current_thread->last_resume_kind == resume_stop)
3094 && !linux_wstatus_maybe_breakpoint (w))))
3095 {
3096 siginfo_t info, *info_p;
3097
3098 if (debug_threads)
3099 debug_printf ("Ignored signal %d for LWP %ld.\n",
3100 WSTOPSIG (w), lwpid_of (current_thread));
3101
3102 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3103 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3104 info_p = &info;
3105 else
3106 info_p = NULL;
3107 linux_resume_one_lwp (event_child, event_child->stepping,
3108 WSTOPSIG (w), info_p);
3109 return ignore_event (ourstatus);
3110 }
3111
3112 /* Note that all addresses are always "out of the step range" when
3113 there's no range to begin with. */
3114 in_step_range = lwp_in_step_range (event_child);
3115
3116 /* If GDB wanted this thread to single step, and the thread is out
3117 of the step range, we always want to report the SIGTRAP, and let
3118 GDB handle it. Watchpoints should always be reported. So should
3119 signals we can't explain. A SIGTRAP we can't explain could be a
3120 GDB breakpoint --- we may or not support Z0 breakpoints. If we
3121 do, we're be able to handle GDB breakpoints on top of internal
3122 breakpoints, by handling the internal breakpoint and still
3123 reporting the event to GDB. If we don't, we're out of luck, GDB
3124 won't see the breakpoint hit. */
3125 report_to_gdb = (!maybe_internal_trap
3126 || (current_thread->last_resume_kind == resume_step
3127 && !in_step_range)
3128 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3129 || (!step_over_finished && !in_step_range
3130 && !bp_explains_trap && !trace_event)
3131 || (gdb_breakpoint_here (event_child->stop_pc)
3132 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3133 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3134 || extended_event_reported (&event_child->waitstatus));
3135
3136 run_breakpoint_commands (event_child->stop_pc);
3137
3138 /* We found no reason GDB would want us to stop. We either hit one
3139 of our own breakpoints, or finished an internal step GDB
3140 shouldn't know about. */
3141 if (!report_to_gdb)
3142 {
3143 if (debug_threads)
3144 {
3145 if (bp_explains_trap)
3146 debug_printf ("Hit a gdbserver breakpoint.\n");
3147 if (step_over_finished)
3148 debug_printf ("Step-over finished.\n");
3149 if (trace_event)
3150 debug_printf ("Tracepoint event.\n");
3151 if (lwp_in_step_range (event_child))
3152 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3153 paddress (event_child->stop_pc),
3154 paddress (event_child->step_range_start),
3155 paddress (event_child->step_range_end));
3156 if (extended_event_reported (&event_child->waitstatus))
3157 {
3158 char *str = target_waitstatus_to_string (ourstatus);
3159 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3160 lwpid_of (get_lwp_thread (event_child)), str);
3161 xfree (str);
3162 }
3163 }
3164
3165 /* We're not reporting this breakpoint to GDB, so apply the
3166 decr_pc_after_break adjustment to the inferior's regcache
3167 ourselves. */
3168
3169 if (the_low_target.set_pc != NULL)
3170 {
3171 struct regcache *regcache
3172 = get_thread_regcache (current_thread, 1);
3173 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3174 }
3175
3176 /* We may have finished stepping over a breakpoint. If so,
3177 we've stopped and suspended all LWPs momentarily except the
3178 stepping one. This is where we resume them all again. We're
3179 going to keep waiting, so use proceed, which handles stepping
3180 over the next breakpoint. */
3181 if (debug_threads)
3182 debug_printf ("proceeding all threads.\n");
3183
3184 if (step_over_finished)
3185 unsuspend_all_lwps (event_child);
3186
3187 proceed_all_lwps ();
3188 return ignore_event (ourstatus);
3189 }
3190
3191 if (debug_threads)
3192 {
3193 if (current_thread->last_resume_kind == resume_step)
3194 {
3195 if (event_child->step_range_start == event_child->step_range_end)
3196 debug_printf ("GDB wanted to single-step, reporting event.\n");
3197 else if (!lwp_in_step_range (event_child))
3198 debug_printf ("Out of step range, reporting event.\n");
3199 }
3200 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3201 debug_printf ("Stopped by watchpoint.\n");
3202 else if (gdb_breakpoint_here (event_child->stop_pc))
3203 debug_printf ("Stopped by GDB breakpoint.\n");
3204 if (debug_threads)
3205 debug_printf ("Hit a non-gdbserver trap event.\n");
3206 }
3207
3208 /* Alright, we're going to report a stop. */
3209
3210 if (!stabilizing_threads)
3211 {
3212 /* In all-stop, stop all threads. */
3213 if (!non_stop)
3214 stop_all_lwps (0, NULL);
3215
3216 /* If we're not waiting for a specific LWP, choose an event LWP
3217 from among those that have had events. Giving equal priority
3218 to all LWPs that have had events helps prevent
3219 starvation. */
3220 if (ptid_equal (ptid, minus_one_ptid))
3221 {
3222 event_child->status_pending_p = 1;
3223 event_child->status_pending = w;
3224
3225 select_event_lwp (&event_child);
3226
3227 /* current_thread and event_child must stay in sync. */
3228 current_thread = get_lwp_thread (event_child);
3229
3230 event_child->status_pending_p = 0;
3231 w = event_child->status_pending;
3232 }
3233
3234 if (step_over_finished)
3235 {
3236 if (!non_stop)
3237 {
3238 /* If we were doing a step-over, all other threads but
3239 the stepping one had been paused in start_step_over,
3240 with their suspend counts incremented. We don't want
3241 to do a full unstop/unpause, because we're in
3242 all-stop mode (so we want threads stopped), but we
3243 still need to unsuspend the other threads, to
3244 decrement their `suspended' count back. */
3245 unsuspend_all_lwps (event_child);
3246 }
3247 else
3248 {
3249 /* If we just finished a step-over, then all threads had
3250 been momentarily paused. In all-stop, that's fine,
3251 we want threads stopped by now anyway. In non-stop,
3252 we need to re-resume threads that GDB wanted to be
3253 running. */
3254 unstop_all_lwps (1, event_child);
3255 }
3256 }
3257
3258 /* Stabilize threads (move out of jump pads). */
3259 if (!non_stop)
3260 stabilize_threads ();
3261 }
3262 else
3263 {
3264 /* If we just finished a step-over, then all threads had been
3265 momentarily paused. In all-stop, that's fine, we want
3266 threads stopped by now anyway. In non-stop, we need to
3267 re-resume threads that GDB wanted to be running. */
3268 if (step_over_finished)
3269 unstop_all_lwps (1, event_child);
3270 }
3271
3272 if (extended_event_reported (&event_child->waitstatus))
3273 {
3274 /* If the reported event is a fork, vfork or exec, let GDB know. */
3275 ourstatus->kind = event_child->waitstatus.kind;
3276 ourstatus->value = event_child->waitstatus.value;
3277
3278 /* Clear the event lwp's waitstatus since we handled it already. */
3279 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3280 }
3281 else
3282 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3283
3284 /* Now that we've selected our final event LWP, un-adjust its PC if
3285 it was a software breakpoint, and the client doesn't know we can
3286 adjust the breakpoint ourselves. */
3287 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3288 && !swbreak_feature)
3289 {
3290 int decr_pc = the_low_target.decr_pc_after_break;
3291
3292 if (decr_pc != 0)
3293 {
3294 struct regcache *regcache
3295 = get_thread_regcache (current_thread, 1);
3296 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3297 }
3298 }
3299
3300 if (current_thread->last_resume_kind == resume_stop
3301 && WSTOPSIG (w) == SIGSTOP)
3302 {
3303 /* A thread that has been requested to stop by GDB with vCont;t,
3304 and it stopped cleanly, so report as SIG0. The use of
3305 SIGSTOP is an implementation detail. */
3306 ourstatus->value.sig = GDB_SIGNAL_0;
3307 }
3308 else if (current_thread->last_resume_kind == resume_stop
3309 && WSTOPSIG (w) != SIGSTOP)
3310 {
3311 /* A thread that has been requested to stop by GDB with vCont;t,
3312 but, it stopped for other reasons. */
3313 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3314 }
3315 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3316 {
3317 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3318 }
3319
3320 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3321
3322 if (debug_threads)
3323 {
3324 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3325 target_pid_to_str (ptid_of (current_thread)),
3326 ourstatus->kind, ourstatus->value.sig);
3327 debug_exit ();
3328 }
3329
3330 return ptid_of (current_thread);
3331 }
3332
3333 /* Get rid of any pending event in the pipe. */
3334 static void
3335 async_file_flush (void)
3336 {
3337 int ret;
3338 char buf;
3339
3340 do
3341 ret = read (linux_event_pipe[0], &buf, 1);
3342 while (ret >= 0 || (ret == -1 && errno == EINTR));
3343 }
3344
3345 /* Put something in the pipe, so the event loop wakes up. */
3346 static void
3347 async_file_mark (void)
3348 {
3349 int ret;
3350
3351 async_file_flush ();
3352
3353 do
3354 ret = write (linux_event_pipe[1], "+", 1);
3355 while (ret == 0 || (ret == -1 && errno == EINTR));
3356
3357 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3358 be awakened anyway. */
3359 }
3360
3361 static ptid_t
3362 linux_wait (ptid_t ptid,
3363 struct target_waitstatus *ourstatus, int target_options)
3364 {
3365 ptid_t event_ptid;
3366
3367 /* Flush the async file first. */
3368 if (target_is_async_p ())
3369 async_file_flush ();
3370
3371 do
3372 {
3373 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3374 }
3375 while ((target_options & TARGET_WNOHANG) == 0
3376 && ptid_equal (event_ptid, null_ptid)
3377 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3378
3379 /* If at least one stop was reported, there may be more. A single
3380 SIGCHLD can signal more than one child stop. */
3381 if (target_is_async_p ()
3382 && (target_options & TARGET_WNOHANG) != 0
3383 && !ptid_equal (event_ptid, null_ptid))
3384 async_file_mark ();
3385
3386 return event_ptid;
3387 }
3388
3389 /* Send a signal to an LWP. */
3390
3391 static int
3392 kill_lwp (unsigned long lwpid, int signo)
3393 {
3394 /* Use tkill, if possible, in case we are using nptl threads. If tkill
3395 fails, then we are not using nptl threads and we should be using kill. */
3396
3397 #ifdef __NR_tkill
3398 {
3399 static int tkill_failed;
3400
3401 if (!tkill_failed)
3402 {
3403 int ret;
3404
3405 errno = 0;
3406 ret = syscall (__NR_tkill, lwpid, signo);
3407 if (errno != ENOSYS)
3408 return ret;
3409 tkill_failed = 1;
3410 }
3411 }
3412 #endif
3413
3414 return kill (lwpid, signo);
3415 }
3416
3417 void
3418 linux_stop_lwp (struct lwp_info *lwp)
3419 {
3420 send_sigstop (lwp);
3421 }
3422
3423 static void
3424 send_sigstop (struct lwp_info *lwp)
3425 {
3426 int pid;
3427
3428 pid = lwpid_of (get_lwp_thread (lwp));
3429
3430 /* If we already have a pending stop signal for this process, don't
3431 send another. */
3432 if (lwp->stop_expected)
3433 {
3434 if (debug_threads)
3435 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3436
3437 return;
3438 }
3439
3440 if (debug_threads)
3441 debug_printf ("Sending sigstop to lwp %d\n", pid);
3442
3443 lwp->stop_expected = 1;
3444 kill_lwp (pid, SIGSTOP);
3445 }
3446
3447 static int
3448 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3449 {
3450 struct thread_info *thread = (struct thread_info *) entry;
3451 struct lwp_info *lwp = get_thread_lwp (thread);
3452
3453 /* Ignore EXCEPT. */
3454 if (lwp == except)
3455 return 0;
3456
3457 if (lwp->stopped)
3458 return 0;
3459
3460 send_sigstop (lwp);
3461 return 0;
3462 }
3463
3464 /* Increment the suspend count of an LWP, and stop it, if not stopped
3465 yet. */
3466 static int
3467 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3468 void *except)
3469 {
3470 struct thread_info *thread = (struct thread_info *) entry;
3471 struct lwp_info *lwp = get_thread_lwp (thread);
3472
3473 /* Ignore EXCEPT. */
3474 if (lwp == except)
3475 return 0;
3476
3477 lwp->suspended++;
3478
3479 return send_sigstop_callback (entry, except);
3480 }
3481
3482 static void
3483 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3484 {
3485 /* It's dead, really. */
3486 lwp->dead = 1;
3487
3488 /* Store the exit status for later. */
3489 lwp->status_pending_p = 1;
3490 lwp->status_pending = wstat;
3491
3492 /* Prevent trying to stop it. */
3493 lwp->stopped = 1;
3494
3495 /* No further stops are expected from a dead lwp. */
3496 lwp->stop_expected = 0;
3497 }
3498
3499 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3500
3501 static void
3502 wait_for_sigstop (void)
3503 {
3504 struct thread_info *saved_thread;
3505 ptid_t saved_tid;
3506 int wstat;
3507 int ret;
3508
3509 saved_thread = current_thread;
3510 if (saved_thread != NULL)
3511 saved_tid = saved_thread->entry.id;
3512 else
3513 saved_tid = null_ptid; /* avoid bogus unused warning */
3514
3515 if (debug_threads)
3516 debug_printf ("wait_for_sigstop: pulling events\n");
3517
3518 /* Passing NULL_PTID as filter indicates we want all events to be
3519 left pending. Eventually this returns when there are no
3520 unwaited-for children left. */
3521 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3522 &wstat, __WALL);
3523 gdb_assert (ret == -1);
3524
3525 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3526 current_thread = saved_thread;
3527 else
3528 {
3529 if (debug_threads)
3530 debug_printf ("Previously current thread died.\n");
3531
3532 if (non_stop)
3533 {
3534 /* We can't change the current inferior behind GDB's back,
3535 otherwise, a subsequent command may apply to the wrong
3536 process. */
3537 current_thread = NULL;
3538 }
3539 else
3540 {
3541 /* Set a valid thread as current. */
3542 set_desired_thread (0);
3543 }
3544 }
3545 }
3546
3547 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3548 move it out, because we need to report the stop event to GDB. For
3549 example, if the user puts a breakpoint in the jump pad, it's
3550 because she wants to debug it. */
3551
3552 static int
3553 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3554 {
3555 struct thread_info *thread = (struct thread_info *) entry;
3556 struct lwp_info *lwp = get_thread_lwp (thread);
3557
3558 gdb_assert (lwp->suspended == 0);
3559 gdb_assert (lwp->stopped);
3560
3561 /* Allow debugging the jump pad, gdb_collect, etc.. */
3562 return (supports_fast_tracepoints ()
3563 && agent_loaded_p ()
3564 && (gdb_breakpoint_here (lwp->stop_pc)
3565 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3566 || thread->last_resume_kind == resume_step)
3567 && linux_fast_tracepoint_collecting (lwp, NULL));
3568 }
3569
3570 static void
3571 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3572 {
3573 struct thread_info *thread = (struct thread_info *) entry;
3574 struct lwp_info *lwp = get_thread_lwp (thread);
3575 int *wstat;
3576
3577 gdb_assert (lwp->suspended == 0);
3578 gdb_assert (lwp->stopped);
3579
3580 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3581
3582 /* Allow debugging the jump pad, gdb_collect, etc. */
3583 if (!gdb_breakpoint_here (lwp->stop_pc)
3584 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3585 && thread->last_resume_kind != resume_step
3586 && maybe_move_out_of_jump_pad (lwp, wstat))
3587 {
3588 if (debug_threads)
3589 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3590 lwpid_of (thread));
3591
3592 if (wstat)
3593 {
3594 lwp->status_pending_p = 0;
3595 enqueue_one_deferred_signal (lwp, wstat);
3596
3597 if (debug_threads)
3598 debug_printf ("Signal %d for LWP %ld deferred "
3599 "(in jump pad)\n",
3600 WSTOPSIG (*wstat), lwpid_of (thread));
3601 }
3602
3603 linux_resume_one_lwp (lwp, 0, 0, NULL);
3604 }
3605 else
3606 lwp->suspended++;
3607 }
3608
3609 static int
3610 lwp_running (struct inferior_list_entry *entry, void *data)
3611 {
3612 struct thread_info *thread = (struct thread_info *) entry;
3613 struct lwp_info *lwp = get_thread_lwp (thread);
3614
3615 if (lwp->dead)
3616 return 0;
3617 if (lwp->stopped)
3618 return 0;
3619 return 1;
3620 }
3621
3622 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3623 If SUSPEND, then also increase the suspend count of every LWP,
3624 except EXCEPT. */
3625
3626 static void
3627 stop_all_lwps (int suspend, struct lwp_info *except)
3628 {
3629 /* Should not be called recursively. */
3630 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3631
3632 if (debug_threads)
3633 {
3634 debug_enter ();
3635 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3636 suspend ? "stop-and-suspend" : "stop",
3637 except != NULL
3638 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
3639 : "none");
3640 }
3641
3642 stopping_threads = (suspend
3643 ? STOPPING_AND_SUSPENDING_THREADS
3644 : STOPPING_THREADS);
3645
3646 if (suspend)
3647 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
3648 else
3649 find_inferior (&all_threads, send_sigstop_callback, except);
3650 wait_for_sigstop ();
3651 stopping_threads = NOT_STOPPING_THREADS;
3652
3653 if (debug_threads)
3654 {
3655 debug_printf ("stop_all_lwps done, setting stopping_threads "
3656 "back to !stopping\n");
3657 debug_exit ();
3658 }
3659 }
3660
3661 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
3662 SIGNAL is nonzero, give it that signal. */
3663
3664 static void
3665 linux_resume_one_lwp_throw (struct lwp_info *lwp,
3666 int step, int signal, siginfo_t *info)
3667 {
3668 struct thread_info *thread = get_lwp_thread (lwp);
3669 struct thread_info *saved_thread;
3670 int fast_tp_collecting;
3671 struct process_info *proc = get_thread_process (thread);
3672
3673 /* Note that target description may not be initialised
3674 (proc->tdesc == NULL) at this point because the program hasn't
3675 stopped at the first instruction yet. It means GDBserver skips
3676 the extra traps from the wrapper program (see option --wrapper).
3677 Code in this function that requires register access should be
3678 guarded by proc->tdesc == NULL or something else. */
3679
3680 if (lwp->stopped == 0)
3681 return;
3682
3683 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3684
3685 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3686
3687 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3688 user used the "jump" command, or "set $pc = foo"). */
3689 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
3690 {
3691 /* Collecting 'while-stepping' actions doesn't make sense
3692 anymore. */
3693 release_while_stepping_state_list (thread);
3694 }
3695
3696 /* If we have pending signals or status, and a new signal, enqueue the
3697 signal. Also enqueue the signal if we are waiting to reinsert a
3698 breakpoint; it will be picked up again below. */
3699 if (signal != 0
3700 && (lwp->status_pending_p
3701 || lwp->pending_signals != NULL
3702 || lwp->bp_reinsert != 0
3703 || fast_tp_collecting))
3704 {
3705 struct pending_signals *p_sig;
3706 p_sig = xmalloc (sizeof (*p_sig));
3707 p_sig->prev = lwp->pending_signals;
3708 p_sig->signal = signal;
3709 if (info == NULL)
3710 memset (&p_sig->info, 0, sizeof (siginfo_t));
3711 else
3712 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3713 lwp->pending_signals = p_sig;
3714 }
3715
3716 if (lwp->status_pending_p)
3717 {
3718 if (debug_threads)
3719 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3720 " has pending status\n",
3721 lwpid_of (thread), step ? "step" : "continue", signal,
3722 lwp->stop_expected ? "expected" : "not expected");
3723 return;
3724 }
3725
3726 saved_thread = current_thread;
3727 current_thread = thread;
3728
3729 if (debug_threads)
3730 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
3731 lwpid_of (thread), step ? "step" : "continue", signal,
3732 lwp->stop_expected ? "expected" : "not expected");
3733
3734 /* This bit needs some thinking about. If we get a signal that
3735 we must report while a single-step reinsert is still pending,
3736 we often end up resuming the thread. It might be better to
3737 (ew) allow a stack of pending events; then we could be sure that
3738 the reinsert happened right away and not lose any signals.
3739
3740 Making this stack would also shrink the window in which breakpoints are
3741 uninserted (see comment in linux_wait_for_lwp) but not enough for
3742 complete correctness, so it won't solve that problem. It may be
3743 worthwhile just to solve this one, however. */
3744 if (lwp->bp_reinsert != 0)
3745 {
3746 if (debug_threads)
3747 debug_printf (" pending reinsert at 0x%s\n",
3748 paddress (lwp->bp_reinsert));
3749
3750 if (can_hardware_single_step ())
3751 {
3752 if (fast_tp_collecting == 0)
3753 {
3754 if (step == 0)
3755 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3756 if (lwp->suspended)
3757 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3758 lwp->suspended);
3759 }
3760
3761 step = 1;
3762 }
3763
3764 /* Postpone any pending signal. It was enqueued above. */
3765 signal = 0;
3766 }
3767
3768 if (fast_tp_collecting == 1)
3769 {
3770 if (debug_threads)
3771 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3772 " (exit-jump-pad-bkpt)\n",
3773 lwpid_of (thread));
3774
3775 /* Postpone any pending signal. It was enqueued above. */
3776 signal = 0;
3777 }
3778 else if (fast_tp_collecting == 2)
3779 {
3780 if (debug_threads)
3781 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3782 " single-stepping\n",
3783 lwpid_of (thread));
3784
3785 if (can_hardware_single_step ())
3786 step = 1;
3787 else
3788 {
3789 internal_error (__FILE__, __LINE__,
3790 "moving out of jump pad single-stepping"
3791 " not implemented on this target");
3792 }
3793
3794 /* Postpone any pending signal. It was enqueued above. */
3795 signal = 0;
3796 }
3797
3798 /* If we have while-stepping actions in this thread set it stepping.
3799 If we have a signal to deliver, it may or may not be set to
3800 SIG_IGN, we don't know. Assume so, and allow collecting
3801 while-stepping into a signal handler. A possible smart thing to
3802 do would be to set an internal breakpoint at the signal return
3803 address, continue, and carry on catching this while-stepping
3804 action only when that breakpoint is hit. A future
3805 enhancement. */
3806 if (thread->while_stepping != NULL
3807 && can_hardware_single_step ())
3808 {
3809 if (debug_threads)
3810 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
3811 lwpid_of (thread));
3812 step = 1;
3813 }
3814
3815 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
3816 {
3817 struct regcache *regcache = get_thread_regcache (current_thread, 1);
3818
3819 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
3820
3821 if (debug_threads)
3822 {
3823 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
3824 (long) lwp->stop_pc);
3825 }
3826 }
3827
3828 /* If we have pending signals, consume one unless we are trying to
3829 reinsert a breakpoint or we're trying to finish a fast tracepoint
3830 collect. */
3831 if (lwp->pending_signals != NULL
3832 && lwp->bp_reinsert == 0
3833 && fast_tp_collecting == 0)
3834 {
3835 struct pending_signals **p_sig;
3836
3837 p_sig = &lwp->pending_signals;
3838 while ((*p_sig)->prev != NULL)
3839 p_sig = &(*p_sig)->prev;
3840
3841 signal = (*p_sig)->signal;
3842 if ((*p_sig)->info.si_signo != 0)
3843 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3844 &(*p_sig)->info);
3845
3846 free (*p_sig);
3847 *p_sig = NULL;
3848 }
3849
3850 if (the_low_target.prepare_to_resume != NULL)
3851 the_low_target.prepare_to_resume (lwp);
3852
3853 regcache_invalidate_thread (thread);
3854 errno = 0;
3855 lwp->stepping = step;
3856 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
3857 (PTRACE_TYPE_ARG3) 0,
3858 /* Coerce to a uintptr_t first to avoid potential gcc warning
3859 of coercing an 8 byte integer to a 4 byte pointer. */
3860 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
3861
3862 current_thread = saved_thread;
3863 if (errno)
3864 perror_with_name ("resuming thread");
3865
3866 /* Successfully resumed. Clear state that no longer makes sense,
3867 and mark the LWP as running. Must not do this before resuming
3868 otherwise if that fails other code will be confused. E.g., we'd
3869 later try to stop the LWP and hang forever waiting for a stop
3870 status. Note that we must not throw after this is cleared,
3871 otherwise handle_zombie_lwp_error would get confused. */
3872 lwp->stopped = 0;
3873 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3874 }
3875
3876 /* Called when we try to resume a stopped LWP and that errors out. If
3877 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
3878 or about to become), discard the error, clear any pending status
3879 the LWP may have, and return true (we'll collect the exit status
3880 soon enough). Otherwise, return false. */
3881
3882 static int
3883 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
3884 {
3885 struct thread_info *thread = get_lwp_thread (lp);
3886
3887 /* If we get an error after resuming the LWP successfully, we'd
3888 confuse !T state for the LWP being gone. */
3889 gdb_assert (lp->stopped);
3890
3891 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
3892 because even if ptrace failed with ESRCH, the tracee may be "not
3893 yet fully dead", but already refusing ptrace requests. In that
3894 case the tracee has 'R (Running)' state for a little bit
3895 (observed in Linux 3.18). See also the note on ESRCH in the
3896 ptrace(2) man page. Instead, check whether the LWP has any state
3897 other than ptrace-stopped. */
3898
3899 /* Don't assume anything if /proc/PID/status can't be read. */
3900 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
3901 {
3902 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3903 lp->status_pending_p = 0;
3904 return 1;
3905 }
3906 return 0;
3907 }
3908
3909 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
3910 disappears while we try to resume it. */
3911
3912 static void
3913 linux_resume_one_lwp (struct lwp_info *lwp,
3914 int step, int signal, siginfo_t *info)
3915 {
3916 TRY
3917 {
3918 linux_resume_one_lwp_throw (lwp, step, signal, info);
3919 }
3920 CATCH (ex, RETURN_MASK_ERROR)
3921 {
3922 if (!check_ptrace_stopped_lwp_gone (lwp))
3923 throw_exception (ex);
3924 }
3925 END_CATCH
3926 }
3927
3928 struct thread_resume_array
3929 {
3930 struct thread_resume *resume;
3931 size_t n;
3932 };
3933
3934 /* This function is called once per thread via find_inferior.
3935 ARG is a pointer to a thread_resume_array struct.
3936 We look up the thread specified by ENTRY in ARG, and mark the thread
3937 with a pointer to the appropriate resume request.
3938
3939 This algorithm is O(threads * resume elements), but resume elements
3940 is small (and will remain small at least until GDB supports thread
3941 suspension). */
3942
3943 static int
3944 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3945 {
3946 struct thread_info *thread = (struct thread_info *) entry;
3947 struct lwp_info *lwp = get_thread_lwp (thread);
3948 int ndx;
3949 struct thread_resume_array *r;
3950
3951 r = arg;
3952
3953 for (ndx = 0; ndx < r->n; ndx++)
3954 {
3955 ptid_t ptid = r->resume[ndx].thread;
3956 if (ptid_equal (ptid, minus_one_ptid)
3957 || ptid_equal (ptid, entry->id)
3958 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3959 of PID'. */
3960 || (ptid_get_pid (ptid) == pid_of (thread)
3961 && (ptid_is_pid (ptid)
3962 || ptid_get_lwp (ptid) == -1)))
3963 {
3964 if (r->resume[ndx].kind == resume_stop
3965 && thread->last_resume_kind == resume_stop)
3966 {
3967 if (debug_threads)
3968 debug_printf ("already %s LWP %ld at GDB's request\n",
3969 (thread->last_status.kind
3970 == TARGET_WAITKIND_STOPPED)
3971 ? "stopped"
3972 : "stopping",
3973 lwpid_of (thread));
3974
3975 continue;
3976 }
3977
3978 lwp->resume = &r->resume[ndx];
3979 thread->last_resume_kind = lwp->resume->kind;
3980
3981 lwp->step_range_start = lwp->resume->step_range_start;
3982 lwp->step_range_end = lwp->resume->step_range_end;
3983
3984 /* If we had a deferred signal to report, dequeue one now.
3985 This can happen if LWP gets more than one signal while
3986 trying to get out of a jump pad. */
3987 if (lwp->stopped
3988 && !lwp->status_pending_p
3989 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3990 {
3991 lwp->status_pending_p = 1;
3992
3993 if (debug_threads)
3994 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
3995 "leaving status pending.\n",
3996 WSTOPSIG (lwp->status_pending),
3997 lwpid_of (thread));
3998 }
3999
4000 return 0;
4001 }
4002 }
4003
4004 /* No resume action for this thread. */
4005 lwp->resume = NULL;
4006
4007 return 0;
4008 }
4009
4010 /* find_inferior callback for linux_resume.
4011 Set *FLAG_P if this lwp has an interesting status pending. */
4012
4013 static int
4014 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
4015 {
4016 struct thread_info *thread = (struct thread_info *) entry;
4017 struct lwp_info *lwp = get_thread_lwp (thread);
4018
4019 /* LWPs which will not be resumed are not interesting, because
4020 we might not wait for them next time through linux_wait. */
4021 if (lwp->resume == NULL)
4022 return 0;
4023
4024 if (thread_still_has_status_pending_p (thread))
4025 * (int *) flag_p = 1;
4026
4027 return 0;
4028 }
4029
4030 /* Return 1 if this lwp that GDB wants running is stopped at an
4031 internal breakpoint that we need to step over. It assumes that any
4032 required STOP_PC adjustment has already been propagated to the
4033 inferior's regcache. */
4034
4035 static int
4036 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
4037 {
4038 struct thread_info *thread = (struct thread_info *) entry;
4039 struct lwp_info *lwp = get_thread_lwp (thread);
4040 struct thread_info *saved_thread;
4041 CORE_ADDR pc;
4042 struct process_info *proc = get_thread_process (thread);
4043
4044 /* GDBserver is skipping the extra traps from the wrapper program,
4045 don't have to do step over. */
4046 if (proc->tdesc == NULL)
4047 return 0;
4048
4049 /* LWPs which will not be resumed are not interesting, because we
4050 might not wait for them next time through linux_wait. */
4051
4052 if (!lwp->stopped)
4053 {
4054 if (debug_threads)
4055 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4056 lwpid_of (thread));
4057 return 0;
4058 }
4059
4060 if (thread->last_resume_kind == resume_stop)
4061 {
4062 if (debug_threads)
4063 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4064 " stopped\n",
4065 lwpid_of (thread));
4066 return 0;
4067 }
4068
4069 gdb_assert (lwp->suspended >= 0);
4070
4071 if (lwp->suspended)
4072 {
4073 if (debug_threads)
4074 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4075 lwpid_of (thread));
4076 return 0;
4077 }
4078
4079 if (!lwp->need_step_over)
4080 {
4081 if (debug_threads)
4082 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
4083 }
4084
4085 if (lwp->status_pending_p)
4086 {
4087 if (debug_threads)
4088 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4089 " status.\n",
4090 lwpid_of (thread));
4091 return 0;
4092 }
4093
4094 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4095 or we have. */
4096 pc = get_pc (lwp);
4097
4098 /* If the PC has changed since we stopped, then don't do anything,
4099 and let the breakpoint/tracepoint be hit. This happens if, for
4100 instance, GDB handled the decr_pc_after_break subtraction itself,
4101 GDB is OOL stepping this thread, or the user has issued a "jump"
4102 command, or poked thread's registers herself. */
4103 if (pc != lwp->stop_pc)
4104 {
4105 if (debug_threads)
4106 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4107 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4108 lwpid_of (thread),
4109 paddress (lwp->stop_pc), paddress (pc));
4110
4111 lwp->need_step_over = 0;
4112 return 0;
4113 }
4114
4115 saved_thread = current_thread;
4116 current_thread = thread;
4117
4118 /* We can only step over breakpoints we know about. */
4119 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4120 {
4121 /* Don't step over a breakpoint that GDB expects to hit
4122 though. If the condition is being evaluated on the target's side
4123 and it evaluate to false, step over this breakpoint as well. */
4124 if (gdb_breakpoint_here (pc)
4125 && gdb_condition_true_at_breakpoint (pc)
4126 && gdb_no_commands_at_breakpoint (pc))
4127 {
4128 if (debug_threads)
4129 debug_printf ("Need step over [LWP %ld]? yes, but found"
4130 " GDB breakpoint at 0x%s; skipping step over\n",
4131 lwpid_of (thread), paddress (pc));
4132
4133 current_thread = saved_thread;
4134 return 0;
4135 }
4136 else
4137 {
4138 if (debug_threads)
4139 debug_printf ("Need step over [LWP %ld]? yes, "
4140 "found breakpoint at 0x%s\n",
4141 lwpid_of (thread), paddress (pc));
4142
4143 /* We've found an lwp that needs stepping over --- return 1 so
4144 that find_inferior stops looking. */
4145 current_thread = saved_thread;
4146
4147 /* If the step over is cancelled, this is set again. */
4148 lwp->need_step_over = 0;
4149 return 1;
4150 }
4151 }
4152
4153 current_thread = saved_thread;
4154
4155 if (debug_threads)
4156 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4157 " at 0x%s\n",
4158 lwpid_of (thread), paddress (pc));
4159
4160 return 0;
4161 }
4162
4163 /* Start a step-over operation on LWP. When LWP stopped at a
4164 breakpoint, to make progress, we need to remove the breakpoint out
4165 of the way. If we let other threads run while we do that, they may
4166 pass by the breakpoint location and miss hitting it. To avoid
4167 that, a step-over momentarily stops all threads while LWP is
4168 single-stepped while the breakpoint is temporarily uninserted from
4169 the inferior. When the single-step finishes, we reinsert the
4170 breakpoint, and let all threads that are supposed to be running,
4171 run again.
4172
4173 On targets that don't support hardware single-step, we don't
4174 currently support full software single-stepping. Instead, we only
4175 support stepping over the thread event breakpoint, by asking the
4176 low target where to place a reinsert breakpoint. Since this
4177 routine assumes the breakpoint being stepped over is a thread event
4178 breakpoint, it usually assumes the return address of the current
4179 function is a good enough place to set the reinsert breakpoint. */
4180
4181 static int
4182 start_step_over (struct lwp_info *lwp)
4183 {
4184 struct thread_info *thread = get_lwp_thread (lwp);
4185 struct thread_info *saved_thread;
4186 CORE_ADDR pc;
4187 int step;
4188
4189 if (debug_threads)
4190 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4191 lwpid_of (thread));
4192
4193 stop_all_lwps (1, lwp);
4194 gdb_assert (lwp->suspended == 0);
4195
4196 if (debug_threads)
4197 debug_printf ("Done stopping all threads for step-over.\n");
4198
4199 /* Note, we should always reach here with an already adjusted PC,
4200 either by GDB (if we're resuming due to GDB's request), or by our
4201 caller, if we just finished handling an internal breakpoint GDB
4202 shouldn't care about. */
4203 pc = get_pc (lwp);
4204
4205 saved_thread = current_thread;
4206 current_thread = thread;
4207
4208 lwp->bp_reinsert = pc;
4209 uninsert_breakpoints_at (pc);
4210 uninsert_fast_tracepoint_jumps_at (pc);
4211
4212 if (can_hardware_single_step ())
4213 {
4214 step = 1;
4215 }
4216 else
4217 {
4218 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
4219 set_reinsert_breakpoint (raddr);
4220 step = 0;
4221 }
4222
4223 current_thread = saved_thread;
4224
4225 linux_resume_one_lwp (lwp, step, 0, NULL);
4226
4227 /* Require next event from this LWP. */
4228 step_over_bkpt = thread->entry.id;
4229 return 1;
4230 }
4231
4232 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4233 start_step_over, if still there, and delete any reinsert
4234 breakpoints we've set, on non hardware single-step targets. */
4235
4236 static int
4237 finish_step_over (struct lwp_info *lwp)
4238 {
4239 if (lwp->bp_reinsert != 0)
4240 {
4241 if (debug_threads)
4242 debug_printf ("Finished step over.\n");
4243
4244 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4245 may be no breakpoint to reinsert there by now. */
4246 reinsert_breakpoints_at (lwp->bp_reinsert);
4247 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4248
4249 lwp->bp_reinsert = 0;
4250
4251 /* Delete any software-single-step reinsert breakpoints. No
4252 longer needed. We don't have to worry about other threads
4253 hitting this trap, and later not being able to explain it,
4254 because we were stepping over a breakpoint, and we hold all
4255 threads but LWP stopped while doing that. */
4256 if (!can_hardware_single_step ())
4257 delete_reinsert_breakpoints ();
4258
4259 step_over_bkpt = null_ptid;
4260 return 1;
4261 }
4262 else
4263 return 0;
4264 }
4265
4266 /* This function is called once per thread. We check the thread's resume
4267 request, which will tell us whether to resume, step, or leave the thread
4268 stopped; and what signal, if any, it should be sent.
4269
4270 For threads which we aren't explicitly told otherwise, we preserve
4271 the stepping flag; this is used for stepping over gdbserver-placed
4272 breakpoints.
4273
4274 If pending_flags was set in any thread, we queue any needed
4275 signals, since we won't actually resume. We already have a pending
4276 event to report, so we don't need to preserve any step requests;
4277 they should be re-issued if necessary. */
4278
4279 static int
4280 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
4281 {
4282 struct thread_info *thread = (struct thread_info *) entry;
4283 struct lwp_info *lwp = get_thread_lwp (thread);
4284 int step;
4285 int leave_all_stopped = * (int *) arg;
4286 int leave_pending;
4287
4288 if (lwp->resume == NULL)
4289 return 0;
4290
4291 if (lwp->resume->kind == resume_stop)
4292 {
4293 if (debug_threads)
4294 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4295
4296 if (!lwp->stopped)
4297 {
4298 if (debug_threads)
4299 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4300
4301 /* Stop the thread, and wait for the event asynchronously,
4302 through the event loop. */
4303 send_sigstop (lwp);
4304 }
4305 else
4306 {
4307 if (debug_threads)
4308 debug_printf ("already stopped LWP %ld\n",
4309 lwpid_of (thread));
4310
4311 /* The LWP may have been stopped in an internal event that
4312 was not meant to be notified back to GDB (e.g., gdbserver
4313 breakpoint), so we should be reporting a stop event in
4314 this case too. */
4315
4316 /* If the thread already has a pending SIGSTOP, this is a
4317 no-op. Otherwise, something later will presumably resume
4318 the thread and this will cause it to cancel any pending
4319 operation, due to last_resume_kind == resume_stop. If
4320 the thread already has a pending status to report, we
4321 will still report it the next time we wait - see
4322 status_pending_p_callback. */
4323
4324 /* If we already have a pending signal to report, then
4325 there's no need to queue a SIGSTOP, as this means we're
4326 midway through moving the LWP out of the jumppad, and we
4327 will report the pending signal as soon as that is
4328 finished. */
4329 if (lwp->pending_signals_to_report == NULL)
4330 send_sigstop (lwp);
4331 }
4332
4333 /* For stop requests, we're done. */
4334 lwp->resume = NULL;
4335 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4336 return 0;
4337 }
4338
4339 /* If this thread which is about to be resumed has a pending status,
4340 then don't resume any threads - we can just report the pending
4341 status. Make sure to queue any signals that would otherwise be
4342 sent. In all-stop mode, we do this decision based on if *any*
4343 thread has a pending status. If there's a thread that needs the
4344 step-over-breakpoint dance, then don't resume any other thread
4345 but that particular one. */
4346 leave_pending = (lwp->status_pending_p || leave_all_stopped);
4347
4348 if (!leave_pending)
4349 {
4350 if (debug_threads)
4351 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4352
4353 step = (lwp->resume->kind == resume_step);
4354 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
4355 }
4356 else
4357 {
4358 if (debug_threads)
4359 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4360
4361 /* If we have a new signal, enqueue the signal. */
4362 if (lwp->resume->sig != 0)
4363 {
4364 struct pending_signals *p_sig;
4365 p_sig = xmalloc (sizeof (*p_sig));
4366 p_sig->prev = lwp->pending_signals;
4367 p_sig->signal = lwp->resume->sig;
4368 memset (&p_sig->info, 0, sizeof (siginfo_t));
4369
4370 /* If this is the same signal we were previously stopped by,
4371 make sure to queue its siginfo. We can ignore the return
4372 value of ptrace; if it fails, we'll skip
4373 PTRACE_SETSIGINFO. */
4374 if (WIFSTOPPED (lwp->last_status)
4375 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
4376 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4377 &p_sig->info);
4378
4379 lwp->pending_signals = p_sig;
4380 }
4381 }
4382
4383 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4384 lwp->resume = NULL;
4385 return 0;
4386 }
4387
4388 static void
4389 linux_resume (struct thread_resume *resume_info, size_t n)
4390 {
4391 struct thread_resume_array array = { resume_info, n };
4392 struct thread_info *need_step_over = NULL;
4393 int any_pending;
4394 int leave_all_stopped;
4395
4396 if (debug_threads)
4397 {
4398 debug_enter ();
4399 debug_printf ("linux_resume:\n");
4400 }
4401
4402 find_inferior (&all_threads, linux_set_resume_request, &array);
4403
4404 /* If there is a thread which would otherwise be resumed, which has
4405 a pending status, then don't resume any threads - we can just
4406 report the pending status. Make sure to queue any signals that
4407 would otherwise be sent. In non-stop mode, we'll apply this
4408 logic to each thread individually. We consume all pending events
4409 before considering to start a step-over (in all-stop). */
4410 any_pending = 0;
4411 if (!non_stop)
4412 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
4413
4414 /* If there is a thread which would otherwise be resumed, which is
4415 stopped at a breakpoint that needs stepping over, then don't
4416 resume any threads - have it step over the breakpoint with all
4417 other threads stopped, then resume all threads again. Make sure
4418 to queue any signals that would otherwise be delivered or
4419 queued. */
4420 if (!any_pending && supports_breakpoints ())
4421 need_step_over
4422 = (struct thread_info *) find_inferior (&all_threads,
4423 need_step_over_p, NULL);
4424
4425 leave_all_stopped = (need_step_over != NULL || any_pending);
4426
4427 if (debug_threads)
4428 {
4429 if (need_step_over != NULL)
4430 debug_printf ("Not resuming all, need step over\n");
4431 else if (any_pending)
4432 debug_printf ("Not resuming, all-stop and found "
4433 "an LWP with pending status\n");
4434 else
4435 debug_printf ("Resuming, no pending status or step over needed\n");
4436 }
4437
4438 /* Even if we're leaving threads stopped, queue all signals we'd
4439 otherwise deliver. */
4440 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4441
4442 if (need_step_over)
4443 start_step_over (get_thread_lwp (need_step_over));
4444
4445 if (debug_threads)
4446 {
4447 debug_printf ("linux_resume done\n");
4448 debug_exit ();
4449 }
4450 }
4451
4452 /* This function is called once per thread. We check the thread's
4453 last resume request, which will tell us whether to resume, step, or
4454 leave the thread stopped. Any signal the client requested to be
4455 delivered has already been enqueued at this point.
4456
4457 If any thread that GDB wants running is stopped at an internal
4458 breakpoint that needs stepping over, we start a step-over operation
4459 on that particular thread, and leave all others stopped. */
4460
4461 static int
4462 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4463 {
4464 struct thread_info *thread = (struct thread_info *) entry;
4465 struct lwp_info *lwp = get_thread_lwp (thread);
4466 int step;
4467
4468 if (lwp == except)
4469 return 0;
4470
4471 if (debug_threads)
4472 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4473
4474 if (!lwp->stopped)
4475 {
4476 if (debug_threads)
4477 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4478 return 0;
4479 }
4480
4481 if (thread->last_resume_kind == resume_stop
4482 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4483 {
4484 if (debug_threads)
4485 debug_printf (" client wants LWP to remain %ld stopped\n",
4486 lwpid_of (thread));
4487 return 0;
4488 }
4489
4490 if (lwp->status_pending_p)
4491 {
4492 if (debug_threads)
4493 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4494 lwpid_of (thread));
4495 return 0;
4496 }
4497
4498 gdb_assert (lwp->suspended >= 0);
4499
4500 if (lwp->suspended)
4501 {
4502 if (debug_threads)
4503 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4504 return 0;
4505 }
4506
4507 if (thread->last_resume_kind == resume_stop
4508 && lwp->pending_signals_to_report == NULL
4509 && lwp->collecting_fast_tracepoint == 0)
4510 {
4511 /* We haven't reported this LWP as stopped yet (otherwise, the
4512 last_status.kind check above would catch it, and we wouldn't
4513 reach here. This LWP may have been momentarily paused by a
4514 stop_all_lwps call while handling for example, another LWP's
4515 step-over. In that case, the pending expected SIGSTOP signal
4516 that was queued at vCont;t handling time will have already
4517 been consumed by wait_for_sigstop, and so we need to requeue
4518 another one here. Note that if the LWP already has a SIGSTOP
4519 pending, this is a no-op. */
4520
4521 if (debug_threads)
4522 debug_printf ("Client wants LWP %ld to stop. "
4523 "Making sure it has a SIGSTOP pending\n",
4524 lwpid_of (thread));
4525
4526 send_sigstop (lwp);
4527 }
4528
4529 step = thread->last_resume_kind == resume_step;
4530 linux_resume_one_lwp (lwp, step, 0, NULL);
4531 return 0;
4532 }
4533
4534 static int
4535 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4536 {
4537 struct thread_info *thread = (struct thread_info *) entry;
4538 struct lwp_info *lwp = get_thread_lwp (thread);
4539
4540 if (lwp == except)
4541 return 0;
4542
4543 lwp->suspended--;
4544 gdb_assert (lwp->suspended >= 0);
4545
4546 return proceed_one_lwp (entry, except);
4547 }
4548
4549 /* When we finish a step-over, set threads running again. If there's
4550 another thread that may need a step-over, now's the time to start
4551 it. Eventually, we'll move all threads past their breakpoints. */
4552
4553 static void
4554 proceed_all_lwps (void)
4555 {
4556 struct thread_info *need_step_over;
4557
4558 /* If there is a thread which would otherwise be resumed, which is
4559 stopped at a breakpoint that needs stepping over, then don't
4560 resume any threads - have it step over the breakpoint with all
4561 other threads stopped, then resume all threads again. */
4562
4563 if (supports_breakpoints ())
4564 {
4565 need_step_over
4566 = (struct thread_info *) find_inferior (&all_threads,
4567 need_step_over_p, NULL);
4568
4569 if (need_step_over != NULL)
4570 {
4571 if (debug_threads)
4572 debug_printf ("proceed_all_lwps: found "
4573 "thread %ld needing a step-over\n",
4574 lwpid_of (need_step_over));
4575
4576 start_step_over (get_thread_lwp (need_step_over));
4577 return;
4578 }
4579 }
4580
4581 if (debug_threads)
4582 debug_printf ("Proceeding, no step-over needed\n");
4583
4584 find_inferior (&all_threads, proceed_one_lwp, NULL);
4585 }
4586
4587 /* Stopped LWPs that the client wanted to be running, that don't have
4588 pending statuses, are set to run again, except for EXCEPT, if not
4589 NULL. This undoes a stop_all_lwps call. */
4590
4591 static void
4592 unstop_all_lwps (int unsuspend, struct lwp_info *except)
4593 {
4594 if (debug_threads)
4595 {
4596 debug_enter ();
4597 if (except)
4598 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
4599 lwpid_of (get_lwp_thread (except)));
4600 else
4601 debug_printf ("unstopping all lwps\n");
4602 }
4603
4604 if (unsuspend)
4605 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
4606 else
4607 find_inferior (&all_threads, proceed_one_lwp, except);
4608
4609 if (debug_threads)
4610 {
4611 debug_printf ("unstop_all_lwps done\n");
4612 debug_exit ();
4613 }
4614 }
4615
4616
4617 #ifdef HAVE_LINUX_REGSETS
4618
4619 #define use_linux_regsets 1
4620
4621 /* Returns true if REGSET has been disabled. */
4622
4623 static int
4624 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4625 {
4626 return (info->disabled_regsets != NULL
4627 && info->disabled_regsets[regset - info->regsets]);
4628 }
4629
4630 /* Disable REGSET. */
4631
4632 static void
4633 disable_regset (struct regsets_info *info, struct regset_info *regset)
4634 {
4635 int dr_offset;
4636
4637 dr_offset = regset - info->regsets;
4638 if (info->disabled_regsets == NULL)
4639 info->disabled_regsets = xcalloc (1, info->num_regsets);
4640 info->disabled_regsets[dr_offset] = 1;
4641 }
4642
4643 static int
4644 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4645 struct regcache *regcache)
4646 {
4647 struct regset_info *regset;
4648 int saw_general_regs = 0;
4649 int pid;
4650 struct iovec iov;
4651
4652 pid = lwpid_of (current_thread);
4653 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4654 {
4655 void *buf, *data;
4656 int nt_type, res;
4657
4658 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4659 continue;
4660
4661 buf = xmalloc (regset->size);
4662
4663 nt_type = regset->nt_type;
4664 if (nt_type)
4665 {
4666 iov.iov_base = buf;
4667 iov.iov_len = regset->size;
4668 data = (void *) &iov;
4669 }
4670 else
4671 data = buf;
4672
4673 #ifndef __sparc__
4674 res = ptrace (regset->get_request, pid,
4675 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4676 #else
4677 res = ptrace (regset->get_request, pid, data, nt_type);
4678 #endif
4679 if (res < 0)
4680 {
4681 if (errno == EIO)
4682 {
4683 /* If we get EIO on a regset, do not try it again for
4684 this process mode. */
4685 disable_regset (regsets_info, regset);
4686 }
4687 else if (errno == ENODATA)
4688 {
4689 /* ENODATA may be returned if the regset is currently
4690 not "active". This can happen in normal operation,
4691 so suppress the warning in this case. */
4692 }
4693 else
4694 {
4695 char s[256];
4696 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4697 pid);
4698 perror (s);
4699 }
4700 }
4701 else
4702 {
4703 if (regset->type == GENERAL_REGS)
4704 saw_general_regs = 1;
4705 regset->store_function (regcache, buf);
4706 }
4707 free (buf);
4708 }
4709 if (saw_general_regs)
4710 return 0;
4711 else
4712 return 1;
4713 }
4714
4715 static int
4716 regsets_store_inferior_registers (struct regsets_info *regsets_info,
4717 struct regcache *regcache)
4718 {
4719 struct regset_info *regset;
4720 int saw_general_regs = 0;
4721 int pid;
4722 struct iovec iov;
4723
4724 pid = lwpid_of (current_thread);
4725 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4726 {
4727 void *buf, *data;
4728 int nt_type, res;
4729
4730 if (regset->size == 0 || regset_disabled (regsets_info, regset)
4731 || regset->fill_function == NULL)
4732 continue;
4733
4734 buf = xmalloc (regset->size);
4735
4736 /* First fill the buffer with the current register set contents,
4737 in case there are any items in the kernel's regset that are
4738 not in gdbserver's regcache. */
4739
4740 nt_type = regset->nt_type;
4741 if (nt_type)
4742 {
4743 iov.iov_base = buf;
4744 iov.iov_len = regset->size;
4745 data = (void *) &iov;
4746 }
4747 else
4748 data = buf;
4749
4750 #ifndef __sparc__
4751 res = ptrace (regset->get_request, pid,
4752 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4753 #else
4754 res = ptrace (regset->get_request, pid, data, nt_type);
4755 #endif
4756
4757 if (res == 0)
4758 {
4759 /* Then overlay our cached registers on that. */
4760 regset->fill_function (regcache, buf);
4761
4762 /* Only now do we write the register set. */
4763 #ifndef __sparc__
4764 res = ptrace (regset->set_request, pid,
4765 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4766 #else
4767 res = ptrace (regset->set_request, pid, data, nt_type);
4768 #endif
4769 }
4770
4771 if (res < 0)
4772 {
4773 if (errno == EIO)
4774 {
4775 /* If we get EIO on a regset, do not try it again for
4776 this process mode. */
4777 disable_regset (regsets_info, regset);
4778 }
4779 else if (errno == ESRCH)
4780 {
4781 /* At this point, ESRCH should mean the process is
4782 already gone, in which case we simply ignore attempts
4783 to change its registers. See also the related
4784 comment in linux_resume_one_lwp. */
4785 free (buf);
4786 return 0;
4787 }
4788 else
4789 {
4790 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4791 }
4792 }
4793 else if (regset->type == GENERAL_REGS)
4794 saw_general_regs = 1;
4795 free (buf);
4796 }
4797 if (saw_general_regs)
4798 return 0;
4799 else
4800 return 1;
4801 }
4802
4803 #else /* !HAVE_LINUX_REGSETS */
4804
4805 #define use_linux_regsets 0
4806 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4807 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4808
4809 #endif
4810
4811 /* Return 1 if register REGNO is supported by one of the regset ptrace
4812 calls or 0 if it has to be transferred individually. */
4813
4814 static int
4815 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
4816 {
4817 unsigned char mask = 1 << (regno % 8);
4818 size_t index = regno / 8;
4819
4820 return (use_linux_regsets
4821 && (regs_info->regset_bitmap == NULL
4822 || (regs_info->regset_bitmap[index] & mask) != 0));
4823 }
4824
4825 #ifdef HAVE_LINUX_USRREGS
4826
4827 int
4828 register_addr (const struct usrregs_info *usrregs, int regnum)
4829 {
4830 int addr;
4831
4832 if (regnum < 0 || regnum >= usrregs->num_regs)
4833 error ("Invalid register number %d.", regnum);
4834
4835 addr = usrregs->regmap[regnum];
4836
4837 return addr;
4838 }
4839
4840 /* Fetch one register. */
4841 static void
4842 fetch_register (const struct usrregs_info *usrregs,
4843 struct regcache *regcache, int regno)
4844 {
4845 CORE_ADDR regaddr;
4846 int i, size;
4847 char *buf;
4848 int pid;
4849
4850 if (regno >= usrregs->num_regs)
4851 return;
4852 if ((*the_low_target.cannot_fetch_register) (regno))
4853 return;
4854
4855 regaddr = register_addr (usrregs, regno);
4856 if (regaddr == -1)
4857 return;
4858
4859 size = ((register_size (regcache->tdesc, regno)
4860 + sizeof (PTRACE_XFER_TYPE) - 1)
4861 & -sizeof (PTRACE_XFER_TYPE));
4862 buf = alloca (size);
4863
4864 pid = lwpid_of (current_thread);
4865 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4866 {
4867 errno = 0;
4868 *(PTRACE_XFER_TYPE *) (buf + i) =
4869 ptrace (PTRACE_PEEKUSER, pid,
4870 /* Coerce to a uintptr_t first to avoid potential gcc warning
4871 of coercing an 8 byte integer to a 4 byte pointer. */
4872 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
4873 regaddr += sizeof (PTRACE_XFER_TYPE);
4874 if (errno != 0)
4875 error ("reading register %d: %s", regno, strerror (errno));
4876 }
4877
4878 if (the_low_target.supply_ptrace_register)
4879 the_low_target.supply_ptrace_register (regcache, regno, buf);
4880 else
4881 supply_register (regcache, regno, buf);
4882 }
4883
4884 /* Store one register. */
4885 static void
4886 store_register (const struct usrregs_info *usrregs,
4887 struct regcache *regcache, int regno)
4888 {
4889 CORE_ADDR regaddr;
4890 int i, size;
4891 char *buf;
4892 int pid;
4893
4894 if (regno >= usrregs->num_regs)
4895 return;
4896 if ((*the_low_target.cannot_store_register) (regno))
4897 return;
4898
4899 regaddr = register_addr (usrregs, regno);
4900 if (regaddr == -1)
4901 return;
4902
4903 size = ((register_size (regcache->tdesc, regno)
4904 + sizeof (PTRACE_XFER_TYPE) - 1)
4905 & -sizeof (PTRACE_XFER_TYPE));
4906 buf = alloca (size);
4907 memset (buf, 0, size);
4908
4909 if (the_low_target.collect_ptrace_register)
4910 the_low_target.collect_ptrace_register (regcache, regno, buf);
4911 else
4912 collect_register (regcache, regno, buf);
4913
4914 pid = lwpid_of (current_thread);
4915 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4916 {
4917 errno = 0;
4918 ptrace (PTRACE_POKEUSER, pid,
4919 /* Coerce to a uintptr_t first to avoid potential gcc warning
4920 about coercing an 8 byte integer to a 4 byte pointer. */
4921 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
4922 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
4923 if (errno != 0)
4924 {
4925 /* At this point, ESRCH should mean the process is
4926 already gone, in which case we simply ignore attempts
4927 to change its registers. See also the related
4928 comment in linux_resume_one_lwp. */
4929 if (errno == ESRCH)
4930 return;
4931
4932 if ((*the_low_target.cannot_store_register) (regno) == 0)
4933 error ("writing register %d: %s", regno, strerror (errno));
4934 }
4935 regaddr += sizeof (PTRACE_XFER_TYPE);
4936 }
4937 }
4938
4939 /* Fetch all registers, or just one, from the child process.
4940 If REGNO is -1, do this for all registers, skipping any that are
4941 assumed to have been retrieved by regsets_fetch_inferior_registers,
4942 unless ALL is non-zero.
4943 Otherwise, REGNO specifies which register (so we can save time). */
4944 static void
4945 usr_fetch_inferior_registers (const struct regs_info *regs_info,
4946 struct regcache *regcache, int regno, int all)
4947 {
4948 struct usrregs_info *usr = regs_info->usrregs;
4949
4950 if (regno == -1)
4951 {
4952 for (regno = 0; regno < usr->num_regs; regno++)
4953 if (all || !linux_register_in_regsets (regs_info, regno))
4954 fetch_register (usr, regcache, regno);
4955 }
4956 else
4957 fetch_register (usr, regcache, regno);
4958 }
4959
4960 /* Store our register values back into the inferior.
4961 If REGNO is -1, do this for all registers, skipping any that are
4962 assumed to have been saved by regsets_store_inferior_registers,
4963 unless ALL is non-zero.
4964 Otherwise, REGNO specifies which register (so we can save time). */
4965 static void
4966 usr_store_inferior_registers (const struct regs_info *regs_info,
4967 struct regcache *regcache, int regno, int all)
4968 {
4969 struct usrregs_info *usr = regs_info->usrregs;
4970
4971 if (regno == -1)
4972 {
4973 for (regno = 0; regno < usr->num_regs; regno++)
4974 if (all || !linux_register_in_regsets (regs_info, regno))
4975 store_register (usr, regcache, regno);
4976 }
4977 else
4978 store_register (usr, regcache, regno);
4979 }
4980
4981 #else /* !HAVE_LINUX_USRREGS */
4982
4983 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4984 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4985
4986 #endif
4987
4988
4989 void
4990 linux_fetch_registers (struct regcache *regcache, int regno)
4991 {
4992 int use_regsets;
4993 int all = 0;
4994 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4995
4996 if (regno == -1)
4997 {
4998 if (the_low_target.fetch_register != NULL
4999 && regs_info->usrregs != NULL)
5000 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5001 (*the_low_target.fetch_register) (regcache, regno);
5002
5003 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5004 if (regs_info->usrregs != NULL)
5005 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5006 }
5007 else
5008 {
5009 if (the_low_target.fetch_register != NULL
5010 && (*the_low_target.fetch_register) (regcache, regno))
5011 return;
5012
5013 use_regsets = linux_register_in_regsets (regs_info, regno);
5014 if (use_regsets)
5015 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5016 regcache);
5017 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5018 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5019 }
5020 }
5021
5022 void
5023 linux_store_registers (struct regcache *regcache, int regno)
5024 {
5025 int use_regsets;
5026 int all = 0;
5027 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5028
5029 if (regno == -1)
5030 {
5031 all = regsets_store_inferior_registers (regs_info->regsets_info,
5032 regcache);
5033 if (regs_info->usrregs != NULL)
5034 usr_store_inferior_registers (regs_info, regcache, regno, all);
5035 }
5036 else
5037 {
5038 use_regsets = linux_register_in_regsets (regs_info, regno);
5039 if (use_regsets)
5040 all = regsets_store_inferior_registers (regs_info->regsets_info,
5041 regcache);
5042 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5043 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5044 }
5045 }
5046
5047
5048 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5049 to debugger memory starting at MYADDR. */
5050
5051 static int
5052 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5053 {
5054 int pid = lwpid_of (current_thread);
5055 register PTRACE_XFER_TYPE *buffer;
5056 register CORE_ADDR addr;
5057 register int count;
5058 char filename[64];
5059 register int i;
5060 int ret;
5061 int fd;
5062
5063 /* Try using /proc. Don't bother for one word. */
5064 if (len >= 3 * sizeof (long))
5065 {
5066 int bytes;
5067
5068 /* We could keep this file open and cache it - possibly one per
5069 thread. That requires some juggling, but is even faster. */
5070 sprintf (filename, "/proc/%d/mem", pid);
5071 fd = open (filename, O_RDONLY | O_LARGEFILE);
5072 if (fd == -1)
5073 goto no_proc;
5074
5075 /* If pread64 is available, use it. It's faster if the kernel
5076 supports it (only one syscall), and it's 64-bit safe even on
5077 32-bit platforms (for instance, SPARC debugging a SPARC64
5078 application). */
5079 #ifdef HAVE_PREAD64
5080 bytes = pread64 (fd, myaddr, len, memaddr);
5081 #else
5082 bytes = -1;
5083 if (lseek (fd, memaddr, SEEK_SET) != -1)
5084 bytes = read (fd, myaddr, len);
5085 #endif
5086
5087 close (fd);
5088 if (bytes == len)
5089 return 0;
5090
5091 /* Some data was read, we'll try to get the rest with ptrace. */
5092 if (bytes > 0)
5093 {
5094 memaddr += bytes;
5095 myaddr += bytes;
5096 len -= bytes;
5097 }
5098 }
5099
5100 no_proc:
5101 /* Round starting address down to longword boundary. */
5102 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5103 /* Round ending address up; get number of longwords that makes. */
5104 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5105 / sizeof (PTRACE_XFER_TYPE));
5106 /* Allocate buffer of that many longwords. */
5107 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
5108
5109 /* Read all the longwords */
5110 errno = 0;
5111 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5112 {
5113 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5114 about coercing an 8 byte integer to a 4 byte pointer. */
5115 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5116 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5117 (PTRACE_TYPE_ARG4) 0);
5118 if (errno)
5119 break;
5120 }
5121 ret = errno;
5122
5123 /* Copy appropriate bytes out of the buffer. */
5124 if (i > 0)
5125 {
5126 i *= sizeof (PTRACE_XFER_TYPE);
5127 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5128 memcpy (myaddr,
5129 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5130 i < len ? i : len);
5131 }
5132
5133 return ret;
5134 }
5135
5136 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5137 memory at MEMADDR. On failure (cannot write to the inferior)
5138 returns the value of errno. Always succeeds if LEN is zero. */
5139
5140 static int
5141 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5142 {
5143 register int i;
5144 /* Round starting address down to longword boundary. */
5145 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5146 /* Round ending address up; get number of longwords that makes. */
5147 register int count
5148 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5149 / sizeof (PTRACE_XFER_TYPE);
5150
5151 /* Allocate buffer of that many longwords. */
5152 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
5153 alloca (count * sizeof (PTRACE_XFER_TYPE));
5154
5155 int pid = lwpid_of (current_thread);
5156
5157 if (len == 0)
5158 {
5159 /* Zero length write always succeeds. */
5160 return 0;
5161 }
5162
5163 if (debug_threads)
5164 {
5165 /* Dump up to four bytes. */
5166 unsigned int val = * (unsigned int *) myaddr;
5167 if (len == 1)
5168 val = val & 0xff;
5169 else if (len == 2)
5170 val = val & 0xffff;
5171 else if (len == 3)
5172 val = val & 0xffffff;
5173 debug_printf ("Writing %0*x to 0x%08lx in process %d\n",
5174 2 * ((len < 4) ? len : 4), val, (long)memaddr, pid);
5175 }
5176
5177 /* Fill start and end extra bytes of buffer with existing memory data. */
5178
5179 errno = 0;
5180 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5181 about coercing an 8 byte integer to a 4 byte pointer. */
5182 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5183 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5184 (PTRACE_TYPE_ARG4) 0);
5185 if (errno)
5186 return errno;
5187
5188 if (count > 1)
5189 {
5190 errno = 0;
5191 buffer[count - 1]
5192 = ptrace (PTRACE_PEEKTEXT, pid,
5193 /* Coerce to a uintptr_t first to avoid potential gcc warning
5194 about coercing an 8 byte integer to a 4 byte pointer. */
5195 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5196 * sizeof (PTRACE_XFER_TYPE)),
5197 (PTRACE_TYPE_ARG4) 0);
5198 if (errno)
5199 return errno;
5200 }
5201
5202 /* Copy data to be written over corresponding part of buffer. */
5203
5204 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5205 myaddr, len);
5206
5207 /* Write the entire buffer. */
5208
5209 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5210 {
5211 errno = 0;
5212 ptrace (PTRACE_POKETEXT, pid,
5213 /* Coerce to a uintptr_t first to avoid potential gcc warning
5214 about coercing an 8 byte integer to a 4 byte pointer. */
5215 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5216 (PTRACE_TYPE_ARG4) buffer[i]);
5217 if (errno)
5218 return errno;
5219 }
5220
5221 return 0;
5222 }
5223
5224 static void
5225 linux_look_up_symbols (void)
5226 {
5227 #ifdef USE_THREAD_DB
5228 struct process_info *proc = current_process ();
5229
5230 if (proc->priv->thread_db != NULL)
5231 return;
5232
5233 /* If the kernel supports tracing clones, then we don't need to
5234 use the magic thread event breakpoint to learn about
5235 threads. */
5236 thread_db_init (!linux_supports_traceclone ());
5237 #endif
5238 }
5239
5240 static void
5241 linux_request_interrupt (void)
5242 {
5243 extern unsigned long signal_pid;
5244
5245 /* Send a SIGINT to the process group. This acts just like the user
5246 typed a ^C on the controlling terminal. */
5247 kill (-signal_pid, SIGINT);
5248 }
5249
5250 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5251 to debugger memory starting at MYADDR. */
5252
5253 static int
5254 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5255 {
5256 char filename[PATH_MAX];
5257 int fd, n;
5258 int pid = lwpid_of (current_thread);
5259
5260 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5261
5262 fd = open (filename, O_RDONLY);
5263 if (fd < 0)
5264 return -1;
5265
5266 if (offset != (CORE_ADDR) 0
5267 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5268 n = -1;
5269 else
5270 n = read (fd, myaddr, len);
5271
5272 close (fd);
5273
5274 return n;
5275 }
5276
5277 /* These breakpoint and watchpoint related wrapper functions simply
5278 pass on the function call if the target has registered a
5279 corresponding function. */
5280
5281 static int
5282 linux_supports_z_point_type (char z_type)
5283 {
5284 return (the_low_target.supports_z_point_type != NULL
5285 && the_low_target.supports_z_point_type (z_type));
5286 }
5287
5288 static int
5289 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5290 int size, struct raw_breakpoint *bp)
5291 {
5292 if (type == raw_bkpt_type_sw)
5293 return insert_memory_breakpoint (bp);
5294 else if (the_low_target.insert_point != NULL)
5295 return the_low_target.insert_point (type, addr, size, bp);
5296 else
5297 /* Unsupported (see target.h). */
5298 return 1;
5299 }
5300
5301 static int
5302 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5303 int size, struct raw_breakpoint *bp)
5304 {
5305 if (type == raw_bkpt_type_sw)
5306 return remove_memory_breakpoint (bp);
5307 else if (the_low_target.remove_point != NULL)
5308 return the_low_target.remove_point (type, addr, size, bp);
5309 else
5310 /* Unsupported (see target.h). */
5311 return 1;
5312 }
5313
5314 /* Implement the to_stopped_by_sw_breakpoint target_ops
5315 method. */
5316
5317 static int
5318 linux_stopped_by_sw_breakpoint (void)
5319 {
5320 struct lwp_info *lwp = get_thread_lwp (current_thread);
5321
5322 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5323 }
5324
5325 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5326 method. */
5327
5328 static int
5329 linux_supports_stopped_by_sw_breakpoint (void)
5330 {
5331 return USE_SIGTRAP_SIGINFO;
5332 }
5333
5334 /* Implement the to_stopped_by_hw_breakpoint target_ops
5335 method. */
5336
5337 static int
5338 linux_stopped_by_hw_breakpoint (void)
5339 {
5340 struct lwp_info *lwp = get_thread_lwp (current_thread);
5341
5342 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5343 }
5344
5345 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
5346 method. */
5347
5348 static int
5349 linux_supports_stopped_by_hw_breakpoint (void)
5350 {
5351 return USE_SIGTRAP_SIGINFO;
5352 }
5353
5354 /* Implement the supports_conditional_breakpoints target_ops
5355 method. */
5356
5357 static int
5358 linux_supports_conditional_breakpoints (void)
5359 {
5360 /* GDBserver needs to step over the breakpoint if the condition is
5361 false. GDBserver software single step is too simple, so disable
5362 conditional breakpoints if the target doesn't have hardware single
5363 step. */
5364 return can_hardware_single_step ();
5365 }
5366
5367 static int
5368 linux_stopped_by_watchpoint (void)
5369 {
5370 struct lwp_info *lwp = get_thread_lwp (current_thread);
5371
5372 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5373 }
5374
5375 static CORE_ADDR
5376 linux_stopped_data_address (void)
5377 {
5378 struct lwp_info *lwp = get_thread_lwp (current_thread);
5379
5380 return lwp->stopped_data_address;
5381 }
5382
5383 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5384 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5385 && defined(PT_TEXT_END_ADDR)
5386
5387 /* This is only used for targets that define PT_TEXT_ADDR,
5388 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
5389 the target has different ways of acquiring this information, like
5390 loadmaps. */
5391
5392 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5393 to tell gdb about. */
5394
5395 static int
5396 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5397 {
5398 unsigned long text, text_end, data;
5399 int pid = lwpid_of (current_thread);
5400
5401 errno = 0;
5402
5403 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5404 (PTRACE_TYPE_ARG4) 0);
5405 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5406 (PTRACE_TYPE_ARG4) 0);
5407 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5408 (PTRACE_TYPE_ARG4) 0);
5409
5410 if (errno == 0)
5411 {
5412 /* Both text and data offsets produced at compile-time (and so
5413 used by gdb) are relative to the beginning of the program,
5414 with the data segment immediately following the text segment.
5415 However, the actual runtime layout in memory may put the data
5416 somewhere else, so when we send gdb a data base-address, we
5417 use the real data base address and subtract the compile-time
5418 data base-address from it (which is just the length of the
5419 text segment). BSS immediately follows data in both
5420 cases. */
5421 *text_p = text;
5422 *data_p = data - (text_end - text);
5423
5424 return 1;
5425 }
5426 return 0;
5427 }
5428 #endif
5429
5430 static int
5431 linux_qxfer_osdata (const char *annex,
5432 unsigned char *readbuf, unsigned const char *writebuf,
5433 CORE_ADDR offset, int len)
5434 {
5435 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5436 }
5437
5438 /* Convert a native/host siginfo object, into/from the siginfo in the
5439 layout of the inferiors' architecture. */
5440
5441 static void
5442 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
5443 {
5444 int done = 0;
5445
5446 if (the_low_target.siginfo_fixup != NULL)
5447 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
5448
5449 /* If there was no callback, or the callback didn't do anything,
5450 then just do a straight memcpy. */
5451 if (!done)
5452 {
5453 if (direction == 1)
5454 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5455 else
5456 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5457 }
5458 }
5459
5460 static int
5461 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
5462 unsigned const char *writebuf, CORE_ADDR offset, int len)
5463 {
5464 int pid;
5465 siginfo_t siginfo;
5466 char inf_siginfo[sizeof (siginfo_t)];
5467
5468 if (current_thread == NULL)
5469 return -1;
5470
5471 pid = lwpid_of (current_thread);
5472
5473 if (debug_threads)
5474 debug_printf ("%s siginfo for lwp %d.\n",
5475 readbuf != NULL ? "Reading" : "Writing",
5476 pid);
5477
5478 if (offset >= sizeof (siginfo))
5479 return -1;
5480
5481 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5482 return -1;
5483
5484 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5485 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5486 inferior with a 64-bit GDBSERVER should look the same as debugging it
5487 with a 32-bit GDBSERVER, we need to convert it. */
5488 siginfo_fixup (&siginfo, inf_siginfo, 0);
5489
5490 if (offset + len > sizeof (siginfo))
5491 len = sizeof (siginfo) - offset;
5492
5493 if (readbuf != NULL)
5494 memcpy (readbuf, inf_siginfo + offset, len);
5495 else
5496 {
5497 memcpy (inf_siginfo + offset, writebuf, len);
5498
5499 /* Convert back to ptrace layout before flushing it out. */
5500 siginfo_fixup (&siginfo, inf_siginfo, 1);
5501
5502 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5503 return -1;
5504 }
5505
5506 return len;
5507 }
5508
5509 /* SIGCHLD handler that serves two purposes: In non-stop/async mode,
5510 so we notice when children change state; as the handler for the
5511 sigsuspend in my_waitpid. */
5512
5513 static void
5514 sigchld_handler (int signo)
5515 {
5516 int old_errno = errno;
5517
5518 if (debug_threads)
5519 {
5520 do
5521 {
5522 /* fprintf is not async-signal-safe, so call write
5523 directly. */
5524 if (write (2, "sigchld_handler\n",
5525 sizeof ("sigchld_handler\n") - 1) < 0)
5526 break; /* just ignore */
5527 } while (0);
5528 }
5529
5530 if (target_is_async_p ())
5531 async_file_mark (); /* trigger a linux_wait */
5532
5533 errno = old_errno;
5534 }
5535
5536 static int
5537 linux_supports_non_stop (void)
5538 {
5539 return 1;
5540 }
5541
5542 static int
5543 linux_async (int enable)
5544 {
5545 int previous = target_is_async_p ();
5546
5547 if (debug_threads)
5548 debug_printf ("linux_async (%d), previous=%d\n",
5549 enable, previous);
5550
5551 if (previous != enable)
5552 {
5553 sigset_t mask;
5554 sigemptyset (&mask);
5555 sigaddset (&mask, SIGCHLD);
5556
5557 sigprocmask (SIG_BLOCK, &mask, NULL);
5558
5559 if (enable)
5560 {
5561 if (pipe (linux_event_pipe) == -1)
5562 {
5563 linux_event_pipe[0] = -1;
5564 linux_event_pipe[1] = -1;
5565 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5566
5567 warning ("creating event pipe failed.");
5568 return previous;
5569 }
5570
5571 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5572 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5573
5574 /* Register the event loop handler. */
5575 add_file_handler (linux_event_pipe[0],
5576 handle_target_event, NULL);
5577
5578 /* Always trigger a linux_wait. */
5579 async_file_mark ();
5580 }
5581 else
5582 {
5583 delete_file_handler (linux_event_pipe[0]);
5584
5585 close (linux_event_pipe[0]);
5586 close (linux_event_pipe[1]);
5587 linux_event_pipe[0] = -1;
5588 linux_event_pipe[1] = -1;
5589 }
5590
5591 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5592 }
5593
5594 return previous;
5595 }
5596
5597 static int
5598 linux_start_non_stop (int nonstop)
5599 {
5600 /* Register or unregister from event-loop accordingly. */
5601 linux_async (nonstop);
5602
5603 if (target_is_async_p () != (nonstop != 0))
5604 return -1;
5605
5606 return 0;
5607 }
5608
5609 static int
5610 linux_supports_multi_process (void)
5611 {
5612 return 1;
5613 }
5614
5615 /* Check if fork events are supported. */
5616
5617 static int
5618 linux_supports_fork_events (void)
5619 {
5620 return linux_supports_tracefork ();
5621 }
5622
5623 /* Check if vfork events are supported. */
5624
5625 static int
5626 linux_supports_vfork_events (void)
5627 {
5628 return linux_supports_tracefork ();
5629 }
5630
5631 /* Callback for 'find_inferior'. Set the (possibly changed) ptrace
5632 options for the specified lwp. */
5633
5634 static int
5635 reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
5636 void *args)
5637 {
5638 struct thread_info *thread = (struct thread_info *) entry;
5639 struct lwp_info *lwp = get_thread_lwp (thread);
5640
5641 if (!lwp->stopped)
5642 {
5643 /* Stop the lwp so we can modify its ptrace options. */
5644 lwp->must_set_ptrace_flags = 1;
5645 linux_stop_lwp (lwp);
5646 }
5647 else
5648 {
5649 /* Already stopped; go ahead and set the ptrace options. */
5650 struct process_info *proc = find_process_pid (pid_of (thread));
5651 int options = linux_low_ptrace_options (proc->attached);
5652
5653 linux_enable_event_reporting (lwpid_of (thread), options);
5654 lwp->must_set_ptrace_flags = 0;
5655 }
5656
5657 return 0;
5658 }
5659
5660 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
5661 ptrace flags for all inferiors. This is in case the new GDB connection
5662 doesn't support the same set of events that the previous one did. */
5663
5664 static void
5665 linux_handle_new_gdb_connection (void)
5666 {
5667 pid_t pid;
5668
5669 /* Request that all the lwps reset their ptrace options. */
5670 find_inferior (&all_threads, reset_lwp_ptrace_options_callback , &pid);
5671 }
5672
5673 static int
5674 linux_supports_disable_randomization (void)
5675 {
5676 #ifdef HAVE_PERSONALITY
5677 return 1;
5678 #else
5679 return 0;
5680 #endif
5681 }
5682
5683 static int
5684 linux_supports_agent (void)
5685 {
5686 return 1;
5687 }
5688
5689 static int
5690 linux_supports_range_stepping (void)
5691 {
5692 if (*the_low_target.supports_range_stepping == NULL)
5693 return 0;
5694
5695 return (*the_low_target.supports_range_stepping) ();
5696 }
5697
5698 /* Enumerate spufs IDs for process PID. */
5699 static int
5700 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5701 {
5702 int pos = 0;
5703 int written = 0;
5704 char path[128];
5705 DIR *dir;
5706 struct dirent *entry;
5707
5708 sprintf (path, "/proc/%ld/fd", pid);
5709 dir = opendir (path);
5710 if (!dir)
5711 return -1;
5712
5713 rewinddir (dir);
5714 while ((entry = readdir (dir)) != NULL)
5715 {
5716 struct stat st;
5717 struct statfs stfs;
5718 int fd;
5719
5720 fd = atoi (entry->d_name);
5721 if (!fd)
5722 continue;
5723
5724 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5725 if (stat (path, &st) != 0)
5726 continue;
5727 if (!S_ISDIR (st.st_mode))
5728 continue;
5729
5730 if (statfs (path, &stfs) != 0)
5731 continue;
5732 if (stfs.f_type != SPUFS_MAGIC)
5733 continue;
5734
5735 if (pos >= offset && pos + 4 <= offset + len)
5736 {
5737 *(unsigned int *)(buf + pos - offset) = fd;
5738 written += 4;
5739 }
5740 pos += 4;
5741 }
5742
5743 closedir (dir);
5744 return written;
5745 }
5746
5747 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5748 object type, using the /proc file system. */
5749 static int
5750 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5751 unsigned const char *writebuf,
5752 CORE_ADDR offset, int len)
5753 {
5754 long pid = lwpid_of (current_thread);
5755 char buf[128];
5756 int fd = 0;
5757 int ret = 0;
5758
5759 if (!writebuf && !readbuf)
5760 return -1;
5761
5762 if (!*annex)
5763 {
5764 if (!readbuf)
5765 return -1;
5766 else
5767 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5768 }
5769
5770 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5771 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
5772 if (fd <= 0)
5773 return -1;
5774
5775 if (offset != 0
5776 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5777 {
5778 close (fd);
5779 return 0;
5780 }
5781
5782 if (writebuf)
5783 ret = write (fd, writebuf, (size_t) len);
5784 else
5785 ret = read (fd, readbuf, (size_t) len);
5786
5787 close (fd);
5788 return ret;
5789 }
5790
5791 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5792 struct target_loadseg
5793 {
5794 /* Core address to which the segment is mapped. */
5795 Elf32_Addr addr;
5796 /* VMA recorded in the program header. */
5797 Elf32_Addr p_vaddr;
5798 /* Size of this segment in memory. */
5799 Elf32_Word p_memsz;
5800 };
5801
5802 # if defined PT_GETDSBT
5803 struct target_loadmap
5804 {
5805 /* Protocol version number, must be zero. */
5806 Elf32_Word version;
5807 /* Pointer to the DSBT table, its size, and the DSBT index. */
5808 unsigned *dsbt_table;
5809 unsigned dsbt_size, dsbt_index;
5810 /* Number of segments in this map. */
5811 Elf32_Word nsegs;
5812 /* The actual memory map. */
5813 struct target_loadseg segs[/*nsegs*/];
5814 };
5815 # define LINUX_LOADMAP PT_GETDSBT
5816 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5817 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5818 # else
5819 struct target_loadmap
5820 {
5821 /* Protocol version number, must be zero. */
5822 Elf32_Half version;
5823 /* Number of segments in this map. */
5824 Elf32_Half nsegs;
5825 /* The actual memory map. */
5826 struct target_loadseg segs[/*nsegs*/];
5827 };
5828 # define LINUX_LOADMAP PTRACE_GETFDPIC
5829 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5830 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5831 # endif
5832
5833 static int
5834 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5835 unsigned char *myaddr, unsigned int len)
5836 {
5837 int pid = lwpid_of (current_thread);
5838 int addr = -1;
5839 struct target_loadmap *data = NULL;
5840 unsigned int actual_length, copy_length;
5841
5842 if (strcmp (annex, "exec") == 0)
5843 addr = (int) LINUX_LOADMAP_EXEC;
5844 else if (strcmp (annex, "interp") == 0)
5845 addr = (int) LINUX_LOADMAP_INTERP;
5846 else
5847 return -1;
5848
5849 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5850 return -1;
5851
5852 if (data == NULL)
5853 return -1;
5854
5855 actual_length = sizeof (struct target_loadmap)
5856 + sizeof (struct target_loadseg) * data->nsegs;
5857
5858 if (offset < 0 || offset > actual_length)
5859 return -1;
5860
5861 copy_length = actual_length - offset < len ? actual_length - offset : len;
5862 memcpy (myaddr, (char *) data + offset, copy_length);
5863 return copy_length;
5864 }
5865 #else
5866 # define linux_read_loadmap NULL
5867 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5868
5869 static void
5870 linux_process_qsupported (const char *query)
5871 {
5872 if (the_low_target.process_qsupported != NULL)
5873 the_low_target.process_qsupported (query);
5874 }
5875
5876 static int
5877 linux_supports_tracepoints (void)
5878 {
5879 if (*the_low_target.supports_tracepoints == NULL)
5880 return 0;
5881
5882 return (*the_low_target.supports_tracepoints) ();
5883 }
5884
5885 static CORE_ADDR
5886 linux_read_pc (struct regcache *regcache)
5887 {
5888 if (the_low_target.get_pc == NULL)
5889 return 0;
5890
5891 return (*the_low_target.get_pc) (regcache);
5892 }
5893
5894 static void
5895 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5896 {
5897 gdb_assert (the_low_target.set_pc != NULL);
5898
5899 (*the_low_target.set_pc) (regcache, pc);
5900 }
5901
5902 static int
5903 linux_thread_stopped (struct thread_info *thread)
5904 {
5905 return get_thread_lwp (thread)->stopped;
5906 }
5907
5908 /* This exposes stop-all-threads functionality to other modules. */
5909
5910 static void
5911 linux_pause_all (int freeze)
5912 {
5913 stop_all_lwps (freeze, NULL);
5914 }
5915
5916 /* This exposes unstop-all-threads functionality to other gdbserver
5917 modules. */
5918
5919 static void
5920 linux_unpause_all (int unfreeze)
5921 {
5922 unstop_all_lwps (unfreeze, NULL);
5923 }
5924
5925 static int
5926 linux_prepare_to_access_memory (void)
5927 {
5928 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5929 running LWP. */
5930 if (non_stop)
5931 linux_pause_all (1);
5932 return 0;
5933 }
5934
5935 static void
5936 linux_done_accessing_memory (void)
5937 {
5938 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5939 running LWP. */
5940 if (non_stop)
5941 linux_unpause_all (1);
5942 }
5943
5944 static int
5945 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5946 CORE_ADDR collector,
5947 CORE_ADDR lockaddr,
5948 ULONGEST orig_size,
5949 CORE_ADDR *jump_entry,
5950 CORE_ADDR *trampoline,
5951 ULONGEST *trampoline_size,
5952 unsigned char *jjump_pad_insn,
5953 ULONGEST *jjump_pad_insn_size,
5954 CORE_ADDR *adjusted_insn_addr,
5955 CORE_ADDR *adjusted_insn_addr_end,
5956 char *err)
5957 {
5958 return (*the_low_target.install_fast_tracepoint_jump_pad)
5959 (tpoint, tpaddr, collector, lockaddr, orig_size,
5960 jump_entry, trampoline, trampoline_size,
5961 jjump_pad_insn, jjump_pad_insn_size,
5962 adjusted_insn_addr, adjusted_insn_addr_end,
5963 err);
5964 }
5965
5966 static struct emit_ops *
5967 linux_emit_ops (void)
5968 {
5969 if (the_low_target.emit_ops != NULL)
5970 return (*the_low_target.emit_ops) ();
5971 else
5972 return NULL;
5973 }
5974
5975 static int
5976 linux_get_min_fast_tracepoint_insn_len (void)
5977 {
5978 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5979 }
5980
5981 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5982
5983 static int
5984 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5985 CORE_ADDR *phdr_memaddr, int *num_phdr)
5986 {
5987 char filename[PATH_MAX];
5988 int fd;
5989 const int auxv_size = is_elf64
5990 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5991 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5992
5993 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5994
5995 fd = open (filename, O_RDONLY);
5996 if (fd < 0)
5997 return 1;
5998
5999 *phdr_memaddr = 0;
6000 *num_phdr = 0;
6001 while (read (fd, buf, auxv_size) == auxv_size
6002 && (*phdr_memaddr == 0 || *num_phdr == 0))
6003 {
6004 if (is_elf64)
6005 {
6006 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6007
6008 switch (aux->a_type)
6009 {
6010 case AT_PHDR:
6011 *phdr_memaddr = aux->a_un.a_val;
6012 break;
6013 case AT_PHNUM:
6014 *num_phdr = aux->a_un.a_val;
6015 break;
6016 }
6017 }
6018 else
6019 {
6020 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6021
6022 switch (aux->a_type)
6023 {
6024 case AT_PHDR:
6025 *phdr_memaddr = aux->a_un.a_val;
6026 break;
6027 case AT_PHNUM:
6028 *num_phdr = aux->a_un.a_val;
6029 break;
6030 }
6031 }
6032 }
6033
6034 close (fd);
6035
6036 if (*phdr_memaddr == 0 || *num_phdr == 0)
6037 {
6038 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6039 "phdr_memaddr = %ld, phdr_num = %d",
6040 (long) *phdr_memaddr, *num_phdr);
6041 return 2;
6042 }
6043
6044 return 0;
6045 }
6046
6047 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6048
6049 static CORE_ADDR
6050 get_dynamic (const int pid, const int is_elf64)
6051 {
6052 CORE_ADDR phdr_memaddr, relocation;
6053 int num_phdr, i;
6054 unsigned char *phdr_buf;
6055 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6056
6057 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6058 return 0;
6059
6060 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6061 phdr_buf = alloca (num_phdr * phdr_size);
6062
6063 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6064 return 0;
6065
6066 /* Compute relocation: it is expected to be 0 for "regular" executables,
6067 non-zero for PIE ones. */
6068 relocation = -1;
6069 for (i = 0; relocation == -1 && i < num_phdr; i++)
6070 if (is_elf64)
6071 {
6072 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6073
6074 if (p->p_type == PT_PHDR)
6075 relocation = phdr_memaddr - p->p_vaddr;
6076 }
6077 else
6078 {
6079 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6080
6081 if (p->p_type == PT_PHDR)
6082 relocation = phdr_memaddr - p->p_vaddr;
6083 }
6084
6085 if (relocation == -1)
6086 {
6087 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
6088 any real world executables, including PIE executables, have always
6089 PT_PHDR present. PT_PHDR is not present in some shared libraries or
6090 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
6091 or present DT_DEBUG anyway (fpc binaries are statically linked).
6092
6093 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
6094
6095 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
6096
6097 return 0;
6098 }
6099
6100 for (i = 0; i < num_phdr; i++)
6101 {
6102 if (is_elf64)
6103 {
6104 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6105
6106 if (p->p_type == PT_DYNAMIC)
6107 return p->p_vaddr + relocation;
6108 }
6109 else
6110 {
6111 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6112
6113 if (p->p_type == PT_DYNAMIC)
6114 return p->p_vaddr + relocation;
6115 }
6116 }
6117
6118 return 0;
6119 }
6120
6121 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6122 can be 0 if the inferior does not yet have the library list initialized.
6123 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6124 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6125
6126 static CORE_ADDR
6127 get_r_debug (const int pid, const int is_elf64)
6128 {
6129 CORE_ADDR dynamic_memaddr;
6130 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6131 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6132 CORE_ADDR map = -1;
6133
6134 dynamic_memaddr = get_dynamic (pid, is_elf64);
6135 if (dynamic_memaddr == 0)
6136 return map;
6137
6138 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6139 {
6140 if (is_elf64)
6141 {
6142 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6143 #ifdef DT_MIPS_RLD_MAP
6144 union
6145 {
6146 Elf64_Xword map;
6147 unsigned char buf[sizeof (Elf64_Xword)];
6148 }
6149 rld_map;
6150
6151 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6152 {
6153 if (linux_read_memory (dyn->d_un.d_val,
6154 rld_map.buf, sizeof (rld_map.buf)) == 0)
6155 return rld_map.map;
6156 else
6157 break;
6158 }
6159 #endif /* DT_MIPS_RLD_MAP */
6160
6161 if (dyn->d_tag == DT_DEBUG && map == -1)
6162 map = dyn->d_un.d_val;
6163
6164 if (dyn->d_tag == DT_NULL)
6165 break;
6166 }
6167 else
6168 {
6169 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6170 #ifdef DT_MIPS_RLD_MAP
6171 union
6172 {
6173 Elf32_Word map;
6174 unsigned char buf[sizeof (Elf32_Word)];
6175 }
6176 rld_map;
6177
6178 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6179 {
6180 if (linux_read_memory (dyn->d_un.d_val,
6181 rld_map.buf, sizeof (rld_map.buf)) == 0)
6182 return rld_map.map;
6183 else
6184 break;
6185 }
6186 #endif /* DT_MIPS_RLD_MAP */
6187
6188 if (dyn->d_tag == DT_DEBUG && map == -1)
6189 map = dyn->d_un.d_val;
6190
6191 if (dyn->d_tag == DT_NULL)
6192 break;
6193 }
6194
6195 dynamic_memaddr += dyn_size;
6196 }
6197
6198 return map;
6199 }
6200
6201 /* Read one pointer from MEMADDR in the inferior. */
6202
6203 static int
6204 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6205 {
6206 int ret;
6207
6208 /* Go through a union so this works on either big or little endian
6209 hosts, when the inferior's pointer size is smaller than the size
6210 of CORE_ADDR. It is assumed the inferior's endianness is the
6211 same of the superior's. */
6212 union
6213 {
6214 CORE_ADDR core_addr;
6215 unsigned int ui;
6216 unsigned char uc;
6217 } addr;
6218
6219 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6220 if (ret == 0)
6221 {
6222 if (ptr_size == sizeof (CORE_ADDR))
6223 *ptr = addr.core_addr;
6224 else if (ptr_size == sizeof (unsigned int))
6225 *ptr = addr.ui;
6226 else
6227 gdb_assert_not_reached ("unhandled pointer size");
6228 }
6229 return ret;
6230 }
6231
6232 struct link_map_offsets
6233 {
6234 /* Offset and size of r_debug.r_version. */
6235 int r_version_offset;
6236
6237 /* Offset and size of r_debug.r_map. */
6238 int r_map_offset;
6239
6240 /* Offset to l_addr field in struct link_map. */
6241 int l_addr_offset;
6242
6243 /* Offset to l_name field in struct link_map. */
6244 int l_name_offset;
6245
6246 /* Offset to l_ld field in struct link_map. */
6247 int l_ld_offset;
6248
6249 /* Offset to l_next field in struct link_map. */
6250 int l_next_offset;
6251
6252 /* Offset to l_prev field in struct link_map. */
6253 int l_prev_offset;
6254 };
6255
6256 /* Construct qXfer:libraries-svr4:read reply. */
6257
6258 static int
6259 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6260 unsigned const char *writebuf,
6261 CORE_ADDR offset, int len)
6262 {
6263 char *document;
6264 unsigned document_len;
6265 struct process_info_private *const priv = current_process ()->priv;
6266 char filename[PATH_MAX];
6267 int pid, is_elf64;
6268
6269 static const struct link_map_offsets lmo_32bit_offsets =
6270 {
6271 0, /* r_version offset. */
6272 4, /* r_debug.r_map offset. */
6273 0, /* l_addr offset in link_map. */
6274 4, /* l_name offset in link_map. */
6275 8, /* l_ld offset in link_map. */
6276 12, /* l_next offset in link_map. */
6277 16 /* l_prev offset in link_map. */
6278 };
6279
6280 static const struct link_map_offsets lmo_64bit_offsets =
6281 {
6282 0, /* r_version offset. */
6283 8, /* r_debug.r_map offset. */
6284 0, /* l_addr offset in link_map. */
6285 8, /* l_name offset in link_map. */
6286 16, /* l_ld offset in link_map. */
6287 24, /* l_next offset in link_map. */
6288 32 /* l_prev offset in link_map. */
6289 };
6290 const struct link_map_offsets *lmo;
6291 unsigned int machine;
6292 int ptr_size;
6293 CORE_ADDR lm_addr = 0, lm_prev = 0;
6294 int allocated = 1024;
6295 char *p;
6296 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6297 int header_done = 0;
6298
6299 if (writebuf != NULL)
6300 return -2;
6301 if (readbuf == NULL)
6302 return -1;
6303
6304 pid = lwpid_of (current_thread);
6305 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6306 is_elf64 = elf_64_file_p (filename, &machine);
6307 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6308 ptr_size = is_elf64 ? 8 : 4;
6309
6310 while (annex[0] != '\0')
6311 {
6312 const char *sep;
6313 CORE_ADDR *addrp;
6314 int len;
6315
6316 sep = strchr (annex, '=');
6317 if (sep == NULL)
6318 break;
6319
6320 len = sep - annex;
6321 if (len == 5 && startswith (annex, "start"))
6322 addrp = &lm_addr;
6323 else if (len == 4 && startswith (annex, "prev"))
6324 addrp = &lm_prev;
6325 else
6326 {
6327 annex = strchr (sep, ';');
6328 if (annex == NULL)
6329 break;
6330 annex++;
6331 continue;
6332 }
6333
6334 annex = decode_address_to_semicolon (addrp, sep + 1);
6335 }
6336
6337 if (lm_addr == 0)
6338 {
6339 int r_version = 0;
6340
6341 if (priv->r_debug == 0)
6342 priv->r_debug = get_r_debug (pid, is_elf64);
6343
6344 /* We failed to find DT_DEBUG. Such situation will not change
6345 for this inferior - do not retry it. Report it to GDB as
6346 E01, see for the reasons at the GDB solib-svr4.c side. */
6347 if (priv->r_debug == (CORE_ADDR) -1)
6348 return -1;
6349
6350 if (priv->r_debug != 0)
6351 {
6352 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6353 (unsigned char *) &r_version,
6354 sizeof (r_version)) != 0
6355 || r_version != 1)
6356 {
6357 warning ("unexpected r_debug version %d", r_version);
6358 }
6359 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6360 &lm_addr, ptr_size) != 0)
6361 {
6362 warning ("unable to read r_map from 0x%lx",
6363 (long) priv->r_debug + lmo->r_map_offset);
6364 }
6365 }
6366 }
6367
6368 document = xmalloc (allocated);
6369 strcpy (document, "<library-list-svr4 version=\"1.0\"");
6370 p = document + strlen (document);
6371
6372 while (lm_addr
6373 && read_one_ptr (lm_addr + lmo->l_name_offset,
6374 &l_name, ptr_size) == 0
6375 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6376 &l_addr, ptr_size) == 0
6377 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6378 &l_ld, ptr_size) == 0
6379 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6380 &l_prev, ptr_size) == 0
6381 && read_one_ptr (lm_addr + lmo->l_next_offset,
6382 &l_next, ptr_size) == 0)
6383 {
6384 unsigned char libname[PATH_MAX];
6385
6386 if (lm_prev != l_prev)
6387 {
6388 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6389 (long) lm_prev, (long) l_prev);
6390 break;
6391 }
6392
6393 /* Ignore the first entry even if it has valid name as the first entry
6394 corresponds to the main executable. The first entry should not be
6395 skipped if the dynamic loader was loaded late by a static executable
6396 (see solib-svr4.c parameter ignore_first). But in such case the main
6397 executable does not have PT_DYNAMIC present and this function already
6398 exited above due to failed get_r_debug. */
6399 if (lm_prev == 0)
6400 {
6401 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6402 p = p + strlen (p);
6403 }
6404 else
6405 {
6406 /* Not checking for error because reading may stop before
6407 we've got PATH_MAX worth of characters. */
6408 libname[0] = '\0';
6409 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6410 libname[sizeof (libname) - 1] = '\0';
6411 if (libname[0] != '\0')
6412 {
6413 /* 6x the size for xml_escape_text below. */
6414 size_t len = 6 * strlen ((char *) libname);
6415 char *name;
6416
6417 if (!header_done)
6418 {
6419 /* Terminate `<library-list-svr4'. */
6420 *p++ = '>';
6421 header_done = 1;
6422 }
6423
6424 while (allocated < p - document + len + 200)
6425 {
6426 /* Expand to guarantee sufficient storage. */
6427 uintptr_t document_len = p - document;
6428
6429 document = xrealloc (document, 2 * allocated);
6430 allocated *= 2;
6431 p = document + document_len;
6432 }
6433
6434 name = xml_escape_text ((char *) libname);
6435 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
6436 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6437 name, (unsigned long) lm_addr,
6438 (unsigned long) l_addr, (unsigned long) l_ld);
6439 free (name);
6440 }
6441 }
6442
6443 lm_prev = lm_addr;
6444 lm_addr = l_next;
6445 }
6446
6447 if (!header_done)
6448 {
6449 /* Empty list; terminate `<library-list-svr4'. */
6450 strcpy (p, "/>");
6451 }
6452 else
6453 strcpy (p, "</library-list-svr4>");
6454
6455 document_len = strlen (document);
6456 if (offset < document_len)
6457 document_len -= offset;
6458 else
6459 document_len = 0;
6460 if (len > document_len)
6461 len = document_len;
6462
6463 memcpy (readbuf, document + offset, len);
6464 xfree (document);
6465
6466 return len;
6467 }
6468
6469 #ifdef HAVE_LINUX_BTRACE
6470
6471 /* See to_enable_btrace target method. */
6472
6473 static struct btrace_target_info *
6474 linux_low_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
6475 {
6476 struct btrace_target_info *tinfo;
6477
6478 tinfo = linux_enable_btrace (ptid, conf);
6479
6480 if (tinfo != NULL && tinfo->ptr_bits == 0)
6481 {
6482 struct thread_info *thread = find_thread_ptid (ptid);
6483 struct regcache *regcache = get_thread_regcache (thread, 0);
6484
6485 tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
6486 }
6487
6488 return tinfo;
6489 }
6490
6491 /* See to_disable_btrace target method. */
6492
6493 static int
6494 linux_low_disable_btrace (struct btrace_target_info *tinfo)
6495 {
6496 enum btrace_error err;
6497
6498 err = linux_disable_btrace (tinfo);
6499 return (err == BTRACE_ERR_NONE ? 0 : -1);
6500 }
6501
6502 /* Encode an Intel(R) Processor Trace configuration. */
6503
6504 static void
6505 linux_low_encode_pt_config (struct buffer *buffer,
6506 const struct btrace_data_pt_config *config)
6507 {
6508 buffer_grow_str (buffer, "<pt-config>\n");
6509
6510 switch (config->cpu.vendor)
6511 {
6512 case CV_INTEL:
6513 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6514 "model=\"%u\" stepping=\"%u\"/>\n",
6515 config->cpu.family, config->cpu.model,
6516 config->cpu.stepping);
6517 break;
6518
6519 default:
6520 break;
6521 }
6522
6523 buffer_grow_str (buffer, "</pt-config>\n");
6524 }
6525
6526 /* Encode a raw buffer. */
6527
6528 static void
6529 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
6530 unsigned int size)
6531 {
6532 if (size == 0)
6533 return;
6534
6535 /* We use hex encoding - see common/rsp-low.h. */
6536 buffer_grow_str (buffer, "<raw>\n");
6537
6538 while (size-- > 0)
6539 {
6540 char elem[2];
6541
6542 elem[0] = tohex ((*data >> 4) & 0xf);
6543 elem[1] = tohex (*data++ & 0xf);
6544
6545 buffer_grow (buffer, elem, 2);
6546 }
6547
6548 buffer_grow_str (buffer, "</raw>\n");
6549 }
6550
6551 /* See to_read_btrace target method. */
6552
6553 static int
6554 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
6555 int type)
6556 {
6557 struct btrace_data btrace;
6558 struct btrace_block *block;
6559 enum btrace_error err;
6560 int i;
6561
6562 btrace_data_init (&btrace);
6563
6564 err = linux_read_btrace (&btrace, tinfo, type);
6565 if (err != BTRACE_ERR_NONE)
6566 {
6567 if (err == BTRACE_ERR_OVERFLOW)
6568 buffer_grow_str0 (buffer, "E.Overflow.");
6569 else
6570 buffer_grow_str0 (buffer, "E.Generic Error.");
6571
6572 goto err;
6573 }
6574
6575 switch (btrace.format)
6576 {
6577 case BTRACE_FORMAT_NONE:
6578 buffer_grow_str0 (buffer, "E.No Trace.");
6579 goto err;
6580
6581 case BTRACE_FORMAT_BTS:
6582 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6583 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6584
6585 for (i = 0;
6586 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
6587 i++)
6588 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6589 paddress (block->begin), paddress (block->end));
6590
6591 buffer_grow_str0 (buffer, "</btrace>\n");
6592 break;
6593
6594 case BTRACE_FORMAT_PT:
6595 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6596 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6597 buffer_grow_str (buffer, "<pt>\n");
6598
6599 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
6600
6601 linux_low_encode_raw (buffer, btrace.variant.pt.data,
6602 btrace.variant.pt.size);
6603
6604 buffer_grow_str (buffer, "</pt>\n");
6605 buffer_grow_str0 (buffer, "</btrace>\n");
6606 break;
6607
6608 default:
6609 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
6610 goto err;
6611 }
6612
6613 btrace_data_fini (&btrace);
6614 return 0;
6615
6616 err:
6617 btrace_data_fini (&btrace);
6618 return -1;
6619 }
6620
6621 /* See to_btrace_conf target method. */
6622
6623 static int
6624 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
6625 struct buffer *buffer)
6626 {
6627 const struct btrace_config *conf;
6628
6629 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
6630 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
6631
6632 conf = linux_btrace_conf (tinfo);
6633 if (conf != NULL)
6634 {
6635 switch (conf->format)
6636 {
6637 case BTRACE_FORMAT_NONE:
6638 break;
6639
6640 case BTRACE_FORMAT_BTS:
6641 buffer_xml_printf (buffer, "<bts");
6642 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
6643 buffer_xml_printf (buffer, " />\n");
6644 break;
6645
6646 case BTRACE_FORMAT_PT:
6647 buffer_xml_printf (buffer, "<pt");
6648 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
6649 buffer_xml_printf (buffer, "/>\n");
6650 break;
6651 }
6652 }
6653
6654 buffer_grow_str0 (buffer, "</btrace-conf>\n");
6655 return 0;
6656 }
6657 #endif /* HAVE_LINUX_BTRACE */
6658
6659 /* See nat/linux-nat.h. */
6660
6661 ptid_t
6662 current_lwp_ptid (void)
6663 {
6664 return ptid_of (current_thread);
6665 }
6666
6667 static struct target_ops linux_target_ops = {
6668 linux_create_inferior,
6669 linux_arch_setup,
6670 linux_attach,
6671 linux_kill,
6672 linux_detach,
6673 linux_mourn,
6674 linux_join,
6675 linux_thread_alive,
6676 linux_resume,
6677 linux_wait,
6678 linux_fetch_registers,
6679 linux_store_registers,
6680 linux_prepare_to_access_memory,
6681 linux_done_accessing_memory,
6682 linux_read_memory,
6683 linux_write_memory,
6684 linux_look_up_symbols,
6685 linux_request_interrupt,
6686 linux_read_auxv,
6687 linux_supports_z_point_type,
6688 linux_insert_point,
6689 linux_remove_point,
6690 linux_stopped_by_sw_breakpoint,
6691 linux_supports_stopped_by_sw_breakpoint,
6692 linux_stopped_by_hw_breakpoint,
6693 linux_supports_stopped_by_hw_breakpoint,
6694 linux_supports_conditional_breakpoints,
6695 linux_stopped_by_watchpoint,
6696 linux_stopped_data_address,
6697 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6698 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6699 && defined(PT_TEXT_END_ADDR)
6700 linux_read_offsets,
6701 #else
6702 NULL,
6703 #endif
6704 #ifdef USE_THREAD_DB
6705 thread_db_get_tls_address,
6706 #else
6707 NULL,
6708 #endif
6709 linux_qxfer_spu,
6710 hostio_last_error_from_errno,
6711 linux_qxfer_osdata,
6712 linux_xfer_siginfo,
6713 linux_supports_non_stop,
6714 linux_async,
6715 linux_start_non_stop,
6716 linux_supports_multi_process,
6717 linux_supports_fork_events,
6718 linux_supports_vfork_events,
6719 linux_handle_new_gdb_connection,
6720 #ifdef USE_THREAD_DB
6721 thread_db_handle_monitor_command,
6722 #else
6723 NULL,
6724 #endif
6725 linux_common_core_of_thread,
6726 linux_read_loadmap,
6727 linux_process_qsupported,
6728 linux_supports_tracepoints,
6729 linux_read_pc,
6730 linux_write_pc,
6731 linux_thread_stopped,
6732 NULL,
6733 linux_pause_all,
6734 linux_unpause_all,
6735 linux_stabilize_threads,
6736 linux_install_fast_tracepoint_jump_pad,
6737 linux_emit_ops,
6738 linux_supports_disable_randomization,
6739 linux_get_min_fast_tracepoint_insn_len,
6740 linux_qxfer_libraries_svr4,
6741 linux_supports_agent,
6742 #ifdef HAVE_LINUX_BTRACE
6743 linux_supports_btrace,
6744 linux_low_enable_btrace,
6745 linux_low_disable_btrace,
6746 linux_low_read_btrace,
6747 linux_low_btrace_conf,
6748 #else
6749 NULL,
6750 NULL,
6751 NULL,
6752 NULL,
6753 NULL,
6754 #endif
6755 linux_supports_range_stepping,
6756 linux_proc_pid_to_exec_file,
6757 linux_mntns_open_cloexec,
6758 linux_mntns_unlink,
6759 linux_mntns_readlink,
6760 };
6761
6762 static void
6763 linux_init_signals ()
6764 {
6765 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
6766 to find what the cancel signal actually is. */
6767 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
6768 signal (__SIGRTMIN+1, SIG_IGN);
6769 #endif
6770 }
6771
6772 #ifdef HAVE_LINUX_REGSETS
6773 void
6774 initialize_regsets_info (struct regsets_info *info)
6775 {
6776 for (info->num_regsets = 0;
6777 info->regsets[info->num_regsets].size >= 0;
6778 info->num_regsets++)
6779 ;
6780 }
6781 #endif
6782
6783 void
6784 initialize_low (void)
6785 {
6786 struct sigaction sigchld_action;
6787 memset (&sigchld_action, 0, sizeof (sigchld_action));
6788 set_target_ops (&linux_target_ops);
6789 set_breakpoint_data (the_low_target.breakpoint,
6790 the_low_target.breakpoint_len);
6791 linux_init_signals ();
6792 linux_ptrace_init_warnings ();
6793
6794 sigchld_action.sa_handler = sigchld_handler;
6795 sigemptyset (&sigchld_action.sa_mask);
6796 sigchld_action.sa_flags = SA_RESTART;
6797 sigaction (SIGCHLD, &sigchld_action, NULL);
6798
6799 initialize_low_arch ();
6800
6801 linux_check_ptrace_features ();
6802 }