Linux gdbserver confused when event randomization picks process exit event
gdb/gdbserver/linux-low.c (binutils-gdb.git)
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2015 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
22 #include "agent.h"
23 #include "tdesc.h"
24 #include "rsp-low.h"
25
26 #include "nat/linux-nat.h"
27 #include "nat/linux-waitpid.h"
28 #include "gdb_wait.h"
29 #include "nat/gdb_ptrace.h"
30 #include "nat/linux-ptrace.h"
31 #include "nat/linux-procfs.h"
32 #include "nat/linux-personality.h"
33 #include <signal.h>
34 #include <sys/ioctl.h>
35 #include <fcntl.h>
36 #include <unistd.h>
37 #include <sys/syscall.h>
38 #include <sched.h>
39 #include <ctype.h>
40 #include <pwd.h>
41 #include <sys/types.h>
42 #include <dirent.h>
43 #include <sys/stat.h>
44 #include <sys/vfs.h>
45 #include <sys/uio.h>
46 #include "filestuff.h"
47 #include "tracepoint.h"
48 #include "hostio.h"
49 #ifndef ELFMAG0
50 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
51 then ELFMAG0 will have been defined. If it didn't get included by
52 gdb_proc_service.h then including it will likely introduce a duplicate
53 definition of elf_fpregset_t. */
54 #include <elf.h>
55 #endif
56 #include "nat/linux-namespaces.h"
57
58 #ifndef SPUFS_MAGIC
59 #define SPUFS_MAGIC 0x23c9b64e
60 #endif
61
62 #ifdef HAVE_PERSONALITY
63 # include <sys/personality.h>
64 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
65 # define ADDR_NO_RANDOMIZE 0x0040000
66 # endif
67 #endif
68
69 #ifndef O_LARGEFILE
70 #define O_LARGEFILE 0
71 #endif
72
73 #ifndef W_STOPCODE
74 #define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
75 #endif
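/* For illustration: W_STOPCODE (SIGSTOP) yields 0x137f (SIGSTOP is 19,
   i.e. 0x13), a status for which WIFSTOPPED is true and WSTOPSIG
   recovers SIGSTOP.  dequeue_one_deferred_signal below relies on this
   round trip.  */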
76
77 /* This is the kernel's hard limit. Not to be confused with
78 SIGRTMIN. */
79 #ifndef __SIGRTMIN
80 #define __SIGRTMIN 32
81 #endif
82
83 /* Some targets did not define these ptrace constants from the start,
84 so gdbserver defines them locally here. In the future, these may
85 be removed after they are added to asm/ptrace.h. */
86 #if !(defined(PT_TEXT_ADDR) \
87 || defined(PT_DATA_ADDR) \
88 || defined(PT_TEXT_END_ADDR))
89 #if defined(__mcoldfire__)
90 /* These are still undefined in 3.10 kernels. */
91 #define PT_TEXT_ADDR 49*4
92 #define PT_DATA_ADDR 50*4
93 #define PT_TEXT_END_ADDR 51*4
94 /* BFIN already defines these since at least 2.6.32 kernels. */
95 #elif defined(BFIN)
96 #define PT_TEXT_ADDR 220
97 #define PT_TEXT_END_ADDR 224
98 #define PT_DATA_ADDR 228
99 /* These are still undefined in 3.10 kernels. */
100 #elif defined(__TMS320C6X__)
101 #define PT_TEXT_ADDR (0x10000*4)
102 #define PT_DATA_ADDR (0x10004*4)
103 #define PT_TEXT_END_ADDR (0x10008*4)
104 #endif
105 #endif
106
107 #ifdef HAVE_LINUX_BTRACE
108 # include "nat/linux-btrace.h"
109 # include "btrace-common.h"
110 #endif
111
112 #ifndef HAVE_ELF32_AUXV_T
113 /* Copied from glibc's elf.h. */
114 typedef struct
115 {
116 uint32_t a_type; /* Entry type */
117 union
118 {
119 uint32_t a_val; /* Integer value */
120        /* We used to have pointer elements added here.  We cannot do that,
121 though, since it does not work when using 32-bit definitions
122 on 64-bit platforms and vice versa. */
123 } a_un;
124 } Elf32_auxv_t;
125 #endif
126
127 #ifndef HAVE_ELF64_AUXV_T
128 /* Copied from glibc's elf.h. */
129 typedef struct
130 {
131 uint64_t a_type; /* Entry type */
132 union
133 {
134 uint64_t a_val; /* Integer value */
135        /* We used to have pointer elements added here.  We cannot do that,
136 though, since it does not work when using 32-bit definitions
137 on 64-bit platforms and vice versa. */
138 } a_un;
139 } Elf64_auxv_t;
140 #endif
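/* Illustrative sketch (not built): the fixed-width auxv types above let
   the same code handle an inferior whose word size differs from
   gdbserver's.  This hypothetical helper assumes a 64-bit inferior and
   scans /proc/PID/auxv for an entry such as AT_PHDR.  */
#if 0
static uint64_t
example_auxv_lookup (int pid, uint64_t type)
{
  char filename[64];
  Elf64_auxv_t entry;
  uint64_t value = 0;
  int fd;

  sprintf (filename, "/proc/%d/auxv", pid);
  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return 0;

  /* Each record is a fixed-size (a_type, a_val) pair; AT_NULL ends
     the vector.  */
  while (read (fd, &entry, sizeof (entry)) == sizeof (entry)
	 && entry.a_type != AT_NULL)
    if (entry.a_type == type)
      {
	value = entry.a_un.a_val;
	break;
      }

  close (fd);
  return value;
}
#endif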
141
142 /* Does the current host support PTRACE_GETREGSET? */
143 int have_ptrace_getregset = -1;
144
145 /* LWP accessors. */
146
147 /* See nat/linux-nat.h. */
148
149 ptid_t
150 ptid_of_lwp (struct lwp_info *lwp)
151 {
152 return ptid_of (get_lwp_thread (lwp));
153 }
154
155 /* See nat/linux-nat.h. */
156
157 void
158 lwp_set_arch_private_info (struct lwp_info *lwp,
159 struct arch_lwp_info *info)
160 {
161 lwp->arch_private = info;
162 }
163
164 /* See nat/linux-nat.h. */
165
166 struct arch_lwp_info *
167 lwp_arch_private_info (struct lwp_info *lwp)
168 {
169 return lwp->arch_private;
170 }
171
172 /* See nat/linux-nat.h. */
173
174 int
175 lwp_is_stopped (struct lwp_info *lwp)
176 {
177 return lwp->stopped;
178 }
179
180 /* See nat/linux-nat.h. */
181
182 enum target_stop_reason
183 lwp_stop_reason (struct lwp_info *lwp)
184 {
185 return lwp->stop_reason;
186 }
187
188 /* A list of all unknown processes which receive stop signals. Some
189 other process will presumably claim each of these as forked
190 children momentarily. */
191
192 struct simple_pid_list
193 {
194 /* The process ID. */
195 int pid;
196
197 /* The status as reported by waitpid. */
198 int status;
199
200 /* Next in chain. */
201 struct simple_pid_list *next;
202 };
203 struct simple_pid_list *stopped_pids;
204
205 /* Trivial list manipulation functions to keep track of a list of new
206 stopped processes. */
207
208 static void
209 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
210 {
211 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
212
213 new_pid->pid = pid;
214 new_pid->status = status;
215 new_pid->next = *listp;
216 *listp = new_pid;
217 }
218
219 static int
220 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
221 {
222 struct simple_pid_list **p;
223
224 for (p = listp; *p != NULL; p = &(*p)->next)
225 if ((*p)->pid == pid)
226 {
227 struct simple_pid_list *next = (*p)->next;
228
229 *statusp = (*p)->status;
230 xfree (*p);
231 *p = next;
232 return 1;
233 }
234 return 0;
235 }
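/* Illustrative sketch (not built): when the wait machinery sees a stop
   for a PID it does not know yet, the status is parked on STOPPED_PIDS
   and later claimed by handle_extended_wait.  The PID and status below
   are hypothetical.  */
#if 0
  int status;

  /* Seen a stop for unknown PID 1234:  */
  add_to_pid_list (&stopped_pids, 1234, W_STOPCODE (SIGSTOP));

  /* Later, when the parent's fork/clone event is processed:  */
  if (pull_pid_from_list (&stopped_pids, 1234, &status))
    gdb_assert (WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP);
#endif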
236
237 enum stopping_threads_kind
238 {
239 /* Not stopping threads presently. */
240 NOT_STOPPING_THREADS,
241
242 /* Stopping threads. */
243 STOPPING_THREADS,
244
245 /* Stopping and suspending threads. */
246 STOPPING_AND_SUSPENDING_THREADS
247 };
248
249 /* This is set while stop_all_lwps is in effect. */
250 enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
251
252 /* FIXME make into a target method? */
253 int using_threads = 1;
254
255 /* True if we're presently stabilizing threads (moving them out of
256 jump pads). */
257 static int stabilizing_threads;
258
259 static void linux_resume_one_lwp (struct lwp_info *lwp,
260 int step, int signal, siginfo_t *info);
261 static void linux_resume (struct thread_resume *resume_info, size_t n);
262 static void stop_all_lwps (int suspend, struct lwp_info *except);
263 static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
264 static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
265 int *wstat, int options);
266 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
267 static struct lwp_info *add_lwp (ptid_t ptid);
268 static int linux_stopped_by_watchpoint (void);
269 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
270 static int lwp_is_marked_dead (struct lwp_info *lwp);
271 static void proceed_all_lwps (void);
272 static int finish_step_over (struct lwp_info *lwp);
273 static int kill_lwp (unsigned long lwpid, int signo);
274
275 /* When the event-loop is doing a step-over, this points at the thread
276 being stepped. */
277 ptid_t step_over_bkpt;
278
279 /* True if the low target can hardware single-step. Such targets
280 don't need a BREAKPOINT_REINSERT_ADDR callback. */
281
282 static int
283 can_hardware_single_step (void)
284 {
285 return (the_low_target.breakpoint_reinsert_addr == NULL);
286 }
287
288 /* True if the low target supports memory breakpoints. If so, we'll
289 have a GET_PC implementation. */
290
291 static int
292 supports_breakpoints (void)
293 {
294 return (the_low_target.get_pc != NULL);
295 }
296
297 /* Returns true if this target can support fast tracepoints. This
298 does not mean that the in-process agent has been loaded in the
299 inferior. */
300
301 static int
302 supports_fast_tracepoints (void)
303 {
304 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
305 }
306
307 /* True if LWP is stopped in its stepping range. */
308
309 static int
310 lwp_in_step_range (struct lwp_info *lwp)
311 {
312 CORE_ADDR pc = lwp->stop_pc;
313
314 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
315 }
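/* For illustration, with hypothetical addresses: given
   step_range_start == 0x1000 and step_range_end == 0x1010, a stop at
   pc 0x100c is within the range, while a stop at 0x1010 is not (the
   end address is exclusive).  */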
316
317 struct pending_signals
318 {
319 int signal;
320 siginfo_t info;
321 struct pending_signals *prev;
322 };
323
324 /* The read/write ends of the pipe registered as waitable file in the
325 event loop. */
326 static int linux_event_pipe[2] = { -1, -1 };
327
328 /* True if we're currently in async mode. */
329 #define target_is_async_p() (linux_event_pipe[0] != -1)
330
331 static void send_sigstop (struct lwp_info *lwp);
332 static void wait_for_sigstop (void);
333
334 /* Return 1 if HEADER is a 64-bit ELF file, 0 if 32-bit, -1 if not ELF.  */
335
336 static int
337 elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
338 {
339 if (header->e_ident[EI_MAG0] == ELFMAG0
340 && header->e_ident[EI_MAG1] == ELFMAG1
341 && header->e_ident[EI_MAG2] == ELFMAG2
342 && header->e_ident[EI_MAG3] == ELFMAG3)
343 {
344 *machine = header->e_machine;
345 return header->e_ident[EI_CLASS] == ELFCLASS64;
346
347 }
348 *machine = EM_NONE;
349 return -1;
350 }
351
352 /* Return 1 if FILE is a 64-bit ELF file,
353    zero if the file is a 32-bit ELF file or could not be fully read,
354    and -1 if the file is not accessible, doesn't exist, or is not ELF.  */
355
356 static int
357 elf_64_file_p (const char *file, unsigned int *machine)
358 {
359 Elf64_Ehdr header;
360 int fd;
361
362 fd = open (file, O_RDONLY);
363 if (fd < 0)
364 return -1;
365
366 if (read (fd, &header, sizeof (header)) != sizeof (header))
367 {
368 close (fd);
369 return 0;
370 }
371 close (fd);
372
373 return elf_64_header_p (&header, machine);
374 }
375
376 /* Accepts an integer PID; returns true if the executable that PID
377    is running is a 64-bit ELF file.  */
378
379 int
380 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
381 {
382 char file[PATH_MAX];
383
384 sprintf (file, "/proc/%d/exe", pid);
385 return elf_64_file_p (file, machine);
386 }
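/* Illustrative sketch (not built): distinguishing the three return
   values documented above, with PID an attached inferior's id.  */
#if 0
  unsigned int machine;
  int res = linux_pid_exe_is_elf_64_file (pid, &machine);

  if (res < 0)
    ;	/* /proc/PID/exe unreadable, or not an ELF image at all.  */
  else if (res)
    ;	/* 64-bit inferior; MACHINE holds e_machine, e.g. EM_X86_64.  */
  else
    ;	/* 32-bit inferior.  */
#endif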
387
388 static void
389 delete_lwp (struct lwp_info *lwp)
390 {
391 struct thread_info *thr = get_lwp_thread (lwp);
392
393 if (debug_threads)
394 debug_printf ("deleting %ld\n", lwpid_of (thr));
395
396 remove_thread (thr);
397 free (lwp->arch_private);
398 free (lwp);
399 }
400
401 /* Add a process to the common process list, and set its private
402 data. */
403
404 static struct process_info *
405 linux_add_process (int pid, int attached)
406 {
407 struct process_info *proc;
408
409 proc = add_process (pid, attached);
410 proc->priv = xcalloc (1, sizeof (*proc->priv));
411
412 if (the_low_target.new_process != NULL)
413 proc->priv->arch_private = the_low_target.new_process ();
414
415 return proc;
416 }
417
418 static CORE_ADDR get_pc (struct lwp_info *lwp);
419
420 /* Handle a GNU/Linux extended wait response. If we see a clone
421 event, we need to add the new LWP to our list (and return 0 so as
422 not to report the trap to higher layers). */
423
424 static int
425 handle_extended_wait (struct lwp_info *event_lwp, int wstat)
426 {
427 int event = linux_ptrace_get_extended_event (wstat);
428 struct thread_info *event_thr = get_lwp_thread (event_lwp);
429 struct lwp_info *new_lwp;
430
431 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
432 || (event == PTRACE_EVENT_CLONE))
433 {
434 ptid_t ptid;
435 unsigned long new_pid;
436 int ret, status;
437
438 /* Get the pid of the new lwp. */
439 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
440 &new_pid);
441
442 /* If we haven't already seen the new PID stop, wait for it now. */
443 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
444 {
445 /* The new child has a pending SIGSTOP. We can't affect it until it
446 hits the SIGSTOP, but we're already attached. */
447
448 ret = my_waitpid (new_pid, &status, __WALL);
449
450 if (ret == -1)
451 perror_with_name ("waiting for new child");
452 else if (ret != new_pid)
453 warning ("wait returned unexpected PID %d", ret);
454 else if (!WIFSTOPPED (status))
455 warning ("wait returned unexpected status 0x%x", status);
456 }
457
458 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
459 {
460 struct process_info *parent_proc;
461 struct process_info *child_proc;
462 struct lwp_info *child_lwp;
463 struct thread_info *child_thr;
464 struct target_desc *tdesc;
465
466 ptid = ptid_build (new_pid, new_pid, 0);
467
468 if (debug_threads)
469 {
470 debug_printf ("HEW: Got fork event from LWP %ld, "
471 "new child is %d\n",
472 ptid_get_lwp (ptid_of (event_thr)),
473 ptid_get_pid (ptid));
474 }
475
476 /* Add the new process to the tables and clone the breakpoint
477 lists of the parent. We need to do this even if the new process
478 will be detached, since we will need the process object and the
479 breakpoints to remove any breakpoints from memory when we
480 detach, and the client side will access registers. */
481 child_proc = linux_add_process (new_pid, 0);
482 gdb_assert (child_proc != NULL);
483 child_lwp = add_lwp (ptid);
484 gdb_assert (child_lwp != NULL);
485 child_lwp->stopped = 1;
486 child_lwp->must_set_ptrace_flags = 1;
487 child_lwp->status_pending_p = 0;
488 child_thr = get_lwp_thread (child_lwp);
489 child_thr->last_resume_kind = resume_stop;
490 child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;
491
492 parent_proc = get_thread_process (event_thr);
493 child_proc->attached = parent_proc->attached;
494 clone_all_breakpoints (&child_proc->breakpoints,
495 &child_proc->raw_breakpoints,
496 parent_proc->breakpoints);
497
498 tdesc = xmalloc (sizeof (struct target_desc));
499 copy_target_description (tdesc, parent_proc->tdesc);
500 child_proc->tdesc = tdesc;
501
502 /* Clone arch-specific process data. */
503 if (the_low_target.new_fork != NULL)
504 the_low_target.new_fork (parent_proc, child_proc);
505
506 /* Save fork info in the parent thread. */
507 if (event == PTRACE_EVENT_FORK)
508 event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
509 else if (event == PTRACE_EVENT_VFORK)
510 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;
511
512 event_lwp->waitstatus.value.related_pid = ptid;
513
514 /* The status_pending field contains bits denoting the
515 extended event, so when the pending event is handled,
516 the handler will look at lwp->waitstatus. */
517 event_lwp->status_pending_p = 1;
518 event_lwp->status_pending = wstat;
519
520 /* Report the event. */
521 return 0;
522 }
523
524 if (debug_threads)
525 debug_printf ("HEW: Got clone event "
526 "from LWP %ld, new child is LWP %ld\n",
527 lwpid_of (event_thr), new_pid);
528
529 ptid = ptid_build (pid_of (event_thr), new_pid, 0);
530 new_lwp = add_lwp (ptid);
531
532 /* Either we're going to immediately resume the new thread
533 or leave it stopped. linux_resume_one_lwp is a nop if it
534 thinks the thread is currently running, so set this first
535 before calling linux_resume_one_lwp. */
536 new_lwp->stopped = 1;
537
538 /* If we're suspending all threads, leave this one suspended
539 too. */
540 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
541 new_lwp->suspended = 1;
542
543 /* Normally we will get the pending SIGSTOP. But in some cases
544 we might get another signal delivered to the group first.
545 If we do get another signal, be sure not to lose it. */
546 if (WSTOPSIG (status) != SIGSTOP)
547 {
548 new_lwp->stop_expected = 1;
549 new_lwp->status_pending_p = 1;
550 new_lwp->status_pending = status;
551 }
552
553 /* Don't report the event. */
554 return 1;
555 }
556 else if (event == PTRACE_EVENT_VFORK_DONE)
557 {
558 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
559
560 /* Report the event. */
561 return 0;
562 }
563
564 internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
565 }
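/* Illustrative sketch (not built): the extended events handled above
   are only reported once the corresponding PTRACE_O_* options are set
   on the LWP; the kernel then encodes the event in the upper bits of
   the wait status, which is what linux_ptrace_get_extended_event
   extracts (essentially WSTAT >> 16).  */
#if 0
  int wstat;
  unsigned long new_pid;

  ptrace (PTRACE_SETOPTIONS, lwpid, (PTRACE_TYPE_ARG3) 0,
	  (PTRACE_TYPE_ARG4) (PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK
			      | PTRACE_O_TRACECLONE));

  my_waitpid (lwpid, &wstat, __WALL);
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
      && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_CLONE)
    /* A new LWP was created; its id arrives via PTRACE_GETEVENTMSG.  */
    ptrace (PTRACE_GETEVENTMSG, lwpid, (PTRACE_TYPE_ARG3) 0, &new_pid);
#endif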
566
567 /* Return the PC as read from the regcache of LWP, without any
568 adjustment. */
569
570 static CORE_ADDR
571 get_pc (struct lwp_info *lwp)
572 {
573 struct thread_info *saved_thread;
574 struct regcache *regcache;
575 CORE_ADDR pc;
576
577 if (the_low_target.get_pc == NULL)
578 return 0;
579
580 saved_thread = current_thread;
581 current_thread = get_lwp_thread (lwp);
582
583 regcache = get_thread_regcache (current_thread, 1);
584 pc = (*the_low_target.get_pc) (regcache);
585
586 if (debug_threads)
587 debug_printf ("pc is 0x%lx\n", (long) pc);
588
589 current_thread = saved_thread;
590 return pc;
591 }
592
593 /* This function should only be called if LWP got a SIGTRAP.
594 The SIGTRAP could mean several things.
595
596 On i386, where decr_pc_after_break is non-zero:
597
598 If we were single-stepping this process using PTRACE_SINGLESTEP, we
599 will get only the one SIGTRAP. The value of $eip will be the next
600 instruction. If the instruction we stepped over was a breakpoint,
601 we need to decrement the PC.
602
603 If we continue the process using PTRACE_CONT, we will get a
604 SIGTRAP when we hit a breakpoint. The value of $eip will be
605 the instruction after the breakpoint (i.e. needs to be
606 decremented). If we report the SIGTRAP to GDB, we must also
607 report the undecremented PC. If the breakpoint is removed, we
608 must resume at the decremented PC.
609
610 On a non-decr_pc_after_break machine with hardware or kernel
611 single-step:
612
613 If we either single-step a breakpoint instruction, or continue and
614 hit a breakpoint instruction, our PC will point at the breakpoint
615 instruction. */
616
617 static int
618 check_stopped_by_breakpoint (struct lwp_info *lwp)
619 {
620 CORE_ADDR pc;
621 CORE_ADDR sw_breakpoint_pc;
622 struct thread_info *saved_thread;
623 #if USE_SIGTRAP_SIGINFO
624 siginfo_t siginfo;
625 #endif
626
627 if (the_low_target.get_pc == NULL)
628 return 0;
629
630 pc = get_pc (lwp);
631 sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;
632
633 /* breakpoint_at reads from the current thread. */
634 saved_thread = current_thread;
635 current_thread = get_lwp_thread (lwp);
636
637 #if USE_SIGTRAP_SIGINFO
638 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
639 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
640 {
641 if (siginfo.si_signo == SIGTRAP)
642 {
643 if (siginfo.si_code == GDB_ARCH_TRAP_BRKPT)
644 {
645 if (debug_threads)
646 {
647 struct thread_info *thr = get_lwp_thread (lwp);
648
649 debug_printf ("CSBB: %s stopped by software breakpoint\n",
650 target_pid_to_str (ptid_of (thr)));
651 }
652
653 /* Back up the PC if necessary. */
654 if (pc != sw_breakpoint_pc)
655 {
656 struct regcache *regcache
657 = get_thread_regcache (current_thread, 1);
658 (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
659 }
660
661 lwp->stop_pc = sw_breakpoint_pc;
662 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
663 current_thread = saved_thread;
664 return 1;
665 }
666 else if (siginfo.si_code == TRAP_HWBKPT)
667 {
668 if (debug_threads)
669 {
670 struct thread_info *thr = get_lwp_thread (lwp);
671
672 debug_printf ("CSBB: %s stopped by hardware "
673 "breakpoint/watchpoint\n",
674 target_pid_to_str (ptid_of (thr)));
675 }
676
677 lwp->stop_pc = pc;
678 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
679 current_thread = saved_thread;
680 return 1;
681 }
682 else if (siginfo.si_code == TRAP_TRACE)
683 {
684 if (debug_threads)
685 {
686 struct thread_info *thr = get_lwp_thread (lwp);
687
688 debug_printf ("CSBB: %s stopped by trace\n",
689 target_pid_to_str (ptid_of (thr)));
690 }
691 }
692 }
693 }
694 #else
695 /* We may have just stepped a breakpoint instruction. E.g., in
696 non-stop mode, GDB first tells the thread A to step a range, and
697 then the user inserts a breakpoint inside the range. In that
698 case we need to report the breakpoint PC. */
699 if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
700 && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
701 {
702 if (debug_threads)
703 {
704 struct thread_info *thr = get_lwp_thread (lwp);
705
706 debug_printf ("CSBB: %s stopped by software breakpoint\n",
707 target_pid_to_str (ptid_of (thr)));
708 }
709
710 /* Back up the PC if necessary. */
711 if (pc != sw_breakpoint_pc)
712 {
713 struct regcache *regcache
714 = get_thread_regcache (current_thread, 1);
715 (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
716 }
717
718 lwp->stop_pc = sw_breakpoint_pc;
719 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
720 current_thread = saved_thread;
721 return 1;
722 }
723
724 if (hardware_breakpoint_inserted_here (pc))
725 {
726 if (debug_threads)
727 {
728 struct thread_info *thr = get_lwp_thread (lwp);
729
730 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
731 target_pid_to_str (ptid_of (thr)));
732 }
733
734 lwp->stop_pc = pc;
735 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
736 current_thread = saved_thread;
737 return 1;
738 }
739 #endif
740
741 current_thread = saved_thread;
742 return 0;
743 }
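/* Worked example for the comment above: on i386, decr_pc_after_break
   is 1 (the size of the int3 instruction).  With a breakpoint at the
   hypothetical address 0x8048500, the SIGTRAP stop leaves $eip at
   0x8048501; the code above rewinds it to 0x8048500 so the stop is
   reported at the breakpoint address.  */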
744
745 static struct lwp_info *
746 add_lwp (ptid_t ptid)
747 {
748 struct lwp_info *lwp;
749
750 lwp = (struct lwp_info *) xcalloc (1, sizeof (*lwp));
751
752 lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
753
754 if (the_low_target.new_thread != NULL)
755 the_low_target.new_thread (lwp);
756
757 lwp->thread = add_thread (ptid, lwp);
758
759 return lwp;
760 }
761
762 /* Start an inferior process and return its pid.
763 ALLARGS is a vector of program-name and args. */
764
765 static int
766 linux_create_inferior (char *program, char **allargs)
767 {
768 struct lwp_info *new_lwp;
769 int pid;
770 ptid_t ptid;
771 struct cleanup *restore_personality
772 = maybe_disable_address_space_randomization (disable_randomization);
773
774 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
775 pid = vfork ();
776 #else
777 pid = fork ();
778 #endif
779 if (pid < 0)
780 perror_with_name ("fork");
781
782 if (pid == 0)
783 {
784 close_most_fds ();
785 ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
786
787 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
788 signal (__SIGRTMIN + 1, SIG_DFL);
789 #endif
790
791 setpgid (0, 0);
792
793 /* If gdbserver is connected to gdb via stdio, redirect the inferior's
794 stdout to stderr so that inferior i/o doesn't corrupt the connection.
795 Also, redirect stdin to /dev/null. */
796 if (remote_connection_is_stdio ())
797 {
798 close (0);
799 open ("/dev/null", O_RDONLY);
800 dup2 (2, 1);
801 if (write (2, "stdin/stdout redirected\n",
802 sizeof ("stdin/stdout redirected\n") - 1) < 0)
803 {
804 /* Errors ignored. */;
805 }
806 }
807
808 execv (program, allargs);
809 if (errno == ENOENT)
810 execvp (program, allargs);
811
812 fprintf (stderr, "Cannot exec %s: %s.\n", program,
813 strerror (errno));
814 fflush (stderr);
815 _exit (0177);
816 }
817
818 do_cleanups (restore_personality);
819
820 linux_add_process (pid, 0);
821
822 ptid = ptid_build (pid, pid, 0);
823 new_lwp = add_lwp (ptid);
824 new_lwp->must_set_ptrace_flags = 1;
825
826 return pid;
827 }
828
829 /* Implement the arch_setup target_ops method. */
830
831 static void
832 linux_arch_setup (void)
833 {
834 the_low_target.arch_setup ();
835 }
836
837 /* Attach to an inferior process. Returns 0 on success, ERRNO on
838 error. */
839
840 int
841 linux_attach_lwp (ptid_t ptid)
842 {
843 struct lwp_info *new_lwp;
844 int lwpid = ptid_get_lwp (ptid);
845
846 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
847 != 0)
848 return errno;
849
850 new_lwp = add_lwp (ptid);
851
852 /* We need to wait for SIGSTOP before being able to make the next
853 ptrace call on this LWP. */
854 new_lwp->must_set_ptrace_flags = 1;
855
856 if (linux_proc_pid_is_stopped (lwpid))
857 {
858 if (debug_threads)
859 debug_printf ("Attached to a stopped process\n");
860
861 /* The process is definitely stopped. It is in a job control
862 stop, unless the kernel predates the TASK_STOPPED /
863 TASK_TRACED distinction, in which case it might be in a
864 ptrace stop. Make sure it is in a ptrace stop; from there we
865 can kill it, signal it, et cetera.
866
867 First make sure there is a pending SIGSTOP. Since we are
868 already attached, the process can not transition from stopped
869 to running without a PTRACE_CONT; so we know this signal will
870 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
871 probably already in the queue (unless this kernel is old
872 enough to use TASK_STOPPED for ptrace stops); but since
873 SIGSTOP is not an RT signal, it can only be queued once. */
874 kill_lwp (lwpid, SIGSTOP);
875
876 /* Finally, resume the stopped process. This will deliver the
877 SIGSTOP (or a higher priority signal, just like normal
878 PTRACE_ATTACH), which we'll catch later on. */
879 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
880 }
881
882 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
883 brings it to a halt.
884
885 There are several cases to consider here:
886
887 1) gdbserver has already attached to the process and is being notified
888 of a new thread that is being created.
889 In this case we should ignore that SIGSTOP and resume the
890 process. This is handled below by setting stop_expected = 1,
891 and the fact that add_thread sets last_resume_kind ==
892 resume_continue.
893
894 2) This is the first thread (the process thread), and we're attaching
895 to it via attach_inferior.
896 In this case we want the process thread to stop.
897 This is handled by having linux_attach set last_resume_kind ==
898 resume_stop after we return.
899
900 If the pid we are attaching to is also the tgid, we attach to and
901 stop all the existing threads. Otherwise, we attach to pid and
902 ignore any other threads in the same group as this pid.
903
904 3) GDB is connecting to gdbserver and is requesting an enumeration of all
905 existing threads.
906 In this case we want the thread to stop.
907 FIXME: This case is currently not properly handled.
908 We should wait for the SIGSTOP but don't. Things work apparently
909 because enough time passes between when we ptrace (ATTACH) and when
910 gdb makes the next ptrace call on the thread.
911
912 On the other hand, if we are currently trying to stop all threads, we
913 should treat the new thread as if we had sent it a SIGSTOP. This works
914 because we are guaranteed that the add_lwp call above added us to the
915 end of the list, and so the new thread has not yet reached
916 wait_for_sigstop (but will). */
917 new_lwp->stop_expected = 1;
918
919 return 0;
920 }
921
922 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
923 already attached. Returns true if a new LWP is found, false
924 otherwise. */
925
926 static int
927 attach_proc_task_lwp_callback (ptid_t ptid)
928 {
929 /* Is this a new thread? */
930 if (find_thread_ptid (ptid) == NULL)
931 {
932 int lwpid = ptid_get_lwp (ptid);
933 int err;
934
935 if (debug_threads)
936 debug_printf ("Found new lwp %d\n", lwpid);
937
938 err = linux_attach_lwp (ptid);
939
940 /* Be quiet if we simply raced with the thread exiting. EPERM
941 is returned if the thread's task still exists, and is marked
942 as exited or zombie, as well as other conditions, so in that
943 case, confirm the status in /proc/PID/status. */
944 if (err == ESRCH
945 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
946 {
947 if (debug_threads)
948 {
949 debug_printf ("Cannot attach to lwp %d: "
950 "thread is gone (%d: %s)\n",
951 lwpid, err, strerror (err));
952 }
953 }
954 else if (err != 0)
955 {
956 warning (_("Cannot attach to lwp %d: %s"),
957 lwpid,
958 linux_ptrace_attach_fail_reason_string (ptid, err));
959 }
960
961 return 1;
962 }
963 return 0;
964 }
965
966 /* Attach to PID. If PID is the tgid, attach to it and all
967 of its threads. */
968
969 static int
970 linux_attach (unsigned long pid)
971 {
972 ptid_t ptid = ptid_build (pid, pid, 0);
973 int err;
974
975 /* Attach to PID. We will check for other threads
976 soon. */
977 err = linux_attach_lwp (ptid);
978 if (err != 0)
979 error ("Cannot attach to process %ld: %s",
980 pid, linux_ptrace_attach_fail_reason_string (ptid, err));
981
982 linux_add_process (pid, 1);
983
984 if (!non_stop)
985 {
986 struct thread_info *thread;
987
988 /* Don't ignore the initial SIGSTOP if we just attached to this
989 process. It will be collected by wait shortly. */
990 thread = find_thread_ptid (ptid_build (pid, pid, 0));
991 thread->last_resume_kind = resume_stop;
992 }
993
994 /* We must attach to every LWP. If /proc is mounted, use that to
995 find them now. On the one hand, the inferior may be using raw
996 clone instead of using pthreads. On the other hand, even if it
997 is using pthreads, GDB may not be connected yet (thread_db needs
998 to do symbol lookups, through qSymbol). Also, thread_db walks
999 structures in the inferior's address space to find the list of
1000 threads/LWPs, and those structures may well be corrupted. Note
1001 that once thread_db is loaded, we'll still use it to list threads
1002 and associate pthread info with each LWP. */
1003 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
1004 return 0;
1005 }
1006
1007 struct counter
1008 {
1009 int pid;
1010 int count;
1011 };
1012
1013 static int
1014 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
1015 {
1016 struct counter *counter = args;
1017
1018 if (ptid_get_pid (entry->id) == counter->pid)
1019 {
1020 if (++counter->count > 1)
1021 return 1;
1022 }
1023
1024 return 0;
1025 }
1026
1027 static int
1028 last_thread_of_process_p (int pid)
1029 {
1030 struct counter counter = { pid , 0 };
1031
1032 return (find_inferior (&all_threads,
1033 second_thread_of_pid_p, &counter) == NULL);
1034 }
1035
1036 /* Kill LWP. */
1037
1038 static void
1039 linux_kill_one_lwp (struct lwp_info *lwp)
1040 {
1041 struct thread_info *thr = get_lwp_thread (lwp);
1042 int pid = lwpid_of (thr);
1043
1044 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1045 there is no signal context, and ptrace(PTRACE_KILL) (or
1046 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1047 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1048 alternative is to kill with SIGKILL. We only need one SIGKILL
1049 per process, not one for each thread. But since we still support
1050 linuxthreads, and we also support debugging programs using raw
1051 clone without CLONE_THREAD, we send one for each thread. For
1052 years, we used PTRACE_KILL only, so we're being a bit paranoid
1053 about some old kernels where PTRACE_KILL might work better
1054 (dubious if there are any such, but that's why it's paranoia), so
1055 we try SIGKILL first, PTRACE_KILL second, and so we're fine
1056 everywhere. */
1057
1058 errno = 0;
1059 kill_lwp (pid, SIGKILL);
1060 if (debug_threads)
1061 {
1062 int save_errno = errno;
1063
1064 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
1065 target_pid_to_str (ptid_of (thr)),
1066 save_errno ? strerror (save_errno) : "OK");
1067 }
1068
1069 errno = 0;
1070 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1071 if (debug_threads)
1072 {
1073 int save_errno = errno;
1074
1075 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
1076 target_pid_to_str (ptid_of (thr)),
1077 save_errno ? strerror (save_errno) : "OK");
1078 }
1079 }
1080
1081 /* Kill LWP and wait for it to die. */
1082
1083 static void
1084 kill_wait_lwp (struct lwp_info *lwp)
1085 {
1086 struct thread_info *thr = get_lwp_thread (lwp);
1087 int pid = ptid_get_pid (ptid_of (thr));
1088 int lwpid = ptid_get_lwp (ptid_of (thr));
1089 int wstat;
1090 int res;
1091
1092 if (debug_threads)
1093 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);
1094
1095 do
1096 {
1097 linux_kill_one_lwp (lwp);
1098
1099 /* Make sure it died. Notes:
1100
1101 - The loop is most likely unnecessary.
1102
1103 - We don't use linux_wait_for_event as that could delete lwps
1104 while we're iterating over them. We're not interested in
1105 any pending status at this point, only in making sure all
1106 wait status on the kernel side are collected until the
1107 process is reaped.
1108
1109 - We don't use __WALL here as the __WALL emulation relies on
1110 SIGCHLD, and killing a stopped process doesn't generate
1111 one, nor an exit status.
1112 */
1113 res = my_waitpid (lwpid, &wstat, 0);
1114 if (res == -1 && errno == ECHILD)
1115 res = my_waitpid (lwpid, &wstat, __WCLONE);
1116 } while (res > 0 && WIFSTOPPED (wstat));
1117
1118 /* Even if it was stopped, the child may have already disappeared.
1119 E.g., if it was killed by SIGKILL. */
1120 if (res < 0 && errno != ECHILD)
1121 perror_with_name ("kill_wait_lwp");
1122 }
1123
1124 /* Callback for `find_inferior'. Kills an lwp of a given process,
1125 except the leader. */
1126
1127 static int
1128 kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
1129 {
1130 struct thread_info *thread = (struct thread_info *) entry;
1131 struct lwp_info *lwp = get_thread_lwp (thread);
1132 int pid = * (int *) args;
1133
1134 if (ptid_get_pid (entry->id) != pid)
1135 return 0;
1136
1137 /* We avoid killing the first thread here, because of a Linux kernel (at
1138 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1139 the children get a chance to be reaped, it will remain a zombie
1140 forever. */
1141
1142 if (lwpid_of (thread) == pid)
1143 {
1144 if (debug_threads)
1145 debug_printf ("lkop: is last of process %s\n",
1146 target_pid_to_str (entry->id));
1147 return 0;
1148 }
1149
1150 kill_wait_lwp (lwp);
1151 return 0;
1152 }
1153
1154 static int
1155 linux_kill (int pid)
1156 {
1157 struct process_info *process;
1158 struct lwp_info *lwp;
1159
1160 process = find_process_pid (pid);
1161 if (process == NULL)
1162 return -1;
1163
1164 /* If we're killing a running inferior, make sure it is stopped
1165 first, as PTRACE_KILL will not work otherwise. */
1166 stop_all_lwps (0, NULL);
1167
1168 find_inferior (&all_threads, kill_one_lwp_callback , &pid);
1169
1170 /* See the comment in linux_kill_one_lwp. We did not kill the first
1171 thread in the list, so do so now. */
1172 lwp = find_lwp_pid (pid_to_ptid (pid));
1173
1174 if (lwp == NULL)
1175 {
1176 if (debug_threads)
1177 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1178 pid);
1179 }
1180 else
1181 kill_wait_lwp (lwp);
1182
1183 the_target->mourn (process);
1184
1185 /* Since we presently can only stop all lwps of all processes, we
1186 need to unstop lwps of other processes. */
1187 unstop_all_lwps (0, NULL);
1188 return 0;
1189 }
1190
1191 /* Get pending signal of THREAD, for detaching purposes. This is the
1192 signal the thread last stopped for, which we need to deliver to the
1193    thread when detaching; otherwise, it'd be suppressed/lost.  */
1194
1195 static int
1196 get_detach_signal (struct thread_info *thread)
1197 {
1198 enum gdb_signal signo = GDB_SIGNAL_0;
1199 int status;
1200 struct lwp_info *lp = get_thread_lwp (thread);
1201
1202 if (lp->status_pending_p)
1203 status = lp->status_pending;
1204 else
1205 {
1206 /* If the thread had been suspended by gdbserver, and it stopped
1207 cleanly, then it'll have stopped with SIGSTOP. But we don't
1208 want to deliver that SIGSTOP. */
1209 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
1210 || thread->last_status.value.sig == GDB_SIGNAL_0)
1211 return 0;
1212
1213 /* Otherwise, we may need to deliver the signal we
1214 intercepted. */
1215 status = lp->last_status;
1216 }
1217
1218 if (!WIFSTOPPED (status))
1219 {
1220 if (debug_threads)
1221 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1222 target_pid_to_str (ptid_of (thread)));
1223 return 0;
1224 }
1225
1226 /* Extended wait statuses aren't real SIGTRAPs. */
1227 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
1228 {
1229 if (debug_threads)
1230 debug_printf ("GPS: lwp %s had stopped with extended "
1231 "status: no pending signal\n",
1232 target_pid_to_str (ptid_of (thread)));
1233 return 0;
1234 }
1235
1236 signo = gdb_signal_from_host (WSTOPSIG (status));
1237
1238 if (program_signals_p && !program_signals[signo])
1239 {
1240 if (debug_threads)
1241 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1242 target_pid_to_str (ptid_of (thread)),
1243 gdb_signal_to_string (signo));
1244 return 0;
1245 }
1246 else if (!program_signals_p
1247 /* If we have no way to know which signals GDB does not
1248 want to have passed to the program, assume
1249 SIGTRAP/SIGINT, which is GDB's default. */
1250 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1251 {
1252 if (debug_threads)
1253 debug_printf ("GPS: lwp %s had signal %s, "
1254 "but we don't know if we should pass it. "
1255 "Default to not.\n",
1256 target_pid_to_str (ptid_of (thread)),
1257 gdb_signal_to_string (signo));
1258 return 0;
1259 }
1260 else
1261 {
1262 if (debug_threads)
1263 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1264 target_pid_to_str (ptid_of (thread)),
1265 gdb_signal_to_string (signo));
1266
1267 return WSTOPSIG (status);
1268 }
1269 }
1270
1271 static int
1272 linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
1273 {
1274 struct thread_info *thread = (struct thread_info *) entry;
1275 struct lwp_info *lwp = get_thread_lwp (thread);
1276 int pid = * (int *) args;
1277 int sig;
1278
1279 if (ptid_get_pid (entry->id) != pid)
1280 return 0;
1281
1282 /* If there is a pending SIGSTOP, get rid of it. */
1283 if (lwp->stop_expected)
1284 {
1285 if (debug_threads)
1286 debug_printf ("Sending SIGCONT to %s\n",
1287 target_pid_to_str (ptid_of (thread)));
1288
1289 kill_lwp (lwpid_of (thread), SIGCONT);
1290 lwp->stop_expected = 0;
1291 }
1292
1293 /* Flush any pending changes to the process's registers. */
1294 regcache_invalidate_thread (thread);
1295
1296 /* Pass on any pending signal for this thread. */
1297 sig = get_detach_signal (thread);
1298
1299 /* Finally, let it resume. */
1300 if (the_low_target.prepare_to_resume != NULL)
1301 the_low_target.prepare_to_resume (lwp);
1302 if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1303 (PTRACE_TYPE_ARG4) (long) sig) < 0)
1304 error (_("Can't detach %s: %s"),
1305 target_pid_to_str (ptid_of (thread)),
1306 strerror (errno));
1307
1308 delete_lwp (lwp);
1309 return 0;
1310 }
1311
1312 static int
1313 linux_detach (int pid)
1314 {
1315 struct process_info *process;
1316
1317 process = find_process_pid (pid);
1318 if (process == NULL)
1319 return -1;
1320
1321 /* Stop all threads before detaching. First, ptrace requires that
1322      the thread is stopped to successfully detach.  Second, thread_db
1323 may need to uninstall thread event breakpoints from memory, which
1324 only works with a stopped process anyway. */
1325 stop_all_lwps (0, NULL);
1326
1327 #ifdef USE_THREAD_DB
1328 thread_db_detach (process);
1329 #endif
1330
1331 /* Stabilize threads (move out of jump pads). */
1332 stabilize_threads ();
1333
1334 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
1335
1336 the_target->mourn (process);
1337
1338 /* Since we presently can only stop all lwps of all processes, we
1339 need to unstop lwps of other processes. */
1340 unstop_all_lwps (0, NULL);
1341 return 0;
1342 }
1343
1344 /* Remove all LWPs that belong to process PROC from the lwp list. */
1345
1346 static int
1347 delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
1348 {
1349 struct thread_info *thread = (struct thread_info *) entry;
1350 struct lwp_info *lwp = get_thread_lwp (thread);
1351 struct process_info *process = proc;
1352
1353 if (pid_of (thread) == pid_of (process))
1354 delete_lwp (lwp);
1355
1356 return 0;
1357 }
1358
1359 static void
1360 linux_mourn (struct process_info *process)
1361 {
1362 struct process_info_private *priv;
1363
1364 #ifdef USE_THREAD_DB
1365 thread_db_mourn (process);
1366 #endif
1367
1368 find_inferior (&all_threads, delete_lwp_callback, process);
1369
1370   /* Free all private data.  */
1371 priv = process->priv;
1372 free (priv->arch_private);
1373 free (priv);
1374 process->priv = NULL;
1375
1376 remove_process (process);
1377 }
1378
1379 static void
1380 linux_join (int pid)
1381 {
1382 int status, ret;
1383
1384 do {
1385 ret = my_waitpid (pid, &status, 0);
1386 if (WIFEXITED (status) || WIFSIGNALED (status))
1387 break;
1388 } while (ret != -1 || errno != ECHILD);
1389 }
1390
1391 /* Return nonzero if the given thread is still alive. */
1392 static int
1393 linux_thread_alive (ptid_t ptid)
1394 {
1395 struct lwp_info *lwp = find_lwp_pid (ptid);
1396
1397 /* We assume we always know if a thread exits. If a whole process
1398 exited but we still haven't been able to report it to GDB, we'll
1399 hold on to the last lwp of the dead process. */
1400 if (lwp != NULL)
1401 return !lwp_is_marked_dead (lwp);
1402 else
1403 return 0;
1404 }
1405
1406 /* Return 1 if this lwp still has an interesting status pending.  If
1407    not (e.g., it had stopped for a breakpoint that is gone), return
1408    0.  */
1409
1410 static int
1411 thread_still_has_status_pending_p (struct thread_info *thread)
1412 {
1413 struct lwp_info *lp = get_thread_lwp (thread);
1414
1415 if (!lp->status_pending_p)
1416 return 0;
1417
1418 /* If we got a `vCont;t', but we haven't reported a stop yet, do
1419 report any status pending the LWP may have. */
1420 if (thread->last_resume_kind == resume_stop
1421 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
1422 return 0;
1423
1424 if (thread->last_resume_kind != resume_stop
1425 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1426 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
1427 {
1428 struct thread_info *saved_thread;
1429 CORE_ADDR pc;
1430 int discard = 0;
1431
1432 gdb_assert (lp->last_status != 0);
1433
1434 pc = get_pc (lp);
1435
1436 saved_thread = current_thread;
1437 current_thread = thread;
1438
1439 if (pc != lp->stop_pc)
1440 {
1441 if (debug_threads)
1442 debug_printf ("PC of %ld changed\n",
1443 lwpid_of (thread));
1444 discard = 1;
1445 }
1446
1447 #if !USE_SIGTRAP_SIGINFO
1448 else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1449 && !(*the_low_target.breakpoint_at) (pc))
1450 {
1451 if (debug_threads)
1452 debug_printf ("previous SW breakpoint of %ld gone\n",
1453 lwpid_of (thread));
1454 discard = 1;
1455 }
1456 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
1457 && !hardware_breakpoint_inserted_here (pc))
1458 {
1459 if (debug_threads)
1460 debug_printf ("previous HW breakpoint of %ld gone\n",
1461 lwpid_of (thread));
1462 discard = 1;
1463 }
1464 #endif
1465
1466 current_thread = saved_thread;
1467
1468 if (discard)
1469 {
1470 if (debug_threads)
1471 debug_printf ("discarding pending breakpoint status\n");
1472 lp->status_pending_p = 0;
1473 return 0;
1474 }
1475 }
1476
1477 return 1;
1478 }
1479
1480 /* Return 1 if this lwp has an interesting status pending. */
1481 static int
1482 status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
1483 {
1484 struct thread_info *thread = (struct thread_info *) entry;
1485 struct lwp_info *lp = get_thread_lwp (thread);
1486 ptid_t ptid = * (ptid_t *) arg;
1487
1488 /* Check if we're only interested in events from a specific process
1489 or a specific LWP. */
1490 if (!ptid_match (ptid_of (thread), ptid))
1491 return 0;
1492
1493 if (lp->status_pending_p
1494 && !thread_still_has_status_pending_p (thread))
1495 {
1496 linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1497 return 0;
1498 }
1499
1500 return lp->status_pending_p;
1501 }
1502
1503 static int
1504 same_lwp (struct inferior_list_entry *entry, void *data)
1505 {
1506 ptid_t ptid = *(ptid_t *) data;
1507 int lwp;
1508
1509 if (ptid_get_lwp (ptid) != 0)
1510 lwp = ptid_get_lwp (ptid);
1511 else
1512 lwp = ptid_get_pid (ptid);
1513
1514 if (ptid_get_lwp (entry->id) == lwp)
1515 return 1;
1516
1517 return 0;
1518 }
1519
1520 struct lwp_info *
1521 find_lwp_pid (ptid_t ptid)
1522 {
1523 struct inferior_list_entry *thread
1524 = find_inferior (&all_threads, same_lwp, &ptid);
1525
1526 if (thread == NULL)
1527 return NULL;
1528
1529 return get_thread_lwp ((struct thread_info *) thread);
1530 }
1531
1532 /* Return the number of known LWPs in the tgid given by PID. */
1533
1534 static int
1535 num_lwps (int pid)
1536 {
1537 struct inferior_list_entry *inf, *tmp;
1538 int count = 0;
1539
1540 ALL_INFERIORS (&all_threads, inf, tmp)
1541 {
1542 if (ptid_get_pid (inf->id) == pid)
1543 count++;
1544 }
1545
1546 return count;
1547 }
1548
1549 /* The arguments passed to iterate_over_lwps. */
1550
1551 struct iterate_over_lwps_args
1552 {
1553 /* The FILTER argument passed to iterate_over_lwps. */
1554 ptid_t filter;
1555
1556 /* The CALLBACK argument passed to iterate_over_lwps. */
1557 iterate_over_lwps_ftype *callback;
1558
1559 /* The DATA argument passed to iterate_over_lwps. */
1560 void *data;
1561 };
1562
1563 /* Callback for find_inferior used by iterate_over_lwps to filter
1564 calls to the callback supplied to that function. Returning a
565    nonzero value causes find_inferior to stop iterating and return
566    the current inferior_list_entry.  Returning zero indicates that
567    find_inferior should continue iterating.  */
1568
1569 static int
1570 iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
1571 {
1572 struct iterate_over_lwps_args *args
1573 = (struct iterate_over_lwps_args *) args_p;
1574
1575 if (ptid_match (entry->id, args->filter))
1576 {
1577 struct thread_info *thr = (struct thread_info *) entry;
1578 struct lwp_info *lwp = get_thread_lwp (thr);
1579
1580 return (*args->callback) (lwp, args->data);
1581 }
1582
1583 return 0;
1584 }
1585
1586 /* See nat/linux-nat.h. */
1587
1588 struct lwp_info *
1589 iterate_over_lwps (ptid_t filter,
1590 iterate_over_lwps_ftype callback,
1591 void *data)
1592 {
1593 struct iterate_over_lwps_args args = {filter, callback, data};
1594 struct inferior_list_entry *entry;
1595
1596 entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
1597 if (entry == NULL)
1598 return NULL;
1599
1600 return get_thread_lwp ((struct thread_info *) entry);
1601 }
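/* Illustrative sketch (not built): counting the stopped LWPs of a
   process with the iterator above.  The callback and counter are
   hypothetical.  */
#if 0
static int
example_count_stopped (struct lwp_info *lwp, void *data)
{
  if (lwp->stopped)
    ++*(int *) data;
  return 0;	/* Zero means continue iterating.  */
}

  /* In a caller:  */
  int n_stopped = 0;
  iterate_over_lwps (pid_to_ptid (pid), example_count_stopped, &n_stopped);
#endif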
1602
1603 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1604 their exits until all other threads in the group have exited. */
1605
1606 static void
1607 check_zombie_leaders (void)
1608 {
1609 struct process_info *proc, *tmp;
1610
1611 ALL_PROCESSES (proc, tmp)
1612 {
1613 pid_t leader_pid = pid_of (proc);
1614 struct lwp_info *leader_lp;
1615
1616 leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
1617
1618 if (debug_threads)
1619 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1620 "num_lwps=%d, zombie=%d\n",
1621 leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
1622 linux_proc_pid_is_zombie (leader_pid));
1623
1624 if (leader_lp != NULL
1625 /* Check if there are other threads in the group, as we may
1626 have raced with the inferior simply exiting. */
1627 && !last_thread_of_process_p (leader_pid)
1628 && linux_proc_pid_is_zombie (leader_pid))
1629 {
1630 /* A leader zombie can mean one of two things:
1631
1632 - It exited, and there's an exit status pending
1633 available, or only the leader exited (not the whole
1634 program). In the latter case, we can't waitpid the
1635 leader's exit status until all other threads are gone.
1636
1637 - There are 3 or more threads in the group, and a thread
1638 other than the leader exec'd. On an exec, the Linux
1639 kernel destroys all other threads (except the execing
1640 one) in the thread group, and resets the execing thread's
1641 tid to the tgid. No exit notification is sent for the
1642 execing thread -- from the ptracer's perspective, it
1643 appears as though the execing thread just vanishes.
1644 Until we reap all other threads except the leader and the
1645 execing thread, the leader will be zombie, and the
1646 execing thread will be in `D (disc sleep)'. As soon as
1647 all other threads are reaped, the execing thread changes
648 	     its tid to the tgid, and the previous (zombie) leader
1649 vanishes, giving place to the "new" leader. We could try
1650 distinguishing the exit and exec cases, by waiting once
1651 more, and seeing if something comes out, but it doesn't
1652 sound useful. The previous leader _does_ go away, and
1653 we'll re-add the new one once we see the exec event
1654 (which is just the same as what would happen if the
1655 previous leader did exit voluntarily before some other
1656 thread execs). */
1657
1658 if (debug_threads)
1659 fprintf (stderr,
1660 "CZL: Thread group leader %d zombie "
1661 "(it exited, or another thread execd).\n",
1662 leader_pid);
1663
1664 delete_lwp (leader_lp);
1665 }
1666 }
1667 }
1668
1669 /* Callback for `find_inferior'. Returns the first LWP that is not
1670 stopped. ARG is a PTID filter. */
1671
1672 static int
1673 not_stopped_callback (struct inferior_list_entry *entry, void *arg)
1674 {
1675 struct thread_info *thr = (struct thread_info *) entry;
1676 struct lwp_info *lwp;
1677 ptid_t filter = *(ptid_t *) arg;
1678
1679 if (!ptid_match (ptid_of (thr), filter))
1680 return 0;
1681
1682 lwp = get_thread_lwp (thr);
1683 if (!lwp->stopped)
1684 return 1;
1685
1686 return 0;
1687 }
1688
1689 /* This function should only be called if the LWP got a SIGTRAP.
1690
1691 Handle any tracepoint steps or hits. Return true if a tracepoint
1692 event was handled, 0 otherwise. */
1693
1694 static int
1695 handle_tracepoints (struct lwp_info *lwp)
1696 {
1697 struct thread_info *tinfo = get_lwp_thread (lwp);
1698 int tpoint_related_event = 0;
1699
1700 gdb_assert (lwp->suspended == 0);
1701
1702 /* If this tracepoint hit causes a tracing stop, we'll immediately
1703 uninsert tracepoints. To do this, we temporarily pause all
1704 threads, unpatch away, and then unpause threads. We need to make
1705 sure the unpausing doesn't resume LWP too. */
1706 lwp->suspended++;
1707
1708 /* And we need to be sure that any all-threads-stopping doesn't try
1709 to move threads out of the jump pads, as it could deadlock the
1710 inferior (LWP could be in the jump pad, maybe even holding the
1711 lock.) */
1712
1713 /* Do any necessary step collect actions. */
1714 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1715
1716 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1717
1718 /* See if we just hit a tracepoint and do its main collect
1719 actions. */
1720 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1721
1722 lwp->suspended--;
1723
1724 gdb_assert (lwp->suspended == 0);
1725 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
1726
1727 if (tpoint_related_event)
1728 {
1729 if (debug_threads)
1730 debug_printf ("got a tracepoint event\n");
1731 return 1;
1732 }
1733
1734 return 0;
1735 }
1736
1737 /* Convenience wrapper. Returns true if LWP is presently collecting a
1738 fast tracepoint. */
1739
1740 static int
1741 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1742 struct fast_tpoint_collect_status *status)
1743 {
1744 CORE_ADDR thread_area;
1745 struct thread_info *thread = get_lwp_thread (lwp);
1746
1747 if (the_low_target.get_thread_area == NULL)
1748 return 0;
1749
1750 /* Get the thread area address. This is used to recognize which
1751 thread is which when tracing with the in-process agent library.
1752 We don't read anything from the address, and treat it as opaque;
1753 it's the address itself that we assume is unique per-thread. */
1754 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
1755 return 0;
1756
1757 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1758 }
1759
1760 /* The reason we resume in the caller is that we want to be able
1761    to pass lwp->status_pending as WSTAT, and we need to clear
1762    status_pending_p before resuming; otherwise, linux_resume_one_lwp
1763    refuses to resume.  */
1764
1765 static int
1766 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
1767 {
1768 struct thread_info *saved_thread;
1769
1770 saved_thread = current_thread;
1771 current_thread = get_lwp_thread (lwp);
1772
1773 if ((wstat == NULL
1774 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1775 && supports_fast_tracepoints ()
1776 && agent_loaded_p ())
1777 {
1778 struct fast_tpoint_collect_status status;
1779 int r;
1780
1781 if (debug_threads)
1782 debug_printf ("Checking whether LWP %ld needs to move out of the "
1783 "jump pad.\n",
1784 lwpid_of (current_thread));
1785
1786 r = linux_fast_tracepoint_collecting (lwp, &status);
1787
1788 if (wstat == NULL
1789 || (WSTOPSIG (*wstat) != SIGILL
1790 && WSTOPSIG (*wstat) != SIGFPE
1791 && WSTOPSIG (*wstat) != SIGSEGV
1792 && WSTOPSIG (*wstat) != SIGBUS))
1793 {
1794 lwp->collecting_fast_tracepoint = r;
1795
1796 if (r != 0)
1797 {
1798 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
1799 {
1800 /* Haven't executed the original instruction yet.
1801 Set breakpoint there, and wait till it's hit,
1802 then single-step until exiting the jump pad. */
1803 lwp->exit_jump_pad_bkpt
1804 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1805 }
1806
1807 if (debug_threads)
1808 debug_printf ("Checking whether LWP %ld needs to move out of "
1809 "the jump pad...it does\n",
1810 lwpid_of (current_thread));
1811 current_thread = saved_thread;
1812
1813 return 1;
1814 }
1815 }
1816 else
1817 {
1818 /* If we get a synchronous signal while collecting, *and*
1819 while executing the (relocated) original instruction,
1820 reset the PC to point at the tpoint address, before
1821 reporting to GDB. Otherwise, it's an IPA lib bug: just
1822 report the signal to GDB, and pray for the best. */
1823
1824 lwp->collecting_fast_tracepoint = 0;
1825
1826 if (r != 0
1827 && (status.adjusted_insn_addr <= lwp->stop_pc
1828 && lwp->stop_pc < status.adjusted_insn_addr_end))
1829 {
1830 siginfo_t info;
1831 struct regcache *regcache;
1832
1833 /* The si_addr on a few signals references the address
1834 of the faulting instruction. Adjust that as
1835 well. */
1836 if ((WSTOPSIG (*wstat) == SIGILL
1837 || WSTOPSIG (*wstat) == SIGFPE
1838 || WSTOPSIG (*wstat) == SIGBUS
1839 || WSTOPSIG (*wstat) == SIGSEGV)
1840 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
1841 (PTRACE_TYPE_ARG3) 0, &info) == 0
1842 /* Final check just to make sure we don't clobber
1843 the siginfo of non-kernel-sent signals. */
1844 && (uintptr_t) info.si_addr == lwp->stop_pc)
1845 {
1846 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
1847 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
1848 (PTRACE_TYPE_ARG3) 0, &info);
1849 }
1850
1851 regcache = get_thread_regcache (current_thread, 1);
1852 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
1853 lwp->stop_pc = status.tpoint_addr;
1854
1855 /* Cancel any fast tracepoint lock this thread was
1856 holding. */
1857 force_unlock_trace_buffer ();
1858 }
1859
1860 if (lwp->exit_jump_pad_bkpt != NULL)
1861 {
1862 if (debug_threads)
1863 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
1864 "stopping all threads momentarily.\n");
1865
1866 stop_all_lwps (1, lwp);
1867
1868 delete_breakpoint (lwp->exit_jump_pad_bkpt);
1869 lwp->exit_jump_pad_bkpt = NULL;
1870
1871 unstop_all_lwps (1, lwp);
1872
1873 gdb_assert (lwp->suspended >= 0);
1874 }
1875 }
1876 }
1877
1878 if (debug_threads)
1879 debug_printf ("Checking whether LWP %ld needs to move out of the "
1880 "jump pad...no\n",
1881 lwpid_of (current_thread));
1882
1883 current_thread = saved_thread;
1884 return 0;
1885 }
1886
1887 /* Enqueue one signal in the "signals to report later when out of the
1888 jump pad" list. */
1889
1890 static void
1891 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1892 {
1893 struct pending_signals *p_sig;
1894 struct thread_info *thread = get_lwp_thread (lwp);
1895
1896 if (debug_threads)
1897 debug_printf ("Deferring signal %d for LWP %ld.\n",
1898 WSTOPSIG (*wstat), lwpid_of (thread));
1899
1900 if (debug_threads)
1901 {
1902 struct pending_signals *sig;
1903
1904 for (sig = lwp->pending_signals_to_report;
1905 sig != NULL;
1906 sig = sig->prev)
1907 debug_printf (" Already queued %d\n",
1908 sig->signal);
1909
1910 debug_printf (" (no more currently queued signals)\n");
1911 }
1912
1913 /* Don't enqueue non-RT signals if they are already in the deferred
1914 queue. (SIGSTOP being the easiest signal to see ending up here
1915 twice) */
1916 if (WSTOPSIG (*wstat) < __SIGRTMIN)
1917 {
1918 struct pending_signals *sig;
1919
1920 for (sig = lwp->pending_signals_to_report;
1921 sig != NULL;
1922 sig = sig->prev)
1923 {
1924 if (sig->signal == WSTOPSIG (*wstat))
1925 {
1926 if (debug_threads)
1927 debug_printf ("Not requeuing already queued non-RT signal %d"
1928 " for LWP %ld\n",
1929 sig->signal,
1930 lwpid_of (thread));
1931 return;
1932 }
1933 }
1934 }
1935
1936 p_sig = xmalloc (sizeof (*p_sig));
1937 p_sig->prev = lwp->pending_signals_to_report;
1938 p_sig->signal = WSTOPSIG (*wstat);
1939 memset (&p_sig->info, 0, sizeof (siginfo_t));
1940 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1941 &p_sig->info);
1942
1943 lwp->pending_signals_to_report = p_sig;
1944 }
1945
1946 /* Dequeue one signal from the "signals to report later when out of
1947 the jump pad" list. */
1948
1949 static int
1950 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1951 {
1952 struct thread_info *thread = get_lwp_thread (lwp);
1953
1954 if (lwp->pending_signals_to_report != NULL)
1955 {
1956 struct pending_signals **p_sig;
1957
1958 p_sig = &lwp->pending_signals_to_report;
1959 while ((*p_sig)->prev != NULL)
1960 p_sig = &(*p_sig)->prev;
1961
1962 *wstat = W_STOPCODE ((*p_sig)->signal);
1963 if ((*p_sig)->info.si_signo != 0)
1964 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1965 &(*p_sig)->info);
1966 free (*p_sig);
1967 *p_sig = NULL;
1968
1969 if (debug_threads)
1970 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
1971 WSTOPSIG (*wstat), lwpid_of (thread));
1972
1973 if (debug_threads)
1974 {
1975 struct pending_signals *sig;
1976
1977 for (sig = lwp->pending_signals_to_report;
1978 sig != NULL;
1979 sig = sig->prev)
1980 debug_printf (" Still queued %d\n",
1981 sig->signal);
1982
1983 debug_printf (" (no more queued signals)\n");
1984 }
1985
1986 return 1;
1987 }
1988
1989 return 0;
1990 }
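
/* A minimal standalone sketch (not part of the original source) of the
   deferred-signal list discipline implemented by the two functions
   above: enqueue_one_deferred_signal pushes new entries at the head
   through the `prev' link, while dequeue_one_deferred_signal walks to
   the tail, so deferred signals come back out in FIFO order.  Compiled
   out with #if 0; the sketch_* names are hypothetical.  */
#if 0
#include <assert.h>
#include <stdlib.h>

struct sketch_sig { struct sketch_sig *prev; int signal; };

static void
sketch_enqueue (struct sketch_sig **list, int sig)
{
  struct sketch_sig *p = malloc (sizeof (*p));

  p->prev = *list;		/* The newest entry becomes the head.  */
  p->signal = sig;
  *list = p;
}

static int
sketch_dequeue (struct sketch_sig **list)
{
  struct sketch_sig **p = list;
  int sig;

  while ((*p)->prev != NULL)	/* Walk to the oldest entry.  */
    p = &(*p)->prev;
  sig = (*p)->signal;
  free (*p);
  *p = NULL;
  return sig;
}

int
main (void)
{
  struct sketch_sig *list = NULL;

  sketch_enqueue (&list, 10);
  sketch_enqueue (&list, 12);
  assert (sketch_dequeue (&list) == 10);	/* FIFO order.  */
  assert (sketch_dequeue (&list) == 12);
  return 0;
}
#endif
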
1991
1992 /* Fetch the possibly triggered data watchpoint info and store it in
1993 CHILD.
1994
1995 On some archs, like x86, that use debug registers to set
1996 watchpoints, it's possible that the way to know which watched
1997 address trapped is to check the register that is used to select
1998 which address to watch. The problem is that, between setting the watchpoint
1999 and reading back which data address trapped, the user may change
2000 the set of watchpoints, and, as a consequence, GDB changes the
2001 debug registers in the inferior. To avoid reading back a stale
2002 stopped-data-address when that happens, we cache in CHILD the fact
2003 that a watchpoint trapped, and the corresponding data address, as
2004 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2005 registers meanwhile, we have the cached data we can rely on. */
2006
2007 static int
2008 check_stopped_by_watchpoint (struct lwp_info *child)
2009 {
2010 if (the_low_target.stopped_by_watchpoint != NULL)
2011 {
2012 struct thread_info *saved_thread;
2013
2014 saved_thread = current_thread;
2015 current_thread = get_lwp_thread (child);
2016
2017 if (the_low_target.stopped_by_watchpoint ())
2018 {
2019 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2020
2021 if (the_low_target.stopped_data_address != NULL)
2022 child->stopped_data_address
2023 = the_low_target.stopped_data_address ();
2024 else
2025 child->stopped_data_address = 0;
2026 }
2027
2028 current_thread = saved_thread;
2029 }
2030
2031 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2032 }
2033
2034 /* Return the ptrace options that we want to try to enable. */
2035
2036 static int
2037 linux_low_ptrace_options (int attached)
2038 {
2039 int options = 0;
2040
2041 if (!attached)
2042 options |= PTRACE_O_EXITKILL;
2043
2044 if (report_fork_events)
2045 options |= PTRACE_O_TRACEFORK;
2046
2047 if (report_vfork_events)
2048 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2049
2050 return options;
2051 }
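
/* A rough standalone sketch (not part of the original source) of what
   consuming these option bits amounts to: linux_enable_event_reporting
   (in nat/linux-ptrace.c) ultimately issues a PTRACE_SETOPTIONS request
   on the ptrace-stopped LWP.  This is a sketch of standard ptrace
   usage, not a copy of that function; note PTRACE_O_EXITKILL requires
   Linux >= 3.8.  */
#if 0
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

static int
sketch_enable_events (pid_t pid, int options)
{
  /* The LWP must currently be in a ptrace-stop for this to succeed.  */
  if (ptrace (PTRACE_SETOPTIONS, pid, (void *) 0,
	      (void *) (long) options) != 0)
    {
      perror ("PTRACE_SETOPTIONS");
      return -1;
    }
  return 0;
}
#endif
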
2052
2053 /* Do low-level handling of the event, and check if we should go on
2054 and pass it to caller code. Return the affected lwp if we should, or
2055 NULL otherwise. */
2056
2057 static struct lwp_info *
2058 linux_low_filter_event (int lwpid, int wstat)
2059 {
2060 struct lwp_info *child;
2061 struct thread_info *thread;
2062 int have_stop_pc = 0;
2063
2064 child = find_lwp_pid (pid_to_ptid (lwpid));
2065
2066 /* If we didn't find a process, one of two things presumably happened:
2067 - A process we started and then detached from has exited. Ignore it.
2068 - A process we are controlling has forked and the new child's stop
2069 was reported to us by the kernel. Save its PID. */
2070 if (child == NULL && WIFSTOPPED (wstat))
2071 {
2072 add_to_pid_list (&stopped_pids, lwpid, wstat);
2073 return NULL;
2074 }
2075 else if (child == NULL)
2076 return NULL;
2077
2078 thread = get_lwp_thread (child);
2079
2080 child->stopped = 1;
2081
2082 child->last_status = wstat;
2083
2084 /* Check if the thread has exited. */
2085 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2086 {
2087 if (debug_threads)
2088 debug_printf ("LLFE: %d exited.\n", lwpid);
2089 if (num_lwps (pid_of (thread)) > 1)
2090 {
2091
2092 /* If there is at least one more LWP, then the exit signal was
2093 not the end of the debugged application and should be
2094 ignored. */
2095 delete_lwp (child);
2096 return NULL;
2097 }
2098 else
2099 {
2100 /* This was the last lwp in the process. Events are
2101 serialized to GDB core, so we can't report this one
2102 right now; but GDB core and the other target layers will
2103 want to be notified about the exit code/signal. Leave the
2104 status pending for the next time we're able to report
2105 it. */
2106 mark_lwp_dead (child, wstat);
2107 return child;
2108 }
2109 }
2110
2111 gdb_assert (WIFSTOPPED (wstat));
2112
2113 if (WIFSTOPPED (wstat))
2114 {
2115 struct process_info *proc;
2116
2117 /* Architecture-specific setup after inferior is running. */
2118 proc = find_process_pid (pid_of (thread));
2119 if (proc->tdesc == NULL)
2120 {
2121 if (proc->attached)
2122 {
2123 struct thread_info *saved_thread;
2124
2125 /* This needs to happen after we have attached to the
2126 inferior and it is stopped for the first time, but
2127 before we access any inferior registers. */
2128 saved_thread = current_thread;
2129 current_thread = thread;
2130
2131 the_low_target.arch_setup ();
2132
2133 current_thread = saved_thread;
2134 }
2135 else
2136 {
2137 /* The process is started, but GDBserver will do
2138 architecture-specific setup after the program stops at
2139 the first instruction. */
2140 child->status_pending_p = 1;
2141 child->status_pending = wstat;
2142 return child;
2143 }
2144 }
2145 }
2146
2147 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2148 {
2149 struct process_info *proc = find_process_pid (pid_of (thread));
2150 int options = linux_low_ptrace_options (proc->attached);
2151
2152 linux_enable_event_reporting (lwpid, options);
2153 child->must_set_ptrace_flags = 0;
2154 }
2155
2156 /* Be careful to not overwrite stop_pc until
2157 check_stopped_by_breakpoint is called. */
2158 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2159 && linux_is_extended_waitstatus (wstat))
2160 {
2161 child->stop_pc = get_pc (child);
2162 if (handle_extended_wait (child, wstat))
2163 {
2164 /* The event has been handled, so just return without
2165 reporting it. */
2166 return NULL;
2167 }
2168 }
2169
2170 /* Check first whether this was a SW/HW breakpoint before checking
2171 watchpoints, because at least s390 can't tell the data address of
2172 hardware watchpoint hits, and returns stopped-by-watchpoint as
2173 long as there's a watchpoint set. */
2174 if (WIFSTOPPED (wstat) && linux_wstatus_maybe_breakpoint (wstat))
2175 {
2176 if (check_stopped_by_breakpoint (child))
2177 have_stop_pc = 1;
2178 }
2179
2180 /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
2181 or hardware watchpoint. Check which is which if we got
2182 TARGET_STOPPED_BY_HW_BREAKPOINT. */
2183 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2184 && (child->stop_reason == TARGET_STOPPED_BY_NO_REASON
2185 || child->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
2186 check_stopped_by_watchpoint (child);
2187
2188 if (!have_stop_pc)
2189 child->stop_pc = get_pc (child);
2190
2191 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2192 && child->stop_expected)
2193 {
2194 if (debug_threads)
2195 debug_printf ("Expected stop.\n");
2196 child->stop_expected = 0;
2197
2198 if (thread->last_resume_kind == resume_stop)
2199 {
2200 /* We want to report the stop to the core. Treat the
2201 SIGSTOP as a normal event. */
2202 if (debug_threads)
2203 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2204 target_pid_to_str (ptid_of (thread)));
2205 }
2206 else if (stopping_threads != NOT_STOPPING_THREADS)
2207 {
2208 /* Stopping threads. We don't want this SIGSTOP to end up
2209 pending. */
2210 if (debug_threads)
2211 debug_printf ("LLW: SIGSTOP caught for %s "
2212 "while stopping threads.\n",
2213 target_pid_to_str (ptid_of (thread)));
2214 return NULL;
2215 }
2216 else
2217 {
2218 /* This is a delayed SIGSTOP. Filter out the event. */
2219 if (debug_threads)
2220 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2221 child->stepping ? "step" : "continue",
2222 target_pid_to_str (ptid_of (thread)));
2223
2224 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2225 return NULL;
2226 }
2227 }
2228
2229 child->status_pending_p = 1;
2230 child->status_pending = wstat;
2231 return child;
2232 }
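
/* A standalone sketch (not part of the original source) of the
   wait-status dispatch linux_low_filter_event performs with the
   <sys/wait.h> macros: every raw status is either an exit, a
   termination by signal, or a stop, and extended ptrace events ride in
   the bits above the stop signal.  */
#if 0
#include <stdio.h>
#include <sys/wait.h>

static void
sketch_decode_wstat (int wstat)
{
  if (WIFEXITED (wstat))
    printf ("exited, code %d\n", WEXITSTATUS (wstat));
  else if (WIFSIGNALED (wstat))
    printf ("killed by signal %d\n", WTERMSIG (wstat));
  else if (WIFSTOPPED (wstat))
    /* wstat >> 16 holds the PTRACE_EVENT_* code, if any.  */
    printf ("stopped by signal %d, ptrace event %d\n",
	    WSTOPSIG (wstat), wstat >> 16);
}
#endif
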
2233
2234 /* Resume LWPs that are currently stopped without any pending status
2235 to report, but are resumed from the core's perspective. */
2236
2237 static void
2238 resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2239 {
2240 struct thread_info *thread = (struct thread_info *) entry;
2241 struct lwp_info *lp = get_thread_lwp (thread);
2242
2243 if (lp->stopped
2244 && !lp->status_pending_p
2245 && thread->last_resume_kind != resume_stop
2246 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2247 {
2248 int step = thread->last_resume_kind == resume_step;
2249
2250 if (debug_threads)
2251 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2252 target_pid_to_str (ptid_of (thread)),
2253 paddress (lp->stop_pc),
2254 step);
2255
2256 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2257 }
2258 }
2259
2260 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2261 match FILTER_PTID (leaving others pending). The PTIDs can be:
2262 minus_one_ptid, to specify any child; a pid PTID, specifying all
2263 lwps of a thread group; or a PTID representing a single lwp. Store
2264 the stop status through the status pointer WSTAT. OPTIONS is
2265 passed to the waitpid call. Return 0 if no event was found and
2266 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2267 were found. Return the PID of the stopped child otherwise. */
2268
2269 static int
2270 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2271 int *wstatp, int options)
2272 {
2273 struct thread_info *event_thread;
2274 struct lwp_info *event_child, *requested_child;
2275 sigset_t block_mask, prev_mask;
2276
2277 retry:
2278 /* N.B. event_thread points to the thread_info struct that contains
2279 event_child. Keep them in sync. */
2280 event_thread = NULL;
2281 event_child = NULL;
2282 requested_child = NULL;
2283
2284 /* Check for a lwp with a pending status. */
2285
2286 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2287 {
2288 event_thread = (struct thread_info *)
2289 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2290 if (event_thread != NULL)
2291 event_child = get_thread_lwp (event_thread);
2292 if (debug_threads && event_thread)
2293 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2294 }
2295 else if (!ptid_equal (filter_ptid, null_ptid))
2296 {
2297 requested_child = find_lwp_pid (filter_ptid);
2298
2299 if (stopping_threads == NOT_STOPPING_THREADS
2300 && requested_child->status_pending_p
2301 && requested_child->collecting_fast_tracepoint)
2302 {
2303 enqueue_one_deferred_signal (requested_child,
2304 &requested_child->status_pending);
2305 requested_child->status_pending_p = 0;
2306 requested_child->status_pending = 0;
2307 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2308 }
2309
2310 if (requested_child->suspended
2311 && requested_child->status_pending_p)
2312 {
2313 internal_error (__FILE__, __LINE__,
2314 "requesting an event out of a"
2315 " suspended child?");
2316 }
2317
2318 if (requested_child->status_pending_p)
2319 {
2320 event_child = requested_child;
2321 event_thread = get_lwp_thread (event_child);
2322 }
2323 }
2324
2325 if (event_child != NULL)
2326 {
2327 if (debug_threads)
2328 debug_printf ("Got an event from pending child %ld (%04x)\n",
2329 lwpid_of (event_thread), event_child->status_pending);
2330 *wstatp = event_child->status_pending;
2331 event_child->status_pending_p = 0;
2332 event_child->status_pending = 0;
2333 current_thread = event_thread;
2334 return lwpid_of (event_thread);
2335 }
2336
2337 /* But if we don't find a pending event, we'll have to wait.
2338
2339 We only enter this loop if no process has a pending wait status.
2340 Thus any action taken in response to a wait status inside this
2341 loop is responding as soon as we detect the status, not after any
2342 pending events. */
2343
2344 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2345 all signals while here. */
2346 sigfillset (&block_mask);
2347 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2348
2349 /* Always pull all events out of the kernel. We'll randomly select
2350 an event LWP out of all that have events, to prevent
2351 starvation. */
2352 while (event_child == NULL)
2353 {
2354 pid_t ret = 0;
2355
2356 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2357 quirks:
2358
2359 - If the thread group leader exits while other threads in the
2360 thread group still exist, waitpid(TGID, ...) hangs. That
2361 waitpid won't return an exit status until the other threads
2362 in the group are reaped.
2363
2364 - When a non-leader thread execs, that thread just vanishes
2365 without reporting an exit (so we'd hang if we waited for it
2366 explicitly in that case). The exec event is reported to
2367 the TGID pid (although we don't currently enable exec
2368 events). */
2369 errno = 0;
2370 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2371
2372 if (debug_threads)
2373 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2374 ret, errno ? strerror (errno) : "ERRNO-OK");
2375
2376 if (ret > 0)
2377 {
2378 if (debug_threads)
2379 {
2380 debug_printf ("LLW: waitpid %ld received %s\n",
2381 (long) ret, status_to_str (*wstatp));
2382 }
2383
2384 /* Filter all events. IOW, leave all events pending. We'll
2385 randomly select an event LWP out of all that have events
2386 below. */
2387 linux_low_filter_event (ret, *wstatp);
2388 /* Retry until nothing comes out of waitpid. A single
2389 SIGCHLD can indicate more than one child stopped. */
2390 continue;
2391 }
2392
2393 /* Now that we've pulled all events out of the kernel, resume
2394 LWPs that don't have an interesting event to report. */
2395 if (stopping_threads == NOT_STOPPING_THREADS)
2396 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2397
2398 /* ... and find an LWP with a status to report to the core, if
2399 any. */
2400 event_thread = (struct thread_info *)
2401 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2402 if (event_thread != NULL)
2403 {
2404 event_child = get_thread_lwp (event_thread);
2405 *wstatp = event_child->status_pending;
2406 event_child->status_pending_p = 0;
2407 event_child->status_pending = 0;
2408 break;
2409 }
2410
2411 /* Check for zombie thread group leaders. Those can't be reaped
2412 until all other threads in the thread group are. */
2413 check_zombie_leaders ();
2414
2415 /* If there are no resumed children left in the set of LWPs we
2416 want to wait for, bail. We can't just block in
2417 waitpid/sigsuspend, because lwps might have been left stopped
2418 in trace-stop state, and we'd be stuck forever waiting for
2419 their status to change (which would only happen if we resumed
2420 them). Even if WNOHANG is set, this return code is preferred
2421 over 0 (below), as it is more detailed. */
2422 if ((find_inferior (&all_threads,
2423 not_stopped_callback,
2424 &wait_ptid) == NULL))
2425 {
2426 if (debug_threads)
2427 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2428 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2429 return -1;
2430 }
2431
2432 /* No interesting event to report to the caller. */
2433 if ((options & WNOHANG))
2434 {
2435 if (debug_threads)
2436 debug_printf ("WNOHANG set, no event found\n");
2437
2438 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2439 return 0;
2440 }
2441
2442 /* Block until we get an event reported with SIGCHLD. */
2443 if (debug_threads)
2444 debug_printf ("sigsuspend'ing\n");
2445
2446 sigsuspend (&prev_mask);
2447 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2448 goto retry;
2449 }
2450
2451 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2452
2453 current_thread = event_thread;
2454
2455 /* Check for thread exit. */
2456 if (! WIFSTOPPED (*wstatp))
2457 {
2458 gdb_assert (last_thread_of_process_p (pid_of (event_thread)));
2459
2460 if (debug_threads)
2461 debug_printf ("LWP %d is the last lwp of process. "
2462 "Process %ld exiting.\n",
2463 pid_of (event_thread), lwpid_of (event_thread));
2464 return lwpid_of (event_thread);
2465 }
2466
2467 return lwpid_of (event_thread);
2468 }
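
/* A condensed standalone sketch (not part of the original source) of
   the race-free wait pattern used above: block all signals, drain every
   pending event with waitpid (-1, ..., WNOHANG), and only then
   sigsuspend with the pre-block mask, so a SIGCHLD arriving between the
   drain and the suspend cannot be lost.  Event filtering, LWP
   bookkeeping and error handling are all elided here.  */
#if 0
#include <signal.h>
#include <stddef.h>
#include <sys/types.h>
#include <sys/wait.h>

#ifndef __WALL
#define __WALL 0x40000000	/* Linux: wait for any child.  */
#endif

static pid_t
sketch_wait_one (int *wstatp)
{
  sigset_t block_mask, prev_mask;
  pid_t ret;

  sigfillset (&block_mask);
  sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);

  for (;;)
    {
      /* Drain the kernel; one SIGCHLD can stand for many children.  */
      ret = waitpid (-1, wstatp, __WALL | WNOHANG);
      if (ret > 0)
	break;

      /* Nothing pending: sleep atomically with the old mask, so the
	 wake-up SIGCHLD could never have been delivered in between.  */
      sigsuspend (&prev_mask);
    }

  sigprocmask (SIG_SETMASK, &prev_mask, NULL);
  return ret;
}
#endif
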
2469
2470 /* Wait for an event from child(ren) PTID. PTIDs can be:
2471 minus_one_ptid, to specify any child; a pid PTID, specifying all
2472 lwps of a thread group; or a PTID representing a single lwp. Store
2473 the stop status through the status pointer WSTAT. OPTIONS is
2474 passed to the waitpid call. Return 0 if no event was found and
2475 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2476 were found. Return the PID of the stopped child otherwise. */
2477
2478 static int
2479 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2480 {
2481 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2482 }
2483
2484 /* Count the LWPs that have had events. */
2485
2486 static int
2487 count_events_callback (struct inferior_list_entry *entry, void *data)
2488 {
2489 struct thread_info *thread = (struct thread_info *) entry;
2490 struct lwp_info *lp = get_thread_lwp (thread);
2491 int *count = data;
2492
2493 gdb_assert (count != NULL);
2494
2495 /* Count only resumed LWPs that have an event pending. */
2496 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2497 && lp->status_pending_p)
2498 (*count)++;
2499
2500 return 0;
2501 }
2502
2503 /* Select the LWP (if any) that is currently being single-stepped. */
2504
2505 static int
2506 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2507 {
2508 struct thread_info *thread = (struct thread_info *) entry;
2509 struct lwp_info *lp = get_thread_lwp (thread);
2510
2511 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2512 && thread->last_resume_kind == resume_step
2513 && lp->status_pending_p)
2514 return 1;
2515 else
2516 return 0;
2517 }
2518
2519 /* Select the Nth LWP that has had an event. */
2520
2521 static int
2522 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2523 {
2524 struct thread_info *thread = (struct thread_info *) entry;
2525 struct lwp_info *lp = get_thread_lwp (thread);
2526 int *selector = data;
2527
2528 gdb_assert (selector != NULL);
2529
2530 /* Select only resumed LWPs that have an event pending. */
2531 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2532 && lp->status_pending_p)
2533 if ((*selector)-- == 0)
2534 return 1;
2535
2536 return 0;
2537 }
2538
2539 /* Select one LWP out of those that have events pending. */
2540
2541 static void
2542 select_event_lwp (struct lwp_info **orig_lp)
2543 {
2544 int num_events = 0;
2545 int random_selector;
2546 struct thread_info *event_thread = NULL;
2547
2548 /* In all-stop, give preference to the LWP that is being
2549 single-stepped. There will be at most one, and it's the LWP that
2550 the core is most interested in. If we didn't do this, then we'd
2551 have to handle pending step SIGTRAPs somehow in case the core
2552 later continues the previously-stepped thread, otherwise we'd
2553 report the pending SIGTRAP, and the core, not having stepped the
2554 thread, wouldn't understand what the trap was for, and therefore
2555 would report it to the user as a random signal. */
2556 if (!non_stop)
2557 {
2558 event_thread
2559 = (struct thread_info *) find_inferior (&all_threads,
2560 select_singlestep_lwp_callback,
2561 NULL);
2562 if (event_thread != NULL)
2563 {
2564 if (debug_threads)
2565 debug_printf ("SEL: Select single-step %s\n",
2566 target_pid_to_str (ptid_of (event_thread)));
2567 }
2568 }
2569 if (event_thread == NULL)
2570 {
2571 /* No single-stepping LWP. Select one at random, out of those
2572 which have had events. */
2573
2574 /* First see how many events we have. */
2575 find_inferior (&all_threads, count_events_callback, &num_events);
2576 gdb_assert (num_events > 0);
2577
2578 /* Now randomly pick a LWP out of those that have had
2579 events. */
2580 random_selector = (int)
2581 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2582
2583 if (debug_threads && num_events > 1)
2584 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2585 num_events, random_selector);
2586
2587 event_thread
2588 = (struct thread_info *) find_inferior (&all_threads,
2589 select_event_lwp_callback,
2590 &random_selector);
2591 }
2592
2593 if (event_thread != NULL)
2594 {
2595 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2596
2597 /* Switch the event LWP. */
2598 *orig_lp = event_lp;
2599 }
2600 }
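
/* A standalone sketch (not part of the original source) of the
   event-LWP randomization above: scaling rand () by
   num_events / (RAND_MAX + 1.0) yields an index in [0, num_events)
   with no modulo bias, and it can never equal num_events because
   rand () is at most RAND_MAX.  */
#if 0
#include <assert.h>
#include <stdlib.h>

static int
sketch_random_index (int num_events)
{
  int selector = (int) ((num_events * (double) rand ())
			/ (RAND_MAX + 1.0));

  assert (selector >= 0 && selector < num_events);
  return selector;
}
#endif
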
2601
2602 /* Decrement the suspend count of an LWP. */
2603
2604 static int
2605 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2606 {
2607 struct thread_info *thread = (struct thread_info *) entry;
2608 struct lwp_info *lwp = get_thread_lwp (thread);
2609
2610 /* Ignore EXCEPT. */
2611 if (lwp == except)
2612 return 0;
2613
2614 lwp->suspended--;
2615
2616 gdb_assert (lwp->suspended >= 0);
2617 return 0;
2618 }
2619
2620 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2621 non-NULL. */
2622
2623 static void
2624 unsuspend_all_lwps (struct lwp_info *except)
2625 {
2626 find_inferior (&all_threads, unsuspend_one_lwp, except);
2627 }
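
/* A standalone sketch (not part of the original source) of the
   find_inferior convention the callbacks in this file rely on: the
   walk stops at the first entry whose callback returns nonzero and
   returns that entry, so a callback that always returns 0 (like
   unsuspend_one_lwp above) turns find_inferior into a plain for-each.
   The list type here is a hypothetical stand-in.  */
#if 0
#include <stddef.h>

struct sketch_entry { struct sketch_entry *next; };

static struct sketch_entry *
sketch_find (struct sketch_entry *head,
	     int (*callback) (struct sketch_entry *, void *), void *data)
{
  struct sketch_entry *e;

  for (e = head; e != NULL; e = e->next)
    if ((*callback) (e, data))
      return e;			/* Callback asked to stop here.  */
  return NULL;			/* Visited every entry; no match.  */
}
#endif
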
2628
2629 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2630 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2631 void *data);
2632 static int lwp_running (struct inferior_list_entry *entry, void *data);
2633 static ptid_t linux_wait_1 (ptid_t ptid,
2634 struct target_waitstatus *ourstatus,
2635 int target_options);
2636
2637 /* Stabilize threads (move out of jump pads).
2638
2639 If a thread is midway collecting a fast tracepoint, we need to
2640 finish the collection and move it out of the jump pad before
2641 reporting the signal.
2642
2643 This avoids recursion while collecting (when a signal arrives
2644 midway, and the signal handler itself collects), which would trash
2645 the trace buffer. In case the user set a breakpoint in a signal
2646 handler, this avoids the backtrace showing the jump pad, etc..
2647 Most importantly, there are certain things we can't do safely if
2648 threads are stopped in a jump pad (or in its callees). For
2649 example:
2650
2651 - starting a new trace run. A thread still collecting the
2652 previous run could trash the trace buffer when resumed. The trace
2653 buffer control structures would have been reset but the thread
2654 would have had no way to tell. The thread could even be midway
2655 through memcpy'ing to the buffer, which would mean that when
2656 resumed, it would clobber the trace buffer set up for the new run.
2657
2658 - we can't rewrite/reuse the jump pads for new tracepoints
2659 safely. Say you do tstart while a thread is stopped midway through
2660 collecting. When the thread is later resumed, it finishes the
2661 collection, and returns to the jump pad, to execute the original
2662 instruction that was under the tracepoint jump at the time the
2663 older run had been started. If the jump pad had been rewritten
2664 since for something else in the new run, the thread would now
2665 execute the wrong / random instructions. */
2666
2667 static void
2668 linux_stabilize_threads (void)
2669 {
2670 struct thread_info *saved_thread;
2671 struct thread_info *thread_stuck;
2672
2673 thread_stuck
2674 = (struct thread_info *) find_inferior (&all_threads,
2675 stuck_in_jump_pad_callback,
2676 NULL);
2677 if (thread_stuck != NULL)
2678 {
2679 if (debug_threads)
2680 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2681 lwpid_of (thread_stuck));
2682 return;
2683 }
2684
2685 saved_thread = current_thread;
2686
2687 stabilizing_threads = 1;
2688
2689 /* Kick 'em all. */
2690 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2691
2692 /* Loop until all are stopped out of the jump pads. */
2693 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2694 {
2695 struct target_waitstatus ourstatus;
2696 struct lwp_info *lwp;
2697 int wstat;
2698
2699 /* Note that we go through the full wait event loop. While
2700 moving threads out of jump pad, we need to be able to step
2701 over internal breakpoints and such. */
2702 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2703
2704 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2705 {
2706 lwp = get_thread_lwp (current_thread);
2707
2708 /* Lock it. */
2709 lwp->suspended++;
2710
2711 if (ourstatus.value.sig != GDB_SIGNAL_0
2712 || current_thread->last_resume_kind == resume_stop)
2713 {
2714 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2715 enqueue_one_deferred_signal (lwp, &wstat);
2716 }
2717 }
2718 }
2719
2720 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
2721
2722 stabilizing_threads = 0;
2723
2724 current_thread = saved_thread;
2725
2726 if (debug_threads)
2727 {
2728 thread_stuck
2729 = (struct thread_info *) find_inferior (&all_threads,
2730 stuck_in_jump_pad_callback,
2731 NULL);
2732 if (thread_stuck != NULL)
2733 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2734 lwpid_of (thread_stuck));
2735 }
2736 }
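
/* A standalone sketch (not part of the original source) checking the
   W_STOPCODE round-trip that linux_stabilize_threads relies on when it
   re-packs a deferred signal into a raw wait status: the result
   decodes as a stop by exactly that signal.  */
#if 0
#include <assert.h>
#include <signal.h>
#include <sys/wait.h>

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif

int
main (void)
{
  int wstat = W_STOPCODE (SIGUSR1);

  assert (WIFSTOPPED (wstat));
  assert (WSTOPSIG (wstat) == SIGUSR1);
  return 0;
}
#endif
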
2737
2738 static void async_file_mark (void);
2739
2740 /* Convenience function that is called when the kernel reports an
2741 event that is not passed out to GDB. */
2742
2743 static ptid_t
2744 ignore_event (struct target_waitstatus *ourstatus)
2745 {
2746 /* If we got an event, there may still be others, as a single
2747 SIGCHLD can indicate more than one child stopped. This forces
2748 another target_wait call. */
2749 async_file_mark ();
2750
2751 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2752 return null_ptid;
2753 }
2754
2755 /* Wait for process, returns status. */
2756
2757 static ptid_t
2758 linux_wait_1 (ptid_t ptid,
2759 struct target_waitstatus *ourstatus, int target_options)
2760 {
2761 int w;
2762 struct lwp_info *event_child;
2763 int options;
2764 int pid;
2765 int step_over_finished;
2766 int bp_explains_trap;
2767 int maybe_internal_trap;
2768 int report_to_gdb;
2769 int trace_event;
2770 int in_step_range;
2771
2772 if (debug_threads)
2773 {
2774 debug_enter ();
2775 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2776 }
2777
2778 /* Translate generic target options into linux options. */
2779 options = __WALL;
2780 if (target_options & TARGET_WNOHANG)
2781 options |= WNOHANG;
2782
2783 bp_explains_trap = 0;
2784 trace_event = 0;
2785 in_step_range = 0;
2786 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2787
2788 if (ptid_equal (step_over_bkpt, null_ptid))
2789 pid = linux_wait_for_event (ptid, &w, options);
2790 else
2791 {
2792 if (debug_threads)
2793 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2794 target_pid_to_str (step_over_bkpt));
2795 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2796 }
2797
2798 if (pid == 0)
2799 {
2800 gdb_assert (target_options & TARGET_WNOHANG);
2801
2802 if (debug_threads)
2803 {
2804 debug_printf ("linux_wait_1 ret = null_ptid, "
2805 "TARGET_WAITKIND_IGNORE\n");
2806 debug_exit ();
2807 }
2808
2809 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2810 return null_ptid;
2811 }
2812 else if (pid == -1)
2813 {
2814 if (debug_threads)
2815 {
2816 debug_printf ("linux_wait_1 ret = null_ptid, "
2817 "TARGET_WAITKIND_NO_RESUMED\n");
2818 debug_exit ();
2819 }
2820
2821 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
2822 return null_ptid;
2823 }
2824
2825 event_child = get_thread_lwp (current_thread);
2826
2827 /* linux_wait_for_event only returns an exit status for the last
2828 child of a process. Report it. */
2829 if (WIFEXITED (w) || WIFSIGNALED (w))
2830 {
2831 if (WIFEXITED (w))
2832 {
2833 ourstatus->kind = TARGET_WAITKIND_EXITED;
2834 ourstatus->value.integer = WEXITSTATUS (w);
2835
2836 if (debug_threads)
2837 {
2838 debug_printf ("linux_wait_1 ret = %s, exited with "
2839 "retcode %d\n",
2840 target_pid_to_str (ptid_of (current_thread)),
2841 WEXITSTATUS (w));
2842 debug_exit ();
2843 }
2844 }
2845 else
2846 {
2847 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2848 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2849
2850 if (debug_threads)
2851 {
2852 debug_printf ("linux_wait_1 ret = %s, terminated with "
2853 "signal %d\n",
2854 target_pid_to_str (ptid_of (current_thread)),
2855 WTERMSIG (w));
2856 debug_exit ();
2857 }
2858 }
2859
2860 return ptid_of (current_thread);
2861 }
2862
2863 /* If step-over executes a breakpoint instruction, it means a
2864 gdb/gdbserver breakpoint had been planted on top of a permanent
2865 breakpoint. The PC has been adjusted by
2866 check_stopped_by_breakpoint to point at the breakpoint address.
2867 Advance the PC manually past the breakpoint, otherwise the
2868 program would keep trapping the permanent breakpoint forever. */
2869 if (!ptid_equal (step_over_bkpt, null_ptid)
2870 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
2871 {
2872 unsigned int increment_pc = the_low_target.breakpoint_len;
2873
2874 if (debug_threads)
2875 {
2876 debug_printf ("step-over for %s executed software breakpoint\n",
2877 target_pid_to_str (ptid_of (current_thread)));
2878 }
2879
2880 if (increment_pc != 0)
2881 {
2882 struct regcache *regcache
2883 = get_thread_regcache (current_thread, 1);
2884
2885 event_child->stop_pc += increment_pc;
2886 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2887
2888 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
2889 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
2890 }
2891 }
2892
2893 /* If this event was not handled before, and is not a SIGTRAP, we
2894 report it. SIGILL and SIGSEGV are also treated as traps in case
2895 a breakpoint is inserted at the current PC. If this target does
2896 not support internal breakpoints at all, we also report the
2897 SIGTRAP without further processing; it's of no concern to us. */
2898 maybe_internal_trap
2899 = (supports_breakpoints ()
2900 && (WSTOPSIG (w) == SIGTRAP
2901 || ((WSTOPSIG (w) == SIGILL
2902 || WSTOPSIG (w) == SIGSEGV)
2903 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2904
2905 if (maybe_internal_trap)
2906 {
2907 /* Handle anything that requires bookkeeping before deciding to
2908 report the event or continue waiting. */
2909
2910 /* First check if we can explain the SIGTRAP with an internal
2911 breakpoint, or if we should possibly report the event to GDB.
2912 Do this before anything that may remove or insert a
2913 breakpoint. */
2914 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2915
2916 /* We have a SIGTRAP, possibly a step-over dance has just
2917 finished. If so, tweak the state machine accordingly,
2918 reinsert breakpoints and delete any reinsert (software
2919 single-step) breakpoints. */
2920 step_over_finished = finish_step_over (event_child);
2921
2922 /* Now invoke the callbacks of any internal breakpoints there. */
2923 check_breakpoints (event_child->stop_pc);
2924
2925 /* Handle tracepoint data collecting. This may overflow the
2926 trace buffer, and cause a tracing stop, removing
2927 breakpoints. */
2928 trace_event = handle_tracepoints (event_child);
2929
2930 if (bp_explains_trap)
2931 {
2932 /* If we stepped or ran into an internal breakpoint, we've
2933 already handled it. So next time we resume (from this
2934 PC), we should step over it. */
2935 if (debug_threads)
2936 debug_printf ("Hit a gdbserver breakpoint.\n");
2937
2938 if (breakpoint_here (event_child->stop_pc))
2939 event_child->need_step_over = 1;
2940 }
2941 }
2942 else
2943 {
2944 /* We have some other signal, possibly a step-over dance was in
2945 progress, and it should be cancelled too. */
2946 step_over_finished = finish_step_over (event_child);
2947 }
2948
2949 /* We have all the data we need. Either report the event to GDB, or
2950 resume threads and keep waiting for more. */
2951
2952 /* If we're collecting a fast tracepoint, finish the collection and
2953 move out of the jump pad before delivering a signal. See
2954 linux_stabilize_threads. */
2955
2956 if (WIFSTOPPED (w)
2957 && WSTOPSIG (w) != SIGTRAP
2958 && supports_fast_tracepoints ()
2959 && agent_loaded_p ())
2960 {
2961 if (debug_threads)
2962 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2963 "to defer or adjust it.\n",
2964 WSTOPSIG (w), lwpid_of (current_thread));
2965
2966 /* Allow debugging the jump pad itself. */
2967 if (current_thread->last_resume_kind != resume_step
2968 && maybe_move_out_of_jump_pad (event_child, &w))
2969 {
2970 enqueue_one_deferred_signal (event_child, &w);
2971
2972 if (debug_threads)
2973 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
2974 WSTOPSIG (w), lwpid_of (current_thread));
2975
2976 linux_resume_one_lwp (event_child, 0, 0, NULL);
2977
2978 return ignore_event (ourstatus);
2979 }
2980 }
2981
2982 if (event_child->collecting_fast_tracepoint)
2983 {
2984 if (debug_threads)
2985 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2986 "Check if we're already there.\n",
2987 lwpid_of (current_thread),
2988 event_child->collecting_fast_tracepoint);
2989
2990 trace_event = 1;
2991
2992 event_child->collecting_fast_tracepoint
2993 = linux_fast_tracepoint_collecting (event_child, NULL);
2994
2995 if (event_child->collecting_fast_tracepoint != 1)
2996 {
2997 /* No longer need this breakpoint. */
2998 if (event_child->exit_jump_pad_bkpt != NULL)
2999 {
3000 if (debug_threads)
3001 debug_printf ("No longer need exit-jump-pad bkpt; removing it. "
3002 "stopping all threads momentarily.\n");
3003
3004 /* Other running threads could hit this breakpoint.
3005 We don't handle moribund locations like GDB does,
3006 instead we always pause all threads when removing
3007 breakpoints, so that any step-over or
3008 decr_pc_after_break adjustment is always taken
3009 care of while the breakpoint is still
3010 inserted. */
3011 stop_all_lwps (1, event_child);
3012
3013 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3014 event_child->exit_jump_pad_bkpt = NULL;
3015
3016 unstop_all_lwps (1, event_child);
3017
3018 gdb_assert (event_child->suspended >= 0);
3019 }
3020 }
3021
3022 if (event_child->collecting_fast_tracepoint == 0)
3023 {
3024 if (debug_threads)
3025 debug_printf ("fast tracepoint finished "
3026 "collecting successfully.\n");
3027
3028 /* We may have a deferred signal to report. */
3029 if (dequeue_one_deferred_signal (event_child, &w))
3030 {
3031 if (debug_threads)
3032 debug_printf ("dequeued one signal.\n");
3033 }
3034 else
3035 {
3036 if (debug_threads)
3037 debug_printf ("no deferred signals.\n");
3038
3039 if (stabilizing_threads)
3040 {
3041 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3042 ourstatus->value.sig = GDB_SIGNAL_0;
3043
3044 if (debug_threads)
3045 {
3046 debug_printf ("linux_wait_1 ret = %s, stopped "
3047 "while stabilizing threads\n",
3048 target_pid_to_str (ptid_of (current_thread)));
3049 debug_exit ();
3050 }
3051
3052 return ptid_of (current_thread);
3053 }
3054 }
3055 }
3056 }
3057
3058 /* Check whether GDB would be interested in this event. */
3059
3060 /* If GDB is not interested in this signal, don't stop other
3061 threads, and don't report it to GDB. Just resume the inferior
3062 right away. We do this for threading-related signals as well as
3063 any that GDB specifically requested we ignore. But never ignore
3064 SIGSTOP if we sent it ourselves, and do not ignore signals when
3065 stepping - they may require special handling to skip the signal
3066 handler. Also never ignore signals that could be caused by a
3067 breakpoint. */
3068 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
3069 thread library? */
3070 if (WIFSTOPPED (w)
3071 && current_thread->last_resume_kind != resume_step
3072 && (
3073 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3074 (current_process ()->priv->thread_db != NULL
3075 && (WSTOPSIG (w) == __SIGRTMIN
3076 || WSTOPSIG (w) == __SIGRTMIN + 1))
3077 ||
3078 #endif
3079 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3080 && !(WSTOPSIG (w) == SIGSTOP
3081 && current_thread->last_resume_kind == resume_stop)
3082 && !linux_wstatus_maybe_breakpoint (w))))
3083 {
3084 siginfo_t info, *info_p;
3085
3086 if (debug_threads)
3087 debug_printf ("Ignored signal %d for LWP %ld.\n",
3088 WSTOPSIG (w), lwpid_of (current_thread));
3089
3090 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3091 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3092 info_p = &info;
3093 else
3094 info_p = NULL;
3095 linux_resume_one_lwp (event_child, event_child->stepping,
3096 WSTOPSIG (w), info_p);
3097 return ignore_event (ourstatus);
3098 }
3099
3100 /* Note that all addresses are always "out of the step range" when
3101 there's no range to begin with. */
3102 in_step_range = lwp_in_step_range (event_child);
3103
3104 /* If GDB wanted this thread to single step, and the thread is out
3105 of the step range, we always want to report the SIGTRAP, and let
3106 GDB handle it. Watchpoints should always be reported. So should
3107 signals we can't explain. A SIGTRAP we can't explain could be a
3108 GDB breakpoint --- we may or may not support Z0 breakpoints. If we
3109 do, we'll be able to handle GDB breakpoints on top of internal
3110 breakpoints, by handling the internal breakpoint and still
3111 reporting the event to GDB. If we don't, we're out of luck, GDB
3112 won't see the breakpoint hit. */
3113 report_to_gdb = (!maybe_internal_trap
3114 || (current_thread->last_resume_kind == resume_step
3115 && !in_step_range)
3116 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3117 || (!step_over_finished && !in_step_range
3118 && !bp_explains_trap && !trace_event)
3119 || (gdb_breakpoint_here (event_child->stop_pc)
3120 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3121 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3122 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3123
3124 run_breakpoint_commands (event_child->stop_pc);
3125
3126 /* We found no reason GDB would want us to stop. We either hit one
3127 of our own breakpoints, or finished an internal step GDB
3128 shouldn't know about. */
3129 if (!report_to_gdb)
3130 {
3131 if (debug_threads)
3132 {
3133 if (bp_explains_trap)
3134 debug_printf ("Hit a gdbserver breakpoint.\n");
3135 if (step_over_finished)
3136 debug_printf ("Step-over finished.\n");
3137 if (trace_event)
3138 debug_printf ("Tracepoint event.\n");
3139 if (lwp_in_step_range (event_child))
3140 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3141 paddress (event_child->stop_pc),
3142 paddress (event_child->step_range_start),
3143 paddress (event_child->step_range_end));
3144 }
3145
3146 /* We're not reporting this breakpoint to GDB, so apply the
3147 decr_pc_after_break adjustment to the inferior's regcache
3148 ourselves. */
3149
3150 if (the_low_target.set_pc != NULL)
3151 {
3152 struct regcache *regcache
3153 = get_thread_regcache (current_thread, 1);
3154 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3155 }
3156
3157 /* We may have finished stepping over a breakpoint. If so,
3158 we've stopped and suspended all LWPs momentarily except the
3159 stepping one. This is where we resume them all again. We're
3160 going to keep waiting, so use proceed, which handles stepping
3161 over the next breakpoint. */
3162 if (debug_threads)
3163 debug_printf ("proceeding all threads.\n");
3164
3165 if (step_over_finished)
3166 unsuspend_all_lwps (event_child);
3167
3168 proceed_all_lwps ();
3169 return ignore_event (ourstatus);
3170 }
3171
3172 if (debug_threads)
3173 {
3174 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3175 {
3176 char *str;
3177
3178 str = target_waitstatus_to_string (&event_child->waitstatus);
3179 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3180 lwpid_of (get_lwp_thread (event_child)), str);
3181 xfree (str);
3182 }
3183 if (current_thread->last_resume_kind == resume_step)
3184 {
3185 if (event_child->step_range_start == event_child->step_range_end)
3186 debug_printf ("GDB wanted to single-step, reporting event.\n");
3187 else if (!lwp_in_step_range (event_child))
3188 debug_printf ("Out of step range, reporting event.\n");
3189 }
3190 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3191 debug_printf ("Stopped by watchpoint.\n");
3192 else if (gdb_breakpoint_here (event_child->stop_pc))
3193 debug_printf ("Stopped by GDB breakpoint.\n");
3194 debug_printf ("Hit a non-gdbserver trap event.\n");
3196 }
3197
3198 /* Alright, we're going to report a stop. */
3199
3200 if (!stabilizing_threads)
3201 {
3202 /* In all-stop, stop all threads. */
3203 if (!non_stop)
3204 stop_all_lwps (0, NULL);
3205
3206 /* If we're not waiting for a specific LWP, choose an event LWP
3207 from among those that have had events. Giving equal priority
3208 to all LWPs that have had events helps prevent
3209 starvation. */
3210 if (ptid_equal (ptid, minus_one_ptid))
3211 {
3212 event_child->status_pending_p = 1;
3213 event_child->status_pending = w;
3214
3215 select_event_lwp (&event_child);
3216
3217 /* current_thread and event_child must stay in sync. */
3218 current_thread = get_lwp_thread (event_child);
3219
3220 event_child->status_pending_p = 0;
3221 w = event_child->status_pending;
3222 }
3223
3224 if (step_over_finished)
3225 {
3226 if (!non_stop)
3227 {
3228 /* If we were doing a step-over, all other threads but
3229 the stepping one had been paused in start_step_over,
3230 with their suspend counts incremented. We don't want
3231 to do a full unstop/unpause, because we're in
3232 all-stop mode (so we want threads stopped), but we
3233 still need to unsuspend the other threads, to
3234 decrement their `suspended' count back. */
3235 unsuspend_all_lwps (event_child);
3236 }
3237 else
3238 {
3239 /* If we just finished a step-over, then all threads had
3240 been momentarily paused. In all-stop, that's fine,
3241 we want threads stopped by now anyway. In non-stop,
3242 we need to re-resume threads that GDB wanted to be
3243 running. */
3244 unstop_all_lwps (1, event_child);
3245 }
3246 }
3247
3248 /* Stabilize threads (move out of jump pads). */
3249 if (!non_stop)
3250 stabilize_threads ();
3251 }
3252 else
3253 {
3254 /* If we just finished a step-over, then all threads had been
3255 momentarily paused. In all-stop, that's fine, we want
3256 threads stopped by now anyway. In non-stop, we need to
3257 re-resume threads that GDB wanted to be running. */
3258 if (step_over_finished)
3259 unstop_all_lwps (1, event_child);
3260 }
3261
3262 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3263 {
3264 /* If the reported event is an exit, fork, vfork or exec, let
3265 GDB know. */
3266 *ourstatus = event_child->waitstatus;
3267 /* Clear the event lwp's waitstatus since we handled it already. */
3268 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3269 }
3270 else
3271 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3272
3273 /* Now that we've selected our final event LWP, un-adjust its PC if
3274 it was a software breakpoint, and the client doesn't know we can
3275 adjust the breakpoint ourselves. */
3276 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3277 && !swbreak_feature)
3278 {
3279 int decr_pc = the_low_target.decr_pc_after_break;
3280
3281 if (decr_pc != 0)
3282 {
3283 struct regcache *regcache
3284 = get_thread_regcache (current_thread, 1);
3285 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3286 }
3287 }
3288
3289 if (current_thread->last_resume_kind == resume_stop
3290 && WSTOPSIG (w) == SIGSTOP)
3291 {
3292 /* A thread that has been requested to stop by GDB with vCont;t
3293 stopped cleanly, so report it as SIG0. The use of
3294 SIGSTOP is an implementation detail. */
3295 ourstatus->value.sig = GDB_SIGNAL_0;
3296 }
3297 else if (current_thread->last_resume_kind == resume_stop
3298 && WSTOPSIG (w) != SIGSTOP)
3299 {
3300 /* A thread that has been requested to stop by GDB with vCont;t,
3301 but it stopped for some other reason. */
3302 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3303 }
3304 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3305 {
3306 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3307 }
3308
3309 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3310
3311 if (debug_threads)
3312 {
3313 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3314 target_pid_to_str (ptid_of (current_thread)),
3315 ourstatus->kind, ourstatus->value.sig);
3316 debug_exit ();
3317 }
3318
3319 return ptid_of (current_thread);
3320 }
3321
3322 /* Get rid of any pending event in the pipe. */
3323 static void
3324 async_file_flush (void)
3325 {
3326 int ret;
3327 char buf;
3328
3329 do
3330 ret = read (linux_event_pipe[0], &buf, 1);
3331 while (ret >= 0 || (ret == -1 && errno == EINTR));
3332 }
3333
3334 /* Put something in the pipe, so the event loop wakes up. */
3335 static void
3336 async_file_mark (void)
3337 {
3338 int ret;
3339
3340 async_file_flush ();
3341
3342 do
3343 ret = write (linux_event_pipe[1], "+", 1);
3344 while (ret == 0 || (ret == -1 && errno == EINTR));
3345
3346 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3347 be awakened anyway. */
3348 }
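
/* A standalone sketch (not part of the original source) of the
   self-pipe setup the two helpers above depend on.  Both pipe ends are
   assumed to be made non-blocking (gdbserver arranges this when it
   enables async mode elsewhere), which is what lets async_file_flush's
   read loop terminate and lets async_file_mark tolerate a full pipe.  */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int sketch_event_pipe[2];

static int
sketch_event_pipe_init (void)
{
  if (pipe (sketch_event_pipe) != 0)
    return -1;
  if (fcntl (sketch_event_pipe[0], F_SETFL, O_NONBLOCK) != 0
      || fcntl (sketch_event_pipe[1], F_SETFL, O_NONBLOCK) != 0)
    return -1;
  return 0;
}
#endif
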
3349
3350 static ptid_t
3351 linux_wait (ptid_t ptid,
3352 struct target_waitstatus *ourstatus, int target_options)
3353 {
3354 ptid_t event_ptid;
3355
3356 /* Flush the async file first. */
3357 if (target_is_async_p ())
3358 async_file_flush ();
3359
3360 do
3361 {
3362 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3363 }
3364 while ((target_options & TARGET_WNOHANG) == 0
3365 && ptid_equal (event_ptid, null_ptid)
3366 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3367
3368 /* If at least one stop was reported, there may be more. A single
3369 SIGCHLD can signal more than one child stop. */
3370 if (target_is_async_p ()
3371 && (target_options & TARGET_WNOHANG) != 0
3372 && !ptid_equal (event_ptid, null_ptid))
3373 async_file_mark ();
3374
3375 return event_ptid;
3376 }
3377
3378 /* Send a signal to an LWP. */
3379
3380 static int
3381 kill_lwp (unsigned long lwpid, int signo)
3382 {
3383 /* Use tkill, if possible, in case we are using NPTL threads. If tkill
3384 fails, then we are not using NPTL threads and we should be using kill. */
3385
3386 #ifdef __NR_tkill
3387 {
3388 static int tkill_failed;
3389
3390 if (!tkill_failed)
3391 {
3392 int ret;
3393
3394 errno = 0;
3395 ret = syscall (__NR_tkill, lwpid, signo);
3396 if (errno != ENOSYS)
3397 return ret;
3398 tkill_failed = 1;
3399 }
3400 }
3401 #endif
3402
3403 return kill (lwpid, signo);
3404 }
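
/* A standalone sketch (not part of the original source) contrasting
   the two calls used above: tkill(2) directs a signal at one specific
   kernel task (LWP), while kill(2) addresses the whole thread group
   and may have the signal delivered to any thread in it -- hence the
   preference for tkill when stopping individual LWPs.  */
#if 0
#include <signal.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static int
sketch_signal_one_lwp (pid_t lwpid, int signo)
{
#ifdef __NR_tkill
  return syscall (__NR_tkill, lwpid, signo);	/* Exact LWP.  */
#else
  return kill (lwpid, signo);			/* Thread group.  */
#endif
}
#endif
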
3405
3406 void
3407 linux_stop_lwp (struct lwp_info *lwp)
3408 {
3409 send_sigstop (lwp);
3410 }
3411
3412 static void
3413 send_sigstop (struct lwp_info *lwp)
3414 {
3415 int pid;
3416
3417 pid = lwpid_of (get_lwp_thread (lwp));
3418
3419 /* If we already have a pending stop signal for this process, don't
3420 send another. */
3421 if (lwp->stop_expected)
3422 {
3423 if (debug_threads)
3424 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3425
3426 return;
3427 }
3428
3429 if (debug_threads)
3430 debug_printf ("Sending sigstop to lwp %d\n", pid);
3431
3432 lwp->stop_expected = 1;
3433 kill_lwp (pid, SIGSTOP);
3434 }
3435
3436 static int
3437 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3438 {
3439 struct thread_info *thread = (struct thread_info *) entry;
3440 struct lwp_info *lwp = get_thread_lwp (thread);
3441
3442 /* Ignore EXCEPT. */
3443 if (lwp == except)
3444 return 0;
3445
3446 if (lwp->stopped)
3447 return 0;
3448
3449 send_sigstop (lwp);
3450 return 0;
3451 }
3452
3453 /* Increment the suspend count of an LWP, and stop it, if not stopped
3454 yet. */
3455 static int
3456 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3457 void *except)
3458 {
3459 struct thread_info *thread = (struct thread_info *) entry;
3460 struct lwp_info *lwp = get_thread_lwp (thread);
3461
3462 /* Ignore EXCEPT. */
3463 if (lwp == except)
3464 return 0;
3465
3466 lwp->suspended++;
3467
3468 return send_sigstop_callback (entry, except);
3469 }
3470
3471 static void
3472 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3473 {
3474 /* Store the exit status for later. */
3475 lwp->status_pending_p = 1;
3476 lwp->status_pending = wstat;
3477
3478 /* Store in waitstatus as well, as there's nothing else to process
3479 for this event. */
3480 if (WIFEXITED (wstat))
3481 {
3482 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3483 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3484 }
3485 else if (WIFSIGNALED (wstat))
3486 {
3487 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3488 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3489 }
3490
3491 /* Prevent trying to stop it. */
3492 lwp->stopped = 1;
3493
3494 /* No further stops are expected from a dead lwp. */
3495 lwp->stop_expected = 0;
3496 }
3497
3498 /* Return true if LWP has exited already, and has a pending exit event
3499 to report to GDB. */
3500
3501 static int
3502 lwp_is_marked_dead (struct lwp_info *lwp)
3503 {
3504 return (lwp->status_pending_p
3505 && (WIFEXITED (lwp->status_pending)
3506 || WIFSIGNALED (lwp->status_pending)));
3507 }
3508
3509 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3510
3511 static void
3512 wait_for_sigstop (void)
3513 {
3514 struct thread_info *saved_thread;
3515 ptid_t saved_tid;
3516 int wstat;
3517 int ret;
3518
3519 saved_thread = current_thread;
3520 if (saved_thread != NULL)
3521 saved_tid = saved_thread->entry.id;
3522 else
3523 saved_tid = null_ptid; /* avoid bogus unused warning */
3524
3525 if (debug_threads)
3526 debug_printf ("wait_for_sigstop: pulling events\n");
3527
3528 /* Passing NULL_PTID as filter indicates we want all events to be
3529 left pending. Eventually this returns when there are no
3530 unwaited-for children left. */
3531 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3532 &wstat, __WALL);
3533 gdb_assert (ret == -1);
3534
3535 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3536 current_thread = saved_thread;
3537 else
3538 {
3539 if (debug_threads)
3540 debug_printf ("Previously current thread died.\n");
3541
3542 if (non_stop)
3543 {
3544 /* We can't change the current inferior behind GDB's back,
3545 otherwise, a subsequent command may apply to the wrong
3546 process. */
3547 current_thread = NULL;
3548 }
3549 else
3550 {
3551 /* Set a valid thread as current. */
3552 set_desired_thread (0);
3553 }
3554 }
3555 }
3556
3557 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3558 move it out, because we need to report the stop event to GDB. For
3559 example, if the user puts a breakpoint in the jump pad, it's
3560 because she wants to debug it. */
3561
3562 static int
3563 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3564 {
3565 struct thread_info *thread = (struct thread_info *) entry;
3566 struct lwp_info *lwp = get_thread_lwp (thread);
3567
3568 gdb_assert (lwp->suspended == 0);
3569 gdb_assert (lwp->stopped);
3570
3571 /* Allow debugging the jump pad, gdb_collect, etc. */
3572 return (supports_fast_tracepoints ()
3573 && agent_loaded_p ()
3574 && (gdb_breakpoint_here (lwp->stop_pc)
3575 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3576 || thread->last_resume_kind == resume_step)
3577 && linux_fast_tracepoint_collecting (lwp, NULL));
3578 }
3579
3580 static void
3581 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3582 {
3583 struct thread_info *thread = (struct thread_info *) entry;
3584 struct lwp_info *lwp = get_thread_lwp (thread);
3585 int *wstat;
3586
3587 gdb_assert (lwp->suspended == 0);
3588 gdb_assert (lwp->stopped);
3589
3590 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3591
3592 /* Allow debugging the jump pad, gdb_collect, etc. */
3593 if (!gdb_breakpoint_here (lwp->stop_pc)
3594 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3595 && thread->last_resume_kind != resume_step
3596 && maybe_move_out_of_jump_pad (lwp, wstat))
3597 {
3598 if (debug_threads)
3599 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3600 lwpid_of (thread));
3601
3602 if (wstat)
3603 {
3604 lwp->status_pending_p = 0;
3605 enqueue_one_deferred_signal (lwp, wstat);
3606
3607 if (debug_threads)
3608 debug_printf ("Signal %d for LWP %ld deferred "
3609 "(in jump pad)\n",
3610 WSTOPSIG (*wstat), lwpid_of (thread));
3611 }
3612
3613 linux_resume_one_lwp (lwp, 0, 0, NULL);
3614 }
3615 else
3616 lwp->suspended++;
3617 }
3618
3619 static int
3620 lwp_running (struct inferior_list_entry *entry, void *data)
3621 {
3622 struct thread_info *thread = (struct thread_info *) entry;
3623 struct lwp_info *lwp = get_thread_lwp (thread);
3624
3625 if (lwp_is_marked_dead (lwp))
3626 return 0;
3627 if (lwp->stopped)
3628 return 0;
3629 return 1;
3630 }
3631
3632 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3633 If SUSPEND, then also increase the suspend count of every LWP,
3634 except EXCEPT. */
3635
3636 static void
3637 stop_all_lwps (int suspend, struct lwp_info *except)
3638 {
3639 /* Should not be called recursively. */
3640 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3641
3642 if (debug_threads)
3643 {
3644 debug_enter ();
3645 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3646 suspend ? "stop-and-suspend" : "stop",
3647 except != NULL
3648 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
3649 : "none");
3650 }
3651
3652 stopping_threads = (suspend
3653 ? STOPPING_AND_SUSPENDING_THREADS
3654 : STOPPING_THREADS);
3655
3656 if (suspend)
3657 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
3658 else
3659 find_inferior (&all_threads, send_sigstop_callback, except);
3660 wait_for_sigstop ();
3661 stopping_threads = NOT_STOPPING_THREADS;
3662
3663 if (debug_threads)
3664 {
3665 debug_printf ("stop_all_lwps done, setting stopping_threads "
3666 "back to !stopping\n");
3667 debug_exit ();
3668 }
3669 }
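
/* A hypothetical wrapper (not part of the original source) spelling
   out the pairing convention seen throughout this file: breakpoints
   are only removed between a suspending stop_all_lwps and the matching
   unstop_all_lwps, so every other LWP's `suspended' count returns to
   its previous value and no thread can run across the removal.  */
#if 0
static void
sketch_delete_breakpoint_safely (struct lwp_info *event_child,
				 struct breakpoint *bp)
{
  stop_all_lwps (1, event_child);	/* Stop and suspend the others.  */
  delete_breakpoint (bp);
  unstop_all_lwps (1, event_child);	/* Unsuspend and re-resume.  */
}
#endif
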
3670
3671 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
3672 SIGNAL is nonzero, give it that signal. */
3673
3674 static void
3675 linux_resume_one_lwp_throw (struct lwp_info *lwp,
3676 int step, int signal, siginfo_t *info)
3677 {
3678 struct thread_info *thread = get_lwp_thread (lwp);
3679 struct thread_info *saved_thread;
3680 int fast_tp_collecting;
3681 struct process_info *proc = get_thread_process (thread);
3682
3683 /* Note that target description may not be initialised
3684 (proc->tdesc == NULL) at this point because the program hasn't
3685 stopped at the first instruction yet. This means GDBserver skips
3686 the extra traps from the wrapper program (see option --wrapper).
3687 Code in this function that requires register access should be
3688 guarded by a proc->tdesc != NULL check or equivalent. */
3689
3690 if (lwp->stopped == 0)
3691 return;
3692
3693 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3694
3695 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3696
3697 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3698 user used the "jump" command, or "set $pc = foo"). */
3699 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
3700 {
3701 /* Collecting 'while-stepping' actions doesn't make sense
3702 anymore. */
3703 release_while_stepping_state_list (thread);
3704 }
3705
3706 /* If we have pending signals or status, and a new signal, enqueue the
3707 signal. Also enqueue the signal if we are waiting to reinsert a
3708 breakpoint; it will be picked up again below. */
3709 if (signal != 0
3710 && (lwp->status_pending_p
3711 || lwp->pending_signals != NULL
3712 || lwp->bp_reinsert != 0
3713 || fast_tp_collecting))
3714 {
3715 struct pending_signals *p_sig;
3716 p_sig = xmalloc (sizeof (*p_sig));
3717 p_sig->prev = lwp->pending_signals;
3718 p_sig->signal = signal;
3719 if (info == NULL)
3720 memset (&p_sig->info, 0, sizeof (siginfo_t));
3721 else
3722 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3723 lwp->pending_signals = p_sig;
3724 }
3725
3726 if (lwp->status_pending_p)
3727 {
3728 if (debug_threads)
3729 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3730 " has pending status\n",
3731 lwpid_of (thread), step ? "step" : "continue", signal,
3732 lwp->stop_expected ? "expected" : "not expected");
3733 return;
3734 }
3735
3736 saved_thread = current_thread;
3737 current_thread = thread;
3738
3739 if (debug_threads)
3740 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
3741 lwpid_of (thread), step ? "step" : "continue", signal,
3742 lwp->stop_expected ? "expected" : "not expected");
3743
3744 /* This bit needs some thinking about. If we get a signal that
3745 we must report while a single-step reinsert is still pending,
3746 we often end up resuming the thread. It might be better to
3747 (ew) allow a stack of pending events; then we could be sure that
3748 the reinsert happened right away and not lose any signals.
3749
3750 Making this stack would also shrink the window in which breakpoints are
3751 uninserted (see comment in linux_wait_for_lwp) but not enough for
3752 complete correctness, so it won't solve that problem. It may be
3753 worthwhile just to solve this one, however. */
3754 if (lwp->bp_reinsert != 0)
3755 {
3756 if (debug_threads)
3757 debug_printf (" pending reinsert at 0x%s\n",
3758 paddress (lwp->bp_reinsert));
3759
3760 if (can_hardware_single_step ())
3761 {
3762 if (fast_tp_collecting == 0)
3763 {
3764 if (step == 0)
3765 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3766 if (lwp->suspended)
3767 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3768 lwp->suspended);
3769 }
3770
3771 step = 1;
3772 }
3773
3774 /* Postpone any pending signal. It was enqueued above. */
3775 signal = 0;
3776 }
3777
3778 if (fast_tp_collecting == 1)
3779 {
3780 if (debug_threads)
3781 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3782 " (exit-jump-pad-bkpt)\n",
3783 lwpid_of (thread));
3784
3785 /* Postpone any pending signal. It was enqueued above. */
3786 signal = 0;
3787 }
3788 else if (fast_tp_collecting == 2)
3789 {
3790 if (debug_threads)
3791 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3792 " single-stepping\n",
3793 lwpid_of (thread));
3794
3795 if (can_hardware_single_step ())
3796 step = 1;
3797 else
3798 {
3799 internal_error (__FILE__, __LINE__,
3800 "moving out of jump pad single-stepping"
3801 " not implemented on this target");
3802 }
3803
3804 /* Postpone any pending signal. It was enqueued above. */
3805 signal = 0;
3806 }
3807
3808 /* If we have while-stepping actions in this thread, set it stepping.
3809 If we have a signal to deliver, it may or may not be set to
3810 SIG_IGN; we don't know. Assume it isn't, and allow collecting
3811 while-stepping into a signal handler. A possible smart thing to
3812 do would be to set an internal breakpoint at the signal return
3813 address, continue, and carry on catching this while-stepping
3814 action only when that breakpoint is hit. A future
3815 enhancement. */
3816 if (thread->while_stepping != NULL
3817 && can_hardware_single_step ())
3818 {
3819 if (debug_threads)
3820 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
3821 lwpid_of (thread));
3822 step = 1;
3823 }
3824
3825 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
3826 {
3827 struct regcache *regcache = get_thread_regcache (current_thread, 1);
3828
3829 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
3830
3831 if (debug_threads)
3832 {
3833 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
3834 (long) lwp->stop_pc);
3835 }
3836 }
3837
3838 /* If we have pending signals, consume one unless we are trying to
3839 reinsert a breakpoint or we're trying to finish a fast tracepoint
3840 collect. */
3841 if (lwp->pending_signals != NULL
3842 && lwp->bp_reinsert == 0
3843 && fast_tp_collecting == 0)
3844 {
3845 struct pending_signals **p_sig;
3846
3847 p_sig = &lwp->pending_signals;
3848 while ((*p_sig)->prev != NULL)
3849 p_sig = &(*p_sig)->prev;
3850
3851 signal = (*p_sig)->signal;
3852 if ((*p_sig)->info.si_signo != 0)
3853 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3854 &(*p_sig)->info);
3855
3856 free (*p_sig);
3857 *p_sig = NULL;
3858 }
3859
3860 if (the_low_target.prepare_to_resume != NULL)
3861 the_low_target.prepare_to_resume (lwp);
3862
3863 regcache_invalidate_thread (thread);
3864 errno = 0;
3865 lwp->stepping = step;
3866 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
3867 (PTRACE_TYPE_ARG3) 0,
3868 /* Coerce to a uintptr_t first to avoid potential gcc warning
3869 about coercing an 8 byte integer to a 4 byte pointer. */
3870 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
3871
3872 current_thread = saved_thread;
3873 if (errno)
3874 perror_with_name ("resuming thread");
3875
3876 /* Successfully resumed. Clear state that no longer makes sense,
3877 and mark the LWP as running. Must not do this before resuming
3878 otherwise if that fails other code will be confused. E.g., we'd
3879 later try to stop the LWP and hang forever waiting for a stop
3880 status. Note that we must not throw after this is cleared,
3881 otherwise handle_zombie_lwp_error would get confused. */
3882 lwp->stopped = 0;
3883 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3884 }
3885
3886 /* Called when we try to resume a stopped LWP and that errors out. If
3887 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
3888 or about to become one), discard the error, clear any pending status
3889 the LWP may have, and return true (we'll collect the exit status
3890 soon enough). Otherwise, return false. */
3891
3892 static int
3893 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
3894 {
3895 struct thread_info *thread = get_lwp_thread (lp);
3896
3897 /* If we get an error after resuming the LWP successfully, we'd
3898 confuse !T state for the LWP being gone. */
3899 gdb_assert (lp->stopped);
3900
3901 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
3902 because even if ptrace failed with ESRCH, the tracee may be "not
3903 yet fully dead", but already refusing ptrace requests. In that
3904 case the tracee has 'R (Running)' state for a little bit
3905 (observed in Linux 3.18). See also the note on ESRCH in the
3906 ptrace(2) man page. Instead, check whether the LWP has any state
3907 other than ptrace-stopped. */
3908
3909 /* Don't assume anything if /proc/PID/status can't be read. */
3910 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
3911 {
3912 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3913 lp->status_pending_p = 0;
3914 return 1;
3915 }
3916 return 0;
3917 }
3918
3919 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
3920 disappears while we try to resume it. */
3921
3922 static void
3923 linux_resume_one_lwp (struct lwp_info *lwp,
3924 int step, int signal, siginfo_t *info)
3925 {
3926 TRY
3927 {
3928 linux_resume_one_lwp_throw (lwp, step, signal, info);
3929 }
3930 CATCH (ex, RETURN_MASK_ERROR)
3931 {
3932 if (!check_ptrace_stopped_lwp_gone (lwp))
3933 throw_exception (ex);
3934 }
3935 END_CATCH
3936 }
3937
3938 struct thread_resume_array
3939 {
3940 struct thread_resume *resume;
3941 size_t n;
3942 };
3943
3944 /* This function is called once per thread via find_inferior.
3945 ARG is a pointer to a thread_resume_array struct.
3946 We look up the thread specified by ENTRY in ARG, and mark the thread
3947 with a pointer to the appropriate resume request.
3948
3949 This algorithm is O(threads * resume elements), but resume elements
3950 is small (and will remain small at least until GDB supports thread
3951 suspension). */
3952
3953 static int
3954 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3955 {
3956 struct thread_info *thread = (struct thread_info *) entry;
3957 struct lwp_info *lwp = get_thread_lwp (thread);
3958 int ndx;
3959 struct thread_resume_array *r;
3960
3961 r = arg;
3962
3963 for (ndx = 0; ndx < r->n; ndx++)
3964 {
3965 ptid_t ptid = r->resume[ndx].thread;
3966 if (ptid_equal (ptid, minus_one_ptid)
3967 || ptid_equal (ptid, entry->id)
3968 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3969 of PID'. */
3970 || (ptid_get_pid (ptid) == pid_of (thread)
3971 && (ptid_is_pid (ptid)
3972 || ptid_get_lwp (ptid) == -1)))
3973 {
3974 if (r->resume[ndx].kind == resume_stop
3975 && thread->last_resume_kind == resume_stop)
3976 {
3977 if (debug_threads)
3978 debug_printf ("already %s LWP %ld at GDB's request\n",
3979 (thread->last_status.kind
3980 == TARGET_WAITKIND_STOPPED)
3981 ? "stopped"
3982 : "stopping",
3983 lwpid_of (thread));
3984
3985 continue;
3986 }
3987
3988 lwp->resume = &r->resume[ndx];
3989 thread->last_resume_kind = lwp->resume->kind;
3990
3991 lwp->step_range_start = lwp->resume->step_range_start;
3992 lwp->step_range_end = lwp->resume->step_range_end;
3993
3994 /* If we had a deferred signal to report, dequeue one now.
3995 This can happen if LWP gets more than one signal while
3996 trying to get out of a jump pad. */
3997 if (lwp->stopped
3998 && !lwp->status_pending_p
3999 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4000 {
4001 lwp->status_pending_p = 1;
4002
4003 if (debug_threads)
4004 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4005 "leaving status pending.\n",
4006 WSTOPSIG (lwp->status_pending),
4007 lwpid_of (thread));
4008 }
4009
4010 return 0;
4011 }
4012 }
4013
4014 /* No resume action for this thread. */
4015 lwp->resume = NULL;
4016
4017 return 0;
4018 }
4019
4020 /* find_inferior callback for linux_resume.
4021 Set *FLAG_P if this lwp has an interesting status pending. */
4022
4023 static int
4024 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
4025 {
4026 struct thread_info *thread = (struct thread_info *) entry;
4027 struct lwp_info *lwp = get_thread_lwp (thread);
4028
4029 /* LWPs which will not be resumed are not interesting, because
4030 we might not wait for them next time through linux_wait. */
4031 if (lwp->resume == NULL)
4032 return 0;
4033
4034 if (thread_still_has_status_pending_p (thread))
4035 * (int *) flag_p = 1;
4036
4037 return 0;
4038 }
4039
4040 /* Return 1 if this lwp, which GDB wants running, is stopped at an
4041 internal breakpoint that we need to step over. This function
4042 assumes that any required STOP_PC adjustment has already been
4043 propagated to the inferior's regcache. */
4044
4045 static int
4046 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
4047 {
4048 struct thread_info *thread = (struct thread_info *) entry;
4049 struct lwp_info *lwp = get_thread_lwp (thread);
4050 struct thread_info *saved_thread;
4051 CORE_ADDR pc;
4052 struct process_info *proc = get_thread_process (thread);
4053
4054 /* GDBserver is still skipping the extra traps from the wrapper
4055 program, so there is no need to step over anything. */
4056 if (proc->tdesc == NULL)
4057 return 0;
4058
4059 /* LWPs which will not be resumed are not interesting, because we
4060 might not wait for them next time through linux_wait. */
4061
4062 if (!lwp->stopped)
4063 {
4064 if (debug_threads)
4065 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4066 lwpid_of (thread));
4067 return 0;
4068 }
4069
4070 if (thread->last_resume_kind == resume_stop)
4071 {
4072 if (debug_threads)
4073 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4074 " stopped\n",
4075 lwpid_of (thread));
4076 return 0;
4077 }
4078
4079 gdb_assert (lwp->suspended >= 0);
4080
4081 if (lwp->suspended)
4082 {
4083 if (debug_threads)
4084 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4085 lwpid_of (thread));
4086 return 0;
4087 }
4088
4089 if (!lwp->need_step_over)
4090 {
4091 if (debug_threads)
4092 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
4093 }
4094
4095 if (lwp->status_pending_p)
4096 {
4097 if (debug_threads)
4098 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4099 " status.\n",
4100 lwpid_of (thread));
4101 return 0;
4102 }
4103
4104 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4105 or we have. */
4106 pc = get_pc (lwp);
4107
4108 /* If the PC has changed since we stopped, then don't do anything,
4109 and let the breakpoint/tracepoint be hit. This happens if, for
4110 instance, GDB handled the decr_pc_after_break subtraction itself,
4111 GDB is OOL stepping this thread, the user has issued a "jump"
4112 command, or the user has poked the thread's registers herself. */
4113 if (pc != lwp->stop_pc)
4114 {
4115 if (debug_threads)
4116 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4117 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4118 lwpid_of (thread),
4119 paddress (lwp->stop_pc), paddress (pc));
4120
4121 lwp->need_step_over = 0;
4122 return 0;
4123 }
4124
4125 saved_thread = current_thread;
4126 current_thread = thread;
4127
4128 /* We can only step over breakpoints we know about. */
4129 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4130 {
4131 /* Don't step over a breakpoint that GDB expects to hit,
4132 though. If the condition is being evaluated on the target's side
4133 and it evaluates to false, step over this breakpoint as well. */
4134 if (gdb_breakpoint_here (pc)
4135 && gdb_condition_true_at_breakpoint (pc)
4136 && gdb_no_commands_at_breakpoint (pc))
4137 {
4138 if (debug_threads)
4139 debug_printf ("Need step over [LWP %ld]? yes, but found"
4140 " GDB breakpoint at 0x%s; skipping step over\n",
4141 lwpid_of (thread), paddress (pc));
4142
4143 current_thread = saved_thread;
4144 return 0;
4145 }
4146 else
4147 {
4148 if (debug_threads)
4149 debug_printf ("Need step over [LWP %ld]? yes, "
4150 "found breakpoint at 0x%s\n",
4151 lwpid_of (thread), paddress (pc));
4152
4153 /* We've found an lwp that needs stepping over --- return 1 so
4154 that find_inferior stops looking. */
4155 current_thread = saved_thread;
4156
4157 /* If the step over is cancelled, this is set again. */
4158 lwp->need_step_over = 0;
4159 return 1;
4160 }
4161 }
4162
4163 current_thread = saved_thread;
4164
4165 if (debug_threads)
4166 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4167 " at 0x%s\n",
4168 lwpid_of (thread), paddress (pc));
4169
4170 return 0;
4171 }
4172
4173 /* Start a step-over operation on LWP. When LWP is stopped at a
4174 breakpoint, we need to move the breakpoint out of the way to make
4175 progress. If we let other threads run while we do that, they may
4176 pass by the breakpoint location and miss hitting it. To avoid
4177 that, a step-over momentarily stops all threads, and single-steps
4178 LWP with the breakpoint temporarily uninserted from the
4179 inferior. When the single-step finishes, we reinsert the
4180 breakpoint, and let all threads that are supposed to be running
4181 run again.
4182
4183 On targets that don't support hardware single-step, we don't
4184 currently support full software single-stepping. Instead, we only
4185 support stepping over the thread event breakpoint, by asking the
4186 low target where to place a reinsert breakpoint. Since the
4187 breakpoint being stepped over is assumed to be a thread event
4188 breakpoint, the return address of the current function is usually
4189 a good enough place to set the reinsert breakpoint. */
4190
4191 static int
4192 start_step_over (struct lwp_info *lwp)
4193 {
4194 struct thread_info *thread = get_lwp_thread (lwp);
4195 struct thread_info *saved_thread;
4196 CORE_ADDR pc;
4197 int step;
4198
4199 if (debug_threads)
4200 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4201 lwpid_of (thread));
4202
4203 stop_all_lwps (1, lwp);
4204 gdb_assert (lwp->suspended == 0);
4205
4206 if (debug_threads)
4207 debug_printf ("Done stopping all threads for step-over.\n");
4208
4209 /* Note, we should always reach here with an already adjusted PC,
4210 either by GDB (if we're resuming due to GDB's request), or by our
4211 caller, if we just finished handling an internal breakpoint GDB
4212 shouldn't care about. */
4213 pc = get_pc (lwp);
4214
4215 saved_thread = current_thread;
4216 current_thread = thread;
4217
4218 lwp->bp_reinsert = pc;
4219 uninsert_breakpoints_at (pc);
4220 uninsert_fast_tracepoint_jumps_at (pc);
4221
4222 if (can_hardware_single_step ())
4223 {
4224 step = 1;
4225 }
4226 else
4227 {
4228 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
4229 set_reinsert_breakpoint (raddr);
4230 step = 0;
4231 }
4232
4233 current_thread = saved_thread;
4234
4235 linux_resume_one_lwp (lwp, step, 0, NULL);
4236
4237 /* Require next event from this LWP. */
4238 step_over_bkpt = thread->entry.id;
4239 return 1;
4240 }
4241
4242 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4243 start_step_over, if still there, and delete any reinsert
4244 breakpoints we've set, on targets without hardware single-step. */
4245
4246 static int
4247 finish_step_over (struct lwp_info *lwp)
4248 {
4249 if (lwp->bp_reinsert != 0)
4250 {
4251 if (debug_threads)
4252 debug_printf ("Finished step over.\n");
4253
4254 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4255 may be no breakpoint to reinsert there by now. */
4256 reinsert_breakpoints_at (lwp->bp_reinsert);
4257 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4258
4259 lwp->bp_reinsert = 0;
4260
4261 /* Delete any software-single-step reinsert breakpoints. No
4262 longer needed. We don't have to worry about other threads
4263 hitting this trap, and later not being able to explain it,
4264 because we were stepping over a breakpoint, and we hold all
4265 threads but LWP stopped while doing that. */
4266 if (!can_hardware_single_step ())
4267 delete_reinsert_breakpoints ();
4268
4269 step_over_bkpt = null_ptid;
4270 return 1;
4271 }
4272 else
4273 return 0;
4274 }
4275
4276 /* This function is called once per thread. We check the thread's resume
4277 request, which will tell us whether to resume, step, or leave the thread
4278 stopped; and what signal, if any, it should be sent.
4279
4280 For threads which we aren't explicitly told otherwise, we preserve
4281 the stepping flag; this is used for stepping over gdbserver-placed
4282 breakpoints.
4283
4284 If pending_flags was set in any thread, we queue any needed
4285 signals, since we won't actually resume. We already have a pending
4286 event to report, so we don't need to preserve any step requests;
4287 they should be re-issued if necessary. */
4288
4289 static int
4290 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
4291 {
4292 struct thread_info *thread = (struct thread_info *) entry;
4293 struct lwp_info *lwp = get_thread_lwp (thread);
4294 int step;
4295 int leave_all_stopped = * (int *) arg;
4296 int leave_pending;
4297
4298 if (lwp->resume == NULL)
4299 return 0;
4300
4301 if (lwp->resume->kind == resume_stop)
4302 {
4303 if (debug_threads)
4304 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4305
4306 if (!lwp->stopped)
4307 {
4308 if (debug_threads)
4309 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4310
4311 /* Stop the thread, and wait for the event asynchronously,
4312 through the event loop. */
4313 send_sigstop (lwp);
4314 }
4315 else
4316 {
4317 if (debug_threads)
4318 debug_printf ("already stopped LWP %ld\n",
4319 lwpid_of (thread));
4320
4321 /* The LWP may have been stopped in an internal event that
4322 was not meant to be notified back to GDB (e.g., gdbserver
4323 breakpoint), so we should be reporting a stop event in
4324 this case too. */
4325
4326 /* If the thread already has a pending SIGSTOP, this is a
4327 no-op. Otherwise, something later will presumably resume
4328 the thread and this will cause it to cancel any pending
4329 operation, due to last_resume_kind == resume_stop. If
4330 the thread already has a pending status to report, we
4331 will still report it the next time we wait - see
4332 status_pending_p_callback. */
4333
4334 /* If we already have a pending signal to report, then
4335 there's no need to queue a SIGSTOP, as this means we're
4336 midway through moving the LWP out of the jumppad, and we
4337 will report the pending signal as soon as that is
4338 finished. */
4339 if (lwp->pending_signals_to_report == NULL)
4340 send_sigstop (lwp);
4341 }
4342
4343 /* For stop requests, we're done. */
4344 lwp->resume = NULL;
4345 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4346 return 0;
4347 }
4348
4349 /* If this thread, which is about to be resumed, has a pending
4350 status, then don't resume any threads - we can just report the
4351 pending status. Make sure to queue any signals that would
4352 otherwise be sent. In all-stop mode, we make this decision based
4353 on whether *any* thread has a pending status. If there's a thread
4354 that needs the step-over-breakpoint dance, then don't resume any
4355 other thread but that particular one. */
4356 leave_pending = (lwp->status_pending_p || leave_all_stopped);
4357
4358 if (!leave_pending)
4359 {
4360 if (debug_threads)
4361 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4362
4363 step = (lwp->resume->kind == resume_step);
4364 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
4365 }
4366 else
4367 {
4368 if (debug_threads)
4369 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4370
4371 /* If we have a new signal, enqueue the signal. */
4372 if (lwp->resume->sig != 0)
4373 {
4374 struct pending_signals *p_sig;
4375 p_sig = xmalloc (sizeof (*p_sig));
4376 p_sig->prev = lwp->pending_signals;
4377 p_sig->signal = lwp->resume->sig;
4378 memset (&p_sig->info, 0, sizeof (siginfo_t));
4379
4380 /* If this is the same signal we were previously stopped by,
4381 make sure to queue its siginfo. We can ignore the return
4382 value of ptrace; if it fails, we'll skip
4383 PTRACE_SETSIGINFO. */
4384 if (WIFSTOPPED (lwp->last_status)
4385 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
4386 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4387 &p_sig->info);
4388
4389 lwp->pending_signals = p_sig;
4390 }
4391 }
4392
4393 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4394 lwp->resume = NULL;
4395 return 0;
4396 }
4397
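/* Implementation of the resume target op. Process the N resume
   requests in RESUME_INFO: record each thread's request, decide
   whether anything may run at all (a pending status or a needed
   step-over leaves every thread stopped), queue signals for threads
   left stopped, and resume or start a step-over as appropriate. */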
4398 static void
4399 linux_resume (struct thread_resume *resume_info, size_t n)
4400 {
4401 struct thread_resume_array array = { resume_info, n };
4402 struct thread_info *need_step_over = NULL;
4403 int any_pending;
4404 int leave_all_stopped;
4405
4406 if (debug_threads)
4407 {
4408 debug_enter ();
4409 debug_printf ("linux_resume:\n");
4410 }
4411
4412 find_inferior (&all_threads, linux_set_resume_request, &array);
4413
4414 /* If there is a thread which would otherwise be resumed, which has
4415 a pending status, then don't resume any threads - we can just
4416 report the pending status. Make sure to queue any signals that
4417 would otherwise be sent. In non-stop mode, we'll apply this
4418 logic to each thread individually. We consume all pending events
4419 before considering starting a step-over (in all-stop). */
4420 any_pending = 0;
4421 if (!non_stop)
4422 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
4423
4424 /* If there is a thread which would otherwise be resumed, which is
4425 stopped at a breakpoint that needs stepping over, then don't
4426 resume any threads - have it step over the breakpoint with all
4427 other threads stopped, then resume all threads again. Make sure
4428 to queue any signals that would otherwise be delivered or
4429 queued. */
4430 if (!any_pending && supports_breakpoints ())
4431 need_step_over
4432 = (struct thread_info *) find_inferior (&all_threads,
4433 need_step_over_p, NULL);
4434
4435 leave_all_stopped = (need_step_over != NULL || any_pending);
4436
4437 if (debug_threads)
4438 {
4439 if (need_step_over != NULL)
4440 debug_printf ("Not resuming all, need step over\n");
4441 else if (any_pending)
4442 debug_printf ("Not resuming, all-stop and found "
4443 "an LWP with pending status\n");
4444 else
4445 debug_printf ("Resuming, no pending status or step over needed\n");
4446 }
4447
4448 /* Even if we're leaving threads stopped, queue all signals we'd
4449 otherwise deliver. */
4450 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4451
4452 if (need_step_over)
4453 start_step_over (get_thread_lwp (need_step_over));
4454
4455 if (debug_threads)
4456 {
4457 debug_printf ("linux_resume done\n");
4458 debug_exit ();
4459 }
4460 }
4461
4462 /* This function is called once per thread. We check the thread's
4463 last resume request, which will tell us whether to resume, step, or
4464 leave the thread stopped. Any signal the client requested to be
4465 delivered has already been enqueued at this point.
4466
4467 If any thread that GDB wants running is stopped at an internal
4468 breakpoint that needs stepping over, we start a step-over operation
4469 on that particular thread, and leave all others stopped. */
4470
4471 static int
4472 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4473 {
4474 struct thread_info *thread = (struct thread_info *) entry;
4475 struct lwp_info *lwp = get_thread_lwp (thread);
4476 int step;
4477
4478 if (lwp == except)
4479 return 0;
4480
4481 if (debug_threads)
4482 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4483
4484 if (!lwp->stopped)
4485 {
4486 if (debug_threads)
4487 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4488 return 0;
4489 }
4490
4491 if (thread->last_resume_kind == resume_stop
4492 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4493 {
4494 if (debug_threads)
4495 debug_printf (" client wants LWP %ld to remain stopped\n",
4496 lwpid_of (thread));
4497 return 0;
4498 }
4499
4500 if (lwp->status_pending_p)
4501 {
4502 if (debug_threads)
4503 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4504 lwpid_of (thread));
4505 return 0;
4506 }
4507
4508 gdb_assert (lwp->suspended >= 0);
4509
4510 if (lwp->suspended)
4511 {
4512 if (debug_threads)
4513 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4514 return 0;
4515 }
4516
4517 if (thread->last_resume_kind == resume_stop
4518 && lwp->pending_signals_to_report == NULL
4519 && lwp->collecting_fast_tracepoint == 0)
4520 {
4521 /* We haven't reported this LWP as stopped yet (otherwise, the
4522 last_status.kind check above would catch it, and we wouldn't
4523 reach here). This LWP may have been momentarily paused by a
4524 stop_all_lwps call while handling, for example, another LWP's
4525 step-over. In that case, the pending expected SIGSTOP signal
4526 that was queued at vCont;t handling time will have already
4527 been consumed by wait_for_sigstop, and so we need to requeue
4528 another one here. Note that if the LWP already has a SIGSTOP
4529 pending, this is a no-op. */
4530
4531 if (debug_threads)
4532 debug_printf ("Client wants LWP %ld to stop. "
4533 "Making sure it has a SIGSTOP pending\n",
4534 lwpid_of (thread));
4535
4536 send_sigstop (lwp);
4537 }
4538
4539 step = thread->last_resume_kind == resume_step;
4540 linux_resume_one_lwp (lwp, step, 0, NULL);
4541 return 0;
4542 }
4543
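/* find_inferior callback. Decrement the suspend count of the LWP
   behind ENTRY, unless it is EXCEPT, and then let proceed_one_lwp
   decide whether to set it running again. */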
4544 static int
4545 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4546 {
4547 struct thread_info *thread = (struct thread_info *) entry;
4548 struct lwp_info *lwp = get_thread_lwp (thread);
4549
4550 if (lwp == except)
4551 return 0;
4552
4553 lwp->suspended--;
4554 gdb_assert (lwp->suspended >= 0);
4555
4556 return proceed_one_lwp (entry, except);
4557 }
4558
4559 /* When we finish a step-over, set threads running again. If there's
4560 another thread that may need a step-over, now's the time to start
4561 it. Eventually, we'll move all threads past their breakpoints. */
4562
4563 static void
4564 proceed_all_lwps (void)
4565 {
4566 struct thread_info *need_step_over;
4567
4568 /* If there is a thread which would otherwise be resumed, which is
4569 stopped at a breakpoint that needs stepping over, then don't
4570 resume any threads - have it step over the breakpoint with all
4571 other threads stopped, then resume all threads again. */
4572
4573 if (supports_breakpoints ())
4574 {
4575 need_step_over
4576 = (struct thread_info *) find_inferior (&all_threads,
4577 need_step_over_p, NULL);
4578
4579 if (need_step_over != NULL)
4580 {
4581 if (debug_threads)
4582 debug_printf ("proceed_all_lwps: found "
4583 "thread %ld needing a step-over\n",
4584 lwpid_of (need_step_over));
4585
4586 start_step_over (get_thread_lwp (need_step_over));
4587 return;
4588 }
4589 }
4590
4591 if (debug_threads)
4592 debug_printf ("Proceeding, no step-over needed\n");
4593
4594 find_inferior (&all_threads, proceed_one_lwp, NULL);
4595 }
4596
4597 /* Set running again any stopped LWPs that the client wanted running
4598 and that have no pending status, except for EXCEPT, if not NULL.
4599 This undoes a stop_all_lwps call. */
4600
4601 static void
4602 unstop_all_lwps (int unsuspend, struct lwp_info *except)
4603 {
4604 if (debug_threads)
4605 {
4606 debug_enter ();
4607 if (except)
4608 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
4609 lwpid_of (get_lwp_thread (except)));
4610 else
4611 debug_printf ("unstopping all lwps\n");
4612 }
4613
4614 if (unsuspend)
4615 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
4616 else
4617 find_inferior (&all_threads, proceed_one_lwp, except);
4618
4619 if (debug_threads)
4620 {
4621 debug_printf ("unstop_all_lwps done\n");
4622 debug_exit ();
4623 }
4624 }
4625
4626
4627 #ifdef HAVE_LINUX_REGSETS
4628
4629 #define use_linux_regsets 1
4630
4631 /* Returns true if REGSET has been disabled. */
4632
4633 static int
4634 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4635 {
4636 return (info->disabled_regsets != NULL
4637 && info->disabled_regsets[regset - info->regsets]);
4638 }
4639
4640 /* Disable REGSET. */
4641
4642 static void
4643 disable_regset (struct regsets_info *info, struct regset_info *regset)
4644 {
4645 int dr_offset;
4646
4647 dr_offset = regset - info->regsets;
4648 if (info->disabled_regsets == NULL)
4649 info->disabled_regsets = xcalloc (1, info->num_regsets);
4650 info->disabled_regsets[dr_offset] = 1;
4651 }
4652
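/* Fetch registers from the inferior into REGCACHE using every
   enabled regset. Returns 0 if the general registers were among
   them, 1 if the caller still needs to fetch them some other way. */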
4653 static int
4654 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4655 struct regcache *regcache)
4656 {
4657 struct regset_info *regset;
4658 int saw_general_regs = 0;
4659 int pid;
4660 struct iovec iov;
4661
4662 pid = lwpid_of (current_thread);
4663 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4664 {
4665 void *buf, *data;
4666 int nt_type, res;
4667
4668 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4669 continue;
4670
4671 buf = xmalloc (regset->size);
4672
4673 nt_type = regset->nt_type;
4674 if (nt_type)
4675 {
4676 iov.iov_base = buf;
4677 iov.iov_len = regset->size;
4678 data = (void *) &iov;
4679 }
4680 else
4681 data = buf;
4682
4683 #ifndef __sparc__
4684 res = ptrace (regset->get_request, pid,
4685 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4686 #else
4687 res = ptrace (regset->get_request, pid, data, nt_type);
4688 #endif
4689 if (res < 0)
4690 {
4691 if (errno == EIO)
4692 {
4693 /* If we get EIO on a regset, do not try it again for
4694 this process mode. */
4695 disable_regset (regsets_info, regset);
4696 }
4697 else if (errno == ENODATA)
4698 {
4699 /* ENODATA may be returned if the regset is currently
4700 not "active". This can happen in normal operation,
4701 so suppress the warning in this case. */
4702 }
4703 else
4704 {
4705 char s[256];
4706 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4707 pid);
4708 perror (s);
4709 }
4710 }
4711 else
4712 {
4713 if (regset->type == GENERAL_REGS)
4714 saw_general_regs = 1;
4715 regset->store_function (regcache, buf);
4716 }
4717 free (buf);
4718 }
4719 if (saw_general_regs)
4720 return 0;
4721 else
4722 return 1;
4723 }
4724
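/* Write registers from REGCACHE back to the inferior using every
   enabled regset that has a fill function. As above, returns 0 if
   the general registers were covered, 1 otherwise. */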
4725 static int
4726 regsets_store_inferior_registers (struct regsets_info *regsets_info,
4727 struct regcache *regcache)
4728 {
4729 struct regset_info *regset;
4730 int saw_general_regs = 0;
4731 int pid;
4732 struct iovec iov;
4733
4734 pid = lwpid_of (current_thread);
4735 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4736 {
4737 void *buf, *data;
4738 int nt_type, res;
4739
4740 if (regset->size == 0 || regset_disabled (regsets_info, regset)
4741 || regset->fill_function == NULL)
4742 continue;
4743
4744 buf = xmalloc (regset->size);
4745
4746 /* First fill the buffer with the current register set contents,
4747 in case there are any items in the kernel's regset that are
4748 not in gdbserver's regcache. */
4749
4750 nt_type = regset->nt_type;
4751 if (nt_type)
4752 {
4753 iov.iov_base = buf;
4754 iov.iov_len = regset->size;
4755 data = (void *) &iov;
4756 }
4757 else
4758 data = buf;
4759
4760 #ifndef __sparc__
4761 res = ptrace (regset->get_request, pid,
4762 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4763 #else
4764 res = ptrace (regset->get_request, pid, data, nt_type);
4765 #endif
4766
4767 if (res == 0)
4768 {
4769 /* Then overlay our cached registers on that. */
4770 regset->fill_function (regcache, buf);
4771
4772 /* Only now do we write the register set. */
4773 #ifndef __sparc__
4774 res = ptrace (regset->set_request, pid,
4775 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4776 #else
4777 res = ptrace (regset->set_request, pid, data, nt_type);
4778 #endif
4779 }
4780
4781 if (res < 0)
4782 {
4783 if (errno == EIO)
4784 {
4785 /* If we get EIO on a regset, do not try it again for
4786 this process mode. */
4787 disable_regset (regsets_info, regset);
4788 }
4789 else if (errno == ESRCH)
4790 {
4791 /* At this point, ESRCH should mean the process is
4792 already gone, in which case we simply ignore attempts
4793 to change its registers. See also the related
4794 comment in linux_resume_one_lwp. */
4795 free (buf);
4796 return 0;
4797 }
4798 else
4799 {
4800 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4801 }
4802 }
4803 else if (regset->type == GENERAL_REGS)
4804 saw_general_regs = 1;
4805 free (buf);
4806 }
4807 if (saw_general_regs)
4808 return 0;
4809 else
4810 return 1;
4811 }
4812
4813 #else /* !HAVE_LINUX_REGSETS */
4814
4815 #define use_linux_regsets 0
4816 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4817 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4818
4819 #endif
4820
4821 /* Return 1 if register REGNO is supported by one of the regset ptrace
4822 calls or 0 if it has to be transferred individually. */
4823
4824 static int
4825 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
4826 {
4827 unsigned char mask = 1 << (regno % 8);
4828 size_t index = regno / 8;
4829
4830 return (use_linux_regsets
4831 && (regs_info->regset_bitmap == NULL
4832 || (regs_info->regset_bitmap[index] & mask) != 0));
4833 }
4834
4835 #ifdef HAVE_LINUX_USRREGS
4836
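/* Return the PTRACE_PEEKUSER/POKEUSER offset of register REGNUM in
   the USRREGS map. A -1 entry in the map means the register cannot
   be transferred this way. */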
4837 int
4838 register_addr (const struct usrregs_info *usrregs, int regnum)
4839 {
4840 int addr;
4841
4842 if (regnum < 0 || regnum >= usrregs->num_regs)
4843 error ("Invalid register number %d.", regnum);
4844
4845 addr = usrregs->regmap[regnum];
4846
4847 return addr;
4848 }
4849
4850 /* Fetch one register. */
4851 static void
4852 fetch_register (const struct usrregs_info *usrregs,
4853 struct regcache *regcache, int regno)
4854 {
4855 CORE_ADDR regaddr;
4856 int i, size;
4857 char *buf;
4858 int pid;
4859
4860 if (regno >= usrregs->num_regs)
4861 return;
4862 if ((*the_low_target.cannot_fetch_register) (regno))
4863 return;
4864
4865 regaddr = register_addr (usrregs, regno);
4866 if (regaddr == -1)
4867 return;
4868
4869 size = ((register_size (regcache->tdesc, regno)
4870 + sizeof (PTRACE_XFER_TYPE) - 1)
4871 & -sizeof (PTRACE_XFER_TYPE));
4872 buf = alloca (size);
4873
4874 pid = lwpid_of (current_thread);
4875 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4876 {
4877 errno = 0;
4878 *(PTRACE_XFER_TYPE *) (buf + i) =
4879 ptrace (PTRACE_PEEKUSER, pid,
4880 /* Coerce to a uintptr_t first to avoid potential gcc warning
4881 of coercing an 8 byte integer to a 4 byte pointer. */
4882 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
4883 regaddr += sizeof (PTRACE_XFER_TYPE);
4884 if (errno != 0)
4885 error ("reading register %d: %s", regno, strerror (errno));
4886 }
4887
4888 if (the_low_target.supply_ptrace_register)
4889 the_low_target.supply_ptrace_register (regcache, regno, buf);
4890 else
4891 supply_register (regcache, regno, buf);
4892 }
4893
4894 /* Store one register. */
4895 static void
4896 store_register (const struct usrregs_info *usrregs,
4897 struct regcache *regcache, int regno)
4898 {
4899 CORE_ADDR regaddr;
4900 int i, size;
4901 char *buf;
4902 int pid;
4903
4904 if (regno >= usrregs->num_regs)
4905 return;
4906 if ((*the_low_target.cannot_store_register) (regno))
4907 return;
4908
4909 regaddr = register_addr (usrregs, regno);
4910 if (regaddr == -1)
4911 return;
4912
4913 size = ((register_size (regcache->tdesc, regno)
4914 + sizeof (PTRACE_XFER_TYPE) - 1)
4915 & -sizeof (PTRACE_XFER_TYPE));
4916 buf = alloca (size);
4917 memset (buf, 0, size);
4918
4919 if (the_low_target.collect_ptrace_register)
4920 the_low_target.collect_ptrace_register (regcache, regno, buf);
4921 else
4922 collect_register (regcache, regno, buf);
4923
4924 pid = lwpid_of (current_thread);
4925 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4926 {
4927 errno = 0;
4928 ptrace (PTRACE_POKEUSER, pid,
4929 /* Coerce to a uintptr_t first to avoid potential gcc warning
4930 about coercing an 8 byte integer to a 4 byte pointer. */
4931 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
4932 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
4933 if (errno != 0)
4934 {
4935 /* At this point, ESRCH should mean the process is
4936 already gone, in which case we simply ignore attempts
4937 to change its registers. See also the related
4938 comment in linux_resume_one_lwp. */
4939 if (errno == ESRCH)
4940 return;
4941
4942 if ((*the_low_target.cannot_store_register) (regno) == 0)
4943 error ("writing register %d: %s", regno, strerror (errno));
4944 }
4945 regaddr += sizeof (PTRACE_XFER_TYPE);
4946 }
4947 }
4948
4949 /* Fetch all registers, or just one, from the child process.
4950 If REGNO is -1, do this for all registers, skipping any that are
4951 assumed to have been retrieved by regsets_fetch_inferior_registers,
4952 unless ALL is non-zero.
4953 Otherwise, REGNO specifies which register (so we can save time). */
4954 static void
4955 usr_fetch_inferior_registers (const struct regs_info *regs_info,
4956 struct regcache *regcache, int regno, int all)
4957 {
4958 struct usrregs_info *usr = regs_info->usrregs;
4959
4960 if (regno == -1)
4961 {
4962 for (regno = 0; regno < usr->num_regs; regno++)
4963 if (all || !linux_register_in_regsets (regs_info, regno))
4964 fetch_register (usr, regcache, regno);
4965 }
4966 else
4967 fetch_register (usr, regcache, regno);
4968 }
4969
4970 /* Store our register values back into the inferior.
4971 If REGNO is -1, do this for all registers, skipping any that are
4972 assumed to have been saved by regsets_store_inferior_registers,
4973 unless ALL is non-zero.
4974 Otherwise, REGNO specifies which register (so we can save time). */
4975 static void
4976 usr_store_inferior_registers (const struct regs_info *regs_info,
4977 struct regcache *regcache, int regno, int all)
4978 {
4979 struct usrregs_info *usr = regs_info->usrregs;
4980
4981 if (regno == -1)
4982 {
4983 for (regno = 0; regno < usr->num_regs; regno++)
4984 if (all || !linux_register_in_regsets (regs_info, regno))
4985 store_register (usr, regcache, regno);
4986 }
4987 else
4988 store_register (usr, regcache, regno);
4989 }
4990
4991 #else /* !HAVE_LINUX_USRREGS */
4992
4993 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4994 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4995
4996 #endif
4997
4998
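/* Fetch registers from the inferior into REGCACHE. REGNO is the
   register to fetch, or -1 for all of them. Tries the low target's
   fetch_register hook first, then regsets, then falls back to
   PTRACE_PEEKUSER transfers for anything not yet covered. */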
4999 void
5000 linux_fetch_registers (struct regcache *regcache, int regno)
5001 {
5002 int use_regsets;
5003 int all = 0;
5004 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5005
5006 if (regno == -1)
5007 {
5008 if (the_low_target.fetch_register != NULL
5009 && regs_info->usrregs != NULL)
5010 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5011 (*the_low_target.fetch_register) (regcache, regno);
5012
5013 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5014 if (regs_info->usrregs != NULL)
5015 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5016 }
5017 else
5018 {
5019 if (the_low_target.fetch_register != NULL
5020 && (*the_low_target.fetch_register) (regcache, regno))
5021 return;
5022
5023 use_regsets = linux_register_in_regsets (regs_info, regno);
5024 if (use_regsets)
5025 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5026 regcache);
5027 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5028 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5029 }
5030 }
5031
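/* Store registers from REGCACHE back into the inferior. REGNO is
   the register to store, or -1 for all of them. Mirrors
   linux_fetch_registers: regsets first, then PTRACE_POKEUSER. */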
5032 void
5033 linux_store_registers (struct regcache *regcache, int regno)
5034 {
5035 int use_regsets;
5036 int all = 0;
5037 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5038
5039 if (regno == -1)
5040 {
5041 all = regsets_store_inferior_registers (regs_info->regsets_info,
5042 regcache);
5043 if (regs_info->usrregs != NULL)
5044 usr_store_inferior_registers (regs_info, regcache, regno, all);
5045 }
5046 else
5047 {
5048 use_regsets = linux_register_in_regsets (regs_info, regno);
5049 if (use_regsets)
5050 all = regsets_store_inferior_registers (regs_info->regsets_info,
5051 regcache);
5052 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5053 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5054 }
5055 }
5056
5057
5058 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5059 to debugger memory starting at MYADDR. */
5060
5061 static int
5062 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5063 {
5064 int pid = lwpid_of (current_thread);
5065 register PTRACE_XFER_TYPE *buffer;
5066 register CORE_ADDR addr;
5067 register int count;
5068 char filename[64];
5069 register int i;
5070 int ret;
5071 int fd;
5072
5073 /* Try using /proc. Don't bother for one word. */
5074 if (len >= 3 * sizeof (long))
5075 {
5076 int bytes;
5077
5078 /* We could keep this file open and cache it - possibly one per
5079 thread. That requires some juggling, but is even faster. */
5080 sprintf (filename, "/proc/%d/mem", pid);
5081 fd = open (filename, O_RDONLY | O_LARGEFILE);
5082 if (fd == -1)
5083 goto no_proc;
5084
5085 /* If pread64 is available, use it. It's faster if the kernel
5086 supports it (only one syscall), and it's 64-bit safe even on
5087 32-bit platforms (for instance, SPARC debugging a SPARC64
5088 application). */
5089 #ifdef HAVE_PREAD64
5090 bytes = pread64 (fd, myaddr, len, memaddr);
5091 #else
5092 bytes = -1;
5093 if (lseek (fd, memaddr, SEEK_SET) != -1)
5094 bytes = read (fd, myaddr, len);
5095 #endif
5096
5097 close (fd);
5098 if (bytes == len)
5099 return 0;
5100
5101 /* Some data was read; we'll try to get the rest with ptrace. */
5102 if (bytes > 0)
5103 {
5104 memaddr += bytes;
5105 myaddr += bytes;
5106 len -= bytes;
5107 }
5108 }
5109
5110 no_proc:
5111 /* Round starting address down to longword boundary. */
5112 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5113 /* Round ending address up; get number of longwords that makes. */
5114 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5115 / sizeof (PTRACE_XFER_TYPE));
5116 /* Allocate buffer of that many longwords. */
5117 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
5118
5119 /* Read all the longwords. */
5120 errno = 0;
5121 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5122 {
5123 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5124 about coercing an 8 byte integer to a 4 byte pointer. */
5125 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5126 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5127 (PTRACE_TYPE_ARG4) 0);
5128 if (errno)
5129 break;
5130 }
5131 ret = errno;
5132
5133 /* Copy appropriate bytes out of the buffer. */
5134 if (i > 0)
5135 {
5136 i *= sizeof (PTRACE_XFER_TYPE);
5137 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5138 memcpy (myaddr,
5139 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5140 i < len ? i : len);
5141 }
5142
5143 return ret;
5144 }
5145
5146 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5147 memory at MEMADDR. On failure (cannot write to the inferior)
5148 returns the value of errno. Always succeeds if LEN is zero. */
5149
5150 static int
5151 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5152 {
5153 register int i;
5154 /* Round starting address down to longword boundary. */
5155 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5156 /* Round ending address up; get number of longwords that makes. */
5157 register int count
5158 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5159 / sizeof (PTRACE_XFER_TYPE);
5160
5161 /* Allocate buffer of that many longwords. */
5162 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
5163 alloca (count * sizeof (PTRACE_XFER_TYPE));
5164
5165 int pid = lwpid_of (current_thread);
5166
5167 if (len == 0)
5168 {
5169 /* Zero length write always succeeds. */
5170 return 0;
5171 }
5172
5173 if (debug_threads)
5174 {
5175 /* Dump up to four bytes. */
5176 unsigned int val = * (unsigned int *) myaddr;
5177 if (len == 1)
5178 val = val & 0xff;
5179 else if (len == 2)
5180 val = val & 0xffff;
5181 else if (len == 3)
5182 val = val & 0xffffff;
5183 debug_printf ("Writing %0*x to 0x%08lx in process %d\n",
5184 2 * ((len < 4) ? len : 4), val, (long)memaddr, pid);
5185 }
5186
5187 /* Fill start and end extra bytes of buffer with existing memory data. */
5188
5189 errno = 0;
5190 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5191 about coercing an 8 byte integer to a 4 byte pointer. */
5192 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5193 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5194 (PTRACE_TYPE_ARG4) 0);
5195 if (errno)
5196 return errno;
5197
5198 if (count > 1)
5199 {
5200 errno = 0;
5201 buffer[count - 1]
5202 = ptrace (PTRACE_PEEKTEXT, pid,
5203 /* Coerce to a uintptr_t first to avoid potential gcc warning
5204 about coercing an 8 byte integer to a 4 byte pointer. */
5205 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5206 * sizeof (PTRACE_XFER_TYPE)),
5207 (PTRACE_TYPE_ARG4) 0);
5208 if (errno)
5209 return errno;
5210 }
5211
5212 /* Copy data to be written over corresponding part of buffer. */
5213
5214 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5215 myaddr, len);
5216
5217 /* Write the entire buffer. */
5218
5219 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5220 {
5221 errno = 0;
5222 ptrace (PTRACE_POKETEXT, pid,
5223 /* Coerce to a uintptr_t first to avoid potential gcc warning
5224 about coercing an 8 byte integer to a 4 byte pointer. */
5225 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5226 (PTRACE_TYPE_ARG4) buffer[i]);
5227 if (errno)
5228 return errno;
5229 }
5230
5231 return 0;
5232 }
5233
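/* Implementation of the look_up_symbols target op. On the first
   call for a process, initialize libthread_db support so we can
   learn about the inferior's threads. */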
5234 static void
5235 linux_look_up_symbols (void)
5236 {
5237 #ifdef USE_THREAD_DB
5238 struct process_info *proc = current_process ();
5239
5240 if (proc->priv->thread_db != NULL)
5241 return;
5242
5243 /* If the kernel supports tracing clones, then we don't need to
5244 use the magic thread event breakpoint to learn about
5245 threads. */
5246 thread_db_init (!linux_supports_traceclone ());
5247 #endif
5248 }
5249
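/* Implementation of the request_interrupt target op. */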
5250 static void
5251 linux_request_interrupt (void)
5252 {
5253 extern unsigned long signal_pid;
5254
5255 /* Send a SIGINT to the process group. This acts just like the user
5256 typed a ^C on the controlling terminal. */
5257 kill (-signal_pid, SIGINT);
5258 }
5259
5260 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5261 to debugger memory starting at MYADDR. */
5262
5263 static int
5264 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5265 {
5266 char filename[PATH_MAX];
5267 int fd, n;
5268 int pid = lwpid_of (current_thread);
5269
5270 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5271
5272 fd = open (filename, O_RDONLY);
5273 if (fd < 0)
5274 return -1;
5275
5276 if (offset != (CORE_ADDR) 0
5277 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5278 n = -1;
5279 else
5280 n = read (fd, myaddr, len);
5281
5282 close (fd);
5283
5284 return n;
5285 }
5286
5287 /* These breakpoint- and watchpoint-related wrapper functions simply
5288 pass the call on to the low target if it has registered a
5289 corresponding function. */
5290
5291 static int
5292 linux_supports_z_point_type (char z_type)
5293 {
5294 return (the_low_target.supports_z_point_type != NULL
5295 && the_low_target.supports_z_point_type (z_type));
5296 }
5297
5298 static int
5299 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5300 int size, struct raw_breakpoint *bp)
5301 {
5302 if (type == raw_bkpt_type_sw)
5303 return insert_memory_breakpoint (bp);
5304 else if (the_low_target.insert_point != NULL)
5305 return the_low_target.insert_point (type, addr, size, bp);
5306 else
5307 /* Unsupported (see target.h). */
5308 return 1;
5309 }
5310
5311 static int
5312 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5313 int size, struct raw_breakpoint *bp)
5314 {
5315 if (type == raw_bkpt_type_sw)
5316 return remove_memory_breakpoint (bp);
5317 else if (the_low_target.remove_point != NULL)
5318 return the_low_target.remove_point (type, addr, size, bp);
5319 else
5320 /* Unsupported (see target.h). */
5321 return 1;
5322 }
5323
5324 /* Implement the to_stopped_by_sw_breakpoint target_ops
5325 method. */
5326
5327 static int
5328 linux_stopped_by_sw_breakpoint (void)
5329 {
5330 struct lwp_info *lwp = get_thread_lwp (current_thread);
5331
5332 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5333 }
5334
5335 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5336 method. */
5337
5338 static int
5339 linux_supports_stopped_by_sw_breakpoint (void)
5340 {
5341 return USE_SIGTRAP_SIGINFO;
5342 }
5343
5344 /* Implement the to_stopped_by_hw_breakpoint target_ops
5345 method. */
5346
5347 static int
5348 linux_stopped_by_hw_breakpoint (void)
5349 {
5350 struct lwp_info *lwp = get_thread_lwp (current_thread);
5351
5352 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5353 }
5354
5355 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
5356 method. */
5357
5358 static int
5359 linux_supports_stopped_by_hw_breakpoint (void)
5360 {
5361 return USE_SIGTRAP_SIGINFO;
5362 }
5363
5364 /* Implement the supports_conditional_breakpoints target_ops
5365 method. */
5366
5367 static int
5368 linux_supports_conditional_breakpoints (void)
5369 {
5370 /* GDBserver needs to step over the breakpoint if the condition is
5371 false. GDBserver's software single-step is too simple, so disable
5372 conditional breakpoints if the target doesn't have hardware
5373 single-step. */
5374 return can_hardware_single_step ();
5375 }
5376
5377 static int
5378 linux_stopped_by_watchpoint (void)
5379 {
5380 struct lwp_info *lwp = get_thread_lwp (current_thread);
5381
5382 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5383 }
5384
5385 static CORE_ADDR
5386 linux_stopped_data_address (void)
5387 {
5388 struct lwp_info *lwp = get_thread_lwp (current_thread);
5389
5390 return lwp->stopped_data_address;
5391 }
5392
5393 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5394 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5395 && defined(PT_TEXT_END_ADDR)
5396
5397 /* This is only used for targets that define PT_TEXT_ADDR,
5398 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, the
5399 target presumably has other ways of acquiring this information,
5400 such as loadmaps. */
5401
5402 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5403 to tell gdb about. */
5404
5405 static int
5406 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5407 {
5408 unsigned long text, text_end, data;
5409 int pid = lwpid_of (current_thread);
5410
5411 errno = 0;
5412
5413 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5414 (PTRACE_TYPE_ARG4) 0);
5415 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5416 (PTRACE_TYPE_ARG4) 0);
5417 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5418 (PTRACE_TYPE_ARG4) 0);
5419
5420 if (errno == 0)
5421 {
5422 /* Both text and data offsets produced at compile-time (and so
5423 used by gdb) are relative to the beginning of the program,
5424 with the data segment immediately following the text segment.
5425 However, the actual runtime layout in memory may put the data
5426 somewhere else, so when we send gdb a data base-address, we
5427 use the real data base address and subtract the compile-time
5428 data base-address from it (which is just the length of the
5429 text segment). BSS immediately follows data in both
5430 cases. */
5431 *text_p = text;
5432 *data_p = data - (text_end - text);
5433
5434 return 1;
5435 }
5436 return 0;
5437 }
5438 #endif
5439
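/* Implementation of the qxfer_osdata target op. Writing OS data is
   not supported, so WRITEBUF is ignored. */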
5440 static int
5441 linux_qxfer_osdata (const char *annex,
5442 unsigned char *readbuf, unsigned const char *writebuf,
5443 CORE_ADDR offset, int len)
5444 {
5445 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5446 }
5447
5448 /* Convert a native/host siginfo object into/from the siginfo in the
5449 layout of the inferior's architecture. */
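/* DIRECTION is 1 to convert from the inferior's layout into the
   native layout, and 0 to convert the other way (judging from the
   fallback memcpy below; a low target's siginfo_fixup hook is
   presumably expected to follow the same convention). */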
5450
5451 static void
5452 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
5453 {
5454 int done = 0;
5455
5456 if (the_low_target.siginfo_fixup != NULL)
5457 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
5458
5459 /* If there was no callback, or the callback didn't do anything,
5460 then just do a straight memcpy. */
5461 if (!done)
5462 {
5463 if (direction == 1)
5464 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5465 else
5466 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5467 }
5468 }
5469
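/* Read or write the current thread's siginfo via PTRACE_GETSIGINFO /
   PTRACE_SETSIGINFO, converting between the native layout and the
   inferior's layout as needed. Returns the number of bytes
   transferred, or -1 on error. */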
5470 static int
5471 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
5472 unsigned const char *writebuf, CORE_ADDR offset, int len)
5473 {
5474 int pid;
5475 siginfo_t siginfo;
5476 char inf_siginfo[sizeof (siginfo_t)];
5477
5478 if (current_thread == NULL)
5479 return -1;
5480
5481 pid = lwpid_of (current_thread);
5482
5483 if (debug_threads)
5484 debug_printf ("%s siginfo for lwp %d.\n",
5485 readbuf != NULL ? "Reading" : "Writing",
5486 pid);
5487
5488 if (offset >= sizeof (siginfo))
5489 return -1;
5490
5491 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5492 return -1;
5493
5494 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5495 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5496 inferior with a 64-bit GDBSERVER should look the same as debugging it
5497 with a 32-bit GDBSERVER, we need to convert it. */
5498 siginfo_fixup (&siginfo, inf_siginfo, 0);
5499
5500 if (offset + len > sizeof (siginfo))
5501 len = sizeof (siginfo) - offset;
5502
5503 if (readbuf != NULL)
5504 memcpy (readbuf, inf_siginfo + offset, len);
5505 else
5506 {
5507 memcpy (inf_siginfo + offset, writebuf, len);
5508
5509 /* Convert back to ptrace layout before flushing it out. */
5510 siginfo_fixup (&siginfo, inf_siginfo, 1);
5511
5512 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5513 return -1;
5514 }
5515
5516 return len;
5517 }
5518
5519 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
5520 it lets us notice when children change state; and it acts as the
5521 handler for the sigsuspend in my_waitpid. */
5522
5523 static void
5524 sigchld_handler (int signo)
5525 {
5526 int old_errno = errno;
5527
5528 if (debug_threads)
5529 {
5530 do
5531 {
5532 /* fprintf is not async-signal-safe, so call write
5533 directly. */
5534 if (write (2, "sigchld_handler\n",
5535 sizeof ("sigchld_handler\n") - 1) < 0)
5536 break; /* just ignore */
5537 } while (0);
5538 }
5539
5540 if (target_is_async_p ())
5541 async_file_mark (); /* trigger a linux_wait */
5542
5543 errno = old_errno;
5544 }
5545
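/* Non-stop mode is always supported on GNU/Linux. */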
5546 static int
5547 linux_supports_non_stop (void)
5548 {
5549 return 1;
5550 }
5551
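/* Enable or disable target async mode according to ENABLE, and
   return the previous setting. Async mode is driven by a self-pipe
   registered with the event loop; sigchld_handler marks the pipe to
   wake up linux_wait when children change state. */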
5552 static int
5553 linux_async (int enable)
5554 {
5555 int previous = target_is_async_p ();
5556
5557 if (debug_threads)
5558 debug_printf ("linux_async (%d), previous=%d\n",
5559 enable, previous);
5560
5561 if (previous != enable)
5562 {
5563 sigset_t mask;
5564 sigemptyset (&mask);
5565 sigaddset (&mask, SIGCHLD);
5566
5567 sigprocmask (SIG_BLOCK, &mask, NULL);
5568
5569 if (enable)
5570 {
5571 if (pipe (linux_event_pipe) == -1)
5572 {
5573 linux_event_pipe[0] = -1;
5574 linux_event_pipe[1] = -1;
5575 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5576
5577 warning ("creating event pipe failed.");
5578 return previous;
5579 }
5580
5581 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5582 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5583
5584 /* Register the event loop handler. */
5585 add_file_handler (linux_event_pipe[0],
5586 handle_target_event, NULL);
5587
5588 /* Always trigger a linux_wait. */
5589 async_file_mark ();
5590 }
5591 else
5592 {
5593 delete_file_handler (linux_event_pipe[0]);
5594
5595 close (linux_event_pipe[0]);
5596 close (linux_event_pipe[1]);
5597 linux_event_pipe[0] = -1;
5598 linux_event_pipe[1] = -1;
5599 }
5600
5601 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5602 }
5603
5604 return previous;
5605 }
5606
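/* Enter or leave non-stop mode, registering with or unregistering
   from the event loop accordingly. Returns 0 on success, -1 on
   failure. */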
5607 static int
5608 linux_start_non_stop (int nonstop)
5609 {
5610 /* Register or unregister from event-loop accordingly. */
5611 linux_async (nonstop);
5612
5613 if (target_is_async_p () != (nonstop != 0))
5614 return -1;
5615
5616 return 0;
5617 }
5618
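/* Multi-process debugging is always supported on GNU/Linux. */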
5619 static int
5620 linux_supports_multi_process (void)
5621 {
5622 return 1;
5623 }
5624
5625 /* Check if fork events are supported. */
5626
5627 static int
5628 linux_supports_fork_events (void)
5629 {
5630 return linux_supports_tracefork ();
5631 }
5632
5633 /* Check if vfork events are supported. */
5634
5635 static int
5636 linux_supports_vfork_events (void)
5637 {
5638 return linux_supports_tracefork ();
5639 }
5640
5641 /* Callback for 'find_inferior'. Set the (possibly changed) ptrace
5642 options for the specified lwp. */
5643
5644 static int
5645 reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
5646 void *args)
5647 {
5648 struct thread_info *thread = (struct thread_info *) entry;
5649 struct lwp_info *lwp = get_thread_lwp (thread);
5650
5651 if (!lwp->stopped)
5652 {
5653 /* Stop the lwp so we can modify its ptrace options. */
5654 lwp->must_set_ptrace_flags = 1;
5655 linux_stop_lwp (lwp);
5656 }
5657 else
5658 {
5659 /* Already stopped; go ahead and set the ptrace options. */
5660 struct process_info *proc = find_process_pid (pid_of (thread));
5661 int options = linux_low_ptrace_options (proc->attached);
5662
5663 linux_enable_event_reporting (lwpid_of (thread), options);
5664 lwp->must_set_ptrace_flags = 0;
5665 }
5666
5667 return 0;
5668 }
5669
5670 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
5671 ptrace flags for all inferiors. This is in case the new GDB connection
5672 doesn't support the same set of events that the previous one did. */
5673
5674 static void
5675 linux_handle_new_gdb_connection (void)
5676 {
5677 pid_t pid;
5678
5679 /* Request that all the lwps reset their ptrace options. */
5680 find_inferior (&all_threads, reset_lwp_ptrace_options_callback, &pid);
5681 }
5682
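/* Disabling address space randomization is supported only when
   personality support was detected at configure time. */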
5683 static int
5684 linux_supports_disable_randomization (void)
5685 {
5686 #ifdef HAVE_PERSONALITY
5687 return 1;
5688 #else
5689 return 0;
5690 #endif
5691 }
5692
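/* The in-process agent is supported on GNU/Linux. */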
5693 static int
5694 linux_supports_agent (void)
5695 {
5696 return 1;
5697 }
5698
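/* Range stepping is supported only when the low target provides
   it. */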
5699 static int
5700 linux_supports_range_stepping (void)
5701 {
5702 if (*the_low_target.supports_range_stepping == NULL)
5703 return 0;
5704
5705 return (*the_low_target.supports_range_stepping) ();
5706 }
5707
5708 /* Enumerate spufs IDs for process PID. */
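/* Each spufs context appears as a directory under /proc/PID/fd, and
   is reported here by its file descriptor number. The IDs are
   stored into BUF as 4-byte values, restricted to the OFFSET/LEN
   window; the number of bytes written is returned, or -1 on
   error. */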
5709 static int
5710 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5711 {
5712 int pos = 0;
5713 int written = 0;
5714 char path[128];
5715 DIR *dir;
5716 struct dirent *entry;
5717
5718 sprintf (path, "/proc/%ld/fd", pid);
5719 dir = opendir (path);
5720 if (!dir)
5721 return -1;
5722
5723 rewinddir (dir);
5724 while ((entry = readdir (dir)) != NULL)
5725 {
5726 struct stat st;
5727 struct statfs stfs;
5728 int fd;
5729
5730 fd = atoi (entry->d_name);
5731 if (!fd)
5732 continue;
5733
5734 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5735 if (stat (path, &st) != 0)
5736 continue;
5737 if (!S_ISDIR (st.st_mode))
5738 continue;
5739
5740 if (statfs (path, &stfs) != 0)
5741 continue;
5742 if (stfs.f_type != SPUFS_MAGIC)
5743 continue;
5744
5745 if (pos >= offset && pos + 4 <= offset + len)
5746 {
5747 *(unsigned int *)(buf + pos - offset) = fd;
5748 written += 4;
5749 }
5750 pos += 4;
5751 }
5752
5753 closedir (dir);
5754 return written;
5755 }
5756
5757 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5758 object type, using the /proc file system. */
5759 static int
5760 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5761 unsigned const char *writebuf,
5762 CORE_ADDR offset, int len)
5763 {
5764 long pid = lwpid_of (current_thread);
5765 char buf[128];
5766 int fd = 0;
5767 int ret = 0;
5768
5769 if (!writebuf && !readbuf)
5770 return -1;
5771
5772 if (!*annex)
5773 {
5774 if (!readbuf)
5775 return -1;
5776 else
5777 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5778 }
5779
5780 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5781 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
5782 if (fd <= 0)
5783 return -1;
5784
5785 if (offset != 0
5786 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5787 {
5788 close (fd);
5789 return 0;
5790 }
5791
5792 if (writebuf)
5793 ret = write (fd, writebuf, (size_t) len);
5794 else
5795 ret = read (fd, readbuf, (size_t) len);
5796
5797 close (fd);
5798 return ret;
5799 }
5800
5801 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5802 struct target_loadseg
5803 {
5804 /* Core address to which the segment is mapped. */
5805 Elf32_Addr addr;
5806 /* VMA recorded in the program header. */
5807 Elf32_Addr p_vaddr;
5808 /* Size of this segment in memory. */
5809 Elf32_Word p_memsz;
5810 };
5811
5812 # if defined PT_GETDSBT
5813 struct target_loadmap
5814 {
5815 /* Protocol version number, must be zero. */
5816 Elf32_Word version;
5817 /* Pointer to the DSBT table, its size, and the DSBT index. */
5818 unsigned *dsbt_table;
5819 unsigned dsbt_size, dsbt_index;
5820 /* Number of segments in this map. */
5821 Elf32_Word nsegs;
5822 /* The actual memory map. */
5823 struct target_loadseg segs[/*nsegs*/];
5824 };
5825 # define LINUX_LOADMAP PT_GETDSBT
5826 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5827 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5828 # else
5829 struct target_loadmap
5830 {
5831 /* Protocol version number, must be zero. */
5832 Elf32_Half version;
5833 /* Number of segments in this map. */
5834 Elf32_Half nsegs;
5835 /* The actual memory map. */
5836 struct target_loadseg segs[/*nsegs*/];
5837 };
5838 # define LINUX_LOADMAP PTRACE_GETFDPIC
5839 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5840 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5841 # endif
5842
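/* Implement the read_loadmap target op for FDPIC and DSBT targets.
   ANNEX selects the load map of either the executable ("exec") or
   the interpreter ("interp"); up to LEN bytes starting at OFFSET
   are copied into MYADDR. */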
5843 static int
5844 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5845 unsigned char *myaddr, unsigned int len)
5846 {
5847 int pid = lwpid_of (current_thread);
5848 int addr = -1;
5849 struct target_loadmap *data = NULL;
5850 unsigned int actual_length, copy_length;
5851
5852 if (strcmp (annex, "exec") == 0)
5853 addr = (int) LINUX_LOADMAP_EXEC;
5854 else if (strcmp (annex, "interp") == 0)
5855 addr = (int) LINUX_LOADMAP_INTERP;
5856 else
5857 return -1;
5858
5859 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5860 return -1;
5861
5862 if (data == NULL)
5863 return -1;
5864
5865 actual_length = sizeof (struct target_loadmap)
5866 + sizeof (struct target_loadseg) * data->nsegs;
5867
5868 if (offset < 0 || offset > actual_length)
5869 return -1;
5870
5871 copy_length = actual_length - offset < len ? actual_length - offset : len;
5872 memcpy (myaddr, (char *) data + offset, copy_length);
5873 return copy_length;
5874 }
5875 #else
5876 # define linux_read_loadmap NULL
5877 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5878
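/* Give the arch-specific code a chance to act on the qSupported
   query, if it implements the hook. */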
5879 static void
5880 linux_process_qsupported (const char *query)
5881 {
5882 if (the_low_target.process_qsupported != NULL)
5883 the_low_target.process_qsupported (query);
5884 }
5885
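/* Tracepoints are supported only when the low target says so. */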
5886 static int
5887 linux_supports_tracepoints (void)
5888 {
5889 if (*the_low_target.supports_tracepoints == NULL)
5890 return 0;
5891
5892 return (*the_low_target.supports_tracepoints) ();
5893 }
5894
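/* Fetch the PC from REGCACHE via the low target's get_pc hook, or
   return 0 if the hook is not implemented. */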
5895 static CORE_ADDR
5896 linux_read_pc (struct regcache *regcache)
5897 {
5898 if (the_low_target.get_pc == NULL)
5899 return 0;
5900
5901 return (*the_low_target.get_pc) (regcache);
5902 }
5903
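/* Store PC into REGCACHE via the low target's set_pc hook, which
   must be implemented. */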
5904 static void
5905 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5906 {
5907 gdb_assert (the_low_target.set_pc != NULL);
5908
5909 (*the_low_target.set_pc) (regcache, pc);
5910 }
5911
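/* Report whether THREAD's LWP is currently stopped. */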
5912 static int
5913 linux_thread_stopped (struct thread_info *thread)
5914 {
5915 return get_thread_lwp (thread)->stopped;
5916 }
5917
5918 /* This exposes stop-all-threads functionality to other modules. */
5919
5920 static void
5921 linux_pause_all (int freeze)
5922 {
5923 stop_all_lwps (freeze, NULL);
5924 }
5925
5926 /* This exposes unstop-all-threads functionality to other gdbserver
5927 modules. */
5928
5929 static void
5930 linux_unpause_all (int unfreeze)
5931 {
5932 unstop_all_lwps (unfreeze, NULL);
5933 }
5934
5935 static int
5936 linux_prepare_to_access_memory (void)
5937 {
5938 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5939 running LWP. */
5940 if (non_stop)
5941 linux_pause_all (1);
5942 return 0;
5943 }
5944
5945 static void
5946 linux_done_accessing_memory (void)
5947 {
5948 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5949 running LWP. */
5950 if (non_stop)
5951 linux_unpause_all (1);
5952 }
5953
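/* Delegate installation of a fast tracepoint jump pad to the low
   target. */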
5954 static int
5955 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5956 CORE_ADDR collector,
5957 CORE_ADDR lockaddr,
5958 ULONGEST orig_size,
5959 CORE_ADDR *jump_entry,
5960 CORE_ADDR *trampoline,
5961 ULONGEST *trampoline_size,
5962 unsigned char *jjump_pad_insn,
5963 ULONGEST *jjump_pad_insn_size,
5964 CORE_ADDR *adjusted_insn_addr,
5965 CORE_ADDR *adjusted_insn_addr_end,
5966 char *err)
5967 {
5968 return (*the_low_target.install_fast_tracepoint_jump_pad)
5969 (tpoint, tpaddr, collector, lockaddr, orig_size,
5970 jump_entry, trampoline, trampoline_size,
5971 jjump_pad_insn, jjump_pad_insn_size,
5972 adjusted_insn_addr, adjusted_insn_addr_end,
5973 err);
5974 }
5975
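/* Return the low target's bytecode compilation ops, if any. */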
5976 static struct emit_ops *
5977 linux_emit_ops (void)
5978 {
5979 if (the_low_target.emit_ops != NULL)
5980 return (*the_low_target.emit_ops) ();
5981 else
5982 return NULL;
5983 }
5984
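/* Return the minimum length of an instruction at which a fast
   tracepoint jump can be installed, as reported by the low
   target. */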
5985 static int
5986 linux_get_min_fast_tracepoint_insn_len (void)
5987 {
5988 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5989 }
5990
5991 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5992
5993 static int
5994 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5995 CORE_ADDR *phdr_memaddr, int *num_phdr)
5996 {
5997 char filename[PATH_MAX];
5998 int fd;
5999 const int auxv_size = is_elf64
6000 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6001 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6002
6003 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6004
6005 fd = open (filename, O_RDONLY);
6006 if (fd < 0)
6007 return 1;
6008
6009 *phdr_memaddr = 0;
6010 *num_phdr = 0;
6011 while (read (fd, buf, auxv_size) == auxv_size
6012 && (*phdr_memaddr == 0 || *num_phdr == 0))
6013 {
6014 if (is_elf64)
6015 {
6016 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6017
6018 switch (aux->a_type)
6019 {
6020 case AT_PHDR:
6021 *phdr_memaddr = aux->a_un.a_val;
6022 break;
6023 case AT_PHNUM:
6024 *num_phdr = aux->a_un.a_val;
6025 break;
6026 }
6027 }
6028 else
6029 {
6030 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6031
6032 switch (aux->a_type)
6033 {
6034 case AT_PHDR:
6035 *phdr_memaddr = aux->a_un.a_val;
6036 break;
6037 case AT_PHNUM:
6038 *num_phdr = aux->a_un.a_val;
6039 break;
6040 }
6041 }
6042 }
6043
6044 close (fd);
6045
6046 if (*phdr_memaddr == 0 || *num_phdr == 0)
6047 {
6048 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6049 "phdr_memaddr = %ld, phdr_num = %d",
6050 (long) *phdr_memaddr, *num_phdr);
6051 return 2;
6052 }
6053
6054 return 0;
6055 }
6056
6057 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6058
6059 static CORE_ADDR
6060 get_dynamic (const int pid, const int is_elf64)
6061 {
6062 CORE_ADDR phdr_memaddr, relocation;
6063 int num_phdr, i;
6064 unsigned char *phdr_buf;
6065 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6066
6067 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6068 return 0;
6069
6070 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6071 phdr_buf = alloca (num_phdr * phdr_size);
6072
6073 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6074 return 0;
6075
6076 /* Compute relocation: it is expected to be 0 for "regular" executables,
6077 non-zero for PIE ones. */
6078 relocation = -1;
6079 for (i = 0; relocation == -1 && i < num_phdr; i++)
6080 if (is_elf64)
6081 {
6082 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6083
6084 if (p->p_type == PT_PHDR)
6085 relocation = phdr_memaddr - p->p_vaddr;
6086 }
6087 else
6088 {
6089 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6090
6091 if (p->p_type == PT_PHDR)
6092 relocation = phdr_memaddr - p->p_vaddr;
6093 }
6094
6095 if (relocation == -1)
6096 {
6097 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
6098 all real-world executables, including PIE executables, always have
6099 PT_PHDR present. PT_PHDR is absent from some shared libraries and
6100 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6101 provides DT_DEBUG anyway (fpc binaries are statically linked).
6102
6103 Therefore, wherever DT_DEBUG exists, PT_PHDR exists as well.
6104
6105 GDB could also find RELOCATION from AT_ENTRY - e_entry. */
6106
6107 return 0;
6108 }
6109
6110 for (i = 0; i < num_phdr; i++)
6111 {
6112 if (is_elf64)
6113 {
6114 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6115
6116 if (p->p_type == PT_DYNAMIC)
6117 return p->p_vaddr + relocation;
6118 }
6119 else
6120 {
6121 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6122
6123 if (p->p_type == PT_DYNAMIC)
6124 return p->p_vaddr + relocation;
6125 }
6126 }
6127
6128 return 0;
6129 }
6130
6131 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6132 can be 0 if the inferior does not yet have the library list initialized.
6133 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6134 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6135
6136 static CORE_ADDR
6137 get_r_debug (const int pid, const int is_elf64)
6138 {
6139 CORE_ADDR dynamic_memaddr;
6140 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6141 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6142 CORE_ADDR map = -1;
6143
6144 dynamic_memaddr = get_dynamic (pid, is_elf64);
6145 if (dynamic_memaddr == 0)
6146 return map;
6147
6148 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6149 {
6150 if (is_elf64)
6151 {
6152 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6153 #ifdef DT_MIPS_RLD_MAP
6154 union
6155 {
6156 Elf64_Xword map;
6157 unsigned char buf[sizeof (Elf64_Xword)];
6158 }
6159 rld_map;
6160
6161 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6162 {
6163 if (linux_read_memory (dyn->d_un.d_val,
6164 rld_map.buf, sizeof (rld_map.buf)) == 0)
6165 return rld_map.map;
6166 else
6167 break;
6168 }
6169 #endif /* DT_MIPS_RLD_MAP */
6170
6171 if (dyn->d_tag == DT_DEBUG && map == -1)
6172 map = dyn->d_un.d_val;
6173
6174 if (dyn->d_tag == DT_NULL)
6175 break;
6176 }
6177 else
6178 {
6179 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6180 #ifdef DT_MIPS_RLD_MAP
6181 union
6182 {
6183 Elf32_Word map;
6184 unsigned char buf[sizeof (Elf32_Word)];
6185 }
6186 rld_map;
6187
6188 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6189 {
6190 if (linux_read_memory (dyn->d_un.d_val,
6191 rld_map.buf, sizeof (rld_map.buf)) == 0)
6192 return rld_map.map;
6193 else
6194 break;
6195 }
6196 #endif /* DT_MIPS_RLD_MAP */
6197
6198 if (dyn->d_tag == DT_DEBUG && map == -1)
6199 map = dyn->d_un.d_val;
6200
6201 if (dyn->d_tag == DT_NULL)
6202 break;
6203 }
6204
6205 dynamic_memaddr += dyn_size;
6206 }
6207
6208 return map;
6209 }
6210
6211 /* Read one pointer from MEMADDR in the inferior. */
6212
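/* PTR_SIZE is the inferior's pointer size, which must equal either
   sizeof (CORE_ADDR) or sizeof (unsigned int). Returns 0 on
   success, nonzero on failure, like linux_read_memory. */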
6213 static int
6214 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6215 {
6216 int ret;
6217
6218 /* Go through a union so this works on either big or little endian
6219 hosts, when the inferior's pointer size is smaller than the size
6220 of CORE_ADDR. It is assumed the inferior's endianness is the
6221 same as the superior's. */
6222 union
6223 {
6224 CORE_ADDR core_addr;
6225 unsigned int ui;
6226 unsigned char uc;
6227 } addr;
6228
6229 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6230 if (ret == 0)
6231 {
6232 if (ptr_size == sizeof (CORE_ADDR))
6233 *ptr = addr.core_addr;
6234 else if (ptr_size == sizeof (unsigned int))
6235 *ptr = addr.ui;
6236 else
6237 gdb_assert_not_reached ("unhandled pointer size");
6238 }
6239 return ret;
6240 }
6241
6242 struct link_map_offsets
6243 {
6244 /* Offset and size of r_debug.r_version. */
6245 int r_version_offset;
6246
6247 /* Offset and size of r_debug.r_map. */
6248 int r_map_offset;
6249
6250 /* Offset to l_addr field in struct link_map. */
6251 int l_addr_offset;
6252
6253 /* Offset to l_name field in struct link_map. */
6254 int l_name_offset;
6255
6256 /* Offset to l_ld field in struct link_map. */
6257 int l_ld_offset;
6258
6259 /* Offset to l_next field in struct link_map. */
6260 int l_next_offset;
6261
6262 /* Offset to l_prev field in struct link_map. */
6263 int l_prev_offset;
6264 };
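/* The offset tables used below (lmo_32bit_offsets and
   lmo_64bit_offsets) encode the SVR4 struct r_debug and struct
   link_map layouts for 32-bit and 64-bit inferiors. For example,
   with the 64-bit table, the l_next pointer of the link_map at
   LM_ADDR is fetched by
   read_one_ptr (lm_addr + lmo->l_next_offset, &l_next, ptr_size). */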
6265
6266 /* Construct qXfer:libraries-svr4:read reply. */
6267
6268 static int
6269 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6270 unsigned const char *writebuf,
6271 CORE_ADDR offset, int len)
6272 {
6273 char *document;
6274 unsigned document_len;
6275 struct process_info_private *const priv = current_process ()->priv;
6276 char filename[PATH_MAX];
6277 int pid, is_elf64;
6278
6279 static const struct link_map_offsets lmo_32bit_offsets =
6280 {
6281 0, /* r_version offset. */
6282 4, /* r_debug.r_map offset. */
6283 0, /* l_addr offset in link_map. */
6284 4, /* l_name offset in link_map. */
6285 8, /* l_ld offset in link_map. */
6286 12, /* l_next offset in link_map. */
6287 16 /* l_prev offset in link_map. */
6288 };
6289
6290 static const struct link_map_offsets lmo_64bit_offsets =
6291 {
6292 0, /* r_version offset. */
6293 8, /* r_debug.r_map offset. */
6294 0, /* l_addr offset in link_map. */
6295 8, /* l_name offset in link_map. */
6296 16, /* l_ld offset in link_map. */
6297 24, /* l_next offset in link_map. */
6298 32 /* l_prev offset in link_map. */
6299 };
6300 const struct link_map_offsets *lmo;
6301 unsigned int machine;
6302 int ptr_size;
6303 CORE_ADDR lm_addr = 0, lm_prev = 0;
6304 int allocated = 1024;
6305 char *p;
6306 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6307 int header_done = 0;
6308
6309 if (writebuf != NULL)
6310 return -2;
6311 if (readbuf == NULL)
6312 return -1;
6313
6314 pid = lwpid_of (current_thread);
6315 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6316 is_elf64 = elf_64_file_p (filename, &machine);
6317 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6318 ptr_size = is_elf64 ? 8 : 4;
6319
6320 while (annex[0] != '\0')
6321 {
6322 const char *sep;
6323 CORE_ADDR *addrp;
6324 int len;
6325
6326 sep = strchr (annex, '=');
6327 if (sep == NULL)
6328 break;
6329
6330 len = sep - annex;
6331 if (len == 5 && startswith (annex, "start"))
6332 addrp = &lm_addr;
6333 else if (len == 4 && startswith (annex, "prev"))
6334 addrp = &lm_prev;
6335 else
6336 {
6337 annex = strchr (sep, ';');
6338 if (annex == NULL)
6339 break;
6340 annex++;
6341 continue;
6342 }
6343
6344 annex = decode_address_to_semicolon (addrp, sep + 1);
6345 }
6346
6347 if (lm_addr == 0)
6348 {
6349 int r_version = 0;
6350
6351 if (priv->r_debug == 0)
6352 priv->r_debug = get_r_debug (pid, is_elf64);
6353
6354 /* We failed to find DT_DEBUG. That situation will not change
6355 for this inferior, so do not retry. Report it to GDB as E01;
6356 see solib-svr4.c on the GDB side for the reasons. */
6357 if (priv->r_debug == (CORE_ADDR) -1)
6358 return -1;
6359
6360 if (priv->r_debug != 0)
6361 {
6362 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6363 (unsigned char *) &r_version,
6364 sizeof (r_version)) != 0
6365 || r_version != 1)
6366 {
6367 warning ("unexpected r_debug version %d", r_version);
6368 }
6369 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6370 &lm_addr, ptr_size) != 0)
6371 {
6372 warning ("unable to read r_map from 0x%lx",
6373 (long) (priv->r_debug + lmo->r_map_offset));
6374 }
6375 }
6376 }
6377
6378 document = xmalloc (allocated);
6379 strcpy (document, "<library-list-svr4 version=\"1.0\"");
6380 p = document + strlen (document);
6381
6382 while (lm_addr
6383 && read_one_ptr (lm_addr + lmo->l_name_offset,
6384 &l_name, ptr_size) == 0
6385 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6386 &l_addr, ptr_size) == 0
6387 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6388 &l_ld, ptr_size) == 0
6389 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6390 &l_prev, ptr_size) == 0
6391 && read_one_ptr (lm_addr + lmo->l_next_offset,
6392 &l_next, ptr_size) == 0)
6393 {
6394 unsigned char libname[PATH_MAX];
6395
6396 if (lm_prev != l_prev)
6397 {
6398 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6399 (long) lm_prev, (long) l_prev);
6400 break;
6401 }
6402
6403 /* Ignore the first entry even if it has a valid name, as the first
6404 entry corresponds to the main executable. The first entry should
6405 not be skipped if the dynamic loader was loaded late by a static
6406 executable (see solib-svr4.c parameter ignore_first). But in that
6407 case the main executable does not have PT_DYNAMIC present, and this
6408 function has already returned above due to a failed get_r_debug. */
6409 if (lm_prev == 0)
6410 {
6411 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6412 p = p + strlen (p);
6413 }
6414 else
6415 {
6416 /* Not checking for error because reading may stop before
6417 we've got PATH_MAX worth of characters. */
6418 libname[0] = '\0';
6419 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6420 libname[sizeof (libname) - 1] = '\0';
6421 if (libname[0] != '\0')
6422 {
6423 /* 6x the size for xml_escape_text below. */
6424 size_t len = 6 * strlen ((char *) libname);
6425 char *name;
6426
6427 if (!header_done)
6428 {
6429 /* Terminate `<library-list-svr4'. */
6430 *p++ = '>';
6431 header_done = 1;
6432 }
6433
6434 while (allocated < p - document + len + 200)
6435 {
6436 /* Expand to guarantee sufficient storage. */
6437 uintptr_t document_len = p - document;
6438
6439 document = xrealloc (document, 2 * allocated);
6440 allocated *= 2;
6441 p = document + document_len;
6442 }
6443
6444 name = xml_escape_text ((char *) libname);
6445 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
6446 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6447 name, (unsigned long) lm_addr,
6448 (unsigned long) l_addr, (unsigned long) l_ld);
6449 free (name);
6450 }
6451 }
6452
6453 lm_prev = lm_addr;
6454 lm_addr = l_next;
6455 }
6456
6457 if (!header_done)
6458 {
6459 /* Empty list; terminate `<library-list-svr4'. */
6460 strcpy (p, "/>");
6461 }
6462 else
6463 strcpy (p, "</library-list-svr4>");
6464
6465 document_len = strlen (document);
6466 if (offset < document_len)
6467 document_len -= offset;
6468 else
6469 document_len = 0;
6470 if (len > document_len)
6471 len = document_len;
6472
6473 memcpy (readbuf, document + offset, len);
6474 xfree (document);
6475
6476 return len;
6477 }
6478
6479 #ifdef HAVE_LINUX_BTRACE
6480
6481 /* See to_enable_btrace target method. */
6482
6483 static struct btrace_target_info *
6484 linux_low_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
6485 {
6486 struct btrace_target_info *tinfo;
6487
6488 tinfo = linux_enable_btrace (ptid, conf);
6489
6490 if (tinfo != NULL && tinfo->ptr_bits == 0)
6491 {
6492 struct thread_info *thread = find_thread_ptid (ptid);
6493 struct regcache *regcache = get_thread_regcache (thread, 0);
6494
6495 tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
6496 }
6497
6498 return tinfo;
6499 }
6500
6501 /* See to_disable_btrace target method. */
6502
6503 static int
6504 linux_low_disable_btrace (struct btrace_target_info *tinfo)
6505 {
6506 enum btrace_error err;
6507
6508 err = linux_disable_btrace (tinfo);
6509 return (err == BTRACE_ERR_NONE ? 0 : -1);
6510 }
6511
6512 /* Encode an Intel(R) Processor Trace configuration. */
6513
6514 static void
6515 linux_low_encode_pt_config (struct buffer *buffer,
6516 const struct btrace_data_pt_config *config)
6517 {
6518 buffer_grow_str (buffer, "<pt-config>\n");
6519
6520 switch (config->cpu.vendor)
6521 {
6522 case CV_INTEL:
6523 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6524 "model=\"%u\" stepping=\"%u\"/>\n",
6525 config->cpu.family, config->cpu.model,
6526 config->cpu.stepping);
6527 break;
6528
6529 default:
6530 break;
6531 }
6532
6533 buffer_grow_str (buffer, "</pt-config>\n");
6534 }
6535
6536 /* Encode a raw buffer. */
6537
6538 static void
6539 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
6540 unsigned int size)
6541 {
6542 if (size == 0)
6543 return;
6544
6545 /* We use hex encoding - see common/rsp-low.h. */
6546 buffer_grow_str (buffer, "<raw>\n");
6547
6548 while (size-- > 0)
6549 {
6550 char elem[2];
6551
6552 elem[0] = tohex ((*data >> 4) & 0xf);
6553 elem[1] = tohex (*data++ & 0xf);
6554
6555 buffer_grow (buffer, elem, 2);
6556 }
6557
6558 buffer_grow_str (buffer, "</raw>\n");
6559 }
6560
6561 /* See to_read_btrace target method. */
6562
6563 static int
6564 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
6565 int type)
6566 {
6567 struct btrace_data btrace;
6568 struct btrace_block *block;
6569 enum btrace_error err;
6570 int i;
6571
6572 btrace_data_init (&btrace);
6573
6574 err = linux_read_btrace (&btrace, tinfo, type);
6575 if (err != BTRACE_ERR_NONE)
6576 {
6577 if (err == BTRACE_ERR_OVERFLOW)
6578 buffer_grow_str0 (buffer, "E.Overflow.");
6579 else
6580 buffer_grow_str0 (buffer, "E.Generic Error.");
6581
6582 goto err;
6583 }
6584
6585 switch (btrace.format)
6586 {
6587 case BTRACE_FORMAT_NONE:
6588 buffer_grow_str0 (buffer, "E.No Trace.");
6589 goto err;
6590
6591 case BTRACE_FORMAT_BTS:
6592 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6593 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6594
6595 for (i = 0;
6596 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
6597 i++)
6598 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6599 paddress (block->begin), paddress (block->end));
6600
6601 buffer_grow_str0 (buffer, "</btrace>\n");
6602 break;
6603
6604 case BTRACE_FORMAT_PT:
6605 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6606 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6607 buffer_grow_str (buffer, "<pt>\n");
6608
6609 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
6610
6611 linux_low_encode_raw (buffer, btrace.variant.pt.data,
6612 btrace.variant.pt.size);
6613
6614 buffer_grow_str (buffer, "</pt>\n");
6615 buffer_grow_str0 (buffer, "</btrace>\n");
6616 break;
6617
6618 default:
6619 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
6620 goto err;
6621 }
6622
6623 btrace_data_fini (&btrace);
6624 return 0;
6625
6626 err:
6627 btrace_data_fini (&btrace);
6628 return -1;
6629 }
6630
6631 /* See to_btrace_conf target method. */
6632
6633 static int
6634 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
6635 struct buffer *buffer)
6636 {
6637 const struct btrace_config *conf;
6638
6639 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
6640 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
6641
6642 conf = linux_btrace_conf (tinfo);
6643 if (conf != NULL)
6644 {
6645 switch (conf->format)
6646 {
6647 case BTRACE_FORMAT_NONE:
6648 break;
6649
6650 case BTRACE_FORMAT_BTS:
6651 buffer_xml_printf (buffer, "<bts");
6652 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
6653 buffer_xml_printf (buffer, " />\n");
6654 break;
6655
6656 case BTRACE_FORMAT_PT:
6657 buffer_xml_printf (buffer, "<pt");
6658 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
6659 buffer_xml_printf (buffer, "/>\n");
6660 break;
6661 }
6662 }
6663
6664 buffer_grow_str0 (buffer, "</btrace-conf>\n");
6665 return 0;
6666 }
6667 #endif /* HAVE_LINUX_BTRACE */
6668
6669 /* See nat/linux-nat.h. */
6670
6671 ptid_t
6672 current_lwp_ptid (void)
6673 {
6674 return ptid_of (current_thread);
6675 }
6676
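/* The GNU/Linux target vector. Entries must appear in the order
   declared by struct target_ops in gdbserver's target.h. */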
6677 static struct target_ops linux_target_ops = {
6678 linux_create_inferior,
6679 linux_arch_setup,
6680 linux_attach,
6681 linux_kill,
6682 linux_detach,
6683 linux_mourn,
6684 linux_join,
6685 linux_thread_alive,
6686 linux_resume,
6687 linux_wait,
6688 linux_fetch_registers,
6689 linux_store_registers,
6690 linux_prepare_to_access_memory,
6691 linux_done_accessing_memory,
6692 linux_read_memory,
6693 linux_write_memory,
6694 linux_look_up_symbols,
6695 linux_request_interrupt,
6696 linux_read_auxv,
6697 linux_supports_z_point_type,
6698 linux_insert_point,
6699 linux_remove_point,
6700 linux_stopped_by_sw_breakpoint,
6701 linux_supports_stopped_by_sw_breakpoint,
6702 linux_stopped_by_hw_breakpoint,
6703 linux_supports_stopped_by_hw_breakpoint,
6704 linux_supports_conditional_breakpoints,
6705 linux_stopped_by_watchpoint,
6706 linux_stopped_data_address,
6707 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6708 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6709 && defined(PT_TEXT_END_ADDR)
6710 linux_read_offsets,
6711 #else
6712 NULL,
6713 #endif
6714 #ifdef USE_THREAD_DB
6715 thread_db_get_tls_address,
6716 #else
6717 NULL,
6718 #endif
6719 linux_qxfer_spu,
6720 hostio_last_error_from_errno,
6721 linux_qxfer_osdata,
6722 linux_xfer_siginfo,
6723 linux_supports_non_stop,
6724 linux_async,
6725 linux_start_non_stop,
6726 linux_supports_multi_process,
6727 linux_supports_fork_events,
6728 linux_supports_vfork_events,
6729 linux_handle_new_gdb_connection,
6730 #ifdef USE_THREAD_DB
6731 thread_db_handle_monitor_command,
6732 #else
6733 NULL,
6734 #endif
6735 linux_common_core_of_thread,
6736 linux_read_loadmap,
6737 linux_process_qsupported,
6738 linux_supports_tracepoints,
6739 linux_read_pc,
6740 linux_write_pc,
6741 linux_thread_stopped,
6742 NULL,
6743 linux_pause_all,
6744 linux_unpause_all,
6745 linux_stabilize_threads,
6746 linux_install_fast_tracepoint_jump_pad,
6747 linux_emit_ops,
6748 linux_supports_disable_randomization,
6749 linux_get_min_fast_tracepoint_insn_len,
6750 linux_qxfer_libraries_svr4,
6751 linux_supports_agent,
6752 #ifdef HAVE_LINUX_BTRACE
6753 linux_supports_btrace,
6754 linux_low_enable_btrace,
6755 linux_low_disable_btrace,
6756 linux_low_read_btrace,
6757 linux_low_btrace_conf,
6758 #else
6759 NULL,
6760 NULL,
6761 NULL,
6762 NULL,
6763 NULL,
6764 #endif
6765 linux_supports_range_stepping,
6766 linux_proc_pid_to_exec_file,
6767 linux_mntns_open_cloexec,
6768 linux_mntns_unlink,
6769 linux_mntns_readlink,
6770 };
6771
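/* Set up gdbserver's own signal dispositions. */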
6772 static void
6773 linux_init_signals (void)
6774 {
6775 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
6776 to find what the cancel signal actually is. */
6777 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
6778 signal (__SIGRTMIN+1, SIG_IGN);
6779 #endif
6780 }
6781
6782 #ifdef HAVE_LINUX_REGSETS
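/* Count the regsets in INFO's table, which is terminated by an
   entry with a negative size, and record the count in
   INFO->num_regsets. */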
6783 void
6784 initialize_regsets_info (struct regsets_info *info)
6785 {
6786 for (info->num_regsets = 0;
6787 info->regsets[info->num_regsets].size >= 0;
6788 info->num_regsets++)
6789 ;
6790 }
6791 #endif
6792
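/* One-time initialization of the GNU/Linux target: install the
   target vector and breakpoint data, set up signal handling
   (including the SIGCHLD handler used for async mode), initialize
   the architecture-specific bits, and probe the ptrace features
   supported by the running kernel. */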
6793 void
6794 initialize_low (void)
6795 {
6796 struct sigaction sigchld_action;
6797 memset (&sigchld_action, 0, sizeof (sigchld_action));
6798 set_target_ops (&linux_target_ops);
6799 set_breakpoint_data (the_low_target.breakpoint,
6800 the_low_target.breakpoint_len);
6801 linux_init_signals ();
6802 linux_ptrace_init_warnings ();
6803
6804 sigchld_action.sa_handler = sigchld_handler;
6805 sigemptyset (&sigchld_action.sa_mask);
6806 sigchld_action.sa_flags = SA_RESTART;
6807 sigaction (SIGCHLD, &sigchld_action, NULL);
6808
6809 initialize_low_arch ();
6810
6811 linux_check_ptrace_features ();
6812 }