1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2015 Free Software Foundation, Inc.
4 This file is part of GDB.
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
26 #include "nat/linux-nat.h"
27 #include "nat/linux-waitpid.h"
29 #include "nat/gdb_ptrace.h"
30 #include "nat/linux-ptrace.h"
31 #include "nat/linux-procfs.h"
32 #include "nat/linux-personality.h"
34 #include <sys/ioctl.h>
37 #include <sys/syscall.h>
41 #include <sys/types.h>
46 #include "filestuff.h"
47 #include "tracepoint.h"
50 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
51 then ELFMAG0 will have been defined. If it didn't get included by
52 gdb_proc_service.h then including it will likely introduce a duplicate
53 definition of elf_fpregset_t. */
56 #include "nat/linux-namespaces.h"
59 #define SPUFS_MAGIC 0x23c9b64e
62 #ifdef HAVE_PERSONALITY
63 # include <sys/personality.h>
64 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
65 # define ADDR_NO_RANDOMIZE 0x0040000
74 #define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
77 /* This is the kernel's hard limit. Not to be confused with
83 /* Some targets did not define these ptrace constants from the start,
84 so gdbserver defines them locally here. In the future, these may
85 be removed after they are added to asm/ptrace.h. */
86 #if !(defined(PT_TEXT_ADDR) \
87 || defined(PT_DATA_ADDR) \
88 || defined(PT_TEXT_END_ADDR))
89 #if defined(__mcoldfire__)
90 /* These are still undefined in 3.10 kernels. */
91 #define PT_TEXT_ADDR 49*4
92 #define PT_DATA_ADDR 50*4
93 #define PT_TEXT_END_ADDR 51*4
94 /* BFIN already defines these since at least 2.6.32 kernels. */
96 #define PT_TEXT_ADDR 220
97 #define PT_TEXT_END_ADDR 224
98 #define PT_DATA_ADDR 228
99 /* These are still undefined in 3.10 kernels. */
100 #elif defined(__TMS320C6X__)
101 #define PT_TEXT_ADDR (0x10000*4)
102 #define PT_DATA_ADDR (0x10004*4)
103 #define PT_TEXT_END_ADDR (0x10008*4)
107 #ifdef HAVE_LINUX_BTRACE
108 # include "nat/linux-btrace.h"
109 # include "btrace-common.h"
112 #ifndef HAVE_ELF32_AUXV_T
113 /* Copied from glibc's elf.h. */
116 uint32_t a_type
; /* Entry type */
119 uint32_t a_val
; /* Integer value */
120 /* We use to have pointer elements added here. We cannot do that,
121 though, since it does not work when using 32-bit definitions
122 on 64-bit platforms and vice versa. */
127 #ifndef HAVE_ELF64_AUXV_T
128 /* Copied from glibc's elf.h. */
131 uint64_t a_type
; /* Entry type */
134 uint64_t a_val
; /* Integer value */
135 /* We use to have pointer elements added here. We cannot do that,
136 though, since it does not work when using 32-bit definitions
137 on 64-bit platforms and vice versa. */
142 /* Does the current host support PTRACE_GETREGSET? */
143 int have_ptrace_getregset
= -1;
147 /* See nat/linux-nat.h. */
150 ptid_of_lwp (struct lwp_info
*lwp
)
152 return ptid_of (get_lwp_thread (lwp
));
155 /* See nat/linux-nat.h. */
158 lwp_set_arch_private_info (struct lwp_info
*lwp
,
159 struct arch_lwp_info
*info
)
161 lwp
->arch_private
= info
;
164 /* See nat/linux-nat.h. */
166 struct arch_lwp_info
*
167 lwp_arch_private_info (struct lwp_info
*lwp
)
169 return lwp
->arch_private
;
172 /* See nat/linux-nat.h. */
175 lwp_is_stopped (struct lwp_info
*lwp
)
180 /* See nat/linux-nat.h. */
182 enum target_stop_reason
183 lwp_stop_reason (struct lwp_info
*lwp
)
185 return lwp
->stop_reason
;
188 /* A list of all unknown processes which receive stop signals. Some
189 other process will presumably claim each of these as forked
190 children momentarily. */
192 struct simple_pid_list
194 /* The process ID. */
197 /* The status as reported by waitpid. */
201 struct simple_pid_list
*next
;
203 struct simple_pid_list
*stopped_pids
;
205 /* Trivial list manipulation functions to keep track of a list of new
206 stopped processes. */
209 add_to_pid_list (struct simple_pid_list
**listp
, int pid
, int status
)
211 struct simple_pid_list
*new_pid
= xmalloc (sizeof (struct simple_pid_list
));
214 new_pid
->status
= status
;
215 new_pid
->next
= *listp
;
220 pull_pid_from_list (struct simple_pid_list
**listp
, int pid
, int *statusp
)
222 struct simple_pid_list
**p
;
224 for (p
= listp
; *p
!= NULL
; p
= &(*p
)->next
)
225 if ((*p
)->pid
== pid
)
227 struct simple_pid_list
*next
= (*p
)->next
;
229 *statusp
= (*p
)->status
;
237 enum stopping_threads_kind
239 /* Not stopping threads presently. */
240 NOT_STOPPING_THREADS
,
242 /* Stopping threads. */
245 /* Stopping and suspending threads. */
246 STOPPING_AND_SUSPENDING_THREADS
249 /* This is set while stop_all_lwps is in effect. */
250 enum stopping_threads_kind stopping_threads
= NOT_STOPPING_THREADS
;
252 /* FIXME make into a target method? */
253 int using_threads
= 1;
255 /* True if we're presently stabilizing threads (moving them out of
257 static int stabilizing_threads
;
259 static void linux_resume_one_lwp (struct lwp_info
*lwp
,
260 int step
, int signal
, siginfo_t
*info
);
261 static void linux_resume (struct thread_resume
*resume_info
, size_t n
);
262 static void stop_all_lwps (int suspend
, struct lwp_info
*except
);
263 static void unstop_all_lwps (int unsuspend
, struct lwp_info
*except
);
264 static int linux_wait_for_event_filtered (ptid_t wait_ptid
, ptid_t filter_ptid
,
265 int *wstat
, int options
);
266 static int linux_wait_for_event (ptid_t ptid
, int *wstat
, int options
);
267 static struct lwp_info
*add_lwp (ptid_t ptid
);
268 static int linux_stopped_by_watchpoint (void);
269 static void mark_lwp_dead (struct lwp_info
*lwp
, int wstat
);
270 static int lwp_is_marked_dead (struct lwp_info
*lwp
);
271 static void proceed_all_lwps (void);
272 static int finish_step_over (struct lwp_info
*lwp
);
273 static int kill_lwp (unsigned long lwpid
, int signo
);
275 /* When the event-loop is doing a step-over, this points at the thread
277 ptid_t step_over_bkpt
;
279 /* True if the low target can hardware single-step. Such targets
280 don't need a BREAKPOINT_REINSERT_ADDR callback. */
283 can_hardware_single_step (void)
285 return (the_low_target
.breakpoint_reinsert_addr
== NULL
);
288 /* True if the low target supports memory breakpoints. If so, we'll
289 have a GET_PC implementation. */
292 supports_breakpoints (void)
294 return (the_low_target
.get_pc
!= NULL
);
297 /* Returns true if this target can support fast tracepoints. This
298 does not mean that the in-process agent has been loaded in the
302 supports_fast_tracepoints (void)
304 return the_low_target
.install_fast_tracepoint_jump_pad
!= NULL
;
307 /* True if LWP is stopped in its stepping range. */
310 lwp_in_step_range (struct lwp_info
*lwp
)
312 CORE_ADDR pc
= lwp
->stop_pc
;
314 return (pc
>= lwp
->step_range_start
&& pc
< lwp
->step_range_end
);
317 struct pending_signals
321 struct pending_signals
*prev
;
324 /* The read/write ends of the pipe registered as waitable file in the
326 static int linux_event_pipe
[2] = { -1, -1 };
328 /* True if we're currently in async mode. */
329 #define target_is_async_p() (linux_event_pipe[0] != -1)
331 static void send_sigstop (struct lwp_info
*lwp
);
332 static void wait_for_sigstop (void);
334 /* Return non-zero if HEADER is a 64-bit ELF file. */
337 elf_64_header_p (const Elf64_Ehdr
*header
, unsigned int *machine
)
339 if (header
->e_ident
[EI_MAG0
] == ELFMAG0
340 && header
->e_ident
[EI_MAG1
] == ELFMAG1
341 && header
->e_ident
[EI_MAG2
] == ELFMAG2
342 && header
->e_ident
[EI_MAG3
] == ELFMAG3
)
344 *machine
= header
->e_machine
;
345 return header
->e_ident
[EI_CLASS
] == ELFCLASS64
;
352 /* Return non-zero if FILE is a 64-bit ELF file,
353 zero if the file is not a 64-bit ELF file,
354 and -1 if the file is not accessible or doesn't exist. */
357 elf_64_file_p (const char *file
, unsigned int *machine
)
362 fd
= open (file
, O_RDONLY
);
366 if (read (fd
, &header
, sizeof (header
)) != sizeof (header
))
373 return elf_64_header_p (&header
, machine
);
376 /* Accepts an integer PID; returns true if the executable that PID
377 is running is a 64-bit ELF file.  */
380 linux_pid_exe_is_elf_64_file (int pid
, unsigned int *machine
)
384 sprintf (file
, "/proc/%d/exe", pid
);
385 return elf_64_file_p (file
, machine
);
389 delete_lwp (struct lwp_info
*lwp
)
391 struct thread_info
*thr
= get_lwp_thread (lwp
);
394 debug_printf ("deleting %ld\n", lwpid_of (thr
));
397 free (lwp
->arch_private
);
401 /* Add a process to the common process list, and set its private
404 static struct process_info
*
405 linux_add_process (int pid
, int attached
)
407 struct process_info
*proc
;
409 proc
= add_process (pid
, attached
);
410 proc
->priv
= xcalloc (1, sizeof (*proc
->priv
));
412 if (the_low_target
.new_process
!= NULL
)
413 proc
->priv
->arch_private
= the_low_target
.new_process ();
418 static CORE_ADDR
get_pc (struct lwp_info
*lwp
);
420 /* Handle a GNU/Linux extended wait response. If we see a clone
421 event, we need to add the new LWP to our list (and return 0 so as
422 not to report the trap to higher layers). */
425 handle_extended_wait (struct lwp_info
*event_lwp
, int wstat
)
427 int event
= linux_ptrace_get_extended_event (wstat
);
428 struct thread_info
*event_thr
= get_lwp_thread (event_lwp
);
429 struct lwp_info
*new_lwp
;
431 if ((event
== PTRACE_EVENT_FORK
) || (event
== PTRACE_EVENT_VFORK
)
432 || (event
== PTRACE_EVENT_CLONE
))
435 unsigned long new_pid
;
438 /* Get the pid of the new lwp. */
439 ptrace (PTRACE_GETEVENTMSG
, lwpid_of (event_thr
), (PTRACE_TYPE_ARG3
) 0,
442 /* If we haven't already seen the new PID stop, wait for it now. */
443 if (!pull_pid_from_list (&stopped_pids
, new_pid
, &status
))
445 /* The new child has a pending SIGSTOP. We can't affect it until it
446 hits the SIGSTOP, but we're already attached. */
448 ret
= my_waitpid (new_pid
, &status
, __WALL
);
451 perror_with_name ("waiting for new child");
452 else if (ret
!= new_pid
)
453 warning ("wait returned unexpected PID %d", ret
);
454 else if (!WIFSTOPPED (status
))
455 warning ("wait returned unexpected status 0x%x", status
);
458 if (event
== PTRACE_EVENT_FORK
|| event
== PTRACE_EVENT_VFORK
)
460 struct process_info
*parent_proc
;
461 struct process_info
*child_proc
;
462 struct lwp_info
*child_lwp
;
463 struct thread_info
*child_thr
;
464 struct target_desc
*tdesc
;
466 ptid
= ptid_build (new_pid
, new_pid
, 0);
470 debug_printf ("HEW: Got fork event from LWP %ld, "
472 ptid_get_lwp (ptid_of (event_thr
)),
473 ptid_get_pid (ptid
));
476 /* Add the new process to the tables and clone the breakpoint
477 lists of the parent. We need to do this even if the new process
478 will be detached, since we will need the process object and the
479 breakpoints to remove any breakpoints from memory when we
480 detach, and the client side will access registers. */
481 child_proc
= linux_add_process (new_pid
, 0);
482 gdb_assert (child_proc
!= NULL
);
483 child_lwp
= add_lwp (ptid
);
484 gdb_assert (child_lwp
!= NULL
);
485 child_lwp
->stopped
= 1;
486 child_lwp
->must_set_ptrace_flags
= 1;
487 child_lwp
->status_pending_p
= 0;
488 child_thr
= get_lwp_thread (child_lwp
);
489 child_thr
->last_resume_kind
= resume_stop
;
490 child_thr
->last_status
.kind
= TARGET_WAITKIND_STOPPED
;
492 parent_proc
= get_thread_process (event_thr
);
493 child_proc
->attached
= parent_proc
->attached
;
494 clone_all_breakpoints (&child_proc
->breakpoints
,
495 &child_proc
->raw_breakpoints
,
496 parent_proc
->breakpoints
);
498 tdesc
= xmalloc (sizeof (struct target_desc
));
499 copy_target_description (tdesc
, parent_proc
->tdesc
);
500 child_proc
->tdesc
= tdesc
;
502 /* Clone arch-specific process data. */
503 if (the_low_target
.new_fork
!= NULL
)
504 the_low_target
.new_fork (parent_proc
, child_proc
);
506 /* Save fork info in the parent thread. */
507 if (event
== PTRACE_EVENT_FORK
)
508 event_lwp
->waitstatus
.kind
= TARGET_WAITKIND_FORKED
;
509 else if (event
== PTRACE_EVENT_VFORK
)
510 event_lwp
->waitstatus
.kind
= TARGET_WAITKIND_VFORKED
;
512 event_lwp
->waitstatus
.value
.related_pid
= ptid
;
514 /* The status_pending field contains bits denoting the
515 extended event, so when the pending event is handled,
516 the handler will look at lwp->waitstatus. */
517 event_lwp
->status_pending_p
= 1;
518 event_lwp
->status_pending
= wstat
;
520 /* Report the event. */
525 debug_printf ("HEW: Got clone event "
526 "from LWP %ld, new child is LWP %ld\n",
527 lwpid_of (event_thr
), new_pid
);
529 ptid
= ptid_build (pid_of (event_thr
), new_pid
, 0);
530 new_lwp
= add_lwp (ptid
);
532 /* Either we're going to immediately resume the new thread
533 or leave it stopped. linux_resume_one_lwp is a nop if it
534 thinks the thread is currently running, so set this first
535 before calling linux_resume_one_lwp. */
536 new_lwp
->stopped
= 1;
538 /* If we're suspending all threads, leave this one suspended
540 if (stopping_threads
== STOPPING_AND_SUSPENDING_THREADS
)
541 new_lwp
->suspended
= 1;
543 /* Normally we will get the pending SIGSTOP. But in some cases
544 we might get another signal delivered to the group first.
545 If we do get another signal, be sure not to lose it. */
546 if (WSTOPSIG (status
) != SIGSTOP
)
548 new_lwp
->stop_expected
= 1;
549 new_lwp
->status_pending_p
= 1;
550 new_lwp
->status_pending
= status
;
553 /* Don't report the event. */
556 else if (event
== PTRACE_EVENT_VFORK_DONE
)
558 event_lwp
->waitstatus
.kind
= TARGET_WAITKIND_VFORK_DONE
;
560 /* Report the event. */
564 internal_error (__FILE__
, __LINE__
, _("unknown ptrace event %d"), event
);
567 /* Return the PC as read from the regcache of LWP, without any
571 get_pc (struct lwp_info
*lwp
)
573 struct thread_info
*saved_thread
;
574 struct regcache
*regcache
;
577 if (the_low_target
.get_pc
== NULL
)
580 saved_thread
= current_thread
;
581 current_thread
= get_lwp_thread (lwp
);
583 regcache
= get_thread_regcache (current_thread
, 1);
584 pc
= (*the_low_target
.get_pc
) (regcache
);
587 debug_printf ("pc is 0x%lx\n", (long) pc
);
589 current_thread
= saved_thread
;
593 /* This function should only be called if LWP got a SIGTRAP.
594 The SIGTRAP could mean several things.
596 On i386, where decr_pc_after_break is non-zero:
598 If we were single-stepping this process using PTRACE_SINGLESTEP, we
599 will get only the one SIGTRAP. The value of $eip will be the next
600 instruction. If the instruction we stepped over was a breakpoint,
601 we need to decrement the PC.
603 If we continue the process using PTRACE_CONT, we will get a
604 SIGTRAP when we hit a breakpoint. The value of $eip will be
605 the instruction after the breakpoint (i.e. needs to be
606 decremented). If we report the SIGTRAP to GDB, we must also
607 report the undecremented PC. If the breakpoint is removed, we
608 must resume at the decremented PC.
610 On a non-decr_pc_after_break machine with hardware or kernel
613 If we either single-step a breakpoint instruction, or continue and
614 hit a breakpoint instruction, our PC will point at the breakpoint
618 check_stopped_by_breakpoint (struct lwp_info
*lwp
)
621 CORE_ADDR sw_breakpoint_pc
;
622 struct thread_info
*saved_thread
;
623 #if USE_SIGTRAP_SIGINFO
627 if (the_low_target
.get_pc
== NULL
)
631 sw_breakpoint_pc
= pc
- the_low_target
.decr_pc_after_break
;
633 /* breakpoint_at reads from the current thread. */
634 saved_thread
= current_thread
;
635 current_thread
= get_lwp_thread (lwp
);
637 #if USE_SIGTRAP_SIGINFO
638 if (ptrace (PTRACE_GETSIGINFO
, lwpid_of (current_thread
),
639 (PTRACE_TYPE_ARG3
) 0, &siginfo
) == 0)
641 if (siginfo
.si_signo
== SIGTRAP
)
643 if (siginfo
.si_code
== GDB_ARCH_TRAP_BRKPT
)
647 struct thread_info
*thr
= get_lwp_thread (lwp
);
649 debug_printf ("CSBB: %s stopped by software breakpoint\n",
650 target_pid_to_str (ptid_of (thr
)));
653 /* Back up the PC if necessary. */
654 if (pc
!= sw_breakpoint_pc
)
656 struct regcache
*regcache
657 = get_thread_regcache (current_thread
, 1);
658 (*the_low_target
.set_pc
) (regcache
, sw_breakpoint_pc
);
661 lwp
->stop_pc
= sw_breakpoint_pc
;
662 lwp
->stop_reason
= TARGET_STOPPED_BY_SW_BREAKPOINT
;
663 current_thread
= saved_thread
;
666 else if (siginfo
.si_code
== TRAP_HWBKPT
)
670 struct thread_info
*thr
= get_lwp_thread (lwp
);
672 debug_printf ("CSBB: %s stopped by hardware "
673 "breakpoint/watchpoint\n",
674 target_pid_to_str (ptid_of (thr
)));
678 lwp
->stop_reason
= TARGET_STOPPED_BY_HW_BREAKPOINT
;
679 current_thread
= saved_thread
;
682 else if (siginfo
.si_code
== TRAP_TRACE
)
686 struct thread_info
*thr
= get_lwp_thread (lwp
);
688 debug_printf ("CSBB: %s stopped by trace\n",
689 target_pid_to_str (ptid_of (thr
)));
695 /* We may have just stepped a breakpoint instruction. E.g., in
696 non-stop mode, GDB first tells the thread A to step a range, and
697 then the user inserts a breakpoint inside the range. In that
698 case we need to report the breakpoint PC. */
699 if ((!lwp
->stepping
|| lwp
->stop_pc
== sw_breakpoint_pc
)
700 && (*the_low_target
.breakpoint_at
) (sw_breakpoint_pc
))
704 struct thread_info
*thr
= get_lwp_thread (lwp
);
706 debug_printf ("CSBB: %s stopped by software breakpoint\n",
707 target_pid_to_str (ptid_of (thr
)));
710 /* Back up the PC if necessary. */
711 if (pc
!= sw_breakpoint_pc
)
713 struct regcache
*regcache
714 = get_thread_regcache (current_thread
, 1);
715 (*the_low_target
.set_pc
) (regcache
, sw_breakpoint_pc
);
718 lwp
->stop_pc
= sw_breakpoint_pc
;
719 lwp
->stop_reason
= TARGET_STOPPED_BY_SW_BREAKPOINT
;
720 current_thread
= saved_thread
;
724 if (hardware_breakpoint_inserted_here (pc
))
728 struct thread_info
*thr
= get_lwp_thread (lwp
);
730 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
731 target_pid_to_str (ptid_of (thr
)));
735 lwp
->stop_reason
= TARGET_STOPPED_BY_HW_BREAKPOINT
;
736 current_thread
= saved_thread
;
741 current_thread
= saved_thread
;
745 static struct lwp_info
*
746 add_lwp (ptid_t ptid
)
748 struct lwp_info
*lwp
;
750 lwp
= (struct lwp_info
*) xcalloc (1, sizeof (*lwp
));
752 lwp
->waitstatus
.kind
= TARGET_WAITKIND_IGNORE
;
754 if (the_low_target
.new_thread
!= NULL
)
755 the_low_target
.new_thread (lwp
);
757 lwp
->thread
= add_thread (ptid
, lwp
);
762 /* Start an inferior process and returns its pid.
763 ALLARGS is a vector of program-name and args. */
766 linux_create_inferior (char *program
, char **allargs
)
768 struct lwp_info
*new_lwp
;
771 struct cleanup
*restore_personality
772 = maybe_disable_address_space_randomization (disable_randomization
);
774 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
780 perror_with_name ("fork");
785 ptrace (PTRACE_TRACEME
, 0, (PTRACE_TYPE_ARG3
) 0, (PTRACE_TYPE_ARG4
) 0);
787 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
788 signal (__SIGRTMIN
+ 1, SIG_DFL
);
793 /* If gdbserver is connected to gdb via stdio, redirect the inferior's
794 stdout to stderr so that inferior i/o doesn't corrupt the connection.
795 Also, redirect stdin to /dev/null. */
796 if (remote_connection_is_stdio ())
799 open ("/dev/null", O_RDONLY
);
801 if (write (2, "stdin/stdout redirected\n",
802 sizeof ("stdin/stdout redirected\n") - 1) < 0)
804 /* Errors ignored. */;
808 execv (program
, allargs
);
810 execvp (program
, allargs
);
812 fprintf (stderr
, "Cannot exec %s: %s.\n", program
,
818 do_cleanups (restore_personality
);
820 linux_add_process (pid
, 0);
822 ptid
= ptid_build (pid
, pid
, 0);
823 new_lwp
= add_lwp (ptid
);
824 new_lwp
->must_set_ptrace_flags
= 1;
829 /* Implement the arch_setup target_ops method. */
832 linux_arch_setup (void)
834 the_low_target
.arch_setup ();
837 /* Attach to an inferior process. Returns 0 on success, ERRNO on
841 linux_attach_lwp (ptid_t ptid
)
843 struct lwp_info
*new_lwp
;
844 int lwpid
= ptid_get_lwp (ptid
);
846 if (ptrace (PTRACE_ATTACH
, lwpid
, (PTRACE_TYPE_ARG3
) 0, (PTRACE_TYPE_ARG4
) 0)
850 new_lwp
= add_lwp (ptid
);
852 /* We need to wait for SIGSTOP before being able to make the next
853 ptrace call on this LWP. */
854 new_lwp
->must_set_ptrace_flags
= 1;
856 if (linux_proc_pid_is_stopped (lwpid
))
859 debug_printf ("Attached to a stopped process\n");
861 /* The process is definitely stopped. It is in a job control
862 stop, unless the kernel predates the TASK_STOPPED /
863 TASK_TRACED distinction, in which case it might be in a
864 ptrace stop. Make sure it is in a ptrace stop; from there we
865 can kill it, signal it, et cetera.
867 First make sure there is a pending SIGSTOP. Since we are
868 already attached, the process can not transition from stopped
869 to running without a PTRACE_CONT; so we know this signal will
870 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
871 probably already in the queue (unless this kernel is old
872 enough to use TASK_STOPPED for ptrace stops); but since
873 SIGSTOP is not an RT signal, it can only be queued once. */
874 kill_lwp (lwpid
, SIGSTOP
);
876 /* Finally, resume the stopped process. This will deliver the
877 SIGSTOP (or a higher priority signal, just like normal
878 PTRACE_ATTACH), which we'll catch later on. */
879 ptrace (PTRACE_CONT
, lwpid
, (PTRACE_TYPE_ARG3
) 0, (PTRACE_TYPE_ARG4
) 0);
882 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
885 There are several cases to consider here:
887 1) gdbserver has already attached to the process and is being notified
888 of a new thread that is being created.
889 In this case we should ignore that SIGSTOP and resume the
890 process. This is handled below by setting stop_expected = 1,
891 and the fact that add_thread sets last_resume_kind ==
894 2) This is the first thread (the process thread), and we're attaching
895 to it via attach_inferior.
896 In this case we want the process thread to stop.
897 This is handled by having linux_attach set last_resume_kind ==
898 resume_stop after we return.
900 If the pid we are attaching to is also the tgid, we attach to and
901 stop all the existing threads. Otherwise, we attach to pid and
902 ignore any other threads in the same group as this pid.
904 3) GDB is connecting to gdbserver and is requesting an enumeration of all
906 In this case we want the thread to stop.
907 FIXME: This case is currently not properly handled.
908 We should wait for the SIGSTOP but don't. Things work apparently
909 because enough time passes between when we ptrace (ATTACH) and when
910 gdb makes the next ptrace call on the thread.
912 On the other hand, if we are currently trying to stop all threads, we
913 should treat the new thread as if we had sent it a SIGSTOP. This works
914 because we are guaranteed that the add_lwp call above added us to the
915 end of the list, and so the new thread has not yet reached
916 wait_for_sigstop (but will). */
917 new_lwp
->stop_expected
= 1;
922 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
923 already attached. Returns true if a new LWP is found, false
927 attach_proc_task_lwp_callback (ptid_t ptid
)
929 /* Is this a new thread? */
930 if (find_thread_ptid (ptid
) == NULL
)
932 int lwpid
= ptid_get_lwp (ptid
);
936 debug_printf ("Found new lwp %d\n", lwpid
);
938 err
= linux_attach_lwp (ptid
);
940 /* Be quiet if we simply raced with the thread exiting. EPERM
941 is returned if the thread's task still exists, and is marked
942 as exited or zombie, as well as other conditions, so in that
943 case, confirm the status in /proc/PID/status. */
945 || (err
== EPERM
&& linux_proc_pid_is_gone (lwpid
)))
949 debug_printf ("Cannot attach to lwp %d: "
950 "thread is gone (%d: %s)\n",
951 lwpid
, err
, strerror (err
));
956 warning (_("Cannot attach to lwp %d: %s"),
958 linux_ptrace_attach_fail_reason_string (ptid
, err
));
966 /* Attach to PID. If PID is the tgid, attach to it and all
970 linux_attach (unsigned long pid
)
972 ptid_t ptid
= ptid_build (pid
, pid
, 0);
975 /* Attach to PID. We will check for other threads
977 err
= linux_attach_lwp (ptid
);
979 error ("Cannot attach to process %ld: %s",
980 pid
, linux_ptrace_attach_fail_reason_string (ptid
, err
));
982 linux_add_process (pid
, 1);
986 struct thread_info
*thread
;
988 /* Don't ignore the initial SIGSTOP if we just attached to this
989 process. It will be collected by wait shortly. */
990 thread
= find_thread_ptid (ptid_build (pid
, pid
, 0));
991 thread
->last_resume_kind
= resume_stop
;
994 /* We must attach to every LWP. If /proc is mounted, use that to
995 find them now. On the one hand, the inferior may be using raw
996 clone instead of using pthreads. On the other hand, even if it
997 is using pthreads, GDB may not be connected yet (thread_db needs
998 to do symbol lookups, through qSymbol). Also, thread_db walks
999 structures in the inferior's address space to find the list of
1000 threads/LWPs, and those structures may well be corrupted. Note
1001 that once thread_db is loaded, we'll still use it to list threads
1002 and associate pthread info with each LWP. */
1003 linux_proc_attach_tgid_threads (pid
, attach_proc_task_lwp_callback
);
1014 second_thread_of_pid_p (struct inferior_list_entry
*entry
, void *args
)
1016 struct counter
*counter
= args
;
1018 if (ptid_get_pid (entry
->id
) == counter
->pid
)
1020 if (++counter
->count
> 1)
1028 last_thread_of_process_p (int pid
)
1030 struct counter counter
= { pid
, 0 };
1032 return (find_inferior (&all_threads
,
1033 second_thread_of_pid_p
, &counter
) == NULL
);
1039 linux_kill_one_lwp (struct lwp_info
*lwp
)
1041 struct thread_info
*thr
= get_lwp_thread (lwp
);
1042 int pid
= lwpid_of (thr
);
1044 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1045 there is no signal context, and ptrace(PTRACE_KILL) (or
1046 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1047 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1048 alternative is to kill with SIGKILL. We only need one SIGKILL
1049 per process, not one for each thread. But since we still support
1050 linuxthreads, and we also support debugging programs using raw
1051 clone without CLONE_THREAD, we send one for each thread. For
1052 years, we used PTRACE_KILL only, so we're being a bit paranoid
1053 about some old kernels where PTRACE_KILL might work better
1054 (dubious if there are any such, but that's why it's paranoia), so
1055 we try SIGKILL first, PTRACE_KILL second, and so we're fine
1059 kill_lwp (pid
, SIGKILL
);
1062 int save_errno
= errno
;
1064 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
1065 target_pid_to_str (ptid_of (thr
)),
1066 save_errno
? strerror (save_errno
) : "OK");
1070 ptrace (PTRACE_KILL
, pid
, (PTRACE_TYPE_ARG3
) 0, (PTRACE_TYPE_ARG4
) 0);
1073 int save_errno
= errno
;
1075 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
1076 target_pid_to_str (ptid_of (thr
)),
1077 save_errno
? strerror (save_errno
) : "OK");
1081 /* Kill LWP and wait for it to die. */
1084 kill_wait_lwp (struct lwp_info
*lwp
)
1086 struct thread_info
*thr
= get_lwp_thread (lwp
);
1087 int pid
= ptid_get_pid (ptid_of (thr
));
1088 int lwpid
= ptid_get_lwp (ptid_of (thr
));
1093 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid
, pid
);
1097 linux_kill_one_lwp (lwp
);
1099 /* Make sure it died. Notes:
1101 - The loop is most likely unnecessary.
1103 - We don't use linux_wait_for_event as that could delete lwps
1104 while we're iterating over them. We're not interested in
1105 any pending status at this point, only in making sure all
1106 wait status on the kernel side are collected until the
1109 - We don't use __WALL here as the __WALL emulation relies on
1110 SIGCHLD, and killing a stopped process doesn't generate
1111 one, nor an exit status.
1113 res
= my_waitpid (lwpid
, &wstat
, 0);
1114 if (res
== -1 && errno
== ECHILD
)
1115 res
= my_waitpid (lwpid
, &wstat
, __WCLONE
);
1116 } while (res
> 0 && WIFSTOPPED (wstat
));
1118 /* Even if it was stopped, the child may have already disappeared.
1119 E.g., if it was killed by SIGKILL. */
1120 if (res
< 0 && errno
!= ECHILD
)
1121 perror_with_name ("kill_wait_lwp");
1124 /* Callback for `find_inferior'. Kills an lwp of a given process,
1125 except the leader. */
1128 kill_one_lwp_callback (struct inferior_list_entry
*entry
, void *args
)
1130 struct thread_info
*thread
= (struct thread_info
*) entry
;
1131 struct lwp_info
*lwp
= get_thread_lwp (thread
);
1132 int pid
= * (int *) args
;
1134 if (ptid_get_pid (entry
->id
) != pid
)
1137 /* We avoid killing the first thread here, because of a Linux kernel (at
1138 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1139 the children get a chance to be reaped, it will remain a zombie
1142 if (lwpid_of (thread
) == pid
)
1145 debug_printf ("lkop: is last of process %s\n",
1146 target_pid_to_str (entry
->id
));
1150 kill_wait_lwp (lwp
);
1155 linux_kill (int pid
)
1157 struct process_info
*process
;
1158 struct lwp_info
*lwp
;
1160 process
= find_process_pid (pid
);
1161 if (process
== NULL
)
1164 /* If we're killing a running inferior, make sure it is stopped
1165 first, as PTRACE_KILL will not work otherwise. */
1166 stop_all_lwps (0, NULL
);
1168 find_inferior (&all_threads
, kill_one_lwp_callback
, &pid
);
1170 /* See the comment in linux_kill_one_lwp. We did not kill the first
1171 thread in the list, so do so now. */
1172 lwp
= find_lwp_pid (pid_to_ptid (pid
));
1177 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1181 kill_wait_lwp (lwp
);
1183 the_target
->mourn (process
);
1185 /* Since we presently can only stop all lwps of all processes, we
1186 need to unstop lwps of other processes. */
1187 unstop_all_lwps (0, NULL
);
1191 /* Get pending signal of THREAD, for detaching purposes. This is the
1192 signal the thread last stopped for, which we need to deliver to the
1193 thread when detaching, otherwise, it'd be suppressed/lost. */
1196 get_detach_signal (struct thread_info
*thread
)
1198 enum gdb_signal signo
= GDB_SIGNAL_0
;
1200 struct lwp_info
*lp
= get_thread_lwp (thread
);
1202 if (lp
->status_pending_p
)
1203 status
= lp
->status_pending
;
1206 /* If the thread had been suspended by gdbserver, and it stopped
1207 cleanly, then it'll have stopped with SIGSTOP. But we don't
1208 want to deliver that SIGSTOP. */
1209 if (thread
->last_status
.kind
!= TARGET_WAITKIND_STOPPED
1210 || thread
->last_status
.value
.sig
== GDB_SIGNAL_0
)
1213 /* Otherwise, we may need to deliver the signal we
1215 status
= lp
->last_status
;
1218 if (!WIFSTOPPED (status
))
1221 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1222 target_pid_to_str (ptid_of (thread
)));
1226 /* Extended wait statuses aren't real SIGTRAPs. */
1227 if (WSTOPSIG (status
) == SIGTRAP
&& linux_is_extended_waitstatus (status
))
1230 debug_printf ("GPS: lwp %s had stopped with extended "
1231 "status: no pending signal\n",
1232 target_pid_to_str (ptid_of (thread
)));
1236 signo
= gdb_signal_from_host (WSTOPSIG (status
));
1238 if (program_signals_p
&& !program_signals
[signo
])
1241 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1242 target_pid_to_str (ptid_of (thread
)),
1243 gdb_signal_to_string (signo
));
1246 else if (!program_signals_p
1247 /* If we have no way to know which signals GDB does not
1248 want to have passed to the program, assume
1249 SIGTRAP/SIGINT, which is GDB's default. */
1250 && (signo
== GDB_SIGNAL_TRAP
|| signo
== GDB_SIGNAL_INT
))
1253 debug_printf ("GPS: lwp %s had signal %s, "
1254 "but we don't know if we should pass it. "
1255 "Default to not.\n",
1256 target_pid_to_str (ptid_of (thread
)),
1257 gdb_signal_to_string (signo
));
1263 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1264 target_pid_to_str (ptid_of (thread
)),
1265 gdb_signal_to_string (signo
));
1267 return WSTOPSIG (status
);
1272 linux_detach_one_lwp (struct inferior_list_entry
*entry
, void *args
)
1274 struct thread_info
*thread
= (struct thread_info
*) entry
;
1275 struct lwp_info
*lwp
= get_thread_lwp (thread
);
1276 int pid
= * (int *) args
;
1279 if (ptid_get_pid (entry
->id
) != pid
)
1282 /* If there is a pending SIGSTOP, get rid of it. */
1283 if (lwp
->stop_expected
)
1286 debug_printf ("Sending SIGCONT to %s\n",
1287 target_pid_to_str (ptid_of (thread
)));
1289 kill_lwp (lwpid_of (thread
), SIGCONT
);
1290 lwp
->stop_expected
= 0;
1293 /* Flush any pending changes to the process's registers. */
1294 regcache_invalidate_thread (thread
);
1296 /* Pass on any pending signal for this thread. */
1297 sig
= get_detach_signal (thread
);
1299 /* Finally, let it resume. */
1300 if (the_low_target
.prepare_to_resume
!= NULL
)
1301 the_low_target
.prepare_to_resume (lwp
);
1302 if (ptrace (PTRACE_DETACH
, lwpid_of (thread
), (PTRACE_TYPE_ARG3
) 0,
1303 (PTRACE_TYPE_ARG4
) (long) sig
) < 0)
1304 error (_("Can't detach %s: %s"),
1305 target_pid_to_str (ptid_of (thread
)),
1313 linux_detach (int pid
)
1315 struct process_info
*process
;
1317 process
= find_process_pid (pid
);
1318 if (process
== NULL
)
1321 /* Stop all threads before detaching. First, ptrace requires that
1322 the thread is stopped to sucessfully detach. Second, thread_db
1323 may need to uninstall thread event breakpoints from memory, which
1324 only works with a stopped process anyway. */
1325 stop_all_lwps (0, NULL
);
1327 #ifdef USE_THREAD_DB
1328 thread_db_detach (process
);
1331 /* Stabilize threads (move out of jump pads). */
1332 stabilize_threads ();
1334 find_inferior (&all_threads
, linux_detach_one_lwp
, &pid
);
1336 the_target
->mourn (process
);
1338 /* Since we presently can only stop all lwps of all processes, we
1339 need to unstop lwps of other processes. */
1340 unstop_all_lwps (0, NULL
);
/* Remove all LWPs that belong to process PROC from the lwp list.
   Callback for find_inferior; always returns 0 to keep iterating.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct process_info *process = proc;

  if (pid_of (thread) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}
1360 linux_mourn (struct process_info
*process
)
1362 struct process_info_private
*priv
;
1364 #ifdef USE_THREAD_DB
1365 thread_db_mourn (process
);
1368 find_inferior (&all_threads
, delete_lwp_callback
, process
);
1370 /* Freeing all private data. */
1371 priv
= process
->priv
;
1372 free (priv
->arch_private
);
1374 process
->priv
= NULL
;
1376 remove_process (process
);
/* Wait until process PID has actually exited (or until waitpid says
   there is no such child).  */

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}
1391 /* Return nonzero if the given thread is still alive. */
1393 linux_thread_alive (ptid_t ptid
)
1395 struct lwp_info
*lwp
= find_lwp_pid (ptid
);
1397 /* We assume we always know if a thread exits. If a whole process
1398 exited but we still haven't been able to report it to GDB, we'll
1399 hold on to the last lwp of the dead process. */
1401 return !lwp_is_marked_dead (lwp
);
1406 /* Return 1 if this lwp still has an interesting status pending. If
1407 not (e.g., it had stopped for a breakpoint that is gone), return
1411 thread_still_has_status_pending_p (struct thread_info
*thread
)
1413 struct lwp_info
*lp
= get_thread_lwp (thread
);
1415 if (!lp
->status_pending_p
)
1418 /* If we got a `vCont;t', but we haven't reported a stop yet, do
1419 report any status pending the LWP may have. */
1420 if (thread
->last_resume_kind
== resume_stop
1421 && thread
->last_status
.kind
!= TARGET_WAITKIND_IGNORE
)
1424 if (thread
->last_resume_kind
!= resume_stop
1425 && (lp
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
1426 || lp
->stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
))
1428 struct thread_info
*saved_thread
;
1432 gdb_assert (lp
->last_status
!= 0);
1436 saved_thread
= current_thread
;
1437 current_thread
= thread
;
1439 if (pc
!= lp
->stop_pc
)
1442 debug_printf ("PC of %ld changed\n",
1447 #if !USE_SIGTRAP_SIGINFO
1448 else if (lp
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
1449 && !(*the_low_target
.breakpoint_at
) (pc
))
1452 debug_printf ("previous SW breakpoint of %ld gone\n",
1456 else if (lp
->stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
1457 && !hardware_breakpoint_inserted_here (pc
))
1460 debug_printf ("previous HW breakpoint of %ld gone\n",
1466 current_thread
= saved_thread
;
1471 debug_printf ("discarding pending breakpoint status\n");
1472 lp
->status_pending_p
= 0;
1480 /* Return 1 if this lwp has an interesting status pending. */
1482 status_pending_p_callback (struct inferior_list_entry
*entry
, void *arg
)
1484 struct thread_info
*thread
= (struct thread_info
*) entry
;
1485 struct lwp_info
*lp
= get_thread_lwp (thread
);
1486 ptid_t ptid
= * (ptid_t
*) arg
;
1488 /* Check if we're only interested in events from a specific process
1489 or a specific LWP. */
1490 if (!ptid_match (ptid_of (thread
), ptid
))
1493 if (lp
->status_pending_p
1494 && !thread_still_has_status_pending_p (thread
))
1496 linux_resume_one_lwp (lp
, lp
->stepping
, GDB_SIGNAL_0
, NULL
);
1500 return lp
->status_pending_p
;
1504 same_lwp (struct inferior_list_entry
*entry
, void *data
)
1506 ptid_t ptid
= *(ptid_t
*) data
;
1509 if (ptid_get_lwp (ptid
) != 0)
1510 lwp
= ptid_get_lwp (ptid
);
1512 lwp
= ptid_get_pid (ptid
);
1514 if (ptid_get_lwp (entry
->id
) == lwp
)
1521 find_lwp_pid (ptid_t ptid
)
1523 struct inferior_list_entry
*thread
1524 = find_inferior (&all_threads
, same_lwp
, &ptid
);
1529 return get_thread_lwp ((struct thread_info
*) thread
);
1532 /* Return the number of known LWPs in the tgid given by PID. */
1537 struct inferior_list_entry
*inf
, *tmp
;
1540 ALL_INFERIORS (&all_threads
, inf
, tmp
)
1542 if (ptid_get_pid (inf
->id
) == pid
)
1549 /* The arguments passed to iterate_over_lwps. */
1551 struct iterate_over_lwps_args
1553 /* The FILTER argument passed to iterate_over_lwps. */
1556 /* The CALLBACK argument passed to iterate_over_lwps. */
1557 iterate_over_lwps_ftype
*callback
;
1559 /* The DATA argument passed to iterate_over_lwps. */
1563 /* Callback for find_inferior used by iterate_over_lwps to filter
1564 calls to the callback supplied to that function. Returning a
1565 nonzero value causes find_inferiors to stop iterating and return
1566 the current inferior_list_entry. Returning zero indicates that
1567 find_inferiors should continue iterating. */
1570 iterate_over_lwps_filter (struct inferior_list_entry
*entry
, void *args_p
)
1572 struct iterate_over_lwps_args
*args
1573 = (struct iterate_over_lwps_args
*) args_p
;
1575 if (ptid_match (entry
->id
, args
->filter
))
1577 struct thread_info
*thr
= (struct thread_info
*) entry
;
1578 struct lwp_info
*lwp
= get_thread_lwp (thr
);
1580 return (*args
->callback
) (lwp
, args
->data
);
1586 /* See nat/linux-nat.h. */
1589 iterate_over_lwps (ptid_t filter
,
1590 iterate_over_lwps_ftype callback
,
1593 struct iterate_over_lwps_args args
= {filter
, callback
, data
};
1594 struct inferior_list_entry
*entry
;
1596 entry
= find_inferior (&all_threads
, iterate_over_lwps_filter
, &args
);
1600 return get_thread_lwp ((struct thread_info
*) entry
);
1603 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1604 their exits until all other threads in the group have exited. */
1607 check_zombie_leaders (void)
1609 struct process_info
*proc
, *tmp
;
1611 ALL_PROCESSES (proc
, tmp
)
1613 pid_t leader_pid
= pid_of (proc
);
1614 struct lwp_info
*leader_lp
;
1616 leader_lp
= find_lwp_pid (pid_to_ptid (leader_pid
));
1619 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1620 "num_lwps=%d, zombie=%d\n",
1621 leader_pid
, leader_lp
!= NULL
, num_lwps (leader_pid
),
1622 linux_proc_pid_is_zombie (leader_pid
));
1624 if (leader_lp
!= NULL
1625 /* Check if there are other threads in the group, as we may
1626 have raced with the inferior simply exiting. */
1627 && !last_thread_of_process_p (leader_pid
)
1628 && linux_proc_pid_is_zombie (leader_pid
))
1630 /* A leader zombie can mean one of two things:
1632 - It exited, and there's an exit status pending
1633 available, or only the leader exited (not the whole
1634 program). In the latter case, we can't waitpid the
1635 leader's exit status until all other threads are gone.
1637 - There are 3 or more threads in the group, and a thread
1638 other than the leader exec'd. On an exec, the Linux
1639 kernel destroys all other threads (except the execing
1640 one) in the thread group, and resets the execing thread's
1641 tid to the tgid. No exit notification is sent for the
1642 execing thread -- from the ptracer's perspective, it
1643 appears as though the execing thread just vanishes.
1644 Until we reap all other threads except the leader and the
1645 execing thread, the leader will be zombie, and the
1646 execing thread will be in `D (disc sleep)'. As soon as
1647 all other threads are reaped, the execing thread changes
1648 it's tid to the tgid, and the previous (zombie) leader
1649 vanishes, giving place to the "new" leader. We could try
1650 distinguishing the exit and exec cases, by waiting once
1651 more, and seeing if something comes out, but it doesn't
1652 sound useful. The previous leader _does_ go away, and
1653 we'll re-add the new one once we see the exec event
1654 (which is just the same as what would happen if the
1655 previous leader did exit voluntarily before some other
1660 "CZL: Thread group leader %d zombie "
1661 "(it exited, or another thread execd).\n",
1664 delete_lwp (leader_lp
);
1669 /* Callback for `find_inferior'. Returns the first LWP that is not
1670 stopped. ARG is a PTID filter. */
1673 not_stopped_callback (struct inferior_list_entry
*entry
, void *arg
)
1675 struct thread_info
*thr
= (struct thread_info
*) entry
;
1676 struct lwp_info
*lwp
;
1677 ptid_t filter
= *(ptid_t
*) arg
;
1679 if (!ptid_match (ptid_of (thr
), filter
))
1682 lwp
= get_thread_lwp (thr
);
1689 /* This function should only be called if the LWP got a SIGTRAP.
1691 Handle any tracepoint steps or hits. Return true if a tracepoint
1692 event was handled, 0 otherwise. */
1695 handle_tracepoints (struct lwp_info
*lwp
)
1697 struct thread_info
*tinfo
= get_lwp_thread (lwp
);
1698 int tpoint_related_event
= 0;
1700 gdb_assert (lwp
->suspended
== 0);
1702 /* If this tracepoint hit causes a tracing stop, we'll immediately
1703 uninsert tracepoints. To do this, we temporarily pause all
1704 threads, unpatch away, and then unpause threads. We need to make
1705 sure the unpausing doesn't resume LWP too. */
1708 /* And we need to be sure that any all-threads-stopping doesn't try
1709 to move threads out of the jump pads, as it could deadlock the
1710 inferior (LWP could be in the jump pad, maybe even holding the
1713 /* Do any necessary step collect actions. */
1714 tpoint_related_event
|= tracepoint_finished_step (tinfo
, lwp
->stop_pc
);
1716 tpoint_related_event
|= handle_tracepoint_bkpts (tinfo
, lwp
->stop_pc
);
1718 /* See if we just hit a tracepoint and do its main collect
1720 tpoint_related_event
|= tracepoint_was_hit (tinfo
, lwp
->stop_pc
);
1724 gdb_assert (lwp
->suspended
== 0);
1725 gdb_assert (!stabilizing_threads
|| lwp
->collecting_fast_tracepoint
);
1727 if (tpoint_related_event
)
1730 debug_printf ("got a tracepoint event\n");
1737 /* Convenience wrapper. Returns true if LWP is presently collecting a
1741 linux_fast_tracepoint_collecting (struct lwp_info
*lwp
,
1742 struct fast_tpoint_collect_status
*status
)
1744 CORE_ADDR thread_area
;
1745 struct thread_info
*thread
= get_lwp_thread (lwp
);
1747 if (the_low_target
.get_thread_area
== NULL
)
1750 /* Get the thread area address. This is used to recognize which
1751 thread is which when tracing with the in-process agent library.
1752 We don't read anything from the address, and treat it as opaque;
1753 it's the address itself that we assume is unique per-thread. */
1754 if ((*the_low_target
.get_thread_area
) (lwpid_of (thread
), &thread_area
) == -1)
1757 return fast_tracepoint_collecting (thread_area
, lwp
->stop_pc
, status
);
1760 /* The reason we resume in the caller, is because we want to be able
1761 to pass lwp->status_pending as WSTAT, and we need to clear
1762 status_pending_p before resuming, otherwise, linux_resume_one_lwp
1763 refuses to resume. */
1766 maybe_move_out_of_jump_pad (struct lwp_info
*lwp
, int *wstat
)
1768 struct thread_info
*saved_thread
;
1770 saved_thread
= current_thread
;
1771 current_thread
= get_lwp_thread (lwp
);
1774 || (WIFSTOPPED (*wstat
) && WSTOPSIG (*wstat
) != SIGTRAP
))
1775 && supports_fast_tracepoints ()
1776 && agent_loaded_p ())
1778 struct fast_tpoint_collect_status status
;
1782 debug_printf ("Checking whether LWP %ld needs to move out of the "
1784 lwpid_of (current_thread
));
1786 r
= linux_fast_tracepoint_collecting (lwp
, &status
);
1789 || (WSTOPSIG (*wstat
) != SIGILL
1790 && WSTOPSIG (*wstat
) != SIGFPE
1791 && WSTOPSIG (*wstat
) != SIGSEGV
1792 && WSTOPSIG (*wstat
) != SIGBUS
))
1794 lwp
->collecting_fast_tracepoint
= r
;
1798 if (r
== 1 && lwp
->exit_jump_pad_bkpt
== NULL
)
1800 /* Haven't executed the original instruction yet.
1801 Set breakpoint there, and wait till it's hit,
1802 then single-step until exiting the jump pad. */
1803 lwp
->exit_jump_pad_bkpt
1804 = set_breakpoint_at (status
.adjusted_insn_addr
, NULL
);
1808 debug_printf ("Checking whether LWP %ld needs to move out of "
1809 "the jump pad...it does\n",
1810 lwpid_of (current_thread
));
1811 current_thread
= saved_thread
;
1818 /* If we get a synchronous signal while collecting, *and*
1819 while executing the (relocated) original instruction,
1820 reset the PC to point at the tpoint address, before
1821 reporting to GDB. Otherwise, it's an IPA lib bug: just
1822 report the signal to GDB, and pray for the best. */
1824 lwp
->collecting_fast_tracepoint
= 0;
1827 && (status
.adjusted_insn_addr
<= lwp
->stop_pc
1828 && lwp
->stop_pc
< status
.adjusted_insn_addr_end
))
1831 struct regcache
*regcache
;
1833 /* The si_addr on a few signals references the address
1834 of the faulting instruction. Adjust that as
1836 if ((WSTOPSIG (*wstat
) == SIGILL
1837 || WSTOPSIG (*wstat
) == SIGFPE
1838 || WSTOPSIG (*wstat
) == SIGBUS
1839 || WSTOPSIG (*wstat
) == SIGSEGV
)
1840 && ptrace (PTRACE_GETSIGINFO
, lwpid_of (current_thread
),
1841 (PTRACE_TYPE_ARG3
) 0, &info
) == 0
1842 /* Final check just to make sure we don't clobber
1843 the siginfo of non-kernel-sent signals. */
1844 && (uintptr_t) info
.si_addr
== lwp
->stop_pc
)
1846 info
.si_addr
= (void *) (uintptr_t) status
.tpoint_addr
;
1847 ptrace (PTRACE_SETSIGINFO
, lwpid_of (current_thread
),
1848 (PTRACE_TYPE_ARG3
) 0, &info
);
1851 regcache
= get_thread_regcache (current_thread
, 1);
1852 (*the_low_target
.set_pc
) (regcache
, status
.tpoint_addr
);
1853 lwp
->stop_pc
= status
.tpoint_addr
;
1855 /* Cancel any fast tracepoint lock this thread was
1857 force_unlock_trace_buffer ();
1860 if (lwp
->exit_jump_pad_bkpt
!= NULL
)
1863 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
1864 "stopping all threads momentarily.\n");
1866 stop_all_lwps (1, lwp
);
1868 delete_breakpoint (lwp
->exit_jump_pad_bkpt
);
1869 lwp
->exit_jump_pad_bkpt
= NULL
;
1871 unstop_all_lwps (1, lwp
);
1873 gdb_assert (lwp
->suspended
>= 0);
1879 debug_printf ("Checking whether LWP %ld needs to move out of the "
1881 lwpid_of (current_thread
));
1883 current_thread
= saved_thread
;
1887 /* Enqueue one signal in the "signals to report later when out of the
1891 enqueue_one_deferred_signal (struct lwp_info
*lwp
, int *wstat
)
1893 struct pending_signals
*p_sig
;
1894 struct thread_info
*thread
= get_lwp_thread (lwp
);
1897 debug_printf ("Deferring signal %d for LWP %ld.\n",
1898 WSTOPSIG (*wstat
), lwpid_of (thread
));
1902 struct pending_signals
*sig
;
1904 for (sig
= lwp
->pending_signals_to_report
;
1907 debug_printf (" Already queued %d\n",
1910 debug_printf (" (no more currently queued signals)\n");
1913 /* Don't enqueue non-RT signals if they are already in the deferred
1914 queue. (SIGSTOP being the easiest signal to see ending up here
1916 if (WSTOPSIG (*wstat
) < __SIGRTMIN
)
1918 struct pending_signals
*sig
;
1920 for (sig
= lwp
->pending_signals_to_report
;
1924 if (sig
->signal
== WSTOPSIG (*wstat
))
1927 debug_printf ("Not requeuing already queued non-RT signal %d"
1936 p_sig
= xmalloc (sizeof (*p_sig
));
1937 p_sig
->prev
= lwp
->pending_signals_to_report
;
1938 p_sig
->signal
= WSTOPSIG (*wstat
);
1939 memset (&p_sig
->info
, 0, sizeof (siginfo_t
));
1940 ptrace (PTRACE_GETSIGINFO
, lwpid_of (thread
), (PTRACE_TYPE_ARG3
) 0,
1943 lwp
->pending_signals_to_report
= p_sig
;
1946 /* Dequeue one signal from the "signals to report later when out of
1947 the jump pad" list. */
1950 dequeue_one_deferred_signal (struct lwp_info
*lwp
, int *wstat
)
1952 struct thread_info
*thread
= get_lwp_thread (lwp
);
1954 if (lwp
->pending_signals_to_report
!= NULL
)
1956 struct pending_signals
**p_sig
;
1958 p_sig
= &lwp
->pending_signals_to_report
;
1959 while ((*p_sig
)->prev
!= NULL
)
1960 p_sig
= &(*p_sig
)->prev
;
1962 *wstat
= W_STOPCODE ((*p_sig
)->signal
);
1963 if ((*p_sig
)->info
.si_signo
!= 0)
1964 ptrace (PTRACE_SETSIGINFO
, lwpid_of (thread
), (PTRACE_TYPE_ARG3
) 0,
1970 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
1971 WSTOPSIG (*wstat
), lwpid_of (thread
));
1975 struct pending_signals
*sig
;
1977 for (sig
= lwp
->pending_signals_to_report
;
1980 debug_printf (" Still queued %d\n",
1983 debug_printf (" (no more queued signals)\n");
1992 /* Fetch the possibly triggered data watchpoint info and store it in
1995 On some archs, like x86, that use debug registers to set
1996 watchpoints, it's possible that the way to know which watched
1997 address trapped, is to check the register that is used to select
1998 which address to watch. Problem is, between setting the watchpoint
1999 and reading back which data address trapped, the user may change
2000 the set of watchpoints, and, as a consequence, GDB changes the
2001 debug registers in the inferior. To avoid reading back a stale
2002 stopped-data-address when that happens, we cache in LP the fact
2003 that a watchpoint trapped, and the corresponding data address, as
2004 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2005 registers meanwhile, we have the cached data we can rely on. */
2008 check_stopped_by_watchpoint (struct lwp_info
*child
)
2010 if (the_low_target
.stopped_by_watchpoint
!= NULL
)
2012 struct thread_info
*saved_thread
;
2014 saved_thread
= current_thread
;
2015 current_thread
= get_lwp_thread (child
);
2017 if (the_low_target
.stopped_by_watchpoint ())
2019 child
->stop_reason
= TARGET_STOPPED_BY_WATCHPOINT
;
2021 if (the_low_target
.stopped_data_address
!= NULL
)
2022 child
->stopped_data_address
2023 = the_low_target
.stopped_data_address ();
2025 child
->stopped_data_address
= 0;
2028 current_thread
= saved_thread
;
2031 return child
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
;
2034 /* Return the ptrace options that we want to try to enable. */
2037 linux_low_ptrace_options (int attached
)
2042 options
|= PTRACE_O_EXITKILL
;
2044 if (report_fork_events
)
2045 options
|= PTRACE_O_TRACEFORK
;
2047 if (report_vfork_events
)
2048 options
|= (PTRACE_O_TRACEVFORK
| PTRACE_O_TRACEVFORKDONE
);
2053 /* Do low-level handling of the event, and check if we should go on
2054 and pass it to caller code. Return the affected lwp if we are, or
2057 static struct lwp_info
*
2058 linux_low_filter_event (int lwpid
, int wstat
)
2060 struct lwp_info
*child
;
2061 struct thread_info
*thread
;
2062 int have_stop_pc
= 0;
2064 child
= find_lwp_pid (pid_to_ptid (lwpid
));
2066 /* If we didn't find a process, one of two things presumably happened:
2067 - A process we started and then detached from has exited. Ignore it.
2068 - A process we are controlling has forked and the new child's stop
2069 was reported to us by the kernel. Save its PID. */
2070 if (child
== NULL
&& WIFSTOPPED (wstat
))
2072 add_to_pid_list (&stopped_pids
, lwpid
, wstat
);
2075 else if (child
== NULL
)
2078 thread
= get_lwp_thread (child
);
2082 child
->last_status
= wstat
;
2084 /* Check if the thread has exited. */
2085 if ((WIFEXITED (wstat
) || WIFSIGNALED (wstat
)))
2088 debug_printf ("LLFE: %d exited.\n", lwpid
);
2089 if (num_lwps (pid_of (thread
)) > 1)
2092 /* If there is at least one more LWP, then the exit signal was
2093 not the end of the debugged application and should be
2100 /* This was the last lwp in the process. Since events are
2101 serialized to GDB core, and we can't report this one
2102 right now, but GDB core and the other target layers will
2103 want to be notified about the exit code/signal, leave the
2104 status pending for the next time we're able to report
2106 mark_lwp_dead (child
, wstat
);
2111 gdb_assert (WIFSTOPPED (wstat
));
2113 if (WIFSTOPPED (wstat
))
2115 struct process_info
*proc
;
2117 /* Architecture-specific setup after inferior is running. */
2118 proc
= find_process_pid (pid_of (thread
));
2119 if (proc
->tdesc
== NULL
)
2123 struct thread_info
*saved_thread
;
2125 /* This needs to happen after we have attached to the
2126 inferior and it is stopped for the first time, but
2127 before we access any inferior registers. */
2128 saved_thread
= current_thread
;
2129 current_thread
= thread
;
2131 the_low_target
.arch_setup ();
2133 current_thread
= saved_thread
;
2137 /* The process is started, but GDBserver will do
2138 architecture-specific setup after the program stops at
2139 the first instruction. */
2140 child
->status_pending_p
= 1;
2141 child
->status_pending
= wstat
;
2147 if (WIFSTOPPED (wstat
) && child
->must_set_ptrace_flags
)
2149 struct process_info
*proc
= find_process_pid (pid_of (thread
));
2150 int options
= linux_low_ptrace_options (proc
->attached
);
2152 linux_enable_event_reporting (lwpid
, options
);
2153 child
->must_set_ptrace_flags
= 0;
2156 /* Be careful to not overwrite stop_pc until
2157 check_stopped_by_breakpoint is called. */
2158 if (WIFSTOPPED (wstat
) && WSTOPSIG (wstat
) == SIGTRAP
2159 && linux_is_extended_waitstatus (wstat
))
2161 child
->stop_pc
= get_pc (child
);
2162 if (handle_extended_wait (child
, wstat
))
2164 /* The event has been handled, so just return without
2170 /* Check first whether this was a SW/HW breakpoint before checking
2171 watchpoints, because at least s390 can't tell the data address of
2172 hardware watchpoint hits, and returns stopped-by-watchpoint as
2173 long as there's a watchpoint set. */
2174 if (WIFSTOPPED (wstat
) && linux_wstatus_maybe_breakpoint (wstat
))
2176 if (check_stopped_by_breakpoint (child
))
2180 /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
2181 or hardware watchpoint. Check which is which if we got
2182 TARGET_STOPPED_BY_HW_BREAKPOINT. */
2183 if (WIFSTOPPED (wstat
) && WSTOPSIG (wstat
) == SIGTRAP
2184 && (child
->stop_reason
== TARGET_STOPPED_BY_NO_REASON
2185 || child
->stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
))
2186 check_stopped_by_watchpoint (child
);
2189 child
->stop_pc
= get_pc (child
);
2191 if (WIFSTOPPED (wstat
) && WSTOPSIG (wstat
) == SIGSTOP
2192 && child
->stop_expected
)
2195 debug_printf ("Expected stop.\n");
2196 child
->stop_expected
= 0;
2198 if (thread
->last_resume_kind
== resume_stop
)
2200 /* We want to report the stop to the core. Treat the
2201 SIGSTOP as a normal event. */
2203 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2204 target_pid_to_str (ptid_of (thread
)));
2206 else if (stopping_threads
!= NOT_STOPPING_THREADS
)
2208 /* Stopping threads. We don't want this SIGSTOP to end up
2211 debug_printf ("LLW: SIGSTOP caught for %s "
2212 "while stopping threads.\n",
2213 target_pid_to_str (ptid_of (thread
)));
2218 /* This is a delayed SIGSTOP. Filter out the event. */
2220 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2221 child
->stepping
? "step" : "continue",
2222 target_pid_to_str (ptid_of (thread
)));
2224 linux_resume_one_lwp (child
, child
->stepping
, 0, NULL
);
2229 child
->status_pending_p
= 1;
2230 child
->status_pending
= wstat
;
2234 /* Resume LWPs that are currently stopped without any pending status
2235 to report, but are resumed from the core's perspective. */
2238 resume_stopped_resumed_lwps (struct inferior_list_entry
*entry
)
2240 struct thread_info
*thread
= (struct thread_info
*) entry
;
2241 struct lwp_info
*lp
= get_thread_lwp (thread
);
2244 && !lp
->status_pending_p
2245 && thread
->last_resume_kind
!= resume_stop
2246 && thread
->last_status
.kind
== TARGET_WAITKIND_IGNORE
)
2248 int step
= thread
->last_resume_kind
== resume_step
;
2251 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2252 target_pid_to_str (ptid_of (thread
)),
2253 paddress (lp
->stop_pc
),
2256 linux_resume_one_lwp (lp
, step
, GDB_SIGNAL_0
, NULL
);
2260 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2261 match FILTER_PTID (leaving others pending). The PTIDs can be:
2262 minus_one_ptid, to specify any child; a pid PTID, specifying all
2263 lwps of a thread group; or a PTID representing a single lwp. Store
2264 the stop status through the status pointer WSTAT. OPTIONS is
2265 passed to the waitpid call. Return 0 if no event was found and
2266 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2267 was found. Return the PID of the stopped child otherwise. */
2270 linux_wait_for_event_filtered (ptid_t wait_ptid
, ptid_t filter_ptid
,
2271 int *wstatp
, int options
)
2273 struct thread_info
*event_thread
;
2274 struct lwp_info
*event_child
, *requested_child
;
2275 sigset_t block_mask
, prev_mask
;
2278 /* N.B. event_thread points to the thread_info struct that contains
2279 event_child. Keep them in sync. */
2280 event_thread
= NULL
;
2282 requested_child
= NULL
;
2284 /* Check for a lwp with a pending status. */
2286 if (ptid_equal (filter_ptid
, minus_one_ptid
) || ptid_is_pid (filter_ptid
))
2288 event_thread
= (struct thread_info
*)
2289 find_inferior (&all_threads
, status_pending_p_callback
, &filter_ptid
);
2290 if (event_thread
!= NULL
)
2291 event_child
= get_thread_lwp (event_thread
);
2292 if (debug_threads
&& event_thread
)
2293 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread
));
2295 else if (!ptid_equal (filter_ptid
, null_ptid
))
2297 requested_child
= find_lwp_pid (filter_ptid
);
2299 if (stopping_threads
== NOT_STOPPING_THREADS
2300 && requested_child
->status_pending_p
2301 && requested_child
->collecting_fast_tracepoint
)
2303 enqueue_one_deferred_signal (requested_child
,
2304 &requested_child
->status_pending
);
2305 requested_child
->status_pending_p
= 0;
2306 requested_child
->status_pending
= 0;
2307 linux_resume_one_lwp (requested_child
, 0, 0, NULL
);
2310 if (requested_child
->suspended
2311 && requested_child
->status_pending_p
)
2313 internal_error (__FILE__
, __LINE__
,
2314 "requesting an event out of a"
2315 " suspended child?");
2318 if (requested_child
->status_pending_p
)
2320 event_child
= requested_child
;
2321 event_thread
= get_lwp_thread (event_child
);
2325 if (event_child
!= NULL
)
2328 debug_printf ("Got an event from pending child %ld (%04x)\n",
2329 lwpid_of (event_thread
), event_child
->status_pending
);
2330 *wstatp
= event_child
->status_pending
;
2331 event_child
->status_pending_p
= 0;
2332 event_child
->status_pending
= 0;
2333 current_thread
= event_thread
;
2334 return lwpid_of (event_thread
);
2337 /* But if we don't find a pending event, we'll have to wait.
2339 We only enter this loop if no process has a pending wait status.
2340 Thus any action taken in response to a wait status inside this
2341 loop is responding as soon as we detect the status, not after any
2344 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2345 all signals while here. */
2346 sigfillset (&block_mask
);
2347 sigprocmask (SIG_BLOCK
, &block_mask
, &prev_mask
);
2349 /* Always pull all events out of the kernel. We'll randomly select
2350 an event LWP out of all that have events, to prevent
2352 while (event_child
== NULL
)
2356 /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
2359 - If the thread group leader exits while other threads in the
2360 thread group still exist, waitpid(TGID, ...) hangs. That
2361 waitpid won't return an exit status until the other threads
2362 in the group are reaped.
2364 - When a non-leader thread execs, that thread just vanishes
2365 without reporting an exit (so we'd hang if we waited for it
2366 explicitly in that case). The exec event is reported to
2367 the TGID pid (although we don't currently enable exec
2370 ret
= my_waitpid (-1, wstatp
, options
| WNOHANG
);
2373 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2374 ret
, errno
? strerror (errno
) : "ERRNO-OK");
2380 debug_printf ("LLW: waitpid %ld received %s\n",
2381 (long) ret
, status_to_str (*wstatp
));
2384 /* Filter all events. IOW, leave all events pending. We'll
2385 randomly select an event LWP out of all that have events
2387 linux_low_filter_event (ret
, *wstatp
);
2388 /* Retry until nothing comes out of waitpid. A single
2389 SIGCHLD can indicate more than one child stopped. */
2393 /* Now that we've pulled all events out of the kernel, resume
2394 LWPs that don't have an interesting event to report. */
2395 if (stopping_threads
== NOT_STOPPING_THREADS
)
2396 for_each_inferior (&all_threads
, resume_stopped_resumed_lwps
);
2398 /* ... and find an LWP with a status to report to the core, if
2400 event_thread
= (struct thread_info
*)
2401 find_inferior (&all_threads
, status_pending_p_callback
, &filter_ptid
);
2402 if (event_thread
!= NULL
)
2404 event_child
= get_thread_lwp (event_thread
);
2405 *wstatp
= event_child
->status_pending
;
2406 event_child
->status_pending_p
= 0;
2407 event_child
->status_pending
= 0;
2411 /* Check for zombie thread group leaders. Those can't be reaped
2412 until all other threads in the thread group are. */
2413 check_zombie_leaders ();
2415 /* If there are no resumed children left in the set of LWPs we
2416 want to wait for, bail. We can't just block in
2417 waitpid/sigsuspend, because lwps might have been left stopped
2418 in trace-stop state, and we'd be stuck forever waiting for
2419 their status to change (which would only happen if we resumed
2420 them). Even if WNOHANG is set, this return code is preferred
2421 over 0 (below), as it is more detailed. */
2422 if ((find_inferior (&all_threads
,
2423 not_stopped_callback
,
2424 &wait_ptid
) == NULL
))
2427 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2428 sigprocmask (SIG_SETMASK
, &prev_mask
, NULL
);
2432 /* No interesting event to report to the caller. */
2433 if ((options
& WNOHANG
))
2436 debug_printf ("WNOHANG set, no event found\n");
2438 sigprocmask (SIG_SETMASK
, &prev_mask
, NULL
);
2442 /* Block until we get an event reported with SIGCHLD. */
2444 debug_printf ("sigsuspend'ing\n");
2446 sigsuspend (&prev_mask
);
2447 sigprocmask (SIG_SETMASK
, &prev_mask
, NULL
);
2451 sigprocmask (SIG_SETMASK
, &prev_mask
, NULL
);
2453 current_thread
= event_thread
;
2455 /* Check for thread exit. */
2456 if (! WIFSTOPPED (*wstatp
))
2458 gdb_assert (last_thread_of_process_p (pid_of (event_thread
)));
2461 debug_printf ("LWP %d is the last lwp of process. "
2462 "Process %ld exiting.\n",
2463 pid_of (event_thread
), lwpid_of (event_thread
));
2464 return lwpid_of (event_thread
);
2467 return lwpid_of (event_thread
);
2470 /* Wait for an event from child(ren) PTID. PTIDs can be:
2471 minus_one_ptid, to specify any child; a pid PTID, specifying all
2472 lwps of a thread group; or a PTID representing a single lwp. Store
2473 the stop status through the status pointer WSTAT. OPTIONS is
2474 passed to the waitpid call. Return 0 if no event was found and
2475 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2476 was found. Return the PID of the stopped child otherwise. */
2479 linux_wait_for_event (ptid_t ptid
, int *wstatp
, int options
)
2481 return linux_wait_for_event_filtered (ptid
, ptid
, wstatp
, options
);
2484 /* Count the LWP's that have had events. */
2487 count_events_callback (struct inferior_list_entry
*entry
, void *data
)
2489 struct thread_info
*thread
= (struct thread_info
*) entry
;
2490 struct lwp_info
*lp
= get_thread_lwp (thread
);
2493 gdb_assert (count
!= NULL
);
2495 /* Count only resumed LWPs that have an event pending. */
2496 if (thread
->last_status
.kind
== TARGET_WAITKIND_IGNORE
2497 && lp
->status_pending_p
)
2503 /* Select the LWP (if any) that is currently being single-stepped. */
2506 select_singlestep_lwp_callback (struct inferior_list_entry
*entry
, void *data
)
2508 struct thread_info
*thread
= (struct thread_info
*) entry
;
2509 struct lwp_info
*lp
= get_thread_lwp (thread
);
2511 if (thread
->last_status
.kind
== TARGET_WAITKIND_IGNORE
2512 && thread
->last_resume_kind
== resume_step
2513 && lp
->status_pending_p
)
2519 /* Select the Nth LWP that has had an event. */
2522 select_event_lwp_callback (struct inferior_list_entry
*entry
, void *data
)
2524 struct thread_info
*thread
= (struct thread_info
*) entry
;
2525 struct lwp_info
*lp
= get_thread_lwp (thread
);
2526 int *selector
= data
;
2528 gdb_assert (selector
!= NULL
);
2530 /* Select only resumed LWPs that have an event pending. */
2531 if (thread
->last_status
.kind
== TARGET_WAITKIND_IGNORE
2532 && lp
->status_pending_p
)
2533 if ((*selector
)-- == 0)
2539 /* Select one LWP out of those that have events pending. */
2542 select_event_lwp (struct lwp_info
**orig_lp
)
2545 int random_selector
;
2546 struct thread_info
*event_thread
= NULL
;
2548 /* In all-stop, give preference to the LWP that is being
2549 single-stepped. There will be at most one, and it's the LWP that
2550 the core is most interested in. If we didn't do this, then we'd
2551 have to handle pending step SIGTRAPs somehow in case the core
2552 later continues the previously-stepped thread, otherwise we'd
2553 report the pending SIGTRAP, and the core, not having stepped the
2554 thread, wouldn't understand what the trap was for, and therefore
2555 would report it to the user as a random signal. */
2559 = (struct thread_info
*) find_inferior (&all_threads
,
2560 select_singlestep_lwp_callback
,
2562 if (event_thread
!= NULL
)
2565 debug_printf ("SEL: Select single-step %s\n",
2566 target_pid_to_str (ptid_of (event_thread
)));
2569 if (event_thread
== NULL
)
2571 /* No single-stepping LWP. Select one at random, out of those
2572 which have had events. */
2574 /* First see how many events we have. */
2575 find_inferior (&all_threads
, count_events_callback
, &num_events
);
2576 gdb_assert (num_events
> 0);
2578 /* Now randomly pick a LWP out of those that have had
2580 random_selector
= (int)
2581 ((num_events
* (double) rand ()) / (RAND_MAX
+ 1.0));
2583 if (debug_threads
&& num_events
> 1)
2584 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2585 num_events
, random_selector
);
2588 = (struct thread_info
*) find_inferior (&all_threads
,
2589 select_event_lwp_callback
,
2593 if (event_thread
!= NULL
)
2595 struct lwp_info
*event_lp
= get_thread_lwp (event_thread
);
2597 /* Switch the event LWP. */
2598 *orig_lp
= event_lp
;
2602 /* Decrement the suspend count of an LWP. */
2605 unsuspend_one_lwp (struct inferior_list_entry
*entry
, void *except
)
2607 struct thread_info
*thread
= (struct thread_info
*) entry
;
2608 struct lwp_info
*lwp
= get_thread_lwp (thread
);
2610 /* Ignore EXCEPT. */
2616 gdb_assert (lwp
->suspended
>= 0);
2620 /* Decrement the suspend count of all LWPs, except EXCEPT, if non
2624 unsuspend_all_lwps (struct lwp_info
*except
)
2626 find_inferior (&all_threads
, unsuspend_one_lwp
, except
);
2629 static void move_out_of_jump_pad_callback (struct inferior_list_entry
*entry
);
2630 static int stuck_in_jump_pad_callback (struct inferior_list_entry
*entry
,
2632 static int lwp_running (struct inferior_list_entry
*entry
, void *data
);
2633 static ptid_t
linux_wait_1 (ptid_t ptid
,
2634 struct target_waitstatus
*ourstatus
,
2635 int target_options
);
2637 /* Stabilize threads (move out of jump pads).
2639 If a thread is midway collecting a fast tracepoint, we need to
2640 finish the collection and move it out of the jump pad before
2641 reporting the signal.
2643 This avoids recursion while collecting (when a signal arrives
2644 midway, and the signal handler itself collects), which would trash
2645 the trace buffer. In case the user set a breakpoint in a signal
2646 handler, this avoids the backtrace showing the jump pad, etc..
2647 Most importantly, there are certain things we can't do safely if
2648 threads are stopped in a jump pad (or in its callee's). For
2651 - starting a new trace run. A thread still collecting the
2652 previous run, could trash the trace buffer when resumed. The trace
2653 buffer control structures would have been reset but the thread had
2654 no way to tell. The thread could even midway memcpy'ing to the
2655 buffer, which would mean that when resumed, it would clobber the
2656 trace buffer that had been set for a new run.
2658 - we can't rewrite/reuse the jump pads for new tracepoints
2659 safely. Say you do tstart while a thread is stopped midway while
2660 collecting. When the thread is later resumed, it finishes the
2661 collection, and returns to the jump pad, to execute the original
2662 instruction that was under the tracepoint jump at the time the
2663 older run had been started. If the jump pad had been rewritten
2664 since for something else in the new run, the thread would now
2665 execute the wrong / random instructions. */
2668 linux_stabilize_threads (void)
2670 struct thread_info
*saved_thread
;
2671 struct thread_info
*thread_stuck
;
2674 = (struct thread_info
*) find_inferior (&all_threads
,
2675 stuck_in_jump_pad_callback
,
2677 if (thread_stuck
!= NULL
)
2680 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2681 lwpid_of (thread_stuck
));
2685 saved_thread
= current_thread
;
2687 stabilizing_threads
= 1;
2690 for_each_inferior (&all_threads
, move_out_of_jump_pad_callback
);
2692 /* Loop until all are stopped out of the jump pads. */
2693 while (find_inferior (&all_threads
, lwp_running
, NULL
) != NULL
)
2695 struct target_waitstatus ourstatus
;
2696 struct lwp_info
*lwp
;
2699 /* Note that we go through the full wait even loop. While
2700 moving threads out of jump pad, we need to be able to step
2701 over internal breakpoints and such. */
2702 linux_wait_1 (minus_one_ptid
, &ourstatus
, 0);
2704 if (ourstatus
.kind
== TARGET_WAITKIND_STOPPED
)
2706 lwp
= get_thread_lwp (current_thread
);
2711 if (ourstatus
.value
.sig
!= GDB_SIGNAL_0
2712 || current_thread
->last_resume_kind
== resume_stop
)
2714 wstat
= W_STOPCODE (gdb_signal_to_host (ourstatus
.value
.sig
));
2715 enqueue_one_deferred_signal (lwp
, &wstat
);
2720 find_inferior (&all_threads
, unsuspend_one_lwp
, NULL
);
2722 stabilizing_threads
= 0;
2724 current_thread
= saved_thread
;
2729 = (struct thread_info
*) find_inferior (&all_threads
,
2730 stuck_in_jump_pad_callback
,
2732 if (thread_stuck
!= NULL
)
2733 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2734 lwpid_of (thread_stuck
));
2738 static void async_file_mark (void);
2740 /* Convenience function that is called when the kernel reports an
2741 event that is not passed out to GDB. */
2744 ignore_event (struct target_waitstatus
*ourstatus
)
2746 /* If we got an event, there may still be others, as a single
2747 SIGCHLD can indicate more than one child stopped. This forces
2748 another target_wait call. */
2751 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
2755 /* Wait for process, returns status. */
2758 linux_wait_1 (ptid_t ptid
,
2759 struct target_waitstatus
*ourstatus
, int target_options
)
2762 struct lwp_info
*event_child
;
2765 int step_over_finished
;
2766 int bp_explains_trap
;
2767 int maybe_internal_trap
;
2775 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid
));
2778 /* Translate generic target options into linux options. */
2780 if (target_options
& TARGET_WNOHANG
)
2783 bp_explains_trap
= 0;
2786 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
2788 if (ptid_equal (step_over_bkpt
, null_ptid
))
2789 pid
= linux_wait_for_event (ptid
, &w
, options
);
2793 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2794 target_pid_to_str (step_over_bkpt
));
2795 pid
= linux_wait_for_event (step_over_bkpt
, &w
, options
& ~WNOHANG
);
2800 gdb_assert (target_options
& TARGET_WNOHANG
);
2804 debug_printf ("linux_wait_1 ret = null_ptid, "
2805 "TARGET_WAITKIND_IGNORE\n");
2809 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
2816 debug_printf ("linux_wait_1 ret = null_ptid, "
2817 "TARGET_WAITKIND_NO_RESUMED\n");
2821 ourstatus
->kind
= TARGET_WAITKIND_NO_RESUMED
;
2825 event_child
= get_thread_lwp (current_thread
);
2827 /* linux_wait_for_event only returns an exit status for the last
2828 child of a process. Report it. */
2829 if (WIFEXITED (w
) || WIFSIGNALED (w
))
2833 ourstatus
->kind
= TARGET_WAITKIND_EXITED
;
2834 ourstatus
->value
.integer
= WEXITSTATUS (w
);
2838 debug_printf ("linux_wait_1 ret = %s, exited with "
2840 target_pid_to_str (ptid_of (current_thread
)),
2847 ourstatus
->kind
= TARGET_WAITKIND_SIGNALLED
;
2848 ourstatus
->value
.sig
= gdb_signal_from_host (WTERMSIG (w
));
2852 debug_printf ("linux_wait_1 ret = %s, terminated with "
2854 target_pid_to_str (ptid_of (current_thread
)),
2860 return ptid_of (current_thread
);
2863 /* If step-over executes a breakpoint instruction, it means a
2864 gdb/gdbserver breakpoint had been planted on top of a permanent
2865 breakpoint. The PC has been adjusted by
2866 check_stopped_by_breakpoint to point at the breakpoint address.
2867 Advance the PC manually past the breakpoint, otherwise the
2868 program would keep trapping the permanent breakpoint forever. */
2869 if (!ptid_equal (step_over_bkpt
, null_ptid
)
2870 && event_child
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
)
2872 unsigned int increment_pc
= the_low_target
.breakpoint_len
;
2876 debug_printf ("step-over for %s executed software breakpoint\n",
2877 target_pid_to_str (ptid_of (current_thread
)));
2880 if (increment_pc
!= 0)
2882 struct regcache
*regcache
2883 = get_thread_regcache (current_thread
, 1);
2885 event_child
->stop_pc
+= increment_pc
;
2886 (*the_low_target
.set_pc
) (regcache
, event_child
->stop_pc
);
2888 if (!(*the_low_target
.breakpoint_at
) (event_child
->stop_pc
))
2889 event_child
->stop_reason
= TARGET_STOPPED_BY_NO_REASON
;
2893 /* If this event was not handled before, and is not a SIGTRAP, we
2894 report it. SIGILL and SIGSEGV are also treated as traps in case
2895 a breakpoint is inserted at the current PC. If this target does
2896 not support internal breakpoints at all, we also report the
2897 SIGTRAP without further processing; it's of no concern to us. */
2899 = (supports_breakpoints ()
2900 && (WSTOPSIG (w
) == SIGTRAP
2901 || ((WSTOPSIG (w
) == SIGILL
2902 || WSTOPSIG (w
) == SIGSEGV
)
2903 && (*the_low_target
.breakpoint_at
) (event_child
->stop_pc
))));
2905 if (maybe_internal_trap
)
2907 /* Handle anything that requires bookkeeping before deciding to
2908 report the event or continue waiting. */
2910 /* First check if we can explain the SIGTRAP with an internal
2911 breakpoint, or if we should possibly report the event to GDB.
2912 Do this before anything that may remove or insert a
2914 bp_explains_trap
= breakpoint_inserted_here (event_child
->stop_pc
);
2916 /* We have a SIGTRAP, possibly a step-over dance has just
2917 finished. If so, tweak the state machine accordingly,
2918 reinsert breakpoints and delete any reinsert (software
2919 single-step) breakpoints. */
2920 step_over_finished
= finish_step_over (event_child
);
2922 /* Now invoke the callbacks of any internal breakpoints there. */
2923 check_breakpoints (event_child
->stop_pc
);
2925 /* Handle tracepoint data collecting. This may overflow the
2926 trace buffer, and cause a tracing stop, removing
2928 trace_event
= handle_tracepoints (event_child
);
2930 if (bp_explains_trap
)
2932 /* If we stepped or ran into an internal breakpoint, we've
2933 already handled it. So next time we resume (from this
2934 PC), we should step over it. */
2936 debug_printf ("Hit a gdbserver breakpoint.\n");
2938 if (breakpoint_here (event_child
->stop_pc
))
2939 event_child
->need_step_over
= 1;
2944 /* We have some other signal, possibly a step-over dance was in
2945 progress, and it should be cancelled too. */
2946 step_over_finished
= finish_step_over (event_child
);
2949 /* We have all the data we need. Either report the event to GDB, or
2950 resume threads and keep waiting for more. */
2952 /* If we're collecting a fast tracepoint, finish the collection and
2953 move out of the jump pad before delivering a signal. See
2954 linux_stabilize_threads. */
2957 && WSTOPSIG (w
) != SIGTRAP
2958 && supports_fast_tracepoints ()
2959 && agent_loaded_p ())
2962 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2963 "to defer or adjust it.\n",
2964 WSTOPSIG (w
), lwpid_of (current_thread
));
2966 /* Allow debugging the jump pad itself. */
2967 if (current_thread
->last_resume_kind
!= resume_step
2968 && maybe_move_out_of_jump_pad (event_child
, &w
))
2970 enqueue_one_deferred_signal (event_child
, &w
);
2973 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
2974 WSTOPSIG (w
), lwpid_of (current_thread
));
2976 linux_resume_one_lwp (event_child
, 0, 0, NULL
);
2978 return ignore_event (ourstatus
);
2982 if (event_child
->collecting_fast_tracepoint
)
2985 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2986 "Check if we're already there.\n",
2987 lwpid_of (current_thread
),
2988 event_child
->collecting_fast_tracepoint
);
2992 event_child
->collecting_fast_tracepoint
2993 = linux_fast_tracepoint_collecting (event_child
, NULL
);
2995 if (event_child
->collecting_fast_tracepoint
!= 1)
2997 /* No longer need this breakpoint. */
2998 if (event_child
->exit_jump_pad_bkpt
!= NULL
)
3001 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
3002 "stopping all threads momentarily.\n");
3004 /* Other running threads could hit this breakpoint.
3005 We don't handle moribund locations like GDB does,
3006 instead we always pause all threads when removing
3007 breakpoints, so that any step-over or
3008 decr_pc_after_break adjustment is always taken
3009 care of while the breakpoint is still
3011 stop_all_lwps (1, event_child
);
3013 delete_breakpoint (event_child
->exit_jump_pad_bkpt
);
3014 event_child
->exit_jump_pad_bkpt
= NULL
;
3016 unstop_all_lwps (1, event_child
);
3018 gdb_assert (event_child
->suspended
>= 0);
3022 if (event_child
->collecting_fast_tracepoint
== 0)
3025 debug_printf ("fast tracepoint finished "
3026 "collecting successfully.\n");
3028 /* We may have a deferred signal to report. */
3029 if (dequeue_one_deferred_signal (event_child
, &w
))
3032 debug_printf ("dequeued one signal.\n");
3037 debug_printf ("no deferred signals.\n");
3039 if (stabilizing_threads
)
3041 ourstatus
->kind
= TARGET_WAITKIND_STOPPED
;
3042 ourstatus
->value
.sig
= GDB_SIGNAL_0
;
3046 debug_printf ("linux_wait_1 ret = %s, stopped "
3047 "while stabilizing threads\n",
3048 target_pid_to_str (ptid_of (current_thread
)));
3052 return ptid_of (current_thread
);
3058 /* Check whether GDB would be interested in this event. */
3060 /* If GDB is not interested in this signal, don't stop other
3061 threads, and don't report it to GDB. Just resume the inferior
3062 right away. We do this for threading-related signals as well as
3063 any that GDB specifically requested we ignore. But never ignore
3064 SIGSTOP if we sent it ourselves, and do not ignore signals when
3065 stepping - they may require special handling to skip the signal
3066 handler. Also never ignore signals that could be caused by a
3068 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
3071 && current_thread
->last_resume_kind
!= resume_step
3073 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3074 (current_process ()->priv
->thread_db
!= NULL
3075 && (WSTOPSIG (w
) == __SIGRTMIN
3076 || WSTOPSIG (w
) == __SIGRTMIN
+ 1))
3079 (pass_signals
[gdb_signal_from_host (WSTOPSIG (w
))]
3080 && !(WSTOPSIG (w
) == SIGSTOP
3081 && current_thread
->last_resume_kind
== resume_stop
)
3082 && !linux_wstatus_maybe_breakpoint (w
))))
3084 siginfo_t info
, *info_p
;
3087 debug_printf ("Ignored signal %d for LWP %ld.\n",
3088 WSTOPSIG (w
), lwpid_of (current_thread
));
3090 if (ptrace (PTRACE_GETSIGINFO
, lwpid_of (current_thread
),
3091 (PTRACE_TYPE_ARG3
) 0, &info
) == 0)
3095 linux_resume_one_lwp (event_child
, event_child
->stepping
,
3096 WSTOPSIG (w
), info_p
);
3097 return ignore_event (ourstatus
);
3100 /* Note that all addresses are always "out of the step range" when
3101 there's no range to begin with. */
3102 in_step_range
= lwp_in_step_range (event_child
);
3104 /* If GDB wanted this thread to single step, and the thread is out
3105 of the step range, we always want to report the SIGTRAP, and let
3106 GDB handle it. Watchpoints should always be reported. So should
3107 signals we can't explain. A SIGTRAP we can't explain could be a
3108 GDB breakpoint --- we may or not support Z0 breakpoints. If we
3109 do, we're be able to handle GDB breakpoints on top of internal
3110 breakpoints, by handling the internal breakpoint and still
3111 reporting the event to GDB. If we don't, we're out of luck, GDB
3112 won't see the breakpoint hit. */
3113 report_to_gdb
= (!maybe_internal_trap
3114 || (current_thread
->last_resume_kind
== resume_step
3116 || event_child
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
3117 || (!step_over_finished
&& !in_step_range
3118 && !bp_explains_trap
&& !trace_event
)
3119 || (gdb_breakpoint_here (event_child
->stop_pc
)
3120 && gdb_condition_true_at_breakpoint (event_child
->stop_pc
)
3121 && gdb_no_commands_at_breakpoint (event_child
->stop_pc
))
3122 || event_child
->waitstatus
.kind
!= TARGET_WAITKIND_IGNORE
);
3124 run_breakpoint_commands (event_child
->stop_pc
);
3126 /* We found no reason GDB would want us to stop. We either hit one
3127 of our own breakpoints, or finished an internal step GDB
3128 shouldn't know about. */
3133 if (bp_explains_trap
)
3134 debug_printf ("Hit a gdbserver breakpoint.\n");
3135 if (step_over_finished
)
3136 debug_printf ("Step-over finished.\n");
3138 debug_printf ("Tracepoint event.\n");
3139 if (lwp_in_step_range (event_child
))
3140 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3141 paddress (event_child
->stop_pc
),
3142 paddress (event_child
->step_range_start
),
3143 paddress (event_child
->step_range_end
));
3146 /* We're not reporting this breakpoint to GDB, so apply the
3147 decr_pc_after_break adjustment to the inferior's regcache
3150 if (the_low_target
.set_pc
!= NULL
)
3152 struct regcache
*regcache
3153 = get_thread_regcache (current_thread
, 1);
3154 (*the_low_target
.set_pc
) (regcache
, event_child
->stop_pc
);
3157 /* We may have finished stepping over a breakpoint. If so,
3158 we've stopped and suspended all LWPs momentarily except the
3159 stepping one. This is where we resume them all again. We're
3160 going to keep waiting, so use proceed, which handles stepping
3161 over the next breakpoint. */
3163 debug_printf ("proceeding all threads.\n");
3165 if (step_over_finished
)
3166 unsuspend_all_lwps (event_child
);
3168 proceed_all_lwps ();
3169 return ignore_event (ourstatus
);
3174 if (event_child
->waitstatus
.kind
!= TARGET_WAITKIND_IGNORE
)
3178 str
= target_waitstatus_to_string (&event_child
->waitstatus
);
3179 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3180 lwpid_of (get_lwp_thread (event_child
)), str
);
3183 if (current_thread
->last_resume_kind
== resume_step
)
3185 if (event_child
->step_range_start
== event_child
->step_range_end
)
3186 debug_printf ("GDB wanted to single-step, reporting event.\n");
3187 else if (!lwp_in_step_range (event_child
))
3188 debug_printf ("Out of step range, reporting event.\n");
3190 if (event_child
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
)
3191 debug_printf ("Stopped by watchpoint.\n");
3192 else if (gdb_breakpoint_here (event_child
->stop_pc
))
3193 debug_printf ("Stopped by GDB breakpoint.\n");
3195 debug_printf ("Hit a non-gdbserver trap event.\n");
3198 /* Alright, we're going to report a stop. */
3200 if (!stabilizing_threads
)
3202 /* In all-stop, stop all threads. */
3204 stop_all_lwps (0, NULL
);
3206 /* If we're not waiting for a specific LWP, choose an event LWP
3207 from among those that have had events. Giving equal priority
3208 to all LWPs that have had events helps prevent
3210 if (ptid_equal (ptid
, minus_one_ptid
))
3212 event_child
->status_pending_p
= 1;
3213 event_child
->status_pending
= w
;
3215 select_event_lwp (&event_child
);
3217 /* current_thread and event_child must stay in sync. */
3218 current_thread
= get_lwp_thread (event_child
);
3220 event_child
->status_pending_p
= 0;
3221 w
= event_child
->status_pending
;
3224 if (step_over_finished
)
3228 /* If we were doing a step-over, all other threads but
3229 the stepping one had been paused in start_step_over,
3230 with their suspend counts incremented. We don't want
3231 to do a full unstop/unpause, because we're in
3232 all-stop mode (so we want threads stopped), but we
3233 still need to unsuspend the other threads, to
3234 decrement their `suspended' count back. */
3235 unsuspend_all_lwps (event_child
);
3239 /* If we just finished a step-over, then all threads had
3240 been momentarily paused. In all-stop, that's fine,
3241 we want threads stopped by now anyway. In non-stop,
3242 we need to re-resume threads that GDB wanted to be
3244 unstop_all_lwps (1, event_child
);
3248 /* Stabilize threads (move out of jump pads). */
3250 stabilize_threads ();
3254 /* If we just finished a step-over, then all threads had been
3255 momentarily paused. In all-stop, that's fine, we want
3256 threads stopped by now anyway. In non-stop, we need to
3257 re-resume threads that GDB wanted to be running. */
3258 if (step_over_finished
)
3259 unstop_all_lwps (1, event_child
);
3262 if (event_child
->waitstatus
.kind
!= TARGET_WAITKIND_IGNORE
)
3264 /* If the reported event is an exit, fork, vfork or exec, let
3266 *ourstatus
= event_child
->waitstatus
;
3267 /* Clear the event lwp's waitstatus since we handled it already. */
3268 event_child
->waitstatus
.kind
= TARGET_WAITKIND_IGNORE
;
3271 ourstatus
->kind
= TARGET_WAITKIND_STOPPED
;
3273 /* Now that we've selected our final event LWP, un-adjust its PC if
3274 it was a software breakpoint, and the client doesn't know we can
3275 adjust the breakpoint ourselves. */
3276 if (event_child
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
3277 && !swbreak_feature
)
3279 int decr_pc
= the_low_target
.decr_pc_after_break
;
3283 struct regcache
*regcache
3284 = get_thread_regcache (current_thread
, 1);
3285 (*the_low_target
.set_pc
) (regcache
, event_child
->stop_pc
+ decr_pc
);
3289 if (current_thread
->last_resume_kind
== resume_stop
3290 && WSTOPSIG (w
) == SIGSTOP
)
3292 /* A thread that has been requested to stop by GDB with vCont;t,
3293 and it stopped cleanly, so report as SIG0. The use of
3294 SIGSTOP is an implementation detail. */
3295 ourstatus
->value
.sig
= GDB_SIGNAL_0
;
3297 else if (current_thread
->last_resume_kind
== resume_stop
3298 && WSTOPSIG (w
) != SIGSTOP
)
3300 /* A thread that has been requested to stop by GDB with vCont;t,
3301 but, it stopped for other reasons. */
3302 ourstatus
->value
.sig
= gdb_signal_from_host (WSTOPSIG (w
));
3304 else if (ourstatus
->kind
== TARGET_WAITKIND_STOPPED
)
3306 ourstatus
->value
.sig
= gdb_signal_from_host (WSTOPSIG (w
));
3309 gdb_assert (ptid_equal (step_over_bkpt
, null_ptid
));
3313 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3314 target_pid_to_str (ptid_of (current_thread
)),
3315 ourstatus
->kind
, ourstatus
->value
.sig
);
3319 return ptid_of (current_thread
);
3322 /* Get rid of any pending event in the pipe. */
3324 async_file_flush (void)
3330 ret
= read (linux_event_pipe
[0], &buf
, 1);
3331 while (ret
>= 0 || (ret
== -1 && errno
== EINTR
));
3334 /* Put something in the pipe, so the event loop wakes up. */
3336 async_file_mark (void)
3340 async_file_flush ();
3343 ret
= write (linux_event_pipe
[1], "+", 1);
3344 while (ret
== 0 || (ret
== -1 && errno
== EINTR
));
3346 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3347 be awakened anyway. */
3351 linux_wait (ptid_t ptid
,
3352 struct target_waitstatus
*ourstatus
, int target_options
)
3356 /* Flush the async file first. */
3357 if (target_is_async_p ())
3358 async_file_flush ();
3362 event_ptid
= linux_wait_1 (ptid
, ourstatus
, target_options
);
3364 while ((target_options
& TARGET_WNOHANG
) == 0
3365 && ptid_equal (event_ptid
, null_ptid
)
3366 && ourstatus
->kind
== TARGET_WAITKIND_IGNORE
);
3368 /* If at least one stop was reported, there may be more. A single
3369 SIGCHLD can signal more than one child stop. */
3370 if (target_is_async_p ()
3371 && (target_options
& TARGET_WNOHANG
) != 0
3372 && !ptid_equal (event_ptid
, null_ptid
))
3378 /* Send a signal to an LWP. */
static int
kill_lwp (unsigned long lwpid, int signo)
{
  /* Use tkill, if possible, in case we are using nptl threads.  If tkill
     fails, then we are not using nptl threads and we should be using kill.  */

#ifdef __NR_tkill
  {
    static int tkill_failed;

    if (!tkill_failed)
      {
	int ret;

	errno = 0;
	ret = syscall (__NR_tkill, lwpid, signo);
	if (errno != ENOSYS)
	  return ret;
	/* Remember the failure so we only probe the syscall once.  */
	tkill_failed = 1;
      }
  }
#endif

  return kill (lwpid, signo);
}
void
linux_stop_lwp (struct lwp_info *lwp)
{
  /* Stop the LWP by queueing a SIGSTOP for it.  */
  send_sigstop (lwp);
}
3413 send_sigstop (struct lwp_info
*lwp
)
3417 pid
= lwpid_of (get_lwp_thread (lwp
));
3419 /* If we already have a pending stop signal for this process, don't
3421 if (lwp
->stop_expected
)
3424 debug_printf ("Have pending sigstop for lwp %d\n", pid
);
3430 debug_printf ("Sending sigstop to lwp %d\n", pid
);
3432 lwp
->stop_expected
= 1;
3433 kill_lwp (pid
, SIGSTOP
);
3437 send_sigstop_callback (struct inferior_list_entry
*entry
, void *except
)
3439 struct thread_info
*thread
= (struct thread_info
*) entry
;
3440 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3442 /* Ignore EXCEPT. */
3453 /* Increment the suspend count of an LWP, and stop it, if not stopped
3456 suspend_and_send_sigstop_callback (struct inferior_list_entry
*entry
,
3459 struct thread_info
*thread
= (struct thread_info
*) entry
;
3460 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3462 /* Ignore EXCEPT. */
3468 return send_sigstop_callback (entry
, except
);
3472 mark_lwp_dead (struct lwp_info
*lwp
, int wstat
)
3474 /* Store the exit status for later. */
3475 lwp
->status_pending_p
= 1;
3476 lwp
->status_pending
= wstat
;
3478 /* Store in waitstatus as well, as there's nothing else to process
3480 if (WIFEXITED (wstat
))
3482 lwp
->waitstatus
.kind
= TARGET_WAITKIND_EXITED
;
3483 lwp
->waitstatus
.value
.integer
= WEXITSTATUS (wstat
);
3485 else if (WIFSIGNALED (wstat
))
3487 lwp
->waitstatus
.kind
= TARGET_WAITKIND_SIGNALLED
;
3488 lwp
->waitstatus
.value
.sig
= gdb_signal_from_host (WTERMSIG (wstat
));
3491 /* Prevent trying to stop it. */
3494 /* No further stops are expected from a dead lwp. */
3495 lwp
->stop_expected
= 0;
3498 /* Return true if LWP has exited already, and has a pending exit event
3499 to report to GDB. */
3502 lwp_is_marked_dead (struct lwp_info
*lwp
)
3504 return (lwp
->status_pending_p
3505 && (WIFEXITED (lwp
->status_pending
)
3506 || WIFSIGNALED (lwp
->status_pending
)));
3509 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3512 wait_for_sigstop (void)
3514 struct thread_info
*saved_thread
;
3519 saved_thread
= current_thread
;
3520 if (saved_thread
!= NULL
)
3521 saved_tid
= saved_thread
->entry
.id
;
3523 saved_tid
= null_ptid
; /* avoid bogus unused warning */
3526 debug_printf ("wait_for_sigstop: pulling events\n");
3528 /* Passing NULL_PTID as filter indicates we want all events to be
3529 left pending. Eventually this returns when there are no
3530 unwaited-for children left. */
3531 ret
= linux_wait_for_event_filtered (minus_one_ptid
, null_ptid
,
3533 gdb_assert (ret
== -1);
3535 if (saved_thread
== NULL
|| linux_thread_alive (saved_tid
))
3536 current_thread
= saved_thread
;
3540 debug_printf ("Previously current thread died.\n");
3544 /* We can't change the current inferior behind GDB's back,
3545 otherwise, a subsequent command may apply to the wrong
3547 current_thread
= NULL
;
3551 /* Set a valid thread as current. */
3552 set_desired_thread (0);
3557 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3558 move it out, because we need to report the stop event to GDB. For
3559 example, if the user puts a breakpoint in the jump pad, it's
3560 because she wants to debug it. */
3563 stuck_in_jump_pad_callback (struct inferior_list_entry
*entry
, void *data
)
3565 struct thread_info
*thread
= (struct thread_info
*) entry
;
3566 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3568 gdb_assert (lwp
->suspended
== 0);
3569 gdb_assert (lwp
->stopped
);
3571 /* Allow debugging the jump pad, gdb_collect, etc.. */
3572 return (supports_fast_tracepoints ()
3573 && agent_loaded_p ()
3574 && (gdb_breakpoint_here (lwp
->stop_pc
)
3575 || lwp
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
3576 || thread
->last_resume_kind
== resume_step
)
3577 && linux_fast_tracepoint_collecting (lwp
, NULL
));
3581 move_out_of_jump_pad_callback (struct inferior_list_entry
*entry
)
3583 struct thread_info
*thread
= (struct thread_info
*) entry
;
3584 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3587 gdb_assert (lwp
->suspended
== 0);
3588 gdb_assert (lwp
->stopped
);
3590 wstat
= lwp
->status_pending_p
? &lwp
->status_pending
: NULL
;
3592 /* Allow debugging the jump pad, gdb_collect, etc. */
3593 if (!gdb_breakpoint_here (lwp
->stop_pc
)
3594 && lwp
->stop_reason
!= TARGET_STOPPED_BY_WATCHPOINT
3595 && thread
->last_resume_kind
!= resume_step
3596 && maybe_move_out_of_jump_pad (lwp
, wstat
))
3599 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3604 lwp
->status_pending_p
= 0;
3605 enqueue_one_deferred_signal (lwp
, wstat
);
3608 debug_printf ("Signal %d for LWP %ld deferred "
3610 WSTOPSIG (*wstat
), lwpid_of (thread
));
3613 linux_resume_one_lwp (lwp
, 0, 0, NULL
);
3620 lwp_running (struct inferior_list_entry
*entry
, void *data
)
3622 struct thread_info
*thread
= (struct thread_info
*) entry
;
3623 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3625 if (lwp_is_marked_dead (lwp
))
3632 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3633 If SUSPEND, then also increase the suspend count of every LWP,
3637 stop_all_lwps (int suspend
, struct lwp_info
*except
)
3639 /* Should not be called recursively. */
3640 gdb_assert (stopping_threads
== NOT_STOPPING_THREADS
);
3645 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3646 suspend
? "stop-and-suspend" : "stop",
3648 ? target_pid_to_str (ptid_of (get_lwp_thread (except
)))
3652 stopping_threads
= (suspend
3653 ? STOPPING_AND_SUSPENDING_THREADS
3654 : STOPPING_THREADS
);
3657 find_inferior (&all_threads
, suspend_and_send_sigstop_callback
, except
);
3659 find_inferior (&all_threads
, send_sigstop_callback
, except
);
3660 wait_for_sigstop ();
3661 stopping_threads
= NOT_STOPPING_THREADS
;
3665 debug_printf ("stop_all_lwps done, setting stopping_threads "
3666 "back to !stopping\n");
3671 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
3672 SIGNAL is nonzero, give it that signal. */
3675 linux_resume_one_lwp_throw (struct lwp_info
*lwp
,
3676 int step
, int signal
, siginfo_t
*info
)
3678 struct thread_info
*thread
= get_lwp_thread (lwp
);
3679 struct thread_info
*saved_thread
;
3680 int fast_tp_collecting
;
3681 struct process_info
*proc
= get_thread_process (thread
);
3683 /* Note that target description may not be initialised
3684 (proc->tdesc == NULL) at this point because the program hasn't
3685 stopped at the first instruction yet. It means GDBserver skips
3686 the extra traps from the wrapper program (see option --wrapper).
3687 Code in this function that requires register access should be
3688 guarded by proc->tdesc == NULL or something else. */
3690 if (lwp
->stopped
== 0)
3693 fast_tp_collecting
= lwp
->collecting_fast_tracepoint
;
3695 gdb_assert (!stabilizing_threads
|| fast_tp_collecting
);
3697 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3698 user used the "jump" command, or "set $pc = foo"). */
3699 if (thread
->while_stepping
!= NULL
&& lwp
->stop_pc
!= get_pc (lwp
))
3701 /* Collecting 'while-stepping' actions doesn't make sense
3703 release_while_stepping_state_list (thread
);
3706 /* If we have pending signals or status, and a new signal, enqueue the
3707 signal. Also enqueue the signal if we are waiting to reinsert a
3708 breakpoint; it will be picked up again below. */
3710 && (lwp
->status_pending_p
3711 || lwp
->pending_signals
!= NULL
3712 || lwp
->bp_reinsert
!= 0
3713 || fast_tp_collecting
))
3715 struct pending_signals
*p_sig
;
3716 p_sig
= xmalloc (sizeof (*p_sig
));
3717 p_sig
->prev
= lwp
->pending_signals
;
3718 p_sig
->signal
= signal
;
3720 memset (&p_sig
->info
, 0, sizeof (siginfo_t
));
3722 memcpy (&p_sig
->info
, info
, sizeof (siginfo_t
));
3723 lwp
->pending_signals
= p_sig
;
3726 if (lwp
->status_pending_p
)
3729 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3730 " has pending status\n",
3731 lwpid_of (thread
), step
? "step" : "continue", signal
,
3732 lwp
->stop_expected
? "expected" : "not expected");
3736 saved_thread
= current_thread
;
3737 current_thread
= thread
;
3740 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
3741 lwpid_of (thread
), step
? "step" : "continue", signal
,
3742 lwp
->stop_expected
? "expected" : "not expected");
3744 /* This bit needs some thinking about. If we get a signal that
3745 we must report while a single-step reinsert is still pending,
3746 we often end up resuming the thread. It might be better to
3747 (ew) allow a stack of pending events; then we could be sure that
3748 the reinsert happened right away and not lose any signals.
3750 Making this stack would also shrink the window in which breakpoints are
3751 uninserted (see comment in linux_wait_for_lwp) but not enough for
3752 complete correctness, so it won't solve that problem. It may be
3753 worthwhile just to solve this one, however. */
3754 if (lwp
->bp_reinsert
!= 0)
3757 debug_printf (" pending reinsert at 0x%s\n",
3758 paddress (lwp
->bp_reinsert
));
3760 if (can_hardware_single_step ())
3762 if (fast_tp_collecting
== 0)
3765 fprintf (stderr
, "BAD - reinserting but not stepping.\n");
3767 fprintf (stderr
, "BAD - reinserting and suspended(%d).\n",
3774 /* Postpone any pending signal. It was enqueued above. */
3778 if (fast_tp_collecting
== 1)
3781 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3782 " (exit-jump-pad-bkpt)\n",
3785 /* Postpone any pending signal. It was enqueued above. */
3788 else if (fast_tp_collecting
== 2)
3791 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3792 " single-stepping\n",
3795 if (can_hardware_single_step ())
3799 internal_error (__FILE__
, __LINE__
,
3800 "moving out of jump pad single-stepping"
3801 " not implemented on this target");
3804 /* Postpone any pending signal. It was enqueued above. */
3808 /* If we have while-stepping actions in this thread set it stepping.
3809 If we have a signal to deliver, it may or may not be set to
3810 SIG_IGN, we don't know. Assume so, and allow collecting
3811 while-stepping into a signal handler. A possible smart thing to
3812 do would be to set an internal breakpoint at the signal return
3813 address, continue, and carry on catching this while-stepping
3814 action only when that breakpoint is hit. A future
3816 if (thread
->while_stepping
!= NULL
3817 && can_hardware_single_step ())
3820 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
3825 if (proc
->tdesc
!= NULL
&& the_low_target
.get_pc
!= NULL
)
3827 struct regcache
*regcache
= get_thread_regcache (current_thread
, 1);
3829 lwp
->stop_pc
= (*the_low_target
.get_pc
) (regcache
);
3833 debug_printf (" %s from pc 0x%lx\n", step
? "step" : "continue",
3834 (long) lwp
->stop_pc
);
3838 /* If we have pending signals, consume one unless we are trying to
3839 reinsert a breakpoint or we're trying to finish a fast tracepoint
3841 if (lwp
->pending_signals
!= NULL
3842 && lwp
->bp_reinsert
== 0
3843 && fast_tp_collecting
== 0)
3845 struct pending_signals
**p_sig
;
3847 p_sig
= &lwp
->pending_signals
;
3848 while ((*p_sig
)->prev
!= NULL
)
3849 p_sig
= &(*p_sig
)->prev
;
3851 signal
= (*p_sig
)->signal
;
3852 if ((*p_sig
)->info
.si_signo
!= 0)
3853 ptrace (PTRACE_SETSIGINFO
, lwpid_of (thread
), (PTRACE_TYPE_ARG3
) 0,
3860 if (the_low_target
.prepare_to_resume
!= NULL
)
3861 the_low_target
.prepare_to_resume (lwp
);
3863 regcache_invalidate_thread (thread
);
3865 lwp
->stepping
= step
;
3866 ptrace (step
? PTRACE_SINGLESTEP
: PTRACE_CONT
, lwpid_of (thread
),
3867 (PTRACE_TYPE_ARG3
) 0,
3868 /* Coerce to a uintptr_t first to avoid potential gcc warning
3869 of coercing an 8 byte integer to a 4 byte pointer. */
3870 (PTRACE_TYPE_ARG4
) (uintptr_t) signal
);
3872 current_thread
= saved_thread
;
3874 perror_with_name ("resuming thread");
3876 /* Successfully resumed. Clear state that no longer makes sense,
3877 and mark the LWP as running. Must not do this before resuming
3878 otherwise if that fails other code will be confused. E.g., we'd
3879 later try to stop the LWP and hang forever waiting for a stop
3880 status. Note that we must not throw after this is cleared,
3881 otherwise handle_zombie_lwp_error would get confused. */
3883 lwp
->stop_reason
= TARGET_STOPPED_BY_NO_REASON
;
3886 /* Called when we try to resume a stopped LWP and that errors out. If
3887 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
3888 or about to become), discard the error, clear any pending status
3889 the LWP may have, and return true (we'll collect the exit status
3890 soon enough). Otherwise, return false. */
3893 check_ptrace_stopped_lwp_gone (struct lwp_info
*lp
)
3895 struct thread_info
*thread
= get_lwp_thread (lp
);
3897 /* If we get an error after resuming the LWP successfully, we'd
3898 confuse !T state for the LWP being gone. */
3899 gdb_assert (lp
->stopped
);
3901 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
3902 because even if ptrace failed with ESRCH, the tracee may be "not
3903 yet fully dead", but already refusing ptrace requests. In that
3904 case the tracee has 'R (Running)' state for a little bit
3905 (observed in Linux 3.18). See also the note on ESRCH in the
3906 ptrace(2) man page. Instead, check whether the LWP has any state
3907 other than ptrace-stopped. */
3909 /* Don't assume anything if /proc/PID/status can't be read. */
3910 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread
)) == 0)
3912 lp
->stop_reason
= TARGET_STOPPED_BY_NO_REASON
;
3913 lp
->status_pending_p
= 0;
3919 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
3920 disappears while we try to resume it. */
3923 linux_resume_one_lwp (struct lwp_info
*lwp
,
3924 int step
, int signal
, siginfo_t
*info
)
3928 linux_resume_one_lwp_throw (lwp
, step
, signal
, info
);
3930 CATCH (ex
, RETURN_MASK_ERROR
)
3932 if (!check_ptrace_stopped_lwp_gone (lwp
))
3933 throw_exception (ex
);
/* A vector of GDB resume requests, as passed to linux_resume.  */

struct thread_resume_array
{
  struct thread_resume *resume;
  size_t n;
};
3944 /* This function is called once per thread via find_inferior.
3945 ARG is a pointer to a thread_resume_array struct.
3946 We look up the thread specified by ENTRY in ARG, and mark the thread
3947 with a pointer to the appropriate resume request.
3949 This algorithm is O(threads * resume elements), but resume elements
3950 is small (and will remain small at least until GDB supports thread
3954 linux_set_resume_request (struct inferior_list_entry
*entry
, void *arg
)
3956 struct thread_info
*thread
= (struct thread_info
*) entry
;
3957 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3959 struct thread_resume_array
*r
;
3963 for (ndx
= 0; ndx
< r
->n
; ndx
++)
3965 ptid_t ptid
= r
->resume
[ndx
].thread
;
3966 if (ptid_equal (ptid
, minus_one_ptid
)
3967 || ptid_equal (ptid
, entry
->id
)
3968 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3970 || (ptid_get_pid (ptid
) == pid_of (thread
)
3971 && (ptid_is_pid (ptid
)
3972 || ptid_get_lwp (ptid
) == -1)))
3974 if (r
->resume
[ndx
].kind
== resume_stop
3975 && thread
->last_resume_kind
== resume_stop
)
3978 debug_printf ("already %s LWP %ld at GDB's request\n",
3979 (thread
->last_status
.kind
3980 == TARGET_WAITKIND_STOPPED
)
3988 lwp
->resume
= &r
->resume
[ndx
];
3989 thread
->last_resume_kind
= lwp
->resume
->kind
;
3991 lwp
->step_range_start
= lwp
->resume
->step_range_start
;
3992 lwp
->step_range_end
= lwp
->resume
->step_range_end
;
3994 /* If we had a deferred signal to report, dequeue one now.
3995 This can happen if LWP gets more than one signal while
3996 trying to get out of a jump pad. */
3998 && !lwp
->status_pending_p
3999 && dequeue_one_deferred_signal (lwp
, &lwp
->status_pending
))
4001 lwp
->status_pending_p
= 1;
4004 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4005 "leaving status pending.\n",
4006 WSTOPSIG (lwp
->status_pending
),
4014 /* No resume action for this thread. */
4020 /* find_inferior callback for linux_resume.
4021 Set *FLAG_P if this lwp has an interesting status pending. */
4024 resume_status_pending_p (struct inferior_list_entry
*entry
, void *flag_p
)
4026 struct thread_info
*thread
= (struct thread_info
*) entry
;
4027 struct lwp_info
*lwp
= get_thread_lwp (thread
);
4029 /* LWPs which will not be resumed are not interesting, because
4030 we might not wait for them next time through linux_wait. */
4031 if (lwp
->resume
== NULL
)
4034 if (thread_still_has_status_pending_p (thread
))
4035 * (int *) flag_p
= 1;
4040 /* Return 1 if this lwp that GDB wants running is stopped at an
4041 internal breakpoint that we need to step over. It assumes that any
4042 required STOP_PC adjustment has already been propagated to the
4043 inferior's regcache. */
4046 need_step_over_p (struct inferior_list_entry
*entry
, void *dummy
)
4048 struct thread_info
*thread
= (struct thread_info
*) entry
;
4049 struct lwp_info
*lwp
= get_thread_lwp (thread
);
4050 struct thread_info
*saved_thread
;
4052 struct process_info
*proc
= get_thread_process (thread
);
4054 /* GDBserver is skipping the extra traps from the wrapper program,
4055 don't have to do step over. */
4056 if (proc
->tdesc
== NULL
)
4059 /* LWPs which will not be resumed are not interesting, because we
4060 might not wait for them next time through linux_wait. */
4065 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4070 if (thread
->last_resume_kind
== resume_stop
)
4073 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4079 gdb_assert (lwp
->suspended
>= 0);
4084 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4089 if (!lwp
->need_step_over
)
4092 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread
));
4095 if (lwp
->status_pending_p
)
4098 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4104 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4108 /* If the PC has changed since we stopped, then don't do anything,
4109 and let the breakpoint/tracepoint be hit. This happens if, for
4110 instance, GDB handled the decr_pc_after_break subtraction itself,
4111 GDB is OOL stepping this thread, or the user has issued a "jump"
4112 command, or poked thread's registers herself. */
4113 if (pc
!= lwp
->stop_pc
)
4116 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4117 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4119 paddress (lwp
->stop_pc
), paddress (pc
));
4121 lwp
->need_step_over
= 0;
4125 saved_thread
= current_thread
;
4126 current_thread
= thread
;
4128 /* We can only step over breakpoints we know about. */
4129 if (breakpoint_here (pc
) || fast_tracepoint_jump_here (pc
))
4131 /* Don't step over a breakpoint that GDB expects to hit
4132 though. If the condition is being evaluated on the target's side
4133 and it evaluate to false, step over this breakpoint as well. */
4134 if (gdb_breakpoint_here (pc
)
4135 && gdb_condition_true_at_breakpoint (pc
)
4136 && gdb_no_commands_at_breakpoint (pc
))
4139 debug_printf ("Need step over [LWP %ld]? yes, but found"
4140 " GDB breakpoint at 0x%s; skipping step over\n",
4141 lwpid_of (thread
), paddress (pc
));
4143 current_thread
= saved_thread
;
4149 debug_printf ("Need step over [LWP %ld]? yes, "
4150 "found breakpoint at 0x%s\n",
4151 lwpid_of (thread
), paddress (pc
));
4153 /* We've found an lwp that needs stepping over --- return 1 so
4154 that find_inferior stops looking. */
4155 current_thread
= saved_thread
;
4157 /* If the step over is cancelled, this is set again. */
4158 lwp
->need_step_over
= 0;
4163 current_thread
= saved_thread
;
4166 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4168 lwpid_of (thread
), paddress (pc
));
4173 /* Start a step-over operation on LWP. When LWP stopped at a
4174 breakpoint, to make progress, we need to remove the breakpoint out
4175 of the way. If we let other threads run while we do that, they may
4176 pass by the breakpoint location and miss hitting it. To avoid
4177 that, a step-over momentarily stops all threads while LWP is
4178 single-stepped while the breakpoint is temporarily uninserted from
4179 the inferior. When the single-step finishes, we reinsert the
4180 breakpoint, and let all threads that are supposed to be running,
4183 On targets that don't support hardware single-step, we don't
4184 currently support full software single-stepping. Instead, we only
4185 support stepping over the thread event breakpoint, by asking the
4186 low target where to place a reinsert breakpoint. Since this
4187 routine assumes the breakpoint being stepped over is a thread event
4188 breakpoint, it usually assumes the return address of the current
4189 function is a good enough place to set the reinsert breakpoint. */
4192 start_step_over (struct lwp_info
*lwp
)
4194 struct thread_info
*thread
= get_lwp_thread (lwp
);
4195 struct thread_info
*saved_thread
;
4200 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4203 stop_all_lwps (1, lwp
);
4204 gdb_assert (lwp
->suspended
== 0);
4207 debug_printf ("Done stopping all threads for step-over.\n");
4209 /* Note, we should always reach here with an already adjusted PC,
4210 either by GDB (if we're resuming due to GDB's request), or by our
4211 caller, if we just finished handling an internal breakpoint GDB
4212 shouldn't care about. */
4215 saved_thread
= current_thread
;
4216 current_thread
= thread
;
4218 lwp
->bp_reinsert
= pc
;
4219 uninsert_breakpoints_at (pc
);
4220 uninsert_fast_tracepoint_jumps_at (pc
);
4222 if (can_hardware_single_step ())
4228 CORE_ADDR raddr
= (*the_low_target
.breakpoint_reinsert_addr
) ();
4229 set_reinsert_breakpoint (raddr
);
4233 current_thread
= saved_thread
;
4235 linux_resume_one_lwp (lwp
, step
, 0, NULL
);
4237 /* Require next event from this LWP. */
4238 step_over_bkpt
= thread
->entry
.id
;
4242 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4243 start_step_over, if still there, and delete any reinsert
4244 breakpoints we've set, on non hardware single-step targets. */
4247 finish_step_over (struct lwp_info
*lwp
)
4249 if (lwp
->bp_reinsert
!= 0)
4252 debug_printf ("Finished step over.\n");
4254 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4255 may be no breakpoint to reinsert there by now. */
4256 reinsert_breakpoints_at (lwp
->bp_reinsert
);
4257 reinsert_fast_tracepoint_jumps_at (lwp
->bp_reinsert
);
4259 lwp
->bp_reinsert
= 0;
4261 /* Delete any software-single-step reinsert breakpoints. No
4262 longer needed. We don't have to worry about other threads
4263 hitting this trap, and later not being able to explain it,
4264 because we were stepping over a breakpoint, and we hold all
4265 threads but LWP stopped while doing that. */
4266 if (!can_hardware_single_step ())
4267 delete_reinsert_breakpoints ();
4269 step_over_bkpt
= null_ptid
;
4276 /* This function is called once per thread. We check the thread's resume
4277 request, which will tell us whether to resume, step, or leave the thread
4278 stopped; and what signal, if any, it should be sent.
4280 For threads which we aren't explicitly told otherwise, we preserve
4281 the stepping flag; this is used for stepping over gdbserver-placed
4284 If pending_flags was set in any thread, we queue any needed
4285 signals, since we won't actually resume. We already have a pending
4286 event to report, so we don't need to preserve any step requests;
4287 they should be re-issued if necessary. */
4290 linux_resume_one_thread (struct inferior_list_entry
*entry
, void *arg
)
4292 struct thread_info
*thread
= (struct thread_info
*) entry
;
4293 struct lwp_info
*lwp
= get_thread_lwp (thread
);
4295 int leave_all_stopped
= * (int *) arg
;
4298 if (lwp
->resume
== NULL
)
4301 if (lwp
->resume
->kind
== resume_stop
)
4304 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread
));
4309 debug_printf ("stopping LWP %ld\n", lwpid_of (thread
));
4311 /* Stop the thread, and wait for the event asynchronously,
4312 through the event loop. */
4318 debug_printf ("already stopped LWP %ld\n",
4321 /* The LWP may have been stopped in an internal event that
4322 was not meant to be notified back to GDB (e.g., gdbserver
4323 breakpoint), so we should be reporting a stop event in
4326 /* If the thread already has a pending SIGSTOP, this is a
4327 no-op. Otherwise, something later will presumably resume
4328 the thread and this will cause it to cancel any pending
4329 operation, due to last_resume_kind == resume_stop. If
4330 the thread already has a pending status to report, we
4331 will still report it the next time we wait - see
4332 status_pending_p_callback. */
4334 /* If we already have a pending signal to report, then
4335 there's no need to queue a SIGSTOP, as this means we're
4336 midway through moving the LWP out of the jumppad, and we
4337 will report the pending signal as soon as that is
4339 if (lwp
->pending_signals_to_report
== NULL
)
4343 /* For stop requests, we're done. */
4345 thread
->last_status
.kind
= TARGET_WAITKIND_IGNORE
;
4349 /* If this thread which is about to be resumed has a pending status,
4350 then don't resume any threads - we can just report the pending
4351 status. Make sure to queue any signals that would otherwise be
4352 sent. In all-stop mode, we do this decision based on if *any*
4353 thread has a pending status. If there's a thread that needs the
4354 step-over-breakpoint dance, then don't resume any other thread
4355 but that particular one. */
4356 leave_pending
= (lwp
->status_pending_p
|| leave_all_stopped
);
4361 debug_printf ("resuming LWP %ld\n", lwpid_of (thread
));
4363 step
= (lwp
->resume
->kind
== resume_step
);
4364 linux_resume_one_lwp (lwp
, step
, lwp
->resume
->sig
, NULL
);
4369 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread
));
4371 /* If we have a new signal, enqueue the signal. */
4372 if (lwp
->resume
->sig
!= 0)
4374 struct pending_signals
*p_sig
;
4375 p_sig
= xmalloc (sizeof (*p_sig
));
4376 p_sig
->prev
= lwp
->pending_signals
;
4377 p_sig
->signal
= lwp
->resume
->sig
;
4378 memset (&p_sig
->info
, 0, sizeof (siginfo_t
));
4380 /* If this is the same signal we were previously stopped by,
4381 make sure to queue its siginfo. We can ignore the return
4382 value of ptrace; if it fails, we'll skip
4383 PTRACE_SETSIGINFO. */
4384 if (WIFSTOPPED (lwp
->last_status
)
4385 && WSTOPSIG (lwp
->last_status
) == lwp
->resume
->sig
)
4386 ptrace (PTRACE_GETSIGINFO
, lwpid_of (thread
), (PTRACE_TYPE_ARG3
) 0,
4389 lwp
->pending_signals
= p_sig
;
4393 thread
->last_status
.kind
= TARGET_WAITKIND_IGNORE
;
4399 linux_resume (struct thread_resume
*resume_info
, size_t n
)
4401 struct thread_resume_array array
= { resume_info
, n
};
4402 struct thread_info
*need_step_over
= NULL
;
4404 int leave_all_stopped
;
4409 debug_printf ("linux_resume:\n");
4412 find_inferior (&all_threads
, linux_set_resume_request
, &array
);
4414 /* If there is a thread which would otherwise be resumed, which has
4415 a pending status, then don't resume any threads - we can just
4416 report the pending status. Make sure to queue any signals that
4417 would otherwise be sent. In non-stop mode, we'll apply this
4418 logic to each thread individually. We consume all pending events
4419 before considering to start a step-over (in all-stop). */
4422 find_inferior (&all_threads
, resume_status_pending_p
, &any_pending
);
4424 /* If there is a thread which would otherwise be resumed, which is
4425 stopped at a breakpoint that needs stepping over, then don't
4426 resume any threads - have it step over the breakpoint with all
4427 other threads stopped, then resume all threads again. Make sure
4428 to queue any signals that would otherwise be delivered or
4430 if (!any_pending
&& supports_breakpoints ())
4432 = (struct thread_info
*) find_inferior (&all_threads
,
4433 need_step_over_p
, NULL
);
4435 leave_all_stopped
= (need_step_over
!= NULL
|| any_pending
);
4439 if (need_step_over
!= NULL
)
4440 debug_printf ("Not resuming all, need step over\n");
4441 else if (any_pending
)
4442 debug_printf ("Not resuming, all-stop and found "
4443 "an LWP with pending status\n");
4445 debug_printf ("Resuming, no pending status or step over needed\n");
4448 /* Even if we're leaving threads stopped, queue all signals we'd
4449 otherwise deliver. */
4450 find_inferior (&all_threads
, linux_resume_one_thread
, &leave_all_stopped
);
4453 start_step_over (get_thread_lwp (need_step_over
));
4457 debug_printf ("linux_resume done\n");
4462 /* This function is called once per thread. We check the thread's
4463 last resume request, which will tell us whether to resume, step, or
4464 leave the thread stopped. Any signal the client requested to be
4465 delivered has already been enqueued at this point.
4467 If any thread that GDB wants running is stopped at an internal
4468 breakpoint that needs stepping over, we start a step-over operation
4469 on that particular thread, and leave all others stopped. */
4472 proceed_one_lwp (struct inferior_list_entry
*entry
, void *except
)
4474 struct thread_info
*thread
= (struct thread_info
*) entry
;
4475 struct lwp_info
*lwp
= get_thread_lwp (thread
);
4482 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread
));
4487 debug_printf (" LWP %ld already running\n", lwpid_of (thread
));
4491 if (thread
->last_resume_kind
== resume_stop
4492 && thread
->last_status
.kind
!= TARGET_WAITKIND_IGNORE
)
4495 debug_printf (" client wants LWP to remain %ld stopped\n",
4500 if (lwp
->status_pending_p
)
4503 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4508 gdb_assert (lwp
->suspended
>= 0);
4513 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread
));
4517 if (thread
->last_resume_kind
== resume_stop
4518 && lwp
->pending_signals_to_report
== NULL
4519 && lwp
->collecting_fast_tracepoint
== 0)
4521 /* We haven't reported this LWP as stopped yet (otherwise, the
4522 last_status.kind check above would catch it, and we wouldn't
4523 reach here. This LWP may have been momentarily paused by a
4524 stop_all_lwps call while handling for example, another LWP's
4525 step-over. In that case, the pending expected SIGSTOP signal
4526 that was queued at vCont;t handling time will have already
4527 been consumed by wait_for_sigstop, and so we need to requeue
4528 another one here. Note that if the LWP already has a SIGSTOP
4529 pending, this is a no-op. */
4532 debug_printf ("Client wants LWP %ld to stop. "
4533 "Making sure it has a SIGSTOP pending\n",
4539 step
= thread
->last_resume_kind
== resume_step
;
4540 linux_resume_one_lwp (lwp
, step
, 0, NULL
);
4545 unsuspend_and_proceed_one_lwp (struct inferior_list_entry
*entry
, void *except
)
4547 struct thread_info
*thread
= (struct thread_info
*) entry
;
4548 struct lwp_info
*lwp
= get_thread_lwp (thread
);
4554 gdb_assert (lwp
->suspended
>= 0);
4556 return proceed_one_lwp (entry
, except
);
4559 /* When we finish a step-over, set threads running again. If there's
4560 another thread that may need a step-over, now's the time to start
4561 it. Eventually, we'll move all threads past their breakpoints. */
4564 proceed_all_lwps (void)
4566 struct thread_info
*need_step_over
;
4568 /* If there is a thread which would otherwise be resumed, which is
4569 stopped at a breakpoint that needs stepping over, then don't
4570 resume any threads - have it step over the breakpoint with all
4571 other threads stopped, then resume all threads again. */
4573 if (supports_breakpoints ())
4576 = (struct thread_info
*) find_inferior (&all_threads
,
4577 need_step_over_p
, NULL
);
4579 if (need_step_over
!= NULL
)
4582 debug_printf ("proceed_all_lwps: found "
4583 "thread %ld needing a step-over\n",
4584 lwpid_of (need_step_over
));
4586 start_step_over (get_thread_lwp (need_step_over
));
4592 debug_printf ("Proceeding, no step-over needed\n");
4594 find_inferior (&all_threads
, proceed_one_lwp
, NULL
);
4597 /* Stopped LWPs that the client wanted to be running, that don't have
4598 pending statuses, are set to run again, except for EXCEPT, if not
4599 NULL. This undoes a stop_all_lwps call. */
4602 unstop_all_lwps (int unsuspend
, struct lwp_info
*except
)
4608 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
4609 lwpid_of (get_lwp_thread (except
)));
4611 debug_printf ("unstopping all lwps\n");
4615 find_inferior (&all_threads
, unsuspend_and_proceed_one_lwp
, except
);
4617 find_inferior (&all_threads
, proceed_one_lwp
, except
);
4621 debug_printf ("unstop_all_lwps done\n");
4627 #ifdef HAVE_LINUX_REGSETS
4629 #define use_linux_regsets 1
4631 /* Returns true if REGSET has been disabled. */
4634 regset_disabled (struct regsets_info
*info
, struct regset_info
*regset
)
4636 return (info
->disabled_regsets
!= NULL
4637 && info
->disabled_regsets
[regset
- info
->regsets
]);
4640 /* Disable REGSET. */
4643 disable_regset (struct regsets_info
*info
, struct regset_info
*regset
)
4647 dr_offset
= regset
- info
->regsets
;
4648 if (info
->disabled_regsets
== NULL
)
4649 info
->disabled_regsets
= xcalloc (1, info
->num_regsets
);
4650 info
->disabled_regsets
[dr_offset
] = 1;
4654 regsets_fetch_inferior_registers (struct regsets_info
*regsets_info
,
4655 struct regcache
*regcache
)
4657 struct regset_info
*regset
;
4658 int saw_general_regs
= 0;
4662 pid
= lwpid_of (current_thread
);
4663 for (regset
= regsets_info
->regsets
; regset
->size
>= 0; regset
++)
4668 if (regset
->size
== 0 || regset_disabled (regsets_info
, regset
))
4671 buf
= xmalloc (regset
->size
);
4673 nt_type
= regset
->nt_type
;
4677 iov
.iov_len
= regset
->size
;
4678 data
= (void *) &iov
;
4684 res
= ptrace (regset
->get_request
, pid
,
4685 (PTRACE_TYPE_ARG3
) (long) nt_type
, data
);
4687 res
= ptrace (regset
->get_request
, pid
, data
, nt_type
);
4693 /* If we get EIO on a regset, do not try it again for
4694 this process mode. */
4695 disable_regset (regsets_info
, regset
);
4697 else if (errno
== ENODATA
)
4699 /* ENODATA may be returned if the regset is currently
4700 not "active". This can happen in normal operation,
4701 so suppress the warning in this case. */
4706 sprintf (s
, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4713 if (regset
->type
== GENERAL_REGS
)
4714 saw_general_regs
= 1;
4715 regset
->store_function (regcache
, buf
);
4719 if (saw_general_regs
)
4726 regsets_store_inferior_registers (struct regsets_info
*regsets_info
,
4727 struct regcache
*regcache
)
4729 struct regset_info
*regset
;
4730 int saw_general_regs
= 0;
4734 pid
= lwpid_of (current_thread
);
4735 for (regset
= regsets_info
->regsets
; regset
->size
>= 0; regset
++)
4740 if (regset
->size
== 0 || regset_disabled (regsets_info
, regset
)
4741 || regset
->fill_function
== NULL
)
4744 buf
= xmalloc (regset
->size
);
4746 /* First fill the buffer with the current register set contents,
4747 in case there are any items in the kernel's regset that are
4748 not in gdbserver's regcache. */
4750 nt_type
= regset
->nt_type
;
4754 iov
.iov_len
= regset
->size
;
4755 data
= (void *) &iov
;
4761 res
= ptrace (regset
->get_request
, pid
,
4762 (PTRACE_TYPE_ARG3
) (long) nt_type
, data
);
4764 res
= ptrace (regset
->get_request
, pid
, data
, nt_type
);
4769 /* Then overlay our cached registers on that. */
4770 regset
->fill_function (regcache
, buf
);
4772 /* Only now do we write the register set. */
4774 res
= ptrace (regset
->set_request
, pid
,
4775 (PTRACE_TYPE_ARG3
) (long) nt_type
, data
);
4777 res
= ptrace (regset
->set_request
, pid
, data
, nt_type
);
4785 /* If we get EIO on a regset, do not try it again for
4786 this process mode. */
4787 disable_regset (regsets_info
, regset
);
4789 else if (errno
== ESRCH
)
4791 /* At this point, ESRCH should mean the process is
4792 already gone, in which case we simply ignore attempts
4793 to change its registers. See also the related
4794 comment in linux_resume_one_lwp. */
4800 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4803 else if (regset
->type
== GENERAL_REGS
)
4804 saw_general_regs
= 1;
4807 if (saw_general_regs
)
4813 #else /* !HAVE_LINUX_REGSETS */
4815 #define use_linux_regsets 0
4816 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4817 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4821 /* Return 1 if register REGNO is supported by one of the regset ptrace
4822 calls or 0 if it has to be transferred individually. */
4825 linux_register_in_regsets (const struct regs_info
*regs_info
, int regno
)
4827 unsigned char mask
= 1 << (regno
% 8);
4828 size_t index
= regno
/ 8;
4830 return (use_linux_regsets
4831 && (regs_info
->regset_bitmap
== NULL
4832 || (regs_info
->regset_bitmap
[index
] & mask
) != 0));
4835 #ifdef HAVE_LINUX_USRREGS
4838 register_addr (const struct usrregs_info
*usrregs
, int regnum
)
4842 if (regnum
< 0 || regnum
>= usrregs
->num_regs
)
4843 error ("Invalid register number %d.", regnum
);
4845 addr
= usrregs
->regmap
[regnum
];
4850 /* Fetch one register. */
4852 fetch_register (const struct usrregs_info
*usrregs
,
4853 struct regcache
*regcache
, int regno
)
4860 if (regno
>= usrregs
->num_regs
)
4862 if ((*the_low_target
.cannot_fetch_register
) (regno
))
4865 regaddr
= register_addr (usrregs
, regno
);
4869 size
= ((register_size (regcache
->tdesc
, regno
)
4870 + sizeof (PTRACE_XFER_TYPE
) - 1)
4871 & -sizeof (PTRACE_XFER_TYPE
));
4872 buf
= alloca (size
);
4874 pid
= lwpid_of (current_thread
);
4875 for (i
= 0; i
< size
; i
+= sizeof (PTRACE_XFER_TYPE
))
4878 *(PTRACE_XFER_TYPE
*) (buf
+ i
) =
4879 ptrace (PTRACE_PEEKUSER
, pid
,
4880 /* Coerce to a uintptr_t first to avoid potential gcc warning
4881 of coercing an 8 byte integer to a 4 byte pointer. */
4882 (PTRACE_TYPE_ARG3
) (uintptr_t) regaddr
, (PTRACE_TYPE_ARG4
) 0);
4883 regaddr
+= sizeof (PTRACE_XFER_TYPE
);
4885 error ("reading register %d: %s", regno
, strerror (errno
));
4888 if (the_low_target
.supply_ptrace_register
)
4889 the_low_target
.supply_ptrace_register (regcache
, regno
, buf
);
4891 supply_register (regcache
, regno
, buf
);
4894 /* Store one register. */
4896 store_register (const struct usrregs_info
*usrregs
,
4897 struct regcache
*regcache
, int regno
)
4904 if (regno
>= usrregs
->num_regs
)
4906 if ((*the_low_target
.cannot_store_register
) (regno
))
4909 regaddr
= register_addr (usrregs
, regno
);
4913 size
= ((register_size (regcache
->tdesc
, regno
)
4914 + sizeof (PTRACE_XFER_TYPE
) - 1)
4915 & -sizeof (PTRACE_XFER_TYPE
));
4916 buf
= alloca (size
);
4917 memset (buf
, 0, size
);
4919 if (the_low_target
.collect_ptrace_register
)
4920 the_low_target
.collect_ptrace_register (regcache
, regno
, buf
);
4922 collect_register (regcache
, regno
, buf
);
4924 pid
= lwpid_of (current_thread
);
4925 for (i
= 0; i
< size
; i
+= sizeof (PTRACE_XFER_TYPE
))
4928 ptrace (PTRACE_POKEUSER
, pid
,
4929 /* Coerce to a uintptr_t first to avoid potential gcc warning
4930 about coercing an 8 byte integer to a 4 byte pointer. */
4931 (PTRACE_TYPE_ARG3
) (uintptr_t) regaddr
,
4932 (PTRACE_TYPE_ARG4
) *(PTRACE_XFER_TYPE
*) (buf
+ i
));
4935 /* At this point, ESRCH should mean the process is
4936 already gone, in which case we simply ignore attempts
4937 to change its registers. See also the related
4938 comment in linux_resume_one_lwp. */
4942 if ((*the_low_target
.cannot_store_register
) (regno
) == 0)
4943 error ("writing register %d: %s", regno
, strerror (errno
));
4945 regaddr
+= sizeof (PTRACE_XFER_TYPE
);
4949 /* Fetch all registers, or just one, from the child process.
4950 If REGNO is -1, do this for all registers, skipping any that are
4951 assumed to have been retrieved by regsets_fetch_inferior_registers,
4952 unless ALL is non-zero.
4953 Otherwise, REGNO specifies which register (so we can save time). */
4955 usr_fetch_inferior_registers (const struct regs_info
*regs_info
,
4956 struct regcache
*regcache
, int regno
, int all
)
4958 struct usrregs_info
*usr
= regs_info
->usrregs
;
4962 for (regno
= 0; regno
< usr
->num_regs
; regno
++)
4963 if (all
|| !linux_register_in_regsets (regs_info
, regno
))
4964 fetch_register (usr
, regcache
, regno
);
4967 fetch_register (usr
, regcache
, regno
);
4970 /* Store our register values back into the inferior.
4971 If REGNO is -1, do this for all registers, skipping any that are
4972 assumed to have been saved by regsets_store_inferior_registers,
4973 unless ALL is non-zero.
4974 Otherwise, REGNO specifies which register (so we can save time). */
4976 usr_store_inferior_registers (const struct regs_info
*regs_info
,
4977 struct regcache
*regcache
, int regno
, int all
)
4979 struct usrregs_info
*usr
= regs_info
->usrregs
;
4983 for (regno
= 0; regno
< usr
->num_regs
; regno
++)
4984 if (all
|| !linux_register_in_regsets (regs_info
, regno
))
4985 store_register (usr
, regcache
, regno
);
4988 store_register (usr
, regcache
, regno
);
4991 #else /* !HAVE_LINUX_USRREGS */
4993 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4994 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5000 linux_fetch_registers (struct regcache
*regcache
, int regno
)
5004 const struct regs_info
*regs_info
= (*the_low_target
.regs_info
) ();
5008 if (the_low_target
.fetch_register
!= NULL
5009 && regs_info
->usrregs
!= NULL
)
5010 for (regno
= 0; regno
< regs_info
->usrregs
->num_regs
; regno
++)
5011 (*the_low_target
.fetch_register
) (regcache
, regno
);
5013 all
= regsets_fetch_inferior_registers (regs_info
->regsets_info
, regcache
);
5014 if (regs_info
->usrregs
!= NULL
)
5015 usr_fetch_inferior_registers (regs_info
, regcache
, -1, all
);
5019 if (the_low_target
.fetch_register
!= NULL
5020 && (*the_low_target
.fetch_register
) (regcache
, regno
))
5023 use_regsets
= linux_register_in_regsets (regs_info
, regno
);
5025 all
= regsets_fetch_inferior_registers (regs_info
->regsets_info
,
5027 if ((!use_regsets
|| all
) && regs_info
->usrregs
!= NULL
)
5028 usr_fetch_inferior_registers (regs_info
, regcache
, regno
, 1);
5033 linux_store_registers (struct regcache
*regcache
, int regno
)
5037 const struct regs_info
*regs_info
= (*the_low_target
.regs_info
) ();
5041 all
= regsets_store_inferior_registers (regs_info
->regsets_info
,
5043 if (regs_info
->usrregs
!= NULL
)
5044 usr_store_inferior_registers (regs_info
, regcache
, regno
, all
);
5048 use_regsets
= linux_register_in_regsets (regs_info
, regno
);
5050 all
= regsets_store_inferior_registers (regs_info
->regsets_info
,
5052 if ((!use_regsets
|| all
) && regs_info
->usrregs
!= NULL
)
5053 usr_store_inferior_registers (regs_info
, regcache
, regno
, 1);
5058 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5059 to debugger memory starting at MYADDR. */
5062 linux_read_memory (CORE_ADDR memaddr
, unsigned char *myaddr
, int len
)
5064 int pid
= lwpid_of (current_thread
);
5065 register PTRACE_XFER_TYPE
*buffer
;
5066 register CORE_ADDR addr
;
5073 /* Try using /proc. Don't bother for one word. */
5074 if (len
>= 3 * sizeof (long))
5078 /* We could keep this file open and cache it - possibly one per
5079 thread. That requires some juggling, but is even faster. */
5080 sprintf (filename
, "/proc/%d/mem", pid
);
5081 fd
= open (filename
, O_RDONLY
| O_LARGEFILE
);
5085 /* If pread64 is available, use it. It's faster if the kernel
5086 supports it (only one syscall), and it's 64-bit safe even on
5087 32-bit platforms (for instance, SPARC debugging a SPARC64
5090 bytes
= pread64 (fd
, myaddr
, len
, memaddr
);
5093 if (lseek (fd
, memaddr
, SEEK_SET
) != -1)
5094 bytes
= read (fd
, myaddr
, len
);
5101 /* Some data was read, we'll try to get the rest with ptrace. */
5111 /* Round starting address down to longword boundary. */
5112 addr
= memaddr
& -(CORE_ADDR
) sizeof (PTRACE_XFER_TYPE
);
5113 /* Round ending address up; get number of longwords that makes. */
5114 count
= ((((memaddr
+ len
) - addr
) + sizeof (PTRACE_XFER_TYPE
) - 1)
5115 / sizeof (PTRACE_XFER_TYPE
));
5116 /* Allocate buffer of that many longwords. */
5117 buffer
= (PTRACE_XFER_TYPE
*) alloca (count
* sizeof (PTRACE_XFER_TYPE
));
5119 /* Read all the longwords */
5121 for (i
= 0; i
< count
; i
++, addr
+= sizeof (PTRACE_XFER_TYPE
))
5123 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5124 about coercing an 8 byte integer to a 4 byte pointer. */
5125 buffer
[i
] = ptrace (PTRACE_PEEKTEXT
, pid
,
5126 (PTRACE_TYPE_ARG3
) (uintptr_t) addr
,
5127 (PTRACE_TYPE_ARG4
) 0);
5133 /* Copy appropriate bytes out of the buffer. */
5136 i
*= sizeof (PTRACE_XFER_TYPE
);
5137 i
-= memaddr
& (sizeof (PTRACE_XFER_TYPE
) - 1);
5139 (char *) buffer
+ (memaddr
& (sizeof (PTRACE_XFER_TYPE
) - 1)),
5146 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5147 memory at MEMADDR. On failure (cannot write to the inferior)
5148 returns the value of errno. Always succeeds if LEN is zero. */
5151 linux_write_memory (CORE_ADDR memaddr
, const unsigned char *myaddr
, int len
)
5154 /* Round starting address down to longword boundary. */
5155 register CORE_ADDR addr
= memaddr
& -(CORE_ADDR
) sizeof (PTRACE_XFER_TYPE
);
5156 /* Round ending address up; get number of longwords that makes. */
5158 = (((memaddr
+ len
) - addr
) + sizeof (PTRACE_XFER_TYPE
) - 1)
5159 / sizeof (PTRACE_XFER_TYPE
);
5161 /* Allocate buffer of that many longwords. */
5162 register PTRACE_XFER_TYPE
*buffer
= (PTRACE_XFER_TYPE
*)
5163 alloca (count
* sizeof (PTRACE_XFER_TYPE
));
5165 int pid
= lwpid_of (current_thread
);
5169 /* Zero length write always succeeds. */
5175 /* Dump up to four bytes. */
5176 unsigned int val
= * (unsigned int *) myaddr
;
5182 val
= val
& 0xffffff;
5183 debug_printf ("Writing %0*x to 0x%08lx in process %d\n",
5184 2 * ((len
< 4) ? len
: 4), val
, (long)memaddr
, pid
);
5187 /* Fill start and end extra bytes of buffer with existing memory data. */
5190 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5191 about coercing an 8 byte integer to a 4 byte pointer. */
5192 buffer
[0] = ptrace (PTRACE_PEEKTEXT
, pid
,
5193 (PTRACE_TYPE_ARG3
) (uintptr_t) addr
,
5194 (PTRACE_TYPE_ARG4
) 0);
5202 = ptrace (PTRACE_PEEKTEXT
, pid
,
5203 /* Coerce to a uintptr_t first to avoid potential gcc warning
5204 about coercing an 8 byte integer to a 4 byte pointer. */
5205 (PTRACE_TYPE_ARG3
) (uintptr_t) (addr
+ (count
- 1)
5206 * sizeof (PTRACE_XFER_TYPE
)),
5207 (PTRACE_TYPE_ARG4
) 0);
5212 /* Copy data to be written over corresponding part of buffer. */
5214 memcpy ((char *) buffer
+ (memaddr
& (sizeof (PTRACE_XFER_TYPE
) - 1)),
5217 /* Write the entire buffer. */
5219 for (i
= 0; i
< count
; i
++, addr
+= sizeof (PTRACE_XFER_TYPE
))
5222 ptrace (PTRACE_POKETEXT
, pid
,
5223 /* Coerce to a uintptr_t first to avoid potential gcc warning
5224 about coercing an 8 byte integer to a 4 byte pointer. */
5225 (PTRACE_TYPE_ARG3
) (uintptr_t) addr
,
5226 (PTRACE_TYPE_ARG4
) buffer
[i
]);
/* Target op: hook libthread_db once the inferior's symbols are
   available, unless it is already initialized for this process.  */

static void
linux_look_up_symbols (void)
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  if (proc->priv->thread_db != NULL)
    return;

  /* If the kernel supports tracing clones, then we don't need to
     use the magic thread event breakpoint to learn about
     threads.  */
  thread_db_init (!linux_supports_traceclone ());
#endif
}
/* Target op: interrupt the inferior, as if the user pressed ^C.  */

static void
linux_request_interrupt (void)
{
  extern unsigned long signal_pid;

  /* Send a SIGINT to the process group.  This acts just like the user
     typed a ^C on the controlling terminal.  */
  kill (-signal_pid, SIGINT);
}
5260 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5261 to debugger memory starting at MYADDR. */
5264 linux_read_auxv (CORE_ADDR offset
, unsigned char *myaddr
, unsigned int len
)
5266 char filename
[PATH_MAX
];
5268 int pid
= lwpid_of (current_thread
);
5270 xsnprintf (filename
, sizeof filename
, "/proc/%d/auxv", pid
);
5272 fd
= open (filename
, O_RDONLY
);
5276 if (offset
!= (CORE_ADDR
) 0
5277 && lseek (fd
, (off_t
) offset
, SEEK_SET
) != (off_t
) offset
)
5280 n
= read (fd
, myaddr
, len
);
5287 /* These breakpoint and watchpoint related wrapper functions simply
5288 pass on the function call if the target has registered a
5289 corresponding function. */
5292 linux_supports_z_point_type (char z_type
)
5294 return (the_low_target
.supports_z_point_type
!= NULL
5295 && the_low_target
.supports_z_point_type (z_type
));
5299 linux_insert_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
5300 int size
, struct raw_breakpoint
*bp
)
5302 if (type
== raw_bkpt_type_sw
)
5303 return insert_memory_breakpoint (bp
);
5304 else if (the_low_target
.insert_point
!= NULL
)
5305 return the_low_target
.insert_point (type
, addr
, size
, bp
);
5307 /* Unsupported (see target.h). */
5312 linux_remove_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
5313 int size
, struct raw_breakpoint
*bp
)
5315 if (type
== raw_bkpt_type_sw
)
5316 return remove_memory_breakpoint (bp
);
5317 else if (the_low_target
.remove_point
!= NULL
)
5318 return the_low_target
.remove_point (type
, addr
, size
, bp
);
5320 /* Unsupported (see target.h). */
5324 /* Implement the to_stopped_by_sw_breakpoint target_ops
5328 linux_stopped_by_sw_breakpoint (void)
5330 struct lwp_info
*lwp
= get_thread_lwp (current_thread
);
5332 return (lwp
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
);
5335 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5339 linux_supports_stopped_by_sw_breakpoint (void)
5341 return USE_SIGTRAP_SIGINFO
;
5344 /* Implement the to_stopped_by_hw_breakpoint target_ops
5348 linux_stopped_by_hw_breakpoint (void)
5350 struct lwp_info
*lwp
= get_thread_lwp (current_thread
);
5352 return (lwp
->stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
);
5355 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
5359 linux_supports_stopped_by_hw_breakpoint (void)
5361 return USE_SIGTRAP_SIGINFO
;
/* Implement the supports_conditional_breakpoints target_ops
   method.  */

static int
linux_supports_conditional_breakpoints (void)
{
  /* GDBserver needs to step over the breakpoint if the condition is
     false.  GDBserver software single step is too simple, so disable
     conditional breakpoints if the target doesn't have hardware single
     step.  */
  return can_hardware_single_step ();
}
5378 linux_stopped_by_watchpoint (void)
5380 struct lwp_info
*lwp
= get_thread_lwp (current_thread
);
5382 return lwp
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
;
5386 linux_stopped_data_address (void)
5388 struct lwp_info
*lwp
= get_thread_lwp (current_thread
);
5390 return lwp
->stopped_data_address
;
5393 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5394 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5395 && defined(PT_TEXT_END_ADDR)
5397 /* This is only used for targets that define PT_TEXT_ADDR,
5398 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
5399 the target has different ways of acquiring this information, like
5402 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5403 to tell gdb about. */
5406 linux_read_offsets (CORE_ADDR
*text_p
, CORE_ADDR
*data_p
)
5408 unsigned long text
, text_end
, data
;
5409 int pid
= lwpid_of (current_thread
);
5413 text
= ptrace (PTRACE_PEEKUSER
, pid
, (PTRACE_TYPE_ARG3
) PT_TEXT_ADDR
,
5414 (PTRACE_TYPE_ARG4
) 0);
5415 text_end
= ptrace (PTRACE_PEEKUSER
, pid
, (PTRACE_TYPE_ARG3
) PT_TEXT_END_ADDR
,
5416 (PTRACE_TYPE_ARG4
) 0);
5417 data
= ptrace (PTRACE_PEEKUSER
, pid
, (PTRACE_TYPE_ARG3
) PT_DATA_ADDR
,
5418 (PTRACE_TYPE_ARG4
) 0);
5422 /* Both text and data offsets produced at compile-time (and so
5423 used by gdb) are relative to the beginning of the program,
5424 with the data segment immediately following the text segment.
5425 However, the actual runtime layout in memory may put the data
5426 somewhere else, so when we send gdb a data base-address, we
5427 use the real data base address and subtract the compile-time
5428 data base-address from it (which is just the length of the
5429 text segment). BSS immediately follows data in both
5432 *data_p
= data
- (text_end
- text
);
5441 linux_qxfer_osdata (const char *annex
,
5442 unsigned char *readbuf
, unsigned const char *writebuf
,
5443 CORE_ADDR offset
, int len
)
5445 return linux_common_xfer_osdata (annex
, readbuf
, offset
, len
);
5448 /* Convert a native/host siginfo object, into/from the siginfo in the
5449 layout of the inferiors' architecture. */
5452 siginfo_fixup (siginfo_t
*siginfo
, void *inf_siginfo
, int direction
)
5456 if (the_low_target
.siginfo_fixup
!= NULL
)
5457 done
= the_low_target
.siginfo_fixup (siginfo
, inf_siginfo
, direction
);
5459 /* If there was no callback, or the callback didn't do anything,
5460 then just do a straight memcpy. */
5464 memcpy (siginfo
, inf_siginfo
, sizeof (siginfo_t
));
5466 memcpy (inf_siginfo
, siginfo
, sizeof (siginfo_t
));
5471 linux_xfer_siginfo (const char *annex
, unsigned char *readbuf
,
5472 unsigned const char *writebuf
, CORE_ADDR offset
, int len
)
5476 char inf_siginfo
[sizeof (siginfo_t
)];
5478 if (current_thread
== NULL
)
5481 pid
= lwpid_of (current_thread
);
5484 debug_printf ("%s siginfo for lwp %d.\n",
5485 readbuf
!= NULL
? "Reading" : "Writing",
5488 if (offset
>= sizeof (siginfo
))
5491 if (ptrace (PTRACE_GETSIGINFO
, pid
, (PTRACE_TYPE_ARG3
) 0, &siginfo
) != 0)
5494 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5495 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5496 inferior with a 64-bit GDBSERVER should look the same as debugging it
5497 with a 32-bit GDBSERVER, we need to convert it. */
5498 siginfo_fixup (&siginfo
, inf_siginfo
, 0);
5500 if (offset
+ len
> sizeof (siginfo
))
5501 len
= sizeof (siginfo
) - offset
;
5503 if (readbuf
!= NULL
)
5504 memcpy (readbuf
, inf_siginfo
+ offset
, len
);
5507 memcpy (inf_siginfo
+ offset
, writebuf
, len
);
5509 /* Convert back to ptrace layout before flushing it out. */
5510 siginfo_fixup (&siginfo
, inf_siginfo
, 1);
5512 if (ptrace (PTRACE_SETSIGINFO
, pid
, (PTRACE_TYPE_ARG3
) 0, &siginfo
) != 0)
5519 /* SIGCHLD handler that serves two purposes: In non-stop/async mode,
5520 so we notice when children change state; as the handler for the
5521 sigsuspend in my_waitpid. */
5524 sigchld_handler (int signo
)
5526 int old_errno
= errno
;
5532 /* fprintf is not async-signal-safe, so call write
5534 if (write (2, "sigchld_handler\n",
5535 sizeof ("sigchld_handler\n") - 1) < 0)
5536 break; /* just ignore */
5540 if (target_is_async_p ())
5541 async_file_mark (); /* trigger a linux_wait */
/* The Linux target always supports non-stop mode.  */

static int
linux_supports_non_stop (void)
{
  return 1;
}
5553 linux_async (int enable
)
5555 int previous
= target_is_async_p ();
5558 debug_printf ("linux_async (%d), previous=%d\n",
5561 if (previous
!= enable
)
5564 sigemptyset (&mask
);
5565 sigaddset (&mask
, SIGCHLD
);
5567 sigprocmask (SIG_BLOCK
, &mask
, NULL
);
5571 if (pipe (linux_event_pipe
) == -1)
5573 linux_event_pipe
[0] = -1;
5574 linux_event_pipe
[1] = -1;
5575 sigprocmask (SIG_UNBLOCK
, &mask
, NULL
);
5577 warning ("creating event pipe failed.");
5581 fcntl (linux_event_pipe
[0], F_SETFL
, O_NONBLOCK
);
5582 fcntl (linux_event_pipe
[1], F_SETFL
, O_NONBLOCK
);
5584 /* Register the event loop handler. */
5585 add_file_handler (linux_event_pipe
[0],
5586 handle_target_event
, NULL
);
5588 /* Always trigger a linux_wait. */
5593 delete_file_handler (linux_event_pipe
[0]);
5595 close (linux_event_pipe
[0]);
5596 close (linux_event_pipe
[1]);
5597 linux_event_pipe
[0] = -1;
5598 linux_event_pipe
[1] = -1;
5601 sigprocmask (SIG_UNBLOCK
, &mask
, NULL
);
/* Switch non-stop mode on or off; returns 0 on success, -1 if the
   async transition did not take effect.  */

static int
linux_start_non_stop (int nonstop)
{
  /* Register or unregister from event-loop accordingly.  */
  linux_async (nonstop);

  if (target_is_async_p () != (nonstop != 0))
    return -1;

  return 0;
}
/* The Linux target supports debugging multiple processes at once.  */

static int
linux_supports_multi_process (void)
{
  return 1;
}
/* Check if fork events are supported.  */

static int
linux_supports_fork_events (void)
{
  return linux_supports_tracefork ();
}
/* Check if vfork events are supported.  */

static int
linux_supports_vfork_events (void)
{
  return linux_supports_tracefork ();
}
5641 /* Callback for 'find_inferior'. Set the (possibly changed) ptrace
5642 options for the specified lwp. */
5645 reset_lwp_ptrace_options_callback (struct inferior_list_entry
*entry
,
5648 struct thread_info
*thread
= (struct thread_info
*) entry
;
5649 struct lwp_info
*lwp
= get_thread_lwp (thread
);
5653 /* Stop the lwp so we can modify its ptrace options. */
5654 lwp
->must_set_ptrace_flags
= 1;
5655 linux_stop_lwp (lwp
);
5659 /* Already stopped; go ahead and set the ptrace options. */
5660 struct process_info
*proc
= find_process_pid (pid_of (thread
));
5661 int options
= linux_low_ptrace_options (proc
->attached
);
5663 linux_enable_event_reporting (lwpid_of (thread
), options
);
5664 lwp
->must_set_ptrace_flags
= 0;
5670 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
5671 ptrace flags for all inferiors. This is in case the new GDB connection
5672 doesn't support the same set of events that the previous one did. */
5675 linux_handle_new_gdb_connection (void)
5679 /* Request that all the lwps reset their ptrace options. */
5680 find_inferior (&all_threads
, reset_lwp_ptrace_options_callback
, &pid
);
/* Address-space randomization can only be disabled when personality(2)
   support was detected at configure time.  */

static int
linux_supports_disable_randomization (void)
{
#ifdef HAVE_PERSONALITY
  return 1;
#else
  return 0;
#endif
}
/* The Linux target supports the in-process agent.  */

static int
linux_supports_agent (void)
{
  return 1;
}
5700 linux_supports_range_stepping (void)
5702 if (*the_low_target
.supports_range_stepping
== NULL
)
5705 return (*the_low_target
.supports_range_stepping
) ();
5708 /* Enumerate spufs IDs for process PID. */
5710 spu_enumerate_spu_ids (long pid
, unsigned char *buf
, CORE_ADDR offset
, int len
)
5716 struct dirent
*entry
;
5718 sprintf (path
, "/proc/%ld/fd", pid
);
5719 dir
= opendir (path
);
5724 while ((entry
= readdir (dir
)) != NULL
)
5730 fd
= atoi (entry
->d_name
);
5734 sprintf (path
, "/proc/%ld/fd/%d", pid
, fd
);
5735 if (stat (path
, &st
) != 0)
5737 if (!S_ISDIR (st
.st_mode
))
5740 if (statfs (path
, &stfs
) != 0)
5742 if (stfs
.f_type
!= SPUFS_MAGIC
)
5745 if (pos
>= offset
&& pos
+ 4 <= offset
+ len
)
5747 *(unsigned int *)(buf
+ pos
- offset
) = fd
;
5757 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5758 object type, using the /proc file system. */
5760 linux_qxfer_spu (const char *annex
, unsigned char *readbuf
,
5761 unsigned const char *writebuf
,
5762 CORE_ADDR offset
, int len
)
5764 long pid
= lwpid_of (current_thread
);
5769 if (!writebuf
&& !readbuf
)
5777 return spu_enumerate_spu_ids (pid
, readbuf
, offset
, len
);
5780 sprintf (buf
, "/proc/%ld/fd/%s", pid
, annex
);
5781 fd
= open (buf
, writebuf
? O_WRONLY
: O_RDONLY
);
5786 && lseek (fd
, (off_t
) offset
, SEEK_SET
) != (off_t
) offset
)
5793 ret
= write (fd
, writebuf
, (size_t) len
);
5795 ret
= read (fd
, readbuf
, (size_t) len
);
5801 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5802 struct target_loadseg
5804 /* Core address to which the segment is mapped. */
5806 /* VMA recorded in the program header. */
5808 /* Size of this segment in memory. */
5812 # if defined PT_GETDSBT
5813 struct target_loadmap
5815 /* Protocol version number, must be zero. */
5817 /* Pointer to the DSBT table, its size, and the DSBT index. */
5818 unsigned *dsbt_table
;
5819 unsigned dsbt_size
, dsbt_index
;
5820 /* Number of segments in this map. */
5822 /* The actual memory map. */
5823 struct target_loadseg segs
[/*nsegs*/];
5825 # define LINUX_LOADMAP PT_GETDSBT
5826 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5827 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5829 struct target_loadmap
5831 /* Protocol version number, must be zero. */
5833 /* Number of segments in this map. */
5835 /* The actual memory map. */
5836 struct target_loadseg segs
[/*nsegs*/];
5838 # define LINUX_LOADMAP PTRACE_GETFDPIC
5839 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5840 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5844 linux_read_loadmap (const char *annex
, CORE_ADDR offset
,
5845 unsigned char *myaddr
, unsigned int len
)
5847 int pid
= lwpid_of (current_thread
);
5849 struct target_loadmap
*data
= NULL
;
5850 unsigned int actual_length
, copy_length
;
5852 if (strcmp (annex
, "exec") == 0)
5853 addr
= (int) LINUX_LOADMAP_EXEC
;
5854 else if (strcmp (annex
, "interp") == 0)
5855 addr
= (int) LINUX_LOADMAP_INTERP
;
5859 if (ptrace (LINUX_LOADMAP
, pid
, addr
, &data
) != 0)
5865 actual_length
= sizeof (struct target_loadmap
)
5866 + sizeof (struct target_loadseg
) * data
->nsegs
;
5868 if (offset
< 0 || offset
> actual_length
)
5871 copy_length
= actual_length
- offset
< len
? actual_length
- offset
: len
;
5872 memcpy (myaddr
, (char *) data
+ offset
, copy_length
);
5876 # define linux_read_loadmap NULL
5877 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5880 linux_process_qsupported (const char *query
)
5882 if (the_low_target
.process_qsupported
!= NULL
)
5883 the_low_target
.process_qsupported (query
);
5887 linux_supports_tracepoints (void)
5889 if (*the_low_target
.supports_tracepoints
== NULL
)
5892 return (*the_low_target
.supports_tracepoints
) ();
5896 linux_read_pc (struct regcache
*regcache
)
5898 if (the_low_target
.get_pc
== NULL
)
5901 return (*the_low_target
.get_pc
) (regcache
);
5905 linux_write_pc (struct regcache
*regcache
, CORE_ADDR pc
)
5907 gdb_assert (the_low_target
.set_pc
!= NULL
);
5909 (*the_low_target
.set_pc
) (regcache
, pc
);
5913 linux_thread_stopped (struct thread_info
*thread
)
5915 return get_thread_lwp (thread
)->stopped
;
5918 /* This exposes stop-all-threads functionality to other modules. */
5921 linux_pause_all (int freeze
)
5923 stop_all_lwps (freeze
, NULL
);
5926 /* This exposes unstop-all-threads functionality to other gdbserver
5930 linux_unpause_all (int unfreeze
)
5932 unstop_all_lwps (unfreeze
, NULL
);
5936 linux_prepare_to_access_memory (void)
5938 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5941 linux_pause_all (1);
5946 linux_done_accessing_memory (void)
5948 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5951 linux_unpause_all (1);
5955 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
5956 CORE_ADDR collector
,
5959 CORE_ADDR
*jump_entry
,
5960 CORE_ADDR
*trampoline
,
5961 ULONGEST
*trampoline_size
,
5962 unsigned char *jjump_pad_insn
,
5963 ULONGEST
*jjump_pad_insn_size
,
5964 CORE_ADDR
*adjusted_insn_addr
,
5965 CORE_ADDR
*adjusted_insn_addr_end
,
5968 return (*the_low_target
.install_fast_tracepoint_jump_pad
)
5969 (tpoint
, tpaddr
, collector
, lockaddr
, orig_size
,
5970 jump_entry
, trampoline
, trampoline_size
,
5971 jjump_pad_insn
, jjump_pad_insn_size
,
5972 adjusted_insn_addr
, adjusted_insn_addr_end
,
5976 static struct emit_ops
*
5977 linux_emit_ops (void)
5979 if (the_low_target
.emit_ops
!= NULL
)
5980 return (*the_low_target
.emit_ops
) ();
5986 linux_get_min_fast_tracepoint_insn_len (void)
5988 return (*the_low_target
.get_min_fast_tracepoint_insn_len
) ();
5991 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5994 get_phdr_phnum_from_proc_auxv (const int pid
, const int is_elf64
,
5995 CORE_ADDR
*phdr_memaddr
, int *num_phdr
)
5997 char filename
[PATH_MAX
];
5999 const int auxv_size
= is_elf64
6000 ? sizeof (Elf64_auxv_t
) : sizeof (Elf32_auxv_t
);
6001 char buf
[sizeof (Elf64_auxv_t
)]; /* The larger of the two. */
6003 xsnprintf (filename
, sizeof filename
, "/proc/%d/auxv", pid
);
6005 fd
= open (filename
, O_RDONLY
);
6011 while (read (fd
, buf
, auxv_size
) == auxv_size
6012 && (*phdr_memaddr
== 0 || *num_phdr
== 0))
6016 Elf64_auxv_t
*const aux
= (Elf64_auxv_t
*) buf
;
6018 switch (aux
->a_type
)
6021 *phdr_memaddr
= aux
->a_un
.a_val
;
6024 *num_phdr
= aux
->a_un
.a_val
;
6030 Elf32_auxv_t
*const aux
= (Elf32_auxv_t
*) buf
;
6032 switch (aux
->a_type
)
6035 *phdr_memaddr
= aux
->a_un
.a_val
;
6038 *num_phdr
= aux
->a_un
.a_val
;
6046 if (*phdr_memaddr
== 0 || *num_phdr
== 0)
6048 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6049 "phdr_memaddr = %ld, phdr_num = %d",
6050 (long) *phdr_memaddr
, *num_phdr
);
6057 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6060 get_dynamic (const int pid
, const int is_elf64
)
6062 CORE_ADDR phdr_memaddr
, relocation
;
6064 unsigned char *phdr_buf
;
6065 const int phdr_size
= is_elf64
? sizeof (Elf64_Phdr
) : sizeof (Elf32_Phdr
);
6067 if (get_phdr_phnum_from_proc_auxv (pid
, is_elf64
, &phdr_memaddr
, &num_phdr
))
6070 gdb_assert (num_phdr
< 100); /* Basic sanity check. */
6071 phdr_buf
= alloca (num_phdr
* phdr_size
);
6073 if (linux_read_memory (phdr_memaddr
, phdr_buf
, num_phdr
* phdr_size
))
6076 /* Compute relocation: it is expected to be 0 for "regular" executables,
6077 non-zero for PIE ones. */
6079 for (i
= 0; relocation
== -1 && i
< num_phdr
; i
++)
6082 Elf64_Phdr
*const p
= (Elf64_Phdr
*) (phdr_buf
+ i
* phdr_size
);
6084 if (p
->p_type
== PT_PHDR
)
6085 relocation
= phdr_memaddr
- p
->p_vaddr
;
6089 Elf32_Phdr
*const p
= (Elf32_Phdr
*) (phdr_buf
+ i
* phdr_size
);
6091 if (p
->p_type
== PT_PHDR
)
6092 relocation
= phdr_memaddr
- p
->p_vaddr
;
6095 if (relocation
== -1)
6097 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
6098 any real world executables, including PIE executables, have always
6099 PT_PHDR present. PT_PHDR is not present in some shared libraries or
6100 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
6101 or present DT_DEBUG anyway (fpc binaries are statically linked).
6103 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
6105 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
6110 for (i
= 0; i
< num_phdr
; i
++)
6114 Elf64_Phdr
*const p
= (Elf64_Phdr
*) (phdr_buf
+ i
* phdr_size
);
6116 if (p
->p_type
== PT_DYNAMIC
)
6117 return p
->p_vaddr
+ relocation
;
6121 Elf32_Phdr
*const p
= (Elf32_Phdr
*) (phdr_buf
+ i
* phdr_size
);
6123 if (p
->p_type
== PT_DYNAMIC
)
6124 return p
->p_vaddr
+ relocation
;
6131 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6132 can be 0 if the inferior does not yet have the library list initialized.
6133 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6134 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6137 get_r_debug (const int pid
, const int is_elf64
)
6139 CORE_ADDR dynamic_memaddr
;
6140 const int dyn_size
= is_elf64
? sizeof (Elf64_Dyn
) : sizeof (Elf32_Dyn
);
6141 unsigned char buf
[sizeof (Elf64_Dyn
)]; /* The larger of the two. */
6144 dynamic_memaddr
= get_dynamic (pid
, is_elf64
);
6145 if (dynamic_memaddr
== 0)
6148 while (linux_read_memory (dynamic_memaddr
, buf
, dyn_size
) == 0)
6152 Elf64_Dyn
*const dyn
= (Elf64_Dyn
*) buf
;
6153 #ifdef DT_MIPS_RLD_MAP
6157 unsigned char buf
[sizeof (Elf64_Xword
)];
6161 if (dyn
->d_tag
== DT_MIPS_RLD_MAP
)
6163 if (linux_read_memory (dyn
->d_un
.d_val
,
6164 rld_map
.buf
, sizeof (rld_map
.buf
)) == 0)
6169 #endif /* DT_MIPS_RLD_MAP */
6171 if (dyn
->d_tag
== DT_DEBUG
&& map
== -1)
6172 map
= dyn
->d_un
.d_val
;
6174 if (dyn
->d_tag
== DT_NULL
)
6179 Elf32_Dyn
*const dyn
= (Elf32_Dyn
*) buf
;
6180 #ifdef DT_MIPS_RLD_MAP
6184 unsigned char buf
[sizeof (Elf32_Word
)];
6188 if (dyn
->d_tag
== DT_MIPS_RLD_MAP
)
6190 if (linux_read_memory (dyn
->d_un
.d_val
,
6191 rld_map
.buf
, sizeof (rld_map
.buf
)) == 0)
6196 #endif /* DT_MIPS_RLD_MAP */
6198 if (dyn
->d_tag
== DT_DEBUG
&& map
== -1)
6199 map
= dyn
->d_un
.d_val
;
6201 if (dyn
->d_tag
== DT_NULL
)
6205 dynamic_memaddr
+= dyn_size
;
6211 /* Read one pointer from MEMADDR in the inferior. */
6214 read_one_ptr (CORE_ADDR memaddr
, CORE_ADDR
*ptr
, int ptr_size
)
6218 /* Go through a union so this works on either big or little endian
6219 hosts, when the inferior's pointer size is smaller than the size
6220 of CORE_ADDR. It is assumed the inferior's endianness is the
6221 same of the superior's. */
6224 CORE_ADDR core_addr
;
6229 ret
= linux_read_memory (memaddr
, &addr
.uc
, ptr_size
);
6232 if (ptr_size
== sizeof (CORE_ADDR
))
6233 *ptr
= addr
.core_addr
;
6234 else if (ptr_size
== sizeof (unsigned int))
6237 gdb_assert_not_reached ("unhandled pointer size");
/* Field offsets of the inferior's struct r_debug / struct link_map,
   which differ between 32-bit and 64-bit inferiors.  */

struct link_map_offsets
  {
    /* Offset and size of r_debug.r_version.  */
    int r_version_offset;

    /* Offset and size of r_debug.r_map.  */
    int r_map_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };
6266 /* Construct qXfer:libraries-svr4:read reply. */
6269 linux_qxfer_libraries_svr4 (const char *annex
, unsigned char *readbuf
,
6270 unsigned const char *writebuf
,
6271 CORE_ADDR offset
, int len
)
6274 unsigned document_len
;
6275 struct process_info_private
*const priv
= current_process ()->priv
;
6276 char filename
[PATH_MAX
];
6279 static const struct link_map_offsets lmo_32bit_offsets
=
6281 0, /* r_version offset. */
6282 4, /* r_debug.r_map offset. */
6283 0, /* l_addr offset in link_map. */
6284 4, /* l_name offset in link_map. */
6285 8, /* l_ld offset in link_map. */
6286 12, /* l_next offset in link_map. */
6287 16 /* l_prev offset in link_map. */
6290 static const struct link_map_offsets lmo_64bit_offsets
=
6292 0, /* r_version offset. */
6293 8, /* r_debug.r_map offset. */
6294 0, /* l_addr offset in link_map. */
6295 8, /* l_name offset in link_map. */
6296 16, /* l_ld offset in link_map. */
6297 24, /* l_next offset in link_map. */
6298 32 /* l_prev offset in link_map. */
6300 const struct link_map_offsets
*lmo
;
6301 unsigned int machine
;
6303 CORE_ADDR lm_addr
= 0, lm_prev
= 0;
6304 int allocated
= 1024;
6306 CORE_ADDR l_name
, l_addr
, l_ld
, l_next
, l_prev
;
6307 int header_done
= 0;
6309 if (writebuf
!= NULL
)
6311 if (readbuf
== NULL
)
6314 pid
= lwpid_of (current_thread
);
6315 xsnprintf (filename
, sizeof filename
, "/proc/%d/exe", pid
);
6316 is_elf64
= elf_64_file_p (filename
, &machine
);
6317 lmo
= is_elf64
? &lmo_64bit_offsets
: &lmo_32bit_offsets
;
6318 ptr_size
= is_elf64
? 8 : 4;
6320 while (annex
[0] != '\0')
6326 sep
= strchr (annex
, '=');
6331 if (len
== 5 && startswith (annex
, "start"))
6333 else if (len
== 4 && startswith (annex
, "prev"))
6337 annex
= strchr (sep
, ';');
6344 annex
= decode_address_to_semicolon (addrp
, sep
+ 1);
6351 if (priv
->r_debug
== 0)
6352 priv
->r_debug
= get_r_debug (pid
, is_elf64
);
6354 /* We failed to find DT_DEBUG. Such situation will not change
6355 for this inferior - do not retry it. Report it to GDB as
6356 E01, see for the reasons at the GDB solib-svr4.c side. */
6357 if (priv
->r_debug
== (CORE_ADDR
) -1)
6360 if (priv
->r_debug
!= 0)
6362 if (linux_read_memory (priv
->r_debug
+ lmo
->r_version_offset
,
6363 (unsigned char *) &r_version
,
6364 sizeof (r_version
)) != 0
6367 warning ("unexpected r_debug version %d", r_version
);
6369 else if (read_one_ptr (priv
->r_debug
+ lmo
->r_map_offset
,
6370 &lm_addr
, ptr_size
) != 0)
6372 warning ("unable to read r_map from 0x%lx",
6373 (long) priv
->r_debug
+ lmo
->r_map_offset
);
6378 document
= xmalloc (allocated
);
6379 strcpy (document
, "<library-list-svr4 version=\"1.0\"");
6380 p
= document
+ strlen (document
);
6383 && read_one_ptr (lm_addr
+ lmo
->l_name_offset
,
6384 &l_name
, ptr_size
) == 0
6385 && read_one_ptr (lm_addr
+ lmo
->l_addr_offset
,
6386 &l_addr
, ptr_size
) == 0
6387 && read_one_ptr (lm_addr
+ lmo
->l_ld_offset
,
6388 &l_ld
, ptr_size
) == 0
6389 && read_one_ptr (lm_addr
+ lmo
->l_prev_offset
,
6390 &l_prev
, ptr_size
) == 0
6391 && read_one_ptr (lm_addr
+ lmo
->l_next_offset
,
6392 &l_next
, ptr_size
) == 0)
6394 unsigned char libname
[PATH_MAX
];
6396 if (lm_prev
!= l_prev
)
6398 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6399 (long) lm_prev
, (long) l_prev
);
6403 /* Ignore the first entry even if it has valid name as the first entry
6404 corresponds to the main executable. The first entry should not be
6405 skipped if the dynamic loader was loaded late by a static executable
6406 (see solib-svr4.c parameter ignore_first). But in such case the main
6407 executable does not have PT_DYNAMIC present and this function already
6408 exited above due to failed get_r_debug. */
6411 sprintf (p
, " main-lm=\"0x%lx\"", (unsigned long) lm_addr
);
6416 /* Not checking for error because reading may stop before
6417 we've got PATH_MAX worth of characters. */
6419 linux_read_memory (l_name
, libname
, sizeof (libname
) - 1);
6420 libname
[sizeof (libname
) - 1] = '\0';
6421 if (libname
[0] != '\0')
6423 /* 6x the size for xml_escape_text below. */
6424 size_t len
= 6 * strlen ((char *) libname
);
6429 /* Terminate `<library-list-svr4'. */
6434 while (allocated
< p
- document
+ len
+ 200)
6436 /* Expand to guarantee sufficient storage. */
6437 uintptr_t document_len
= p
- document
;
6439 document
= xrealloc (document
, 2 * allocated
);
6441 p
= document
+ document_len
;
6444 name
= xml_escape_text ((char *) libname
);
6445 p
+= sprintf (p
, "<library name=\"%s\" lm=\"0x%lx\" "
6446 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6447 name
, (unsigned long) lm_addr
,
6448 (unsigned long) l_addr
, (unsigned long) l_ld
);
6459 /* Empty list; terminate `<library-list-svr4'. */
6463 strcpy (p
, "</library-list-svr4>");
6465 document_len
= strlen (document
);
6466 if (offset
< document_len
)
6467 document_len
-= offset
;
6470 if (len
> document_len
)
6473 memcpy (readbuf
, document
+ offset
, len
);
6479 #ifdef HAVE_LINUX_BTRACE
6481 /* See to_enable_btrace target method. */
6483 static struct btrace_target_info
*
6484 linux_low_enable_btrace (ptid_t ptid
, const struct btrace_config
*conf
)
6486 struct btrace_target_info
*tinfo
;
6488 tinfo
= linux_enable_btrace (ptid
, conf
);
6490 if (tinfo
!= NULL
&& tinfo
->ptr_bits
== 0)
6492 struct thread_info
*thread
= find_thread_ptid (ptid
);
6493 struct regcache
*regcache
= get_thread_regcache (thread
, 0);
6495 tinfo
->ptr_bits
= register_size (regcache
->tdesc
, 0) * 8;
6501 /* See to_disable_btrace target method. */
6504 linux_low_disable_btrace (struct btrace_target_info
*tinfo
)
6506 enum btrace_error err
;
6508 err
= linux_disable_btrace (tinfo
);
6509 return (err
== BTRACE_ERR_NONE
? 0 : -1);
6512 /* Encode an Intel(R) Processor Trace configuration. */
6515 linux_low_encode_pt_config (struct buffer
*buffer
,
6516 const struct btrace_data_pt_config
*config
)
6518 buffer_grow_str (buffer
, "<pt-config>\n");
6520 switch (config
->cpu
.vendor
)
6523 buffer_xml_printf (buffer
, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6524 "model=\"%u\" stepping=\"%u\"/>\n",
6525 config
->cpu
.family
, config
->cpu
.model
,
6526 config
->cpu
.stepping
);
6533 buffer_grow_str (buffer
, "</pt-config>\n");
6536 /* Encode a raw buffer. */
6539 linux_low_encode_raw (struct buffer
*buffer
, const gdb_byte
*data
,
6545 /* We use hex encoding - see common/rsp-low.h. */
6546 buffer_grow_str (buffer
, "<raw>\n");
6552 elem
[0] = tohex ((*data
>> 4) & 0xf);
6553 elem
[1] = tohex (*data
++ & 0xf);
6555 buffer_grow (buffer
, elem
, 2);
6558 buffer_grow_str (buffer
, "</raw>\n");
6561 /* See to_read_btrace target method. */
6564 linux_low_read_btrace (struct btrace_target_info
*tinfo
, struct buffer
*buffer
,
6567 struct btrace_data btrace
;
6568 struct btrace_block
*block
;
6569 enum btrace_error err
;
6572 btrace_data_init (&btrace
);
6574 err
= linux_read_btrace (&btrace
, tinfo
, type
);
6575 if (err
!= BTRACE_ERR_NONE
)
6577 if (err
== BTRACE_ERR_OVERFLOW
)
6578 buffer_grow_str0 (buffer
, "E.Overflow.");
6580 buffer_grow_str0 (buffer
, "E.Generic Error.");
6585 switch (btrace
.format
)
6587 case BTRACE_FORMAT_NONE
:
6588 buffer_grow_str0 (buffer
, "E.No Trace.");
6591 case BTRACE_FORMAT_BTS
:
6592 buffer_grow_str (buffer
, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6593 buffer_grow_str (buffer
, "<btrace version=\"1.0\">\n");
6596 VEC_iterate (btrace_block_s
, btrace
.variant
.bts
.blocks
, i
, block
);
6598 buffer_xml_printf (buffer
, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6599 paddress (block
->begin
), paddress (block
->end
));
6601 buffer_grow_str0 (buffer
, "</btrace>\n");
6604 case BTRACE_FORMAT_PT
:
6605 buffer_grow_str (buffer
, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6606 buffer_grow_str (buffer
, "<btrace version=\"1.0\">\n");
6607 buffer_grow_str (buffer
, "<pt>\n");
6609 linux_low_encode_pt_config (buffer
, &btrace
.variant
.pt
.config
);
6611 linux_low_encode_raw (buffer
, btrace
.variant
.pt
.data
,
6612 btrace
.variant
.pt
.size
);
6614 buffer_grow_str (buffer
, "</pt>\n");
6615 buffer_grow_str0 (buffer
, "</btrace>\n");
6619 buffer_grow_str0 (buffer
, "E.Unsupported Trace Format.");
6623 btrace_data_fini (&btrace
);
6627 btrace_data_fini (&btrace
);
6631 /* See to_btrace_conf target method. */
6634 linux_low_btrace_conf (const struct btrace_target_info
*tinfo
,
6635 struct buffer
*buffer
)
6637 const struct btrace_config
*conf
;
6639 buffer_grow_str (buffer
, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
6640 buffer_grow_str (buffer
, "<btrace-conf version=\"1.0\">\n");
6642 conf
= linux_btrace_conf (tinfo
);
6645 switch (conf
->format
)
6647 case BTRACE_FORMAT_NONE
:
6650 case BTRACE_FORMAT_BTS
:
6651 buffer_xml_printf (buffer
, "<bts");
6652 buffer_xml_printf (buffer
, " size=\"0x%x\"", conf
->bts
.size
);
6653 buffer_xml_printf (buffer
, " />\n");
6656 case BTRACE_FORMAT_PT
:
6657 buffer_xml_printf (buffer
, "<pt");
6658 buffer_xml_printf (buffer
, " size=\"0x%x\"", conf
->pt
.size
);
6659 buffer_xml_printf (buffer
, "/>\n");
6664 buffer_grow_str0 (buffer
, "</btrace-conf>\n");
6667 #endif /* HAVE_LINUX_BTRACE */
6669 /* See nat/linux-nat.h. */
6672 current_lwp_ptid (void)
6674 return ptid_of (current_thread
);
6677 static struct target_ops linux_target_ops
= {
6678 linux_create_inferior
,
6688 linux_fetch_registers
,
6689 linux_store_registers
,
6690 linux_prepare_to_access_memory
,
6691 linux_done_accessing_memory
,
6694 linux_look_up_symbols
,
6695 linux_request_interrupt
,
6697 linux_supports_z_point_type
,
6700 linux_stopped_by_sw_breakpoint
,
6701 linux_supports_stopped_by_sw_breakpoint
,
6702 linux_stopped_by_hw_breakpoint
,
6703 linux_supports_stopped_by_hw_breakpoint
,
6704 linux_supports_conditional_breakpoints
,
6705 linux_stopped_by_watchpoint
,
6706 linux_stopped_data_address
,
6707 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6708 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6709 && defined(PT_TEXT_END_ADDR)
6714 #ifdef USE_THREAD_DB
6715 thread_db_get_tls_address
,
6720 hostio_last_error_from_errno
,
6723 linux_supports_non_stop
,
6725 linux_start_non_stop
,
6726 linux_supports_multi_process
,
6727 linux_supports_fork_events
,
6728 linux_supports_vfork_events
,
6729 linux_handle_new_gdb_connection
,
6730 #ifdef USE_THREAD_DB
6731 thread_db_handle_monitor_command
,
6735 linux_common_core_of_thread
,
6737 linux_process_qsupported
,
6738 linux_supports_tracepoints
,
6741 linux_thread_stopped
,
6745 linux_stabilize_threads
,
6746 linux_install_fast_tracepoint_jump_pad
,
6748 linux_supports_disable_randomization
,
6749 linux_get_min_fast_tracepoint_insn_len
,
6750 linux_qxfer_libraries_svr4
,
6751 linux_supports_agent
,
6752 #ifdef HAVE_LINUX_BTRACE
6753 linux_supports_btrace
,
6754 linux_low_enable_btrace
,
6755 linux_low_disable_btrace
,
6756 linux_low_read_btrace
,
6757 linux_low_btrace_conf
,
6765 linux_supports_range_stepping
,
6766 linux_proc_pid_to_exec_file
,
6767 linux_mntns_open_cloexec
,
6769 linux_mntns_readlink
,
/* Set up signal dispositions needed by the Linux backend.  */

static void
linux_init_signals (void)
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
  signal (__SIGRTMIN+1, SIG_IGN);
#endif
}
#ifdef HAVE_LINUX_REGSETS
/* Count the number of regsets in INFO's regset table by scanning for
   the negative-size sentinel entry, storing the count in
   INFO->num_regsets.  */

void
initialize_regsets_info (struct regsets_info *info)
{
  for (info->num_regsets = 0;
       info->regsets[info->num_regsets].size >= 0;
       info->num_regsets++)
    ;
}
#endif
6794 initialize_low (void)
6796 struct sigaction sigchld_action
;
6797 memset (&sigchld_action
, 0, sizeof (sigchld_action
));
6798 set_target_ops (&linux_target_ops
);
6799 set_breakpoint_data (the_low_target
.breakpoint
,
6800 the_low_target
.breakpoint_len
);
6801 linux_init_signals ();
6802 linux_ptrace_init_warnings ();
6804 sigchld_action
.sa_handler
= sigchld_handler
;
6805 sigemptyset (&sigchld_action
.sa_mask
);
6806 sigchld_action
.sa_flags
= SA_RESTART
;
6807 sigaction (SIGCHLD
, &sigchld_action
, NULL
);
6809 initialize_low_arch ();
6811 linux_check_ptrace_features ();