1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2021 Free Software Foundation, Inc.
4 This file is part of GDB.
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
22 #include "gdbsupport/agent.h"
24 #include "gdbsupport/rsp-low.h"
25 #include "gdbsupport/signals-state-save-restore.h"
26 #include "nat/linux-nat.h"
27 #include "nat/linux-waitpid.h"
28 #include "gdbsupport/gdb_wait.h"
29 #include "nat/gdb_ptrace.h"
30 #include "nat/linux-ptrace.h"
31 #include "nat/linux-procfs.h"
32 #include "nat/linux-personality.h"
34 #include <sys/ioctl.h>
37 #include <sys/syscall.h>
41 #include <sys/types.h>
46 #include "gdbsupport/filestuff.h"
47 #include "tracepoint.h"
49 #include "gdbsupport/common-inferior.h"
50 #include "nat/fork-inferior.h"
51 #include "gdbsupport/environ.h"
52 #include "gdbsupport/gdb-sigmask.h"
53 #include "gdbsupport/scoped_restore.h"
55 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
56 then ELFMAG0 will have been defined. If it didn't get included by
57 gdb_proc_service.h then including it will likely introduce a duplicate
58 definition of elf_fpregset_t. */
61 #include "nat/linux-namespaces.h"
71 /* Some targets did not define these ptrace constants from the start,
72 so gdbserver defines them locally here. In the future, these may
73 be removed after they are added to asm/ptrace.h. */
74 #if !(defined(PT_TEXT_ADDR) \
75 || defined(PT_DATA_ADDR) \
76 || defined(PT_TEXT_END_ADDR))
77 #if defined(__mcoldfire__)
78 /* These are still undefined in 3.10 kernels. */
79 #define PT_TEXT_ADDR 49*4
80 #define PT_DATA_ADDR 50*4
81 #define PT_TEXT_END_ADDR 51*4
82 /* These are still undefined in 3.10 kernels. */
83 #elif defined(__TMS320C6X__)
84 #define PT_TEXT_ADDR (0x10000*4)
85 #define PT_DATA_ADDR (0x10004*4)
86 #define PT_TEXT_END_ADDR (0x10008*4)
90 #if (defined(__UCLIBC__) \
91 && defined(HAS_NOMMU) \
92 && defined(PT_TEXT_ADDR) \
93 && defined(PT_DATA_ADDR) \
94 && defined(PT_TEXT_END_ADDR))
95 #define SUPPORTS_READ_OFFSETS
98 #ifdef HAVE_LINUX_BTRACE
99 # include "nat/linux-btrace.h"
100 # include "gdbsupport/btrace-common.h"
103 #ifndef HAVE_ELF32_AUXV_T
104 /* Copied from glibc's elf.h. */
107 uint32_t a_type
; /* Entry type */
110 uint32_t a_val
; /* Integer value */
111 /* We use to have pointer elements added here. We cannot do that,
112 though, since it does not work when using 32-bit definitions
113 on 64-bit platforms and vice versa. */
118 #ifndef HAVE_ELF64_AUXV_T
119 /* Copied from glibc's elf.h. */
122 uint64_t a_type
; /* Entry type */
125 uint64_t a_val
; /* Integer value */
126 /* We use to have pointer elements added here. We cannot do that,
127 though, since it does not work when using 32-bit definitions
128 on 64-bit platforms and vice versa. */
133 /* Does the current host support PTRACE_GETREGSET? */
134 int have_ptrace_getregset
= -1;
138 /* See nat/linux-nat.h. */
141 ptid_of_lwp (struct lwp_info
*lwp
)
143 return ptid_of (get_lwp_thread (lwp
));
146 /* See nat/linux-nat.h. */
149 lwp_set_arch_private_info (struct lwp_info
*lwp
,
150 struct arch_lwp_info
*info
)
152 lwp
->arch_private
= info
;
155 /* See nat/linux-nat.h. */
157 struct arch_lwp_info
*
158 lwp_arch_private_info (struct lwp_info
*lwp
)
160 return lwp
->arch_private
;
163 /* See nat/linux-nat.h. */
166 lwp_is_stopped (struct lwp_info
*lwp
)
171 /* See nat/linux-nat.h. */
173 enum target_stop_reason
174 lwp_stop_reason (struct lwp_info
*lwp
)
176 return lwp
->stop_reason
;
179 /* See nat/linux-nat.h. */
182 lwp_is_stepping (struct lwp_info
*lwp
)
184 return lwp
->stepping
;
187 /* A list of all unknown processes which receive stop signals. Some
188 other process will presumably claim each of these as forked
189 children momentarily. */
191 struct simple_pid_list
193 /* The process ID. */
196 /* The status as reported by waitpid. */
200 struct simple_pid_list
*next
;
202 static struct simple_pid_list
*stopped_pids
;
204 /* Trivial list manipulation functions to keep track of a list of new
205 stopped processes. */
208 add_to_pid_list (struct simple_pid_list
**listp
, int pid
, int status
)
210 struct simple_pid_list
*new_pid
= XNEW (struct simple_pid_list
);
213 new_pid
->status
= status
;
214 new_pid
->next
= *listp
;
219 pull_pid_from_list (struct simple_pid_list
**listp
, int pid
, int *statusp
)
221 struct simple_pid_list
**p
;
223 for (p
= listp
; *p
!= NULL
; p
= &(*p
)->next
)
224 if ((*p
)->pid
== pid
)
226 struct simple_pid_list
*next
= (*p
)->next
;
228 *statusp
= (*p
)->status
;
236 enum stopping_threads_kind
238 /* Not stopping threads presently. */
239 NOT_STOPPING_THREADS
,
241 /* Stopping threads. */
244 /* Stopping and suspending threads. */
245 STOPPING_AND_SUSPENDING_THREADS
248 /* This is set while stop_all_lwps is in effect. */
249 static stopping_threads_kind stopping_threads
= NOT_STOPPING_THREADS
;
251 /* FIXME make into a target method? */
252 int using_threads
= 1;
254 /* True if we're presently stabilizing threads (moving them out of
256 static int stabilizing_threads
;
258 static void unsuspend_all_lwps (struct lwp_info
*except
);
259 static void mark_lwp_dead (struct lwp_info
*lwp
, int wstat
);
260 static int lwp_is_marked_dead (struct lwp_info
*lwp
);
261 static int kill_lwp (unsigned long lwpid
, int signo
);
262 static void enqueue_pending_signal (struct lwp_info
*lwp
, int signal
, siginfo_t
*info
);
263 static int linux_low_ptrace_options (int attached
);
264 static int check_ptrace_stopped_lwp_gone (struct lwp_info
*lp
);
266 /* When the event-loop is doing a step-over, this points at the thread
268 static ptid_t step_over_bkpt
;
271 linux_process_target::low_supports_breakpoints ()
277 linux_process_target::low_get_pc (regcache
*regcache
)
283 linux_process_target::low_set_pc (regcache
*regcache
, CORE_ADDR newpc
)
285 gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
288 std::vector
<CORE_ADDR
>
289 linux_process_target::low_get_next_pcs (regcache
*regcache
)
291 gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
296 linux_process_target::low_decr_pc_after_break ()
301 /* True if LWP is stopped in its stepping range. */
304 lwp_in_step_range (struct lwp_info
*lwp
)
306 CORE_ADDR pc
= lwp
->stop_pc
;
308 return (pc
>= lwp
->step_range_start
&& pc
< lwp
->step_range_end
);
311 /* The read/write ends of the pipe registered as waitable file in the
313 static int linux_event_pipe
[2] = { -1, -1 };
315 /* True if we're currently in async mode. */
316 #define target_is_async_p() (linux_event_pipe[0] != -1)
318 static void send_sigstop (struct lwp_info
*lwp
);
320 /* Return non-zero if HEADER is a 64-bit ELF file. */
323 elf_64_header_p (const Elf64_Ehdr
*header
, unsigned int *machine
)
325 if (header
->e_ident
[EI_MAG0
] == ELFMAG0
326 && header
->e_ident
[EI_MAG1
] == ELFMAG1
327 && header
->e_ident
[EI_MAG2
] == ELFMAG2
328 && header
->e_ident
[EI_MAG3
] == ELFMAG3
)
330 *machine
= header
->e_machine
;
331 return header
->e_ident
[EI_CLASS
] == ELFCLASS64
;
338 /* Return non-zero if FILE is a 64-bit ELF file,
339 zero if the file is not a 64-bit ELF file,
340 and -1 if the file is not accessible or doesn't exist. */
343 elf_64_file_p (const char *file
, unsigned int *machine
)
348 fd
= open (file
, O_RDONLY
);
352 if (read (fd
, &header
, sizeof (header
)) != sizeof (header
))
359 return elf_64_header_p (&header
, machine
);
362 /* Accepts an integer PID; Returns true if the executable PID is
363 running is a 64-bit ELF file.. */
366 linux_pid_exe_is_elf_64_file (int pid
, unsigned int *machine
)
370 sprintf (file
, "/proc/%d/exe", pid
);
371 return elf_64_file_p (file
, machine
);
375 linux_process_target::delete_lwp (lwp_info
*lwp
)
377 struct thread_info
*thr
= get_lwp_thread (lwp
);
380 debug_printf ("deleting %ld\n", lwpid_of (thr
));
384 low_delete_thread (lwp
->arch_private
);
390 linux_process_target::low_delete_thread (arch_lwp_info
*info
)
392 /* Default implementation should be overridden if architecture-specific
393 info is being used. */
394 gdb_assert (info
== nullptr);
398 linux_process_target::add_linux_process (int pid
, int attached
)
400 struct process_info
*proc
;
402 proc
= add_process (pid
, attached
);
403 proc
->priv
= XCNEW (struct process_info_private
);
405 proc
->priv
->arch_private
= low_new_process ();
411 linux_process_target::low_new_process ()
417 linux_process_target::low_delete_process (arch_process_info
*info
)
419 /* Default implementation must be overridden if architecture-specific
421 gdb_assert (info
== nullptr);
425 linux_process_target::low_new_fork (process_info
*parent
, process_info
*child
)
431 linux_process_target::arch_setup_thread (thread_info
*thread
)
433 scoped_restore_current_thread restore_thread
;
434 switch_to_thread (thread
);
440 linux_process_target::handle_extended_wait (lwp_info
**orig_event_lwp
,
443 client_state
&cs
= get_client_state ();
444 struct lwp_info
*event_lwp
= *orig_event_lwp
;
445 int event
= linux_ptrace_get_extended_event (wstat
);
446 struct thread_info
*event_thr
= get_lwp_thread (event_lwp
);
447 struct lwp_info
*new_lwp
;
449 gdb_assert (event_lwp
->waitstatus
.kind () == TARGET_WAITKIND_IGNORE
);
451 /* All extended events we currently use are mid-syscall. Only
452 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
453 you have to be using PTRACE_SEIZE to get that. */
454 event_lwp
->syscall_state
= TARGET_WAITKIND_SYSCALL_ENTRY
;
456 if ((event
== PTRACE_EVENT_FORK
) || (event
== PTRACE_EVENT_VFORK
)
457 || (event
== PTRACE_EVENT_CLONE
))
460 unsigned long new_pid
;
463 /* Get the pid of the new lwp. */
464 ptrace (PTRACE_GETEVENTMSG
, lwpid_of (event_thr
), (PTRACE_TYPE_ARG3
) 0,
467 /* If we haven't already seen the new PID stop, wait for it now. */
468 if (!pull_pid_from_list (&stopped_pids
, new_pid
, &status
))
470 /* The new child has a pending SIGSTOP. We can't affect it until it
471 hits the SIGSTOP, but we're already attached. */
473 ret
= my_waitpid (new_pid
, &status
, __WALL
);
476 perror_with_name ("waiting for new child");
477 else if (ret
!= new_pid
)
478 warning ("wait returned unexpected PID %d", ret
);
479 else if (!WIFSTOPPED (status
))
480 warning ("wait returned unexpected status 0x%x", status
);
483 if (event
== PTRACE_EVENT_FORK
|| event
== PTRACE_EVENT_VFORK
)
485 struct process_info
*parent_proc
;
486 struct process_info
*child_proc
;
487 struct lwp_info
*child_lwp
;
488 struct thread_info
*child_thr
;
490 ptid
= ptid_t (new_pid
, new_pid
);
494 debug_printf ("HEW: Got fork event from LWP %ld, "
496 ptid_of (event_thr
).lwp (),
500 /* Add the new process to the tables and clone the breakpoint
501 lists of the parent. We need to do this even if the new process
502 will be detached, since we will need the process object and the
503 breakpoints to remove any breakpoints from memory when we
504 detach, and the client side will access registers. */
505 child_proc
= add_linux_process (new_pid
, 0);
506 gdb_assert (child_proc
!= NULL
);
507 child_lwp
= add_lwp (ptid
);
508 gdb_assert (child_lwp
!= NULL
);
509 child_lwp
->stopped
= 1;
510 child_lwp
->must_set_ptrace_flags
= 1;
511 child_lwp
->status_pending_p
= 0;
512 child_thr
= get_lwp_thread (child_lwp
);
513 child_thr
->last_resume_kind
= resume_stop
;
514 child_thr
->last_status
.set_stopped (GDB_SIGNAL_0
);
516 /* If we're suspending all threads, leave this one suspended
517 too. If the fork/clone parent is stepping over a breakpoint,
518 all other threads have been suspended already. Leave the
519 child suspended too. */
520 if (stopping_threads
== STOPPING_AND_SUSPENDING_THREADS
521 || event_lwp
->bp_reinsert
!= 0)
524 debug_printf ("HEW: leaving child suspended\n");
525 child_lwp
->suspended
= 1;
528 parent_proc
= get_thread_process (event_thr
);
529 child_proc
->attached
= parent_proc
->attached
;
531 if (event_lwp
->bp_reinsert
!= 0
532 && supports_software_single_step ()
533 && event
== PTRACE_EVENT_VFORK
)
535 /* If we leave single-step breakpoints there, child will
536 hit it, so uninsert single-step breakpoints from parent
537 (and child). Once vfork child is done, reinsert
538 them back to parent. */
539 uninsert_single_step_breakpoints (event_thr
);
542 clone_all_breakpoints (child_thr
, event_thr
);
544 target_desc_up tdesc
= allocate_target_description ();
545 copy_target_description (tdesc
.get (), parent_proc
->tdesc
);
546 child_proc
->tdesc
= tdesc
.release ();
548 /* Clone arch-specific process data. */
549 low_new_fork (parent_proc
, child_proc
);
551 /* Save fork info in the parent thread. */
552 if (event
== PTRACE_EVENT_FORK
)
553 event_lwp
->waitstatus
.set_forked (ptid
);
554 else if (event
== PTRACE_EVENT_VFORK
)
555 event_lwp
->waitstatus
.set_vforked (ptid
);
557 /* The status_pending field contains bits denoting the
558 extended event, so when the pending event is handled,
559 the handler will look at lwp->waitstatus. */
560 event_lwp
->status_pending_p
= 1;
561 event_lwp
->status_pending
= wstat
;
563 /* Link the threads until the parent event is passed on to
565 event_lwp
->fork_relative
= child_lwp
;
566 child_lwp
->fork_relative
= event_lwp
;
568 /* If the parent thread is doing step-over with single-step
569 breakpoints, the list of single-step breakpoints are cloned
570 from the parent's. Remove them from the child process.
571 In case of vfork, we'll reinsert them back once vforked
573 if (event_lwp
->bp_reinsert
!= 0
574 && supports_software_single_step ())
576 /* The child process is forked and stopped, so it is safe
577 to access its memory without stopping all other threads
578 from other processes. */
579 delete_single_step_breakpoints (child_thr
);
581 gdb_assert (has_single_step_breakpoints (event_thr
));
582 gdb_assert (!has_single_step_breakpoints (child_thr
));
585 /* Report the event. */
590 debug_printf ("HEW: Got clone event "
591 "from LWP %ld, new child is LWP %ld\n",
592 lwpid_of (event_thr
), new_pid
);
594 ptid
= ptid_t (pid_of (event_thr
), new_pid
);
595 new_lwp
= add_lwp (ptid
);
597 /* Either we're going to immediately resume the new thread
598 or leave it stopped. resume_one_lwp is a nop if it
599 thinks the thread is currently running, so set this first
600 before calling resume_one_lwp. */
601 new_lwp
->stopped
= 1;
603 /* If we're suspending all threads, leave this one suspended
604 too. If the fork/clone parent is stepping over a breakpoint,
605 all other threads have been suspended already. Leave the
606 child suspended too. */
607 if (stopping_threads
== STOPPING_AND_SUSPENDING_THREADS
608 || event_lwp
->bp_reinsert
!= 0)
609 new_lwp
->suspended
= 1;
611 /* Normally we will get the pending SIGSTOP. But in some cases
612 we might get another signal delivered to the group first.
613 If we do get another signal, be sure not to lose it. */
614 if (WSTOPSIG (status
) != SIGSTOP
)
616 new_lwp
->stop_expected
= 1;
617 new_lwp
->status_pending_p
= 1;
618 new_lwp
->status_pending
= status
;
620 else if (cs
.report_thread_events
)
622 new_lwp
->waitstatus
.set_thread_created ();
623 new_lwp
->status_pending_p
= 1;
624 new_lwp
->status_pending
= status
;
628 thread_db_notice_clone (event_thr
, ptid
);
631 /* Don't report the event. */
634 else if (event
== PTRACE_EVENT_VFORK_DONE
)
636 event_lwp
->waitstatus
.set_vfork_done ();
638 if (event_lwp
->bp_reinsert
!= 0 && supports_software_single_step ())
640 reinsert_single_step_breakpoints (event_thr
);
642 gdb_assert (has_single_step_breakpoints (event_thr
));
645 /* Report the event. */
648 else if (event
== PTRACE_EVENT_EXEC
&& cs
.report_exec_events
)
650 struct process_info
*proc
;
651 std::vector
<int> syscalls_to_catch
;
657 debug_printf ("HEW: Got exec event from LWP %ld\n",
658 lwpid_of (event_thr
));
661 /* Get the event ptid. */
662 event_ptid
= ptid_of (event_thr
);
663 event_pid
= event_ptid
.pid ();
665 /* Save the syscall list from the execing process. */
666 proc
= get_thread_process (event_thr
);
667 syscalls_to_catch
= std::move (proc
->syscalls_to_catch
);
669 /* Delete the execing process and all its threads. */
671 switch_to_thread (nullptr);
673 /* Create a new process/lwp/thread. */
674 proc
= add_linux_process (event_pid
, 0);
675 event_lwp
= add_lwp (event_ptid
);
676 event_thr
= get_lwp_thread (event_lwp
);
677 gdb_assert (current_thread
== event_thr
);
678 arch_setup_thread (event_thr
);
680 /* Set the event status. */
681 event_lwp
->waitstatus
.set_execd
683 (linux_proc_pid_to_exec_file (lwpid_of (event_thr
))));
685 /* Mark the exec status as pending. */
686 event_lwp
->stopped
= 1;
687 event_lwp
->status_pending_p
= 1;
688 event_lwp
->status_pending
= wstat
;
689 event_thr
->last_resume_kind
= resume_continue
;
690 event_thr
->last_status
.set_ignore ();
692 /* Update syscall state in the new lwp, effectively mid-syscall too. */
693 event_lwp
->syscall_state
= TARGET_WAITKIND_SYSCALL_ENTRY
;
695 /* Restore the list to catch. Don't rely on the client, which is free
696 to avoid sending a new list when the architecture doesn't change.
697 Also, for ANY_SYSCALL, the architecture doesn't really matter. */
698 proc
->syscalls_to_catch
= std::move (syscalls_to_catch
);
700 /* Report the event. */
701 *orig_event_lwp
= event_lwp
;
705 internal_error (__FILE__
, __LINE__
, _("unknown ptrace event %d"), event
);
709 linux_process_target::get_pc (lwp_info
*lwp
)
711 struct regcache
*regcache
;
714 if (!low_supports_breakpoints ())
717 scoped_restore_current_thread restore_thread
;
718 switch_to_thread (get_lwp_thread (lwp
));
720 regcache
= get_thread_regcache (current_thread
, 1);
721 pc
= low_get_pc (regcache
);
724 debug_printf ("pc is 0x%lx\n", (long) pc
);
730 linux_process_target::get_syscall_trapinfo (lwp_info
*lwp
, int *sysno
)
732 struct regcache
*regcache
;
734 scoped_restore_current_thread restore_thread
;
735 switch_to_thread (get_lwp_thread (lwp
));
737 regcache
= get_thread_regcache (current_thread
, 1);
738 low_get_syscall_trapinfo (regcache
, sysno
);
741 debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno
);
745 linux_process_target::low_get_syscall_trapinfo (regcache
*regcache
, int *sysno
)
747 /* By default, report an unknown system call number. */
748 *sysno
= UNKNOWN_SYSCALL
;
752 linux_process_target::save_stop_reason (lwp_info
*lwp
)
755 CORE_ADDR sw_breakpoint_pc
;
756 #if USE_SIGTRAP_SIGINFO
760 if (!low_supports_breakpoints ())
764 sw_breakpoint_pc
= pc
- low_decr_pc_after_break ();
766 /* breakpoint_at reads from the current thread. */
767 scoped_restore_current_thread restore_thread
;
768 switch_to_thread (get_lwp_thread (lwp
));
770 #if USE_SIGTRAP_SIGINFO
771 if (ptrace (PTRACE_GETSIGINFO
, lwpid_of (current_thread
),
772 (PTRACE_TYPE_ARG3
) 0, &siginfo
) == 0)
774 if (siginfo
.si_signo
== SIGTRAP
)
776 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo
.si_code
)
777 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo
.si_code
))
779 /* The si_code is ambiguous on this arch -- check debug
781 if (!check_stopped_by_watchpoint (lwp
))
782 lwp
->stop_reason
= TARGET_STOPPED_BY_SW_BREAKPOINT
;
784 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo
.si_code
))
786 /* If we determine the LWP stopped for a SW breakpoint,
787 trust it. Particularly don't check watchpoint
788 registers, because at least on s390, we'd find
789 stopped-by-watchpoint as long as there's a watchpoint
791 lwp
->stop_reason
= TARGET_STOPPED_BY_SW_BREAKPOINT
;
793 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo
.si_code
))
795 /* This can indicate either a hardware breakpoint or
796 hardware watchpoint. Check debug registers. */
797 if (!check_stopped_by_watchpoint (lwp
))
798 lwp
->stop_reason
= TARGET_STOPPED_BY_HW_BREAKPOINT
;
800 else if (siginfo
.si_code
== TRAP_TRACE
)
802 /* We may have single stepped an instruction that
803 triggered a watchpoint. In that case, on some
804 architectures (such as x86), instead of TRAP_HWBKPT,
805 si_code indicates TRAP_TRACE, and we need to check
806 the debug registers separately. */
807 if (!check_stopped_by_watchpoint (lwp
))
808 lwp
->stop_reason
= TARGET_STOPPED_BY_SINGLE_STEP
;
813 /* We may have just stepped a breakpoint instruction. E.g., in
814 non-stop mode, GDB first tells the thread A to step a range, and
815 then the user inserts a breakpoint inside the range. In that
816 case we need to report the breakpoint PC. */
817 if ((!lwp
->stepping
|| lwp
->stop_pc
== sw_breakpoint_pc
)
818 && low_breakpoint_at (sw_breakpoint_pc
))
819 lwp
->stop_reason
= TARGET_STOPPED_BY_SW_BREAKPOINT
;
821 if (hardware_breakpoint_inserted_here (pc
))
822 lwp
->stop_reason
= TARGET_STOPPED_BY_HW_BREAKPOINT
;
824 if (lwp
->stop_reason
== TARGET_STOPPED_BY_NO_REASON
)
825 check_stopped_by_watchpoint (lwp
);
828 if (lwp
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
)
832 struct thread_info
*thr
= get_lwp_thread (lwp
);
834 debug_printf ("CSBB: %s stopped by software breakpoint\n",
835 target_pid_to_str (ptid_of (thr
)).c_str ());
838 /* Back up the PC if necessary. */
839 if (pc
!= sw_breakpoint_pc
)
841 struct regcache
*regcache
842 = get_thread_regcache (current_thread
, 1);
843 low_set_pc (regcache
, sw_breakpoint_pc
);
846 /* Update this so we record the correct stop PC below. */
847 pc
= sw_breakpoint_pc
;
849 else if (lwp
->stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
)
853 struct thread_info
*thr
= get_lwp_thread (lwp
);
855 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
856 target_pid_to_str (ptid_of (thr
)).c_str ());
859 else if (lwp
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
)
863 struct thread_info
*thr
= get_lwp_thread (lwp
);
865 debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
866 target_pid_to_str (ptid_of (thr
)).c_str ());
869 else if (lwp
->stop_reason
== TARGET_STOPPED_BY_SINGLE_STEP
)
873 struct thread_info
*thr
= get_lwp_thread (lwp
);
875 debug_printf ("CSBB: %s stopped by trace\n",
876 target_pid_to_str (ptid_of (thr
)).c_str ());
885 linux_process_target::add_lwp (ptid_t ptid
)
887 lwp_info
*lwp
= new lwp_info
;
889 lwp
->thread
= add_thread (ptid
, lwp
);
891 low_new_thread (lwp
);
897 linux_process_target::low_new_thread (lwp_info
*info
)
902 /* Callback to be used when calling fork_inferior, responsible for
903 actually initiating the tracing of the inferior. */
908 if (ptrace (PTRACE_TRACEME
, 0, (PTRACE_TYPE_ARG3
) 0,
909 (PTRACE_TYPE_ARG4
) 0) < 0)
910 trace_start_error_with_name ("ptrace");
912 if (setpgid (0, 0) < 0)
913 trace_start_error_with_name ("setpgid");
915 /* If GDBserver is connected to gdb via stdio, redirect the inferior's
916 stdout to stderr so that inferior i/o doesn't corrupt the connection.
917 Also, redirect stdin to /dev/null. */
918 if (remote_connection_is_stdio ())
921 trace_start_error_with_name ("close");
922 if (open ("/dev/null", O_RDONLY
) < 0)
923 trace_start_error_with_name ("open");
925 trace_start_error_with_name ("dup2");
926 if (write (2, "stdin/stdout redirected\n",
927 sizeof ("stdin/stdout redirected\n") - 1) < 0)
929 /* Errors ignored. */;
934 /* Start an inferior process and returns its pid.
935 PROGRAM is the name of the program to be started, and PROGRAM_ARGS
936 are its arguments. */
939 linux_process_target::create_inferior (const char *program
,
940 const std::vector
<char *> &program_args
)
942 client_state
&cs
= get_client_state ();
943 struct lwp_info
*new_lwp
;
948 maybe_disable_address_space_randomization restore_personality
949 (cs
.disable_randomization
);
950 std::string str_program_args
= construct_inferior_arguments (program_args
);
952 pid
= fork_inferior (program
,
953 str_program_args
.c_str (),
954 get_environ ()->envp (), linux_ptrace_fun
,
955 NULL
, NULL
, NULL
, NULL
);
958 add_linux_process (pid
, 0);
960 ptid
= ptid_t (pid
, pid
);
961 new_lwp
= add_lwp (ptid
);
962 new_lwp
->must_set_ptrace_flags
= 1;
964 post_fork_inferior (pid
, program
);
969 /* Implement the post_create_inferior target_ops method. */
972 linux_process_target::post_create_inferior ()
974 struct lwp_info
*lwp
= get_thread_lwp (current_thread
);
978 if (lwp
->must_set_ptrace_flags
)
980 struct process_info
*proc
= current_process ();
981 int options
= linux_low_ptrace_options (proc
->attached
);
983 linux_enable_event_reporting (lwpid_of (current_thread
), options
);
984 lwp
->must_set_ptrace_flags
= 0;
989 linux_process_target::attach_lwp (ptid_t ptid
)
991 struct lwp_info
*new_lwp
;
992 int lwpid
= ptid
.lwp ();
994 if (ptrace (PTRACE_ATTACH
, lwpid
, (PTRACE_TYPE_ARG3
) 0, (PTRACE_TYPE_ARG4
) 0)
998 new_lwp
= add_lwp (ptid
);
1000 /* We need to wait for SIGSTOP before being able to make the next
1001 ptrace call on this LWP. */
1002 new_lwp
->must_set_ptrace_flags
= 1;
1004 if (linux_proc_pid_is_stopped (lwpid
))
1007 debug_printf ("Attached to a stopped process\n");
1009 /* The process is definitely stopped. It is in a job control
1010 stop, unless the kernel predates the TASK_STOPPED /
1011 TASK_TRACED distinction, in which case it might be in a
1012 ptrace stop. Make sure it is in a ptrace stop; from there we
1013 can kill it, signal it, et cetera.
1015 First make sure there is a pending SIGSTOP. Since we are
1016 already attached, the process can not transition from stopped
1017 to running without a PTRACE_CONT; so we know this signal will
1018 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1019 probably already in the queue (unless this kernel is old
1020 enough to use TASK_STOPPED for ptrace stops); but since
1021 SIGSTOP is not an RT signal, it can only be queued once. */
1022 kill_lwp (lwpid
, SIGSTOP
);
1024 /* Finally, resume the stopped process. This will deliver the
1025 SIGSTOP (or a higher priority signal, just like normal
1026 PTRACE_ATTACH), which we'll catch later on. */
1027 ptrace (PTRACE_CONT
, lwpid
, (PTRACE_TYPE_ARG3
) 0, (PTRACE_TYPE_ARG4
) 0);
1030 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
1031 brings it to a halt.
1033 There are several cases to consider here:
1035 1) gdbserver has already attached to the process and is being notified
1036 of a new thread that is being created.
1037 In this case we should ignore that SIGSTOP and resume the
1038 process. This is handled below by setting stop_expected = 1,
1039 and the fact that add_thread sets last_resume_kind ==
1042 2) This is the first thread (the process thread), and we're attaching
1043 to it via attach_inferior.
1044 In this case we want the process thread to stop.
1045 This is handled by having linux_attach set last_resume_kind ==
1046 resume_stop after we return.
1048 If the pid we are attaching to is also the tgid, we attach to and
1049 stop all the existing threads. Otherwise, we attach to pid and
1050 ignore any other threads in the same group as this pid.
1052 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1054 In this case we want the thread to stop.
1055 FIXME: This case is currently not properly handled.
1056 We should wait for the SIGSTOP but don't. Things work apparently
1057 because enough time passes between when we ptrace (ATTACH) and when
1058 gdb makes the next ptrace call on the thread.
1060 On the other hand, if we are currently trying to stop all threads, we
1061 should treat the new thread as if we had sent it a SIGSTOP. This works
1062 because we are guaranteed that the add_lwp call above added us to the
1063 end of the list, and so the new thread has not yet reached
1064 wait_for_sigstop (but will). */
1065 new_lwp
->stop_expected
= 1;
1070 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1071 already attached. Returns true if a new LWP is found, false
1075 attach_proc_task_lwp_callback (ptid_t ptid
)
1077 /* Is this a new thread? */
1078 if (find_thread_ptid (ptid
) == NULL
)
1080 int lwpid
= ptid
.lwp ();
1084 debug_printf ("Found new lwp %d\n", lwpid
);
1086 err
= the_linux_target
->attach_lwp (ptid
);
1088 /* Be quiet if we simply raced with the thread exiting. EPERM
1089 is returned if the thread's task still exists, and is marked
1090 as exited or zombie, as well as other conditions, so in that
1091 case, confirm the status in /proc/PID/status. */
1093 || (err
== EPERM
&& linux_proc_pid_is_gone (lwpid
)))
1097 debug_printf ("Cannot attach to lwp %d: "
1098 "thread is gone (%d: %s)\n",
1099 lwpid
, err
, safe_strerror (err
));
1105 = linux_ptrace_attach_fail_reason_string (ptid
, err
);
1107 warning (_("Cannot attach to lwp %d: %s"), lwpid
, reason
.c_str ());
1115 static void async_file_mark (void);
1117 /* Attach to PID. If PID is the tgid, attach to it and all
1121 linux_process_target::attach (unsigned long pid
)
1123 struct process_info
*proc
;
1124 struct thread_info
*initial_thread
;
1125 ptid_t ptid
= ptid_t (pid
, pid
);
1128 proc
= add_linux_process (pid
, 1);
1130 /* Attach to PID. We will check for other threads
1132 err
= attach_lwp (ptid
);
1135 remove_process (proc
);
1137 std::string reason
= linux_ptrace_attach_fail_reason_string (ptid
, err
);
1138 error ("Cannot attach to process %ld: %s", pid
, reason
.c_str ());
1141 /* Don't ignore the initial SIGSTOP if we just attached to this
1142 process. It will be collected by wait shortly. */
1143 initial_thread
= find_thread_ptid (ptid_t (pid
, pid
));
1144 initial_thread
->last_resume_kind
= resume_stop
;
1146 /* We must attach to every LWP. If /proc is mounted, use that to
1147 find them now. On the one hand, the inferior may be using raw
1148 clone instead of using pthreads. On the other hand, even if it
1149 is using pthreads, GDB may not be connected yet (thread_db needs
1150 to do symbol lookups, through qSymbol). Also, thread_db walks
1151 structures in the inferior's address space to find the list of
1152 threads/LWPs, and those structures may well be corrupted. Note
1153 that once thread_db is loaded, we'll still use it to list threads
1154 and associate pthread info with each LWP. */
1155 linux_proc_attach_tgid_threads (pid
, attach_proc_task_lwp_callback
);
1157 /* GDB will shortly read the xml target description for this
1158 process, to figure out the process' architecture. But the target
1159 description is only filled in when the first process/thread in
1160 the thread group reports its initial PTRACE_ATTACH SIGSTOP. Do
1161 that now, otherwise, if GDB is fast enough, it could read the
1162 target description _before_ that initial stop. */
1165 struct lwp_info
*lwp
;
1167 ptid_t pid_ptid
= ptid_t (pid
);
1169 lwpid
= wait_for_event_filtered (pid_ptid
, pid_ptid
, &wstat
, __WALL
);
1170 gdb_assert (lwpid
> 0);
1172 lwp
= find_lwp_pid (ptid_t (lwpid
));
1174 if (!WIFSTOPPED (wstat
) || WSTOPSIG (wstat
) != SIGSTOP
)
1176 lwp
->status_pending_p
= 1;
1177 lwp
->status_pending
= wstat
;
1180 initial_thread
->last_resume_kind
= resume_continue
;
1184 gdb_assert (proc
->tdesc
!= NULL
);
1191 last_thread_of_process_p (int pid
)
1193 bool seen_one
= false;
1195 thread_info
*thread
= find_thread (pid
, [&] (thread_info
*thr_arg
)
1199 /* This is the first thread of this process we see. */
1205 /* This is the second thread of this process we see. */
1210 return thread
== NULL
;
1216 linux_kill_one_lwp (struct lwp_info
*lwp
)
1218 struct thread_info
*thr
= get_lwp_thread (lwp
);
1219 int pid
= lwpid_of (thr
);
1221 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1222 there is no signal context, and ptrace(PTRACE_KILL) (or
1223 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1224 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1225 alternative is to kill with SIGKILL. We only need one SIGKILL
1226 per process, not one for each thread. But since we still support
1227 support debugging programs using raw clone without CLONE_THREAD,
1228 we send one for each thread. For years, we used PTRACE_KILL
1229 only, so we're being a bit paranoid about some old kernels where
1230 PTRACE_KILL might work better (dubious if there are any such, but
1231 that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
1232 second, and so we're fine everywhere. */
1235 kill_lwp (pid
, SIGKILL
);
1238 int save_errno
= errno
;
1240 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
1241 target_pid_to_str (ptid_of (thr
)).c_str (),
1242 save_errno
? safe_strerror (save_errno
) : "OK");
1246 ptrace (PTRACE_KILL
, pid
, (PTRACE_TYPE_ARG3
) 0, (PTRACE_TYPE_ARG4
) 0);
1249 int save_errno
= errno
;
1251 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
1252 target_pid_to_str (ptid_of (thr
)).c_str (),
1253 save_errno
? safe_strerror (save_errno
) : "OK");
1257 /* Kill LWP and wait for it to die. */
1260 kill_wait_lwp (struct lwp_info
*lwp
)
1262 struct thread_info
*thr
= get_lwp_thread (lwp
);
1263 int pid
= ptid_of (thr
).pid ();
1264 int lwpid
= ptid_of (thr
).lwp ();
1269 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid
, pid
);
1273 linux_kill_one_lwp (lwp
);
1275 /* Make sure it died. Notes:
1277 - The loop is most likely unnecessary.
1279 - We don't use wait_for_event as that could delete lwps
1280 while we're iterating over them. We're not interested in
1281 any pending status at this point, only in making sure all
1282 wait status on the kernel side are collected until the
1285 - We don't use __WALL here as the __WALL emulation relies on
1286 SIGCHLD, and killing a stopped process doesn't generate
1287 one, nor an exit status.
1289 res
= my_waitpid (lwpid
, &wstat
, 0);
1290 if (res
== -1 && errno
== ECHILD
)
1291 res
= my_waitpid (lwpid
, &wstat
, __WCLONE
);
1292 } while (res
> 0 && WIFSTOPPED (wstat
));
1294 /* Even if it was stopped, the child may have already disappeared.
1295 E.g., if it was killed by SIGKILL. */
1296 if (res
< 0 && errno
!= ECHILD
)
1297 perror_with_name ("kill_wait_lwp");
1300 /* Callback for `for_each_thread'. Kills an lwp of a given process,
1301 except the leader. */
1304 kill_one_lwp_callback (thread_info
*thread
, int pid
)
1306 struct lwp_info
*lwp
= get_thread_lwp (thread
);
1308 /* We avoid killing the first thread here, because of a Linux kernel (at
1309 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1310 the children get a chance to be reaped, it will remain a zombie
1313 if (lwpid_of (thread
) == pid
)
1316 debug_printf ("lkop: is last of process %s\n",
1317 target_pid_to_str (thread
->id
).c_str ());
1321 kill_wait_lwp (lwp
);
1325 linux_process_target::kill (process_info
*process
)
1327 int pid
= process
->pid
;
1329 /* If we're killing a running inferior, make sure it is stopped
1330 first, as PTRACE_KILL will not work otherwise. */
1331 stop_all_lwps (0, NULL
);
1333 for_each_thread (pid
, [&] (thread_info
*thread
)
1335 kill_one_lwp_callback (thread
, pid
);
1338 /* See the comment in linux_kill_one_lwp. We did not kill the first
1339 thread in the list, so do so now. */
1340 lwp_info
*lwp
= find_lwp_pid (ptid_t (pid
));
1345 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1349 kill_wait_lwp (lwp
);
1353 /* Since we presently can only stop all lwps of all processes, we
1354 need to unstop lwps of other processes. */
1355 unstop_all_lwps (0, NULL
);
1359 /* Get pending signal of THREAD, for detaching purposes. This is the
1360 signal the thread last stopped for, which we need to deliver to the
1361 thread when detaching, otherwise, it'd be suppressed/lost. */
1364 get_detach_signal (struct thread_info
*thread
)
1366 client_state
&cs
= get_client_state ();
1367 enum gdb_signal signo
= GDB_SIGNAL_0
;
1369 struct lwp_info
*lp
= get_thread_lwp (thread
);
1371 if (lp
->status_pending_p
)
1372 status
= lp
->status_pending
;
1375 /* If the thread had been suspended by gdbserver, and it stopped
1376 cleanly, then it'll have stopped with SIGSTOP. But we don't
1377 want to deliver that SIGSTOP. */
1378 if (thread
->last_status
.kind () != TARGET_WAITKIND_STOPPED
1379 || thread
->last_status
.sig () == GDB_SIGNAL_0
)
1382 /* Otherwise, we may need to deliver the signal we
1384 status
= lp
->last_status
;
1387 if (!WIFSTOPPED (status
))
1390 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1391 target_pid_to_str (ptid_of (thread
)).c_str ());
1395 /* Extended wait statuses aren't real SIGTRAPs. */
1396 if (WSTOPSIG (status
) == SIGTRAP
&& linux_is_extended_waitstatus (status
))
1399 debug_printf ("GPS: lwp %s had stopped with extended "
1400 "status: no pending signal\n",
1401 target_pid_to_str (ptid_of (thread
)).c_str ());
1405 signo
= gdb_signal_from_host (WSTOPSIG (status
));
1407 if (cs
.program_signals_p
&& !cs
.program_signals
[signo
])
1410 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1411 target_pid_to_str (ptid_of (thread
)).c_str (),
1412 gdb_signal_to_string (signo
));
1415 else if (!cs
.program_signals_p
1416 /* If we have no way to know which signals GDB does not
1417 want to have passed to the program, assume
1418 SIGTRAP/SIGINT, which is GDB's default. */
1419 && (signo
== GDB_SIGNAL_TRAP
|| signo
== GDB_SIGNAL_INT
))
1422 debug_printf ("GPS: lwp %s had signal %s, "
1423 "but we don't know if we should pass it. "
1424 "Default to not.\n",
1425 target_pid_to_str (ptid_of (thread
)).c_str (),
1426 gdb_signal_to_string (signo
));
1432 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1433 target_pid_to_str (ptid_of (thread
)).c_str (),
1434 gdb_signal_to_string (signo
));
1436 return WSTOPSIG (status
);
1441 linux_process_target::detach_one_lwp (lwp_info
*lwp
)
1443 struct thread_info
*thread
= get_lwp_thread (lwp
);
1447 /* If there is a pending SIGSTOP, get rid of it. */
1448 if (lwp
->stop_expected
)
1451 debug_printf ("Sending SIGCONT to %s\n",
1452 target_pid_to_str (ptid_of (thread
)).c_str ());
1454 kill_lwp (lwpid_of (thread
), SIGCONT
);
1455 lwp
->stop_expected
= 0;
1458 /* Pass on any pending signal for this thread. */
1459 sig
= get_detach_signal (thread
);
1461 /* Preparing to resume may try to write registers, and fail if the
1462 lwp is zombie. If that happens, ignore the error. We'll handle
1463 it below, when detach fails with ESRCH. */
1466 /* Flush any pending changes to the process's registers. */
1467 regcache_invalidate_thread (thread
);
1469 /* Finally, let it resume. */
1470 low_prepare_to_resume (lwp
);
1472 catch (const gdb_exception_error
&ex
)
1474 if (!check_ptrace_stopped_lwp_gone (lwp
))
1478 lwpid
= lwpid_of (thread
);
1479 if (ptrace (PTRACE_DETACH
, lwpid
, (PTRACE_TYPE_ARG3
) 0,
1480 (PTRACE_TYPE_ARG4
) (long) sig
) < 0)
1482 int save_errno
= errno
;
1484 /* We know the thread exists, so ESRCH must mean the lwp is
1485 zombie. This can happen if one of the already-detached
1486 threads exits the whole thread group. In that case we're
1487 still attached, and must reap the lwp. */
1488 if (save_errno
== ESRCH
)
1492 ret
= my_waitpid (lwpid
, &status
, __WALL
);
1495 warning (_("Couldn't reap LWP %d while detaching: %s"),
1496 lwpid
, safe_strerror (errno
));
1498 else if (!WIFEXITED (status
) && !WIFSIGNALED (status
))
1500 warning (_("Reaping LWP %d while detaching "
1501 "returned unexpected status 0x%x"),
1507 error (_("Can't detach %s: %s"),
1508 target_pid_to_str (ptid_of (thread
)).c_str (),
1509 safe_strerror (save_errno
));
1512 else if (debug_threads
)
1514 debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
1515 target_pid_to_str (ptid_of (thread
)).c_str (),
1523 linux_process_target::detach (process_info
*process
)
1525 struct lwp_info
*main_lwp
;
1527 /* As there's a step over already in progress, let it finish first,
1528 otherwise nesting a stabilize_threads operation on top gets real
1530 complete_ongoing_step_over ();
1532 /* Stop all threads before detaching. First, ptrace requires that
1533 the thread is stopped to successfully detach. Second, thread_db
1534 may need to uninstall thread event breakpoints from memory, which
1535 only works with a stopped process anyway. */
1536 stop_all_lwps (0, NULL
);
1538 #ifdef USE_THREAD_DB
1539 thread_db_detach (process
);
1542 /* Stabilize threads (move out of jump pads). */
1543 target_stabilize_threads ();
1545 /* Detach from the clone lwps first. If the thread group exits just
1546 while we're detaching, we must reap the clone lwps before we're
1547 able to reap the leader. */
1548 for_each_thread (process
->pid
, [this] (thread_info
*thread
)
1550 /* We don't actually detach from the thread group leader just yet.
1551 If the thread group exits, we must reap the zombie clone lwps
1552 before we're able to reap the leader. */
1553 if (thread
->id
.pid () == thread
->id
.lwp ())
1556 lwp_info
*lwp
= get_thread_lwp (thread
);
1557 detach_one_lwp (lwp
);
1560 main_lwp
= find_lwp_pid (ptid_t (process
->pid
));
1561 detach_one_lwp (main_lwp
);
1565 /* Since we presently can only stop all lwps of all processes, we
1566 need to unstop lwps of other processes. */
1567 unstop_all_lwps (0, NULL
);
1571 /* Remove all LWPs that belong to process PROC from the lwp list. */
1574 linux_process_target::mourn (process_info
*process
)
1576 struct process_info_private
*priv
;
1578 #ifdef USE_THREAD_DB
1579 thread_db_mourn (process
);
1582 for_each_thread (process
->pid
, [this] (thread_info
*thread
)
1584 delete_lwp (get_thread_lwp (thread
));
1587 /* Freeing all private data. */
1588 priv
= process
->priv
;
1589 low_delete_process (priv
->arch_private
);
1591 process
->priv
= NULL
;
1593 remove_process (process
);
1597 linux_process_target::join (int pid
)
1602 ret
= my_waitpid (pid
, &status
, 0);
1603 if (WIFEXITED (status
) || WIFSIGNALED (status
))
1605 } while (ret
!= -1 || errno
!= ECHILD
);
1608 /* Return true if the given thread is still alive. */
1611 linux_process_target::thread_alive (ptid_t ptid
)
1613 struct lwp_info
*lwp
= find_lwp_pid (ptid
);
1615 /* We assume we always know if a thread exits. If a whole process
1616 exited but we still haven't been able to report it to GDB, we'll
1617 hold on to the last lwp of the dead process. */
1619 return !lwp_is_marked_dead (lwp
);
1625 linux_process_target::thread_still_has_status_pending (thread_info
*thread
)
1627 struct lwp_info
*lp
= get_thread_lwp (thread
);
1629 if (!lp
->status_pending_p
)
1632 if (thread
->last_resume_kind
!= resume_stop
1633 && (lp
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
1634 || lp
->stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
))
1639 gdb_assert (lp
->last_status
!= 0);
1643 scoped_restore_current_thread restore_thread
;
1644 switch_to_thread (thread
);
1646 if (pc
!= lp
->stop_pc
)
1649 debug_printf ("PC of %ld changed\n",
1654 #if !USE_SIGTRAP_SIGINFO
1655 else if (lp
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
1656 && !low_breakpoint_at (pc
))
1659 debug_printf ("previous SW breakpoint of %ld gone\n",
1663 else if (lp
->stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
1664 && !hardware_breakpoint_inserted_here (pc
))
1667 debug_printf ("previous HW breakpoint of %ld gone\n",
1676 debug_printf ("discarding pending breakpoint status\n");
1677 lp
->status_pending_p
= 0;
1685 /* Returns true if LWP is resumed from the client's perspective. */
1688 lwp_resumed (struct lwp_info
*lwp
)
1690 struct thread_info
*thread
= get_lwp_thread (lwp
);
1692 if (thread
->last_resume_kind
!= resume_stop
)
1695 /* Did gdb send us a `vCont;t', but we haven't reported the
1696 corresponding stop to gdb yet? If so, the thread is still
1697 resumed/running from gdb's perspective. */
1698 if (thread
->last_resume_kind
== resume_stop
1699 && thread
->last_status
.kind () == TARGET_WAITKIND_IGNORE
)
1706 linux_process_target::status_pending_p_callback (thread_info
*thread
,
1709 struct lwp_info
*lp
= get_thread_lwp (thread
);
1711 /* Check if we're only interested in events from a specific process
1712 or a specific LWP. */
1713 if (!thread
->id
.matches (ptid
))
1716 if (!lwp_resumed (lp
))
1719 if (lp
->status_pending_p
1720 && !thread_still_has_status_pending (thread
))
1722 resume_one_lwp (lp
, lp
->stepping
, GDB_SIGNAL_0
, NULL
);
1726 return lp
->status_pending_p
;
1730 find_lwp_pid (ptid_t ptid
)
1732 thread_info
*thread
= find_thread ([&] (thread_info
*thr_arg
)
1734 int lwp
= ptid
.lwp () != 0 ? ptid
.lwp () : ptid
.pid ();
1735 return thr_arg
->id
.lwp () == lwp
;
1741 return get_thread_lwp (thread
);
1744 /* Return the number of known LWPs in the tgid given by PID. */
1751 for_each_thread (pid
, [&] (thread_info
*thread
)
1759 /* See nat/linux-nat.h. */
1762 iterate_over_lwps (ptid_t filter
,
1763 gdb::function_view
<iterate_over_lwps_ftype
> callback
)
1765 thread_info
*thread
= find_thread (filter
, [&] (thread_info
*thr_arg
)
1767 lwp_info
*lwp
= get_thread_lwp (thr_arg
);
1769 return callback (lwp
);
1775 return get_thread_lwp (thread
);
1779 linux_process_target::check_zombie_leaders ()
1781 for_each_process ([this] (process_info
*proc
) {
1782 pid_t leader_pid
= pid_of (proc
);
1783 struct lwp_info
*leader_lp
;
1785 leader_lp
= find_lwp_pid (ptid_t (leader_pid
));
1788 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1789 "num_lwps=%d, zombie=%d\n",
1790 leader_pid
, leader_lp
!= NULL
, num_lwps (leader_pid
),
1791 linux_proc_pid_is_zombie (leader_pid
));
1793 if (leader_lp
!= NULL
&& !leader_lp
->stopped
1794 /* Check if there are other threads in the group, as we may
1795 have raced with the inferior simply exiting. */
1796 && !last_thread_of_process_p (leader_pid
)
1797 && linux_proc_pid_is_zombie (leader_pid
))
1799 /* A leader zombie can mean one of two things:
1801 - It exited, and there's an exit status pending
1802 available, or only the leader exited (not the whole
1803 program). In the latter case, we can't waitpid the
1804 leader's exit status until all other threads are gone.
1806 - There are 3 or more threads in the group, and a thread
1807 other than the leader exec'd. On an exec, the Linux
1808 kernel destroys all other threads (except the execing
1809 one) in the thread group, and resets the execing thread's
1810 tid to the tgid. No exit notification is sent for the
1811 execing thread -- from the ptracer's perspective, it
1812 appears as though the execing thread just vanishes.
1813 Until we reap all other threads except the leader and the
1814 execing thread, the leader will be zombie, and the
1815 execing thread will be in `D (disc sleep)'. As soon as
1816 all other threads are reaped, the execing thread changes
1817 it's tid to the tgid, and the previous (zombie) leader
1818 vanishes, giving place to the "new" leader. We could try
1819 distinguishing the exit and exec cases, by waiting once
1820 more, and seeing if something comes out, but it doesn't
1821 sound useful. The previous leader _does_ go away, and
1822 we'll re-add the new one once we see the exec event
1823 (which is just the same as what would happen if the
1824 previous leader did exit voluntarily before some other
1828 debug_printf ("CZL: Thread group leader %d zombie "
1829 "(it exited, or another thread execd).\n",
1832 delete_lwp (leader_lp
);
1837 /* Callback for `find_thread'. Returns the first LWP that is not
1841 not_stopped_callback (thread_info
*thread
, ptid_t filter
)
1843 if (!thread
->id
.matches (filter
))
1846 lwp_info
*lwp
= get_thread_lwp (thread
);
1848 return !lwp
->stopped
;
1851 /* Increment LWP's suspend count. */
1854 lwp_suspended_inc (struct lwp_info
*lwp
)
1858 if (debug_threads
&& lwp
->suspended
> 4)
1860 struct thread_info
*thread
= get_lwp_thread (lwp
);
1862 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1863 " suspended=%d\n", lwpid_of (thread
), lwp
->suspended
);
1867 /* Decrement LWP's suspend count. */
1870 lwp_suspended_decr (struct lwp_info
*lwp
)
1874 if (lwp
->suspended
< 0)
1876 struct thread_info
*thread
= get_lwp_thread (lwp
);
1878 internal_error (__FILE__
, __LINE__
,
1879 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread
),
1884 /* This function should only be called if the LWP got a SIGTRAP.
1886 Handle any tracepoint steps or hits. Return true if a tracepoint
1887 event was handled, 0 otherwise. */
1890 handle_tracepoints (struct lwp_info
*lwp
)
1892 struct thread_info
*tinfo
= get_lwp_thread (lwp
);
1893 int tpoint_related_event
= 0;
1895 gdb_assert (lwp
->suspended
== 0);
1897 /* If this tracepoint hit causes a tracing stop, we'll immediately
1898 uninsert tracepoints. To do this, we temporarily pause all
1899 threads, unpatch away, and then unpause threads. We need to make
1900 sure the unpausing doesn't resume LWP too. */
1901 lwp_suspended_inc (lwp
);
1903 /* And we need to be sure that any all-threads-stopping doesn't try
1904 to move threads out of the jump pads, as it could deadlock the
1905 inferior (LWP could be in the jump pad, maybe even holding the
1908 /* Do any necessary step collect actions. */
1909 tpoint_related_event
|= tracepoint_finished_step (tinfo
, lwp
->stop_pc
);
1911 tpoint_related_event
|= handle_tracepoint_bkpts (tinfo
, lwp
->stop_pc
);
1913 /* See if we just hit a tracepoint and do its main collect
1915 tpoint_related_event
|= tracepoint_was_hit (tinfo
, lwp
->stop_pc
);
1917 lwp_suspended_decr (lwp
);
1919 gdb_assert (lwp
->suspended
== 0);
1920 gdb_assert (!stabilizing_threads
1921 || (lwp
->collecting_fast_tracepoint
1922 != fast_tpoint_collect_result::not_collecting
));
1924 if (tpoint_related_event
)
1927 debug_printf ("got a tracepoint event\n");
1934 fast_tpoint_collect_result
1935 linux_process_target::linux_fast_tracepoint_collecting
1936 (lwp_info
*lwp
, fast_tpoint_collect_status
*status
)
1938 CORE_ADDR thread_area
;
1939 struct thread_info
*thread
= get_lwp_thread (lwp
);
1941 /* Get the thread area address. This is used to recognize which
1942 thread is which when tracing with the in-process agent library.
1943 We don't read anything from the address, and treat it as opaque;
1944 it's the address itself that we assume is unique per-thread. */
1945 if (low_get_thread_area (lwpid_of (thread
), &thread_area
) == -1)
1946 return fast_tpoint_collect_result::not_collecting
;
1948 return fast_tracepoint_collecting (thread_area
, lwp
->stop_pc
, status
);
1952 linux_process_target::low_get_thread_area (int lwpid
, CORE_ADDR
*addrp
)
1958 linux_process_target::maybe_move_out_of_jump_pad (lwp_info
*lwp
, int *wstat
)
1960 scoped_restore_current_thread restore_thread
;
1961 switch_to_thread (get_lwp_thread (lwp
));
1964 || (WIFSTOPPED (*wstat
) && WSTOPSIG (*wstat
) != SIGTRAP
))
1965 && supports_fast_tracepoints ()
1966 && agent_loaded_p ())
1968 struct fast_tpoint_collect_status status
;
1971 debug_printf ("Checking whether LWP %ld needs to move out of the "
1973 lwpid_of (current_thread
));
1975 fast_tpoint_collect_result r
1976 = linux_fast_tracepoint_collecting (lwp
, &status
);
1979 || (WSTOPSIG (*wstat
) != SIGILL
1980 && WSTOPSIG (*wstat
) != SIGFPE
1981 && WSTOPSIG (*wstat
) != SIGSEGV
1982 && WSTOPSIG (*wstat
) != SIGBUS
))
1984 lwp
->collecting_fast_tracepoint
= r
;
1986 if (r
!= fast_tpoint_collect_result::not_collecting
)
1988 if (r
== fast_tpoint_collect_result::before_insn
1989 && lwp
->exit_jump_pad_bkpt
== NULL
)
1991 /* Haven't executed the original instruction yet.
1992 Set breakpoint there, and wait till it's hit,
1993 then single-step until exiting the jump pad. */
1994 lwp
->exit_jump_pad_bkpt
1995 = set_breakpoint_at (status
.adjusted_insn_addr
, NULL
);
1999 debug_printf ("Checking whether LWP %ld needs to move out of "
2000 "the jump pad...it does\n",
2001 lwpid_of (current_thread
));
2008 /* If we get a synchronous signal while collecting, *and*
2009 while executing the (relocated) original instruction,
2010 reset the PC to point at the tpoint address, before
2011 reporting to GDB. Otherwise, it's an IPA lib bug: just
2012 report the signal to GDB, and pray for the best. */
2014 lwp
->collecting_fast_tracepoint
2015 = fast_tpoint_collect_result::not_collecting
;
2017 if (r
!= fast_tpoint_collect_result::not_collecting
2018 && (status
.adjusted_insn_addr
<= lwp
->stop_pc
2019 && lwp
->stop_pc
< status
.adjusted_insn_addr_end
))
2022 struct regcache
*regcache
;
2024 /* The si_addr on a few signals references the address
2025 of the faulting instruction. Adjust that as
2027 if ((WSTOPSIG (*wstat
) == SIGILL
2028 || WSTOPSIG (*wstat
) == SIGFPE
2029 || WSTOPSIG (*wstat
) == SIGBUS
2030 || WSTOPSIG (*wstat
) == SIGSEGV
)
2031 && ptrace (PTRACE_GETSIGINFO
, lwpid_of (current_thread
),
2032 (PTRACE_TYPE_ARG3
) 0, &info
) == 0
2033 /* Final check just to make sure we don't clobber
2034 the siginfo of non-kernel-sent signals. */
2035 && (uintptr_t) info
.si_addr
== lwp
->stop_pc
)
2037 info
.si_addr
= (void *) (uintptr_t) status
.tpoint_addr
;
2038 ptrace (PTRACE_SETSIGINFO
, lwpid_of (current_thread
),
2039 (PTRACE_TYPE_ARG3
) 0, &info
);
2042 regcache
= get_thread_regcache (current_thread
, 1);
2043 low_set_pc (regcache
, status
.tpoint_addr
);
2044 lwp
->stop_pc
= status
.tpoint_addr
;
2046 /* Cancel any fast tracepoint lock this thread was
2048 force_unlock_trace_buffer ();
2051 if (lwp
->exit_jump_pad_bkpt
!= NULL
)
2054 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2055 "stopping all threads momentarily.\n");
2057 stop_all_lwps (1, lwp
);
2059 delete_breakpoint (lwp
->exit_jump_pad_bkpt
);
2060 lwp
->exit_jump_pad_bkpt
= NULL
;
2062 unstop_all_lwps (1, lwp
);
2064 gdb_assert (lwp
->suspended
>= 0);
2070 debug_printf ("Checking whether LWP %ld needs to move out of the "
2072 lwpid_of (current_thread
));
2077 /* Enqueue one signal in the "signals to report later when out of the
2081 enqueue_one_deferred_signal (struct lwp_info
*lwp
, int *wstat
)
2083 struct thread_info
*thread
= get_lwp_thread (lwp
);
2086 debug_printf ("Deferring signal %d for LWP %ld.\n",
2087 WSTOPSIG (*wstat
), lwpid_of (thread
));
2091 for (const auto &sig
: lwp
->pending_signals_to_report
)
2092 debug_printf (" Already queued %d\n",
2095 debug_printf (" (no more currently queued signals)\n");
2098 /* Don't enqueue non-RT signals if they are already in the deferred
2099 queue. (SIGSTOP being the easiest signal to see ending up here
2101 if (WSTOPSIG (*wstat
) < __SIGRTMIN
)
2103 for (const auto &sig
: lwp
->pending_signals_to_report
)
2105 if (sig
.signal
== WSTOPSIG (*wstat
))
2108 debug_printf ("Not requeuing already queued non-RT signal %d"
2117 lwp
->pending_signals_to_report
.emplace_back (WSTOPSIG (*wstat
));
2119 ptrace (PTRACE_GETSIGINFO
, lwpid_of (thread
), (PTRACE_TYPE_ARG3
) 0,
2120 &lwp
->pending_signals_to_report
.back ().info
);
2123 /* Dequeue one signal from the "signals to report later when out of
2124 the jump pad" list. */
2127 dequeue_one_deferred_signal (struct lwp_info
*lwp
, int *wstat
)
2129 struct thread_info
*thread
= get_lwp_thread (lwp
);
2131 if (!lwp
->pending_signals_to_report
.empty ())
2133 const pending_signal
&p_sig
= lwp
->pending_signals_to_report
.front ();
2135 *wstat
= W_STOPCODE (p_sig
.signal
);
2136 if (p_sig
.info
.si_signo
!= 0)
2137 ptrace (PTRACE_SETSIGINFO
, lwpid_of (thread
), (PTRACE_TYPE_ARG3
) 0,
2140 lwp
->pending_signals_to_report
.pop_front ();
2143 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2144 WSTOPSIG (*wstat
), lwpid_of (thread
));
2148 for (const auto &sig
: lwp
->pending_signals_to_report
)
2149 debug_printf (" Still queued %d\n",
2152 debug_printf (" (no more queued signals)\n");
2162 linux_process_target::check_stopped_by_watchpoint (lwp_info
*child
)
2164 scoped_restore_current_thread restore_thread
;
2165 switch_to_thread (get_lwp_thread (child
));
2167 if (low_stopped_by_watchpoint ())
2169 child
->stop_reason
= TARGET_STOPPED_BY_WATCHPOINT
;
2170 child
->stopped_data_address
= low_stopped_data_address ();
2173 return child
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
;
2177 linux_process_target::low_stopped_by_watchpoint ()
2183 linux_process_target::low_stopped_data_address ()
2188 /* Return the ptrace options that we want to try to enable. */
2191 linux_low_ptrace_options (int attached
)
2193 client_state
&cs
= get_client_state ();
2197 options
|= PTRACE_O_EXITKILL
;
2199 if (cs
.report_fork_events
)
2200 options
|= PTRACE_O_TRACEFORK
;
2202 if (cs
.report_vfork_events
)
2203 options
|= (PTRACE_O_TRACEVFORK
| PTRACE_O_TRACEVFORKDONE
);
2205 if (cs
.report_exec_events
)
2206 options
|= PTRACE_O_TRACEEXEC
;
2208 options
|= PTRACE_O_TRACESYSGOOD
;
2214 linux_process_target::filter_event (int lwpid
, int wstat
)
2216 client_state
&cs
= get_client_state ();
2217 struct lwp_info
*child
;
2218 struct thread_info
*thread
;
2219 int have_stop_pc
= 0;
2221 child
= find_lwp_pid (ptid_t (lwpid
));
2223 /* Check for stop events reported by a process we didn't already
2224 know about - anything not already in our LWP list.
2226 If we're expecting to receive stopped processes after
2227 fork, vfork, and clone events, then we'll just add the
2228 new one to our list and go back to waiting for the event
2229 to be reported - the stopped process might be returned
2230 from waitpid before or after the event is.
2232 But note the case of a non-leader thread exec'ing after the
2233 leader having exited, and gone from our lists (because
2234 check_zombie_leaders deleted it). The non-leader thread
2235 changes its tid to the tgid. */
2237 if (WIFSTOPPED (wstat
) && child
== NULL
&& WSTOPSIG (wstat
) == SIGTRAP
2238 && linux_ptrace_get_extended_event (wstat
) == PTRACE_EVENT_EXEC
)
2242 /* A multi-thread exec after we had seen the leader exiting. */
2245 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2246 "after exec.\n", lwpid
);
2249 child_ptid
= ptid_t (lwpid
, lwpid
);
2250 child
= add_lwp (child_ptid
);
2252 switch_to_thread (child
->thread
);
2255 /* If we didn't find a process, one of two things presumably happened:
2256 - A process we started and then detached from has exited. Ignore it.
2257 - A process we are controlling has forked and the new child's stop
2258 was reported to us by the kernel. Save its PID. */
2259 if (child
== NULL
&& WIFSTOPPED (wstat
))
2261 add_to_pid_list (&stopped_pids
, lwpid
, wstat
);
2264 else if (child
== NULL
)
2267 thread
= get_lwp_thread (child
);
2271 child
->last_status
= wstat
;
2273 /* Check if the thread has exited. */
2274 if ((WIFEXITED (wstat
) || WIFSIGNALED (wstat
)))
2277 debug_printf ("LLFE: %d exited.\n", lwpid
);
2279 if (finish_step_over (child
))
2281 /* Unsuspend all other LWPs, and set them back running again. */
2282 unsuspend_all_lwps (child
);
2285 /* If there is at least one more LWP, then the exit signal was
2286 not the end of the debugged application and should be
2287 ignored, unless GDB wants to hear about thread exits. */
2288 if (cs
.report_thread_events
2289 || last_thread_of_process_p (pid_of (thread
)))
2291 /* Since events are serialized to GDB core, and we can't
2292 report this one right now. Leave the status pending for
2293 the next time we're able to report it. */
2294 mark_lwp_dead (child
, wstat
);
2304 gdb_assert (WIFSTOPPED (wstat
));
2306 if (WIFSTOPPED (wstat
))
2308 struct process_info
*proc
;
2310 /* Architecture-specific setup after inferior is running. */
2311 proc
= find_process_pid (pid_of (thread
));
2312 if (proc
->tdesc
== NULL
)
2316 /* This needs to happen after we have attached to the
2317 inferior and it is stopped for the first time, but
2318 before we access any inferior registers. */
2319 arch_setup_thread (thread
);
2323 /* The process is started, but GDBserver will do
2324 architecture-specific setup after the program stops at
2325 the first instruction. */
2326 child
->status_pending_p
= 1;
2327 child
->status_pending
= wstat
;
2333 if (WIFSTOPPED (wstat
) && child
->must_set_ptrace_flags
)
2335 struct process_info
*proc
= find_process_pid (pid_of (thread
));
2336 int options
= linux_low_ptrace_options (proc
->attached
);
2338 linux_enable_event_reporting (lwpid
, options
);
2339 child
->must_set_ptrace_flags
= 0;
2342 /* Always update syscall_state, even if it will be filtered later. */
2343 if (WIFSTOPPED (wstat
) && WSTOPSIG (wstat
) == SYSCALL_SIGTRAP
)
2345 child
->syscall_state
2346 = (child
->syscall_state
== TARGET_WAITKIND_SYSCALL_ENTRY
2347 ? TARGET_WAITKIND_SYSCALL_RETURN
2348 : TARGET_WAITKIND_SYSCALL_ENTRY
);
2352 /* Almost all other ptrace-stops are known to be outside of system
2353 calls, with further exceptions in handle_extended_wait. */
2354 child
->syscall_state
= TARGET_WAITKIND_IGNORE
;
2357 /* Be careful to not overwrite stop_pc until save_stop_reason is
2359 if (WIFSTOPPED (wstat
) && WSTOPSIG (wstat
) == SIGTRAP
2360 && linux_is_extended_waitstatus (wstat
))
2362 child
->stop_pc
= get_pc (child
);
2363 if (handle_extended_wait (&child
, wstat
))
2365 /* The event has been handled, so just return without
2371 if (linux_wstatus_maybe_breakpoint (wstat
))
2373 if (save_stop_reason (child
))
2378 child
->stop_pc
= get_pc (child
);
2380 if (WIFSTOPPED (wstat
) && WSTOPSIG (wstat
) == SIGSTOP
2381 && child
->stop_expected
)
2384 debug_printf ("Expected stop.\n");
2385 child
->stop_expected
= 0;
2387 if (thread
->last_resume_kind
== resume_stop
)
2389 /* We want to report the stop to the core. Treat the
2390 SIGSTOP as a normal event. */
2392 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2393 target_pid_to_str (ptid_of (thread
)).c_str ());
2395 else if (stopping_threads
!= NOT_STOPPING_THREADS
)
2397 /* Stopping threads. We don't want this SIGSTOP to end up
2400 debug_printf ("LLW: SIGSTOP caught for %s "
2401 "while stopping threads.\n",
2402 target_pid_to_str (ptid_of (thread
)).c_str ());
2407 /* This is a delayed SIGSTOP. Filter out the event. */
2409 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2410 child
->stepping
? "step" : "continue",
2411 target_pid_to_str (ptid_of (thread
)).c_str ());
2413 resume_one_lwp (child
, child
->stepping
, 0, NULL
);
2418 child
->status_pending_p
= 1;
2419 child
->status_pending
= wstat
;
2424 linux_process_target::maybe_hw_step (thread_info
*thread
)
2426 if (supports_hardware_single_step ())
2430 /* GDBserver must insert single-step breakpoint for software
2432 gdb_assert (has_single_step_breakpoints (thread
));
2438 linux_process_target::resume_stopped_resumed_lwps (thread_info
*thread
)
2440 struct lwp_info
*lp
= get_thread_lwp (thread
);
2444 && !lp
->status_pending_p
2445 && thread
->last_status
.kind () == TARGET_WAITKIND_IGNORE
)
2449 if (thread
->last_resume_kind
== resume_step
)
2450 step
= maybe_hw_step (thread
);
2453 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2454 target_pid_to_str (ptid_of (thread
)).c_str (),
2455 paddress (lp
->stop_pc
),
2458 resume_one_lwp (lp
, step
, GDB_SIGNAL_0
, NULL
);
/* Wait for an event from any LWP matching FILTER_PTID, while keeping
   LWPs in WAIT_PTID eligible to keep running.  First checks for an
   already-pending status; otherwise blocks SIGCHLD, drains waitpid
   with WNOHANG in a loop (filtering every event), then sigsuspends
   until a SIGCHLD arrives.  Returns the lwpid of the event thread and
   stores the wait status in *WSTATP.
   NOTE(review): the FILTER_PTID parameter declaration itself is on a
   missing line of this shredded extract — the body clearly uses
   filter_ptid; confirm the full signature against the original.  */
2463 linux_process_target::wait_for_event_filtered (ptid_t wait_ptid
,
2465 int *wstatp
, int options
)
2467 struct thread_info
*event_thread
;
2468 struct lwp_info
*event_child
, *requested_child
;
2469 sigset_t block_mask
, prev_mask
;
2472 /* N.B. event_thread points to the thread_info struct that contains
2473 event_child. Keep them in sync. */
2474 event_thread
= NULL
;
2476 requested_child
= NULL
;
2478 /* Check for a lwp with a pending status. */
2480 if (filter_ptid
== minus_one_ptid
|| filter_ptid
.is_pid ())
2482 event_thread
= find_thread_in_random ([&] (thread_info
*thread
)
2484 return status_pending_p_callback (thread
, filter_ptid
);
2487 if (event_thread
!= NULL
)
2488 event_child
= get_thread_lwp (event_thread
);
2489 if (debug_threads
&& event_thread
)
2490 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread
));
2492 else if (filter_ptid
!= null_ptid
)
/* A specific LWP was requested; look it up directly.  */
2494 requested_child
= find_lwp_pid (filter_ptid
);
2496 if (stopping_threads
== NOT_STOPPING_THREADS
2497 && requested_child
->status_pending_p
2498 && (requested_child
->collecting_fast_tracepoint
2499 != fast_tpoint_collect_result::not_collecting
))
/* Mid fast-tracepoint collection: defer the pending signal and let
   the LWP finish getting out of the jump pad first.  */
2501 enqueue_one_deferred_signal (requested_child
,
2502 &requested_child
->status_pending
);
2503 requested_child
->status_pending_p
= 0;
2504 requested_child
->status_pending
= 0;
2505 resume_one_lwp (requested_child
, 0, 0, NULL
);
2508 if (requested_child
->suspended
2509 && requested_child
->status_pending_p
)
2511 internal_error (__FILE__
, __LINE__
,
2512 "requesting an event out of a"
2513 " suspended child?");
2516 if (requested_child
->status_pending_p
)
2518 event_child
= requested_child
;
2519 event_thread
= get_lwp_thread (event_child
);
2523 if (event_child
!= NULL
)
/* Fast path: consume the already-pending status and return.  */
2526 debug_printf ("Got an event from pending child %ld (%04x)\n",
2527 lwpid_of (event_thread
), event_child
->status_pending
);
2528 *wstatp
= event_child
->status_pending
;
2529 event_child
->status_pending_p
= 0;
2530 event_child
->status_pending
= 0;
2531 switch_to_thread (event_thread
);
2532 return lwpid_of (event_thread
);
2535 /* But if we don't find a pending event, we'll have to wait.
2537 We only enter this loop if no process has a pending wait status.
2538 Thus any action taken in response to a wait status inside this
2539 loop is responding as soon as we detect the status, not after any
2542 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2543 all signals while here. */
2544 sigfillset (&block_mask
);
2545 gdb_sigmask (SIG_BLOCK
, &block_mask
, &prev_mask
);
2547 /* Always pull all events out of the kernel. We'll randomly select
2548 an event LWP out of all that have events, to prevent
2550 while (event_child
== NULL
)
2554 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2557 - If the thread group leader exits while other threads in the
2558 thread group still exist, waitpid(TGID, ...) hangs. That
2559 waitpid won't return an exit status until the other threads
2560 in the group are reaped.
2562 - When a non-leader thread execs, that thread just vanishes
2563 without reporting an exit (so we'd hang if we waited for it
2564 explicitly in that case). The exec event is reported to
2567 ret
= my_waitpid (-1, wstatp
, options
| WNOHANG
);
2570 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2571 ret
, errno
? safe_strerror (errno
) : "ERRNO-OK");
2577 debug_printf ("LLW: waitpid %ld received %s\n",
2578 (long) ret
, status_to_str (*wstatp
).c_str ());
2581 /* Filter all events. IOW, leave all events pending. We'll
2582 randomly select an event LWP out of all that have events
2584 filter_event (ret
, *wstatp
);
2585 /* Retry until nothing comes out of waitpid. A single
2586 SIGCHLD can indicate more than one child stopped. */
2590 /* Now that we've pulled all events out of the kernel, resume
2591 LWPs that don't have an interesting event to report. */
2592 if (stopping_threads
== NOT_STOPPING_THREADS
)
2593 for_each_thread ([this] (thread_info
*thread
)
2595 resume_stopped_resumed_lwps (thread
);
2598 /* ... and find an LWP with a status to report to the core, if
2600 event_thread
= find_thread_in_random ([&] (thread_info
*thread
)
2602 return status_pending_p_callback (thread
, filter_ptid
);
2605 if (event_thread
!= NULL
)
2607 event_child
= get_thread_lwp (event_thread
);
2608 *wstatp
= event_child
->status_pending
;
2609 event_child
->status_pending_p
= 0;
2610 event_child
->status_pending
= 0;
2614 /* Check for zombie thread group leaders. Those can't be reaped
2615 until all other threads in the thread group are. */
2616 check_zombie_leaders ();
2618 auto not_stopped
= [&] (thread_info
*thread
)
2620 return not_stopped_callback (thread
, wait_ptid
);
2623 /* If there are no resumed children left in the set of LWPs we
2624 want to wait for, bail. We can't just block in
2625 waitpid/sigsuspend, because lwps might have been left stopped
2626 in trace-stop state, and we'd be stuck forever waiting for
2627 their status to change (which would only happen if we resumed
2628 them). Even if WNOHANG is set, this return code is preferred
2629 over 0 (below), as it is more detailed. */
2630 if (find_thread (not_stopped
) == NULL
)
2633 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2634 gdb_sigmask (SIG_SETMASK
, &prev_mask
, NULL
);
2638 /* No interesting event to report to the caller. */
2639 if ((options
& WNOHANG
))
2642 debug_printf ("WNOHANG set, no event found\n");
2644 gdb_sigmask (SIG_SETMASK
, &prev_mask
, NULL
);
2648 /* Block until we get an event reported with SIGCHLD. */
2650 debug_printf ("sigsuspend'ing\n");
2652 sigsuspend (&prev_mask
);
2653 gdb_sigmask (SIG_SETMASK
, &prev_mask
, NULL
);
/* Restore the original signal mask before returning.  */
2657 gdb_sigmask (SIG_SETMASK
, &prev_mask
, NULL
);
2659 switch_to_thread (event_thread
);
2661 return lwpid_of (event_thread
);
/* Wait for an event from any LWP matching PTID, allowing the same set
   to keep running: a thin wrapper that passes PTID as both the wait
   set and the filter set of wait_for_event_filtered.  */
2665 linux_process_target::wait_for_event (ptid_t ptid
, int *wstatp
, int options
)
2667 return wait_for_event_filtered (ptid
, ptid
, wstatp
, options
);
/* Select one LWP out of those that have events pending. */
/* On return, *ORIG_LP is switched to the chosen event LWP.
   Preference order: a single-stepped LWP with a pending status (there
   is at most one in all-stop); otherwise a random resumed LWP with a
   pending event, to avoid starvation.  */
2673 select_event_lwp (struct lwp_info
**orig_lp
)
2675 struct thread_info
*event_thread
= NULL
;
2677 /* In all-stop, give preference to the LWP that is being
2678 single-stepped. There will be at most one, and it's the LWP that
2679 the core is most interested in. If we didn't do this, then we'd
2680 have to handle pending step SIGTRAPs somehow in case the core
2681 later continues the previously-stepped thread, otherwise we'd
2682 report the pending SIGTRAP, and the core, not having stepped the
2683 thread, wouldn't understand what the trap was for, and therefore
2684 would report it to the user as a random signal. */
2687 event_thread
= find_thread ([] (thread_info
*thread
)
2689 lwp_info
*lp
= get_thread_lwp (thread
);
2691 return (thread
->last_status
.kind () == TARGET_WAITKIND_IGNORE
2692 && thread
->last_resume_kind
== resume_step
2693 && lp
->status_pending_p
);
2696 if (event_thread
!= NULL
)
2699 debug_printf ("SEL: Select single-step %s\n",
2700 target_pid_to_str (ptid_of (event_thread
)).c_str ());
2703 if (event_thread
== NULL
)
2705 /* No single-stepping LWP. Select one at random, out of those
2706 which have had events. */
2708 event_thread
= find_thread_in_random ([&] (thread_info
*thread
)
2710 lwp_info
*lp
= get_thread_lwp (thread
);
2712 /* Only resumed LWPs that have an event pending. */
2713 return (thread
->last_status
.kind () == TARGET_WAITKIND_IGNORE
2714 && lp
->status_pending_p
);
2718 if (event_thread
!= NULL
)
2720 struct lwp_info
*event_lp
= get_thread_lwp (event_thread
);
2722 /* Switch the event LWP. */
2723 *orig_lp
= event_lp
;
2727 /* Decrement the suspend count of all LWPs, except EXCEPT, if non
/* NULL.  Iterates every thread and drops its LWP's suspend count by
   one; the EXCEPT filter is on a missing line of this extract.  */
2731 unsuspend_all_lwps (struct lwp_info
*except
)
2733 for_each_thread ([&] (thread_info
*thread
)
2735 lwp_info
*lwp
= get_thread_lwp (thread
);
2738 lwp_suspended_decr (lwp
);
/* Forward declaration: predicate used below to detect LWPs that are
   still running (not yet stopped out of their jump pads).  */
2742 static bool lwp_running (thread_info
*thread
);
2744 /* Stabilize threads (move out of jump pads).
2746 If a thread is midway collecting a fast tracepoint, we need to
2747 finish the collection and move it out of the jump pad before
2748 reporting the signal.
2750 This avoids recursion while collecting (when a signal arrives
2751 midway, and the signal handler itself collects), which would trash
2752 the trace buffer. In case the user set a breakpoint in a signal
2753 handler, this avoids the backtrace showing the jump pad, etc..
2754 Most importantly, there are certain things we can't do safely if
2755 threads are stopped in a jump pad (or in its callee's). For
2758 - starting a new trace run. A thread still collecting the
2759 previous run, could trash the trace buffer when resumed. The trace
2760 buffer control structures would have been reset but the thread had
2761 no way to tell. The thread could even midway memcpy'ing to the
2762 buffer, which would mean that when resumed, it would clobber the
2763 trace buffer that had been set for a new run.
2765 - we can't rewrite/reuse the jump pads for new tracepoints
2766 safely. Say you do tstart while a thread is stopped midway while
2767 collecting. When the thread is later resumed, it finishes the
2768 collection, and returns to the jump pad, to execute the original
2769 instruction that was under the tracepoint jump at the time the
2770 older run had been started. If the jump pad had been rewritten
2771 since for something else in the new run, the thread would now
2772 execute the wrong / random instructions. */
2775 linux_process_target::stabilize_threads ()
2777 thread_info
*thread_stuck
= find_thread ([this] (thread_info
*thread
)
2779 return stuck_in_jump_pad (thread
);
2782 if (thread_stuck
!= NULL
)
2785 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2786 lwpid_of (thread_stuck
));
2790 scoped_restore_current_thread restore_thread
;
/* Flag that we're stabilizing, so wait_1 knows not to report stops.  */
2792 stabilizing_threads
= 1;
2795 for_each_thread ([this] (thread_info
*thread
)
2797 move_out_of_jump_pad (thread
);
2800 /* Loop until all are stopped out of the jump pads. */
2801 while (find_thread (lwp_running
) != NULL
)
2803 struct target_waitstatus ourstatus
;
2804 struct lwp_info
*lwp
;
2807 /* Note that we go through the full wait event loop. While
2808 moving threads out of jump pad, we need to be able to step
2809 over internal breakpoints and such. */
2810 wait_1 (minus_one_ptid
, &ourstatus
, 0);
2812 if (ourstatus
.kind () == TARGET_WAITKIND_STOPPED
)
2814 lwp
= get_thread_lwp (current_thread
);
/* Keep the stopped LWP suspended while stabilization continues.  */
2817 lwp_suspended_inc (lwp
);
2819 if (ourstatus
.sig () != GDB_SIGNAL_0
2820 || current_thread
->last_resume_kind
== resume_stop
)
2822 wstat
= W_STOPCODE (gdb_signal_to_host (ourstatus
.sig ()));
2823 enqueue_one_deferred_signal (lwp
, &wstat
);
2828 unsuspend_all_lwps (NULL
);
2830 stabilizing_threads
= 0;
/* Sanity-check: warn if any thread is still stuck in a jump pad.  */
2834 thread_stuck
= find_thread ([this] (thread_info
*thread
)
2836 return stuck_in_jump_pad (thread
);
2839 if (thread_stuck
!= NULL
)
2840 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2841 lwpid_of (thread_stuck
));
2845 /* Convenience function that is called when the kernel reports an
2846 event that is not passed out to GDB. */
/* Sets *OURSTATUS to "ignore" so the caller's wait loop spins again;
   presumably returns null_ptid (return statement not visible in this
   shredded extract — confirm).  */
2849 ignore_event (struct target_waitstatus
*ourstatus
)
2851 /* If we got an event, there may still be others, as a single
2852 SIGCHLD can indicate more than one child stopped. This forces
2853 another target_wait call. */
2856 ourstatus
->set_ignore ();
/* Filter an exit event for EVENT_CHILD.  If EVENT_CHILD is not the
   last thread of its process, the exit is reported as a thread exit
   (when the client asked for thread events) or swallowed entirely,
   and the LWP is deleted.  */
2861 linux_process_target::filter_exit_event (lwp_info
*event_child
,
2862 target_waitstatus
*ourstatus
)
2864 client_state
&cs
= get_client_state ();
2865 struct thread_info
*thread
= get_lwp_thread (event_child
);
2866 ptid_t ptid
= ptid_of (thread
);
2868 if (!last_thread_of_process_p (pid_of (thread
)))
2870 if (cs
.report_thread_events
)
2871 ourstatus
->set_thread_exited (0);
2873 ourstatus
->set_ignore ();
2875 delete_lwp (event_child
);
2880 /* Returns 1 if GDB is interested in any event_child syscalls. */
/* True when the owning process has a non-empty syscalls_to_catch
   list.  */
2883 gdb_catching_syscalls_p (struct lwp_info
*event_child
)
2885 struct thread_info
*thread
= get_lwp_thread (event_child
);
2886 struct process_info
*proc
= get_thread_process (thread
);
2888 return !proc
->syscalls_to_catch
.empty ();
/* Decide whether GDB asked to catch the syscall EVENT_CHILD just
   stopped at: no list means no; ANY_SYSCALL as the first entry means
   yes; otherwise look up the trapped syscall number in the list.
   NOTE(review): loop body/returns fall on missing lines of this
   extract.  */
2892 linux_process_target::gdb_catch_this_syscall (lwp_info
*event_child
)
2895 struct thread_info
*thread
= get_lwp_thread (event_child
);
2896 struct process_info
*proc
= get_thread_process (thread
);
2898 if (proc
->syscalls_to_catch
.empty ())
2901 if (proc
->syscalls_to_catch
[0] == ANY_SYSCALL
)
2904 get_syscall_trapinfo (event_child
, &sysno
);
2906 for (int iter
: proc
->syscalls_to_catch
)
/* Core of the Linux target's wait: wait for an event on PTID, decide
   whether it is internal (gdbserver breakpoint, step-over, tracepoint
   collection, ignored signal) or reportable, and fill *OURSTATUS for
   the event that gets reported to GDB.  Returns the ptid of the event
   thread, or null_ptid when nothing is reportable yet.
   NOTE(review): this is a line-shredded extract; many interior lines
   (braces, returns, some conditions) are missing, so only comments
   have been added — the code text is untouched.  */
2914 linux_process_target::wait_1 (ptid_t ptid
, target_waitstatus
*ourstatus
,
2915 target_wait_flags target_options
)
2917 client_state
&cs
= get_client_state ();
2919 struct lwp_info
*event_child
;
2922 int step_over_finished
;
2923 int bp_explains_trap
;
2924 int maybe_internal_trap
;
2933 debug_printf ("wait_1: [%s]\n", target_pid_to_str (ptid
).c_str ());
2936 /* Translate generic target options into linux options. */
2938 if (target_options
& TARGET_WNOHANG
)
2941 bp_explains_trap
= 0;
2944 ourstatus
->set_ignore ();
2946 auto status_pending_p_any
= [&] (thread_info
*thread
)
2948 return status_pending_p_callback (thread
, minus_one_ptid
);
2951 auto not_stopped
= [&] (thread_info
*thread
)
2953 return not_stopped_callback (thread
, minus_one_ptid
);
2956 /* Find a resumed LWP, if any. */
2957 if (find_thread (status_pending_p_any
) != NULL
)
2959 else if (find_thread (not_stopped
) != NULL
)
2964 if (step_over_bkpt
== null_ptid
)
2965 pid
= wait_for_event (ptid
, &w
, options
);
2969 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2970 target_pid_to_str (step_over_bkpt
).c_str ());
/* While a step-over is in flight, wait only for that thread and
   never with WNOHANG.  */
2971 pid
= wait_for_event (step_over_bkpt
, &w
, options
& ~WNOHANG
);
2974 if (pid
== 0 || (pid
== -1 && !any_resumed
))
2976 gdb_assert (target_options
& TARGET_WNOHANG
);
2980 debug_printf ("wait_1 ret = null_ptid, "
2981 "TARGET_WAITKIND_IGNORE\n");
2985 ourstatus
->set_ignore ();
2992 debug_printf ("wait_1 ret = null_ptid, "
2993 "TARGET_WAITKIND_NO_RESUMED\n");
2997 ourstatus
->set_no_resumed ();
3001 event_child
= get_thread_lwp (current_thread
);
3003 /* wait_for_event only returns an exit status for the last
3004 child of a process. Report it. */
3005 if (WIFEXITED (w
) || WIFSIGNALED (w
))
3009 ourstatus
->set_exited (WEXITSTATUS (w
));
3013 debug_printf ("wait_1 ret = %s, exited with "
3015 target_pid_to_str (ptid_of (current_thread
)).c_str (),
3022 ourstatus
->set_signalled (gdb_signal_from_host (WTERMSIG (w
)));
3026 debug_printf ("wait_1 ret = %s, terminated with "
3028 target_pid_to_str (ptid_of (current_thread
)).c_str (),
3034 if (ourstatus
->kind () == TARGET_WAITKIND_EXITED
)
3035 return filter_exit_event (event_child
, ourstatus
);
3037 return ptid_of (current_thread
);
3040 /* If step-over executes a breakpoint instruction, in the case of a
3041 hardware single step it means a gdb/gdbserver breakpoint had been
3042 planted on top of a permanent breakpoint, in the case of a software
3043 single step it may just mean that gdbserver hit the reinsert breakpoint.
3044 The PC has been adjusted by save_stop_reason to point at
3045 the breakpoint address.
3046 So in the case of the hardware single step advance the PC manually
3047 past the breakpoint and in the case of software single step advance only
3048 if it's not the single_step_breakpoint we are hitting.
3049 This avoids that a program would keep trapping a permanent breakpoint
3051 if (step_over_bkpt
!= null_ptid
3052 && event_child
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
3053 && (event_child
->stepping
3054 || !single_step_breakpoint_inserted_here (event_child
->stop_pc
)))
3056 int increment_pc
= 0;
3057 int breakpoint_kind
= 0;
3058 CORE_ADDR stop_pc
= event_child
->stop_pc
;
3060 breakpoint_kind
= breakpoint_kind_from_current_state (&stop_pc
);
3061 sw_breakpoint_from_kind (breakpoint_kind
, &increment_pc
);
3065 debug_printf ("step-over for %s executed software breakpoint\n",
3066 target_pid_to_str (ptid_of (current_thread
)).c_str ());
3069 if (increment_pc
!= 0)
3071 struct regcache
*regcache
3072 = get_thread_regcache (current_thread
, 1);
3074 event_child
->stop_pc
+= increment_pc
;
3075 low_set_pc (regcache
, event_child
->stop_pc
);
3077 if (!low_breakpoint_at (event_child
->stop_pc
))
3078 event_child
->stop_reason
= TARGET_STOPPED_BY_NO_REASON
;
3082 /* If this event was not handled before, and is not a SIGTRAP, we
3083 report it. SIGILL and SIGSEGV are also treated as traps in case
3084 a breakpoint is inserted at the current PC. If this target does
3085 not support internal breakpoints at all, we also report the
3086 SIGTRAP without further processing; it's of no concern to us. */
3088 = (low_supports_breakpoints ()
3089 && (WSTOPSIG (w
) == SIGTRAP
3090 || ((WSTOPSIG (w
) == SIGILL
3091 || WSTOPSIG (w
) == SIGSEGV
)
3092 && low_breakpoint_at (event_child
->stop_pc
))));
3094 if (maybe_internal_trap
)
3096 /* Handle anything that requires bookkeeping before deciding to
3097 report the event or continue waiting. */
3099 /* First check if we can explain the SIGTRAP with an internal
3100 breakpoint, or if we should possibly report the event to GDB.
3101 Do this before anything that may remove or insert a
3103 bp_explains_trap
= breakpoint_inserted_here (event_child
->stop_pc
);
3105 /* We have a SIGTRAP, possibly a step-over dance has just
3106 finished. If so, tweak the state machine accordingly,
3107 reinsert breakpoints and delete any single-step
3109 step_over_finished
= finish_step_over (event_child
);
3111 /* Now invoke the callbacks of any internal breakpoints there. */
3112 check_breakpoints (event_child
->stop_pc
);
3114 /* Handle tracepoint data collecting. This may overflow the
3115 trace buffer, and cause a tracing stop, removing
3117 trace_event
= handle_tracepoints (event_child
);
3119 if (bp_explains_trap
)
3122 debug_printf ("Hit a gdbserver breakpoint.\n");
3127 /* We have some other signal, possibly a step-over dance was in
3128 progress, and it should be cancelled too. */
3129 step_over_finished
= finish_step_over (event_child
);
3132 /* We have all the data we need. Either report the event to GDB, or
3133 resume threads and keep waiting for more. */
3135 /* If we're collecting a fast tracepoint, finish the collection and
3136 move out of the jump pad before delivering a signal. See
3137 linux_stabilize_threads. */
3140 && WSTOPSIG (w
) != SIGTRAP
3141 && supports_fast_tracepoints ()
3142 && agent_loaded_p ())
3145 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3146 "to defer or adjust it.\n",
3147 WSTOPSIG (w
), lwpid_of (current_thread
));
3149 /* Allow debugging the jump pad itself. */
3150 if (current_thread
->last_resume_kind
!= resume_step
3151 && maybe_move_out_of_jump_pad (event_child
, &w
))
3153 enqueue_one_deferred_signal (event_child
, &w
);
3156 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3157 WSTOPSIG (w
), lwpid_of (current_thread
));
3159 resume_one_lwp (event_child
, 0, 0, NULL
);
3163 return ignore_event (ourstatus
);
3167 if (event_child
->collecting_fast_tracepoint
3168 != fast_tpoint_collect_result::not_collecting
)
3171 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3172 "Check if we're already there.\n",
3173 lwpid_of (current_thread
),
3174 (int) event_child
->collecting_fast_tracepoint
);
3178 event_child
->collecting_fast_tracepoint
3179 = linux_fast_tracepoint_collecting (event_child
, NULL
);
3181 if (event_child
->collecting_fast_tracepoint
3182 != fast_tpoint_collect_result::before_insn
)
3184 /* No longer need this breakpoint. */
3185 if (event_child
->exit_jump_pad_bkpt
!= NULL
)
3188 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
3189 "stopping all threads momentarily.\n");
3191 /* Other running threads could hit this breakpoint.
3192 We don't handle moribund locations like GDB does,
3193 instead we always pause all threads when removing
3194 breakpoints, so that any step-over or
3195 decr_pc_after_break adjustment is always taken
3196 care of while the breakpoint is still
3198 stop_all_lwps (1, event_child
);
3200 delete_breakpoint (event_child
->exit_jump_pad_bkpt
);
3201 event_child
->exit_jump_pad_bkpt
= NULL
;
3203 unstop_all_lwps (1, event_child
);
3205 gdb_assert (event_child
->suspended
>= 0);
3209 if (event_child
->collecting_fast_tracepoint
3210 == fast_tpoint_collect_result::not_collecting
)
3213 debug_printf ("fast tracepoint finished "
3214 "collecting successfully.\n");
3216 /* We may have a deferred signal to report. */
3217 if (dequeue_one_deferred_signal (event_child
, &w
))
3220 debug_printf ("dequeued one signal.\n");
3225 debug_printf ("no deferred signals.\n");
3227 if (stabilizing_threads
)
3229 ourstatus
->set_stopped (GDB_SIGNAL_0
);
3233 debug_printf ("wait_1 ret = %s, stopped "
3234 "while stabilizing threads\n",
3236 (ptid_of (current_thread
)).c_str ());
3240 return ptid_of (current_thread
);
3246 /* Check whether GDB would be interested in this event. */
3248 /* Check if GDB is interested in this syscall. */
3250 && WSTOPSIG (w
) == SYSCALL_SIGTRAP
3251 && !gdb_catch_this_syscall (event_child
))
3255 debug_printf ("Ignored syscall for LWP %ld.\n",
3256 lwpid_of (current_thread
));
3259 resume_one_lwp (event_child
, event_child
->stepping
, 0, NULL
);
3263 return ignore_event (ourstatus
);
3266 /* If GDB is not interested in this signal, don't stop other
3267 threads, and don't report it to GDB. Just resume the inferior
3268 right away. We do this for threading-related signals as well as
3269 any that GDB specifically requested we ignore. But never ignore
3270 SIGSTOP if we sent it ourselves, and do not ignore signals when
3271 stepping - they may require special handling to skip the signal
3272 handler. Also never ignore signals that could be caused by a
3275 && current_thread
->last_resume_kind
!= resume_step
3277 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3278 (current_process ()->priv
->thread_db
!= NULL
3279 && (WSTOPSIG (w
) == __SIGRTMIN
3280 || WSTOPSIG (w
) == __SIGRTMIN
+ 1))
3283 (cs
.pass_signals
[gdb_signal_from_host (WSTOPSIG (w
))]
3284 && !(WSTOPSIG (w
) == SIGSTOP
3285 && current_thread
->last_resume_kind
== resume_stop
)
3286 && !linux_wstatus_maybe_breakpoint (w
))))
3288 siginfo_t info
, *info_p
;
3291 debug_printf ("Ignored signal %d for LWP %ld.\n",
3292 WSTOPSIG (w
), lwpid_of (current_thread
));
3294 if (ptrace (PTRACE_GETSIGINFO
, lwpid_of (current_thread
),
3295 (PTRACE_TYPE_ARG3
) 0, &info
) == 0)
3300 if (step_over_finished
)
3302 /* We cancelled this thread's step-over above. We still
3303 need to unsuspend all other LWPs, and set them back
3304 running again while the signal handler runs. */
3305 unsuspend_all_lwps (event_child
);
3307 /* Enqueue the pending signal info so that proceed_all_lwps
3309 enqueue_pending_signal (event_child
, WSTOPSIG (w
), info_p
);
3311 proceed_all_lwps ();
3315 resume_one_lwp (event_child
, event_child
->stepping
,
3316 WSTOPSIG (w
), info_p
);
3322 return ignore_event (ourstatus
);
3325 /* Note that all addresses are always "out of the step range" when
3326 there's no range to begin with. */
3327 in_step_range
= lwp_in_step_range (event_child
);
3329 /* If GDB wanted this thread to single step, and the thread is out
3330 of the step range, we always want to report the SIGTRAP, and let
3331 GDB handle it. Watchpoints should always be reported. So should
3332 signals we can't explain. A SIGTRAP we can't explain could be a
3333 GDB breakpoint --- we may or not support Z0 breakpoints. If we
3334 do, we'll be able to handle GDB breakpoints on top of internal
3335 breakpoints, by handling the internal breakpoint and still
3336 reporting the event to GDB. If we don't, we're out of luck, GDB
3337 won't see the breakpoint hit. If we see a single-step event but
3338 the thread should be continuing, don't pass the trap to gdb.
3339 That indicates that we had previously finished a single-step but
3340 left the single-step pending -- see
3341 complete_ongoing_step_over. */
3342 report_to_gdb
= (!maybe_internal_trap
3343 || (current_thread
->last_resume_kind
== resume_step
3345 || event_child
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
3347 && !bp_explains_trap
3349 && !step_over_finished
3350 && !(current_thread
->last_resume_kind
== resume_continue
3351 && event_child
->stop_reason
== TARGET_STOPPED_BY_SINGLE_STEP
))
3352 || (gdb_breakpoint_here (event_child
->stop_pc
)
3353 && gdb_condition_true_at_breakpoint (event_child
->stop_pc
)
3354 && gdb_no_commands_at_breakpoint (event_child
->stop_pc
))
3355 || event_child
->waitstatus
.kind () != TARGET_WAITKIND_IGNORE
);
3357 run_breakpoint_commands (event_child
->stop_pc
);
3359 /* We found no reason GDB would want us to stop. We either hit one
3360 of our own breakpoints, or finished an internal step GDB
3361 shouldn't know about. */
3366 if (bp_explains_trap
)
3367 debug_printf ("Hit a gdbserver breakpoint.\n");
3368 if (step_over_finished
)
3369 debug_printf ("Step-over finished.\n");
3371 debug_printf ("Tracepoint event.\n");
3372 if (lwp_in_step_range (event_child
))
3373 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3374 paddress (event_child
->stop_pc
),
3375 paddress (event_child
->step_range_start
),
3376 paddress (event_child
->step_range_end
));
3379 /* We're not reporting this breakpoint to GDB, so apply the
3380 decr_pc_after_break adjustment to the inferior's regcache
3383 if (low_supports_breakpoints ())
3385 struct regcache
*regcache
3386 = get_thread_regcache (current_thread
, 1);
3387 low_set_pc (regcache
, event_child
->stop_pc
);
3390 if (step_over_finished
)
3392 /* If we have finished stepping over a breakpoint, we've
3393 stopped and suspended all LWPs momentarily except the
3394 stepping one. This is where we resume them all again.
3395 We're going to keep waiting, so use proceed, which
3396 handles stepping over the next breakpoint. */
3397 unsuspend_all_lwps (event_child
);
3401 /* Remove the single-step breakpoints if any. Note that
3402 there isn't single-step breakpoint if we finished stepping
3404 if (supports_software_single_step ()
3405 && has_single_step_breakpoints (current_thread
))
3407 stop_all_lwps (0, event_child
);
3408 delete_single_step_breakpoints (current_thread
);
3409 unstop_all_lwps (0, event_child
);
3414 debug_printf ("proceeding all threads.\n");
3415 proceed_all_lwps ();
3420 return ignore_event (ourstatus
);
3425 if (event_child
->waitstatus
.kind () != TARGET_WAITKIND_IGNORE
)
3426 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3427 lwpid_of (get_lwp_thread (event_child
)),
3428 event_child
->waitstatus
.to_string ().c_str ());
3429 if (current_thread
->last_resume_kind
== resume_step
)
3431 if (event_child
->step_range_start
== event_child
->step_range_end
)
3432 debug_printf ("GDB wanted to single-step, reporting event.\n");
3433 else if (!lwp_in_step_range (event_child
))
3434 debug_printf ("Out of step range, reporting event.\n");
3436 if (event_child
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
)
3437 debug_printf ("Stopped by watchpoint.\n");
3438 else if (gdb_breakpoint_here (event_child
->stop_pc
))
3439 debug_printf ("Stopped by GDB breakpoint.\n");
3441 debug_printf ("Hit a non-gdbserver trap event.\n");
3444 /* Alright, we're going to report a stop. */
3446 /* Remove single-step breakpoints. */
3447 if (supports_software_single_step ())
3449 /* Remove single-step breakpoints or not. If it is true, stop all
3450 lwps, so that other threads won't hit the breakpoint in the
3452 int remove_single_step_breakpoints_p
= 0;
3456 remove_single_step_breakpoints_p
3457 = has_single_step_breakpoints (current_thread
);
3461 /* In all-stop, a stop reply cancels all previous resume
3462 requests. Delete all single-step breakpoints. */
3464 find_thread ([&] (thread_info
*thread
) {
3465 if (has_single_step_breakpoints (thread
))
3467 remove_single_step_breakpoints_p
= 1;
3475 if (remove_single_step_breakpoints_p
)
3477 /* If we remove single-step breakpoints from memory, stop all lwps,
3478 so that other threads won't hit the breakpoint in the staled
3480 stop_all_lwps (0, event_child
);
3484 gdb_assert (has_single_step_breakpoints (current_thread
));
3485 delete_single_step_breakpoints (current_thread
);
3489 for_each_thread ([] (thread_info
*thread
){
3490 if (has_single_step_breakpoints (thread
))
3491 delete_single_step_breakpoints (thread
);
3495 unstop_all_lwps (0, event_child
);
3499 if (!stabilizing_threads
)
3501 /* In all-stop, stop all threads. */
3503 stop_all_lwps (0, NULL
);
3505 if (step_over_finished
)
3509 /* If we were doing a step-over, all other threads but
3510 the stepping one had been paused in start_step_over,
3511 with their suspend counts incremented. We don't want
3512 to do a full unstop/unpause, because we're in
3513 all-stop mode (so we want threads stopped), but we
3514 still need to unsuspend the other threads, to
3515 decrement their `suspended' count back. */
3516 unsuspend_all_lwps (event_child
);
3520 /* If we just finished a step-over, then all threads had
3521 been momentarily paused. In all-stop, that's fine,
3522 we want threads stopped by now anyway. In non-stop,
3523 we need to re-resume threads that GDB wanted to be
3525 unstop_all_lwps (1, event_child
);
3529 /* If we're not waiting for a specific LWP, choose an event LWP
3530 from among those that have had events. Giving equal priority
3531 to all LWPs that have had events helps prevent
3533 if (ptid
== minus_one_ptid
)
3535 event_child
->status_pending_p
= 1;
3536 event_child
->status_pending
= w
;
3538 select_event_lwp (&event_child
);
3540 /* current_thread and event_child must stay in sync. */
3541 switch_to_thread (get_lwp_thread (event_child
));
3543 event_child
->status_pending_p
= 0;
3544 w
= event_child
->status_pending
;
3548 /* Stabilize threads (move out of jump pads). */
3550 target_stabilize_threads ();
3554 /* If we just finished a step-over, then all threads had been
3555 momentarily paused. In all-stop, that's fine, we want
3556 threads stopped by now anyway. In non-stop, we need to
3557 re-resume threads that GDB wanted to be running. */
3558 if (step_over_finished
)
3559 unstop_all_lwps (1, event_child
);
3562 if (event_child
->waitstatus
.kind () != TARGET_WAITKIND_IGNORE
)
3564 /* If the reported event is an exit, fork, vfork or exec, let
3567 /* Break the unreported fork relationship chain. */
3568 if (event_child
->waitstatus
.kind () == TARGET_WAITKIND_FORKED
3569 || event_child
->waitstatus
.kind () == TARGET_WAITKIND_VFORKED
)
3571 event_child
->fork_relative
->fork_relative
= NULL
;
3572 event_child
->fork_relative
= NULL
;
3575 *ourstatus
= event_child
->waitstatus
;
3576 /* Clear the event lwp's waitstatus since we handled it already. */
3577 event_child
->waitstatus
.set_ignore ();
3581 /* The actual stop signal is overwritten below. */
3582 ourstatus
->set_stopped (GDB_SIGNAL_0
);
3585 /* Now that we've selected our final event LWP, un-adjust its PC if
3586 it was a software breakpoint, and the client doesn't know we can
3587 adjust the breakpoint ourselves. */
3588 if (event_child
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
3589 && !cs
.swbreak_feature
)
3591 int decr_pc
= low_decr_pc_after_break ();
3595 struct regcache
*regcache
3596 = get_thread_regcache (current_thread
, 1);
3597 low_set_pc (regcache
, event_child
->stop_pc
+ decr_pc
);
3601 if (WSTOPSIG (w
) == SYSCALL_SIGTRAP
)
3605 get_syscall_trapinfo (event_child
, &syscall_number
);
3606 if (event_child
->syscall_state
== TARGET_WAITKIND_SYSCALL_ENTRY
)
3607 ourstatus
->set_syscall_entry (syscall_number
);
3608 else if (event_child
->syscall_state
== TARGET_WAITKIND_SYSCALL_RETURN
)
3609 ourstatus
->set_syscall_return (syscall_number
);
3611 gdb_assert_not_reached ("unexpected syscall state");
3613 else if (current_thread
->last_resume_kind
== resume_stop
3614 && WSTOPSIG (w
) == SIGSTOP
)
3616 /* A thread that has been requested to stop by GDB with vCont;t,
3617 and it stopped cleanly, so report as SIG0. The use of
3618 SIGSTOP is an implementation detail. */
3619 ourstatus
->set_stopped (GDB_SIGNAL_0
);
3621 else if (current_thread
->last_resume_kind
== resume_stop
3622 && WSTOPSIG (w
) != SIGSTOP
)
3624 /* A thread that has been requested to stop by GDB with vCont;t,
3625 but, it stopped for other reasons. */
3626 ourstatus
->set_stopped (gdb_signal_from_host (WSTOPSIG (w
)));
3628 else if (ourstatus
->kind () == TARGET_WAITKIND_STOPPED
)
3629 ourstatus
->set_stopped (gdb_signal_from_host (WSTOPSIG (w
)));
3631 gdb_assert (step_over_bkpt
== null_ptid
);
3635 debug_printf ("wait_1 ret = %s, %d, %d\n",
3636 target_pid_to_str (ptid_of (current_thread
)).c_str (),
3637 ourstatus
->kind (), ourstatus
->sig ());
3641 if (ourstatus
->kind () == TARGET_WAITKIND_EXITED
)
3642 return filter_exit_event (event_child
, ourstatus
);
3644 return ptid_of (current_thread
);
3647 /* Get rid of any pending event in the pipe. */
/* Drains linux_event_pipe[0] one byte at a time, retrying on EINTR,
   until read stops returning data.  */
3649 async_file_flush (void)
3655 ret
= read (linux_event_pipe
[0], &buf
, 1);
3656 while (ret
>= 0 || (ret
== -1 && errno
== EINTR
));
3659 /* Put something in the pipe, so the event loop wakes up. */
/* Flushes first so the pipe holds at most one wake-up token, then
   writes a single '+' byte, retrying on EINTR.  */
3661 async_file_mark (void)
3665 async_file_flush ();
3668 ret
= write (linux_event_pipe
[1], "+", 1);
3669 while (ret
== 0 || (ret
== -1 && errno
== EINTR
));
3671 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3672 be awakened anyway. */
/* Target wait entry point.  Flushes the async wakeup pipe, then calls
   wait_1 in a loop: when TARGET_WNOHANG is not set, keep retrying while
   wait_1 reports no event (null_ptid / TARGET_WAITKIND_IGNORE).  When an
   event WAS returned in non-blocking async mode, re-mark the pipe so the
   event loop polls again — a single SIGCHLD can cover several child
   stops.  NOTE(review): the trailing async_file_mark call and the final
   return are elided from this excerpt.  */
3676 linux_process_target::wait (ptid_t ptid
,
3677 target_waitstatus
*ourstatus
,
3678 target_wait_flags target_options
)
3682 /* Flush the async file first. */
3683 if (target_is_async_p ())
3684 async_file_flush ();
3688 event_ptid
= wait_1 (ptid
, ourstatus
, target_options
);
3690 while ((target_options
& TARGET_WNOHANG
) == 0
3691 && event_ptid
== null_ptid
3692 && ourstatus
->kind () == TARGET_WAITKIND_IGNORE
);
3694 /* If at least one stop was reported, there may be more. A single
3695 SIGCHLD can signal more than one child stop. */
3696 if (target_is_async_p ()
3697 && (target_options
& TARGET_WNOHANG
) != 0
3698 && event_ptid
!= null_ptid
)
3704 /* Send a signal to an LWP. */
/* Delivers SIGNO to a single thread via the tkill syscall (thread-
   directed, unlike kill(2) which targets the whole group).  ENOSYS
   means no nptl threads — unsupported, so abort via perror_with_name.  */
3707 kill_lwp (unsigned long lwpid
, int signo
)
3712 ret
= syscall (__NR_tkill
, lwpid
, signo
);
3713 if (errno
== ENOSYS
)
3715 /* If tkill fails, then we are not using nptl threads, a
3716 configuration we no longer support. */
3717 perror_with_name (("tkill"));
/* Externally-visible entry to stop one LWP.  NOTE(review): the body is
   elided from this excerpt — presumably it forwards to send_sigstop;
   confirm against the full source.  */
3723 linux_stop_lwp (struct lwp_info
*lwp
)
/* Queue a SIGSTOP to LWP via kill_lwp, unless one is already pending
   (lwp->stop_expected).  Sets stop_expected before sending so the
   eventual SIGSTOP event is recognized as ours.  */
3729 send_sigstop (struct lwp_info
*lwp
)
3733 pid
= lwpid_of (get_lwp_thread (lwp
));
3735 /* If we already have a pending stop signal for this process, don't
3737 if (lwp
->stop_expected
)
3740 debug_printf ("Have pending sigstop for lwp %d\n", pid
);
3746 debug_printf ("Sending sigstop to lwp %d\n", pid
);
3748 lwp
->stop_expected
= 1;
3749 kill_lwp (pid
, SIGSTOP
);
/* for_each_thread callback variant: send SIGSTOP to THREAD's LWP.
   EXCEPT is accepted for signature compatibility but ignored here
   (see the original "Ignore EXCEPT" comment).  NOTE(review): the call
   forwarding to send_sigstop (lwp) is elided from this excerpt.  */
3753 send_sigstop (thread_info
*thread
, lwp_info
*except
)
3755 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3757 /* Ignore EXCEPT. */
3767 /* Increment the suspend count of an LWP, and stop it, if not stopped
/* Bumps the LWP's suspend count (so a later unstop leaves it paused)
   and then sends it a SIGSTOP via the thread-callback send_sigstop.
   EXCEPT is ignored, matching the plain send_sigstop callback.  */
3770 suspend_and_send_sigstop (thread_info
*thread
, lwp_info
*except
)
3772 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3774 /* Ignore EXCEPT. */
3778 lwp_suspended_inc (lwp
);
3780 send_sigstop (thread
, except
);
/* Record that LWP has exited: stash the raw wait status as a pending
   event, mirror it into waitstatus as exited/signalled, and clear
   stop_expected since a dead LWP can produce no further stops.  */
3784 mark_lwp_dead (struct lwp_info
*lwp
, int wstat
)
3786 /* Store the exit status for later. */
3787 lwp
->status_pending_p
= 1;
3788 lwp
->status_pending
= wstat
;
3790 /* Store in waitstatus as well, as there's nothing else to process
3792 if (WIFEXITED (wstat
))
3793 lwp
->waitstatus
.set_exited (WEXITSTATUS (wstat
));
3794 else if (WIFSIGNALED (wstat
))
3795 lwp
->waitstatus
.set_signalled (gdb_signal_from_host (WTERMSIG (wstat
)));
3797 /* Prevent trying to stop it. */
3800 /* No further stops are expected from a dead lwp. */
3801 lwp
->stop_expected
= 0;
3804 /* Return true if LWP has exited already, and has a pending exit event
3805 to report to GDB. */
/* True iff the pending status recorded by mark_lwp_dead is an exit
   (WIFEXITED) or a fatal signal (WIFSIGNALED).  */
3808 lwp_is_marked_dead (struct lwp_info
*lwp
)
3810 return (lwp
->status_pending_p
3811 && (WIFEXITED (lwp
->status_pending
)
3812 || WIFSIGNALED (lwp
->status_pending
)));
/* Wait until every LWP has actually stopped, leaving all events pending
   (null_ptid filter).  Saves the current thread's id first; if that
   thread died while we were pulling events, we must not silently
   restore it — tell restore_thread not to restore and switch to no
   thread so GDB's notion of "current" isn't stale.  */
3816 linux_process_target::wait_for_sigstop ()
3818 struct thread_info
*saved_thread
;
3823 saved_thread
= current_thread
;
3824 if (saved_thread
!= NULL
)
3825 saved_tid
= saved_thread
->id
;
3827 saved_tid
= null_ptid
; /* avoid bogus unused warning */
3829 scoped_restore_current_thread restore_thread
;
3832 debug_printf ("wait_for_sigstop: pulling events\n");
3834 /* Passing NULL_PTID as filter indicates we want all events to be
3835 left pending. Eventually this returns when there are no
3836 unwaited-for children left. */
3837 ret
= wait_for_event_filtered (minus_one_ptid
, null_ptid
, &wstat
, __WALL
);
3838 gdb_assert (ret
== -1);
3840 if (saved_thread
== NULL
|| mythread_alive (saved_tid
))
3845 debug_printf ("Previously current thread died.\n");
3847 /* We can't change the current inferior behind GDB's back,
3848 otherwise, a subsequent command may apply to the wrong
3850 restore_thread
.dont_restore ();
3851 switch_to_thread (nullptr);
/* Predicate: is THREAD stopped inside a fast-tracepoint jump pad in a
   way we must report to GDB (GDB breakpoint at stop_pc, watchpoint hit,
   or explicit single-step) while a collect is still in progress?
   Requires the LWP to be stopped and not suspended.  */
3856 linux_process_target::stuck_in_jump_pad (thread_info
*thread
)
3858 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3860 if (lwp
->suspended
!= 0)
3862 internal_error (__FILE__
, __LINE__
,
3863 "LWP %ld is suspended, suspended=%d\n",
3864 lwpid_of (thread
), lwp
->suspended
);
3866 gdb_assert (lwp
->stopped
);
3868 /* Allow debugging the jump pad, gdb_collect, etc.. */
3869 return (supports_fast_tracepoints ()
3870 && agent_loaded_p ()
3871 && (gdb_breakpoint_here (lwp
->stop_pc
)
3872 || lwp
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
3873 || thread
->last_resume_kind
== resume_step
)
3874 && (linux_fast_tracepoint_collecting (lwp
, NULL
)
3875 != fast_tpoint_collect_result::not_collecting
));
/* If THREAD is stopped inside a fast-tracepoint jump pad for a reason
   GDB doesn't need to see, defer its pending signal and resume it so it
   can finish leaving the pad; then suspend it (caller pairs this with a
   later unsuspend).  Switches current thread temporarily because
   gdb_breakpoint_here consults the current thread.  */
3879 linux_process_target::move_out_of_jump_pad (thread_info
*thread
)
3881 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3884 if (lwp
->suspended
!= 0)
3886 internal_error (__FILE__
, __LINE__
,
3887 "LWP %ld is suspended, suspended=%d\n",
3888 lwpid_of (thread
), lwp
->suspended
);
3890 gdb_assert (lwp
->stopped
);
3892 /* For gdb_breakpoint_here. */
3893 scoped_restore_current_thread restore_thread
;
3894 switch_to_thread (thread
);
3896 wstat
= lwp
->status_pending_p
? &lwp
->status_pending
: NULL
;
3898 /* Allow debugging the jump pad, gdb_collect, etc. */
3899 if (!gdb_breakpoint_here (lwp
->stop_pc
)
3900 && lwp
->stop_reason
!= TARGET_STOPPED_BY_WATCHPOINT
3901 && thread
->last_resume_kind
!= resume_step
3902 && maybe_move_out_of_jump_pad (lwp
, wstat
))
3905 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3910 lwp
->status_pending_p
= 0;
3911 enqueue_one_deferred_signal (lwp
, wstat
);
3914 debug_printf ("Signal %d for LWP %ld deferred "
3916 WSTOPSIG (*wstat
), lwpid_of (thread
));
3919 resume_one_lwp (lwp
, 0, 0, NULL
);
3922 lwp_suspended_inc (lwp
);
/* Predicate: THREAD's LWP is live (not marked dead) and currently
   running (not ptrace-stopped).  */
3926 lwp_running (thread_info
*thread
)
3928 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3930 if (lwp_is_marked_dead (lwp
))
3933 return !lwp
->stopped
;
/* Stop every LWP except EXCEPT.  With SUSPEND non-zero each stopped LWP
   also gets its suspend count bumped (stop-and-suspend).  Sets the
   stopping_threads global for the duration so event handling knows a
   mass-stop is in progress; must not be entered recursively.  */
3937 linux_process_target::stop_all_lwps (int suspend
, lwp_info
*except
)
3939 /* Should not be called recursively. */
3940 gdb_assert (stopping_threads
== NOT_STOPPING_THREADS
);
3945 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3946 suspend
? "stop-and-suspend" : "stop",
3949 (ptid_of (get_lwp_thread (except
))).c_str ()
3953 stopping_threads
= (suspend
3954 ? STOPPING_AND_SUSPENDING_THREADS
3955 : STOPPING_THREADS
);
3958 for_each_thread ([&] (thread_info
*thread
)
3960 suspend_and_send_sigstop (thread
, except
);
3963 for_each_thread ([&] (thread_info
*thread
)
3965 send_sigstop (thread
, except
);
3968 wait_for_sigstop ();
3969 stopping_threads
= NOT_STOPPING_THREADS
;
3973 debug_printf ("stop_all_lwps done, setting stopping_threads "
3974 "back to !stopping\n");
3979 /* Enqueue one signal in the chain of signals which need to be
3980 delivered to this process on next resume. */
/* Appends SIGNAL to lwp->pending_signals.  When INFO is null the stored
   siginfo is zeroed; otherwise it is copied so the original siginfo can
   be re-injected with PTRACE_SETSIGINFO at delivery time.  */
3983 enqueue_pending_signal (struct lwp_info
*lwp
, int signal
, siginfo_t
*info
)
3985 lwp
->pending_signals
.emplace_back (signal
);
3986 if (info
== nullptr)
3987 memset (&lwp
->pending_signals
.back ().info
, 0, sizeof (siginfo_t
));
3989 lwp
->pending_signals
.back ().info
= *info
;
/* Software single-step: ask the low-level backend for every possible
   next PC of LWP's current instruction and plant a single-step
   breakpoint at each.  Temporarily switches current thread because
   set_single_step_breakpoint uses current_ptid.  */
3993 linux_process_target::install_software_single_step_breakpoints (lwp_info
*lwp
)
3995 struct thread_info
*thread
= get_lwp_thread (lwp
);
3996 struct regcache
*regcache
= get_thread_regcache (thread
, 1);
3998 scoped_restore_current_thread restore_thread
;
4000 switch_to_thread (thread
);
4001 std::vector
<CORE_ADDR
> next_pcs
= low_get_next_pcs (regcache
);
4003 for (CORE_ADDR pc
: next_pcs
)
4004 set_single_step_breakpoint (pc
, current_ptid
);
/* Arrange for LWP to execute one instruction: prefer hardware
   single-step; otherwise fall back to software single-step breakpoints;
   otherwise report that stepping is unimplemented.  NOTE(review): the
   returned step flag assignments are elided from this excerpt.  */
4008 linux_process_target::single_step (lwp_info
* lwp
)
4012 if (supports_hardware_single_step ())
4016 else if (supports_software_single_step ())
4018 install_software_single_step_breakpoints (lwp
);
4024 debug_printf ("stepping is not implemented on this target");
4030 /* The signal can be delivered to the inferior if we are not trying to
4031 finish a fast tracepoint collect. Since signal can be delivered in
4032 the step-over, the program may go to signal handler and trap again
4033 after return from the signal handler. We can live with the spurious
/* True iff LWP is not mid fast-tracepoint collect, i.e. a queued signal
   may be passed to the inferior on the next resume.  */
4037 lwp_signal_can_be_delivered (struct lwp_info
*lwp
)
4039 return (lwp
->collecting_fast_tracepoint
4040 == fast_tpoint_collect_result::not_collecting
);
/* Core single-LWP resume.  Decides whether the LWP can actually be set
   running (pending status / queued signals / fast-tracepoint collect /
   breakpoint-reinsert step-over all take precedence), queues or dequeues
   one deliverable signal, picks PTRACE_SINGLESTEP / PTRACE_SYSCALL /
   PTRACE_CONT, and issues the ptrace call.  Throws on ptrace failure
   (callers go through resume_one_lwp, which filters the
   zombie-LWP-race case).  NOTE(review): this excerpt elides many
   interior lines (braces, debug guards, locals); code fragments below
   are preserved verbatim.  */
4044 linux_process_target::resume_one_lwp_throw (lwp_info
*lwp
, int step
,
4045 int signal
, siginfo_t
*info
)
4047 struct thread_info
*thread
= get_lwp_thread (lwp
);
4049 struct process_info
*proc
= get_thread_process (thread
);
4051 /* Note that target description may not be initialised
4052 (proc->tdesc == NULL) at this point because the program hasn't
4053 stopped at the first instruction yet. It means GDBserver skips
4054 the extra traps from the wrapper program (see option --wrapper).
4055 Code in this function that requires register access should be
4056 guarded by proc->tdesc == NULL or something else. */
4058 if (lwp
->stopped
== 0)
4061 gdb_assert (lwp
->waitstatus
.kind () == TARGET_WAITKIND_IGNORE
);
4063 fast_tpoint_collect_result fast_tp_collecting
4064 = lwp
->collecting_fast_tracepoint
;
4066 gdb_assert (!stabilizing_threads
4067 || (fast_tp_collecting
4068 != fast_tpoint_collect_result::not_collecting
));
4070 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4071 user used the "jump" command, or "set $pc = foo"). */
4072 if (thread
->while_stepping
!= NULL
&& lwp
->stop_pc
!= get_pc (lwp
))
4074 /* Collecting 'while-stepping' actions doesn't make sense
4076 release_while_stepping_state_list (thread
);
4079 /* If we have pending signals or status, and a new signal, enqueue the
4080 signal. Also enqueue the signal if it can't be delivered to the
4081 inferior right now. */
4083 && (lwp
->status_pending_p
4084 || !lwp
->pending_signals
.empty ()
4085 || !lwp_signal_can_be_delivered (lwp
)))
4087 enqueue_pending_signal (lwp
, signal
, info
);
4089 /* Postpone any pending signal. It was enqueued above. */
4093 if (lwp
->status_pending_p
)
4096 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4097 " has pending status\n",
4098 lwpid_of (thread
), step
? "step" : "continue",
4099 lwp
->stop_expected
? "expected" : "not expected");
4103 scoped_restore_current_thread restore_thread
;
4104 switch_to_thread (thread
);
4106 /* This bit needs some thinking about. If we get a signal that
4107 we must report while a single-step reinsert is still pending,
4108 we often end up resuming the thread. It might be better to
4109 (ew) allow a stack of pending events; then we could be sure that
4110 the reinsert happened right away and not lose any signals.
4112 Making this stack would also shrink the window in which breakpoints are
4113 uninserted (see comment in linux_wait_for_lwp) but not enough for
4114 complete correctness, so it won't solve that problem. It may be
4115 worthwhile just to solve this one, however. */
4116 if (lwp
->bp_reinsert
!= 0)
4119 debug_printf (" pending reinsert at 0x%s\n",
4120 paddress (lwp
->bp_reinsert
));
4122 if (supports_hardware_single_step ())
4124 if (fast_tp_collecting
== fast_tpoint_collect_result::not_collecting
)
4127 warning ("BAD - reinserting but not stepping.");
4129 warning ("BAD - reinserting and suspended(%d).",
4134 step
= maybe_hw_step (thread
);
4137 if (fast_tp_collecting
== fast_tpoint_collect_result::before_insn
)
4140 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4141 " (exit-jump-pad-bkpt)\n",
4144 else if (fast_tp_collecting
== fast_tpoint_collect_result::at_insn
)
4147 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4148 " single-stepping\n",
4151 if (supports_hardware_single_step ())
4155 internal_error (__FILE__
, __LINE__
,
4156 "moving out of jump pad single-stepping"
4157 " not implemented on this target");
4161 /* If we have while-stepping actions in this thread set it stepping.
4162 If we have a signal to deliver, it may or may not be set to
4163 SIG_IGN, we don't know. Assume so, and allow collecting
4164 while-stepping into a signal handler. A possible smart thing to
4165 do would be to set an internal breakpoint at the signal return
4166 address, continue, and carry on catching this while-stepping
4167 action only when that breakpoint is hit. A future
4169 if (thread
->while_stepping
!= NULL
)
4172 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4175 step
= single_step (lwp
);
4178 if (proc
->tdesc
!= NULL
&& low_supports_breakpoints ())
4180 struct regcache
*regcache
= get_thread_regcache (current_thread
, 1);
4182 lwp
->stop_pc
= low_get_pc (regcache
);
4186 debug_printf (" %s from pc 0x%lx\n", step
? "step" : "continue",
4187 (long) lwp
->stop_pc
);
4191 /* If we have pending signals, consume one if it can be delivered to
4193 if (!lwp
->pending_signals
.empty () && lwp_signal_can_be_delivered (lwp
))
4195 const pending_signal
&p_sig
= lwp
->pending_signals
.front ();
4197 signal
= p_sig
.signal
;
4198 if (p_sig
.info
.si_signo
!= 0)
4199 ptrace (PTRACE_SETSIGINFO
, lwpid_of (thread
), (PTRACE_TYPE_ARG3
) 0,
4202 lwp
->pending_signals
.pop_front ();
4206 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4207 lwpid_of (thread
), step
? "step" : "continue", signal
,
4208 lwp
->stop_expected
? "expected" : "not expected");
4210 low_prepare_to_resume (lwp
);
4212 regcache_invalidate_thread (thread
);
4214 lwp
->stepping
= step
;
4216 ptrace_request
= PTRACE_SINGLESTEP
;
4217 else if (gdb_catching_syscalls_p (lwp
))
4218 ptrace_request
= PTRACE_SYSCALL
;
4220 ptrace_request
= PTRACE_CONT
;
4221 ptrace (ptrace_request
,
4223 (PTRACE_TYPE_ARG3
) 0,
4224 /* Coerce to a uintptr_t first to avoid potential gcc warning
4225 of coercing an 8 byte integer to a 4 byte pointer. */
4226 (PTRACE_TYPE_ARG4
) (uintptr_t) signal
);
4229 perror_with_name ("resuming thread");
4231 /* Successfully resumed. Clear state that no longer makes sense,
4232 and mark the LWP as running. Must not do this before resuming
4233 otherwise if that fails other code will be confused. E.g., we'd
4234 later try to stop the LWP and hang forever waiting for a stop
4235 status. Note that we must not throw after this is cleared,
4236 otherwise handle_zombie_lwp_error would get confused. */
4238 lwp
->stop_reason
= TARGET_STOPPED_BY_NO_REASON
;
/* Hook called just before resuming an LWP (see resume_one_lwp_throw).
   NOTE(review): the body is elided here — presumably the base-class
   default is a no-op that architectures override; confirm against the
   full source.  */
4242 linux_process_target::low_prepare_to_resume (lwp_info
*lwp
)
4247 /* Called when we try to resume a stopped LWP and that errors out. If
4248 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4249 or about to become), discard the error, clear any pending status
4250 the LWP may have, and return true (we'll collect the exit status
4251 soon enough). Otherwise, return false. */
/* Uses /proc/PID/status rather than a Z-state check because a dying
   tracee can briefly show 'R' while already refusing ptrace requests
   (ESRCH) — see the note below and ptrace(2).  */
4254 check_ptrace_stopped_lwp_gone (struct lwp_info
*lp
)
4256 struct thread_info
*thread
= get_lwp_thread (lp
);
4258 /* If we get an error after resuming the LWP successfully, we'd
4259 confuse !T state for the LWP being gone. */
4260 gdb_assert (lp
->stopped
);
4262 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4263 because even if ptrace failed with ESRCH, the tracee may be "not
4264 yet fully dead", but already refusing ptrace requests. In that
4265 case the tracee has 'R (Running)' state for a little bit
4266 (observed in Linux 3.18). See also the note on ESRCH in the
4267 ptrace(2) man page. Instead, check whether the LWP has any state
4268 other than ptrace-stopped. */
4270 /* Don't assume anything if /proc/PID/status can't be read. */
4271 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread
)) == 0)
4273 lp
->stop_reason
= TARGET_STOPPED_BY_NO_REASON
;
4274 lp
->status_pending_p
= 0;
/* Safe wrapper around resume_one_lwp_throw: swallows the error when the
   LWP turned out to be gone (zombie race, see
   check_ptrace_stopped_lwp_gone); otherwise the exception propagates.  */
4281 linux_process_target::resume_one_lwp (lwp_info
*lwp
, int step
, int signal
,
4286 resume_one_lwp_throw (lwp
, step
, signal
, info
);
4288 catch (const gdb_exception_error
&ex
)
4290 if (!check_ptrace_stopped_lwp_gone (lwp
))
4295 /* This function is called once per thread via for_each_thread.
4296 We look up which resume request applies to THREAD and mark it with a
4297 pointer to the appropriate resume request.
4299 This algorithm is O(threads * resume elements), but resume elements
4300 is small (and will remain small at least until GDB supports thread
/* Matches THREAD against each vCont resume element (exact ptid, -1
   wildcard, or pPID/pPID.-1 process wildcard).  Wildcard resumes skip
   threads that are already in the requested state, un-reported fork
   children, and threads with stop replies still queued in vStopped.
   On a match, records the resume pointer, mirrors kind and step range
   into the LWP, and may dequeue one deferred jump-pad signal as the
   new pending status.  */
4304 linux_set_resume_request (thread_info
*thread
, thread_resume
*resume
, size_t n
)
4306 struct lwp_info
*lwp
= get_thread_lwp (thread
);
4308 for (int ndx
= 0; ndx
< n
; ndx
++)
4310 ptid_t ptid
= resume
[ndx
].thread
;
4311 if (ptid
== minus_one_ptid
4312 || ptid
== thread
->id
4313 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4315 || (ptid
.pid () == pid_of (thread
)
4317 || ptid
.lwp () == -1)))
4319 if (resume
[ndx
].kind
== resume_stop
4320 && thread
->last_resume_kind
== resume_stop
)
4323 debug_printf ("already %s LWP %ld at GDB's request\n",
4324 (thread
->last_status
.kind ()
4325 == TARGET_WAITKIND_STOPPED
)
4333 /* Ignore (wildcard) resume requests for already-resumed
4335 if (resume
[ndx
].kind
!= resume_stop
4336 && thread
->last_resume_kind
!= resume_stop
)
4339 debug_printf ("already %s LWP %ld at GDB's request\n",
4340 (thread
->last_resume_kind
4348 /* Don't let wildcard resumes resume fork children that GDB
4349 does not yet know are new fork children. */
4350 if (lwp
->fork_relative
!= NULL
)
4352 struct lwp_info
*rel
= lwp
->fork_relative
;
4354 if (rel
->status_pending_p
4355 && (rel
->waitstatus
.kind () == TARGET_WAITKIND_FORKED
4356 || rel
->waitstatus
.kind () == TARGET_WAITKIND_VFORKED
))
4359 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4365 /* If the thread has a pending event that has already been
4366 reported to GDBserver core, but GDB has not pulled the
4367 event out of the vStopped queue yet, likewise, ignore the
4368 (wildcard) resume request. */
4369 if (in_queued_stop_replies (thread
->id
))
4372 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4377 lwp
->resume
= &resume
[ndx
];
4378 thread
->last_resume_kind
= lwp
->resume
->kind
;
4380 lwp
->step_range_start
= lwp
->resume
->step_range_start
;
4381 lwp
->step_range_end
= lwp
->resume
->step_range_end
;
4383 /* If we had a deferred signal to report, dequeue one now.
4384 This can happen if LWP gets more than one signal while
4385 trying to get out of a jump pad. */
4387 && !lwp
->status_pending_p
4388 && dequeue_one_deferred_signal (lwp
, &lwp
->status_pending
))
4390 lwp
->status_pending_p
= 1;
4393 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4394 "leaving status pending.\n",
4395 WSTOPSIG (lwp
->status_pending
),
4403 /* No resume action for this thread. */
/* find_thread predicate: does THREAD both have a resume request and a
   still-valid pending status?  Threads with no resume request are
   skipped — we may not wait for them on the next linux_wait.  */
4408 linux_process_target::resume_status_pending (thread_info
*thread
)
4410 struct lwp_info
*lwp
= get_thread_lwp (thread
);
4412 /* LWPs which will not be resumed are not interesting, because
4413 we might not wait for them next time through linux_wait. */
4414 if (lwp
->resume
== NULL
)
4417 return thread_still_has_status_pending (thread
);
/* find_thread predicate: must THREAD step over a breakpoint before the
   other threads may be resumed?  Bails out early when there is no tdesc
   (wrapper startup), no resume request, a stop request, suspension, a
   pending status, a moved PC, or (on software single-step targets) a
   deliverable pending signal.  Otherwise checks for one of our
   breakpoints / fast-tracepoint jumps at PC, excluding GDB breakpoints
   GDB itself expects to hit.  */
4421 linux_process_target::thread_needs_step_over (thread_info
*thread
)
4423 struct lwp_info
*lwp
= get_thread_lwp (thread
);
4425 struct process_info
*proc
= get_thread_process (thread
);
4427 /* GDBserver is skipping the extra traps from the wrapper program,
4428 don't have to do step over. */
4429 if (proc
->tdesc
== NULL
)
4432 /* LWPs which will not be resumed are not interesting, because we
4433 might not wait for them next time through linux_wait. */
4438 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4443 if (thread
->last_resume_kind
== resume_stop
)
4446 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4452 gdb_assert (lwp
->suspended
>= 0);
4457 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4462 if (lwp
->status_pending_p
)
4465 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4471 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4475 /* If the PC has changed since we stopped, then don't do anything,
4476 and let the breakpoint/tracepoint be hit. This happens if, for
4477 instance, GDB handled the decr_pc_after_break subtraction itself,
4478 GDB is OOL stepping this thread, or the user has issued a "jump"
4479 command, or poked thread's registers herself. */
4480 if (pc
!= lwp
->stop_pc
)
4483 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4484 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4486 paddress (lwp
->stop_pc
), paddress (pc
));
4490 /* On software single step target, resume the inferior with signal
4491 rather than stepping over. */
4492 if (supports_software_single_step ()
4493 && !lwp
->pending_signals
.empty ()
4494 && lwp_signal_can_be_delivered (lwp
))
4497 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4504 scoped_restore_current_thread restore_thread
;
4505 switch_to_thread (thread
);
4507 /* We can only step over breakpoints we know about. */
4508 if (breakpoint_here (pc
) || fast_tracepoint_jump_here (pc
))
4510 /* Don't step over a breakpoint that GDB expects to hit
4511 though. If the condition is being evaluated on the target's side
4512 and it evaluate to false, step over this breakpoint as well. */
4513 if (gdb_breakpoint_here (pc
)
4514 && gdb_condition_true_at_breakpoint (pc
)
4515 && gdb_no_commands_at_breakpoint (pc
))
4518 debug_printf ("Need step over [LWP %ld]? yes, but found"
4519 " GDB breakpoint at 0x%s; skipping step over\n",
4520 lwpid_of (thread
), paddress (pc
));
4527 debug_printf ("Need step over [LWP %ld]? yes, "
4528 "found breakpoint at 0x%s\n",
4529 lwpid_of (thread
), paddress (pc
));
4531 /* We've found an lwp that needs stepping over --- return 1 so
4532 that find_thread stops looking. */
4538 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4540 lwpid_of (thread
), paddress (pc
));
/* Begin a step-over for LWP: stop-and-suspend all other threads, record
   the breakpoint address in bp_reinsert, uninsert the breakpoint and any
   fast-tracepoint jump at PC, single-step the LWP, and set
   step_over_bkpt so wait_1 demands the next event from this LWP.  */
4546 linux_process_target::start_step_over (lwp_info
*lwp
)
4548 struct thread_info
*thread
= get_lwp_thread (lwp
);
4552 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4555 stop_all_lwps (1, lwp
);
4557 if (lwp
->suspended
!= 0)
4559 internal_error (__FILE__
, __LINE__
,
4560 "LWP %ld suspended=%d\n", lwpid_of (thread
),
4565 debug_printf ("Done stopping all threads for step-over.\n");
4567 /* Note, we should always reach here with an already adjusted PC,
4568 either by GDB (if we're resuming due to GDB's request), or by our
4569 caller, if we just finished handling an internal breakpoint GDB
4570 shouldn't care about. */
4575 scoped_restore_current_thread restore_thread
;
4576 switch_to_thread (thread
);
4578 lwp
->bp_reinsert
= pc
;
4579 uninsert_breakpoints_at (pc
);
4580 uninsert_fast_tracepoint_jumps_at (pc
);
4582 step
= single_step (lwp
);
4585 resume_one_lwp (lwp
, step
, 0, NULL
);
4587 /* Require next event from this LWP. */
4588 step_over_bkpt
= thread
->id
;
/* Complete a step-over started by start_step_over: reinsert the
   breakpoint / fast-tracepoint jump at bp_reinsert, clear bp_reinsert,
   delete software single-step breakpoints (safe because all other
   threads were held stopped), and clear step_over_bkpt.  */
4592 linux_process_target::finish_step_over (lwp_info
*lwp
)
4594 if (lwp
->bp_reinsert
!= 0)
4596 scoped_restore_current_thread restore_thread
;
4599 debug_printf ("Finished step over.\n");
4601 switch_to_thread (get_lwp_thread (lwp
));
4603 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4604 may be no breakpoint to reinsert there by now. */
4605 reinsert_breakpoints_at (lwp
->bp_reinsert
);
4606 reinsert_fast_tracepoint_jumps_at (lwp
->bp_reinsert
);
4608 lwp
->bp_reinsert
= 0;
4610 /* Delete any single-step breakpoints. No longer needed. We
4611 don't have to worry about other threads hitting this trap,
4612 and later not being able to explain it, because we were
4613 stepping over a breakpoint, and we hold all threads but
4614 LWP stopped while doing that. */
4615 if (!supports_hardware_single_step ())
4617 gdb_assert (has_single_step_breakpoints (current_thread
));
4618 delete_single_step_breakpoints (current_thread
);
4621 step_over_bkpt
= null_ptid
;
/* If a step-over is in flight (step_over_bkpt set), pull events until
   it finishes, discard the resulting SIGTRAP unless GDB explicitly
   asked for a step (so it isn't reported as a spurious stop), clear
   step_over_bkpt and unsuspend the other LWPs.  Used e.g. before
   detaching.  */
4629 linux_process_target::complete_ongoing_step_over ()
4631 if (step_over_bkpt
!= null_ptid
)
4633 struct lwp_info
*lwp
;
4638 debug_printf ("detach: step over in progress, finish it first\n");
4640 /* Passing NULL_PTID as filter indicates we want all events to
4641 be left pending. Eventually this returns when there are no
4642 unwaited-for children left. */
4643 ret
= wait_for_event_filtered (minus_one_ptid
, null_ptid
, &wstat
,
4645 gdb_assert (ret
== -1);
4647 lwp
= find_lwp_pid (step_over_bkpt
);
4650 finish_step_over (lwp
);
4652 /* If we got our step SIGTRAP, don't leave it pending,
4653 otherwise we would report it to GDB as a spurious
4655 gdb_assert (lwp
->status_pending_p
);
4656 if (WIFSTOPPED (lwp
->status_pending
)
4657 && WSTOPSIG (lwp
->status_pending
) == SIGTRAP
)
4659 thread_info
*thread
= get_lwp_thread (lwp
);
4660 if (thread
->last_resume_kind
!= resume_step
)
4663 debug_printf ("detach: discard step-over SIGTRAP\n");
4665 lwp
->status_pending_p
= 0;
4666 lwp
->status_pending
= 0;
4667 resume_one_lwp (lwp
, lwp
->stepping
, 0, NULL
);
4672 debug_printf ("detach: resume_step, "
4673 "not discarding step-over SIGTRAP\n");
4677 step_over_bkpt
= null_ptid
;
4678 unsuspend_all_lwps (lwp
);
/* Apply THREAD's recorded resume request (set by
   linux_set_resume_request).  resume_stop requests send/queue a SIGSTOP
   and finish here.  Otherwise, leave the LWP stopped if it is
   suspended, has a pending status, or LEAVE_ALL_STOPPED is set — but
   still queue any requested signal (re-fetching its siginfo when it is
   the same signal the LWP last stopped with) so delivery happens on the
   eventual resume.  */
4683 linux_process_target::resume_one_thread (thread_info
*thread
,
4684 bool leave_all_stopped
)
4686 struct lwp_info
*lwp
= get_thread_lwp (thread
);
4689 if (lwp
->resume
== NULL
)
4692 if (lwp
->resume
->kind
== resume_stop
)
4695 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread
));
4700 debug_printf ("stopping LWP %ld\n", lwpid_of (thread
));
4702 /* Stop the thread, and wait for the event asynchronously,
4703 through the event loop. */
4709 debug_printf ("already stopped LWP %ld\n",
4712 /* The LWP may have been stopped in an internal event that
4713 was not meant to be notified back to GDB (e.g., gdbserver
4714 breakpoint), so we should be reporting a stop event in
4717 /* If the thread already has a pending SIGSTOP, this is a
4718 no-op. Otherwise, something later will presumably resume
4719 the thread and this will cause it to cancel any pending
4720 operation, due to last_resume_kind == resume_stop. If
4721 the thread already has a pending status to report, we
4722 will still report it the next time we wait - see
4723 status_pending_p_callback. */
4725 /* If we already have a pending signal to report, then
4726 there's no need to queue a SIGSTOP, as this means we're
4727 midway through moving the LWP out of the jumppad, and we
4728 will report the pending signal as soon as that is
4730 if (lwp
->pending_signals_to_report
.empty ())
4734 /* For stop requests, we're done. */
4736 thread
->last_status
.set_ignore ();
4740 /* If this thread which is about to be resumed has a pending status,
4741 then don't resume it - we can just report the pending status.
4742 Likewise if it is suspended, because e.g., another thread is
4743 stepping past a breakpoint. Make sure to queue any signals that
4744 would otherwise be sent. In all-stop mode, we do this decision
4745 based on if *any* thread has a pending status. If there's a
4746 thread that needs the step-over-breakpoint dance, then don't
4747 resume any other thread but that particular one. */
4748 leave_pending
= (lwp
->suspended
4749 || lwp
->status_pending_p
4750 || leave_all_stopped
);
4752 /* If we have a new signal, enqueue the signal. */
4753 if (lwp
->resume
->sig
!= 0)
4755 siginfo_t info
, *info_p
;
4757 /* If this is the same signal we were previously stopped by,
4758 make sure to queue its siginfo. */
4759 if (WIFSTOPPED (lwp
->last_status
)
4760 && WSTOPSIG (lwp
->last_status
) == lwp
->resume
->sig
4761 && ptrace (PTRACE_GETSIGINFO
, lwpid_of (thread
),
4762 (PTRACE_TYPE_ARG3
) 0, &info
) == 0)
4767 enqueue_pending_signal (lwp
, lwp
->resume
->sig
, info_p
);
4773 debug_printf ("resuming LWP %ld\n", lwpid_of (thread
));
4775 proceed_one_lwp (thread
, NULL
);
4780 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread
));
4783 thread
->last_status
.set_ignore ();
/* Target resume entry point (vCont).  Records each thread's applicable
   resume request, then decides globally: if any to-be-resumed thread has
   a pending status, resume nothing (report the pending status instead);
   else if some thread must step over a breakpoint, keep all others
   stopped and start the step-over.  Signals requested for threads left
   stopped are still queued.  Finally wakes the event loop in async mode
   so pending events get reported.  */
4788 linux_process_target::resume (thread_resume
*resume_info
, size_t n
)
4790 struct thread_info
*need_step_over
= NULL
;
4795 debug_printf ("linux_resume:\n");
4798 for_each_thread ([&] (thread_info
*thread
)
4800 linux_set_resume_request (thread
, resume_info
, n
);
4803 /* If there is a thread which would otherwise be resumed, which has
4804 a pending status, then don't resume any threads - we can just
4805 report the pending status. Make sure to queue any signals that
4806 would otherwise be sent. In non-stop mode, we'll apply this
4807 logic to each thread individually. We consume all pending events
4808 before considering to start a step-over (in all-stop). */
4809 bool any_pending
= false;
4811 any_pending
= find_thread ([this] (thread_info
*thread
)
4813 return resume_status_pending (thread
);
4816 /* If there is a thread which would otherwise be resumed, which is
4817 stopped at a breakpoint that needs stepping over, then don't
4818 resume any threads - have it step over the breakpoint with all
4819 other threads stopped, then resume all threads again. Make sure
4820 to queue any signals that would otherwise be delivered or
4822 if (!any_pending
&& low_supports_breakpoints ())
4823 need_step_over
= find_thread ([this] (thread_info
*thread
)
4825 return thread_needs_step_over (thread
);
4828 bool leave_all_stopped
= (need_step_over
!= NULL
|| any_pending
);
4832 if (need_step_over
!= NULL
)
4833 debug_printf ("Not resuming all, need step over\n");
4834 else if (any_pending
)
4835 debug_printf ("Not resuming, all-stop and found "
4836 "an LWP with pending status\n");
4838 debug_printf ("Resuming, no pending status or step over needed\n");
4841 /* Even if we're leaving threads stopped, queue all signals we'd
4842 otherwise deliver. */
4843 for_each_thread ([&] (thread_info
*thread
)
4845 resume_one_thread (thread
, leave_all_stopped
);
4849 start_step_over (get_thread_lwp (need_step_over
));
4853 debug_printf ("linux_resume done\n");
4857 /* We may have events that were pending that can/should be sent to
4858 the client now. Trigger a linux_wait call. */
4859 if (target_is_async_p ())
/* for_each_thread callback: set one LWP running again after e.g. a
   step-over, honoring its state — skip if already running, if GDB wants
   it stopped (re-queueing a SIGSTOP that wait_for_sigstop may have
   consumed), if it has a pending status, or if suspended.  Re-installs
   software single-step breakpoints and picks hardware step when the
   client requested stepping or a breakpoint reinsert is pending.  */
4864 linux_process_target::proceed_one_lwp (thread_info
*thread
, lwp_info
*except
)
4866 struct lwp_info
*lwp
= get_thread_lwp (thread
);
4873 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread
));
4878 debug_printf (" LWP %ld already running\n", lwpid_of (thread
));
4882 if (thread
->last_resume_kind
== resume_stop
4883 && thread
->last_status
.kind () != TARGET_WAITKIND_IGNORE
)
4886 debug_printf (" client wants LWP to remain %ld stopped\n",
4891 if (lwp
->status_pending_p
)
4894 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4899 gdb_assert (lwp
->suspended
>= 0);
4904 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread
));
4908 if (thread
->last_resume_kind
== resume_stop
4909 && lwp
->pending_signals_to_report
.empty ()
4910 && (lwp
->collecting_fast_tracepoint
4911 == fast_tpoint_collect_result::not_collecting
))
4913 /* We haven't reported this LWP as stopped yet (otherwise, the
4914 last_status.kind check above would catch it, and we wouldn't
4915 reach here. This LWP may have been momentarily paused by a
4916 stop_all_lwps call while handling for example, another LWP's
4917 step-over. In that case, the pending expected SIGSTOP signal
4918 that was queued at vCont;t handling time will have already
4919 been consumed by wait_for_sigstop, and so we need to requeue
4920 another one here. Note that if the LWP already has a SIGSTOP
4921 pending, this is a no-op. */
4924 debug_printf ("Client wants LWP %ld to stop. "
4925 "Making sure it has a SIGSTOP pending\n",
4931 if (thread
->last_resume_kind
== resume_step
)
4934 debug_printf (" stepping LWP %ld, client wants it stepping\n",
4937 /* If resume_step is requested by GDB, install single-step
4938 breakpoints when the thread is about to be actually resumed if
4939 the single-step breakpoints weren't removed. */
4940 if (supports_software_single_step ()
4941 && !has_single_step_breakpoints (thread
))
4942 install_software_single_step_breakpoints (lwp
);
4944 step
= maybe_hw_step (thread
);
4946 else if (lwp
->bp_reinsert
!= 0)
4949 debug_printf (" stepping LWP %ld, reinsert set\n",
4952 step
= maybe_hw_step (thread
);
4957 resume_one_lwp (lwp
, step
, 0, NULL
);
/* for_each_thread callback: drop one suspend reference on THREAD's LWP,
   then hand it to proceed_one_lwp — the resume companion to
   suspend_and_send_sigstop.  */
4961 linux_process_target::unsuspend_and_proceed_one_lwp (thread_info
*thread
,
4964 struct lwp_info
*lwp
= get_thread_lwp (thread
);
4969 lwp_suspended_decr (lwp
);
4971 proceed_one_lwp (thread
, except
);
4975 linux_process_target::proceed_all_lwps ()
4977 struct thread_info
*need_step_over
;
4979 /* If there is a thread which would otherwise be resumed, which is
4980 stopped at a breakpoint that needs stepping over, then don't
4981 resume any threads - have it step over the breakpoint with all
4982 other threads stopped, then resume all threads again. */
4984 if (low_supports_breakpoints ())
4986 need_step_over
= find_thread ([this] (thread_info
*thread
)
4988 return thread_needs_step_over (thread
);
4991 if (need_step_over
!= NULL
)
4994 debug_printf ("proceed_all_lwps: found "
4995 "thread %ld needing a step-over\n",
4996 lwpid_of (need_step_over
));
4998 start_step_over (get_thread_lwp (need_step_over
));
5004 debug_printf ("Proceeding, no step-over needed\n");
5006 for_each_thread ([this] (thread_info
*thread
)
5008 proceed_one_lwp (thread
, NULL
);
5013 linux_process_target::unstop_all_lwps (int unsuspend
, lwp_info
*except
)
5019 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5020 lwpid_of (get_lwp_thread (except
)));
5022 debug_printf ("unstopping all lwps\n");
5026 for_each_thread ([&] (thread_info
*thread
)
5028 unsuspend_and_proceed_one_lwp (thread
, except
);
5031 for_each_thread ([&] (thread_info
*thread
)
5033 proceed_one_lwp (thread
, except
);
5038 debug_printf ("unstop_all_lwps done\n");
5044 #ifdef HAVE_LINUX_REGSETS
5046 #define use_linux_regsets 1
5048 /* Returns true if REGSET has been disabled. */
5051 regset_disabled (struct regsets_info
*info
, struct regset_info
*regset
)
5053 return (info
->disabled_regsets
!= NULL
5054 && info
->disabled_regsets
[regset
- info
->regsets
]);
5057 /* Disable REGSET. */
5060 disable_regset (struct regsets_info
*info
, struct regset_info
*regset
)
5064 dr_offset
= regset
- info
->regsets
;
5065 if (info
->disabled_regsets
== NULL
)
5066 info
->disabled_regsets
= (char *) xcalloc (1, info
->num_regsets
);
5067 info
->disabled_regsets
[dr_offset
] = 1;
5071 regsets_fetch_inferior_registers (struct regsets_info
*regsets_info
,
5072 struct regcache
*regcache
)
5074 struct regset_info
*regset
;
5075 int saw_general_regs
= 0;
5079 pid
= lwpid_of (current_thread
);
5080 for (regset
= regsets_info
->regsets
; regset
->size
>= 0; regset
++)
5085 if (regset
->size
== 0 || regset_disabled (regsets_info
, regset
))
5088 buf
= xmalloc (regset
->size
);
5090 nt_type
= regset
->nt_type
;
5094 iov
.iov_len
= regset
->size
;
5095 data
= (void *) &iov
;
5101 res
= ptrace (regset
->get_request
, pid
,
5102 (PTRACE_TYPE_ARG3
) (long) nt_type
, data
);
5104 res
= ptrace (regset
->get_request
, pid
, data
, nt_type
);
5109 || (errno
== EINVAL
&& regset
->type
== OPTIONAL_REGS
))
5111 /* If we get EIO on a regset, or an EINVAL and the regset is
5112 optional, do not try it again for this process mode. */
5113 disable_regset (regsets_info
, regset
);
5115 else if (errno
== ENODATA
)
5117 /* ENODATA may be returned if the regset is currently
5118 not "active". This can happen in normal operation,
5119 so suppress the warning in this case. */
5121 else if (errno
== ESRCH
)
5123 /* At this point, ESRCH should mean the process is
5124 already gone, in which case we simply ignore attempts
5125 to read its registers. */
5130 sprintf (s
, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5137 if (regset
->type
== GENERAL_REGS
)
5138 saw_general_regs
= 1;
5139 regset
->store_function (regcache
, buf
);
5143 if (saw_general_regs
)
5150 regsets_store_inferior_registers (struct regsets_info
*regsets_info
,
5151 struct regcache
*regcache
)
5153 struct regset_info
*regset
;
5154 int saw_general_regs
= 0;
5158 pid
= lwpid_of (current_thread
);
5159 for (regset
= regsets_info
->regsets
; regset
->size
>= 0; regset
++)
5164 if (regset
->size
== 0 || regset_disabled (regsets_info
, regset
)
5165 || regset
->fill_function
== NULL
)
5168 buf
= xmalloc (regset
->size
);
5170 /* First fill the buffer with the current register set contents,
5171 in case there are any items in the kernel's regset that are
5172 not in gdbserver's regcache. */
5174 nt_type
= regset
->nt_type
;
5178 iov
.iov_len
= regset
->size
;
5179 data
= (void *) &iov
;
5185 res
= ptrace (regset
->get_request
, pid
,
5186 (PTRACE_TYPE_ARG3
) (long) nt_type
, data
);
5188 res
= ptrace (regset
->get_request
, pid
, data
, nt_type
);
5193 /* Then overlay our cached registers on that. */
5194 regset
->fill_function (regcache
, buf
);
5196 /* Only now do we write the register set. */
5198 res
= ptrace (regset
->set_request
, pid
,
5199 (PTRACE_TYPE_ARG3
) (long) nt_type
, data
);
5201 res
= ptrace (regset
->set_request
, pid
, data
, nt_type
);
5208 || (errno
== EINVAL
&& regset
->type
== OPTIONAL_REGS
))
5210 /* If we get EIO on a regset, or an EINVAL and the regset is
5211 optional, do not try it again for this process mode. */
5212 disable_regset (regsets_info
, regset
);
5214 else if (errno
== ESRCH
)
5216 /* At this point, ESRCH should mean the process is
5217 already gone, in which case we simply ignore attempts
5218 to change its registers. See also the related
5219 comment in resume_one_lwp. */
5225 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5228 else if (regset
->type
== GENERAL_REGS
)
5229 saw_general_regs
= 1;
5232 if (saw_general_regs
)
5238 #else /* !HAVE_LINUX_REGSETS */
5240 #define use_linux_regsets 0
5241 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5242 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5246 /* Return 1 if register REGNO is supported by one of the regset ptrace
5247 calls or 0 if it has to be transferred individually. */
5250 linux_register_in_regsets (const struct regs_info
*regs_info
, int regno
)
5252 unsigned char mask
= 1 << (regno
% 8);
5253 size_t index
= regno
/ 8;
5255 return (use_linux_regsets
5256 && (regs_info
->regset_bitmap
== NULL
5257 || (regs_info
->regset_bitmap
[index
] & mask
) != 0));
5260 #ifdef HAVE_LINUX_USRREGS
5263 register_addr (const struct usrregs_info
*usrregs
, int regnum
)
5267 if (regnum
< 0 || regnum
>= usrregs
->num_regs
)
5268 error ("Invalid register number %d.", regnum
);
5270 addr
= usrregs
->regmap
[regnum
];
5277 linux_process_target::fetch_register (const usrregs_info
*usrregs
,
5278 regcache
*regcache
, int regno
)
5285 if (regno
>= usrregs
->num_regs
)
5287 if (low_cannot_fetch_register (regno
))
5290 regaddr
= register_addr (usrregs
, regno
);
5294 size
= ((register_size (regcache
->tdesc
, regno
)
5295 + sizeof (PTRACE_XFER_TYPE
) - 1)
5296 & -sizeof (PTRACE_XFER_TYPE
));
5297 buf
= (char *) alloca (size
);
5299 pid
= lwpid_of (current_thread
);
5300 for (i
= 0; i
< size
; i
+= sizeof (PTRACE_XFER_TYPE
))
5303 *(PTRACE_XFER_TYPE
*) (buf
+ i
) =
5304 ptrace (PTRACE_PEEKUSER
, pid
,
5305 /* Coerce to a uintptr_t first to avoid potential gcc warning
5306 of coercing an 8 byte integer to a 4 byte pointer. */
5307 (PTRACE_TYPE_ARG3
) (uintptr_t) regaddr
, (PTRACE_TYPE_ARG4
) 0);
5308 regaddr
+= sizeof (PTRACE_XFER_TYPE
);
5311 /* Mark register REGNO unavailable. */
5312 supply_register (regcache
, regno
, NULL
);
5317 low_supply_ptrace_register (regcache
, regno
, buf
);
5321 linux_process_target::store_register (const usrregs_info
*usrregs
,
5322 regcache
*regcache
, int regno
)
5329 if (regno
>= usrregs
->num_regs
)
5331 if (low_cannot_store_register (regno
))
5334 regaddr
= register_addr (usrregs
, regno
);
5338 size
= ((register_size (regcache
->tdesc
, regno
)
5339 + sizeof (PTRACE_XFER_TYPE
) - 1)
5340 & -sizeof (PTRACE_XFER_TYPE
));
5341 buf
= (char *) alloca (size
);
5342 memset (buf
, 0, size
);
5344 low_collect_ptrace_register (regcache
, regno
, buf
);
5346 pid
= lwpid_of (current_thread
);
5347 for (i
= 0; i
< size
; i
+= sizeof (PTRACE_XFER_TYPE
))
5350 ptrace (PTRACE_POKEUSER
, pid
,
5351 /* Coerce to a uintptr_t first to avoid potential gcc warning
5352 about coercing an 8 byte integer to a 4 byte pointer. */
5353 (PTRACE_TYPE_ARG3
) (uintptr_t) regaddr
,
5354 (PTRACE_TYPE_ARG4
) *(PTRACE_XFER_TYPE
*) (buf
+ i
));
5357 /* At this point, ESRCH should mean the process is
5358 already gone, in which case we simply ignore attempts
5359 to change its registers. See also the related
5360 comment in resume_one_lwp. */
5365 if (!low_cannot_store_register (regno
))
5366 error ("writing register %d: %s", regno
, safe_strerror (errno
));
5368 regaddr
+= sizeof (PTRACE_XFER_TYPE
);
5371 #endif /* HAVE_LINUX_USRREGS */
5374 linux_process_target::low_collect_ptrace_register (regcache
*regcache
,
5375 int regno
, char *buf
)
5377 collect_register (regcache
, regno
, buf
);
5381 linux_process_target::low_supply_ptrace_register (regcache
*regcache
,
5382 int regno
, const char *buf
)
5384 supply_register (regcache
, regno
, buf
);
5388 linux_process_target::usr_fetch_inferior_registers (const regs_info
*regs_info
,
5392 #ifdef HAVE_LINUX_USRREGS
5393 struct usrregs_info
*usr
= regs_info
->usrregs
;
5397 for (regno
= 0; regno
< usr
->num_regs
; regno
++)
5398 if (all
|| !linux_register_in_regsets (regs_info
, regno
))
5399 fetch_register (usr
, regcache
, regno
);
5402 fetch_register (usr
, regcache
, regno
);
5407 linux_process_target::usr_store_inferior_registers (const regs_info
*regs_info
,
5411 #ifdef HAVE_LINUX_USRREGS
5412 struct usrregs_info
*usr
= regs_info
->usrregs
;
5416 for (regno
= 0; regno
< usr
->num_regs
; regno
++)
5417 if (all
|| !linux_register_in_regsets (regs_info
, regno
))
5418 store_register (usr
, regcache
, regno
);
5421 store_register (usr
, regcache
, regno
);
5426 linux_process_target::fetch_registers (regcache
*regcache
, int regno
)
5430 const regs_info
*regs_info
= get_regs_info ();
5434 if (regs_info
->usrregs
!= NULL
)
5435 for (regno
= 0; regno
< regs_info
->usrregs
->num_regs
; regno
++)
5436 low_fetch_register (regcache
, regno
);
5438 all
= regsets_fetch_inferior_registers (regs_info
->regsets_info
, regcache
);
5439 if (regs_info
->usrregs
!= NULL
)
5440 usr_fetch_inferior_registers (regs_info
, regcache
, -1, all
);
5444 if (low_fetch_register (regcache
, regno
))
5447 use_regsets
= linux_register_in_regsets (regs_info
, regno
);
5449 all
= regsets_fetch_inferior_registers (regs_info
->regsets_info
,
5451 if ((!use_regsets
|| all
) && regs_info
->usrregs
!= NULL
)
5452 usr_fetch_inferior_registers (regs_info
, regcache
, regno
, 1);
5457 linux_process_target::store_registers (regcache
*regcache
, int regno
)
5461 const regs_info
*regs_info
= get_regs_info ();
5465 all
= regsets_store_inferior_registers (regs_info
->regsets_info
,
5467 if (regs_info
->usrregs
!= NULL
)
5468 usr_store_inferior_registers (regs_info
, regcache
, regno
, all
);
5472 use_regsets
= linux_register_in_regsets (regs_info
, regno
);
5474 all
= regsets_store_inferior_registers (regs_info
->regsets_info
,
5476 if ((!use_regsets
|| all
) && regs_info
->usrregs
!= NULL
)
5477 usr_store_inferior_registers (regs_info
, regcache
, regno
, 1);
5482 linux_process_target::low_fetch_register (regcache
*regcache
, int regno
)
5487 /* A wrapper for the read_memory target op. */
5490 linux_read_memory (CORE_ADDR memaddr
, unsigned char *myaddr
, int len
)
5492 return the_target
->read_memory (memaddr
, myaddr
, len
);
5495 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5496 to debugger memory starting at MYADDR. */
5499 linux_process_target::read_memory (CORE_ADDR memaddr
,
5500 unsigned char *myaddr
, int len
)
5502 int pid
= lwpid_of (current_thread
);
5503 PTRACE_XFER_TYPE
*buffer
;
5511 /* Try using /proc. Don't bother for one word. */
5512 if (len
>= 3 * sizeof (long))
5516 /* We could keep this file open and cache it - possibly one per
5517 thread. That requires some juggling, but is even faster. */
5518 sprintf (filename
, "/proc/%d/mem", pid
);
5519 fd
= open (filename
, O_RDONLY
| O_LARGEFILE
);
5523 /* If pread64 is available, use it. It's faster if the kernel
5524 supports it (only one syscall), and it's 64-bit safe even on
5525 32-bit platforms (for instance, SPARC debugging a SPARC64
5528 bytes
= pread64 (fd
, myaddr
, len
, memaddr
);
5531 if (lseek (fd
, memaddr
, SEEK_SET
) != -1)
5532 bytes
= read (fd
, myaddr
, len
);
5539 /* Some data was read, we'll try to get the rest with ptrace. */
5549 /* Round starting address down to longword boundary. */
5550 addr
= memaddr
& -(CORE_ADDR
) sizeof (PTRACE_XFER_TYPE
);
5551 /* Round ending address up; get number of longwords that makes. */
5552 count
= ((((memaddr
+ len
) - addr
) + sizeof (PTRACE_XFER_TYPE
) - 1)
5553 / sizeof (PTRACE_XFER_TYPE
));
5554 /* Allocate buffer of that many longwords. */
5555 buffer
= XALLOCAVEC (PTRACE_XFER_TYPE
, count
);
5557 /* Read all the longwords */
5559 for (i
= 0; i
< count
; i
++, addr
+= sizeof (PTRACE_XFER_TYPE
))
5561 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5562 about coercing an 8 byte integer to a 4 byte pointer. */
5563 buffer
[i
] = ptrace (PTRACE_PEEKTEXT
, pid
,
5564 (PTRACE_TYPE_ARG3
) (uintptr_t) addr
,
5565 (PTRACE_TYPE_ARG4
) 0);
5571 /* Copy appropriate bytes out of the buffer. */
5574 i
*= sizeof (PTRACE_XFER_TYPE
);
5575 i
-= memaddr
& (sizeof (PTRACE_XFER_TYPE
) - 1);
5577 (char *) buffer
+ (memaddr
& (sizeof (PTRACE_XFER_TYPE
) - 1)),
5584 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5585 memory at MEMADDR. On failure (cannot write to the inferior)
5586 returns the value of errno. Always succeeds if LEN is zero. */
5589 linux_process_target::write_memory (CORE_ADDR memaddr
,
5590 const unsigned char *myaddr
, int len
)
5593 /* Round starting address down to longword boundary. */
5594 CORE_ADDR addr
= memaddr
& -(CORE_ADDR
) sizeof (PTRACE_XFER_TYPE
);
5595 /* Round ending address up; get number of longwords that makes. */
5597 = (((memaddr
+ len
) - addr
) + sizeof (PTRACE_XFER_TYPE
) - 1)
5598 / sizeof (PTRACE_XFER_TYPE
);
5600 /* Allocate buffer of that many longwords. */
5601 PTRACE_XFER_TYPE
*buffer
= XALLOCAVEC (PTRACE_XFER_TYPE
, count
);
5603 int pid
= lwpid_of (current_thread
);
5607 /* Zero length write always succeeds. */
5613 /* Dump up to four bytes. */
5614 char str
[4 * 2 + 1];
5616 int dump
= len
< 4 ? len
: 4;
5618 for (i
= 0; i
< dump
; i
++)
5620 sprintf (p
, "%02x", myaddr
[i
]);
5625 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5626 str
, (long) memaddr
, pid
);
5629 /* Fill start and end extra bytes of buffer with existing memory data. */
5632 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5633 about coercing an 8 byte integer to a 4 byte pointer. */
5634 buffer
[0] = ptrace (PTRACE_PEEKTEXT
, pid
,
5635 (PTRACE_TYPE_ARG3
) (uintptr_t) addr
,
5636 (PTRACE_TYPE_ARG4
) 0);
5644 = ptrace (PTRACE_PEEKTEXT
, pid
,
5645 /* Coerce to a uintptr_t first to avoid potential gcc warning
5646 about coercing an 8 byte integer to a 4 byte pointer. */
5647 (PTRACE_TYPE_ARG3
) (uintptr_t) (addr
+ (count
- 1)
5648 * sizeof (PTRACE_XFER_TYPE
)),
5649 (PTRACE_TYPE_ARG4
) 0);
5654 /* Copy data to be written over corresponding part of buffer. */
5656 memcpy ((char *) buffer
+ (memaddr
& (sizeof (PTRACE_XFER_TYPE
) - 1)),
5659 /* Write the entire buffer. */
5661 for (i
= 0; i
< count
; i
++, addr
+= sizeof (PTRACE_XFER_TYPE
))
5664 ptrace (PTRACE_POKETEXT
, pid
,
5665 /* Coerce to a uintptr_t first to avoid potential gcc warning
5666 about coercing an 8 byte integer to a 4 byte pointer. */
5667 (PTRACE_TYPE_ARG3
) (uintptr_t) addr
,
5668 (PTRACE_TYPE_ARG4
) buffer
[i
]);
5677 linux_process_target::look_up_symbols ()
5679 #ifdef USE_THREAD_DB
5680 struct process_info
*proc
= current_process ();
5682 if (proc
->priv
->thread_db
!= NULL
)
5690 linux_process_target::request_interrupt ()
5692 /* Send a SIGINT to the process group. This acts just like the user
5693 typed a ^C on the controlling terminal. */
5694 ::kill (-signal_pid
, SIGINT
);
5698 linux_process_target::supports_read_auxv ()
5703 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5704 to debugger memory starting at MYADDR. */
5707 linux_process_target::read_auxv (CORE_ADDR offset
, unsigned char *myaddr
,
5710 char filename
[PATH_MAX
];
5712 int pid
= lwpid_of (current_thread
);
5714 xsnprintf (filename
, sizeof filename
, "/proc/%d/auxv", pid
);
5716 fd
= open (filename
, O_RDONLY
);
5720 if (offset
!= (CORE_ADDR
) 0
5721 && lseek (fd
, (off_t
) offset
, SEEK_SET
) != (off_t
) offset
)
5724 n
= read (fd
, myaddr
, len
);
5732 linux_process_target::insert_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
5733 int size
, raw_breakpoint
*bp
)
5735 if (type
== raw_bkpt_type_sw
)
5736 return insert_memory_breakpoint (bp
);
5738 return low_insert_point (type
, addr
, size
, bp
);
5742 linux_process_target::low_insert_point (raw_bkpt_type type
, CORE_ADDR addr
,
5743 int size
, raw_breakpoint
*bp
)
5745 /* Unsupported (see target.h). */
5750 linux_process_target::remove_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
5751 int size
, raw_breakpoint
*bp
)
5753 if (type
== raw_bkpt_type_sw
)
5754 return remove_memory_breakpoint (bp
);
5756 return low_remove_point (type
, addr
, size
, bp
);
5760 linux_process_target::low_remove_point (raw_bkpt_type type
, CORE_ADDR addr
,
5761 int size
, raw_breakpoint
*bp
)
5763 /* Unsupported (see target.h). */
5767 /* Implement the stopped_by_sw_breakpoint target_ops
5771 linux_process_target::stopped_by_sw_breakpoint ()
5773 struct lwp_info
*lwp
= get_thread_lwp (current_thread
);
5775 return (lwp
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
);
5778 /* Implement the supports_stopped_by_sw_breakpoint target_ops
5782 linux_process_target::supports_stopped_by_sw_breakpoint ()
5784 return USE_SIGTRAP_SIGINFO
;
5787 /* Implement the stopped_by_hw_breakpoint target_ops
5791 linux_process_target::stopped_by_hw_breakpoint ()
5793 struct lwp_info
*lwp
= get_thread_lwp (current_thread
);
5795 return (lwp
->stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
);
5798 /* Implement the supports_stopped_by_hw_breakpoint target_ops
5802 linux_process_target::supports_stopped_by_hw_breakpoint ()
5804 return USE_SIGTRAP_SIGINFO
;
5807 /* Implement the supports_hardware_single_step target_ops method. */
5810 linux_process_target::supports_hardware_single_step ()
5816 linux_process_target::stopped_by_watchpoint ()
5818 struct lwp_info
*lwp
= get_thread_lwp (current_thread
);
5820 return lwp
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
;
5824 linux_process_target::stopped_data_address ()
5826 struct lwp_info
*lwp
= get_thread_lwp (current_thread
);
5828 return lwp
->stopped_data_address
;
5831 /* This is only used for targets that define PT_TEXT_ADDR,
5832 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
5833 the target has different ways of acquiring this information, like
5837 linux_process_target::supports_read_offsets ()
5839 #ifdef SUPPORTS_READ_OFFSETS
5846 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5847 to tell gdb about. */
5850 linux_process_target::read_offsets (CORE_ADDR
*text_p
, CORE_ADDR
*data_p
)
5852 #ifdef SUPPORTS_READ_OFFSETS
5853 unsigned long text
, text_end
, data
;
5854 int pid
= lwpid_of (current_thread
);
5858 text
= ptrace (PTRACE_PEEKUSER
, pid
, (PTRACE_TYPE_ARG3
) PT_TEXT_ADDR
,
5859 (PTRACE_TYPE_ARG4
) 0);
5860 text_end
= ptrace (PTRACE_PEEKUSER
, pid
, (PTRACE_TYPE_ARG3
) PT_TEXT_END_ADDR
,
5861 (PTRACE_TYPE_ARG4
) 0);
5862 data
= ptrace (PTRACE_PEEKUSER
, pid
, (PTRACE_TYPE_ARG3
) PT_DATA_ADDR
,
5863 (PTRACE_TYPE_ARG4
) 0);
5867 /* Both text and data offsets produced at compile-time (and so
5868 used by gdb) are relative to the beginning of the program,
5869 with the data segment immediately following the text segment.
5870 However, the actual runtime layout in memory may put the data
5871 somewhere else, so when we send gdb a data base-address, we
5872 use the real data base address and subtract the compile-time
5873 data base-address from it (which is just the length of the
5874 text segment). BSS immediately follows data in both
5877 *data_p
= data
- (text_end
- text
);
5883 gdb_assert_not_reached ("target op read_offsets not supported");
5888 linux_process_target::supports_get_tls_address ()
5890 #ifdef USE_THREAD_DB
5898 linux_process_target::get_tls_address (thread_info
*thread
,
5900 CORE_ADDR load_module
,
5903 #ifdef USE_THREAD_DB
5904 return thread_db_get_tls_address (thread
, offset
, load_module
, address
);
5911 linux_process_target::supports_qxfer_osdata ()
5917 linux_process_target::qxfer_osdata (const char *annex
,
5918 unsigned char *readbuf
,
5919 unsigned const char *writebuf
,
5920 CORE_ADDR offset
, int len
)
5922 return linux_common_xfer_osdata (annex
, readbuf
, offset
, len
);
5926 linux_process_target::siginfo_fixup (siginfo_t
*siginfo
,
5927 gdb_byte
*inf_siginfo
, int direction
)
5929 bool done
= low_siginfo_fixup (siginfo
, inf_siginfo
, direction
);
5931 /* If there was no callback, or the callback didn't do anything,
5932 then just do a straight memcpy. */
5936 memcpy (siginfo
, inf_siginfo
, sizeof (siginfo_t
));
5938 memcpy (inf_siginfo
, siginfo
, sizeof (siginfo_t
));
5943 linux_process_target::low_siginfo_fixup (siginfo_t
*native
, gdb_byte
*inf
,
5950 linux_process_target::supports_qxfer_siginfo ()
5956 linux_process_target::qxfer_siginfo (const char *annex
,
5957 unsigned char *readbuf
,
5958 unsigned const char *writebuf
,
5959 CORE_ADDR offset
, int len
)
5963 gdb_byte inf_siginfo
[sizeof (siginfo_t
)];
5965 if (current_thread
== NULL
)
5968 pid
= lwpid_of (current_thread
);
5971 debug_printf ("%s siginfo for lwp %d.\n",
5972 readbuf
!= NULL
? "Reading" : "Writing",
5975 if (offset
>= sizeof (siginfo
))
5978 if (ptrace (PTRACE_GETSIGINFO
, pid
, (PTRACE_TYPE_ARG3
) 0, &siginfo
) != 0)
5981 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5982 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5983 inferior with a 64-bit GDBSERVER should look the same as debugging it
5984 with a 32-bit GDBSERVER, we need to convert it. */
5985 siginfo_fixup (&siginfo
, inf_siginfo
, 0);
5987 if (offset
+ len
> sizeof (siginfo
))
5988 len
= sizeof (siginfo
) - offset
;
5990 if (readbuf
!= NULL
)
5991 memcpy (readbuf
, inf_siginfo
+ offset
, len
);
5994 memcpy (inf_siginfo
+ offset
, writebuf
, len
);
5996 /* Convert back to ptrace layout before flushing it out. */
5997 siginfo_fixup (&siginfo
, inf_siginfo
, 1);
5999 if (ptrace (PTRACE_SETSIGINFO
, pid
, (PTRACE_TYPE_ARG3
) 0, &siginfo
) != 0)
6006 /* SIGCHLD handler that serves two purposes: In non-stop/async mode,
6007 so we notice when children change state; as the handler for the
6008 sigsuspend in my_waitpid. */
6011 sigchld_handler (int signo
)
6013 int old_errno
= errno
;
6019 /* Use the async signal safe debug function. */
6020 if (debug_write ("sigchld_handler\n",
6021 sizeof ("sigchld_handler\n") - 1) < 0)
6022 break; /* just ignore */
6026 if (target_is_async_p ())
6027 async_file_mark (); /* trigger a linux_wait */
6033 linux_process_target::supports_non_stop ()
6039 linux_process_target::async (bool enable
)
6041 bool previous
= target_is_async_p ();
6044 debug_printf ("linux_async (%d), previous=%d\n",
6047 if (previous
!= enable
)
6050 sigemptyset (&mask
);
6051 sigaddset (&mask
, SIGCHLD
);
6053 gdb_sigmask (SIG_BLOCK
, &mask
, NULL
);
6057 if (pipe (linux_event_pipe
) == -1)
6059 linux_event_pipe
[0] = -1;
6060 linux_event_pipe
[1] = -1;
6061 gdb_sigmask (SIG_UNBLOCK
, &mask
, NULL
);
6063 warning ("creating event pipe failed.");
6067 fcntl (linux_event_pipe
[0], F_SETFL
, O_NONBLOCK
);
6068 fcntl (linux_event_pipe
[1], F_SETFL
, O_NONBLOCK
);
6070 /* Register the event loop handler. */
6071 add_file_handler (linux_event_pipe
[0],
6072 handle_target_event
, NULL
,
6075 /* Always trigger a linux_wait. */
6080 delete_file_handler (linux_event_pipe
[0]);
6082 close (linux_event_pipe
[0]);
6083 close (linux_event_pipe
[1]);
6084 linux_event_pipe
[0] = -1;
6085 linux_event_pipe
[1] = -1;
6088 gdb_sigmask (SIG_UNBLOCK
, &mask
, NULL
);
6095 linux_process_target::start_non_stop (bool nonstop
)
6097 /* Register or unregister from event-loop accordingly. */
6098 target_async (nonstop
);
6100 if (target_is_async_p () != (nonstop
!= false))
6107 linux_process_target::supports_multi_process ()
6112 /* Check if fork events are supported. */
6115 linux_process_target::supports_fork_events ()
6117 return linux_supports_tracefork ();
6120 /* Check if vfork events are supported. */
6123 linux_process_target::supports_vfork_events ()
6125 return linux_supports_tracefork ();
6128 /* Check if exec events are supported. */
6131 linux_process_target::supports_exec_events ()
6133 return linux_supports_traceexec ();
6136 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6137 ptrace flags for all inferiors. This is in case the new GDB connection
6138 doesn't support the same set of events that the previous one did. */
6141 linux_process_target::handle_new_gdb_connection ()
6143 /* Request that all the lwps reset their ptrace options. */
6144 for_each_thread ([] (thread_info
*thread
)
6146 struct lwp_info
*lwp
= get_thread_lwp (thread
);
6150 /* Stop the lwp so we can modify its ptrace options. */
6151 lwp
->must_set_ptrace_flags
= 1;
6152 linux_stop_lwp (lwp
);
6156 /* Already stopped; go ahead and set the ptrace options. */
6157 struct process_info
*proc
= find_process_pid (pid_of (thread
));
6158 int options
= linux_low_ptrace_options (proc
->attached
);
6160 linux_enable_event_reporting (lwpid_of (thread
), options
);
6161 lwp
->must_set_ptrace_flags
= 0;
6167 linux_process_target::handle_monitor_command (char *mon
)
6169 #ifdef USE_THREAD_DB
6170 return thread_db_handle_monitor_command (mon
);
6177 linux_process_target::core_of_thread (ptid_t ptid
)
6179 return linux_common_core_of_thread (ptid
);
6183 linux_process_target::supports_disable_randomization ()
6189 linux_process_target::supports_agent ()
6195 linux_process_target::supports_range_stepping ()
6197 if (supports_software_single_step ())
6200 return low_supports_range_stepping ();
6204 linux_process_target::low_supports_range_stepping ()
6210 linux_process_target::supports_pid_to_exec_file ()
6216 linux_process_target::pid_to_exec_file (int pid
)
6218 return linux_proc_pid_to_exec_file (pid
);
6222 linux_process_target::supports_multifs ()
6228 linux_process_target::multifs_open (int pid
, const char *filename
,
6229 int flags
, mode_t mode
)
6231 return linux_mntns_open_cloexec (pid
, filename
, flags
, mode
);
6235 linux_process_target::multifs_unlink (int pid
, const char *filename
)
6237 return linux_mntns_unlink (pid
, filename
);
6241 linux_process_target::multifs_readlink (int pid
, const char *filename
,
6242 char *buf
, size_t bufsiz
)
6244 return linux_mntns_readlink (pid
, filename
, buf
, bufsiz
);
6247 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6248 struct target_loadseg
6250 /* Core address to which the segment is mapped. */
6252 /* VMA recorded in the program header. */
6254 /* Size of this segment in memory. */
6258 # if defined PT_GETDSBT
6259 struct target_loadmap
6261 /* Protocol version number, must be zero. */
6263 /* Pointer to the DSBT table, its size, and the DSBT index. */
6264 unsigned *dsbt_table
;
6265 unsigned dsbt_size
, dsbt_index
;
6266 /* Number of segments in this map. */
6268 /* The actual memory map. */
6269 struct target_loadseg segs
[/*nsegs*/];
6271 # define LINUX_LOADMAP PT_GETDSBT
6272 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6273 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6275 struct target_loadmap
6277 /* Protocol version number, must be zero. */
6279 /* Number of segments in this map. */
6281 /* The actual memory map. */
6282 struct target_loadseg segs
[/*nsegs*/];
6284 # define LINUX_LOADMAP PTRACE_GETFDPIC
6285 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6286 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6290 linux_process_target::supports_read_loadmap ()
6296 linux_process_target::read_loadmap (const char *annex
, CORE_ADDR offset
,
6297 unsigned char *myaddr
, unsigned int len
)
6299 int pid
= lwpid_of (current_thread
);
6301 struct target_loadmap
*data
= NULL
;
6302 unsigned int actual_length
, copy_length
;
6304 if (strcmp (annex
, "exec") == 0)
6305 addr
= (int) LINUX_LOADMAP_EXEC
;
6306 else if (strcmp (annex
, "interp") == 0)
6307 addr
= (int) LINUX_LOADMAP_INTERP
;
6311 if (ptrace (LINUX_LOADMAP
, pid
, addr
, &data
) != 0)
6317 actual_length
= sizeof (struct target_loadmap
)
6318 + sizeof (struct target_loadseg
) * data
->nsegs
;
6320 if (offset
< 0 || offset
> actual_length
)
6323 copy_length
= actual_length
- offset
< len
? actual_length
- offset
: len
;
6324 memcpy (myaddr
, (char *) data
+ offset
, copy_length
);
6327 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6330 linux_process_target::supports_catch_syscall ()
6332 return (low_supports_catch_syscall ()
6333 && linux_supports_tracesysgood ());
6337 linux_process_target::low_supports_catch_syscall ()
6343 linux_process_target::read_pc (regcache
*regcache
)
6345 if (!low_supports_breakpoints ())
6348 return low_get_pc (regcache
);
6352 linux_process_target::write_pc (regcache
*regcache
, CORE_ADDR pc
)
6354 gdb_assert (low_supports_breakpoints ());
6356 low_set_pc (regcache
, pc
);
6360 linux_process_target::supports_thread_stopped ()
6366 linux_process_target::thread_stopped (thread_info
*thread
)
6368 return get_thread_lwp (thread
)->stopped
;
6371 /* This exposes stop-all-threads functionality to other modules. */
6374 linux_process_target::pause_all (bool freeze
)
6376 stop_all_lwps (freeze
, NULL
);
6379 /* This exposes unstop-all-threads functionality to other gdbserver
6383 linux_process_target::unpause_all (bool unfreeze
)
6385 unstop_all_lwps (unfreeze
, NULL
);
6389 linux_process_target::prepare_to_access_memory ()
6391 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6394 target_pause_all (true);
6399 linux_process_target::done_accessing_memory ()
6401 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6404 target_unpause_all (true);
6407 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6410 get_phdr_phnum_from_proc_auxv (const int pid
, const int is_elf64
,
6411 CORE_ADDR
*phdr_memaddr
, int *num_phdr
)
6413 char filename
[PATH_MAX
];
6415 const int auxv_size
= is_elf64
6416 ? sizeof (Elf64_auxv_t
) : sizeof (Elf32_auxv_t
);
6417 char buf
[sizeof (Elf64_auxv_t
)]; /* The larger of the two. */
6419 xsnprintf (filename
, sizeof filename
, "/proc/%d/auxv", pid
);
6421 fd
= open (filename
, O_RDONLY
);
6427 while (read (fd
, buf
, auxv_size
) == auxv_size
6428 && (*phdr_memaddr
== 0 || *num_phdr
== 0))
6432 Elf64_auxv_t
*const aux
= (Elf64_auxv_t
*) buf
;
6434 switch (aux
->a_type
)
6437 *phdr_memaddr
= aux
->a_un
.a_val
;
6440 *num_phdr
= aux
->a_un
.a_val
;
6446 Elf32_auxv_t
*const aux
= (Elf32_auxv_t
*) buf
;
6448 switch (aux
->a_type
)
6451 *phdr_memaddr
= aux
->a_un
.a_val
;
6454 *num_phdr
= aux
->a_un
.a_val
;
6462 if (*phdr_memaddr
== 0 || *num_phdr
== 0)
6464 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6465 "phdr_memaddr = %ld, phdr_num = %d",
6466 (long) *phdr_memaddr
, *num_phdr
);
6473 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6476 get_dynamic (const int pid
, const int is_elf64
)
6478 CORE_ADDR phdr_memaddr
, relocation
;
6480 unsigned char *phdr_buf
;
6481 const int phdr_size
= is_elf64
? sizeof (Elf64_Phdr
) : sizeof (Elf32_Phdr
);
6483 if (get_phdr_phnum_from_proc_auxv (pid
, is_elf64
, &phdr_memaddr
, &num_phdr
))
6486 gdb_assert (num_phdr
< 100); /* Basic sanity check. */
6487 phdr_buf
= (unsigned char *) alloca (num_phdr
* phdr_size
);
6489 if (linux_read_memory (phdr_memaddr
, phdr_buf
, num_phdr
* phdr_size
))
6492 /* Compute relocation: it is expected to be 0 for "regular" executables,
6493 non-zero for PIE ones. */
6495 for (i
= 0; relocation
== -1 && i
< num_phdr
; i
++)
6498 Elf64_Phdr
*const p
= (Elf64_Phdr
*) (phdr_buf
+ i
* phdr_size
);
6500 if (p
->p_type
== PT_PHDR
)
6501 relocation
= phdr_memaddr
- p
->p_vaddr
;
6505 Elf32_Phdr
*const p
= (Elf32_Phdr
*) (phdr_buf
+ i
* phdr_size
);
6507 if (p
->p_type
== PT_PHDR
)
6508 relocation
= phdr_memaddr
- p
->p_vaddr
;
6511 if (relocation
== -1)
6513 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
6514 any real world executables, including PIE executables, have always
6515 PT_PHDR present. PT_PHDR is not present in some shared libraries or
6516 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
6517 or present DT_DEBUG anyway (fpc binaries are statically linked).
6519 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
6521 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
6526 for (i
= 0; i
< num_phdr
; i
++)
6530 Elf64_Phdr
*const p
= (Elf64_Phdr
*) (phdr_buf
+ i
* phdr_size
);
6532 if (p
->p_type
== PT_DYNAMIC
)
6533 return p
->p_vaddr
+ relocation
;
6537 Elf32_Phdr
*const p
= (Elf32_Phdr
*) (phdr_buf
+ i
* phdr_size
);
6539 if (p
->p_type
== PT_DYNAMIC
)
6540 return p
->p_vaddr
+ relocation
;
6547 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6548 can be 0 if the inferior does not yet have the library list initialized.
6549 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6550 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6553 get_r_debug (const int pid
, const int is_elf64
)
6555 CORE_ADDR dynamic_memaddr
;
6556 const int dyn_size
= is_elf64
? sizeof (Elf64_Dyn
) : sizeof (Elf32_Dyn
);
6557 unsigned char buf
[sizeof (Elf64_Dyn
)]; /* The larger of the two. */
6560 dynamic_memaddr
= get_dynamic (pid
, is_elf64
);
6561 if (dynamic_memaddr
== 0)
6564 while (linux_read_memory (dynamic_memaddr
, buf
, dyn_size
) == 0)
6568 Elf64_Dyn
*const dyn
= (Elf64_Dyn
*) buf
;
6569 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6573 unsigned char buf
[sizeof (Elf64_Xword
)];
6577 #ifdef DT_MIPS_RLD_MAP
6578 if (dyn
->d_tag
== DT_MIPS_RLD_MAP
)
6580 if (linux_read_memory (dyn
->d_un
.d_val
,
6581 rld_map
.buf
, sizeof (rld_map
.buf
)) == 0)
6586 #endif /* DT_MIPS_RLD_MAP */
6587 #ifdef DT_MIPS_RLD_MAP_REL
6588 if (dyn
->d_tag
== DT_MIPS_RLD_MAP_REL
)
6590 if (linux_read_memory (dyn
->d_un
.d_val
+ dynamic_memaddr
,
6591 rld_map
.buf
, sizeof (rld_map
.buf
)) == 0)
6596 #endif /* DT_MIPS_RLD_MAP_REL */
6598 if (dyn
->d_tag
== DT_DEBUG
&& map
== -1)
6599 map
= dyn
->d_un
.d_val
;
6601 if (dyn
->d_tag
== DT_NULL
)
6606 Elf32_Dyn
*const dyn
= (Elf32_Dyn
*) buf
;
6607 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6611 unsigned char buf
[sizeof (Elf32_Word
)];
6615 #ifdef DT_MIPS_RLD_MAP
6616 if (dyn
->d_tag
== DT_MIPS_RLD_MAP
)
6618 if (linux_read_memory (dyn
->d_un
.d_val
,
6619 rld_map
.buf
, sizeof (rld_map
.buf
)) == 0)
6624 #endif /* DT_MIPS_RLD_MAP */
6625 #ifdef DT_MIPS_RLD_MAP_REL
6626 if (dyn
->d_tag
== DT_MIPS_RLD_MAP_REL
)
6628 if (linux_read_memory (dyn
->d_un
.d_val
+ dynamic_memaddr
,
6629 rld_map
.buf
, sizeof (rld_map
.buf
)) == 0)
6634 #endif /* DT_MIPS_RLD_MAP_REL */
6636 if (dyn
->d_tag
== DT_DEBUG
&& map
== -1)
6637 map
= dyn
->d_un
.d_val
;
6639 if (dyn
->d_tag
== DT_NULL
)
6643 dynamic_memaddr
+= dyn_size
;
6649 /* Read one pointer from MEMADDR in the inferior. */
6652 read_one_ptr (CORE_ADDR memaddr
, CORE_ADDR
*ptr
, int ptr_size
)
6656 /* Go through a union so this works on either big or little endian
6657 hosts, when the inferior's pointer size is smaller than the size
6658 of CORE_ADDR. It is assumed the inferior's endianness is the
6659 same of the superior's. */
6662 CORE_ADDR core_addr
;
6667 ret
= linux_read_memory (memaddr
, &addr
.uc
, ptr_size
);
6670 if (ptr_size
== sizeof (CORE_ADDR
))
6671 *ptr
= addr
.core_addr
;
6672 else if (ptr_size
== sizeof (unsigned int))
6675 gdb_assert_not_reached ("unhandled pointer size");
6681 linux_process_target::supports_qxfer_libraries_svr4 ()
/* Per-ABI offsets into the SVR4 `struct r_debug' and `struct link_map'
   layouts, used when parsing the inferior's dynamic linker data.  */

struct link_map_offsets
  {
    /* Offset and size of r_debug.r_version.  */
    int r_version_offset;

    /* Offset and size of r_debug.r_map.  */
    int r_map_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };
6710 /* Construct qXfer:libraries-svr4:read reply. */
6713 linux_process_target::qxfer_libraries_svr4 (const char *annex
,
6714 unsigned char *readbuf
,
6715 unsigned const char *writebuf
,
6716 CORE_ADDR offset
, int len
)
6718 struct process_info_private
*const priv
= current_process ()->priv
;
6719 char filename
[PATH_MAX
];
6722 static const struct link_map_offsets lmo_32bit_offsets
=
6724 0, /* r_version offset. */
6725 4, /* r_debug.r_map offset. */
6726 0, /* l_addr offset in link_map. */
6727 4, /* l_name offset in link_map. */
6728 8, /* l_ld offset in link_map. */
6729 12, /* l_next offset in link_map. */
6730 16 /* l_prev offset in link_map. */
6733 static const struct link_map_offsets lmo_64bit_offsets
=
6735 0, /* r_version offset. */
6736 8, /* r_debug.r_map offset. */
6737 0, /* l_addr offset in link_map. */
6738 8, /* l_name offset in link_map. */
6739 16, /* l_ld offset in link_map. */
6740 24, /* l_next offset in link_map. */
6741 32 /* l_prev offset in link_map. */
6743 const struct link_map_offsets
*lmo
;
6744 unsigned int machine
;
6746 CORE_ADDR lm_addr
= 0, lm_prev
= 0;
6747 CORE_ADDR l_name
, l_addr
, l_ld
, l_next
, l_prev
;
6748 int header_done
= 0;
6750 if (writebuf
!= NULL
)
6752 if (readbuf
== NULL
)
6755 pid
= lwpid_of (current_thread
);
6756 xsnprintf (filename
, sizeof filename
, "/proc/%d/exe", pid
);
6757 is_elf64
= elf_64_file_p (filename
, &machine
);
6758 lmo
= is_elf64
? &lmo_64bit_offsets
: &lmo_32bit_offsets
;
6759 ptr_size
= is_elf64
? 8 : 4;
6761 while (annex
[0] != '\0')
6767 sep
= strchr (annex
, '=');
6771 name_len
= sep
- annex
;
6772 if (name_len
== 5 && startswith (annex
, "start"))
6774 else if (name_len
== 4 && startswith (annex
, "prev"))
6778 annex
= strchr (sep
, ';');
6785 annex
= decode_address_to_semicolon (addrp
, sep
+ 1);
6792 if (priv
->r_debug
== 0)
6793 priv
->r_debug
= get_r_debug (pid
, is_elf64
);
6795 /* We failed to find DT_DEBUG. Such situation will not change
6796 for this inferior - do not retry it. Report it to GDB as
6797 E01, see for the reasons at the GDB solib-svr4.c side. */
6798 if (priv
->r_debug
== (CORE_ADDR
) -1)
6801 if (priv
->r_debug
!= 0)
6803 if (linux_read_memory (priv
->r_debug
+ lmo
->r_version_offset
,
6804 (unsigned char *) &r_version
,
6805 sizeof (r_version
)) != 0
6808 warning ("unexpected r_debug version %d", r_version
);
6810 else if (read_one_ptr (priv
->r_debug
+ lmo
->r_map_offset
,
6811 &lm_addr
, ptr_size
) != 0)
6813 warning ("unable to read r_map from 0x%lx",
6814 (long) priv
->r_debug
+ lmo
->r_map_offset
);
6819 std::string document
= "<library-list-svr4 version=\"1.0\"";
6822 && read_one_ptr (lm_addr
+ lmo
->l_name_offset
,
6823 &l_name
, ptr_size
) == 0
6824 && read_one_ptr (lm_addr
+ lmo
->l_addr_offset
,
6825 &l_addr
, ptr_size
) == 0
6826 && read_one_ptr (lm_addr
+ lmo
->l_ld_offset
,
6827 &l_ld
, ptr_size
) == 0
6828 && read_one_ptr (lm_addr
+ lmo
->l_prev_offset
,
6829 &l_prev
, ptr_size
) == 0
6830 && read_one_ptr (lm_addr
+ lmo
->l_next_offset
,
6831 &l_next
, ptr_size
) == 0)
6833 unsigned char libname
[PATH_MAX
];
6835 if (lm_prev
!= l_prev
)
6837 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6838 (long) lm_prev
, (long) l_prev
);
6842 /* Ignore the first entry even if it has valid name as the first entry
6843 corresponds to the main executable. The first entry should not be
6844 skipped if the dynamic loader was loaded late by a static executable
6845 (see solib-svr4.c parameter ignore_first). But in such case the main
6846 executable does not have PT_DYNAMIC present and this function already
6847 exited above due to failed get_r_debug. */
6849 string_appendf (document
, " main-lm=\"0x%lx\"", (unsigned long) lm_addr
);
6852 /* Not checking for error because reading may stop before
6853 we've got PATH_MAX worth of characters. */
6855 linux_read_memory (l_name
, libname
, sizeof (libname
) - 1);
6856 libname
[sizeof (libname
) - 1] = '\0';
6857 if (libname
[0] != '\0')
6861 /* Terminate `<library-list-svr4'. */
6866 string_appendf (document
, "<library name=\"");
6867 xml_escape_text_append (&document
, (char *) libname
);
6868 string_appendf (document
, "\" lm=\"0x%lx\" "
6869 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6870 (unsigned long) lm_addr
, (unsigned long) l_addr
,
6871 (unsigned long) l_ld
);
6881 /* Empty list; terminate `<library-list-svr4'. */
6885 document
+= "</library-list-svr4>";
6887 int document_len
= document
.length ();
6888 if (offset
< document_len
)
6889 document_len
-= offset
;
6892 if (len
> document_len
)
6895 memcpy (readbuf
, document
.data () + offset
, len
);
6900 #ifdef HAVE_LINUX_BTRACE
6902 btrace_target_info
*
6903 linux_process_target::enable_btrace (ptid_t ptid
,
6904 const btrace_config
*conf
)
6906 return linux_enable_btrace (ptid
, conf
);
6909 /* See to_disable_btrace target method. */
6912 linux_process_target::disable_btrace (btrace_target_info
*tinfo
)
6914 enum btrace_error err
;
6916 err
= linux_disable_btrace (tinfo
);
6917 return (err
== BTRACE_ERR_NONE
? 0 : -1);
6920 /* Encode an Intel Processor Trace configuration. */
6923 linux_low_encode_pt_config (struct buffer
*buffer
,
6924 const struct btrace_data_pt_config
*config
)
6926 buffer_grow_str (buffer
, "<pt-config>\n");
6928 switch (config
->cpu
.vendor
)
6931 buffer_xml_printf (buffer
, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6932 "model=\"%u\" stepping=\"%u\"/>\n",
6933 config
->cpu
.family
, config
->cpu
.model
,
6934 config
->cpu
.stepping
);
6941 buffer_grow_str (buffer
, "</pt-config>\n");
6944 /* Encode a raw buffer. */
6947 linux_low_encode_raw (struct buffer
*buffer
, const gdb_byte
*data
,
6953 /* We use hex encoding - see gdbsupport/rsp-low.h. */
6954 buffer_grow_str (buffer
, "<raw>\n");
6960 elem
[0] = tohex ((*data
>> 4) & 0xf);
6961 elem
[1] = tohex (*data
++ & 0xf);
6963 buffer_grow (buffer
, elem
, 2);
6966 buffer_grow_str (buffer
, "</raw>\n");
6969 /* See to_read_btrace target method. */
6972 linux_process_target::read_btrace (btrace_target_info
*tinfo
,
6974 enum btrace_read_type type
)
6976 struct btrace_data btrace
;
6977 enum btrace_error err
;
6979 err
= linux_read_btrace (&btrace
, tinfo
, type
);
6980 if (err
!= BTRACE_ERR_NONE
)
6982 if (err
== BTRACE_ERR_OVERFLOW
)
6983 buffer_grow_str0 (buffer
, "E.Overflow.");
6985 buffer_grow_str0 (buffer
, "E.Generic Error.");
6990 switch (btrace
.format
)
6992 case BTRACE_FORMAT_NONE
:
6993 buffer_grow_str0 (buffer
, "E.No Trace.");
6996 case BTRACE_FORMAT_BTS
:
6997 buffer_grow_str (buffer
, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6998 buffer_grow_str (buffer
, "<btrace version=\"1.0\">\n");
7000 for (const btrace_block
&block
: *btrace
.variant
.bts
.blocks
)
7001 buffer_xml_printf (buffer
, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7002 paddress (block
.begin
), paddress (block
.end
));
7004 buffer_grow_str0 (buffer
, "</btrace>\n");
7007 case BTRACE_FORMAT_PT
:
7008 buffer_grow_str (buffer
, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7009 buffer_grow_str (buffer
, "<btrace version=\"1.0\">\n");
7010 buffer_grow_str (buffer
, "<pt>\n");
7012 linux_low_encode_pt_config (buffer
, &btrace
.variant
.pt
.config
);
7014 linux_low_encode_raw (buffer
, btrace
.variant
.pt
.data
,
7015 btrace
.variant
.pt
.size
);
7017 buffer_grow_str (buffer
, "</pt>\n");
7018 buffer_grow_str0 (buffer
, "</btrace>\n");
7022 buffer_grow_str0 (buffer
, "E.Unsupported Trace Format.");
7029 /* See to_btrace_conf target method. */
7032 linux_process_target::read_btrace_conf (const btrace_target_info
*tinfo
,
7035 const struct btrace_config
*conf
;
7037 buffer_grow_str (buffer
, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7038 buffer_grow_str (buffer
, "<btrace-conf version=\"1.0\">\n");
7040 conf
= linux_btrace_conf (tinfo
);
7043 switch (conf
->format
)
7045 case BTRACE_FORMAT_NONE
:
7048 case BTRACE_FORMAT_BTS
:
7049 buffer_xml_printf (buffer
, "<bts");
7050 buffer_xml_printf (buffer
, " size=\"0x%x\"", conf
->bts
.size
);
7051 buffer_xml_printf (buffer
, " />\n");
7054 case BTRACE_FORMAT_PT
:
7055 buffer_xml_printf (buffer
, "<pt");
7056 buffer_xml_printf (buffer
, " size=\"0x%x\"", conf
->pt
.size
);
7057 buffer_xml_printf (buffer
, "/>\n");
7062 buffer_grow_str0 (buffer
, "</btrace-conf>\n");
7065 #endif /* HAVE_LINUX_BTRACE */
7067 /* See nat/linux-nat.h. */
7070 current_lwp_ptid (void)
7072 return ptid_of (current_thread
);
7076 linux_process_target::thread_name (ptid_t thread
)
7078 return linux_proc_tid_get_name (thread
);
7083 linux_process_target::thread_handle (ptid_t ptid
, gdb_byte
**handle
,
7086 return thread_db_thread_handle (ptid
, handle
, handle_len
);
7091 linux_process_target::thread_pending_parent (thread_info
*thread
)
7093 lwp_info
*parent
= get_thread_lwp (thread
)->pending_parent ();
7095 if (parent
== nullptr)
7098 return get_lwp_thread (parent
);
7102 linux_process_target::thread_pending_child (thread_info
*thread
)
7104 lwp_info
*child
= get_thread_lwp (thread
)->pending_child ();
7106 if (child
== nullptr)
7109 return get_lwp_thread (child
);
7112 /* Default implementation of linux_target_ops method "set_pc" for
7113 32-bit pc register which is literally named "pc". */
7116 linux_set_pc_32bit (struct regcache
*regcache
, CORE_ADDR pc
)
7118 uint32_t newpc
= pc
;
7120 supply_register_by_name (regcache
, "pc", &newpc
);
7123 /* Default implementation of linux_target_ops method "get_pc" for
7124 32-bit pc register which is literally named "pc". */
7127 linux_get_pc_32bit (struct regcache
*regcache
)
7131 collect_register_by_name (regcache
, "pc", &pc
);
7133 debug_printf ("stop pc is 0x%" PRIx32
"\n", pc
);
7137 /* Default implementation of linux_target_ops method "set_pc" for
7138 64-bit pc register which is literally named "pc". */
7141 linux_set_pc_64bit (struct regcache
*regcache
, CORE_ADDR pc
)
7143 uint64_t newpc
= pc
;
7145 supply_register_by_name (regcache
, "pc", &newpc
);
7148 /* Default implementation of linux_target_ops method "get_pc" for
7149 64-bit pc register which is literally named "pc". */
7152 linux_get_pc_64bit (struct regcache
*regcache
)
7156 collect_register_by_name (regcache
, "pc", &pc
);
7158 debug_printf ("stop pc is 0x%" PRIx64
"\n", pc
);
7162 /* See linux-low.h. */
7165 linux_get_auxv (int wordsize
, CORE_ADDR match
, CORE_ADDR
*valp
)
7167 gdb_byte
*data
= (gdb_byte
*) alloca (2 * wordsize
);
7170 gdb_assert (wordsize
== 4 || wordsize
== 8);
7172 while (the_target
->read_auxv (offset
, data
, 2 * wordsize
) == 2 * wordsize
)
7176 uint32_t *data_p
= (uint32_t *) data
;
7177 if (data_p
[0] == match
)
7185 uint64_t *data_p
= (uint64_t *) data
;
7186 if (data_p
[0] == match
)
7193 offset
+= 2 * wordsize
;
7199 /* See linux-low.h. */
7202 linux_get_hwcap (int wordsize
)
7204 CORE_ADDR hwcap
= 0;
7205 linux_get_auxv (wordsize
, AT_HWCAP
, &hwcap
);
7209 /* See linux-low.h. */
7212 linux_get_hwcap2 (int wordsize
)
7214 CORE_ADDR hwcap2
= 0;
7215 linux_get_auxv (wordsize
, AT_HWCAP2
, &hwcap2
);
#ifdef HAVE_LINUX_REGSETS
/* Count the regsets in INFO's sentinel-terminated (size < 0) table and
   cache the count in INFO->num_regsets.  */

void
initialize_regsets_info (struct regsets_info *info)
{
  for (info->num_regsets = 0;
       info->regsets[info->num_regsets].size >= 0;
       info->num_regsets++)
    ;
}
#endif
7231 initialize_low (void)
7233 struct sigaction sigchld_action
;
7235 memset (&sigchld_action
, 0, sizeof (sigchld_action
));
7236 set_target_ops (the_linux_target
);
7238 linux_ptrace_init_warnings ();
7239 linux_proc_init_warnings ();
7241 sigchld_action
.sa_handler
= sigchld_handler
;
7242 sigemptyset (&sigchld_action
.sa_mask
);
7243 sigchld_action
.sa_flags
= SA_RESTART
;
7244 sigaction (SIGCHLD
, &sigchld_action
, NULL
);
7246 initialize_low_arch ();
7248 linux_check_ptrace_features ();