/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2022 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "gdbsupport/agent.h"
#include "tdesc.h"
#include "gdbsupport/event-loop.h"
#include "gdbsupport/event-pipe.h"
#include "gdbsupport/rsp-low.h"
#include "gdbsupport/signals-state-save-restore.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdbsupport/gdb_wait.h"
#include "nat/gdb_ptrace.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "gdbsupport/filestuff.h"
#include "tracepoint.h"
#include <inttypes.h>
#include "gdbsupport/common-inferior.h"
#include "nat/fork-inferior.h"
#include "gdbsupport/environ.h"
#include "gdbsupport/gdb-sigmask.h"
#include "gdbsupport/scoped_restore.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif
#include "nat/linux-namespaces.h"

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef AT_HWCAP2
#define AT_HWCAP2 26
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR     (0x10000*4)
#define PT_DATA_ADDR     (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#if (defined(__UCLIBC__) \
     && defined(HAS_NOMMU) \
     && defined(PT_TEXT_ADDR) \
     && defined(PT_DATA_ADDR) \
     && defined(PT_TEXT_END_ADDR))
#define SUPPORTS_READ_OFFSETS
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "gdbsupport/btrace-common.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif
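
/* A minimal usage sketch (kept under "#if 0"; not part of the build):
   the auxv record layouts above match the records in /proc/PID/auxv,
   which is how a debugger can read AT_HWCAP2 for an inferior.  The
   function name is hypothetical, and a real reader would have to pick
   the 32-bit or 64-bit record size to match the inferior.  */
#if 0
static uint64_t
example_read_at_hwcap2 (int pid)
{
  char path[64];
  Elf64_auxv_t entry;
  uint64_t value = 0;

  sprintf (path, "/proc/%d/auxv", pid);
  int fd = open (path, O_RDONLY);
  if (fd < 0)
    return 0;

  /* The file is a flat array of auxv records, terminated by AT_NULL.  */
  while (read (fd, &entry, sizeof entry) == sizeof entry
	 && entry.a_type != AT_NULL)
    if (entry.a_type == AT_HWCAP2)
      {
	value = entry.a_un.a_val;
	break;
      }

  close (fd);
  return value;
}
#endif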

/* Does the current host support PTRACE_GETREGSET?  */
int have_ptrace_getregset = -1;

/* Return TRUE if THREAD is the leader thread of the process.  */

static bool
is_leader (thread_info *thread)
{
  ptid_t ptid = ptid_of (thread);
  return ptid.pid () == ptid.lwp ();
}

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->stepping;
}

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
static struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}
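
/* Usage sketch for the list above (kept under "#if 0"; not part of the
   build).  The waitpid side stashes a stop it cannot yet attribute to
   any known thread; the fork/clone event handler later claims it by
   pid instead of calling waitpid again.  The pid and status here are
   made up for illustration.  */
#if 0
static void
example_stash_then_claim (void)
{
  int status;

  /* An unknown child (pid 1234) reported a SIGSTOP; remember it.  */
  add_to_pid_list (&stopped_pids, 1234, W_STOPCODE (SIGSTOP));

  /* Later, a PTRACE_EVENT_FORK names 1234 as the new child, so claim
     the stashed status rather than blocking in waitpid.  */
  if (pull_pid_from_list (&stopped_pids, 1234, &status))
    gdb_assert (WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP);
}
#endif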

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
static stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void unsuspend_all_lwps (struct lwp_info *except);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int lwp_is_marked_dead (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
static int linux_low_ptrace_options (int attached);
static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
static ptid_t step_over_bkpt;

bool
linux_process_target::low_supports_breakpoints ()
{
  return false;
}

CORE_ADDR
linux_process_target::low_get_pc (regcache *regcache)
{
  return 0;
}

void
linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
{
  gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
}

std::vector<CORE_ADDR>
linux_process_target::low_get_next_pcs (regcache *regcache)
{
  gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
			  "implemented");
}

int
linux_process_target::low_decr_pc_after_break ()
{
  return 0;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

/* The event pipe registered as a waitable file in the event loop.  */
static event_pipe linux_event_pipe;

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe.is_open ())
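
/* What the event pipe amounts to, as a sketch (kept under "#if 0"; not
   part of the build): the classic self-pipe trick.  A write from
   SIGCHLD context makes the read end readable, which wakes the event
   loop's poll/select; the loop then drains the pipe.  The names below
   are hypothetical; the real implementation is gdbsupport's
   event_pipe class.  */
#if 0
static int example_pipe[2];	/* [0] = read end, [1] = write end.  */

static void
example_async_mark (void)
{
  /* Async-signal-safe wake-up.  */
  char c = '+';
  (void) write (example_pipe[1], &c, 1);
}

static void
example_async_drain (void)
{
  /* Called from the event loop; both ends must be O_NONBLOCK.  */
  char c;
  while (read (example_pipe[0], &c, 1) == 1)
    ;
}
#endif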

static void send_sigstop (struct lwp_info *lwp);

/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}
/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}
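
/* Usage sketch (kept under "#if 0"; not part of the build): deciding
   whether a process we are about to attach to runs a 64-bit
   executable.  PID 1234 is made up for illustration.  */
#if 0
static void
example_check_exe_class (void)
{
  unsigned int machine;
  int is_64 = linux_pid_exe_is_elf_64_file (1234, &machine);

  if (is_64 < 0)
    ;	/* /proc/1234/exe unreadable, or not an ELF file.  */
  else if (is_64)
    ;	/* 64-bit ELF; MACHINE holds e_machine, e.g. EM_X86_64.  */
  else
    ;	/* 32-bit ELF.  */
}
#endif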

void
linux_process_target::delete_lwp (lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  threads_debug_printf ("deleting %ld", lwpid_of (thr));

  remove_thread (thr);

  low_delete_thread (lwp->arch_private);

  delete lwp;
}

void
linux_process_target::low_delete_thread (arch_lwp_info *info)
{
  /* Default implementation should be overridden if architecture-specific
     info is being used.  */
  gdb_assert (info == nullptr);
}

/* Open the /proc/PID/mem file for PROC.  */

static void
open_proc_mem_file (process_info *proc)
{
  gdb_assert (proc->priv->mem_fd == -1);

  char filename[64];
  xsnprintf (filename, sizeof filename, "/proc/%d/mem", proc->pid);

  proc->priv->mem_fd
    = gdb_open_cloexec (filename, O_RDWR | O_LARGEFILE, 0).release ();
}
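
/* Why the mem_fd above is worth keeping open, as a sketch (kept under
   "#if 0"; not part of the build): one pread on /proc/PID/mem moves an
   arbitrary amount of inferior memory in a single syscall, unlike
   word-at-a-time PTRACE_PEEKDATA, and it stays usable while any thread
   of the process keeps the address space alive.  Hypothetical helper
   name and error convention.  */
#if 0
static int
example_read_inferior_memory (int mem_fd, CORE_ADDR memaddr,
			      unsigned char *myaddr, int len)
{
  ssize_t n = pread (mem_fd, myaddr, len, (off_t) memaddr);
  return n == len ? 0 : EIO;	/* 0 on success, an errno otherwise.  */
}
#endif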

process_info *
linux_process_target::add_linux_process_no_mem_file (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = XCNEW (struct process_info_private);

  proc->priv->arch_private = low_new_process ();
  proc->priv->mem_fd = -1;

  return proc;
}


process_info *
linux_process_target::add_linux_process (int pid, int attached)
{
  process_info *proc = add_linux_process_no_mem_file (pid, attached);
  open_proc_mem_file (proc);
  return proc;
}

arch_process_info *
linux_process_target::low_new_process ()
{
  return nullptr;
}

void
linux_process_target::low_delete_process (arch_process_info *info)
{
  /* Default implementation must be overridden if architecture-specific
     info exists.  */
  gdb_assert (info == nullptr);
}

void
linux_process_target::low_new_fork (process_info *parent, process_info *child)
{
  /* Nop.  */
}

void
linux_process_target::arch_setup_thread (thread_info *thread)
{
  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  low_arch_setup ();
}

int
linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
					    int wstat)
{
  client_state &cs = get_client_state ();
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  gdb_assert (event_lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  struct process_info *parent_proc;
	  struct process_info *child_proc;
	  struct lwp_info *child_lwp;
	  struct thread_info *child_thr;

	  ptid = ptid_t (new_pid, new_pid);

	  threads_debug_printf ("Got fork event from LWP %ld, "
				"new child is %d",
				ptid_of (event_thr).lwp (),
				ptid.pid ());

	  /* Add the new process to the tables and clone the breakpoint
	     lists of the parent.  We need to do this even if the new process
	     will be detached, since we will need the process object and the
	     breakpoints to remove any breakpoints from memory when we
	     detach, and the client side will access registers.  */
	  child_proc = add_linux_process (new_pid, 0);
	  gdb_assert (child_proc != NULL);
	  child_lwp = add_lwp (ptid);
	  gdb_assert (child_lwp != NULL);
	  child_lwp->stopped = 1;
	  child_lwp->must_set_ptrace_flags = 1;
	  child_lwp->status_pending_p = 0;
	  child_thr = get_lwp_thread (child_lwp);
	  child_thr->last_resume_kind = resume_stop;
	  child_thr->last_status.set_stopped (GDB_SIGNAL_0);

	  /* If we're suspending all threads, leave this one suspended
	     too.  If the fork/clone parent is stepping over a breakpoint,
	     all other threads have been suspended already.  Leave the
	     child suspended too.  */
	  if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	      || event_lwp->bp_reinsert != 0)
	    {
	      threads_debug_printf ("leaving child suspended");
	      child_lwp->suspended = 1;
	    }

	  parent_proc = get_thread_process (event_thr);
	  child_proc->attached = parent_proc->attached;

	  if (event_lwp->bp_reinsert != 0
	      && supports_software_single_step ()
	      && event == PTRACE_EVENT_VFORK)
	    {
	      /* If we leave single-step breakpoints there, child will
		 hit it, so uninsert single-step breakpoints from parent
		 (and child).  Once vfork child is done, reinsert
		 them back to parent.  */
	      uninsert_single_step_breakpoints (event_thr);
	    }

	  clone_all_breakpoints (child_thr, event_thr);

	  target_desc_up tdesc = allocate_target_description ();
	  copy_target_description (tdesc.get (), parent_proc->tdesc);
	  child_proc->tdesc = tdesc.release ();

	  /* Clone arch-specific process data.  */
	  low_new_fork (parent_proc, child_proc);

	  /* Save fork info in the parent thread.  */
	  if (event == PTRACE_EVENT_FORK)
	    event_lwp->waitstatus.set_forked (ptid);
	  else if (event == PTRACE_EVENT_VFORK)
	    event_lwp->waitstatus.set_vforked (ptid);

	  /* The status_pending field contains bits denoting the
	     extended event, so when the pending event is handled,
	     the handler will look at lwp->waitstatus.  */
	  event_lwp->status_pending_p = 1;
	  event_lwp->status_pending = wstat;

	  /* Link the threads until the parent event is passed on to
	     higher layers.  */
	  event_lwp->fork_relative = child_lwp;
	  child_lwp->fork_relative = event_lwp;

	  /* If the parent thread is doing step-over with single-step
	     breakpoints, the list of single-step breakpoints are cloned
	     from the parent's.  Remove them from the child process.
	     In case of vfork, we'll reinsert them back once vforked
	     child is done.  */
	  if (event_lwp->bp_reinsert != 0
	      && supports_software_single_step ())
	    {
	      /* The child process is forked and stopped, so it is safe
		 to access its memory without stopping all other threads
		 from other processes.  */
	      delete_single_step_breakpoints (child_thr);

	      gdb_assert (has_single_step_breakpoints (event_thr));
	      gdb_assert (!has_single_step_breakpoints (child_thr));
	    }

	  /* Report the event.  */
	  return 0;
	}

      threads_debug_printf
	("Got clone event from LWP %ld, new child is LWP %ld",
	 lwpid_of (event_thr), new_pid);

      ptid = ptid_t (pid_of (event_thr), new_pid);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  If the fork/clone parent is stepping over a breakpoint,
	 all other threads have been suspended already.  Leave the
	 child suspended too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	  || event_lwp->bp_reinsert != 0)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  new_lwp->stop_expected = 1;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}
      else if (cs.report_thread_events)
	{
	  new_lwp->waitstatus.set_thread_created ();
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}

#ifdef USE_THREAD_DB
      thread_db_notice_clone (event_thr, ptid);
#endif

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.set_vfork_done ();

      if (event_lwp->bp_reinsert != 0 && supports_software_single_step ())
	{
	  reinsert_single_step_breakpoints (event_thr);

	  gdb_assert (has_single_step_breakpoints (event_thr));
	}

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
    {
      struct process_info *proc;
      std::vector<int> syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      threads_debug_printf ("Got exec event from LWP %ld",
			    lwpid_of (event_thr));

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = event_ptid.pid ();

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = std::move (proc->syscalls_to_catch);

      /* Delete the execing process and all its threads.  */
      mourn (proc);
      switch_to_thread (nullptr);

      /* Create a new process/lwp/thread.  */
      proc = add_linux_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.set_execd
	(make_unique_xstrdup
	 (linux_proc_pid_to_exec_file (lwpid_of (event_thr))));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.set_ignore ();

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
	 to avoid sending a new list when the architecture doesn't change.
	 Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = std::move (syscalls_to_catch);

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}

CORE_ADDR
linux_process_target::get_pc (lwp_info *lwp)
{
  struct regcache *regcache;
  CORE_ADDR pc;

  if (!low_supports_breakpoints ())
    return 0;

  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

  regcache = get_thread_regcache (current_thread, 1);
  pc = low_get_pc (regcache);

  threads_debug_printf ("pc is 0x%lx", (long) pc);

  return pc;
}

void
linux_process_target::get_syscall_trapinfo (lwp_info *lwp, int *sysno)
{
  struct regcache *regcache;

  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

  regcache = get_thread_regcache (current_thread, 1);
  low_get_syscall_trapinfo (regcache, sysno);

  threads_debug_printf ("get_syscall_trapinfo sysno %d", *sysno);
}

void
linux_process_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  /* By default, report an unknown system call number.  */
  *sysno = UNKNOWN_SYSCALL;
}

bool
linux_process_target::save_stop_reason (lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (!low_supports_breakpoints ())
    return false;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - low_decr_pc_after_break ();

  /* breakpoint_at reads from the current thread.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
	      && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* The si_code is ambiguous on this arch -- check debug
		 registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
	    {
	      /* If we determine the LWP stopped for a SW breakpoint,
		 trust it.  Particularly don't check watchpoint
		 registers, because at least on s390, we'd find
		 stopped-by-watchpoint as long as there's a watchpoint
		 set.  */
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* This can indicate either a hardware breakpoint or
		 hardware watchpoint.  Check debug registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      /* We may have single stepped an instruction that
		 triggered a watchpoint.  In that case, on some
		 architectures (such as x86), instead of TRAP_HWBKPT,
		 si_code indicates TRAP_TRACE, and we need to check
		 the debug registers separately.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
	    }
	}
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && low_breakpoint_at (sw_breakpoint_pc))
    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;

  if (hardware_breakpoint_inserted_here (pc))
    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lwp);
#endif

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      threads_debug_printf
	("%s stopped by software breakpoint",
	 target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, sw_breakpoint_pc);
	}

      /* Update this so we record the correct stop PC below.  */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    threads_debug_printf
      ("%s stopped by hardware breakpoint",
       target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    threads_debug_printf
      ("%s stopped by hardware watchpoint",
       target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    threads_debug_printf
      ("%s stopped by trace",
       target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());

  lwp->stop_pc = pc;
  return true;
}

lwp_info *
linux_process_target::add_lwp (ptid_t ptid)
{
  lwp_info *lwp = new lwp_info;

  lwp->thread = add_thread (ptid, lwp);

  low_new_thread (lwp);

  return lwp;
}

void
linux_process_target::low_new_thread (lwp_info *info)
{
  /* Nop.  */
}

/* Callback to be used when calling fork_inferior, responsible for
   actually initiating the tracing of the inferior.  */

static void
linux_ptrace_fun ()
{
  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) < 0)
    trace_start_error_with_name ("ptrace");

  if (setpgid (0, 0) < 0)
    trace_start_error_with_name ("setpgid");

  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
     stdout to stderr so that inferior i/o doesn't corrupt the connection.
     Also, redirect stdin to /dev/null.  */
  if (remote_connection_is_stdio ())
    {
      if (close (0) < 0)
	trace_start_error_with_name ("close");
      if (open ("/dev/null", O_RDONLY) < 0)
	trace_start_error_with_name ("open");
      if (dup2 (2, 1) < 0)
	trace_start_error_with_name ("dup2");
      if (write (2, "stdin/stdout redirected\n",
		 sizeof ("stdin/stdout redirected\n") - 1) < 0)
	{
	  /* Errors ignored.  */;
	}
    }
}
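
/* The fork + PTRACE_TRACEME + exec dance that fork_inferior and the
   function above perform between them, reduced to a standalone sketch
   (kept under "#if 0"; not part of the build).  No shell, no stdio
   redirection; the function name is hypothetical.  */
#if 0
static pid_t
example_spawn_traced (const char *program, char *const argv[])
{
  pid_t pid = fork ();

  if (pid == 0)
    {
      /* Child: ask to be traced, then exec.  The kernel stops the
	 child with a SIGTRAP at the successful execv.  */
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
      execv (program, argv);
      _exit (127);
    }

  /* Parent: wait for that first stop before issuing further ptrace
     requests on the child.  */
  int status;
  waitpid (pid, &status, 0);
  return pid;
}
#endif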

/* Start an inferior process and return its pid.
   PROGRAM is the name of the program to be started, and PROGRAM_ARGS
   are its arguments.  */

int
linux_process_target::create_inferior (const char *program,
				       const std::vector<char *> &program_args)
{
  client_state &cs = get_client_state ();
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

  {
    maybe_disable_address_space_randomization restore_personality
      (cs.disable_randomization);
    std::string str_program_args = construct_inferior_arguments (program_args);

    pid = fork_inferior (program,
			 str_program_args.c_str (),
			 get_environ ()->envp (), linux_ptrace_fun,
			 NULL, NULL, NULL, NULL);
  }

  /* When spawning a new process, we can't open the mem file yet.  We
     still have to nurse the process through the shell, and that execs
     a couple times.  The address space a /proc/PID/mem file is
     accessing is destroyed on exec.  */
  process_info *proc = add_linux_process_no_mem_file (pid, 0);

  ptid = ptid_t (pid, pid);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  post_fork_inferior (pid, program);

  /* PROC is now past the shell running the program we want, so we can
     open the /proc/PID/mem file.  */
  open_proc_mem_file (proc);

  return pid;
}

/* Implement the post_create_inferior target_ops method.  */

void
linux_process_target::post_create_inferior ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  low_arch_setup ();

  if (lwp->must_set_ptrace_flags)
    {
      struct process_info *proc = current_process ();
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (current_thread), options);
      lwp->must_set_ptrace_flags = 0;
    }
}

int
linux_process_target::attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid.lwp ();

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      threads_debug_printf ("Attached to a stopped process");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}
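
/* The bare PTRACE_ATTACH handshake that attach_lwp wraps, as a sketch
   (kept under "#if 0"; not part of the build).  ATTACH queues a
   SIGSTOP for the tracee; only after that stop has been observed with
   waitpid is it safe to issue further ptrace requests.  Hypothetical
   function name and error convention.  */
#if 0
static int
example_attach_one_lwp (pid_t lwpid)
{
  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) != 0)
    return errno;

  int status;
  if (waitpid (lwpid, &status, __WALL) != lwpid || !WIFSTOPPED (status))
    return ESRCH;	/* Raced with the thread exiting.  */

  return 0;
}
#endif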

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid.lwp ();
      int err;

      threads_debug_printf ("Found new lwp %d", lwpid);

      err = the_linux_target->attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
	 is returned if the thread's task still exists, and is marked
	 as exited or zombie, as well as other conditions, so in that
	 case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
	  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	threads_debug_printf
	  ("Cannot attach to lwp %d: thread is gone (%d: %s)",
	   lwpid, err, safe_strerror (err));
      else if (err != 0)
	{
	  std::string reason
	    = linux_ptrace_attach_fail_reason_string (ptid, err);

	  warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
	}

      return 1;
    }
  return 0;
}

static void async_file_mark (void);

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

int
linux_process_target::attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_t (pid, pid);
  int err;

  /* Delay opening the /proc/PID/mem file until we've successfully
     attached.  */
  proc = add_linux_process_no_mem_file (pid, 1);

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = attach_lwp (ptid);
  if (err != 0)
    {
      remove_process (proc);

      std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
      error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
    }

  open_proc_mem_file (proc);

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_t (pid, pid));
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = ptid_t (pid);

      lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (ptid_t (lwpid));

      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
	{
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}

static int
last_thread_of_process_p (int pid)
{
  bool seen_one = false;

  thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
    {
      if (!seen_one)
	{
	  /* This is the first thread of this process we see.  */
	  seen_one = true;
	  return false;
	}
      else
	{
	  /* This is the second thread of this process we see.  */
	  return true;
	}
    });

  return thread == NULL;
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     debugging programs using raw clone without CLONE_THREAD, we send
     one for each thread.  For years, we used PTRACE_KILL
     only, so we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      threads_debug_printf ("kill_lwp (SIGKILL) %s, 0, 0 (%s)",
			    target_pid_to_str (ptid_of (thr)).c_str (),
			    save_errno ? safe_strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      threads_debug_printf ("PTRACE_KILL %s, 0, 0 (%s)",
			    target_pid_to_str (ptid_of (thr)).c_str (),
			    save_errno ? safe_strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_of (thr).pid ();
  int lwpid = ptid_of (thr).lwp ();
  int wstat;
  int res;

  threads_debug_printf ("killing lwp %d, for pid: %d", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.
      */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}

/* Callback for `for_each_thread'.  Kills an lwp of a given process,
   except the leader.  */

static void
kill_one_lwp_callback (thread_info *thread, int pid)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      threads_debug_printf ("is last of process %s",
			    target_pid_to_str (thread->id).c_str ());
      return;
    }

  kill_wait_lwp (lwp);
}

int
linux_process_target::kill (process_info *process)
{
  int pid = process->pid;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  for_each_thread (pid, [&] (thread_info *thread)
    {
      kill_one_lwp_callback (thread, pid);
    });

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp_info *lwp = find_lwp_pid (ptid_t (pid));

  if (lwp == NULL)
    threads_debug_printf ("cannot find lwp for pid: %d", pid);
  else
    kill_wait_lwp (lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  client_state &cs = get_client_state ();
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind () != TARGET_WAITKIND_STOPPED
	  || thread->last_status.sig () == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      threads_debug_printf ("lwp %s hasn't stopped: no pending signal",
			    target_pid_to_str (ptid_of (thread)).c_str ());
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      threads_debug_printf ("lwp %s had stopped with extended "
			    "status: no pending signal",
			    target_pid_to_str (ptid_of (thread)).c_str ());
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (cs.program_signals_p && !cs.program_signals[signo])
    {
      threads_debug_printf ("lwp %s had signal %s, but it is in nopass state",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    gdb_signal_to_string (signo));
      return 0;
    }
  else if (!cs.program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      threads_debug_printf ("lwp %s had signal %s, "
			    "but we don't know if we should pass it.  "
			    "Default to not.",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      threads_debug_printf ("lwp %s has pending signal %s: delivering it",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

void
linux_process_target::detach_one_lwp (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      threads_debug_printf ("Sending SIGCONT to %s",
			    target_pid_to_str (ptid_of (thread)).c_str ());

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      low_prepare_to_resume (lwp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw;
    }

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
	 zombie.  This can happen if one of the already-detached
	 threads exits the whole thread group.  In that case we're
	 still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
	{
	  int ret, status;

	  ret = my_waitpid (lwpid, &status, __WALL);
	  if (ret == -1)
	    {
	      warning (_("Couldn't reap LWP %d while detaching: %s"),
		       lwpid, safe_strerror (errno));
	    }
	  else if (!WIFEXITED (status) && !WIFSIGNALED (status))
	    {
	      warning (_("Reaping LWP %d while detaching "
			 "returned unexpected status 0x%x"),
		       lwpid, status);
	    }
	}
      else
	{
	  error (_("Can't detach %s: %s"),
		 target_pid_to_str (ptid_of (thread)).c_str (),
		 safe_strerror (save_errno));
	}
    }
  else
    threads_debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)",
			  target_pid_to_str (ptid_of (thread)).c_str (),
			  strsignal (sig));

  delete_lwp (lwp);
}

int
linux_process_target::detach (process_info *process)
{
  struct lwp_info *main_lwp;

  /* As there's a step over already in progress, let it finish first,
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  target_stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  for_each_thread (process->pid, [this] (thread_info *thread)
    {
      /* We don't actually detach from the thread group leader just yet.
	 If the thread group exits, we must reap the zombie clone lwps
	 before we're able to reap the leader.  */
      if (thread->id.pid () == thread->id.lwp ())
	return;

      lwp_info *lwp = get_thread_lwp (thread);
      detach_one_lwp (lwp);
    });

  main_lwp = find_lwp_pid (ptid_t (process->pid));
  detach_one_lwp (main_lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

void
linux_process_target::mourn (process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  for_each_thread (process->pid, [this] (thread_info *thread)
    {
      delete_lwp (get_thread_lwp (thread));
    });

  /* Freeing all private data.  */
  priv = process->priv;
  close (priv->mem_fd);
  low_delete_process (priv->arch_private);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}

void
linux_process_target::join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return true if the given thread is still alive.  */

bool
linux_process_target::thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp_is_marked_dead (lwp);
  else
    return 0;
}

bool
linux_process_target::thread_still_has_status_pending (thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      scoped_restore_current_thread restore_thread;
      switch_to_thread (thread);

      if (pc != lp->stop_pc)
	{
	  threads_debug_printf ("PC of %ld changed",
				lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !low_breakpoint_at (pc))
	{
	  threads_debug_printf ("previous SW breakpoint of %ld gone",
				lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  threads_debug_printf ("previous HW breakpoint of %ld gone",
				lwpid_of (thread));
	  discard = 1;
	}
#endif

      if (discard)
	{
	  threads_debug_printf ("discarding pending breakpoint status");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}

/* Returns true if LWP is resumed from the client's perspective.  */

static int
lwp_resumed (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (thread->last_resume_kind != resume_stop)
    return 1;

  /* Did gdb send us a `vCont;t', but we haven't reported the
     corresponding stop to gdb yet?  If so, the thread is still
     resumed/running from gdb's perspective.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
    return 1;

  return 0;
}

bool
linux_process_target::status_pending_p_callback (thread_info *thread,
						 ptid_t ptid)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!thread->id.matches (ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending (thread))
    {
      resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  thread_info *thread = find_thread ([&] (thread_info *thr_arg)
    {
      int lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
      return thr_arg->id.lwp () == lwp;
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;

  for_each_thread (pid, [&] (thread_info *thread)
    {
      count++;
    });

  return count;
}

/* See nat/linux-nat.h.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
		   gdb::function_view<iterate_over_lwps_ftype> callback)
{
  thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
    {
      lwp_info *lwp = get_thread_lwp (thr_arg);

      return callback (lwp);
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}

void
linux_process_target::check_zombie_leaders ()
{
  for_each_process ([this] (process_info *proc)
    {
      pid_t leader_pid = pid_of (proc);
      lwp_info *leader_lp = find_lwp_pid (ptid_t (leader_pid));

      threads_debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
			    "num_lwps=%d, zombie=%d",
			    leader_pid, leader_lp != NULL,
			    num_lwps (leader_pid),
			    linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL && !leader_lp->stopped
	  /* Check if there are other threads in the group, as we may
	     have raced with the inferior simply exiting.  Note this
	     isn't a watertight check.  If the inferior is
	     multi-threaded and is exiting, it may be we see the
	     leader as zombie before we reap all the non-leader
	     threads.  See comments below.  */
	  && !last_thread_of_process_p (leader_pid)
	  && linux_proc_pid_is_zombie (leader_pid))
	{
	  /* A zombie leader in a multi-threaded program can mean one
	     of three things:

	     #1 - Only the leader exited, not the whole program, e.g.,
	     with pthread_exit.  Since we can't reap the leader's exit
	     status until all other threads are gone and reaped too,
	     we want to delete the zombie leader right away, as it
	     can't be debugged, we can't read its registers, etc.
	     This is the main reason we check for zombie leaders
	     disappearing.

	     #2 - The whole thread-group/process exited (a group exit,
	     via e.g. exit(3)), and there is (or will be shortly) an
	     exit reported for each thread in the process, and then
	     finally an exit for the leader once the non-leaders are
	     reaped.

	     #3 - There are 3 or more threads in the group, and a
	     thread other than the leader exec'd.  See comments on
	     exec events at the top of the file.

	     Ideally we would never delete the leader for case #2.
	     Instead, we want to collect the exit status of each
	     non-leader thread, and then finally collect the exit
	     status of the leader as normal and use its exit code as
	     whole-process exit code.  Unfortunately, there's no
	     race-free way to distinguish cases #1 and #2.  We can't
	     assume the exit events for the non-leaders threads are
	     already pending in the kernel, nor can we assume the
	     non-leader threads are in zombie state already.  Between
	     the leader becoming zombie and the non-leaders exiting
	     and becoming zombie themselves, there's a small time
	     window, so such a check would be racy.  Temporarily
	     pausing all threads and checking to see if all threads
	     exit or not before re-resuming them would work in the
	     case that all threads are running right now, but it
	     wouldn't work if some thread is currently already
	     ptrace-stopped, e.g., due to scheduler-locking.

	     So what we do is we delete the leader anyhow, and then
	     later on when we see its exit status, we re-add it back.
	     We also make sure that we only report a whole-process
	     exit when we see the leader exiting, as opposed to when
	     the last LWP in the LWP list exits, which can be a
	     non-leader if we deleted the leader here.  */
	  threads_debug_printf ("Thread group leader %d zombie "
				"(it exited, or another thread execd), "
				"deleting it.",
				leader_pid);
	  delete_lwp (leader_lp);
	}
    });
}
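
/* What a zombie check like linux_proc_pid_is_zombie boils down to, as
   a sketch (kept under "#if 0"; not part of the build): the "State:"
   line of /proc/PID/status reads "Z (zombie)" for a zombie task.  The
   helper below is hypothetical.  */
#if 0
static int
example_pid_is_zombie (pid_t pid)
{
  char path[64], line[128];
  int zombie = 0;

  sprintf (path, "/proc/%d/status", (int) pid);
  FILE *f = fopen (path, "r");
  if (f == NULL)
    return 0;

  while (fgets (line, sizeof line, f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
	zombie = (strchr (line, 'Z') != NULL);
	break;
      }

  fclose (f);
  return zombie;
}
#endif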
1843
1844 /* Callback for `find_thread'. Returns the first LWP that is not
1845 stopped. */
1846
1847 static bool
1848 not_stopped_callback (thread_info *thread, ptid_t filter)
1849 {
1850 if (!thread->id.matches (filter))
1851 return false;
1852
1853 lwp_info *lwp = get_thread_lwp (thread);
1854
1855 return !lwp->stopped;
1856 }
1857
1858 /* Increment LWP's suspend count. */
1859
1860 static void
1861 lwp_suspended_inc (struct lwp_info *lwp)
1862 {
1863 lwp->suspended++;
1864
1865 if (lwp->suspended > 4)
1866 threads_debug_printf
1867 ("LWP %ld has a suspiciously high suspend count, suspended=%d",
1868 lwpid_of (get_lwp_thread (lwp)), lwp->suspended);
1869 }
1870
1871 /* Decrement LWP's suspend count. */
1872
1873 static void
1874 lwp_suspended_decr (struct lwp_info *lwp)
1875 {
1876 lwp->suspended--;
1877
1878 if (lwp->suspended < 0)
1879 {
1880 struct thread_info *thread = get_lwp_thread (lwp);
1881
1882 internal_error (__FILE__, __LINE__,
1883 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1884 lwp->suspended);
1885 }
1886 }
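
/* These two counters must stay balanced; handle_tracepoints below
   shows the canonical bracketing.  As an illustrative sketch only
   (no such helper exists in this file), the pairing is what a
   scoped guard would express:

       struct scoped_lwp_suspend	// hypothetical
       {
	 explicit scoped_lwp_suspend (lwp_info *lwp) : m_lwp (lwp)
	 { lwp_suspended_inc (m_lwp); }
	 ~scoped_lwp_suspend () { lwp_suspended_decr (m_lwp); }
	 lwp_info *m_lwp;
       };

   An unbalanced decrement trips the internal_error above.  */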
1887
1888 /* This function should only be called if the LWP got a SIGTRAP.
1889
1890 Handle any tracepoint steps or hits. Return 1 if a tracepoint
1891 event was handled, 0 otherwise. */
1892
1893 static int
1894 handle_tracepoints (struct lwp_info *lwp)
1895 {
1896 struct thread_info *tinfo = get_lwp_thread (lwp);
1897 int tpoint_related_event = 0;
1898
1899 gdb_assert (lwp->suspended == 0);
1900
1901 /* If this tracepoint hit causes a tracing stop, we'll immediately
1902 uninsert tracepoints. To do this, we temporarily pause all
1903 threads, unpatch away, and then unpause threads. We need to make
1904 sure the unpausing doesn't resume LWP too. */
1905 lwp_suspended_inc (lwp);
1906
1907 /* And we need to be sure that any all-threads-stopping doesn't try
1908 to move threads out of the jump pads, as it could deadlock the
1909 inferior (LWP could be in the jump pad, maybe even holding the
1910 lock). */
1911
1912 /* Do any necessary step collect actions. */
1913 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1914
1915 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1916
1917 /* See if we just hit a tracepoint and do its main collect
1918 actions. */
1919 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1920
1921 lwp_suspended_decr (lwp);
1922
1923 gdb_assert (lwp->suspended == 0);
1924 gdb_assert (!stabilizing_threads
1925 || (lwp->collecting_fast_tracepoint
1926 != fast_tpoint_collect_result::not_collecting));
1927
1928 if (tpoint_related_event)
1929 {
1930 threads_debug_printf ("got a tracepoint event");
1931 return 1;
1932 }
1933
1934 return 0;
1935 }
1936
1937 fast_tpoint_collect_result
1938 linux_process_target::linux_fast_tracepoint_collecting
1939 (lwp_info *lwp, fast_tpoint_collect_status *status)
1940 {
1941 CORE_ADDR thread_area;
1942 struct thread_info *thread = get_lwp_thread (lwp);
1943
1944 /* Get the thread area address. This is used to recognize which
1945 thread is which when tracing with the in-process agent library.
1946 We don't read anything from the address, and treat it as opaque;
1947 it's the address itself that we assume is unique per-thread. */
1948 if (low_get_thread_area (lwpid_of (thread), &thread_area) == -1)
1949 return fast_tpoint_collect_result::not_collecting;
1950
1951 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1952 }
1953
1954 int
1955 linux_process_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
1956 {
1957 return -1;
1958 }
1959
1960 bool
1961 linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
1962 {
1963 scoped_restore_current_thread restore_thread;
1964 switch_to_thread (get_lwp_thread (lwp));
1965
1966 if ((wstat == NULL
1967 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1968 && supports_fast_tracepoints ()
1969 && agent_loaded_p ())
1970 {
1971 struct fast_tpoint_collect_status status;
1972
1973 threads_debug_printf
1974 ("Checking whether LWP %ld needs to move out of the jump pad.",
1975 lwpid_of (current_thread));
1976
1977 fast_tpoint_collect_result r
1978 = linux_fast_tracepoint_collecting (lwp, &status);
1979
1980 if (wstat == NULL
1981 || (WSTOPSIG (*wstat) != SIGILL
1982 && WSTOPSIG (*wstat) != SIGFPE
1983 && WSTOPSIG (*wstat) != SIGSEGV
1984 && WSTOPSIG (*wstat) != SIGBUS))
1985 {
1986 lwp->collecting_fast_tracepoint = r;
1987
1988 if (r != fast_tpoint_collect_result::not_collecting)
1989 {
1990 if (r == fast_tpoint_collect_result::before_insn
1991 && lwp->exit_jump_pad_bkpt == NULL)
1992 {
1993 /* Haven't executed the original instruction yet.
1994 Set breakpoint there, and wait till it's hit,
1995 then single-step until exiting the jump pad. */
1996 lwp->exit_jump_pad_bkpt
1997 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1998 }
1999
2000 threads_debug_printf
2001 ("Checking whether LWP %ld needs to move out of the jump pad..."
2002 " it does", lwpid_of (current_thread));
2003
2004 return true;
2005 }
2006 }
2007 else
2008 {
2009 /* If we get a synchronous signal while collecting, *and*
2010 while executing the (relocated) original instruction,
2011 reset the PC to point at the tpoint address, before
2012 reporting to GDB. Otherwise, it's an IPA lib bug: just
2013 report the signal to GDB, and pray for the best. */
2014
2015 lwp->collecting_fast_tracepoint
2016 = fast_tpoint_collect_result::not_collecting;
2017
2018 if (r != fast_tpoint_collect_result::not_collecting
2019 && (status.adjusted_insn_addr <= lwp->stop_pc
2020 && lwp->stop_pc < status.adjusted_insn_addr_end))
2021 {
2022 siginfo_t info;
2023 struct regcache *regcache;
2024
2025 /* The si_addr on a few signals references the address
2026 of the faulting instruction. Adjust that as
2027 well. */
2028 if ((WSTOPSIG (*wstat) == SIGILL
2029 || WSTOPSIG (*wstat) == SIGFPE
2030 || WSTOPSIG (*wstat) == SIGBUS
2031 || WSTOPSIG (*wstat) == SIGSEGV)
2032 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2033 (PTRACE_TYPE_ARG3) 0, &info) == 0
2034 /* Final check just to make sure we don't clobber
2035 the siginfo of non-kernel-sent signals. */
2036 && (uintptr_t) info.si_addr == lwp->stop_pc)
2037 {
2038 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2039 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2040 (PTRACE_TYPE_ARG3) 0, &info);
2041 }
2042
2043 regcache = get_thread_regcache (current_thread, 1);
2044 low_set_pc (regcache, status.tpoint_addr);
2045 lwp->stop_pc = status.tpoint_addr;
2046
2047 /* Cancel any fast tracepoint lock this thread was
2048 holding. */
2049 force_unlock_trace_buffer ();
2050 }
2051
2052 if (lwp->exit_jump_pad_bkpt != NULL)
2053 {
2054 threads_debug_printf
2055 ("Cancelling fast exit-jump-pad: removing bkpt."
2056 "stopping all threads momentarily.");
2057
2058 stop_all_lwps (1, lwp);
2059
2060 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2061 lwp->exit_jump_pad_bkpt = NULL;
2062
2063 unstop_all_lwps (1, lwp);
2064
2065 gdb_assert (lwp->suspended >= 0);
2066 }
2067 }
2068 }
2069
2070 threads_debug_printf
2071 ("Checking whether LWP %ld needs to move out of the jump pad... no",
2072 lwpid_of (current_thread));
2073
2074 return false;
2075 }
2076
2077 /* Enqueue one signal in the "signals to report later when out of the
2078 jump pad" list. */
2079
2080 static void
2081 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2082 {
2083 struct thread_info *thread = get_lwp_thread (lwp);
2084
2085 threads_debug_printf ("Deferring signal %d for LWP %ld.",
2086 WSTOPSIG (*wstat), lwpid_of (thread));
2087
2088 if (debug_threads)
2089 {
2090 for (const auto &sig : lwp->pending_signals_to_report)
2091 threads_debug_printf (" Already queued %d", sig.signal);
2092
2093 threads_debug_printf (" (no more currently queued signals)");
2094 }
2095
2096 /* Don't enqueue non-RT signals if they are already in the deferred
2097 queue. (SIGSTOP being the easiest signal to see ending up here
2098 twice) */
2099 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2100 {
2101 for (const auto &sig : lwp->pending_signals_to_report)
2102 {
2103 if (sig.signal == WSTOPSIG (*wstat))
2104 {
2105 threads_debug_printf
2106 ("Not requeuing already queued non-RT signal %d for LWP %ld",
2107 sig.signal, lwpid_of (thread));
2108 return;
2109 }
2110 }
2111 }
2112
2113 lwp->pending_signals_to_report.emplace_back (WSTOPSIG (*wstat));
2114
2115 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2116 &lwp->pending_signals_to_report.back ().info);
2117 }
2118
2119 /* Dequeue one signal from the "signals to report later when out of
2120 the jump pad" list. */
2121
2122 static int
2123 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2124 {
2125 struct thread_info *thread = get_lwp_thread (lwp);
2126
2127 if (!lwp->pending_signals_to_report.empty ())
2128 {
2129 const pending_signal &p_sig = lwp->pending_signals_to_report.front ();
2130
2131 *wstat = W_STOPCODE (p_sig.signal);
2132 if (p_sig.info.si_signo != 0)
2133 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2134 &p_sig.info);
2135
2136 lwp->pending_signals_to_report.pop_front ();
2137
2138 threads_debug_printf ("Reporting deferred signal %d for LWP %ld.",
2139 WSTOPSIG (*wstat), lwpid_of (thread));
2140
2141 if (debug_threads)
2142 {
2143 for (const auto &sig : lwp->pending_signals_to_report)
2144 threads_debug_printf (" Still queued %d", sig.signal);
2145
2146 threads_debug_printf (" (no more queued signals)");
2147 }
2148
2149 return 1;
2150 }
2151
2152 return 0;
2153 }
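
/* Illustrative sketch: W_STOPCODE builds a wait status that the
   standard <sys/wait.h> macros decode back, which is what lets a
   dequeued signal flow through the normal status-processing paths
   exactly as if waitpid had just returned it:

       int wstat = W_STOPCODE (SIGUSR1);
       gdb_assert (WIFSTOPPED (wstat));
       gdb_assert (WSTOPSIG (wstat) == SIGUSR1);
*/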
2154
2155 bool
2156 linux_process_target::check_stopped_by_watchpoint (lwp_info *child)
2157 {
2158 scoped_restore_current_thread restore_thread;
2159 switch_to_thread (get_lwp_thread (child));
2160
2161 if (low_stopped_by_watchpoint ())
2162 {
2163 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2164 child->stopped_data_address = low_stopped_data_address ();
2165 }
2166
2167 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2168 }
2169
2170 bool
2171 linux_process_target::low_stopped_by_watchpoint ()
2172 {
2173 return false;
2174 }
2175
2176 CORE_ADDR
2177 linux_process_target::low_stopped_data_address ()
2178 {
2179 return 0;
2180 }
2181
2182 /* Return the ptrace options that we want to try to enable. */
2183
2184 static int
2185 linux_low_ptrace_options (int attached)
2186 {
2187 client_state &cs = get_client_state ();
2188 int options = 0;
2189
2190 if (!attached)
2191 options |= PTRACE_O_EXITKILL;
2192
2193 if (cs.report_fork_events)
2194 options |= PTRACE_O_TRACEFORK;
2195
2196 if (cs.report_vfork_events)
2197 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2198
2199 if (cs.report_exec_events)
2200 options |= PTRACE_O_TRACEEXEC;
2201
2202 options |= PTRACE_O_TRACESYSGOOD;
2203
2204 return options;
2205 }
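
/* Illustrative sketch (a simplification; the real work happens in
   linux_enable_event_reporting, in nat/linux-ptrace.c): the options
   word computed above reaches the kernel via PTRACE_SETOPTIONS on a
   stopped tracee:

       ptrace (PTRACE_SETOPTIONS, pid, (PTRACE_TYPE_ARG3) 0,
	       (PTRACE_TYPE_ARG4) (uintptr_t) options);

   From then on, fork/vfork/exec events arrive as SIGTRAP stops
   carrying extended-event bits, and syscall stops are flagged as
   SYSCALL_SIGTRAP (SIGTRAP | 0x80) thanks to
   PTRACE_O_TRACESYSGOOD.  */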
2206
2207 void
2208 linux_process_target::filter_event (int lwpid, int wstat)
2209 {
2210 client_state &cs = get_client_state ();
2211 struct lwp_info *child;
2212 struct thread_info *thread;
2213 int have_stop_pc = 0;
2214
2215 child = find_lwp_pid (ptid_t (lwpid));
2216
2217 /* Check for events reported by anything not in our LWP list. */
2218 if (child == nullptr)
2219 {
2220 if (WIFSTOPPED (wstat))
2221 {
2222 if (WSTOPSIG (wstat) == SIGTRAP
2223 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2224 {
2225 /* A non-leader thread exec'ed after we've seen the
2226 leader zombie, and removed it from our lists (in
2227 check_zombie_leaders). The non-leader thread changes
2228 its tid to the tgid. */
2229 threads_debug_printf
2230 ("Re-adding thread group leader LWP %d after exec.",
2231 lwpid);
2232
2233 child = add_lwp (ptid_t (lwpid, lwpid));
2234 child->stopped = 1;
2235 switch_to_thread (child->thread);
2236 }
2237 else
2238 {
2239 /* A process we are controlling has forked and the new
2240 child's stop was reported to us by the kernel. Save
2241 its PID and go back to waiting for the fork event to
2242 be reported - the stopped process might be returned
2243 from waitpid before or after the fork event is. */
2244 threads_debug_printf
2245 ("Saving LWP %d status %s in stopped_pids list",
2246 lwpid, status_to_str (wstat).c_str ());
2247 add_to_pid_list (&stopped_pids, lwpid, wstat);
2248 }
2249 }
2250 else
2251 {
2252 /* Don't report an event for the exit of an LWP not in our
2253 list, i.e. not part of any inferior we're debugging.
2254 This can happen if we detach from a program we originally
2255 forked and then it exits. However, note that we may have
2256 earlier deleted a leader of an inferior we're debugging,
2257 in check_zombie_leaders. Re-add it back here if so. */
2258 find_process ([&] (process_info *proc)
2259 {
2260 if (proc->pid == lwpid)
2261 {
2262 threads_debug_printf
2263 ("Re-adding thread group leader LWP %d after exit.",
2264 lwpid);
2265
2266 child = add_lwp (ptid_t (lwpid, lwpid));
2267 return true;
2268 }
2269 return false;
2270 });
2271 }
2272
2273 if (child == nullptr)
2274 return;
2275 }
2276
2277 thread = get_lwp_thread (child);
2278
2279 child->stopped = 1;
2280
2281 child->last_status = wstat;
2282
2283 /* Check if the thread has exited. */
2284 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2285 {
2286 threads_debug_printf ("%d exited", lwpid);
2287
2288 if (finish_step_over (child))
2289 {
2290 /* Unsuspend all other LWPs, and set them back running again. */
2291 unsuspend_all_lwps (child);
2292 }
2293
2294 /* If this is not the leader LWP, then the exit signal was not
2295 the end of the debugged application and should be ignored,
2296 unless GDB wants to hear about thread exits. */
2297 if (cs.report_thread_events || is_leader (thread))
2298 {
2299 /* Since events are serialized to the GDB core, we can't
2300 report this one right now.  Leave the status pending for
2301 the next time we're able to report it. */
2302 mark_lwp_dead (child, wstat);
2303 return;
2304 }
2305 else
2306 {
2307 delete_lwp (child);
2308 return;
2309 }
2310 }
2311
2312 gdb_assert (WIFSTOPPED (wstat));
2313
2314 if (WIFSTOPPED (wstat))
2315 {
2316 struct process_info *proc;
2317
2318 /* Architecture-specific setup after inferior is running. */
2319 proc = find_process_pid (pid_of (thread));
2320 if (proc->tdesc == NULL)
2321 {
2322 if (proc->attached)
2323 {
2324 /* This needs to happen after we have attached to the
2325 inferior and it is stopped for the first time, but
2326 before we access any inferior registers. */
2327 arch_setup_thread (thread);
2328 }
2329 else
2330 {
2331 /* The process is started, but GDBserver will do
2332 architecture-specific setup after the program stops at
2333 the first instruction. */
2334 child->status_pending_p = 1;
2335 child->status_pending = wstat;
2336 return;
2337 }
2338 }
2339 }
2340
2341 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2342 {
2343 struct process_info *proc = find_process_pid (pid_of (thread));
2344 int options = linux_low_ptrace_options (proc->attached);
2345
2346 linux_enable_event_reporting (lwpid, options);
2347 child->must_set_ptrace_flags = 0;
2348 }
2349
2350 /* Always update syscall_state, even if it will be filtered later. */
2351 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2352 {
2353 child->syscall_state
2354 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2355 ? TARGET_WAITKIND_SYSCALL_RETURN
2356 : TARGET_WAITKIND_SYSCALL_ENTRY);
2357 }
2358 else
2359 {
2360 /* Almost all other ptrace-stops are known to be outside of system
2361 calls, with further exceptions in handle_extended_wait. */
2362 child->syscall_state = TARGET_WAITKIND_IGNORE;
2363 }
2364
2365 /* Be careful to not overwrite stop_pc until save_stop_reason is
2366 called. */
2367 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2368 && linux_is_extended_waitstatus (wstat))
2369 {
2370 child->stop_pc = get_pc (child);
2371 if (handle_extended_wait (&child, wstat))
2372 {
2373 /* The event has been handled, so just return without
2374 reporting it. */
2375 return;
2376 }
2377 }
2378
2379 if (linux_wstatus_maybe_breakpoint (wstat))
2380 {
2381 if (save_stop_reason (child))
2382 have_stop_pc = 1;
2383 }
2384
2385 if (!have_stop_pc)
2386 child->stop_pc = get_pc (child);
2387
2388 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2389 && child->stop_expected)
2390 {
2391 threads_debug_printf ("Expected stop.");
2392
2393 child->stop_expected = 0;
2394
2395 if (thread->last_resume_kind == resume_stop)
2396 {
2397 /* We want to report the stop to the core. Treat the
2398 SIGSTOP as a normal event. */
2399 threads_debug_printf ("resume_stop SIGSTOP caught for %s.",
2400 target_pid_to_str (ptid_of (thread)).c_str ());
2401 }
2402 else if (stopping_threads != NOT_STOPPING_THREADS)
2403 {
2404 /* Stopping threads. We don't want this SIGSTOP to end up
2405 pending. */
2406 threads_debug_printf ("SIGSTOP caught for %s while stopping threads.",
2407 target_pid_to_str (ptid_of (thread)).c_str ());
2408 return;
2409 }
2410 else
2411 {
2412 /* This is a delayed SIGSTOP. Filter out the event. */
2413 threads_debug_printf ("%s %s, 0, 0 (discard delayed SIGSTOP)",
2414 child->stepping ? "step" : "continue",
2415 target_pid_to_str (ptid_of (thread)).c_str ());
2416
2417 resume_one_lwp (child, child->stepping, 0, NULL);
2418 return;
2419 }
2420 }
2421
2422 child->status_pending_p = 1;
2423 child->status_pending = wstat;
2424 return;
2425 }
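
/* A note on the ptid convention visible above: for a thread-group
   leader the LWP id equals the process id, which is what is_leader
   tests.  Illustrative sketch:

       ptid_t leader (pid, pid);	// tgid == tid: the leader
       ptid_t worker (pid, tid);	// any other LWP in the group

   This is why a leader re-added after an exec or an exit is created
   with add_lwp (ptid_t (lwpid, lwpid)).  */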
2426
2427 bool
2428 linux_process_target::maybe_hw_step (thread_info *thread)
2429 {
2430 if (supports_hardware_single_step ())
2431 return true;
2432 else
2433 {
2434 /* GDBserver must insert single-step breakpoint for software
2435 single step. */
2436 gdb_assert (has_single_step_breakpoints (thread));
2437 return false;
2438 }
2439 }
2440
2441 void
2442 linux_process_target::resume_stopped_resumed_lwps (thread_info *thread)
2443 {
2444 struct lwp_info *lp = get_thread_lwp (thread);
2445
2446 if (lp->stopped
2447 && !lp->suspended
2448 && !lp->status_pending_p
2449 && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
2450 {
2451 int step = 0;
2452
2453 if (thread->last_resume_kind == resume_step)
2454 step = maybe_hw_step (thread);
2455
2456 threads_debug_printf ("resuming stopped-resumed LWP %s at %s: step=%d",
2457 target_pid_to_str (ptid_of (thread)).c_str (),
2458 paddress (lp->stop_pc), step);
2459
2460 resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2461 }
2462 }
2463
2464 int
2465 linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
2466 ptid_t filter_ptid,
2467 int *wstatp, int options)
2468 {
2469 struct thread_info *event_thread;
2470 struct lwp_info *event_child, *requested_child;
2471 sigset_t block_mask, prev_mask;
2472
2473 retry:
2474 /* N.B. event_thread points to the thread_info struct that contains
2475 event_child. Keep them in sync. */
2476 event_thread = NULL;
2477 event_child = NULL;
2478 requested_child = NULL;
2479
2480 /* Check for a lwp with a pending status. */
2481
2482 if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
2483 {
2484 event_thread = find_thread_in_random ([&] (thread_info *thread)
2485 {
2486 return status_pending_p_callback (thread, filter_ptid);
2487 });
2488
2489 if (event_thread != NULL)
2490 {
2491 event_child = get_thread_lwp (event_thread);
2492 threads_debug_printf ("Got a pending child %ld", lwpid_of (event_thread));
2493 }
2494 }
2495 else if (filter_ptid != null_ptid)
2496 {
2497 requested_child = find_lwp_pid (filter_ptid);
2498
2499 if (stopping_threads == NOT_STOPPING_THREADS
2500 && requested_child->status_pending_p
2501 && (requested_child->collecting_fast_tracepoint
2502 != fast_tpoint_collect_result::not_collecting))
2503 {
2504 enqueue_one_deferred_signal (requested_child,
2505 &requested_child->status_pending);
2506 requested_child->status_pending_p = 0;
2507 requested_child->status_pending = 0;
2508 resume_one_lwp (requested_child, 0, 0, NULL);
2509 }
2510
2511 if (requested_child->suspended
2512 && requested_child->status_pending_p)
2513 {
2514 internal_error (__FILE__, __LINE__,
2515 "requesting an event out of a"
2516 " suspended child?");
2517 }
2518
2519 if (requested_child->status_pending_p)
2520 {
2521 event_child = requested_child;
2522 event_thread = get_lwp_thread (event_child);
2523 }
2524 }
2525
2526 if (event_child != NULL)
2527 {
2528 threads_debug_printf ("Got an event from pending child %ld (%04x)",
2529 lwpid_of (event_thread),
2530 event_child->status_pending);
2531
2532 *wstatp = event_child->status_pending;
2533 event_child->status_pending_p = 0;
2534 event_child->status_pending = 0;
2535 switch_to_thread (event_thread);
2536 return lwpid_of (event_thread);
2537 }
2538
2539 /* But if we don't find a pending event, we'll have to wait.
2540
2541 We only enter this loop if no process has a pending wait status.
2542 Thus any action taken in response to a wait status inside this
2543 loop is responding as soon as we detect the status, not after any
2544 pending events. */
2545
2546 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2547 all signals while here. */
2548 sigfillset (&block_mask);
2549 gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);
2550
2551 /* Always pull all events out of the kernel. We'll randomly select
2552 an event LWP out of all that have events, to prevent
2553 starvation. */
2554 while (event_child == NULL)
2555 {
2556 pid_t ret = 0;
2557
2558 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2559 quirks:
2560
2561 - If the thread group leader exits while other threads in the
2562 thread group still exist, waitpid(TGID, ...) hangs. That
2563 waitpid won't return an exit status until the other threads
2564 in the group are reaped.
2565
2566 - When a non-leader thread execs, that thread just vanishes
2567 without reporting an exit (so we'd hang if we waited for it
2568 explicitly in that case). The exec event is reported to
2569 the TGID pid. */
2570 errno = 0;
2571 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2572
2573 threads_debug_printf ("waitpid(-1, ...) returned %d, %s",
2574 ret, errno ? safe_strerror (errno) : "ERRNO-OK");
2575
2576 if (ret > 0)
2577 {
2578 threads_debug_printf ("waitpid %ld received %s",
2579 (long) ret, status_to_str (*wstatp).c_str ());
2580
2581 /* Filter all events. IOW, leave all events pending. We'll
2582 randomly select an event LWP out of all that have events
2583 below. */
2584 filter_event (ret, *wstatp);
2585 /* Retry until nothing comes out of waitpid. A single
2586 SIGCHLD can indicate more than one child stopped. */
2587 continue;
2588 }
2589
2590 /* Now that we've pulled all events out of the kernel, resume
2591 LWPs that don't have an interesting event to report. */
2592 if (stopping_threads == NOT_STOPPING_THREADS)
2593 for_each_thread ([this] (thread_info *thread)
2594 {
2595 resume_stopped_resumed_lwps (thread);
2596 });
2597
2598 /* ... and find an LWP with a status to report to the core, if
2599 any. */
2600 event_thread = find_thread_in_random ([&] (thread_info *thread)
2601 {
2602 return status_pending_p_callback (thread, filter_ptid);
2603 });
2604
2605 if (event_thread != NULL)
2606 {
2607 event_child = get_thread_lwp (event_thread);
2608 *wstatp = event_child->status_pending;
2609 event_child->status_pending_p = 0;
2610 event_child->status_pending = 0;
2611 break;
2612 }
2613
2614 /* Check for zombie thread group leaders. Those can't be reaped
2615 until all other threads in the thread group are. */
2616 check_zombie_leaders ();
2617
2618 auto not_stopped = [&] (thread_info *thread)
2619 {
2620 return not_stopped_callback (thread, wait_ptid);
2621 };
2622
2623 /* If there are no resumed children left in the set of LWPs we
2624 want to wait for, bail. We can't just block in
2625 waitpid/sigsuspend, because lwps might have been left stopped
2626 in trace-stop state, and we'd be stuck forever waiting for
2627 their status to change (which would only happen if we resumed
2628 them). Even if WNOHANG is set, this return code is preferred
2629 over 0 (below), as it is more detailed. */
2630 if (find_thread (not_stopped) == NULL)
2631 {
2632 threads_debug_printf ("exit (no unwaited-for LWP)");
2633
2634 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2635 return -1;
2636 }
2637
2638 /* No interesting event to report to the caller. */
2639 if ((options & WNOHANG))
2640 {
2641 threads_debug_printf ("WNOHANG set, no event found");
2642
2643 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2644 return 0;
2645 }
2646
2647 /* Block until we get an event reported with SIGCHLD. */
2648 threads_debug_printf ("sigsuspend'ing");
2649
2650 sigsuspend (&prev_mask);
2651 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2652 goto retry;
2653 }
2654
2655 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2656
2657 switch_to_thread (event_thread);
2658
2659 return lwpid_of (event_thread);
2660 }
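
/* Illustrative sketch of the race-free wait pattern implemented
   above: SIGCHLD is blocked before the WNOHANG poll, so a status
   change arriving between the poll and the sleep merely leaves
   SIGCHLD pending, and sigsuspend returns immediately instead of
   sleeping forever:

       sigset_t block_mask, prev_mask;
       sigfillset (&block_mask);
       sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
       while (waitpid (-1, &status, __WALL | WNOHANG) <= 0)
	 sigsuspend (&prev_mask);	// atomically unblock and wait
       sigprocmask (SIG_SETMASK, &prev_mask, NULL);

   The sketch omits the event filtering, the LWP selection, and the
   no-unwaited-for-children early return the real loop performs.  */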
2661
2662 int
2663 linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
2664 {
2665 return wait_for_event_filtered (ptid, ptid, wstatp, options);
2666 }
2667
2668 /* Select one LWP out of those that have events pending. */
2669
2670 static void
2671 select_event_lwp (struct lwp_info **orig_lp)
2672 {
2673 struct thread_info *event_thread = NULL;
2674
2675 /* In all-stop, give preference to the LWP that is being
2676 single-stepped. There will be at most one, and it's the LWP that
2677 the core is most interested in. If we didn't do this, then we'd
2678 have to handle pending step SIGTRAPs somehow in case the core
2679 later continues the previously-stepped thread, otherwise we'd
2680 report the pending SIGTRAP, and the core, not having stepped the
2681 thread, wouldn't understand what the trap was for, and therefore
2682 would report it to the user as a random signal. */
2683 if (!non_stop)
2684 {
2685 event_thread = find_thread ([] (thread_info *thread)
2686 {
2687 lwp_info *lp = get_thread_lwp (thread);
2688
2689 return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
2690 && thread->last_resume_kind == resume_step
2691 && lp->status_pending_p);
2692 });
2693
2694 if (event_thread != NULL)
2695 threads_debug_printf
2696 ("Select single-step %s",
2697 target_pid_to_str (ptid_of (event_thread)).c_str ());
2698 }
2699 if (event_thread == NULL)
2700 {
2701 /* No single-stepping LWP. Select one at random, out of those
2702 which have had events. */
2703
2704 event_thread = find_thread_in_random ([&] (thread_info *thread)
2705 {
2706 lwp_info *lp = get_thread_lwp (thread);
2707
2708 /* Only resumed LWPs that have an event pending. */
2709 return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
2710 && lp->status_pending_p);
2711 });
2712 }
2713
2714 if (event_thread != NULL)
2715 {
2716 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2717
2718 /* Switch the event LWP. */
2719 *orig_lp = event_lp;
2720 }
2721 }
2722
2723 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2724 non-NULL. */
2725
2726 static void
2727 unsuspend_all_lwps (struct lwp_info *except)
2728 {
2729 for_each_thread ([&] (thread_info *thread)
2730 {
2731 lwp_info *lwp = get_thread_lwp (thread);
2732
2733 if (lwp != except)
2734 lwp_suspended_decr (lwp);
2735 });
2736 }
2737
2738 static bool lwp_running (thread_info *thread);
2739
2740 /* Stabilize threads (move out of jump pads).
2741
2742 If a thread is midway collecting a fast tracepoint, we need to
2743 finish the collection and move it out of the jump pad before
2744 reporting the signal.
2745
2746 This avoids recursion while collecting (when a signal arrives
2747 midway, and the signal handler itself collects), which would trash
2748 the trace buffer. In case the user set a breakpoint in a signal
2749 handler, this avoids the backtrace showing the jump pad, etc..
2750 Most importantly, there are certain things we can't do safely if
2751 threads are stopped in a jump pad (or in its callees). For
2752 example:
2753
2754 - starting a new trace run.  A thread still collecting the
2755 previous run could trash the trace buffer when resumed.  The trace
2756 buffer control structures would have been reset but the thread had
2757 no way to tell.  The thread could even be midway through memcpy'ing
2758 to the buffer, which would mean that when resumed, it would clobber
2759 the trace buffer that had been set up for the new run.
2760
2761 - we can't rewrite/reuse the jump pads for new tracepoints
2762 safely. Say you do tstart while a thread is stopped midway while
2763 collecting. When the thread is later resumed, it finishes the
2764 collection, and returns to the jump pad, to execute the original
2765 instruction that was under the tracepoint jump at the time the
2766 older run had been started. If the jump pad had been rewritten
2767 since for something else in the new run, the thread would now
2768 execute the wrong / random instructions. */
2769
2770 void
2771 linux_process_target::stabilize_threads ()
2772 {
2773 thread_info *thread_stuck = find_thread ([this] (thread_info *thread)
2774 {
2775 return stuck_in_jump_pad (thread);
2776 });
2777
2778 if (thread_stuck != NULL)
2779 {
2780 threads_debug_printf ("can't stabilize, LWP %ld is stuck in jump pad",
2781 lwpid_of (thread_stuck));
2782 return;
2783 }
2784
2785 scoped_restore_current_thread restore_thread;
2786
2787 stabilizing_threads = 1;
2788
2789 /* Kick 'em all. */
2790 for_each_thread ([this] (thread_info *thread)
2791 {
2792 move_out_of_jump_pad (thread);
2793 });
2794
2795 /* Loop until all are stopped out of the jump pads. */
2796 while (find_thread (lwp_running) != NULL)
2797 {
2798 struct target_waitstatus ourstatus;
2799 struct lwp_info *lwp;
2800 int wstat;
2801
2802 /* Note that we go through the full wait event loop.  While
2803 moving threads out of jump pad, we need to be able to step
2804 over internal breakpoints and such. */
2805 wait_1 (minus_one_ptid, &ourstatus, 0);
2806
2807 if (ourstatus.kind () == TARGET_WAITKIND_STOPPED)
2808 {
2809 lwp = get_thread_lwp (current_thread);
2810
2811 /* Lock it. */
2812 lwp_suspended_inc (lwp);
2813
2814 if (ourstatus.sig () != GDB_SIGNAL_0
2815 || current_thread->last_resume_kind == resume_stop)
2816 {
2817 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.sig ()));
2818 enqueue_one_deferred_signal (lwp, &wstat);
2819 }
2820 }
2821 }
2822
2823 unsuspend_all_lwps (NULL);
2824
2825 stabilizing_threads = 0;
2826
2827 if (debug_threads)
2828 {
2829 thread_stuck = find_thread ([this] (thread_info *thread)
2830 {
2831 return stuck_in_jump_pad (thread);
2832 });
2833
2834 if (thread_stuck != NULL)
2835 threads_debug_printf
2836 ("couldn't stabilize, LWP %ld got stuck in jump pad",
2837 lwpid_of (thread_stuck));
2838 }
2839 }
2840
2841 /* Convenience function that is called when the kernel reports an
2842 event that is not passed out to GDB. */
2843
2844 static ptid_t
2845 ignore_event (struct target_waitstatus *ourstatus)
2846 {
2847 /* If we got an event, there may still be others, as a single
2848 SIGCHLD can indicate more than one child stopped. This forces
2849 another target_wait call. */
2850 async_file_mark ();
2851
2852 ourstatus->set_ignore ();
2853 return null_ptid;
2854 }
2855
2856 ptid_t
2857 linux_process_target::filter_exit_event (lwp_info *event_child,
2858 target_waitstatus *ourstatus)
2859 {
2860 client_state &cs = get_client_state ();
2861 struct thread_info *thread = get_lwp_thread (event_child);
2862 ptid_t ptid = ptid_of (thread);
2863
2864 if (!is_leader (thread))
2865 {
2866 if (cs.report_thread_events)
2867 ourstatus->set_thread_exited (0);
2868 else
2869 ourstatus->set_ignore ();
2870
2871 delete_lwp (event_child);
2872 }
2873 return ptid;
2874 }
2875
2876 /* Returns 1 if GDB is interested in any event_child syscalls. */
2877
2878 static int
2879 gdb_catching_syscalls_p (struct lwp_info *event_child)
2880 {
2881 struct thread_info *thread = get_lwp_thread (event_child);
2882 struct process_info *proc = get_thread_process (thread);
2883
2884 return !proc->syscalls_to_catch.empty ();
2885 }
2886
2887 bool
2888 linux_process_target::gdb_catch_this_syscall (lwp_info *event_child)
2889 {
2890 int sysno;
2891 struct thread_info *thread = get_lwp_thread (event_child);
2892 struct process_info *proc = get_thread_process (thread);
2893
2894 if (proc->syscalls_to_catch.empty ())
2895 return false;
2896
2897 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
2898 return true;
2899
2900 get_syscall_trapinfo (event_child, &sysno);
2901
2902 for (int iter : proc->syscalls_to_catch)
2903 if (iter == sysno)
2904 return true;
2905
2906 return false;
2907 }
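
/* Illustrative sketch of the two shapes syscalls_to_catch can take
   (it is filled in from GDB's QCatchSyscalls packet):

       proc->syscalls_to_catch = { ANY_SYSCALL };	// "catch syscall"
       proc->syscalls_to_catch = { 1, 231 };	// specific numbers
						// (arch-dependent)

   With specific numbers, get_syscall_trapinfo must be consulted to
   learn which syscall this particular stop is for.  */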
2908
2909 ptid_t
2910 linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
2911 target_wait_flags target_options)
2912 {
2913 THREADS_SCOPED_DEBUG_ENTER_EXIT;
2914
2915 client_state &cs = get_client_state ();
2916 int w;
2917 struct lwp_info *event_child;
2918 int options;
2919 int pid;
2920 int step_over_finished;
2921 int bp_explains_trap;
2922 int maybe_internal_trap;
2923 int report_to_gdb;
2924 int trace_event;
2925 int in_step_range;
2926 int any_resumed;
2927
2928 threads_debug_printf ("[%s]", target_pid_to_str (ptid).c_str ());
2929
2930 /* Translate generic target options into linux options. */
2931 options = __WALL;
2932 if (target_options & TARGET_WNOHANG)
2933 options |= WNOHANG;
2934
2935 bp_explains_trap = 0;
2936 trace_event = 0;
2937 in_step_range = 0;
2938 ourstatus->set_ignore ();
2939
2940 auto status_pending_p_any = [&] (thread_info *thread)
2941 {
2942 return status_pending_p_callback (thread, minus_one_ptid);
2943 };
2944
2945 auto not_stopped = [&] (thread_info *thread)
2946 {
2947 return not_stopped_callback (thread, minus_one_ptid);
2948 };
2949
2950 /* Find a resumed LWP, if any. */
2951 if (find_thread (status_pending_p_any) != NULL)
2952 any_resumed = 1;
2953 else if (find_thread (not_stopped) != NULL)
2954 any_resumed = 1;
2955 else
2956 any_resumed = 0;
2957
2958 if (step_over_bkpt == null_ptid)
2959 pid = wait_for_event (ptid, &w, options);
2960 else
2961 {
2962 threads_debug_printf ("step_over_bkpt set [%s], doing a blocking wait",
2963 target_pid_to_str (step_over_bkpt).c_str ());
2964 pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2965 }
2966
2967 if (pid == 0 || (pid == -1 && !any_resumed))
2968 {
2969 gdb_assert (target_options & TARGET_WNOHANG);
2970
2971 threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_IGNORE");
2972
2973 ourstatus->set_ignore ();
2974 return null_ptid;
2975 }
2976 else if (pid == -1)
2977 {
2978 threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_NO_RESUMED");
2979
2980 ourstatus->set_no_resumed ();
2981 return null_ptid;
2982 }
2983
2984 event_child = get_thread_lwp (current_thread);
2985
2986 /* wait_for_event only returns an exit status for the last
2987 child of a process. Report it. */
2988 if (WIFEXITED (w) || WIFSIGNALED (w))
2989 {
2990 if (WIFEXITED (w))
2991 {
2992 ourstatus->set_exited (WEXITSTATUS (w));
2993
2994 threads_debug_printf
2995 ("ret = %s, exited with retcode %d",
2996 target_pid_to_str (ptid_of (current_thread)).c_str (),
2997 WEXITSTATUS (w));
2998 }
2999 else
3000 {
3001 ourstatus->set_signalled (gdb_signal_from_host (WTERMSIG (w)));
3002
3003 threads_debug_printf
3004 ("ret = %s, terminated with signal %d",
3005 target_pid_to_str (ptid_of (current_thread)).c_str (),
3006 WTERMSIG (w));
3007 }
3008
3009 if (ourstatus->kind () == TARGET_WAITKIND_EXITED)
3010 return filter_exit_event (event_child, ourstatus);
3011
3012 return ptid_of (current_thread);
3013 }
3014
3015 /* If step-over executes a breakpoint instruction, in the case of a
3016 hardware single step it means a gdb/gdbserver breakpoint had been
3017 planted on top of a permanent breakpoint, in the case of a software
3018 single step it may just mean that gdbserver hit the reinsert breakpoint.
3019 The PC has been adjusted by save_stop_reason to point at
3020 the breakpoint address.
3021 So in the case of hardware single step, advance the PC manually
3022 past the breakpoint; in the case of software single step, advance
3023 only if it's not the single_step_breakpoint we are hitting.
3024 This prevents a program from trapping on a permanent breakpoint
3025 forever. */
3026 if (step_over_bkpt != null_ptid
3027 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3028 && (event_child->stepping
3029 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
3030 {
3031 int increment_pc = 0;
3032 int breakpoint_kind = 0;
3033 CORE_ADDR stop_pc = event_child->stop_pc;
3034
3035 breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
3036 sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3037
3038 threads_debug_printf
3039 ("step-over for %s executed software breakpoint",
3040 target_pid_to_str (ptid_of (current_thread)).c_str ());
3041
3042 if (increment_pc != 0)
3043 {
3044 struct regcache *regcache
3045 = get_thread_regcache (current_thread, 1);
3046
3047 event_child->stop_pc += increment_pc;
3048 low_set_pc (regcache, event_child->stop_pc);
3049
3050 if (!low_breakpoint_at (event_child->stop_pc))
3051 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3052 }
3053 }
3054
3055 /* If this event was not handled before, and is not a SIGTRAP, we
3056 report it. SIGILL and SIGSEGV are also treated as traps in case
3057 a breakpoint is inserted at the current PC. If this target does
3058 not support internal breakpoints at all, we also report the
3059 SIGTRAP without further processing; it's of no concern to us. */
3060 maybe_internal_trap
3061 = (low_supports_breakpoints ()
3062 && (WSTOPSIG (w) == SIGTRAP
3063 || ((WSTOPSIG (w) == SIGILL
3064 || WSTOPSIG (w) == SIGSEGV)
3065 && low_breakpoint_at (event_child->stop_pc))));
3066
3067 if (maybe_internal_trap)
3068 {
3069 /* Handle anything that requires bookkeeping before deciding to
3070 report the event or continue waiting. */
3071
3072 /* First check if we can explain the SIGTRAP with an internal
3073 breakpoint, or if we should possibly report the event to GDB.
3074 Do this before anything that may remove or insert a
3075 breakpoint. */
3076 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3077
3078 /* We have a SIGTRAP, possibly a step-over dance has just
3079 finished. If so, tweak the state machine accordingly,
3080 reinsert breakpoints and delete any single-step
3081 breakpoints. */
3082 step_over_finished = finish_step_over (event_child);
3083
3084 /* Now invoke the callbacks of any internal breakpoints there. */
3085 check_breakpoints (event_child->stop_pc);
3086
3087 /* Handle tracepoint data collecting. This may overflow the
3088 trace buffer, and cause a tracing stop, removing
3089 breakpoints. */
3090 trace_event = handle_tracepoints (event_child);
3091
3092 if (bp_explains_trap)
3093 threads_debug_printf ("Hit a gdbserver breakpoint.");
3094 }
3095 else
3096 {
3097 /* We have some other signal, possibly a step-over dance was in
3098 progress, and it should be cancelled too. */
3099 step_over_finished = finish_step_over (event_child);
3100 }
3101
3102 /* We have all the data we need. Either report the event to GDB, or
3103 resume threads and keep waiting for more. */
3104
3105 /* If we're collecting a fast tracepoint, finish the collection and
3106 move out of the jump pad before delivering a signal. See
3107 linux_stabilize_threads. */
3108
3109 if (WIFSTOPPED (w)
3110 && WSTOPSIG (w) != SIGTRAP
3111 && supports_fast_tracepoints ()
3112 && agent_loaded_p ())
3113 {
3114 threads_debug_printf ("Got signal %d for LWP %ld. Check if we need "
3115 "to defer or adjust it.",
3116 WSTOPSIG (w), lwpid_of (current_thread));
3117
3118 /* Allow debugging the jump pad itself. */
3119 if (current_thread->last_resume_kind != resume_step
3120 && maybe_move_out_of_jump_pad (event_child, &w))
3121 {
3122 enqueue_one_deferred_signal (event_child, &w);
3123
3124 threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad)",
3125 WSTOPSIG (w), lwpid_of (current_thread));
3126
3127 resume_one_lwp (event_child, 0, 0, NULL);
3128
3129 return ignore_event (ourstatus);
3130 }
3131 }
3132
3133 if (event_child->collecting_fast_tracepoint
3134 != fast_tpoint_collect_result::not_collecting)
3135 {
3136 threads_debug_printf
3137 ("LWP %ld was trying to move out of the jump pad (%d). "
3138 "Check if we're already there.",
3139 lwpid_of (current_thread),
3140 (int) event_child->collecting_fast_tracepoint);
3141
3142 trace_event = 1;
3143
3144 event_child->collecting_fast_tracepoint
3145 = linux_fast_tracepoint_collecting (event_child, NULL);
3146
3147 if (event_child->collecting_fast_tracepoint
3148 != fast_tpoint_collect_result::before_insn)
3149 {
3150 /* No longer need this breakpoint. */
3151 if (event_child->exit_jump_pad_bkpt != NULL)
3152 {
3153 threads_debug_printf
3154 ("No longer need exit-jump-pad bkpt; removing it."
3155 "stopping all threads momentarily.");
3156
3157 /* Other running threads could hit this breakpoint.
3158 We don't handle moribund locations like GDB does,
3159 instead we always pause all threads when removing
3160 breakpoints, so that any step-over or
3161 decr_pc_after_break adjustment is always taken
3162 care of while the breakpoint is still
3163 inserted. */
3164 stop_all_lwps (1, event_child);
3165
3166 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3167 event_child->exit_jump_pad_bkpt = NULL;
3168
3169 unstop_all_lwps (1, event_child);
3170
3171 gdb_assert (event_child->suspended >= 0);
3172 }
3173 }
3174
3175 if (event_child->collecting_fast_tracepoint
3176 == fast_tpoint_collect_result::not_collecting)
3177 {
3178 threads_debug_printf
3179 ("fast tracepoint finished collecting successfully.");
3180
3181 /* We may have a deferred signal to report. */
3182 if (dequeue_one_deferred_signal (event_child, &w))
3183 threads_debug_printf ("dequeued one signal.");
3184 else
3185 {
3186 threads_debug_printf ("no deferred signals.");
3187
3188 if (stabilizing_threads)
3189 {
3190 ourstatus->set_stopped (GDB_SIGNAL_0);
3191
3192 threads_debug_printf
3193 ("ret = %s, stopped while stabilizing threads",
3194 target_pid_to_str (ptid_of (current_thread)).c_str ());
3195
3196 return ptid_of (current_thread);
3197 }
3198 }
3199 }
3200 }
3201
3202 /* Check whether GDB would be interested in this event. */
3203
3204 /* Check if GDB is interested in this syscall. */
3205 if (WIFSTOPPED (w)
3206 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3207 && !gdb_catch_this_syscall (event_child))
3208 {
3209 threads_debug_printf ("Ignored syscall for LWP %ld.",
3210 lwpid_of (current_thread));
3211
3212 resume_one_lwp (event_child, event_child->stepping, 0, NULL);
3213
3214 return ignore_event (ourstatus);
3215 }
3216
3217 /* If GDB is not interested in this signal, don't stop other
3218 threads, and don't report it to GDB. Just resume the inferior
3219 right away. We do this for threading-related signals as well as
3220 any that GDB specifically requested we ignore. But never ignore
3221 SIGSTOP if we sent it ourselves, and do not ignore signals when
3222 stepping - they may require special handling to skip the signal
3223 handler. Also never ignore signals that could be caused by a
3224 breakpoint. */
3225 if (WIFSTOPPED (w)
3226 && current_thread->last_resume_kind != resume_step
3227 && (
3228 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3229 (current_process ()->priv->thread_db != NULL
3230 && (WSTOPSIG (w) == __SIGRTMIN
3231 || WSTOPSIG (w) == __SIGRTMIN + 1))
3232 ||
3233 #endif
3234 (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3235 && !(WSTOPSIG (w) == SIGSTOP
3236 && current_thread->last_resume_kind == resume_stop)
3237 && !linux_wstatus_maybe_breakpoint (w))))
3238 {
3239 siginfo_t info, *info_p;
3240
3241 threads_debug_printf ("Ignored signal %d for LWP %ld.",
3242 WSTOPSIG (w), lwpid_of (current_thread));
3243
3244 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3245 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3246 info_p = &info;
3247 else
3248 info_p = NULL;
3249
3250 if (step_over_finished)
3251 {
3252 /* We cancelled this thread's step-over above. We still
3253 need to unsuspend all other LWPs, and set them back
3254 running again while the signal handler runs. */
3255 unsuspend_all_lwps (event_child);
3256
3257 /* Enqueue the pending signal info so that proceed_all_lwps
3258 doesn't lose it. */
3259 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3260
3261 proceed_all_lwps ();
3262 }
3263 else
3264 {
3265 resume_one_lwp (event_child, event_child->stepping,
3266 WSTOPSIG (w), info_p);
3267 }
3268
3269 return ignore_event (ourstatus);
3270 }
3271
3272 /* Note that all addresses are always "out of the step range" when
3273 there's no range to begin with. */
3274 in_step_range = lwp_in_step_range (event_child);
3275
3276 /* If GDB wanted this thread to single step, and the thread is out
3277 of the step range, we always want to report the SIGTRAP, and let
3278 GDB handle it. Watchpoints should always be reported. So should
3279 signals we can't explain. A SIGTRAP we can't explain could be a
3280 GDB breakpoint --- we may or may not support Z0 breakpoints. If we
3281 do, we'll be able to handle GDB breakpoints on top of internal
3282 breakpoints, by handling the internal breakpoint and still
3283 reporting the event to GDB. If we don't, we're out of luck, GDB
3284 won't see the breakpoint hit. If we see a single-step event but
3285 the thread should be continuing, don't pass the trap to gdb.
3286 That indicates that we had previously finished a single-step but
3287 left the single-step pending -- see
3288 complete_ongoing_step_over. */
3289 report_to_gdb = (!maybe_internal_trap
3290 || (current_thread->last_resume_kind == resume_step
3291 && !in_step_range)
3292 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3293 || (!in_step_range
3294 && !bp_explains_trap
3295 && !trace_event
3296 && !step_over_finished
3297 && !(current_thread->last_resume_kind == resume_continue
3298 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3299 || (gdb_breakpoint_here (event_child->stop_pc)
3300 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3301 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3302 || event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE);
3303
3304 run_breakpoint_commands (event_child->stop_pc);
3305
3306 /* We found no reason GDB would want us to stop. We either hit one
3307 of our own breakpoints, or finished an internal step GDB
3308 shouldn't know about. */
3309 if (!report_to_gdb)
3310 {
3311 if (bp_explains_trap)
3312 threads_debug_printf ("Hit a gdbserver breakpoint.");
3313
3314 if (step_over_finished)
3315 threads_debug_printf ("Step-over finished.");
3316
3317 if (trace_event)
3318 threads_debug_printf ("Tracepoint event.");
3319
3320 if (lwp_in_step_range (event_child))
3321 threads_debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).",
3322 paddress (event_child->stop_pc),
3323 paddress (event_child->step_range_start),
3324 paddress (event_child->step_range_end));
3325
3326 /* We're not reporting this breakpoint to GDB, so apply the
3327 decr_pc_after_break adjustment to the inferior's regcache
3328 ourselves. */
3329
3330 if (low_supports_breakpoints ())
3331 {
3332 struct regcache *regcache
3333 = get_thread_regcache (current_thread, 1);
3334 low_set_pc (regcache, event_child->stop_pc);
3335 }
3336
3337 if (step_over_finished)
3338 {
3339 /* If we have finished stepping over a breakpoint, we've
3340 stopped and suspended all LWPs momentarily except the
3341 stepping one. This is where we resume them all again.
3342 We're going to keep waiting, so use proceed, which
3343 handles stepping over the next breakpoint. */
3344 unsuspend_all_lwps (event_child);
3345 }
3346 else
3347 {
3348 /* Remove the single-step breakpoints if any. Note that
3349 there isn't single-step breakpoint if we finished stepping
3350 over. */
3351 if (supports_software_single_step ()
3352 && has_single_step_breakpoints (current_thread))
3353 {
3354 stop_all_lwps (0, event_child);
3355 delete_single_step_breakpoints (current_thread);
3356 unstop_all_lwps (0, event_child);
3357 }
3358 }
3359
3360 threads_debug_printf ("proceeding all threads.");
3361
3362 proceed_all_lwps ();
3363
3364 return ignore_event (ourstatus);
3365 }
3366
3367 if (debug_threads)
3368 {
3369 if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
3370 threads_debug_printf ("LWP %ld: extended event with waitstatus %s",
3371 lwpid_of (get_lwp_thread (event_child)),
3372 event_child->waitstatus.to_string ().c_str ());
3373
3374 if (current_thread->last_resume_kind == resume_step)
3375 {
3376 if (event_child->step_range_start == event_child->step_range_end)
3377 threads_debug_printf
3378 ("GDB wanted to single-step, reporting event.");
3379 else if (!lwp_in_step_range (event_child))
3380 threads_debug_printf ("Out of step range, reporting event.");
3381 }
3382
3383 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3384 threads_debug_printf ("Stopped by watchpoint.");
3385 else if (gdb_breakpoint_here (event_child->stop_pc))
3386 threads_debug_printf ("Stopped by GDB breakpoint.");
3387 }
3388
3389 threads_debug_printf ("Hit a non-gdbserver trap event.");
3390
3391 /* Alright, we're going to report a stop. */
3392
3393 /* Remove single-step breakpoints. */
3394 if (supports_software_single_step ())
3395 {
3396 /* Whether to remove single-step breakpoints. If true, stop all
3397 lwps, so that other threads won't hit a breakpoint left in
3398 stale memory. */
3399 int remove_single_step_breakpoints_p = 0;
3400
3401 if (non_stop)
3402 {
3403 remove_single_step_breakpoints_p
3404 = has_single_step_breakpoints (current_thread);
3405 }
3406 else
3407 {
3408 /* In all-stop, a stop reply cancels all previous resume
3409 requests. Delete all single-step breakpoints. */
3410
3411 find_thread ([&] (thread_info *thread) {
3412 if (has_single_step_breakpoints (thread))
3413 {
3414 remove_single_step_breakpoints_p = 1;
3415 return true;
3416 }
3417
3418 return false;
3419 });
3420 }
3421
3422 if (remove_single_step_breakpoints_p)
3423 {
3424 /* If we remove single-step breakpoints from memory, stop all lwps,
3425 so that other threads won't hit the breakpoint in the stale
3426 memory. */
3427 stop_all_lwps (0, event_child);
3428
3429 if (non_stop)
3430 {
3431 gdb_assert (has_single_step_breakpoints (current_thread));
3432 delete_single_step_breakpoints (current_thread);
3433 }
3434 else
3435 {
3436 for_each_thread ([] (thread_info *thread){
3437 if (has_single_step_breakpoints (thread))
3438 delete_single_step_breakpoints (thread);
3439 });
3440 }
3441
3442 unstop_all_lwps (0, event_child);
3443 }
3444 }
3445
3446 if (!stabilizing_threads)
3447 {
3448 /* In all-stop, stop all threads. */
3449 if (!non_stop)
3450 stop_all_lwps (0, NULL);
3451
3452 if (step_over_finished)
3453 {
3454 if (!non_stop)
3455 {
3456 /* If we were doing a step-over, all other threads but
3457 the stepping one had been paused in start_step_over,
3458 with their suspend counts incremented. We don't want
3459 to do a full unstop/unpause, because we're in
3460 all-stop mode (so we want threads stopped), but we
3461 still need to unsuspend the other threads, to
3462 decrement their `suspended' count back. */
3463 unsuspend_all_lwps (event_child);
3464 }
3465 else
3466 {
3467 /* If we just finished a step-over, then all threads had
3468 been momentarily paused. In all-stop, that's fine,
3469 we want threads stopped by now anyway. In non-stop,
3470 we need to re-resume threads that GDB wanted to be
3471 running. */
3472 unstop_all_lwps (1, event_child);
3473 }
3474 }
3475
3476 /* If we're not waiting for a specific LWP, choose an event LWP
3477 from among those that have had events. Giving equal priority
3478 to all LWPs that have had events helps prevent
3479 starvation. */
3480 if (ptid == minus_one_ptid)
3481 {
3482 event_child->status_pending_p = 1;
3483 event_child->status_pending = w;
3484
3485 select_event_lwp (&event_child);
3486
3487 /* current_thread and event_child must stay in sync. */
3488 switch_to_thread (get_lwp_thread (event_child));
3489
3490 event_child->status_pending_p = 0;
3491 w = event_child->status_pending;
3492 }
3493
3494
3495 /* Stabilize threads (move out of jump pads). */
3496 if (!non_stop)
3497 target_stabilize_threads ();
3498 }
3499 else
3500 {
3501 /* If we just finished a step-over, then all threads had been
3502 momentarily paused. In all-stop, that's fine, we want
3503 threads stopped by now anyway. In non-stop, we need to
3504 re-resume threads that GDB wanted to be running. */
3505 if (step_over_finished)
3506 unstop_all_lwps (1, event_child);
3507 }
3508
3509 /* At this point, we haven't set OURSTATUS. This is where we do it. */
3510 gdb_assert (ourstatus->kind () == TARGET_WAITKIND_IGNORE);
3511
3512 if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
3513 {
3514 /* If the reported event is an exit, fork, vfork or exec, let
3515 GDB know. */
3516
3517 /* Break the unreported fork relationship chain. */
3518 if (event_child->waitstatus.kind () == TARGET_WAITKIND_FORKED
3519 || event_child->waitstatus.kind () == TARGET_WAITKIND_VFORKED)
3520 {
3521 event_child->fork_relative->fork_relative = NULL;
3522 event_child->fork_relative = NULL;
3523 }
3524
3525 *ourstatus = event_child->waitstatus;
3526 /* Clear the event lwp's waitstatus since we handled it already. */
3527 event_child->waitstatus.set_ignore ();
3528 }
3529 else
3530 {
3531 /* The LWP stopped due to a plain signal or a syscall signal. Either way,
3532 event_chid->waitstatus wasn't filled in with the details, so look at
3533 the wait status W. */
3534 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3535 {
3536 int syscall_number;
3537
3538 get_syscall_trapinfo (event_child, &syscall_number);
3539 if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
3540 ourstatus->set_syscall_entry (syscall_number);
3541 else if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
3542 ourstatus->set_syscall_return (syscall_number);
3543 else
3544 gdb_assert_not_reached ("unexpected syscall state");
3545 }
3546 else if (current_thread->last_resume_kind == resume_stop
3547 && WSTOPSIG (w) == SIGSTOP)
3548 {
3549 /* A thread that has been requested to stop by GDB with vCont;t
3550 stopped cleanly, so report it as SIG0.  The use of
3551 SIGSTOP is an implementation detail. */
3552 ourstatus->set_stopped (GDB_SIGNAL_0);
3553 }
3554 else
3555 ourstatus->set_stopped (gdb_signal_from_host (WSTOPSIG (w)));
3556 }
3557
3558 /* Now that we've selected our final event LWP, un-adjust its PC if
3559 it was a software breakpoint, and the client doesn't know we can
3560 adjust the breakpoint ourselves. */
3561 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3562 && !cs.swbreak_feature)
3563 {
3564 int decr_pc = low_decr_pc_after_break ();
3565
3566 if (decr_pc != 0)
3567 {
3568 struct regcache *regcache
3569 = get_thread_regcache (current_thread, 1);
3570 low_set_pc (regcache, event_child->stop_pc + decr_pc);
3571 }
3572 }
3573
3574 gdb_assert (step_over_bkpt == null_ptid);
3575
3576 threads_debug_printf ("ret = %s, %s",
3577 target_pid_to_str (ptid_of (current_thread)).c_str (),
3578 ourstatus->to_string ().c_str ());
3579
3580 if (ourstatus->kind () == TARGET_WAITKIND_EXITED)
3581 return filter_exit_event (event_child, ourstatus);
3582
3583 return ptid_of (current_thread);
3584 }
3585
3586 /* Get rid of any pending event in the pipe. */
3587 static void
3588 async_file_flush (void)
3589 {
3590 linux_event_pipe.flush ();
3591 }
3592
3593 /* Put something in the pipe, so the event loop wakes up. */
3594 static void
3595 async_file_mark (void)
3596 {
3597 linux_event_pipe.mark ();
3598 }
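
/* Illustrative sketch (a simplification of gdbsupport/event-pipe):
   the event pipe is the classic self-pipe trick.  Marking writes a
   byte to the write end, the event loop's select/poll on the read
   end wakes up, and flushing drains whatever has accumulated:

       int fds[2];
       if (pipe2 (fds, O_NONBLOCK) != 0)	// fds[0] read, fds[1] write
	 perror_with_name ("pipe2");
       ...
       (void) write (fds[1], "+", 1);	// mark: wake the event loop

       char buf[64];
       while (read (fds[0], buf, sizeof buf) > 0)	// flush
	 ;
*/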
3599
3600 ptid_t
3601 linux_process_target::wait (ptid_t ptid,
3602 target_waitstatus *ourstatus,
3603 target_wait_flags target_options)
3604 {
3605 ptid_t event_ptid;
3606
3607 /* Flush the async file first. */
3608 if (target_is_async_p ())
3609 async_file_flush ();
3610
3611 do
3612 {
3613 event_ptid = wait_1 (ptid, ourstatus, target_options);
3614 }
3615 while ((target_options & TARGET_WNOHANG) == 0
3616 && event_ptid == null_ptid
3617 && ourstatus->kind () == TARGET_WAITKIND_IGNORE);
3618
3619 /* If at least one stop was reported, there may be more. A single
3620 SIGCHLD can signal more than one child stop. */
3621 if (target_is_async_p ()
3622 && (target_options & TARGET_WNOHANG) != 0
3623 && event_ptid != null_ptid)
3624 async_file_mark ();
3625
3626 return event_ptid;
3627 }
3628
3629 /* Send a signal to an LWP. */
3630
3631 static int
3632 kill_lwp (unsigned long lwpid, int signo)
3633 {
3634 int ret;
3635
3636 errno = 0;
3637 ret = syscall (__NR_tkill, lwpid, signo);
3638 if (errno == ENOSYS)
3639 {
3640 /* If tkill fails, then we are not using nptl threads, a
3641 configuration we no longer support. */
3642 perror_with_name (("tkill"));
3643 }
3644 return ret;
3645 }
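
/* Illustrative sketch of why tkill is used here: kill(2) targets a
   whole thread group, and the kernel may deliver the signal to any
   eligible thread in it, whereas tkill(2) directs the signal at one
   specific LWP, which is what stopping individual threads requires:

       syscall (__NR_tkill, lwpid, SIGSTOP);	// exactly this LWP
       kill (tgid, SIGSTOP);	// any thread in the group may take it
*/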
3646
3647 void
3648 linux_stop_lwp (struct lwp_info *lwp)
3649 {
3650 send_sigstop (lwp);
3651 }
3652
3653 static void
3654 send_sigstop (struct lwp_info *lwp)
3655 {
3656 int pid;
3657
3658 pid = lwpid_of (get_lwp_thread (lwp));
3659
3660 /* If we already have a pending stop signal for this process, don't
3661 send another. */
3662 if (lwp->stop_expected)
3663 {
3664 threads_debug_printf ("Have pending sigstop for lwp %d", pid);
3665
3666 return;
3667 }
3668
3669 threads_debug_printf ("Sending sigstop to lwp %d", pid);
3670
3671 lwp->stop_expected = 1;
3672 kill_lwp (pid, SIGSTOP);
3673 }
3674
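/* Send a SIGSTOP to the LWP of THREAD, unless it is EXCEPT or is
   already stopped. */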
3675 static void
3676 send_sigstop (thread_info *thread, lwp_info *except)
3677 {
3678 struct lwp_info *lwp = get_thread_lwp (thread);
3679
3680 /* Ignore EXCEPT. */
3681 if (lwp == except)
3682 return;
3683
3684 if (lwp->stopped)
3685 return;
3686
3687 send_sigstop (lwp);
3688 }
3689
3690 /* Increment the suspend count of an LWP, and stop it, if not stopped
3691 yet. */
3692 static void
3693 suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
3694 {
3695 struct lwp_info *lwp = get_thread_lwp (thread);
3696
3697 /* Ignore EXCEPT. */
3698 if (lwp == except)
3699 return;
3700
3701 lwp_suspended_inc (lwp);
3702
3703 send_sigstop (thread, except);
3704 }
3705
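/* Record that LWP exited or was killed with wait status WSTAT, and
   arrange for no further stops to be expected from it. */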
3706 static void
3707 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3708 {
3709 /* Store the exit status for later. */
3710 lwp->status_pending_p = 1;
3711 lwp->status_pending = wstat;
3712
3713 /* Store in waitstatus as well, as there's nothing else to process
3714 for this event. */
3715 if (WIFEXITED (wstat))
3716 lwp->waitstatus.set_exited (WEXITSTATUS (wstat));
3717 else if (WIFSIGNALED (wstat))
3718 lwp->waitstatus.set_signalled (gdb_signal_from_host (WTERMSIG (wstat)));
3719
3720 /* Prevent trying to stop it. */
3721 lwp->stopped = 1;
3722
3723 /* No further stops are expected from a dead lwp. */
3724 lwp->stop_expected = 0;
3725 }
3726
3727 /* Return true if LWP has exited already, and has a pending exit event
3728 to report to GDB. */
3729
3730 static int
3731 lwp_is_marked_dead (struct lwp_info *lwp)
3732 {
3733 return (lwp->status_pending_p
3734 && (WIFEXITED (lwp->status_pending)
3735 || WIFSIGNALED (lwp->status_pending)));
3736 }
3737
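/* Wait until all LWPs being stopped have reported their SIGSTOP (or
   their exit), leaving all other events pending. */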
3738 void
3739 linux_process_target::wait_for_sigstop ()
3740 {
3741 struct thread_info *saved_thread;
3742 ptid_t saved_tid;
3743 int wstat;
3744 int ret;
3745
3746 saved_thread = current_thread;
3747 if (saved_thread != NULL)
3748 saved_tid = saved_thread->id;
3749 else
3750 saved_tid = null_ptid; /* avoid bogus unused warning */
3751
3752 scoped_restore_current_thread restore_thread;
3753
3754 threads_debug_printf ("pulling events");
3755
3756 /* Passing NULL_PTID as filter indicates we want all events to be
3757 left pending. Eventually this returns when there are no
3758 unwaited-for children left. */
3759 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
3760 gdb_assert (ret == -1);
3761
3762 if (saved_thread == NULL || mythread_alive (saved_tid))
3763 return;
3764 else
3765 {
3766 threads_debug_printf ("Previously current thread died.");
3767
3768 /* We can't change the current inferior behind GDB's back,
3769 otherwise, a subsequent command may apply to the wrong
3770 process. */
3771 restore_thread.dont_restore ();
3772 switch_to_thread (nullptr);
3773 }
3774 }
3775
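/* Return true if THREAD is stopped in the middle of collecting a
   fast tracepoint in the jump pad, at a spot GDB is interested in
   (a breakpoint, a watchpoint hit, or a single-step request). */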
3776 bool
3777 linux_process_target::stuck_in_jump_pad (thread_info *thread)
3778 {
3779 struct lwp_info *lwp = get_thread_lwp (thread);
3780
3781 if (lwp->suspended != 0)
3782 {
3783 internal_error (__FILE__, __LINE__,
3784 "LWP %ld is suspended, suspended=%d\n",
3785 lwpid_of (thread), lwp->suspended);
3786 }
3787 gdb_assert (lwp->stopped);
3788
3789 /* Allow debugging the jump pad, gdb_collect, etc. */
3790 return (supports_fast_tracepoints ()
3791 && agent_loaded_p ()
3792 && (gdb_breakpoint_here (lwp->stop_pc)
3793 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3794 || thread->last_resume_kind == resume_step)
3795 && (linux_fast_tracepoint_collecting (lwp, NULL)
3796 != fast_tpoint_collect_result::not_collecting));
3797 }
3798
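/* If THREAD stopped in the fast tracepoint jump pad at an
   uninteresting spot, arrange for it to finish the collect and move
   out of the jump pad, deferring any pending signal; otherwise,
   leave it suspended. */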
3799 void
3800 linux_process_target::move_out_of_jump_pad (thread_info *thread)
3801 {
3802 struct lwp_info *lwp = get_thread_lwp (thread);
3803 int *wstat;
3804
3805 if (lwp->suspended != 0)
3806 {
3807 internal_error (__FILE__, __LINE__,
3808 "LWP %ld is suspended, suspended=%d\n",
3809 lwpid_of (thread), lwp->suspended);
3810 }
3811 gdb_assert (lwp->stopped);
3812
3813 /* For gdb_breakpoint_here. */
3814 scoped_restore_current_thread restore_thread;
3815 switch_to_thread (thread);
3816
3817 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3818
3819 /* Allow debugging the jump pad, gdb_collect, etc. */
3820 if (!gdb_breakpoint_here (lwp->stop_pc)
3821 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3822 && thread->last_resume_kind != resume_step
3823 && maybe_move_out_of_jump_pad (lwp, wstat))
3824 {
3825 threads_debug_printf ("LWP %ld needs stabilizing (in jump pad)",
3826 lwpid_of (thread));
3827
3828 if (wstat)
3829 {
3830 lwp->status_pending_p = 0;
3831 enqueue_one_deferred_signal (lwp, wstat);
3832
3833 threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad)",
3834 WSTOPSIG (*wstat), lwpid_of (thread));
3835 }
3836
3837 resume_one_lwp (lwp, 0, 0, NULL);
3838 }
3839 else
3840 lwp_suspended_inc (lwp);
3841 }
3842
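/* Return true if THREAD's LWP is alive and running (i.e., not
   stopped and not marked dead). */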
3843 static bool
3844 lwp_running (thread_info *thread)
3845 {
3846 struct lwp_info *lwp = get_thread_lwp (thread);
3847
3848 if (lwp_is_marked_dead (lwp))
3849 return false;
3850
3851 return !lwp->stopped;
3852 }
3853
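/* Stop all LWPs except EXCEPT, additionally suspending them if
   SUSPEND is nonzero, and wait until all the stops have been
   collected. */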
3854 void
3855 linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
3856 {
3857 /* Should not be called recursively. */
3858 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3859
3860 THREADS_SCOPED_DEBUG_ENTER_EXIT;
3861
3862 threads_debug_printf
3863 ("%s, except=%s", suspend ? "stop-and-suspend" : "stop",
3864 (except != NULL
3865 ? target_pid_to_str (ptid_of (get_lwp_thread (except))).c_str ()
3866 : "none"));
3867
3868 stopping_threads = (suspend
3869 ? STOPPING_AND_SUSPENDING_THREADS
3870 : STOPPING_THREADS);
3871
3872 if (suspend)
3873 for_each_thread ([&] (thread_info *thread)
3874 {
3875 suspend_and_send_sigstop (thread, except);
3876 });
3877 else
3878 for_each_thread ([&] (thread_info *thread)
3879 {
3880 send_sigstop (thread, except);
3881 });
3882
3883 wait_for_sigstop ();
3884 stopping_threads = NOT_STOPPING_THREADS;
3885
3886 threads_debug_printf ("setting stopping_threads back to !stopping");
3887 }
3888
3889 /* Enqueue one signal in the chain of signals which need to be
3890 delivered to this process on next resume. */
3891
3892 static void
3893 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
3894 {
3895 lwp->pending_signals.emplace_back (signal);
3896 if (info == nullptr)
3897 memset (&lwp->pending_signals.back ().info, 0, sizeof (siginfo_t));
3898 else
3899 lwp->pending_signals.back ().info = *info;
3900 }
3901
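/* Install single-step breakpoints at LWP's next possible PCs, for
   software single-stepping. */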
3902 void
3903 linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp)
3904 {
3905 struct thread_info *thread = get_lwp_thread (lwp);
3906 struct regcache *regcache = get_thread_regcache (thread, 1);
3907
3908 scoped_restore_current_thread restore_thread;
3909
3910 switch_to_thread (thread);
3911 std::vector<CORE_ADDR> next_pcs = low_get_next_pcs (regcache);
3912
3913 for (CORE_ADDR pc : next_pcs)
3914 set_single_step_breakpoint (pc, current_ptid);
3915 }
3916
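/* Prepare LWP to be single-stepped. Return 1 if ptrace should
   hardware-single-step it, or 0 if software single-step breakpoints
   were installed instead (or stepping is not supported at all). */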
3917 int
3918 linux_process_target::single_step (lwp_info* lwp)
3919 {
3920 int step = 0;
3921
3922 if (supports_hardware_single_step ())
3923 {
3924 step = 1;
3925 }
3926 else if (supports_software_single_step ())
3927 {
3928 install_software_single_step_breakpoints (lwp);
3929 step = 0;
3930 }
3931 else
3932 threads_debug_printf ("stepping is not implemented on this target");
3933
3934 return step;
3935 }
3936
3937 /* The signal can be delivered to the inferior if we are not trying to
3938 finish a fast tracepoint collect. Since a signal can be delivered
3939 during a step-over, the program may enter the signal handler and
3940 trap again after returning from it. We can live with the spurious
3941 double traps. */
3942
3943 static int
3944 lwp_signal_can_be_delivered (struct lwp_info *lwp)
3945 {
3946 return (lwp->collecting_fast_tracepoint
3947 == fast_tpoint_collect_result::not_collecting);
3948 }
3949
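/* Resume execution of LWP. If STEP is nonzero, single-step it; if
   SIGNAL is nonzero, deliver that signal, with siginfo INFO if
   non-NULL. Throws an error if the ptrace request fails. */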
3950 void
3951 linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step,
3952 int signal, siginfo_t *info)
3953 {
3954 struct thread_info *thread = get_lwp_thread (lwp);
3955 int ptrace_request;
3956 struct process_info *proc = get_thread_process (thread);
3957
3958 /* Note that the target description may not be initialised
3959 (proc->tdesc == NULL) at this point, because the program hasn't
3960 stopped at its first instruction yet; this is the case while
3961 GDBserver is skipping the extra traps from the wrapper program (see
3962 option --wrapper). Code in this function that requires register
3963 access should therefore be guarded by a check on proc->tdesc. */
3964
3965 if (lwp->stopped == 0)
3966 return;
3967
3968 gdb_assert (lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);
3969
3970 fast_tpoint_collect_result fast_tp_collecting
3971 = lwp->collecting_fast_tracepoint;
3972
3973 gdb_assert (!stabilizing_threads
3974 || (fast_tp_collecting
3975 != fast_tpoint_collect_result::not_collecting));
3976
3977 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3978 user used the "jump" command, or "set $pc = foo"). */
3979 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
3980 {
3981 /* Collecting 'while-stepping' actions doesn't make sense
3982 anymore. */
3983 release_while_stepping_state_list (thread);
3984 }
3985
3986 /* If we have pending signals or status, and a new signal, enqueue the
3987 signal. Also enqueue the signal if it can't be delivered to the
3988 inferior right now. */
3989 if (signal != 0
3990 && (lwp->status_pending_p
3991 || !lwp->pending_signals.empty ()
3992 || !lwp_signal_can_be_delivered (lwp)))
3993 {
3994 enqueue_pending_signal (lwp, signal, info);
3995
3996 /* Postpone any pending signal. It was enqueued above. */
3997 signal = 0;
3998 }
3999
4000 if (lwp->status_pending_p)
4001 {
4002 threads_debug_printf
4003 ("Not resuming lwp %ld (%s, stop %s); has pending status",
4004 lwpid_of (thread), step ? "step" : "continue",
4005 lwp->stop_expected ? "expected" : "not expected");
4006 return;
4007 }
4008
4009 scoped_restore_current_thread restore_thread;
4010 switch_to_thread (thread);
4011
4012 /* This bit needs some thinking about. If we get a signal that
4013 we must report while a single-step reinsert is still pending,
4014 we often end up resuming the thread. It might be better to
4015 (ew) allow a stack of pending events; then we could be sure that
4016 the reinsert happened right away and not lose any signals.
4017
4018 Making this stack would also shrink the window in which breakpoints are
4019 uninserted (see comment in linux_wait_for_lwp) but not enough for
4020 complete correctness, so it won't solve that problem. It may be
4021 worthwhile just to solve this one, however. */
4022 if (lwp->bp_reinsert != 0)
4023 {
4024 threads_debug_printf (" pending reinsert at 0x%s",
4025 paddress (lwp->bp_reinsert));
4026
4027 if (supports_hardware_single_step ())
4028 {
4029 if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
4030 {
4031 if (step == 0)
4032 warning ("BAD - reinserting but not stepping.");
4033 if (lwp->suspended)
4034 warning ("BAD - reinserting and suspended(%d).",
4035 lwp->suspended);
4036 }
4037 }
4038
4039 step = maybe_hw_step (thread);
4040 }
4041
4042 if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
4043 threads_debug_printf
4044 ("lwp %ld wants to get out of fast tracepoint jump pad "
4045 "(exit-jump-pad-bkpt)", lwpid_of (thread));
4046
4047 else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
4048 {
4049 threads_debug_printf
4050 ("lwp %ld wants to get out of fast tracepoint jump pad single-stepping",
4051 lwpid_of (thread));
4052
4053 if (supports_hardware_single_step ())
4054 step = 1;
4055 else
4056 {
4057 internal_error (__FILE__, __LINE__,
4058 "moving out of jump pad single-stepping"
4059 " not implemented on this target");
4060 }
4061 }
4062
4063 /* If we have while-stepping actions in this thread, set it stepping.
4064 If we have a signal to deliver, it may or may not be set to
4065 SIG_IGN, we don't know. Assume so, and allow collecting
4066 while-stepping into a signal handler. A possible smart thing to
4067 do would be to set an internal breakpoint at the signal return
4068 address, continue, and carry on catching this while-stepping
4069 action only when that breakpoint is hit. A future
4070 enhancement. */
4071 if (thread->while_stepping != NULL)
4072 {
4073 threads_debug_printf
4074 ("lwp %ld has a while-stepping action -> forcing step.",
4075 lwpid_of (thread));
4076
4077 step = single_step (lwp);
4078 }
4079
4080 if (proc->tdesc != NULL && low_supports_breakpoints ())
4081 {
4082 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4083
4084 lwp->stop_pc = low_get_pc (regcache);
4085
4086 threads_debug_printf (" %s from pc 0x%lx", step ? "step" : "continue",
4087 (long) lwp->stop_pc);
4088 }
4089
4090 /* If we have pending signals, consume one if it can be delivered to
4091 the inferior. */
4092 if (!lwp->pending_signals.empty () && lwp_signal_can_be_delivered (lwp))
4093 {
4094 const pending_signal &p_sig = lwp->pending_signals.front ();
4095
4096 signal = p_sig.signal;
4097 if (p_sig.info.si_signo != 0)
4098 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4099 &p_sig.info);
4100
4101 lwp->pending_signals.pop_front ();
4102 }
4103
4104 threads_debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)",
4105 lwpid_of (thread), step ? "step" : "continue", signal,
4106 lwp->stop_expected ? "expected" : "not expected");
4107
4108 low_prepare_to_resume (lwp);
4109
4110 regcache_invalidate_thread (thread);
4111 errno = 0;
4112 lwp->stepping = step;
4113 if (step)
4114 ptrace_request = PTRACE_SINGLESTEP;
4115 else if (gdb_catching_syscalls_p (lwp))
4116 ptrace_request = PTRACE_SYSCALL;
4117 else
4118 ptrace_request = PTRACE_CONT;
4119 ptrace (ptrace_request,
4120 lwpid_of (thread),
4121 (PTRACE_TYPE_ARG3) 0,
4122 /* Coerce to a uintptr_t first to avoid potential gcc warning
4123 of coercing an 8 byte integer to a 4 byte pointer. */
4124 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4125
4126 if (errno)
4127 {
4128 int saved_errno = errno;
4129
4130 threads_debug_printf ("ptrace errno = %d (%s)",
4131 saved_errno, strerror (saved_errno));
4132
4133 errno = saved_errno;
4134 perror_with_name ("resuming thread");
4135 }
4136
4137 /* Successfully resumed. Clear state that no longer makes sense,
4138 and mark the LWP as running. Must not do this before resuming
4139 otherwise if that fails other code will be confused. E.g., we'd
4140 later try to stop the LWP and hang forever waiting for a stop
4141 status. Note that we must not throw after this is cleared,
4142 otherwise handle_zombie_lwp_error would get confused. */
4143 lwp->stopped = 0;
4144 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4145 }
4146
4147 void
4148 linux_process_target::low_prepare_to_resume (lwp_info *lwp)
4149 {
4150 /* Nop. */
4151 }
4152
4153 /* Called when we try to resume a stopped LWP and that errors out. If
4154 the LWP is no longer in ptrace-stopped state (meaning it's a zombie,
4155 or about to become one), discard the error, clear any pending status
4156 the LWP may have, and return true (we'll collect the exit status
4157 soon enough). Otherwise, return false. */
4158
4159 static int
4160 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4161 {
4162 struct thread_info *thread = get_lwp_thread (lp);
4163
4164 /* If we get an error after resuming the LWP successfully, we'd
4165 confuse !T state for the LWP being gone. */
4166 gdb_assert (lp->stopped);
4167
4168 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4169 because even if ptrace failed with ESRCH, the tracee may be "not
4170 yet fully dead", but already refusing ptrace requests. In that
4171 case the tracee has 'R (Running)' state for a little bit
4172 (observed in Linux 3.18). See also the note on ESRCH in the
4173 ptrace(2) man page. Instead, check whether the LWP has any state
4174 other than ptrace-stopped. */
4175
4176 /* Don't assume anything if /proc/PID/status can't be read. */
4177 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4178 {
4179 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4180 lp->status_pending_p = 0;
4181 return 1;
4182 }
4183 return 0;
4184 }
4185
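/* Like resume_one_lwp_throw, but no error is raised if the LWP
   turns out to have vanished already; in that case its exit status
   will be collected later. */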
4186 void
4187 linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal,
4188 siginfo_t *info)
4189 {
4190 try
4191 {
4192 resume_one_lwp_throw (lwp, step, signal, info);
4193 }
4194 catch (const gdb_exception_error &ex)
4195 {
4196 if (check_ptrace_stopped_lwp_gone (lwp))
4197 {
4198 /* This could be because we tried to resume an LWP after its leader
4199 exited. Mark it as resumed, so we can collect an exit event
4200 from it. */
4201 lwp->stopped = 0;
4202 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4203 }
4204 else
4205 throw;
4206 }
4207 }
4208
4209 /* This function is called once per thread via for_each_thread.
4210 We look up which resume request applies to THREAD and mark it with a
4211 pointer to the appropriate resume request.
4212
4213 This algorithm is O(threads * resume elements), but resume elements
4214 is small (and will remain small at least until GDB supports thread
4215 suspension). */
4216
4217 static void
4218 linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
4219 {
4220 struct lwp_info *lwp = get_thread_lwp (thread);
4221
4222 for (int ndx = 0; ndx < n; ndx++)
4223 {
4224 ptid_t ptid = resume[ndx].thread;
4225 if (ptid == minus_one_ptid
4226 || ptid == thread->id
4227 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4228 of PID'. */
4229 || (ptid.pid () == pid_of (thread)
4230 && (ptid.is_pid ()
4231 || ptid.lwp () == -1)))
4232 {
4233 if (resume[ndx].kind == resume_stop
4234 && thread->last_resume_kind == resume_stop)
4235 {
4236 threads_debug_printf
4237 ("already %s LWP %ld at GDB's request",
4238 (thread->last_status.kind () == TARGET_WAITKIND_STOPPED
4239 ? "stopped" : "stopping"),
4240 lwpid_of (thread));
4241
4242 continue;
4243 }
4244
4245 /* Ignore (wildcard) resume requests for already-resumed
4246 threads. */
4247 if (resume[ndx].kind != resume_stop
4248 && thread->last_resume_kind != resume_stop)
4249 {
4250 threads_debug_printf
4251 ("already %s LWP %ld at GDB's request",
4252 (thread->last_resume_kind == resume_step
4253 ? "stepping" : "continuing"),
4254 lwpid_of (thread));
4255 continue;
4256 }
4257
4258 /* Don't let wildcard resumes resume fork children that GDB
4259 does not yet know are new fork children. */
4260 if (lwp->fork_relative != NULL)
4261 {
4262 struct lwp_info *rel = lwp->fork_relative;
4263
4264 if (rel->status_pending_p
4265 && (rel->waitstatus.kind () == TARGET_WAITKIND_FORKED
4266 || rel->waitstatus.kind () == TARGET_WAITKIND_VFORKED))
4267 {
4268 threads_debug_printf
4269 ("not resuming LWP %ld: has queued stop reply",
4270 lwpid_of (thread));
4271 continue;
4272 }
4273 }
4274
4275 /* If the thread has a pending event that has already been
4276 reported to GDBserver core, but GDB has not pulled the
4277 event out of the vStopped queue yet, likewise, ignore the
4278 (wildcard) resume request. */
4279 if (in_queued_stop_replies (thread->id))
4280 {
4281 threads_debug_printf
4282 ("not resuming LWP %ld: has queued stop reply",
4283 lwpid_of (thread));
4284 continue;
4285 }
4286
4287 lwp->resume = &resume[ndx];
4288 thread->last_resume_kind = lwp->resume->kind;
4289
4290 lwp->step_range_start = lwp->resume->step_range_start;
4291 lwp->step_range_end = lwp->resume->step_range_end;
4292
4293 /* If we had a deferred signal to report, dequeue one now.
4294 This can happen if LWP gets more than one signal while
4295 trying to get out of a jump pad. */
4296 if (lwp->stopped
4297 && !lwp->status_pending_p
4298 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4299 {
4300 lwp->status_pending_p = 1;
4301
4302 threads_debug_printf
4303 ("Dequeueing deferred signal %d for LWP %ld, "
4304 "leaving status pending.",
4305 WSTOPSIG (lwp->status_pending),
4306 lwpid_of (thread));
4307 }
4308
4309 return;
4310 }
4311 }
4312
4313 /* No resume action for this thread. */
4314 lwp->resume = NULL;
4315 }
4316
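/* Return true if THREAD is due to be resumed and still has a
   pending status to report. */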
4317 bool
4318 linux_process_target::resume_status_pending (thread_info *thread)
4319 {
4320 struct lwp_info *lwp = get_thread_lwp (thread);
4321
4322 /* LWPs which will not be resumed are not interesting, because
4323 we might not wait for them next time through linux_wait. */
4324 if (lwp->resume == NULL)
4325 return false;
4326
4327 return thread_still_has_status_pending (thread);
4328 }
4329
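/* Return true if THREAD is stopped at a breakpoint that needs
   stepping over before it can be resumed. */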
4330 bool
4331 linux_process_target::thread_needs_step_over (thread_info *thread)
4332 {
4333 struct lwp_info *lwp = get_thread_lwp (thread);
4334 CORE_ADDR pc;
4335 struct process_info *proc = get_thread_process (thread);
4336
4337 /* GDBserver is skipping the extra traps from the wrapper program,
4338 don't have to do step over. */
4339 if (proc->tdesc == NULL)
4340 return false;
4341
4342 /* LWPs which will not be resumed are not interesting, because we
4343 might not wait for them next time through linux_wait. */
4344
4345 if (!lwp->stopped)
4346 {
4347 threads_debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped",
4348 lwpid_of (thread));
4349 return false;
4350 }
4351
4352 if (thread->last_resume_kind == resume_stop)
4353 {
4354 threads_debug_printf
4355 ("Need step over [LWP %ld]? Ignoring, should remain stopped",
4356 lwpid_of (thread));
4357 return false;
4358 }
4359
4360 gdb_assert (lwp->suspended >= 0);
4361
4362 if (lwp->suspended)
4363 {
4364 threads_debug_printf ("Need step over [LWP %ld]? Ignoring, suspended",
4365 lwpid_of (thread));
4366 return false;
4367 }
4368
4369 if (lwp->status_pending_p)
4370 {
4371 threads_debug_printf
4372 ("Need step over [LWP %ld]? Ignoring, has pending status.",
4373 lwpid_of (thread));
4374 return false;
4375 }
4376
4377 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4378 or we have. */
4379 pc = get_pc (lwp);
4380
4381 /* If the PC has changed since we stopped, then don't do anything,
4382 and let the breakpoint/tracepoint be hit. This happens if, for
4383 instance, GDB handled the decr_pc_after_break subtraction itself,
4384 GDB is OOL stepping this thread, or the user has issued a "jump"
4385 command, or poked the thread's registers herself. */
4386 if (pc != lwp->stop_pc)
4387 {
4388 threads_debug_printf
4389 ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4390 "Old stop_pc was 0x%s, PC is now 0x%s", lwpid_of (thread),
4391 paddress (lwp->stop_pc), paddress (pc));
4392 return false;
4393 }
4394
4395 /* On software single step target, resume the inferior with signal
4396 rather than stepping over. */
4397 if (supports_software_single_step ()
4398 && !lwp->pending_signals.empty ()
4399 && lwp_signal_can_be_delivered (lwp))
4400 {
4401 threads_debug_printf
4402 ("Need step over [LWP %ld]? Ignoring, has pending signals.",
4403 lwpid_of (thread));
4404
4405 return false;
4406 }
4407
4408 scoped_restore_current_thread restore_thread;
4409 switch_to_thread (thread);
4410
4411 /* We can only step over breakpoints we know about. */
4412 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4413 {
4414 /* Don't step over a breakpoint that GDB expects to hit
4415 though. If the condition is being evaluated on the target's side
4416 and it evaluates to false, step over this breakpoint as well. */
4417 if (gdb_breakpoint_here (pc)
4418 && gdb_condition_true_at_breakpoint (pc)
4419 && gdb_no_commands_at_breakpoint (pc))
4420 {
4421 threads_debug_printf ("Need step over [LWP %ld]? yes, but found"
4422 " GDB breakpoint at 0x%s; skipping step over",
4423 lwpid_of (thread), paddress (pc));
4424
4425 return false;
4426 }
4427 else
4428 {
4429 threads_debug_printf ("Need step over [LWP %ld]? yes, "
4430 "found breakpoint at 0x%s",
4431 lwpid_of (thread), paddress (pc));
4432
4433 /* We've found an lwp that needs stepping over --- return true so
4434 that find_thread stops looking. */
4435 return true;
4436 }
4437 }
4438
4439 threads_debug_printf
4440 ("Need step over [LWP %ld]? No, no breakpoint found at 0x%s",
4441 lwpid_of (thread), paddress (pc));
4442
4443 return false;
4444 }
4445
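/* Start a step-over operation for LWP: stop all other threads,
   uninsert the breakpoint at LWP's PC, and set the LWP up to step
   past it. step_over_bkpt records the thread whose event we then
   wait for. */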
4446 void
4447 linux_process_target::start_step_over (lwp_info *lwp)
4448 {
4449 struct thread_info *thread = get_lwp_thread (lwp);
4450 CORE_ADDR pc;
4451
4452 threads_debug_printf ("Starting step-over on LWP %ld. Stopping all threads",
4453 lwpid_of (thread));
4454
4455 stop_all_lwps (1, lwp);
4456
4457 if (lwp->suspended != 0)
4458 {
4459 internal_error (__FILE__, __LINE__,
4460 "LWP %ld suspended=%d\n", lwpid_of (thread),
4461 lwp->suspended);
4462 }
4463
4464 threads_debug_printf ("Done stopping all threads for step-over.");
4465
4466 /* Note, we should always reach here with an already adjusted PC,
4467 either by GDB (if we're resuming due to GDB's request), or by our
4468 caller, if we just finished handling an internal breakpoint GDB
4469 shouldn't care about. */
4470 pc = get_pc (lwp);
4471
4472 bool step = false;
4473 {
4474 scoped_restore_current_thread restore_thread;
4475 switch_to_thread (thread);
4476
4477 lwp->bp_reinsert = pc;
4478 uninsert_breakpoints_at (pc);
4479 uninsert_fast_tracepoint_jumps_at (pc);
4480
4481 step = single_step (lwp);
4482 }
4483
4484 resume_one_lwp (lwp, step, 0, NULL);
4485
4486 /* Require next event from this LWP. */
4487 step_over_bkpt = thread->id;
4488 }
4489
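/* Finish a step-over started by start_step_over: reinsert whatever
   was uninserted at LWP->BP_REINSERT and clean up. Return true if a
   step-over was actually in progress. */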
4490 bool
4491 linux_process_target::finish_step_over (lwp_info *lwp)
4492 {
4493 if (lwp->bp_reinsert != 0)
4494 {
4495 scoped_restore_current_thread restore_thread;
4496
4497 threads_debug_printf ("Finished step over.");
4498
4499 switch_to_thread (get_lwp_thread (lwp));
4500
4501 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4502 may be no breakpoint to reinsert there by now. */
4503 reinsert_breakpoints_at (lwp->bp_reinsert);
4504 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4505
4506 lwp->bp_reinsert = 0;
4507
4508 /* Delete any single-step breakpoints. No longer needed. We
4509 don't have to worry about other threads hitting this trap,
4510 and later not being able to explain it, because we were
4511 stepping over a breakpoint, and we hold all threads but
4512 LWP stopped while doing that. */
4513 if (!supports_hardware_single_step ())
4514 {
4515 gdb_assert (has_single_step_breakpoints (current_thread));
4516 delete_single_step_breakpoints (current_thread);
4517 }
4518
4519 step_over_bkpt = null_ptid;
4520 return true;
4521 }
4522 else
4523 return false;
4524 }
4525
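/* If a step-over is in progress, wait for it to finish and clean it
   up, so that e.g. a detach doesn't leave a breakpoint uninserted
   behind. */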
4526 void
4527 linux_process_target::complete_ongoing_step_over ()
4528 {
4529 if (step_over_bkpt != null_ptid)
4530 {
4531 struct lwp_info *lwp;
4532 int wstat;
4533 int ret;
4534
4535 threads_debug_printf ("detach: step over in progress, finish it first");
4536
4537 /* Passing NULL_PTID as filter indicates we want all events to
4538 be left pending. Eventually this returns when there are no
4539 unwaited-for children left. */
4540 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
4541 __WALL);
4542 gdb_assert (ret == -1);
4543
4544 lwp = find_lwp_pid (step_over_bkpt);
4545 if (lwp != NULL)
4546 {
4547 finish_step_over (lwp);
4548
4549 /* If we got our step SIGTRAP, don't leave it pending,
4550 otherwise we would report it to GDB as a spurious
4551 SIGTRAP. */
4552 gdb_assert (lwp->status_pending_p);
4553 if (WIFSTOPPED (lwp->status_pending)
4554 && WSTOPSIG (lwp->status_pending) == SIGTRAP)
4555 {
4556 thread_info *thread = get_lwp_thread (lwp);
4557 if (thread->last_resume_kind != resume_step)
4558 {
4559 threads_debug_printf ("detach: discard step-over SIGTRAP");
4560
4561 lwp->status_pending_p = 0;
4562 lwp->status_pending = 0;
4563 resume_one_lwp (lwp, lwp->stepping, 0, NULL);
4564 }
4565 else
4566 threads_debug_printf
4567 ("detach: resume_step, not discarding step-over SIGTRAP");
4568 }
4569 }
4570 step_over_bkpt = null_ptid;
4571 unsuspend_all_lwps (lwp);
4572 }
4573 }
4574
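/* Process THREAD's resume request, as previously recorded by
   linux_set_resume_request. If LEAVE_ALL_STOPPED is true, queue any
   signal to be delivered but leave the thread stopped. */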
4575 void
4576 linux_process_target::resume_one_thread (thread_info *thread,
4577 bool leave_all_stopped)
4578 {
4579 struct lwp_info *lwp = get_thread_lwp (thread);
4580 int leave_pending;
4581
4582 if (lwp->resume == NULL)
4583 return;
4584
4585 if (lwp->resume->kind == resume_stop)
4586 {
4587 threads_debug_printf ("resume_stop request for LWP %ld",
4588 lwpid_of (thread));
4589
4590 if (!lwp->stopped)
4591 {
4592 threads_debug_printf ("stopping LWP %ld", lwpid_of (thread));
4593
4594 /* Stop the thread, and wait for the event asynchronously,
4595 through the event loop. */
4596 send_sigstop (lwp);
4597 }
4598 else
4599 {
4600 threads_debug_printf ("already stopped LWP %ld", lwpid_of (thread));
4601
4602 /* The LWP may have been stopped in an internal event that
4603 was not meant to be notified back to GDB (e.g., gdbserver
4604 breakpoint), so we should be reporting a stop event in
4605 this case too. */
4606
4607 /* If the thread already has a pending SIGSTOP, this is a
4608 no-op. Otherwise, something later will presumably resume
4609 the thread and this will cause it to cancel any pending
4610 operation, due to last_resume_kind == resume_stop. If
4611 the thread already has a pending status to report, we
4612 will still report it the next time we wait - see
4613 status_pending_p_callback. */
4614
4615 /* If we already have a pending signal to report, then
4616 there's no need to queue a SIGSTOP, as this means we're
4617 midway through moving the LWP out of the jumppad, and we
4618 will report the pending signal as soon as that is
4619 finished. */
4620 if (lwp->pending_signals_to_report.empty ())
4621 send_sigstop (lwp);
4622 }
4623
4624 /* For stop requests, we're done. */
4625 lwp->resume = NULL;
4626 thread->last_status.set_ignore ();
4627 return;
4628 }
4629
4630 /* If this thread which is about to be resumed has a pending status,
4631 then don't resume it - we can just report the pending status.
4632 Likewise if it is suspended, because e.g., another thread is
4633 stepping past a breakpoint. Make sure to queue any signals that
4634 would otherwise be sent. In all-stop mode, we make this decision
4635 based on whether *any* thread has a pending status. If there's a
4636 thread that needs the step-over-breakpoint dance, then don't
4637 resume any other thread but that particular one. */
4638 leave_pending = (lwp->suspended
4639 || lwp->status_pending_p
4640 || leave_all_stopped);
4641
4642 /* If we have a new signal, enqueue the signal. */
4643 if (lwp->resume->sig != 0)
4644 {
4645 siginfo_t info, *info_p;
4646
4647 /* If this is the same signal we were previously stopped by,
4648 make sure to queue its siginfo. */
4649 if (WIFSTOPPED (lwp->last_status)
4650 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
4651 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
4652 (PTRACE_TYPE_ARG3) 0, &info) == 0)
4653 info_p = &info;
4654 else
4655 info_p = NULL;
4656
4657 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
4658 }
4659
4660 if (!leave_pending)
4661 {
4662 threads_debug_printf ("resuming LWP %ld", lwpid_of (thread));
4663
4664 proceed_one_lwp (thread, NULL);
4665 }
4666 else
4667 threads_debug_printf ("leaving LWP %ld stopped", lwpid_of (thread));
4668
4669 thread->last_status.set_ignore ();
4670 lwp->resume = NULL;
4671 }
4672
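/* Implementation of the resume target op. */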
4673 void
4674 linux_process_target::resume (thread_resume *resume_info, size_t n)
4675 {
4676 struct thread_info *need_step_over = NULL;
4677
4678 THREADS_SCOPED_DEBUG_ENTER_EXIT;
4679
4680 for_each_thread ([&] (thread_info *thread)
4681 {
4682 linux_set_resume_request (thread, resume_info, n);
4683 });
4684
4685 /* If there is a thread which would otherwise be resumed, which has
4686 a pending status, then don't resume any threads - we can just
4687 report the pending status. Make sure to queue any signals that
4688 would otherwise be sent. In non-stop mode, we'll apply this
4689 logic to each thread individually. We consume all pending events
4690 before considering to start a step-over (in all-stop). */
4691 bool any_pending = false;
4692 if (!non_stop)
4693 any_pending = find_thread ([this] (thread_info *thread)
4694 {
4695 return resume_status_pending (thread);
4696 }) != nullptr;
4697
4698 /* If there is a thread which would otherwise be resumed, which is
4699 stopped at a breakpoint that needs stepping over, then don't
4700 resume any threads - have it step over the breakpoint with all
4701 other threads stopped, then resume all threads again. Make sure
4702 to queue any signals that would otherwise be delivered or
4703 queued. */
4704 if (!any_pending && low_supports_breakpoints ())
4705 need_step_over = find_thread ([this] (thread_info *thread)
4706 {
4707 return thread_needs_step_over (thread);
4708 });
4709
4710 bool leave_all_stopped = (need_step_over != NULL || any_pending);
4711
4712 if (need_step_over != NULL)
4713 threads_debug_printf ("Not resuming all, need step over");
4714 else if (any_pending)
4715 threads_debug_printf ("Not resuming, all-stop and found "
4716 "an LWP with pending status");
4717 else
4718 threads_debug_printf ("Resuming, no pending status or step over needed");
4719
4720 /* Even if we're leaving threads stopped, queue all signals we'd
4721 otherwise deliver. */
4722 for_each_thread ([&] (thread_info *thread)
4723 {
4724 resume_one_thread (thread, leave_all_stopped);
4725 });
4726
4727 if (need_step_over)
4728 start_step_over (get_thread_lwp (need_step_over));
4729
4730 /* We may have events that were pending that can/should be sent to
4731 the client now. Trigger a linux_wait call. */
4732 if (target_is_async_p ())
4733 async_file_mark ();
4734 }
4735
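/* Proceed with THREAD's LWP, unless it is EXCEPT, is already
   running, is suspended, has a pending status to report, or GDB
   asked for it to remain stopped. */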
4736 void
4737 linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except)
4738 {
4739 struct lwp_info *lwp = get_thread_lwp (thread);
4740 int step;
4741
4742 if (lwp == except)
4743 return;
4744
4745 threads_debug_printf ("lwp %ld", lwpid_of (thread));
4746
4747 if (!lwp->stopped)
4748 {
4749 threads_debug_printf (" LWP %ld already running", lwpid_of (thread));
4750 return;
4751 }
4752
4753 if (thread->last_resume_kind == resume_stop
4754 && thread->last_status.kind () != TARGET_WAITKIND_IGNORE)
4755 {
4756 threads_debug_printf (" client wants LWP %ld to remain stopped",
4757 lwpid_of (thread));
4758 return;
4759 }
4760
4761 if (lwp->status_pending_p)
4762 {
4763 threads_debug_printf (" LWP %ld has pending status, leaving stopped",
4764 lwpid_of (thread));
4765 return;
4766 }
4767
4768 gdb_assert (lwp->suspended >= 0);
4769
4770 if (lwp->suspended)
4771 {
4772 threads_debug_printf (" LWP %ld is suspended", lwpid_of (thread));
4773 return;
4774 }
4775
4776 if (thread->last_resume_kind == resume_stop
4777 && lwp->pending_signals_to_report.empty ()
4778 && (lwp->collecting_fast_tracepoint
4779 == fast_tpoint_collect_result::not_collecting))
4780 {
4781 /* We haven't reported this LWP as stopped yet (otherwise, the
4782 last_status.kind check above would catch it, and we wouldn't
4783 reach here). This LWP may have been momentarily paused by a
4784 stop_all_lwps call while handling, for example, another LWP's
4785 step-over. In that case, the pending expected SIGSTOP signal
4786 that was queued at vCont;t handling time will have already
4787 been consumed by wait_for_sigstop, and so we need to requeue
4788 another one here. Note that if the LWP already has a SIGSTOP
4789 pending, this is a no-op. */
4790
4791 threads_debug_printf
4792 ("Client wants LWP %ld to stop. Making sure it has a SIGSTOP pending",
4793 lwpid_of (thread));
4794
4795 send_sigstop (lwp);
4796 }
4797
4798 if (thread->last_resume_kind == resume_step)
4799 {
4800 threads_debug_printf (" stepping LWP %ld, client wants it stepping",
4801 lwpid_of (thread));
4802
4803 /* If resume_step is requested by GDB, install single-step
4804 breakpoints when the thread is about to be actually resumed if
4805 the single-step breakpoints weren't removed. */
4806 if (supports_software_single_step ()
4807 && !has_single_step_breakpoints (thread))
4808 install_software_single_step_breakpoints (lwp);
4809
4810 step = maybe_hw_step (thread);
4811 }
4812 else if (lwp->bp_reinsert != 0)
4813 {
4814 threads_debug_printf (" stepping LWP %ld, reinsert set",
4815 lwpid_of (thread));
4816
4817 step = maybe_hw_step (thread);
4818 }
4819 else
4820 step = 0;
4821
4822 resume_one_lwp (lwp, step, 0, NULL);
4823 }
4824
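/* Decrement the suspend count of THREAD's LWP, and then proceed
   with it as proceed_one_lwp does. */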
4825 void
4826 linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread,
4827 lwp_info *except)
4828 {
4829 struct lwp_info *lwp = get_thread_lwp (thread);
4830
4831 if (lwp == except)
4832 return;
4833
4834 lwp_suspended_decr (lwp);
4835
4836 proceed_one_lwp (thread, except);
4837 }
4838
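/* Proceed with all LWPs; except that if some thread needs a
   step-over first, start that instead and leave the others
   stopped. */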
4839 void
4840 linux_process_target::proceed_all_lwps ()
4841 {
4842 struct thread_info *need_step_over;
4843
4844 /* If there is a thread which would otherwise be resumed, which is
4845 stopped at a breakpoint that needs stepping over, then don't
4846 resume any threads - have it step over the breakpoint with all
4847 other threads stopped, then resume all threads again. */
4848
4849 if (low_supports_breakpoints ())
4850 {
4851 need_step_over = find_thread ([this] (thread_info *thread)
4852 {
4853 return thread_needs_step_over (thread);
4854 });
4855
4856 if (need_step_over != NULL)
4857 {
4858 threads_debug_printf ("found thread %ld needing a step-over",
4859 lwpid_of (need_step_over));
4860
4861 start_step_over (get_thread_lwp (need_step_over));
4862 return;
4863 }
4864 }
4865
4866 threads_debug_printf ("Proceeding, no step-over needed");
4867
4868 for_each_thread ([this] (thread_info *thread)
4869 {
4870 proceed_one_lwp (thread, NULL);
4871 });
4872 }
4873
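/* Undo a previous stop_all_lwps: proceed with all LWPs except
   EXCEPT, first decrementing their suspend counts if UNSUSPEND is
   nonzero. */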
4874 void
4875 linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
4876 {
4877 THREADS_SCOPED_DEBUG_ENTER_EXIT;
4878
4879 if (except)
4880 threads_debug_printf ("except=(LWP %ld)",
4881 lwpid_of (get_lwp_thread (except)));
4882 else
4883 threads_debug_printf ("except=nullptr");
4884
4885 if (unsuspend)
4886 for_each_thread ([&] (thread_info *thread)
4887 {
4888 unsuspend_and_proceed_one_lwp (thread, except);
4889 });
4890 else
4891 for_each_thread ([&] (thread_info *thread)
4892 {
4893 proceed_one_lwp (thread, except);
4894 });
4895 }
4896
4897
4898 #ifdef HAVE_LINUX_REGSETS
4899
4900 #define use_linux_regsets 1
4901
4902 /* Returns true if REGSET has been disabled. */
4903
4904 static int
4905 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4906 {
4907 return (info->disabled_regsets != NULL
4908 && info->disabled_regsets[regset - info->regsets]);
4909 }
4910
4911 /* Disable REGSET. */
4912
4913 static void
4914 disable_regset (struct regsets_info *info, struct regset_info *regset)
4915 {
4916 int dr_offset;
4917
4918 dr_offset = regset - info->regsets;
4919 if (info->disabled_regsets == NULL)
4920 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
4921 info->disabled_regsets[dr_offset] = 1;
4922 }
4923
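/* Fetch all registers covered by REGSETS_INFO into REGCACHE. Return
   0 if the general registers were among the regsets read, or 1 if
   the caller needs to fall back to PTRACE_PEEKUSER. */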
4924 static int
4925 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4926 struct regcache *regcache)
4927 {
4928 struct regset_info *regset;
4929 int saw_general_regs = 0;
4930 int pid;
4931 struct iovec iov;
4932
4933 pid = lwpid_of (current_thread);
4934 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4935 {
4936 void *buf, *data;
4937 int nt_type, res;
4938
4939 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4940 continue;
4941
4942 buf = xmalloc (regset->size);
4943
4944 nt_type = regset->nt_type;
4945 if (nt_type)
4946 {
4947 iov.iov_base = buf;
4948 iov.iov_len = regset->size;
4949 data = (void *) &iov;
4950 }
4951 else
4952 data = buf;
4953
4954 #ifndef __sparc__
4955 res = ptrace (regset->get_request, pid,
4956 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4957 #else
4958 res = ptrace (regset->get_request, pid, data, nt_type);
4959 #endif
4960 if (res < 0)
4961 {
4962 if (errno == EIO
4963 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
4964 {
4965 /* If we get EIO on a regset, or an EINVAL and the regset is
4966 optional, do not try it again for this process mode. */
4967 disable_regset (regsets_info, regset);
4968 }
4969 else if (errno == ENODATA)
4970 {
4971 /* ENODATA may be returned if the regset is currently
4972 not "active". This can happen in normal operation,
4973 so suppress the warning in this case. */
4974 }
4975 else if (errno == ESRCH)
4976 {
4977 /* At this point, ESRCH should mean the process is
4978 already gone, in which case we simply ignore attempts
4979 to read its registers. */
4980 }
4981 else
4982 {
4983 char s[256];
4984 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4985 pid);
4986 perror (s);
4987 }
4988 }
4989 else
4990 {
4991 if (regset->type == GENERAL_REGS)
4992 saw_general_regs = 1;
4993 regset->store_function (regcache, buf);
4994 }
4995 free (buf);
4996 }
4997 if (saw_general_regs)
4998 return 0;
4999 else
5000 return 1;
5001 }
5002
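/* Store all registers covered by REGSETS_INFO from REGCACHE. Return
   0 if the general registers were among the regsets written, or 1
   if the caller needs to fall back to PTRACE_POKEUSER. */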
5003 static int
5004 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5005 struct regcache *regcache)
5006 {
5007 struct regset_info *regset;
5008 int saw_general_regs = 0;
5009 int pid;
5010 struct iovec iov;
5011
5012 pid = lwpid_of (current_thread);
5013 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5014 {
5015 void *buf, *data;
5016 int nt_type, res;
5017
5018 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5019 || regset->fill_function == NULL)
5020 continue;
5021
5022 buf = xmalloc (regset->size);
5023
5024 /* First fill the buffer with the current register set contents,
5025 in case there are any items in the kernel's regset that are
5026 not in gdbserver's regcache. */
5027
5028 nt_type = regset->nt_type;
5029 if (nt_type)
5030 {
5031 iov.iov_base = buf;
5032 iov.iov_len = regset->size;
5033 data = (void *) &iov;
5034 }
5035 else
5036 data = buf;
5037
5038 #ifndef __sparc__
5039 res = ptrace (regset->get_request, pid,
5040 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5041 #else
5042 res = ptrace (regset->get_request, pid, data, nt_type);
5043 #endif
5044
5045 if (res == 0)
5046 {
5047 /* Then overlay our cached registers on that. */
5048 regset->fill_function (regcache, buf);
5049
5050 /* Only now do we write the register set. */
5051 #ifndef __sparc__
5052 res = ptrace (regset->set_request, pid,
5053 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5054 #else
5055 res = ptrace (regset->set_request, pid, data, nt_type);
5056 #endif
5057 }
5058
5059 if (res < 0)
5060 {
5061 if (errno == EIO
5062 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5063 {
5064 /* If we get EIO on a regset, or an EINVAL and the regset is
5065 optional, do not try it again for this process mode. */
5066 disable_regset (regsets_info, regset);
5067 }
5068 else if (errno == ESRCH)
5069 {
5070 /* At this point, ESRCH should mean the process is
5071 already gone, in which case we simply ignore attempts
5072 to change its registers. See also the related
5073 comment in resume_one_lwp. */
5074 free (buf);
5075 return 0;
5076 }
5077 else
5078 {
5079 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5080 }
5081 }
5082 else if (regset->type == GENERAL_REGS)
5083 saw_general_regs = 1;
5084 free (buf);
5085 }
5086 if (saw_general_regs)
5087 return 0;
5088 else
5089 return 1;
5090 }
5091
5092 #else /* !HAVE_LINUX_REGSETS */
5093
5094 #define use_linux_regsets 0
5095 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5096 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5097
5098 #endif
5099
5100 /* Return 1 if register REGNO is supported by one of the regset ptrace
5101 calls or 0 if it has to be transferred individually. */
5102
5103 static int
5104 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5105 {
5106 unsigned char mask = 1 << (regno % 8);
5107 size_t index = regno / 8;
5108
5109 return (use_linux_regsets
5110 && (regs_info->regset_bitmap == NULL
5111 || (regs_info->regset_bitmap[index] & mask) != 0));
5112 }
5113
5114 #ifdef HAVE_LINUX_USRREGS
5115
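/* Return the byte offset in the ptrace USER area where register
   REGNUM is stored, according to USRREGS. */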
5116 static int
5117 register_addr (const struct usrregs_info *usrregs, int regnum)
5118 {
5119 int addr;
5120
5121 if (regnum < 0 || regnum >= usrregs->num_regs)
5122 error ("Invalid register number %d.", regnum);
5123
5124 addr = usrregs->regmap[regnum];
5125
5126 return addr;
5127 }
5128
5129
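/* Fetch register REGNO from the inferior's USER area into REGCACHE,
   one PTRACE_PEEKUSER word at a time. */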
5130 void
5131 linux_process_target::fetch_register (const usrregs_info *usrregs,
5132 regcache *regcache, int regno)
5133 {
5134 CORE_ADDR regaddr;
5135 int i, size;
5136 char *buf;
5137 int pid;
5138
5139 if (regno >= usrregs->num_regs)
5140 return;
5141 if (low_cannot_fetch_register (regno))
5142 return;
5143
5144 regaddr = register_addr (usrregs, regno);
5145 if (regaddr == -1)
5146 return;
5147
5148 size = ((register_size (regcache->tdesc, regno)
5149 + sizeof (PTRACE_XFER_TYPE) - 1)
5150 & -sizeof (PTRACE_XFER_TYPE));
5151 buf = (char *) alloca (size);
5152
5153 pid = lwpid_of (current_thread);
5154 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5155 {
5156 errno = 0;
5157 *(PTRACE_XFER_TYPE *) (buf + i) =
5158 ptrace (PTRACE_PEEKUSER, pid,
5159 /* Coerce to a uintptr_t first to avoid potential gcc warning
5160 of coercing an 8 byte integer to a 4 byte pointer. */
5161 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5162 regaddr += sizeof (PTRACE_XFER_TYPE);
5163 if (errno != 0)
5164 {
5165 /* Mark register REGNO unavailable. */
5166 supply_register (regcache, regno, NULL);
5167 return;
5168 }
5169 }
5170
5171 low_supply_ptrace_register (regcache, regno, buf);
5172 }
5173
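/* Store register REGNO from REGCACHE into the inferior's USER area,
   one PTRACE_POKEUSER word at a time. */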
5174 void
5175 linux_process_target::store_register (const usrregs_info *usrregs,
5176 regcache *regcache, int regno)
5177 {
5178 CORE_ADDR regaddr;
5179 int i, size;
5180 char *buf;
5181 int pid;
5182
5183 if (regno >= usrregs->num_regs)
5184 return;
5185 if (low_cannot_store_register (regno))
5186 return;
5187
5188 regaddr = register_addr (usrregs, regno);
5189 if (regaddr == -1)
5190 return;
5191
5192 size = ((register_size (regcache->tdesc, regno)
5193 + sizeof (PTRACE_XFER_TYPE) - 1)
5194 & -sizeof (PTRACE_XFER_TYPE));
5195 buf = (char *) alloca (size);
5196 memset (buf, 0, size);
5197
5198 low_collect_ptrace_register (regcache, regno, buf);
5199
5200 pid = lwpid_of (current_thread);
5201 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5202 {
5203 errno = 0;
5204 ptrace (PTRACE_POKEUSER, pid,
5205 /* Coerce to a uintptr_t first to avoid potential gcc warning
5206 about coercing an 8 byte integer to a 4 byte pointer. */
5207 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5208 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5209 if (errno != 0)
5210 {
5211 /* At this point, ESRCH should mean the process is
5212 already gone, in which case we simply ignore attempts
5213 to change its registers. See also the related
5214 comment in resume_one_lwp. */
5215 if (errno == ESRCH)
5216 return;
5217
5218
5219 if (!low_cannot_store_register (regno))
5220 error ("writing register %d: %s", regno, safe_strerror (errno));
5221 }
5222 regaddr += sizeof (PTRACE_XFER_TYPE);
5223 }
5224 }
5225 #endif /* HAVE_LINUX_USRREGS */
5226
5227 void
5228 linux_process_target::low_collect_ptrace_register (regcache *regcache,
5229 int regno, char *buf)
5230 {
5231 collect_register (regcache, regno, buf);
5232 }
5233
5234 void
5235 linux_process_target::low_supply_ptrace_register (regcache *regcache,
5236 int regno, const char *buf)
5237 {
5238 supply_register (regcache, regno, buf);
5239 }
5240
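/* Fetch register REGNO, or all registers if REGNO is -1, using
   PTRACE_PEEKUSER. If ALL is nonzero, also fetch the registers that
   the regsets could have provided. */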
5241 void
5242 linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
5243 regcache *regcache,
5244 int regno, int all)
5245 {
5246 #ifdef HAVE_LINUX_USRREGS
5247 struct usrregs_info *usr = regs_info->usrregs;
5248
5249 if (regno == -1)
5250 {
5251 for (regno = 0; regno < usr->num_regs; regno++)
5252 if (all || !linux_register_in_regsets (regs_info, regno))
5253 fetch_register (usr, regcache, regno);
5254 }
5255 else
5256 fetch_register (usr, regcache, regno);
5257 #endif
5258 }
5259
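/* Store register REGNO, or all registers if REGNO is -1, using
   PTRACE_POKEUSER. If ALL is nonzero, also store the registers that
   the regsets could have handled. */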
5260 void
5261 linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
5262 regcache *regcache,
5263 int regno, int all)
5264 {
5265 #ifdef HAVE_LINUX_USRREGS
5266 struct usrregs_info *usr = regs_info->usrregs;
5267
5268 if (regno == -1)
5269 {
5270 for (regno = 0; regno < usr->num_regs; regno++)
5271 if (all || !linux_register_in_regsets (regs_info, regno))
5272 store_register (usr, regcache, regno);
5273 }
5274 else
5275 store_register (usr, regcache, regno);
5276 #endif
5277 }
5278
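/* Implementation of the fetch_registers target op. */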
5279 void
5280 linux_process_target::fetch_registers (regcache *regcache, int regno)
5281 {
5282 int use_regsets;
5283 int all = 0;
5284 const regs_info *regs_info = get_regs_info ();
5285
5286 if (regno == -1)
5287 {
5288 if (regs_info->usrregs != NULL)
5289 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5290 low_fetch_register (regcache, regno);
5291
5292 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5293 if (regs_info->usrregs != NULL)
5294 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5295 }
5296 else
5297 {
5298 if (low_fetch_register (regcache, regno))
5299 return;
5300
5301 use_regsets = linux_register_in_regsets (regs_info, regno);
5302 if (use_regsets)
5303 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5304 regcache);
5305 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5306 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5307 }
5308 }
5309
5310 void
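/* Implementation of the store_registers target op. */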
5311 linux_process_target::store_registers (regcache *regcache, int regno)
5312 {
5313 int use_regsets;
5314 int all = 0;
5315 const regs_info *regs_info = get_regs_info ();
5316
5317 if (regno == -1)
5318 {
5319 all = regsets_store_inferior_registers (regs_info->regsets_info,
5320 regcache);
5321 if (regs_info->usrregs != NULL)
5322 usr_store_inferior_registers (regs_info, regcache, regno, all);
5323 }
5324 else
5325 {
5326 use_regsets = linux_register_in_regsets (regs_info, regno);
5327 if (use_regsets)
5328 all = regsets_store_inferior_registers (regs_info->regsets_info,
5329 regcache);
5330 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5331 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5332 }
5333 }
5334
5335 bool
5336 linux_process_target::low_fetch_register (regcache *regcache, int regno)
5337 {
5338 return false;
5339 }
5340
5341 /* A wrapper for the read_memory target op. */
5342
5343 static int
5344 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5345 {
5346 return the_target->read_memory (memaddr, myaddr, len);
5347 }
5348
5349
5350 /* Helper for read_memory/write_memory using /proc/PID/mem. Because
5351 we can use a single read/write call, this can be much more
5352 efficient than banging away at PTRACE_PEEKTEXT. Also, unlike
5353 PTRACE_PEEKTEXT/PTRACE_POKETEXT, this works with running threads.
5354 One and only one of READBUF and WRITEBUF is non-null. If READBUF is
5355 not null, then we're reading, otherwise we're writing. */
5356
5357 static int
5358 proc_xfer_memory (CORE_ADDR memaddr, unsigned char *readbuf,
5359 const gdb_byte *writebuf, int len)
5360 {
5361 gdb_assert ((readbuf == nullptr) != (writebuf == nullptr));
5362
5363 process_info *proc = current_process ();
5364
5365 int fd = proc->priv->mem_fd;
5366 if (fd == -1)
5367 return EIO;
5368
5369 while (len > 0)
5370 {
5371 int bytes;
5372
5373 /* If pread64 is available, use it. It's faster if the kernel
5374 supports it (only one syscall), and it's 64-bit safe even on
5375 32-bit platforms (for instance, SPARC debugging a SPARC64
5376 application). */
5377 #ifdef HAVE_PREAD64
5378 bytes = (readbuf != nullptr
5379 ? pread64 (fd, readbuf, len, memaddr)
5380 : pwrite64 (fd, writebuf, len, memaddr));
5381 #else
5382 bytes = -1;
5383 if (lseek (fd, memaddr, SEEK_SET) != -1)
5384 bytes = (readbuf != nullptr
5385 ? read (fd, readbuf, len)
5386 : write (fd, writebuf, len));
5387 #endif
5388
5389 if (bytes < 0)
5390 return errno;
5391 else if (bytes == 0)
5392 {
5393 /* EOF means the address space is gone, the whole process
5394 exited or execed. */
5395 return EIO;
5396 }
5397
5398 memaddr += bytes;
5399 if (readbuf != nullptr)
5400 readbuf += bytes;
5401 else
5402 writebuf += bytes;
5403 len -= bytes;
5404 }
5405
5406 return 0;
5407 }
5408
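/* Copy LEN bytes from the inferior's memory starting at MEMADDR to
   debugger memory at MYADDR. Return 0 on success, or an errno value
   on failure. */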
5409 int
5410 linux_process_target::read_memory (CORE_ADDR memaddr,
5411 unsigned char *myaddr, int len)
5412 {
5413 return proc_xfer_memory (memaddr, myaddr, nullptr, len);
5414 }
5415
5416 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5417 memory at MEMADDR. On failure (cannot write to the inferior)
5418 returns the value of errno. Always succeeds if LEN is zero. */
5419
5420 int
5421 linux_process_target::write_memory (CORE_ADDR memaddr,
5422 const unsigned char *myaddr, int len)
5423 {
5424 if (debug_threads)
5425 {
5426 /* Dump up to four bytes. */
5427 char str[4 * 2 + 1];
5428 char *p = str;
5429 int dump = len < 4 ? len : 4;
5430
5431 for (int i = 0; i < dump; i++)
5432 {
5433 sprintf (p, "%02x", myaddr[i]);
5434 p += 2;
5435 }
5436 *p = '\0';
5437
5438 threads_debug_printf ("Writing %s to 0x%08lx in process %d",
5439 str, (long) memaddr, current_process ()->pid);
5440 }
5441
5442 return proc_xfer_memory (memaddr, nullptr, myaddr, len);
5443 }
5444
5445 void
5446 linux_process_target::look_up_symbols ()
5447 {
5448 #ifdef USE_THREAD_DB
5449 struct process_info *proc = current_process ();
5450
5451 if (proc->priv->thread_db != NULL)
5452 return;
5453
5454 thread_db_init ();
5455 #endif
5456 }
5457
5458 void
5459 linux_process_target::request_interrupt ()
5460 {
5461 /* Send a SIGINT to the process group. This acts just like the user
5462 typed a ^C on the controlling terminal. */
5463 ::kill (-signal_pid, SIGINT);
5464 }
5465
5466 bool
5467 linux_process_target::supports_read_auxv ()
5468 {
5469 return true;
5470 }
5471
5472 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5473 to debugger memory starting at MYADDR. */
5474
5475 int
5476 linux_process_target::read_auxv (CORE_ADDR offset, unsigned char *myaddr,
5477 unsigned int len)
5478 {
5479 char filename[PATH_MAX];
5480 int fd, n;
5481 int pid = lwpid_of (current_thread);
5482
5483 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5484
5485 fd = open (filename, O_RDONLY);
5486 if (fd < 0)
5487 return -1;
5488
5489 if (offset != (CORE_ADDR) 0
5490 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5491 n = -1;
5492 else
5493 n = read (fd, myaddr, len);
5494
5495 close (fd);
5496
5497 return n;
5498 }
5499
5500 int
5501 linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5502 int size, raw_breakpoint *bp)
5503 {
5504 if (type == raw_bkpt_type_sw)
5505 return insert_memory_breakpoint (bp);
5506 else
5507 return low_insert_point (type, addr, size, bp);
5508 }
5509
5510 int
5511 linux_process_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
5512 int size, raw_breakpoint *bp)
5513 {
5514 /* Unsupported (see target.h). */
5515 return 1;
5516 }
5517
5518 int
5519 linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5520 int size, raw_breakpoint *bp)
5521 {
5522 if (type == raw_bkpt_type_sw)
5523 return remove_memory_breakpoint (bp);
5524 else
5525 return low_remove_point (type, addr, size, bp);
5526 }
5527
5528 int
5529 linux_process_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
5530 int size, raw_breakpoint *bp)
5531 {
5532 /* Unsupported (see target.h). */
5533 return 1;
5534 }
5535
5536 /* Implement the stopped_by_sw_breakpoint target_ops
5537 method. */
5538
5539 bool
5540 linux_process_target::stopped_by_sw_breakpoint ()
5541 {
5542 struct lwp_info *lwp = get_thread_lwp (current_thread);
5543
5544 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5545 }
5546
5547 /* Implement the supports_stopped_by_sw_breakpoint target_ops
5548 method. */
5549
5550 bool
5551 linux_process_target::supports_stopped_by_sw_breakpoint ()
5552 {
5553 return USE_SIGTRAP_SIGINFO;
5554 }
5555
5556 /* Implement the stopped_by_hw_breakpoint target_ops
5557 method. */
5558
5559 bool
5560 linux_process_target::stopped_by_hw_breakpoint ()
5561 {
5562 struct lwp_info *lwp = get_thread_lwp (current_thread);
5563
5564 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5565 }
5566
5567 /* Implement the supports_stopped_by_hw_breakpoint target_ops
5568 method. */
5569
5570 bool
5571 linux_process_target::supports_stopped_by_hw_breakpoint ()
5572 {
5573 return USE_SIGTRAP_SIGINFO;
5574 }
5575
5576 /* Implement the supports_hardware_single_step target_ops method. */
5577
5578 bool
5579 linux_process_target::supports_hardware_single_step ()
5580 {
5581 return true;
5582 }
5583
5584 bool
5585 linux_process_target::stopped_by_watchpoint ()
5586 {
5587 struct lwp_info *lwp = get_thread_lwp (current_thread);
5588
5589 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5590 }
5591
5592 CORE_ADDR
5593 linux_process_target::stopped_data_address ()
5594 {
5595 struct lwp_info *lwp = get_thread_lwp (current_thread);
5596
5597 return lwp->stopped_data_address;
5598 }
5599
5600 /* This is only used for targets that define PT_TEXT_ADDR,
5601 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
5602 the target has different ways of acquiring this information, like
5603 loadmaps. */
5604
5605 bool
5606 linux_process_target::supports_read_offsets ()
5607 {
5608 #ifdef SUPPORTS_READ_OFFSETS
5609 return true;
5610 #else
5611 return false;
5612 #endif
5613 }
5614
5615 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5616 to tell gdb about. */
5617
5618 int
5619 linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5620 {
5621 #ifdef SUPPORTS_READ_OFFSETS
5622 unsigned long text, text_end, data;
5623 int pid = lwpid_of (current_thread);
5624
5625 errno = 0;
5626
5627 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5628 (PTRACE_TYPE_ARG4) 0);
5629 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5630 (PTRACE_TYPE_ARG4) 0);
5631 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5632 (PTRACE_TYPE_ARG4) 0);
5633
5634 if (errno == 0)
5635 {
5636 /* Both text and data offsets produced at compile-time (and so
5637 used by gdb) are relative to the beginning of the program,
5638 with the data segment immediately following the text segment.
5639 However, the actual runtime layout in memory may put the data
5640 somewhere else, so when we send gdb a data base-address, we
5641 use the real data base address and subtract the compile-time
5642 data base-address from it (which is just the length of the
5643 text segment). BSS immediately follows data in both
5644 cases. */
5645 *text_p = text;
5646 *data_p = data - (text_end - text);
5647
5648 return 1;
5649 }
5650 return 0;
5651 #else
5652 gdb_assert_not_reached ("target op read_offsets not supported");
5653 #endif
5654 }
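
/* Editorial note: a worked example of the offset computation above
   (hypothetical values, for illustration only).  Suppose ptrace
   reports text = 0x10000, text_end = 0x14000 and data = 0x80000.
   The text segment length is text_end - text = 0x4000, so gdb is
   told *text_p = 0x10000 and *data_p = 0x80000 - 0x4000 = 0x7c000;
   adding the compile-time data offset (0x4000) back to *data_p then
   reproduces the real runtime data address.  */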
5655
5656 bool
5657 linux_process_target::supports_get_tls_address ()
5658 {
5659 #ifdef USE_THREAD_DB
5660 return true;
5661 #else
5662 return false;
5663 #endif
5664 }
5665
5666 int
5667 linux_process_target::get_tls_address (thread_info *thread,
5668 CORE_ADDR offset,
5669 CORE_ADDR load_module,
5670 CORE_ADDR *address)
5671 {
5672 #ifdef USE_THREAD_DB
5673 return thread_db_get_tls_address (thread, offset, load_module, address);
5674 #else
5675 return -1;
5676 #endif
5677 }
5678
5679 bool
5680 linux_process_target::supports_qxfer_osdata ()
5681 {
5682 return true;
5683 }
5684
5685 int
5686 linux_process_target::qxfer_osdata (const char *annex,
5687 unsigned char *readbuf,
5688 unsigned const char *writebuf,
5689 CORE_ADDR offset, int len)
5690 {
5691 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5692 }
5693
5694 void
5695 linux_process_target::siginfo_fixup (siginfo_t *siginfo,
5696 gdb_byte *inf_siginfo, int direction)
5697 {
5698 bool done = low_siginfo_fixup (siginfo, inf_siginfo, direction);
5699
5700 /* If there was no callback, or the callback didn't do anything,
5701 then just do a straight memcpy. */
5702 if (!done)
5703 {
5704 if (direction == 1)
5705 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5706 else
5707 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5708 }
5709 }
5710
5711 bool
5712 linux_process_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
5713 int direction)
5714 {
5715 return false;
5716 }
5717
5718 bool
5719 linux_process_target::supports_qxfer_siginfo ()
5720 {
5721 return true;
5722 }
5723
5724 int
5725 linux_process_target::qxfer_siginfo (const char *annex,
5726 unsigned char *readbuf,
5727 unsigned const char *writebuf,
5728 CORE_ADDR offset, int len)
5729 {
5730 int pid;
5731 siginfo_t siginfo;
5732 gdb_byte inf_siginfo[sizeof (siginfo_t)];
5733
5734 if (current_thread == NULL)
5735 return -1;
5736
5737 pid = lwpid_of (current_thread);
5738
5739 threads_debug_printf ("%s siginfo for lwp %d.",
5740 readbuf != NULL ? "Reading" : "Writing",
5741 pid);
5742
5743 if (offset >= sizeof (siginfo))
5744 return -1;
5745
5746 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5747 return -1;
5748
5749 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5750 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5751 inferior with a 64-bit GDBSERVER should look the same as debugging it
5752 with a 32-bit GDBSERVER, we need to convert it. */
5753 siginfo_fixup (&siginfo, inf_siginfo, 0);
5754
5755 if (offset + len > sizeof (siginfo))
5756 len = sizeof (siginfo) - offset;
5757
5758 if (readbuf != NULL)
5759 memcpy (readbuf, inf_siginfo + offset, len);
5760 else
5761 {
5762 memcpy (inf_siginfo + offset, writebuf, len);
5763
5764 /* Convert back to ptrace layout before flushing it out. */
5765 siginfo_fixup (&siginfo, inf_siginfo, 1);
5766
5767 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5768 return -1;
5769 }
5770
5771 return len;
5772 }
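
/* Editorial note: a sketch of the siginfo_fixup direction convention,
   as inferred from the calls in qxfer_siginfo above.  Direction 0
   converts the ptrace (native) layout into the inferior's layout;
   direction 1 converts back:

     siginfo_t native;
     gdb_byte inf[sizeof (siginfo_t)];

     siginfo_fixup (&native, inf, 0);	/* native -> inferior layout */
     siginfo_fixup (&native, inf, 1);	/* inferior layout -> native */

   On targets whose low_siginfo_fixup returns false, both directions
   degenerate to a plain memcpy.  */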
5773
5774 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
5775    it lets us notice when children change state; and it acts as the
5776    handler for the sigsuspend in my_waitpid.  */
5777
5778 static void
5779 sigchld_handler (int signo)
5780 {
5781 int old_errno = errno;
5782
5783 if (debug_threads)
5784 {
5785 do
5786 {
5787 /* Use the async signal safe debug function. */
5788 if (debug_write ("sigchld_handler\n",
5789 sizeof ("sigchld_handler\n") - 1) < 0)
5790 break; /* just ignore */
5791 } while (0);
5792 }
5793
5794 if (target_is_async_p ())
5795 async_file_mark (); /* trigger a linux_wait */
5796
5797 errno = old_errno;
5798 }
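
/* Editorial note: sigchld_handler deliberately restricts itself to
   async-signal-safe operations -- a raw write to the debug fd and a
   write to the event pipe (async_file_mark) -- since functions such
   as printf or malloc must not be called from a signal handler.  The
   event loop later drains the pipe and performs the actual wait from
   normal context.  */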
5799
5800 bool
5801 linux_process_target::supports_non_stop ()
5802 {
5803 return true;
5804 }
5805
5806 bool
5807 linux_process_target::async (bool enable)
5808 {
5809 bool previous = target_is_async_p ();
5810
5811 threads_debug_printf ("async (%d), previous=%d",
5812 enable, previous);
5813
5814 if (previous != enable)
5815 {
5816 sigset_t mask;
5817 sigemptyset (&mask);
5818 sigaddset (&mask, SIGCHLD);
5819
5820 gdb_sigmask (SIG_BLOCK, &mask, NULL);
5821
5822 if (enable)
5823 {
5824 if (!linux_event_pipe.open_pipe ())
5825 {
5826 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
5827
5828 warning ("creating event pipe failed.");
5829 return previous;
5830 }
5831
5832 /* Register the event loop handler. */
5833 add_file_handler (linux_event_pipe.event_fd (),
5834 handle_target_event, NULL,
5835 "linux-low");
5836
5837 /* Always trigger a linux_wait. */
5838 async_file_mark ();
5839 }
5840 else
5841 {
5842 delete_file_handler (linux_event_pipe.event_fd ());
5843
5844 linux_event_pipe.close_pipe ();
5845 }
5846
5847 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
5848 }
5849
5850 return previous;
5851 }
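
/* Editorial note: a minimal usage sketch.  The previous state is
   returned so a caller can toggle async mode and restore it later:

     bool prev = the_target->async (true);	/* enable async mode */
     ...
     the_target->async (prev);			/* restore old mode */

   start_non_stop below reaches this hook via target_async.  */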
5852
5853 int
5854 linux_process_target::start_non_stop (bool nonstop)
5855 {
5856 /* Register or unregister from event-loop accordingly. */
5857 target_async (nonstop);
5858
5859 if (target_is_async_p () != (nonstop != false))
5860 return -1;
5861
5862 return 0;
5863 }
5864
5865 bool
5866 linux_process_target::supports_multi_process ()
5867 {
5868 return true;
5869 }
5870
5871 /* Check if fork events are supported. */
5872
5873 bool
5874 linux_process_target::supports_fork_events ()
5875 {
5876 return true;
5877 }
5878
5879 /* Check if vfork events are supported. */
5880
5881 bool
5882 linux_process_target::supports_vfork_events ()
5883 {
5884 return true;
5885 }
5886
5887 /* Check if exec events are supported. */
5888
5889 bool
5890 linux_process_target::supports_exec_events ()
5891 {
5892 return true;
5893 }
5894
5895 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
5896 ptrace flags for all inferiors. This is in case the new GDB connection
5897 doesn't support the same set of events that the previous one did. */
5898
5899 void
5900 linux_process_target::handle_new_gdb_connection ()
5901 {
5902 /* Request that all the lwps reset their ptrace options. */
5903 for_each_thread ([] (thread_info *thread)
5904 {
5905 struct lwp_info *lwp = get_thread_lwp (thread);
5906
5907 if (!lwp->stopped)
5908 {
5909 /* Stop the lwp so we can modify its ptrace options. */
5910 lwp->must_set_ptrace_flags = 1;
5911 linux_stop_lwp (lwp);
5912 }
5913 else
5914 {
5915 /* Already stopped; go ahead and set the ptrace options. */
5916 struct process_info *proc = find_process_pid (pid_of (thread));
5917 int options = linux_low_ptrace_options (proc->attached);
5918
5919 linux_enable_event_reporting (lwpid_of (thread), options);
5920 lwp->must_set_ptrace_flags = 0;
5921 }
5922 });
5923 }
5924
5925 int
5926 linux_process_target::handle_monitor_command (char *mon)
5927 {
5928 #ifdef USE_THREAD_DB
5929 return thread_db_handle_monitor_command (mon);
5930 #else
5931 return 0;
5932 #endif
5933 }
5934
5935 int
5936 linux_process_target::core_of_thread (ptid_t ptid)
5937 {
5938 return linux_common_core_of_thread (ptid);
5939 }
5940
5941 bool
5942 linux_process_target::supports_disable_randomization ()
5943 {
5944 return true;
5945 }
5946
5947 bool
5948 linux_process_target::supports_agent ()
5949 {
5950 return true;
5951 }
5952
5953 bool
5954 linux_process_target::supports_range_stepping ()
5955 {
5956 if (supports_software_single_step ())
5957 return true;
5958
5959 return low_supports_range_stepping ();
5960 }
5961
5962 bool
5963 linux_process_target::low_supports_range_stepping ()
5964 {
5965 return false;
5966 }
5967
5968 bool
5969 linux_process_target::supports_pid_to_exec_file ()
5970 {
5971 return true;
5972 }
5973
5974 const char *
5975 linux_process_target::pid_to_exec_file (int pid)
5976 {
5977 return linux_proc_pid_to_exec_file (pid);
5978 }
5979
5980 bool
5981 linux_process_target::supports_multifs ()
5982 {
5983 return true;
5984 }
5985
5986 int
5987 linux_process_target::multifs_open (int pid, const char *filename,
5988 int flags, mode_t mode)
5989 {
5990 return linux_mntns_open_cloexec (pid, filename, flags, mode);
5991 }
5992
5993 int
5994 linux_process_target::multifs_unlink (int pid, const char *filename)
5995 {
5996 return linux_mntns_unlink (pid, filename);
5997 }
5998
5999 ssize_t
6000 linux_process_target::multifs_readlink (int pid, const char *filename,
6001 char *buf, size_t bufsiz)
6002 {
6003 return linux_mntns_readlink (pid, filename, buf, bufsiz);
6004 }
6005
6006 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6007 struct target_loadseg
6008 {
6009 /* Core address to which the segment is mapped. */
6010 Elf32_Addr addr;
6011 /* VMA recorded in the program header. */
6012 Elf32_Addr p_vaddr;
6013 /* Size of this segment in memory. */
6014 Elf32_Word p_memsz;
6015 };
6016
6017 # if defined PT_GETDSBT
6018 struct target_loadmap
6019 {
6020 /* Protocol version number, must be zero. */
6021 Elf32_Word version;
6022 /* Pointer to the DSBT table, its size, and the DSBT index. */
6023 unsigned *dsbt_table;
6024 unsigned dsbt_size, dsbt_index;
6025 /* Number of segments in this map. */
6026 Elf32_Word nsegs;
6027 /* The actual memory map. */
6028 struct target_loadseg segs[/*nsegs*/];
6029 };
6030 # define LINUX_LOADMAP PT_GETDSBT
6031 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6032 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6033 # else
6034 struct target_loadmap
6035 {
6036 /* Protocol version number, must be zero. */
6037 Elf32_Half version;
6038 /* Number of segments in this map. */
6039 Elf32_Half nsegs;
6040 /* The actual memory map. */
6041 struct target_loadseg segs[/*nsegs*/];
6042 };
6043 # define LINUX_LOADMAP PTRACE_GETFDPIC
6044 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6045 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6046 # endif
6047
6048 bool
6049 linux_process_target::supports_read_loadmap ()
6050 {
6051 return true;
6052 }
6053
6054 int
6055 linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
6056 unsigned char *myaddr, unsigned int len)
6057 {
6058 int pid = lwpid_of (current_thread);
6059 int addr = -1;
6060 struct target_loadmap *data = NULL;
6061 unsigned int actual_length, copy_length;
6062
6063 if (strcmp (annex, "exec") == 0)
6064 addr = (int) LINUX_LOADMAP_EXEC;
6065 else if (strcmp (annex, "interp") == 0)
6066 addr = (int) LINUX_LOADMAP_INTERP;
6067 else
6068 return -1;
6069
6070 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6071 return -1;
6072
6073 if (data == NULL)
6074 return -1;
6075
6076 actual_length = sizeof (struct target_loadmap)
6077 + sizeof (struct target_loadseg) * data->nsegs;
6078
6079 if (offset < 0 || offset > actual_length)
6080 return -1;
6081
6082 copy_length = actual_length - offset < len ? actual_length - offset : len;
6083 memcpy (myaddr, (char *) data + offset, copy_length);
6084 return copy_length;
6085 }
6086 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
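
/* Editorial note: a sketch of what read_loadmap returns (FDPIC
   variant; all values hypothetical).  The reply is the raw
   target_loadmap structure, e.g. for two segments:

     version = 0, nsegs = 2
     segs[0] = { addr = 0x40000000, p_vaddr = 0x0,    p_memsz = 0x4000 }
     segs[1] = { addr = 0x40008000, p_vaddr = 0x4000, p_memsz = 0x1000 }

   The addr/p_vaddr pairs let the debugger relocate each segment
   independently, which is what FDPIC/DSBT targets require.  */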
6087
6088 bool
6089 linux_process_target::supports_catch_syscall ()
6090 {
6091 return low_supports_catch_syscall ();
6092 }
6093
6094 bool
6095 linux_process_target::low_supports_catch_syscall ()
6096 {
6097 return false;
6098 }
6099
6100 CORE_ADDR
6101 linux_process_target::read_pc (regcache *regcache)
6102 {
6103 if (!low_supports_breakpoints ())
6104 return 0;
6105
6106 return low_get_pc (regcache);
6107 }
6108
6109 void
6110 linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
6111 {
6112 gdb_assert (low_supports_breakpoints ());
6113
6114 low_set_pc (regcache, pc);
6115 }
6116
6117 bool
6118 linux_process_target::supports_thread_stopped ()
6119 {
6120 return true;
6121 }
6122
6123 bool
6124 linux_process_target::thread_stopped (thread_info *thread)
6125 {
6126 return get_thread_lwp (thread)->stopped;
6127 }
6128
6129 /* This exposes stop-all-threads functionality to other modules. */
6130
6131 void
6132 linux_process_target::pause_all (bool freeze)
6133 {
6134 stop_all_lwps (freeze, NULL);
6135 }
6136
6137 /* This exposes unstop-all-threads functionality to other gdbserver
6138 modules. */
6139
6140 void
6141 linux_process_target::unpause_all (bool unfreeze)
6142 {
6143 unstop_all_lwps (unfreeze, NULL);
6144 }
6145
6146 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6147
6148 static int
6149 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6150 CORE_ADDR *phdr_memaddr, int *num_phdr)
6151 {
6152 char filename[PATH_MAX];
6153 int fd;
6154 const int auxv_size = is_elf64
6155 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6156 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6157
6158 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6159
6160 fd = open (filename, O_RDONLY);
6161 if (fd < 0)
6162 return 1;
6163
6164 *phdr_memaddr = 0;
6165 *num_phdr = 0;
6166 while (read (fd, buf, auxv_size) == auxv_size
6167 && (*phdr_memaddr == 0 || *num_phdr == 0))
6168 {
6169 if (is_elf64)
6170 {
6171 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6172
6173 switch (aux->a_type)
6174 {
6175 case AT_PHDR:
6176 *phdr_memaddr = aux->a_un.a_val;
6177 break;
6178 case AT_PHNUM:
6179 *num_phdr = aux->a_un.a_val;
6180 break;
6181 }
6182 }
6183 else
6184 {
6185 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6186
6187 switch (aux->a_type)
6188 {
6189 case AT_PHDR:
6190 *phdr_memaddr = aux->a_un.a_val;
6191 break;
6192 case AT_PHNUM:
6193 *num_phdr = aux->a_un.a_val;
6194 break;
6195 }
6196 }
6197 }
6198
6199 close (fd);
6200
6201 if (*phdr_memaddr == 0 || *num_phdr == 0)
6202 {
6203 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6204 "phdr_memaddr = %ld, phdr_num = %d",
6205 (long) *phdr_memaddr, *num_phdr);
6206 return 2;
6207 }
6208
6209 return 0;
6210 }
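
/* Editorial note: the auxv records scanned above are (a_type, a_val)
   pairs terminated by an AT_NULL entry.  For example (hypothetical
   64-bit dump):

     AT_PHDR  (3)  0x555555554040	<- *phdr_memaddr
     AT_PHNUM (5)  13			<- *num_phdr
     ...
     AT_NULL  (0)  0

   Both values must be found for the function to return success.  */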
6211
6212 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6213
6214 static CORE_ADDR
6215 get_dynamic (const int pid, const int is_elf64)
6216 {
6217 CORE_ADDR phdr_memaddr, relocation;
6218 int num_phdr, i;
6219 unsigned char *phdr_buf;
6220 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6221
6222 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6223 return 0;
6224
6225 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6226 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6227
6228 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6229 return 0;
6230
6231 /* Compute relocation: it is expected to be 0 for "regular" executables,
6232 non-zero for PIE ones. */
6233 relocation = -1;
6234 for (i = 0; relocation == -1 && i < num_phdr; i++)
6235 if (is_elf64)
6236 {
6237 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6238
6239 if (p->p_type == PT_PHDR)
6240 relocation = phdr_memaddr - p->p_vaddr;
6241 }
6242 else
6243 {
6244 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6245
6246 if (p->p_type == PT_PHDR)
6247 relocation = phdr_memaddr - p->p_vaddr;
6248 }
6249
6250 if (relocation == -1)
6251 {
6252       /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately,
6253	  real-world executables, including PIE executables, always have
6254	  PT_PHDR present.  PT_PHDR is absent from some shared libraries and
6255	  from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6256	  provides DT_DEBUG anyway (fpc binaries are statically linked).
6257
6258	  Therefore, wherever DT_DEBUG exists, PT_PHDR exists as well.
6259
6260	  GDB could also derive RELOCATION from AT_ENTRY - e_entry.  */
6261
6262 return 0;
6263 }
6264
6265 for (i = 0; i < num_phdr; i++)
6266 {
6267 if (is_elf64)
6268 {
6269 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6270
6271 if (p->p_type == PT_DYNAMIC)
6272 return p->p_vaddr + relocation;
6273 }
6274 else
6275 {
6276 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6277
6278 if (p->p_type == PT_DYNAMIC)
6279 return p->p_vaddr + relocation;
6280 }
6281 }
6282
6283 return 0;
6284 }
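
/* Editorial note: a worked example of the relocation computed above
   (hypothetical PIE values).  If the PT_PHDR program header records
   p_vaddr = 0x40 while the auxv reports AT_PHDR at 0x555555554040,
   then relocation = 0x555555554040 - 0x40 = 0x555555554000, and a
   PT_DYNAMIC header with p_vaddr = 0x2d80 yields &_DYNAMIC at
   0x555555556d80.  For non-PIE executables the relocation is 0.  */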
6285
6286 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6287 can be 0 if the inferior does not yet have the library list initialized.
6288 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6289 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6290
6291 static CORE_ADDR
6292 get_r_debug (const int pid, const int is_elf64)
6293 {
6294 CORE_ADDR dynamic_memaddr;
6295 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6296 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6297 CORE_ADDR map = -1;
6298
6299 dynamic_memaddr = get_dynamic (pid, is_elf64);
6300 if (dynamic_memaddr == 0)
6301 return map;
6302
6303 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6304 {
6305 if (is_elf64)
6306 {
6307 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6308 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6309 union
6310 {
6311 Elf64_Xword map;
6312 unsigned char buf[sizeof (Elf64_Xword)];
6313 }
6314 rld_map;
6315 #endif
6316 #ifdef DT_MIPS_RLD_MAP
6317 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6318 {
6319 if (linux_read_memory (dyn->d_un.d_val,
6320 rld_map.buf, sizeof (rld_map.buf)) == 0)
6321 return rld_map.map;
6322 else
6323 break;
6324 }
6325 #endif /* DT_MIPS_RLD_MAP */
6326 #ifdef DT_MIPS_RLD_MAP_REL
6327 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6328 {
6329 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6330 rld_map.buf, sizeof (rld_map.buf)) == 0)
6331 return rld_map.map;
6332 else
6333 break;
6334 }
6335 #endif /* DT_MIPS_RLD_MAP_REL */
6336
6337 if (dyn->d_tag == DT_DEBUG && map == -1)
6338 map = dyn->d_un.d_val;
6339
6340 if (dyn->d_tag == DT_NULL)
6341 break;
6342 }
6343 else
6344 {
6345 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6346 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6347 union
6348 {
6349 Elf32_Word map;
6350 unsigned char buf[sizeof (Elf32_Word)];
6351 }
6352 rld_map;
6353 #endif
6354 #ifdef DT_MIPS_RLD_MAP
6355 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6356 {
6357 if (linux_read_memory (dyn->d_un.d_val,
6358 rld_map.buf, sizeof (rld_map.buf)) == 0)
6359 return rld_map.map;
6360 else
6361 break;
6362 }
6363 #endif /* DT_MIPS_RLD_MAP */
6364 #ifdef DT_MIPS_RLD_MAP_REL
6365 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6366 {
6367 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6368 rld_map.buf, sizeof (rld_map.buf)) == 0)
6369 return rld_map.map;
6370 else
6371 break;
6372 }
6373 #endif /* DT_MIPS_RLD_MAP_REL */
6374
6375 if (dyn->d_tag == DT_DEBUG && map == -1)
6376 map = dyn->d_un.d_val;
6377
6378 if (dyn->d_tag == DT_NULL)
6379 break;
6380 }
6381
6382 dynamic_memaddr += dyn_size;
6383 }
6384
6385 return map;
6386 }
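
/* Editorial note: the discovery chain implemented above, in short:
   /proc/PID/auxv gives AT_PHDR/AT_PHNUM; the program headers give
   PT_DYNAMIC (get_dynamic); the dynamic section's DT_DEBUG entry (or
   DT_MIPS_RLD_MAP / DT_MIPS_RLD_MAP_REL on MIPS) then holds the
   address of _r_debug, from which the caller walks the link_map
   list.  */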
6387
6388 /* Read one pointer from MEMADDR in the inferior. */
6389
6390 static int
6391 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6392 {
6393 int ret;
6394
6395   /* Go through a union so this works on both big and little endian
6396      hosts, when the inferior's pointer size is smaller than the size
6397      of CORE_ADDR.  It is assumed that the inferior's endianness is
6398      the same as the superior's.  */
6399 union
6400 {
6401 CORE_ADDR core_addr;
6402 unsigned int ui;
6403 unsigned char uc;
6404 } addr;
6405
6406 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6407 if (ret == 0)
6408 {
6409 if (ptr_size == sizeof (CORE_ADDR))
6410 *ptr = addr.core_addr;
6411 else if (ptr_size == sizeof (unsigned int))
6412 *ptr = addr.ui;
6413 else
6414 gdb_assert_not_reached ("unhandled pointer size");
6415 }
6416 return ret;
6417 }
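
/* Editorial note: why the union above works -- a sketch.  With an
   8-byte CORE_ADDR and a 4-byte inferior pointer, linux_read_memory
   fills only the first 4 bytes of the union, which is exactly the
   storage of addr.ui; so *ptr = addr.ui is correct on both big- and
   little-endian hosts.  Reading addr.core_addr instead would pick up
   4 uninitialized bytes (little endian) or leave the value in the
   high half (big endian), hence the selection by pointer size.  */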
6418
6419 bool
6420 linux_process_target::supports_qxfer_libraries_svr4 ()
6421 {
6422 return true;
6423 }
6424
6425 struct link_map_offsets
6426 {
6427 /* Offset and size of r_debug.r_version. */
6428 int r_version_offset;
6429
6430 /* Offset and size of r_debug.r_map. */
6431 int r_map_offset;
6432
6433 /* Offset to l_addr field in struct link_map. */
6434 int l_addr_offset;
6435
6436 /* Offset to l_name field in struct link_map. */
6437 int l_name_offset;
6438
6439 /* Offset to l_ld field in struct link_map. */
6440 int l_ld_offset;
6441
6442 /* Offset to l_next field in struct link_map. */
6443 int l_next_offset;
6444
6445 /* Offset to l_prev field in struct link_map. */
6446 int l_prev_offset;
6447 };
6448
6449 /* Construct qXfer:libraries-svr4:read reply. */
6450
6451 int
6452 linux_process_target::qxfer_libraries_svr4 (const char *annex,
6453 unsigned char *readbuf,
6454 unsigned const char *writebuf,
6455 CORE_ADDR offset, int len)
6456 {
6457 struct process_info_private *const priv = current_process ()->priv;
6458 char filename[PATH_MAX];
6459 int pid, is_elf64;
6460
6461 static const struct link_map_offsets lmo_32bit_offsets =
6462 {
6463 0, /* r_version offset. */
6464 4, /* r_debug.r_map offset. */
6465 0, /* l_addr offset in link_map. */
6466 4, /* l_name offset in link_map. */
6467 8, /* l_ld offset in link_map. */
6468 12, /* l_next offset in link_map. */
6469 16 /* l_prev offset in link_map. */
6470 };
6471
6472 static const struct link_map_offsets lmo_64bit_offsets =
6473 {
6474 0, /* r_version offset. */
6475 8, /* r_debug.r_map offset. */
6476 0, /* l_addr offset in link_map. */
6477 8, /* l_name offset in link_map. */
6478 16, /* l_ld offset in link_map. */
6479 24, /* l_next offset in link_map. */
6480 32 /* l_prev offset in link_map. */
6481 };
6482 const struct link_map_offsets *lmo;
6483 unsigned int machine;
6484 int ptr_size;
6485 CORE_ADDR lm_addr = 0, lm_prev = 0;
6486 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6487 int header_done = 0;
6488
6489 if (writebuf != NULL)
6490 return -2;
6491 if (readbuf == NULL)
6492 return -1;
6493
6494 pid = lwpid_of (current_thread);
6495 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6496 is_elf64 = elf_64_file_p (filename, &machine);
6497 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6498 ptr_size = is_elf64 ? 8 : 4;
6499
6500 while (annex[0] != '\0')
6501 {
6502 const char *sep;
6503 CORE_ADDR *addrp;
6504 int name_len;
6505
6506 sep = strchr (annex, '=');
6507 if (sep == NULL)
6508 break;
6509
6510 name_len = sep - annex;
6511 if (name_len == 5 && startswith (annex, "start"))
6512 addrp = &lm_addr;
6513 else if (name_len == 4 && startswith (annex, "prev"))
6514 addrp = &lm_prev;
6515 else
6516 {
6517 annex = strchr (sep, ';');
6518 if (annex == NULL)
6519 break;
6520 annex++;
6521 continue;
6522 }
6523
6524 annex = decode_address_to_semicolon (addrp, sep + 1);
6525 }
6526
6527 if (lm_addr == 0)
6528 {
6529 int r_version = 0;
6530
6531 if (priv->r_debug == 0)
6532 priv->r_debug = get_r_debug (pid, is_elf64);
6533
6534       /* We failed to find DT_DEBUG.  That situation will not change
6535	  for this inferior, so do not retry.  Report it to GDB as E01;
6536	  see GDB's solib-svr4.c for the reasons.  */
6537 if (priv->r_debug == (CORE_ADDR) -1)
6538 return -1;
6539
6540 if (priv->r_debug != 0)
6541 {
6542 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6543 (unsigned char *) &r_version,
6544 sizeof (r_version)) != 0
6545 || r_version < 1)
6546 {
6547 warning ("unexpected r_debug version %d", r_version);
6548 }
6549 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6550 &lm_addr, ptr_size) != 0)
6551 {
6552 warning ("unable to read r_map from 0x%lx",
6553 (long) priv->r_debug + lmo->r_map_offset);
6554 }
6555 }
6556 }
6557
6558 std::string document = "<library-list-svr4 version=\"1.0\"";
6559
6560 while (lm_addr
6561 && read_one_ptr (lm_addr + lmo->l_name_offset,
6562 &l_name, ptr_size) == 0
6563 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6564 &l_addr, ptr_size) == 0
6565 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6566 &l_ld, ptr_size) == 0
6567 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6568 &l_prev, ptr_size) == 0
6569 && read_one_ptr (lm_addr + lmo->l_next_offset,
6570 &l_next, ptr_size) == 0)
6571 {
6572 unsigned char libname[PATH_MAX];
6573
6574 if (lm_prev != l_prev)
6575 {
6576 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6577 (long) lm_prev, (long) l_prev);
6578 break;
6579 }
6580
6581       /* Ignore the first entry even if it has a valid name, as the first
6582	  entry corresponds to the main executable.  The first entry should
6583	  not be skipped if the dynamic loader was loaded late by a static
6584	  executable (see the solib-svr4.c parameter ignore_first).  But in
6585	  that case the main executable has no PT_DYNAMIC, and this function
6586	  has already returned above because get_r_debug failed.  */
6587 if (lm_prev == 0)
6588 string_appendf (document, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6589 else
6590 {
6591 /* Not checking for error because reading may stop before
6592 we've got PATH_MAX worth of characters. */
6593 libname[0] = '\0';
6594 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6595 libname[sizeof (libname) - 1] = '\0';
6596 if (libname[0] != '\0')
6597 {
6598 if (!header_done)
6599 {
6600 /* Terminate `<library-list-svr4'. */
6601 document += '>';
6602 header_done = 1;
6603 }
6604
6605 string_appendf (document, "<library name=\"");
6606 xml_escape_text_append (&document, (char *) libname);
6607 string_appendf (document, "\" lm=\"0x%lx\" "
6608 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6609 (unsigned long) lm_addr, (unsigned long) l_addr,
6610 (unsigned long) l_ld);
6611 }
6612 }
6613
6614 lm_prev = lm_addr;
6615 lm_addr = l_next;
6616 }
6617
6618 if (!header_done)
6619 {
6620 /* Empty list; terminate `<library-list-svr4'. */
6621 document += "/>";
6622 }
6623 else
6624 document += "</library-list-svr4>";
6625
6626 int document_len = document.length ();
6627 if (offset < document_len)
6628 document_len -= offset;
6629 else
6630 document_len = 0;
6631 if (len > document_len)
6632 len = document_len;
6633
6634 memcpy (readbuf, document.data () + offset, len);
6635
6636 return len;
6637 }
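
/* Editorial note: a sample reply built by the function above
   (hypothetical addresses, shown pretty-printed; the actual output
   carries no whitespace between elements):

     <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
       <library name="/lib64/libc.so.6" lm="0x7ffff7ffe750"
		l_addr="0x7ffff7c00000" l_ld="0x7ffff7dfd9c0"/>
     </library-list-svr4>

   An empty list is sent as <library-list-svr4 version="1.0"/>.  */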
6638
6639 #ifdef HAVE_LINUX_BTRACE
6640
6641 btrace_target_info *
6642 linux_process_target::enable_btrace (thread_info *tp,
6643 const btrace_config *conf)
6644 {
6645 return linux_enable_btrace (tp->id, conf);
6646 }
6647
6648 /* See to_disable_btrace target method. */
6649
6650 int
6651 linux_process_target::disable_btrace (btrace_target_info *tinfo)
6652 {
6653 enum btrace_error err;
6654
6655 err = linux_disable_btrace (tinfo);
6656 return (err == BTRACE_ERR_NONE ? 0 : -1);
6657 }
6658
6659 /* Encode an Intel Processor Trace configuration. */
6660
6661 static void
6662 linux_low_encode_pt_config (struct buffer *buffer,
6663 const struct btrace_data_pt_config *config)
6664 {
6665 buffer_grow_str (buffer, "<pt-config>\n");
6666
6667 switch (config->cpu.vendor)
6668 {
6669 case CV_INTEL:
6670 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6671 "model=\"%u\" stepping=\"%u\"/>\n",
6672 config->cpu.family, config->cpu.model,
6673 config->cpu.stepping);
6674 break;
6675
6676 default:
6677 break;
6678 }
6679
6680 buffer_grow_str (buffer, "</pt-config>\n");
6681 }
6682
6683 /* Encode a raw buffer. */
6684
6685 static void
6686 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
6687 unsigned int size)
6688 {
6689 if (size == 0)
6690 return;
6691
6692 /* We use hex encoding - see gdbsupport/rsp-low.h. */
6693 buffer_grow_str (buffer, "<raw>\n");
6694
6695 while (size-- > 0)
6696 {
6697 char elem[2];
6698
6699 elem[0] = tohex ((*data >> 4) & 0xf);
6700 elem[1] = tohex (*data++ & 0xf);
6701
6702 buffer_grow (buffer, elem, 2);
6703 }
6704
6705 buffer_grow_str (buffer, "</raw>\n");
6706 }
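
/* Editorial note: for example, the three bytes 0x02 0x82 0x00 are
   encoded above as the text "028200" inside the <raw> element -- one
   hex pair per byte, high nibble first.  */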
6707
6708 /* See to_read_btrace target method. */
6709
6710 int
6711 linux_process_target::read_btrace (btrace_target_info *tinfo,
6712 buffer *buffer,
6713 enum btrace_read_type type)
6714 {
6715 struct btrace_data btrace;
6716 enum btrace_error err;
6717
6718 err = linux_read_btrace (&btrace, tinfo, type);
6719 if (err != BTRACE_ERR_NONE)
6720 {
6721 if (err == BTRACE_ERR_OVERFLOW)
6722 buffer_grow_str0 (buffer, "E.Overflow.");
6723 else
6724 buffer_grow_str0 (buffer, "E.Generic Error.");
6725
6726 return -1;
6727 }
6728
6729 switch (btrace.format)
6730 {
6731 case BTRACE_FORMAT_NONE:
6732 buffer_grow_str0 (buffer, "E.No Trace.");
6733 return -1;
6734
6735 case BTRACE_FORMAT_BTS:
6736 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6737 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6738
6739 for (const btrace_block &block : *btrace.variant.bts.blocks)
6740 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6741 paddress (block.begin), paddress (block.end));
6742
6743 buffer_grow_str0 (buffer, "</btrace>\n");
6744 break;
6745
6746 case BTRACE_FORMAT_PT:
6747 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6748 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6749 buffer_grow_str (buffer, "<pt>\n");
6750
6751 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
6752
6753 linux_low_encode_raw (buffer, btrace.variant.pt.data,
6754 btrace.variant.pt.size);
6755
6756 buffer_grow_str (buffer, "</pt>\n");
6757 buffer_grow_str0 (buffer, "</btrace>\n");
6758 break;
6759
6760 default:
6761 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
6762 return -1;
6763 }
6764
6765 return 0;
6766 }
6767
6768 /* See to_btrace_conf target method. */
6769
6770 int
6771 linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
6772 buffer *buffer)
6773 {
6774 const struct btrace_config *conf;
6775
6776 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
6777 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
6778
6779 conf = linux_btrace_conf (tinfo);
6780 if (conf != NULL)
6781 {
6782 switch (conf->format)
6783 {
6784 case BTRACE_FORMAT_NONE:
6785 break;
6786
6787 case BTRACE_FORMAT_BTS:
6788 buffer_xml_printf (buffer, "<bts");
6789 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
6790 buffer_xml_printf (buffer, " />\n");
6791 break;
6792
6793 case BTRACE_FORMAT_PT:
6794 buffer_xml_printf (buffer, "<pt");
6795 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
6796 buffer_xml_printf (buffer, "/>\n");
6797 break;
6798 }
6799 }
6800
6801 buffer_grow_str0 (buffer, "</btrace-conf>\n");
6802 return 0;
6803 }
6804 #endif /* HAVE_LINUX_BTRACE */
6805
6806 /* See nat/linux-nat.h. */
6807
6808 ptid_t
6809 current_lwp_ptid (void)
6810 {
6811 return ptid_of (current_thread);
6812 }
6813
6814 const char *
6815 linux_process_target::thread_name (ptid_t thread)
6816 {
6817 return linux_proc_tid_get_name (thread);
6818 }
6819
6820 #if USE_THREAD_DB
6821 bool
6822 linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
6823 int *handle_len)
6824 {
6825 return thread_db_thread_handle (ptid, handle, handle_len);
6826 }
6827 #endif
6828
6829 thread_info *
6830 linux_process_target::thread_pending_parent (thread_info *thread)
6831 {
6832 lwp_info *parent = get_thread_lwp (thread)->pending_parent ();
6833
6834 if (parent == nullptr)
6835 return nullptr;
6836
6837 return get_lwp_thread (parent);
6838 }
6839
6840 thread_info *
6841 linux_process_target::thread_pending_child (thread_info *thread)
6842 {
6843 lwp_info *child = get_thread_lwp (thread)->pending_child ();
6844
6845 if (child == nullptr)
6846 return nullptr;
6847
6848 return get_lwp_thread (child);
6849 }
6850
6851 /* Default implementation of linux_target_ops method "set_pc" for
6852 32-bit pc register which is literally named "pc". */
6853
6854 void
6855 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
6856 {
6857 uint32_t newpc = pc;
6858
6859 supply_register_by_name (regcache, "pc", &newpc);
6860 }
6861
6862 /* Default implementation of linux_target_ops method "get_pc" for
6863 32-bit pc register which is literally named "pc". */
6864
6865 CORE_ADDR
6866 linux_get_pc_32bit (struct regcache *regcache)
6867 {
6868 uint32_t pc;
6869
6870 collect_register_by_name (regcache, "pc", &pc);
6871 threads_debug_printf ("stop pc is 0x%" PRIx32, pc);
6872 return pc;
6873 }
6874
6875 /* Default implementation of linux_target_ops method "set_pc" for
6876 64-bit pc register which is literally named "pc". */
6877
6878 void
6879 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
6880 {
6881 uint64_t newpc = pc;
6882
6883 supply_register_by_name (regcache, "pc", &newpc);
6884 }
6885
6886 /* Default implementation of linux_target_ops method "get_pc" for
6887 64-bit pc register which is literally named "pc". */
6888
6889 CORE_ADDR
6890 linux_get_pc_64bit (struct regcache *regcache)
6891 {
6892 uint64_t pc;
6893
6894 collect_register_by_name (regcache, "pc", &pc);
6895 threads_debug_printf ("stop pc is 0x%" PRIx64, pc);
6896 return pc;
6897 }
6898
6899 /* See linux-low.h. */
6900
6901 int
6902 linux_get_auxv (int wordsize, CORE_ADDR match, CORE_ADDR *valp)
6903 {
6904 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
6905 int offset = 0;
6906
6907 gdb_assert (wordsize == 4 || wordsize == 8);
6908
6909 while (the_target->read_auxv (offset, data, 2 * wordsize) == 2 * wordsize)
6910 {
6911 if (wordsize == 4)
6912 {
6913 uint32_t *data_p = (uint32_t *) data;
6914 if (data_p[0] == match)
6915 {
6916 *valp = data_p[1];
6917 return 1;
6918 }
6919 }
6920 else
6921 {
6922 uint64_t *data_p = (uint64_t *) data;
6923 if (data_p[0] == match)
6924 {
6925 *valp = data_p[1];
6926 return 1;
6927 }
6928 }
6929
6930 offset += 2 * wordsize;
6931 }
6932
6933 return 0;
6934 }
6935
6936 /* See linux-low.h. */
6937
6938 CORE_ADDR
6939 linux_get_hwcap (int wordsize)
6940 {
6941 CORE_ADDR hwcap = 0;
6942 linux_get_auxv (wordsize, AT_HWCAP, &hwcap);
6943 return hwcap;
6944 }
6945
6946 /* See linux-low.h. */
6947
6948 CORE_ADDR
6949 linux_get_hwcap2 (int wordsize)
6950 {
6951 CORE_ADDR hwcap2 = 0;
6952 linux_get_auxv (wordsize, AT_HWCAP2, &hwcap2);
6953 return hwcap2;
6954 }
6955
6956 #ifdef HAVE_LINUX_REGSETS
6957 void
6958 initialize_regsets_info (struct regsets_info *info)
6959 {
6960 for (info->num_regsets = 0;
6961 info->regsets[info->num_regsets].size >= 0;
6962 info->num_regsets++)
6963 ;
6964 }
6965 #endif
6966
6967 void
6968 initialize_low (void)
6969 {
6970 struct sigaction sigchld_action;
6971
6972 memset (&sigchld_action, 0, sizeof (sigchld_action));
6973 set_target_ops (the_linux_target);
6974
6975 linux_ptrace_init_warnings ();
6976 linux_proc_init_warnings ();
6977
6978 sigchld_action.sa_handler = sigchld_handler;
6979 sigemptyset (&sigchld_action.sa_mask);
6980 sigchld_action.sa_flags = SA_RESTART;
6981 sigaction (SIGCHLD, &sigchld_action, NULL);
6982
6983 initialize_low_arch ();
6984
6985 linux_check_ptrace_features ();
6986 }