/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2023 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "gdbsupport/agent.h"
#include "tdesc.h"
#include "gdbsupport/event-loop.h"
#include "gdbsupport/event-pipe.h"
#include "gdbsupport/rsp-low.h"
#include "gdbsupport/signals-state-save-restore.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdbsupport/gdb_wait.h"
#include "nat/gdb_ptrace.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "gdbsupport/filestuff.h"
#include "tracepoint.h"
#include <inttypes.h>
#include "gdbsupport/common-inferior.h"
#include "nat/fork-inferior.h"
#include "gdbsupport/environ.h"
#include "gdbsupport/gdb-sigmask.h"
#include "gdbsupport/scoped_restore.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif
#include "nat/linux-namespaces.h"

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef AT_HWCAP2
#define AT_HWCAP2 26
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR     (0x10000*4)
#define PT_DATA_ADDR     (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#if (defined(__UCLIBC__) \
     && defined(HAS_NOMMU) \
     && defined(PT_TEXT_ADDR) \
     && defined(PT_DATA_ADDR) \
     && defined(PT_TEXT_END_ADDR))
#define SUPPORTS_READ_OFFSETS
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "gdbsupport/btrace-common.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif
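
/* Note these local auxv types use only fixed-width members: gdbserver
   may be a 64-bit process inspecting a 32-bit inferior (or vice
   versa), so it cannot rely on the host's native Elf*_auxv_t layout.
   An illustrative scan of a raw 64-bit auxv buffer for AT_HWCAP2
   would look like the following (a sketch only, not code used in
   this file):

     for (const Elf64_auxv_t *av = (const Elf64_auxv_t *) buf;
          av->a_type != AT_NULL; av++)
       if (av->a_type == AT_HWCAP2)
         hwcap2 = av->a_un.a_val;
*/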

/* Does the current host support PTRACE_GETREGSET?  */
int have_ptrace_getregset = -1;

/* Return TRUE if THREAD is the leader thread of the process.  */

static bool
is_leader (thread_info *thread)
{
  ptid_t ptid = ptid_of (thread);
  return ptid.pid () == ptid.lwp ();
}

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->stepping;
}

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
static struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}
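
/* Usage sketch (illustrative, not code used verbatim here): when a
   fork/clone child's initial SIGSTOP is reported before the parent's
   corresponding ptrace event, the event-filtering code records it
   with

     add_to_pid_list (&stopped_pids, new_pid, status);

   and handle_extended_wait below later claims it with

     if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
       ... explicitly wait for the child's stop instead ...  */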

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
static stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void unsuspend_all_lwps (struct lwp_info *except);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int lwp_is_marked_dead (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
static int linux_low_ptrace_options (int attached);
static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
static ptid_t step_over_bkpt;

bool
linux_process_target::low_supports_breakpoints ()
{
  return false;
}

CORE_ADDR
linux_process_target::low_get_pc (regcache *regcache)
{
  return 0;
}

void
linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
{
  gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
}

std::vector<CORE_ADDR>
linux_process_target::low_get_next_pcs (regcache *regcache)
{
  gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
			  "implemented");
}

int
linux_process_target::low_decr_pc_after_break ()
{
  return 0;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

/* The event pipe registered as a waitable file in the event loop.  */
static event_pipe linux_event_pipe;

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe.is_open ())

static void send_sigstop (struct lwp_info *lwp);

/* Return 1 if HEADER describes a 64-bit ELF file, 0 if it describes
   a 32-bit ELF file, and -1 if it is not an ELF header at all.  Set
   *MACHINE to the ELF machine type (EM_NONE if not ELF).  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return 1 if FILE is a 64-bit ELF file, 0 if it is a 32-bit ELF
   file (or its header could not be fully read), and -1 if the file
   is not accessible, doesn't exist, or is not ELF at all.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accept an integer PID; return 1 if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}
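
/* Example use (a sketch; architecture backends do something along
   these lines when picking a target description for a freshly
   attached process):

     unsigned int machine;
     if (linux_pid_exe_is_elf_64_file (pid, &machine) > 0)
       ... select a 64-bit description matching MACHINE ...  */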

void
linux_process_target::delete_lwp (lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  threads_debug_printf ("deleting %ld", lwpid_of (thr));

  remove_thread (thr);

  low_delete_thread (lwp->arch_private);

  delete lwp;
}

void
linux_process_target::low_delete_thread (arch_lwp_info *info)
{
  /* Default implementation should be overridden if architecture-specific
     info is being used.  */
  gdb_assert (info == nullptr);
}

/* Open the /proc/PID/mem file for PROC.  */

static void
open_proc_mem_file (process_info *proc)
{
  gdb_assert (proc->priv->mem_fd == -1);

  char filename[64];
  xsnprintf (filename, sizeof filename, "/proc/%d/mem", proc->pid);

  proc->priv->mem_fd
    = gdb_open_cloexec (filename, O_RDWR | O_LARGEFILE, 0).release ();
}
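
/* The descriptor is kept open for the lifetime of the process and is
   used with pread/pwrite, passing the inferior address as the file
   offset; O_LARGEFILE keeps offsets beyond 2GB working on 32-bit
   hosts.  A minimal read sketch (illustrative, error handling
   elided):

     ssize_t n = pread (proc->priv->mem_fd, buf, len, (off_t) memaddr);
*/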

process_info *
linux_process_target::add_linux_process_no_mem_file (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = XCNEW (struct process_info_private);

  proc->priv->arch_private = low_new_process ();
  proc->priv->mem_fd = -1;

  return proc;
}


process_info *
linux_process_target::add_linux_process (int pid, int attached)
{
  process_info *proc = add_linux_process_no_mem_file (pid, attached);
  open_proc_mem_file (proc);
  return proc;
}

void
linux_process_target::remove_linux_process (process_info *proc)
{
  if (proc->priv->mem_fd >= 0)
    close (proc->priv->mem_fd);

  this->low_delete_process (proc->priv->arch_private);

  xfree (proc->priv);
  proc->priv = nullptr;

  remove_process (proc);
}

arch_process_info *
linux_process_target::low_new_process ()
{
  return nullptr;
}

void
linux_process_target::low_delete_process (arch_process_info *info)
{
  /* Default implementation must be overridden if architecture-specific
     info exists.  */
  gdb_assert (info == nullptr);
}

void
linux_process_target::low_new_fork (process_info *parent, process_info *child)
{
  /* Nop.  */
}

void
linux_process_target::arch_setup_thread (thread_info *thread)
{
  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  low_arch_setup ();
}

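/* Handle an extended wait status (fork/vfork/clone/exec/vfork-done).
   Return 0 if the event should be reported to the client, or 1 if
   the event was handled internally and should not be reported (e.g.,
   a new clone whose LWP was simply added to our list).  */
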
int
linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
					    int wstat)
{
  client_state &cs = get_client_state ();
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  gdb_assert (event_lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  struct process_info *parent_proc;
	  struct process_info *child_proc;
	  struct lwp_info *child_lwp;
	  struct thread_info *child_thr;

	  ptid = ptid_t (new_pid, new_pid);

	  threads_debug_printf ("Got fork event from LWP %ld, "
				"new child is %d",
				ptid_of (event_thr).lwp (),
				ptid.pid ());

	  /* Add the new process to the tables and clone the breakpoint
	     lists of the parent.  We need to do this even if the new process
	     will be detached, since we will need the process object and the
	     breakpoints to remove any breakpoints from memory when we
	     detach, and the client side will access registers.  */
	  child_proc = add_linux_process (new_pid, 0);
	  gdb_assert (child_proc != NULL);
	  child_lwp = add_lwp (ptid);
	  gdb_assert (child_lwp != NULL);
	  child_lwp->stopped = 1;
	  child_lwp->must_set_ptrace_flags = 1;
	  child_lwp->status_pending_p = 0;
	  child_thr = get_lwp_thread (child_lwp);
	  child_thr->last_resume_kind = resume_stop;
	  child_thr->last_status.set_stopped (GDB_SIGNAL_0);

	  /* If we're suspending all threads, leave this one suspended
	     too.  If the fork/clone parent is stepping over a breakpoint,
	     all other threads have been suspended already.  Leave the
	     child suspended too.  */
	  if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	      || event_lwp->bp_reinsert != 0)
	    {
	      threads_debug_printf ("leaving child suspended");
	      child_lwp->suspended = 1;
	    }

	  parent_proc = get_thread_process (event_thr);
	  child_proc->attached = parent_proc->attached;

	  if (event_lwp->bp_reinsert != 0
	      && supports_software_single_step ()
	      && event == PTRACE_EVENT_VFORK)
	    {
	      /* If we leave single-step breakpoints there, child will
		 hit it, so uninsert single-step breakpoints from parent
		 (and child).  Once vfork child is done, reinsert
		 them back to parent.  */
	      uninsert_single_step_breakpoints (event_thr);
	    }

	  clone_all_breakpoints (child_thr, event_thr);

	  target_desc_up tdesc = allocate_target_description ();
	  copy_target_description (tdesc.get (), parent_proc->tdesc);
	  child_proc->tdesc = tdesc.release ();

	  /* Clone arch-specific process data.  */
	  low_new_fork (parent_proc, child_proc);

	  /* Save fork info in the parent thread.  */
	  if (event == PTRACE_EVENT_FORK)
	    event_lwp->waitstatus.set_forked (ptid);
	  else if (event == PTRACE_EVENT_VFORK)
	    event_lwp->waitstatus.set_vforked (ptid);

	  /* The status_pending field contains bits denoting the
	     extended event, so when the pending event is handled,
	     the handler will look at lwp->waitstatus.  */
	  event_lwp->status_pending_p = 1;
	  event_lwp->status_pending = wstat;

	  /* Link the threads until the parent event is passed on to
	     higher layers.  */
	  event_lwp->fork_relative = child_lwp;
	  child_lwp->fork_relative = event_lwp;

	  /* If the parent thread is doing step-over with single-step
	     breakpoints, the list of single-step breakpoints are cloned
	     from the parent's.  Remove them from the child process.
	     In case of vfork, we'll reinsert them back once vforked
	     child is done.  */
	  if (event_lwp->bp_reinsert != 0
	      && supports_software_single_step ())
	    {
	      /* The child process is forked and stopped, so it is safe
		 to access its memory without stopping all other threads
		 from other processes.  */
	      delete_single_step_breakpoints (child_thr);

	      gdb_assert (has_single_step_breakpoints (event_thr));
	      gdb_assert (!has_single_step_breakpoints (child_thr));
	    }

	  /* Report the event.  */
	  return 0;
	}

      threads_debug_printf
	("Got clone event from LWP %ld, new child is LWP %ld",
	 lwpid_of (event_thr), new_pid);

      ptid = ptid_t (pid_of (event_thr), new_pid);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  If the fork/clone parent is stepping over a breakpoint,
	 all other threads have been suspended already.  Leave the
	 child suspended too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	  || event_lwp->bp_reinsert != 0)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  new_lwp->stop_expected = 1;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}
      else if (cs.report_thread_events)
	{
	  new_lwp->waitstatus.set_thread_created ();
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}

#ifdef USE_THREAD_DB
      thread_db_notice_clone (event_thr, ptid);
#endif

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.set_vfork_done ();

      if (event_lwp->bp_reinsert != 0 && supports_software_single_step ())
	{
	  reinsert_single_step_breakpoints (event_thr);

	  gdb_assert (has_single_step_breakpoints (event_thr));
	}

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
    {
      struct process_info *proc;
      std::vector<int> syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      threads_debug_printf ("Got exec event from LWP %ld",
			    lwpid_of (event_thr));

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = event_ptid.pid ();

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = std::move (proc->syscalls_to_catch);

      /* Delete the execing process and all its threads.  */
      mourn (proc);
      switch_to_thread (nullptr);

      /* Create a new process/lwp/thread.  */
      proc = add_linux_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.set_execd
	(make_unique_xstrdup
	 (linux_proc_pid_to_exec_file (lwpid_of (event_thr))));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.set_ignore ();

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
	 to avoid sending a new list when the architecture doesn't change.
	 Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = std::move (syscalls_to_catch);

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (_("unknown ptrace event %d"), event);
}

CORE_ADDR
linux_process_target::get_pc (lwp_info *lwp)
{
  process_info *proc = get_thread_process (get_lwp_thread (lwp));
  gdb_assert (!proc->starting_up);

  if (!low_supports_breakpoints ())
    return 0;

  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

  struct regcache *regcache = get_thread_regcache (current_thread, 1);
  CORE_ADDR pc = low_get_pc (regcache);

  threads_debug_printf ("pc is 0x%lx", (long) pc);

  return pc;
}

void
linux_process_target::get_syscall_trapinfo (lwp_info *lwp, int *sysno)
{
  struct regcache *regcache;

  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

  regcache = get_thread_regcache (current_thread, 1);
  low_get_syscall_trapinfo (regcache, sysno);

  threads_debug_printf ("get_syscall_trapinfo sysno %d", *sysno);
}

void
linux_process_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  /* By default, report an unknown system call number.  */
  *sysno = UNKNOWN_SYSCALL;
}

bool
linux_process_target::save_stop_reason (lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (!low_supports_breakpoints ())
    return false;

  process_info *proc = get_thread_process (get_lwp_thread (lwp));
  if (proc->starting_up)
    {
      /* Claim we have the stop PC so that the caller doesn't try to
	 fetch it itself.  */
      return true;
    }

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - low_decr_pc_after_break ();

  /* breakpoint_at reads from the current thread.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
	      && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* The si_code is ambiguous on this arch -- check debug
		 registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
	    {
	      /* If we determine the LWP stopped for a SW breakpoint,
		 trust it.  Particularly don't check watchpoint
		 registers, because at least on s390, we'd find
		 stopped-by-watchpoint as long as there's a watchpoint
		 set.  */
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* This can indicate either a hardware breakpoint or
		 hardware watchpoint.  Check debug registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      /* We may have single stepped an instruction that
		 triggered a watchpoint.  In that case, on some
		 architectures (such as x86), instead of TRAP_HWBKPT,
		 si_code indicates TRAP_TRACE, and we need to check
		 the debug registers separately.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
	    }
	}
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && low_breakpoint_at (sw_breakpoint_pc))
    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;

  if (hardware_breakpoint_inserted_here (pc))
    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lwp);
#endif

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      threads_debug_printf
	("%s stopped by software breakpoint",
	 target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, sw_breakpoint_pc);
	}

      /* Update this so we record the correct stop PC below.  */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    threads_debug_printf
      ("%s stopped by hardware breakpoint",
       target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    threads_debug_printf
      ("%s stopped by hardware watchpoint",
       target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    threads_debug_printf
      ("%s stopped by trace",
       target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());

  lwp->stop_pc = pc;
  return true;
}

lwp_info *
linux_process_target::add_lwp (ptid_t ptid)
{
  lwp_info *lwp = new lwp_info;

  lwp->thread = add_thread (ptid, lwp);

  low_new_thread (lwp);

  return lwp;
}

void
linux_process_target::low_new_thread (lwp_info *info)
{
  /* Nop.  */
}

/* Callback to be used when calling fork_inferior, responsible for
   actually initiating the tracing of the inferior.  */

static void
linux_ptrace_fun ()
{
  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) < 0)
    trace_start_error_with_name ("ptrace");

  if (setpgid (0, 0) < 0)
    trace_start_error_with_name ("setpgid");

  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
     stdout to stderr so that inferior i/o doesn't corrupt the connection.
     Also, redirect stdin to /dev/null.  */
  if (remote_connection_is_stdio ())
    {
      if (close (0) < 0)
	trace_start_error_with_name ("close");
      if (open ("/dev/null", O_RDONLY) < 0)
	trace_start_error_with_name ("open");
      if (dup2 (2, 1) < 0)
	trace_start_error_with_name ("dup2");
      if (write (2, "stdin/stdout redirected\n",
		 sizeof ("stdin/stdout redirected\n") - 1) < 0)
	{
	  /* Errors ignored.  */;
	}
    }
}

/* Start an inferior process and return its pid.
   PROGRAM is the name of the program to be started, and PROGRAM_ARGS
   are its arguments.  */

int
linux_process_target::create_inferior (const char *program,
				       const std::vector<char *> &program_args)
{
  client_state &cs = get_client_state ();
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

  {
    maybe_disable_address_space_randomization restore_personality
      (cs.disable_randomization);
    std::string str_program_args = construct_inferior_arguments (program_args);

    pid = fork_inferior (program,
			 str_program_args.c_str (),
			 get_environ ()->envp (), linux_ptrace_fun,
			 NULL, NULL, NULL, NULL);
  }

  /* When spawning a new process, we can't open the mem file yet.  We
     still have to nurse the process through the shell, and that execs
     a couple times.  The address space a /proc/PID/mem file is
     accessing is destroyed on exec.  */
  process_info *proc = add_linux_process_no_mem_file (pid, 0);

  ptid = ptid_t (pid, pid);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  post_fork_inferior (pid, program);

  /* PROC is now past the shell running the program we want, so we can
     open the /proc/PID/mem file.  */
  open_proc_mem_file (proc);

  return pid;
}

/* Implement the post_create_inferior target_ops method.  */

void
linux_process_target::post_create_inferior ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  low_arch_setup ();

  if (lwp->must_set_ptrace_flags)
    {
      struct process_info *proc = current_process ();
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (current_thread), options);
      lwp->must_set_ptrace_flags = 0;
    }
}

int
linux_process_target::attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid.lwp ();

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      threads_debug_printf ("Attached to a stopped process");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}
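
/* Note attach_lwp is used both for the initial attach (see
   linux_process_target::attach below) and for every additional LWP
   discovered afterwards (see attach_proc_task_lwp_callback).  */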

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid.lwp ();
      int err;

      threads_debug_printf ("Found new lwp %d", lwpid);

      err = the_linux_target->attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
	 is returned if the thread's task still exists, and is marked
	 as exited or zombie, as well as other conditions, so in that
	 case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
	  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	threads_debug_printf
	  ("Cannot attach to lwp %d: thread is gone (%d: %s)",
	   lwpid, err, safe_strerror (err));
      else if (err != 0)
	{
	  std::string reason
	    = linux_ptrace_attach_fail_reason_string (ptid, err);

	  warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
	}

      return 1;
    }
  return 0;
}

static void async_file_mark (void);

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

int
linux_process_target::attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_t (pid, pid);
  int err;

  /* Delay opening the /proc/PID/mem file until we've successfully
     attached.  */
  proc = add_linux_process_no_mem_file (pid, 1);

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = attach_lwp (ptid);
  if (err != 0)
    {
      this->remove_linux_process (proc);

      std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
      error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
    }

  open_proc_mem_file (proc);

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_t (pid, pid));
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = ptid_t (pid);

      lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (ptid_t (lwpid));

      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
	{
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}

static int
last_thread_of_process_p (int pid)
{
  bool seen_one = false;

  thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
    {
      if (!seen_one)
	{
	  /* This is the first thread of this process we see.  */
	  seen_one = true;
	  return false;
	}
      else
	{
	  /* This is the second thread of this process we see.  */
	  return true;
	}
    });

  return thread == NULL;
}
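
/* The find_thread call above is used purely as a counting scan: the
   mutable SEEN_ONE flag makes the predicate match only on the second
   thread of PID, so a NULL result means PID has at most one known
   thread.  */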

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still
     support debugging programs using raw clone without CLONE_THREAD,
     we send one for each thread.  For years, we used PTRACE_KILL
     only, so we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      threads_debug_printf ("kill_lwp (SIGKILL) %s, 0, 0 (%s)",
			    target_pid_to_str (ptid_of (thr)).c_str (),
			    save_errno ? safe_strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      threads_debug_printf ("PTRACE_KILL %s, 0, 0 (%s)",
			    target_pid_to_str (ptid_of (thr)).c_str (),
			    save_errno ? safe_strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_of (thr).pid ();
  int lwpid = ptid_of (thr).lwp ();
  int wstat;
  int res;

  threads_debug_printf ("killing lwp %d, for pid: %d", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.
      */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}

/* Callback for `for_each_thread'.  Kills an lwp of a given process,
   except the leader.  */

static void
kill_one_lwp_callback (thread_info *thread, int pid)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      threads_debug_printf ("is last of process %s",
			    target_pid_to_str (thread->id).c_str ());
      return;
    }

  kill_wait_lwp (lwp);
}

int
linux_process_target::kill (process_info *process)
{
  int pid = process->pid;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  for_each_thread (pid, [&] (thread_info *thread)
    {
      kill_one_lwp_callback (thread, pid);
    });

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp_info *lwp = find_lwp_pid (ptid_t (pid));

  if (lwp == NULL)
    threads_debug_printf ("cannot find lwp for pid: %d", pid);
  else
    kill_wait_lwp (lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  client_state &cs = get_client_state ();
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind () != TARGET_WAITKIND_STOPPED
	  || thread->last_status.sig () == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      threads_debug_printf ("lwp %s hasn't stopped: no pending signal",
			    target_pid_to_str (ptid_of (thread)).c_str ());
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      threads_debug_printf ("lwp %s had stopped with extended "
			    "status: no pending signal",
			    target_pid_to_str (ptid_of (thread)).c_str ());
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (cs.program_signals_p && !cs.program_signals[signo])
    {
      threads_debug_printf ("lwp %s had signal %s, but it is in nopass state",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    gdb_signal_to_string (signo));
      return 0;
    }
  else if (!cs.program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      threads_debug_printf ("lwp %s had signal %s, "
			    "but we don't know if we should pass it. "
			    "Default to not.",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      threads_debug_printf ("lwp %s has pending signal %s: delivering it",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

void
linux_process_target::detach_one_lwp (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      threads_debug_printf ("Sending SIGCONT to %s",
			    target_pid_to_str (ptid_of (thread)).c_str ());

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      low_prepare_to_resume (lwp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw;
    }

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
	 zombie.  This can happen if one of the already-detached
	 threads exits the whole thread group.  In that case we're
	 still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
	{
	  int ret, status;

	  ret = my_waitpid (lwpid, &status, __WALL);
	  if (ret == -1)
	    {
	      warning (_("Couldn't reap LWP %d while detaching: %s"),
		       lwpid, safe_strerror (errno));
	    }
	  else if (!WIFEXITED (status) && !WIFSIGNALED (status))
	    {
	      warning (_("Reaping LWP %d while detaching "
			 "returned unexpected status 0x%x"),
		       lwpid, status);
	    }
	}
      else
	{
	  error (_("Can't detach %s: %s"),
		 target_pid_to_str (ptid_of (thread)).c_str (),
		 safe_strerror (save_errno));
	}
    }
  else
    threads_debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)",
			  target_pid_to_str (ptid_of (thread)).c_str (),
			  strsignal (sig));

  delete_lwp (lwp);
}

int
linux_process_target::detach (process_info *process)
{
  struct lwp_info *main_lwp;

  /* If there's a step over already in progress, let it finish first;
     otherwise nesting a stabilize_threads operation on top gets
     really messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  target_stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  for_each_thread (process->pid, [this] (thread_info *thread)
    {
      /* We don't actually detach from the thread group leader just yet.
	 If the thread group exits, we must reap the zombie clone lwps
	 before we're able to reap the leader.  */
      if (thread->id.pid () == thread->id.lwp ())
	return;

      lwp_info *lwp = get_thread_lwp (thread);
      detach_one_lwp (lwp);
    });

  main_lwp = find_lwp_pid (ptid_t (process->pid));
  detach_one_lwp (main_lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

void
linux_process_target::mourn (process_info *process)
{
#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  for_each_thread (process->pid, [this] (thread_info *thread)
    {
      delete_lwp (get_thread_lwp (thread));
    });

  this->remove_linux_process (process);
}

void
linux_process_target::join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return true if the given thread is still alive.  */

bool
linux_process_target::thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp_is_marked_dead (lwp);
  else
    return 0;
}

bool
linux_process_target::thread_still_has_status_pending (thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      scoped_restore_current_thread restore_thread;
      switch_to_thread (thread);

      if (pc != lp->stop_pc)
	{
	  threads_debug_printf ("PC of %ld changed",
				lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !low_breakpoint_at (pc))
	{
	  threads_debug_printf ("previous SW breakpoint of %ld gone",
				lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  threads_debug_printf ("previous HW breakpoint of %ld gone",
				lwpid_of (thread));
	  discard = 1;
	}
#endif

      if (discard)
	{
	  threads_debug_printf ("discarding pending breakpoint status");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}

/* Returns true if LWP is resumed from the client's perspective.  */

static int
lwp_resumed (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (thread->last_resume_kind != resume_stop)
    return 1;

  /* Did gdb send us a `vCont;t', but we haven't reported the
     corresponding stop to gdb yet?  If so, the thread is still
     resumed/running from gdb's perspective.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
    return 1;

  return 0;
}

bool
linux_process_target::status_pending_p_callback (thread_info *thread,
						 ptid_t ptid)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!thread->id.matches (ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending (thread))
    {
      resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  long lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
  thread_info *thread = find_thread ([lwp] (thread_info *thr_arg)
    {
      return thr_arg->id.lwp () == lwp;
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}
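
/* E.g., find_lwp_pid (ptid_t (pid)) yields the leader LWP of process
   PID, since for the leader the lwpid equals the pid (see is_leader
   above); this is how the kill and detach paths above locate the
   main lwp.  */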

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;

  for_each_thread (pid, [&] (thread_info *thread)
    {
      count++;
    });

  return count;
}

/* See nat/linux-nat.h.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
		   gdb::function_view<iterate_over_lwps_ftype> callback)
{
  thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
    {
      lwp_info *lwp = get_thread_lwp (thr_arg);

      return callback (lwp);
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}
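
/* Illustrative use (a sketch only): find and return the first
   stopped LWP of process PID, or NULL if none:

     lwp_info *lp = iterate_over_lwps (ptid_t (pid),
				       [] (lwp_info *lwp)
				       {
					 return lwp->stopped;
				       });
*/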

void
linux_process_target::check_zombie_leaders ()
{
  for_each_process ([this] (process_info *proc)
    {
      pid_t leader_pid = pid_of (proc);
      lwp_info *leader_lp = find_lwp_pid (ptid_t (leader_pid));

      threads_debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
			    "num_lwps=%d, zombie=%d",
			    leader_pid, leader_lp != NULL,
			    num_lwps (leader_pid),
			    linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL && !leader_lp->stopped
	  /* Check if there are other threads in the group, as we may
	     have raced with the inferior simply exiting.  Note this
	     isn't a watertight check.  If the inferior is
	     multi-threaded and is exiting, it may be we see the
	     leader as zombie before we reap all the non-leader
	     threads.  See comments below.  */
	  && !last_thread_of_process_p (leader_pid)
	  && linux_proc_pid_is_zombie (leader_pid))
	{
	  /* A zombie leader in a multi-threaded program can mean one
	     of three things:

	     #1 - Only the leader exited, not the whole program, e.g.,
	     with pthread_exit.  Since we can't reap the leader's exit
	     status until all other threads are gone and reaped too,
	     we want to delete the zombie leader right away, as it
	     can't be debugged, we can't read its registers, etc.
	     This is the main reason we check for zombie leaders
	     disappearing.

	     #2 - The whole thread-group/process exited (a group exit,
	     via e.g. exit(3)); there is (or will be shortly) an exit
	     reported for each thread in the process, and then finally
	     an exit for the leader once the non-leaders are reaped.

	     #3 - There are 3 or more threads in the group, and a
	     thread other than the leader exec'd.  See comments on
	     exec events at the top of the file.

	     Ideally we would never delete the leader for case #2.
	     Instead, we want to collect the exit status of each
	     non-leader thread, and then finally collect the exit
	     status of the leader as normal and use its exit code as
	     whole-process exit code.  Unfortunately, there's no
	     race-free way to distinguish cases #1 and #2.  We can't
	     assume the exit events for the non-leader threads are
	     already pending in the kernel, nor can we assume the
	     non-leader threads are in zombie state already.  Between
	     the leader becoming zombie and the non-leaders exiting
	     and becoming zombie themselves, there's a small time
	     window, so such a check would be racy.  Temporarily
	     pausing all threads and checking to see if all threads
	     exit or not before re-resuming them would work in the
	     case that all threads are running right now, but it
	     wouldn't work if some thread is currently already
	     ptrace-stopped, e.g., due to scheduler-locking.

	     So what we do is we delete the leader anyhow, and then
	     later on when we see its exit status, we re-add it back.
	     We also make sure that we only report a whole-process
	     exit when we see the leader exiting, as opposed to when
	     the last LWP in the LWP list exits, which can be a
	     non-leader if we deleted the leader here.  */
	  threads_debug_printf ("Thread group leader %d zombie "
				"(it exited, or another thread execd), "
				"deleting it.",
				leader_pid);
	  delete_lwp (leader_lp);
	}
    });
}
1856
1857 /* Callback for `find_thread'. Returns the first LWP that is not
1858 stopped. */
1859
1860 static bool
1861 not_stopped_callback (thread_info *thread, ptid_t filter)
1862 {
1863 if (!thread->id.matches (filter))
1864 return false;
1865
1866 lwp_info *lwp = get_thread_lwp (thread);
1867
1868 return !lwp->stopped;
1869 }
1870
1871 /* Increment LWP's suspend count. */
1872
1873 static void
1874 lwp_suspended_inc (struct lwp_info *lwp)
1875 {
1876 lwp->suspended++;
1877
1878 if (lwp->suspended > 4)
1879 threads_debug_printf
1880 ("LWP %ld has a suspiciously high suspend count, suspended=%d",
1881 lwpid_of (get_lwp_thread (lwp)), lwp->suspended);
1882 }
1883
1884 /* Decrement LWP's suspend count. */
1885
1886 static void
1887 lwp_suspended_decr (struct lwp_info *lwp)
1888 {
1889 lwp->suspended--;
1890
1891 if (lwp->suspended < 0)
1892 {
1893 struct thread_info *thread = get_lwp_thread (lwp);
1894
1895 internal_error ("unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1896 lwp->suspended);
1897 }
1898 }
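/* The two helpers above are always used in matched pairs: callers
   bump the count around a region during which the LWP must not be
   resumed, e.g. (sketch):

     lwp_suspended_inc (lwp);
     ... work during which LWP must stay stopped ...
     lwp_suspended_decr (lwp);

   handle_tracepoints below is one such caller. */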
1899
1900 /* This function should only be called if the LWP got a SIGTRAP.
1901
1902 Handle any tracepoint steps or hits. Return 1 if a tracepoint
1903 event was handled, 0 otherwise. */
1904
1905 static int
1906 handle_tracepoints (struct lwp_info *lwp)
1907 {
1908 struct thread_info *tinfo = get_lwp_thread (lwp);
1909 int tpoint_related_event = 0;
1910
1911 gdb_assert (lwp->suspended == 0);
1912
1913 /* If this tracepoint hit causes a tracing stop, we'll immediately
1914 uninsert tracepoints. To do this, we temporarily pause all
1915 threads, unpatch away, and then unpause threads. We need to make
1916 sure the unpausing doesn't resume LWP too. */
1917 lwp_suspended_inc (lwp);
1918
1919 /* And we need to be sure that any all-threads-stopping doesn't try
1920 to move threads out of the jump pads, as it could deadlock the
1921 inferior (LWP could be in the jump pad, maybe even holding the
1922 lock). */
1923
1924 /* Do any necessary step collect actions. */
1925 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1926
1927 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1928
1929 /* See if we just hit a tracepoint and do its main collect
1930 actions. */
1931 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1932
1933 lwp_suspended_decr (lwp);
1934
1935 gdb_assert (lwp->suspended == 0);
1936 gdb_assert (!stabilizing_threads
1937 || (lwp->collecting_fast_tracepoint
1938 != fast_tpoint_collect_result::not_collecting));
1939
1940 if (tpoint_related_event)
1941 {
1942 threads_debug_printf ("got a tracepoint event");
1943 return 1;
1944 }
1945
1946 return 0;
1947 }
1948
1949 fast_tpoint_collect_result
1950 linux_process_target::linux_fast_tracepoint_collecting
1951 (lwp_info *lwp, fast_tpoint_collect_status *status)
1952 {
1953 CORE_ADDR thread_area;
1954 struct thread_info *thread = get_lwp_thread (lwp);
1955
1956 /* Get the thread area address. This is used to recognize which
1957 thread is which when tracing with the in-process agent library.
1958 We don't read anything from the address, and treat it as opaque;
1959 it's the address itself that we assume is unique per-thread. */
1960 if (low_get_thread_area (lwpid_of (thread), &thread_area) == -1)
1961 return fast_tpoint_collect_result::not_collecting;
1962
1963 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1964 }
1965
1966 int
1967 linux_process_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
1968 {
1969 return -1;
1970 }
1971
1972 bool
1973 linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
1974 {
1975 scoped_restore_current_thread restore_thread;
1976 switch_to_thread (get_lwp_thread (lwp));
1977
1978 if ((wstat == NULL
1979 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1980 && supports_fast_tracepoints ()
1981 && agent_loaded_p ())
1982 {
1983 struct fast_tpoint_collect_status status;
1984
1985 threads_debug_printf
1986 ("Checking whether LWP %ld needs to move out of the jump pad.",
1987 lwpid_of (current_thread));
1988
1989 fast_tpoint_collect_result r
1990 = linux_fast_tracepoint_collecting (lwp, &status);
1991
1992 if (wstat == NULL
1993 || (WSTOPSIG (*wstat) != SIGILL
1994 && WSTOPSIG (*wstat) != SIGFPE
1995 && WSTOPSIG (*wstat) != SIGSEGV
1996 && WSTOPSIG (*wstat) != SIGBUS))
1997 {
1998 lwp->collecting_fast_tracepoint = r;
1999
2000 if (r != fast_tpoint_collect_result::not_collecting)
2001 {
2002 if (r == fast_tpoint_collect_result::before_insn
2003 && lwp->exit_jump_pad_bkpt == NULL)
2004 {
2005 /* Haven't executed the original instruction yet.
2006 Set breakpoint there, and wait till it's hit,
2007 then single-step until exiting the jump pad. */
2008 lwp->exit_jump_pad_bkpt
2009 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2010 }
2011
2012 threads_debug_printf
2013 ("Checking whether LWP %ld needs to move out of the jump pad..."
2014 " it does", lwpid_of (current_thread));
2015
2016 return true;
2017 }
2018 }
2019 else
2020 {
2021 /* If we get a synchronous signal while collecting, *and*
2022 while executing the (relocated) original instruction,
2023 reset the PC to point at the tpoint address, before
2024 reporting to GDB. Otherwise, it's an IPA lib bug: just
2025 report the signal to GDB, and pray for the best. */
2026
2027 lwp->collecting_fast_tracepoint
2028 = fast_tpoint_collect_result::not_collecting;
2029
2030 if (r != fast_tpoint_collect_result::not_collecting
2031 && (status.adjusted_insn_addr <= lwp->stop_pc
2032 && lwp->stop_pc < status.adjusted_insn_addr_end))
2033 {
2034 siginfo_t info;
2035 struct regcache *regcache;
2036
2037 /* The si_addr on a few signals references the address
2038 of the faulting instruction. Adjust that as
2039 well. */
2040 if ((WSTOPSIG (*wstat) == SIGILL
2041 || WSTOPSIG (*wstat) == SIGFPE
2042 || WSTOPSIG (*wstat) == SIGBUS
2043 || WSTOPSIG (*wstat) == SIGSEGV)
2044 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2045 (PTRACE_TYPE_ARG3) 0, &info) == 0
2046 /* Final check just to make sure we don't clobber
2047 the siginfo of non-kernel-sent signals. */
2048 && (uintptr_t) info.si_addr == lwp->stop_pc)
2049 {
2050 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2051 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2052 (PTRACE_TYPE_ARG3) 0, &info);
2053 }
2054
2055 regcache = get_thread_regcache (current_thread, 1);
2056 low_set_pc (regcache, status.tpoint_addr);
2057 lwp->stop_pc = status.tpoint_addr;
2058
2059 /* Cancel any fast tracepoint lock this thread was
2060 holding. */
2061 force_unlock_trace_buffer ();
2062 }
2063
2064 if (lwp->exit_jump_pad_bkpt != NULL)
2065 {
2066 threads_debug_printf
2067 ("Cancelling fast exit-jump-pad: removing bkpt."
2068 "stopping all threads momentarily.");
2069
2070 stop_all_lwps (1, lwp);
2071
2072 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2073 lwp->exit_jump_pad_bkpt = NULL;
2074
2075 unstop_all_lwps (1, lwp);
2076
2077 gdb_assert (lwp->suspended >= 0);
2078 }
2079 }
2080 }
2081
2082 threads_debug_printf
2083 ("Checking whether LWP %ld needs to move out of the jump pad... no",
2084 lwpid_of (current_thread));
2085
2086 return false;
2087 }
2088
2089 /* Enqueue one signal in the "signals to report later when out of the
2090 jump pad" list. */
2091
2092 static void
2093 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2094 {
2095 struct thread_info *thread = get_lwp_thread (lwp);
2096
2097 threads_debug_printf ("Deferring signal %d for LWP %ld.",
2098 WSTOPSIG (*wstat), lwpid_of (thread));
2099
2100 if (debug_threads)
2101 {
2102 for (const auto &sig : lwp->pending_signals_to_report)
2103 threads_debug_printf (" Already queued %d", sig.signal);
2104
2105 threads_debug_printf (" (no more currently queued signals)");
2106 }
2107
2108 /* Don't enqueue non-RT signals if they are already in the deferred
2109 queue. (SIGSTOP being the easiest signal to see ending up here
2110 twice) */
2111 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2112 {
2113 for (const auto &sig : lwp->pending_signals_to_report)
2114 {
2115 if (sig.signal == WSTOPSIG (*wstat))
2116 {
2117 threads_debug_printf
2118 ("Not requeuing already queued non-RT signal %d for LWP %ld",
2119 sig.signal, lwpid_of (thread));
2120 return;
2121 }
2122 }
2123 }
2124
2125 lwp->pending_signals_to_report.emplace_back (WSTOPSIG (*wstat));
2126
2127 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2128 &lwp->pending_signals_to_report.back ().info);
2129 }
2130
2131 /* Dequeue one signal from the "signals to report later when out of
2132 the jump pad" list. */
2133
2134 static int
2135 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2136 {
2137 struct thread_info *thread = get_lwp_thread (lwp);
2138
2139 if (!lwp->pending_signals_to_report.empty ())
2140 {
2141 const pending_signal &p_sig = lwp->pending_signals_to_report.front ();
2142
2143 *wstat = W_STOPCODE (p_sig.signal);
2144 if (p_sig.info.si_signo != 0)
2145 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2146 &p_sig.info);
2147
2148 lwp->pending_signals_to_report.pop_front ();
2149
2150 threads_debug_printf ("Reporting deferred signal %d for LWP %ld.",
2151 WSTOPSIG (*wstat), lwpid_of (thread));
2152
2153 if (debug_threads)
2154 {
2155 for (const auto &sig : lwp->pending_signals_to_report)
2156 threads_debug_printf (" Still queued %d", sig.signal);
2157
2158 threads_debug_printf (" (no more queued signals)");
2159 }
2160
2161 return 1;
2162 }
2163
2164 return 0;
2165 }
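/* Together these two routines implement a park-and-replay queue:
   signals that arrive while an LWP is collecting in the jump pad are
   parked with enqueue_one_deferred_signal, then re-materialized one
   per wait once the LWP is out, e.g. (sketch, see wait_1):

     int wstat;
     if (dequeue_one_deferred_signal (lwp, &wstat))
       ... report WSTOPSIG (wstat) to GDB as usual ...
*/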
2166
2167 bool
2168 linux_process_target::check_stopped_by_watchpoint (lwp_info *child)
2169 {
2170 scoped_restore_current_thread restore_thread;
2171 switch_to_thread (get_lwp_thread (child));
2172
2173 if (low_stopped_by_watchpoint ())
2174 {
2175 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2176 child->stopped_data_address = low_stopped_data_address ();
2177 }
2178
2179 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2180 }
2181
2182 bool
2183 linux_process_target::low_stopped_by_watchpoint ()
2184 {
2185 return false;
2186 }
2187
2188 CORE_ADDR
2189 linux_process_target::low_stopped_data_address ()
2190 {
2191 return 0;
2192 }
2193
2194 /* Return the ptrace options that we want to try to enable. */
2195
2196 static int
2197 linux_low_ptrace_options (int attached)
2198 {
2199 client_state &cs = get_client_state ();
2200 int options = 0;
2201
2202 if (!attached)
2203 options |= PTRACE_O_EXITKILL;
2204
2205 if (cs.report_fork_events)
2206 options |= PTRACE_O_TRACEFORK;
2207
2208 if (cs.report_vfork_events)
2209 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2210
2211 if (cs.report_exec_events)
2212 options |= PTRACE_O_TRACEEXEC;
2213
2214 options |= PTRACE_O_TRACESYSGOOD;
2215
2216 return options;
2217 }
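/* Sketch of how these options are applied once an LWP stops for the
   first time (this is what filter_event does below):

     int options = linux_low_ptrace_options (proc->attached);
     linux_enable_event_reporting (lwpid, options);
*/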
2218
2219 void
2220 linux_process_target::filter_event (int lwpid, int wstat)
2221 {
2222 client_state &cs = get_client_state ();
2223 struct lwp_info *child;
2224 struct thread_info *thread;
2225 int have_stop_pc = 0;
2226
2227 child = find_lwp_pid (ptid_t (lwpid));
2228
2229 /* Check for events reported by anything not in our LWP list. */
2230 if (child == nullptr)
2231 {
2232 if (WIFSTOPPED (wstat))
2233 {
2234 if (WSTOPSIG (wstat) == SIGTRAP
2235 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2236 {
2237 /* A non-leader thread exec'ed after we've seen the
2238 leader zombie, and removed it from our lists (in
2239 check_zombie_leaders). The non-leader thread changes
2240 its tid to the tgid. */
2241 threads_debug_printf
2242 ("Re-adding thread group leader LWP %d after exec.",
2243 lwpid);
2244
2245 child = add_lwp (ptid_t (lwpid, lwpid));
2246 child->stopped = 1;
2247 switch_to_thread (child->thread);
2248 }
2249 else
2250 {
2251 /* A process we are controlling has forked and the new
2252 child's stop was reported to us by the kernel. Save
2253 its PID and go back to waiting for the fork event to
2254 be reported - the stopped process might be returned
2255 from waitpid before or after the fork event is. */
2256 threads_debug_printf
2257 ("Saving LWP %d status %s in stopped_pids list",
2258 lwpid, status_to_str (wstat).c_str ());
2259 add_to_pid_list (&stopped_pids, lwpid, wstat);
2260 }
2261 }
2262 else
2263 {
2264 /* Don't report an event for the exit of an LWP not in our
2265 list, i.e. not part of any inferior we're debugging.
2266 This can happen if we detach from a program we originally
2267 forked and then it exits. However, note that we may have
2268 earlier deleted a leader of an inferior we're debugging,
2269 in check_zombie_leaders. Re-add it back here if so. */
2270 find_process ([&] (process_info *proc)
2271 {
2272 if (proc->pid == lwpid)
2273 {
2274 threads_debug_printf
2275 ("Re-adding thread group leader LWP %d after exit.",
2276 lwpid);
2277
2278 child = add_lwp (ptid_t (lwpid, lwpid));
2279 return true;
2280 }
2281 return false;
2282 });
2283 }
2284
2285 if (child == nullptr)
2286 return;
2287 }
2288
2289 thread = get_lwp_thread (child);
2290
2291 child->stopped = 1;
2292
2293 child->last_status = wstat;
2294
2295 /* Check if the thread has exited. */
2296 if (WIFEXITED (wstat) || WIFSIGNALED (wstat))
2297 {
2298 threads_debug_printf ("%d exited", lwpid);
2299
2300 if (finish_step_over (child))
2301 {
2302 /* Unsuspend all other LWPs, and set them back running again. */
2303 unsuspend_all_lwps (child);
2304 }
2305
2306 /* If this is not the leader LWP, then the exit signal was not
2307 the end of the debugged application and should be ignored,
2308 unless GDB wants to hear about thread exits. */
2309 if (cs.report_thread_events || is_leader (thread))
2310 {
2311 /* Since events are serialized to the GDB core, we can't
2312 report this one right now. Leave the status pending for
2313 the next time we're able to report it. */
2314 mark_lwp_dead (child, wstat);
2315 return;
2316 }
2317 else
2318 {
2319 delete_lwp (child);
2320 return;
2321 }
2322 }
2323
2324 gdb_assert (WIFSTOPPED (wstat));
2325
2326 if (WIFSTOPPED (wstat))
2327 {
2328 struct process_info *proc;
2329
2330 /* Architecture-specific setup after inferior is running. */
2331 proc = find_process_pid (pid_of (thread));
2332 if (proc->tdesc == NULL)
2333 {
2334 if (proc->attached)
2335 {
2336 /* This needs to happen after we have attached to the
2337 inferior and it is stopped for the first time, but
2338 before we access any inferior registers. */
2339 arch_setup_thread (thread);
2340 }
2341 else
2342 {
2343 /* The process is started, but GDBserver will do
2344 architecture-specific setup after the program stops at
2345 the first instruction. */
2346 child->status_pending_p = 1;
2347 child->status_pending = wstat;
2348 return;
2349 }
2350 }
2351 }
2352
2353 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2354 {
2355 struct process_info *proc = find_process_pid (pid_of (thread));
2356 int options = linux_low_ptrace_options (proc->attached);
2357
2358 linux_enable_event_reporting (lwpid, options);
2359 child->must_set_ptrace_flags = 0;
2360 }
2361
2362 /* Always update syscall_state, even if it will be filtered later. */
2363 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2364 {
2365 child->syscall_state
2366 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2367 ? TARGET_WAITKIND_SYSCALL_RETURN
2368 : TARGET_WAITKIND_SYSCALL_ENTRY);
2369 }
2370 else
2371 {
2372 /* Almost all other ptrace-stops are known to be outside of system
2373 calls, with further exceptions in handle_extended_wait. */
2374 child->syscall_state = TARGET_WAITKIND_IGNORE;
2375 }
2376
2377 /* Be careful to not overwrite stop_pc until save_stop_reason is
2378 called. */
2379 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2380 && linux_is_extended_waitstatus (wstat))
2381 {
2382 child->stop_pc = get_pc (child);
2383 if (handle_extended_wait (&child, wstat))
2384 {
2385 /* The event has been handled, so just return without
2386 reporting it. */
2387 return;
2388 }
2389 }
2390
2391 if (linux_wstatus_maybe_breakpoint (wstat))
2392 {
2393 if (save_stop_reason (child))
2394 have_stop_pc = 1;
2395 }
2396
2397 if (!have_stop_pc)
2398 child->stop_pc = get_pc (child);
2399
2400 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2401 && child->stop_expected)
2402 {
2403 threads_debug_printf ("Expected stop.");
2404
2405 child->stop_expected = 0;
2406
2407 if (thread->last_resume_kind == resume_stop)
2408 {
2409 /* We want to report the stop to the core. Treat the
2410 SIGSTOP as a normal event. */
2411 threads_debug_printf ("resume_stop SIGSTOP caught for %s.",
2412 target_pid_to_str (ptid_of (thread)).c_str ());
2413 }
2414 else if (stopping_threads != NOT_STOPPING_THREADS)
2415 {
2416 /* Stopping threads. We don't want this SIGSTOP to end up
2417 pending. */
2418 threads_debug_printf ("SIGSTOP caught for %s while stopping threads.",
2419 target_pid_to_str (ptid_of (thread)).c_str ());
2420 return;
2421 }
2422 else
2423 {
2424 /* This is a delayed SIGSTOP. Filter out the event. */
2425 threads_debug_printf ("%s %s, 0, 0 (discard delayed SIGSTOP)",
2426 child->stepping ? "step" : "continue",
2427 target_pid_to_str (ptid_of (thread)).c_str ());
2428
2429 resume_one_lwp (child, child->stepping, 0, NULL);
2430 return;
2431 }
2432 }
2433
2434 child->status_pending_p = 1;
2435 child->status_pending = wstat;
2436 return;
2437 }
2438
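/* Return true if THREAD should use hardware single-step; for software
   single-step targets, assert that the single-step breakpoints are in
   place and return false. */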
2439 bool
2440 linux_process_target::maybe_hw_step (thread_info *thread)
2441 {
2442 if (supports_hardware_single_step ())
2443 return true;
2444 else
2445 {
2446 /* GDBserver must insert a single-step breakpoint for software
2447 single step. */
2448 gdb_assert (has_single_step_breakpoints (thread));
2449 return false;
2450 }
2451 }
2452
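/* Resume LWPs that are currently stopped without any pending status
   to report, but are resumed from the core's perspective. */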
2453 void
2454 linux_process_target::resume_stopped_resumed_lwps (thread_info *thread)
2455 {
2456 struct lwp_info *lp = get_thread_lwp (thread);
2457
2458 if (lp->stopped
2459 && !lp->suspended
2460 && !lp->status_pending_p
2461 && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
2462 {
2463 int step = 0;
2464
2465 if (thread->last_resume_kind == resume_step)
2466 {
2467 if (supports_software_single_step ())
2468 install_software_single_step_breakpoints (lp);
2469
2470 step = maybe_hw_step (thread);
2471 }
2472
2473 threads_debug_printf ("resuming stopped-resumed LWP %s at %s: step=%d",
2474 target_pid_to_str (ptid_of (thread)).c_str (),
2475 paddress (lp->stop_pc), step);
2476
2477 resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2478 }
2479 }
2480
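/* Wait for an event from any child matching WAIT_PTID, leaving events
   for threads that do not match FILTER_PTID pending. Return the lwpid
   of the LWP whose event is reported, -1 if there are no unwaited-for
   resumed children left, or 0 if WNOHANG was set and no event was
   found. */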
2481 int
2482 linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
2483 ptid_t filter_ptid,
2484 int *wstatp, int options)
2485 {
2486 struct thread_info *event_thread;
2487 struct lwp_info *event_child, *requested_child;
2488 sigset_t block_mask, prev_mask;
2489
2490 retry:
2491 /* N.B. event_thread points to the thread_info struct that contains
2492 event_child. Keep them in sync. */
2493 event_thread = NULL;
2494 event_child = NULL;
2495 requested_child = NULL;
2496
2497 /* Check for a lwp with a pending status. */
2498
2499 if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
2500 {
2501 event_thread = find_thread_in_random ([&] (thread_info *thread)
2502 {
2503 return status_pending_p_callback (thread, filter_ptid);
2504 });
2505
2506 if (event_thread != NULL)
2507 {
2508 event_child = get_thread_lwp (event_thread);
2509 threads_debug_printf ("Got a pending child %ld", lwpid_of (event_thread));
2510 }
2511 }
2512 else if (filter_ptid != null_ptid)
2513 {
2514 requested_child = find_lwp_pid (filter_ptid);
2515
2516 if (stopping_threads == NOT_STOPPING_THREADS
2517 && requested_child->status_pending_p
2518 && (requested_child->collecting_fast_tracepoint
2519 != fast_tpoint_collect_result::not_collecting))
2520 {
2521 enqueue_one_deferred_signal (requested_child,
2522 &requested_child->status_pending);
2523 requested_child->status_pending_p = 0;
2524 requested_child->status_pending = 0;
2525 resume_one_lwp (requested_child, 0, 0, NULL);
2526 }
2527
2528 if (requested_child->suspended
2529 && requested_child->status_pending_p)
2530 {
2531 internal_error ("requesting an event out of a"
2532 " suspended child?");
2533 }
2534
2535 if (requested_child->status_pending_p)
2536 {
2537 event_child = requested_child;
2538 event_thread = get_lwp_thread (event_child);
2539 }
2540 }
2541
2542 if (event_child != NULL)
2543 {
2544 threads_debug_printf ("Got an event from pending child %ld (%04x)",
2545 lwpid_of (event_thread),
2546 event_child->status_pending);
2547
2548 *wstatp = event_child->status_pending;
2549 event_child->status_pending_p = 0;
2550 event_child->status_pending = 0;
2551 switch_to_thread (event_thread);
2552 return lwpid_of (event_thread);
2553 }
2554
2555 /* But if we don't find a pending event, we'll have to wait.
2556
2557 We only enter this loop if no process has a pending wait status.
2558 Thus any action taken in response to a wait status inside this
2559 loop is responding as soon as we detect the status, not after any
2560 pending events. */
2561
2562 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2563 all signals while here. */
2564 sigfillset (&block_mask);
2565 gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);
2566
2567 /* Always pull all events out of the kernel. We'll randomly select
2568 an event LWP out of all that have events, to prevent
2569 starvation. */
2570 while (event_child == NULL)
2571 {
2572 pid_t ret = 0;
2573
2574 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2575 quirks:
2576
2577 - If the thread group leader exits while other threads in the
2578 thread group still exist, waitpid(TGID, ...) hangs. That
2579 waitpid won't return an exit status until the other threads
2580 in the group are reaped.
2581
2582 - When a non-leader thread execs, that thread just vanishes
2583 without reporting an exit (so we'd hang if we waited for it
2584 explicitly in that case). The exec event is reported to
2585 the TGID pid. */
2586 errno = 0;
2587 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2588
2589 threads_debug_printf ("waitpid(-1, ...) returned %d, %s",
2590 ret, errno ? safe_strerror (errno) : "ERRNO-OK");
2591
2592 if (ret > 0)
2593 {
2594 threads_debug_printf ("waitpid %ld received %s",
2595 (long) ret, status_to_str (*wstatp).c_str ());
2596
2597 /* Filter all events. IOW, leave all events pending. We'll
2598 randomly select an event LWP out of all that have events
2599 below. */
2600 filter_event (ret, *wstatp);
2601 /* Retry until nothing comes out of waitpid. A single
2602 SIGCHLD can indicate more than one child stopped. */
2603 continue;
2604 }
2605
2606 /* Now that we've pulled all events out of the kernel, resume
2607 LWPs that don't have an interesting event to report. */
2608 if (stopping_threads == NOT_STOPPING_THREADS)
2609 for_each_thread ([this] (thread_info *thread)
2610 {
2611 resume_stopped_resumed_lwps (thread);
2612 });
2613
2614 /* ... and find an LWP with a status to report to the core, if
2615 any. */
2616 event_thread = find_thread_in_random ([&] (thread_info *thread)
2617 {
2618 return status_pending_p_callback (thread, filter_ptid);
2619 });
2620
2621 if (event_thread != NULL)
2622 {
2623 event_child = get_thread_lwp (event_thread);
2624 *wstatp = event_child->status_pending;
2625 event_child->status_pending_p = 0;
2626 event_child->status_pending = 0;
2627 break;
2628 }
2629
2630 /* Check for zombie thread group leaders. Those can't be reaped
2631 until all other threads in the thread group are. */
2632 check_zombie_leaders ();
2633
2634 auto not_stopped = [&] (thread_info *thread)
2635 {
2636 return not_stopped_callback (thread, wait_ptid);
2637 };
2638
2639 /* If there are no resumed children left in the set of LWPs we
2640 want to wait for, bail. We can't just block in
2641 waitpid/sigsuspend, because lwps might have been left stopped
2642 in trace-stop state, and we'd be stuck forever waiting for
2643 their status to change (which would only happen if we resumed
2644 them). Even if WNOHANG is set, this return code is preferred
2645 over 0 (below), as it is more detailed. */
2646 if (find_thread (not_stopped) == NULL)
2647 {
2648 threads_debug_printf ("exit (no unwaited-for LWP)");
2649
2650 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2651 return -1;
2652 }
2653
2654 /* No interesting event to report to the caller. */
2655 if ((options & WNOHANG))
2656 {
2657 threads_debug_printf ("WNOHANG set, no event found");
2658
2659 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2660 return 0;
2661 }
2662
2663 /* Block until we get an event reported with SIGCHLD. */
2664 threads_debug_printf ("sigsuspend'ing");
2665
2666 sigsuspend (&prev_mask);
2667 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2668 goto retry;
2669 }
2670
2671 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2672
2673 switch_to_thread (event_thread);
2674
2675 return lwpid_of (event_thread);
2676 }
2677
2678 int
2679 linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
2680 {
2681 return wait_for_event_filtered (ptid, ptid, wstatp, options);
2682 }
2683
2684 /* Select one LWP out of those that have events pending. */
2685
2686 static void
2687 select_event_lwp (struct lwp_info **orig_lp)
2688 {
2689 struct thread_info *event_thread = NULL;
2690
2691 /* In all-stop, give preference to the LWP that is being
2692 single-stepped. There will be at most one, and it's the LWP that
2693 the core is most interested in. If we didn't do this, then we'd
2694 have to handle pending step SIGTRAPs somehow in case the core
2695 later continues the previously-stepped thread, otherwise we'd
2696 report the pending SIGTRAP, and the core, not having stepped the
2697 thread, wouldn't understand what the trap was for, and therefore
2698 would report it to the user as a random signal. */
2699 if (!non_stop)
2700 {
2701 event_thread = find_thread ([] (thread_info *thread)
2702 {
2703 lwp_info *lp = get_thread_lwp (thread);
2704
2705 return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
2706 && thread->last_resume_kind == resume_step
2707 && lp->status_pending_p);
2708 });
2709
2710 if (event_thread != NULL)
2711 threads_debug_printf
2712 ("Select single-step %s",
2713 target_pid_to_str (ptid_of (event_thread)).c_str ());
2714 }
2715 if (event_thread == NULL)
2716 {
2717 /* No single-stepping LWP. Select one at random, out of those
2718 which have had events. */
2719
2720 event_thread = find_thread_in_random ([&] (thread_info *thread)
2721 {
2722 lwp_info *lp = get_thread_lwp (thread);
2723
2724 /* Only resumed LWPs that have an event pending. */
2725 return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
2726 && lp->status_pending_p);
2727 });
2728 }
2729
2730 if (event_thread != NULL)
2731 {
2732 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2733
2734 /* Switch the event LWP. */
2735 *orig_lp = event_lp;
2736 }
2737 }
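/* Note that after select_event_lwp switches *ORIG_LP, the caller is
   responsible for keeping current_thread in sync, as wait_1 does
   (sketch):

     select_event_lwp (&event_child);
     switch_to_thread (get_lwp_thread (event_child));
*/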
2738
2739 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2740 non-NULL. */
2741
2742 static void
2743 unsuspend_all_lwps (struct lwp_info *except)
2744 {
2745 for_each_thread ([&] (thread_info *thread)
2746 {
2747 lwp_info *lwp = get_thread_lwp (thread);
2748
2749 if (lwp != except)
2750 lwp_suspended_decr (lwp);
2751 });
2752 }
2753
2754 static bool lwp_running (thread_info *thread);
2755
2756 /* Stabilize threads (move out of jump pads).
2757
2758 If a thread is midway collecting a fast tracepoint, we need to
2759 finish the collection and move it out of the jump pad before
2760 reporting the signal.
2761
2762 This avoids recursion while collecting (when a signal arrives
2763 midway, and the signal handler itself collects), which would trash
2764 the trace buffer. In case the user set a breakpoint in a signal
2765 handler, this avoids the backtrace showing the jump pad, etc..
2766 Most importantly, there are certain things we can't do safely if
2767 threads are stopped in a jump pad (or in its callee's). For
2768 example:
2769
2770 - starting a new trace run. A thread still collecting the
2771 previous run could trash the trace buffer when resumed. The trace
2772 buffer control structures would have been reset but the thread had
2773 no way to tell. The thread could even be midway through memcpy'ing
2774 to the buffer, which would mean that when resumed, it would clobber
2775 the trace buffer that had been set up for the new run.
2776
2777 - we can't rewrite/reuse the jump pads for new tracepoints
2778 safely. Say you do tstart while a thread is stopped midway while
2779 collecting. When the thread is later resumed, it finishes the
2780 collection, and returns to the jump pad, to execute the original
2781 instruction that was under the tracepoint jump at the time the
2782 older run had been started. If the jump pad had been rewritten
2783 since, for something else in the new run, the thread would now
2784 execute the wrong or random instructions. */
2785
2786 void
2787 linux_process_target::stabilize_threads ()
2788 {
2789 thread_info *thread_stuck = find_thread ([this] (thread_info *thread)
2790 {
2791 return stuck_in_jump_pad (thread);
2792 });
2793
2794 if (thread_stuck != NULL)
2795 {
2796 threads_debug_printf ("can't stabilize, LWP %ld is stuck in jump pad",
2797 lwpid_of (thread_stuck));
2798 return;
2799 }
2800
2801 scoped_restore_current_thread restore_thread;
2802
2803 stabilizing_threads = 1;
2804
2805 /* Kick 'em all. */
2806 for_each_thread ([this] (thread_info *thread)
2807 {
2808 move_out_of_jump_pad (thread);
2809 });
2810
2811 /* Loop until all are stopped out of the jump pads. */
2812 while (find_thread (lwp_running) != NULL)
2813 {
2814 struct target_waitstatus ourstatus;
2815 struct lwp_info *lwp;
2816 int wstat;
2817
2818 /* Note that we go through the full wait event loop. While
2819 moving threads out of jump pad, we need to be able to step
2820 over internal breakpoints and such. */
2821 wait_1 (minus_one_ptid, &ourstatus, 0);
2822
2823 if (ourstatus.kind () == TARGET_WAITKIND_STOPPED)
2824 {
2825 lwp = get_thread_lwp (current_thread);
2826
2827 /* Lock it. */
2828 lwp_suspended_inc (lwp);
2829
2830 if (ourstatus.sig () != GDB_SIGNAL_0
2831 || current_thread->last_resume_kind == resume_stop)
2832 {
2833 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.sig ()));
2834 enqueue_one_deferred_signal (lwp, &wstat);
2835 }
2836 }
2837 }
2838
2839 unsuspend_all_lwps (NULL);
2840
2841 stabilizing_threads = 0;
2842
2843 if (debug_threads)
2844 {
2845 thread_stuck = find_thread ([this] (thread_info *thread)
2846 {
2847 return stuck_in_jump_pad (thread);
2848 });
2849
2850 if (thread_stuck != NULL)
2851 threads_debug_printf
2852 ("couldn't stabilize, LWP %ld got stuck in jump pad",
2853 lwpid_of (thread_stuck));
2854 }
2855 }
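/* stabilize_threads is driven from wait_1: just before reporting a
   stop in all-stop mode it does (sketch):

     if (!non_stop)
       target_stabilize_threads ();
*/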
2856
2857 /* Convenience function that is called when the kernel reports an
2858 event that is not passed out to GDB. */
2859
2860 static ptid_t
2861 ignore_event (struct target_waitstatus *ourstatus)
2862 {
2863 /* If we got an event, there may still be others, as a single
2864 SIGCHLD can indicate more than one child stopped. This forces
2865 another target_wait call. */
2866 async_file_mark ();
2867
2868 ourstatus->set_ignore ();
2869 return null_ptid;
2870 }
2871
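/* If the thread whose exit EVENT_CHILD reports is not the leader,
   turn the event into a thread exit (or swallow it entirely if GDB
   did not ask for thread events) and delete the LWP. Return the ptid
   the resulting status applies to. */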
2872 ptid_t
2873 linux_process_target::filter_exit_event (lwp_info *event_child,
2874 target_waitstatus *ourstatus)
2875 {
2876 client_state &cs = get_client_state ();
2877 struct thread_info *thread = get_lwp_thread (event_child);
2878 ptid_t ptid = ptid_of (thread);
2879
2880 if (!is_leader (thread))
2881 {
2882 if (cs.report_thread_events)
2883 ourstatus->set_thread_exited (0);
2884 else
2885 ourstatus->set_ignore ();
2886
2887 delete_lwp (event_child);
2888 }
2889 return ptid;
2890 }
2891
2892 /* Returns 1 if GDB is interested in any event_child syscalls. */
2893
2894 static int
2895 gdb_catching_syscalls_p (struct lwp_info *event_child)
2896 {
2897 struct thread_info *thread = get_lwp_thread (event_child);
2898 struct process_info *proc = get_thread_process (thread);
2899
2900 return !proc->syscalls_to_catch.empty ();
2901 }
2902
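/* Return true if GDB should be notified about the syscall that
   EVENT_CHILD just entered or returned from. */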
2903 bool
2904 linux_process_target::gdb_catch_this_syscall (lwp_info *event_child)
2905 {
2906 int sysno;
2907 struct thread_info *thread = get_lwp_thread (event_child);
2908 struct process_info *proc = get_thread_process (thread);
2909
2910 if (proc->syscalls_to_catch.empty ())
2911 return false;
2912
2913 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
2914 return true;
2915
2916 get_syscall_trapinfo (event_child, &sysno);
2917
2918 for (int iter : proc->syscalls_to_catch)
2919 if (iter == sysno)
2920 return true;
2921
2922 return false;
2923 }
2924
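/* The bulk of linux_process_target::wait: wait for and filter one
   event, decide whether it must be reported to GDB, and fill in
   OURSTATUS accordingly. Return the ptid of the thread the status
   applies to, or null_ptid if the event was ignored. */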
2925 ptid_t
2926 linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
2927 target_wait_flags target_options)
2928 {
2929 THREADS_SCOPED_DEBUG_ENTER_EXIT;
2930
2931 client_state &cs = get_client_state ();
2932 int w;
2933 struct lwp_info *event_child;
2934 int options;
2935 int pid;
2936 int step_over_finished;
2937 int bp_explains_trap;
2938 int maybe_internal_trap;
2939 int report_to_gdb;
2940 int trace_event;
2941 int in_step_range;
2942 int any_resumed;
2943
2944 threads_debug_printf ("[%s]", target_pid_to_str (ptid).c_str ());
2945
2946 /* Translate generic target options into linux options. */
2947 options = __WALL;
2948 if (target_options & TARGET_WNOHANG)
2949 options |= WNOHANG;
2950
2951 bp_explains_trap = 0;
2952 trace_event = 0;
2953 in_step_range = 0;
2954 ourstatus->set_ignore ();
2955
2956 auto status_pending_p_any = [&] (thread_info *thread)
2957 {
2958 return status_pending_p_callback (thread, minus_one_ptid);
2959 };
2960
2961 auto not_stopped = [&] (thread_info *thread)
2962 {
2963 return not_stopped_callback (thread, minus_one_ptid);
2964 };
2965
2966 /* Find a resumed LWP, if any. */
2967 if (find_thread (status_pending_p_any) != NULL)
2968 any_resumed = 1;
2969 else if (find_thread (not_stopped) != NULL)
2970 any_resumed = 1;
2971 else
2972 any_resumed = 0;
2973
2974 if (step_over_bkpt == null_ptid)
2975 pid = wait_for_event (ptid, &w, options);
2976 else
2977 {
2978 threads_debug_printf ("step_over_bkpt set [%s], doing a blocking wait",
2979 target_pid_to_str (step_over_bkpt).c_str ());
2980 pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2981 }
2982
2983 if (pid == 0 || (pid == -1 && !any_resumed))
2984 {
2985 gdb_assert (target_options & TARGET_WNOHANG);
2986
2987 threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_IGNORE");
2988
2989 ourstatus->set_ignore ();
2990 return null_ptid;
2991 }
2992 else if (pid == -1)
2993 {
2994 threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_NO_RESUMED");
2995
2996 ourstatus->set_no_resumed ();
2997 return null_ptid;
2998 }
2999
3000 event_child = get_thread_lwp (current_thread);
3001
3002 /* wait_for_event only returns an exit status for the last
3003 child of a process. Report it. */
3004 if (WIFEXITED (w) || WIFSIGNALED (w))
3005 {
3006 if (WIFEXITED (w))
3007 {
3008 ourstatus->set_exited (WEXITSTATUS (w));
3009
3010 threads_debug_printf
3011 ("ret = %s, exited with retcode %d",
3012 target_pid_to_str (ptid_of (current_thread)).c_str (),
3013 WEXITSTATUS (w));
3014 }
3015 else
3016 {
3017 ourstatus->set_signalled (gdb_signal_from_host (WTERMSIG (w)));
3018
3019 threads_debug_printf
3020 ("ret = %s, terminated with signal %d",
3021 target_pid_to_str (ptid_of (current_thread)).c_str (),
3022 WTERMSIG (w));
3023 }
3024
3025 if (ourstatus->kind () == TARGET_WAITKIND_EXITED)
3026 return filter_exit_event (event_child, ourstatus);
3027
3028 return ptid_of (current_thread);
3029 }
3030
3031 /* If a step-over executes a breakpoint instruction, then in the
3032 case of a hardware single step it means a gdb/gdbserver breakpoint
3033 had been planted on top of a permanent breakpoint, while in the
3034 case of a software single step it may just mean that gdbserver hit
3035 the reinsert breakpoint. The PC has been adjusted by
3036 save_stop_reason to point at the breakpoint address.
3037 So for a hardware single step, advance the PC manually past the
3038 breakpoint; for a software single step, advance only if it's not
3039 the single_step_breakpoint we are hitting.
3040 This avoids the program trapping a permanent breakpoint
3041 forever. */
3042 if (step_over_bkpt != null_ptid
3043 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3044 && (event_child->stepping
3045 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
3046 {
3047 int increment_pc = 0;
3048 int breakpoint_kind = 0;
3049 CORE_ADDR stop_pc = event_child->stop_pc;
3050
3051 breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
3052 sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3053
3054 threads_debug_printf
3055 ("step-over for %s executed software breakpoint",
3056 target_pid_to_str (ptid_of (current_thread)).c_str ());
3057
3058 if (increment_pc != 0)
3059 {
3060 struct regcache *regcache
3061 = get_thread_regcache (current_thread, 1);
3062
3063 event_child->stop_pc += increment_pc;
3064 low_set_pc (regcache, event_child->stop_pc);
3065
3066 if (!low_breakpoint_at (event_child->stop_pc))
3067 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3068 }
3069 }
3070
3071 /* If this event was not handled before, and is not a SIGTRAP, we
3072 report it. SIGILL and SIGSEGV are also treated as traps in case
3073 a breakpoint is inserted at the current PC. If this target does
3074 not support internal breakpoints at all, we also report the
3075 SIGTRAP without further processing; it's of no concern to us. */
3076 maybe_internal_trap
3077 = (low_supports_breakpoints ()
3078 && (WSTOPSIG (w) == SIGTRAP
3079 || ((WSTOPSIG (w) == SIGILL
3080 || WSTOPSIG (w) == SIGSEGV)
3081 && low_breakpoint_at (event_child->stop_pc))));
3082
3083 if (maybe_internal_trap)
3084 {
3085 /* Handle anything that requires bookkeeping before deciding to
3086 report the event or continue waiting. */
3087
3088 /* First check if we can explain the SIGTRAP with an internal
3089 breakpoint, or if we should possibly report the event to GDB.
3090 Do this before anything that may remove or insert a
3091 breakpoint. */
3092 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3093
3094 /* We have a SIGTRAP, possibly a step-over dance has just
3095 finished. If so, tweak the state machine accordingly,
3096 reinsert breakpoints and delete any single-step
3097 breakpoints. */
3098 step_over_finished = finish_step_over (event_child);
3099
3100 /* Now invoke the callbacks of any internal breakpoints there. */
3101 check_breakpoints (event_child->stop_pc);
3102
3103 /* Handle tracepoint data collecting. This may overflow the
3104 trace buffer, and cause a tracing stop, removing
3105 breakpoints. */
3106 trace_event = handle_tracepoints (event_child);
3107
3108 if (bp_explains_trap)
3109 threads_debug_printf ("Hit a gdbserver breakpoint.");
3110 }
3111 else
3112 {
3113 /* We have some other signal, possibly a step-over dance was in
3114 progress, and it should be cancelled too. */
3115 step_over_finished = finish_step_over (event_child);
3116 }
3117
3118 /* We have all the data we need. Either report the event to GDB, or
3119 resume threads and keep waiting for more. */
3120
3121 /* If we're collecting a fast tracepoint, finish the collection and
3122 move out of the jump pad before delivering a signal. See
3123 linux_stabilize_threads. */
3124
3125 if (WIFSTOPPED (w)
3126 && WSTOPSIG (w) != SIGTRAP
3127 && supports_fast_tracepoints ()
3128 && agent_loaded_p ())
3129 {
3130 threads_debug_printf ("Got signal %d for LWP %ld. Check if we need "
3131 "to defer or adjust it.",
3132 WSTOPSIG (w), lwpid_of (current_thread));
3133
3134 /* Allow debugging the jump pad itself. */
3135 if (current_thread->last_resume_kind != resume_step
3136 && maybe_move_out_of_jump_pad (event_child, &w))
3137 {
3138 enqueue_one_deferred_signal (event_child, &w);
3139
3140 threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad)",
3141 WSTOPSIG (w), lwpid_of (current_thread));
3142
3143 resume_one_lwp (event_child, 0, 0, NULL);
3144
3145 return ignore_event (ourstatus);
3146 }
3147 }
3148
3149 if (event_child->collecting_fast_tracepoint
3150 != fast_tpoint_collect_result::not_collecting)
3151 {
3152 threads_debug_printf
3153 ("LWP %ld was trying to move out of the jump pad (%d). "
3154 "Check if we're already there.",
3155 lwpid_of (current_thread),
3156 (int) event_child->collecting_fast_tracepoint);
3157
3158 trace_event = 1;
3159
3160 event_child->collecting_fast_tracepoint
3161 = linux_fast_tracepoint_collecting (event_child, NULL);
3162
3163 if (event_child->collecting_fast_tracepoint
3164 != fast_tpoint_collect_result::before_insn)
3165 {
3166 /* No longer need this breakpoint. */
3167 if (event_child->exit_jump_pad_bkpt != NULL)
3168 {
3169 threads_debug_printf
3170 ("No longer need exit-jump-pad bkpt; removing it."
3171 "stopping all threads momentarily.");
3172
3173 /* Other running threads could hit this breakpoint.
3174 We don't handle moribund locations like GDB does;
3175 instead we always pause all threads when removing
3176 breakpoints, so that any step-over or
3177 decr_pc_after_break adjustment is always taken
3178 care of while the breakpoint is still
3179 inserted. */
3180 stop_all_lwps (1, event_child);
3181
3182 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3183 event_child->exit_jump_pad_bkpt = NULL;
3184
3185 unstop_all_lwps (1, event_child);
3186
3187 gdb_assert (event_child->suspended >= 0);
3188 }
3189 }
3190
3191 if (event_child->collecting_fast_tracepoint
3192 == fast_tpoint_collect_result::not_collecting)
3193 {
3194 threads_debug_printf
3195 ("fast tracepoint finished collecting successfully.");
3196
3197 /* We may have a deferred signal to report. */
3198 if (dequeue_one_deferred_signal (event_child, &w))
3199 threads_debug_printf ("dequeued one signal.");
3200 else
3201 {
3202 threads_debug_printf ("no deferred signals.");
3203
3204 if (stabilizing_threads)
3205 {
3206 ourstatus->set_stopped (GDB_SIGNAL_0);
3207
3208 threads_debug_printf
3209 ("ret = %s, stopped while stabilizing threads",
3210 target_pid_to_str (ptid_of (current_thread)).c_str ());
3211
3212 return ptid_of (current_thread);
3213 }
3214 }
3215 }
3216 }
3217
3218 /* Check whether GDB would be interested in this event. */
3219
3220 /* Check if GDB is interested in this syscall. */
3221 if (WIFSTOPPED (w)
3222 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3223 && !gdb_catch_this_syscall (event_child))
3224 {
3225 threads_debug_printf ("Ignored syscall for LWP %ld.",
3226 lwpid_of (current_thread));
3227
3228 resume_one_lwp (event_child, event_child->stepping, 0, NULL);
3229
3230 return ignore_event (ourstatus);
3231 }
3232
3233 /* If GDB is not interested in this signal, don't stop other
3234 threads, and don't report it to GDB. Just resume the inferior
3235 right away. We do this for threading-related signals as well as
3236 any that GDB specifically requested we ignore. But never ignore
3237 SIGSTOP if we sent it ourselves, and do not ignore signals when
3238 stepping - they may require special handling to skip the signal
3239 handler. Also never ignore signals that could be caused by a
3240 breakpoint. */
3241 if (WIFSTOPPED (w)
3242 && current_thread->last_resume_kind != resume_step
3243 && (
3244 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3245 (current_process ()->priv->thread_db != NULL
3246 && (WSTOPSIG (w) == __SIGRTMIN
3247 || WSTOPSIG (w) == __SIGRTMIN + 1))
3248 ||
3249 #endif
3250 (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3251 && !(WSTOPSIG (w) == SIGSTOP
3252 && current_thread->last_resume_kind == resume_stop)
3253 && !linux_wstatus_maybe_breakpoint (w))))
3254 {
3255 siginfo_t info, *info_p;
3256
3257 threads_debug_printf ("Ignored signal %d for LWP %ld.",
3258 WSTOPSIG (w), lwpid_of (current_thread));
3259
3260 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3261 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3262 info_p = &info;
3263 else
3264 info_p = NULL;
3265
3266 if (step_over_finished)
3267 {
3268 /* We cancelled this thread's step-over above. We still
3269 need to unsuspend all other LWPs, and set them back
3270 running again while the signal handler runs. */
3271 unsuspend_all_lwps (event_child);
3272
3273 /* Enqueue the pending signal info so that proceed_all_lwps
3274 doesn't lose it. */
3275 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3276
3277 proceed_all_lwps ();
3278 }
3279 else
3280 {
3281 resume_one_lwp (event_child, event_child->stepping,
3282 WSTOPSIG (w), info_p);
3283 }
3284
3285 return ignore_event (ourstatus);
3286 }
3287
3288 /* Note that all addresses are always "out of the step range" when
3289 there's no range to begin with. */
3290 in_step_range = lwp_in_step_range (event_child);
3291
3292 /* If GDB wanted this thread to single step, and the thread is out
3293 of the step range, we always want to report the SIGTRAP, and let
3294 GDB handle it. Watchpoints should always be reported. So should
3295 signals we can't explain. A SIGTRAP we can't explain could be a
3296 GDB breakpoint --- we may or may not support Z0 breakpoints. If
3297 we do, we'd be able to handle GDB breakpoints on top of internal
3298 breakpoints, by handling the internal breakpoint and still
3299 reporting the event to GDB. If we don't, we're out of luck, GDB
3300 won't see the breakpoint hit. If we see a single-step event but
3301 the thread should be continuing, don't pass the trap to gdb.
3302 That indicates that we had previously finished a single-step but
3303 left the single-step pending -- see
3304 complete_ongoing_step_over. */
3305 report_to_gdb = (!maybe_internal_trap
3306 || (current_thread->last_resume_kind == resume_step
3307 && !in_step_range)
3308 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3309 || (!in_step_range
3310 && !bp_explains_trap
3311 && !trace_event
3312 && !step_over_finished
3313 && !(current_thread->last_resume_kind == resume_continue
3314 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3315 || (gdb_breakpoint_here (event_child->stop_pc)
3316 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3317 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3318 || event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE);
3319
3320 run_breakpoint_commands (event_child->stop_pc);
3321
3322 /* We found no reason GDB would want us to stop. We either hit one
3323 of our own breakpoints, or finished an internal step GDB
3324 shouldn't know about. */
3325 if (!report_to_gdb)
3326 {
3327 if (bp_explains_trap)
3328 threads_debug_printf ("Hit a gdbserver breakpoint.");
3329
3330 if (step_over_finished)
3331 threads_debug_printf ("Step-over finished.");
3332
3333 if (trace_event)
3334 threads_debug_printf ("Tracepoint event.");
3335
3336 if (lwp_in_step_range (event_child))
3337 threads_debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).",
3338 paddress (event_child->stop_pc),
3339 paddress (event_child->step_range_start),
3340 paddress (event_child->step_range_end));
3341
3342 /* We're not reporting this breakpoint to GDB, so apply the
3343 decr_pc_after_break adjustment to the inferior's regcache
3344 ourselves. */
3345
3346 if (low_supports_breakpoints ())
3347 {
3348 struct regcache *regcache
3349 = get_thread_regcache (current_thread, 1);
3350 low_set_pc (regcache, event_child->stop_pc);
3351 }
3352
3353 if (step_over_finished)
3354 {
3355 /* If we have finished stepping over a breakpoint, we've
3356 stopped and suspended all LWPs momentarily except the
3357 stepping one. This is where we resume them all again.
3358 We're going to keep waiting, so use proceed, which
3359 handles stepping over the next breakpoint. */
3360 unsuspend_all_lwps (event_child);
3361 }
3362 else
3363 {
3364 /* Remove the single-step breakpoints if any. Note that
3365 there isn't single-step breakpoint if we finished stepping
3366 over. */
3367 if (supports_software_single_step ()
3368 && has_single_step_breakpoints (current_thread))
3369 {
3370 stop_all_lwps (0, event_child);
3371 delete_single_step_breakpoints (current_thread);
3372 unstop_all_lwps (0, event_child);
3373 }
3374 }
3375
3376 threads_debug_printf ("proceeding all threads.");
3377
3378 proceed_all_lwps ();
3379
3380 return ignore_event (ourstatus);
3381 }
3382
3383 if (debug_threads)
3384 {
3385 if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
3386 threads_debug_printf ("LWP %ld: extended event with waitstatus %s",
3387 lwpid_of (get_lwp_thread (event_child)),
3388 event_child->waitstatus.to_string ().c_str ());
3389
3390 if (current_thread->last_resume_kind == resume_step)
3391 {
3392 if (event_child->step_range_start == event_child->step_range_end)
3393 threads_debug_printf
3394 ("GDB wanted to single-step, reporting event.");
3395 else if (!lwp_in_step_range (event_child))
3396 threads_debug_printf ("Out of step range, reporting event.");
3397 }
3398
3399 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3400 threads_debug_printf ("Stopped by watchpoint.");
3401 else if (gdb_breakpoint_here (event_child->stop_pc))
3402 threads_debug_printf ("Stopped by GDB breakpoint.");
3403 }
3404
3405 threads_debug_printf ("Hit a non-gdbserver trap event.");
3406
3407 /* Alright, we're going to report a stop. */
3408
3409 /* Remove single-step breakpoints. */
3410 if (supports_software_single_step ())
3411 {
3412 /* Decide whether to remove single-step breakpoints. If we do, stop
3413 all lwps first, so that other threads won't hit a breakpoint left
3414 in stale memory. */
3415 int remove_single_step_breakpoints_p = 0;
3416
3417 if (non_stop)
3418 {
3419 remove_single_step_breakpoints_p
3420 = has_single_step_breakpoints (current_thread);
3421 }
3422 else
3423 {
3424 /* In all-stop, a stop reply cancels all previous resume
3425 requests. Delete all single-step breakpoints. */
3426
3427 find_thread ([&] (thread_info *thread) {
3428 if (has_single_step_breakpoints (thread))
3429 {
3430 remove_single_step_breakpoints_p = 1;
3431 return true;
3432 }
3433
3434 return false;
3435 });
3436 }
3437
3438 if (remove_single_step_breakpoints_p)
3439 {
3440 /* If we remove single-step breakpoints from memory, stop all lwps,
3441 so that other threads won't hit the breakpoint in the stale
3442 memory. */
3443 stop_all_lwps (0, event_child);
3444
3445 if (non_stop)
3446 {
3447 gdb_assert (has_single_step_breakpoints (current_thread));
3448 delete_single_step_breakpoints (current_thread);
3449 }
3450 else
3451 {
3452 for_each_thread ([] (thread_info *thread){
3453 if (has_single_step_breakpoints (thread))
3454 delete_single_step_breakpoints (thread);
3455 });
3456 }
3457
3458 unstop_all_lwps (0, event_child);
3459 }
3460 }
3461
3462 if (!stabilizing_threads)
3463 {
3464 /* In all-stop, stop all threads. */
3465 if (!non_stop)
3466 stop_all_lwps (0, NULL);
3467
3468 if (step_over_finished)
3469 {
3470 if (!non_stop)
3471 {
3472 /* If we were doing a step-over, all other threads but
3473 the stepping one had been paused in start_step_over,
3474 with their suspend counts incremented. We don't want
3475 to do a full unstop/unpause, because we're in
3476 all-stop mode (so we want threads stopped), but we
3477 still need to unsuspend the other threads, to
3478 decrement their `suspended' count back. */
3479 unsuspend_all_lwps (event_child);
3480 }
3481 else
3482 {
3483 /* If we just finished a step-over, then all threads had
3484 been momentarily paused. In all-stop, that's fine,
3485 we want threads stopped by now anyway. In non-stop,
3486 we need to re-resume threads that GDB wanted to be
3487 running. */
3488 unstop_all_lwps (1, event_child);
3489 }
3490 }
3491
3492 /* If we're not waiting for a specific LWP, choose an event LWP
3493 from among those that have had events. Giving equal priority
3494 to all LWPs that have had events helps prevent
3495 starvation. */
3496 if (ptid == minus_one_ptid)
3497 {
3498 event_child->status_pending_p = 1;
3499 event_child->status_pending = w;
3500
3501 select_event_lwp (&event_child);
3502
3503 /* current_thread and event_child must stay in sync. */
3504 switch_to_thread (get_lwp_thread (event_child));
3505
3506 event_child->status_pending_p = 0;
3507 w = event_child->status_pending;
3508 }
3509
3510
3511 /* Stabilize threads (move out of jump pads). */
3512 if (!non_stop)
3513 target_stabilize_threads ();
3514 }
3515 else
3516 {
3517 /* If we just finished a step-over, then all threads had been
3518 momentarily paused. In all-stop, that's fine, we want
3519 threads stopped by now anyway. In non-stop, we need to
3520 re-resume threads that GDB wanted to be running. */
3521 if (step_over_finished)
3522 unstop_all_lwps (1, event_child);
3523 }
3524
3525 /* At this point, we haven't set OURSTATUS. This is where we do it. */
3526 gdb_assert (ourstatus->kind () == TARGET_WAITKIND_IGNORE);
3527
3528 if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
3529 {
3530 /* If the reported event is an exit, fork, vfork or exec, let
3531 GDB know. */
3532
3533 /* Break the unreported fork relationship chain. */
3534 if (event_child->waitstatus.kind () == TARGET_WAITKIND_FORKED
3535 || event_child->waitstatus.kind () == TARGET_WAITKIND_VFORKED)
3536 {
3537 event_child->fork_relative->fork_relative = NULL;
3538 event_child->fork_relative = NULL;
3539 }
3540
3541 *ourstatus = event_child->waitstatus;
3542 /* Clear the event lwp's waitstatus since we handled it already. */
3543 event_child->waitstatus.set_ignore ();
3544 }
3545 else
3546 {
3547 /* The LWP stopped due to a plain signal or a syscall signal. Either way,
3548 event_child->waitstatus wasn't filled in with the details, so look at
3549 the wait status W. */
3550 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3551 {
3552 int syscall_number;
3553
3554 get_syscall_trapinfo (event_child, &syscall_number);
3555 if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
3556 ourstatus->set_syscall_entry (syscall_number);
3557 else if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
3558 ourstatus->set_syscall_return (syscall_number);
3559 else
3560 gdb_assert_not_reached ("unexpected syscall state");
3561 }
3562 else if (current_thread->last_resume_kind == resume_stop
3563 && WSTOPSIG (w) == SIGSTOP)
3564 {
3565 /* A thread that has been requested to stop by GDB with vCont;t,
3566 and it stopped cleanly, so report as SIG0. The use of
3567 SIGSTOP is an implementation detail. */
3568 ourstatus->set_stopped (GDB_SIGNAL_0);
3569 }
3570 else
3571 ourstatus->set_stopped (gdb_signal_from_host (WSTOPSIG (w)));
3572 }
3573
3574 /* Now that we've selected our final event LWP, un-adjust its PC if
3575 it was a software breakpoint, and the client doesn't know we can
3576 adjust the breakpoint ourselves. */
3577 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3578 && !cs.swbreak_feature)
3579 {
3580 int decr_pc = low_decr_pc_after_break ();
3581
3582 if (decr_pc != 0)
3583 {
3584 struct regcache *regcache
3585 = get_thread_regcache (current_thread, 1);
3586 low_set_pc (regcache, event_child->stop_pc + decr_pc);
3587 }
3588 }
3589
3590 gdb_assert (step_over_bkpt == null_ptid);
3591
3592 threads_debug_printf ("ret = %s, %s",
3593 target_pid_to_str (ptid_of (current_thread)).c_str (),
3594 ourstatus->to_string ().c_str ());
3595
3596 if (ourstatus->kind () == TARGET_WAITKIND_EXITED)
3597 return filter_exit_event (event_child, ourstatus);
3598
3599 return ptid_of (current_thread);
3600 }
3601
3602 /* Get rid of any pending event in the pipe. */
3603 static void
3604 async_file_flush (void)
3605 {
3606 linux_event_pipe.flush ();
3607 }
3608
3609 /* Put something in the pipe, so the event loop wakes up. */
3610 static void
3611 async_file_mark (void)
3612 {
3613 linux_event_pipe.mark ();
3614 }
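/* Together these implement the self-pipe pattern that drives async
   mode: mark () wakes the event loop, and flush () drains stale
   wakeups before a fresh wait, as the wait method below does
   (sketch):

     if (target_is_async_p ())
       async_file_flush ();
     ...
     async_file_mark ();
*/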
3615
3616 ptid_t
3617 linux_process_target::wait (ptid_t ptid,
3618 target_waitstatus *ourstatus,
3619 target_wait_flags target_options)
3620 {
3621 ptid_t event_ptid;
3622
3623 /* Flush the async file first. */
3624 if (target_is_async_p ())
3625 async_file_flush ();
3626
3627 do
3628 {
3629 event_ptid = wait_1 (ptid, ourstatus, target_options);
3630 }
3631 while ((target_options & TARGET_WNOHANG) == 0
3632 && event_ptid == null_ptid
3633 && ourstatus->kind () == TARGET_WAITKIND_IGNORE);
3634
3635 /* If at least one stop was reported, there may be more. A single
3636 SIGCHLD can signal more than one child stop. */
3637 if (target_is_async_p ()
3638 && (target_options & TARGET_WNOHANG) != 0
3639 && event_ptid != null_ptid)
3640 async_file_mark ();
3641
3642 return event_ptid;
3643 }
3644
3645 /* Send a signal to an LWP. */
3646
3647 static int
3648 kill_lwp (unsigned long lwpid, int signo)
3649 {
3650 int ret;
3651
3652 errno = 0;
3653 ret = syscall (__NR_tkill, lwpid, signo);
3654 if (errno == ENOSYS)
3655 {
3656 /* If tkill fails, then we are not using nptl threads, a
3657 configuration we no longer support. */
3658 perror_with_name (("tkill"));
3659 }
3660 return ret;
3661 }
3662
3663 void
3664 linux_stop_lwp (struct lwp_info *lwp)
3665 {
3666 send_sigstop (lwp);
3667 }
3668
3669 static void
3670 send_sigstop (struct lwp_info *lwp)
3671 {
3672 int pid;
3673
3674 pid = lwpid_of (get_lwp_thread (lwp));
3675
3676 /* If we already have a pending stop signal for this process, don't
3677 send another. */
3678 if (lwp->stop_expected)
3679 {
3680 threads_debug_printf ("Have pending sigstop for lwp %d", pid);
3681
3682 return;
3683 }
3684
3685 threads_debug_printf ("Sending sigstop to lwp %d", pid);
3686
3687 lwp->stop_expected = 1;
3688 kill_lwp (pid, SIGSTOP);
3689 }
3690
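/* Like send_sigstop above, but usable as a for_each_thread callback:
   skip EXCEPT and LWPs that are already stopped.  */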
3691 static void
3692 send_sigstop (thread_info *thread, lwp_info *except)
3693 {
3694 struct lwp_info *lwp = get_thread_lwp (thread);
3695
3696 /* Ignore EXCEPT. */
3697 if (lwp == except)
3698 return;
3699
3700 if (lwp->stopped)
3701 return;
3702
3703 send_sigstop (lwp);
3704 }
3705
3706 /* Increment the suspend count of an LWP, and stop it, if not stopped
3707 yet. */
3708 static void
3709 suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
3710 {
3711 struct lwp_info *lwp = get_thread_lwp (thread);
3712
3713 /* Ignore EXCEPT. */
3714 if (lwp == except)
3715 return;
3716
3717 lwp_suspended_inc (lwp);
3718
3719 send_sigstop (thread, except);
3720 }
3721
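/* Record that LWP exited (or was killed) with wait status WSTAT, so
   the event can be reported later, and stop expecting any further
   stops from it.  */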
3722 static void
3723 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3724 {
3725 /* Store the exit status for later. */
3726 lwp->status_pending_p = 1;
3727 lwp->status_pending = wstat;
3728
3729 /* Store in waitstatus as well, as there's nothing else to process
3730 for this event. */
3731 if (WIFEXITED (wstat))
3732 lwp->waitstatus.set_exited (WEXITSTATUS (wstat));
3733 else if (WIFSIGNALED (wstat))
3734 lwp->waitstatus.set_signalled (gdb_signal_from_host (WTERMSIG (wstat)));
3735
3736 /* Prevent trying to stop it. */
3737 lwp->stopped = 1;
3738
3739 /* No further stops are expected from a dead lwp. */
3740 lwp->stop_expected = 0;
3741 }
3742
3743 /* Return true if LWP has exited already, and has a pending exit event
3744 to report to GDB. */
3745
3746 static int
3747 lwp_is_marked_dead (struct lwp_info *lwp)
3748 {
3749 return (lwp->status_pending_p
3750 && (WIFEXITED (lwp->status_pending)
3751 || WIFSIGNALED (lwp->status_pending)));
3752 }
3753
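/* Wait until every LWP that was sent a SIGSTOP has reported a stop,
   leaving all events pending.  If the previously current thread died
   in the meantime, deliberately leave no thread selected rather than
   switching to another one behind GDB's back.  */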
3754 void
3755 linux_process_target::wait_for_sigstop ()
3756 {
3757 struct thread_info *saved_thread;
3758 ptid_t saved_tid;
3759 int wstat;
3760 int ret;
3761
3762 saved_thread = current_thread;
3763 if (saved_thread != NULL)
3764 saved_tid = saved_thread->id;
3765 else
3766 saved_tid = null_ptid; /* avoid bogus unused warning */
3767
3768 scoped_restore_current_thread restore_thread;
3769
3770 threads_debug_printf ("pulling events");
3771
3772 /* Passing NULL_PTID as filter indicates we want all events to be
3773 left pending. Eventually this returns when there are no
3774 unwaited-for children left. */
3775 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
3776 gdb_assert (ret == -1);
3777
3778 if (saved_thread == NULL || mythread_alive (saved_tid))
3779 return;
3780 else
3781 {
3782 threads_debug_printf ("Previously current thread died.");
3783
3784 /* We can't change the current inferior behind GDB's back,
3785 otherwise, a subsequent command may apply to the wrong
3786 process. */
3787 restore_thread.dont_restore ();
3788 switch_to_thread (nullptr);
3789 }
3790 }
3791
3792 bool
3793 linux_process_target::stuck_in_jump_pad (thread_info *thread)
3794 {
3795 struct lwp_info *lwp = get_thread_lwp (thread);
3796
3797 if (lwp->suspended != 0)
3798 {
3799 internal_error ("LWP %ld is suspended, suspended=%d\n",
3800 lwpid_of (thread), lwp->suspended);
3801 }
3802 gdb_assert (lwp->stopped);
3803
3804 /* Allow debugging the jump pad, gdb_collect, etc.  */
3805 return (supports_fast_tracepoints ()
3806 && agent_loaded_p ()
3807 && (gdb_breakpoint_here (lwp->stop_pc)
3808 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3809 || thread->last_resume_kind == resume_step)
3810 && (linux_fast_tracepoint_collecting (lwp, NULL)
3811 != fast_tpoint_collect_result::not_collecting));
3812 }
3813
3814 void
3815 linux_process_target::move_out_of_jump_pad (thread_info *thread)
3816 {
3817 struct lwp_info *lwp = get_thread_lwp (thread);
3818 int *wstat;
3819
3820 if (lwp->suspended != 0)
3821 {
3822 internal_error ("LWP %ld is suspended, suspended=%d\n",
3823 lwpid_of (thread), lwp->suspended);
3824 }
3825 gdb_assert (lwp->stopped);
3826
3827 /* For gdb_breakpoint_here. */
3828 scoped_restore_current_thread restore_thread;
3829 switch_to_thread (thread);
3830
3831 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3832
3833 /* Allow debugging the jump pad, gdb_collect, etc. */
3834 if (!gdb_breakpoint_here (lwp->stop_pc)
3835 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3836 && thread->last_resume_kind != resume_step
3837 && maybe_move_out_of_jump_pad (lwp, wstat))
3838 {
3839 threads_debug_printf ("LWP %ld needs stabilizing (in jump pad)",
3840 lwpid_of (thread));
3841
3842 if (wstat)
3843 {
3844 lwp->status_pending_p = 0;
3845 enqueue_one_deferred_signal (lwp, wstat);
3846
3847 	  threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad)",
3848 				WSTOPSIG (*wstat), lwpid_of (thread));
3849 }
3850
3851 resume_one_lwp (lwp, 0, 0, NULL);
3852 }
3853 else
3854 lwp_suspended_inc (lwp);
3855 }
3856
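/* Return true if THREAD's LWP is known to be alive and currently
   running, i.e. it is neither stopped nor marked dead.  */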
3857 static bool
3858 lwp_running (thread_info *thread)
3859 {
3860 struct lwp_info *lwp = get_thread_lwp (thread);
3861
3862 if (lwp_is_marked_dead (lwp))
3863 return false;
3864
3865 return !lwp->stopped;
3866 }
3867
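/* Stop all LWPs except EXCEPT, suspending them as well if SUSPEND is
   set, and wait until all the SIGSTOPs have been reported.  */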
3868 void
3869 linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
3870 {
3871 /* Should not be called recursively. */
3872 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3873
3874 THREADS_SCOPED_DEBUG_ENTER_EXIT;
3875
3876 threads_debug_printf
3877 ("%s, except=%s", suspend ? "stop-and-suspend" : "stop",
3878 (except != NULL
3879 ? target_pid_to_str (ptid_of (get_lwp_thread (except))).c_str ()
3880 : "none"));
3881
3882 stopping_threads = (suspend
3883 ? STOPPING_AND_SUSPENDING_THREADS
3884 : STOPPING_THREADS);
3885
3886 if (suspend)
3887 for_each_thread ([&] (thread_info *thread)
3888 {
3889 suspend_and_send_sigstop (thread, except);
3890 });
3891 else
3892 for_each_thread ([&] (thread_info *thread)
3893 {
3894 send_sigstop (thread, except);
3895 });
3896
3897 wait_for_sigstop ();
3898 stopping_threads = NOT_STOPPING_THREADS;
3899
3900 threads_debug_printf ("setting stopping_threads back to !stopping");
3901 }
3902
3903 /* Enqueue one signal in the chain of signals which need to be
3904 delivered to this process on next resume. */
3905
3906 static void
3907 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
3908 {
3909 lwp->pending_signals.emplace_back (signal);
3910 if (info == nullptr)
3911 memset (&lwp->pending_signals.back ().info, 0, sizeof (siginfo_t));
3912 else
3913 lwp->pending_signals.back ().info = *info;
3914 }
3915
3916 void
3917 linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp)
3918 {
3919 struct thread_info *thread = get_lwp_thread (lwp);
3920 struct regcache *regcache = get_thread_regcache (thread, 1);
3921
3922 scoped_restore_current_thread restore_thread;
3923
3924 switch_to_thread (thread);
3925 std::vector<CORE_ADDR> next_pcs = low_get_next_pcs (regcache);
3926
3927 for (CORE_ADDR pc : next_pcs)
3928 set_single_step_breakpoint (pc, current_ptid);
3929 }
3930
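/* Arrange for LWP to execute a single instruction.  Returns 1 when
   the step will be done in hardware (PTRACE_SINGLESTEP); returns 0
   after planting software single-step breakpoints, in which case the
   LWP is simply continued into them.  */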
3931 int
3932 linux_process_target::single_step (lwp_info* lwp)
3933 {
3934 int step = 0;
3935
3936 if (supports_hardware_single_step ())
3937 {
3938 step = 1;
3939 }
3940 else if (supports_software_single_step ())
3941 {
3942 install_software_single_step_breakpoints (lwp);
3943 step = 0;
3944 }
3945 else
3946 threads_debug_printf ("stepping is not implemented on this target");
3947
3948 return step;
3949 }
3950
3951 /* The signal can be delivered to the inferior if we are not trying to
3952    finish a fast tracepoint collect.  Since a signal can be delivered
3953    during the step-over, the program may enter the signal handler and
3954    trap again after returning from it.  We can live with the spurious
3955    double traps.  */
3956
3957 static int
3958 lwp_signal_can_be_delivered (struct lwp_info *lwp)
3959 {
3960 return (lwp->collecting_fast_tracepoint
3961 == fast_tpoint_collect_result::not_collecting);
3962 }
3963
3964 void
3965 linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step,
3966 int signal, siginfo_t *info)
3967 {
3968 struct thread_info *thread = get_lwp_thread (lwp);
3969 int ptrace_request;
3970 struct process_info *proc = get_thread_process (thread);
3971
3972 /* Note that target description may not be initialised
3973 (proc->tdesc == NULL) at this point because the program hasn't
3974      stopped at the first instruction yet, while GDBserver is skipping
3975      the extra traps from the wrapper program (see option --wrapper).
3976      Code in this function that requires register access should be
3977      guarded by a check that proc->tdesc is not NULL, or similar.  */
3978
3979 if (lwp->stopped == 0)
3980 return;
3981
3982 gdb_assert (lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);
3983
3984 fast_tpoint_collect_result fast_tp_collecting
3985 = lwp->collecting_fast_tracepoint;
3986
3987 gdb_assert (!stabilizing_threads
3988 || (fast_tp_collecting
3989 != fast_tpoint_collect_result::not_collecting));
3990
3991 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3992 user used the "jump" command, or "set $pc = foo"). */
3993 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
3994 {
3995 /* Collecting 'while-stepping' actions doesn't make sense
3996 anymore. */
3997 release_while_stepping_state_list (thread);
3998 }
3999
4000 /* If we have pending signals or status, and a new signal, enqueue the
4001 signal. Also enqueue the signal if it can't be delivered to the
4002 inferior right now. */
4003 if (signal != 0
4004 && (lwp->status_pending_p
4005 || !lwp->pending_signals.empty ()
4006 || !lwp_signal_can_be_delivered (lwp)))
4007 {
4008 enqueue_pending_signal (lwp, signal, info);
4009
4010 /* Postpone any pending signal. It was enqueued above. */
4011 signal = 0;
4012 }
4013
4014 if (lwp->status_pending_p)
4015 {
4016 threads_debug_printf
4017 ("Not resuming lwp %ld (%s, stop %s); has pending status",
4018 lwpid_of (thread), step ? "step" : "continue",
4019 lwp->stop_expected ? "expected" : "not expected");
4020 return;
4021 }
4022
4023 scoped_restore_current_thread restore_thread;
4024 switch_to_thread (thread);
4025
4026 /* This bit needs some thinking about. If we get a signal that
4027 we must report while a single-step reinsert is still pending,
4028 we often end up resuming the thread. It might be better to
4029 (ew) allow a stack of pending events; then we could be sure that
4030 the reinsert happened right away and not lose any signals.
4031
4032 Making this stack would also shrink the window in which breakpoints are
4033 uninserted (see comment in linux_wait_for_lwp) but not enough for
4034 complete correctness, so it won't solve that problem. It may be
4035 worthwhile just to solve this one, however. */
4036 if (lwp->bp_reinsert != 0)
4037 {
4038 threads_debug_printf (" pending reinsert at 0x%s",
4039 paddress (lwp->bp_reinsert));
4040
4041 if (supports_hardware_single_step ())
4042 {
4043 if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
4044 {
4045 if (step == 0)
4046 warning ("BAD - reinserting but not stepping.");
4047 if (lwp->suspended)
4048 warning ("BAD - reinserting and suspended(%d).",
4049 lwp->suspended);
4050 }
4051 }
4052
4053 step = maybe_hw_step (thread);
4054 }
4055
4056 if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
4057 threads_debug_printf
4058 ("lwp %ld wants to get out of fast tracepoint jump pad "
4059 "(exit-jump-pad-bkpt)", lwpid_of (thread));
4060
4061 else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
4062 {
4063 threads_debug_printf
4064 ("lwp %ld wants to get out of fast tracepoint jump pad single-stepping",
4065 lwpid_of (thread));
4066
4067 if (supports_hardware_single_step ())
4068 step = 1;
4069 else
4070 {
4071 internal_error ("moving out of jump pad single-stepping"
4072 " not implemented on this target");
4073 }
4074 }
4075
4076   /* If we have while-stepping actions in this thread, set it stepping.
4077      If we have a signal to deliver, it may or may not be set to
4078      SIG_IGN; we don't know.  Assume so, and allow collecting
4079 while-stepping into a signal handler. A possible smart thing to
4080 do would be to set an internal breakpoint at the signal return
4081 address, continue, and carry on catching this while-stepping
4082 action only when that breakpoint is hit. A future
4083 enhancement. */
4084 if (thread->while_stepping != NULL)
4085 {
4086 threads_debug_printf
4087 ("lwp %ld has a while-stepping action -> forcing step.",
4088 lwpid_of (thread));
4089
4090 step = single_step (lwp);
4091 }
4092
4093 if (proc->tdesc != NULL && low_supports_breakpoints ())
4094 {
4095 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4096
4097 lwp->stop_pc = low_get_pc (regcache);
4098
4099 threads_debug_printf (" %s from pc 0x%lx", step ? "step" : "continue",
4100 (long) lwp->stop_pc);
4101 }
4102
4103 /* If we have pending signals, consume one if it can be delivered to
4104 the inferior. */
4105 if (!lwp->pending_signals.empty () && lwp_signal_can_be_delivered (lwp))
4106 {
4107 const pending_signal &p_sig = lwp->pending_signals.front ();
4108
4109 signal = p_sig.signal;
4110 if (p_sig.info.si_signo != 0)
4111 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4112 &p_sig.info);
4113
4114 lwp->pending_signals.pop_front ();
4115 }
4116
4117 threads_debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)",
4118 lwpid_of (thread), step ? "step" : "continue", signal,
4119 lwp->stop_expected ? "expected" : "not expected");
4120
4121 low_prepare_to_resume (lwp);
4122
4123 regcache_invalidate_thread (thread);
4124 errno = 0;
4125 lwp->stepping = step;
4126 if (step)
4127 ptrace_request = PTRACE_SINGLESTEP;
4128 else if (gdb_catching_syscalls_p (lwp))
4129 ptrace_request = PTRACE_SYSCALL;
4130 else
4131 ptrace_request = PTRACE_CONT;
4132 ptrace (ptrace_request,
4133 lwpid_of (thread),
4134 (PTRACE_TYPE_ARG3) 0,
4135 /* Coerce to a uintptr_t first to avoid potential gcc warning
4136 of coercing an 8 byte integer to a 4 byte pointer. */
4137 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4138
4139 if (errno)
4140 {
4141 int saved_errno = errno;
4142
4143 threads_debug_printf ("ptrace errno = %d (%s)",
4144 saved_errno, strerror (saved_errno));
4145
4146 errno = saved_errno;
4147 perror_with_name ("resuming thread");
4148 }
4149
4150 /* Successfully resumed. Clear state that no longer makes sense,
4151 and mark the LWP as running. Must not do this before resuming
4152 otherwise if that fails other code will be confused. E.g., we'd
4153 later try to stop the LWP and hang forever waiting for a stop
4154 status. Note that we must not throw after this is cleared,
4155 otherwise handle_zombie_lwp_error would get confused. */
4156 lwp->stopped = 0;
4157 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4158 }
4159
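/* Default implementation of the low_prepare_to_resume hook; targets
   that must refresh per-thread state just before a resume (e.g.,
   hardware debug registers) override this.  */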
4160 void
4161 linux_process_target::low_prepare_to_resume (lwp_info *lwp)
4162 {
4163 /* Nop. */
4164 }
4165
4166 /* Called when we try to resume a stopped LWP and that errors out. If
4167 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4168 or about to become), discard the error, clear any pending status
4169 the LWP may have, and return true (we'll collect the exit status
4170 soon enough). Otherwise, return false. */
4171
4172 static int
4173 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4174 {
4175 struct thread_info *thread = get_lwp_thread (lp);
4176
4177 /* If we get an error after resuming the LWP successfully, we'd
4178 confuse !T state for the LWP being gone. */
4179 gdb_assert (lp->stopped);
4180
4181 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4182 because even if ptrace failed with ESRCH, the tracee may be "not
4183 yet fully dead", but already refusing ptrace requests. In that
4184 case the tracee has 'R (Running)' state for a little bit
4185 (observed in Linux 3.18). See also the note on ESRCH in the
4186 ptrace(2) man page. Instead, check whether the LWP has any state
4187 other than ptrace-stopped. */
4188
4189 /* Don't assume anything if /proc/PID/status can't be read. */
4190 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4191 {
4192 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4193 lp->status_pending_p = 0;
4194 return 1;
4195 }
4196 return 0;
4197 }
4198
4199 void
4200 linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal,
4201 siginfo_t *info)
4202 {
4203 try
4204 {
4205 resume_one_lwp_throw (lwp, step, signal, info);
4206 }
4207 catch (const gdb_exception_error &ex)
4208 {
4209 if (check_ptrace_stopped_lwp_gone (lwp))
4210 {
4211 	  /* This could be because we tried to resume an LWP after its leader
4212 exited. Mark it as resumed, so we can collect an exit event
4213 from it. */
4214 lwp->stopped = 0;
4215 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4216 }
4217 else
4218 throw;
4219 }
4220 }
4221
4222 /* This function is called once per thread via for_each_thread.
4223 We look up which resume request applies to THREAD and mark it with a
4224 pointer to the appropriate resume request.
4225
4226 This algorithm is O(threads * resume elements), but resume elements
4227 is small (and will remain small at least until GDB supports thread
4228 suspension). */
4229
4230 static void
4231 linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
4232 {
4233 struct lwp_info *lwp = get_thread_lwp (thread);
4234
4235 for (int ndx = 0; ndx < n; ndx++)
4236 {
4237 ptid_t ptid = resume[ndx].thread;
4238 if (ptid == minus_one_ptid
4239 || ptid == thread->id
4240 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4241 of PID'. */
4242 || (ptid.pid () == pid_of (thread)
4243 && (ptid.is_pid ()
4244 || ptid.lwp () == -1)))
4245 {
4246 if (resume[ndx].kind == resume_stop
4247 && thread->last_resume_kind == resume_stop)
4248 {
4249 threads_debug_printf
4250 ("already %s LWP %ld at GDB's request",
4251 (thread->last_status.kind () == TARGET_WAITKIND_STOPPED
4252 ? "stopped" : "stopping"),
4253 lwpid_of (thread));
4254
4255 continue;
4256 }
4257
4258 /* Ignore (wildcard) resume requests for already-resumed
4259 threads. */
4260 if (resume[ndx].kind != resume_stop
4261 && thread->last_resume_kind != resume_stop)
4262 {
4263 threads_debug_printf
4264 ("already %s LWP %ld at GDB's request",
4265 (thread->last_resume_kind == resume_step
4266 ? "stepping" : "continuing"),
4267 lwpid_of (thread));
4268 continue;
4269 }
4270
4271 /* Don't let wildcard resumes resume fork children that GDB
4272 does not yet know are new fork children. */
4273 if (lwp->fork_relative != NULL)
4274 {
4275 struct lwp_info *rel = lwp->fork_relative;
4276
4277 if (rel->status_pending_p
4278 && (rel->waitstatus.kind () == TARGET_WAITKIND_FORKED
4279 || rel->waitstatus.kind () == TARGET_WAITKIND_VFORKED))
4280 {
4281 threads_debug_printf
4282 ("not resuming LWP %ld: has queued stop reply",
4283 lwpid_of (thread));
4284 continue;
4285 }
4286 }
4287
4288 /* If the thread has a pending event that has already been
4289 reported to GDBserver core, but GDB has not pulled the
4290 event out of the vStopped queue yet, likewise, ignore the
4291 (wildcard) resume request. */
4292 if (in_queued_stop_replies (thread->id))
4293 {
4294 threads_debug_printf
4295 ("not resuming LWP %ld: has queued stop reply",
4296 lwpid_of (thread));
4297 continue;
4298 }
4299
4300 lwp->resume = &resume[ndx];
4301 thread->last_resume_kind = lwp->resume->kind;
4302
4303 lwp->step_range_start = lwp->resume->step_range_start;
4304 lwp->step_range_end = lwp->resume->step_range_end;
4305
4306 /* If we had a deferred signal to report, dequeue one now.
4307 This can happen if LWP gets more than one signal while
4308 trying to get out of a jump pad. */
4309 if (lwp->stopped
4310 && !lwp->status_pending_p
4311 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4312 {
4313 lwp->status_pending_p = 1;
4314
4315 threads_debug_printf
4316 ("Dequeueing deferred signal %d for LWP %ld, "
4317 "leaving status pending.",
4318 WSTOPSIG (lwp->status_pending),
4319 lwpid_of (thread));
4320 }
4321
4322 return;
4323 }
4324 }
4325
4326 /* No resume action for this thread. */
4327 lwp->resume = NULL;
4328 }
4329
4330 bool
4331 linux_process_target::resume_status_pending (thread_info *thread)
4332 {
4333 struct lwp_info *lwp = get_thread_lwp (thread);
4334
4335 /* LWPs which will not be resumed are not interesting, because
4336 we might not wait for them next time through linux_wait. */
4337 if (lwp->resume == NULL)
4338 return false;
4339
4340 return thread_still_has_status_pending (thread);
4341 }
4342
4343 bool
4344 linux_process_target::thread_needs_step_over (thread_info *thread)
4345 {
4346 struct lwp_info *lwp = get_thread_lwp (thread);
4347 CORE_ADDR pc;
4348 struct process_info *proc = get_thread_process (thread);
4349
4350   /* GDBserver is still skipping the extra traps from the wrapper
4351      program; no step-over is needed.  */
4352 if (proc->tdesc == NULL)
4353 return false;
4354
4355 /* LWPs which will not be resumed are not interesting, because we
4356 might not wait for them next time through linux_wait. */
4357
4358 if (!lwp->stopped)
4359 {
4360 threads_debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped",
4361 lwpid_of (thread));
4362 return false;
4363 }
4364
4365 if (thread->last_resume_kind == resume_stop)
4366 {
4367 threads_debug_printf
4368 ("Need step over [LWP %ld]? Ignoring, should remain stopped",
4369 lwpid_of (thread));
4370 return false;
4371 }
4372
4373 gdb_assert (lwp->suspended >= 0);
4374
4375 if (lwp->suspended)
4376 {
4377 threads_debug_printf ("Need step over [LWP %ld]? Ignoring, suspended",
4378 lwpid_of (thread));
4379 return false;
4380 }
4381
4382 if (lwp->status_pending_p)
4383 {
4384 threads_debug_printf
4385 ("Need step over [LWP %ld]? Ignoring, has pending status.",
4386 lwpid_of (thread));
4387 return false;
4388 }
4389
4390 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4391 or we have. */
4392 pc = get_pc (lwp);
4393
4394 /* If the PC has changed since we stopped, then don't do anything,
4395 and let the breakpoint/tracepoint be hit. This happens if, for
4396 instance, GDB handled the decr_pc_after_break subtraction itself,
4397 GDB is OOL stepping this thread, or the user has issued a "jump"
4398      command, or poked the thread's registers herself.  */
4399 if (pc != lwp->stop_pc)
4400 {
4401 threads_debug_printf
4402 ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4403 "Old stop_pc was 0x%s, PC is now 0x%s", lwpid_of (thread),
4404 paddress (lwp->stop_pc), paddress (pc));
4405 return false;
4406 }
4407
4408   /* On a software single-step target, resume the inferior with the
4409      signal rather than stepping over.  */
4410 if (supports_software_single_step ()
4411 && !lwp->pending_signals.empty ()
4412 && lwp_signal_can_be_delivered (lwp))
4413 {
4414 threads_debug_printf
4415 ("Need step over [LWP %ld]? Ignoring, has pending signals.",
4416 lwpid_of (thread));
4417
4418 return false;
4419 }
4420
4421 scoped_restore_current_thread restore_thread;
4422 switch_to_thread (thread);
4423
4424 /* We can only step over breakpoints we know about. */
4425 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4426 {
4427 /* Don't step over a breakpoint that GDB expects to hit
4428 	 though.  If the condition is being evaluated on the target's side
4429 	 and it evaluates to false, step over this breakpoint as well.  */
4430 if (gdb_breakpoint_here (pc)
4431 && gdb_condition_true_at_breakpoint (pc)
4432 && gdb_no_commands_at_breakpoint (pc))
4433 {
4434 threads_debug_printf ("Need step over [LWP %ld]? yes, but found"
4435 " GDB breakpoint at 0x%s; skipping step over",
4436 lwpid_of (thread), paddress (pc));
4437
4438 return false;
4439 }
4440 else
4441 {
4442 threads_debug_printf ("Need step over [LWP %ld]? yes, "
4443 "found breakpoint at 0x%s",
4444 lwpid_of (thread), paddress (pc));
4445
4446 	  /* We've found an lwp that needs stepping over --- return true so
4447 	     that find_thread stops looking.  */
4448 return true;
4449 }
4450 }
4451
4452 threads_debug_printf
4453 ("Need step over [LWP %ld]? No, no breakpoint found at 0x%s",
4454 lwpid_of (thread), paddress (pc));
4455
4456 return false;
4457 }
4458
4459 void
4460 linux_process_target::start_step_over (lwp_info *lwp)
4461 {
4462 struct thread_info *thread = get_lwp_thread (lwp);
4463 CORE_ADDR pc;
4464
4465 threads_debug_printf ("Starting step-over on LWP %ld. Stopping all threads",
4466 lwpid_of (thread));
4467
4468 stop_all_lwps (1, lwp);
4469
4470 if (lwp->suspended != 0)
4471 {
4472 internal_error ("LWP %ld suspended=%d\n", lwpid_of (thread),
4473 lwp->suspended);
4474 }
4475
4476 threads_debug_printf ("Done stopping all threads for step-over.");
4477
4478 /* Note, we should always reach here with an already adjusted PC,
4479 either by GDB (if we're resuming due to GDB's request), or by our
4480 caller, if we just finished handling an internal breakpoint GDB
4481 shouldn't care about. */
4482 pc = get_pc (lwp);
4483
4484 bool step = false;
4485 {
4486 scoped_restore_current_thread restore_thread;
4487 switch_to_thread (thread);
4488
4489 lwp->bp_reinsert = pc;
4490 uninsert_breakpoints_at (pc);
4491 uninsert_fast_tracepoint_jumps_at (pc);
4492
4493 step = single_step (lwp);
4494 }
4495
4496 resume_one_lwp (lwp, step, 0, NULL);
4497
4498 /* Require next event from this LWP. */
4499 step_over_bkpt = thread->id;
4500 }
4501
4502 bool
4503 linux_process_target::finish_step_over (lwp_info *lwp)
4504 {
4505 if (lwp->bp_reinsert != 0)
4506 {
4507 scoped_restore_current_thread restore_thread;
4508
4509 threads_debug_printf ("Finished step over.");
4510
4511 switch_to_thread (get_lwp_thread (lwp));
4512
4513 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4514 may be no breakpoint to reinsert there by now. */
4515 reinsert_breakpoints_at (lwp->bp_reinsert);
4516 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4517
4518 lwp->bp_reinsert = 0;
4519
4520 /* Delete any single-step breakpoints. No longer needed. We
4521 don't have to worry about other threads hitting this trap,
4522 and later not being able to explain it, because we were
4523 stepping over a breakpoint, and we hold all threads but
4524 LWP stopped while doing that. */
4525 if (!supports_hardware_single_step ())
4526 {
4527 gdb_assert (has_single_step_breakpoints (current_thread));
4528 delete_single_step_breakpoints (current_thread);
4529 }
4530
4531 step_over_bkpt = null_ptid;
4532 return true;
4533 }
4534 else
4535 return false;
4536 }
4537
4538 void
4539 linux_process_target::complete_ongoing_step_over ()
4540 {
4541 if (step_over_bkpt != null_ptid)
4542 {
4543 struct lwp_info *lwp;
4544 int wstat;
4545 int ret;
4546
4547 threads_debug_printf ("detach: step over in progress, finish it first");
4548
4549 /* Passing NULL_PTID as filter indicates we want all events to
4550 be left pending. Eventually this returns when there are no
4551 unwaited-for children left. */
4552 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
4553 __WALL);
4554 gdb_assert (ret == -1);
4555
4556 lwp = find_lwp_pid (step_over_bkpt);
4557 if (lwp != NULL)
4558 {
4559 finish_step_over (lwp);
4560
4561 /* If we got our step SIGTRAP, don't leave it pending,
4562 otherwise we would report it to GDB as a spurious
4563 SIGTRAP. */
4564 gdb_assert (lwp->status_pending_p);
4565 if (WIFSTOPPED (lwp->status_pending)
4566 && WSTOPSIG (lwp->status_pending) == SIGTRAP)
4567 {
4568 thread_info *thread = get_lwp_thread (lwp);
4569 if (thread->last_resume_kind != resume_step)
4570 {
4571 threads_debug_printf ("detach: discard step-over SIGTRAP");
4572
4573 lwp->status_pending_p = 0;
4574 lwp->status_pending = 0;
4575 resume_one_lwp (lwp, lwp->stepping, 0, NULL);
4576 }
4577 else
4578 threads_debug_printf
4579 ("detach: resume_step, not discarding step-over SIGTRAP");
4580 }
4581 }
4582 step_over_bkpt = null_ptid;
4583 unsuspend_all_lwps (lwp);
4584 }
4585 }
4586
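/* Act on THREAD's resume request, as recorded earlier by
   linux_set_resume_request.  When LEAVE_ALL_STOPPED is true, only
   handle stop requests and queue signals; don't actually resume.  */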
4587 void
4588 linux_process_target::resume_one_thread (thread_info *thread,
4589 bool leave_all_stopped)
4590 {
4591 struct lwp_info *lwp = get_thread_lwp (thread);
4592 int leave_pending;
4593
4594 if (lwp->resume == NULL)
4595 return;
4596
4597 if (lwp->resume->kind == resume_stop)
4598 {
4599 threads_debug_printf ("resume_stop request for LWP %ld",
4600 lwpid_of (thread));
4601
4602 if (!lwp->stopped)
4603 {
4604 threads_debug_printf ("stopping LWP %ld", lwpid_of (thread));
4605
4606 /* Stop the thread, and wait for the event asynchronously,
4607 through the event loop. */
4608 send_sigstop (lwp);
4609 }
4610 else
4611 {
4612 threads_debug_printf ("already stopped LWP %ld", lwpid_of (thread));
4613
4614 /* The LWP may have been stopped in an internal event that
4615 was not meant to be notified back to GDB (e.g., gdbserver
4616 breakpoint), so we should be reporting a stop event in
4617 this case too. */
4618
4619 /* If the thread already has a pending SIGSTOP, this is a
4620 no-op. Otherwise, something later will presumably resume
4621 the thread and this will cause it to cancel any pending
4622 operation, due to last_resume_kind == resume_stop. If
4623 the thread already has a pending status to report, we
4624 will still report it the next time we wait - see
4625 status_pending_p_callback. */
4626
4627 /* If we already have a pending signal to report, then
4628 there's no need to queue a SIGSTOP, as this means we're
4629 midway through moving the LWP out of the jumppad, and we
4630 will report the pending signal as soon as that is
4631 finished. */
4632 if (lwp->pending_signals_to_report.empty ())
4633 send_sigstop (lwp);
4634 }
4635
4636 /* For stop requests, we're done. */
4637 lwp->resume = NULL;
4638 thread->last_status.set_ignore ();
4639 return;
4640 }
4641
4642   /* If this thread, which is about to be resumed, has a pending status,
4643      then don't resume it - we can just report the pending status.
4644      Likewise if it is suspended, because e.g., another thread is
4645      stepping past a breakpoint.  Make sure to queue any signals that
4646      would otherwise be sent.  In all-stop mode, we make this decision
4647      based on whether *any* thread has a pending status.  If there's a
4648 thread that needs the step-over-breakpoint dance, then don't
4649 resume any other thread but that particular one. */
4650 leave_pending = (lwp->suspended
4651 || lwp->status_pending_p
4652 || leave_all_stopped);
4653
4654 /* If we have a new signal, enqueue the signal. */
4655 if (lwp->resume->sig != 0)
4656 {
4657 siginfo_t info, *info_p;
4658
4659 /* If this is the same signal we were previously stopped by,
4660 make sure to queue its siginfo. */
4661 if (WIFSTOPPED (lwp->last_status)
4662 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
4663 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
4664 (PTRACE_TYPE_ARG3) 0, &info) == 0)
4665 info_p = &info;
4666 else
4667 info_p = NULL;
4668
4669 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
4670 }
4671
4672 if (!leave_pending)
4673 {
4674 threads_debug_printf ("resuming LWP %ld", lwpid_of (thread));
4675
4676 proceed_one_lwp (thread, NULL);
4677 }
4678 else
4679 threads_debug_printf ("leaving LWP %ld stopped", lwpid_of (thread));
4680
4681 thread->last_status.set_ignore ();
4682 lwp->resume = NULL;
4683 }
4684
4685 void
4686 linux_process_target::resume (thread_resume *resume_info, size_t n)
4687 {
4688 struct thread_info *need_step_over = NULL;
4689
4690 THREADS_SCOPED_DEBUG_ENTER_EXIT;
4691
4692 for_each_thread ([&] (thread_info *thread)
4693 {
4694 linux_set_resume_request (thread, resume_info, n);
4695 });
4696
4697 /* If there is a thread which would otherwise be resumed, which has
4698 a pending status, then don't resume any threads - we can just
4699 report the pending status. Make sure to queue any signals that
4700 would otherwise be sent. In non-stop mode, we'll apply this
4701 logic to each thread individually. We consume all pending events
4702 before considering to start a step-over (in all-stop). */
4703 bool any_pending = false;
4704 if (!non_stop)
4705 any_pending = find_thread ([this] (thread_info *thread)
4706 {
4707 return resume_status_pending (thread);
4708 }) != nullptr;
4709
4710 /* If there is a thread which would otherwise be resumed, which is
4711 stopped at a breakpoint that needs stepping over, then don't
4712 resume any threads - have it step over the breakpoint with all
4713 other threads stopped, then resume all threads again. Make sure
4714 to queue any signals that would otherwise be delivered or
4715 queued. */
4716 if (!any_pending && low_supports_breakpoints ())
4717 need_step_over = find_thread ([this] (thread_info *thread)
4718 {
4719 return thread_needs_step_over (thread);
4720 });
4721
4722 bool leave_all_stopped = (need_step_over != NULL || any_pending);
4723
4724 if (need_step_over != NULL)
4725 threads_debug_printf ("Not resuming all, need step over");
4726 else if (any_pending)
4727 threads_debug_printf ("Not resuming, all-stop and found "
4728 "an LWP with pending status");
4729 else
4730 threads_debug_printf ("Resuming, no pending status or step over needed");
4731
4732 /* Even if we're leaving threads stopped, queue all signals we'd
4733 otherwise deliver. */
4734 for_each_thread ([&] (thread_info *thread)
4735 {
4736 resume_one_thread (thread, leave_all_stopped);
4737 });
4738
4739 if (need_step_over)
4740 start_step_over (get_thread_lwp (need_step_over));
4741
4742 /* We may have events that were pending that can/should be sent to
4743 the client now. Trigger a linux_wait call. */
4744 if (target_is_async_p ())
4745 async_file_mark ();
4746 }
4747
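/* Resume THREAD's LWP unless something holds it back: EXCEPT itself,
   an LWP that's already running, one the client wants kept stopped,
   a pending status, or a non-zero suspend count.  */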
4748 void
4749 linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except)
4750 {
4751 struct lwp_info *lwp = get_thread_lwp (thread);
4752 int step;
4753
4754 if (lwp == except)
4755 return;
4756
4757 threads_debug_printf ("lwp %ld", lwpid_of (thread));
4758
4759 if (!lwp->stopped)
4760 {
4761 threads_debug_printf (" LWP %ld already running", lwpid_of (thread));
4762 return;
4763 }
4764
4765 if (thread->last_resume_kind == resume_stop
4766 && thread->last_status.kind () != TARGET_WAITKIND_IGNORE)
4767 {
4768       threads_debug_printf ("   client wants LWP %ld to remain stopped",
4769 lwpid_of (thread));
4770 return;
4771 }
4772
4773 if (lwp->status_pending_p)
4774 {
4775 threads_debug_printf (" LWP %ld has pending status, leaving stopped",
4776 lwpid_of (thread));
4777 return;
4778 }
4779
4780 gdb_assert (lwp->suspended >= 0);
4781
4782 if (lwp->suspended)
4783 {
4784 threads_debug_printf (" LWP %ld is suspended", lwpid_of (thread));
4785 return;
4786 }
4787
4788 if (thread->last_resume_kind == resume_stop
4789 && lwp->pending_signals_to_report.empty ()
4790 && (lwp->collecting_fast_tracepoint
4791 == fast_tpoint_collect_result::not_collecting))
4792 {
4793 /* We haven't reported this LWP as stopped yet (otherwise, the
4794 	 last_status.kind check above would catch it, and we wouldn't
4795 	 reach here).  This LWP may have been momentarily paused by a
4796 stop_all_lwps call while handling for example, another LWP's
4797 step-over. In that case, the pending expected SIGSTOP signal
4798 that was queued at vCont;t handling time will have already
4799 been consumed by wait_for_sigstop, and so we need to requeue
4800 another one here. Note that if the LWP already has a SIGSTOP
4801 pending, this is a no-op. */
4802
4803 threads_debug_printf
4804 ("Client wants LWP %ld to stop. Making sure it has a SIGSTOP pending",
4805 lwpid_of (thread));
4806
4807 send_sigstop (lwp);
4808 }
4809
4810 if (thread->last_resume_kind == resume_step)
4811 {
4812 threads_debug_printf (" stepping LWP %ld, client wants it stepping",
4813 lwpid_of (thread));
4814
4815 /* If resume_step is requested by GDB, install single-step
4816 breakpoints when the thread is about to be actually resumed if
4817 the single-step breakpoints weren't removed. */
4818 if (supports_software_single_step ()
4819 && !has_single_step_breakpoints (thread))
4820 install_software_single_step_breakpoints (lwp);
4821
4822 step = maybe_hw_step (thread);
4823 }
4824 else if (lwp->bp_reinsert != 0)
4825 {
4826 threads_debug_printf (" stepping LWP %ld, reinsert set",
4827 lwpid_of (thread));
4828
4829 step = maybe_hw_step (thread);
4830 }
4831 else
4832 step = 0;
4833
4834 resume_one_lwp (lwp, step, 0, NULL);
4835 }
4836
4837 void
4838 linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread,
4839 lwp_info *except)
4840 {
4841 struct lwp_info *lwp = get_thread_lwp (thread);
4842
4843 if (lwp == except)
4844 return;
4845
4846 lwp_suspended_decr (lwp);
4847
4848 proceed_one_lwp (thread, except);
4849 }
4850
4851 void
4852 linux_process_target::proceed_all_lwps ()
4853 {
4854 struct thread_info *need_step_over;
4855
4856 /* If there is a thread which would otherwise be resumed, which is
4857 stopped at a breakpoint that needs stepping over, then don't
4858 resume any threads - have it step over the breakpoint with all
4859 other threads stopped, then resume all threads again. */
4860
4861 if (low_supports_breakpoints ())
4862 {
4863 need_step_over = find_thread ([this] (thread_info *thread)
4864 {
4865 return thread_needs_step_over (thread);
4866 });
4867
4868 if (need_step_over != NULL)
4869 {
4870 threads_debug_printf ("found thread %ld needing a step-over",
4871 lwpid_of (need_step_over));
4872
4873 start_step_over (get_thread_lwp (need_step_over));
4874 return;
4875 }
4876 }
4877
4878 threads_debug_printf ("Proceeding, no step-over needed");
4879
4880 for_each_thread ([this] (thread_info *thread)
4881 {
4882 proceed_one_lwp (thread, NULL);
4883 });
4884 }
4885
4886 void
4887 linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
4888 {
4889 THREADS_SCOPED_DEBUG_ENTER_EXIT;
4890
4891 if (except)
4892 threads_debug_printf ("except=(LWP %ld)",
4893 lwpid_of (get_lwp_thread (except)));
4894 else
4895 threads_debug_printf ("except=nullptr");
4896
4897 if (unsuspend)
4898 for_each_thread ([&] (thread_info *thread)
4899 {
4900 unsuspend_and_proceed_one_lwp (thread, except);
4901 });
4902 else
4903 for_each_thread ([&] (thread_info *thread)
4904 {
4905 proceed_one_lwp (thread, except);
4906 });
4907 }
4908
4909
4910 #ifdef HAVE_LINUX_REGSETS
4911
4912 #define use_linux_regsets 1
4913
4914 /* Returns true if REGSET has been disabled. */
4915
4916 static int
4917 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4918 {
4919 return (info->disabled_regsets != NULL
4920 && info->disabled_regsets[regset - info->regsets]);
4921 }
4922
4923 /* Disable REGSET. */
4924
4925 static void
4926 disable_regset (struct regsets_info *info, struct regset_info *regset)
4927 {
4928 int dr_offset;
4929
4930 dr_offset = regset - info->regsets;
4931 if (info->disabled_regsets == NULL)
4932 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
4933 info->disabled_regsets[dr_offset] = 1;
4934 }
4935
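/* Fetch all register sets described by REGSETS_INFO into REGCACHE,
   using the iovec/NT_* form of ptrace when the regset has an nt_type,
   and the older fixed requests otherwise.  Returns 0 if a
   general-purpose regset was among those fetched, 1 otherwise, which
   tells the caller whether registers must also be read individually.  */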
4936 static int
4937 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4938 struct regcache *regcache)
4939 {
4940 struct regset_info *regset;
4941 int saw_general_regs = 0;
4942 int pid;
4943 struct iovec iov;
4944
4945 pid = lwpid_of (current_thread);
4946 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4947 {
4948 void *buf, *data;
4949 int nt_type, res;
4950
4951 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4952 continue;
4953
4954 buf = xmalloc (regset->size);
4955
4956 nt_type = regset->nt_type;
4957 if (nt_type)
4958 {
4959 iov.iov_base = buf;
4960 iov.iov_len = regset->size;
4961 data = (void *) &iov;
4962 }
4963 else
4964 data = buf;
4965
4966 #ifndef __sparc__
4967 res = ptrace (regset->get_request, pid,
4968 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4969 #else
4970 res = ptrace (regset->get_request, pid, data, nt_type);
4971 #endif
4972 if (res < 0)
4973 {
4974 if (errno == EIO
4975 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
4976 {
4977 /* If we get EIO on a regset, or an EINVAL and the regset is
4978 optional, do not try it again for this process mode. */
4979 disable_regset (regsets_info, regset);
4980 }
4981 else if (errno == ENODATA)
4982 {
4983 /* ENODATA may be returned if the regset is currently
4984 not "active". This can happen in normal operation,
4985 so suppress the warning in this case. */
4986 }
4987 else if (errno == ESRCH)
4988 {
4989 /* At this point, ESRCH should mean the process is
4990 already gone, in which case we simply ignore attempts
4991 to read its registers. */
4992 }
4993 else
4994 {
4995 char s[256];
4996 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4997 pid);
4998 perror (s);
4999 }
5000 }
5001 else
5002 {
5003 if (regset->type == GENERAL_REGS)
5004 saw_general_regs = 1;
5005 regset->store_function (regcache, buf);
5006 }
5007 free (buf);
5008 }
5009 if (saw_general_regs)
5010 return 0;
5011 else
5012 return 1;
5013 }
5014
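/* Counterpart of regsets_fetch_inferior_registers: for each regset,
   read the kernel's current contents, overlay the regcache values on
   top, and write the result back.  Same return convention as the
   fetch variant.  */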
5015 static int
5016 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5017 struct regcache *regcache)
5018 {
5019 struct regset_info *regset;
5020 int saw_general_regs = 0;
5021 int pid;
5022 struct iovec iov;
5023
5024 pid = lwpid_of (current_thread);
5025 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5026 {
5027 void *buf, *data;
5028 int nt_type, res;
5029
5030 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5031 || regset->fill_function == NULL)
5032 continue;
5033
5034 buf = xmalloc (regset->size);
5035
5036 /* First fill the buffer with the current register set contents,
5037 in case there are any items in the kernel's regset that are
5038 not in gdbserver's regcache. */
5039
5040 nt_type = regset->nt_type;
5041 if (nt_type)
5042 {
5043 iov.iov_base = buf;
5044 iov.iov_len = regset->size;
5045 data = (void *) &iov;
5046 }
5047 else
5048 data = buf;
5049
5050 #ifndef __sparc__
5051 res = ptrace (regset->get_request, pid,
5052 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5053 #else
5054 res = ptrace (regset->get_request, pid, data, nt_type);
5055 #endif
5056
5057 if (res == 0)
5058 {
5059 /* Then overlay our cached registers on that. */
5060 regset->fill_function (regcache, buf);
5061
5062 /* Only now do we write the register set. */
5063 #ifndef __sparc__
5064 res = ptrace (regset->set_request, pid,
5065 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5066 #else
5067 res = ptrace (regset->set_request, pid, data, nt_type);
5068 #endif
5069 }
5070
5071 if (res < 0)
5072 {
5073 if (errno == EIO
5074 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5075 {
5076 /* If we get EIO on a regset, or an EINVAL and the regset is
5077 optional, do not try it again for this process mode. */
5078 disable_regset (regsets_info, regset);
5079 }
5080 else if (errno == ESRCH)
5081 {
5082 /* At this point, ESRCH should mean the process is
5083 already gone, in which case we simply ignore attempts
5084 to change its registers. See also the related
5085 comment in resume_one_lwp. */
5086 free (buf);
5087 return 0;
5088 }
5089 else
5090 {
5091 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5092 }
5093 }
5094 else if (regset->type == GENERAL_REGS)
5095 saw_general_regs = 1;
5096 free (buf);
5097 }
5098 if (saw_general_regs)
5099 return 0;
5100 else
5101 return 1;
5102 }
5103
5104 #else /* !HAVE_LINUX_REGSETS */
5105
5106 #define use_linux_regsets 0
5107 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5108 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5109
5110 #endif
5111
5112 /* Return 1 if register REGNO is supported by one of the regset ptrace
5113 calls or 0 if it has to be transferred individually. */
5114
5115 static int
5116 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5117 {
5118 unsigned char mask = 1 << (regno % 8);
5119 size_t index = regno / 8;
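  /* E.g., REGNO 10 maps to bit 2 (mask 0x04) of bitmap byte 1.  */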
5120
5121 return (use_linux_regsets
5122 && (regs_info->regset_bitmap == NULL
5123 || (regs_info->regset_bitmap[index] & mask) != 0));
5124 }
5125
5126 #ifdef HAVE_LINUX_USRREGS
5127
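/* Return the offset in the USER area at which register REGNUM is
   stored, as recorded in USRREGS->regmap.  A value of -1 means the
   register is not accessible via PTRACE_PEEKUSER/POKEUSER.  */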
5128 static int
5129 register_addr (const struct usrregs_info *usrregs, int regnum)
5130 {
5131 int addr;
5132
5133 if (regnum < 0 || regnum >= usrregs->num_regs)
5134 error ("Invalid register number %d.", regnum);
5135
5136 addr = usrregs->regmap[regnum];
5137
5138 return addr;
5139 }
5140
5141
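/* Fetch register REGNO from the inferior one PTRACE_PEEKUSER word at
   a time, and supply the result to REGCACHE.  */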
5142 void
5143 linux_process_target::fetch_register (const usrregs_info *usrregs,
5144 regcache *regcache, int regno)
5145 {
5146 CORE_ADDR regaddr;
5147 int i, size;
5148 char *buf;
5149 int pid;
5150
5151 if (regno >= usrregs->num_regs)
5152 return;
5153 if (low_cannot_fetch_register (regno))
5154 return;
5155
5156 regaddr = register_addr (usrregs, regno);
5157 if (regaddr == -1)
5158 return;
5159
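  /* Round the register size up to a whole number of PTRACE_XFER_TYPE
     words; e.g., a 10-byte register with 8-byte ptrace words is
     transferred as 16 bytes.  */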
5160 size = ((register_size (regcache->tdesc, regno)
5161 + sizeof (PTRACE_XFER_TYPE) - 1)
5162 & -sizeof (PTRACE_XFER_TYPE));
5163 buf = (char *) alloca (size);
5164
5165 pid = lwpid_of (current_thread);
5166 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5167 {
5168 errno = 0;
5169 *(PTRACE_XFER_TYPE *) (buf + i) =
5170 ptrace (PTRACE_PEEKUSER, pid,
5171 /* Coerce to a uintptr_t first to avoid potential gcc warning
5172 of coercing an 8 byte integer to a 4 byte pointer. */
5173 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5174 regaddr += sizeof (PTRACE_XFER_TYPE);
5175 if (errno != 0)
5176 {
5177 /* Mark register REGNO unavailable. */
5178 supply_register (regcache, regno, NULL);
5179 return;
5180 }
5181 }
5182
5183 low_supply_ptrace_register (regcache, regno, buf);
5184 }
5185
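/* Write register REGNO of REGCACHE back to the inferior, one
   PTRACE_POKEUSER word at a time.  */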
5186 void
5187 linux_process_target::store_register (const usrregs_info *usrregs,
5188 regcache *regcache, int regno)
5189 {
5190 CORE_ADDR regaddr;
5191 int i, size;
5192 char *buf;
5193 int pid;
5194
5195 if (regno >= usrregs->num_regs)
5196 return;
5197 if (low_cannot_store_register (regno))
5198 return;
5199
5200 regaddr = register_addr (usrregs, regno);
5201 if (regaddr == -1)
5202 return;
5203
5204 size = ((register_size (regcache->tdesc, regno)
5205 + sizeof (PTRACE_XFER_TYPE) - 1)
5206 & -sizeof (PTRACE_XFER_TYPE));
5207 buf = (char *) alloca (size);
5208 memset (buf, 0, size);
5209
5210 low_collect_ptrace_register (regcache, regno, buf);
5211
5212 pid = lwpid_of (current_thread);
5213 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5214 {
5215 errno = 0;
5216 ptrace (PTRACE_POKEUSER, pid,
5217 /* Coerce to a uintptr_t first to avoid potential gcc warning
5218 about coercing an 8 byte integer to a 4 byte pointer. */
5219 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5220 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5221 if (errno != 0)
5222 {
5223 /* At this point, ESRCH should mean the process is
5224 already gone, in which case we simply ignore attempts
5225 to change its registers. See also the related
5226 comment in resume_one_lwp. */
5227 if (errno == ESRCH)
5228 return;
5229
5230
5231 if (!low_cannot_store_register (regno))
5232 error ("writing register %d: %s", regno, safe_strerror (errno));
5233 }
5234 regaddr += sizeof (PTRACE_XFER_TYPE);
5235 }
5236 }
5237 #endif /* HAVE_LINUX_USRREGS */
5238
5239 void
5240 linux_process_target::low_collect_ptrace_register (regcache *regcache,
5241 int regno, char *buf)
5242 {
5243 collect_register (regcache, regno, buf);
5244 }
5245
5246 void
5247 linux_process_target::low_supply_ptrace_register (regcache *regcache,
5248 int regno, const char *buf)
5249 {
5250 supply_register (regcache, regno, buf);
5251 }
5252
5253 void
5254 linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
5255 regcache *regcache,
5256 int regno, int all)
5257 {
5258 #ifdef HAVE_LINUX_USRREGS
5259 struct usrregs_info *usr = regs_info->usrregs;
5260
5261 if (regno == -1)
5262 {
5263 for (regno = 0; regno < usr->num_regs; regno++)
5264 if (all || !linux_register_in_regsets (regs_info, regno))
5265 fetch_register (usr, regcache, regno);
5266 }
5267 else
5268 fetch_register (usr, regcache, regno);
5269 #endif
5270 }
5271
5272 void
5273 linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
5274 regcache *regcache,
5275 int regno, int all)
5276 {
5277 #ifdef HAVE_LINUX_USRREGS
5278 struct usrregs_info *usr = regs_info->usrregs;
5279
5280 if (regno == -1)
5281 {
5282 for (regno = 0; regno < usr->num_regs; regno++)
5283 if (all || !linux_register_in_regsets (regs_info, regno))
5284 store_register (usr, regcache, regno);
5285 }
5286 else
5287 store_register (usr, regcache, regno);
5288 #endif
5289 }
5290
5291 void
5292 linux_process_target::fetch_registers (regcache *regcache, int regno)
5293 {
5294 int use_regsets;
5295 int all = 0;
5296 const regs_info *regs_info = get_regs_info ();
5297
5298 if (regno == -1)
5299 {
5300 if (regs_info->usrregs != NULL)
5301 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5302 low_fetch_register (regcache, regno);
5303
5304 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5305 if (regs_info->usrregs != NULL)
5306 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5307 }
5308 else
5309 {
5310 if (low_fetch_register (regcache, regno))
5311 return;
5312
5313 use_regsets = linux_register_in_regsets (regs_info, regno);
5314 if (use_regsets)
5315 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5316 regcache);
5317 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5318 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5319 }
5320 }
5321
5322 void
5323 linux_process_target::store_registers (regcache *regcache, int regno)
5324 {
5325 int use_regsets;
5326 int all = 0;
5327 const regs_info *regs_info = get_regs_info ();
5328
5329 if (regno == -1)
5330 {
5331 all = regsets_store_inferior_registers (regs_info->regsets_info,
5332 regcache);
5333 if (regs_info->usrregs != NULL)
5334 usr_store_inferior_registers (regs_info, regcache, regno, all);
5335 }
5336 else
5337 {
5338 use_regsets = linux_register_in_regsets (regs_info, regno);
5339 if (use_regsets)
5340 all = regsets_store_inferior_registers (regs_info->regsets_info,
5341 regcache);
5342 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5343 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5344 }
5345 }
5346
5347 bool
5348 linux_process_target::low_fetch_register (regcache *regcache, int regno)
5349 {
5350 return false;
5351 }
5352
5353 /* A wrapper for the read_memory target op. */
5354
5355 static int
5356 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5357 {
5358 return the_target->read_memory (memaddr, myaddr, len);
5359 }
5360
5361
5362 /* Helper for read_memory/write_memory using /proc/PID/mem. Because
5363 we can use a single read/write call, this can be much more
5364 efficient than banging away at PTRACE_PEEKTEXT. Also, unlike
5365 PTRACE_PEEKTEXT/PTRACE_POKETEXT, this works with running threads.
5366    One and only one of READBUF and WRITEBUF is non-null.  If READBUF is
5367 not null, then we're reading, otherwise we're writing. */
5368
5369 static int
5370 proc_xfer_memory (CORE_ADDR memaddr, unsigned char *readbuf,
5371 const gdb_byte *writebuf, int len)
5372 {
5373 gdb_assert ((readbuf == nullptr) != (writebuf == nullptr));
5374
5375 process_info *proc = current_process ();
5376
5377 int fd = proc->priv->mem_fd;
5378 if (fd == -1)
5379 return EIO;
5380
5381 while (len > 0)
5382 {
5383 int bytes;
5384
5385 /* Use pread64/pwrite64 if available, since they save a syscall
5386 and can handle 64-bit offsets even on 32-bit platforms (for
5387 instance, SPARC debugging a SPARC64 application). But only
5388 use them if the offset isn't so high that when cast to off_t
5389 it'd be negative, as seen on SPARC64. pread64/pwrite64
5390 outright reject such offsets. lseek does not. */
5391 #ifdef HAVE_PREAD64
5392 if ((off_t) memaddr >= 0)
5393 bytes = (readbuf != nullptr
5394 ? pread64 (fd, readbuf, len, memaddr)
5395 : pwrite64 (fd, writebuf, len, memaddr));
5396 else
5397 #endif
5398 {
5399 bytes = -1;
5400 if (lseek (fd, memaddr, SEEK_SET) != -1)
5401 bytes = (readbuf != nullptr
5402 ? read (fd, readbuf, len)
5403 : write (fd, writebuf, len));
5404 }
5405
5406 if (bytes < 0)
5407 return errno;
5408 else if (bytes == 0)
5409 {
5410 /* EOF means the address space is gone, the whole process
5411 exited or execed. */
5412 return EIO;
5413 }
5414
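      /* A short transfer is not an error; advance the cursors and
	 loop to transfer the rest.  */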
5415 memaddr += bytes;
5416 if (readbuf != nullptr)
5417 readbuf += bytes;
5418 else
5419 writebuf += bytes;
5420 len -= bytes;
5421 }
5422
5423 return 0;
5424 }
5425
5426 int
5427 linux_process_target::read_memory (CORE_ADDR memaddr,
5428 unsigned char *myaddr, int len)
5429 {
5430 return proc_xfer_memory (memaddr, myaddr, nullptr, len);
5431 }
5432
5433 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5434 memory at MEMADDR. On failure (cannot write to the inferior)
5435 returns the value of errno. Always succeeds if LEN is zero. */
5436
5437 int
5438 linux_process_target::write_memory (CORE_ADDR memaddr,
5439 const unsigned char *myaddr, int len)
5440 {
5441 if (debug_threads)
5442 {
5443 /* Dump up to four bytes. */
5444 char str[4 * 2 + 1];
5445 char *p = str;
5446 int dump = len < 4 ? len : 4;
5447
5448 for (int i = 0; i < dump; i++)
5449 {
5450 sprintf (p, "%02x", myaddr[i]);
5451 p += 2;
5452 }
5453 *p = '\0';
5454
5455 threads_debug_printf ("Writing %s to 0x%08lx in process %d",
5456 str, (long) memaddr, current_process ()->pid);
5457 }
5458
5459 return proc_xfer_memory (memaddr, nullptr, myaddr, len);
5460 }
5461
5462 void
5463 linux_process_target::look_up_symbols ()
5464 {
5465 #ifdef USE_THREAD_DB
5466 struct process_info *proc = current_process ();
5467
5468 if (proc->priv->thread_db != NULL)
5469 return;
5470
5471 thread_db_init ();
5472 #endif
5473 }
5474
5475 void
5476 linux_process_target::request_interrupt ()
5477 {
5478 /* Send a SIGINT to the process group. This acts just like the user
5479 typed a ^C on the controlling terminal. */
5480 int res = ::kill (-signal_pid, SIGINT);
5481 if (res == -1)
5482 warning (_("Sending SIGINT to process group of pid %ld failed: %s"),
5483 signal_pid, safe_strerror (errno));
5484 }
5485
5486 bool
5487 linux_process_target::supports_read_auxv ()
5488 {
5489 return true;
5490 }
5491
5492 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5493 to debugger memory starting at MYADDR. */
5494
5495 int
5496 linux_process_target::read_auxv (int pid, CORE_ADDR offset,
5497 unsigned char *myaddr, unsigned int len)
5498 {
5499 char filename[PATH_MAX];
5500 int fd, n;
5501
5502 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5503
5504 fd = open (filename, O_RDONLY);
5505 if (fd < 0)
5506 return -1;
5507
5508 if (offset != (CORE_ADDR) 0
5509 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5510 n = -1;
5511 else
5512 n = read (fd, myaddr, len);
5513
5514 close (fd);
5515
5516 return n;
5517 }
5518
5519 int
5520 linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5521 int size, raw_breakpoint *bp)
5522 {
5523 if (type == raw_bkpt_type_sw)
5524 return insert_memory_breakpoint (bp);
5525 else
5526 return low_insert_point (type, addr, size, bp);
5527 }
5528
5529 int
5530 linux_process_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
5531 int size, raw_breakpoint *bp)
5532 {
5533 /* Unsupported (see target.h). */
5534 return 1;
5535 }
5536
5537 int
5538 linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5539 int size, raw_breakpoint *bp)
5540 {
5541 if (type == raw_bkpt_type_sw)
5542 return remove_memory_breakpoint (bp);
5543 else
5544 return low_remove_point (type, addr, size, bp);
5545 }
5546
5547 int
5548 linux_process_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
5549 int size, raw_breakpoint *bp)
5550 {
5551 /* Unsupported (see target.h). */
5552 return 1;
5553 }
5554
5555 /* Implement the stopped_by_sw_breakpoint target_ops
5556 method. */
5557
5558 bool
5559 linux_process_target::stopped_by_sw_breakpoint ()
5560 {
5561 struct lwp_info *lwp = get_thread_lwp (current_thread);
5562
5563 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5564 }
5565
5566 /* Implement the supports_stopped_by_sw_breakpoint target_ops
5567 method. */
5568
5569 bool
5570 linux_process_target::supports_stopped_by_sw_breakpoint ()
5571 {
5572 return USE_SIGTRAP_SIGINFO;
5573 }
5574
5575 /* Implement the stopped_by_hw_breakpoint target_ops
5576 method. */
5577
5578 bool
5579 linux_process_target::stopped_by_hw_breakpoint ()
5580 {
5581 struct lwp_info *lwp = get_thread_lwp (current_thread);
5582
5583 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5584 }
5585
5586 /* Implement the supports_stopped_by_hw_breakpoint target_ops
5587 method. */
5588
5589 bool
5590 linux_process_target::supports_stopped_by_hw_breakpoint ()
5591 {
5592 return USE_SIGTRAP_SIGINFO;
5593 }
5594
5595 /* Implement the supports_hardware_single_step target_ops method. */
5596
5597 bool
5598 linux_process_target::supports_hardware_single_step ()
5599 {
5600 return true;
5601 }
5602
5603 bool
5604 linux_process_target::stopped_by_watchpoint ()
5605 {
5606 struct lwp_info *lwp = get_thread_lwp (current_thread);
5607
5608 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5609 }
5610
5611 CORE_ADDR
5612 linux_process_target::stopped_data_address ()
5613 {
5614 struct lwp_info *lwp = get_thread_lwp (current_thread);
5615
5616 return lwp->stopped_data_address;
5617 }
5618
5619 /* This is only used for targets that define PT_TEXT_ADDR,
5620 PT_DATA_ADDR and PT_TEXT_END_ADDR.  If those are not defined, the
5621 target presumably has other ways of acquiring this information,
5622 such as loadmaps. */
5623
5624 bool
5625 linux_process_target::supports_read_offsets ()
5626 {
5627 #ifdef SUPPORTS_READ_OFFSETS
5628 return true;
5629 #else
5630 return false;
5631 #endif
5632 }
5633
5634 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5635 to tell gdb about. */
5636
5637 int
5638 linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5639 {
5640 #ifdef SUPPORTS_READ_OFFSETS
5641 unsigned long text, text_end, data;
5642 int pid = lwpid_of (current_thread);
5643
5644 errno = 0;
5645
5646 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5647 (PTRACE_TYPE_ARG4) 0);
5648 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5649 (PTRACE_TYPE_ARG4) 0);
5650 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5651 (PTRACE_TYPE_ARG4) 0);
5652
5653 if (errno == 0)
5654 {
5655 /* Both text and data offsets produced at compile-time (and so
5656 used by gdb) are relative to the beginning of the program,
5657 with the data segment immediately following the text segment.
5658 However, the actual runtime layout in memory may put the data
5659 somewhere else, so when we send gdb a data base-address, we
5660 use the real data base address and subtract the compile-time
5661 data base-address from it (which is just the length of the
5662 text segment). BSS immediately follows data in both
5663 cases. */
5664 *text_p = text;
5665 *data_p = data - (text_end - text);
5666
5667 return 1;
5668 }
5669 return 0;
5670 #else
5671 gdb_assert_not_reached ("target op read_offsets not supported");
5672 #endif
5673 }
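
/* Worked example for the computation above: suppose PTRACE_PEEKUSER
   reports TEXT = 0x10000, TEXT_END = 0x18000 and DATA = 0x20000 (all
   values illustrative).  The text segment is then 0x8000 bytes long,
   and the compile-time data addresses start right after text, i.e. at
   offset 0x8000; so the data base address reported to GDB is
   0x20000 - (0x18000 - 0x10000) = 0x18000.  */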
5674
5675 bool
5676 linux_process_target::supports_get_tls_address ()
5677 {
5678 #ifdef USE_THREAD_DB
5679 return true;
5680 #else
5681 return false;
5682 #endif
5683 }
5684
5685 int
5686 linux_process_target::get_tls_address (thread_info *thread,
5687 CORE_ADDR offset,
5688 CORE_ADDR load_module,
5689 CORE_ADDR *address)
5690 {
5691 #ifdef USE_THREAD_DB
5692 return thread_db_get_tls_address (thread, offset, load_module, address);
5693 #else
5694 return -1;
5695 #endif
5696 }
5697
5698 bool
5699 linux_process_target::supports_qxfer_osdata ()
5700 {
5701 return true;
5702 }
5703
5704 int
5705 linux_process_target::qxfer_osdata (const char *annex,
5706 unsigned char *readbuf,
5707 unsigned const char *writebuf,
5708 CORE_ADDR offset, int len)
5709 {
5710 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5711 }
5712
5713 void
5714 linux_process_target::siginfo_fixup (siginfo_t *siginfo,
5715 gdb_byte *inf_siginfo, int direction)
5716 {
5717 bool done = low_siginfo_fixup (siginfo, inf_siginfo, direction);
5718
5719 /* If there was no callback, or the callback didn't do anything,
5720 then just do a straight memcpy. */
5721 if (!done)
5722 {
5723 if (direction == 1)
5724 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5725 else
5726 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5727 }
5728 }
5729
5730 bool
5731 linux_process_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
5732 int direction)
5733 {
5734 return false;
5735 }
5736
5737 bool
5738 linux_process_target::supports_qxfer_siginfo ()
5739 {
5740 return true;
5741 }
5742
5743 int
5744 linux_process_target::qxfer_siginfo (const char *annex,
5745 unsigned char *readbuf,
5746 unsigned const char *writebuf,
5747 CORE_ADDR offset, int len)
5748 {
5749 int pid;
5750 siginfo_t siginfo;
5751 gdb_byte inf_siginfo[sizeof (siginfo_t)];
5752
5753 if (current_thread == NULL)
5754 return -1;
5755
5756 pid = lwpid_of (current_thread);
5757
5758 threads_debug_printf ("%s siginfo for lwp %d.",
5759 readbuf != NULL ? "Reading" : "Writing",
5760 pid);
5761
5762 if (offset >= sizeof (siginfo))
5763 return -1;
5764
5765 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5766 return -1;
5767
5768 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5769 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5770 inferior with a 64-bit GDBSERVER should look the same as debugging it
5771 with a 32-bit GDBSERVER, we need to convert it. */
5772 siginfo_fixup (&siginfo, inf_siginfo, 0);
5773
5774 if (offset + len > sizeof (siginfo))
5775 len = sizeof (siginfo) - offset;
5776
5777 if (readbuf != NULL)
5778 memcpy (readbuf, inf_siginfo + offset, len);
5779 else
5780 {
5781 memcpy (inf_siginfo + offset, writebuf, len);
5782
5783 /* Convert back to ptrace layout before flushing it out. */
5784 siginfo_fixup (&siginfo, inf_siginfo, 1);
5785
5786 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5787 return -1;
5788 }
5789
5790 return len;
5791 }
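
/* A worked example of the clamping above, assuming a 128-byte
   siginfo_t: a request with OFFSET = 96 and LEN = 64 returns the last
   32 bytes, while any request with OFFSET >= 128 fails with -1.  */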
5792
5793 /* SIGCHLD handler that serves two purposes: in non-stop/async mode
5794 it lets us notice when children change state, and it acts as the
5795 handler for the sigsuspend in my_waitpid. */
5796
5797 static void
5798 sigchld_handler (int signo)
5799 {
5800 int old_errno = errno;
5801
5802 if (debug_threads)
5803 {
5804 do
5805 {
5806 /* Use the async signal safe debug function. */
5807 if (debug_write ("sigchld_handler\n",
5808 sizeof ("sigchld_handler\n") - 1) < 0)
5809 break; /* just ignore */
5810 } while (0);
5811 }
5812
5813 if (target_is_async_p ())
5814 async_file_mark (); /* trigger a linux_wait */
5815
5816 errno = old_errno;
5817 }
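
/* async_file_mark and the event pipe implement the classic self-pipe
   trick: the signal handler performs only an async-signal-safe write
   to a pipe, and the event loop polls the pipe's read end, turning a
   signal into an ordinary file-descriptor event.  A minimal,
   self-contained sketch of the pattern (independent of the event_pipe
   class actually used here; names are illustrative):

   #include <unistd.h>
   #include <errno.h>

   static int ev_fds[2];   // ev_fds[0]: read end, ev_fds[1]: write end

   static void
   example_handler (int signo)
   {
     int saved_errno = errno;
     char c = '+';

     (void) write (ev_fds[1], &c, 1);   // async-signal-safe
     errno = saved_errno;
   }

   // The event loop then select()s/poll()s on ev_fds[0] and drains it
   // before checking for child status changes with waitpid.  */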
5818
5819 bool
5820 linux_process_target::supports_non_stop ()
5821 {
5822 return true;
5823 }
5824
5825 bool
5826 linux_process_target::async (bool enable)
5827 {
5828 bool previous = target_is_async_p ();
5829
5830 threads_debug_printf ("async (%d), previous=%d",
5831 enable, previous);
5832
5833 if (previous != enable)
5834 {
5835 sigset_t mask;
5836 sigemptyset (&mask);
5837 sigaddset (&mask, SIGCHLD);
5838
5839 gdb_sigmask (SIG_BLOCK, &mask, NULL);
5840
5841 if (enable)
5842 {
5843 if (!linux_event_pipe.open_pipe ())
5844 {
5845 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
5846
5847 warning ("creating event pipe failed.");
5848 return previous;
5849 }
5850
5851 /* Register the event loop handler. */
5852 add_file_handler (linux_event_pipe.event_fd (),
5853 handle_target_event, NULL,
5854 "linux-low");
5855
5856 /* Always trigger a linux_wait. */
5857 async_file_mark ();
5858 }
5859 else
5860 {
5861 delete_file_handler (linux_event_pipe.event_fd ());
5862
5863 linux_event_pipe.close_pipe ();
5864 }
5865
5866 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
5867 }
5868
5869 return previous;
5870 }
5871
5872 int
5873 linux_process_target::start_non_stop (bool nonstop)
5874 {
5875 /* Register or unregister from event-loop accordingly. */
5876 target_async (nonstop);
5877
5878 if (target_is_async_p () != (nonstop != false))
5879 return -1;
5880
5881 return 0;
5882 }
5883
5884 bool
5885 linux_process_target::supports_multi_process ()
5886 {
5887 return true;
5888 }
5889
5890 /* Check if fork events are supported. */
5891
5892 bool
5893 linux_process_target::supports_fork_events ()
5894 {
5895 return true;
5896 }
5897
5898 /* Check if vfork events are supported. */
5899
5900 bool
5901 linux_process_target::supports_vfork_events ()
5902 {
5903 return true;
5904 }
5905
5906 /* Check if exec events are supported. */
5907
5908 bool
5909 linux_process_target::supports_exec_events ()
5910 {
5911 return true;
5912 }
5913
5914 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
5915 ptrace flags for all inferiors. This is in case the new GDB connection
5916 doesn't support the same set of events that the previous one did. */
5917
5918 void
5919 linux_process_target::handle_new_gdb_connection ()
5920 {
5921 /* Request that all the lwps reset their ptrace options. */
5922 for_each_thread ([] (thread_info *thread)
5923 {
5924 struct lwp_info *lwp = get_thread_lwp (thread);
5925
5926 if (!lwp->stopped)
5927 {
5928 /* Stop the lwp so we can modify its ptrace options. */
5929 lwp->must_set_ptrace_flags = 1;
5930 linux_stop_lwp (lwp);
5931 }
5932 else
5933 {
5934 /* Already stopped; go ahead and set the ptrace options. */
5935 struct process_info *proc = find_process_pid (pid_of (thread));
5936 int options = linux_low_ptrace_options (proc->attached);
5937
5938 linux_enable_event_reporting (lwpid_of (thread), options);
5939 lwp->must_set_ptrace_flags = 0;
5940 }
5941 });
5942 }
5943
5944 int
5945 linux_process_target::handle_monitor_command (char *mon)
5946 {
5947 #ifdef USE_THREAD_DB
5948 return thread_db_handle_monitor_command (mon);
5949 #else
5950 return 0;
5951 #endif
5952 }
5953
5954 int
5955 linux_process_target::core_of_thread (ptid_t ptid)
5956 {
5957 return linux_common_core_of_thread (ptid);
5958 }
5959
5960 bool
5961 linux_process_target::supports_disable_randomization ()
5962 {
5963 return true;
5964 }
5965
5966 bool
5967 linux_process_target::supports_agent ()
5968 {
5969 return true;
5970 }
5971
5972 bool
5973 linux_process_target::supports_range_stepping ()
5974 {
5975 if (supports_software_single_step ())
5976 return true;
5977
5978 return low_supports_range_stepping ();
5979 }
5980
5981 bool
5982 linux_process_target::low_supports_range_stepping ()
5983 {
5984 return false;
5985 }
5986
5987 bool
5988 linux_process_target::supports_pid_to_exec_file ()
5989 {
5990 return true;
5991 }
5992
5993 const char *
5994 linux_process_target::pid_to_exec_file (int pid)
5995 {
5996 return linux_proc_pid_to_exec_file (pid);
5997 }
5998
5999 bool
6000 linux_process_target::supports_multifs ()
6001 {
6002 return true;
6003 }
6004
6005 int
6006 linux_process_target::multifs_open (int pid, const char *filename,
6007 int flags, mode_t mode)
6008 {
6009 return linux_mntns_open_cloexec (pid, filename, flags, mode);
6010 }
6011
6012 int
6013 linux_process_target::multifs_unlink (int pid, const char *filename)
6014 {
6015 return linux_mntns_unlink (pid, filename);
6016 }
6017
6018 ssize_t
6019 linux_process_target::multifs_readlink (int pid, const char *filename,
6020 char *buf, size_t bufsiz)
6021 {
6022 return linux_mntns_readlink (pid, filename, buf, bufsiz);
6023 }
6024
6025 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6026 struct target_loadseg
6027 {
6028 /* Core address to which the segment is mapped. */
6029 Elf32_Addr addr;
6030 /* VMA recorded in the program header. */
6031 Elf32_Addr p_vaddr;
6032 /* Size of this segment in memory. */
6033 Elf32_Word p_memsz;
6034 };
6035
6036 # if defined PT_GETDSBT
6037 struct target_loadmap
6038 {
6039 /* Protocol version number, must be zero. */
6040 Elf32_Word version;
6041 /* Pointer to the DSBT table, its size, and the DSBT index. */
6042 unsigned *dsbt_table;
6043 unsigned dsbt_size, dsbt_index;
6044 /* Number of segments in this map. */
6045 Elf32_Word nsegs;
6046 /* The actual memory map. */
6047 struct target_loadseg segs[/*nsegs*/];
6048 };
6049 # define LINUX_LOADMAP PT_GETDSBT
6050 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6051 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6052 # else
6053 struct target_loadmap
6054 {
6055 /* Protocol version number, must be zero. */
6056 Elf32_Half version;
6057 /* Number of segments in this map. */
6058 Elf32_Half nsegs;
6059 /* The actual memory map. */
6060 struct target_loadseg segs[/*nsegs*/];
6061 };
6062 # define LINUX_LOADMAP PTRACE_GETFDPIC
6063 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6064 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6065 # endif
6066
6067 bool
6068 linux_process_target::supports_read_loadmap ()
6069 {
6070 return true;
6071 }
6072
6073 int
6074 linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
6075 unsigned char *myaddr, unsigned int len)
6076 {
6077 int pid = lwpid_of (current_thread);
6078 int addr = -1;
6079 struct target_loadmap *data = NULL;
6080 unsigned int actual_length, copy_length;
6081
6082 if (strcmp (annex, "exec") == 0)
6083 addr = (int) LINUX_LOADMAP_EXEC;
6084 else if (strcmp (annex, "interp") == 0)
6085 addr = (int) LINUX_LOADMAP_INTERP;
6086 else
6087 return -1;
6088
6089 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6090 return -1;
6091
6092 if (data == NULL)
6093 return -1;
6094
6095 actual_length = sizeof (struct target_loadmap)
6096 + sizeof (struct target_loadseg) * data->nsegs;
6097
6098 if (offset < 0 || offset > actual_length)
6099 return -1;
6100
6101 copy_length = actual_length - offset < len ? actual_length - offset : len;
6102 memcpy (myaddr, (char *) data + offset, copy_length);
6103 return copy_length;
6104 }
6105 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6106
6107 bool
6108 linux_process_target::supports_catch_syscall ()
6109 {
6110 return low_supports_catch_syscall ();
6111 }
6112
6113 bool
6114 linux_process_target::low_supports_catch_syscall ()
6115 {
6116 return false;
6117 }
6118
6119 CORE_ADDR
6120 linux_process_target::read_pc (regcache *regcache)
6121 {
6122 if (!low_supports_breakpoints ())
6123 return 0;
6124
6125 return low_get_pc (regcache);
6126 }
6127
6128 void
6129 linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
6130 {
6131 gdb_assert (low_supports_breakpoints ());
6132
6133 low_set_pc (regcache, pc);
6134 }
6135
6136 bool
6137 linux_process_target::supports_thread_stopped ()
6138 {
6139 return true;
6140 }
6141
6142 bool
6143 linux_process_target::thread_stopped (thread_info *thread)
6144 {
6145 return get_thread_lwp (thread)->stopped;
6146 }
6147
6148 /* This exposes stop-all-threads functionality to other gdbserver modules. */
6149
6150 void
6151 linux_process_target::pause_all (bool freeze)
6152 {
6153 stop_all_lwps (freeze, NULL);
6154 }
6155
6156 /* This exposes unstop-all-threads functionality to other gdbserver
6157 modules. */
6158
6159 void
6160 linux_process_target::unpause_all (bool unfreeze)
6161 {
6162 unstop_all_lwps (unfreeze, NULL);
6163 }
6164
6165 /* Extract &phdr and num_phdr from the inferior's auxv.  Return 0 on success. */
6166
6167 static int
6168 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6169 CORE_ADDR *phdr_memaddr, int *num_phdr)
6170 {
6171 char filename[PATH_MAX];
6172 int fd;
6173 const int auxv_size = is_elf64
6174 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6175 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6176
6177 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6178
6179 fd = open (filename, O_RDONLY);
6180 if (fd < 0)
6181 return 1;
6182
6183 *phdr_memaddr = 0;
6184 *num_phdr = 0;
6185 while (read (fd, buf, auxv_size) == auxv_size
6186 && (*phdr_memaddr == 0 || *num_phdr == 0))
6187 {
6188 if (is_elf64)
6189 {
6190 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6191
6192 switch (aux->a_type)
6193 {
6194 case AT_PHDR:
6195 *phdr_memaddr = aux->a_un.a_val;
6196 break;
6197 case AT_PHNUM:
6198 *num_phdr = aux->a_un.a_val;
6199 break;
6200 }
6201 }
6202 else
6203 {
6204 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6205
6206 switch (aux->a_type)
6207 {
6208 case AT_PHDR:
6209 *phdr_memaddr = aux->a_un.a_val;
6210 break;
6211 case AT_PHNUM:
6212 *num_phdr = aux->a_un.a_val;
6213 break;
6214 }
6215 }
6216 }
6217
6218 close (fd);
6219
6220 if (*phdr_memaddr == 0 || *num_phdr == 0)
6221 {
6222 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6223 "phdr_memaddr = %ld, phdr_num = %d",
6224 (long) *phdr_memaddr, *num_phdr);
6225 return 2;
6226 }
6227
6228 return 0;
6229 }
6230
6231 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6232
6233 static CORE_ADDR
6234 get_dynamic (const int pid, const int is_elf64)
6235 {
6236 CORE_ADDR phdr_memaddr, relocation;
6237 int num_phdr, i;
6238 unsigned char *phdr_buf;
6239 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6240
6241 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6242 return 0;
6243
6244 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6245 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6246
6247 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6248 return 0;
6249
6250 /* Compute relocation: it is expected to be 0 for "regular" executables,
6251 non-zero for PIE ones. */
6252 relocation = -1;
6253 for (i = 0; relocation == -1 && i < num_phdr; i++)
6254 if (is_elf64)
6255 {
6256 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6257
6258 if (p->p_type == PT_PHDR)
6259 relocation = phdr_memaddr - p->p_vaddr;
6260 }
6261 else
6262 {
6263 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6264
6265 if (p->p_type == PT_PHDR)
6266 relocation = phdr_memaddr - p->p_vaddr;
6267 }
6268
6269 if (relocation == -1)
6270 {
6271 /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately,
6272 real-world executables, including PIE executables, always have
6273 PT_PHDR present.  PT_PHDR is missing from some shared libraries and
6274 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6275 provides DT_DEBUG anyway (fpc binaries are statically linked).
6276
6277 Therefore, wherever DT_DEBUG exists, PT_PHDR exists as well.
6278
6279 GDB could also derive RELOCATION from AT_ENTRY - e_entry. */
6280
6281 return 0;
6282 }
6283
6284 for (i = 0; i < num_phdr; i++)
6285 {
6286 if (is_elf64)
6287 {
6288 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6289
6290 if (p->p_type == PT_DYNAMIC)
6291 return p->p_vaddr + relocation;
6292 }
6293 else
6294 {
6295 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6296
6297 if (p->p_type == PT_DYNAMIC)
6298 return p->p_vaddr + relocation;
6299 }
6300 }
6301
6302 return 0;
6303 }
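
/* Worked example for the relocation logic above (addresses are
   illustrative): for a PIE loaded at 0x555555554000 whose PT_PHDR
   records p_vaddr = 0x40, the auxv gives AT_PHDR = 0x555555554040,
   so RELOCATION = 0x555555554040 - 0x40 = 0x555555554000; a
   PT_DYNAMIC with p_vaddr = 0x2d80 then yields
   &_DYNAMIC = 0x555555554000 + 0x2d80 = 0x555555556d80.  */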
6304
6305 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6306 can be 0 if the inferior does not yet have the library list initialized.
6307 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6308 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6309
6310 static CORE_ADDR
6311 get_r_debug (const int pid, const int is_elf64)
6312 {
6313 CORE_ADDR dynamic_memaddr;
6314 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6315 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6316 CORE_ADDR map = -1;
6317
6318 dynamic_memaddr = get_dynamic (pid, is_elf64);
6319 if (dynamic_memaddr == 0)
6320 return map;
6321
6322 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6323 {
6324 if (is_elf64)
6325 {
6326 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6327 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6328 union
6329 {
6330 Elf64_Xword map;
6331 unsigned char buf[sizeof (Elf64_Xword)];
6332 }
6333 rld_map;
6334 #endif
6335 #ifdef DT_MIPS_RLD_MAP
6336 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6337 {
6338 if (linux_read_memory (dyn->d_un.d_val,
6339 rld_map.buf, sizeof (rld_map.buf)) == 0)
6340 return rld_map.map;
6341 else
6342 break;
6343 }
6344 #endif /* DT_MIPS_RLD_MAP */
6345 #ifdef DT_MIPS_RLD_MAP_REL
6346 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6347 {
6348 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6349 rld_map.buf, sizeof (rld_map.buf)) == 0)
6350 return rld_map.map;
6351 else
6352 break;
6353 }
6354 #endif /* DT_MIPS_RLD_MAP_REL */
6355
6356 if (dyn->d_tag == DT_DEBUG && map == -1)
6357 map = dyn->d_un.d_val;
6358
6359 if (dyn->d_tag == DT_NULL)
6360 break;
6361 }
6362 else
6363 {
6364 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6365 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6366 union
6367 {
6368 Elf32_Word map;
6369 unsigned char buf[sizeof (Elf32_Word)];
6370 }
6371 rld_map;
6372 #endif
6373 #ifdef DT_MIPS_RLD_MAP
6374 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6375 {
6376 if (linux_read_memory (dyn->d_un.d_val,
6377 rld_map.buf, sizeof (rld_map.buf)) == 0)
6378 return rld_map.map;
6379 else
6380 break;
6381 }
6382 #endif /* DT_MIPS_RLD_MAP */
6383 #ifdef DT_MIPS_RLD_MAP_REL
6384 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6385 {
6386 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6387 rld_map.buf, sizeof (rld_map.buf)) == 0)
6388 return rld_map.map;
6389 else
6390 break;
6391 }
6392 #endif /* DT_MIPS_RLD_MAP_REL */
6393
6394 if (dyn->d_tag == DT_DEBUG && map == -1)
6395 map = dyn->d_un.d_val;
6396
6397 if (dyn->d_tag == DT_NULL)
6398 break;
6399 }
6400
6401 dynamic_memaddr += dyn_size;
6402 }
6403
6404 return map;
6405 }
6406
6407 /* Read one pointer from MEMADDR in the inferior. */
6408
6409 static int
6410 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6411 {
6412 int ret;
6413
6414 /* Go through a union so this works on either big- or little-endian
6415 hosts, when the inferior's pointer size is smaller than the size
6416 of CORE_ADDR.  It is assumed the inferior's endianness is the
6417 same as the superior's. */
6418 union
6419 {
6420 CORE_ADDR core_addr;
6421 unsigned int ui;
6422 unsigned char uc;
6423 } addr;
6424
6425 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6426 if (ret == 0)
6427 {
6428 if (ptr_size == sizeof (CORE_ADDR))
6429 *ptr = addr.core_addr;
6430 else if (ptr_size == sizeof (unsigned int))
6431 *ptr = addr.ui;
6432 else
6433 gdb_assert_not_reached ("unhandled pointer size");
6434 }
6435 return ret;
6436 }
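
/* Concrete illustration of the union trick above: on a big-endian
   host where CORE_ADDR is 8 bytes and the inferior's pointers are 4
   bytes, linux_read_memory deposits the 4 pointer bytes at the start
   of the union; reading them back through ADDR.UI (a 4-byte object
   at offset 0) produces the right value, whereas ADDR.CORE_ADDR
   would treat them as the high half of an 8-byte integer.  */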
6437
6438 bool
6439 linux_process_target::supports_qxfer_libraries_svr4 ()
6440 {
6441 return true;
6442 }
6443
6444 struct link_map_offsets
6445 {
6446 /* Offset and size of r_debug.r_version. */
6447 int r_version_offset;
6448
6449 /* Offset and size of r_debug.r_map. */
6450 int r_map_offset;
6451
6452 /* Offset of r_debug_extended.r_next. */
6453 int r_next_offset;
6454
6455 /* Offset to l_addr field in struct link_map. */
6456 int l_addr_offset;
6457
6458 /* Offset to l_name field in struct link_map. */
6459 int l_name_offset;
6460
6461 /* Offset to l_ld field in struct link_map. */
6462 int l_ld_offset;
6463
6464 /* Offset to l_next field in struct link_map. */
6465 int l_next_offset;
6466
6467 /* Offset to l_prev field in struct link_map. */
6468 int l_prev_offset;
6469 };
6470
6471 static const link_map_offsets lmo_32bit_offsets =
6472 {
6473 0, /* r_version offset. */
6474 4, /* r_debug.r_map offset. */
6475 20, /* r_debug_extended.r_next. */
6476 0, /* l_addr offset in link_map. */
6477 4, /* l_name offset in link_map. */
6478 8, /* l_ld offset in link_map. */
6479 12, /* l_next offset in link_map. */
6480 16 /* l_prev offset in link_map. */
6481 };
6482
6483 static const link_map_offsets lmo_64bit_offsets =
6484 {
6485 0, /* r_version offset. */
6486 8, /* r_debug.r_map offset. */
6487 40, /* r_debug_extended.r_next. */
6488 0, /* l_addr offset in link_map. */
6489 8, /* l_name offset in link_map. */
6490 16, /* l_ld offset in link_map. */
6491 24, /* l_next offset in link_map. */
6492 32 /* l_prev offset in link_map. */
6493 };
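
/* For reference, these offsets correspond to the glibc <link.h>
   layout of the structures being walked; in sketch form, for the
   LP64 case (field names as in glibc, padding implied by the offsets
   above):

   struct r_debug
   {
     int r_version;              // offset 0
     struct link_map *r_map;     // offset 8 (after padding)
     ElfW(Addr) r_brk;           // offset 16
     int r_state;                // offset 24 (an enum in glibc)
     ElfW(Addr) r_ldbase;        // offset 32
   };                            // r_debug_extended.r_next: offset 40

   struct link_map
   {
     ElfW(Addr) l_addr;          // offset 0
     char *l_name;               // offset 8
     ElfW(Dyn) *l_ld;            // offset 16
     struct link_map *l_next;    // offset 24
     struct link_map *l_prev;    // offset 32
   };  */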
6494
6495 /* Get the loaded shared libraries from one namespace. */
6496
6497 static void
6498 read_link_map (std::string &document, CORE_ADDR lmid, CORE_ADDR lm_addr,
6499 CORE_ADDR lm_prev, int ptr_size, const link_map_offsets *lmo)
6500 {
6501 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6502
6503 while (lm_addr
6504 && read_one_ptr (lm_addr + lmo->l_name_offset,
6505 &l_name, ptr_size) == 0
6506 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6507 &l_addr, ptr_size) == 0
6508 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6509 &l_ld, ptr_size) == 0
6510 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6511 &l_prev, ptr_size) == 0
6512 && read_one_ptr (lm_addr + lmo->l_next_offset,
6513 &l_next, ptr_size) == 0)
6514 {
6515 unsigned char libname[PATH_MAX];
6516
6517 if (lm_prev != l_prev)
6518 {
6519 warning ("Corrupted shared library list: 0x%s != 0x%s",
6520 paddress (lm_prev), paddress (l_prev));
6521 break;
6522 }
6523
6524 /* Not checking for error because reading may stop before we've got
6525 PATH_MAX worth of characters. */
6526 libname[0] = '\0';
6527 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6528 libname[sizeof (libname) - 1] = '\0';
6529 if (libname[0] != '\0')
6530 {
6531 string_appendf (document, "<library name=\"");
6532 xml_escape_text_append (document, (char *) libname);
6533 string_appendf (document, "\" lm=\"0x%s\" l_addr=\"0x%s\" "
6534 "l_ld=\"0x%s\" lmid=\"0x%s\"/>",
6535 paddress (lm_addr), paddress (l_addr),
6536 paddress (l_ld), paddress (lmid));
6537 }
6538
6539 lm_prev = lm_addr;
6540 lm_addr = l_next;
6541 }
6542 }
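
/* Each entry appended to DOCUMENT above has the form (addresses
   illustrative):

   <library name="/lib64/libc.so.6" lm="0x7ffff7ffe2e0"
            l_addr="0x7ffff7dc0000" l_ld="0x7ffff7fa8bc0" lmid="0x0"/>
*/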
6543
6544 /* Construct qXfer:libraries-svr4:read reply. */
6545
6546 int
6547 linux_process_target::qxfer_libraries_svr4 (const char *annex,
6548 unsigned char *readbuf,
6549 unsigned const char *writebuf,
6550 CORE_ADDR offset, int len)
6551 {
6552 struct process_info_private *const priv = current_process ()->priv;
6553 char filename[PATH_MAX];
6554 int pid, is_elf64;
6555 unsigned int machine;
6556 CORE_ADDR lmid = 0, lm_addr = 0, lm_prev = 0;
6557
6558 if (writebuf != NULL)
6559 return -2;
6560 if (readbuf == NULL)
6561 return -1;
6562
6563 pid = lwpid_of (current_thread);
6564 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6565 is_elf64 = elf_64_file_p (filename, &machine);
6566 const link_map_offsets *lmo;
6567 int ptr_size;
6568 if (is_elf64)
6569 {
6570 lmo = &lmo_64bit_offsets;
6571 ptr_size = 8;
6572 }
6573 else
6574 {
6575 lmo = &lmo_32bit_offsets;
6576 ptr_size = 4;
6577 }
6578
6579 while (annex[0] != '\0')
6580 {
6581 const char *sep;
6582 CORE_ADDR *addrp;
6583 int name_len;
6584
6585 sep = strchr (annex, '=');
6586 if (sep == NULL)
6587 break;
6588
6589 name_len = sep - annex;
6590 if (name_len == 4 && startswith (annex, "lmid"))
6591 addrp = &lmid;
6592 else if (name_len == 5 && startswith (annex, "start"))
6593 addrp = &lm_addr;
6594 else if (name_len == 4 && startswith (annex, "prev"))
6595 addrp = &lm_prev;
6596 else
6597 {
6598 annex = strchr (sep, ';');
6599 if (annex == NULL)
6600 break;
6601 annex++;
6602 continue;
6603 }
6604
6605 annex = decode_address_to_semicolon (addrp, sep + 1);
6606 }
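
/* For example, an ANNEX of "lmid=0x0;start=0x7ffff7ffe190;prev=0x0"
   (values illustrative) leaves LMID, LM_ADDR and LM_PREV holding
   those addresses, which restricts the traversal below to that one
   namespace.  */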
6607
6608 std::string document = "<library-list-svr4 version=\"1.0\"";
6609
6610 /* When the starting LM_ADDR is passed in the annex, only traverse that
6611 namespace, which is assumed to be identified by LMID.
6612
6613 Otherwise, start with R_DEBUG and traverse all namespaces we find. */
6614 if (lm_addr != 0)
6615 {
6616 document += ">";
6617 read_link_map (document, lmid, lm_addr, lm_prev, ptr_size, lmo);
6618 }
6619 else
6620 {
6621 if (lm_prev != 0)
6622 warning ("ignoring prev=0x%s without start", paddress (lm_prev));
6623
6624 /* We could interpret LMID as 'provide only the libraries for this
6625 namespace', but GDB currently provides either lmid, start, and
6626 prev together, or nothing at all. */
6627 if (lmid != 0)
6628 warning ("ignoring lmid=0x%s without start", paddress (lmid));
6629
6630 CORE_ADDR r_debug = priv->r_debug;
6631 if (r_debug == 0)
6632 r_debug = priv->r_debug = get_r_debug (pid, is_elf64);
6633
6634 /* We failed to find DT_DEBUG.  This situation will not change
6635 for this inferior, so do not retry.  Report it to GDB as E01;
6636 see the GDB solib-svr4.c side for the reasons. */
6637 if (r_debug == (CORE_ADDR) -1)
6638 return -1;
6639
6640 /* Terminate the header if we end up with an empty list. */
6641 if (r_debug == 0)
6642 document += ">";
6643
6644 while (r_debug != 0)
6645 {
6646 int r_version = 0;
6647 if (linux_read_memory (r_debug + lmo->r_version_offset,
6648 (unsigned char *) &r_version,
6649 sizeof (r_version)) != 0)
6650 {
6651 warning ("unable to read r_version from 0x%s",
6652 paddress (r_debug + lmo->r_version_offset));
6653 break;
6654 }
6655
6656 if (r_version < 1)
6657 {
6658 warning ("unexpected r_debug version %d", r_version);
6659 break;
6660 }
6661
6662 if (read_one_ptr (r_debug + lmo->r_map_offset, &lm_addr,
6663 ptr_size) != 0)
6664 {
6665 warning ("unable to read r_map from 0x%s",
6666 paddress (r_debug + lmo->r_map_offset));
6667 break;
6668 }
6669
6670 /* We read the entire namespace. */
6671 lm_prev = 0;
6672
6673 /* The first entry corresponds to the main executable unless the
6674 dynamic loader was loaded late by a static executable.  But in
6675 that case the main executable does not have PT_DYNAMIC present,
6676 and we would not have gotten here. */
6677 if (r_debug == priv->r_debug)
6678 {
6679 if (lm_addr != 0)
6680 string_appendf (document, " main-lm=\"0x%s\">",
6681 paddress (lm_addr));
6682 else
6683 document += ">";
6684
6685 lm_prev = lm_addr;
6686 if (read_one_ptr (lm_addr + lmo->l_next_offset,
6687 &lm_addr, ptr_size) != 0)
6688 {
6689 warning ("unable to read l_next from 0x%s",
6690 paddress (lm_addr + lmo->l_next_offset));
6691 break;
6692 }
6693 }
6694
6695 read_link_map (document, r_debug, lm_addr, lm_prev, ptr_size, lmo);
6696
6697 if (r_version < 2)
6698 break;
6699
6700 if (read_one_ptr (r_debug + lmo->r_next_offset, &r_debug,
6701 ptr_size) != 0)
6702 {
6703 warning ("unable to read r_next from 0x%s",
6704 paddress (r_debug + lmo->r_next_offset));
6705 break;
6706 }
6707 }
6708 }
6709
6710 document += "</library-list-svr4>";
6711
6712 int document_len = document.length ();
6713 if (offset < document_len)
6714 document_len -= offset;
6715 else
6716 document_len = 0;
6717 if (len > document_len)
6718 len = document_len;
6719
6720 memcpy (readbuf, document.data () + offset, len);
6721
6722 return len;
6723 }
6724
6725 #ifdef HAVE_LINUX_BTRACE
6726
6727 bool
6728 linux_process_target::supports_btrace ()
6729 {
6730 return true;
6731 }
6732
6733 btrace_target_info *
6734 linux_process_target::enable_btrace (thread_info *tp,
6735 const btrace_config *conf)
6736 {
6737 return linux_enable_btrace (tp->id, conf);
6738 }
6739
6740 /* See to_disable_btrace target method. */
6741
6742 int
6743 linux_process_target::disable_btrace (btrace_target_info *tinfo)
6744 {
6745 enum btrace_error err;
6746
6747 err = linux_disable_btrace (tinfo);
6748 return (err == BTRACE_ERR_NONE ? 0 : -1);
6749 }
6750
6751 /* Encode an Intel Processor Trace configuration. */
6752
6753 static void
6754 linux_low_encode_pt_config (std::string *buffer,
6755 const struct btrace_data_pt_config *config)
6756 {
6757 *buffer += "<pt-config>\n";
6758
6759 switch (config->cpu.vendor)
6760 {
6761 case CV_INTEL:
6762 string_xml_appendf (*buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6763 "model=\"%u\" stepping=\"%u\"/>\n",
6764 config->cpu.family, config->cpu.model,
6765 config->cpu.stepping);
6766 break;
6767
6768 default:
6769 break;
6770 }
6771
6772 *buffer += "</pt-config>\n";
6773 }
6774
6775 /* Encode a raw buffer. */
6776
6777 static void
6778 linux_low_encode_raw (std::string *buffer, const gdb_byte *data,
6779 unsigned int size)
6780 {
6781 if (size == 0)
6782 return;
6783
6784 /* We use hex encoding - see gdbsupport/rsp-low.h. */
6785 *buffer += "<raw>\n";
6786
6787 while (size-- > 0)
6788 {
6789 char elem[2];
6790
6791 elem[0] = tohex ((*data >> 4) & 0xf);
6792 elem[1] = tohex (*data++ & 0xf);
6793
6794 buffer->append (elem, 2);
6795 }
6796
6797 *buffer += "</raw>\n";
6798 }
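
/* For instance, the three raw bytes { 0x0f, 0xa0, 0x42 } are encoded
   as the six characters "0fa042" between the <raw> tags.  */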
6799
6800 /* See to_read_btrace target method. */
6801
6802 int
6803 linux_process_target::read_btrace (btrace_target_info *tinfo,
6804 std::string *buffer,
6805 enum btrace_read_type type)
6806 {
6807 struct btrace_data btrace;
6808 enum btrace_error err;
6809
6810 err = linux_read_btrace (&btrace, tinfo, type);
6811 if (err != BTRACE_ERR_NONE)
6812 {
6813 if (err == BTRACE_ERR_OVERFLOW)
6814 *buffer += "E.Overflow.";
6815 else
6816 *buffer += "E.Generic Error.";
6817
6818 return -1;
6819 }
6820
6821 switch (btrace.format)
6822 {
6823 case BTRACE_FORMAT_NONE:
6824 *buffer += "E.No Trace.";
6825 return -1;
6826
6827 case BTRACE_FORMAT_BTS:
6828 *buffer += "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n";
6829 *buffer += "<btrace version=\"1.0\">\n";
6830
6831 for (const btrace_block &block : *btrace.variant.bts.blocks)
6832 string_xml_appendf (*buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6833 paddress (block.begin), paddress (block.end));
6834
6835 *buffer += "</btrace>\n";
6836 break;
6837
6838 case BTRACE_FORMAT_PT:
6839 *buffer += "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n";
6840 *buffer += "<btrace version=\"1.0\">\n";
6841 *buffer += "<pt>\n";
6842
6843 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
6844
6845 linux_low_encode_raw (buffer, btrace.variant.pt.data,
6846 btrace.variant.pt.size);
6847
6848 *buffer += "</pt>\n";
6849 *buffer += "</btrace>\n";
6850 break;
6851
6852 default:
6853 *buffer += "E.Unsupported Trace Format.";
6854 return -1;
6855 }
6856
6857 return 0;
6858 }
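
/* For the BTS case above, a successful read yields a document of the
   following shape (addresses illustrative):

   <!DOCTYPE btrace SYSTEM "btrace.dtd">
   <btrace version="1.0">
   <block begin="0x400540" end="0x400562"/>
   <block begin="0x7ffff7de0890" end="0x7ffff7de08b4"/>
   </btrace>
*/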
6859
6860 /* See to_btrace_conf target method. */
6861
6862 int
6863 linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
6864 std::string *buffer)
6865 {
6866 const struct btrace_config *conf;
6867
6868 *buffer += "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n";
6869 *buffer += "<btrace-conf version=\"1.0\">\n";
6870
6871 conf = linux_btrace_conf (tinfo);
6872 if (conf != NULL)
6873 {
6874 switch (conf->format)
6875 {
6876 case BTRACE_FORMAT_NONE:
6877 break;
6878
6879 case BTRACE_FORMAT_BTS:
6880 string_xml_appendf (*buffer, "<bts");
6881 string_xml_appendf (*buffer, " size=\"0x%x\"", conf->bts.size);
6882 string_xml_appendf (*buffer, " />\n");
6883 break;
6884
6885 case BTRACE_FORMAT_PT:
6886 string_xml_appendf (*buffer, "<pt");
6887 string_xml_appendf (*buffer, " size=\"0x%x\"", conf->pt.size);
6888 string_xml_appendf (*buffer, "/>\n");
6889 break;
6890 }
6891 }
6892
6893 *buffer += "</btrace-conf>\n";
6894 return 0;
6895 }
6896 #endif /* HAVE_LINUX_BTRACE */
6897
6898 /* See nat/linux-nat.h. */
6899
6900 ptid_t
6901 current_lwp_ptid (void)
6902 {
6903 return ptid_of (current_thread);
6904 }
6905
6906 const char *
6907 linux_process_target::thread_name (ptid_t thread)
6908 {
6909 return linux_proc_tid_get_name (thread);
6910 }
6911
6912 #if USE_THREAD_DB
6913 bool
6914 linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
6915 int *handle_len)
6916 {
6917 return thread_db_thread_handle (ptid, handle, handle_len);
6918 }
6919 #endif
6920
6921 thread_info *
6922 linux_process_target::thread_pending_parent (thread_info *thread)
6923 {
6924 lwp_info *parent = get_thread_lwp (thread)->pending_parent ();
6925
6926 if (parent == nullptr)
6927 return nullptr;
6928
6929 return get_lwp_thread (parent);
6930 }
6931
6932 thread_info *
6933 linux_process_target::thread_pending_child (thread_info *thread)
6934 {
6935 lwp_info *child = get_thread_lwp (thread)->pending_child ();
6936
6937 if (child == nullptr)
6938 return nullptr;
6939
6940 return get_lwp_thread (child);
6941 }
6942
6943 /* Default implementation of linux_target_ops method "set_pc" for a
6944 32-bit pc register that is literally named "pc". */
6945
6946 void
6947 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
6948 {
6949 uint32_t newpc = pc;
6950
6951 supply_register_by_name (regcache, "pc", &newpc);
6952 }
6953
6954 /* Default implementation of linux_target_ops method "get_pc" for a
6955 32-bit pc register that is literally named "pc". */
6956
6957 CORE_ADDR
6958 linux_get_pc_32bit (struct regcache *regcache)
6959 {
6960 uint32_t pc;
6961
6962 collect_register_by_name (regcache, "pc", &pc);
6963 threads_debug_printf ("stop pc is 0x%" PRIx32, pc);
6964 return pc;
6965 }
6966
6967 /* Default implementation of linux_target_ops method "set_pc" for a
6968 64-bit pc register that is literally named "pc". */
6969
6970 void
6971 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
6972 {
6973 uint64_t newpc = pc;
6974
6975 supply_register_by_name (regcache, "pc", &newpc);
6976 }
6977
6978 /* Default implementation of linux_target_ops method "get_pc" for a
6979 64-bit pc register that is literally named "pc". */
6980
6981 CORE_ADDR
6982 linux_get_pc_64bit (struct regcache *regcache)
6983 {
6984 uint64_t pc;
6985
6986 collect_register_by_name (regcache, "pc", &pc);
6987 threads_debug_printf ("stop pc is 0x%" PRIx64, pc);
6988 return pc;
6989 }
6990
6991 /* See linux-low.h. */
6992
6993 int
6994 linux_get_auxv (int pid, int wordsize, CORE_ADDR match, CORE_ADDR *valp)
6995 {
6996 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
6997 int offset = 0;
6998
6999 gdb_assert (wordsize == 4 || wordsize == 8);
7000
7001 while (the_target->read_auxv (pid, offset, data, 2 * wordsize)
7002 == 2 * wordsize)
7003 {
7004 if (wordsize == 4)
7005 {
7006 uint32_t *data_p = (uint32_t *) data;
7007 if (data_p[0] == match)
7008 {
7009 *valp = data_p[1];
7010 return 1;
7011 }
7012 }
7013 else
7014 {
7015 uint64_t *data_p = (uint64_t *) data;
7016 if (data_p[0] == match)
7017 {
7018 *valp = data_p[1];
7019 return 1;
7020 }
7021 }
7022
7023 offset += 2 * wordsize;
7024 }
7025
7026 return 0;
7027 }
7028
7029 /* See linux-low.h. */
7030
7031 CORE_ADDR
7032 linux_get_hwcap (int pid, int wordsize)
7033 {
7034 CORE_ADDR hwcap = 0;
7035 linux_get_auxv (pid, wordsize, AT_HWCAP, &hwcap);
7036 return hwcap;
7037 }
7038
7039 /* See linux-low.h. */
7040
7041 CORE_ADDR
7042 linux_get_hwcap2 (int pid, int wordsize)
7043 {
7044 CORE_ADDR hwcap2 = 0;
7045 linux_get_auxv (pid, wordsize, AT_HWCAP2, &hwcap2);
7046 return hwcap2;
7047 }
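
/* For illustration, an architecture backend would typically test
   individual capability bits in these words, along the lines of
   (HWCAP_EXAMPLE is a hypothetical bit; real backends use the
   architecture's bits from <asm/hwcap.h>):

   if (linux_get_hwcap (pid, 8) & HWCAP_EXAMPLE)
     ;  // pick a target description with the optional register set
*/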
7048
7049 #ifdef HAVE_LINUX_REGSETS
7050 void
7051 initialize_regsets_info (struct regsets_info *info)
7052 {
7053 for (info->num_regsets = 0;
7054 info->regsets[info->num_regsets].size >= 0;
7055 info->num_regsets++)
7056 ;
7057 }
7058 #endif
7059
7060 void
7061 initialize_low (void)
7062 {
7063 struct sigaction sigchld_action;
7064
7065 memset (&sigchld_action, 0, sizeof (sigchld_action));
7066 set_target_ops (the_linux_target);
7067
7068 linux_ptrace_init_warnings ();
7069 linux_proc_init_warnings ();
7070
7071 sigchld_action.sa_handler = sigchld_handler;
7072 sigemptyset (&sigchld_action.sa_mask);
7073 sigchld_action.sa_flags = SA_RESTART;
7074 sigaction (SIGCHLD, &sigchld_action, NULL);
7075
7076 initialize_low_arch ();
7077
7078 linux_check_ptrace_features ();
7079 }