gdbserver: report correct status in thread stop race condition
[binutils-gdb.git] / gdbserver / linux-low.cc
/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2022 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program. If not, see <http://www.gnu.org/licenses/>. */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "gdbsupport/agent.h"
#include "tdesc.h"
#include "gdbsupport/event-loop.h"
#include "gdbsupport/event-pipe.h"
#include "gdbsupport/rsp-low.h"
#include "gdbsupport/signals-state-save-restore.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdbsupport/gdb_wait.h"
#include "nat/gdb_ptrace.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "gdbsupport/filestuff.h"
#include "tracepoint.h"
#include <inttypes.h>
#include "gdbsupport/common-inferior.h"
#include "nat/fork-inferior.h"
#include "gdbsupport/environ.h"
#include "gdbsupport/gdb-sigmask.h"
#include "gdbsupport/scoped_restore.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined. If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t. */
#include <elf.h>
#endif
#include "nat/linux-namespaces.h"

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef AT_HWCAP2
#define AT_HWCAP2 26
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here. In the future, these may
   be removed after they are added to asm/ptrace.h. */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels. */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* These are still undefined in 3.10 kernels. */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR (0x10000*4)
#define PT_DATA_ADDR (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#if (defined(__UCLIBC__) \
     && defined(HAS_NOMMU) \
     && defined(PT_TEXT_ADDR) \
     && defined(PT_DATA_ADDR) \
     && defined(PT_TEXT_END_ADDR))
#define SUPPORTS_READ_OFFSETS
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "gdbsupport/btrace-common.h"
#endif
#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h. */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
  {
    uint32_t a_val;		/* Integer value */
    /* We used to have pointer elements added here. We cannot do that,
       though, since it does not work when using 32-bit definitions
       on 64-bit platforms and vice versa. */
  } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h. */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
  {
    uint64_t a_val;		/* Integer value */
    /* We used to have pointer elements added here. We cannot do that,
       though, since it does not work when using 32-bit definitions
       on 64-bit platforms and vice versa. */
  } a_un;
} Elf64_auxv_t;
#endif

/* Does the current host support PTRACE_GETREGSET? */
int have_ptrace_getregset = -1;

/* Return TRUE if THREAD is the leader thread of the process. */

static bool
is_leader (thread_info *thread)
{
  ptid_t ptid = ptid_of (thread);
  return ptid.pid () == ptid.lwp ();
}

/* LWP accessors. */

/* See nat/linux-nat.h. */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h. */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
                           struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h. */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h. */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h. */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* See nat/linux-nat.h. */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->stepping;
}

/* A list of all unknown processes which receive stop signals. Some
   other process will presumably claim each of these as forked
   children momentarily. */

struct simple_pid_list
{
  /* The process ID. */
  int pid;

  /* The status as reported by waitpid. */
  int status;

  /* Next in chain. */
  struct simple_pid_list *next;
};
static struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes. */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;

        *statusp = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}

enum stopping_threads_kind
{
  /* Not stopping threads presently. */
  NOT_STOPPING_THREADS,

  /* Stopping threads. */
  STOPPING_THREADS,

  /* Stopping and suspending threads. */
  STOPPING_AND_SUSPENDING_THREADS
};

/* This is set while stop_all_lwps is in effect. */
static stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method? */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads). */
static int stabilizing_threads;

static void unsuspend_all_lwps (struct lwp_info *except);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int lwp_is_marked_dead (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
static int linux_low_ptrace_options (int attached);
static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped. */
static ptid_t step_over_bkpt;

bool
linux_process_target::low_supports_breakpoints ()
{
  return false;
}

CORE_ADDR
linux_process_target::low_get_pc (regcache *regcache)
{
  return 0;
}

void
linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
{
  gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
}

std::vector<CORE_ADDR>
linux_process_target::low_get_next_pcs (regcache *regcache)
{
  gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
                          "implemented");
}

int
linux_process_target::low_decr_pc_after_break ()
{
  return 0;
}

/* True if LWP is stopped in its stepping range. */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

/* The event pipe registered as a waitable file in the event loop. */
static event_pipe linux_event_pipe;

/* True if we're currently in async mode. */
#define target_is_async_p() (linux_event_pipe.is_open ())

static void send_sigstop (struct lwp_info *lwp);

/* Return 1 if HEADER describes a 64-bit ELF file, 0 if it describes
   a 32-bit ELF file, and -1 if it is not an ELF file at all. Store
   the file's machine in *MACHINE (EM_NONE if not an ELF file). */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return 1 if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist. */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file. */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}

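/* Delete LWP: remove its thread from the thread list, let the low
   target free its architecture-specific data, and free the lwp_info
   itself. */
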
void
linux_process_target::delete_lwp (lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  threads_debug_printf ("deleting %ld", lwpid_of (thr));

  remove_thread (thr);

  low_delete_thread (lwp->arch_private);

  delete lwp;
}

void
linux_process_target::low_delete_thread (arch_lwp_info *info)
{
  /* Default implementation should be overridden if architecture-specific
     info is being used. */
  gdb_assert (info == nullptr);
}

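/* Add a new process with ID PID to the process list, allocating its
   Linux-specific private data, including any architecture-specific
   data created by low_new_process. ATTACHED is non-zero if we
   attached to an already-running process rather than spawning it. */
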
process_info *
linux_process_target::add_linux_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = XCNEW (struct process_info_private);

  proc->priv->arch_private = low_new_process ();

  return proc;
}

arch_process_info *
linux_process_target::low_new_process ()
{
  return nullptr;
}

void
linux_process_target::low_delete_process (arch_process_info *info)
{
  /* Default implementation must be overridden if architecture-specific
     info exists. */
  gdb_assert (info == nullptr);
}

void
linux_process_target::low_new_fork (process_info *parent, process_info *child)
{
  /* Nop. */
}

void
linux_process_target::arch_setup_thread (thread_info *thread)
{
  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  low_arch_setup ();
}

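/* Handle a ptrace extended event (fork, vfork, clone, exec or
   vfork-done) that was reported for *ORIG_EVENT_LWP with wait status
   WSTAT. Return 0 if the event should be reported to GDB, or 1 if
   it was handled entirely within gdbserver. */
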
int
linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
                                            int wstat)
{
  client_state &cs = get_client_state ();
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  gdb_assert (event_lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall. Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that. */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp. */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
              &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now. */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
        {
          /* The new child has a pending SIGSTOP. We can't affect it until it
             hits the SIGSTOP, but we're already attached. */

          ret = my_waitpid (new_pid, &status, __WALL);

          if (ret == -1)
            perror_with_name ("waiting for new child");
          else if (ret != new_pid)
            warning ("wait returned unexpected PID %d", ret);
          else if (!WIFSTOPPED (status))
            warning ("wait returned unexpected status 0x%x", status);
        }

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
        {
          struct process_info *parent_proc;
          struct process_info *child_proc;
          struct lwp_info *child_lwp;
          struct thread_info *child_thr;

          ptid = ptid_t (new_pid, new_pid);

          threads_debug_printf ("Got fork event from LWP %ld, "
                                "new child is %d",
                                ptid_of (event_thr).lwp (),
                                ptid.pid ());

          /* Add the new process to the tables and clone the breakpoint
             lists of the parent. We need to do this even if the new process
             will be detached, since we will need the process object and the
             breakpoints to remove any breakpoints from memory when we
             detach, and the client side will access registers. */
          child_proc = add_linux_process (new_pid, 0);
          gdb_assert (child_proc != NULL);
          child_lwp = add_lwp (ptid);
          gdb_assert (child_lwp != NULL);
          child_lwp->stopped = 1;
          child_lwp->must_set_ptrace_flags = 1;
          child_lwp->status_pending_p = 0;
          child_thr = get_lwp_thread (child_lwp);
          child_thr->last_resume_kind = resume_stop;
          child_thr->last_status.set_stopped (GDB_SIGNAL_0);

          /* If we're suspending all threads, leave this one suspended
             too. If the fork/clone parent is stepping over a breakpoint,
             all other threads have been suspended already. Leave the
             child suspended too. */
          if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
              || event_lwp->bp_reinsert != 0)
            {
              threads_debug_printf ("leaving child suspended");
              child_lwp->suspended = 1;
            }

          parent_proc = get_thread_process (event_thr);
          child_proc->attached = parent_proc->attached;

          if (event_lwp->bp_reinsert != 0
              && supports_software_single_step ()
              && event == PTRACE_EVENT_VFORK)
            {
              /* If we leave single-step breakpoints there, child will
                 hit it, so uninsert single-step breakpoints from parent
                 (and child). Once vfork child is done, reinsert
                 them back to parent. */
              uninsert_single_step_breakpoints (event_thr);
            }

          clone_all_breakpoints (child_thr, event_thr);

          target_desc_up tdesc = allocate_target_description ();
          copy_target_description (tdesc.get (), parent_proc->tdesc);
          child_proc->tdesc = tdesc.release ();

          /* Clone arch-specific process data. */
          low_new_fork (parent_proc, child_proc);

          /* Save fork info in the parent thread. */
          if (event == PTRACE_EVENT_FORK)
            event_lwp->waitstatus.set_forked (ptid);
          else if (event == PTRACE_EVENT_VFORK)
            event_lwp->waitstatus.set_vforked (ptid);

          /* The status_pending field contains bits denoting the
             extended event, so when the pending event is handled,
             the handler will look at lwp->waitstatus. */
          event_lwp->status_pending_p = 1;
          event_lwp->status_pending = wstat;

          /* Link the threads until the parent event is passed on to
             higher layers. */
          event_lwp->fork_relative = child_lwp;
          child_lwp->fork_relative = event_lwp;

          /* If the parent thread is doing step-over with single-step
             breakpoints, the list of single-step breakpoints are cloned
             from the parent's. Remove them from the child process.
             In case of vfork, we'll reinsert them back once vforked
             child is done. */
          if (event_lwp->bp_reinsert != 0
              && supports_software_single_step ())
            {
              /* The child process is forked and stopped, so it is safe
                 to access its memory without stopping all other threads
                 from other processes. */
              delete_single_step_breakpoints (child_thr);

              gdb_assert (has_single_step_breakpoints (event_thr));
              gdb_assert (!has_single_step_breakpoints (child_thr));
            }

          /* Report the event. */
          return 0;
        }

      threads_debug_printf
        ("Got clone event from LWP %ld, new child is LWP %ld",
         lwpid_of (event_thr), new_pid);

      ptid = ptid_t (pid_of (event_thr), new_pid);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
         or leave it stopped. resume_one_lwp is a nop if it
         thinks the thread is currently running, so set this first
         before calling resume_one_lwp. */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
         too. If the fork/clone parent is stepping over a breakpoint,
         all other threads have been suspended already. Leave the
         child suspended too. */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
          || event_lwp->bp_reinsert != 0)
        new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP. But in some cases
         we might get another signal delivered to the group first.
         If we do get another signal, be sure not to lose it. */
      if (WSTOPSIG (status) != SIGSTOP)
        {
          new_lwp->stop_expected = 1;
          new_lwp->status_pending_p = 1;
          new_lwp->status_pending = status;
        }
      else if (cs.report_thread_events)
        {
          new_lwp->waitstatus.set_thread_created ();
          new_lwp->status_pending_p = 1;
          new_lwp->status_pending = status;
        }

#ifdef USE_THREAD_DB
      thread_db_notice_clone (event_thr, ptid);
#endif

      /* Don't report the event. */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.set_vfork_done ();

      if (event_lwp->bp_reinsert != 0 && supports_software_single_step ())
        {
          reinsert_single_step_breakpoints (event_thr);

          gdb_assert (has_single_step_breakpoints (event_thr));
        }

      /* Report the event. */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
    {
      struct process_info *proc;
      std::vector<int> syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      threads_debug_printf ("Got exec event from LWP %ld",
                            lwpid_of (event_thr));

      /* Get the event ptid. */
      event_ptid = ptid_of (event_thr);
      event_pid = event_ptid.pid ();

      /* Save the syscall list from the execing process. */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = std::move (proc->syscalls_to_catch);

      /* Delete the execing process and all its threads. */
      mourn (proc);
      switch_to_thread (nullptr);

      /* Create a new process/lwp/thread. */
      proc = add_linux_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      arch_setup_thread (event_thr);

      /* Set the event status. */
      event_lwp->waitstatus.set_execd
        (make_unique_xstrdup
         (linux_proc_pid_to_exec_file (lwpid_of (event_thr))));

      /* Mark the exec status as pending. */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.set_ignore ();

      /* Update syscall state in the new lwp, effectively mid-syscall too. */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch. Don't rely on the client, which is free
         to avoid sending a new list when the architecture doesn't change.
         Also, for ANY_SYSCALL, the architecture doesn't really matter. */
      proc->syscalls_to_catch = std::move (syscalls_to_catch);

      /* Report the event. */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}

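/* Return the current PC of LWP, as read from its register cache, or
   zero if the low target does not support breakpoints. */
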
CORE_ADDR
linux_process_target::get_pc (lwp_info *lwp)
{
  struct regcache *regcache;
  CORE_ADDR pc;

  if (!low_supports_breakpoints ())
    return 0;

  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

  regcache = get_thread_regcache (current_thread, 1);
  pc = low_get_pc (regcache);

  threads_debug_printf ("pc is 0x%lx", (long) pc);

  return pc;
}

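/* Fetch the number of the syscall that LWP stopped at and store it in
   *SYSNO, using the architecture's low_get_syscall_trapinfo. */
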
void
linux_process_target::get_syscall_trapinfo (lwp_info *lwp, int *sysno)
{
  struct regcache *regcache;

  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

  regcache = get_thread_regcache (current_thread, 1);
  low_get_syscall_trapinfo (regcache, sysno);

  threads_debug_printf ("get_syscall_trapinfo sysno %d", *sysno);
}

void
linux_process_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  /* By default, report an unknown system call number. */
  *sysno = UNKNOWN_SYSCALL;
}

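/* Record in LWP->stop_reason why LWP stopped (software or hardware
   breakpoint, watchpoint, or single-step), and in LWP->stop_pc the PC
   to report, backing the PC up over a software breakpoint if needed.
   Return false if the low target does not support breakpoints, true
   otherwise. */
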
bool
linux_process_target::save_stop_reason (lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (!low_supports_breakpoints ())
    return false;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - low_decr_pc_after_break ();

  /* breakpoint_at reads from the current thread. */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
              (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
        {
          if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
              && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
            {
              /* The si_code is ambiguous on this arch -- check debug
                 registers. */
              if (!check_stopped_by_watchpoint (lwp))
                lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
            }
          else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
            {
              /* If we determine the LWP stopped for a SW breakpoint,
                 trust it. Particularly don't check watchpoint
                 registers, because at least on s390, we'd find
                 stopped-by-watchpoint as long as there's a watchpoint
                 set. */
              lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
            }
          else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
            {
              /* This can indicate either a hardware breakpoint or
                 hardware watchpoint. Check debug registers. */
              if (!check_stopped_by_watchpoint (lwp))
                lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
            }
          else if (siginfo.si_code == TRAP_TRACE)
            {
              /* We may have single stepped an instruction that
                 triggered a watchpoint. In that case, on some
                 architectures (such as x86), instead of TRAP_HWBKPT,
                 si_code indicates TRAP_TRACE, and we need to check
                 the debug registers separately. */
              if (!check_stopped_by_watchpoint (lwp))
                lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
            }
        }
    }
#else
  /* We may have just stepped a breakpoint instruction. E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range. In that
     case we need to report the breakpoint PC. */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && low_breakpoint_at (sw_breakpoint_pc))
    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;

  if (hardware_breakpoint_inserted_here (pc))
    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lwp);
#endif

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      threads_debug_printf
        ("%s stopped by software breakpoint",
         target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());

      /* Back up the PC if necessary. */
      if (pc != sw_breakpoint_pc)
        {
          struct regcache *regcache
            = get_thread_regcache (current_thread, 1);
          low_set_pc (regcache, sw_breakpoint_pc);
        }

      /* Update this so we record the correct stop PC below. */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    threads_debug_printf
      ("%s stopped by hardware breakpoint",
       target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    threads_debug_printf
      ("%s stopped by hardware watchpoint",
       target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    threads_debug_printf
      ("%s stopped by trace",
       target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());

  lwp->stop_pc = pc;
  return true;
}

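/* Allocate a new lwp_info for PTID, add a corresponding thread to the
   thread list, and let the low target initialize any per-thread
   architecture data via low_new_thread. */
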
lwp_info *
linux_process_target::add_lwp (ptid_t ptid)
{
  lwp_info *lwp = new lwp_info;

  lwp->thread = add_thread (ptid, lwp);

  low_new_thread (lwp);

  return lwp;
}

void
linux_process_target::low_new_thread (lwp_info *info)
{
  /* Nop. */
}

/* Callback to be used when calling fork_inferior, responsible for
   actually initiating the tracing of the inferior. */

static void
linux_ptrace_fun ()
{
  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
              (PTRACE_TYPE_ARG4) 0) < 0)
    trace_start_error_with_name ("ptrace");

  if (setpgid (0, 0) < 0)
    trace_start_error_with_name ("setpgid");

  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
     stdout to stderr so that inferior i/o doesn't corrupt the connection.
     Also, redirect stdin to /dev/null. */
  if (remote_connection_is_stdio ())
    {
      if (close (0) < 0)
        trace_start_error_with_name ("close");
      if (open ("/dev/null", O_RDONLY) < 0)
        trace_start_error_with_name ("open");
      if (dup2 (2, 1) < 0)
        trace_start_error_with_name ("dup2");
      if (write (2, "stdin/stdout redirected\n",
                 sizeof ("stdin/stdout redirected\n") - 1) < 0)
        {
          /* Errors ignored. */;
        }
    }
}

/* Start an inferior process and return its pid.
   PROGRAM is the name of the program to be started, and PROGRAM_ARGS
   are its arguments. */

int
linux_process_target::create_inferior (const char *program,
                                       const std::vector<char *> &program_args)
{
  client_state &cs = get_client_state ();
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

  {
    maybe_disable_address_space_randomization restore_personality
      (cs.disable_randomization);
    std::string str_program_args = construct_inferior_arguments (program_args);

    pid = fork_inferior (program,
                         str_program_args.c_str (),
                         get_environ ()->envp (), linux_ptrace_fun,
                         NULL, NULL, NULL, NULL);
  }

  add_linux_process (pid, 0);

  ptid = ptid_t (pid, pid);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  post_fork_inferior (pid, program);

  return pid;
}

/* Implement the post_create_inferior target_ops method. */

void
linux_process_target::post_create_inferior ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  low_arch_setup ();

  if (lwp->must_set_ptrace_flags)
    {
      struct process_info *proc = current_process ();
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (current_thread), options);
      lwp->must_set_ptrace_flags = 0;
    }
}

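/* Attach to the LWP specified by PTID with PTRACE_ATTACH and add it
   to the thread list. Return 0 on success, or an errno value if the
   ptrace attach failed. */
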
int
linux_process_target::attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid.lwp ();

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP. */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      threads_debug_printf ("Attached to a stopped process");

      /* The process is definitely stopped. It is in a job control
         stop, unless the kernel predates the TASK_STOPPED /
         TASK_TRACED distinction, in which case it might be in a
         ptrace stop. Make sure it is in a ptrace stop; from there we
         can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP. Since we are
         already attached, the process can not transition from stopped
         to running without a PTRACE_CONT; so we know this signal will
         go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
         probably already in the queue (unless this kernel is old
         enough to use TASK_STOPPED for ptrace stops); but since
         SIGSTOP is not an RT signal, it can only be queued once. */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process. This will deliver the
         SIGSTOP (or a higher priority signal, just like normal
         PTRACE_ATTACH), which we'll catch later on. */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
        of a new thread that is being created.
        In this case we should ignore that SIGSTOP and resume the
        process. This is handled below by setting stop_expected = 1,
        and the fact that add_thread sets last_resume_kind ==
        resume_continue.

     2) This is the first thread (the process thread), and we're attaching
        to it via attach_inferior.
        In this case we want the process thread to stop.
        This is handled by having linux_attach set last_resume_kind ==
        resume_stop after we return.

        If the pid we are attaching to is also the tgid, we attach to and
        stop all the existing threads. Otherwise, we attach to pid and
        ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
        existing threads.
        In this case we want the thread to stop.
        FIXME: This case is currently not properly handled.
        We should wait for the SIGSTOP but don't. Things work apparently
        because enough time passes between when we ptrace (ATTACH) and when
        gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP. This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will). */
  new_lwp->stop_expected = 1;

  return 0;
}

/* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
   already attached. Returns true if a new LWP is found, false
   otherwise. */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread? */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid.lwp ();
      int err;

      threads_debug_printf ("Found new lwp %d", lwpid);

      err = the_linux_target->attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting. EPERM
         is returned if the thread's task still exists, and is marked
         as exited or zombie, as well as other conditions, so in that
         case, confirm the status in /proc/PID/status. */
      if (err == ESRCH
          || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
        threads_debug_printf
          ("Cannot attach to lwp %d: thread is gone (%d: %s)",
           lwpid, err, safe_strerror (err));
      else if (err != 0)
        {
          std::string reason
            = linux_ptrace_attach_fail_reason_string (ptid, err);

          warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
        }

      return 1;
    }
  return 0;
}

static void async_file_mark (void);

/* Attach to PID. If PID is the tgid, attach to it and all
   of its threads. */

int
linux_process_target::attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_t (pid, pid);
  int err;

  proc = add_linux_process (pid, 1);

  /* Attach to PID. We will check for other threads
     soon. */
  err = attach_lwp (ptid);
  if (err != 0)
    {
      remove_process (proc);

      std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
      error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
    }

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process. It will be collected by wait shortly. */
  initial_thread = find_thread_ptid (ptid_t (pid, pid));
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP. If /proc is mounted, use that to
     find them now. On the one hand, the inferior may be using raw
     clone instead of using pthreads. On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol). Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted. Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP. */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture. But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP. Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop. */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = ptid_t (pid);

      lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (ptid_t (lwpid));

      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
        {
          lwp->status_pending_p = 1;
          lwp->status_pending = wstat;
        }

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}

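/* Return true if the thread list contains at most one thread of the
   process with ID PID. */
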
static int
last_thread_of_process_p (int pid)
{
  bool seen_one = false;

  thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
    {
      if (!seen_one)
        {
          /* This is the first thread of this process we see. */
          seen_one = true;
          return false;
        }
      else
        {
          /* This is the second thread of this process we see. */
          return true;
        }
    });

  return thread == NULL;
}

/* Kill LWP. */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable. After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
     alternative is to kill with SIGKILL. We only need one SIGKILL
     per process, not one for each thread. But since we still
     support debugging programs using raw clone without CLONE_THREAD,
     we send one for each thread. For years, we used PTRACE_KILL
     only, so we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere. */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      threads_debug_printf ("kill_lwp (SIGKILL) %s, 0, 0 (%s)",
                            target_pid_to_str (ptid_of (thr)).c_str (),
                            save_errno ? safe_strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      threads_debug_printf ("PTRACE_KILL %s, 0, 0 (%s)",
                            target_pid_to_str (ptid_of (thr)).c_str (),
                            save_errno ? safe_strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die. */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_of (thr).pid ();
  int lwpid = ptid_of (thr).lwp ();
  int wstat;
  int res;

  threads_debug_printf ("killing lwp %d, for pid: %d", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died. Notes:

         - The loop is most likely unnecessary.

         - We don't use wait_for_event as that could delete lwps
           while we're iterating over them. We're not interested in
           any pending status at this point, only in making sure all
           wait status on the kernel side are collected until the
           process is reaped.

         - We don't use __WALL here as the __WALL emulation relies on
           SIGCHLD, and killing a stopped process doesn't generate
           one, nor an exit status. */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
        res = my_waitpid (lwpid, &wstat, __WCLONE);
    }
  while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL. */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}

/* Callback for `for_each_thread'. Kills an lwp of a given process,
   except the leader. */

static void
kill_one_lwp_callback (thread_info *thread, int pid)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever. */

  if (lwpid_of (thread) == pid)
    {
      threads_debug_printf ("is last of process %s",
                            target_pid_to_str (thread->id).c_str ());
      return;
    }

  kill_wait_lwp (lwp);
}

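/* Kill the whole of PROCESS: stop all its lwps first (PTRACE_KILL
   does not work on running threads), kill and reap the non-leader
   lwps, then the leader, and finally mourn the process. */
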
int
linux_process_target::kill (process_info *process)
{
  int pid = process->pid;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise. */
  stop_all_lwps (0, NULL);

  for_each_thread (pid, [&] (thread_info *thread)
    {
      kill_one_lwp_callback (thread, pid);
    });

  /* See the comment in linux_kill_one_lwp. We did not kill the first
     thread in the list, so do so now. */
  lwp_info *lwp = find_lwp_pid (ptid_t (pid));

  if (lwp == NULL)
    threads_debug_printf ("cannot find lwp for pid: %d", pid);
  else
    kill_wait_lwp (lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes. */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes. This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost. */

static int
get_detach_signal (struct thread_info *thread)
{
  client_state &cs = get_client_state ();
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
         cleanly, then it'll have stopped with SIGSTOP. But we don't
         want to deliver that SIGSTOP. */
      if (thread->last_status.kind () != TARGET_WAITKIND_STOPPED
          || thread->last_status.sig () == GDB_SIGNAL_0)
        return 0;

      /* Otherwise, we may need to deliver the signal we
         intercepted. */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      threads_debug_printf ("lwp %s hasn't stopped: no pending signal",
                            target_pid_to_str (ptid_of (thread)).c_str ());
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs. */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      threads_debug_printf ("lwp %s had stopped with extended "
                            "status: no pending signal",
                            target_pid_to_str (ptid_of (thread)).c_str ());
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (cs.program_signals_p && !cs.program_signals[signo])
    {
      threads_debug_printf ("lwp %s had signal %s, but it is in nopass state",
                            target_pid_to_str (ptid_of (thread)).c_str (),
                            gdb_signal_to_string (signo));
      return 0;
    }
  else if (!cs.program_signals_p
           /* If we have no way to know which signals GDB does not
              want to have passed to the program, assume
              SIGTRAP/SIGINT, which is GDB's default. */
           && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      threads_debug_printf ("lwp %s had signal %s, "
                            "but we don't know if we should pass it. "
                            "Default to not.",
                            target_pid_to_str (ptid_of (thread)).c_str (),
                            gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      threads_debug_printf ("lwp %s has pending signal %s: delivering it",
                            target_pid_to_str (ptid_of (thread)).c_str (),
                            gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

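/* Detach from LWP: cancel any pending SIGSTOP with SIGCONT, forward
   any other pending signal via PTRACE_DETACH, and reap the lwp if it
   turns out to have become a zombie in the meantime. */
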
void
linux_process_target::detach_one_lwp (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it. */
  if (lwp->stop_expected)
    {
      threads_debug_printf ("Sending SIGCONT to %s",
                            target_pid_to_str (ptid_of (thread)).c_str ());

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread. */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie. If that happens, ignore the error. We'll handle
     it below, when detach fails with ESRCH. */
  try
    {
      /* Flush any pending changes to the process's registers. */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume. */
      low_prepare_to_resume (lwp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
        throw;
    }

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
              (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
         zombie. This can happen if one of the already-detached
         threads exits the whole thread group. In that case we're
         still attached, and must reap the lwp. */
      if (save_errno == ESRCH)
        {
          int ret, status;

          ret = my_waitpid (lwpid, &status, __WALL);
          if (ret == -1)
            {
              warning (_("Couldn't reap LWP %d while detaching: %s"),
                       lwpid, safe_strerror (errno));
            }
          else if (!WIFEXITED (status) && !WIFSIGNALED (status))
            {
              warning (_("Reaping LWP %d while detaching "
                         "returned unexpected status 0x%x"),
                       lwpid, status);
            }
        }
      else
        {
          error (_("Can't detach %s: %s"),
                 target_pid_to_str (ptid_of (thread)).c_str (),
                 safe_strerror (save_errno));
        }
    }
  else
    threads_debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)",
                          target_pid_to_str (ptid_of (thread)).c_str (),
                          strsignal (sig));

  delete_lwp (lwp);
}

int
linux_process_target::detach (process_info *process)
{
  struct lwp_info *main_lwp;

  /* If there's a step over already in progress, let it finish first,
     otherwise nesting a stabilize_threads operation on top gets real
     messy. */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching. First, ptrace requires that
     the thread is stopped to successfully detach. Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway. */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads). */
  target_stabilize_threads ();

  /* Detach from the clone lwps first. If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader. */
  for_each_thread (process->pid, [this] (thread_info *thread)
    {
      /* We don't actually detach from the thread group leader just yet.
         If the thread group exits, we must reap the zombie clone lwps
         before we're able to reap the leader. */
      if (thread->id.pid () == thread->id.lwp ())
        return;

      lwp_info *lwp = get_thread_lwp (thread);
      detach_one_lwp (lwp);
    });

  main_lwp = find_lwp_pid (ptid_t (process->pid));
  detach_one_lwp (main_lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes. */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list. */

void
linux_process_target::mourn (process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  for_each_thread (process->pid, [this] (thread_info *thread)
    {
      delete_lwp (get_thread_lwp (thread));
    });

  /* Freeing all private data. */
  priv = process->priv;
  low_delete_process (priv->arch_private);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}

void
linux_process_target::join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return true if the given thread is still alive. */

bool
linux_process_target::thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits. If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process. */
  if (lwp != NULL)
    return !lwp_is_marked_dead (lwp);
  else
    return 0;
}

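/* Return true if THREAD still has a pending status worth reporting to
   GDB. A pending software/hardware breakpoint stop is discarded if
   the thread's PC has since changed, or (without SIGTRAP siginfo) if
   the breakpoint that caused it has been removed. */
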
bool
linux_process_target::thread_still_has_status_pending (thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
          || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      scoped_restore_current_thread restore_thread;
      switch_to_thread (thread);

      if (pc != lp->stop_pc)
        {
          threads_debug_printf ("PC of %ld changed",
                                lwpid_of (thread));
          discard = 1;
        }

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
               && !low_breakpoint_at (pc))
        {
          threads_debug_printf ("previous SW breakpoint of %ld gone",
                                lwpid_of (thread));
          discard = 1;
        }
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
               && !hardware_breakpoint_inserted_here (pc))
        {
          threads_debug_printf ("previous HW breakpoint of %ld gone",
                                lwpid_of (thread));
          discard = 1;
        }
#endif

      if (discard)
        {
          threads_debug_printf ("discarding pending breakpoint status");
          lp->status_pending_p = 0;
          return 0;
        }
    }

  return 1;
}

/* Returns true if LWP is resumed from the client's perspective. */

static int
lwp_resumed (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (thread->last_resume_kind != resume_stop)
    return 1;

  /* Did gdb send us a `vCont;t', but we haven't reported the
     corresponding stop to gdb yet? If so, the thread is still
     resumed/running from gdb's perspective. */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
    return 1;

  return 0;
}

bool
linux_process_target::status_pending_p_callback (thread_info *thread,
                                                 ptid_t ptid)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  /* Check if we're only interested in events from a specific process
     or a specific LWP. */
  if (!thread->id.matches (ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending (thread))
    {
      resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}

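/* Return the lwp_info whose LWP id matches PTID's lwp field (or
   PTID's pid field if the lwp field is zero), or NULL if no such lwp
   exists. */
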
struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  thread_info *thread = find_thread ([&] (thread_info *thr_arg)
    {
      int lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
      return thr_arg->id.lwp () == lwp;
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}

/* Return the number of known LWPs in the tgid given by PID. */

static int
num_lwps (int pid)
{
  int count = 0;

  for_each_thread (pid, [&] (thread_info *thread)
    {
      count++;
    });

  return count;
}

/* See nat/linux-nat.h. */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
                   gdb::function_view<iterate_over_lwps_ftype> callback)
{
  thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
    {
      lwp_info *lwp = get_thread_lwp (thr_arg);

      return callback (lwp);
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}

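/* Scan all processes for thread group leaders that have turned into
   zombies while other threads remain, and delete them so they don't
   block event handling; see the detailed rationale in the function
   body. */
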
void
linux_process_target::check_zombie_leaders ()
{
  for_each_process ([this] (process_info *proc)
    {
      pid_t leader_pid = pid_of (proc);
      lwp_info *leader_lp = find_lwp_pid (ptid_t (leader_pid));

      threads_debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
                            "num_lwps=%d, zombie=%d",
                            leader_pid, leader_lp != NULL,
                            num_lwps (leader_pid),
                            linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL && !leader_lp->stopped
          /* Check if there are other threads in the group, as we may
             have raced with the inferior simply exiting. Note this
             isn't a watertight check. If the inferior is
             multi-threaded and is exiting, it may be we see the
             leader as zombie before we reap all the non-leader
             threads. See comments below. */
          && !last_thread_of_process_p (leader_pid)
          && linux_proc_pid_is_zombie (leader_pid))
        {
          /* A zombie leader in a multi-threaded program can mean one
             of three things:

             #1 - Only the leader exited, not the whole program, e.g.,
             with pthread_exit. Since we can't reap the leader's exit
             status until all other threads are gone and reaped too,
             we want to delete the zombie leader right away, as it
             can't be debugged, we can't read its registers, etc.
             This is the main reason we check for zombie leaders
             disappearing.

             #2 - The whole thread-group/process exited (a group exit,
             via e.g. exit(3)), and there is (or will be shortly) an
             exit reported for each thread in the process, and then
             finally an exit for the leader once the non-leaders are
             reaped.

             #3 - There are 3 or more threads in the group, and a
             thread other than the leader exec'd. See comments on
             exec events at the top of the file.

             Ideally we would never delete the leader for case #2.
             Instead, we want to collect the exit status of each
             non-leader thread, and then finally collect the exit
             status of the leader as normal and use its exit code as
             whole-process exit code. Unfortunately, there's no
             race-free way to distinguish cases #1 and #2. We can't
             assume the exit events for the non-leader threads are
             already pending in the kernel, nor can we assume the
             non-leader threads are in zombie state already. Between
             the leader becoming zombie and the non-leaders exiting
             and becoming zombie themselves, there's a small time
             window, so such a check would be racy. Temporarily
             pausing all threads and checking to see if all threads
             exit or not before re-resuming them would work in the
             case that all threads are running right now, but it
             wouldn't work if some thread is currently already
             ptrace-stopped, e.g., due to scheduler-locking.

             So what we do is we delete the leader anyhow, and then
             later on when we see its exit status, we re-add it back.
             We also make sure that we only report a whole-process
             exit when we see the leader exiting, as opposed to when
             the last LWP in the LWP list exits, which can be a
             non-leader if we deleted the leader here. */
          threads_debug_printf ("Thread group leader %d zombie "
                                "(it exited, or another thread execd), "
                                "deleting it.",
                                leader_pid);
          delete_lwp (leader_lp);
        }
    });
}

/* Callback for `find_thread'. Returns the first LWP that is not
   stopped. */

static bool
not_stopped_callback (thread_info *thread, ptid_t filter)
{
  if (!thread->id.matches (filter))
    return false;

  lwp_info *lwp = get_thread_lwp (thread);

  return !lwp->stopped;
}

/* Increment LWP's suspend count. */

static void
lwp_suspended_inc (struct lwp_info *lwp)
{
  lwp->suspended++;

  if (lwp->suspended > 4)
    threads_debug_printf
      ("LWP %ld has a suspiciously high suspend count, suspended=%d",
       lwpid_of (get_lwp_thread (lwp)), lwp->suspended);
}

/* Decrement LWP's suspend count. */

static void
lwp_suspended_decr (struct lwp_info *lwp)
{
  lwp->suspended--;

  if (lwp->suspended < 0)
    {
      struct thread_info *thread = get_lwp_thread (lwp);

      internal_error (__FILE__, __LINE__,
                      "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
                      lwp->suspended);
    }
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits. Return 1 if a tracepoint
   event was handled, 0 otherwise. */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  gdb_assert (lwp->suspended == 0);

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints. To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads. We need to make
     sure the unpausing doesn't resume LWP too. */
  lwp_suspended_inc (lwp);

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.) */

  /* Do any necessary step collect actions. */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions. */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp_suspended_decr (lwp);

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads
              || (lwp->collecting_fast_tracepoint
                  != fast_tpoint_collect_result::not_collecting));

  if (tpoint_related_event)
    {
      threads_debug_printf ("got a tracepoint event");
      return 1;
    }

  return 0;
}

1900 fast_tpoint_collect_result
1901 linux_process_target::linux_fast_tracepoint_collecting
1902 (lwp_info *lwp, fast_tpoint_collect_status *status)
1903 {
1904 CORE_ADDR thread_area;
1905 struct thread_info *thread = get_lwp_thread (lwp);
1906
1907 /* Get the thread area address. This is used to recognize which
1908 thread is which when tracing with the in-process agent library.
1909 We don't read anything from the address, and treat it as opaque;
1910 it's the address itself that we assume is unique per-thread. */
1911 if (low_get_thread_area (lwpid_of (thread), &thread_area) == -1)
1912 return fast_tpoint_collect_result::not_collecting;
1913
1914 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1915 }
1916
1917 int
1918 linux_process_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
1919 {
1920 return -1;
1921 }
1922
1923 bool
1924 linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
1925 {
1926 scoped_restore_current_thread restore_thread;
1927 switch_to_thread (get_lwp_thread (lwp));
1928
1929 if ((wstat == NULL
1930 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1931 && supports_fast_tracepoints ()
1932 && agent_loaded_p ())
1933 {
1934 struct fast_tpoint_collect_status status;
1935
1936 threads_debug_printf
1937 ("Checking whether LWP %ld needs to move out of the jump pad.",
1938 lwpid_of (current_thread));
1939
1940 fast_tpoint_collect_result r
1941 = linux_fast_tracepoint_collecting (lwp, &status);
1942
1943 if (wstat == NULL
1944 || (WSTOPSIG (*wstat) != SIGILL
1945 && WSTOPSIG (*wstat) != SIGFPE
1946 && WSTOPSIG (*wstat) != SIGSEGV
1947 && WSTOPSIG (*wstat) != SIGBUS))
1948 {
1949 lwp->collecting_fast_tracepoint = r;
1950
1951 if (r != fast_tpoint_collect_result::not_collecting)
1952 {
1953 if (r == fast_tpoint_collect_result::before_insn
1954 && lwp->exit_jump_pad_bkpt == NULL)
1955 {
1956 /* Haven't executed the original instruction yet.
1957 Set breakpoint there, and wait till it's hit,
1958 then single-step until exiting the jump pad. */
1959 lwp->exit_jump_pad_bkpt
1960 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1961 }
1962
1963 threads_debug_printf
1964 ("Checking whether LWP %ld needs to move out of the jump pad..."
1965 " it does", lwpid_of (current_thread));
1966
1967 return true;
1968 }
1969 }
1970 else
1971 {
1972 /* If we get a synchronous signal while collecting, *and*
1973 while executing the (relocated) original instruction,
1974 reset the PC to point at the tpoint address, before
1975 reporting to GDB. Otherwise, it's an IPA lib bug: just
1976 report the signal to GDB, and pray for the best. */
1977
1978 lwp->collecting_fast_tracepoint
1979 = fast_tpoint_collect_result::not_collecting;
1980
1981 if (r != fast_tpoint_collect_result::not_collecting
1982 && (status.adjusted_insn_addr <= lwp->stop_pc
1983 && lwp->stop_pc < status.adjusted_insn_addr_end))
1984 {
1985 siginfo_t info;
1986 struct regcache *regcache;
1987
1988 /* The si_addr on a few signals references the address
1989 of the faulting instruction. Adjust that as
1990 well. */
1991 if ((WSTOPSIG (*wstat) == SIGILL
1992 || WSTOPSIG (*wstat) == SIGFPE
1993 || WSTOPSIG (*wstat) == SIGBUS
1994 || WSTOPSIG (*wstat) == SIGSEGV)
1995 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
1996 (PTRACE_TYPE_ARG3) 0, &info) == 0
1997 /* Final check just to make sure we don't clobber
1998 the siginfo of non-kernel-sent signals. */
1999 && (uintptr_t) info.si_addr == lwp->stop_pc)
2000 {
2001 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2002 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2003 (PTRACE_TYPE_ARG3) 0, &info);
2004 }
2005
2006 regcache = get_thread_regcache (current_thread, 1);
2007 low_set_pc (regcache, status.tpoint_addr);
2008 lwp->stop_pc = status.tpoint_addr;
2009
2010 /* Cancel any fast tracepoint lock this thread was
2011 holding. */
2012 force_unlock_trace_buffer ();
2013 }
2014
2015 if (lwp->exit_jump_pad_bkpt != NULL)
2016 {
2017 threads_debug_printf
2018 ("Cancelling fast exit-jump-pad: removing bkpt."
2019 "stopping all threads momentarily.");
2020
2021 stop_all_lwps (1, lwp);
2022
2023 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2024 lwp->exit_jump_pad_bkpt = NULL;
2025
2026 unstop_all_lwps (1, lwp);
2027
2028 gdb_assert (lwp->suspended >= 0);
2029 }
2030 }
2031 }
2032
2033 threads_debug_printf
2034 ("Checking whether LWP %ld needs to move out of the jump pad... no",
2035 lwpid_of (current_thread));
2036
2037 return false;
2038 }
2039
2040 /* Enqueue one signal in the "signals to report later when out of the
2041 jump pad" list. */
2042
2043 static void
2044 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2045 {
2046 struct thread_info *thread = get_lwp_thread (lwp);
2047
2048 threads_debug_printf ("Deferring signal %d for LWP %ld.",
2049 WSTOPSIG (*wstat), lwpid_of (thread));
2050
2051 if (debug_threads)
2052 {
2053 for (const auto &sig : lwp->pending_signals_to_report)
2054 threads_debug_printf (" Already queued %d", sig.signal);
2055
2056 threads_debug_printf (" (no more currently queued signals)");
2057 }
2058
2059 /* Don't enqueue non-RT signals if they are already in the deferred
2060 queue. (SIGSTOP being the easiest signal to see ending up here
2061 twice.) */
2062 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2063 {
2064 for (const auto &sig : lwp->pending_signals_to_report)
2065 {
2066 if (sig.signal == WSTOPSIG (*wstat))
2067 {
2068 threads_debug_printf
2069 ("Not requeuing already queued non-RT signal %d for LWP %ld",
2070 sig.signal, lwpid_of (thread));
2071 return;
2072 }
2073 }
2074 }
2075
2076 lwp->pending_signals_to_report.emplace_back (WSTOPSIG (*wstat));
2077
2078 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2079 &lwp->pending_signals_to_report.back ().info);
2080 }
2081
2082 /* Dequeue one signal from the "signals to report later when out of
2083 the jump pad" list. */
2084
2085 static int
2086 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2087 {
2088 struct thread_info *thread = get_lwp_thread (lwp);
2089
2090 if (!lwp->pending_signals_to_report.empty ())
2091 {
2092 const pending_signal &p_sig = lwp->pending_signals_to_report.front ();
2093
2094 *wstat = W_STOPCODE (p_sig.signal);
2095 if (p_sig.info.si_signo != 0)
2096 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2097 &p_sig.info);
2098
2099 lwp->pending_signals_to_report.pop_front ();
2100
2101 threads_debug_printf ("Reporting deferred signal %d for LWP %ld.",
2102 WSTOPSIG (*wstat), lwpid_of (thread));
2103
2104 if (debug_threads)
2105 {
2106 for (const auto &sig : lwp->pending_signals_to_report)
2107 threads_debug_printf (" Still queued %d", sig.signal);
2108
2109 threads_debug_printf (" (no more queued signals)");
2110 }
2111
2112 return 1;
2113 }
2114
2115 return 0;
2116 }
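/* Editorial sketch, not part of the original file: the round trip the
   dequeue above relies on.  W_STOPCODE builds a raw wait status that
   the standard wait macros decode back, which is what lets the
   dequeued signal flow through the normal stop-reporting path as if
   waitpid had just returned it:

     int w = W_STOPCODE (SIGUSR1);
     gdb_assert (WIFSTOPPED (w));
     gdb_assert (WSTOPSIG (w) == SIGUSR1);
*/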
2117
2118 bool
2119 linux_process_target::check_stopped_by_watchpoint (lwp_info *child)
2120 {
2121 scoped_restore_current_thread restore_thread;
2122 switch_to_thread (get_lwp_thread (child));
2123
2124 if (low_stopped_by_watchpoint ())
2125 {
2126 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2127 child->stopped_data_address = low_stopped_data_address ();
2128 }
2129
2130 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2131 }
2132
2133 bool
2134 linux_process_target::low_stopped_by_watchpoint ()
2135 {
2136 return false;
2137 }
2138
2139 CORE_ADDR
2140 linux_process_target::low_stopped_data_address ()
2141 {
2142 return 0;
2143 }
2144
2145 /* Return the ptrace options that we want to try to enable. */
2146
2147 static int
2148 linux_low_ptrace_options (int attached)
2149 {
2150 client_state &cs = get_client_state ();
2151 int options = 0;
2152
2153 if (!attached)
2154 options |= PTRACE_O_EXITKILL;
2155
2156 if (cs.report_fork_events)
2157 options |= PTRACE_O_TRACEFORK;
2158
2159 if (cs.report_vfork_events)
2160 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2161
2162 if (cs.report_exec_events)
2163 options |= PTRACE_O_TRACEEXEC;
2164
2165 options |= PTRACE_O_TRACESYSGOOD;
2166
2167 return options;
2168 }
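/* Editorial sketch, not part of the original file: what enabling
   these options boils down to.  linux_enable_event_reporting (from
   nat/linux-ptrace.c) hands the mask to PTRACE_SETOPTIONS.  Note
   that with PTRACE_O_TRACESYSGOOD set, syscall stops report
   SIGTRAP | 0x80 (SYSCALL_SIGTRAP), so they can't be confused with
   breakpoint traps.  A minimal version, assuming only the plain
   ptrace(2) interface:

     errno = 0;
     ptrace (PTRACE_SETOPTIONS, lwpid, (PTRACE_TYPE_ARG3) 0,
	     (PTRACE_TYPE_ARG4) (uintptr_t) options);
     if (errno != 0)
       perror_with_name ("PTRACE_SETOPTIONS");
*/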
2169
2170 void
2171 linux_process_target::filter_event (int lwpid, int wstat)
2172 {
2173 client_state &cs = get_client_state ();
2174 struct lwp_info *child;
2175 struct thread_info *thread;
2176 int have_stop_pc = 0;
2177
2178 child = find_lwp_pid (ptid_t (lwpid));
2179
2180 /* Check for events reported by anything not in our LWP list. */
2181 if (child == nullptr)
2182 {
2183 if (WIFSTOPPED (wstat))
2184 {
2185 if (WSTOPSIG (wstat) == SIGTRAP
2186 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2187 {
2188 /* A non-leader thread exec'ed after we've seen the
2189 leader zombie, and removed it from our lists (in
2190 check_zombie_leaders). The non-leader thread changes
2191 its tid to the tgid. */
2192 threads_debug_printf
2193 ("Re-adding thread group leader LWP %d after exec.",
2194 lwpid);
2195
2196 child = add_lwp (ptid_t (lwpid, lwpid));
2197 child->stopped = 1;
2198 switch_to_thread (child->thread);
2199 }
2200 else
2201 {
2202 /* A process we are controlling has forked and the new
2203 child's stop was reported to us by the kernel. Save
2204 its PID and go back to waiting for the fork event to
2205 be reported - the stopped process might be returned
2206 from waitpid before or after the fork event is. */
2207 threads_debug_printf
2208 ("Saving LWP %d status %s in stopped_pids list",
2209 lwpid, status_to_str (wstat).c_str ());
2210 add_to_pid_list (&stopped_pids, lwpid, wstat);
2211 }
2212 }
2213 else
2214 {
2215 /* Don't report an event for the exit of an LWP not in our
2216 list, i.e. not part of any inferior we're debugging.
2217 This can happen if we detach from a program we originally
2218 forked and then it exits. However, note that we may have
2219 earlier deleted a leader of an inferior we're debugging,
2220 in check_zombie_leaders. Re-add it back here if so. */
2221 find_process ([&] (process_info *proc)
2222 {
2223 if (proc->pid == lwpid)
2224 {
2225 threads_debug_printf
2226 ("Re-adding thread group leader LWP %d after exit.",
2227 lwpid);
2228
2229 child = add_lwp (ptid_t (lwpid, lwpid));
2230 return true;
2231 }
2232 return false;
2233 });
2234 }
2235
2236 if (child == nullptr)
2237 return;
2238 }
2239
2240 thread = get_lwp_thread (child);
2241
2242 child->stopped = 1;
2243
2244 child->last_status = wstat;
2245
2246 /* Check if the thread has exited. */
2247 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2248 {
2249 threads_debug_printf ("%d exited", lwpid);
2250
2251 if (finish_step_over (child))
2252 {
2253 /* Unsuspend all other LWPs, and set them back running again. */
2254 unsuspend_all_lwps (child);
2255 }
2256
2257 /* If this is not the leader LWP, then the exit signal was not
2258 the end of the debugged application and should be ignored,
2259 unless GDB wants to hear about thread exits. */
2260 if (cs.report_thread_events || is_leader (thread))
2261 {
2262 /* Since events are serialized to GDB core, we can't
2263 report this one right now. Leave the status pending for
2264 the next time we're able to report it. */
2265 mark_lwp_dead (child, wstat);
2266 return;
2267 }
2268 else
2269 {
2270 delete_lwp (child);
2271 return;
2272 }
2273 }
2274
2275 gdb_assert (WIFSTOPPED (wstat));
2276
2277 if (WIFSTOPPED (wstat))
2278 {
2279 struct process_info *proc;
2280
2281 /* Architecture-specific setup after inferior is running. */
2282 proc = find_process_pid (pid_of (thread));
2283 if (proc->tdesc == NULL)
2284 {
2285 if (proc->attached)
2286 {
2287 /* This needs to happen after we have attached to the
2288 inferior and it is stopped for the first time, but
2289 before we access any inferior registers. */
2290 arch_setup_thread (thread);
2291 }
2292 else
2293 {
2294 /* The process is started, but GDBserver will do
2295 architecture-specific setup after the program stops at
2296 the first instruction. */
2297 child->status_pending_p = 1;
2298 child->status_pending = wstat;
2299 return;
2300 }
2301 }
2302 }
2303
2304 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2305 {
2306 struct process_info *proc = find_process_pid (pid_of (thread));
2307 int options = linux_low_ptrace_options (proc->attached);
2308
2309 linux_enable_event_reporting (lwpid, options);
2310 child->must_set_ptrace_flags = 0;
2311 }
2312
2313 /* Always update syscall_state, even if it will be filtered later. */
2314 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2315 {
2316 child->syscall_state
2317 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2318 ? TARGET_WAITKIND_SYSCALL_RETURN
2319 : TARGET_WAITKIND_SYSCALL_ENTRY);
2320 }
2321 else
2322 {
2323 /* Almost all other ptrace-stops are known to be outside of system
2324 calls, with further exceptions in handle_extended_wait. */
2325 child->syscall_state = TARGET_WAITKIND_IGNORE;
2326 }
2327
2328 /* Be careful to not overwrite stop_pc until save_stop_reason is
2329 called. */
2330 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2331 && linux_is_extended_waitstatus (wstat))
2332 {
2333 child->stop_pc = get_pc (child);
2334 if (handle_extended_wait (&child, wstat))
2335 {
2336 /* The event has been handled, so just return without
2337 reporting it. */
2338 return;
2339 }
2340 }
2341
2342 if (linux_wstatus_maybe_breakpoint (wstat))
2343 {
2344 if (save_stop_reason (child))
2345 have_stop_pc = 1;
2346 }
2347
2348 if (!have_stop_pc)
2349 child->stop_pc = get_pc (child);
2350
2351 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2352 && child->stop_expected)
2353 {
2354 threads_debug_printf ("Expected stop.");
2355
2356 child->stop_expected = 0;
2357
2358 if (thread->last_resume_kind == resume_stop)
2359 {
2360 /* We want to report the stop to the core. Treat the
2361 SIGSTOP as a normal event. */
2362 threads_debug_printf ("resume_stop SIGSTOP caught for %s.",
2363 target_pid_to_str (ptid_of (thread)).c_str ());
2364 }
2365 else if (stopping_threads != NOT_STOPPING_THREADS)
2366 {
2367 /* Stopping threads. We don't want this SIGSTOP to end up
2368 pending. */
2369 threads_debug_printf ("SIGSTOP caught for %s while stopping threads.",
2370 target_pid_to_str (ptid_of (thread)).c_str ());
2371 return;
2372 }
2373 else
2374 {
2375 /* This is a delayed SIGSTOP. Filter out the event. */
2376 threads_debug_printf ("%s %s, 0, 0 (discard delayed SIGSTOP)",
2377 child->stepping ? "step" : "continue",
2378 target_pid_to_str (ptid_of (thread)).c_str ());
2379
2380 resume_one_lwp (child, child->stepping, 0, NULL);
2381 return;
2382 }
2383 }
2384
2385 child->status_pending_p = 1;
2386 child->status_pending = wstat;
2387 return;
2388 }
2389
2390 bool
2391 linux_process_target::maybe_hw_step (thread_info *thread)
2392 {
2393 if (supports_hardware_single_step ())
2394 return true;
2395 else
2396 {
2397 /* GDBserver must insert single-step breakpoint for software
2398 single step. */
2399 gdb_assert (has_single_step_breakpoints (thread));
2400 return false;
2401 }
2402 }
2403
2404 void
2405 linux_process_target::resume_stopped_resumed_lwps (thread_info *thread)
2406 {
2407 struct lwp_info *lp = get_thread_lwp (thread);
2408
2409 if (lp->stopped
2410 && !lp->suspended
2411 && !lp->status_pending_p
2412 && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
2413 {
2414 int step = 0;
2415
2416 if (thread->last_resume_kind == resume_step)
2417 step = maybe_hw_step (thread);
2418
2419 threads_debug_printf ("resuming stopped-resumed LWP %s at %s: step=%d",
2420 target_pid_to_str (ptid_of (thread)).c_str (),
2421 paddress (lp->stop_pc), step);
2422
2423 resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2424 }
2425 }
2426
2427 int
2428 linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
2429 ptid_t filter_ptid,
2430 int *wstatp, int options)
2431 {
2432 struct thread_info *event_thread;
2433 struct lwp_info *event_child, *requested_child;
2434 sigset_t block_mask, prev_mask;
2435
2436 retry:
2437 /* N.B. event_thread points to the thread_info struct that contains
2438 event_child. Keep them in sync. */
2439 event_thread = NULL;
2440 event_child = NULL;
2441 requested_child = NULL;
2442
2443 /* Check for a lwp with a pending status. */
2444
2445 if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
2446 {
2447 event_thread = find_thread_in_random ([&] (thread_info *thread)
2448 {
2449 return status_pending_p_callback (thread, filter_ptid);
2450 });
2451
2452 if (event_thread != NULL)
2453 {
2454 event_child = get_thread_lwp (event_thread);
2455 threads_debug_printf ("Got a pending child %ld", lwpid_of (event_thread));
2456 }
2457 }
2458 else if (filter_ptid != null_ptid)
2459 {
2460 requested_child = find_lwp_pid (filter_ptid);
2461
2462 if (stopping_threads == NOT_STOPPING_THREADS
2463 && requested_child->status_pending_p
2464 && (requested_child->collecting_fast_tracepoint
2465 != fast_tpoint_collect_result::not_collecting))
2466 {
2467 enqueue_one_deferred_signal (requested_child,
2468 &requested_child->status_pending);
2469 requested_child->status_pending_p = 0;
2470 requested_child->status_pending = 0;
2471 resume_one_lwp (requested_child, 0, 0, NULL);
2472 }
2473
2474 if (requested_child->suspended
2475 && requested_child->status_pending_p)
2476 {
2477 internal_error (__FILE__, __LINE__,
2478 "requesting an event out of a"
2479 " suspended child?");
2480 }
2481
2482 if (requested_child->status_pending_p)
2483 {
2484 event_child = requested_child;
2485 event_thread = get_lwp_thread (event_child);
2486 }
2487 }
2488
2489 if (event_child != NULL)
2490 {
2491 threads_debug_printf ("Got an event from pending child %ld (%04x)",
2492 lwpid_of (event_thread),
2493 event_child->status_pending);
2494
2495 *wstatp = event_child->status_pending;
2496 event_child->status_pending_p = 0;
2497 event_child->status_pending = 0;
2498 switch_to_thread (event_thread);
2499 return lwpid_of (event_thread);
2500 }
2501
2502 /* But if we don't find a pending event, we'll have to wait.
2503
2504 We only enter this loop if no process has a pending wait status.
2505 Thus any action taken in response to a wait status inside this
2506 loop is responding as soon as we detect the status, not after any
2507 pending events. */
2508
2509 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2510 all signals while here. */
2511 sigfillset (&block_mask);
2512 gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);
2513
2514 /* Always pull all events out of the kernel. We'll randomly select
2515 an event LWP out of all that have events, to prevent
2516 starvation. */
2517 while (event_child == NULL)
2518 {
2519 pid_t ret = 0;
2520
2521 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2522 quirks:
2523
2524 - If the thread group leader exits while other threads in the
2525 thread group still exist, waitpid(TGID, ...) hangs. That
2526 waitpid won't return an exit status until the other threads
2527 in the group are reaped.
2528
2529 - When a non-leader thread execs, that thread just vanishes
2530 without reporting an exit (so we'd hang if we waited for it
2531 explicitly in that case). The exec event is reported to
2532 the TGID pid. */
2533 errno = 0;
2534 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2535
2536 threads_debug_printf ("waitpid(-1, ...) returned %d, %s",
2537 ret, errno ? safe_strerror (errno) : "ERRNO-OK");
2538
2539 if (ret > 0)
2540 {
2541 threads_debug_printf ("waitpid %ld received %s",
2542 (long) ret, status_to_str (*wstatp).c_str ());
2543
2544 /* Filter all events. IOW, leave all events pending. We'll
2545 randomly select an event LWP out of all that have events
2546 below. */
2547 filter_event (ret, *wstatp);
2548 /* Retry until nothing comes out of waitpid. A single
2549 SIGCHLD can indicate more than one child stopped. */
2550 continue;
2551 }
2552
2553 /* Now that we've pulled all events out of the kernel, resume
2554 LWPs that don't have an interesting event to report. */
2555 if (stopping_threads == NOT_STOPPING_THREADS)
2556 for_each_thread ([this] (thread_info *thread)
2557 {
2558 resume_stopped_resumed_lwps (thread);
2559 });
2560
2561 /* ... and find an LWP with a status to report to the core, if
2562 any. */
2563 event_thread = find_thread_in_random ([&] (thread_info *thread)
2564 {
2565 return status_pending_p_callback (thread, filter_ptid);
2566 });
2567
2568 if (event_thread != NULL)
2569 {
2570 event_child = get_thread_lwp (event_thread);
2571 *wstatp = event_child->status_pending;
2572 event_child->status_pending_p = 0;
2573 event_child->status_pending = 0;
2574 break;
2575 }
2576
2577 /* Check for zombie thread group leaders. Those can't be reaped
2578 until all other threads in the thread group are. */
2579 check_zombie_leaders ();
2580
2581 auto not_stopped = [&] (thread_info *thread)
2582 {
2583 return not_stopped_callback (thread, wait_ptid);
2584 };
2585
2586 /* If there are no resumed children left in the set of LWPs we
2587 want to wait for, bail. We can't just block in
2588 waitpid/sigsuspend, because lwps might have been left stopped
2589 in trace-stop state, and we'd be stuck forever waiting for
2590 their status to change (which would only happen if we resumed
2591 them). Even if WNOHANG is set, this return code is preferred
2592 over 0 (below), as it is more detailed. */
2593 if (find_thread (not_stopped) == NULL)
2594 {
2595 threads_debug_printf ("exit (no unwaited-for LWP)");
2596
2597 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2598 return -1;
2599 }
2600
2601 /* No interesting event to report to the caller. */
2602 if ((options & WNOHANG))
2603 {
2604 threads_debug_printf ("WNOHANG set, no event found");
2605
2606 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2607 return 0;
2608 }
2609
2610 /* Block until we get an event reported with SIGCHLD. */
2611 threads_debug_printf ("sigsuspend'ing");
2612
2613 sigsuspend (&prev_mask);
2614 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2615 goto retry;
2616 }
2617
2618 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2619
2620 switch_to_thread (event_thread);
2621
2622 return lwpid_of (event_thread);
2623 }
2624
2625 int
2626 linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
2627 {
2628 return wait_for_event_filtered (ptid, ptid, wstatp, options);
2629 }
2630
2631 /* Select one LWP out of those that have events pending. */
2632
2633 static void
2634 select_event_lwp (struct lwp_info **orig_lp)
2635 {
2636 struct thread_info *event_thread = NULL;
2637
2638 /* In all-stop, give preference to the LWP that is being
2639 single-stepped. There will be at most one, and it's the LWP that
2640 the core is most interested in. If we didn't do this, then we'd
2641 have to handle pending step SIGTRAPs somehow in case the core
2642 later continues the previously-stepped thread, otherwise we'd
2643 report the pending SIGTRAP, and the core, not having stepped the
2644 thread, wouldn't understand what the trap was for, and therefore
2645 would report it to the user as a random signal. */
2646 if (!non_stop)
2647 {
2648 event_thread = find_thread ([] (thread_info *thread)
2649 {
2650 lwp_info *lp = get_thread_lwp (thread);
2651
2652 return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
2653 && thread->last_resume_kind == resume_step
2654 && lp->status_pending_p);
2655 });
2656
2657 if (event_thread != NULL)
2658 threads_debug_printf
2659 ("Select single-step %s",
2660 target_pid_to_str (ptid_of (event_thread)).c_str ());
2661 }
2662 if (event_thread == NULL)
2663 {
2664 /* No single-stepping LWP. Select one at random, out of those
2665 which have had events. */
2666
2667 event_thread = find_thread_in_random ([&] (thread_info *thread)
2668 {
2669 lwp_info *lp = get_thread_lwp (thread);
2670
2671 /* Only resumed LWPs that have an event pending. */
2672 return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
2673 && lp->status_pending_p);
2674 });
2675 }
2676
2677 if (event_thread != NULL)
2678 {
2679 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2680
2681 /* Switch the event LWP. */
2682 *orig_lp = event_lp;
2683 }
2684 }
2685
2686 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2687 non-NULL. */
2688
2689 static void
2690 unsuspend_all_lwps (struct lwp_info *except)
2691 {
2692 for_each_thread ([&] (thread_info *thread)
2693 {
2694 lwp_info *lwp = get_thread_lwp (thread);
2695
2696 if (lwp != except)
2697 lwp_suspended_decr (lwp);
2698 });
2699 }
2700
2701 static bool lwp_running (thread_info *thread);
2702
2703 /* Stabilize threads (move out of jump pads).
2704
2705 If a thread is midway collecting a fast tracepoint, we need to
2706 finish the collection and move it out of the jump pad before
2707 reporting the signal.
2708
2709 This avoids recursion while collecting (when a signal arrives
2710 midway, and the signal handler itself collects), which would trash
2711 the trace buffer. In case the user set a breakpoint in a signal
2712 handler, this avoids the backtrace showing the jump pad, etc.
2713 Most importantly, there are certain things we can't do safely if
2714 threads are stopped in a jump pad (or in its callees). For
2715 example:
2716
2717 - starting a new trace run. A thread still collecting the
2718 previous run could trash the trace buffer when resumed. The trace
2719 buffer control structures would have been reset but the thread had
2720 no way to tell. The thread could even be midway through
2721 memcpy'ing to the buffer, which would mean that when resumed, it
2722 would clobber the trace buffer that had been set for a new run.
2723
2724 - we can't rewrite/reuse the jump pads for new tracepoints
2725 safely. Say you do tstart while a thread is stopped midway through
2726 collecting. When the thread is later resumed, it finishes the
2727 collection, and returns to the jump pad, to execute the original
2728 instruction that was under the tracepoint jump at the time the
2729 older run had been started. If the jump pad had since been
2730 rewritten for something else in the new run, the thread would now
2731 execute the wrong / random instructions. */
2732
2733 void
2734 linux_process_target::stabilize_threads ()
2735 {
2736 thread_info *thread_stuck = find_thread ([this] (thread_info *thread)
2737 {
2738 return stuck_in_jump_pad (thread);
2739 });
2740
2741 if (thread_stuck != NULL)
2742 {
2743 threads_debug_printf ("can't stabilize, LWP %ld is stuck in jump pad",
2744 lwpid_of (thread_stuck));
2745 return;
2746 }
2747
2748 scoped_restore_current_thread restore_thread;
2749
2750 stabilizing_threads = 1;
2751
2752 /* Kick 'em all. */
2753 for_each_thread ([this] (thread_info *thread)
2754 {
2755 move_out_of_jump_pad (thread);
2756 });
2757
2758 /* Loop until all are stopped out of the jump pads. */
2759 while (find_thread (lwp_running) != NULL)
2760 {
2761 struct target_waitstatus ourstatus;
2762 struct lwp_info *lwp;
2763 int wstat;
2764
2765 /* Note that we go through the full wait event loop. While
2766 moving threads out of jump pad, we need to be able to step
2767 over internal breakpoints and such. */
2768 wait_1 (minus_one_ptid, &ourstatus, 0);
2769
2770 if (ourstatus.kind () == TARGET_WAITKIND_STOPPED)
2771 {
2772 lwp = get_thread_lwp (current_thread);
2773
2774 /* Lock it. */
2775 lwp_suspended_inc (lwp);
2776
2777 if (ourstatus.sig () != GDB_SIGNAL_0
2778 || current_thread->last_resume_kind == resume_stop)
2779 {
2780 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.sig ()));
2781 enqueue_one_deferred_signal (lwp, &wstat);
2782 }
2783 }
2784 }
2785
2786 unsuspend_all_lwps (NULL);
2787
2788 stabilizing_threads = 0;
2789
2790 if (debug_threads)
2791 {
2792 thread_stuck = find_thread ([this] (thread_info *thread)
2793 {
2794 return stuck_in_jump_pad (thread);
2795 });
2796
2797 if (thread_stuck != NULL)
2798 threads_debug_printf
2799 ("couldn't stabilize, LWP %ld got stuck in jump pad",
2800 lwpid_of (thread_stuck));
2801 }
2802 }
2803
2804 /* Convenience function that is called when the kernel reports an
2805 event that is not passed out to GDB. */
2806
2807 static ptid_t
2808 ignore_event (struct target_waitstatus *ourstatus)
2809 {
2810 /* If we got an event, there may still be others, as a single
2811 SIGCHLD can indicate more than one child stopped. This forces
2812 another target_wait call. */
2813 async_file_mark ();
2814
2815 ourstatus->set_ignore ();
2816 return null_ptid;
2817 }
2818
2819 ptid_t
2820 linux_process_target::filter_exit_event (lwp_info *event_child,
2821 target_waitstatus *ourstatus)
2822 {
2823 client_state &cs = get_client_state ();
2824 struct thread_info *thread = get_lwp_thread (event_child);
2825 ptid_t ptid = ptid_of (thread);
2826
2827 if (!is_leader (thread))
2828 {
2829 if (cs.report_thread_events)
2830 ourstatus->set_thread_exited (0);
2831 else
2832 ourstatus->set_ignore ();
2833
2834 delete_lwp (event_child);
2835 }
2836 return ptid;
2837 }
2838
2839 /* Returns 1 if GDB is interested in any of EVENT_CHILD's syscalls. */
2840
2841 static int
2842 gdb_catching_syscalls_p (struct lwp_info *event_child)
2843 {
2844 struct thread_info *thread = get_lwp_thread (event_child);
2845 struct process_info *proc = get_thread_process (thread);
2846
2847 return !proc->syscalls_to_catch.empty ();
2848 }
2849
2850 bool
2851 linux_process_target::gdb_catch_this_syscall (lwp_info *event_child)
2852 {
2853 int sysno;
2854 struct thread_info *thread = get_lwp_thread (event_child);
2855 struct process_info *proc = get_thread_process (thread);
2856
2857 if (proc->syscalls_to_catch.empty ())
2858 return false;
2859
2860 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
2861 return true;
2862
2863 get_syscall_trapinfo (event_child, &sysno);
2864
2865 for (int iter : proc->syscalls_to_catch)
2866 if (iter == sysno)
2867 return true;
2868
2869 return false;
2870 }
2871
2872 ptid_t
2873 linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
2874 target_wait_flags target_options)
2875 {
2876 THREADS_SCOPED_DEBUG_ENTER_EXIT;
2877
2878 client_state &cs = get_client_state ();
2879 int w;
2880 struct lwp_info *event_child;
2881 int options;
2882 int pid;
2883 int step_over_finished;
2884 int bp_explains_trap;
2885 int maybe_internal_trap;
2886 int report_to_gdb;
2887 int trace_event;
2888 int in_step_range;
2889 int any_resumed;
2890
2891 threads_debug_printf ("[%s]", target_pid_to_str (ptid).c_str ());
2892
2893 /* Translate generic target options into linux options. */
2894 options = __WALL;
2895 if (target_options & TARGET_WNOHANG)
2896 options |= WNOHANG;
2897
2898 bp_explains_trap = 0;
2899 trace_event = 0;
2900 in_step_range = 0;
2901 ourstatus->set_ignore ();
2902
2903 auto status_pending_p_any = [&] (thread_info *thread)
2904 {
2905 return status_pending_p_callback (thread, minus_one_ptid);
2906 };
2907
2908 auto not_stopped = [&] (thread_info *thread)
2909 {
2910 return not_stopped_callback (thread, minus_one_ptid);
2911 };
2912
2913 /* Find a resumed LWP, if any. */
2914 if (find_thread (status_pending_p_any) != NULL)
2915 any_resumed = 1;
2916 else if (find_thread (not_stopped) != NULL)
2917 any_resumed = 1;
2918 else
2919 any_resumed = 0;
2920
2921 if (step_over_bkpt == null_ptid)
2922 pid = wait_for_event (ptid, &w, options);
2923 else
2924 {
2925 threads_debug_printf ("step_over_bkpt set [%s], doing a blocking wait",
2926 target_pid_to_str (step_over_bkpt).c_str ());
2927 pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2928 }
2929
2930 if (pid == 0 || (pid == -1 && !any_resumed))
2931 {
2932 gdb_assert (target_options & TARGET_WNOHANG);
2933
2934 threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_IGNORE");
2935
2936 ourstatus->set_ignore ();
2937 return null_ptid;
2938 }
2939 else if (pid == -1)
2940 {
2941 threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_NO_RESUMED");
2942
2943 ourstatus->set_no_resumed ();
2944 return null_ptid;
2945 }
2946
2947 event_child = get_thread_lwp (current_thread);
2948
2949 /* wait_for_event only returns an exit status for the last
2950 child of a process. Report it. */
2951 if (WIFEXITED (w) || WIFSIGNALED (w))
2952 {
2953 if (WIFEXITED (w))
2954 {
2955 ourstatus->set_exited (WEXITSTATUS (w));
2956
2957 threads_debug_printf
2958 ("ret = %s, exited with retcode %d",
2959 target_pid_to_str (ptid_of (current_thread)).c_str (),
2960 WEXITSTATUS (w));
2961 }
2962 else
2963 {
2964 ourstatus->set_signalled (gdb_signal_from_host (WTERMSIG (w)));
2965
2966 threads_debug_printf
2967 ("ret = %s, terminated with signal %d",
2968 target_pid_to_str (ptid_of (current_thread)).c_str (),
2969 WTERMSIG (w));
2970 }
2971
2972 if (ourstatus->kind () == TARGET_WAITKIND_EXITED)
2973 return filter_exit_event (event_child, ourstatus);
2974
2975 return ptid_of (current_thread);
2976 }
2977
2978 /* If a step-over executed a breakpoint instruction, then in the
2979 case of a hardware single step it means a gdb/gdbserver breakpoint
2980 had been planted on top of a permanent breakpoint, while in the
2981 case of a software single step it may just mean that gdbserver hit
2982 the reinsert breakpoint. The PC has been adjusted by
2983 save_stop_reason to point at the breakpoint address.
2984 So in the case of a hardware single step, advance the PC manually
2985 past the breakpoint, and in the case of a software single step,
2986 advance only if it's not the single_step_breakpoint we are hitting.
2987 This prevents the program from trapping the permanent breakpoint
2988 forever. */
2989 if (step_over_bkpt != null_ptid
2990 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
2991 && (event_child->stepping
2992 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
2993 {
2994 int increment_pc = 0;
2995 int breakpoint_kind = 0;
2996 CORE_ADDR stop_pc = event_child->stop_pc;
2997
2998 breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
2999 sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3000
3001 threads_debug_printf
3002 ("step-over for %s executed software breakpoint",
3003 target_pid_to_str (ptid_of (current_thread)).c_str ());
3004
3005 if (increment_pc != 0)
3006 {
3007 struct regcache *regcache
3008 = get_thread_regcache (current_thread, 1);
3009
3010 event_child->stop_pc += increment_pc;
3011 low_set_pc (regcache, event_child->stop_pc);
3012
3013 if (!low_breakpoint_at (event_child->stop_pc))
3014 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3015 }
3016 }
3017
3018 /* If this event was not handled before, and is not a SIGTRAP, we
3019 report it. SIGILL and SIGSEGV are also treated as traps in case
3020 a breakpoint is inserted at the current PC. If this target does
3021 not support internal breakpoints at all, we also report the
3022 SIGTRAP without further processing; it's of no concern to us. */
3023 maybe_internal_trap
3024 = (low_supports_breakpoints ()
3025 && (WSTOPSIG (w) == SIGTRAP
3026 || ((WSTOPSIG (w) == SIGILL
3027 || WSTOPSIG (w) == SIGSEGV)
3028 && low_breakpoint_at (event_child->stop_pc))));
3029
3030 if (maybe_internal_trap)
3031 {
3032 /* Handle anything that requires bookkeeping before deciding to
3033 report the event or continue waiting. */
3034
3035 /* First check if we can explain the SIGTRAP with an internal
3036 breakpoint, or if we should possibly report the event to GDB.
3037 Do this before anything that may remove or insert a
3038 breakpoint. */
3039 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3040
3041 /* We have a SIGTRAP, possibly a step-over dance has just
3042 finished. If so, tweak the state machine accordingly,
3043 reinsert breakpoints and delete any single-step
3044 breakpoints. */
3045 step_over_finished = finish_step_over (event_child);
3046
3047 /* Now invoke the callbacks of any internal breakpoints there. */
3048 check_breakpoints (event_child->stop_pc);
3049
3050 /* Handle tracepoint data collecting. This may overflow the
3051 trace buffer, and cause a tracing stop, removing
3052 breakpoints. */
3053 trace_event = handle_tracepoints (event_child);
3054
3055 if (bp_explains_trap)
3056 threads_debug_printf ("Hit a gdbserver breakpoint.");
3057 }
3058 else
3059 {
3060 /* We have some other signal, possibly a step-over dance was in
3061 progress, and it should be cancelled too. */
3062 step_over_finished = finish_step_over (event_child);
3063 }
3064
3065 /* We have all the data we need. Either report the event to GDB, or
3066 resume threads and keep waiting for more. */
3067
3068 /* If we're collecting a fast tracepoint, finish the collection and
3069 move out of the jump pad before delivering a signal. See
3070 linux_stabilize_threads. */
3071
3072 if (WIFSTOPPED (w)
3073 && WSTOPSIG (w) != SIGTRAP
3074 && supports_fast_tracepoints ()
3075 && agent_loaded_p ())
3076 {
3077 threads_debug_printf ("Got signal %d for LWP %ld. Check if we need "
3078 "to defer or adjust it.",
3079 WSTOPSIG (w), lwpid_of (current_thread));
3080
3081 /* Allow debugging the jump pad itself. */
3082 if (current_thread->last_resume_kind != resume_step
3083 && maybe_move_out_of_jump_pad (event_child, &w))
3084 {
3085 enqueue_one_deferred_signal (event_child, &w);
3086
3087 threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad)",
3088 WSTOPSIG (w), lwpid_of (current_thread));
3089
3090 resume_one_lwp (event_child, 0, 0, NULL);
3091
3092 return ignore_event (ourstatus);
3093 }
3094 }
3095
3096 if (event_child->collecting_fast_tracepoint
3097 != fast_tpoint_collect_result::not_collecting)
3098 {
3099 threads_debug_printf
3100 ("LWP %ld was trying to move out of the jump pad (%d). "
3101 "Check if we're already there.",
3102 lwpid_of (current_thread),
3103 (int) event_child->collecting_fast_tracepoint);
3104
3105 trace_event = 1;
3106
3107 event_child->collecting_fast_tracepoint
3108 = linux_fast_tracepoint_collecting (event_child, NULL);
3109
3110 if (event_child->collecting_fast_tracepoint
3111 != fast_tpoint_collect_result::before_insn)
3112 {
3113 /* No longer need this breakpoint. */
3114 if (event_child->exit_jump_pad_bkpt != NULL)
3115 {
3116 threads_debug_printf
3117 ("No longer need exit-jump-pad bkpt; removing it."
3118 "stopping all threads momentarily.");
3119
3120 /* Other running threads could hit this breakpoint.
3121 We don't handle moribund locations like GDB does,
3122 instead we always pause all threads when removing
3123 breakpoints, so that any step-over or
3124 decr_pc_after_break adjustment is always taken
3125 care of while the breakpoint is still
3126 inserted. */
3127 stop_all_lwps (1, event_child);
3128
3129 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3130 event_child->exit_jump_pad_bkpt = NULL;
3131
3132 unstop_all_lwps (1, event_child);
3133
3134 gdb_assert (event_child->suspended >= 0);
3135 }
3136 }
3137
3138 if (event_child->collecting_fast_tracepoint
3139 == fast_tpoint_collect_result::not_collecting)
3140 {
3141 threads_debug_printf
3142 ("fast tracepoint finished collecting successfully.");
3143
3144 /* We may have a deferred signal to report. */
3145 if (dequeue_one_deferred_signal (event_child, &w))
3146 threads_debug_printf ("dequeued one signal.");
3147 else
3148 {
3149 threads_debug_printf ("no deferred signals.");
3150
3151 if (stabilizing_threads)
3152 {
3153 ourstatus->set_stopped (GDB_SIGNAL_0);
3154
3155 threads_debug_printf
3156 ("ret = %s, stopped while stabilizing threads",
3157 target_pid_to_str (ptid_of (current_thread)).c_str ());
3158
3159 return ptid_of (current_thread);
3160 }
3161 }
3162 }
3163 }
3164
3165 /* Check whether GDB would be interested in this event. */
3166
3167 /* Check if GDB is interested in this syscall. */
3168 if (WIFSTOPPED (w)
3169 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3170 && !gdb_catch_this_syscall (event_child))
3171 {
3172 threads_debug_printf ("Ignored syscall for LWP %ld.",
3173 lwpid_of (current_thread));
3174
3175 resume_one_lwp (event_child, event_child->stepping, 0, NULL);
3176
3177 return ignore_event (ourstatus);
3178 }
3179
3180 /* If GDB is not interested in this signal, don't stop other
3181 threads, and don't report it to GDB. Just resume the inferior
3182 right away. We do this for threading-related signals as well as
3183 any that GDB specifically requested we ignore. But never ignore
3184 SIGSTOP if we sent it ourselves, and do not ignore signals when
3185 stepping - they may require special handling to skip the signal
3186 handler. Also never ignore signals that could be caused by a
3187 breakpoint. */
3188 if (WIFSTOPPED (w)
3189 && current_thread->last_resume_kind != resume_step
3190 && (
3191 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3192 (current_process ()->priv->thread_db != NULL
3193 && (WSTOPSIG (w) == __SIGRTMIN
3194 || WSTOPSIG (w) == __SIGRTMIN + 1))
3195 ||
3196 #endif
3197 (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3198 && !(WSTOPSIG (w) == SIGSTOP
3199 && current_thread->last_resume_kind == resume_stop)
3200 && !linux_wstatus_maybe_breakpoint (w))))
3201 {
3202 siginfo_t info, *info_p;
3203
3204 threads_debug_printf ("Ignored signal %d for LWP %ld.",
3205 WSTOPSIG (w), lwpid_of (current_thread));
3206
3207 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3208 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3209 info_p = &info;
3210 else
3211 info_p = NULL;
3212
3213 if (step_over_finished)
3214 {
3215 /* We cancelled this thread's step-over above. We still
3216 need to unsuspend all other LWPs, and set them back
3217 running again while the signal handler runs. */
3218 unsuspend_all_lwps (event_child);
3219
3220 /* Enqueue the pending signal info so that proceed_all_lwps
3221 doesn't lose it. */
3222 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3223
3224 proceed_all_lwps ();
3225 }
3226 else
3227 {
3228 resume_one_lwp (event_child, event_child->stepping,
3229 WSTOPSIG (w), info_p);
3230 }
3231
3232 return ignore_event (ourstatus);
3233 }
3234
3235 /* Note that all addresses are always "out of the step range" when
3236 there's no range to begin with. */
3237 in_step_range = lwp_in_step_range (event_child);
3238
3239 /* If GDB wanted this thread to single step, and the thread is out
3240 of the step range, we always want to report the SIGTRAP, and let
3241 GDB handle it. Watchpoints should always be reported. So should
3242 signals we can't explain. A SIGTRAP we can't explain could be a
3243 GDB breakpoint --- we may or may not support Z0 breakpoints. If
3244 we do, we'd be able to handle GDB breakpoints on top of internal
3245 breakpoints, by handling the internal breakpoint and still
3246 reporting the event to GDB. If we don't, we're out of luck, GDB
3247 won't see the breakpoint hit. If we see a single-step event but
3248 the thread should be continuing, don't pass the trap to gdb.
3249 That indicates that we had previously finished a single-step but
3250 left the single-step pending -- see
3251 complete_ongoing_step_over. */
3252 report_to_gdb = (!maybe_internal_trap
3253 || (current_thread->last_resume_kind == resume_step
3254 && !in_step_range)
3255 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3256 || (!in_step_range
3257 && !bp_explains_trap
3258 && !trace_event
3259 && !step_over_finished
3260 && !(current_thread->last_resume_kind == resume_continue
3261 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3262 || (gdb_breakpoint_here (event_child->stop_pc)
3263 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3264 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3265 || event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE);
3266
3267 run_breakpoint_commands (event_child->stop_pc);
3268
3269 /* We found no reason GDB would want us to stop. We either hit one
3270 of our own breakpoints, or finished an internal step GDB
3271 shouldn't know about. */
3272 if (!report_to_gdb)
3273 {
3274 if (bp_explains_trap)
3275 threads_debug_printf ("Hit a gdbserver breakpoint.");
3276
3277 if (step_over_finished)
3278 threads_debug_printf ("Step-over finished.");
3279
3280 if (trace_event)
3281 threads_debug_printf ("Tracepoint event.");
3282
3283 if (lwp_in_step_range (event_child))
3284 threads_debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).",
3285 paddress (event_child->stop_pc),
3286 paddress (event_child->step_range_start),
3287 paddress (event_child->step_range_end));
3288
3289 /* We're not reporting this breakpoint to GDB, so apply the
3290 decr_pc_after_break adjustment to the inferior's regcache
3291 ourselves. */
3292
3293 if (low_supports_breakpoints ())
3294 {
3295 struct regcache *regcache
3296 = get_thread_regcache (current_thread, 1);
3297 low_set_pc (regcache, event_child->stop_pc);
3298 }
3299
3300 if (step_over_finished)
3301 {
3302 /* If we have finished stepping over a breakpoint, we've
3303 stopped and suspended all LWPs momentarily except the
3304 stepping one. This is where we resume them all again.
3305 We're going to keep waiting, so use proceed, which
3306 handles stepping over the next breakpoint. */
3307 unsuspend_all_lwps (event_child);
3308 }
3309 else
3310 {
3311 /* Remove the single-step breakpoints if any. Note that
3312 there isn't a single-step breakpoint if we finished stepping
3313 over. */
3314 if (supports_software_single_step ()
3315 && has_single_step_breakpoints (current_thread))
3316 {
3317 stop_all_lwps (0, event_child);
3318 delete_single_step_breakpoints (current_thread);
3319 unstop_all_lwps (0, event_child);
3320 }
3321 }
3322
3323 threads_debug_printf ("proceeding all threads.");
3324
3325 proceed_all_lwps ();
3326
3327 return ignore_event (ourstatus);
3328 }
3329
3330 if (debug_threads)
3331 {
3332 if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
3333 threads_debug_printf ("LWP %ld: extended event with waitstatus %s",
3334 lwpid_of (get_lwp_thread (event_child)),
3335 event_child->waitstatus.to_string ().c_str ());
3336
3337 if (current_thread->last_resume_kind == resume_step)
3338 {
3339 if (event_child->step_range_start == event_child->step_range_end)
3340 threads_debug_printf
3341 ("GDB wanted to single-step, reporting event.");
3342 else if (!lwp_in_step_range (event_child))
3343 threads_debug_printf ("Out of step range, reporting event.");
3344 }
3345
3346 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3347 threads_debug_printf ("Stopped by watchpoint.");
3348 else if (gdb_breakpoint_here (event_child->stop_pc))
3349 threads_debug_printf ("Stopped by GDB breakpoint.");
3350 }
3351
3352 threads_debug_printf ("Hit a non-gdbserver trap event.");
3353
3354 /* Alright, we're going to report a stop. */
3355
3356 /* Remove single-step breakpoints. */
3357 if (supports_software_single_step ())
3358 {
3359 /* Decide whether to remove single-step breakpoints. If we do,
3360 stop all lwps first, so that other threads won't hit the
3361 breakpoint in stale memory. */
3362 int remove_single_step_breakpoints_p = 0;
3363
3364 if (non_stop)
3365 {
3366 remove_single_step_breakpoints_p
3367 = has_single_step_breakpoints (current_thread);
3368 }
3369 else
3370 {
3371 /* In all-stop, a stop reply cancels all previous resume
3372 requests. Delete all single-step breakpoints. */
3373
3374 find_thread ([&] (thread_info *thread) {
3375 if (has_single_step_breakpoints (thread))
3376 {
3377 remove_single_step_breakpoints_p = 1;
3378 return true;
3379 }
3380
3381 return false;
3382 });
3383 }
3384
3385 if (remove_single_step_breakpoints_p)
3386 {
3387 /* If we remove single-step breakpoints from memory, stop all lwps,
3388 so that other threads won't hit the breakpoint in stale
3389 memory. */
3390 stop_all_lwps (0, event_child);
3391
3392 if (non_stop)
3393 {
3394 gdb_assert (has_single_step_breakpoints (current_thread));
3395 delete_single_step_breakpoints (current_thread);
3396 }
3397 else
3398 {
3399 for_each_thread ([] (thread_info *thread){
3400 if (has_single_step_breakpoints (thread))
3401 delete_single_step_breakpoints (thread);
3402 });
3403 }
3404
3405 unstop_all_lwps (0, event_child);
3406 }
3407 }
3408
3409 if (!stabilizing_threads)
3410 {
3411 /* In all-stop, stop all threads. */
3412 if (!non_stop)
3413 stop_all_lwps (0, NULL);
3414
3415 if (step_over_finished)
3416 {
3417 if (!non_stop)
3418 {
3419 /* If we were doing a step-over, all other threads but
3420 the stepping one had been paused in start_step_over,
3421 with their suspend counts incremented. We don't want
3422 to do a full unstop/unpause, because we're in
3423 all-stop mode (so we want threads stopped), but we
3424 still need to unsuspend the other threads, to
3425 decrement their `suspended' count back. */
3426 unsuspend_all_lwps (event_child);
3427 }
3428 else
3429 {
3430 /* If we just finished a step-over, then all threads had
3431 been momentarily paused. In all-stop, that's fine,
3432 we want threads stopped by now anyway. In non-stop,
3433 we need to re-resume threads that GDB wanted to be
3434 running. */
3435 unstop_all_lwps (1, event_child);
3436 }
3437 }
3438
3439 /* If we're not waiting for a specific LWP, choose an event LWP
3440 from among those that have had events. Giving equal priority
3441 to all LWPs that have had events helps prevent
3442 starvation. */
3443 if (ptid == minus_one_ptid)
3444 {
3445 event_child->status_pending_p = 1;
3446 event_child->status_pending = w;
3447
3448 select_event_lwp (&event_child);
3449
3450 /* current_thread and event_child must stay in sync. */
3451 switch_to_thread (get_lwp_thread (event_child));
3452
3453 event_child->status_pending_p = 0;
3454 w = event_child->status_pending;
3455 }
3456
3457
3458 /* Stabilize threads (move out of jump pads). */
3459 if (!non_stop)
3460 target_stabilize_threads ();
3461 }
3462 else
3463 {
3464 /* If we just finished a step-over, then all threads had been
3465 momentarily paused. In all-stop, that's fine, we want
3466 threads stopped by now anyway. In non-stop, we need to
3467 re-resume threads that GDB wanted to be running. */
3468 if (step_over_finished)
3469 unstop_all_lwps (1, event_child);
3470 }
3471
3472 /* At this point, we haven't set OURSTATUS. This is where we do it. */
3473 gdb_assert (ourstatus->kind () == TARGET_WAITKIND_IGNORE);
3474
3475 if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
3476 {
3477 /* If the reported event is an exit, fork, vfork or exec, let
3478 GDB know. */
3479
3480 /* Break the unreported fork relationship chain. */
3481 if (event_child->waitstatus.kind () == TARGET_WAITKIND_FORKED
3482 || event_child->waitstatus.kind () == TARGET_WAITKIND_VFORKED)
3483 {
3484 event_child->fork_relative->fork_relative = NULL;
3485 event_child->fork_relative = NULL;
3486 }
3487
3488 *ourstatus = event_child->waitstatus;
3489 /* Clear the event lwp's waitstatus since we handled it already. */
3490 event_child->waitstatus.set_ignore ();
3491 }
3492 else
3493 {
3494 /* The LWP stopped due to a plain signal or a syscall signal. Either way,
3495 event_child->waitstatus wasn't filled in with the details, so look at
3496 the wait status W. */
3497 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3498 {
3499 int syscall_number;
3500
3501 get_syscall_trapinfo (event_child, &syscall_number);
3502 if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
3503 ourstatus->set_syscall_entry (syscall_number);
3504 else if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
3505 ourstatus->set_syscall_return (syscall_number);
3506 else
3507 gdb_assert_not_reached ("unexpected syscall state");
3508 }
3509 else if (current_thread->last_resume_kind == resume_stop
3510 && WSTOPSIG (w) == SIGSTOP)
3511 {
3512 /* A thread that has been requested to stop by GDB with
3513 vCont;t stopped cleanly, so report it as SIG0. The use of
3514 SIGSTOP is an implementation detail. */
3515 ourstatus->set_stopped (GDB_SIGNAL_0);
3516 }
3517 else
3518 ourstatus->set_stopped (gdb_signal_from_host (WSTOPSIG (w)));
3519 }
3520
3521 /* Now that we've selected our final event LWP, un-adjust its PC if
3522 it was a software breakpoint, and the client doesn't know we can
3523 adjust the breakpoint ourselves. */
3524 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3525 && !cs.swbreak_feature)
3526 {
3527 int decr_pc = low_decr_pc_after_break ();
3528
3529 if (decr_pc != 0)
3530 {
3531 struct regcache *regcache
3532 = get_thread_regcache (current_thread, 1);
3533 low_set_pc (regcache, event_child->stop_pc + decr_pc);
3534 }
3535 }
3536
3537 gdb_assert (step_over_bkpt == null_ptid);
3538
3539 threads_debug_printf ("ret = %s, %s",
3540 target_pid_to_str (ptid_of (current_thread)).c_str (),
3541 ourstatus->to_string ().c_str ());
3542
3543 if (ourstatus->kind () == TARGET_WAITKIND_EXITED)
3544 return filter_exit_event (event_child, ourstatus);
3545
3546 return ptid_of (current_thread);
3547 }
3548
3549 /* Get rid of any pending event in the pipe. */
3550 static void
3551 async_file_flush (void)
3552 {
3553 linux_event_pipe.flush ();
3554 }
3555
3556 /* Put something in the pipe, so the event loop wakes up. */
3557 static void
3558 async_file_mark (void)
3559 {
3560 linux_event_pipe.mark ();
3561 }
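/* Editorial note, not part of the original file: linux_event_pipe is
   the usual self-pipe trick for waking the event loop from the
   SIGCHLD handler.  The handler, defined elsewhere in this file,
   does roughly:

     static void
     sigchld_handler (int signo)
     {
       int old_errno = errno;

       if (target_is_async_p ())
	 async_file_mark ();	// Trigger a linux_wait.

       errno = old_errno;
     }

   wait () below drains the pipe with async_file_flush () before
   polling, so a leftover mark never causes a stale wakeup. */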
3562
3563 ptid_t
3564 linux_process_target::wait (ptid_t ptid,
3565 target_waitstatus *ourstatus,
3566 target_wait_flags target_options)
3567 {
3568 ptid_t event_ptid;
3569
3570 /* Flush the async file first. */
3571 if (target_is_async_p ())
3572 async_file_flush ();
3573
3574 do
3575 {
3576 event_ptid = wait_1 (ptid, ourstatus, target_options);
3577 }
3578 while ((target_options & TARGET_WNOHANG) == 0
3579 && event_ptid == null_ptid
3580 && ourstatus->kind () == TARGET_WAITKIND_IGNORE);
3581
3582 /* If at least one stop was reported, there may be more. A single
3583 SIGCHLD can signal more than one child stop. */
3584 if (target_is_async_p ()
3585 && (target_options & TARGET_WNOHANG) != 0
3586 && event_ptid != null_ptid)
3587 async_file_mark ();
3588
3589 return event_ptid;
3590 }
3591
3592 /* Send a signal to an LWP. */
3593
3594 static int
3595 kill_lwp (unsigned long lwpid, int signo)
3596 {
3597 int ret;
3598
3599 errno = 0;
3600 ret = syscall (__NR_tkill, lwpid, signo);
3601 if (errno == ENOSYS)
3602 {
3603 /* If tkill fails, then we are not using nptl threads, a
3604 configuration we no longer support. */
3605 perror_with_name (("tkill"));
3606 }
3607 return ret;
3608 }
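/* Editorial note, not part of the original file: tkill is used here
   rather than kill because the signal must reach one specific
   thread; kill(2) addresses the whole thread group, and the kernel
   may deliver the signal to any eligible thread in it.  Typical use,
   as in send_sigstop below:

     kill_lwp (lwpid_of (get_lwp_thread (lwp)), SIGSTOP);
*/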
3609
3610 void
3611 linux_stop_lwp (struct lwp_info *lwp)
3612 {
3613 send_sigstop (lwp);
3614 }
3615
3616 static void
3617 send_sigstop (struct lwp_info *lwp)
3618 {
3619 int pid;
3620
3621 pid = lwpid_of (get_lwp_thread (lwp));
3622
3623 /* If we already have a pending stop signal for this process, don't
3624 send another. */
3625 if (lwp->stop_expected)
3626 {
3627 threads_debug_printf ("Have pending sigstop for lwp %d", pid);
3628
3629 return;
3630 }
3631
3632 threads_debug_printf ("Sending sigstop to lwp %d", pid);
3633
3634 lwp->stop_expected = 1;
3635 kill_lwp (pid, SIGSTOP);
3636 }
3637
3638 static void
3639 send_sigstop (thread_info *thread, lwp_info *except)
3640 {
3641 struct lwp_info *lwp = get_thread_lwp (thread);
3642
3643 /* Ignore EXCEPT. */
3644 if (lwp == except)
3645 return;
3646
3647 if (lwp->stopped)
3648 return;
3649
3650 send_sigstop (lwp);
3651 }
3652
3653 /* Increment the suspend count of an LWP, and stop it, if not stopped
3654 yet. */
3655 static void
3656 suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
3657 {
3658 struct lwp_info *lwp = get_thread_lwp (thread);
3659
3660 /* Ignore EXCEPT. */
3661 if (lwp == except)
3662 return;
3663
3664 lwp_suspended_inc (lwp);
3665
3666 send_sigstop (thread, except);
3667 }
3668
3669 static void
3670 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3671 {
3672 /* Store the exit status for later. */
3673 lwp->status_pending_p = 1;
3674 lwp->status_pending = wstat;
3675
3676 /* Store in waitstatus as well, as there's nothing else to process
3677 for this event. */
3678 if (WIFEXITED (wstat))
3679 lwp->waitstatus.set_exited (WEXITSTATUS (wstat));
3680 else if (WIFSIGNALED (wstat))
3681 lwp->waitstatus.set_signalled (gdb_signal_from_host (WTERMSIG (wstat)));
3682
3683 /* Prevent trying to stop it. */
3684 lwp->stopped = 1;
3685
3686 /* No further stops are expected from a dead lwp. */
3687 lwp->stop_expected = 0;
3688 }
3689
3690 /* Return true if LWP has exited already, and has a pending exit event
3691 to report to GDB. */
3692
3693 static int
3694 lwp_is_marked_dead (struct lwp_info *lwp)
3695 {
3696 return (lwp->status_pending_p
3697 && (WIFEXITED (lwp->status_pending)
3698 || WIFSIGNALED (lwp->status_pending)));
3699 }
3700
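/* Wait until all LWPs have reported the SIGSTOPs we sent them,
   leaving all other events pending.  If the thread that was current
   on entry died in the process, leave no thread selected.  */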
3701 void
3702 linux_process_target::wait_for_sigstop ()
3703 {
3704 struct thread_info *saved_thread;
3705 ptid_t saved_tid;
3706 int wstat;
3707 int ret;
3708
3709 saved_thread = current_thread;
3710 if (saved_thread != NULL)
3711 saved_tid = saved_thread->id;
3712 else
3713 saved_tid = null_ptid; /* avoid bogus unused warning */
3714
3715 scoped_restore_current_thread restore_thread;
3716
3717 threads_debug_printf ("pulling events");
3718
3719 /* Passing NULL_PTID as filter indicates we want all events to be
3720 left pending. Eventually this returns when there are no
3721 unwaited-for children left. */
3722 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
3723 gdb_assert (ret == -1);
3724
3725 if (saved_thread == NULL || mythread_alive (saved_tid))
3726 return;
3727 else
3728 {
3729 threads_debug_printf ("Previously current thread died.");
3730
3731 /* We can't change the current inferior behind GDB's back,
3732 otherwise, a subsequent command may apply to the wrong
3733 process. */
3734 restore_thread.dont_restore ();
3735 switch_to_thread (nullptr);
3736 }
3737 }
3738
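/* Return true if THREAD's LWP is stopped in a fast tracepoint jump
   pad, still collecting, at a spot GDB is interested in (breakpoint,
   watchpoint hit, or single-step request).  */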
3739 bool
3740 linux_process_target::stuck_in_jump_pad (thread_info *thread)
3741 {
3742 struct lwp_info *lwp = get_thread_lwp (thread);
3743
3744 if (lwp->suspended != 0)
3745 {
3746 internal_error (__FILE__, __LINE__,
3747 "LWP %ld is suspended, suspended=%d\n",
3748 lwpid_of (thread), lwp->suspended);
3749 }
3750 gdb_assert (lwp->stopped);
3751
3752 /* Allow debugging the jump pad, gdb_collect, etc.  */
3753 return (supports_fast_tracepoints ()
3754 && agent_loaded_p ()
3755 && (gdb_breakpoint_here (lwp->stop_pc)
3756 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3757 || thread->last_resume_kind == resume_step)
3758 && (linux_fast_tracepoint_collecting (lwp, NULL)
3759 != fast_tpoint_collect_result::not_collecting));
3760 }
3761
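/* If THREAD's LWP stopped in a jump pad for a reason GDB doesn't care
   about, defer its pending signal, if any, and resume it so it can
   finish the collect and move out of the jump pad.  Otherwise, leave
   it suspended.  */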
3762 void
3763 linux_process_target::move_out_of_jump_pad (thread_info *thread)
3764 {
3765 struct lwp_info *lwp = get_thread_lwp (thread);
3766 int *wstat;
3767
3768 if (lwp->suspended != 0)
3769 {
3770 internal_error (__FILE__, __LINE__,
3771 "LWP %ld is suspended, suspended=%d\n",
3772 lwpid_of (thread), lwp->suspended);
3773 }
3774 gdb_assert (lwp->stopped);
3775
3776 /* For gdb_breakpoint_here. */
3777 scoped_restore_current_thread restore_thread;
3778 switch_to_thread (thread);
3779
3780 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3781
3782 /* Allow debugging the jump pad, gdb_collect, etc. */
3783 if (!gdb_breakpoint_here (lwp->stop_pc)
3784 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3785 && thread->last_resume_kind != resume_step
3786 && maybe_move_out_of_jump_pad (lwp, wstat))
3787 {
3788 threads_debug_printf ("LWP %ld needs stabilizing (in jump pad)",
3789 lwpid_of (thread));
3790
3791 if (wstat)
3792 {
3793 lwp->status_pending_p = 0;
3794 enqueue_one_deferred_signal (lwp, wstat);
3795
3796 threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad)",
3797 WSTOPSIG (*wstat), lwpid_of (thread));
3798 }
3799
3800 resume_one_lwp (lwp, 0, 0, NULL);
3801 }
3802 else
3803 lwp_suspended_inc (lwp);
3804 }
3805
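/* Return true if THREAD's LWP is still running, i.e., not stopped and
   not marked dead.  */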
3806 static bool
3807 lwp_running (thread_info *thread)
3808 {
3809 struct lwp_info *lwp = get_thread_lwp (thread);
3810
3811 if (lwp_is_marked_dead (lwp))
3812 return false;
3813
3814 return !lwp->stopped;
3815 }
3816
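/* Stop all LWPs, except EXCEPT if not NULL, by sending them SIGSTOPs,
   and wait until all of them have reported a stop.  If SUSPEND is
   nonzero, also increment each LWP's suspend count.  */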
3817 void
3818 linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
3819 {
3820 /* Should not be called recursively. */
3821 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3822
3823 THREADS_SCOPED_DEBUG_ENTER_EXIT;
3824
3825 threads_debug_printf
3826 ("%s, except=%s", suspend ? "stop-and-suspend" : "stop",
3827 (except != NULL
3828 ? target_pid_to_str (ptid_of (get_lwp_thread (except))).c_str ()
3829 : "none"));
3830
3831 stopping_threads = (suspend
3832 ? STOPPING_AND_SUSPENDING_THREADS
3833 : STOPPING_THREADS);
3834
3835 if (suspend)
3836 for_each_thread ([&] (thread_info *thread)
3837 {
3838 suspend_and_send_sigstop (thread, except);
3839 });
3840 else
3841 for_each_thread ([&] (thread_info *thread)
3842 {
3843 send_sigstop (thread, except);
3844 });
3845
3846 wait_for_sigstop ();
3847 stopping_threads = NOT_STOPPING_THREADS;
3848
3849 threads_debug_printf ("setting stopping_threads back to !stopping");
3850 }
3851
3852 /* Enqueue one signal in the chain of signals which need to be
3853 delivered to this process on next resume. */
3854
3855 static void
3856 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
3857 {
3858 lwp->pending_signals.emplace_back (signal);
3859 if (info == nullptr)
3860 memset (&lwp->pending_signals.back ().info, 0, sizeof (siginfo_t));
3861 else
3862 lwp->pending_signals.back ().info = *info;
3863 }
3864
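/* Install single-step breakpoints at the address(es) the next
   instruction of LWP can be at, as computed by the low target's
   low_get_next_pcs.  */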
3865 void
3866 linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp)
3867 {
3868 struct thread_info *thread = get_lwp_thread (lwp);
3869 struct regcache *regcache = get_thread_regcache (thread, 1);
3870
3871 scoped_restore_current_thread restore_thread;
3872
3873 switch_to_thread (thread);
3874 std::vector<CORE_ADDR> next_pcs = low_get_next_pcs (regcache);
3875
3876 for (CORE_ADDR pc : next_pcs)
3877 set_single_step_breakpoint (pc, current_ptid);
3878 }
3879
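/* Decide how to single-step LWP: return 1 if hardware single-step is
   supported; otherwise install software single-step breakpoints, if
   supported, and return 0.  */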
3880 int
3881 linux_process_target::single_step (lwp_info* lwp)
3882 {
3883 int step = 0;
3884
3885 if (supports_hardware_single_step ())
3886 {
3887 step = 1;
3888 }
3889 else if (supports_software_single_step ())
3890 {
3891 install_software_single_step_breakpoints (lwp);
3892 step = 0;
3893 }
3894 else
3895 threads_debug_printf ("stepping is not implemented on this target");
3896
3897 return step;
3898 }
3899
3900 /* The signal can be delivered to the inferior if we are not trying to
3901    finish a fast tracepoint collect.  Since the signal can be delivered
3902    during the step-over, the program may enter the signal handler and
3903    trap again after returning from it.  We can live with the spurious
3904    double traps.  */
3905
3906 static int
3907 lwp_signal_can_be_delivered (struct lwp_info *lwp)
3908 {
3909 return (lwp->collecting_fast_tracepoint
3910 == fast_tpoint_collect_result::not_collecting);
3911 }
3912
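/* Resume execution of LWP, stepping it if STEP, and delivering SIGNAL
   (with extra siginfo INFO if non-NULL) unless the signal has to be
   deferred.  Throws an error if ptrace fails; resume_one_lwp is the
   non-throwing wrapper.  */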
3913 void
3914 linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step,
3915 int signal, siginfo_t *info)
3916 {
3917 struct thread_info *thread = get_lwp_thread (lwp);
3918 int ptrace_request;
3919 struct process_info *proc = get_thread_process (thread);
3920
3921 /* Note that the target description may not be initialised
3922    (proc->tdesc == NULL) at this point because the program hasn't
3923    stopped at the first instruction yet.  It means GDBserver skips
3924    the extra traps from the wrapper program (see option --wrapper).
3925    Code in this function that requires register access should be
3926    guarded by a check on proc->tdesc.  */
3927
3928 if (lwp->stopped == 0)
3929 return;
3930
3931 gdb_assert (lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);
3932
3933 fast_tpoint_collect_result fast_tp_collecting
3934 = lwp->collecting_fast_tracepoint;
3935
3936 gdb_assert (!stabilizing_threads
3937 || (fast_tp_collecting
3938 != fast_tpoint_collect_result::not_collecting));
3939
3940 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3941 user used the "jump" command, or "set $pc = foo"). */
3942 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
3943 {
3944 /* Collecting 'while-stepping' actions doesn't make sense
3945 anymore. */
3946 release_while_stepping_state_list (thread);
3947 }
3948
3949 /* If we have pending signals or status, and a new signal, enqueue the
3950 signal. Also enqueue the signal if it can't be delivered to the
3951 inferior right now. */
3952 if (signal != 0
3953 && (lwp->status_pending_p
3954 || !lwp->pending_signals.empty ()
3955 || !lwp_signal_can_be_delivered (lwp)))
3956 {
3957 enqueue_pending_signal (lwp, signal, info);
3958
3959 /* Postpone any pending signal. It was enqueued above. */
3960 signal = 0;
3961 }
3962
3963 if (lwp->status_pending_p)
3964 {
3965 threads_debug_printf
3966 ("Not resuming lwp %ld (%s, stop %s); has pending status",
3967 lwpid_of (thread), step ? "step" : "continue",
3968 lwp->stop_expected ? "expected" : "not expected");
3969 return;
3970 }
3971
3972 scoped_restore_current_thread restore_thread;
3973 switch_to_thread (thread);
3974
3975 /* This bit needs some thinking about. If we get a signal that
3976 we must report while a single-step reinsert is still pending,
3977 we often end up resuming the thread. It might be better to
3978 (ew) allow a stack of pending events; then we could be sure that
3979 the reinsert happened right away and not lose any signals.
3980
3981 Making this stack would also shrink the window in which breakpoints are
3982 uninserted (see comment in linux_wait_for_lwp) but not enough for
3983 complete correctness, so it won't solve that problem. It may be
3984 worthwhile just to solve this one, however. */
3985 if (lwp->bp_reinsert != 0)
3986 {
3987 threads_debug_printf (" pending reinsert at 0x%s",
3988 paddress (lwp->bp_reinsert));
3989
3990 if (supports_hardware_single_step ())
3991 {
3992 if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
3993 {
3994 if (step == 0)
3995 warning ("BAD - reinserting but not stepping.");
3996 if (lwp->suspended)
3997 warning ("BAD - reinserting and suspended(%d).",
3998 lwp->suspended);
3999 }
4000 }
4001
4002 step = maybe_hw_step (thread);
4003 }
4004
4005 if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
4006 threads_debug_printf
4007 ("lwp %ld wants to get out of fast tracepoint jump pad "
4008 "(exit-jump-pad-bkpt)", lwpid_of (thread));
4009
4010 else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
4011 {
4012 threads_debug_printf
4013 ("lwp %ld wants to get out of fast tracepoint jump pad single-stepping",
4014 lwpid_of (thread));
4015
4016 if (supports_hardware_single_step ())
4017 step = 1;
4018 else
4019 {
4020 internal_error (__FILE__, __LINE__,
4021 "moving out of jump pad single-stepping"
4022 " not implemented on this target");
4023 }
4024 }
4025
4026 /* If we have while-stepping actions in this thread, set it stepping.
4027 If we have a signal to deliver, it may or may not be set to
4028 SIG_IGN, we don't know. Assume so, and allow collecting
4029 while-stepping into a signal handler. A possible smart thing to
4030 do would be to set an internal breakpoint at the signal return
4031 address, continue, and carry on catching this while-stepping
4032 action only when that breakpoint is hit. A future
4033 enhancement. */
4034 if (thread->while_stepping != NULL)
4035 {
4036 threads_debug_printf
4037 ("lwp %ld has a while-stepping action -> forcing step.",
4038 lwpid_of (thread));
4039
4040 step = single_step (lwp);
4041 }
4042
4043 if (proc->tdesc != NULL && low_supports_breakpoints ())
4044 {
4045 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4046
4047 lwp->stop_pc = low_get_pc (regcache);
4048
4049 threads_debug_printf (" %s from pc 0x%lx", step ? "step" : "continue",
4050 (long) lwp->stop_pc);
4051 }
4052
4053 /* If we have pending signals, consume one if it can be delivered to
4054 the inferior. */
4055 if (!lwp->pending_signals.empty () && lwp_signal_can_be_delivered (lwp))
4056 {
4057 const pending_signal &p_sig = lwp->pending_signals.front ();
4058
4059 signal = p_sig.signal;
4060 if (p_sig.info.si_signo != 0)
4061 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4062 &p_sig.info);
4063
4064 lwp->pending_signals.pop_front ();
4065 }
4066
4067 threads_debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)",
4068 lwpid_of (thread), step ? "step" : "continue", signal,
4069 lwp->stop_expected ? "expected" : "not expected");
4070
4071 low_prepare_to_resume (lwp);
4072
4073 regcache_invalidate_thread (thread);
4074 errno = 0;
4075 lwp->stepping = step;
4076 if (step)
4077 ptrace_request = PTRACE_SINGLESTEP;
4078 else if (gdb_catching_syscalls_p (lwp))
4079 ptrace_request = PTRACE_SYSCALL;
4080 else
4081 ptrace_request = PTRACE_CONT;
4082 ptrace (ptrace_request,
4083 lwpid_of (thread),
4084 (PTRACE_TYPE_ARG3) 0,
4085 /* Coerce to a uintptr_t first to avoid potential gcc warning
4086 of coercing an 8 byte integer to a 4 byte pointer. */
4087 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4088
4089 if (errno)
4090 {
4091 int saved_errno = errno;
4092
4093 threads_debug_printf ("ptrace errno = %d (%s)",
4094 saved_errno, strerror (saved_errno));
4095
4096 errno = saved_errno;
4097 perror_with_name ("resuming thread");
4098 }
4099
4100 /* Successfully resumed. Clear state that no longer makes sense,
4101 and mark the LWP as running. Must not do this before resuming
4102 otherwise if that fails other code will be confused. E.g., we'd
4103 later try to stop the LWP and hang forever waiting for a stop
4104 status. Note that we must not throw after this is cleared,
4105 otherwise handle_zombie_lwp_error would get confused. */
4106 lwp->stopped = 0;
4107 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4108 }
4109
4110 void
4111 linux_process_target::low_prepare_to_resume (lwp_info *lwp)
4112 {
4113 /* Nop. */
4114 }
4115
4116 /* Called when we try to resume a stopped LWP and that errors out. If
4117 the LWP is no longer in ptrace-stopped state (meaning it's a zombie,
4118 or about to become one), discard the error, clear any pending status
4119 the LWP may have, and return true (we'll collect the exit status
4120 soon enough). Otherwise, return false. */
4121
4122 static int
4123 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4124 {
4125 struct thread_info *thread = get_lwp_thread (lp);
4126
4127 /* If we get an error after resuming the LWP successfully, we'd
4128 confuse !T state for the LWP being gone. */
4129 gdb_assert (lp->stopped);
4130
4131 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4132 because even if ptrace failed with ESRCH, the tracee may be "not
4133 yet fully dead", but already refusing ptrace requests. In that
4134 case the tracee has 'R (Running)' state for a little bit
4135 (observed in Linux 3.18). See also the note on ESRCH in the
4136 ptrace(2) man page. Instead, check whether the LWP has any state
4137 other than ptrace-stopped. */
4138
4139 /* Don't assume anything if /proc/PID/status can't be read. */
4140 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4141 {
4142 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4143 lp->status_pending_p = 0;
4144 return 1;
4145 }
4146 return 0;
4147 }
4148
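/* Like resume_one_lwp_throw, but no error is thrown if the LWP turns
   out to be gone already (e.g., because its leader exited); instead it
   is left marked resumed, so its exit status can be collected.  */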
4149 void
4150 linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal,
4151 siginfo_t *info)
4152 {
4153 try
4154 {
4155 resume_one_lwp_throw (lwp, step, signal, info);
4156 }
4157 catch (const gdb_exception_error &ex)
4158 {
4159 if (check_ptrace_stopped_lwp_gone (lwp))
4160 {
4161 /* This could be because we tried to resume an LWP after its leader
4162 exited. Mark it as resumed, so we can collect an exit event
4163 from it. */
4164 lwp->stopped = 0;
4165 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4166 }
4167 else
4168 throw;
4169 }
4170 }
4171
4172 /* This function is called once per thread via for_each_thread.
4173 We look up which resume request applies to THREAD and mark it with a
4174 pointer to the appropriate resume request.
4175
4176 This algorithm is O(threads * resume elements), but the number of
4177 resume elements is small (and will remain small at least until GDB
4178 supports thread suspension).  */
4179
4180 static void
4181 linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
4182 {
4183 struct lwp_info *lwp = get_thread_lwp (thread);
4184
4185 for (int ndx = 0; ndx < n; ndx++)
4186 {
4187 ptid_t ptid = resume[ndx].thread;
4188 if (ptid == minus_one_ptid
4189 || ptid == thread->id
4190 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4191 of PID'. */
4192 || (ptid.pid () == pid_of (thread)
4193 && (ptid.is_pid ()
4194 || ptid.lwp () == -1)))
4195 {
4196 if (resume[ndx].kind == resume_stop
4197 && thread->last_resume_kind == resume_stop)
4198 {
4199 threads_debug_printf
4200 ("already %s LWP %ld at GDB's request",
4201 (thread->last_status.kind () == TARGET_WAITKIND_STOPPED
4202 ? "stopped" : "stopping"),
4203 lwpid_of (thread));
4204
4205 continue;
4206 }
4207
4208 /* Ignore (wildcard) resume requests for already-resumed
4209 threads. */
4210 if (resume[ndx].kind != resume_stop
4211 && thread->last_resume_kind != resume_stop)
4212 {
4213 threads_debug_printf
4214 ("already %s LWP %ld at GDB's request",
4215 (thread->last_resume_kind == resume_step
4216 ? "stepping" : "continuing"),
4217 lwpid_of (thread));
4218 continue;
4219 }
4220
4221 /* Don't let wildcard resumes resume fork children that GDB
4222 does not yet know are new fork children. */
4223 if (lwp->fork_relative != NULL)
4224 {
4225 struct lwp_info *rel = lwp->fork_relative;
4226
4227 if (rel->status_pending_p
4228 && (rel->waitstatus.kind () == TARGET_WAITKIND_FORKED
4229 || rel->waitstatus.kind () == TARGET_WAITKIND_VFORKED))
4230 {
4231 threads_debug_printf
4232 ("not resuming LWP %ld: has queued stop reply",
4233 lwpid_of (thread));
4234 continue;
4235 }
4236 }
4237
4238 /* If the thread has a pending event that has already been
4239 reported to GDBserver core, but GDB has not pulled the
4240 event out of the vStopped queue yet, likewise, ignore the
4241 (wildcard) resume request. */
4242 if (in_queued_stop_replies (thread->id))
4243 {
4244 threads_debug_printf
4245 ("not resuming LWP %ld: has queued stop reply",
4246 lwpid_of (thread));
4247 continue;
4248 }
4249
4250 lwp->resume = &resume[ndx];
4251 thread->last_resume_kind = lwp->resume->kind;
4252
4253 lwp->step_range_start = lwp->resume->step_range_start;
4254 lwp->step_range_end = lwp->resume->step_range_end;
4255
4256 /* If we had a deferred signal to report, dequeue one now.
4257 This can happen if LWP gets more than one signal while
4258 trying to get out of a jump pad. */
4259 if (lwp->stopped
4260 && !lwp->status_pending_p
4261 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4262 {
4263 lwp->status_pending_p = 1;
4264
4265 threads_debug_printf
4266 ("Dequeueing deferred signal %d for LWP %ld, "
4267 "leaving status pending.",
4268 WSTOPSIG (lwp->status_pending),
4269 lwpid_of (thread));
4270 }
4271
4272 return;
4273 }
4274 }
4275
4276 /* No resume action for this thread. */
4277 lwp->resume = NULL;
4278 }
4279
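/* Return true if THREAD's LWP is going to be resumed and still has a
   pending status to report; used to decide whether to leave all
   threads stopped.  */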
4280 bool
4281 linux_process_target::resume_status_pending (thread_info *thread)
4282 {
4283 struct lwp_info *lwp = get_thread_lwp (thread);
4284
4285 /* LWPs which will not be resumed are not interesting, because
4286 we might not wait for them next time through linux_wait. */
4287 if (lwp->resume == NULL)
4288 return false;
4289
4290 return thread_still_has_status_pending (thread);
4291 }
4292
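/* Return true if THREAD's LWP is stopped at a breakpoint that needs
   stepping over before it can be resumed, and it is safe to do so
   now.  */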
4293 bool
4294 linux_process_target::thread_needs_step_over (thread_info *thread)
4295 {
4296 struct lwp_info *lwp = get_thread_lwp (thread);
4297 CORE_ADDR pc;
4298 struct process_info *proc = get_thread_process (thread);
4299
4300 /* GDBserver is skipping the extra traps from the wrapper program,
4301 don't have to do step over. */
4302 if (proc->tdesc == NULL)
4303 return false;
4304
4305 /* LWPs which will not be resumed are not interesting, because we
4306 might not wait for them next time through linux_wait. */
4307
4308 if (!lwp->stopped)
4309 {
4310 threads_debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped",
4311 lwpid_of (thread));
4312 return false;
4313 }
4314
4315 if (thread->last_resume_kind == resume_stop)
4316 {
4317 threads_debug_printf
4318 ("Need step over [LWP %ld]? Ignoring, should remain stopped",
4319 lwpid_of (thread));
4320 return false;
4321 }
4322
4323 gdb_assert (lwp->suspended >= 0);
4324
4325 if (lwp->suspended)
4326 {
4327 threads_debug_printf ("Need step over [LWP %ld]? Ignoring, suspended",
4328 lwpid_of (thread));
4329 return false;
4330 }
4331
4332 if (lwp->status_pending_p)
4333 {
4334 threads_debug_printf
4335 ("Need step over [LWP %ld]? Ignoring, has pending status.",
4336 lwpid_of (thread));
4337 return false;
4338 }
4339
4340 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4341 or we have. */
4342 pc = get_pc (lwp);
4343
4344 /* If the PC has changed since we stopped, then don't do anything,
4345 and let the breakpoint/tracepoint be hit. This happens if, for
4346 instance, GDB handled the decr_pc_after_break subtraction itself,
4347 GDB is OOL stepping this thread, or the user has issued a "jump"
4348 command, or poked thread's registers herself. */
4349 if (pc != lwp->stop_pc)
4350 {
4351 threads_debug_printf
4352 ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4353 "Old stop_pc was 0x%s, PC is now 0x%s", lwpid_of (thread),
4354 paddress (lwp->stop_pc), paddress (pc));
4355 return false;
4356 }
4357
4358 /* On software single step target, resume the inferior with signal
4359 rather than stepping over. */
4360 if (supports_software_single_step ()
4361 && !lwp->pending_signals.empty ()
4362 && lwp_signal_can_be_delivered (lwp))
4363 {
4364 threads_debug_printf
4365 ("Need step over [LWP %ld]? Ignoring, has pending signals.",
4366 lwpid_of (thread));
4367
4368 return false;
4369 }
4370
4371 scoped_restore_current_thread restore_thread;
4372 switch_to_thread (thread);
4373
4374 /* We can only step over breakpoints we know about. */
4375 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4376 {
4377 /* Don't step over a breakpoint that GDB expects to hit
4378 though. If the condition is being evaluated on the target's side
4379 and it evaluates to false, step over this breakpoint as well. */
4380 if (gdb_breakpoint_here (pc)
4381 && gdb_condition_true_at_breakpoint (pc)
4382 && gdb_no_commands_at_breakpoint (pc))
4383 {
4384 threads_debug_printf ("Need step over [LWP %ld]? yes, but found"
4385 " GDB breakpoint at 0x%s; skipping step over",
4386 lwpid_of (thread), paddress (pc));
4387
4388 return false;
4389 }
4390 else
4391 {
4392 threads_debug_printf ("Need step over [LWP %ld]? yes, "
4393 "found breakpoint at 0x%s",
4394 lwpid_of (thread), paddress (pc));
4395
4396 /* We've found an lwp that needs stepping over --- return true so
4397    that find_thread stops looking. */
4398 return true;
4399 }
4400 }
4401
4402 threads_debug_printf
4403 ("Need step over [LWP %ld]? No, no breakpoint found at 0x%s",
4404 lwpid_of (thread), paddress (pc));
4405
4406 return false;
4407 }
4408
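/* Start a step-over operation on LWP: stop and suspend all other
   LWPs, uninsert the breakpoint being stepped over, single-step the
   LWP, and record in step_over_bkpt that we require the next event
   from this LWP.  */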
4409 void
4410 linux_process_target::start_step_over (lwp_info *lwp)
4411 {
4412 struct thread_info *thread = get_lwp_thread (lwp);
4413 CORE_ADDR pc;
4414
4415 threads_debug_printf ("Starting step-over on LWP %ld. Stopping all threads",
4416 lwpid_of (thread));
4417
4418 stop_all_lwps (1, lwp);
4419
4420 if (lwp->suspended != 0)
4421 {
4422 internal_error (__FILE__, __LINE__,
4423 "LWP %ld suspended=%d\n", lwpid_of (thread),
4424 lwp->suspended);
4425 }
4426
4427 threads_debug_printf ("Done stopping all threads for step-over.");
4428
4429 /* Note, we should always reach here with an already adjusted PC,
4430 either by GDB (if we're resuming due to GDB's request), or by our
4431 caller, if we just finished handling an internal breakpoint GDB
4432 shouldn't care about. */
4433 pc = get_pc (lwp);
4434
4435 bool step = false;
4436 {
4437 scoped_restore_current_thread restore_thread;
4438 switch_to_thread (thread);
4439
4440 lwp->bp_reinsert = pc;
4441 uninsert_breakpoints_at (pc);
4442 uninsert_fast_tracepoint_jumps_at (pc);
4443
4444 step = single_step (lwp);
4445 }
4446
4447 resume_one_lwp (lwp, step, 0, NULL);
4448
4449 /* Require next event from this LWP. */
4450 step_over_bkpt = thread->id;
4451 }
4452
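/* If a step-over was started on LWP, finish it: reinsert the
   breakpoint that was being stepped over, delete any software
   single-step breakpoints, and clear step_over_bkpt.  Return true if
   a step-over was indeed in progress.  */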
4453 bool
4454 linux_process_target::finish_step_over (lwp_info *lwp)
4455 {
4456 if (lwp->bp_reinsert != 0)
4457 {
4458 scoped_restore_current_thread restore_thread;
4459
4460 threads_debug_printf ("Finished step over.");
4461
4462 switch_to_thread (get_lwp_thread (lwp));
4463
4464 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4465 may be no breakpoint to reinsert there by now. */
4466 reinsert_breakpoints_at (lwp->bp_reinsert);
4467 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4468
4469 lwp->bp_reinsert = 0;
4470
4471 /* Delete any single-step breakpoints. No longer needed. We
4472 don't have to worry about other threads hitting this trap,
4473 and later not being able to explain it, because we were
4474 stepping over a breakpoint, and we hold all threads but
4475 LWP stopped while doing that. */
4476 if (!supports_hardware_single_step ())
4477 {
4478 gdb_assert (has_single_step_breakpoints (current_thread));
4479 delete_single_step_breakpoints (current_thread);
4480 }
4481
4482 step_over_bkpt = null_ptid;
4483 return true;
4484 }
4485 else
4486 return false;
4487 }
4488
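/* If a step-over is in progress, wait for it to finish and clean it
   up, discarding the resulting SIGTRAP if GDB did not ask for the
   step.  Called e.g. when preparing to detach.  */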
4489 void
4490 linux_process_target::complete_ongoing_step_over ()
4491 {
4492 if (step_over_bkpt != null_ptid)
4493 {
4494 struct lwp_info *lwp;
4495 int wstat;
4496 int ret;
4497
4498 threads_debug_printf ("detach: step over in progress, finish it first");
4499
4500 /* Passing NULL_PTID as filter indicates we want all events to
4501 be left pending. Eventually this returns when there are no
4502 unwaited-for children left. */
4503 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
4504 __WALL);
4505 gdb_assert (ret == -1);
4506
4507 lwp = find_lwp_pid (step_over_bkpt);
4508 if (lwp != NULL)
4509 {
4510 finish_step_over (lwp);
4511
4512 /* If we got our step SIGTRAP, don't leave it pending,
4513 otherwise we would report it to GDB as a spurious
4514 SIGTRAP. */
4515 gdb_assert (lwp->status_pending_p);
4516 if (WIFSTOPPED (lwp->status_pending)
4517 && WSTOPSIG (lwp->status_pending) == SIGTRAP)
4518 {
4519 thread_info *thread = get_lwp_thread (lwp);
4520 if (thread->last_resume_kind != resume_step)
4521 {
4522 threads_debug_printf ("detach: discard step-over SIGTRAP");
4523
4524 lwp->status_pending_p = 0;
4525 lwp->status_pending = 0;
4526 resume_one_lwp (lwp, lwp->stepping, 0, NULL);
4527 }
4528 else
4529 threads_debug_printf
4530 ("detach: resume_step, not discarding step-over SIGTRAP");
4531 }
4532 }
4533 step_over_bkpt = null_ptid;
4534 unsuspend_all_lwps (lwp);
4535 }
4536 }
4537
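/* Carry out the resume request recorded for THREAD's LWP: stop it, or
   resume it after queueing any signal to be delivered.  If
   LEAVE_ALL_STOPPED, only queue signals and leave the LWP stopped.  */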
4538 void
4539 linux_process_target::resume_one_thread (thread_info *thread,
4540 bool leave_all_stopped)
4541 {
4542 struct lwp_info *lwp = get_thread_lwp (thread);
4543 int leave_pending;
4544
4545 if (lwp->resume == NULL)
4546 return;
4547
4548 if (lwp->resume->kind == resume_stop)
4549 {
4550 threads_debug_printf ("resume_stop request for LWP %ld",
4551 lwpid_of (thread));
4552
4553 if (!lwp->stopped)
4554 {
4555 threads_debug_printf ("stopping LWP %ld", lwpid_of (thread));
4556
4557 /* Stop the thread, and wait for the event asynchronously,
4558 through the event loop. */
4559 send_sigstop (lwp);
4560 }
4561 else
4562 {
4563 threads_debug_printf ("already stopped LWP %ld", lwpid_of (thread));
4564
4565 /* The LWP may have been stopped in an internal event that
4566 was not meant to be notified back to GDB (e.g., gdbserver
4567 breakpoint), so we should be reporting a stop event in
4568 this case too. */
4569
4570 /* If the thread already has a pending SIGSTOP, this is a
4571 no-op. Otherwise, something later will presumably resume
4572 the thread and this will cause it to cancel any pending
4573 operation, due to last_resume_kind == resume_stop. If
4574 the thread already has a pending status to report, we
4575 will still report it the next time we wait - see
4576 status_pending_p_callback. */
4577
4578 /* If we already have a pending signal to report, then
4579 there's no need to queue a SIGSTOP, as this means we're
4580 midway through moving the LWP out of the jumppad, and we
4581 will report the pending signal as soon as that is
4582 finished. */
4583 if (lwp->pending_signals_to_report.empty ())
4584 send_sigstop (lwp);
4585 }
4586
4587 /* For stop requests, we're done. */
4588 lwp->resume = NULL;
4589 thread->last_status.set_ignore ();
4590 return;
4591 }
4592
4593 /* If this thread, which is about to be resumed, has a pending status,
4594    then don't resume it - we can just report the pending status.
4595    Likewise if it is suspended, because e.g., another thread is
4596    stepping past a breakpoint.  Make sure to queue any signals that
4597    would otherwise be sent.  In all-stop mode, we make this decision
4598    based on whether *any* thread has a pending status.  If there's a
4599    thread that needs the step-over-breakpoint dance, then don't
4600    resume any other thread but that particular one.  */
4601 leave_pending = (lwp->suspended
4602 || lwp->status_pending_p
4603 || leave_all_stopped);
4604
4605 /* If we have a new signal, enqueue the signal. */
4606 if (lwp->resume->sig != 0)
4607 {
4608 siginfo_t info, *info_p;
4609
4610 /* If this is the same signal we were previously stopped by,
4611 make sure to queue its siginfo. */
4612 if (WIFSTOPPED (lwp->last_status)
4613 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
4614 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
4615 (PTRACE_TYPE_ARG3) 0, &info) == 0)
4616 info_p = &info;
4617 else
4618 info_p = NULL;
4619
4620 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
4621 }
4622
4623 if (!leave_pending)
4624 {
4625 threads_debug_printf ("resuming LWP %ld", lwpid_of (thread));
4626
4627 proceed_one_lwp (thread, NULL);
4628 }
4629 else
4630 threads_debug_printf ("leaving LWP %ld stopped", lwpid_of (thread));
4631
4632 thread->last_status.set_ignore ();
4633 lwp->resume = NULL;
4634 }
4635
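/* Implement the resume target_ops method: record the RESUME_INFO
   requests on each thread and carry them out, unless a pending status
   or a needed step-over requires leaving everything stopped.  */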
4636 void
4637 linux_process_target::resume (thread_resume *resume_info, size_t n)
4638 {
4639 struct thread_info *need_step_over = NULL;
4640
4641 THREADS_SCOPED_DEBUG_ENTER_EXIT;
4642
4643 for_each_thread ([&] (thread_info *thread)
4644 {
4645 linux_set_resume_request (thread, resume_info, n);
4646 });
4647
4648 /* If there is a thread which would otherwise be resumed, which has
4649 a pending status, then don't resume any threads - we can just
4650 report the pending status. Make sure to queue any signals that
4651 would otherwise be sent. In non-stop mode, we'll apply this
4652 logic to each thread individually. We consume all pending events
4653 before considering whether to start a step-over (in all-stop).  */
4654 bool any_pending = false;
4655 if (!non_stop)
4656 any_pending = find_thread ([this] (thread_info *thread)
4657 {
4658 return resume_status_pending (thread);
4659 }) != nullptr;
4660
4661 /* If there is a thread which would otherwise be resumed, which is
4662 stopped at a breakpoint that needs stepping over, then don't
4663 resume any threads - have it step over the breakpoint with all
4664 other threads stopped, then resume all threads again. Make sure
4665 to queue any signals that would otherwise be delivered or
4666 queued. */
4667 if (!any_pending && low_supports_breakpoints ())
4668 need_step_over = find_thread ([this] (thread_info *thread)
4669 {
4670 return thread_needs_step_over (thread);
4671 });
4672
4673 bool leave_all_stopped = (need_step_over != NULL || any_pending);
4674
4675 if (need_step_over != NULL)
4676 threads_debug_printf ("Not resuming all, need step over");
4677 else if (any_pending)
4678 threads_debug_printf ("Not resuming, all-stop and found "
4679 "an LWP with pending status");
4680 else
4681 threads_debug_printf ("Resuming, no pending status or step over needed");
4682
4683 /* Even if we're leaving threads stopped, queue all signals we'd
4684 otherwise deliver. */
4685 for_each_thread ([&] (thread_info *thread)
4686 {
4687 resume_one_thread (thread, leave_all_stopped);
4688 });
4689
4690 if (need_step_over)
4691 start_step_over (get_thread_lwp (need_step_over));
4692
4693 /* We may have events that were pending that can/should be sent to
4694 the client now. Trigger a linux_wait call. */
4695 if (target_is_async_p ())
4696 async_file_mark ();
4697 }
4698
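/* Proceed with THREAD's LWP according to its last resume request,
   unless it is EXCEPT, already running, suspended, or has a pending
   status or stop request that must be preserved.  */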
4699 void
4700 linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except)
4701 {
4702 struct lwp_info *lwp = get_thread_lwp (thread);
4703 int step;
4704
4705 if (lwp == except)
4706 return;
4707
4708 threads_debug_printf ("lwp %ld", lwpid_of (thread));
4709
4710 if (!lwp->stopped)
4711 {
4712 threads_debug_printf (" LWP %ld already running", lwpid_of (thread));
4713 return;
4714 }
4715
4716 if (thread->last_resume_kind == resume_stop
4717 && thread->last_status.kind () != TARGET_WAITKIND_IGNORE)
4718 {
4719 threads_debug_printf (" client wants LWP %ld to remain stopped",
4720 lwpid_of (thread));
4721 return;
4722 }
4723
4724 if (lwp->status_pending_p)
4725 {
4726 threads_debug_printf (" LWP %ld has pending status, leaving stopped",
4727 lwpid_of (thread));
4728 return;
4729 }
4730
4731 gdb_assert (lwp->suspended >= 0);
4732
4733 if (lwp->suspended)
4734 {
4735 threads_debug_printf (" LWP %ld is suspended", lwpid_of (thread));
4736 return;
4737 }
4738
4739 if (thread->last_resume_kind == resume_stop
4740 && lwp->pending_signals_to_report.empty ()
4741 && (lwp->collecting_fast_tracepoint
4742 == fast_tpoint_collect_result::not_collecting))
4743 {
4744 /* We haven't reported this LWP as stopped yet (otherwise, the
4745 last_status.kind check above would catch it, and we wouldn't
4746 reach here).  This LWP may have been momentarily paused by a
4747 stop_all_lwps call while handling, for example, another LWP's
4748 step-over. In that case, the pending expected SIGSTOP signal
4749 that was queued at vCont;t handling time will have already
4750 been consumed by wait_for_sigstop, and so we need to requeue
4751 another one here. Note that if the LWP already has a SIGSTOP
4752 pending, this is a no-op. */
4753
4754 threads_debug_printf
4755 ("Client wants LWP %ld to stop. Making sure it has a SIGSTOP pending",
4756 lwpid_of (thread));
4757
4758 send_sigstop (lwp);
4759 }
4760
4761 if (thread->last_resume_kind == resume_step)
4762 {
4763 threads_debug_printf (" stepping LWP %ld, client wants it stepping",
4764 lwpid_of (thread));
4765
4766 /* If resume_step is requested by GDB, install single-step
4767 breakpoints when the thread is about to be actually resumed, if
4768 they haven't already been installed.  */
4769 if (supports_software_single_step ()
4770 && !has_single_step_breakpoints (thread))
4771 install_software_single_step_breakpoints (lwp);
4772
4773 step = maybe_hw_step (thread);
4774 }
4775 else if (lwp->bp_reinsert != 0)
4776 {
4777 threads_debug_printf (" stepping LWP %ld, reinsert set",
4778 lwpid_of (thread));
4779
4780 step = maybe_hw_step (thread);
4781 }
4782 else
4783 step = 0;
4784
4785 resume_one_lwp (lwp, step, 0, NULL);
4786 }
4787
4788 void
4789 linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread,
4790 lwp_info *except)
4791 {
4792 struct lwp_info *lwp = get_thread_lwp (thread);
4793
4794 if (lwp == except)
4795 return;
4796
4797 lwp_suspended_decr (lwp);
4798
4799 proceed_one_lwp (thread, except);
4800 }
4801
4802 void
4803 linux_process_target::proceed_all_lwps ()
4804 {
4805 struct thread_info *need_step_over;
4806
4807 /* If there is a thread which would otherwise be resumed, which is
4808 stopped at a breakpoint that needs stepping over, then don't
4809 resume any threads - have it step over the breakpoint with all
4810 other threads stopped, then resume all threads again. */
4811
4812 if (low_supports_breakpoints ())
4813 {
4814 need_step_over = find_thread ([this] (thread_info *thread)
4815 {
4816 return thread_needs_step_over (thread);
4817 });
4818
4819 if (need_step_over != NULL)
4820 {
4821 threads_debug_printf ("found thread %ld needing a step-over",
4822 lwpid_of (need_step_over));
4823
4824 start_step_over (get_thread_lwp (need_step_over));
4825 return;
4826 }
4827 }
4828
4829 threads_debug_printf ("Proceeding, no step-over needed");
4830
4831 for_each_thread ([this] (thread_info *thread)
4832 {
4833 proceed_one_lwp (thread, NULL);
4834 });
4835 }
4836
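/* Undo stop_all_lwps: resume all LWPs except EXCEPT, first
   decrementing their suspend counts if UNSUSPEND is set.  */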
4837 void
4838 linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
4839 {
4840 THREADS_SCOPED_DEBUG_ENTER_EXIT;
4841
4842 if (except)
4843 threads_debug_printf ("except=(LWP %ld)",
4844 lwpid_of (get_lwp_thread (except)));
4845 else
4846 threads_debug_printf ("except=nullptr");
4847
4848 if (unsuspend)
4849 for_each_thread ([&] (thread_info *thread)
4850 {
4851 unsuspend_and_proceed_one_lwp (thread, except);
4852 });
4853 else
4854 for_each_thread ([&] (thread_info *thread)
4855 {
4856 proceed_one_lwp (thread, except);
4857 });
4858 }
4859
4860
4861 #ifdef HAVE_LINUX_REGSETS
4862
4863 #define use_linux_regsets 1
4864
4865 /* Returns true if REGSET has been disabled. */
4866
4867 static int
4868 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4869 {
4870 return (info->disabled_regsets != NULL
4871 && info->disabled_regsets[regset - info->regsets]);
4872 }
4873
4874 /* Disable REGSET. */
4875
4876 static void
4877 disable_regset (struct regsets_info *info, struct regset_info *regset)
4878 {
4879 int dr_offset;
4880
4881 dr_offset = regset - info->regsets;
4882 if (info->disabled_regsets == NULL)
4883 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
4884 info->disabled_regsets[dr_offset] = 1;
4885 }
4886
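/* Fetch all registers covered by kernel regsets into REGCACHE.
   Return 0 if the general registers were among them, or 1 if they
   still have to be fetched individually.  */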
4887 static int
4888 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4889 struct regcache *regcache)
4890 {
4891 struct regset_info *regset;
4892 int saw_general_regs = 0;
4893 int pid;
4894 struct iovec iov;
4895
4896 pid = lwpid_of (current_thread);
4897 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4898 {
4899 void *buf, *data;
4900 int nt_type, res;
4901
4902 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4903 continue;
4904
4905 buf = xmalloc (regset->size);
4906
4907 nt_type = regset->nt_type;
4908 if (nt_type)
4909 {
4910 iov.iov_base = buf;
4911 iov.iov_len = regset->size;
4912 data = (void *) &iov;
4913 }
4914 else
4915 data = buf;
4916
4917 #ifndef __sparc__
4918 res = ptrace (regset->get_request, pid,
4919 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4920 #else
4921 res = ptrace (regset->get_request, pid, data, nt_type);
4922 #endif
4923 if (res < 0)
4924 {
4925 if (errno == EIO
4926 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
4927 {
4928 /* If we get EIO on a regset, or an EINVAL and the regset is
4929 optional, do not try it again for this process mode. */
4930 disable_regset (regsets_info, regset);
4931 }
4932 else if (errno == ENODATA)
4933 {
4934 /* ENODATA may be returned if the regset is currently
4935 not "active". This can happen in normal operation,
4936 so suppress the warning in this case. */
4937 }
4938 else if (errno == ESRCH)
4939 {
4940 /* At this point, ESRCH should mean the process is
4941 already gone, in which case we simply ignore attempts
4942 to read its registers. */
4943 }
4944 else
4945 {
4946 char s[256];
4947 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4948 pid);
4949 perror (s);
4950 }
4951 }
4952 else
4953 {
4954 if (regset->type == GENERAL_REGS)
4955 saw_general_regs = 1;
4956 regset->store_function (regcache, buf);
4957 }
4958 free (buf);
4959 }
4960 if (saw_general_regs)
4961 return 0;
4962 else
4963 return 1;
4964 }
4965
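/* Store all registers covered by kernel regsets from REGCACHE back
   into the inferior.  Return 0 if the general registers were among
   them, or 1 if they still have to be stored individually.  */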
4966 static int
4967 regsets_store_inferior_registers (struct regsets_info *regsets_info,
4968 struct regcache *regcache)
4969 {
4970 struct regset_info *regset;
4971 int saw_general_regs = 0;
4972 int pid;
4973 struct iovec iov;
4974
4975 pid = lwpid_of (current_thread);
4976 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4977 {
4978 void *buf, *data;
4979 int nt_type, res;
4980
4981 if (regset->size == 0 || regset_disabled (regsets_info, regset)
4982 || regset->fill_function == NULL)
4983 continue;
4984
4985 buf = xmalloc (regset->size);
4986
4987 /* First fill the buffer with the current register set contents,
4988 in case there are any items in the kernel's regset that are
4989 not in gdbserver's regcache. */
4990
4991 nt_type = regset->nt_type;
4992 if (nt_type)
4993 {
4994 iov.iov_base = buf;
4995 iov.iov_len = regset->size;
4996 data = (void *) &iov;
4997 }
4998 else
4999 data = buf;
5000
5001 #ifndef __sparc__
5002 res = ptrace (regset->get_request, pid,
5003 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5004 #else
5005 res = ptrace (regset->get_request, pid, data, nt_type);
5006 #endif
5007
5008 if (res == 0)
5009 {
5010 /* Then overlay our cached registers on that. */
5011 regset->fill_function (regcache, buf);
5012
5013 /* Only now do we write the register set. */
5014 #ifndef __sparc__
5015 res = ptrace (regset->set_request, pid,
5016 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5017 #else
5018 res = ptrace (regset->set_request, pid, data, nt_type);
5019 #endif
5020 }
5021
5022 if (res < 0)
5023 {
5024 if (errno == EIO
5025 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5026 {
5027 /* If we get EIO on a regset, or an EINVAL and the regset is
5028 optional, do not try it again for this process mode. */
5029 disable_regset (regsets_info, regset);
5030 }
5031 else if (errno == ESRCH)
5032 {
5033 /* At this point, ESRCH should mean the process is
5034 already gone, in which case we simply ignore attempts
5035 to change its registers. See also the related
5036 comment in resume_one_lwp. */
5037 free (buf);
5038 return 0;
5039 }
5040 else
5041 {
5042 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5043 }
5044 }
5045 else if (regset->type == GENERAL_REGS)
5046 saw_general_regs = 1;
5047 free (buf);
5048 }
5049 if (saw_general_regs)
5050 return 0;
5051 else
5052 return 1;
5053 }
5054
5055 #else /* !HAVE_LINUX_REGSETS */
5056
5057 #define use_linux_regsets 0
5058 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5059 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5060
5061 #endif
5062
5063 /* Return 1 if register REGNO is supported by one of the regset ptrace
5064 calls or 0 if it has to be transferred individually. */
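/* For illustration (not in the original sources): regno == 10 yields
   index == 1 and mask == 0x04 (1 << 2), so bit 2 of regset_bitmap[1]
   is what decides.  */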
5065
5066 static int
5067 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5068 {
5069 unsigned char mask = 1 << (regno % 8);
5070 size_t index = regno / 8;
5071
5072 return (use_linux_regsets
5073 && (regs_info->regset_bitmap == NULL
5074 || (regs_info->regset_bitmap[index] & mask) != 0));
5075 }
5076
5077 #ifdef HAVE_LINUX_USRREGS
5078
5079 static int
5080 register_addr (const struct usrregs_info *usrregs, int regnum)
5081 {
5082 int addr;
5083
5084 if (regnum < 0 || regnum >= usrregs->num_regs)
5085 error ("Invalid register number %d.", regnum);
5086
5087 addr = usrregs->regmap[regnum];
5088
5089 return addr;
5090 }
5091
5092
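/* Fetch register REGNO from the inferior's "user area" with
   PTRACE_PEEKUSER and supply its value to REGCACHE.  */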
5093 void
5094 linux_process_target::fetch_register (const usrregs_info *usrregs,
5095 regcache *regcache, int regno)
5096 {
5097 CORE_ADDR regaddr;
5098 int i, size;
5099 char *buf;
5100 int pid;
5101
5102 if (regno >= usrregs->num_regs)
5103 return;
5104 if (low_cannot_fetch_register (regno))
5105 return;
5106
5107 regaddr = register_addr (usrregs, regno);
5108 if (regaddr == -1)
5109 return;
5110
5111 size = ((register_size (regcache->tdesc, regno)
5112 + sizeof (PTRACE_XFER_TYPE) - 1)
5113 & -sizeof (PTRACE_XFER_TYPE));
5114 buf = (char *) alloca (size);
5115
5116 pid = lwpid_of (current_thread);
5117 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5118 {
5119 errno = 0;
5120 *(PTRACE_XFER_TYPE *) (buf + i) =
5121 ptrace (PTRACE_PEEKUSER, pid,
5122 /* Coerce to a uintptr_t first to avoid potential gcc warning
5123 of coercing an 8 byte integer to a 4 byte pointer. */
5124 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5125 regaddr += sizeof (PTRACE_XFER_TYPE);
5126 if (errno != 0)
5127 {
5128 /* Mark register REGNO unavailable. */
5129 supply_register (regcache, regno, NULL);
5130 return;
5131 }
5132 }
5133
5134 low_supply_ptrace_register (regcache, regno, buf);
5135 }
5136
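/* Collect register REGNO from REGCACHE and write it into the
   inferior's "user area" with PTRACE_POKEUSER.  */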
5137 void
5138 linux_process_target::store_register (const usrregs_info *usrregs,
5139 regcache *regcache, int regno)
5140 {
5141 CORE_ADDR regaddr;
5142 int i, size;
5143 char *buf;
5144 int pid;
5145
5146 if (regno >= usrregs->num_regs)
5147 return;
5148 if (low_cannot_store_register (regno))
5149 return;
5150
5151 regaddr = register_addr (usrregs, regno);
5152 if (regaddr == -1)
5153 return;
5154
5155 size = ((register_size (regcache->tdesc, regno)
5156 + sizeof (PTRACE_XFER_TYPE) - 1)
5157 & -sizeof (PTRACE_XFER_TYPE));
5158 buf = (char *) alloca (size);
5159 memset (buf, 0, size);
5160
5161 low_collect_ptrace_register (regcache, regno, buf);
5162
5163 pid = lwpid_of (current_thread);
5164 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5165 {
5166 errno = 0;
5167 ptrace (PTRACE_POKEUSER, pid,
5168 /* Coerce to a uintptr_t first to avoid potential gcc warning
5169 about coercing an 8 byte integer to a 4 byte pointer. */
5170 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5171 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5172 if (errno != 0)
5173 {
5174 /* At this point, ESRCH should mean the process is
5175 already gone, in which case we simply ignore attempts
5176 to change its registers. See also the related
5177 comment in resume_one_lwp. */
5178 if (errno == ESRCH)
5179 return;
5180
5181
5182 if (!low_cannot_store_register (regno))
5183 error ("writing register %d: %s", regno, safe_strerror (errno));
5184 }
5185 regaddr += sizeof (PTRACE_XFER_TYPE);
5186 }
5187 }
5188 #endif /* HAVE_LINUX_USRREGS */
5189
5190 void
5191 linux_process_target::low_collect_ptrace_register (regcache *regcache,
5192 int regno, char *buf)
5193 {
5194 collect_register (regcache, regno, buf);
5195 }
5196
5197 void
5198 linux_process_target::low_supply_ptrace_register (regcache *regcache,
5199 int regno, const char *buf)
5200 {
5201 supply_register (regcache, regno, buf);
5202 }
5203
5204 void
5205 linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
5206 regcache *regcache,
5207 int regno, int all)
5208 {
5209 #ifdef HAVE_LINUX_USRREGS
5210 struct usrregs_info *usr = regs_info->usrregs;
5211
5212 if (regno == -1)
5213 {
5214 for (regno = 0; regno < usr->num_regs; regno++)
5215 if (all || !linux_register_in_regsets (regs_info, regno))
5216 fetch_register (usr, regcache, regno);
5217 }
5218 else
5219 fetch_register (usr, regcache, regno);
5220 #endif
5221 }
5222
5223 void
5224 linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
5225 regcache *regcache,
5226 int regno, int all)
5227 {
5228 #ifdef HAVE_LINUX_USRREGS
5229 struct usrregs_info *usr = regs_info->usrregs;
5230
5231 if (regno == -1)
5232 {
5233 for (regno = 0; regno < usr->num_regs; regno++)
5234 if (all || !linux_register_in_regsets (regs_info, regno))
5235 store_register (usr, regcache, regno);
5236 }
5237 else
5238 store_register (usr, regcache, regno);
5239 #endif
5240 }
5241
5242 void
5243 linux_process_target::fetch_registers (regcache *regcache, int regno)
5244 {
5245 int use_regsets;
5246 int all = 0;
5247 const regs_info *regs_info = get_regs_info ();
5248
5249 if (regno == -1)
5250 {
5251 if (regs_info->usrregs != NULL)
5252 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5253 low_fetch_register (regcache, regno);
5254
5255 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5256 if (regs_info->usrregs != NULL)
5257 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5258 }
5259 else
5260 {
5261 if (low_fetch_register (regcache, regno))
5262 return;
5263
5264 use_regsets = linux_register_in_regsets (regs_info, regno);
5265 if (use_regsets)
5266 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5267 regcache);
5268 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5269 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5270 }
5271 }
5272
5273 void
5274 linux_process_target::store_registers (regcache *regcache, int regno)
5275 {
5276 int use_regsets;
5277 int all = 0;
5278 const regs_info *regs_info = get_regs_info ();
5279
5280 if (regno == -1)
5281 {
5282 all = regsets_store_inferior_registers (regs_info->regsets_info,
5283 regcache);
5284 if (regs_info->usrregs != NULL)
5285 usr_store_inferior_registers (regs_info, regcache, regno, all);
5286 }
5287 else
5288 {
5289 use_regsets = linux_register_in_regsets (regs_info, regno);
5290 if (use_regsets)
5291 all = regsets_store_inferior_registers (regs_info->regsets_info,
5292 regcache);
5293 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5294 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5295 }
5296 }
5297
5298 bool
5299 linux_process_target::low_fetch_register (regcache *regcache, int regno)
5300 {
5301 return false;
5302 }
5303
5304 /* A wrapper for the read_memory target op. */
5305
5306 static int
5307 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5308 {
5309 return the_target->read_memory (memaddr, myaddr, len);
5310 }
5311
5312 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5313 to debugger memory starting at MYADDR. */
5314
5315 int
5316 linux_process_target::read_memory (CORE_ADDR memaddr,
5317 unsigned char *myaddr, int len)
5318 {
5319 int pid = lwpid_of (current_thread);
5320 PTRACE_XFER_TYPE *buffer;
5321 CORE_ADDR addr;
5322 int count;
5323 char filename[64];
5324 int i;
5325 int ret;
5326 int fd;
5327
5328 /* Try using /proc. Don't bother for one word. */
5329 if (len >= 3 * sizeof (long))
5330 {
5331 int bytes;
5332
5333 /* We could keep this file open and cache it - possibly one per
5334 thread. That requires some juggling, but is even faster. */
5335 sprintf (filename, "/proc/%d/mem", pid);
5336 fd = open (filename, O_RDONLY | O_LARGEFILE);
5337 if (fd == -1)
5338 goto no_proc;
5339
5340 /* If pread64 is available, use it. It's faster if the kernel
5341 supports it (only one syscall), and it's 64-bit safe even on
5342 32-bit platforms (for instance, SPARC debugging a SPARC64
5343 application). */
5344 #ifdef HAVE_PREAD64
5345 bytes = pread64 (fd, myaddr, len, memaddr);
5346 #else
5347 bytes = -1;
5348 if (lseek (fd, memaddr, SEEK_SET) != -1)
5349 bytes = read (fd, myaddr, len);
5350 #endif
5351
5352 close (fd);
5353 if (bytes == len)
5354 return 0;
5355
5356 /* Some data was read, we'll try to get the rest with ptrace. */
5357 if (bytes > 0)
5358 {
5359 memaddr += bytes;
5360 myaddr += bytes;
5361 len -= bytes;
5362 }
5363 }
5364
5365 no_proc:
5366 /* Round starting address down to longword boundary. */
5367 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5368 /* Round ending address up; get number of longwords that makes. */
5369 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5370 / sizeof (PTRACE_XFER_TYPE));
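  /* Worked example (illustrative): with an 8-byte PTRACE_XFER_TYPE,
     memaddr == 0x1003 and len == 6 give addr == 0x1000 and count == 2,
     i.e. the two longwords covering bytes 0x1000..0x100f.  */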
5371 /* Allocate buffer of that many longwords. */
5372 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5373
5374 /* Read all the longwords.  */
5375 errno = 0;
5376 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5377 {
5378 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5379 about coercing an 8 byte integer to a 4 byte pointer. */
5380 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5381 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5382 (PTRACE_TYPE_ARG4) 0);
5383 if (errno)
5384 break;
5385 }
5386 ret = errno;
5387
5388 /* Copy appropriate bytes out of the buffer. */
5389 if (i > 0)
5390 {
5391 i *= sizeof (PTRACE_XFER_TYPE);
5392 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5393 memcpy (myaddr,
5394 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5395 i < len ? i : len);
5396 }
5397
5398 return ret;
5399 }
5400
5401 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5402 memory at MEMADDR. On failure (cannot write to the inferior)
5403 returns the value of errno. Always succeeds if LEN is zero. */
5404
5405 int
5406 linux_process_target::write_memory (CORE_ADDR memaddr,
5407 const unsigned char *myaddr, int len)
5408 {
5409 int i;
5410 /* Round starting address down to longword boundary. */
5411 CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5412 /* Round ending address up; get number of longwords that makes. */
5413 int count
5414 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5415 / sizeof (PTRACE_XFER_TYPE);
5416
5417 /* Allocate buffer of that many longwords. */
5418 PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5419
5420 int pid = lwpid_of (current_thread);
5421
5422 if (len == 0)
5423 {
5424 /* Zero length write always succeeds. */
5425 return 0;
5426 }
5427
5428 if (debug_threads)
5429 {
5430 /* Dump up to four bytes. */
5431 char str[4 * 2 + 1];
5432 char *p = str;
5433 int dump = len < 4 ? len : 4;
5434
5435 for (i = 0; i < dump; i++)
5436 {
5437 sprintf (p, "%02x", myaddr[i]);
5438 p += 2;
5439 }
5440 *p = '\0';
5441
5442 threads_debug_printf ("Writing %s to 0x%08lx in process %d",
5443 str, (long) memaddr, pid);
5444 }
5445
5446 /* Fill start and end extra bytes of buffer with existing memory data. */
5447
5448 errno = 0;
5449 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5450 about coercing an 8 byte integer to a 4 byte pointer. */
5451 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5452 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5453 (PTRACE_TYPE_ARG4) 0);
5454 if (errno)
5455 return errno;
5456
5457 if (count > 1)
5458 {
5459 errno = 0;
5460 buffer[count - 1]
5461 = ptrace (PTRACE_PEEKTEXT, pid,
5462 /* Coerce to a uintptr_t first to avoid potential gcc warning
5463 about coercing an 8 byte integer to a 4 byte pointer. */
5464 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5465 * sizeof (PTRACE_XFER_TYPE)),
5466 (PTRACE_TYPE_ARG4) 0);
5467 if (errno)
5468 return errno;
5469 }
5470
5471 /* Copy data to be written over corresponding part of buffer. */
5472
5473 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5474 myaddr, len);
5475
5476 /* Write the entire buffer. */
5477
5478 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5479 {
5480 errno = 0;
5481 ptrace (PTRACE_POKETEXT, pid,
5482 /* Coerce to a uintptr_t first to avoid potential gcc warning
5483 about coercing an 8 byte integer to a 4 byte pointer. */
5484 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5485 (PTRACE_TYPE_ARG4) buffer[i]);
5486 if (errno)
5487 return errno;
5488 }
5489
5490 return 0;
5491 }
5492
5493 void
5494 linux_process_target::look_up_symbols ()
5495 {
5496 #ifdef USE_THREAD_DB
5497 struct process_info *proc = current_process ();
5498
5499 if (proc->priv->thread_db != NULL)
5500 return;
5501
5502 thread_db_init ();
5503 #endif
5504 }
5505
5506 void
5507 linux_process_target::request_interrupt ()
5508 {
5509 /* Send a SIGINT to the process group. This acts just like the user
5510 typed a ^C on the controlling terminal. */
5511 ::kill (-signal_pid, SIGINT);
5512 }
5513
5514 bool
5515 linux_process_target::supports_read_auxv ()
5516 {
5517 return true;
5518 }
5519
5520 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5521 to debugger memory starting at MYADDR. */
5522
5523 int
5524 linux_process_target::read_auxv (CORE_ADDR offset, unsigned char *myaddr,
5525 unsigned int len)
5526 {
5527 char filename[PATH_MAX];
5528 int fd, n;
5529 int pid = lwpid_of (current_thread);
5530
5531 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5532
5533 fd = open (filename, O_RDONLY);
5534 if (fd < 0)
5535 return -1;
5536
5537 if (offset != (CORE_ADDR) 0
5538 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5539 n = -1;
5540 else
5541 n = read (fd, myaddr, len);
5542
5543 close (fd);
5544
5545 return n;
5546 }
5547
5548 int
5549 linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5550 int size, raw_breakpoint *bp)
5551 {
5552 if (type == raw_bkpt_type_sw)
5553 return insert_memory_breakpoint (bp);
5554 else
5555 return low_insert_point (type, addr, size, bp);
5556 }
5557
5558 int
5559 linux_process_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
5560 int size, raw_breakpoint *bp)
5561 {
5562 /* Unsupported (see target.h). */
5563 return 1;
5564 }
5565
5566 int
5567 linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5568 int size, raw_breakpoint *bp)
5569 {
5570 if (type == raw_bkpt_type_sw)
5571 return remove_memory_breakpoint (bp);
5572 else
5573 return low_remove_point (type, addr, size, bp);
5574 }
5575
5576 int
5577 linux_process_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
5578 int size, raw_breakpoint *bp)
5579 {
5580 /* Unsupported (see target.h). */
5581 return 1;
5582 }
5583
5584 /* Implement the stopped_by_sw_breakpoint target_ops
5585 method. */
5586
5587 bool
5588 linux_process_target::stopped_by_sw_breakpoint ()
5589 {
5590 struct lwp_info *lwp = get_thread_lwp (current_thread);
5591
5592 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5593 }
5594
5595 /* Implement the supports_stopped_by_sw_breakpoint target_ops
5596 method. */
5597
5598 bool
5599 linux_process_target::supports_stopped_by_sw_breakpoint ()
5600 {
5601 return USE_SIGTRAP_SIGINFO;
5602 }
5603
5604 /* Implement the stopped_by_hw_breakpoint target_ops
5605 method. */
5606
5607 bool
5608 linux_process_target::stopped_by_hw_breakpoint ()
5609 {
5610 struct lwp_info *lwp = get_thread_lwp (current_thread);
5611
5612 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5613 }
5614
5615 /* Implement the supports_stopped_by_hw_breakpoint target_ops
5616 method. */
5617
5618 bool
5619 linux_process_target::supports_stopped_by_hw_breakpoint ()
5620 {
5621 return USE_SIGTRAP_SIGINFO;
5622 }
5623
5624 /* Implement the supports_hardware_single_step target_ops method. */
5625
5626 bool
5627 linux_process_target::supports_hardware_single_step ()
5628 {
5629 return true;
5630 }
5631
5632 bool
5633 linux_process_target::stopped_by_watchpoint ()
5634 {
5635 struct lwp_info *lwp = get_thread_lwp (current_thread);
5636
5637 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5638 }
5639
5640 CORE_ADDR
5641 linux_process_target::stopped_data_address ()
5642 {
5643 struct lwp_info *lwp = get_thread_lwp (current_thread);
5644
5645 return lwp->stopped_data_address;
5646 }
5647
5648 /* This is only used for targets that define PT_TEXT_ADDR,
5649    PT_DATA_ADDR and PT_TEXT_END_ADDR.  If those are not defined, the
5650    target presumably has other ways of acquiring this information,
5651    such as loadmaps.  */
5652
5653 bool
5654 linux_process_target::supports_read_offsets ()
5655 {
5656 #ifdef SUPPORTS_READ_OFFSETS
5657 return true;
5658 #else
5659 return false;
5660 #endif
5661 }
5662
5663 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5664 to tell gdb about. */
5665
5666 int
5667 linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5668 {
5669 #ifdef SUPPORTS_READ_OFFSETS
5670 unsigned long text, text_end, data;
5671 int pid = lwpid_of (current_thread);
5672
5673 errno = 0;
5674
5675 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5676 (PTRACE_TYPE_ARG4) 0);
5677 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5678 (PTRACE_TYPE_ARG4) 0);
5679 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5680 (PTRACE_TYPE_ARG4) 0);
5681
5682 if (errno == 0)
5683 {
5684 /* Both text and data offsets produced at compile-time (and so
5685 used by gdb) are relative to the beginning of the program,
5686 with the data segment immediately following the text segment.
5687 However, the actual runtime layout in memory may put the data
5688 somewhere else, so when we send gdb a data base-address, we
5689 use the real data base address and subtract the compile-time
5690 data base-address from it (which is just the length of the
5691 text segment). BSS immediately follows data in both
5692 cases. */
5693 *text_p = text;
5694 *data_p = data - (text_end - text);
5695
5696 return 1;
5697 }
5698 return 0;
5699 #else
5700 gdb_assert_not_reached ("target op read_offsets not supported");
5701 #endif
5702 }
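
/* A worked example (illustrative numbers only): if the kernel reports
   text = 0x10000000, text_end = 0x10008000 and the data segment mapped
   at 0x20000000, the text segment is 0x8000 bytes long, so the data
   base-address sent to GDB is 0x20000000 - 0x8000 = 0x1fff8000; adding
   GDB's compile-time data offsets (which include the text length) then
   yields the correct runtime addresses.  */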
5703
5704 bool
5705 linux_process_target::supports_get_tls_address ()
5706 {
5707 #ifdef USE_THREAD_DB
5708 return true;
5709 #else
5710 return false;
5711 #endif
5712 }
5713
5714 int
5715 linux_process_target::get_tls_address (thread_info *thread,
5716 CORE_ADDR offset,
5717 CORE_ADDR load_module,
5718 CORE_ADDR *address)
5719 {
5720 #ifdef USE_THREAD_DB
5721 return thread_db_get_tls_address (thread, offset, load_module, address);
5722 #else
5723 return -1;
5724 #endif
5725 }
5726
5727 bool
5728 linux_process_target::supports_qxfer_osdata ()
5729 {
5730 return true;
5731 }
5732
5733 int
5734 linux_process_target::qxfer_osdata (const char *annex,
5735 unsigned char *readbuf,
5736 unsigned const char *writebuf,
5737 CORE_ADDR offset, int len)
5738 {
5739 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5740 }
5741
5742 void
5743 linux_process_target::siginfo_fixup (siginfo_t *siginfo,
5744 gdb_byte *inf_siginfo, int direction)
5745 {
5746 bool done = low_siginfo_fixup (siginfo, inf_siginfo, direction);
5747
5748 /* If there was no callback, or the callback didn't do anything,
5749 then just do a straight memcpy. */
5750 if (!done)
5751 {
5752 if (direction == 1)
5753 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5754 else
5755 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5756 }
5757 }
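
/* A minimal sketch of the DIRECTION convention, as exercised by
   qxfer_siginfo below: 0 converts the native siginfo into the
   inferior's layout (for reading), 1 converts the inferior layout back
   into the native one (before PTRACE_SETSIGINFO).

     siginfo_fixup (&siginfo, inf_siginfo, 0);  // native -> inferior
     // ... GDB reads or patches inf_siginfo ...
     siginfo_fixup (&siginfo, inf_siginfo, 1);  // inferior -> native
*/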
5758
5759 bool
5760 linux_process_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
5761 int direction)
5762 {
5763 return false;
5764 }
5765
5766 bool
5767 linux_process_target::supports_qxfer_siginfo ()
5768 {
5769 return true;
5770 }
5771
5772 int
5773 linux_process_target::qxfer_siginfo (const char *annex,
5774 unsigned char *readbuf,
5775 unsigned const char *writebuf,
5776 CORE_ADDR offset, int len)
5777 {
5778 int pid;
5779 siginfo_t siginfo;
5780 gdb_byte inf_siginfo[sizeof (siginfo_t)];
5781
5782 if (current_thread == NULL)
5783 return -1;
5784
5785 pid = lwpid_of (current_thread);
5786
5787 threads_debug_printf ("%s siginfo for lwp %d.",
5788 readbuf != NULL ? "Reading" : "Writing",
5789 pid);
5790
5791 if (offset >= sizeof (siginfo))
5792 return -1;
5793
5794 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5795 return -1;
5796
5797 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5798 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5799 inferior with a 64-bit GDBSERVER should look the same as debugging it
5800 with a 32-bit GDBSERVER, we need to convert it. */
5801 siginfo_fixup (&siginfo, inf_siginfo, 0);
5802
5803 if (offset + len > sizeof (siginfo))
5804 len = sizeof (siginfo) - offset;
5805
5806 if (readbuf != NULL)
5807 memcpy (readbuf, inf_siginfo + offset, len);
5808 else
5809 {
5810 memcpy (inf_siginfo + offset, writebuf, len);
5811
5812 /* Convert back to ptrace layout before flushing it out. */
5813 siginfo_fixup (&siginfo, inf_siginfo, 1);
5814
5815 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5816 return -1;
5817 }
5818
5819 return len;
5820 }
5821
5822 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
5823    it lets us notice when children change state; and it acts as the
5824    handler for the sigsuspend in my_waitpid.  */
5825
5826 static void
5827 sigchld_handler (int signo)
5828 {
5829 int old_errno = errno;
5830
5831 if (debug_threads)
5832 {
5833 do
5834 {
5835 /* Use the async signal safe debug function. */
5836 if (debug_write ("sigchld_handler\n",
5837 sizeof ("sigchld_handler\n") - 1) < 0)
5838 break; /* just ignore */
5839 } while (0);
5840 }
5841
5842 if (target_is_async_p ())
5843 async_file_mark (); /* trigger a linux_wait */
5844
5845 errno = old_errno;
5846 }
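
/* An illustrative note (assuming POSIX signal semantics): only
   async-signal-safe functions may be called from the handler above,
   which is why it uses the raw debug_write wrapper rather than stdio,
   and why it saves and restores errno around work that may clobber it:

     void handler (int signo)
     {
       int saved = errno;         // write may overwrite errno
       write (fd, "+", 1);        // async-signal-safe wake-up
       errno = saved;
     }
*/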
5847
5848 bool
5849 linux_process_target::supports_non_stop ()
5850 {
5851 return true;
5852 }
5853
5854 bool
5855 linux_process_target::async (bool enable)
5856 {
5857 bool previous = target_is_async_p ();
5858
5859 threads_debug_printf ("async (%d), previous=%d",
5860 enable, previous);
5861
5862 if (previous != enable)
5863 {
5864 sigset_t mask;
5865 sigemptyset (&mask);
5866 sigaddset (&mask, SIGCHLD);
5867
5868 gdb_sigmask (SIG_BLOCK, &mask, NULL);
5869
5870 if (enable)
5871 {
5872 if (!linux_event_pipe.open_pipe ())
5873 {
5874 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
5875
5876 warning ("creating event pipe failed.");
5877 return previous;
5878 }
5879
5880 /* Register the event loop handler. */
5881 add_file_handler (linux_event_pipe.event_fd (),
5882 handle_target_event, NULL,
5883 "linux-low");
5884
5885 /* Always trigger a linux_wait. */
5886 async_file_mark ();
5887 }
5888 else
5889 {
5890 delete_file_handler (linux_event_pipe.event_fd ());
5891
5892 linux_event_pipe.close_pipe ();
5893 }
5894
5895 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
5896 }
5897
5898 return previous;
5899 }
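
/* A minimal sketch of the pattern above (the classic "self-pipe
   trick"), assuming a hypothetical standalone event loop: the signal
   handler writes a byte into a pipe, and the loop blocks on the read
   end, waking exactly when a SIGCHLD has arrived.

     int fds[2];
     pipe (fds);                        // linux_event_pipe equivalent
     // in sigchld_handler: write (fds[1], "+", 1);
     // event loop: poll/select on fds[0]; when readable, drain the
     // pipe and call handle_target_event to run linux_wait.
*/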
5900
5901 int
5902 linux_process_target::start_non_stop (bool nonstop)
5903 {
5904 /* Register or unregister from event-loop accordingly. */
5905 target_async (nonstop);
5906
5907 if (target_is_async_p () != (nonstop != false))
5908 return -1;
5909
5910 return 0;
5911 }
5912
5913 bool
5914 linux_process_target::supports_multi_process ()
5915 {
5916 return true;
5917 }
5918
5919 /* Check if fork events are supported. */
5920
5921 bool
5922 linux_process_target::supports_fork_events ()
5923 {
5924 return true;
5925 }
5926
5927 /* Check if vfork events are supported. */
5928
5929 bool
5930 linux_process_target::supports_vfork_events ()
5931 {
5932 return true;
5933 }
5934
5935 /* Check if exec events are supported. */
5936
5937 bool
5938 linux_process_target::supports_exec_events ()
5939 {
5940 return true;
5941 }
5942
5943 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
5944 ptrace flags for all inferiors. This is in case the new GDB connection
5945 doesn't support the same set of events that the previous one did. */
5946
5947 void
5948 linux_process_target::handle_new_gdb_connection ()
5949 {
5950 /* Request that all the lwps reset their ptrace options. */
5951 for_each_thread ([] (thread_info *thread)
5952 {
5953 struct lwp_info *lwp = get_thread_lwp (thread);
5954
5955 if (!lwp->stopped)
5956 {
5957 /* Stop the lwp so we can modify its ptrace options. */
5958 lwp->must_set_ptrace_flags = 1;
5959 linux_stop_lwp (lwp);
5960 }
5961 else
5962 {
5963 /* Already stopped; go ahead and set the ptrace options. */
5964 struct process_info *proc = find_process_pid (pid_of (thread));
5965 int options = linux_low_ptrace_options (proc->attached);
5966
5967 linux_enable_event_reporting (lwpid_of (thread), options);
5968 lwp->must_set_ptrace_flags = 0;
5969 }
5970 });
5971 }
5972
5973 int
5974 linux_process_target::handle_monitor_command (char *mon)
5975 {
5976 #ifdef USE_THREAD_DB
5977 return thread_db_handle_monitor_command (mon);
5978 #else
5979 return 0;
5980 #endif
5981 }
5982
5983 int
5984 linux_process_target::core_of_thread (ptid_t ptid)
5985 {
5986 return linux_common_core_of_thread (ptid);
5987 }
5988
5989 bool
5990 linux_process_target::supports_disable_randomization ()
5991 {
5992 return true;
5993 }
5994
5995 bool
5996 linux_process_target::supports_agent ()
5997 {
5998 return true;
5999 }
6000
6001 bool
6002 linux_process_target::supports_range_stepping ()
6003 {
6004 if (supports_software_single_step ())
6005 return true;
6006
6007 return low_supports_range_stepping ();
6008 }
6009
6010 bool
6011 linux_process_target::low_supports_range_stepping ()
6012 {
6013 return false;
6014 }
6015
6016 bool
6017 linux_process_target::supports_pid_to_exec_file ()
6018 {
6019 return true;
6020 }
6021
6022 const char *
6023 linux_process_target::pid_to_exec_file (int pid)
6024 {
6025 return linux_proc_pid_to_exec_file (pid);
6026 }
6027
6028 bool
6029 linux_process_target::supports_multifs ()
6030 {
6031 return true;
6032 }
6033
6034 int
6035 linux_process_target::multifs_open (int pid, const char *filename,
6036 int flags, mode_t mode)
6037 {
6038 return linux_mntns_open_cloexec (pid, filename, flags, mode);
6039 }
6040
6041 int
6042 linux_process_target::multifs_unlink (int pid, const char *filename)
6043 {
6044 return linux_mntns_unlink (pid, filename);
6045 }
6046
6047 ssize_t
6048 linux_process_target::multifs_readlink (int pid, const char *filename,
6049 char *buf, size_t bufsiz)
6050 {
6051 return linux_mntns_readlink (pid, filename, buf, bufsiz);
6052 }
6053
6054 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6055 struct target_loadseg
6056 {
6057 /* Core address to which the segment is mapped. */
6058 Elf32_Addr addr;
6059 /* VMA recorded in the program header. */
6060 Elf32_Addr p_vaddr;
6061 /* Size of this segment in memory. */
6062 Elf32_Word p_memsz;
6063 };
6064
6065 # if defined PT_GETDSBT
6066 struct target_loadmap
6067 {
6068 /* Protocol version number, must be zero. */
6069 Elf32_Word version;
6070 /* Pointer to the DSBT table, its size, and the DSBT index. */
6071 unsigned *dsbt_table;
6072 unsigned dsbt_size, dsbt_index;
6073 /* Number of segments in this map. */
6074 Elf32_Word nsegs;
6075 /* The actual memory map. */
6076 struct target_loadseg segs[/*nsegs*/];
6077 };
6078 # define LINUX_LOADMAP PT_GETDSBT
6079 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6080 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6081 # else
6082 struct target_loadmap
6083 {
6084 /* Protocol version number, must be zero. */
6085 Elf32_Half version;
6086 /* Number of segments in this map. */
6087 Elf32_Half nsegs;
6088 /* The actual memory map. */
6089 struct target_loadseg segs[/*nsegs*/];
6090 };
6091 # define LINUX_LOADMAP PTRACE_GETFDPIC
6092 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6093 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6094 # endif
6095
6096 bool
6097 linux_process_target::supports_read_loadmap ()
6098 {
6099 return true;
6100 }
6101
6102 int
6103 linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
6104 unsigned char *myaddr, unsigned int len)
6105 {
6106 int pid = lwpid_of (current_thread);
6107 int addr = -1;
6108 struct target_loadmap *data = NULL;
6109 unsigned int actual_length, copy_length;
6110
6111 if (strcmp (annex, "exec") == 0)
6112 addr = (int) LINUX_LOADMAP_EXEC;
6113 else if (strcmp (annex, "interp") == 0)
6114 addr = (int) LINUX_LOADMAP_INTERP;
6115 else
6116 return -1;
6117
6118 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6119 return -1;
6120
6121 if (data == NULL)
6122 return -1;
6123
6124 actual_length = sizeof (struct target_loadmap)
6125 + sizeof (struct target_loadseg) * data->nsegs;
6126
6127 if (offset < 0 || offset > actual_length)
6128 return -1;
6129
6130 copy_length = actual_length - offset < len ? actual_length - offset : len;
6131 memcpy (myaddr, (char *) data + offset, copy_length);
6132 return copy_length;
6133 }
6134 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6135
6136 bool
6137 linux_process_target::supports_catch_syscall ()
6138 {
6139 return low_supports_catch_syscall ();
6140 }
6141
6142 bool
6143 linux_process_target::low_supports_catch_syscall ()
6144 {
6145 return false;
6146 }
6147
6148 CORE_ADDR
6149 linux_process_target::read_pc (regcache *regcache)
6150 {
6151 if (!low_supports_breakpoints ())
6152 return 0;
6153
6154 return low_get_pc (regcache);
6155 }
6156
6157 void
6158 linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
6159 {
6160 gdb_assert (low_supports_breakpoints ());
6161
6162 low_set_pc (regcache, pc);
6163 }
6164
6165 bool
6166 linux_process_target::supports_thread_stopped ()
6167 {
6168 return true;
6169 }
6170
6171 bool
6172 linux_process_target::thread_stopped (thread_info *thread)
6173 {
6174 return get_thread_lwp (thread)->stopped;
6175 }
6176
6177 /* This exposes stop-all-threads functionality to other modules. */
6178
6179 void
6180 linux_process_target::pause_all (bool freeze)
6181 {
6182 stop_all_lwps (freeze, NULL);
6183 }
6184
6185 /* This exposes unstop-all-threads functionality to other gdbserver
6186 modules. */
6187
6188 void
6189 linux_process_target::unpause_all (bool unfreeze)
6190 {
6191 unstop_all_lwps (unfreeze, NULL);
6192 }
6193
6194 int
6195 linux_process_target::prepare_to_access_memory ()
6196 {
6197 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6198 running LWP. */
6199 if (non_stop)
6200 target_pause_all (true);
6201 return 0;
6202 }
6203
6204 void
6205 linux_process_target::done_accessing_memory ()
6206 {
6207 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6208 running LWP. */
6209 if (non_stop)
6210 target_unpause_all (true);
6211 }
6212
6213 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6214
6215 static int
6216 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6217 CORE_ADDR *phdr_memaddr, int *num_phdr)
6218 {
6219 char filename[PATH_MAX];
6220 int fd;
6221 const int auxv_size = is_elf64
6222 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6223 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6224
6225 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6226
6227 fd = open (filename, O_RDONLY);
6228 if (fd < 0)
6229 return 1;
6230
6231 *phdr_memaddr = 0;
6232 *num_phdr = 0;
6233 while (read (fd, buf, auxv_size) == auxv_size
6234 && (*phdr_memaddr == 0 || *num_phdr == 0))
6235 {
6236 if (is_elf64)
6237 {
6238 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6239
6240 switch (aux->a_type)
6241 {
6242 case AT_PHDR:
6243 *phdr_memaddr = aux->a_un.a_val;
6244 break;
6245 case AT_PHNUM:
6246 *num_phdr = aux->a_un.a_val;
6247 break;
6248 }
6249 }
6250 else
6251 {
6252 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6253
6254 switch (aux->a_type)
6255 {
6256 case AT_PHDR:
6257 *phdr_memaddr = aux->a_un.a_val;
6258 break;
6259 case AT_PHNUM:
6260 *num_phdr = aux->a_un.a_val;
6261 break;
6262 }
6263 }
6264 }
6265
6266 close (fd);
6267
6268 if (*phdr_memaddr == 0 || *num_phdr == 0)
6269 {
6270 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6271 "phdr_memaddr = %ld, phdr_num = %d",
6272 (long) *phdr_memaddr, *num_phdr);
6273 return 2;
6274 }
6275
6276 return 0;
6277 }
6278
6279 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6280
6281 static CORE_ADDR
6282 get_dynamic (const int pid, const int is_elf64)
6283 {
6284 CORE_ADDR phdr_memaddr, relocation;
6285 int num_phdr, i;
6286 unsigned char *phdr_buf;
6287 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6288
6289 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6290 return 0;
6291
6292 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6293 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6294
6295 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6296 return 0;
6297
6298 /* Compute relocation: it is expected to be 0 for "regular" executables,
6299 non-zero for PIE ones. */
6300 relocation = -1;
6301 for (i = 0; relocation == -1 && i < num_phdr; i++)
6302 if (is_elf64)
6303 {
6304 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6305
6306 if (p->p_type == PT_PHDR)
6307 relocation = phdr_memaddr - p->p_vaddr;
6308 }
6309 else
6310 {
6311 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6312
6313 if (p->p_type == PT_PHDR)
6314 relocation = phdr_memaddr - p->p_vaddr;
6315 }
6316
6317 if (relocation == -1)
6318 {
6319       /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately,
6320          all real-world executables, including PIE executables, always have
6321          PT_PHDR present.  PT_PHDR is not present in some shared libraries or
6322          in fpc (Free Pascal 2.4) binaries, but neither of those needs or
6323          presents DT_DEBUG anyway (fpc binaries are statically linked).
6324
6325          Therefore, if DT_DEBUG exists, PT_PHDR is always present as well.
6326
6327          GDB could also find RELOCATION from AT_ENTRY - e_entry.  */
6328
6329 return 0;
6330 }
6331
6332 for (i = 0; i < num_phdr; i++)
6333 {
6334 if (is_elf64)
6335 {
6336 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6337
6338 if (p->p_type == PT_DYNAMIC)
6339 return p->p_vaddr + relocation;
6340 }
6341 else
6342 {
6343 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6344
6345 if (p->p_type == PT_DYNAMIC)
6346 return p->p_vaddr + relocation;
6347 }
6348 }
6349
6350 return 0;
6351 }
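
/* A worked example (illustrative addresses) for a PIE loaded at
   0x555555554000 whose PT_PHDR records p_vaddr = 0x40: the auxv yields
   AT_PHDR = 0x555555554040, so relocation = 0x555555554040 - 0x40
   = 0x555555554000, and a PT_DYNAMIC with p_vaddr = 0x2d78 resolves to
   &_DYNAMIC = 0x555555556d78.  For a non-PIE executable the load
   address equals the link-time address and relocation is 0.  */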
6352
6353 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6354 can be 0 if the inferior does not yet have the library list initialized.
6355 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6356 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6357
6358 static CORE_ADDR
6359 get_r_debug (const int pid, const int is_elf64)
6360 {
6361 CORE_ADDR dynamic_memaddr;
6362 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6363 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6364 CORE_ADDR map = -1;
6365
6366 dynamic_memaddr = get_dynamic (pid, is_elf64);
6367 if (dynamic_memaddr == 0)
6368 return map;
6369
6370 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6371 {
6372 if (is_elf64)
6373 {
6374 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6375 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6376 union
6377 {
6378 Elf64_Xword map;
6379 unsigned char buf[sizeof (Elf64_Xword)];
6380 }
6381 rld_map;
6382 #endif
6383 #ifdef DT_MIPS_RLD_MAP
6384 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6385 {
6386 if (linux_read_memory (dyn->d_un.d_val,
6387 rld_map.buf, sizeof (rld_map.buf)) == 0)
6388 return rld_map.map;
6389 else
6390 break;
6391 }
6392 #endif /* DT_MIPS_RLD_MAP */
6393 #ifdef DT_MIPS_RLD_MAP_REL
6394 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6395 {
6396 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6397 rld_map.buf, sizeof (rld_map.buf)) == 0)
6398 return rld_map.map;
6399 else
6400 break;
6401 }
6402 #endif /* DT_MIPS_RLD_MAP_REL */
6403
6404 if (dyn->d_tag == DT_DEBUG && map == -1)
6405 map = dyn->d_un.d_val;
6406
6407 if (dyn->d_tag == DT_NULL)
6408 break;
6409 }
6410 else
6411 {
6412 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6413 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6414 union
6415 {
6416 Elf32_Word map;
6417 unsigned char buf[sizeof (Elf32_Word)];
6418 }
6419 rld_map;
6420 #endif
6421 #ifdef DT_MIPS_RLD_MAP
6422 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6423 {
6424 if (linux_read_memory (dyn->d_un.d_val,
6425 rld_map.buf, sizeof (rld_map.buf)) == 0)
6426 return rld_map.map;
6427 else
6428 break;
6429 }
6430 #endif /* DT_MIPS_RLD_MAP */
6431 #ifdef DT_MIPS_RLD_MAP_REL
6432 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6433 {
6434 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6435 rld_map.buf, sizeof (rld_map.buf)) == 0)
6436 return rld_map.map;
6437 else
6438 break;
6439 }
6440 #endif /* DT_MIPS_RLD_MAP_REL */
6441
6442 if (dyn->d_tag == DT_DEBUG && map == -1)
6443 map = dyn->d_un.d_val;
6444
6445 if (dyn->d_tag == DT_NULL)
6446 break;
6447 }
6448
6449 dynamic_memaddr += dyn_size;
6450 }
6451
6452 return map;
6453 }
6454
6455 /* Read one pointer from MEMADDR in the inferior. */
6456
6457 static int
6458 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6459 {
6460 int ret;
6461
6462   /* Go through a union so this works on either big or little endian
6463      hosts, when the inferior's pointer size is smaller than the size
6464      of CORE_ADDR.  It is assumed that the inferior's endianness is the
6465      same as the superior's.  */
6466 union
6467 {
6468 CORE_ADDR core_addr;
6469 unsigned int ui;
6470 unsigned char uc;
6471 } addr;
6472
6473 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6474 if (ret == 0)
6475 {
6476 if (ptr_size == sizeof (CORE_ADDR))
6477 *ptr = addr.core_addr;
6478 else if (ptr_size == sizeof (unsigned int))
6479 *ptr = addr.ui;
6480 else
6481 gdb_assert_not_reached ("unhandled pointer size");
6482 }
6483 return ret;
6484 }
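
/* A worked illustration of the union trick, assuming a little-endian
   host with an 8-byte CORE_ADDR reading a 4-byte inferior pointer
   0x08048000: linux_read_memory deposits the four bytes at the start
   of ADDR, so ADDR.UI yields 0x08048000.  On a big-endian host those
   same bytes would be the high-order bytes of ADDR.CORE_ADDR, which is
   why the value must be read back through the member whose size
   matches PTR_SIZE.  */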
6485
6486 bool
6487 linux_process_target::supports_qxfer_libraries_svr4 ()
6488 {
6489 return true;
6490 }
6491
6492 struct link_map_offsets
6493 {
6494 /* Offset and size of r_debug.r_version. */
6495 int r_version_offset;
6496
6497 /* Offset and size of r_debug.r_map. */
6498 int r_map_offset;
6499
6500 /* Offset to l_addr field in struct link_map. */
6501 int l_addr_offset;
6502
6503 /* Offset to l_name field in struct link_map. */
6504 int l_name_offset;
6505
6506 /* Offset to l_ld field in struct link_map. */
6507 int l_ld_offset;
6508
6509 /* Offset to l_next field in struct link_map. */
6510 int l_next_offset;
6511
6512 /* Offset to l_prev field in struct link_map. */
6513 int l_prev_offset;
6514 };
6515
6516 /* Construct qXfer:libraries-svr4:read reply. */
6517
6518 int
6519 linux_process_target::qxfer_libraries_svr4 (const char *annex,
6520 unsigned char *readbuf,
6521 unsigned const char *writebuf,
6522 CORE_ADDR offset, int len)
6523 {
6524 struct process_info_private *const priv = current_process ()->priv;
6525 char filename[PATH_MAX];
6526 int pid, is_elf64;
6527
6528 static const struct link_map_offsets lmo_32bit_offsets =
6529 {
6530 0, /* r_version offset. */
6531 4, /* r_debug.r_map offset. */
6532 0, /* l_addr offset in link_map. */
6533 4, /* l_name offset in link_map. */
6534 8, /* l_ld offset in link_map. */
6535 12, /* l_next offset in link_map. */
6536 16 /* l_prev offset in link_map. */
6537 };
6538
6539 static const struct link_map_offsets lmo_64bit_offsets =
6540 {
6541 0, /* r_version offset. */
6542 8, /* r_debug.r_map offset. */
6543 0, /* l_addr offset in link_map. */
6544 8, /* l_name offset in link_map. */
6545 16, /* l_ld offset in link_map. */
6546 24, /* l_next offset in link_map. */
6547 32 /* l_prev offset in link_map. */
6548 };
6549 const struct link_map_offsets *lmo;
6550 unsigned int machine;
6551 int ptr_size;
6552 CORE_ADDR lm_addr = 0, lm_prev = 0;
6553 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6554 int header_done = 0;
6555
6556 if (writebuf != NULL)
6557 return -2;
6558 if (readbuf == NULL)
6559 return -1;
6560
6561 pid = lwpid_of (current_thread);
6562 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6563 is_elf64 = elf_64_file_p (filename, &machine);
6564 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6565 ptr_size = is_elf64 ? 8 : 4;
6566
6567 while (annex[0] != '\0')
6568 {
6569 const char *sep;
6570 CORE_ADDR *addrp;
6571 int name_len;
6572
6573 sep = strchr (annex, '=');
6574 if (sep == NULL)
6575 break;
6576
6577 name_len = sep - annex;
6578 if (name_len == 5 && startswith (annex, "start"))
6579 addrp = &lm_addr;
6580 else if (name_len == 4 && startswith (annex, "prev"))
6581 addrp = &lm_prev;
6582 else
6583 {
6584 annex = strchr (sep, ';');
6585 if (annex == NULL)
6586 break;
6587 annex++;
6588 continue;
6589 }
6590
6591 annex = decode_address_to_semicolon (addrp, sep + 1);
6592 }
6593
6594 if (lm_addr == 0)
6595 {
6596 int r_version = 0;
6597
6598 if (priv->r_debug == 0)
6599 priv->r_debug = get_r_debug (pid, is_elf64);
6600
6601       /* We failed to find DT_DEBUG.  That situation will not change
6602          for this inferior - do not retry it.  Report it to GDB as
6603          E01; see the GDB solib-svr4.c side for the reasons.  */
6604 if (priv->r_debug == (CORE_ADDR) -1)
6605 return -1;
6606
6607 if (priv->r_debug != 0)
6608 {
6609 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6610 (unsigned char *) &r_version,
6611 sizeof (r_version)) != 0
6612 || r_version < 1)
6613 {
6614 warning ("unexpected r_debug version %d", r_version);
6615 }
6616 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6617 &lm_addr, ptr_size) != 0)
6618 {
6619 warning ("unable to read r_map from 0x%lx",
6620 (long) priv->r_debug + lmo->r_map_offset);
6621 }
6622 }
6623 }
6624
6625 std::string document = "<library-list-svr4 version=\"1.0\"";
6626
6627 while (lm_addr
6628 && read_one_ptr (lm_addr + lmo->l_name_offset,
6629 &l_name, ptr_size) == 0
6630 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6631 &l_addr, ptr_size) == 0
6632 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6633 &l_ld, ptr_size) == 0
6634 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6635 &l_prev, ptr_size) == 0
6636 && read_one_ptr (lm_addr + lmo->l_next_offset,
6637 &l_next, ptr_size) == 0)
6638 {
6639 unsigned char libname[PATH_MAX];
6640
6641 if (lm_prev != l_prev)
6642 {
6643 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6644 (long) lm_prev, (long) l_prev);
6645 break;
6646 }
6647
6648       /* Ignore the first entry even if it has a valid name, as it
6649          corresponds to the main executable.  The first entry should not be
6650          skipped if the dynamic loader was loaded late by a static executable
6651          (see the solib-svr4.c parameter ignore_first).  But in such a case
6652          the main executable does not have PT_DYNAMIC present, and this
6653          function would already have exited above due to a failed get_r_debug.  */
6654 if (lm_prev == 0)
6655 string_appendf (document, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6656 else
6657 {
6658 /* Not checking for error because reading may stop before
6659 we've got PATH_MAX worth of characters. */
6660 libname[0] = '\0';
6661 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6662 libname[sizeof (libname) - 1] = '\0';
6663 if (libname[0] != '\0')
6664 {
6665 if (!header_done)
6666 {
6667 /* Terminate `<library-list-svr4'. */
6668 document += '>';
6669 header_done = 1;
6670 }
6671
6672 string_appendf (document, "<library name=\"");
6673 xml_escape_text_append (&document, (char *) libname);
6674 string_appendf (document, "\" lm=\"0x%lx\" "
6675 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6676 (unsigned long) lm_addr, (unsigned long) l_addr,
6677 (unsigned long) l_ld);
6678 }
6679 }
6680
6681 lm_prev = lm_addr;
6682 lm_addr = l_next;
6683 }
6684
6685 if (!header_done)
6686 {
6687 /* Empty list; terminate `<library-list-svr4'. */
6688 document += "/>";
6689 }
6690 else
6691 document += "</library-list-svr4>";
6692
6693 int document_len = document.length ();
6694 if (offset < document_len)
6695 document_len -= offset;
6696 else
6697 document_len = 0;
6698 if (len > document_len)
6699 len = document_len;
6700
6701 memcpy (readbuf, document.data () + offset, len);
6702
6703 return len;
6704 }
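
/* For illustration (hypothetical values), a reply built above for a
   process with a single shared library looks like:

     <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
     <library name="/lib64/libc.so.6" lm="0x7ffff7fc4000"
              l_addr="0x7ffff7dc0000" l_ld="0x7ffff7fbeb60"/>
     </library-list-svr4>
*/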
6705
6706 #ifdef HAVE_LINUX_BTRACE
6707
6708 btrace_target_info *
6709 linux_process_target::enable_btrace (thread_info *tp,
6710 const btrace_config *conf)
6711 {
6712 return linux_enable_btrace (tp->id, conf);
6713 }
6714
6715 /* See to_disable_btrace target method. */
6716
6717 int
6718 linux_process_target::disable_btrace (btrace_target_info *tinfo)
6719 {
6720 enum btrace_error err;
6721
6722 err = linux_disable_btrace (tinfo);
6723 return (err == BTRACE_ERR_NONE ? 0 : -1);
6724 }
6725
6726 /* Encode an Intel Processor Trace configuration. */
6727
6728 static void
6729 linux_low_encode_pt_config (struct buffer *buffer,
6730 const struct btrace_data_pt_config *config)
6731 {
6732 buffer_grow_str (buffer, "<pt-config>\n");
6733
6734 switch (config->cpu.vendor)
6735 {
6736 case CV_INTEL:
6737 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6738 "model=\"%u\" stepping=\"%u\"/>\n",
6739 config->cpu.family, config->cpu.model,
6740 config->cpu.stepping);
6741 break;
6742
6743 default:
6744 break;
6745 }
6746
6747 buffer_grow_str (buffer, "</pt-config>\n");
6748 }
6749
6750 /* Encode a raw buffer. */
6751
6752 static void
6753 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
6754 unsigned int size)
6755 {
6756 if (size == 0)
6757 return;
6758
6759 /* We use hex encoding - see gdbsupport/rsp-low.h. */
6760 buffer_grow_str (buffer, "<raw>\n");
6761
6762 while (size-- > 0)
6763 {
6764 char elem[2];
6765
6766 elem[0] = tohex ((*data >> 4) & 0xf);
6767 elem[1] = tohex (*data++ & 0xf);
6768
6769 buffer_grow (buffer, elem, 2);
6770 }
6771
6772 buffer_grow_str (buffer, "</raw>\n");
6773 }
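
/* A small worked example (assuming the rsp-low tohex mapping to
   lowercase hex digits): the byte 0xde is emitted high nibble first as
   the two characters "de", so a 3-byte buffer { 0xde, 0xad, 0x01 }
   becomes the text "dead01" inside the <raw> element.  */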
6774
6775 /* See to_read_btrace target method. */
6776
6777 int
6778 linux_process_target::read_btrace (btrace_target_info *tinfo,
6779 buffer *buffer,
6780 enum btrace_read_type type)
6781 {
6782 struct btrace_data btrace;
6783 enum btrace_error err;
6784
6785 err = linux_read_btrace (&btrace, tinfo, type);
6786 if (err != BTRACE_ERR_NONE)
6787 {
6788 if (err == BTRACE_ERR_OVERFLOW)
6789 buffer_grow_str0 (buffer, "E.Overflow.");
6790 else
6791 buffer_grow_str0 (buffer, "E.Generic Error.");
6792
6793 return -1;
6794 }
6795
6796 switch (btrace.format)
6797 {
6798 case BTRACE_FORMAT_NONE:
6799 buffer_grow_str0 (buffer, "E.No Trace.");
6800 return -1;
6801
6802 case BTRACE_FORMAT_BTS:
6803 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6804 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6805
6806 for (const btrace_block &block : *btrace.variant.bts.blocks)
6807 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6808 paddress (block.begin), paddress (block.end));
6809
6810 buffer_grow_str0 (buffer, "</btrace>\n");
6811 break;
6812
6813 case BTRACE_FORMAT_PT:
6814 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6815 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6816 buffer_grow_str (buffer, "<pt>\n");
6817
6818 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
6819
6820 linux_low_encode_raw (buffer, btrace.variant.pt.data,
6821 btrace.variant.pt.size);
6822
6823 buffer_grow_str (buffer, "</pt>\n");
6824 buffer_grow_str0 (buffer, "</btrace>\n");
6825 break;
6826
6827 default:
6828 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
6829 return -1;
6830 }
6831
6832 return 0;
6833 }
6834
6835 /* See to_btrace_conf target method. */
6836
6837 int
6838 linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
6839 buffer *buffer)
6840 {
6841 const struct btrace_config *conf;
6842
6843 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
6844 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
6845
6846 conf = linux_btrace_conf (tinfo);
6847 if (conf != NULL)
6848 {
6849 switch (conf->format)
6850 {
6851 case BTRACE_FORMAT_NONE:
6852 break;
6853
6854 case BTRACE_FORMAT_BTS:
6855 buffer_xml_printf (buffer, "<bts");
6856 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
6857 buffer_xml_printf (buffer, " />\n");
6858 break;
6859
6860 case BTRACE_FORMAT_PT:
6861 buffer_xml_printf (buffer, "<pt");
6862 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
6863 buffer_xml_printf (buffer, "/>\n");
6864 break;
6865 }
6866 }
6867
6868 buffer_grow_str0 (buffer, "</btrace-conf>\n");
6869 return 0;
6870 }
6871 #endif /* HAVE_LINUX_BTRACE */
6872
6873 /* See nat/linux-nat.h. */
6874
6875 ptid_t
6876 current_lwp_ptid (void)
6877 {
6878 return ptid_of (current_thread);
6879 }
6880
6881 const char *
6882 linux_process_target::thread_name (ptid_t thread)
6883 {
6884 return linux_proc_tid_get_name (thread);
6885 }
6886
6887 #if USE_THREAD_DB
6888 bool
6889 linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
6890 int *handle_len)
6891 {
6892 return thread_db_thread_handle (ptid, handle, handle_len);
6893 }
6894 #endif
6895
6896 thread_info *
6897 linux_process_target::thread_pending_parent (thread_info *thread)
6898 {
6899 lwp_info *parent = get_thread_lwp (thread)->pending_parent ();
6900
6901 if (parent == nullptr)
6902 return nullptr;
6903
6904 return get_lwp_thread (parent);
6905 }
6906
6907 thread_info *
6908 linux_process_target::thread_pending_child (thread_info *thread)
6909 {
6910 lwp_info *child = get_thread_lwp (thread)->pending_child ();
6911
6912 if (child == nullptr)
6913 return nullptr;
6914
6915 return get_lwp_thread (child);
6916 }
6917
6918 /* Default implementation of linux_target_ops method "set_pc" for
6919 32-bit pc register which is literally named "pc". */
6920
6921 void
6922 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
6923 {
6924 uint32_t newpc = pc;
6925
6926 supply_register_by_name (regcache, "pc", &newpc);
6927 }
6928
6929 /* Default implementation of linux_target_ops method "get_pc" for
6930 32-bit pc register which is literally named "pc". */
6931
6932 CORE_ADDR
6933 linux_get_pc_32bit (struct regcache *regcache)
6934 {
6935 uint32_t pc;
6936
6937 collect_register_by_name (regcache, "pc", &pc);
6938 threads_debug_printf ("stop pc is 0x%" PRIx32, pc);
6939 return pc;
6940 }
6941
6942 /* Default implementation of linux_target_ops method "set_pc" for
6943 64-bit pc register which is literally named "pc". */
6944
6945 void
6946 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
6947 {
6948 uint64_t newpc = pc;
6949
6950 supply_register_by_name (regcache, "pc", &newpc);
6951 }
6952
6953 /* Default implementation of linux_target_ops method "get_pc" for
6954 64-bit pc register which is literally named "pc". */
6955
6956 CORE_ADDR
6957 linux_get_pc_64bit (struct regcache *regcache)
6958 {
6959 uint64_t pc;
6960
6961 collect_register_by_name (regcache, "pc", &pc);
6962 threads_debug_printf ("stop pc is 0x%" PRIx64, pc);
6963 return pc;
6964 }
6965
6966 /* See linux-low.h. */
6967
6968 int
6969 linux_get_auxv (int wordsize, CORE_ADDR match, CORE_ADDR *valp)
6970 {
6971 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
6972 int offset = 0;
6973
6974 gdb_assert (wordsize == 4 || wordsize == 8);
6975
6976 while (the_target->read_auxv (offset, data, 2 * wordsize) == 2 * wordsize)
6977 {
6978 if (wordsize == 4)
6979 {
6980 uint32_t *data_p = (uint32_t *) data;
6981 if (data_p[0] == match)
6982 {
6983 *valp = data_p[1];
6984 return 1;
6985 }
6986 }
6987 else
6988 {
6989 uint64_t *data_p = (uint64_t *) data;
6990 if (data_p[0] == match)
6991 {
6992 *valp = data_p[1];
6993 return 1;
6994 }
6995 }
6996
6997 offset += 2 * wordsize;
6998 }
6999
7000 return 0;
7001 }
7002
7003 /* See linux-low.h. */
7004
7005 CORE_ADDR
7006 linux_get_hwcap (int wordsize)
7007 {
7008 CORE_ADDR hwcap = 0;
7009 linux_get_auxv (wordsize, AT_HWCAP, &hwcap);
7010 return hwcap;
7011 }
7012
7013 /* See linux-low.h. */
7014
7015 CORE_ADDR
7016 linux_get_hwcap2 (int wordsize)
7017 {
7018 CORE_ADDR hwcap2 = 0;
7019 linux_get_auxv (wordsize, AT_HWCAP2, &hwcap2);
7020 return hwcap2;
7021 }
7022
7023 #ifdef HAVE_LINUX_REGSETS
7024 void
7025 initialize_regsets_info (struct regsets_info *info)
7026 {
7027 for (info->num_regsets = 0;
7028 info->regsets[info->num_regsets].size >= 0;
7029 info->num_regsets++)
7030 ;
7031 }
7032 #endif
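
/* A minimal sketch (hypothetical arch file) of the sentinel convention
   counted above: each architecture's regsets table ends with an entry
   whose size field is -1, conventionally spelled NULL_REGSET.

     static struct regset_info hypothetical_regsets[] = {
       { PTRACE_GETREGS, PTRACE_SETREGS, 0,
         sizeof (struct user_regs_struct), GENERAL_REGS,
         hypothetical_fill_gregset, hypothetical_store_gregset },
       NULL_REGSET        // terminator: size field is -1
     };
*/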
7033
7034 void
7035 initialize_low (void)
7036 {
7037 struct sigaction sigchld_action;
7038
7039 memset (&sigchld_action, 0, sizeof (sigchld_action));
7040 set_target_ops (the_linux_target);
7041
7042 linux_ptrace_init_warnings ();
7043 linux_proc_init_warnings ();
7044
7045 sigchld_action.sa_handler = sigchld_handler;
7046 sigemptyset (&sigchld_action.sa_mask);
7047 sigchld_action.sa_flags = SA_RESTART;
7048 sigaction (SIGCHLD, &sigchld_action, NULL);
7049
7050 initialize_low_arch ();
7051
7052 linux_check_ptrace_features ();
7053 }